Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                   |   21
-rw-r--r--  drivers/crypto/Makefile                  |    4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c     |   13
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h  |    2
-rw-r--r--  drivers/crypto/geode-aes.c               |   38
-rw-r--r--  drivers/crypto/hifn_795x.c               |   22
-rw-r--r--  drivers/crypto/mv_cesa.c                 |  702
-rw-r--r--  drivers/crypto/mv_cesa.h                 |   40
-rw-r--r--  drivers/crypto/n2_asm.S                  |   95
-rw-r--r--  drivers/crypto/n2_core.c                 | 2267
-rw-r--r--  drivers/crypto/n2_core.h                 |  231
-rw-r--r--  drivers/crypto/omap-sham.c               | 1258
-rw-r--r--  drivers/crypto/talitos.c                 |  715
-rw-r--r--  drivers/crypto/talitos.h                 |   12
14 files changed, 5250 insertions(+), 170 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1ca..fbf94cf496f0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
 
 	  Currently the driver supports AES in ECB and CBC mode without DMA.
 
+config CRYPTO_DEV_NIAGARA2
+	tristate "Niagara2 Stream Processing Unit driver"
+	select CRYPTO_ALGAPI
+	depends on SPARC64
+	help
+	  Each core of a Niagara2 processor contains a Stream
+	  Processing Unit, which itself contains several cryptographic
+	  sub-units. One set provides the Modular Arithmetic Unit,
+	  used for SSL offload. The other set provides the Cipher
+	  Group, which can perform encryption, decryption, hashing,
+	  checksumming, and raw copies.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
 	help
 	  This option allows you to have support for AMCC crypto acceleration.
 
+config CRYPTO_DEV_OMAP_SHAM
+	tristate "Support for OMAP SHA1/MD5 hw accelerator"
+	depends on ARCH_OMAP2 || ARCH_OMAP3
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	help
+	  OMAP processors have SHA1/MD5 hw accelerator. Select this if you
+	  want to use the OMAP module for SHA1/MD5 algorithms.
+
 endif # CRYPTO_HW
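For reference, a .config fragment enabling the two new drivers might look like the following (illustrative only; per the entries above, CRYPTO_DEV_NIAGARA2 depends on SPARC64 and CRYPTO_DEV_OMAP_SHAM on ARCH_OMAP2/3):

	CONFIG_CRYPTO_DEV_NIAGARA2=m
	CONFIG_CRYPTO_DEV_OMAP_SHAM=m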
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f942..6dbbe00c4524 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-objs := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
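The n2_crypto-objs line above uses kbuild's composite-object convention: the listed objects are linked into n2_crypto.o, which becomes n2_crypto.ko when the option is set to m. The same pattern in generic form (hypothetical names):

	obj-$(CONFIG_FOO) += foo.o		# produces foo.ko when CONFIG_FOO=m
	foo-objs := foo_core.o foo_asm.o	# objects linked into foo.o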
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3aa..983530ba04a7 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1158,7 +1158,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
 	struct device *dev = &ofdev->dev;
 	struct crypto4xx_core_device *core_dev;
 
-	rc = of_address_to_resource(ofdev->node, 0, &res);
+	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
 	if (rc)
 		return -ENODEV;
 
@@ -1215,13 +1215,13 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
 					       (unsigned long) dev);
 
 	/* Register for Crypto isr, Crypto Engine IRQ */
-	core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
+	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
 	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
 			 core_dev->dev->name, dev);
 	if (rc)
 		goto err_request_irq;
 
-	core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
+	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
 	if (!core_dev->dev->ce_base) {
 		dev_err(dev, "failed to of_iomap\n");
 		goto err_iomap;
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = {
 };
 
 static struct of_platform_driver crypto4xx_driver = {
-	.name = "crypto4xx",
-	.match_table = crypto4xx_match,
+	.driver = {
+		.name = "crypto4xx",
+		.owner = THIS_MODULE,
+		.of_match_table = crypto4xx_match,
+	},
 	.probe = crypto4xx_probe,
 	.remove = crypto4xx_remove,
 };
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 7d4edb002619..5f5fbc0716ff 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -113,7 +113,7 @@
 #define CRYPTO4XX_PRNG_LFSR_H			0x00070034
 
 /**
- * Initilize CRYPTO ENGINE registers, and memory bases.
+ * Initialize CRYPTO ENGINE registers, and memory bases.
 */
 #define PPC4XX_PDR_POLL				0x3ff
 #define PPC4XX_OUTPUT_THRESHOLD			2
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba691..219d09cbb0d1 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
 #include "geode-aes.h"
 
 /* Static structures */
 
-static void __iomem * _iobase;
+static void __iomem *_iobase;
 static spinlock_t lock;
 
 /* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
 _writefield(u32 offset, void *value)
 {
 	int i;
-	for(i = 0; i < 4; i++)
+	for (i = 0; i < 4; i++)
 		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
 }
 
@@ -39,7 +39,7 @@ static inline void
 _readfield(u32 offset, void *value)
 {
 	int i;
-	for(i = 0; i < 4; i++)
+	for (i = 0; i < 4; i++)
 		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
 }
 
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
 	do {
 		status = ioread32(_iobase + AES_INTR_REG);
 		cpu_relax();
-	} while(!(status & AES_INTRA_PENDING) && --counter);
+	} while (!(status & AES_INTRA_PENDING) && --counter);
 
 	/* Clear the event */
 	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt(desc, &walk);
 	op->iv = walk.iv;
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt(desc, &walk);
 	op->iv = walk.iv;
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	while((nbytes = walk.nbytes)) {
+	while ((nbytes = walk.nbytes)) {
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
 geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	int ret;
-
-	if ((ret = pci_enable_device(dev)))
+	ret = pci_enable_device(dev);
+	if (ret)
 		return ret;
 
-	if ((ret = pci_request_regions(dev, "geode-aes")))
+	ret = pci_request_regions(dev, "geode-aes");
+	if (ret)
 		goto eenable;
 
 	_iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Clear any pending activity */
 	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
 
-	if ((ret = crypto_register_alg(&geode_alg)))
+	ret = crypto_register_alg(&geode_alg);
+	if (ret)
 		goto eiomap;
 
-	if ((ret = crypto_register_alg(&geode_ecb_alg)))
+	ret = crypto_register_alg(&geode_ecb_alg);
+	if (ret)
 		goto ealg;
 
-	if ((ret = crypto_register_alg(&geode_cbc_alg)))
+	ret = crypto_register_alg(&geode_cbc_alg);
+	if (ret)
 		goto eecb;
 
 	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
@@ -569,7 +573,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 }
 
 static struct pci_device_id geode_aes_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
 	{ 0, }
 };
 
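PCI_VDEVICE(vendor, device) is shorthand that fills in the vendor/device pair and wildcards the subsystem IDs, so the new entry is roughly equivalent to:

	/* Roughly what PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES) expands to: */
	{ .vendor = PCI_VENDOR_ID_AMD, .device = PCI_DEVICE_ID_AMD_LX_AES,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }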
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b1713b54..e449ac5627a5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@ struct hifn_crypto_alg
 
 #define ASYNC_FLAGS_MISALIGNED	(1<<0)
 
-struct ablkcipher_walk
+struct hifn_cipher_walk
 {
 	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
 	u32			flags;
@@ -657,7 +657,7 @@ struct hifn_request_context
 	u8 *iv;
 	unsigned int ivsize;
 	u8 op, type, mode, unused;
-	struct ablkcipher_walk walk;
+	struct hifn_cipher_walk walk;
 };
 
 #define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
 	return 0;
 }
 
-static int ablkcipher_walk_init(struct ablkcipher_walk *w,
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
 		int num, gfp_t gfp_flags)
 {
 	int i;
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
 	return i;
 }
 
-static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
 {
 	int i;
 
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
 	return idx;
 }
 
-static int ablkcipher_walk(struct ablkcipher_request *req,
-		struct ablkcipher_walk *w)
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+		struct hifn_cipher_walk *w)
 {
 	struct scatterlist *dst, *t;
 	unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 	}
 
 	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-		err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+		err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
 		if (err < 0)
 			return err;
 	}
 
-	sg_num = ablkcipher_walk(req, &rctx->walk);
+	sg_num = hifn_cipher_walk(req, &rctx->walk);
 	if (sg_num < 0) {
 		err = sg_num;
 		goto err_out_exit;
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
 			kunmap_atomic(saddr, KM_SOFTIRQ0);
 		}
 
-		ablkcipher_walk_exit(&rctx->walk);
+		hifn_cipher_walk_exit(&rctx->walk);
 	}
 
 	req->base.complete(&req->base, error);
@@ -2018,7 +2018,6 @@ static void hifn_flush(struct hifn_device *dev)
 {
 	unsigned long flags;
 	struct crypto_async_request *async_req;
-	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i;
@@ -2035,7 +2034,6 @@ static void hifn_flush(struct hifn_device *dev)
 
 	spin_lock_irqsave(&dev->lock, flags);
 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
-		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 		spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -2139,7 +2137,6 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 static int hifn_process_queue(struct hifn_device *dev)
 {
 	struct crypto_async_request *async_req, *backlog;
-	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	unsigned long flags;
 	int err = 0;
@@ -2156,7 +2153,6 @@ static int hifn_process_queue(struct hifn_device *dev)
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 
-		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 
 		err = hifn_handle_req(req);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc43..7d279e578df5 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
 
 #include "mv_cesa.h"
+
+#define MV_CESA	"MV-CESA:"
+#define MAX_HW_HASH_SIZE	0xFFFF
+
 /*
  * STM:
  * /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
  * @dst_sg_it:		sg iterator for dst
  * @sg_src_left:	bytes left in src to process (scatter list)
  * @src_start:		offset to add to src start position (scatter list)
- * @crypt_len:		length of current crypt process
+ * @crypt_len:		length of current hw crypt/hash process
+ * @hw_nbytes:		total bytes to process in hw for this request
+ * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:		bytes left dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
- * @total_req_bytes:	total number of bytes processed (request).
+ * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helper are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
 struct req_progress {
 	struct sg_mapping_iter src_sg_it;
 	struct sg_mapping_iter dst_sg_it;
+	void (*complete) (void);
+	void (*process) (int is_first);
 
 	/* src mostly */
 	int sg_src_left;
 	int src_start;
 	int crypt_len;
+	int hw_nbytes;
 	/* dst mostly */
+	int copy_back;
 	int sg_dst_left;
 	int dst_start;
-	int total_req_bytes;
+	int hw_processed_bytes;
 };
 
 struct crypto_priv {
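The two iterators above are the kernel's sg_mapping_iter helpers, which the driver uses to stream a scatterlist through its limited SRAM window. A minimal standalone sketch of that pattern (illustrative names 'win'/'total', error handling elided):

	struct sg_mapping_iter it;
	size_t left = total;			/* bytes still to copy into bounce buffer 'win' */

	sg_miter_start(&it, sgl, nents, SG_MITER_FROM_SG);
	while (left && sg_miter_next(&it)) {	/* maps one sg chunk at a time */
		size_t n = min(left, it.length);

		memcpy(win, it.addr, n);	/* it.addr is valid only until the next call */
		win += n;
		left -= n;
	}
	sg_miter_stop(&it);			/* unmaps and flushes the last chunk */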
@@ -72,10 +84,12 @@ struct crypto_priv {
 	spinlock_t lock;
 	struct crypto_queue queue;
 	enum engine_status eng_st;
-	struct ablkcipher_request *cur_req;
+	struct crypto_async_request *cur_req;
 	struct req_progress p;
 	int max_req_size;
 	int sram_size;
+	int has_sha1;
+	int has_hmac_sha1;
 };
 
 static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
 	int decrypt;
 };
 
+enum hash_op {
+	COP_SHA1,
+	COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+	struct crypto_shash *fallback;
+	struct crypto_shash *base_hash;
+	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+	int count_add;
+	enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+	u64 count;
+	u32 state[SHA1_DIGEST_SIZE / 4];
+	u8 buffer[SHA1_BLOCK_SIZE];
+	int first_hash;		/* marks that we don't have previous state */
+	int last_chunk;		/* marks that this is the 'final' request */
+	int extra_bytes;	/* unprocessed bytes in buffer */
+	enum hash_op op;
+	int count_add;
+	struct scatterlist dummysg;
+};
+
 static void compute_aes_dec_key(struct mv_ctx *ctx)
 {
 	struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
 	return 0;
 }
 
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
 	int ret;
-	void *buf;
+	void *sbuf;
+	int copied = 0;
 
-	if (!cpg->p.sg_src_left) {
-		ret = sg_miter_next(&cpg->p.src_sg_it);
-		BUG_ON(!ret);
-		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
-		cpg->p.src_start = 0;
-	}
-
-	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
-	buf = cpg->p.src_sg_it.addr;
-	buf += cpg->p.src_start;
-
-	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+	while (1) {
+		if (!p->sg_src_left) {
+			ret = sg_miter_next(&p->src_sg_it);
+			BUG_ON(!ret);
+			p->sg_src_left = p->src_sg_it.length;
+			p->src_start = 0;
+		}
+
+		sbuf = p->src_sg_it.addr + p->src_start;
+
+		if (p->sg_src_left <= len - copied) {
+			memcpy(dbuf + copied, sbuf, p->sg_src_left);
+			copied += p->sg_src_left;
+			p->sg_src_left = 0;
+			if (copied >= len)
+				break;
+		} else {
+			int copy_len = len - copied;
+			memcpy(dbuf + copied, sbuf, copy_len);
+			p->src_start += copy_len;
+			p->sg_src_left -= copy_len;
+			break;
+		}
+	}
+}
 
-	cpg->p.sg_src_left -= cpg->p.crypt_len;
-	cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+	struct req_progress *p = &cpg->p;
+	int data_in_sram =
+	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+			data_in_sram - p->crypt_len);
+	p->crypt_len = data_in_sram;
 }
 
 static void mv_process_current_q(int first_block)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
 		break;
 	case COP_AES_CBC:
+	default:
 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
 		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
 			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
 		ENC_P_DST(SRAM_DATA_OUT_START);
 	op.enc_key_p = SRAM_DATA_KEY_P;
 
-	setup_data_in(req);
+	setup_data_in();
 	op.enc_len = cpg->p.crypt_len;
 	memcpy(cpg->sram + SRAM_CONFIG, &op,
 	       sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@
 
 static void mv_crypto_algo_completion(void)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 
+	sg_miter_stop(&cpg->p.src_sg_it);
+	sg_miter_stop(&cpg->p.dst_sg_it);
+
 	if (req_ctx->op != COP_AES_CBC)
 		return ;
 
 	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
 }
 
+static void mv_process_hash_current(int first_block)
+{
+	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+	struct req_progress *p = &cpg->p;
+	struct sec_accel_config op = { 0 };
+	int is_last;
+
+	switch (req_ctx->op) {
+	case COP_SHA1:
+	default:
+		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+		break;
+	case COP_HMAC_SHA1:
+		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+		break;
+	}
+
+	op.mac_src_p =
+	    MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
+								   req_ctx->
+								   count);
+
+	setup_data_in();
+
+	op.mac_digest =
+	    MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+	op.mac_iv =
+	    MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+	    MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+	is_last = req_ctx->last_chunk
+		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
+	if (req_ctx->first_hash) {
+		if (is_last)
+			op.config |= CFG_NOT_FRAG;
+		else
+			op.config |= CFG_FIRST_FRAG;
+
+		req_ctx->first_hash = 0;
+	} else {
+		if (is_last)
+			op.config |= CFG_LAST_FRAG;
+		else
+			op.config |= CFG_MID_FRAG;
+	}
+
+	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+	/* GO */
+	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+	/*
+	 * XXX: add timer if the interrupt does not occur for some mystery
+	 * reason
+	 */
+}
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+					  struct shash_desc *desc)
+{
+	int i;
+	struct sha1_state shash_state;
+
+	shash_state.count = ctx->count + ctx->count_add;
+	for (i = 0; i < 5; i++)
+		shash_state.state[i] = ctx->state[i];
+	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+	return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+	} desc;
+	int rc;
+
+	desc.shash.tfm = tfm_ctx->fallback;
+	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	if (unlikely(req_ctx->first_hash)) {
+		crypto_shash_init(&desc.shash);
+		crypto_shash_update(&desc.shash, req_ctx->buffer,
+				    req_ctx->extra_bytes);
+	} else {
+		/* only SHA1 for now....
+		 */
+		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+		if (rc)
+			goto out;
+	}
+	rc = crypto_shash_final(&desc.shash, req->result);
+out:
+	return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+	if (ctx->extra_bytes)
+		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+	sg_miter_stop(&cpg->p.src_sg_it);
+
+	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+	if (likely(ctx->last_chunk)) {
+		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+			       crypto_ahash_digestsize(crypto_ahash_reqtfm
+						       (req)));
+		} else
+			mv_hash_final_fallback(req);
+	}
+}
+
 static void dequeue_complete_req(void)
 {
-	struct ablkcipher_request *req = cpg->cur_req;
+	struct crypto_async_request *req = cpg->cur_req;
 	void *buf;
 	int ret;
+	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+	if (cpg->p.copy_back) {
+		int need_copy_len = cpg->p.crypt_len;
+		int sram_offset = 0;
+		do {
+			int dst_copy;
+
+			if (!cpg->p.sg_dst_left) {
+				ret = sg_miter_next(&cpg->p.dst_sg_it);
+				BUG_ON(!ret);
+				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+				cpg->p.dst_start = 0;
+			}
 
-	cpg->p.total_req_bytes += cpg->p.crypt_len;
-	do {
-		int dst_copy;
-
-		if (!cpg->p.sg_dst_left) {
-			ret = sg_miter_next(&cpg->p.dst_sg_it);
-			BUG_ON(!ret);
-			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
-			cpg->p.dst_start = 0;
-		}
-
-		buf = cpg->p.dst_sg_it.addr;
-		buf += cpg->p.dst_start;
+			buf = cpg->p.dst_sg_it.addr;
+			buf += cpg->p.dst_start;
 
-		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
 
-		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+			memcpy(buf,
+			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+			       dst_copy);
+			sram_offset += dst_copy;
+			cpg->p.sg_dst_left -= dst_copy;
+			need_copy_len -= dst_copy;
+			cpg->p.dst_start += dst_copy;
+		} while (need_copy_len > 0);
+	}
 
-		cpg->p.sg_dst_left -= dst_copy;
-		cpg->p.crypt_len -= dst_copy;
-		cpg->p.dst_start += dst_copy;
-	} while (cpg->p.crypt_len > 0);
+	cpg->p.crypt_len = 0;
 
 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
-	if (cpg->p.total_req_bytes < req->nbytes) {
+	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
 		/* process next scatter list entry */
 		cpg->eng_st = ENGINE_BUSY;
-		mv_process_current_q(0);
+		cpg->p.process(0);
 	} else {
-		sg_miter_stop(&cpg->p.src_sg_it);
-		sg_miter_stop(&cpg->p.dst_sg_it);
-		mv_crypto_algo_completion();
+		cpg->p.complete();
 		cpg->eng_st = ENGINE_IDLE;
-		req->base.complete(&req->base, 0);
+		local_bh_disable();
+		req->complete(req, 0);
+		local_bh_enable();
 	}
 }
 
 static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 {
 	int i = 0;
-
-	do {
-		total_bytes -= sl[i].length;
-		i++;
-
-	} while (total_bytes > 0);
+	size_t cur_len;
+
+	while (1) {
+		cur_len = sl[i].length;
+		++i;
+		if (total_bytes > cur_len)
+			total_bytes -= cur_len;
+		else
+			break;
+	}
 
 	return i;
 }
 
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
 {
+	struct req_progress *p = &cpg->p;
 	int num_sgs;
 
-	cpg->cur_req = req;
-	memset(&cpg->p, 0, sizeof(struct req_progress));
+	cpg->cur_req = &req->base;
+	memset(p, 0, sizeof(struct req_progress));
+	p->hw_nbytes = req->nbytes;
+	p->complete = mv_crypto_algo_completion;
+	p->process = mv_process_current_q;
+	p->copy_back = 1;
 
 	num_sgs = count_sgs(req->src, req->nbytes);
-	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
 
 	num_sgs = count_sgs(req->dst, req->nbytes);
-	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
 	mv_process_current_q(1);
 }
 
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+	struct req_progress *p = &cpg->p;
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	int num_sgs, hw_bytes, old_extra_bytes, rc;
+	cpg->cur_req = &req->base;
+	memset(p, 0, sizeof(struct req_progress));
+	hw_bytes = req->nbytes + ctx->extra_bytes;
+	old_extra_bytes = ctx->extra_bytes;
+
+	if (unlikely(ctx->extra_bytes)) {
+		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+		       ctx->extra_bytes);
+		p->crypt_len = ctx->extra_bytes;
+	}
+
+	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+	if (unlikely(!ctx->first_hash)) {
+		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+	}
+
+	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+	if (ctx->extra_bytes != 0
+	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+		hw_bytes -= ctx->extra_bytes;
+	else
+		ctx->extra_bytes = 0;
+
+	num_sgs = count_sgs(req->src, req->nbytes);
+	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+	if (hw_bytes) {
+		p->hw_nbytes = hw_bytes;
+		p->complete = mv_hash_algo_completion;
+		p->process = mv_process_hash_current;
+
+		mv_process_hash_current(1);
+	} else {
+		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+				ctx->extra_bytes - old_extra_bytes);
+		sg_miter_stop(&p->src_sg_it);
+		if (ctx->last_chunk)
+			rc = mv_hash_final_fallback(req);
+		else
+			rc = 0;
+		cpg->eng_st = ENGINE_IDLE;
+		local_bh_disable();
+		req->base.complete(&req->base, rc);
+		local_bh_enable();
+	}
+}
+
 static int queue_manag(void *data)
 {
 	cpg->eng_st = ENGINE_IDLE;
 	do {
-		struct ablkcipher_request *req;
 		struct crypto_async_request *async_req = NULL;
 		struct crypto_async_request *backlog;
 
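A subtlety in mv_process_hash_current() above is the fragment mode: the engine must be told whether a chunk is a whole message or the first, a middle, or the last fragment of one, so it knows whether to start from fresh initial values and whether to apply final padding. The added logic condenses to:

	/* Condensed from mv_process_hash_current() above. */
	if (req_ctx->first_hash)
		op.config |= is_last ? CFG_NOT_FRAG : CFG_FIRST_FRAG;
	else
		op.config |= is_last ? CFG_LAST_FRAG : CFG_MID_FRAG;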
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
 		}
 
 		if (async_req) {
-			req = container_of(async_req,
-					struct ablkcipher_request, base);
-			mv_enqueue_new_req(req);
+			if (async_req->tfm->__crt_alg->cra_type !=
+			    &crypto_ahash_type) {
+				struct ablkcipher_request *req =
+				    container_of(async_req,
+						 struct ablkcipher_request,
+						 base);
+				mv_start_new_crypt_req(req);
+			} else {
+				struct ahash_request *req =
+				    ahash_request_cast(async_req);
+				mv_start_new_hash_req(req);
+			}
 			async_req = NULL;
 		}
 
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
 	return 0;
 }
 
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&cpg->lock, flags);
-	ret = ablkcipher_enqueue_request(&cpg->queue, req);
+	ret = crypto_enqueue_request(&cpg->queue, req);
 	spin_unlock_irqrestore(&cpg->lock, flags);
 	wake_up_process(cpg->queue_th);
 	return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
 	req_ctx->op = COP_AES_ECB;
 	req_ctx->decrypt = 0;
 
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
 	req_ctx->decrypt = 1;
 
 	compute_aes_dec_key(ctx);
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
 	req_ctx->op = COP_AES_CBC;
 	req_ctx->decrypt = 0;
 
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
 	req_ctx->decrypt = 1;
 
 	compute_aes_dec_key(ctx);
-	return mv_handle_req(req);
+	return mv_handle_req(&req->base);
 }
 
 static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+				 int is_last, unsigned int req_len,
+				 int count_add)
+{
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->op = op;
+	ctx->count = req_len;
+	ctx->first_hash = 1;
+	ctx->last_chunk = is_last;
+	ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+				   unsigned req_len)
+{
+	ctx->last_chunk = is_last;
+	ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+			     tfm_ctx->count_add);
+	return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+	if (!req->nbytes)
+		return 0;
+
+	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+	/* dummy buffer of 4 bytes */
+	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+	/* I think I'm allowed to do that... */
+	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+	mv_update_hash_req_ctx(ctx, 1, 0);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+	if (!req->nbytes)
+		return mv_hash_final(req);
+
+	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+			     req->nbytes, tfm_ctx->count_add);
+	return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+			     const void *ostate)
+{
+	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+	int i;
+	for (i = 0; i < 5; i++) {
+		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+	}
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
+			  unsigned int keylen)
+{
+	int rc;
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+	int bs, ds, ss;
+
+	if (!ctx->base_hash)
+		return 0;
+
+	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+	if (rc)
+		return rc;
+
+	/* Can't see a way to extract the ipad/opad from the fallback tfm
+	   so I'm basically copying code from the hmac module */
+	bs = crypto_shash_blocksize(ctx->base_hash);
+	ds = crypto_shash_digestsize(ctx->base_hash);
+	ss = crypto_shash_statesize(ctx->base_hash);
+
+	{
+		struct {
+			struct shash_desc shash;
+			char ctx[crypto_shash_descsize(ctx->base_hash)];
+		} desc;
+		unsigned int i;
+		char ipad[ss];
+		char opad[ss];
+
+		desc.shash.tfm = ctx->base_hash;
+		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+		    CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		if (keylen > bs) {
+			int err;
+
+			err =
+			    crypto_shash_digest(&desc.shash, key, keylen, ipad);
+			if (err)
+				return err;
+
+			keylen = ds;
+		} else
+			memcpy(ipad, key, keylen);
+
+		memset(ipad + keylen, 0, bs - keylen);
+		memcpy(opad, ipad, bs);
+
+		for (i = 0; i < bs; i++) {
+			ipad[i] ^= 0x36;
+			opad[i] ^= 0x5c;
+		}
+
+		rc = crypto_shash_init(&desc.shash) ? :
+		    crypto_shash_update(&desc.shash, ipad, bs) ? :
+		    crypto_shash_export(&desc.shash, ipad) ? :
+		    crypto_shash_init(&desc.shash) ? :
+		    crypto_shash_update(&desc.shash, opad, bs) ? :
+		    crypto_shash_export(&desc.shash, opad);
+
+		if (rc == 0)
+			mv_hash_init_ivs(ctx, ipad, opad);
+
+		return rc;
+	}
+}
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+			    enum hash_op op, int count_add)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_shash *fallback_tfm = NULL;
+	struct crypto_shash *base_hash = NULL;
+	int err = -ENOMEM;
+
+	ctx->op = op;
+	ctx->count_add = count_add;
+
+	/* Allocate a fallback and abort if it failed. */
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		printk(KERN_WARNING MV_CESA
+		       "Fallback driver '%s' could not be loaded!\n",
+		       fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+	ctx->fallback = fallback_tfm;
+
+	if (base_hash_name) {
+		/* Allocate a hash to compute the ipad/opad of hmac. */
+		base_hash = crypto_alloc_shash(base_hash_name, 0,
+					       CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(base_hash)) {
+			printk(KERN_WARNING MV_CESA
+			       "Base driver '%s' could not be loaded!\n",
+			       base_hash_name);
+			err = PTR_ERR(fallback_tfm);
+			goto err_bad_base;
+		}
+	}
+	ctx->base_hash = base_hash;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct mv_req_hash_ctx) +
+				 crypto_shash_descsize(ctx->fallback));
+	return 0;
+err_bad_base:
+	crypto_free_shash(fallback_tfm);
+out:
+	return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(ctx->fallback);
+	if (ctx->base_hash)
+		crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
 irqreturn_t crypto_int(int irq, void *priv)
 {
 	u32 val;
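The ipad/opad block in mv_hash_setkey() above is the standard HMAC precomputation; a sketch of what the chained init/update/export calls achieve (K' is the key zero-padded to the block size bs):

	/*
	 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
	 *
	 * The code hashes one block of (K' ^ 0x36...36) and one block of
	 * (K' ^ 0x5c...5c), exports the two partial H states, and
	 * mv_hash_init_ivs() hands them to the engine as the inner/outer
	 * IVs, so the raw key is never needed per request.
	 */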
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
 	},
 };
 
+struct ahash_alg mv_sha1_alg = {
+	.init = mv_hash_init,
+	.update = mv_hash_update,
+	.final = mv_hash_final,
+	.finup = mv_hash_finup,
+	.digest = mv_hash_digest,
+	.halg = {
+		 .digestsize = SHA1_DIGEST_SIZE,
+		 .base = {
+			  .cra_name = "sha1",
+			  .cra_driver_name = "mv-sha1",
+			  .cra_priority = 300,
+			  .cra_flags =
+			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			  .cra_blocksize = SHA1_BLOCK_SIZE,
+			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+			  .cra_init = mv_cra_hash_sha1_init,
+			  .cra_exit = mv_cra_hash_exit,
+			  .cra_module = THIS_MODULE,
+			  }
+		 }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+	.init = mv_hash_init,
+	.update = mv_hash_update,
+	.final = mv_hash_final,
+	.finup = mv_hash_finup,
+	.digest = mv_hash_digest,
+	.setkey = mv_hash_setkey,
+	.halg = {
+		 .digestsize = SHA1_DIGEST_SIZE,
+		 .base = {
+			  .cra_name = "hmac(sha1)",
+			  .cra_driver_name = "mv-hmac-sha1",
+			  .cra_priority = 300,
+			  .cra_flags =
+			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			  .cra_blocksize = SHA1_BLOCK_SIZE,
+			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+			  .cra_init = mv_cra_hash_hmac_sha1_init,
+			  .cra_exit = mv_cra_hash_exit,
+			  .cra_module = THIS_MODULE,
+			  }
+		 }
+};
+
 static int mv_probe(struct platform_device *pdev)
 {
 	struct crypto_priv *cp;
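Once registered, "mv-sha1" and "mv-hmac-sha1" are reached through the generic ahash API like any other async hash. A hedged kernel-side usage sketch (done_cb is a hypothetical completion callback; a real caller must wait for it before freeing the request when -EINPROGRESS is returned):

	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	struct scatterlist sg;
	u8 digest[SHA1_DIGEST_SIZE];

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done_cb, NULL);
	ahash_request_set_crypt(req, &sg, digest, len);
	crypto_ahash_digest(req);	/* 0 on sync completion, -EINPROGRESS if queued */
	ahash_request_free(req);
	crypto_free_ahash(tfm);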
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
 	int ret;
 
 	if (cpg) {
-		printk(KERN_ERR "Second crypto dev?\n");
+		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
 		return -EEXIST;
 	}
 
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev)
 
 	spin_lock_init(&cp->lock);
 	crypto_init_queue(&cp->queue, 50);
-	cp->reg = ioremap(res->start, res->end - res->start + 1);
+	cp->reg = ioremap(res->start, resource_size(res));
 	if (!cp->reg) {
 		ret = -ENOMEM;
 		goto err;
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev)
 		ret = -ENXIO;
 		goto err_unmap_reg;
 	}
-	cp->sram_size = res->end - res->start + 1;
+	cp->sram_size = resource_size(res);
 	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
 	cp->sram = ioremap(res->start, cp->sram_size);
 	if (!cp->sram) {
@@ -528,30 +1055,45 @@ static int mv_probe(struct platform_device *pdev) | |||
528 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | 1055 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); |
529 | if (IS_ERR(cp->queue_th)) { | 1056 | if (IS_ERR(cp->queue_th)) { |
530 | ret = PTR_ERR(cp->queue_th); | 1057 | ret = PTR_ERR(cp->queue_th); |
531 | goto err_thread; | 1058 | goto err_unmap_sram; |
532 | } | 1059 | } |
533 | 1060 | ||
534 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | 1061 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), |
535 | cp); | 1062 | cp); |
536 | if (ret) | 1063 | if (ret) |
537 | goto err_unmap_sram; | 1064 | goto err_thread; |
538 | 1065 | ||
539 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | 1066 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); |
540 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | 1067 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); |
541 | 1068 | ||
542 | ret = crypto_register_alg(&mv_aes_alg_ecb); | 1069 | ret = crypto_register_alg(&mv_aes_alg_ecb); |
543 | if (ret) | 1070 | if (ret) |
544 | goto err_reg; | 1071 | goto err_irq; |
545 | 1072 | ||
546 | ret = crypto_register_alg(&mv_aes_alg_cbc); | 1073 | ret = crypto_register_alg(&mv_aes_alg_cbc); |
547 | if (ret) | 1074 | if (ret) |
548 | goto err_unreg_ecb; | 1075 | goto err_unreg_ecb; |
1076 | |||
1077 | ret = crypto_register_ahash(&mv_sha1_alg); | ||
1078 | if (ret == 0) | ||
1079 | cpg->has_sha1 = 1; | ||
1080 | else | ||
1081 | printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); | ||
1082 | |||
1083 | ret = crypto_register_ahash(&mv_hmac_sha1_alg); | ||
1084 | if (ret == 0) { | ||
1085 | cpg->has_hmac_sha1 = 1; | ||
1086 | } else { | ||
1087 | printk(KERN_WARNING MV_CESA | ||
1088 | "Could not register hmac-sha1 driver\n"); | ||
1089 | } | ||
1090 | |||
549 | return 0; | 1091 | return 0; |
550 | err_unreg_ecb: | 1092 | err_unreg_ecb: |
551 | crypto_unregister_alg(&mv_aes_alg_ecb); | 1093 | crypto_unregister_alg(&mv_aes_alg_ecb); |
552 | err_thread: | 1094 | err_irq: |
553 | free_irq(irq, cp); | 1095 | free_irq(irq, cp); |
554 | err_reg: | 1096 | err_thread: |
555 | kthread_stop(cp->queue_th); | 1097 | kthread_stop(cp->queue_th); |
556 | err_unmap_sram: | 1098 | err_unmap_sram: |
557 | iounmap(cp->sram); | 1099 | iounmap(cp->sram); |
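The relabelled goto targets in this hunk restore the kernel's standard unwind discipline: the IRQ is now requested only after the worker thread is running, and each failure point jumps to a label that tears down exactly what was set up before it, in reverse order. A minimal sketch of the pattern (acquire_*/release_* are hypothetical stand-ins for ioremap()/kthread_run()/request_irq() and their counterparts, not driver functions):

static int acquire_a(void), acquire_b(void), acquire_c(void);
static void release_a(void), release_b(void);

static int example_probe(void)
{
        int ret;

        ret = acquire_a();              /* e.g. ioremap()     */
        if (ret)
                goto err;
        ret = acquire_b();              /* e.g. kthread_run() */
        if (ret)
                goto err_release_a;
        ret = acquire_c();              /* e.g. request_irq() */
        if (ret)
                goto err_release_b;
        return 0;

err_release_b:                          /* undo in reverse order */
        release_b();
err_release_a:
        release_a();
err:
        return ret;
}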
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev) | |||
570 | 1112 | ||
571 | crypto_unregister_alg(&mv_aes_alg_ecb); | 1113 | crypto_unregister_alg(&mv_aes_alg_ecb); |
572 | crypto_unregister_alg(&mv_aes_alg_cbc); | 1114 | crypto_unregister_alg(&mv_aes_alg_cbc); |
1115 | if (cp->has_sha1) | ||
1116 | crypto_unregister_ahash(&mv_sha1_alg); | ||
1117 | if (cp->has_hmac_sha1) | ||
1118 | crypto_unregister_ahash(&mv_hmac_sha1_alg); | ||
573 | kthread_stop(cp->queue_th); | 1119 | kthread_stop(cp->queue_th); |
574 | free_irq(cp->irq, cp); | 1120 | free_irq(cp->irq, cp); |
575 | memset(cp->sram, 0, cp->sram_size); | 1121 | memset(cp->sram, 0, cp->sram_size); |
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h index c3e25d3bb171..08fcb1116d90 100644 --- a/drivers/crypto/mv_cesa.h +++ b/drivers/crypto/mv_cesa.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef __MV_CRYPTO_H__ | 1 | #ifndef __MV_CRYPTO_H__ |
2 | 2 | ||
3 | #define DIGEST_INITIAL_VAL_A 0xdd00 | 3 | #define DIGEST_INITIAL_VAL_A 0xdd00 |
4 | #define DIGEST_INITIAL_VAL_B 0xdd04 | ||
5 | #define DIGEST_INITIAL_VAL_C 0xdd08 | ||
6 | #define DIGEST_INITIAL_VAL_D 0xdd0c | ||
7 | #define DIGEST_INITIAL_VAL_E 0xdd10 | ||
4 | #define DES_CMD_REG 0xdd58 | 8 | #define DES_CMD_REG 0xdd58 |
5 | 9 | ||
6 | #define SEC_ACCEL_CMD 0xde00 | 10 | #define SEC_ACCEL_CMD 0xde00 |
@@ -70,6 +74,10 @@ struct sec_accel_config { | |||
70 | #define CFG_AES_LEN_128 (0 << 24) | 74 | #define CFG_AES_LEN_128 (0 << 24) |
71 | #define CFG_AES_LEN_192 (1 << 24) | 75 | #define CFG_AES_LEN_192 (1 << 24) |
72 | #define CFG_AES_LEN_256 (2 << 24) | 76 | #define CFG_AES_LEN_256 (2 << 24) |
77 | #define CFG_NOT_FRAG (0 << 30) | ||
78 | #define CFG_FIRST_FRAG (1 << 30) | ||
79 | #define CFG_LAST_FRAG (2 << 30) | ||
80 | #define CFG_MID_FRAG (3 << 30) | ||
73 | 81 | ||
74 | u32 enc_p; | 82 | u32 enc_p; |
75 | #define ENC_P_SRC(x) (x) | 83 | #define ENC_P_SRC(x) (x) |
@@ -90,7 +98,11 @@ struct sec_accel_config { | |||
90 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) | 98 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) |
91 | 99 | ||
92 | u32 mac_digest; | 100 | u32 mac_digest; |
101 | #define MAC_DIGEST_P(x) (x) | ||
102 | #define MAC_FRAG_LEN(x) ((x) << 16) | ||
93 | u32 mac_iv; | 103 | u32 mac_iv; |
104 | #define MAC_INNER_IV_P(x) (x) | ||
105 | #define MAC_OUTER_IV_P(x) ((x) << 16) | ||
94 | }__attribute__ ((packed)); | 106 | }__attribute__ ((packed)); |
95 | /* | 107 | /* |
96 | * /-----------\ 0 | 108 | * /-----------\ 0 |
@@ -101,19 +113,37 @@ struct sec_accel_config { | |||
101 | * | IV IN | 4 * 4 | 113 | * | IV IN | 4 * 4 |
102 | * |-----------| 0x40 (inplace) | 114 | * |-----------| 0x40 (inplace) |
103 | * | IV BUF | 4 * 4 | 115 | * | IV BUF | 4 * 4 |
104 | * |-----------| 0x50 | 116 | * |-----------| 0x80 |
105 | * | DATA IN | 16 * x (max ->max_req_size) | 117 | * | DATA IN | 16 * x (max ->max_req_size) |
106 | * |-----------| 0x50 (inplace operation) | 118 | * |-----------| 0x80 (inplace operation) |
107 | * | DATA OUT | 16 * x (max ->max_req_size) | 119 | * | DATA OUT | 16 * x (max ->max_req_size) |
108 | * \-----------/ SRAM size | 120 | * \-----------/ SRAM size |
109 | */ | 121 | */ |
122 | |||
123 | /* Hashing memory map: | ||
124 | * /-----------\ 0 | ||
125 | * | ACCEL CFG | 4 * 8 | ||
126 | * |-----------| 0x20 | ||
127 | * | Inner IV | 5 * 4 | ||
128 | * |-----------| 0x34 | ||
129 | * | Outer IV | 5 * 4 | ||
130 | * |-----------| 0x48 | ||
131 | * | Output BUF| 5 * 4 | ||
132 | * |-----------| 0x80 | ||
133 | * | DATA IN | 64 * x (max ->max_req_size) | ||
134 | * \-----------/ SRAM size | ||
135 | */ | ||
110 | #define SRAM_CONFIG 0x00 | 136 | #define SRAM_CONFIG 0x00 |
111 | #define SRAM_DATA_KEY_P 0x20 | 137 | #define SRAM_DATA_KEY_P 0x20 |
112 | #define SRAM_DATA_IV 0x40 | 138 | #define SRAM_DATA_IV 0x40 |
113 | #define SRAM_DATA_IV_BUF 0x40 | 139 | #define SRAM_DATA_IV_BUF 0x40 |
114 | #define SRAM_DATA_IN_START 0x50 | 140 | #define SRAM_DATA_IN_START 0x80 |
115 | #define SRAM_DATA_OUT_START 0x50 | 141 | #define SRAM_DATA_OUT_START 0x80 |
142 | |||
143 | #define SRAM_HMAC_IV_IN 0x20 | ||
144 | #define SRAM_HMAC_IV_OUT 0x34 | ||
145 | #define SRAM_DIGEST_BUF 0x48 | ||
116 | 146 | ||
117 | #define SRAM_CFG_SPACE 0x50 | 147 | #define SRAM_CFG_SPACE 0x80 |
118 | 148 | ||
119 | #endif | 149 | #endif |
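With SRAM_CFG_SPACE grown from 0x50 to 0x80, the HMAC inner/outer IVs and the digest buffer now live below the data region, per the hashing memory map above. A hedged sketch of how a driver could stage SHA1-HMAC state against these offsets (the function and its arguments are illustrative; only the SRAM_* offsets come from this header):

/* Sketch, not driver code: 'sram' is the ioremap()'d CESA SRAM. */
static void stage_hmac_sha1(void __iomem *sram,
                            const u32 *inner_iv, const u32 *outer_iv,
                            const void *data, unsigned int len)
{
        memcpy_toio(sram + SRAM_HMAC_IV_IN,  inner_iv, 5 * 4);
        memcpy_toio(sram + SRAM_HMAC_IV_OUT, outer_iv, 5 * 4);
        memcpy_toio(sram + SRAM_DATA_IN_START, data, len);
        /* after the engine completes, the result can be read back
         * from sram + SRAM_DIGEST_BUF (5 words for SHA1) */
}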
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S new file mode 100644 index 000000000000..f7c793745a1e --- /dev/null +++ b/drivers/crypto/n2_asm.S | |||
@@ -0,0 +1,95 @@ | |||
1 | /* n2_asm.S: Hypervisor calls for NCS support. | ||
2 | * | ||
3 | * Copyright (C) 2009 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/linkage.h> | ||
7 | #include <asm/hypervisor.h> | ||
8 | #include "n2_core.h" | ||
9 | |||
10 | /* o0: queue type | ||
11 | * o1: RA of queue | ||
12 | * o2: num entries in queue | ||
13 | * o3: address of queue handle return | ||
14 | */ | ||
15 | ENTRY(sun4v_ncs_qconf) | ||
16 | mov HV_FAST_NCS_QCONF, %o5 | ||
17 | ta HV_FAST_TRAP | ||
18 | stx %o1, [%o3] | ||
19 | retl | ||
20 | nop | ||
21 | ENDPROC(sun4v_ncs_qconf) | ||
22 | |||
23 | /* %o0: queue handle | ||
24 | * %o1: address of queue type return | ||
25 | * %o2: address of queue base address return | ||
26 | * %o3: address of queue num entries return | ||
27 | */ | ||
28 | ENTRY(sun4v_ncs_qinfo) | ||
29 | mov %o1, %g1 | ||
30 | mov %o2, %g2 | ||
31 | mov %o3, %g3 | ||
32 | mov HV_FAST_NCS_QINFO, %o5 | ||
33 | ta HV_FAST_TRAP | ||
34 | stx %o1, [%g1] | ||
35 | stx %o2, [%g2] | ||
36 | stx %o3, [%g3] | ||
37 | retl | ||
38 | nop | ||
39 | ENDPROC(sun4v_ncs_qinfo) | ||
40 | |||
41 | /* %o0: queue handle | ||
42 | * %o1: address of head offset return | ||
43 | */ | ||
44 | ENTRY(sun4v_ncs_gethead) | ||
45 | mov %o1, %o2 | ||
46 | mov HV_FAST_NCS_GETHEAD, %o5 | ||
47 | ta HV_FAST_TRAP | ||
48 | stx %o1, [%o2] | ||
49 | retl | ||
50 | nop | ||
51 | ENDPROC(sun4v_ncs_gethead) | ||
52 | |||
53 | /* %o0: queue handle | ||
54 | * %o1: address of tail offset return | ||
55 | */ | ||
56 | ENTRY(sun4v_ncs_gettail) | ||
57 | mov %o1, %o2 | ||
58 | mov HV_FAST_NCS_GETTAIL, %o5 | ||
59 | ta HV_FAST_TRAP | ||
60 | stx %o1, [%o2] | ||
61 | retl | ||
62 | nop | ||
63 | ENDPROC(sun4v_ncs_gettail) | ||
64 | |||
65 | /* %o0: queue handle | ||
66 | * %o1: new tail offset | ||
67 | */ | ||
68 | ENTRY(sun4v_ncs_settail) | ||
69 | mov HV_FAST_NCS_SETTAIL, %o5 | ||
70 | ta HV_FAST_TRAP | ||
71 | retl | ||
72 | nop | ||
73 | ENDPROC(sun4v_ncs_settail) | ||
74 | |||
75 | /* %o0: queue handle | ||
76 | * %o1: address of devino return | ||
77 | */ | ||
78 | ENTRY(sun4v_ncs_qhandle_to_devino) | ||
79 | mov %o1, %o2 | ||
80 | mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5 | ||
81 | ta HV_FAST_TRAP | ||
82 | stx %o1, [%o2] | ||
83 | retl | ||
84 | nop | ||
85 | ENDPROC(sun4v_ncs_qhandle_to_devino) | ||
86 | |||
87 | /* %o0: queue handle | ||
88 | * %o1: new head offset | ||
89 | */ | ||
90 | ENTRY(sun4v_ncs_sethead_marker) | ||
91 | mov HV_FAST_NCS_SETHEAD_MARKER, %o5 | ||
92 | ta HV_FAST_TRAP | ||
93 | retl | ||
94 | nop | ||
95 | ENDPROC(sun4v_ncs_sethead_marker) | ||
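Each stub above follows the sun4v fast-trap convention: arguments arrive in %o0-%o3, the service number goes in %o5, `ta HV_FAST_TRAP` enters the hypervisor, and out-parameters are stored through caller-supplied pointers (saved into %g registers first where the trap clobbers the %o registers). From C these look like ordinary functions returning the hypervisor status word; the prototypes below are inferred from the call sites in n2_core.c and presumably live in n2_core.h:

/* Inferred C prototypes (assumption: the real declarations are in n2_core.h). */
extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
                                     unsigned long queue_ra,
                                     unsigned long num_entries,
                                     unsigned long *qhandle);
extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
                                       unsigned long *head);
extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
                                       unsigned long tail);
extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
                                              unsigned long head);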
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c new file mode 100644 index 000000000000..26af2dd5d831 --- /dev/null +++ b/drivers/crypto/n2_core.c | |||
@@ -0,0 +1,2267 @@ | |||
1 | /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. | ||
2 | * | ||
3 | * Copyright (C) 2010 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/of.h> | ||
11 | #include <linux/of_device.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/crypto.h> | ||
16 | #include <crypto/md5.h> | ||
17 | #include <crypto/sha.h> | ||
18 | #include <crypto/aes.h> | ||
19 | #include <crypto/des.h> | ||
20 | #include <linux/mutex.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/sched.h> | ||
23 | |||
24 | #include <crypto/internal/hash.h> | ||
25 | #include <crypto/scatterwalk.h> | ||
26 | #include <crypto/algapi.h> | ||
27 | |||
28 | #include <asm/hypervisor.h> | ||
29 | #include <asm/mdesc.h> | ||
30 | |||
31 | #include "n2_core.h" | ||
32 | |||
33 | #define DRV_MODULE_NAME "n2_crypto" | ||
34 | #define DRV_MODULE_VERSION "0.1" | ||
35 | #define DRV_MODULE_RELDATE "April 29, 2010" | ||
36 | |||
37 | static char version[] __devinitdata = | ||
38 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
39 | |||
40 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
41 | MODULE_DESCRIPTION("Niagara2 Crypto driver"); | ||
42 | MODULE_LICENSE("GPL"); | ||
43 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
44 | |||
45 | #define N2_CRA_PRIORITY 300 | ||
46 | |||
47 | static DEFINE_MUTEX(spu_lock); | ||
48 | |||
49 | struct spu_queue { | ||
50 | cpumask_t sharing; | ||
51 | unsigned long qhandle; | ||
52 | |||
53 | spinlock_t lock; | ||
54 | u8 q_type; | ||
55 | void *q; | ||
56 | unsigned long head; | ||
57 | unsigned long tail; | ||
58 | struct list_head jobs; | ||
59 | |||
60 | unsigned long devino; | ||
61 | |||
62 | char irq_name[32]; | ||
63 | unsigned int irq; | ||
64 | |||
65 | struct list_head list; | ||
66 | }; | ||
67 | |||
68 | static struct spu_queue **cpu_to_cwq; | ||
69 | static struct spu_queue **cpu_to_mau; | ||
70 | |||
71 | static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) | ||
72 | { | ||
73 | if (q->q_type == HV_NCS_QTYPE_MAU) { | ||
74 | off += MAU_ENTRY_SIZE; | ||
75 | if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) | ||
76 | off = 0; | ||
77 | } else { | ||
78 | off += CWQ_ENTRY_SIZE; | ||
79 | if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) | ||
80 | off = 0; | ||
81 | } | ||
82 | return off; | ||
83 | } | ||
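spu_next_offset() advances a byte offset around a fixed-size ring, wrapping to zero at the end; the only per-type difference is the entry size. The same arithmetic as a standalone helper (the parameters are purely for illustration):

/* Sketch: generic ring advance, as spu_next_offset() does per queue type. */
static unsigned long ring_next(unsigned long off,
                               unsigned long entry_size,
                               unsigned long num_entries)
{
        off += entry_size;
        if (off == entry_size * num_entries)
                off = 0;        /* wrap: the queue is circular */
        return off;
}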
84 | |||
85 | struct n2_request_common { | ||
86 | struct list_head entry; | ||
87 | unsigned int offset; | ||
88 | }; | ||
89 | #define OFFSET_NOT_RUNNING (~(unsigned int)0) | ||
90 | |||
91 | /* An async job request records the final tail value it used in | ||
92 | * n2_request_common->offset; we test whether that offset lies in | ||
93 | * the range (old_head, new_head], allowing for queue wraparound. | ||
94 | */ | ||
95 | static inline bool job_finished(struct spu_queue *q, unsigned int offset, | ||
96 | unsigned long old_head, unsigned long new_head) | ||
97 | { | ||
98 | if (old_head <= new_head) { | ||
99 | if (offset > old_head && offset <= new_head) | ||
100 | return true; | ||
101 | } else { | ||
102 | if (offset > old_head || offset <= new_head) | ||
103 | return true; | ||
104 | } | ||
105 | return false; | ||
106 | } | ||
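Because the ring can wrap between two reads of HEAD, membership in (old_head, new_head] needs the two-sided test above. Worked cases on an illustrative 256-byte ring:

/* old_head = 192, new_head = 64 after a wrap:                        */
/*   job_finished(q, 200, 192, 64) -> true  (200 > 192, before wrap)  */
/*   job_finished(q,  32, 192, 64) -> true  ( 32 <= 64, after wrap)   */
/*   job_finished(q, 128, 192, 64) -> false (not yet consumed)        */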
107 | |||
108 | /* When the HEAD marker is unequal to the actual HEAD, we get | ||
109 | * a virtual device INO interrupt. We should process the | ||
110 | * completed CWQ entries and adjust the HEAD marker to clear | ||
111 | * the IRQ. | ||
112 | */ | ||
113 | static irqreturn_t cwq_intr(int irq, void *dev_id) | ||
114 | { | ||
115 | unsigned long off, new_head, hv_ret; | ||
116 | struct spu_queue *q = dev_id; | ||
117 | |||
118 | pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", | ||
119 | smp_processor_id(), q->qhandle); | ||
120 | |||
121 | spin_lock(&q->lock); | ||
122 | |||
123 | hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); | ||
124 | |||
125 | pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", | ||
126 | smp_processor_id(), new_head, hv_ret); | ||
127 | |||
128 | for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { | ||
129 | /* XXX ... XXX */ | ||
130 | } | ||
131 | |||
132 | hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); | ||
133 | if (hv_ret == HV_EOK) | ||
134 | q->head = new_head; | ||
135 | |||
136 | spin_unlock(&q->lock); | ||
137 | |||
138 | return IRQ_HANDLED; | ||
139 | } | ||
140 | |||
141 | static irqreturn_t mau_intr(int irq, void *dev_id) | ||
142 | { | ||
143 | struct spu_queue *q = dev_id; | ||
144 | unsigned long head, hv_ret; | ||
145 | |||
146 | spin_lock(&q->lock); | ||
147 | |||
148 | pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", | ||
149 | smp_processor_id(), q->qhandle); | ||
150 | |||
151 | hv_ret = sun4v_ncs_gethead(q->qhandle, &head); | ||
152 | |||
153 | pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", | ||
154 | smp_processor_id(), head, hv_ret); | ||
155 | |||
156 | sun4v_ncs_sethead_marker(q->qhandle, head); | ||
157 | |||
158 | spin_unlock(&q->lock); | ||
159 | |||
160 | return IRQ_HANDLED; | ||
161 | } | ||
162 | |||
163 | static void *spu_queue_next(struct spu_queue *q, void *cur) | ||
164 | { | ||
165 | return q->q + spu_next_offset(q, cur - q->q); | ||
166 | } | ||
167 | |||
168 | static int spu_queue_num_free(struct spu_queue *q) | ||
169 | { | ||
170 | unsigned long head = q->head; | ||
171 | unsigned long tail = q->tail; | ||
172 | unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); | ||
173 | unsigned long diff; | ||
174 | |||
175 | if (head > tail) | ||
176 | diff = head - tail; | ||
177 | else | ||
178 | diff = (end - tail) + head; | ||
179 | |||
180 | return (diff / CWQ_ENTRY_SIZE) - 1; | ||
181 | } | ||
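spu_queue_num_free() reserves one slot so that head == tail can unambiguously mean "empty" rather than "full". With an illustrative 4-entry CWQ ring of 64-byte entries (end = 256):

/*   head == tail           -> diff = 256 -> 256/64 - 1 = 3 entries free */
/*   head = 64,  tail = 128 -> diff = 192 -> 192/64 - 1 = 2 entries free */
/*   head = 128, tail = 64  -> diff =  64 ->  64/64 - 1 = 0 entries free */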
182 | |||
183 | static void *spu_queue_alloc(struct spu_queue *q, int num_entries) | ||
184 | { | ||
185 | int avail = spu_queue_num_free(q); | ||
186 | |||
187 | if (avail >= num_entries) | ||
188 | return q->q + q->tail; | ||
189 | |||
190 | return NULL; | ||
191 | } | ||
192 | |||
193 | static unsigned long spu_queue_submit(struct spu_queue *q, void *last) | ||
194 | { | ||
195 | unsigned long hv_ret, new_tail; | ||
196 | |||
197 | new_tail = spu_next_offset(q, last - q->q); | ||
198 | |||
199 | hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); | ||
200 | if (hv_ret == HV_EOK) | ||
201 | q->tail = new_tail; | ||
202 | return hv_ret; | ||
203 | } | ||
204 | |||
205 | static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, | ||
206 | int enc_type, int auth_type, | ||
207 | unsigned int hash_len, | ||
208 | bool sfas, bool sob, bool eob, bool encrypt, | ||
209 | int opcode) | ||
210 | { | ||
211 | u64 word = (len - 1) & CONTROL_LEN; | ||
212 | |||
213 | word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); | ||
214 | word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); | ||
215 | word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); | ||
216 | if (sfas) | ||
217 | word |= CONTROL_STORE_FINAL_AUTH_STATE; | ||
218 | if (sob) | ||
219 | word |= CONTROL_START_OF_BLOCK; | ||
220 | if (eob) | ||
221 | word |= CONTROL_END_OF_BLOCK; | ||
222 | if (encrypt) | ||
223 | word |= CONTROL_ENCRYPT; | ||
224 | if (hmac_key_len) | ||
225 | word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; | ||
226 | if (hash_len) | ||
227 | word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; | ||
228 | |||
229 | return word; | ||
230 | } | ||
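control_word_base() packs the 64-bit control word of a CWQ initial entry: the length field holds len - 1, and the opcode, cipher/auth types, and SOB/EOB/etc. flag bits are OR'd in at their shifts. Mirroring the call made for the first descriptor in n2_do_async_digest(), a 512-byte SHA1 auth-only pass would build (values illustrative):

u64 cw = control_word_base(512, 0,              /* length; no HMAC key  */
                           0, AUTH_TYPE_SHA1,   /* no cipher; SHA1 auth */
                           SHA1_DIGEST_SIZE,    /* hash state length    */
                           false, true, false, false, /* only SOB set   */
                           OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
/* cw now carries (512 - 1) in CONTROL_LEN plus the OR'd opcode/flags. */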
231 | |||
232 | #if 0 | ||
233 | static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) | ||
234 | { | ||
235 | if (this_len >= 64 || | ||
236 | qp->head != qp->tail) | ||
237 | return true; | ||
238 | return false; | ||
239 | } | ||
240 | #endif | ||
241 | |||
242 | struct n2_ahash_alg { | ||
243 | struct list_head entry; | ||
244 | const char *hash_zero; | ||
245 | const u32 *hash_init; | ||
246 | u8 hw_op_hashsz; | ||
247 | u8 digest_size; | ||
248 | u8 auth_type; | ||
249 | u8 hmac_type; | ||
250 | struct ahash_alg alg; | ||
251 | }; | ||
252 | |||
253 | static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) | ||
254 | { | ||
255 | struct crypto_alg *alg = tfm->__crt_alg; | ||
256 | struct ahash_alg *ahash_alg; | ||
257 | |||
258 | ahash_alg = container_of(alg, struct ahash_alg, halg.base); | ||
259 | |||
260 | return container_of(ahash_alg, struct n2_ahash_alg, alg); | ||
261 | } | ||
262 | |||
263 | struct n2_hmac_alg { | ||
264 | const char *child_alg; | ||
265 | struct n2_ahash_alg derived; | ||
266 | }; | ||
267 | |||
268 | static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) | ||
269 | { | ||
270 | struct crypto_alg *alg = tfm->__crt_alg; | ||
271 | struct ahash_alg *ahash_alg; | ||
272 | |||
273 | ahash_alg = container_of(alg, struct ahash_alg, halg.base); | ||
274 | |||
275 | return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); | ||
276 | } | ||
277 | |||
278 | struct n2_hash_ctx { | ||
279 | struct crypto_ahash *fallback_tfm; | ||
280 | }; | ||
281 | |||
282 | #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ | ||
283 | |||
284 | struct n2_hmac_ctx { | ||
285 | struct n2_hash_ctx base; | ||
286 | |||
287 | struct crypto_shash *child_shash; | ||
288 | |||
289 | int hash_key_len; | ||
290 | unsigned char hash_key[N2_HASH_KEY_MAX]; | ||
291 | }; | ||
292 | |||
293 | struct n2_hash_req_ctx { | ||
294 | union { | ||
295 | struct md5_state md5; | ||
296 | struct sha1_state sha1; | ||
297 | struct sha256_state sha256; | ||
298 | } u; | ||
299 | |||
300 | struct ahash_request fallback_req; | ||
301 | }; | ||
302 | |||
303 | static int n2_hash_async_init(struct ahash_request *req) | ||
304 | { | ||
305 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
306 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
307 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
308 | |||
309 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
310 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
311 | |||
312 | return crypto_ahash_init(&rctx->fallback_req); | ||
313 | } | ||
314 | |||
315 | static int n2_hash_async_update(struct ahash_request *req) | ||
316 | { | ||
317 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
318 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
319 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
320 | |||
321 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
322 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
323 | rctx->fallback_req.nbytes = req->nbytes; | ||
324 | rctx->fallback_req.src = req->src; | ||
325 | |||
326 | return crypto_ahash_update(&rctx->fallback_req); | ||
327 | } | ||
328 | |||
329 | static int n2_hash_async_final(struct ahash_request *req) | ||
330 | { | ||
331 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
332 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
333 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
334 | |||
335 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
336 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
337 | rctx->fallback_req.result = req->result; | ||
338 | |||
339 | return crypto_ahash_final(&rctx->fallback_req); | ||
340 | } | ||
341 | |||
342 | static int n2_hash_async_finup(struct ahash_request *req) | ||
343 | { | ||
344 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
345 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
346 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
347 | |||
348 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
349 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
350 | rctx->fallback_req.nbytes = req->nbytes; | ||
351 | rctx->fallback_req.src = req->src; | ||
352 | rctx->fallback_req.result = req->result; | ||
353 | |||
354 | return crypto_ahash_finup(&rctx->fallback_req); | ||
355 | } | ||
356 | |||
357 | static int n2_hash_cra_init(struct crypto_tfm *tfm) | ||
358 | { | ||
359 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | ||
360 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
361 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
362 | struct crypto_ahash *fallback_tfm; | ||
363 | int err; | ||
364 | |||
365 | fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, | ||
366 | CRYPTO_ALG_NEED_FALLBACK); | ||
367 | if (IS_ERR(fallback_tfm)) { | ||
368 | pr_warning("Fallback driver '%s' could not be loaded!\n", | ||
369 | fallback_driver_name); | ||
370 | err = PTR_ERR(fallback_tfm); | ||
371 | goto out; | ||
372 | } | ||
373 | |||
374 | crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + | ||
375 | crypto_ahash_reqsize(fallback_tfm))); | ||
376 | |||
377 | ctx->fallback_tfm = fallback_tfm; | ||
378 | return 0; | ||
379 | |||
380 | out: | ||
381 | return err; | ||
382 | } | ||
383 | |||
384 | static void n2_hash_cra_exit(struct crypto_tfm *tfm) | ||
385 | { | ||
386 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
387 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
388 | |||
389 | crypto_free_ahash(ctx->fallback_tfm); | ||
390 | } | ||
391 | |||
392 | static int n2_hmac_cra_init(struct crypto_tfm *tfm) | ||
393 | { | ||
394 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | ||
395 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
396 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); | ||
397 | struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); | ||
398 | struct crypto_ahash *fallback_tfm; | ||
399 | struct crypto_shash *child_shash; | ||
400 | int err; | ||
401 | |||
402 | fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, | ||
403 | CRYPTO_ALG_NEED_FALLBACK); | ||
404 | if (IS_ERR(fallback_tfm)) { | ||
405 | pr_warning("Fallback driver '%s' could not be loaded!\n", | ||
406 | fallback_driver_name); | ||
407 | err = PTR_ERR(fallback_tfm); | ||
408 | goto out; | ||
409 | } | ||
410 | |||
411 | child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); | ||
412 | if (IS_ERR(child_shash)) { | ||
413 | pr_warning("Child shash '%s' could not be loaded!\n", | ||
414 | n2alg->child_alg); | ||
415 | err = PTR_ERR(child_shash); | ||
416 | goto out_free_fallback; | ||
417 | } | ||
418 | |||
419 | crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + | ||
420 | crypto_ahash_reqsize(fallback_tfm))); | ||
421 | |||
422 | ctx->child_shash = child_shash; | ||
423 | ctx->base.fallback_tfm = fallback_tfm; | ||
424 | return 0; | ||
425 | |||
426 | out_free_fallback: | ||
427 | crypto_free_ahash(fallback_tfm); | ||
428 | |||
429 | out: | ||
430 | return err; | ||
431 | } | ||
432 | |||
433 | static void n2_hmac_cra_exit(struct crypto_tfm *tfm) | ||
434 | { | ||
435 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
436 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); | ||
437 | |||
438 | crypto_free_ahash(ctx->base.fallback_tfm); | ||
439 | crypto_free_shash(ctx->child_shash); | ||
440 | } | ||
441 | |||
442 | static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
443 | unsigned int keylen) | ||
444 | { | ||
445 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); | ||
446 | struct crypto_shash *child_shash = ctx->child_shash; | ||
447 | struct crypto_ahash *fallback_tfm; | ||
448 | struct { | ||
449 | struct shash_desc shash; | ||
450 | char ctx[crypto_shash_descsize(child_shash)]; | ||
451 | } desc; | ||
452 | int err, bs, ds; | ||
453 | |||
454 | fallback_tfm = ctx->base.fallback_tfm; | ||
455 | err = crypto_ahash_setkey(fallback_tfm, key, keylen); | ||
456 | if (err) | ||
457 | return err; | ||
458 | |||
459 | desc.shash.tfm = child_shash; | ||
460 | desc.shash.flags = crypto_ahash_get_flags(tfm) & | ||
461 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
462 | |||
463 | bs = crypto_shash_blocksize(child_shash); | ||
464 | ds = crypto_shash_digestsize(child_shash); | ||
465 | BUG_ON(ds > N2_HASH_KEY_MAX); | ||
466 | if (keylen > bs) { | ||
467 | err = crypto_shash_digest(&desc.shash, key, keylen, | ||
468 | ctx->hash_key); | ||
469 | if (err) | ||
470 | return err; | ||
471 | keylen = ds; | ||
472 | } else if (keylen <= N2_HASH_KEY_MAX) | ||
473 | memcpy(ctx->hash_key, key, keylen); | ||
474 | |||
475 | ctx->hash_key_len = keylen; | ||
476 | |||
477 | return err; | ||
478 | } | ||
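This follows the usual HMAC key preparation: a key longer than the block size is first reduced to its digest. The hardware additionally caps keys at N2_HASH_KEY_MAX (32) bytes, so a key that is neither digested nor short enough to copy is simply recorded by length and later routed to the software fallback in n2_hmac_async_digest(). Illustrative outcomes for hmac(sha1), where bs = 64 and ds = 20:

/*   keylen = 100 -> digested down to 20 bytes, handled in hardware     */
/*   keylen =  16 -> copied as-is, handled in hardware                  */
/*   keylen =  48 -> 32 < 48 <= 64: kept at 48 bytes, which exceeds     */
/*                   N2_HASH_KEY_MAX, so digests take the fallback path */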
479 | |||
480 | static unsigned long wait_for_tail(struct spu_queue *qp) | ||
481 | { | ||
482 | unsigned long head, hv_ret; | ||
483 | |||
484 | do { | ||
485 | hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); | ||
486 | if (hv_ret != HV_EOK) { | ||
487 | pr_err("Hypervisor error on gethead\n"); | ||
488 | break; | ||
489 | } | ||
490 | if (head == qp->tail) { | ||
491 | qp->head = head; | ||
492 | break; | ||
493 | } | ||
494 | } while (1); | ||
495 | return hv_ret; | ||
496 | } | ||
497 | |||
498 | static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, | ||
499 | struct cwq_initial_entry *ent) | ||
500 | { | ||
501 | unsigned long hv_ret = spu_queue_submit(qp, ent); | ||
502 | |||
503 | if (hv_ret == HV_EOK) | ||
504 | hv_ret = wait_for_tail(qp); | ||
505 | |||
506 | return hv_ret; | ||
507 | } | ||
508 | |||
509 | static int n2_do_async_digest(struct ahash_request *req, | ||
510 | unsigned int auth_type, unsigned int digest_size, | ||
511 | unsigned int result_size, void *hash_loc, | ||
512 | unsigned long auth_key, unsigned int auth_key_len) | ||
513 | { | ||
514 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
515 | struct cwq_initial_entry *ent; | ||
516 | struct crypto_hash_walk walk; | ||
517 | struct spu_queue *qp; | ||
518 | unsigned long flags; | ||
519 | int err = -ENODEV; | ||
520 | int nbytes, cpu; | ||
521 | |||
522 | /* The total effective length of the operation may not | ||
523 | * exceed 2^16. | ||
524 | */ | ||
525 | if (unlikely(req->nbytes > (1 << 16))) { | ||
526 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
527 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
528 | |||
529 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
530 | rctx->fallback_req.base.flags = | ||
531 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
532 | rctx->fallback_req.nbytes = req->nbytes; | ||
533 | rctx->fallback_req.src = req->src; | ||
534 | rctx->fallback_req.result = req->result; | ||
535 | |||
536 | return crypto_ahash_digest(&rctx->fallback_req); | ||
537 | } | ||
538 | |||
539 | nbytes = crypto_hash_walk_first(req, &walk); | ||
540 | |||
541 | cpu = get_cpu(); | ||
542 | qp = cpu_to_cwq[cpu]; | ||
543 | if (!qp) | ||
544 | goto out; | ||
545 | |||
546 | spin_lock_irqsave(&qp->lock, flags); | ||
547 | |||
548 | /* XXX can do better, improve this later by doing a by-hand scatterlist | ||
549 | * XXX walk, etc. | ||
550 | */ | ||
551 | ent = qp->q + qp->tail; | ||
552 | |||
553 | ent->control = control_word_base(nbytes, auth_key_len, 0, | ||
554 | auth_type, digest_size, | ||
555 | false, true, false, false, | ||
556 | OPCODE_INPLACE_BIT | | ||
557 | OPCODE_AUTH_MAC); | ||
558 | ent->src_addr = __pa(walk.data); | ||
559 | ent->auth_key_addr = auth_key; | ||
560 | ent->auth_iv_addr = __pa(hash_loc); | ||
561 | ent->final_auth_state_addr = 0UL; | ||
562 | ent->enc_key_addr = 0UL; | ||
563 | ent->enc_iv_addr = 0UL; | ||
564 | ent->dest_addr = __pa(hash_loc); | ||
565 | |||
566 | nbytes = crypto_hash_walk_done(&walk, 0); | ||
567 | while (nbytes > 0) { | ||
568 | ent = spu_queue_next(qp, ent); | ||
569 | |||
570 | ent->control = (nbytes - 1); | ||
571 | ent->src_addr = __pa(walk.data); | ||
572 | ent->auth_key_addr = 0UL; | ||
573 | ent->auth_iv_addr = 0UL; | ||
574 | ent->final_auth_state_addr = 0UL; | ||
575 | ent->enc_key_addr = 0UL; | ||
576 | ent->enc_iv_addr = 0UL; | ||
577 | ent->dest_addr = 0UL; | ||
578 | |||
579 | nbytes = crypto_hash_walk_done(&walk, 0); | ||
580 | } | ||
581 | ent->control |= CONTROL_END_OF_BLOCK; | ||
582 | |||
583 | if (submit_and_wait_for_tail(qp, ent) != HV_EOK) | ||
584 | err = -EINVAL; | ||
585 | else | ||
586 | err = 0; | ||
587 | |||
588 | spin_unlock_irqrestore(&qp->lock, flags); | ||
589 | |||
590 | if (!err) | ||
591 | memcpy(req->result, hash_loc, result_size); | ||
592 | out: | ||
593 | put_cpu(); | ||
594 | |||
595 | return err; | ||
596 | } | ||
597 | |||
598 | static int n2_hash_async_digest(struct ahash_request *req) | ||
599 | { | ||
600 | struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); | ||
601 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
602 | int ds; | ||
603 | |||
604 | ds = n2alg->digest_size; | ||
605 | if (unlikely(req->nbytes == 0)) { | ||
606 | memcpy(req->result, n2alg->hash_zero, ds); | ||
607 | return 0; | ||
608 | } | ||
609 | memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); | ||
610 | |||
611 | return n2_do_async_digest(req, n2alg->auth_type, | ||
612 | n2alg->hw_op_hashsz, ds, | ||
613 | &rctx->u, 0UL, 0); | ||
614 | } | ||
615 | |||
616 | static int n2_hmac_async_digest(struct ahash_request *req) | ||
617 | { | ||
618 | struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); | ||
619 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
620 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
621 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); | ||
622 | int ds; | ||
623 | |||
624 | ds = n2alg->derived.digest_size; | ||
625 | if (unlikely(req->nbytes == 0) || | ||
626 | unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { | ||
627 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
628 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
629 | |||
630 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
631 | rctx->fallback_req.base.flags = | ||
632 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
633 | rctx->fallback_req.nbytes = req->nbytes; | ||
634 | rctx->fallback_req.src = req->src; | ||
635 | rctx->fallback_req.result = req->result; | ||
636 | |||
637 | return crypto_ahash_digest(&rctx->fallback_req); | ||
638 | } | ||
639 | memcpy(&rctx->u, n2alg->derived.hash_init, | ||
640 | n2alg->derived.hw_op_hashsz); | ||
641 | |||
642 | return n2_do_async_digest(req, n2alg->derived.hmac_type, | ||
643 | n2alg->derived.hw_op_hashsz, ds, | ||
644 | &rctx->u, | ||
645 | __pa(&ctx->hash_key), | ||
646 | ctx->hash_key_len); | ||
647 | } | ||
648 | |||
649 | struct n2_cipher_context { | ||
650 | int key_len; | ||
651 | int enc_type; | ||
652 | union { | ||
653 | u8 aes[AES_MAX_KEY_SIZE]; | ||
654 | u8 des[DES_KEY_SIZE]; | ||
655 | u8 des3[3 * DES_KEY_SIZE]; | ||
656 | u8 arc4[258]; /* S-box, X, Y */ | ||
657 | } key; | ||
658 | }; | ||
659 | |||
660 | #define N2_CHUNK_ARR_LEN 16 | ||
661 | |||
662 | struct n2_crypto_chunk { | ||
663 | struct list_head entry; | ||
664 | unsigned long iv_paddr : 44; | ||
665 | unsigned long arr_len : 20; | ||
666 | unsigned long dest_paddr; | ||
667 | unsigned long dest_final; | ||
668 | struct { | ||
669 | unsigned long src_paddr : 44; | ||
670 | unsigned long src_len : 20; | ||
671 | } arr[N2_CHUNK_ARR_LEN]; | ||
672 | }; | ||
673 | |||
674 | struct n2_request_context { | ||
675 | struct ablkcipher_walk walk; | ||
676 | struct list_head chunk_list; | ||
677 | struct n2_crypto_chunk chunk; | ||
678 | u8 temp_iv[16]; | ||
679 | }; | ||
680 | |||
681 | /* The SPU allows some level of flexibility for partial cipher blocks | ||
682 | * being specified in a descriptor. | ||
683 | * | ||
684 | * It merely requires that every descriptor's length field is at least | ||
685 | * as large as the cipher block size. This means that a cipher block | ||
686 | * can span at most 2 descriptors. However, this does not allow a | ||
687 | * partial block to span into the final descriptor as that would | ||
688 | * violate the rule (since every descriptor's length must be at lest | ||
689 | * the block size). So, for example, assuming an 8 byte block size: | ||
690 | * | ||
691 | * 0xe --> 0xa --> 0x8 | ||
692 | * | ||
693 | * is a valid length sequence, whereas: | ||
694 | * | ||
695 | * 0xe --> 0xb --> 0x7 | ||
696 | * | ||
697 | * is not a valid sequence. | ||
698 | */ | ||
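A minimal checker for the rule stated above (a sketch under the stated constraints, not driver code): every descriptor must cover at least one cipher block, and the chain as a whole must cover whole blocks, which together forbid a short trailing fragment.

/* Sketch: validate a proposed sequence of descriptor lengths. */
static bool valid_descriptor_lens(const unsigned int *lens, int n,
                                  unsigned int block_size)
{
        unsigned int total = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (lens[i] < block_size)   /* every entry >= one block */
                        return false;
                total += lens[i];
        }
        return (total % block_size) == 0;   /* no partial final block */
}
/* {0xe, 0xa, 0x8} with block_size 8 -> true; {0xe, 0xb, 0x7} -> false */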
699 | |||
700 | struct n2_cipher_alg { | ||
701 | struct list_head entry; | ||
702 | u8 enc_type; | ||
703 | struct crypto_alg alg; | ||
704 | }; | ||
705 | |||
706 | static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) | ||
707 | { | ||
708 | struct crypto_alg *alg = tfm->__crt_alg; | ||
709 | |||
710 | return container_of(alg, struct n2_cipher_alg, alg); | ||
711 | } | ||
712 | |||
713 | struct n2_cipher_request_context { | ||
714 | struct ablkcipher_walk walk; | ||
715 | }; | ||
716 | |||
717 | static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
718 | unsigned int keylen) | ||
719 | { | ||
720 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
721 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
722 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
723 | |||
724 | ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); | ||
725 | |||
726 | switch (keylen) { | ||
727 | case AES_KEYSIZE_128: | ||
728 | ctx->enc_type |= ENC_TYPE_ALG_AES128; | ||
729 | break; | ||
730 | case AES_KEYSIZE_192: | ||
731 | ctx->enc_type |= ENC_TYPE_ALG_AES192; | ||
732 | break; | ||
733 | case AES_KEYSIZE_256: | ||
734 | ctx->enc_type |= ENC_TYPE_ALG_AES256; | ||
735 | break; | ||
736 | default: | ||
737 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
738 | return -EINVAL; | ||
739 | } | ||
740 | |||
741 | ctx->key_len = keylen; | ||
742 | memcpy(ctx->key.aes, key, keylen); | ||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
747 | unsigned int keylen) | ||
748 | { | ||
749 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
750 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
751 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
752 | u32 tmp[DES_EXPKEY_WORDS]; | ||
753 | int err; | ||
754 | |||
755 | ctx->enc_type = n2alg->enc_type; | ||
756 | |||
757 | if (keylen != DES_KEY_SIZE) { | ||
758 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
759 | return -EINVAL; | ||
760 | } | ||
761 | |||
762 | err = des_ekey(tmp, key); | ||
763 | if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
764 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
765 | return -EINVAL; | ||
766 | } | ||
767 | |||
768 | ctx->key_len = keylen; | ||
769 | memcpy(ctx->key.des, key, keylen); | ||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
774 | unsigned int keylen) | ||
775 | { | ||
776 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
777 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
778 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
779 | |||
780 | ctx->enc_type = n2alg->enc_type; | ||
781 | |||
782 | if (keylen != (3 * DES_KEY_SIZE)) { | ||
783 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
784 | return -EINVAL; | ||
785 | } | ||
786 | ctx->key_len = keylen; | ||
787 | memcpy(ctx->key.des3, key, keylen); | ||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
792 | unsigned int keylen) | ||
793 | { | ||
794 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
795 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
796 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
797 | u8 *s = ctx->key.arc4; | ||
798 | u8 *x = s + 256; | ||
799 | u8 *y = x + 1; | ||
800 | int i, j, k; | ||
801 | |||
802 | ctx->enc_type = n2alg->enc_type; | ||
803 | |||
804 | j = k = 0; | ||
805 | *x = 0; | ||
806 | *y = 0; | ||
807 | for (i = 0; i < 256; i++) | ||
808 | s[i] = i; | ||
809 | for (i = 0; i < 256; i++) { | ||
810 | u8 a = s[i]; | ||
811 | j = (j + key[k] + a) & 0xff; | ||
812 | s[i] = s[j]; | ||
813 | s[j] = a; | ||
814 | if (++k >= keylen) | ||
815 | k = 0; | ||
816 | } | ||
817 | |||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) | ||
822 | { | ||
823 | int this_len = nbytes; | ||
824 | |||
825 | this_len -= (nbytes & (block_size - 1)); | ||
826 | return this_len > (1 << 16) ? (1 << 16) : this_len; | ||
827 | } | ||
828 | |||
829 | static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, | ||
830 | struct spu_queue *qp, bool encrypt) | ||
831 | { | ||
832 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
833 | struct cwq_initial_entry *ent; | ||
834 | bool in_place; | ||
835 | int i; | ||
836 | |||
837 | ent = spu_queue_alloc(qp, cp->arr_len); | ||
838 | if (!ent) { | ||
839 | pr_info("queue_alloc() of %d fails\n", | ||
840 | cp->arr_len); | ||
841 | return -EBUSY; | ||
842 | } | ||
843 | |||
844 | in_place = (cp->dest_paddr == cp->arr[0].src_paddr); | ||
845 | |||
846 | ent->control = control_word_base(cp->arr[0].src_len, | ||
847 | 0, ctx->enc_type, 0, 0, | ||
848 | false, true, false, encrypt, | ||
849 | OPCODE_ENCRYPT | | ||
850 | (in_place ? OPCODE_INPLACE_BIT : 0)); | ||
851 | ent->src_addr = cp->arr[0].src_paddr; | ||
852 | ent->auth_key_addr = 0UL; | ||
853 | ent->auth_iv_addr = 0UL; | ||
854 | ent->final_auth_state_addr = 0UL; | ||
855 | ent->enc_key_addr = __pa(&ctx->key); | ||
856 | ent->enc_iv_addr = cp->iv_paddr; | ||
857 | ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); | ||
858 | |||
859 | for (i = 1; i < cp->arr_len; i++) { | ||
860 | ent = spu_queue_next(qp, ent); | ||
861 | |||
862 | ent->control = cp->arr[i].src_len - 1; | ||
863 | ent->src_addr = cp->arr[i].src_paddr; | ||
864 | ent->auth_key_addr = 0UL; | ||
865 | ent->auth_iv_addr = 0UL; | ||
866 | ent->final_auth_state_addr = 0UL; | ||
867 | ent->enc_key_addr = 0UL; | ||
868 | ent->enc_iv_addr = 0UL; | ||
869 | ent->dest_addr = 0UL; | ||
870 | } | ||
871 | ent->control |= CONTROL_END_OF_BLOCK; | ||
872 | |||
873 | return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; | ||
874 | } | ||
875 | |||
876 | static int n2_compute_chunks(struct ablkcipher_request *req) | ||
877 | { | ||
878 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
879 | struct ablkcipher_walk *walk = &rctx->walk; | ||
880 | struct n2_crypto_chunk *chunk; | ||
881 | unsigned long dest_prev; | ||
882 | unsigned int tot_len; | ||
883 | bool prev_in_place; | ||
884 | int err, nbytes; | ||
885 | |||
886 | ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); | ||
887 | err = ablkcipher_walk_phys(req, walk); | ||
888 | if (err) | ||
889 | return err; | ||
890 | |||
891 | INIT_LIST_HEAD(&rctx->chunk_list); | ||
892 | |||
893 | chunk = &rctx->chunk; | ||
894 | INIT_LIST_HEAD(&chunk->entry); | ||
895 | |||
896 | chunk->iv_paddr = 0UL; | ||
897 | chunk->arr_len = 0; | ||
898 | chunk->dest_paddr = 0UL; | ||
899 | |||
900 | prev_in_place = false; | ||
901 | dest_prev = ~0UL; | ||
902 | tot_len = 0; | ||
903 | |||
904 | while ((nbytes = walk->nbytes) != 0) { | ||
905 | unsigned long dest_paddr, src_paddr; | ||
906 | bool in_place; | ||
907 | int this_len; | ||
908 | |||
909 | src_paddr = (page_to_phys(walk->src.page) + | ||
910 | walk->src.offset); | ||
911 | dest_paddr = (page_to_phys(walk->dst.page) + | ||
912 | walk->dst.offset); | ||
913 | in_place = (src_paddr == dest_paddr); | ||
914 | this_len = cipher_descriptor_len(nbytes, walk->blocksize); | ||
915 | |||
916 | if (chunk->arr_len != 0) { | ||
917 | if (in_place != prev_in_place || | ||
918 | (!prev_in_place && | ||
919 | dest_paddr != dest_prev) || | ||
920 | chunk->arr_len == N2_CHUNK_ARR_LEN || | ||
921 | tot_len + this_len > (1 << 16)) { | ||
922 | chunk->dest_final = dest_prev; | ||
923 | list_add_tail(&chunk->entry, | ||
924 | &rctx->chunk_list); | ||
925 | chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); | ||
926 | if (!chunk) { | ||
927 | err = -ENOMEM; | ||
928 | break; | ||
929 | } | ||
930 | INIT_LIST_HEAD(&chunk->entry); | ||
931 | } | ||
932 | } | ||
933 | if (chunk->arr_len == 0) { | ||
934 | chunk->dest_paddr = dest_paddr; | ||
935 | tot_len = 0; | ||
936 | } | ||
937 | chunk->arr[chunk->arr_len].src_paddr = src_paddr; | ||
938 | chunk->arr[chunk->arr_len].src_len = this_len; | ||
939 | chunk->arr_len++; | ||
940 | |||
941 | dest_prev = dest_paddr + this_len; | ||
942 | prev_in_place = in_place; | ||
943 | tot_len += this_len; | ||
944 | |||
945 | err = ablkcipher_walk_done(req, walk, nbytes - this_len); | ||
946 | if (err) | ||
947 | break; | ||
948 | } | ||
949 | if (!err && chunk->arr_len != 0) { | ||
950 | chunk->dest_final = dest_prev; | ||
951 | list_add_tail(&chunk->entry, &rctx->chunk_list); | ||
952 | } | ||
953 | |||
954 | return err; | ||
955 | } | ||
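n2_compute_chunks() coalesces the physical scatterlist segments into hardware chunks; a new chunk is started whenever the next segment cannot legally extend the current one. Schematically, the cut conditions are:

/* A fresh chunk is cut when, for the next segment, any of these holds:  */
/*   1. its in-place/out-of-place mode differs from the previous segment */
/*   2. (out-of-place) its destination is not contiguous with the last   */
/*   3. the chunk already holds N2_CHUNK_ARR_LEN (16) source entries     */
/*   4. the running chunk length would exceed the 2^16-byte HW limit     */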
956 | |||
957 | static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) | ||
958 | { | ||
959 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
960 | struct n2_crypto_chunk *c, *tmp; | ||
961 | |||
962 | if (final_iv) | ||
963 | memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); | ||
964 | |||
965 | ablkcipher_walk_complete(&rctx->walk); | ||
966 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { | ||
967 | list_del(&c->entry); | ||
968 | if (unlikely(c != &rctx->chunk)) | ||
969 | kfree(c); | ||
970 | } | ||
971 | |||
972 | } | ||
973 | |||
974 | static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) | ||
975 | { | ||
976 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
977 | struct crypto_tfm *tfm = req->base.tfm; | ||
978 | int err = n2_compute_chunks(req); | ||
979 | struct n2_crypto_chunk *c, *tmp; | ||
980 | unsigned long flags, hv_ret; | ||
981 | struct spu_queue *qp; | ||
982 | |||
983 | if (err) | ||
984 | return err; | ||
985 | |||
986 | qp = cpu_to_cwq[get_cpu()]; | ||
987 | err = -ENODEV; | ||
988 | if (!qp) | ||
989 | goto out; | ||
990 | |||
991 | spin_lock_irqsave(&qp->lock, flags); | ||
992 | |||
993 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { | ||
994 | err = __n2_crypt_chunk(tfm, c, qp, encrypt); | ||
995 | if (err) | ||
996 | break; | ||
997 | list_del(&c->entry); | ||
998 | if (unlikely(c != &rctx->chunk)) | ||
999 | kfree(c); | ||
1000 | } | ||
1001 | if (!err) { | ||
1002 | hv_ret = wait_for_tail(qp); | ||
1003 | if (hv_ret != HV_EOK) | ||
1004 | err = -EINVAL; | ||
1005 | } | ||
1006 | |||
1007 | spin_unlock_irqrestore(&qp->lock, flags); | ||
1008 | |||
1009 | put_cpu(); | ||
1010 | |||
1011 | out: | ||
1012 | n2_chunk_complete(req, NULL); | ||
1013 | return err; | ||
1014 | } | ||
1015 | |||
1016 | static int n2_encrypt_ecb(struct ablkcipher_request *req) | ||
1017 | { | ||
1018 | return n2_do_ecb(req, true); | ||
1019 | } | ||
1020 | |||
1021 | static int n2_decrypt_ecb(struct ablkcipher_request *req) | ||
1022 | { | ||
1023 | return n2_do_ecb(req, false); | ||
1024 | } | ||
1025 | |||
1026 | static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) | ||
1027 | { | ||
1028 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
1029 | struct crypto_tfm *tfm = req->base.tfm; | ||
1030 | unsigned long flags, hv_ret, iv_paddr; | ||
1031 | int err = n2_compute_chunks(req); | ||
1032 | struct n2_crypto_chunk *c, *tmp; | ||
1033 | struct spu_queue *qp; | ||
1034 | void *final_iv_addr; | ||
1035 | |||
1036 | final_iv_addr = NULL; | ||
1037 | |||
1038 | if (err) | ||
1039 | return err; | ||
1040 | |||
1041 | qp = cpu_to_cwq[get_cpu()]; | ||
1042 | err = -ENODEV; | ||
1043 | if (!qp) | ||
1044 | goto out; | ||
1045 | |||
1046 | spin_lock_irqsave(&qp->lock, flags); | ||
1047 | |||
1048 | if (encrypt) { | ||
1049 | iv_paddr = __pa(rctx->walk.iv); | ||
1050 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, | ||
1051 | entry) { | ||
1052 | c->iv_paddr = iv_paddr; | ||
1053 | err = __n2_crypt_chunk(tfm, c, qp, true); | ||
1054 | if (err) | ||
1055 | break; | ||
1056 | iv_paddr = c->dest_final - rctx->walk.blocksize; | ||
1057 | list_del(&c->entry); | ||
1058 | if (unlikely(c != &rctx->chunk)) | ||
1059 | kfree(c); | ||
1060 | } | ||
1061 | final_iv_addr = __va(iv_paddr); | ||
1062 | } else { | ||
1063 | list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, | ||
1064 | entry) { | ||
1065 | if (c == &rctx->chunk) { | ||
1066 | iv_paddr = __pa(rctx->walk.iv); | ||
1067 | } else { | ||
1068 | iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + | ||
1069 | tmp->arr[tmp->arr_len-1].src_len - | ||
1070 | rctx->walk.blocksize); | ||
1071 | } | ||
1072 | if (!final_iv_addr) { | ||
1073 | unsigned long pa; | ||
1074 | |||
1075 | pa = (c->arr[c->arr_len-1].src_paddr + | ||
1076 | c->arr[c->arr_len-1].src_len - | ||
1077 | rctx->walk.blocksize); | ||
1078 | final_iv_addr = rctx->temp_iv; | ||
1079 | memcpy(rctx->temp_iv, __va(pa), | ||
1080 | rctx->walk.blocksize); | ||
1081 | } | ||
1082 | c->iv_paddr = iv_paddr; | ||
1083 | err = __n2_crypt_chunk(tfm, c, qp, false); | ||
1084 | if (err) | ||
1085 | break; | ||
1086 | list_del(&c->entry); | ||
1087 | if (unlikely(c != &rctx->chunk)) | ||
1088 | kfree(c); | ||
1089 | } | ||
1090 | } | ||
1091 | if (!err) { | ||
1092 | hv_ret = wait_for_tail(qp); | ||
1093 | if (hv_ret != HV_EOK) | ||
1094 | err = -EINVAL; | ||
1095 | } | ||
1096 | |||
1097 | spin_unlock_irqrestore(&qp->lock, flags); | ||
1098 | |||
1099 | put_cpu(); | ||
1100 | |||
1101 | out: | ||
1102 | n2_chunk_complete(req, err ? NULL : final_iv_addr); | ||
1103 | return err; | ||
1104 | } | ||
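For CBC-style decryption the chunk list is walked in reverse, because each chunk's IV is the last ciphertext block of the chunk before it and operations may run in place: processing back-to-front guarantees that ciphertext is still intact when it is needed as an IV. Schematically, for chunks C0, C1, C2:

/*   IV(C0) = req IV (rctx->walk.iv)                                    */
/*   IV(C1) = last ciphertext block of C0                               */
/*   IV(C2) = last ciphertext block of C1                               */
/* The request's final IV is C2's last ciphertext block, saved into     */
/* temp_iv before C2 is decrypted (it may be overwritten in place).     */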
1105 | |||
1106 | static int n2_encrypt_chaining(struct ablkcipher_request *req) | ||
1107 | { | ||
1108 | return n2_do_chaining(req, true); | ||
1109 | } | ||
1110 | |||
1111 | static int n2_decrypt_chaining(struct ablkcipher_request *req) | ||
1112 | { | ||
1113 | return n2_do_chaining(req, false); | ||
1114 | } | ||
1115 | |||
1116 | struct n2_cipher_tmpl { | ||
1117 | const char *name; | ||
1118 | const char *drv_name; | ||
1119 | u8 block_size; | ||
1120 | u8 enc_type; | ||
1121 | struct ablkcipher_alg ablkcipher; | ||
1122 | }; | ||
1123 | |||
1124 | static const struct n2_cipher_tmpl cipher_tmpls[] = { | ||
1125 | /* ARC4: only ECB is supported (chaining bits ignored) */ | ||
1126 | { .name = "ecb(arc4)", | ||
1127 | .drv_name = "ecb-arc4", | ||
1128 | .block_size = 1, | ||
1129 | .enc_type = (ENC_TYPE_ALG_RC4_STREAM | | ||
1130 | ENC_TYPE_CHAINING_ECB), | ||
1131 | .ablkcipher = { | ||
1132 | .min_keysize = 1, | ||
1133 | .max_keysize = 256, | ||
1134 | .setkey = n2_arc4_setkey, | ||
1135 | .encrypt = n2_encrypt_ecb, | ||
1136 | .decrypt = n2_decrypt_ecb, | ||
1137 | }, | ||
1138 | }, | ||
1139 | |||
1140 | /* DES: ECB, CBC and CFB are supported */ | ||
1141 | { .name = "ecb(des)", | ||
1142 | .drv_name = "ecb-des", | ||
1143 | .block_size = DES_BLOCK_SIZE, | ||
1144 | .enc_type = (ENC_TYPE_ALG_DES | | ||
1145 | ENC_TYPE_CHAINING_ECB), | ||
1146 | .ablkcipher = { | ||
1147 | .min_keysize = DES_KEY_SIZE, | ||
1148 | .max_keysize = DES_KEY_SIZE, | ||
1149 | .setkey = n2_des_setkey, | ||
1150 | .encrypt = n2_encrypt_ecb, | ||
1151 | .decrypt = n2_decrypt_ecb, | ||
1152 | }, | ||
1153 | }, | ||
1154 | { .name = "cbc(des)", | ||
1155 | .drv_name = "cbc-des", | ||
1156 | .block_size = DES_BLOCK_SIZE, | ||
1157 | .enc_type = (ENC_TYPE_ALG_DES | | ||
1158 | ENC_TYPE_CHAINING_CBC), | ||
1159 | .ablkcipher = { | ||
1160 | .ivsize = DES_BLOCK_SIZE, | ||
1161 | .min_keysize = DES_KEY_SIZE, | ||
1162 | .max_keysize = DES_KEY_SIZE, | ||
1163 | .setkey = n2_des_setkey, | ||
1164 | .encrypt = n2_encrypt_chaining, | ||
1165 | .decrypt = n2_decrypt_chaining, | ||
1166 | }, | ||
1167 | }, | ||
1168 | { .name = "cfb(des)", | ||
1169 | .drv_name = "cfb-des", | ||
1170 | .block_size = DES_BLOCK_SIZE, | ||
1171 | .enc_type = (ENC_TYPE_ALG_DES | | ||
1172 | ENC_TYPE_CHAINING_CFB), | ||
1173 | .ablkcipher = { | ||
1174 | .min_keysize = DES_KEY_SIZE, | ||
1175 | .max_keysize = DES_KEY_SIZE, | ||
1176 | .setkey = n2_des_setkey, | ||
1177 | .encrypt = n2_encrypt_chaining, | ||
1178 | .decrypt = n2_decrypt_chaining, | ||
1179 | }, | ||
1180 | }, | ||
1181 | |||
1182 | /* 3DES: ECB, CBC and CFB are supported */ | ||
1183 | { .name = "ecb(des3_ede)", | ||
1184 | .drv_name = "ecb-3des", | ||
1185 | .block_size = DES_BLOCK_SIZE, | ||
1186 | .enc_type = (ENC_TYPE_ALG_3DES | | ||
1187 | ENC_TYPE_CHAINING_ECB), | ||
1188 | .ablkcipher = { | ||
1189 | .min_keysize = 3 * DES_KEY_SIZE, | ||
1190 | .max_keysize = 3 * DES_KEY_SIZE, | ||
1191 | .setkey = n2_3des_setkey, | ||
1192 | .encrypt = n2_encrypt_ecb, | ||
1193 | .decrypt = n2_decrypt_ecb, | ||
1194 | }, | ||
1195 | }, | ||
1196 | { .name = "cbc(des3_ede)", | ||
1197 | .drv_name = "cbc-3des", | ||
1198 | .block_size = DES_BLOCK_SIZE, | ||
1199 | .enc_type = (ENC_TYPE_ALG_3DES | | ||
1200 | ENC_TYPE_CHAINING_CBC), | ||
1201 | .ablkcipher = { | ||
1202 | .ivsize = DES_BLOCK_SIZE, | ||
1203 | .min_keysize = 3 * DES_KEY_SIZE, | ||
1204 | .max_keysize = 3 * DES_KEY_SIZE, | ||
1205 | .setkey = n2_3des_setkey, | ||
1206 | .encrypt = n2_encrypt_chaining, | ||
1207 | .decrypt = n2_decrypt_chaining, | ||
1208 | }, | ||
1209 | }, | ||
1210 | { .name = "cfb(des3_ede)", | ||
1211 | .drv_name = "cfb-3des", | ||
1212 | .block_size = DES_BLOCK_SIZE, | ||
1213 | .enc_type = (ENC_TYPE_ALG_3DES | | ||
1214 | ENC_TYPE_CHAINING_CFB), | ||
1215 | .ablkcipher = { | ||
1216 | .min_keysize = 3 * DES_KEY_SIZE, | ||
1217 | .max_keysize = 3 * DES_KEY_SIZE, | ||
1218 | .setkey = n2_3des_setkey, | ||
1219 | .encrypt = n2_encrypt_chaining, | ||
1220 | .decrypt = n2_decrypt_chaining, | ||
1221 | }, | ||
1222 | }, | ||
1223 | /* AES: ECB, CBC and CTR are supported */ | ||
1224 | { .name = "ecb(aes)", | ||
1225 | .drv_name = "ecb-aes", | ||
1226 | .block_size = AES_BLOCK_SIZE, | ||
1227 | .enc_type = (ENC_TYPE_ALG_AES128 | | ||
1228 | ENC_TYPE_CHAINING_ECB), | ||
1229 | .ablkcipher = { | ||
1230 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1231 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1232 | .setkey = n2_aes_setkey, | ||
1233 | .encrypt = n2_encrypt_ecb, | ||
1234 | .decrypt = n2_decrypt_ecb, | ||
1235 | }, | ||
1236 | }, | ||
1237 | { .name = "cbc(aes)", | ||
1238 | .drv_name = "cbc-aes", | ||
1239 | .block_size = AES_BLOCK_SIZE, | ||
1240 | .enc_type = (ENC_TYPE_ALG_AES128 | | ||
1241 | ENC_TYPE_CHAINING_CBC), | ||
1242 | .ablkcipher = { | ||
1243 | .ivsize = AES_BLOCK_SIZE, | ||
1244 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1245 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1246 | .setkey = n2_aes_setkey, | ||
1247 | .encrypt = n2_encrypt_chaining, | ||
1248 | .decrypt = n2_decrypt_chaining, | ||
1249 | }, | ||
1250 | }, | ||
1251 | { .name = "ctr(aes)", | ||
1252 | .drv_name = "ctr-aes", | ||
1253 | .block_size = AES_BLOCK_SIZE, | ||
1254 | .enc_type = (ENC_TYPE_ALG_AES128 | | ||
1255 | ENC_TYPE_CHAINING_COUNTER), | ||
1256 | .ablkcipher = { | ||
1257 | .ivsize = AES_BLOCK_SIZE, | ||
1258 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1259 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1260 | .setkey = n2_aes_setkey, | ||
1261 | .encrypt = n2_encrypt_chaining, | ||
1262 | .decrypt = n2_encrypt_chaining, /* CTR: decrypt == encrypt */ | ||
1263 | }, | ||
1264 | }, | ||
1265 | |||
1266 | }; | ||
1267 | #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) | ||
1268 | |||
1269 | static LIST_HEAD(cipher_algs); | ||
1270 | |||
1271 | struct n2_hash_tmpl { | ||
1272 | const char *name; | ||
1273 | const char *hash_zero; | ||
1274 | const u32 *hash_init; | ||
1275 | u8 hw_op_hashsz; | ||
1276 | u8 digest_size; | ||
1277 | u8 block_size; | ||
1278 | u8 auth_type; | ||
1279 | u8 hmac_type; | ||
1280 | }; | ||
1281 | |||
1282 | static const char md5_zero[MD5_DIGEST_SIZE] = { | ||
1283 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, | ||
1284 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | ||
1285 | }; | ||
1286 | static const u32 md5_init[MD5_HASH_WORDS] = { | ||
1287 | cpu_to_le32(0x67452301), | ||
1288 | cpu_to_le32(0xefcdab89), | ||
1289 | cpu_to_le32(0x98badcfe), | ||
1290 | cpu_to_le32(0x10325476), | ||
1291 | }; | ||
1292 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | ||
1293 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | ||
1294 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, | ||
1295 | 0x07, 0x09 | ||
1296 | }; | ||
1297 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | ||
1298 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | ||
1299 | }; | ||
1300 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | ||
1301 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, | ||
1302 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, | ||
1303 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, | ||
1304 | 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
1305 | }; | ||
1306 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { | ||
1307 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | ||
1308 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, | ||
1309 | }; | ||
1310 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | ||
1311 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, | ||
1312 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, | ||
1313 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, | ||
1314 | 0x2f | ||
1315 | }; | ||
1316 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | ||
1317 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, | ||
1318 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, | ||
1319 | }; | ||
1320 | |||
1321 | static const struct n2_hash_tmpl hash_tmpls[] = { | ||
1322 | { .name = "md5", | ||
1323 | .hash_zero = md5_zero, | ||
1324 | .hash_init = md5_init, | ||
1325 | .auth_type = AUTH_TYPE_MD5, | ||
1326 | .hmac_type = AUTH_TYPE_HMAC_MD5, | ||
1327 | .hw_op_hashsz = MD5_DIGEST_SIZE, | ||
1328 | .digest_size = MD5_DIGEST_SIZE, | ||
1329 | .block_size = MD5_HMAC_BLOCK_SIZE }, | ||
1330 | { .name = "sha1", | ||
1331 | .hash_zero = sha1_zero, | ||
1332 | .hash_init = sha1_init, | ||
1333 | .auth_type = AUTH_TYPE_SHA1, | ||
1334 | .hmac_type = AUTH_TYPE_HMAC_SHA1, | ||
1335 | .hw_op_hashsz = SHA1_DIGEST_SIZE, | ||
1336 | .digest_size = SHA1_DIGEST_SIZE, | ||
1337 | .block_size = SHA1_BLOCK_SIZE }, | ||
1338 | { .name = "sha256", | ||
1339 | .hash_zero = sha256_zero, | ||
1340 | .hash_init = sha256_init, | ||
1341 | .auth_type = AUTH_TYPE_SHA256, | ||
1342 | .hmac_type = AUTH_TYPE_HMAC_SHA256, | ||
1343 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | ||
1344 | .digest_size = SHA256_DIGEST_SIZE, | ||
1345 | .block_size = SHA256_BLOCK_SIZE }, | ||
1346 | { .name = "sha224", | ||
1347 | .hash_zero = sha224_zero, | ||
1348 | .hash_init = sha224_init, | ||
1349 | .auth_type = AUTH_TYPE_SHA256, | ||
1350 | .hmac_type = AUTH_TYPE_RESERVED, | ||
1351 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | ||
1352 | .digest_size = SHA224_DIGEST_SIZE, | ||
1353 | .block_size = SHA224_BLOCK_SIZE }, | ||
1354 | }; | ||
1355 | #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) | ||
1356 | |||
1357 | static LIST_HEAD(ahash_algs); | ||
1358 | static LIST_HEAD(hmac_algs); | ||
1359 | |||
1360 | static int algs_registered; | ||
1361 | |||
1362 | static void __n2_unregister_algs(void) | ||
1363 | { | ||
1364 | struct n2_cipher_alg *cipher, *cipher_tmp; | ||
1365 | struct n2_ahash_alg *alg, *alg_tmp; | ||
1366 | struct n2_hmac_alg *hmac, *hmac_tmp; | ||
1367 | |||
1368 | list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { | ||
1369 | crypto_unregister_alg(&cipher->alg); | ||
1370 | list_del(&cipher->entry); | ||
1371 | kfree(cipher); | ||
1372 | } | ||
1373 | list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { | ||
1374 | crypto_unregister_ahash(&hmac->derived.alg); | ||
1375 | list_del(&hmac->derived.entry); | ||
1376 | kfree(hmac); | ||
1377 | } | ||
1378 | list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { | ||
1379 | crypto_unregister_ahash(&alg->alg); | ||
1380 | list_del(&alg->entry); | ||
1381 | kfree(alg); | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | static int n2_cipher_cra_init(struct crypto_tfm *tfm) | ||
1386 | { | ||
1387 | tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); | ||
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1391 | static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) | ||
1392 | { | ||
1393 | struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
1394 | struct crypto_alg *alg; | ||
1395 | int err; | ||
1396 | |||
1397 | if (!p) | ||
1398 | return -ENOMEM; | ||
1399 | |||
1400 | alg = &p->alg; | ||
1401 | |||
1402 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); | ||
1403 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); | ||
1404 | alg->cra_priority = N2_CRA_PRIORITY; | ||
1405 | alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | ||
1406 | alg->cra_blocksize = tmpl->block_size; | ||
1407 | p->enc_type = tmpl->enc_type; | ||
1408 | alg->cra_ctxsize = sizeof(struct n2_cipher_context); | ||
1409 | alg->cra_type = &crypto_ablkcipher_type; | ||
1410 | alg->cra_u.ablkcipher = tmpl->ablkcipher; | ||
1411 | alg->cra_init = n2_cipher_cra_init; | ||
1412 | alg->cra_module = THIS_MODULE; | ||
1413 | |||
1414 | list_add(&p->entry, &cipher_algs); | ||
1415 | err = crypto_register_alg(alg); | ||
1416 | if (err) { | ||
1417 | pr_err("%s alg registration failed\n", alg->cra_name); | ||
1418 | list_del(&p->entry); | ||
1419 | kfree(p); | ||
1420 | } else { | ||
1421 | pr_info("%s alg registered\n", alg->cra_name); | ||
1422 | } | ||
1423 | return err; | ||
1424 | } | ||
1425 | |||
1426 | static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) | ||
1427 | { | ||
1428 | struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
1429 | struct ahash_alg *ahash; | ||
1430 | struct crypto_alg *base; | ||
1431 | int err; | ||
1432 | |||
1433 | if (!p) | ||
1434 | return -ENOMEM; | ||
1435 | |||
1436 | p->child_alg = n2ahash->alg.halg.base.cra_name; | ||
1437 | memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); | ||
1438 | INIT_LIST_HEAD(&p->derived.entry); | ||
1439 | |||
1440 | ahash = &p->derived.alg; | ||
1441 | ahash->digest = n2_hmac_async_digest; | ||
1442 | ahash->setkey = n2_hmac_async_setkey; | ||
1443 | |||
1444 | base = &ahash->halg.base; | ||
1445 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); | ||
1446 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); | ||
1447 | |||
1448 | base->cra_ctxsize = sizeof(struct n2_hmac_ctx); | ||
1449 | base->cra_init = n2_hmac_cra_init; | ||
1450 | base->cra_exit = n2_hmac_cra_exit; | ||
1451 | |||
1452 | list_add(&p->derived.entry, &hmac_algs); | ||
1453 | err = crypto_register_ahash(ahash); | ||
1454 | if (err) { | ||
1455 | pr_err("%s alg registration failed\n", base->cra_name); | ||
1456 | list_del(&p->derived.entry); | ||
1457 | kfree(p); | ||
1458 | } else { | ||
1459 | pr_info("%s alg registered\n", base->cra_name); | ||
1460 | } | ||
1461 | return err; | ||
1462 | } | ||
1463 | |||
1464 | static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) | ||
1465 | { | ||
1466 | struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
1467 | struct hash_alg_common *halg; | ||
1468 | struct crypto_alg *base; | ||
1469 | struct ahash_alg *ahash; | ||
1470 | int err; | ||
1471 | |||
1472 | if (!p) | ||
1473 | return -ENOMEM; | ||
1474 | |||
1475 | p->hash_zero = tmpl->hash_zero; | ||
1476 | p->hash_init = tmpl->hash_init; | ||
1477 | p->auth_type = tmpl->auth_type; | ||
1478 | p->hmac_type = tmpl->hmac_type; | ||
1479 | p->hw_op_hashsz = tmpl->hw_op_hashsz; | ||
1480 | p->digest_size = tmpl->digest_size; | ||
1481 | |||
1482 | ahash = &p->alg; | ||
1483 | ahash->init = n2_hash_async_init; | ||
1484 | ahash->update = n2_hash_async_update; | ||
1485 | ahash->final = n2_hash_async_final; | ||
1486 | ahash->finup = n2_hash_async_finup; | ||
1487 | ahash->digest = n2_hash_async_digest; | ||
1488 | |||
1489 | halg = &ahash->halg; | ||
1490 | halg->digestsize = tmpl->digest_size; | ||
1491 | |||
1492 | base = &halg->base; | ||
1493 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); | ||
1494 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); | ||
1495 | base->cra_priority = N2_CRA_PRIORITY; | ||
1496 | base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK; | ||
1497 | base->cra_blocksize = tmpl->block_size; | ||
1498 | base->cra_ctxsize = sizeof(struct n2_hash_ctx); | ||
1499 | base->cra_module = THIS_MODULE; | ||
1500 | base->cra_init = n2_hash_cra_init; | ||
1501 | base->cra_exit = n2_hash_cra_exit; | ||
1502 | |||
1503 | list_add(&p->entry, &ahash_algs); | ||
1504 | err = crypto_register_ahash(ahash); | ||
1505 | if (err) { | ||
1506 | pr_err("%s alg registration failed\n", base->cra_name); | ||
1507 | list_del(&p->entry); | ||
1508 | kfree(p); | ||
1509 | } else { | ||
1510 | pr_info("%s alg registered\n", base->cra_name); | ||
1511 | } | ||
1512 | if (!err && p->hmac_type != AUTH_TYPE_RESERVED) | ||
1513 | err = __n2_register_one_hmac(p); | ||
1514 | return err; | ||
1515 | } | ||
1516 | |||
1517 | static int __devinit n2_register_algs(void) | ||
1518 | { | ||
1519 | int i, err = 0; | ||
1520 | |||
1521 | mutex_lock(&spu_lock); | ||
1522 | if (algs_registered++) | ||
1523 | goto out; | ||
1524 | |||
1525 | for (i = 0; i < NUM_HASH_TMPLS; i++) { | ||
1526 | err = __n2_register_one_ahash(&hash_tmpls[i]); | ||
1527 | if (err) { | ||
1528 | __n2_unregister_algs(); | ||
1529 | goto out; | ||
1530 | } | ||
1531 | } | ||
1532 | for (i = 0; i < NUM_CIPHER_TMPLS; i++) { | ||
1533 | err = __n2_register_one_cipher(&cipher_tmpls[i]); | ||
1534 | if (err) { | ||
1535 | __n2_unregister_algs(); | ||
1536 | goto out; | ||
1537 | } | ||
1538 | } | ||
1539 | |||
1540 | out: | ||
1541 | mutex_unlock(&spu_lock); | ||
1542 | return err; | ||
1543 | } | ||
1544 | |||
1545 | static void __exit n2_unregister_algs(void) | ||
1546 | { | ||
1547 | mutex_lock(&spu_lock); | ||
1548 | if (!--algs_registered) | ||
1549 | __n2_unregister_algs(); | ||
1550 | mutex_unlock(&spu_lock); | ||
1551 | } | ||
1552 | |||
1553 | /* To map CWQ queues to interrupt sources, the hypervisor API provides | ||
1554 | * a devino. This isn't very useful to us because all of the | ||
1555 | * interrupts listed in the of_device node have been translated to | ||
1556 | * Linux virtual IRQ cookie numbers. | ||
1557 | * | ||
1558 | * So we have to back-translate, going through the 'intr' and 'ino' | ||
1559 |  * property tables of the n2cp MDESC node, matching them with the OF |	||
1560 |  * 'interrupts' property entries, in order to figure out which |	||
1561 | * devino goes to which already-translated IRQ. | ||
1562 | */ | ||
1563 | static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip, | ||
1564 | unsigned long dev_ino) | ||
1565 | { | ||
1566 | const unsigned int *dev_intrs; | ||
1567 | unsigned int intr; | ||
1568 | int i; | ||
1569 | |||
1570 | for (i = 0; i < ip->num_intrs; i++) { | ||
1571 | if (ip->ino_table[i].ino == dev_ino) | ||
1572 | break; | ||
1573 | } | ||
1574 | if (i == ip->num_intrs) | ||
1575 | return -ENODEV; | ||
1576 | |||
1577 | intr = ip->ino_table[i].intr; | ||
1578 | |||
1579 | dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); | ||
1580 | if (!dev_intrs) | ||
1581 | return -ENODEV; | ||
1582 | |||
1583 | for (i = 0; i < dev->num_irqs; i++) { | ||
1584 | if (dev_intrs[i] == intr) | ||
1585 | return i; | ||
1586 | } | ||
1587 | |||
1588 | return -ENODEV; | ||
1589 | } | ||
1590 | |||
1591 | static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip, | ||
1592 | const char *irq_name, struct spu_queue *p, | ||
1593 | irq_handler_t handler) | ||
1594 | { | ||
1595 | unsigned long herr; | ||
1596 | int index; | ||
1597 | |||
1598 | herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); | ||
1599 | if (herr) | ||
1600 | return -EINVAL; | ||
1601 | |||
1602 | index = find_devino_index(dev, ip, p->devino); | ||
1603 | if (index < 0) | ||
1604 | return index; | ||
1605 | |||
1606 | p->irq = dev->irqs[index]; | ||
1607 | |||
1608 | sprintf(p->irq_name, "%s-%d", irq_name, index); | ||
1609 | |||
1610 | return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, | ||
1611 | p->irq_name, p); | ||
1612 | } | ||
1613 | |||
1614 | static struct kmem_cache *queue_cache[2]; | ||
1615 | |||
1616 | static void *new_queue(unsigned long q_type) | ||
1617 | { | ||
1618 | return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); | ||
1619 | } | ||
1620 | |||
1621 | static void free_queue(void *p, unsigned long q_type) | ||
1622 | { | ||
1623 | 	kmem_cache_free(queue_cache[q_type - 1], p); |	||
1624 | } | ||
1625 | |||
1626 | static int queue_cache_init(void) | ||
1627 | { | ||
1628 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | ||
1629 | queue_cache[HV_NCS_QTYPE_MAU - 1] = | ||
1630 | kmem_cache_create("mau_queue", | ||
1631 | (MAU_NUM_ENTRIES * | ||
1632 | MAU_ENTRY_SIZE), | ||
1633 | MAU_ENTRY_SIZE, 0, NULL); | ||
1634 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | ||
1635 | return -ENOMEM; | ||
1636 | |||
1637 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) | ||
1638 | queue_cache[HV_NCS_QTYPE_CWQ - 1] = | ||
1639 | kmem_cache_create("cwq_queue", | ||
1640 | (CWQ_NUM_ENTRIES * | ||
1641 | CWQ_ENTRY_SIZE), | ||
1642 | CWQ_ENTRY_SIZE, 0, NULL); | ||
1643 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { | ||
1644 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | ||
1645 | return -ENOMEM; | ||
1646 | } | ||
1647 | return 0; | ||
1648 | } | ||
1649 | |||
1650 | static void queue_cache_destroy(void) | ||
1651 | { | ||
1652 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | ||
1653 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); | ||
1654 | } | ||
1655 | |||
1656 | static int spu_queue_register(struct spu_queue *p, unsigned long q_type) | ||
1657 | { | ||
1658 | cpumask_var_t old_allowed; | ||
1659 | unsigned long hv_ret; | ||
1660 | |||
1661 | if (cpumask_empty(&p->sharing)) | ||
1662 | return -EINVAL; | ||
1663 | |||
1664 | if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) | ||
1665 | return -ENOMEM; | ||
1666 | |||
1667 | cpumask_copy(old_allowed, ¤t->cpus_allowed); | ||
1668 | |||
1669 | set_cpus_allowed_ptr(current, &p->sharing); | ||
1670 | |||
1671 | hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), | ||
1672 | CWQ_NUM_ENTRIES, &p->qhandle); | ||
1673 | if (!hv_ret) | ||
1674 | sun4v_ncs_sethead_marker(p->qhandle, 0); | ||
1675 | |||
1676 | set_cpus_allowed_ptr(current, old_allowed); | ||
1677 | |||
1678 | free_cpumask_var(old_allowed); | ||
1679 | |||
1680 | return (hv_ret ? -EINVAL : 0); | ||
1681 | } | ||
1682 | |||
1683 | static int spu_queue_setup(struct spu_queue *p) | ||
1684 | { | ||
1685 | int err; | ||
1686 | |||
1687 | p->q = new_queue(p->q_type); | ||
1688 | if (!p->q) | ||
1689 | return -ENOMEM; | ||
1690 | |||
1691 | err = spu_queue_register(p, p->q_type); | ||
1692 | if (err) { | ||
1693 | free_queue(p->q, p->q_type); | ||
1694 | p->q = NULL; | ||
1695 | } | ||
1696 | |||
1697 | return err; | ||
1698 | } | ||
1699 | |||
1700 | static void spu_queue_destroy(struct spu_queue *p) | ||
1701 | { | ||
1702 | unsigned long hv_ret; | ||
1703 | |||
1704 | if (!p->q) | ||
1705 | return; | ||
1706 | |||
1707 | hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); | ||
1708 | |||
1709 | if (!hv_ret) | ||
1710 | free_queue(p->q, p->q_type); | ||
1711 | } | ||
1712 | |||
1713 | static void spu_list_destroy(struct list_head *list) | ||
1714 | { | ||
1715 | struct spu_queue *p, *n; | ||
1716 | |||
1717 | list_for_each_entry_safe(p, n, list, list) { | ||
1718 | int i; | ||
1719 | |||
1720 | for (i = 0; i < NR_CPUS; i++) { | ||
1721 | if (cpu_to_cwq[i] == p) | ||
1722 | cpu_to_cwq[i] = NULL; | ||
1723 | } | ||
1724 | |||
1725 | if (p->irq) { | ||
1726 | free_irq(p->irq, p); | ||
1727 | p->irq = 0; | ||
1728 | } | ||
1729 | spu_queue_destroy(p); | ||
1730 | list_del(&p->list); | ||
1731 | kfree(p); | ||
1732 | } | ||
1733 | } | ||
1734 | |||
1735 | /* Walk the backward arcs of a CWQ 'exec-unit' node, | ||
1736 | * gathering cpu membership information. | ||
1737 | */ | ||
1738 | static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, | ||
1739 | struct of_device *dev, | ||
1740 | u64 node, struct spu_queue *p, | ||
1741 | struct spu_queue **table) | ||
1742 | { | ||
1743 | u64 arc; | ||
1744 | |||
1745 | mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { | ||
1746 | u64 tgt = mdesc_arc_target(mdesc, arc); | ||
1747 | const char *name = mdesc_node_name(mdesc, tgt); | ||
1748 | const u64 *id; | ||
1749 | |||
1750 | if (strcmp(name, "cpu")) | ||
1751 | continue; | ||
1752 | id = mdesc_get_property(mdesc, tgt, "id", NULL); | ||
1753 | if (table[*id] != NULL) { | ||
1754 | dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", | ||
1755 | dev->dev.of_node->full_name); | ||
1756 | return -EINVAL; | ||
1757 | } | ||
1758 | cpu_set(*id, p->sharing); | ||
1759 | table[*id] = p; | ||
1760 | } | ||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | /* Process an 'exec-unit' MDESC node of type 'cwq'. */ | ||
1765 | static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, | ||
1766 | struct of_device *dev, struct mdesc_handle *mdesc, | ||
1767 | u64 node, const char *iname, unsigned long q_type, | ||
1768 | irq_handler_t handler, struct spu_queue **table) | ||
1769 | { | ||
1770 | struct spu_queue *p; | ||
1771 | int err; | ||
1772 | |||
1773 | p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); | ||
1774 | if (!p) { | ||
1775 | dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", | ||
1776 | dev->dev.of_node->full_name); | ||
1777 | return -ENOMEM; | ||
1778 | } | ||
1779 | |||
1780 | cpus_clear(p->sharing); | ||
1781 | spin_lock_init(&p->lock); | ||
1782 | p->q_type = q_type; | ||
1783 | INIT_LIST_HEAD(&p->jobs); | ||
1784 | list_add(&p->list, list); | ||
1785 | |||
1786 | err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); | ||
1787 | if (err) | ||
1788 | return err; | ||
1789 | |||
1790 | err = spu_queue_setup(p); | ||
1791 | if (err) | ||
1792 | return err; | ||
1793 | |||
1794 | return spu_map_ino(dev, ip, iname, p, handler); | ||
1795 | } | ||
1796 | |||
1797 | static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev, | ||
1798 | struct spu_mdesc_info *ip, struct list_head *list, | ||
1799 | const char *exec_name, unsigned long q_type, | ||
1800 | irq_handler_t handler, struct spu_queue **table) | ||
1801 | { | ||
1802 | int err = 0; | ||
1803 | u64 node; | ||
1804 | |||
1805 | mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { | ||
1806 | const char *type; | ||
1807 | |||
1808 | type = mdesc_get_property(mdesc, node, "type", NULL); | ||
1809 | if (!type || strcmp(type, exec_name)) | ||
1810 | continue; | ||
1811 | |||
1812 | err = handle_exec_unit(ip, list, dev, mdesc, node, | ||
1813 | exec_name, q_type, handler, table); | ||
1814 | if (err) { | ||
1815 | spu_list_destroy(list); | ||
1816 | break; | ||
1817 | } | ||
1818 | } | ||
1819 | |||
1820 | return err; | ||
1821 | } | ||
1822 | |||
1823 | static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, | ||
1824 | struct spu_mdesc_info *ip) | ||
1825 | { | ||
1826 | const u64 *intr, *ino; | ||
1827 | int intr_len, ino_len; | ||
1828 | int i; | ||
1829 | |||
1830 | intr = mdesc_get_property(mdesc, node, "intr", &intr_len); | ||
1831 | if (!intr) | ||
1832 | return -ENODEV; | ||
1833 | |||
1834 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); | ||
1835 | 	if (!ino) |	||
1836 | return -ENODEV; | ||
1837 | |||
1838 | if (intr_len != ino_len) | ||
1839 | return -EINVAL; | ||
1840 | |||
1841 | ip->num_intrs = intr_len / sizeof(u64); | ||
1842 | ip->ino_table = kzalloc((sizeof(struct ino_blob) * | ||
1843 | ip->num_intrs), | ||
1844 | GFP_KERNEL); | ||
1845 | if (!ip->ino_table) | ||
1846 | return -ENOMEM; | ||
1847 | |||
1848 | for (i = 0; i < ip->num_intrs; i++) { | ||
1849 | struct ino_blob *b = &ip->ino_table[i]; | ||
1850 | b->intr = intr[i]; | ||
1851 | b->ino = ino[i]; | ||
1852 | } | ||
1853 | |||
1854 | return 0; | ||
1855 | } | ||
1856 | |||
1857 | static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, | ||
1858 | struct of_device *dev, | ||
1859 | struct spu_mdesc_info *ip, | ||
1860 | const char *node_name) | ||
1861 | { | ||
1862 | const unsigned int *reg; | ||
1863 | u64 node; | ||
1864 | |||
1865 | reg = of_get_property(dev->dev.of_node, "reg", NULL); | ||
1866 | if (!reg) | ||
1867 | return -ENODEV; | ||
1868 | |||
1869 | mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { | ||
1870 | const char *name; | ||
1871 | const u64 *chdl; | ||
1872 | |||
1873 | name = mdesc_get_property(mdesc, node, "name", NULL); | ||
1874 | if (!name || strcmp(name, node_name)) | ||
1875 | continue; | ||
1876 | chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); | ||
1877 | if (!chdl || (*chdl != *reg)) | ||
1878 | continue; | ||
1879 | ip->cfg_handle = *chdl; | ||
1880 | return get_irq_props(mdesc, node, ip); | ||
1881 | } | ||
1882 | |||
1883 | return -ENODEV; | ||
1884 | } | ||
1885 | |||
1886 | static unsigned long n2_spu_hvapi_major; | ||
1887 | static unsigned long n2_spu_hvapi_minor; | ||
1888 | |||
1889 | static int __devinit n2_spu_hvapi_register(void) | ||
1890 | { | ||
1891 | int err; | ||
1892 | |||
1893 | n2_spu_hvapi_major = 2; | ||
1894 | n2_spu_hvapi_minor = 0; | ||
1895 | |||
1896 | err = sun4v_hvapi_register(HV_GRP_NCS, | ||
1897 | n2_spu_hvapi_major, | ||
1898 | &n2_spu_hvapi_minor); | ||
1899 | |||
1900 | if (!err) | ||
1901 | pr_info("Registered NCS HVAPI version %lu.%lu\n", | ||
1902 | n2_spu_hvapi_major, | ||
1903 | n2_spu_hvapi_minor); | ||
1904 | |||
1905 | return err; | ||
1906 | } | ||
1907 | |||
1908 | static void n2_spu_hvapi_unregister(void) | ||
1909 | { | ||
1910 | sun4v_hvapi_unregister(HV_GRP_NCS); | ||
1911 | } | ||
1912 | |||
1913 | static int global_ref; | ||
1914 | |||
1915 | static int __devinit grab_global_resources(void) | ||
1916 | { | ||
1917 | int err = 0; | ||
1918 | |||
1919 | mutex_lock(&spu_lock); | ||
1920 | |||
1921 | if (global_ref++) | ||
1922 | goto out; | ||
1923 | |||
1924 | err = n2_spu_hvapi_register(); | ||
1925 | if (err) | ||
1926 | goto out; | ||
1927 | |||
1928 | err = queue_cache_init(); | ||
1929 | if (err) | ||
1930 | goto out_hvapi_release; | ||
1931 | |||
1932 | err = -ENOMEM; | ||
1933 | cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, | ||
1934 | GFP_KERNEL); | ||
1935 | if (!cpu_to_cwq) | ||
1936 | goto out_queue_cache_destroy; | ||
1937 | |||
1938 | cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, | ||
1939 | GFP_KERNEL); | ||
1940 | if (!cpu_to_mau) | ||
1941 | goto out_free_cwq_table; | ||
1942 | |||
1943 | err = 0; | ||
1944 | |||
1945 | out: | ||
1946 | if (err) | ||
1947 | global_ref--; | ||
1948 | mutex_unlock(&spu_lock); | ||
1949 | return err; | ||
1950 | |||
1951 | out_free_cwq_table: | ||
1952 | kfree(cpu_to_cwq); | ||
1953 | cpu_to_cwq = NULL; | ||
1954 | |||
1955 | out_queue_cache_destroy: | ||
1956 | queue_cache_destroy(); | ||
1957 | |||
1958 | out_hvapi_release: | ||
1959 | n2_spu_hvapi_unregister(); | ||
1960 | goto out; | ||
1961 | } | ||
1962 | |||
1963 | static void release_global_resources(void) | ||
1964 | { | ||
1965 | mutex_lock(&spu_lock); | ||
1966 | if (!--global_ref) { | ||
1967 | kfree(cpu_to_cwq); | ||
1968 | cpu_to_cwq = NULL; | ||
1969 | |||
1970 | kfree(cpu_to_mau); | ||
1971 | cpu_to_mau = NULL; | ||
1972 | |||
1973 | queue_cache_destroy(); | ||
1974 | n2_spu_hvapi_unregister(); | ||
1975 | } | ||
1976 | mutex_unlock(&spu_lock); | ||
1977 | } | ||
1978 | |||
1979 | static struct n2_crypto * __devinit alloc_n2cp(void) | ||
1980 | { | ||
1981 | struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); | ||
1982 | |||
1983 | if (np) | ||
1984 | INIT_LIST_HEAD(&np->cwq_list); | ||
1985 | |||
1986 | return np; | ||
1987 | } | ||
1988 | |||
1989 | static void free_n2cp(struct n2_crypto *np) | ||
1990 | { | ||
1991 | if (np->cwq_info.ino_table) { | ||
1992 | kfree(np->cwq_info.ino_table); | ||
1993 | np->cwq_info.ino_table = NULL; | ||
1994 | } | ||
1995 | |||
1996 | kfree(np); | ||
1997 | } | ||
1998 | |||
1999 | static void __devinit n2_spu_driver_version(void) | ||
2000 | { | ||
2001 | static int n2_spu_version_printed; | ||
2002 | |||
2003 | if (n2_spu_version_printed++ == 0) | ||
2004 | pr_info("%s", version); | ||
2005 | } | ||
2006 | |||
2007 | static int __devinit n2_crypto_probe(struct of_device *dev, | ||
2008 | const struct of_device_id *match) | ||
2009 | { | ||
2010 | struct mdesc_handle *mdesc; | ||
2011 | const char *full_name; | ||
2012 | struct n2_crypto *np; | ||
2013 | int err; | ||
2014 | |||
2015 | n2_spu_driver_version(); | ||
2016 | |||
2017 | full_name = dev->dev.of_node->full_name; | ||
2018 | pr_info("Found N2CP at %s\n", full_name); | ||
2019 | |||
2020 | np = alloc_n2cp(); | ||
2021 | if (!np) { | ||
2022 | dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", | ||
2023 | full_name); | ||
2024 | return -ENOMEM; | ||
2025 | } | ||
2026 | |||
2027 | err = grab_global_resources(); | ||
2028 | if (err) { | ||
2029 | dev_err(&dev->dev, "%s: Unable to grab " | ||
2030 | "global resources.\n", full_name); | ||
2031 | goto out_free_n2cp; | ||
2032 | } | ||
2033 | |||
2034 | mdesc = mdesc_grab(); | ||
2035 | |||
2036 | if (!mdesc) { | ||
2037 | dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", | ||
2038 | full_name); | ||
2039 | err = -ENODEV; | ||
2040 | goto out_free_global; | ||
2041 | } | ||
2042 | err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); | ||
2043 | if (err) { | ||
2044 | dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", | ||
2045 | full_name); | ||
2046 | mdesc_release(mdesc); | ||
2047 | goto out_free_global; | ||
2048 | } | ||
2049 | |||
2050 | err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, | ||
2051 | "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, | ||
2052 | cpu_to_cwq); | ||
2053 | mdesc_release(mdesc); | ||
2054 | |||
2055 | if (err) { | ||
2056 | dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", | ||
2057 | full_name); | ||
2058 | goto out_free_global; | ||
2059 | } | ||
2060 | |||
2061 | err = n2_register_algs(); | ||
2062 | if (err) { | ||
2063 | dev_err(&dev->dev, "%s: Unable to register algorithms.\n", | ||
2064 | full_name); | ||
2065 | goto out_free_spu_list; | ||
2066 | } | ||
2067 | |||
2068 | dev_set_drvdata(&dev->dev, np); | ||
2069 | |||
2070 | return 0; | ||
2071 | |||
2072 | out_free_spu_list: | ||
2073 | spu_list_destroy(&np->cwq_list); | ||
2074 | |||
2075 | out_free_global: | ||
2076 | release_global_resources(); | ||
2077 | |||
2078 | out_free_n2cp: | ||
2079 | free_n2cp(np); | ||
2080 | |||
2081 | return err; | ||
2082 | } | ||
2083 | |||
2084 | static int __devexit n2_crypto_remove(struct of_device *dev) | ||
2085 | { | ||
2086 | struct n2_crypto *np = dev_get_drvdata(&dev->dev); | ||
2087 | |||
2088 | n2_unregister_algs(); | ||
2089 | |||
2090 | spu_list_destroy(&np->cwq_list); | ||
2091 | |||
2092 | release_global_resources(); | ||
2093 | |||
2094 | free_n2cp(np); | ||
2095 | |||
2096 | return 0; | ||
2097 | } | ||
2098 | |||
2099 | static struct n2_mau * __devinit alloc_ncp(void) | ||
2100 | { | ||
2101 | struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); | ||
2102 | |||
2103 | if (mp) | ||
2104 | INIT_LIST_HEAD(&mp->mau_list); | ||
2105 | |||
2106 | return mp; | ||
2107 | } | ||
2108 | |||
2109 | static void free_ncp(struct n2_mau *mp) | ||
2110 | { | ||
2111 | if (mp->mau_info.ino_table) { | ||
2112 | kfree(mp->mau_info.ino_table); | ||
2113 | mp->mau_info.ino_table = NULL; | ||
2114 | } | ||
2115 | |||
2116 | kfree(mp); | ||
2117 | } | ||
2118 | |||
2119 | static int __devinit n2_mau_probe(struct of_device *dev, | ||
2120 | const struct of_device_id *match) | ||
2121 | { | ||
2122 | struct mdesc_handle *mdesc; | ||
2123 | const char *full_name; | ||
2124 | struct n2_mau *mp; | ||
2125 | int err; | ||
2126 | |||
2127 | n2_spu_driver_version(); | ||
2128 | |||
2129 | full_name = dev->dev.of_node->full_name; | ||
2130 | pr_info("Found NCP at %s\n", full_name); | ||
2131 | |||
2132 | mp = alloc_ncp(); | ||
2133 | if (!mp) { | ||
2134 | dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", | ||
2135 | full_name); | ||
2136 | return -ENOMEM; | ||
2137 | } | ||
2138 | |||
2139 | err = grab_global_resources(); | ||
2140 | if (err) { | ||
2141 | dev_err(&dev->dev, "%s: Unable to grab " | ||
2142 | "global resources.\n", full_name); | ||
2143 | goto out_free_ncp; | ||
2144 | } | ||
2145 | |||
2146 | mdesc = mdesc_grab(); | ||
2147 | |||
2148 | if (!mdesc) { | ||
2149 | dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", | ||
2150 | full_name); | ||
2151 | err = -ENODEV; | ||
2152 | goto out_free_global; | ||
2153 | } | ||
2154 | |||
2155 | err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); | ||
2156 | if (err) { | ||
2157 | dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", | ||
2158 | full_name); | ||
2159 | mdesc_release(mdesc); | ||
2160 | goto out_free_global; | ||
2161 | } | ||
2162 | |||
2163 | err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, | ||
2164 | "mau", HV_NCS_QTYPE_MAU, mau_intr, | ||
2165 | cpu_to_mau); | ||
2166 | mdesc_release(mdesc); | ||
2167 | |||
2168 | if (err) { | ||
2169 | dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", | ||
2170 | full_name); | ||
2171 | goto out_free_global; | ||
2172 | } | ||
2173 | |||
2174 | dev_set_drvdata(&dev->dev, mp); | ||
2175 | |||
2176 | return 0; | ||
2177 | |||
2178 | out_free_global: | ||
2179 | release_global_resources(); | ||
2180 | |||
2181 | out_free_ncp: | ||
2182 | free_ncp(mp); | ||
2183 | |||
2184 | return err; | ||
2185 | } | ||
2186 | |||
2187 | static int __devexit n2_mau_remove(struct of_device *dev) | ||
2188 | { | ||
2189 | struct n2_mau *mp = dev_get_drvdata(&dev->dev); | ||
2190 | |||
2191 | spu_list_destroy(&mp->mau_list); | ||
2192 | |||
2193 | release_global_resources(); | ||
2194 | |||
2195 | free_ncp(mp); | ||
2196 | |||
2197 | return 0; | ||
2198 | } | ||
2199 | |||
2200 | static struct of_device_id n2_crypto_match[] = { | ||
2201 | { | ||
2202 | .name = "n2cp", | ||
2203 | .compatible = "SUNW,n2-cwq", | ||
2204 | }, | ||
2205 | { | ||
2206 | .name = "n2cp", | ||
2207 | .compatible = "SUNW,vf-cwq", | ||
2208 | }, | ||
2209 | {}, | ||
2210 | }; | ||
2211 | |||
2212 | MODULE_DEVICE_TABLE(of, n2_crypto_match); | ||
2213 | |||
2214 | static struct of_platform_driver n2_crypto_driver = { | ||
2215 | .driver = { | ||
2216 | .name = "n2cp", | ||
2217 | .owner = THIS_MODULE, | ||
2218 | .of_match_table = n2_crypto_match, | ||
2219 | }, | ||
2220 | .probe = n2_crypto_probe, | ||
2221 | .remove = __devexit_p(n2_crypto_remove), | ||
2222 | }; | ||
2223 | |||
2224 | static struct of_device_id n2_mau_match[] = { | ||
2225 | { | ||
2226 | .name = "ncp", | ||
2227 | .compatible = "SUNW,n2-mau", | ||
2228 | }, | ||
2229 | { | ||
2230 | .name = "ncp", | ||
2231 | .compatible = "SUNW,vf-mau", | ||
2232 | }, | ||
2233 | {}, | ||
2234 | }; | ||
2235 | |||
2236 | MODULE_DEVICE_TABLE(of, n2_mau_match); | ||
2237 | |||
2238 | static struct of_platform_driver n2_mau_driver = { | ||
2239 | .driver = { | ||
2240 | .name = "ncp", | ||
2241 | .owner = THIS_MODULE, | ||
2242 | .of_match_table = n2_mau_match, | ||
2243 | }, | ||
2244 | .probe = n2_mau_probe, | ||
2245 | .remove = __devexit_p(n2_mau_remove), | ||
2246 | }; | ||
2247 | |||
2248 | static int __init n2_init(void) | ||
2249 | { | ||
2250 | int err = of_register_platform_driver(&n2_crypto_driver); | ||
2251 | |||
2252 | if (!err) { | ||
2253 | err = of_register_platform_driver(&n2_mau_driver); | ||
2254 | if (err) | ||
2255 | of_unregister_platform_driver(&n2_crypto_driver); | ||
2256 | } | ||
2257 | return err; | ||
2258 | } | ||
2259 | |||
2260 | static void __exit n2_exit(void) | ||
2261 | { | ||
2262 | of_unregister_platform_driver(&n2_mau_driver); | ||
2263 | of_unregister_platform_driver(&n2_crypto_driver); | ||
2264 | } | ||
2265 | |||
2266 | module_init(n2_init); | ||
2267 | module_exit(n2_exit); | ||
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h new file mode 100644 index 000000000000..4bcbbeae98f5 --- /dev/null +++ b/drivers/crypto/n2_core.h | |||
@@ -0,0 +1,231 @@ | |||
1 | #ifndef _N2_CORE_H | ||
2 | #define _N2_CORE_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | struct ino_blob { | ||
7 | u64 intr; | ||
8 | u64 ino; | ||
9 | }; | ||
10 | |||
11 | struct spu_mdesc_info { | ||
12 | u64 cfg_handle; | ||
13 | struct ino_blob *ino_table; | ||
14 | int num_intrs; | ||
15 | }; | ||
16 | |||
17 | struct n2_crypto { | ||
18 | struct spu_mdesc_info cwq_info; | ||
19 | struct list_head cwq_list; | ||
20 | }; | ||
21 | |||
22 | struct n2_mau { | ||
23 | struct spu_mdesc_info mau_info; | ||
24 | struct list_head mau_list; | ||
25 | }; | ||
26 | |||
27 | #define CWQ_ENTRY_SIZE 64 | ||
28 | #define CWQ_NUM_ENTRIES 64 | ||
29 | |||
30 | #define MAU_ENTRY_SIZE 64 | ||
31 | #define MAU_NUM_ENTRIES 64 | ||
32 | |||
33 | struct cwq_initial_entry { | ||
34 | u64 control; | ||
35 | u64 src_addr; | ||
36 | u64 auth_key_addr; | ||
37 | u64 auth_iv_addr; | ||
38 | u64 final_auth_state_addr; | ||
39 | u64 enc_key_addr; | ||
40 | u64 enc_iv_addr; | ||
41 | u64 dest_addr; | ||
42 | }; | ||
43 | |||
44 | struct cwq_ext_entry { | ||
45 | u64 len; | ||
46 | u64 src_addr; | ||
47 | u64 resv1; | ||
48 | u64 resv2; | ||
49 | u64 resv3; | ||
50 | u64 resv4; | ||
51 | u64 resv5; | ||
52 | u64 resv6; | ||
53 | }; | ||
54 | |||
55 | struct cwq_final_entry { | ||
56 | u64 control; | ||
57 | u64 src_addr; | ||
58 | u64 resv1; | ||
59 | u64 resv2; | ||
60 | u64 resv3; | ||
61 | u64 resv4; | ||
62 | u64 resv5; | ||
63 | u64 resv6; | ||
64 | }; | ||
65 | |||
66 | #define CONTROL_LEN 0x000000000000ffffULL | ||
67 | #define CONTROL_LEN_SHIFT 0 | ||
68 | #define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL | ||
69 | #define CONTROL_HMAC_KEY_LEN_SHIFT 16 | ||
70 | #define CONTROL_ENC_TYPE 0x00000000ff000000ULL | ||
71 | #define CONTROL_ENC_TYPE_SHIFT 24 | ||
72 | #define ENC_TYPE_ALG_RC4_STREAM 0x00ULL | ||
73 | #define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL | ||
74 | #define ENC_TYPE_ALG_DES 0x08ULL | ||
75 | #define ENC_TYPE_ALG_3DES 0x0cULL | ||
76 | #define ENC_TYPE_ALG_AES128 0x10ULL | ||
77 | #define ENC_TYPE_ALG_AES192 0x14ULL | ||
78 | #define ENC_TYPE_ALG_AES256 0x18ULL | ||
79 | #define ENC_TYPE_ALG_RESERVED 0x1cULL | ||
80 | #define ENC_TYPE_ALG_MASK 0x1cULL | ||
81 | #define ENC_TYPE_CHAINING_ECB 0x00ULL | ||
82 | #define ENC_TYPE_CHAINING_CBC 0x01ULL | ||
83 | #define ENC_TYPE_CHAINING_CFB 0x02ULL | ||
84 | #define ENC_TYPE_CHAINING_COUNTER 0x03ULL | ||
85 | #define ENC_TYPE_CHAINING_MASK 0x03ULL | ||
86 | #define CONTROL_AUTH_TYPE 0x0000001f00000000ULL | ||
87 | #define CONTROL_AUTH_TYPE_SHIFT 32 | ||
88 | #define AUTH_TYPE_RESERVED 0x00ULL | ||
89 | #define AUTH_TYPE_MD5 0x01ULL | ||
90 | #define AUTH_TYPE_SHA1 0x02ULL | ||
91 | #define AUTH_TYPE_SHA256 0x03ULL | ||
92 | #define AUTH_TYPE_CRC32 0x04ULL | ||
93 | #define AUTH_TYPE_HMAC_MD5 0x05ULL | ||
94 | #define AUTH_TYPE_HMAC_SHA1 0x06ULL | ||
95 | #define AUTH_TYPE_HMAC_SHA256 0x07ULL | ||
96 | #define AUTH_TYPE_TCP_CHECKSUM 0x08ULL | ||
97 | #define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL | ||
98 | #define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL | ||
99 | #define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL | ||
100 | #define CONTROL_STRAND 0x000000e000000000ULL | ||
101 | #define CONTROL_STRAND_SHIFT 37 | ||
102 | #define CONTROL_HASH_LEN 0x0000ff0000000000ULL | ||
103 | #define CONTROL_HASH_LEN_SHIFT 40 | ||
104 | #define CONTROL_INTERRUPT 0x0001000000000000ULL | ||
105 | #define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL | ||
106 | #define CONTROL_RESERVED 0x001c000000000000ULL | ||
107 | #define CONTROL_HV_DONE 0x0004000000000000ULL | ||
108 | #define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL | ||
109 | #define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL | ||
110 | #define CONTROL_END_OF_BLOCK 0x0020000000000000ULL | ||
111 | #define CONTROL_START_OF_BLOCK 0x0040000000000000ULL | ||
112 | #define CONTROL_ENCRYPT 0x0080000000000000ULL | ||
113 | #define CONTROL_OPCODE 0xff00000000000000ULL | ||
114 | #define CONTROL_OPCODE_SHIFT 56 | ||
115 | #define OPCODE_INPLACE_BIT 0x80ULL | ||
116 | #define OPCODE_SSL_KEYBLOCK 0x10ULL | ||
117 | #define OPCODE_COPY 0x20ULL | ||
118 | #define OPCODE_ENCRYPT 0x40ULL | ||
119 | #define OPCODE_AUTH_MAC 0x41ULL | ||
120 | |||
121 | #endif /* !(__ASSEMBLY__) */ | ||
122 | |||
123 | /* NCS v2.0 hypervisor interfaces */ | ||
124 | #define HV_NCS_QTYPE_MAU 0x01 | ||
125 | #define HV_NCS_QTYPE_CWQ 0x02 | ||
126 | |||
127 | /* ncs_qconf() | ||
128 | * TRAP: HV_FAST_TRAP | ||
129 | * FUNCTION: HV_FAST_NCS_QCONF | ||
130 | * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) | ||
131 | * ARG1: Real address of queue, or handle for unconfigure | ||
132 | * ARG2: Number of entries in queue, zero for unconfigure | ||
133 | * RET0: status | ||
134 | * RET1: queue handle | ||
135 | * | ||
136 | * Configure a queue in the stream processing unit. | ||
137 | * | ||
138 | * The real address given as the base must be 64-byte | ||
139 | * aligned. | ||
140 | * | ||
141 | * The queue size can range from a minimum of 2 to a maximum | ||
142 | * of 64. The queue size must be a power of two. | ||
143 | * | ||
144 | * To unconfigure a queue, specify a length of zero and place | ||
145 | * the queue handle into ARG1. | ||
146 | * | ||
147 | * On configure success the hypervisor will set the FIRST, HEAD, | ||
148 | * and TAIL registers to the address of the first entry in the | ||
149 | * queue. The LAST register will be set to point to the last | ||
150 | * entry in the queue. | ||
151 | */ | ||
152 | #define HV_FAST_NCS_QCONF 0x111 | ||
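A usage sketch (not new driver code): spu_queue_register() and spu_queue_destroy() in n2_core.c drive this call through the sun4v_ncs_qconf() wrapper declared at the end of this header. Here 'q' is assumed to be a 64-byte-aligned, physically contiguous array of CWQ_NUM_ENTRIES entries:

	unsigned long qhandle, hv_err;

	/* Configure: the queue base must be a 64-byte-aligned real address. */
	hv_err = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(q),
				 CWQ_NUM_ENTRIES, &qhandle);
	if (hv_err)
		return -EINVAL;

	/* Unconfigure: the handle goes in ARG1, number of entries is zero. */
	hv_err = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);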
153 | |||
154 | /* ncs_qinfo() | ||
155 | * TRAP: HV_FAST_TRAP | ||
156 | * FUNCTION: HV_FAST_NCS_QINFO | ||
157 | * ARG0: Queue handle | ||
158 | * RET0: status | ||
159 | * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) | ||
160 | * RET2: Queue base address | ||
161 | * RET3: Number of entries | ||
162 | */ | ||
163 | #define HV_FAST_NCS_QINFO 0x112 | ||
164 | |||
165 | /* ncs_gethead() | ||
166 | * TRAP: HV_FAST_TRAP | ||
167 | * FUNCTION: HV_FAST_NCS_GETHEAD | ||
168 | * ARG0: Queue handle | ||
169 | * RET0: status | ||
170 | * RET1: queue head offset | ||
171 | */ | ||
172 | #define HV_FAST_NCS_GETHEAD 0x113 | ||
173 | |||
174 | /* ncs_gettail() | ||
175 | * TRAP: HV_FAST_TRAP | ||
176 | * FUNCTION: HV_FAST_NCS_GETTAIL | ||
177 | * ARG0: Queue handle | ||
178 | * RET0: status | ||
179 | * RET1: queue tail offset | ||
180 | */ | ||
181 | #define HV_FAST_NCS_GETTAIL 0x114 | ||
182 | |||
183 | /* ncs_settail() | ||
184 | * TRAP: HV_FAST_TRAP | ||
185 | * FUNCTION: HV_FAST_NCS_SETTAIL | ||
186 | * ARG0: Queue handle | ||
187 | * ARG1: New tail offset | ||
188 | * RET0: status | ||
189 | */ | ||
190 | #define HV_FAST_NCS_SETTAIL 0x115 | ||
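gethead/gettail/settail implement a simple producer-consumer protocol over byte offsets into the queue. A hedged sketch of the producer side (locking and error handling elided; 'q', 'ent', and 'qhandle' are assumed, and the real submission logic lives in the CWQ handling earlier in this driver):

	unsigned long tail, hv_err;

	hv_err = sun4v_ncs_gettail(qhandle, &tail);
	if (hv_err)
		return -EIO;

	/* Copy the new entry into the tail slot, then publish it by
	 * advancing the tail offset, wrapping at the end of the queue.
	 * The hypervisor signals completion by advancing the head. */
	memcpy((char *)q + tail, ent, CWQ_ENTRY_SIZE);
	tail += CWQ_ENTRY_SIZE;
	if (tail == CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE)
		tail = 0;
	hv_err = sun4v_ncs_settail(qhandle, tail);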
191 | |||
192 | /* ncs_qhandle_to_devino() | ||
193 | * TRAP: HV_FAST_TRAP | ||
194 | * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO | ||
195 | * ARG0: Queue handle | ||
196 | * RET0: status | ||
197 | * RET1: devino | ||
198 | */ | ||
199 | #define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116 | ||
200 | |||
201 | /* ncs_sethead_marker() | ||
202 | * TRAP: HV_FAST_TRAP | ||
203 | * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER | ||
204 | * ARG0: Queue handle | ||
205 | * ARG1: New head offset | ||
206 | * RET0: status | ||
207 | */ | ||
208 | #define HV_FAST_NCS_SETHEAD_MARKER 0x117 | ||
209 | |||
210 | #ifndef __ASSEMBLY__ | ||
211 | extern unsigned long sun4v_ncs_qconf(unsigned long queue_type, | ||
212 | unsigned long queue_ra, | ||
213 | unsigned long num_entries, | ||
214 | unsigned long *qhandle); | ||
215 | extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle, | ||
216 | unsigned long *queue_type, | ||
217 | unsigned long *queue_ra, | ||
218 | unsigned long *num_entries); | ||
219 | extern unsigned long sun4v_ncs_gethead(unsigned long qhandle, | ||
220 | unsigned long *head); | ||
221 | extern unsigned long sun4v_ncs_gettail(unsigned long qhandle, | ||
222 | unsigned long *tail); | ||
223 | extern unsigned long sun4v_ncs_settail(unsigned long qhandle, | ||
224 | unsigned long tail); | ||
225 | extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle, | ||
226 | unsigned long *devino); | ||
227 | extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle, | ||
228 | unsigned long head); | ||
229 | #endif /* !(__ASSEMBLY__) */ | ||
230 | |||
231 | #endif /* _N2_CORE_H */ | ||
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c new file mode 100644 index 000000000000..7d1485676886 --- /dev/null +++ b/drivers/crypto/omap-sham.c | |||
@@ -0,0 +1,1258 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for OMAP SHA1/MD5 HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2010 Nokia Corporation | ||
7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Some ideas are from old omap-sha1-md5.c driver. | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
17 | |||
18 | #include <linux/err.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/clk.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/scatterlist.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/crypto.h> | ||
33 | #include <linux/cryptohash.h> | ||
34 | #include <crypto/scatterwalk.h> | ||
35 | #include <crypto/algapi.h> | ||
36 | #include <crypto/sha.h> | ||
37 | #include <crypto/hash.h> | ||
38 | #include <crypto/internal/hash.h> | ||
39 | |||
40 | #include <plat/cpu.h> | ||
41 | #include <plat/dma.h> | ||
42 | #include <mach/irqs.h> | ||
43 | |||
44 | #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04)) | ||
45 | #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04)) | ||
46 | |||
47 | #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE | ||
48 | #define MD5_DIGEST_SIZE 16 | ||
49 | |||
50 | #define SHA_REG_DIGCNT 0x14 | ||
51 | |||
52 | #define SHA_REG_CTRL 0x18 | ||
53 | #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) | ||
54 | #define SHA_REG_CTRL_CLOSE_HASH (1 << 4) | ||
55 | #define SHA_REG_CTRL_ALGO_CONST (1 << 3) | ||
56 | #define SHA_REG_CTRL_ALGO (1 << 2) | ||
57 | #define SHA_REG_CTRL_INPUT_READY (1 << 1) | ||
58 | #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) | ||
59 | |||
60 | #define SHA_REG_REV 0x5C | ||
61 | #define SHA_REG_REV_MAJOR 0xF0 | ||
62 | #define SHA_REG_REV_MINOR 0x0F | ||
63 | |||
64 | #define SHA_REG_MASK 0x60 | ||
65 | #define SHA_REG_MASK_DMA_EN (1 << 3) | ||
66 | #define SHA_REG_MASK_IT_EN (1 << 2) | ||
67 | #define SHA_REG_MASK_SOFTRESET (1 << 1) | ||
68 | #define SHA_REG_AUTOIDLE (1 << 0) | ||
69 | |||
70 | #define SHA_REG_SYSSTATUS 0x64 | ||
71 | #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) | ||
72 | |||
73 | #define DEFAULT_TIMEOUT_INTERVAL HZ | ||
74 | |||
75 | #define FLAGS_FIRST 0x0001 | ||
76 | #define FLAGS_FINUP 0x0002 | ||
77 | #define FLAGS_FINAL 0x0004 | ||
78 | #define FLAGS_FAST 0x0008 | ||
79 | #define FLAGS_SHA1 0x0010 | ||
80 | #define FLAGS_DMA_ACTIVE 0x0020 | ||
81 | #define FLAGS_OUTPUT_READY 0x0040 | ||
82 | #define FLAGS_CLEAN 0x0080 | ||
83 | #define FLAGS_INIT 0x0100 | ||
84 | #define FLAGS_CPU 0x0200 | ||
85 | #define FLAGS_HMAC 0x0400 | ||
86 | |||
87 | /* 3rd byte: bit number for test_and_set_bit(), not a mask */ |	||
88 | #define FLAGS_BUSY 16 | ||
89 | |||
90 | #define OP_UPDATE 1 | ||
91 | #define OP_FINAL 2 | ||
92 | |||
93 | struct omap_sham_dev; | ||
94 | |||
95 | struct omap_sham_reqctx { | ||
96 | struct omap_sham_dev *dd; | ||
97 | unsigned long flags; | ||
98 | unsigned long op; | ||
99 | |||
100 | size_t digcnt; | ||
101 | u8 *buffer; | ||
102 | size_t bufcnt; | ||
103 | size_t buflen; | ||
104 | dma_addr_t dma_addr; | ||
105 | |||
106 | /* walk state */ | ||
107 | struct scatterlist *sg; | ||
108 | unsigned int offset; /* offset in current sg */ | ||
109 | unsigned int total; /* total request */ | ||
110 | }; | ||
111 | |||
112 | struct omap_sham_hmac_ctx { | ||
113 | struct crypto_shash *shash; | ||
114 | u8 ipad[SHA1_MD5_BLOCK_SIZE]; | ||
115 | u8 opad[SHA1_MD5_BLOCK_SIZE]; | ||
116 | }; | ||
117 | |||
118 | struct omap_sham_ctx { | ||
119 | struct omap_sham_dev *dd; | ||
120 | |||
121 | unsigned long flags; | ||
122 | |||
123 | /* fallback stuff */ | ||
124 | struct crypto_shash *fallback; | ||
125 | |||
126 | struct omap_sham_hmac_ctx base[0]; | ||
127 | }; | ||
128 | |||
129 | #define OMAP_SHAM_QUEUE_LENGTH 1 | ||
130 | |||
131 | struct omap_sham_dev { | ||
132 | struct list_head list; | ||
133 | unsigned long phys_base; | ||
134 | struct device *dev; | ||
135 | void __iomem *io_base; | ||
136 | int irq; | ||
137 | struct clk *iclk; | ||
138 | spinlock_t lock; | ||
139 | int dma; | ||
140 | int dma_lch; | ||
141 | struct tasklet_struct done_task; | ||
142 | struct tasklet_struct queue_task; | ||
143 | |||
144 | unsigned long flags; | ||
145 | struct crypto_queue queue; | ||
146 | struct ahash_request *req; | ||
147 | }; | ||
148 | |||
149 | struct omap_sham_drv { | ||
150 | struct list_head dev_list; | ||
151 | spinlock_t lock; | ||
152 | unsigned long flags; | ||
153 | }; | ||
154 | |||
155 | static struct omap_sham_drv sham = { | ||
156 | .dev_list = LIST_HEAD_INIT(sham.dev_list), | ||
157 | .lock = __SPIN_LOCK_UNLOCKED(sham.lock), | ||
158 | }; | ||
159 | |||
160 | static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) | ||
161 | { | ||
162 | return __raw_readl(dd->io_base + offset); | ||
163 | } | ||
164 | |||
165 | static inline void omap_sham_write(struct omap_sham_dev *dd, | ||
166 | u32 offset, u32 value) | ||
167 | { | ||
168 | __raw_writel(value, dd->io_base + offset); | ||
169 | } | ||
170 | |||
171 | static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, | ||
172 | u32 value, u32 mask) | ||
173 | { | ||
174 | u32 val; | ||
175 | |||
176 | val = omap_sham_read(dd, address); | ||
177 | val &= ~mask; | ||
178 | val |= value; | ||
179 | omap_sham_write(dd, address, val); | ||
180 | } | ||
181 | |||
182 | static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) | ||
183 | { | ||
184 | unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; | ||
185 | |||
186 | while (!(omap_sham_read(dd, offset) & bit)) { | ||
187 | if (time_is_before_jiffies(timeout)) | ||
188 | return -ETIMEDOUT; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static void omap_sham_copy_hash(struct ahash_request *req, int out) | ||
195 | { | ||
196 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
197 | u32 *hash = (u32 *)req->result; | ||
198 | int i; | ||
199 | |||
200 | if (likely(ctx->flags & FLAGS_SHA1)) { | ||
201 | /* SHA1 results are in big endian */ | ||
202 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | ||
203 | if (out) | ||
204 | hash[i] = be32_to_cpu(omap_sham_read(ctx->dd, | ||
205 | SHA_REG_DIGEST(i))); | ||
206 | else | ||
207 | omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), | ||
208 | cpu_to_be32(hash[i])); | ||
209 | } else { | ||
210 | /* MD5 results are in little endian */ | ||
211 | for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) | ||
212 | if (out) | ||
213 | hash[i] = le32_to_cpu(omap_sham_read(ctx->dd, | ||
214 | SHA_REG_DIGEST(i))); | ||
215 | else | ||
216 | omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), | ||
217 | cpu_to_le32(hash[i])); | ||
218 | } | ||
219 | } | ||
220 | |||
221 | static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | ||
222 | int final, int dma) | ||
223 | { | ||
224 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
225 | u32 val = length << 5, mask; | ||
226 | |||
227 | if (unlikely(!ctx->digcnt)) { | ||
228 | |||
229 | clk_enable(dd->iclk); | ||
230 | |||
231 | if (!(dd->flags & FLAGS_INIT)) { | ||
232 | omap_sham_write_mask(dd, SHA_REG_MASK, | ||
233 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | ||
234 | |||
235 | if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, | ||
236 | SHA_REG_SYSSTATUS_RESETDONE)) | ||
237 | return -ETIMEDOUT; | ||
238 | |||
239 | dd->flags |= FLAGS_INIT; | ||
240 | } | ||
241 | } else { | ||
242 | omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); | ||
243 | } | ||
244 | |||
245 | omap_sham_write_mask(dd, SHA_REG_MASK, | ||
246 | SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), | ||
247 | SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); | ||
248 | /* | ||
249 | * Setting ALGO_CONST only for the first iteration | ||
250 | * and CLOSE_HASH only for the last one. | ||
251 | */ | ||
252 | if (ctx->flags & FLAGS_SHA1) | ||
253 | val |= SHA_REG_CTRL_ALGO; | ||
254 | if (!ctx->digcnt) | ||
255 | val |= SHA_REG_CTRL_ALGO_CONST; | ||
256 | if (final) | ||
257 | val |= SHA_REG_CTRL_CLOSE_HASH; | ||
258 | |||
259 | mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | | ||
260 | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; | ||
261 | |||
262 | omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
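A worked example of the resulting CTRL value (illustrative only), for the first and final 64-byte block of a SHA-1 digest:

	u32 val = (64 << 5)			/* length field (bits 31:5) */
		| SHA_REG_CTRL_ALGO		/* SHA-1 rather than MD5 */
		| SHA_REG_CTRL_ALGO_CONST	/* load the standard init constants */
		| SHA_REG_CTRL_CLOSE_HASH;	/* apply final padding */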
266 | |||
267 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | ||
268 | size_t length, int final) | ||
269 | { | ||
270 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
271 | int err, count, len32; | ||
272 | const u32 *buffer = (const u32 *)buf; | ||
273 | |||
274 | 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zu, length: %zu, final: %d\n", |	||
275 | ctx->digcnt, length, final); | ||
276 | |||
277 | err = omap_sham_write_ctrl(dd, length, final, 0); | ||
278 | if (err) | ||
279 | return err; | ||
280 | |||
281 | if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) | ||
282 | return -ETIMEDOUT; | ||
283 | |||
284 | ctx->digcnt += length; | ||
285 | |||
286 | if (final) | ||
287 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | ||
288 | |||
289 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
290 | |||
291 | for (count = 0; count < len32; count++) | ||
292 | omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]); | ||
293 | |||
294 | return -EINPROGRESS; | ||
295 | } | ||
296 | |||
297 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | ||
298 | size_t length, int final) | ||
299 | { | ||
300 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
301 | int err, len32; | ||
302 | |||
303 | 	dev_dbg(dd->dev, "xmit_dma: digcnt: %zu, length: %zu, final: %d\n", |	||
304 | ctx->digcnt, length, final); | ||
305 | |||
306 | /* flush cache entries related to our page */ | ||
307 | if (dma_addr == ctx->dma_addr) | ||
308 | dma_sync_single_for_device(dd->dev, dma_addr, length, | ||
309 | DMA_TO_DEVICE); | ||
310 | |||
311 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
312 | |||
313 | omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, | ||
314 | 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC); | ||
315 | |||
316 | omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, | ||
317 | dma_addr, 0, 0); | ||
318 | |||
319 | err = omap_sham_write_ctrl(dd, length, final, 1); | ||
320 | if (err) | ||
321 | return err; | ||
322 | |||
323 | ctx->digcnt += length; | ||
324 | |||
325 | if (final) | ||
326 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | ||
327 | |||
328 | dd->flags |= FLAGS_DMA_ACTIVE; | ||
329 | |||
330 | omap_start_dma(dd->dma_lch); | ||
331 | |||
332 | return -EINPROGRESS; | ||
333 | } | ||
334 | |||
335 | static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, | ||
336 | const u8 *data, size_t length) | ||
337 | { | ||
338 | size_t count = min(length, ctx->buflen - ctx->bufcnt); | ||
339 | |||
340 | count = min(count, ctx->total); | ||
341 | if (count <= 0) | ||
342 | return 0; | ||
343 | memcpy(ctx->buffer + ctx->bufcnt, data, count); | ||
344 | ctx->bufcnt += count; | ||
345 | |||
346 | return count; | ||
347 | } | ||
348 | |||
349 | static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) | ||
350 | { | ||
351 | size_t count; | ||
352 | |||
353 | while (ctx->sg) { | ||
354 | count = omap_sham_append_buffer(ctx, | ||
355 | sg_virt(ctx->sg) + ctx->offset, | ||
356 | ctx->sg->length - ctx->offset); | ||
357 | if (!count) | ||
358 | break; | ||
359 | ctx->offset += count; | ||
360 | ctx->total -= count; | ||
361 | if (ctx->offset == ctx->sg->length) { | ||
362 | ctx->sg = sg_next(ctx->sg); | ||
363 | if (ctx->sg) | ||
364 | ctx->offset = 0; | ||
365 | else | ||
366 | ctx->total = 0; | ||
367 | } | ||
368 | } | ||
369 | |||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | ||
374 | { | ||
375 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
376 | unsigned int final; | ||
377 | size_t count; | ||
378 | |||
379 | if (!ctx->total) | ||
380 | return 0; | ||
381 | |||
382 | omap_sham_append_sg(ctx); | ||
383 | |||
384 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | ||
385 | |||
386 | 	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: %zu, final: %d\n", |	||
387 | ctx->bufcnt, ctx->digcnt, final); | ||
388 | |||
389 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | ||
390 | count = ctx->bufcnt; | ||
391 | ctx->bufcnt = 0; | ||
392 | return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final); | ||
393 | } | ||
394 | |||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) | ||
399 | { | ||
400 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
401 | unsigned int length; | ||
402 | |||
403 | ctx->flags |= FLAGS_FAST; | ||
404 | |||
405 | length = min(ctx->total, sg_dma_len(ctx->sg)); | ||
406 | ctx->total = length; | ||
407 | |||
408 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | ||
409 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
410 | return -EINVAL; | ||
411 | } | ||
412 | |||
413 | ctx->total -= length; | ||
414 | |||
415 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); | ||
416 | } | ||
417 | |||
418 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) | ||
419 | { | ||
420 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
421 | int bufcnt; | ||
422 | |||
423 | omap_sham_append_sg(ctx); | ||
424 | bufcnt = ctx->bufcnt; | ||
425 | ctx->bufcnt = 0; | ||
426 | |||
427 | return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); | ||
428 | } | ||
429 | |||
430 | static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | ||
431 | { | ||
432 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
433 | |||
434 | omap_stop_dma(dd->dma_lch); | ||
435 | if (ctx->flags & FLAGS_FAST) | ||
436 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static void omap_sham_cleanup(struct ahash_request *req) | ||
442 | { | ||
443 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
444 | struct omap_sham_dev *dd = ctx->dd; | ||
445 | unsigned long flags; | ||
446 | |||
447 | spin_lock_irqsave(&dd->lock, flags); | ||
448 | if (ctx->flags & FLAGS_CLEAN) { | ||
449 | spin_unlock_irqrestore(&dd->lock, flags); | ||
450 | return; | ||
451 | } | ||
452 | ctx->flags |= FLAGS_CLEAN; | ||
453 | spin_unlock_irqrestore(&dd->lock, flags); | ||
454 | |||
455 | if (ctx->digcnt) | ||
456 | clk_disable(dd->iclk); | ||
457 | |||
458 | if (ctx->dma_addr) | ||
459 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, | ||
460 | DMA_TO_DEVICE); | ||
461 | |||
462 | if (ctx->buffer) | ||
463 | free_page((unsigned long)ctx->buffer); | ||
464 | |||
465 | 	dev_dbg(dd->dev, "digcnt: %zu, bufcnt: %zu\n", ctx->digcnt, ctx->bufcnt); |	||
466 | } | ||
467 | |||
468 | static int omap_sham_init(struct ahash_request *req) | ||
469 | { | ||
470 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
471 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | ||
472 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
473 | struct omap_sham_dev *dd = NULL, *tmp; | ||
474 | |||
475 | spin_lock_bh(&sham.lock); | ||
476 | if (!tctx->dd) { | ||
477 | list_for_each_entry(tmp, &sham.dev_list, list) { | ||
478 | dd = tmp; | ||
479 | break; | ||
480 | } | ||
481 | tctx->dd = dd; | ||
482 | } else { | ||
483 | dd = tctx->dd; | ||
484 | } | ||
485 | spin_unlock_bh(&sham.lock); | ||
486 | |||
487 | ctx->dd = dd; | ||
488 | |||
489 | ctx->flags = 0; | ||
490 | |||
491 | ctx->flags |= FLAGS_FIRST; | ||
492 | |||
493 | dev_dbg(dd->dev, "init: digest size: %d\n", | ||
494 | crypto_ahash_digestsize(tfm)); | ||
495 | |||
496 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | ||
497 | ctx->flags |= FLAGS_SHA1; | ||
498 | |||
499 | ctx->bufcnt = 0; | ||
500 | ctx->digcnt = 0; | ||
501 | |||
502 | ctx->buflen = PAGE_SIZE; | ||
503 | ctx->buffer = (void *)__get_free_page( | ||
504 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
505 | GFP_KERNEL : GFP_ATOMIC); | ||
506 | if (!ctx->buffer) | ||
507 | return -ENOMEM; | ||
508 | |||
509 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, | ||
510 | DMA_TO_DEVICE); | ||
511 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
512 | 		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen); |	||
513 | free_page((unsigned long)ctx->buffer); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | if (tctx->flags & FLAGS_HMAC) { | ||
518 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
519 | |||
520 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | ||
521 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | ||
522 | ctx->flags |= FLAGS_HMAC; | ||
523 | } | ||
524 | |||
525 | return 0; | ||
526 | |||
527 | } | ||
528 | |||
529 | static int omap_sham_update_req(struct omap_sham_dev *dd) | ||
530 | { | ||
531 | struct ahash_request *req = dd->req; | ||
532 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
533 | int err; | ||
534 | |||
535 | 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zu, finup: %d\n", |	||
536 | ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0); | ||
537 | |||
538 | if (ctx->flags & FLAGS_CPU) | ||
539 | err = omap_sham_update_cpu(dd); | ||
540 | else if (ctx->flags & FLAGS_FAST) | ||
541 | err = omap_sham_update_dma_fast(dd); | ||
542 | else | ||
543 | err = omap_sham_update_dma_slow(dd); | ||
544 | |||
545 | 	/* wait for dma completion before we can take more data */ |	||
546 | 	dev_dbg(dd->dev, "update: err: %d, digcnt: %zu\n", err, ctx->digcnt); |	||
547 | |||
548 | return err; | ||
549 | } | ||
550 | |||
551 | static int omap_sham_final_req(struct omap_sham_dev *dd) | ||
552 | { | ||
553 | struct ahash_request *req = dd->req; | ||
554 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
555 | int err = 0, use_dma = 1; | ||
556 | |||
557 | if (ctx->bufcnt <= 64) | ||
558 | /* faster to handle last block with cpu */ | ||
559 | use_dma = 0; | ||
560 | |||
561 | if (use_dma) | ||
562 | err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1); | ||
563 | else | ||
564 | err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); | ||
565 | |||
566 | ctx->bufcnt = 0; | ||
567 | |||
568 | if (err != -EINPROGRESS) | ||
569 | omap_sham_cleanup(req); | ||
570 | |||
571 | dev_dbg(dd->dev, "final_req: err: %d\n", err); | ||
572 | |||
573 | return err; | ||
574 | } | ||
575 | |||
576 | static int omap_sham_finish_req_hmac(struct ahash_request *req) | ||
577 | { | ||
578 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
579 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
580 | int bs = crypto_shash_blocksize(bctx->shash); | ||
581 | int ds = crypto_shash_digestsize(bctx->shash); | ||
582 | struct { | ||
583 | struct shash_desc shash; | ||
584 | char ctx[crypto_shash_descsize(bctx->shash)]; | ||
585 | } desc; | ||
586 | |||
587 | desc.shash.tfm = bctx->shash; | ||
588 | desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ | ||
589 | |||
590 | return crypto_shash_init(&desc.shash) ?: | ||
591 | crypto_shash_update(&desc.shash, bctx->opad, bs) ?: | ||
592 | crypto_shash_finup(&desc.shash, req->result, ds, req->result); | ||
593 | } | ||
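For reference, this finishes the standard HMAC construction

	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))

The accelerator produces the inner digest (omap_sham_init() preloads bctx->ipad into the request buffer), so only the short outer hash over opad and the inner result runs in software on the fallback shash.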
594 | |||
595 | static void omap_sham_finish_req(struct ahash_request *req, int err) | ||
596 | { | ||
597 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
598 | |||
599 | if (!err) { | ||
600 | omap_sham_copy_hash(ctx->dd->req, 1); | ||
601 | if (ctx->flags & FLAGS_HMAC) | ||
602 | err = omap_sham_finish_req_hmac(req); | ||
603 | } | ||
604 | |||
605 | if (ctx->flags & FLAGS_FINAL) | ||
606 | omap_sham_cleanup(req); | ||
607 | |||
608 | clear_bit(FLAGS_BUSY, &ctx->dd->flags); | ||
609 | |||
610 | if (req->base.complete) | ||
611 | req->base.complete(&req->base, err); | ||
612 | } | ||
613 | |||
614 | static int omap_sham_handle_queue(struct omap_sham_dev *dd) | ||
615 | { | ||
616 | struct crypto_async_request *async_req, *backlog; | ||
617 | struct omap_sham_reqctx *ctx; | ||
618 | struct ahash_request *req, *prev_req; | ||
619 | unsigned long flags; | ||
620 | int err = 0; | ||
621 | |||
622 | if (test_and_set_bit(FLAGS_BUSY, &dd->flags)) | ||
623 | return 0; | ||
624 | |||
625 | spin_lock_irqsave(&dd->lock, flags); | ||
626 | backlog = crypto_get_backlog(&dd->queue); | ||
627 | async_req = crypto_dequeue_request(&dd->queue); | ||
628 | if (!async_req) | ||
629 | clear_bit(FLAGS_BUSY, &dd->flags); | ||
630 | spin_unlock_irqrestore(&dd->lock, flags); | ||
631 | |||
632 | if (!async_req) | ||
633 | return 0; | ||
634 | |||
635 | if (backlog) | ||
636 | backlog->complete(backlog, -EINPROGRESS); | ||
637 | |||
638 | req = ahash_request_cast(async_req); | ||
639 | |||
640 | prev_req = dd->req; | ||
641 | dd->req = req; | ||
642 | |||
643 | ctx = ahash_request_ctx(req); | ||
644 | |||
645 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | ||
646 | ctx->op, req->nbytes); | ||
647 | |||
648 | if (req != prev_req && ctx->digcnt) | ||
649 | /* request has changed - restore hash */ | ||
650 | omap_sham_copy_hash(req, 0); | ||
651 | |||
652 | if (ctx->op == OP_UPDATE) { | ||
653 | err = omap_sham_update_req(dd); | ||
654 | if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP)) | ||
655 | /* no final() after finup() */ | ||
656 | err = omap_sham_final_req(dd); | ||
657 | } else if (ctx->op == OP_FINAL) { | ||
658 | err = omap_sham_final_req(dd); | ||
659 | } | ||
660 | |||
661 | if (err != -EINPROGRESS) { | ||
662 | /* done_task will not finish it, so do it here */ | ||
663 | omap_sham_finish_req(req, err); | ||
664 | tasklet_schedule(&dd->queue_task); | ||
665 | } | ||
666 | |||
667 | dev_dbg(dd->dev, "exit, err: %d\n", err); | ||
668 | |||
669 | return err; | ||
670 | } | ||
671 | |||
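The queue handler above follows the standard async-crypto dispatch pattern: a test_and_set BUSY bit serializes access to the engine, one request is dequeued under the spinlock, and any backlogged request is notified with -EINPROGRESS. A stripped-down userspace analogue of the BUSY gate, using a C11 atomic flag in place of the kernel's bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag busy = ATOMIC_FLAG_INIT;

/* Only one caller wins the flag and dispatches work; everyone else
 * returns immediately, knowing the winner (or, in the driver, the
 * completion tasklet) will re-run the queue later. */
static void handle_queue(const char *who)
{
	if (atomic_flag_test_and_set(&busy)) {
		printf("%s: device busy, request stays queued\n", who);
		return;
	}
	printf("%s: dispatching one request to the engine\n", who);
	/* ... completion would clear the flag and re-enter ... */
	atomic_flag_clear(&busy);
}

int main(void)
{
	handle_queue("enqueue path");
	handle_queue("tasklet");
	return 0;
}
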
672 | static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) | ||
673 | { | ||
674 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
675 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
676 | struct omap_sham_dev *dd = tctx->dd; | ||
677 | unsigned long flags; | ||
678 | int err; | ||
679 | |||
680 | ctx->op = op; | ||
681 | |||
682 | spin_lock_irqsave(&dd->lock, flags); | ||
683 | err = ahash_enqueue_request(&dd->queue, req); | ||
684 | spin_unlock_irqrestore(&dd->lock, flags); | ||
685 | |||
686 | omap_sham_handle_queue(dd); | ||
687 | |||
688 | return err; | ||
689 | } | ||
690 | |||
691 | static int omap_sham_update(struct ahash_request *req) | ||
692 | { | ||
693 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
694 | |||
695 | if (!req->nbytes) | ||
696 | return 0; | ||
697 | |||
698 | ctx->total = req->nbytes; | ||
699 | ctx->sg = req->src; | ||
700 | ctx->offset = 0; | ||
701 | |||
702 | if (ctx->flags & FLAGS_FINUP) { | ||
703 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { | ||
704 | /* | ||
705 | * OMAP HW accel works only with buffers >= 9 bytes; | ||
706 | * we will switch to the sw bypass in final(), | ||
707 | * which sees the same request and data | ||
708 | */ | ||
709 | omap_sham_append_sg(ctx); | ||
710 | return 0; | ||
711 | } else if (ctx->bufcnt + ctx->total <= 64) { | ||
712 | ctx->flags |= FLAGS_CPU; | ||
713 | } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) { | ||
714 | /* maybe we can use faster functions */ | ||
715 | int aligned = IS_ALIGNED((u32)ctx->sg->offset, | ||
716 | sizeof(u32)); | ||
717 | |||
718 | if (aligned && (ctx->flags & FLAGS_FIRST)) | ||
719 | /* digest: first and final */ | ||
720 | ctx->flags |= FLAGS_FAST; | ||
721 | |||
722 | ctx->flags &= ~FLAGS_FIRST; | ||
723 | } | ||
724 | } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { | ||
725 | /* if not finup -> not fast */ | ||
726 | omap_sham_append_sg(ctx); | ||
727 | return 0; | ||
728 | } | ||
729 | |||
730 | return omap_sham_enqueue(req, OP_UPDATE); | ||
731 | } | ||
732 | |||
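update() above picks one of three data paths from the byte counts: anything under the 9-byte hardware minimum is only buffered (final() will bypass to software), up to 64 bytes goes over PIO, and a single aligned scatterlist entry on a fresh digest takes the fast DMA path. A pure-function restatement of that dispatch, using the thresholds from the driver and ignoring the non-finup branch:

#include <stdbool.h>
#include <stdio.h>

enum upd_path { BUFFER_ONLY, USE_CPU, USE_FAST_DMA, USE_SLOW_DMA };

static enum upd_path pick_path(unsigned digcnt, unsigned bufcnt,
			       unsigned total, bool single_aligned_sg,
			       bool first)
{
	if (digcnt + bufcnt + total < 9)
		return BUFFER_ONLY;           /* final() will bypass to sw */
	if (bufcnt + total <= 64)
		return USE_CPU;
	if (!bufcnt && single_aligned_sg && first)
		return USE_FAST_DMA;          /* digest: first and final */
	return USE_SLOW_DMA;
}

int main(void)
{
	printf("%d\n", pick_path(0, 0, 4, true, true));    /* BUFFER_ONLY */
	printf("%d\n", pick_path(0, 8, 40, false, false)); /* USE_CPU */
	printf("%d\n", pick_path(0, 0, 4096, true, true)); /* USE_FAST_DMA */
	return 0;
}
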
733 | static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags, | ||
734 | const u8 *data, unsigned int len, u8 *out) | ||
735 | { | ||
736 | struct { | ||
737 | struct shash_desc shash; | ||
738 | char ctx[crypto_shash_descsize(shash)]; | ||
739 | } desc; | ||
740 | |||
741 | desc.shash.tfm = shash; | ||
742 | desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
743 | |||
744 | return crypto_shash_digest(&desc.shash, data, len, out); | ||
745 | } | ||
746 | |||
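omap_sham_shash_digest() uses the classic on-stack descriptor idiom (what later became SHASH_DESC_ON_STACK): a block-scope struct whose trailing char array is sized at run time by crypto_shash_descsize(), so the descriptor header and the implementation's private state live together on the stack. A self-contained illustration of the GNU C variably-sized-member trick, with impl_descsize() as a stand-in for the crypto API call:

#include <stdio.h>
#include <string.h>

struct desc_hdr { void *tfm; unsigned flags; };

/* Stand-in for crypto_shash_descsize(): the implementation reports how
 * much private state it needs behind the descriptor header. */
static size_t impl_descsize(void) { return 96; }

int main(void)
{
	struct {
		struct desc_hdr shash;
		char ctx[impl_descsize()];  /* variably sized member: GNU C */
	} desc;

	memset(&desc, 0, sizeof(desc));
	printf("on-stack descriptor: %zu bytes\n", sizeof(desc));
	return 0;
}
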
747 | static int omap_sham_final_shash(struct ahash_request *req) | ||
748 | { | ||
749 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
750 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
751 | |||
752 | return omap_sham_shash_digest(tctx->fallback, req->base.flags, | ||
753 | ctx->buffer, ctx->bufcnt, req->result); | ||
754 | } | ||
755 | |||
756 | static int omap_sham_final(struct ahash_request *req) | ||
757 | { | ||
758 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
759 | int err = 0; | ||
760 | |||
761 | ctx->flags |= FLAGS_FINUP; | ||
762 | |||
763 | /* OMAP HW accel works only with buffers >= 9 */ | ||
764 | /* HMAC is always >= 9 because of ipad */ | ||
765 | if ((ctx->digcnt + ctx->bufcnt) < 9) | ||
766 | err = omap_sham_final_shash(req); | ||
767 | else if (ctx->bufcnt) | ||
768 | return omap_sham_enqueue(req, OP_FINAL); | ||
769 | |||
770 | omap_sham_cleanup(req); | ||
771 | |||
772 | return err; | ||
773 | } | ||
774 | |||
775 | static int omap_sham_finup(struct ahash_request *req) | ||
776 | { | ||
777 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
778 | int err1, err2; | ||
779 | |||
780 | ctx->flags |= FLAGS_FINUP; | ||
781 | |||
782 | err1 = omap_sham_update(req); | ||
783 | if (err1 == -EINPROGRESS) | ||
784 | return err1; | ||
785 | /* | ||
786 | * final() always has to be called to clean up resources, | ||
787 | * even if update() failed, except for -EINPROGRESS | ||
788 | */ | ||
789 | err2 = omap_sham_final(req); | ||
790 | |||
791 | return err1 ?: err2; | ||
792 | } | ||
793 | |||
794 | static int omap_sham_digest(struct ahash_request *req) | ||
795 | { | ||
796 | return omap_sham_init(req) ?: omap_sham_finup(req); | ||
797 | } | ||
798 | |||
799 | static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
800 | unsigned int keylen) | ||
801 | { | ||
802 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | ||
803 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
804 | int bs = crypto_shash_blocksize(bctx->shash); | ||
805 | int ds = crypto_shash_digestsize(bctx->shash); | ||
806 | int err, i; | ||
807 | err = crypto_shash_setkey(tctx->fallback, key, keylen); | ||
808 | if (err) | ||
809 | return err; | ||
810 | |||
811 | if (keylen > bs) { | ||
812 | err = omap_sham_shash_digest(bctx->shash, | ||
813 | crypto_shash_get_flags(bctx->shash), | ||
814 | key, keylen, bctx->ipad); | ||
815 | if (err) | ||
816 | return err; | ||
817 | keylen = ds; | ||
818 | } else { | ||
819 | memcpy(bctx->ipad, key, keylen); | ||
820 | } | ||
821 | |||
822 | memset(bctx->ipad + keylen, 0, bs - keylen); | ||
823 | memcpy(bctx->opad, bctx->ipad, bs); | ||
824 | |||
825 | for (i = 0; i < bs; i++) { | ||
826 | bctx->ipad[i] ^= 0x36; | ||
827 | bctx->opad[i] ^= 0x5c; | ||
828 | } | ||
829 | |||
830 | return err; | ||
831 | } | ||
832 | |||
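setkey() above implements the RFC 2104 key schedule: keys longer than the block size are first hashed down to the digest size, shorter keys are zero-padded, and the result is XORed with the fixed 0x36/0x5c pads. A minimal sketch of the pad derivation for a short key (the long-key digest step is omitted):

#include <stdio.h>
#include <string.h>

#define BS 64  /* SHA-1/MD5 block size, as in the driver */

static void hmac_pads(const unsigned char *key, size_t keylen,
		      unsigned char ipad[BS], unsigned char opad[BS])
{
	size_t i;

	memset(ipad, 0, BS);
	memcpy(ipad, key, keylen);   /* caller guarantees keylen <= BS */
	memcpy(opad, ipad, BS);
	for (i = 0; i < BS; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}

int main(void)
{
	unsigned char ipad[BS], opad[BS];

	hmac_pads((const unsigned char *)"key", 3, ipad, opad);
	printf("ipad[0]=%02x opad[0]=%02x\n", ipad[0], opad[0]); /* 5d, 37 */
	return 0;
}
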
833 | static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | ||
834 | { | ||
835 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); | ||
836 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
837 | |||
838 | /* Allocate a fallback and abort if it failed. */ | ||
839 | tctx->fallback = crypto_alloc_shash(alg_name, 0, | ||
840 | CRYPTO_ALG_NEED_FALLBACK); | ||
841 | if (IS_ERR(tctx->fallback)) { | ||
842 | pr_err("omap-sham: fallback driver '%s' " | ||
843 | "could not be loaded.\n", alg_name); | ||
844 | return PTR_ERR(tctx->fallback); | ||
845 | } | ||
846 | |||
847 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
848 | sizeof(struct omap_sham_reqctx)); | ||
849 | |||
850 | if (alg_base) { | ||
851 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
852 | tctx->flags |= FLAGS_HMAC; | ||
853 | bctx->shash = crypto_alloc_shash(alg_base, 0, | ||
854 | CRYPTO_ALG_NEED_FALLBACK); | ||
855 | if (IS_ERR(bctx->shash)) { | ||
856 | pr_err("omap-sham: base driver '%s' " | ||
857 | "could not be loaded.\n", alg_base); | ||
858 | crypto_free_shash(tctx->fallback); | ||
859 | return PTR_ERR(bctx->shash); | ||
860 | } | ||
861 | |||
862 | } | ||
863 | |||
864 | return 0; | ||
865 | } | ||
866 | |||
867 | static int omap_sham_cra_init(struct crypto_tfm *tfm) | ||
868 | { | ||
869 | return omap_sham_cra_init_alg(tfm, NULL); | ||
870 | } | ||
871 | |||
872 | static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) | ||
873 | { | ||
874 | return omap_sham_cra_init_alg(tfm, "sha1"); | ||
875 | } | ||
876 | |||
877 | static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) | ||
878 | { | ||
879 | return omap_sham_cra_init_alg(tfm, "md5"); | ||
880 | } | ||
881 | |||
882 | static void omap_sham_cra_exit(struct crypto_tfm *tfm) | ||
883 | { | ||
884 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); | ||
885 | |||
886 | crypto_free_shash(tctx->fallback); | ||
887 | tctx->fallback = NULL; | ||
888 | |||
889 | if (tctx->flags & FLAGS_HMAC) { | ||
890 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
891 | crypto_free_shash(bctx->shash); | ||
892 | } | ||
893 | } | ||
894 | |||
895 | static struct ahash_alg algs[] = { | ||
896 | { | ||
897 | .init = omap_sham_init, | ||
898 | .update = omap_sham_update, | ||
899 | .final = omap_sham_final, | ||
900 | .finup = omap_sham_finup, | ||
901 | .digest = omap_sham_digest, | ||
902 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
903 | .halg.base = { | ||
904 | .cra_name = "sha1", | ||
905 | .cra_driver_name = "omap-sha1", | ||
906 | .cra_priority = 100, | ||
907 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
908 | CRYPTO_ALG_ASYNC | | ||
909 | CRYPTO_ALG_NEED_FALLBACK, | ||
910 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
911 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
912 | .cra_alignmask = 0, | ||
913 | .cra_module = THIS_MODULE, | ||
914 | .cra_init = omap_sham_cra_init, | ||
915 | .cra_exit = omap_sham_cra_exit, | ||
916 | } | ||
917 | }, | ||
918 | { | ||
919 | .init = omap_sham_init, | ||
920 | .update = omap_sham_update, | ||
921 | .final = omap_sham_final, | ||
922 | .finup = omap_sham_finup, | ||
923 | .digest = omap_sham_digest, | ||
924 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
925 | .halg.base = { | ||
926 | .cra_name = "md5", | ||
927 | .cra_driver_name = "omap-md5", | ||
928 | .cra_priority = 100, | ||
929 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
930 | CRYPTO_ALG_ASYNC | | ||
931 | CRYPTO_ALG_NEED_FALLBACK, | ||
932 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
933 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
934 | .cra_alignmask = 0, | ||
935 | .cra_module = THIS_MODULE, | ||
936 | .cra_init = omap_sham_cra_init, | ||
937 | .cra_exit = omap_sham_cra_exit, | ||
938 | } | ||
939 | }, | ||
940 | { | ||
941 | .init = omap_sham_init, | ||
942 | .update = omap_sham_update, | ||
943 | .final = omap_sham_final, | ||
944 | .finup = omap_sham_finup, | ||
945 | .digest = omap_sham_digest, | ||
946 | .setkey = omap_sham_setkey, | ||
947 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
948 | .halg.base = { | ||
949 | .cra_name = "hmac(sha1)", | ||
950 | .cra_driver_name = "omap-hmac-sha1", | ||
951 | .cra_priority = 100, | ||
952 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
953 | CRYPTO_ALG_ASYNC | | ||
954 | CRYPTO_ALG_NEED_FALLBACK, | ||
955 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
956 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
957 | sizeof(struct omap_sham_hmac_ctx), | ||
958 | .cra_alignmask = 0, | ||
959 | .cra_module = THIS_MODULE, | ||
960 | .cra_init = omap_sham_cra_sha1_init, | ||
961 | .cra_exit = omap_sham_cra_exit, | ||
962 | } | ||
963 | }, | ||
964 | { | ||
965 | .init = omap_sham_init, | ||
966 | .update = omap_sham_update, | ||
967 | .final = omap_sham_final, | ||
968 | .finup = omap_sham_finup, | ||
969 | .digest = omap_sham_digest, | ||
970 | .setkey = omap_sham_setkey, | ||
971 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
972 | .halg.base = { | ||
973 | .cra_name = "hmac(md5)", | ||
974 | .cra_driver_name = "omap-hmac-md5", | ||
975 | .cra_priority = 100, | ||
976 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
977 | CRYPTO_ALG_ASYNC | | ||
978 | CRYPTO_ALG_NEED_FALLBACK, | ||
979 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
980 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
981 | sizeof(struct omap_sham_hmac_ctx), | ||
982 | .cra_alignmask = 0, | ||
983 | .cra_module = THIS_MODULE, | ||
984 | .cra_init = omap_sham_cra_md5_init, | ||
985 | .cra_exit = omap_sham_cra_exit, | ||
986 | } | ||
987 | } | ||
988 | }; | ||
989 | |||
990 | static void omap_sham_done_task(unsigned long data) | ||
991 | { | ||
992 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | ||
993 | struct ahash_request *req = dd->req; | ||
994 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
995 | int ready = 0; | ||
996 | |||
997 | if (ctx->flags & FLAGS_OUTPUT_READY) { | ||
998 | ctx->flags &= ~FLAGS_OUTPUT_READY; | ||
999 | ready = 1; | ||
1000 | } | ||
1001 | |||
1002 | if (dd->flags & FLAGS_DMA_ACTIVE) { | ||
1003 | dd->flags &= ~FLAGS_DMA_ACTIVE; | ||
1004 | omap_sham_update_dma_stop(dd); | ||
1005 | omap_sham_update_dma_slow(dd); | ||
1006 | } | ||
1007 | |||
1008 | if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) { | ||
1009 | dev_dbg(dd->dev, "update done\n"); | ||
1010 | /* finish current request */ | ||
1011 | omap_sham_finish_req(req, 0); | ||
1012 | /* start new request */ | ||
1013 | omap_sham_handle_queue(dd); | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | static void omap_sham_queue_task(unsigned long data) | ||
1018 | { | ||
1019 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | ||
1020 | |||
1021 | omap_sham_handle_queue(dd); | ||
1022 | } | ||
1023 | |||
1024 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) | ||
1025 | { | ||
1026 | struct omap_sham_dev *dd = dev_id; | ||
1027 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
1028 | |||
1029 | if (!ctx) { | ||
1030 | dev_err(dd->dev, "unknown interrupt.\n"); | ||
1031 | return IRQ_HANDLED; | ||
1032 | } | ||
1033 | |||
1034 | if (unlikely(ctx->flags & FLAGS_FINAL)) | ||
1035 | /* final -> allow device to go to power-saving mode */ | ||
1036 | omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); | ||
1037 | |||
1038 | omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, | ||
1039 | SHA_REG_CTRL_OUTPUT_READY); | ||
1040 | omap_sham_read(dd, SHA_REG_CTRL); | ||
1041 | |||
1042 | ctx->flags |= FLAGS_OUTPUT_READY; | ||
1043 | tasklet_schedule(&dd->done_task); | ||
1044 | |||
1045 | return IRQ_HANDLED; | ||
1046 | } | ||
1047 | |||
1048 | static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | ||
1049 | { | ||
1050 | struct omap_sham_dev *dd = data; | ||
1051 | |||
1052 | if (likely(lch == dd->dma_lch)) | ||
1053 | tasklet_schedule(&dd->done_task); | ||
1054 | } | ||
1055 | |||
1056 | static int omap_sham_dma_init(struct omap_sham_dev *dd) | ||
1057 | { | ||
1058 | int err; | ||
1059 | |||
1060 | dd->dma_lch = -1; | ||
1061 | |||
1062 | err = omap_request_dma(dd->dma, dev_name(dd->dev), | ||
1063 | omap_sham_dma_callback, dd, &dd->dma_lch); | ||
1064 | if (err) { | ||
1065 | dev_err(dd->dev, "Unable to request DMA channel\n"); | ||
1066 | return err; | ||
1067 | } | ||
1068 | omap_set_dma_dest_params(dd->dma_lch, 0, | ||
1069 | OMAP_DMA_AMODE_CONSTANT, | ||
1070 | dd->phys_base + SHA_REG_DIN(0), 0, 16); | ||
1071 | |||
1072 | omap_set_dma_dest_burst_mode(dd->dma_lch, | ||
1073 | OMAP_DMA_DATA_BURST_16); | ||
1074 | |||
1075 | return 0; | ||
1076 | } | ||
1077 | |||
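The DMA destination above is programmed with OMAP_DMA_AMODE_CONSTANT: while the source post-increments through memory, every element is written to the same fixed address, the accelerator's DIN FIFO window. A toy model of the two addressing modes, with the register stood in by a plain variable:

#include <stdio.h>

int main(void)
{
	unsigned src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
	unsigned fifo = 0;               /* stand-in for SHA_REG_DIN(0) */
	unsigned i;

	/* post-increment source, constant destination: each element
	 * comes from the next memory word but lands on one address */
	for (i = 0; i < 8; i++)
		fifo = src[i];

	printf("fifo last saw %u\n", fifo);  /* prints 8 */
	return 0;
}
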
1078 | static void omap_sham_dma_cleanup(struct omap_sham_dev *dd) | ||
1079 | { | ||
1080 | if (dd->dma_lch >= 0) { | ||
1081 | omap_free_dma(dd->dma_lch); | ||
1082 | dd->dma_lch = -1; | ||
1083 | } | ||
1084 | } | ||
1085 | |||
1086 | static int __devinit omap_sham_probe(struct platform_device *pdev) | ||
1087 | { | ||
1088 | struct omap_sham_dev *dd; | ||
1089 | struct device *dev = &pdev->dev; | ||
1090 | struct resource *res; | ||
1091 | int err, i, j; | ||
1092 | |||
1093 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); | ||
1094 | if (dd == NULL) { | ||
1095 | dev_err(dev, "unable to alloc data struct.\n"); | ||
1096 | err = -ENOMEM; | ||
1097 | goto data_err; | ||
1098 | } | ||
1099 | dd->dev = dev; | ||
1100 | platform_set_drvdata(pdev, dd); | ||
1101 | |||
1102 | INIT_LIST_HEAD(&dd->list); | ||
1103 | spin_lock_init(&dd->lock); | ||
1104 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); | ||
1105 | tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd); | ||
1106 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); | ||
1107 | |||
1108 | dd->irq = -1; | ||
1109 | |||
1110 | /* Get the base address */ | ||
1111 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1112 | if (!res) { | ||
1113 | dev_err(dev, "no MEM resource info\n"); | ||
1114 | err = -ENODEV; | ||
1115 | goto res_err; | ||
1116 | } | ||
1117 | dd->phys_base = res->start; | ||
1118 | |||
1119 | /* Get the DMA */ | ||
1120 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1121 | if (!res) { | ||
1122 | dev_err(dev, "no DMA resource info\n"); | ||
1123 | err = -ENODEV; | ||
1124 | goto res_err; | ||
1125 | } | ||
1126 | dd->dma = res->start; | ||
1127 | |||
1128 | /* Get the IRQ */ | ||
1129 | dd->irq = platform_get_irq(pdev, 0); | ||
1130 | if (dd->irq < 0) { | ||
1131 | dev_err(dev, "no IRQ resource info\n"); | ||
1132 | err = dd->irq; | ||
1133 | goto res_err; | ||
1134 | } | ||
1135 | |||
1136 | err = request_irq(dd->irq, omap_sham_irq, | ||
1137 | IRQF_TRIGGER_LOW, dev_name(dev), dd); | ||
1138 | if (err) { | ||
1139 | dev_err(dev, "unable to request irq.\n"); | ||
1140 | goto res_err; | ||
1141 | } | ||
1142 | |||
1143 | err = omap_sham_dma_init(dd); | ||
1144 | if (err) | ||
1145 | goto dma_err; | ||
1146 | |||
1147 | /* Initializing the clock */ | ||
1148 | dd->iclk = clk_get(dev, "ick"); | ||
1149 | if (!dd->iclk) { | ||
1150 | dev_err(dev, "clock initialization failed.\n"); | ||
1151 | err = -ENODEV; | ||
1152 | goto clk_err; | ||
1153 | } | ||
1154 | |||
1155 | dd->io_base = ioremap(dd->phys_base, SZ_4K); | ||
1156 | if (!dd->io_base) { | ||
1157 | dev_err(dev, "can't ioremap\n"); | ||
1158 | err = -ENOMEM; | ||
1159 | goto io_err; | ||
1160 | } | ||
1161 | |||
1162 | clk_enable(dd->iclk); | ||
1163 | dev_info(dev, "hw accel on OMAP rev %u.%u\n", | ||
1164 | (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4, | ||
1165 | omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR); | ||
1166 | clk_disable(dd->iclk); | ||
1167 | |||
1168 | spin_lock(&sham.lock); | ||
1169 | list_add_tail(&dd->list, &sham.dev_list); | ||
1170 | spin_unlock(&sham.lock); | ||
1171 | |||
1172 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
1173 | err = crypto_register_ahash(&algs[i]); | ||
1174 | if (err) | ||
1175 | goto err_algs; | ||
1176 | } | ||
1177 | |||
1178 | return 0; | ||
1179 | |||
1180 | err_algs: | ||
1181 | for (j = 0; j < i; j++) | ||
1182 | crypto_unregister_ahash(&algs[j]); | ||
1183 | iounmap(dd->io_base); | ||
1184 | io_err: | ||
1185 | clk_put(dd->iclk); | ||
1186 | clk_err: | ||
1187 | omap_sham_dma_cleanup(dd); | ||
1188 | dma_err: | ||
1189 | if (dd->irq >= 0) | ||
1190 | free_irq(dd->irq, dd); | ||
1191 | res_err: | ||
1192 | kfree(dd); | ||
1193 | dd = NULL; | ||
1194 | data_err: | ||
1195 | dev_err(dev, "initialization failed.\n"); | ||
1196 | |||
1197 | return err; | ||
1198 | } | ||
1199 | |||
1200 | static int __devexit omap_sham_remove(struct platform_device *pdev) | ||
1201 | { | ||
1202 | struct omap_sham_dev *dd; | ||
1203 | int i; | ||
1204 | |||
1205 | dd = platform_get_drvdata(pdev); | ||
1206 | if (!dd) | ||
1207 | return -ENODEV; | ||
1208 | spin_lock(&sham.lock); | ||
1209 | list_del(&dd->list); | ||
1210 | spin_unlock(&sham.lock); | ||
1211 | for (i = 0; i < ARRAY_SIZE(algs); i++) | ||
1212 | crypto_unregister_ahash(&algs[i]); | ||
1213 | tasklet_kill(&dd->done_task); | ||
1214 | tasklet_kill(&dd->queue_task); | ||
1215 | iounmap(dd->io_base); | ||
1216 | clk_put(dd->iclk); | ||
1217 | omap_sham_dma_cleanup(dd); | ||
1218 | if (dd->irq >= 0) | ||
1219 | free_irq(dd->irq, dd); | ||
1220 | kfree(dd); | ||
1221 | dd = NULL; | ||
1222 | |||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
1226 | static struct platform_driver omap_sham_driver = { | ||
1227 | .probe = omap_sham_probe, | ||
1228 | .remove = omap_sham_remove, | ||
1229 | .driver = { | ||
1230 | .name = "omap-sham", | ||
1231 | .owner = THIS_MODULE, | ||
1232 | }, | ||
1233 | }; | ||
1234 | |||
1235 | static int __init omap_sham_mod_init(void) | ||
1236 | { | ||
1237 | pr_info("loading %s driver\n", "omap-sham"); | ||
1238 | |||
1239 | if (!cpu_class_is_omap2() || | ||
1240 | omap_type() != OMAP2_DEVICE_TYPE_SEC) { | ||
1241 | pr_err("Unsupported CPU\n"); | ||
1242 | return -ENODEV; | ||
1243 | } | ||
1244 | |||
1245 | return platform_driver_register(&omap_sham_driver); | ||
1246 | } | ||
1247 | |||
1248 | static void __exit omap_sham_mod_exit(void) | ||
1249 | { | ||
1250 | platform_driver_unregister(&omap_sham_driver); | ||
1251 | } | ||
1252 | |||
1253 | module_init(omap_sham_mod_init); | ||
1254 | module_exit(omap_sham_mod_exit); | ||
1255 | |||
1256 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); | ||
1257 | MODULE_LICENSE("GPL v2"); | ||
1258 | MODULE_AUTHOR("Dmitry Kasatkin"); | ||
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dc558a097311..97f4af1d8a64 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * talitos - Freescale Integrated Security Engine (SEC) device driver | 2 | * talitos - Freescale Integrated Security Engine (SEC) device driver |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2008-2010 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Scatterlist Crypto API glue code copied from files with the following: | 6 | * Scatterlist Crypto API glue code copied from files with the following: |
7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> |
@@ -43,9 +43,12 @@ | |||
43 | #include <crypto/aes.h> | 43 | #include <crypto/aes.h> |
44 | #include <crypto/des.h> | 44 | #include <crypto/des.h> |
45 | #include <crypto/sha.h> | 45 | #include <crypto/sha.h> |
46 | #include <crypto/md5.h> | ||
46 | #include <crypto/aead.h> | 47 | #include <crypto/aead.h> |
47 | #include <crypto/authenc.h> | 48 | #include <crypto/authenc.h> |
48 | #include <crypto/skcipher.h> | 49 | #include <crypto/skcipher.h> |
50 | #include <crypto/hash.h> | ||
51 | #include <crypto/internal/hash.h> | ||
49 | #include <crypto/scatterwalk.h> | 52 | #include <crypto/scatterwalk.h> |
50 | 53 | ||
51 | #include "talitos.h" | 54 | #include "talitos.h" |
@@ -65,6 +68,13 @@ struct talitos_ptr { | |||
65 | __be32 ptr; /* address */ | 68 | __be32 ptr; /* address */ |
66 | }; | 69 | }; |
67 | 70 | ||
71 | static const struct talitos_ptr zero_entry = { | ||
72 | .len = 0, | ||
73 | .j_extent = 0, | ||
74 | .eptr = 0, | ||
75 | .ptr = 0 | ||
76 | }; | ||
77 | |||
68 | /* descriptor */ | 78 | /* descriptor */ |
69 | struct talitos_desc { | 79 | struct talitos_desc { |
70 | __be32 hdr; /* header high bits */ | 80 | __be32 hdr; /* header high bits */ |
@@ -146,6 +156,7 @@ struct talitos_private { | |||
146 | /* .features flag */ | 156 | /* .features flag */ |
147 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | 157 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
148 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | 158 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
159 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | ||
149 | 160 | ||
150 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | 161 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) |
151 | { | 162 | { |
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
692 | #define TALITOS_MAX_KEY_SIZE 64 | 703 | #define TALITOS_MAX_KEY_SIZE 64 |
693 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 704 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
694 | 705 | ||
695 | #define MD5_DIGEST_SIZE 16 | 706 | #define MD5_BLOCK_SIZE 64 |
696 | 707 | ||
697 | struct talitos_ctx { | 708 | struct talitos_ctx { |
698 | struct device *dev; | 709 | struct device *dev; |
@@ -705,6 +716,23 @@ struct talitos_ctx { | |||
705 | unsigned int authsize; | 716 | unsigned int authsize; |
706 | }; | 717 | }; |
707 | 718 | ||
719 | #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE | ||
720 | #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 | ||
721 | |||
722 | struct talitos_ahash_req_ctx { | ||
723 | u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; | ||
724 | unsigned int hw_context_size; | ||
725 | u8 buf[HASH_MAX_BLOCK_SIZE]; | ||
726 | u8 bufnext[HASH_MAX_BLOCK_SIZE]; | ||
727 | unsigned int swinit; | ||
728 | unsigned int first; | ||
729 | unsigned int last; | ||
730 | unsigned int to_hash_later; | ||
731 | u64 nbuf; | ||
732 | struct scatterlist bufsl[2]; | ||
733 | struct scatterlist *psrc; | ||
734 | }; | ||
735 | |||
708 | static int aead_setauthsize(struct crypto_aead *authenc, | 736 | static int aead_setauthsize(struct crypto_aead *authenc, |
709 | unsigned int authsize) | 737 | unsigned int authsize) |
710 | { | 738 | { |
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev, | |||
821 | else | 849 | else |
822 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | 850 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); |
823 | 851 | ||
824 | if (edesc->dst_is_chained) | 852 | if (dst) { |
825 | talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE); | 853 | if (edesc->dst_is_chained) |
826 | else | 854 | talitos_unmap_sg_chain(dev, dst, |
827 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | 855 | DMA_FROM_DEVICE); |
856 | else | ||
857 | dma_unmap_sg(dev, dst, dst_nents, | ||
858 | DMA_FROM_DEVICE); | ||
859 | } | ||
828 | } else | 860 | } else |
829 | if (edesc->src_is_chained) | 861 | if (edesc->src_is_chained) |
830 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); | 862 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); |
@@ -1114,12 +1146,71 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | |||
1114 | return sg_nents; | 1146 | return sg_nents; |
1115 | } | 1147 | } |
1116 | 1148 | ||
1149 | /** | ||
1150 | * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer | ||
1151 | * @sgl: The SG list | ||
1152 | * @nents: Number of SG entries | ||
1153 | * @buf: Where to copy to | ||
1154 | * @buflen: The number of bytes to copy | ||
1155 | * @skip: The number of bytes to skip before copying. | ||
1156 | * Note: skip + buflen should equal SG total size. | ||
1157 | * | ||
1158 | * Returns the number of copied bytes. | ||
1159 | * | ||
1160 | **/ | ||
1161 | static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
1162 | void *buf, size_t buflen, unsigned int skip) | ||
1163 | { | ||
1164 | unsigned int offset = 0; | ||
1165 | unsigned int boffset = 0; | ||
1166 | struct sg_mapping_iter miter; | ||
1167 | unsigned long flags; | ||
1168 | unsigned int sg_flags = SG_MITER_ATOMIC; | ||
1169 | size_t total_buffer = buflen + skip; | ||
1170 | |||
1171 | sg_flags |= SG_MITER_FROM_SG; | ||
1172 | |||
1173 | sg_miter_start(&miter, sgl, nents, sg_flags); | ||
1174 | |||
1175 | local_irq_save(flags); | ||
1176 | |||
1177 | while (sg_miter_next(&miter) && offset < total_buffer) { | ||
1178 | unsigned int len; | ||
1179 | unsigned int ignore; | ||
1180 | |||
1181 | if ((offset + miter.length) > skip) { | ||
1182 | if (offset < skip) { | ||
1183 | /* Copy part of this segment */ | ||
1184 | ignore = skip - offset; | ||
1185 | len = miter.length - ignore; | ||
1186 | if (boffset + len > buflen) | ||
1187 | len = buflen - boffset; | ||
1188 | memcpy(buf + boffset, miter.addr + ignore, len); | ||
1189 | } else { | ||
1190 | /* Copy all of this segment (up to buflen) */ | ||
1191 | len = miter.length; | ||
1192 | if (boffset + len > buflen) | ||
1193 | len = buflen - boffset; | ||
1194 | memcpy(buf + boffset, miter.addr, len); | ||
1195 | } | ||
1196 | boffset += len; | ||
1197 | } | ||
1198 | offset += miter.length; | ||
1199 | } | ||
1200 | |||
1201 | sg_miter_stop(&miter); | ||
1202 | |||
1203 | local_irq_restore(flags); | ||
1204 | return boffset; | ||
1205 | } | ||
1206 | |||
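sg_copy_end_to_buffer() walks the scatterlist once, discards the first skip bytes, and linearizes up to buflen bytes of the tail. The same skip/clip logic on flat segments, as a self-contained sketch:

#include <stdio.h>
#include <string.h>

struct seg { const char *p; size_t len; };

static size_t copy_end(const struct seg *sg, size_t nents,
		       char *buf, size_t buflen, size_t skip)
{
	size_t offset = 0, boffset = 0, i;

	for (i = 0; i < nents && offset < buflen + skip; i++) {
		if (offset + sg[i].len > skip) {
			/* ignore the part of this segment below skip */
			size_t ignore = offset < skip ? skip - offset : 0;
			size_t len = sg[i].len - ignore;

			if (boffset + len > buflen)
				len = buflen - boffset;
			memcpy(buf + boffset, sg[i].p + ignore, len);
			boffset += len;
		}
		offset += sg[i].len;
	}
	return boffset;
}

int main(void)
{
	struct seg sg[] = { { "abcde", 5 }, { "fghij", 5 } };
	char tail[4] = { 0 };

	/* total 10 bytes, keep the last 3: skip = 7 */
	copy_end(sg, 2, tail, 3, 7);
	printf("%s\n", tail);   /* prints "hij" */
	return 0;
}
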
1117 | /* | 1207 | /* |
1118 | * allocate and map the extended descriptor | 1208 | * allocate and map the extended descriptor |
1119 | */ | 1209 | */ |
1120 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | 1210 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1121 | struct scatterlist *src, | 1211 | struct scatterlist *src, |
1122 | struct scatterlist *dst, | 1212 | struct scatterlist *dst, |
1213 | int hash_result, | ||
1123 | unsigned int cryptlen, | 1214 | unsigned int cryptlen, |
1124 | unsigned int authsize, | 1215 | unsigned int authsize, |
1125 | int icv_stashing, | 1216 | int icv_stashing, |
@@ -1139,11 +1230,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1139 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); | 1230 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
1140 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1231 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1141 | 1232 | ||
1142 | if (dst == src) { | 1233 | if (hash_result) { |
1143 | dst_nents = src_nents; | 1234 | dst_nents = 0; |
1144 | } else { | 1235 | } else { |
1145 | dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained); | 1236 | if (dst == src) { |
1146 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1237 | dst_nents = src_nents; |
1238 | } else { | ||
1239 | dst_nents = sg_count(dst, cryptlen + authsize, | ||
1240 | &dst_chained); | ||
1241 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | ||
1242 | } | ||
1147 | } | 1243 | } |
1148 | 1244 | ||
1149 | /* | 1245 | /* |
@@ -1172,8 +1268,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1172 | edesc->src_is_chained = src_chained; | 1268 | edesc->src_is_chained = src_chained; |
1173 | edesc->dst_is_chained = dst_chained; | 1269 | edesc->dst_is_chained = dst_chained; |
1174 | edesc->dma_len = dma_len; | 1270 | edesc->dma_len = dma_len; |
1175 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], | 1271 | if (dma_len) |
1176 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1272 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], |
1273 | edesc->dma_len, | ||
1274 | DMA_BIDIRECTIONAL); | ||
1177 | 1275 | ||
1178 | return edesc; | 1276 | return edesc; |
1179 | } | 1277 | } |
@@ -1184,7 +1282,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, | |||
1184 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1282 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1185 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1283 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1186 | 1284 | ||
1187 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, | 1285 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, |
1188 | areq->cryptlen, ctx->authsize, icv_stashing, | 1286 | areq->cryptlen, ctx->authsize, icv_stashing, |
1189 | areq->base.flags); | 1287 | areq->base.flags); |
1190 | } | 1288 | } |
@@ -1441,8 +1539,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | |||
1441 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | 1539 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
1442 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1540 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1443 | 1541 | ||
1444 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes, | 1542 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, |
1445 | 0, 0, areq->base.flags); | 1543 | areq->nbytes, 0, 0, areq->base.flags); |
1446 | } | 1544 | } |
1447 | 1545 | ||
1448 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) | 1546 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) |
@@ -1478,15 +1576,332 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) | |||
1478 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | 1576 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); |
1479 | } | 1577 | } |
1480 | 1578 | ||
1579 | static void common_nonsnoop_hash_unmap(struct device *dev, | ||
1580 | struct talitos_edesc *edesc, | ||
1581 | struct ahash_request *areq) | ||
1582 | { | ||
1583 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1584 | |||
1585 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | ||
1586 | |||
1587 | /* When using hashctx-in, must unmap it. */ | ||
1588 | if (edesc->desc.ptr[1].len) | ||
1589 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], | ||
1590 | DMA_TO_DEVICE); | ||
1591 | |||
1592 | if (edesc->desc.ptr[2].len) | ||
1593 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], | ||
1594 | DMA_TO_DEVICE); | ||
1595 | |||
1596 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); | ||
1597 | |||
1598 | if (edesc->dma_len) | ||
1599 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | ||
1600 | DMA_BIDIRECTIONAL); | ||
1601 | |||
1602 | } | ||
1603 | |||
1604 | static void ahash_done(struct device *dev, | ||
1605 | struct talitos_desc *desc, void *context, | ||
1606 | int err) | ||
1607 | { | ||
1608 | struct ahash_request *areq = context; | ||
1609 | struct talitos_edesc *edesc = | ||
1610 | container_of(desc, struct talitos_edesc, desc); | ||
1611 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1612 | |||
1613 | if (!req_ctx->last && req_ctx->to_hash_later) { | ||
1614 | /* Position any partial block for next update/final/finup */ | ||
1615 | memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); | ||
1616 | req_ctx->nbuf = req_ctx->to_hash_later; | ||
1617 | } | ||
1618 | common_nonsnoop_hash_unmap(dev, edesc, areq); | ||
1619 | |||
1620 | kfree(edesc); | ||
1621 | |||
1622 | areq->base.complete(&areq->base, err); | ||
1623 | } | ||
1624 | |||
1625 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
1626 | struct ahash_request *areq, unsigned int length, | ||
1627 | void (*callback) (struct device *dev, | ||
1628 | struct talitos_desc *desc, | ||
1629 | void *context, int error)) | ||
1630 | { | ||
1631 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1632 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1633 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1634 | struct device *dev = ctx->dev; | ||
1635 | struct talitos_desc *desc = &edesc->desc; | ||
1636 | int sg_count, ret; | ||
1637 | |||
1638 | /* first DWORD empty */ | ||
1639 | desc->ptr[0] = zero_entry; | ||
1640 | |||
1641 | /* hash context in */ | ||
1642 | if (!req_ctx->first || req_ctx->swinit) { | ||
1643 | map_single_talitos_ptr(dev, &desc->ptr[1], | ||
1644 | req_ctx->hw_context_size, | ||
1645 | (char *)req_ctx->hw_context, 0, | ||
1646 | DMA_TO_DEVICE); | ||
1647 | req_ctx->swinit = 0; | ||
1648 | } else { | ||
1649 | desc->ptr[1] = zero_entry; | ||
1650 | /* Indicate next op is not the first. */ | ||
1651 | req_ctx->first = 0; | ||
1652 | } | ||
1653 | |||
1654 | /* HMAC key */ | ||
1655 | if (ctx->keylen) | ||
1656 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | ||
1657 | (char *)&ctx->key, 0, DMA_TO_DEVICE); | ||
1658 | else | ||
1659 | desc->ptr[2] = zero_entry; | ||
1660 | |||
1661 | /* | ||
1662 | * data in | ||
1663 | */ | ||
1664 | desc->ptr[3].len = cpu_to_be16(length); | ||
1665 | desc->ptr[3].j_extent = 0; | ||
1666 | |||
1667 | sg_count = talitos_map_sg(dev, req_ctx->psrc, | ||
1668 | edesc->src_nents ? : 1, | ||
1669 | DMA_TO_DEVICE, | ||
1670 | edesc->src_is_chained); | ||
1671 | |||
1672 | if (sg_count == 1) { | ||
1673 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); | ||
1674 | } else { | ||
1675 | sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, | ||
1676 | &edesc->link_tbl[0]); | ||
1677 | if (sg_count > 1) { | ||
1678 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1679 | to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); | ||
1680 | dma_sync_single_for_device(ctx->dev, | ||
1681 | edesc->dma_link_tbl, | ||
1682 | edesc->dma_len, | ||
1683 | DMA_BIDIRECTIONAL); | ||
1684 | } else { | ||
1685 | /* Only one segment now, so no link tbl needed */ | ||
1686 | to_talitos_ptr(&desc->ptr[3], | ||
1687 | sg_dma_address(req_ctx->psrc)); | ||
1688 | } | ||
1689 | } | ||
1690 | |||
1691 | /* fifth DWORD empty */ | ||
1692 | desc->ptr[4] = zero_entry; | ||
1693 | |||
1694 | /* hash/HMAC out -or- hash context out */ | ||
1695 | if (req_ctx->last) | ||
1696 | map_single_talitos_ptr(dev, &desc->ptr[5], | ||
1697 | crypto_ahash_digestsize(tfm), | ||
1698 | areq->result, 0, DMA_FROM_DEVICE); | ||
1699 | else | ||
1700 | map_single_talitos_ptr(dev, &desc->ptr[5], | ||
1701 | req_ctx->hw_context_size, | ||
1702 | req_ctx->hw_context, 0, DMA_FROM_DEVICE); | ||
1703 | |||
1704 | /* last DWORD empty */ | ||
1705 | desc->ptr[6] = zero_entry; | ||
1706 | |||
1707 | ret = talitos_submit(dev, desc, callback, areq); | ||
1708 | if (ret != -EINPROGRESS) { | ||
1709 | common_nonsnoop_hash_unmap(dev, edesc, areq); | ||
1710 | kfree(edesc); | ||
1711 | } | ||
1712 | return ret; | ||
1713 | } | ||
1714 | |||
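common_nonsnoop_hash() fills the SEC's seven-pointer descriptor in a fixed layout. A summary of how the slots are used above; the enum names are illustrative, not taken from the driver:

enum mdeu_hash_slot {
	SLOT_UNUSED_0  = 0,
	SLOT_CTX_IN    = 1,  /* hash context in, unless first op with hw init */
	SLOT_HMAC_KEY  = 2,  /* mapped only when ctx->keylen != 0 */
	SLOT_DATA_IN   = 3,  /* direct pointer or link-table jump */
	SLOT_UNUSED_4  = 4,
	SLOT_OUT       = 5,  /* digest out (last) or hash context out (continue) */
	SLOT_UNUSED_6  = 6,
};
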
1715 | static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | ||
1716 | unsigned int nbytes) | ||
1717 | { | ||
1718 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1719 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1720 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1721 | |||
1722 | return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1, | ||
1723 | nbytes, 0, 0, areq->base.flags); | ||
1724 | } | ||
1725 | |||
1726 | static int ahash_init(struct ahash_request *areq) | ||
1727 | { | ||
1728 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1729 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1730 | |||
1731 | /* Initialize the context */ | ||
1732 | req_ctx->nbuf = 0; | ||
1733 | req_ctx->first = 1; /* first indicates h/w must init its context */ | ||
1734 | req_ctx->swinit = 0; /* assume h/w init of context */ | ||
1735 | req_ctx->hw_context_size = | ||
1736 | (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) | ||
1737 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 | ||
1738 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; | ||
1739 | |||
1740 | return 0; | ||
1741 | } | ||
1742 | |||
1743 | /* | ||
1744 | * on h/w without explicit sha224 support, we initialize h/w context | ||
1745 | * manually with sha224 constants, and tell it to run sha256. | ||
1746 | */ | ||
1747 | static int ahash_init_sha224_swinit(struct ahash_request *areq) | ||
1748 | { | ||
1749 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1750 | |||
1751 | ahash_init(areq); | ||
1752 | req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */ | ||
1753 | |||
1754 | req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0); | ||
1755 | req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1); | ||
1756 | req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2); | ||
1757 | req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3); | ||
1758 | req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4); | ||
1759 | req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5); | ||
1760 | req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6); | ||
1761 | req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7); | ||
1762 | |||
1763 | /* init 64-bit count */ | ||
1764 | req_ctx->hw_context[8] = 0; | ||
1765 | req_ctx->hw_context[9] = 0; | ||
1766 | |||
1767 | return 0; | ||
1768 | } | ||
1769 | |||
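The swinit path seeds the context with the standard FIPS 180-2 SHA-224 initial hash values and zeroes the 64-bit count, then lets the engine run its SHA-256 flow. For reference, the eight constants being loaded:

#include <stdint.h>
#include <stdio.h>

/* FIPS 180-2 SHA-224 initial hash values, the same eight words the
 * swinit path loads above. */
static const uint32_t sha224_h[8] = {
	0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
	0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
};

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("H%d = 0x%08x\n", i, sha224_h[i]);
	return 0;
}
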
1770 | static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | ||
1771 | { | ||
1772 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1773 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1774 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1775 | struct talitos_edesc *edesc; | ||
1776 | unsigned int blocksize = | ||
1777 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
1778 | unsigned int nbytes_to_hash; | ||
1779 | unsigned int to_hash_later; | ||
1780 | unsigned int nsg; | ||
1781 | int chained; | ||
1782 | |||
1783 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { | ||
1784 | /* Buffer up to one whole block */ | ||
1785 | sg_copy_to_buffer(areq->src, | ||
1786 | sg_count(areq->src, nbytes, &chained), | ||
1787 | req_ctx->buf + req_ctx->nbuf, nbytes); | ||
1788 | req_ctx->nbuf += nbytes; | ||
1789 | return 0; | ||
1790 | } | ||
1791 | |||
1792 | /* At least (blocksize + 1) bytes are available to hash */ | ||
1793 | nbytes_to_hash = nbytes + req_ctx->nbuf; | ||
1794 | to_hash_later = nbytes_to_hash & (blocksize - 1); | ||
1795 | |||
1796 | if (req_ctx->last) | ||
1797 | to_hash_later = 0; | ||
1798 | else if (to_hash_later) | ||
1799 | /* There is a partial block. Hash the full block(s) now */ | ||
1800 | nbytes_to_hash -= to_hash_later; | ||
1801 | else { | ||
1802 | /* Keep one block buffered */ | ||
1803 | nbytes_to_hash -= blocksize; | ||
1804 | to_hash_later = blocksize; | ||
1805 | } | ||
1806 | |||
1807 | /* Chain in any previously buffered data */ | ||
1808 | if (req_ctx->nbuf) { | ||
1809 | nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; | ||
1810 | sg_init_table(req_ctx->bufsl, nsg); | ||
1811 | sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); | ||
1812 | if (nsg > 1) | ||
1813 | scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src); | ||
1814 | req_ctx->psrc = req_ctx->bufsl; | ||
1815 | } else | ||
1816 | req_ctx->psrc = areq->src; | ||
1817 | |||
1818 | if (to_hash_later) { | ||
1819 | int nents = sg_count(areq->src, nbytes, &chained); | ||
1820 | sg_copy_end_to_buffer(areq->src, nents, | ||
1821 | req_ctx->bufnext, | ||
1822 | to_hash_later, | ||
1823 | nbytes - to_hash_later); | ||
1824 | } | ||
1825 | req_ctx->to_hash_later = to_hash_later; | ||
1826 | |||
1827 | /* Allocate extended descriptor */ | ||
1828 | edesc = ahash_edesc_alloc(areq, nbytes_to_hash); | ||
1829 | if (IS_ERR(edesc)) | ||
1830 | return PTR_ERR(edesc); | ||
1831 | |||
1832 | edesc->desc.hdr = ctx->desc_hdr_template; | ||
1833 | |||
1834 | /* On last one, request SEC to pad; otherwise continue */ | ||
1835 | if (req_ctx->last) | ||
1836 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; | ||
1837 | else | ||
1838 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; | ||
1839 | |||
1840 | /* request SEC to INIT hash. */ | ||
1841 | if (req_ctx->first && !req_ctx->swinit) | ||
1842 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; | ||
1843 | |||
1844 | /* When the tfm context has a keylen, it's an HMAC. | ||
1845 | * A first or last (i.e. not middle) descriptor must request HMAC. | ||
1846 | */ | ||
1847 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) | ||
1848 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; | ||
1849 | |||
1850 | return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, | ||
1851 | ahash_done); | ||
1852 | } | ||
1853 | |||
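ahash_process_req() always hashes a whole number of blocks and carries the remainder (or, mid-stream, one full block so final() never sees an empty buffer) into bufnext. The split arithmetic as a pure function, assuming a power-of-two block size as holds for MD5/SHA:

#include <stdio.h>

static void split(unsigned nbytes, unsigned nbuf, unsigned blocksize,
		  int last, unsigned *now, unsigned *later)
{
	unsigned total = nbytes + nbuf;
	unsigned rem = total & (blocksize - 1);

	if (last)
		*later = 0;                 /* hash everything, SEC pads */
	else if (rem)
		*later = rem;               /* partial block exists */
	else
		*later = blocksize;         /* keep one block buffered */
	*now = total - *later;
}

int main(void)
{
	unsigned now, later;

	split(100, 28, 64, 0, &now, &later);
	printf("hash %u now, buffer %u\n", now, later);  /* 64, 64 */
	return 0;
}
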
1854 | static int ahash_update(struct ahash_request *areq) | ||
1855 | { | ||
1856 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1857 | |||
1858 | req_ctx->last = 0; | ||
1859 | |||
1860 | return ahash_process_req(areq, areq->nbytes); | ||
1861 | } | ||
1862 | |||
1863 | static int ahash_final(struct ahash_request *areq) | ||
1864 | { | ||
1865 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1866 | |||
1867 | req_ctx->last = 1; | ||
1868 | |||
1869 | return ahash_process_req(areq, 0); | ||
1870 | } | ||
1871 | |||
1872 | static int ahash_finup(struct ahash_request *areq) | ||
1873 | { | ||
1874 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1875 | |||
1876 | req_ctx->last = 1; | ||
1877 | |||
1878 | return ahash_process_req(areq, areq->nbytes); | ||
1879 | } | ||
1880 | |||
1881 | static int ahash_digest(struct ahash_request *areq) | ||
1882 | { | ||
1883 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1884 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
1885 | |||
1886 | ahash->init(areq); | ||
1887 | req_ctx->last = 1; | ||
1888 | |||
1889 | return ahash_process_req(areq, areq->nbytes); | ||
1890 | } | ||
1891 | |||
1481 | struct talitos_alg_template { | 1892 | struct talitos_alg_template { |
1482 | struct crypto_alg alg; | 1893 | u32 type; |
1894 | union { | ||
1895 | struct crypto_alg crypto; | ||
1896 | struct ahash_alg hash; | ||
1897 | } alg; | ||
1483 | __be32 desc_hdr_template; | 1898 | __be32 desc_hdr_template; |
1484 | }; | 1899 | }; |
1485 | 1900 | ||
1486 | static struct talitos_alg_template driver_algs[] = { | 1901 | static struct talitos_alg_template driver_algs[] = { |
1487 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ | 1902 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ |
1488 | { | 1903 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1489 | .alg = { | 1904 | .alg.crypto = { |
1490 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1905 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
1491 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", | 1906 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", |
1492 | .cra_blocksize = AES_BLOCK_SIZE, | 1907 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1511,8 +1926,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1511 | DESC_HDR_MODE1_MDEU_PAD | | 1926 | DESC_HDR_MODE1_MDEU_PAD | |
1512 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1927 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1513 | }, | 1928 | }, |
1514 | { | 1929 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1515 | .alg = { | 1930 | .alg.crypto = { |
1516 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | 1931 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", |
1517 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", | 1932 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", |
1518 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1933 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1538,8 +1953,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1538 | DESC_HDR_MODE1_MDEU_PAD | | 1953 | DESC_HDR_MODE1_MDEU_PAD | |
1539 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1954 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1540 | }, | 1955 | }, |
1541 | { | 1956 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1542 | .alg = { | 1957 | .alg.crypto = { |
1543 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 1958 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
1544 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", | 1959 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", |
1545 | .cra_blocksize = AES_BLOCK_SIZE, | 1960 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1564,8 +1979,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1564 | DESC_HDR_MODE1_MDEU_PAD | | 1979 | DESC_HDR_MODE1_MDEU_PAD | |
1565 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 1980 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1566 | }, | 1981 | }, |
1567 | { | 1982 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1568 | .alg = { | 1983 | .alg.crypto = { |
1569 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | 1984 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", |
1570 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", | 1985 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", |
1571 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1986 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1591,8 +2006,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1591 | DESC_HDR_MODE1_MDEU_PAD | | 2006 | DESC_HDR_MODE1_MDEU_PAD | |
1592 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 2007 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1593 | }, | 2008 | }, |
1594 | { | 2009 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1595 | .alg = { | 2010 | .alg.crypto = { |
1596 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 2011 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
1597 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 2012 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
1598 | .cra_blocksize = AES_BLOCK_SIZE, | 2013 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1617,8 +2032,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1617 | DESC_HDR_MODE1_MDEU_PAD | | 2032 | DESC_HDR_MODE1_MDEU_PAD | |
1618 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 2033 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1619 | }, | 2034 | }, |
1620 | { | 2035 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1621 | .alg = { | 2036 | .alg.crypto = { |
1622 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 2037 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
1623 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", | 2038 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", |
1624 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2039 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1645,8 +2060,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1645 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 2060 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1646 | }, | 2061 | }, |
1647 | /* ABLKCIPHER algorithms. */ | 2062 | /* ABLKCIPHER algorithms. */ |
1648 | { | 2063 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
1649 | .alg = { | 2064 | .alg.crypto = { |
1650 | .cra_name = "cbc(aes)", | 2065 | .cra_name = "cbc(aes)", |
1651 | .cra_driver_name = "cbc-aes-talitos", | 2066 | .cra_driver_name = "cbc-aes-talitos", |
1652 | .cra_blocksize = AES_BLOCK_SIZE, | 2067 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1667,8 +2082,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1667 | DESC_HDR_SEL0_AESU | | 2082 | DESC_HDR_SEL0_AESU | |
1668 | DESC_HDR_MODE0_AESU_CBC, | 2083 | DESC_HDR_MODE0_AESU_CBC, |
1669 | }, | 2084 | }, |
1670 | { | 2085 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
1671 | .alg = { | 2086 | .alg.crypto = { |
1672 | .cra_name = "cbc(des3_ede)", | 2087 | .cra_name = "cbc(des3_ede)", |
1673 | .cra_driver_name = "cbc-3des-talitos", | 2088 | .cra_driver_name = "cbc-3des-talitos", |
1674 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2089 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1689,14 +2104,140 @@ static struct talitos_alg_template driver_algs[] = { | |||
1689 | DESC_HDR_SEL0_DEU | | 2104 | DESC_HDR_SEL0_DEU | |
1690 | DESC_HDR_MODE0_DEU_CBC | | 2105 | DESC_HDR_MODE0_DEU_CBC | |
1691 | DESC_HDR_MODE0_DEU_3DES, | 2106 | DESC_HDR_MODE0_DEU_3DES, |
1692 | } | 2107 | }, |
2108 | /* AHASH algorithms. */ | ||
2109 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2110 | .alg.hash = { | ||
2111 | .init = ahash_init, | ||
2112 | .update = ahash_update, | ||
2113 | .final = ahash_final, | ||
2114 | .finup = ahash_finup, | ||
2115 | .digest = ahash_digest, | ||
2116 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
2117 | .halg.base = { | ||
2118 | .cra_name = "md5", | ||
2119 | .cra_driver_name = "md5-talitos", | ||
2120 | .cra_blocksize = MD5_BLOCK_SIZE, | ||
2121 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2122 | CRYPTO_ALG_ASYNC, | ||
2123 | .cra_type = &crypto_ahash_type | ||
2124 | } | ||
2125 | }, | ||
2126 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2127 | DESC_HDR_SEL0_MDEUA | | ||
2128 | DESC_HDR_MODE0_MDEU_MD5, | ||
2129 | }, | ||
2130 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2131 | .alg.hash = { | ||
2132 | .init = ahash_init, | ||
2133 | .update = ahash_update, | ||
2134 | .final = ahash_final, | ||
2135 | .finup = ahash_finup, | ||
2136 | .digest = ahash_digest, | ||
2137 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
2138 | .halg.base = { | ||
2139 | .cra_name = "sha1", | ||
2140 | .cra_driver_name = "sha1-talitos", | ||
2141 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
2142 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2143 | CRYPTO_ALG_ASYNC, | ||
2144 | .cra_type = &crypto_ahash_type | ||
2145 | } | ||
2146 | }, | ||
2147 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2148 | DESC_HDR_SEL0_MDEUA | | ||
2149 | DESC_HDR_MODE0_MDEU_SHA1, | ||
2150 | }, | ||
2151 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2152 | .alg.hash = { | ||
2153 | .init = ahash_init, | ||
2154 | .update = ahash_update, | ||
2155 | .final = ahash_final, | ||
2156 | .finup = ahash_finup, | ||
2157 | .digest = ahash_digest, | ||
2158 | .halg.digestsize = SHA224_DIGEST_SIZE, | ||
2159 | .halg.base = { | ||
2160 | .cra_name = "sha224", | ||
2161 | .cra_driver_name = "sha224-talitos", | ||
2162 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
2163 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2164 | CRYPTO_ALG_ASYNC, | ||
2165 | .cra_type = &crypto_ahash_type | ||
2166 | } | ||
2167 | }, | ||
2168 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2169 | DESC_HDR_SEL0_MDEUA | | ||
2170 | DESC_HDR_MODE0_MDEU_SHA224, | ||
2171 | }, | ||
2172 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2173 | .alg.hash = { | ||
2174 | .init = ahash_init, | ||
2175 | .update = ahash_update, | ||
2176 | .final = ahash_final, | ||
2177 | .finup = ahash_finup, | ||
2178 | .digest = ahash_digest, | ||
2179 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
2180 | .halg.base = { | ||
2181 | .cra_name = "sha256", | ||
2182 | .cra_driver_name = "sha256-talitos", | ||
2183 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
2184 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2185 | CRYPTO_ALG_ASYNC, | ||
2186 | .cra_type = &crypto_ahash_type | ||
2187 | } | ||
2188 | }, | ||
2189 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2190 | DESC_HDR_SEL0_MDEUA | | ||
2191 | DESC_HDR_MODE0_MDEU_SHA256, | ||
2192 | }, | ||
2193 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2194 | .alg.hash = { | ||
2195 | .init = ahash_init, | ||
2196 | .update = ahash_update, | ||
2197 | .final = ahash_final, | ||
2198 | .finup = ahash_finup, | ||
2199 | .digest = ahash_digest, | ||
2200 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
2201 | .halg.base = { | ||
2202 | .cra_name = "sha384", | ||
2203 | .cra_driver_name = "sha384-talitos", | ||
2204 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
2205 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2206 | CRYPTO_ALG_ASYNC, | ||
2207 | .cra_type = &crypto_ahash_type | ||
2208 | } | ||
2209 | }, | ||
2210 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2211 | DESC_HDR_SEL0_MDEUB | | ||
2212 | DESC_HDR_MODE0_MDEUB_SHA384, | ||
2213 | }, | ||
2214 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2215 | .alg.hash = { | ||
2216 | .init = ahash_init, | ||
2217 | .update = ahash_update, | ||
2218 | .final = ahash_final, | ||
2219 | .finup = ahash_finup, | ||
2220 | .digest = ahash_digest, | ||
2221 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
2222 | .halg.base = { | ||
2223 | .cra_name = "sha512", | ||
2224 | .cra_driver_name = "sha512-talitos", | ||
2225 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
2226 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2227 | CRYPTO_ALG_ASYNC, | ||
2228 | .cra_type = &crypto_ahash_type | ||
2229 | } | ||
2230 | }, | ||
2231 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2232 | DESC_HDR_SEL0_MDEUB | | ||
2233 | DESC_HDR_MODE0_MDEUB_SHA512, | ||
2234 | }, | ||
1693 | }; | 2235 | }; |
1694 | 2236 | ||
1695 | struct talitos_crypto_alg { | 2237 | struct talitos_crypto_alg { |
1696 | struct list_head entry; | 2238 | struct list_head entry; |
1697 | struct device *dev; | 2239 | struct device *dev; |
1698 | __be32 desc_hdr_template; | 2240 | struct talitos_alg_template algt; |
1699 | struct crypto_alg crypto_alg; | ||
1700 | }; | 2241 | }; |
1701 | 2242 | ||
1702 | static int talitos_cra_init(struct crypto_tfm *tfm) | 2243 | static int talitos_cra_init(struct crypto_tfm *tfm) |
@@ -1705,13 +2246,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
1705 | struct talitos_crypto_alg *talitos_alg; | 2246 | struct talitos_crypto_alg *talitos_alg; |
1706 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 2247 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
1707 | 2248 | ||
1708 | talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg); | 2249 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) |
2250 | talitos_alg = container_of(__crypto_ahash_alg(alg), | ||
2251 | struct talitos_crypto_alg, | ||
2252 | algt.alg.hash); | ||
2253 | else | ||
2254 | talitos_alg = container_of(alg, struct talitos_crypto_alg, | ||
2255 | algt.alg.crypto); | ||
1709 | 2256 | ||
1710 | /* update context with ptr to dev */ | 2257 | /* update context with ptr to dev */ |
1711 | ctx->dev = talitos_alg->dev; | 2258 | ctx->dev = talitos_alg->dev; |
1712 | 2259 | ||
1713 | /* copy descriptor header template value */ | 2260 | /* copy descriptor header template value */ |
1714 | ctx->desc_hdr_template = talitos_alg->desc_hdr_template; | 2261 | ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; |
2262 | |||
2263 | return 0; | ||
2264 | } | ||
2265 | |||
2266 | static int talitos_cra_init_aead(struct crypto_tfm *tfm) | ||
2267 | { | ||
2268 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2269 | |||
2270 | talitos_cra_init(tfm); | ||
1715 | 2271 | ||
1716 | /* random first IV */ | 2272 | /* random first IV */ |
1717 | get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); | 2273 | get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); |
@@ -1719,6 +2275,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
1719 | return 0; | 2275 | return 0; |
1720 | } | 2276 | } |
1721 | 2277 | ||
2278 | static int talitos_cra_init_ahash(struct crypto_tfm *tfm) | ||
2279 | { | ||
2280 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2281 | |||
2282 | talitos_cra_init(tfm); | ||
2283 | |||
2284 | ctx->keylen = 0; | ||
2285 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
2286 | sizeof(struct talitos_ahash_req_ctx)); | ||
2287 | |||
2288 | return 0; | ||
2289 | } | ||
2290 | |||
1722 | /* | 2291 | /* |
1723 | * given the alg's descriptor header template, determine whether descriptor | 2292 | * given the alg's descriptor header template, determine whether descriptor |
1724 | * type and primary/secondary execution units required match the hw | 2293 | * type and primary/secondary execution units required match the hw |
@@ -1747,7 +2316,15 @@ static int talitos_remove(struct of_device *ofdev) | |||
1747 | int i; | 2316 | int i; |
1748 | 2317 | ||
1749 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | 2318 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { |
1750 | crypto_unregister_alg(&t_alg->crypto_alg); | 2319 | switch (t_alg->algt.type) { |
2320 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2321 | case CRYPTO_ALG_TYPE_AEAD: | ||
2322 | crypto_unregister_alg(&t_alg->algt.alg.crypto); | ||
2323 | break; | ||
2324 | case CRYPTO_ALG_TYPE_AHASH: | ||
2325 | crypto_unregister_ahash(&t_alg->algt.alg.hash); | ||
2326 | break; | ||
2327 | } | ||
1751 | list_del(&t_alg->entry); | 2328 | list_del(&t_alg->entry); |
1752 | kfree(t_alg); | 2329 | kfree(t_alg); |
1753 | } | 2330 | } |
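The teardown above now has to dispatch on algt.type, since ablkcipher/aead and ahash algorithms unregister through different APIs. A userspace sketch of the same safe-iteration-plus-switch shape, with a plain singly linked list standing in for list_head and puts() standing in for the unregister calls:

#include <stdio.h>
#include <stdlib.h>

enum alg_type { ALG_CRYPTO, ALG_AHASH };	/* stand-ins for CRYPTO_ALG_TYPE_* */

struct t_alg {
	enum alg_type type;
	struct t_alg *next;
};

/*
 * Same shape as the loop in talitos_remove(): iterate "safely" by
 * saving the next pointer before the current node is freed, and pick
 * the cleanup call from the type tag.
 */
static void remove_all(struct t_alg **head)
{
	struct t_alg *cur = *head, *next;

	while (cur) {
		next = cur->next;		/* the "_safe" part */
		switch (cur->type) {
		case ALG_CRYPTO:
			puts("crypto_unregister_alg()");
			break;
		case ALG_AHASH:
			puts("crypto_unregister_ahash()");
			break;
		}
		free(cur);
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct t_alg *b = calloc(1, sizeof(*b));
	struct t_alg *a = calloc(1, sizeof(*a));

	a->type = ALG_CRYPTO;
	a->next = b;
	b->type = ALG_AHASH;
	remove_all(&a);
	return 0;
}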
@@ -1781,6 +2358,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
1781 | struct talitos_alg_template | 2358 | struct talitos_alg_template |
1782 | *template) | 2359 | *template) |
1783 | { | 2360 | { |
2361 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1784 | struct talitos_crypto_alg *t_alg; | 2362 | struct talitos_crypto_alg *t_alg; |
1785 | struct crypto_alg *alg; | 2363 | struct crypto_alg *alg; |
1786 | 2364 | ||
@@ -1788,16 +2366,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
1788 | if (!t_alg) | 2366 | if (!t_alg) |
1789 | return ERR_PTR(-ENOMEM); | 2367 | return ERR_PTR(-ENOMEM); |
1790 | 2368 | ||
1791 | alg = &t_alg->crypto_alg; | 2369 | t_alg->algt = *template; |
1792 | *alg = template->alg; | 2370 | |
2371 | switch (t_alg->algt.type) { | ||
2372 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2373 | alg = &t_alg->algt.alg.crypto; | ||
2374 | alg->cra_init = talitos_cra_init; | ||
2375 | break; | ||
2376 | case CRYPTO_ALG_TYPE_AEAD: | ||
2377 | alg = &t_alg->algt.alg.crypto; | ||
2378 | alg->cra_init = talitos_cra_init_aead; | ||
2379 | break; | ||
2380 | case CRYPTO_ALG_TYPE_AHASH: | ||
2381 | alg = &t_alg->algt.alg.hash.halg.base; | ||
2382 | alg->cra_init = talitos_cra_init_ahash; | ||
2383 | if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && | ||
2384 | !strcmp(alg->cra_name, "sha224")) { | ||
2385 | t_alg->algt.alg.hash.init = ahash_init_sha224_swinit; | ||
2386 | t_alg->algt.desc_hdr_template = | ||
2387 | DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2388 | DESC_HDR_SEL0_MDEUA | | ||
2389 | DESC_HDR_MODE0_MDEU_SHA256; | ||
2390 | } | ||
2391 | break; | ||
2392 | } | ||
1793 | 2393 | ||
1794 | alg->cra_module = THIS_MODULE; | 2394 | alg->cra_module = THIS_MODULE; |
1795 | alg->cra_init = talitos_cra_init; | ||
1796 | alg->cra_priority = TALITOS_CRA_PRIORITY; | 2395 | alg->cra_priority = TALITOS_CRA_PRIORITY; |
1797 | alg->cra_alignmask = 0; | 2396 | alg->cra_alignmask = 0; |
1798 | alg->cra_ctxsize = sizeof(struct talitos_ctx); | 2397 | alg->cra_ctxsize = sizeof(struct talitos_ctx); |
1799 | 2398 | ||
1800 | t_alg->desc_hdr_template = template->desc_hdr_template; | ||
1801 | t_alg->dev = dev; | 2399 | t_alg->dev = dev; |
1802 | 2400 | ||
1803 | return t_alg; | 2401 | return t_alg; |
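When the hardware cannot initialize SHA-224 state itself (no TALITOS_FTR_SHA224_HWINIT), the branch above swaps in ahash_init_sha224_swinit and points the descriptor at the MDEU's SHA-256 mode. A sketch of what such a software init has to provide, assuming it seeds the context with SHA-224's standard initial hash values (FIPS 180); the struct and field names are illustrative, not the driver's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct hash_req_ctx {		/* illustrative, not the driver's layout */
	uint32_t hw_context[8];	/* state words handed to the MDEU */
	int sw_init;		/* tells the send path to skip MDEU_INIT */
};

static void sha224_swinit(struct hash_req_ctx *ctx)
{
	/* SHA-224 initial hash values H0..H7 (FIPS 180) */
	static const uint32_t sha224_iv[8] = {
		0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
		0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
	};
	int i;

	for (i = 0; i < 8; i++)
		ctx->hw_context[i] = sha224_iv[i];
	ctx->sw_init = 1;	/* hardware must continue, not re-init */
}

int main(void)
{
	struct hash_req_ctx ctx;

	sha224_swinit(&ctx);
	printf("H0 = 0x%08" PRIx32 "\n", ctx.hw_context[0]);
	return 0;
}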
@@ -1807,7 +2405,7 @@ static int talitos_probe(struct of_device *ofdev, | |||
1807 | const struct of_device_id *match) | 2405 | const struct of_device_id *match) |
1808 | { | 2406 | { |
1809 | struct device *dev = &ofdev->dev; | 2407 | struct device *dev = &ofdev->dev; |
1810 | struct device_node *np = ofdev->node; | 2408 | struct device_node *np = ofdev->dev.of_node; |
1811 | struct talitos_private *priv; | 2409 | struct talitos_private *priv; |
1812 | const unsigned int *prop; | 2410 | const unsigned int *prop; |
1813 | int i, err; | 2411 | int i, err; |
@@ -1877,7 +2475,8 @@ static int talitos_probe(struct of_device *ofdev, | |||
1877 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; | 2475 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; |
1878 | 2476 | ||
1879 | if (of_device_is_compatible(np, "fsl,sec2.1")) | 2477 | if (of_device_is_compatible(np, "fsl,sec2.1")) |
1880 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; | 2478 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK | |
2479 | TALITOS_FTR_SHA224_HWINIT; | ||
1881 | 2480 | ||
1882 | priv->chan = kzalloc(sizeof(struct talitos_channel) * | 2481 | priv->chan = kzalloc(sizeof(struct talitos_channel) * |
1883 | priv->num_channels, GFP_KERNEL); | 2482 | priv->num_channels, GFP_KERNEL); |
@@ -1931,6 +2530,7 @@ static int talitos_probe(struct of_device *ofdev, | |||
1931 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2530 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
1932 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { | 2531 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { |
1933 | struct talitos_crypto_alg *t_alg; | 2532 | struct talitos_crypto_alg *t_alg; |
2533 | char *name = NULL; | ||
1934 | 2534 | ||
1935 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); | 2535 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); |
1936 | if (IS_ERR(t_alg)) { | 2536 | if (IS_ERR(t_alg)) { |
@@ -1938,15 +2538,27 @@ static int talitos_probe(struct of_device *ofdev, | |||
1938 | goto err_out; | 2538 | goto err_out; |
1939 | } | 2539 | } |
1940 | 2540 | ||
1941 | err = crypto_register_alg(&t_alg->crypto_alg); | 2541 | switch (t_alg->algt.type) { |
2542 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2543 | case CRYPTO_ALG_TYPE_AEAD: | ||
2544 | err = crypto_register_alg( | ||
2545 | &t_alg->algt.alg.crypto); | ||
2546 | name = t_alg->algt.alg.crypto.cra_driver_name; | ||
2547 | break; | ||
2548 | case CRYPTO_ALG_TYPE_AHASH: | ||
2549 | err = crypto_register_ahash( | ||
2550 | &t_alg->algt.alg.hash); | ||
2551 | name = | ||
2552 | t_alg->algt.alg.hash.halg.base.cra_driver_name; | ||
2553 | break; | ||
2554 | } | ||
1942 | if (err) { | 2555 | if (err) { |
1943 | dev_err(dev, "%s alg registration failed\n", | 2556 | dev_err(dev, "%s alg registration failed\n", |
1944 | t_alg->crypto_alg.cra_driver_name); | 2557 | name); |
1945 | kfree(t_alg); | 2558 | kfree(t_alg); |
1946 | } else { | 2559 | } else { |
1947 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2560 | list_add_tail(&t_alg->entry, &priv->alg_list); |
1948 | dev_info(dev, "%s\n", | 2561 | dev_info(dev, "%s\n", name); |
1949 | t_alg->crypto_alg.cra_driver_name); | ||
1950 | } | 2562 | } |
1951 | } | 2563 | } |
1952 | } | 2564 | } |
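The registration loop above leans on the kernel's ERR_PTR convention: talitos_alg_alloc() encodes -ENOMEM in the returned pointer itself, and the caller tests it with IS_ERR() before registering. A self-contained userspace sketch of that convention (the macros mirror the kernel's; ENOMEM is hard-coded as 12):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* toy allocator: a negative errno rides in the pointer on failure */
static void *alg_alloc(int fail)
{
	if (fail)
		return ERR_PTR(-12 /* ENOMEM */);
	return malloc(16);
}

int main(void)
{
	void *ok = alg_alloc(0);
	void *bad = alg_alloc(1);

	if (!IS_ERR(ok))
		puts("first allocation: registered");
	if (IS_ERR(bad))
		printf("second allocation: error %ld\n", PTR_ERR(bad));
	free(ok);
	return 0;
}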
@@ -1968,8 +2580,11 @@ static const struct of_device_id talitos_match[] = { | |||
1968 | MODULE_DEVICE_TABLE(of, talitos_match); | 2580 | MODULE_DEVICE_TABLE(of, talitos_match); |
1969 | 2581 | ||
1970 | static struct of_platform_driver talitos_driver = { | 2582 | static struct of_platform_driver talitos_driver = { |
1971 | .name = "talitos", | 2583 | .driver = { |
1972 | .match_table = talitos_match, | 2584 | .name = "talitos", |
2585 | .owner = THIS_MODULE, | ||
2586 | .of_match_table = talitos_match, | ||
2587 | }, | ||
1973 | .probe = talitos_probe, | 2588 | .probe = talitos_probe, |
1974 | .remove = talitos_remove, | 2589 | .remove = talitos_remove, |
1975 | }; | 2590 | }; |
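The hunk above converts the driver to the current of_platform_driver layout, where the name and OF match table hang off the embedded struct device_driver. A userspace sketch of how such a compatible-string table is walked at probe time, with of_device_id reduced to its compatible field:

#include <stdio.h>
#include <string.h>

struct of_device_id { const char *compatible; };

static const struct of_device_id talitos_like_match[] = {
	{ .compatible = "fsl,sec2.0" },
	{ .compatible = NULL },		/* table terminator */
};

/* return the first entry matching the device node's compatible string */
static const struct of_device_id *
of_match(const struct of_device_id *tbl, const char *node_compat)
{
	for (; tbl->compatible; tbl++)
		if (!strcmp(tbl->compatible, node_compat))
			return tbl;
	return NULL;
}

int main(void)
{
	const struct of_device_id *m = of_match(talitos_like_match, "fsl,sec2.0");

	printf("%s\n", m ? m->compatible : "no match");
	return 0;
}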
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index ff5a1450e145..0b746aca4587 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale SEC (talitos) device register and descriptor header defines | 2 | * Freescale SEC (talitos) device register and descriptor header defines |
3 | * | 3 | * |
4 | * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2006-2010 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | 7 | * modification, are permitted provided that the following conditions |
@@ -130,6 +130,9 @@ | |||
130 | #define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ | 130 | #define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ |
131 | #define TALITOS_CRCUISR_LO 0xf034 | 131 | #define TALITOS_CRCUISR_LO 0xf034 |
132 | 132 | ||
133 | #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 | ||
134 | #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 | ||
135 | |||
133 | /* | 136 | /* |
134 | * talitos descriptor header (hdr) bits | 137 | * talitos descriptor header (hdr) bits |
135 | */ | 138 | */ |
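The two TALITOS_MDEU_CONTEXT_SIZE constants added above are consistent with the MDEU context holding the widest hash state of each class plus a 64-bit running message-length count: 32 + 8 = 0x28 bytes for the MD5/SHA-1/SHA-256 class and 64 + 8 = 0x48 bytes for SHA-384/SHA-512. That reading is an inference from the values, not a datasheet statement; the check below just reproduces the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int sha256_state = 32;	/* widest state in the first class */
	unsigned int sha512_state = 64;	/* SHA-384/512 share SHA-512's state */
	unsigned int count64 = 8;	/* 64-bit running message length */

	printf("0x%02x\n", sha256_state + count64);	/* prints 0x28 */
	printf("0x%02x\n", sha512_state + count64);	/* prints 0x48 */
	return 0;
}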
@@ -157,12 +160,16 @@ | |||
157 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) | 160 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) |
158 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) | 161 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) |
159 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) | 162 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) |
163 | #define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) | ||
160 | #define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) | 164 | #define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) |
161 | #define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) | 165 | #define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) |
162 | #define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) | 166 | #define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) |
167 | #define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000) | ||
163 | #define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) | 168 | #define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) |
164 | #define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) | 169 | #define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) |
165 | #define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) | 170 | #define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) |
171 | #define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000) | ||
172 | #define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000) | ||
166 | #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ | 173 | #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ |
167 | DESC_HDR_MODE0_MDEU_HMAC) | 174 | DESC_HDR_MODE0_MDEU_HMAC) |
168 | #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ | 175 | #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ |
@@ -181,9 +188,12 @@ | |||
181 | #define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) | 188 | #define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) |
182 | #define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) | 189 | #define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) |
183 | #define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) | 190 | #define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) |
191 | #define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300) | ||
184 | #define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) | 192 | #define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) |
185 | #define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) | 193 | #define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) |
186 | #define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) | 194 | #define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) |
195 | #define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000) | ||
196 | #define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200) | ||
187 | #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ | 197 | #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ |
188 | DESC_HDR_MODE1_MDEU_HMAC) | 198 | DESC_HDR_MODE1_MDEU_HMAC) |
189 | #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ | 199 | #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ |