author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 12:28:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 12:28:16 -0400
commit		59ecc26004e77e100c700b1d0da7502b0fdadb46 (patch)
tree		1faec47bda8439cc2cbe3bd9bf15756e67808e63 /drivers/crypto
parent		bea803183e12a1c78a12ec70907174d13d958333 (diff)
parent		8ceee72808d1ae3fb191284afc2257a2be964725 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 3.15:
- Added 3DES driver for OMAP4/AM43xx
- Added AVX2 acceleration for SHA
- Added hash-only AEAD algorithms in caam
- Removed tegra driver as it is not functioning and the hardware is
too slow
- Allow blkcipher walks over AEAD (needed for ARM)
- Fixed unprotected FPU/SSE access in ghash-clmulni-intel
- Fixed highmem crash in omap-sham
- Added (zero entropy) randomness when initialising hardware RNGs
- Fixed unaligned ahash completion functions
- Added soft module dependency for crc32c for initrds that use crc32c"
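The crc32c soft dependency mentioned above is the kind of relationship MODULE_SOFTDEP() expresses; a minimal sketch (hypothetical module, not the actual patch):

	#include <linux/module.h>

	/* A module whose on-disk format relies on crc32c but has no
	 * symbol-level dependency on it: the soft dependency tells initrd
	 * generators such as mkinitramfs/dracut to copy crc32c.ko into the
	 * initrd as well, so the module can use it at early boot. */
	MODULE_SOFTDEP("pre: crc32c");
	MODULE_LICENSE("GPL");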
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (60 commits)
crypto: ghash-clmulni-intel - use C implementation for setkey()
crypto: x86/sha1 - reduce size of the AVX2 asm implementation
crypto: x86/sha1 - fix stack alignment of AVX2 variant
crypto: x86/sha1 - re-enable the AVX variant
crypto: sha - SHA1 transform x86_64 AVX2
crypto: crypto_wq - Fix late crypto work queue initialization
crypto: caam - add missing key_dma unmap
crypto: caam - add support for aead null encryption
crypto: testmgr - add aead null encryption test vectors
crypto: export NULL algorithms defines
crypto: caam - remove error propagation handling
crypto: hash - Simplify the ahash_finup implementation
crypto: hash - Pull out the functions to save/restore request
crypto: hash - Fix the pointer voodoo in unaligned ahash
crypto: caam - Fix first parameter to caam_init_rng
crypto: omap-sham - Map SG pages if they are HIGHMEM before accessing
crypto: caam - Dynamic memory allocation for caam_rng_ctx object
crypto: allow blkcipher walks over AEAD data
crypto: remove direct blkcipher_walk dependency on transform
hwrng: add randomness to system from rng sources
...
Diffstat (limited to 'drivers/crypto')
25 files changed, 1941 insertions(+), 1679 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 13857f5d28f7..03ccdb0ccf9e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -262,6 +262,17 @@ config CRYPTO_DEV_OMAP_AES
 	  OMAP processors have AES module accelerator. Select this if you
 	  want to use the OMAP module for AES algorithms.
 
+config CRYPTO_DEV_OMAP_DES
+	tristate "Support for OMAP DES3DES hw engine"
+	depends on ARCH_OMAP2PLUS
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER2
+	help
+	  OMAP processors have DES/3DES module accelerator. Select this if you
+	  want to use the OMAP module for DES and 3DES algorithms. Currently
+	  the ECB and CBC modes of operation are supported by the driver.
+	  Accesses made on unaligned boundaries are also supported.
+
 config CRYPTO_DEV_PICOXCELL
 	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
 	depends on ARCH_PICOXCELL && HAVE_CLK
@@ -300,17 +311,6 @@ config CRYPTO_DEV_S5P
 	  Select this to offload Samsung S5PV210 or S5PC110 from AES
 	  algorithms execution.
 
-config CRYPTO_DEV_TEGRA_AES
-	tristate "Support for TEGRA AES hw engine"
-	depends on ARCH_TEGRA
-	select CRYPTO_AES
-	help
-	  TEGRA processors have AES module accelerator. Select this if you
-	  want to use the TEGRA module for AES algorithms.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called tegra-aes.
-
 config CRYPTO_DEV_NX
 	bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
 	depends on PPC64 && IBMVIO
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 0bc6aa0a54d7..482f090d16d0 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
 n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
@@ -21,5 +22,4 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d797f31f5d85..c9ff298e6d26 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -139,7 +139,6 @@ static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
 	/* setup CRC interrupts */
 	crc->regs->status = CMPERRI | DCNTEXPI;
 	crc->regs->intrenset = CMPERRI | DCNTEXPI;
-	SSYNC();
 
 	return 0;
 }
@@ -285,17 +284,12 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 	if (i == 0)
 		return;
 
-	flush_dcache_range((unsigned int)crc->sg_cpu,
-			   (unsigned int)crc->sg_cpu +
-			   i * sizeof(struct dma_desc_array));
-
 	/* Set the last descriptor to stop mode */
 	crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
 	crc->sg_cpu[i - 1].cfg |= DI_EN;
 	set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
 	set_dma_x_count(crc->dma_ch, 0);
 	set_dma_x_modify(crc->dma_ch, 0);
-	SSYNC();
 	set_dma_config(crc->dma_ch, dma_config);
 }
 
@@ -415,7 +409,6 @@ finish_update:
 
 	/* finally kick off CRC operation */
 	crc->regs->control |= BLKEN;
-	SSYNC();
 
 	return -EINPROGRESS;
 }
@@ -539,7 +532,6 @@ static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
 
 	if (crc->regs->status & DCNTEXP) {
 		crc->regs->status = DCNTEXP;
-		SSYNC();
 
 		/* prepare results */
 		put_unaligned_le32(crc->regs->result, crc->req->result);
@@ -594,7 +586,7 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
 	unsigned int timeout = 100000;
 	int ret;
 
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
 	if (!crc) {
 		dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
 		return -ENOMEM;
@@ -610,42 +602,39 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
-		ret = -ENOENT;
-		goto out_error_free_mem;
+		return -ENOENT;
 	}
 
-	crc->regs = ioremap(res->start, resource_size(res));
-	if (!crc->regs) {
+	crc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR((void *)crc->regs)) {
 		dev_err(&pdev->dev, "Cannot map CRC IO\n");
-		ret = -ENXIO;
-		goto out_error_free_mem;
+		return PTR_ERR((void *)crc->regs);
 	}
 
 	crc->irq = platform_get_irq(pdev, 0);
 	if (crc->irq < 0) {
 		dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
-		ret = -ENOENT;
-		goto out_error_unmap;
+		return -ENOENT;
 	}
 
-	ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
+	ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
+			       IRQF_SHARED, dev_name(dev), crc);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
-		goto out_error_unmap;
+		return ret;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (res == NULL) {
 		dev_err(&pdev->dev, "No CRC DMA channel specified\n");
-		ret = -ENOENT;
-		goto out_error_irq;
+		return -ENOENT;
 	}
 	crc->dma_ch = res->start;
 
 	ret = request_dma(crc->dma_ch, dev_name(dev));
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
-		goto out_error_irq;
+		return ret;
 	}
 
 	crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
@@ -660,9 +649,7 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
 	crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
 
 	crc->regs->control = 0;
-	SSYNC();
 	crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
-	SSYNC();
 
 	while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
 		cpu_relax();
@@ -693,12 +680,6 @@ out_error_dma:
 	if (crc->sg_cpu)
 		dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
 	free_dma(crc->dma_ch);
-out_error_irq:
-	free_irq(crc->irq, crc);
-out_error_unmap:
-	iounmap((void *)crc->regs);
-out_error_free_mem:
-	kfree(crc);
 
 	return ret;
 }
@@ -721,10 +702,6 @@ static int bfin_crypto_crc_remove(struct platform_device *pdev)
 	crypto_unregister_ahash(&algs);
 	tasklet_kill(&crc->done_task);
 	free_dma(crc->dma_ch);
-	if (crc->irq > 0)
-		free_irq(crc->irq, crc);
-	iounmap((void *)crc->regs);
-	kfree(crc);
 
 	return 0;
 }
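The probe() rework above is the standard conversion to device-managed (devm_*) resources, which lets the goto-based unwind labels disappear. A minimal sketch of the same pattern, assuming a hypothetical "foo" driver:

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	struct foo_priv {
		void __iomem *regs;
	};

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;
		struct resource *res;
		int irq, ret;

		/* freed automatically if probe fails or the device is removed */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		priv->regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->regs))
			return PTR_ERR(priv->regs);	/* no iounmap() needed */

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* released automatically, so no free_irq() unwind label */
		ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				       dev_name(&pdev->dev), priv);
		if (ret)
			return ret;

		platform_set_drvdata(pdev, priv);
		return 0;
	}

Note that bfin_crc.c keeps an explicit error path only for the resources that still lack a devm_ variant here (the Blackfin DMA channel and the coherent buffer).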
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b71f2fd749df..5f891254db73 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -66,10 +66,14 @@
 
 /* length of descriptors text */
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
 
+#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+
 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_ABLKCIPHER_ENC_LEN	(DESC_ABLKCIPHER_BASE + \
 					 20 * CAAM_CMD_SZ)
@@ -104,27 +108,14 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 }
 
 /*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
-	u32 *jump_cmd;
-
-	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
  * For aead functions, read payload and write payload,
  * both of which are specified in req->src and req->dst
  */
 static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
 {
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 }
 
 /*
@@ -211,9 +202,196 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 	append_key_aead(desc, ctx, keys_fit_inline);
 
 	set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = false;
+	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+	u32 *desc;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	else
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/*
+	 * NULL encryption; IV is zero
+	 * assoclen = (assoclen + cryptlen) - cryptlen
+	 */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Prepare to read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null enc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_dec;
+
+	/* aead_decrypt shared descriptor */
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	else
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Prepare to read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH2 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/*
+	 * Insert a NOP here, since we need at least 4 instructions between
+	 * code patching the descriptor buffer and the location being patched.
+	 */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null dec shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
 }
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -222,13 +400,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool keys_fit_inline = false;
-	u32 *key_jump_cmd, *jump_cmd;
 	u32 geniv, moveiv;
 	u32 *desc;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->authsize)
 		return 0;
 
+	/* NULL encryption / decryption */
+	if (!ctx->enckeylen)
+		return aead_null_set_sh_desc(aead);
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
@@ -253,7 +434,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* assoclen + cryptlen = seqinlen - ivsize */
 	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
 
-	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
 	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
 
 	/* read assoc before reading payload */
@@ -296,30 +477,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
-	desc = ctx->sh_desc_dec;
-
 	/* aead_decrypt shared descriptor */
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	append_key_aead(desc, ctx, keys_fit_inline);
+	desc = ctx->sh_desc_dec;
 
-	/* Only propagate error immediately if shared */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-	set_jump_tgt_here(desc, jump_cmd);
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
 
 	/* Class 2 operation */
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-	/* assoclen + cryptlen = seqinlen - ivsize */
+	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
 	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-			ctx->authsize + tfm->ivsize)
+			ctx->authsize + tfm->ivsize);
 	/* assoclen = (assoclen + cryptlen) - cryptlen */
 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -340,7 +509,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* Load ICV */
 	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-	append_dec_shr_done(desc);
 
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
@@ -532,7 +700,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
-	u32 *key_jump_cmd, *jump_cmd;
+	u32 *key_jump_cmd;
 	u32 *desc;
 
 #ifdef DEBUG
@@ -563,9 +731,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
 	/* Load iv */
 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
 		   LDST_CLASS_1_CCB | tfm->ivsize);
@@ -603,11 +768,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			    ctx->enckeylen, CLASS_1 |
 			    KEY_DEST_CLASS_REG);
 
-	/* For aead, only propagate error immediately if shared */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-	set_jump_tgt_here(desc, jump_cmd);
 
 	/* load IV */
 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -619,9 +780,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
 
-	/* Wait for key to load before allowing propagating error */
-	append_dec_shr_done(desc);
-
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1459,6 +1617,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	return ret;
 }
 
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+	return aead_encrypt(&areq->areq);
+}
+
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -1648,6 +1811,124 @@ struct caam_alg_template {
 static struct caam_alg_template driver_algs[] = {
 	/* single-pass ipsec_esp descriptor */
 	{
+		.name = "authenc(hmac(md5),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),ecb(cipher_null))",
+		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+		.blocksize = NULL_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_null_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = 0,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
+	{
 		.name = "authenc(hmac(md5),cbc(aes))",
 		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
 		.blocksize = AES_BLOCK_SIZE,
@@ -2099,6 +2380,11 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
 				 desc_bytes(ctx->sh_desc_givenc),
 				 DMA_TO_DEVICE);
+	if (ctx->key_dma &&
+	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+		dma_unmap_single(ctx->jrdev, ctx->key_dma,
+				 ctx->enckeylen + ctx->split_key_pad_len,
+				 DMA_TO_DEVICE);
 
 	caam_jr_free(ctx->jrdev);
 }
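Once these templates are registered, the null-cipher AEADs are reachable through the regular AEAD API by the names added above; a minimal sketch of instantiating one:

	#include <crypto/aead.h>
	#include <linux/err.h>

	static int try_caam_null_aead(void)
	{
		struct crypto_aead *tfm;

		/* name matches the new driver_algs[] entry */
		tfm = crypto_alloc_aead("authenc(hmac(sha1),ecb(cipher_null))",
					0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_free_aead(tfm);
		return 0;
	}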
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 28486b19fc36..3529b54048c9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -76,7 +76,7 @@ struct caam_rng_ctx {
 	struct buf_data bufs[2];
 };
 
-static struct caam_rng_ctx rng_ctx;
+static struct caam_rng_ctx *rng_ctx;
 
 static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
 {
@@ -137,7 +137,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
 
 static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
-	struct caam_rng_ctx *ctx = &rng_ctx;
+	struct caam_rng_ctx *ctx = rng_ctx;
 	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
 	int next_buf_idx, copied_idx;
 	int err;
@@ -237,12 +237,12 @@ static void caam_cleanup(struct hwrng *rng)
 	struct buf_data *bd;
 
 	for (i = 0; i < 2; i++) {
-		bd = &rng_ctx.bufs[i];
+		bd = &rng_ctx->bufs[i];
 		if (atomic_read(&bd->empty) == BUF_PENDING)
 			wait_for_completion(&bd->filled);
 	}
 
-	rng_unmap_ctx(&rng_ctx);
+	rng_unmap_ctx(rng_ctx);
 }
 
 static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
@@ -273,8 +273,9 @@ static struct hwrng caam_rng = {
 
 static void __exit caam_rng_exit(void)
 {
-	caam_jr_free(rng_ctx.jrdev);
+	caam_jr_free(rng_ctx->jrdev);
 	hwrng_unregister(&caam_rng);
+	kfree(rng_ctx);
 }
 
 static int __init caam_rng_init(void)
@@ -286,8 +287,10 @@ static int __init caam_rng_init(void)
 		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(dev);
 	}
-
-	caam_init_rng(&rng_ctx, dev);
+	rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
+	if (!rng_ctx)
+		return -ENOMEM;
+	caam_init_rng(rng_ctx, dev);
 
 	dev_info(dev, "registering rng-caam\n");
 	return hwrng_register(&caam_rng);
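The caamrng change converts a static context into a kmalloc(..., GFP_DMA) allocation because the context embeds the buffers handed to the CAAM DMA engine, and those must come from DMA-capable memory rather than the kernel image's static data. The shape of the pattern (a sketch with a hypothetical context type and buffer size):

	#include <linux/slab.h>

	struct my_rng_ctx {
		u8 buf[256];	/* later mapped with dma_map_single() */
	};

	static struct my_rng_ctx *ctx;

	static int __init my_rng_init(void)
	{
		/* heap allocation from the DMA-capable zone */
		ctx = kmalloc(sizeof(*ctx), GFP_DMA);
		if (!ctx)
			return -ENOMEM;
		return 0;
	}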
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 762aeff626ac..f227922cea38 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -26,6 +26,7 @@
 #include <net/xfrm.h>
 
 #include <crypto/algapi.h>
+#include <crypto/null.h>
 #include <crypto/aes.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 63fb1af2c431..1c38f86bf63a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -14,7 +14,6 @@
 #include "jr.h"
 #include "desc_constr.h"
 #include "error.h"
-#include "ctrl.h"
 
 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -352,32 +351,17 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 
 /**
  * caam_get_era() - Return the ERA of the SEC on SoC, based
- * on the SEC_VID register.
- * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
- * @caam_id - the value of the SEC_VID register
+ * on the "sec-era" property in the DTS. This property is updated by u-boot.
 **/
-int caam_get_era(u64 caam_id)
+int caam_get_era(void)
 {
-	struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
-	static const struct {
-		u16 ip_id;
-		u8 maj_rev;
-		u8 era;
-	} caam_eras[] = {
-		{0x0A10, 1, 1},
-		{0x0A10, 2, 2},
-		{0x0A12, 1, 3},
-		{0x0A14, 1, 3},
-		{0x0A14, 2, 4},
-		{0x0A16, 1, 4},
-		{0x0A11, 1, 4}
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
-		if (caam_eras[i].ip_id == sec_vid->ip_id &&
-		    caam_eras[i].maj_rev == sec_vid->maj_rev)
-			return caam_eras[i].era;
+	struct device_node *caam_node;
+	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+		const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
+								   "fsl,sec-era",
+								   NULL);
+		return prop ? *prop : -ENOTSUPP;
+	}
 
 	return -ENOTSUPP;
 }
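The new caam_get_era() dereferences the raw property pointer; a common alternative (a sketch, not part of this patch) is of_property_read_u32(), which also handles the big-endian encoding of device-tree cell values:

	#include <linux/errno.h>
	#include <linux/of.h>

	static int era_from_node(struct device_node *np)
	{
		u32 era;

		/* returns 0 on success and converts the big-endian cell */
		if (of_property_read_u32(np, "fsl,sec-era", &era))
			return -ENOTSUPP;

		return era;
	}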
@@ -443,13 +427,10 @@ static int caam_probe(struct platform_device *pdev)
 	 * for all, then go probe each one.
 	 */
 	rspec = 0;
-	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
-		rspec++;
-	if (!rspec) {
-		/* for backward compatible with device trees */
-		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+	for_each_available_child_of_node(nprop, np)
+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;
-	}
 
 	ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
 				   GFP_KERNEL);
@@ -460,18 +441,9 @@ static int caam_probe(struct platform_device *pdev)
 
 	ring = 0;
 	ctrlpriv->total_jobrs = 0;
-	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
-		ctrlpriv->jrpdev[ring] =
-			of_platform_device_create(np, NULL, dev);
-		if (!ctrlpriv->jrpdev[ring]) {
-			pr_warn("JR%d Platform device creation error\n", ring);
-			continue;
-		}
-		ctrlpriv->total_jobrs++;
-		ring++;
-	}
-	if (!ring) {
-		for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
+	for_each_available_child_of_node(nprop, np)
+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
 			ctrlpriv->jrpdev[ring] =
 				of_platform_device_create(np, NULL, dev);
 			if (!ctrlpriv->jrpdev[ring]) {
@@ -482,7 +454,6 @@ static int caam_probe(struct platform_device *pdev)
 			ctrlpriv->total_jobrs++;
 			ring++;
 		}
-	}
 
 	/* Check to see if QI present. If so, enable */
 	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
@@ -564,7 +535,7 @@ static int caam_probe(struct platform_device *pdev)
 
 	/* Report "alive" for developer to see */
 	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
-		 caam_get_era(caam_id));
+		 caam_get_era());
 	dev_info(dev, "job rings = %d, qi = %d\n",
 		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
 
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index 980d44eaaf40..cac5402a46eb 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -8,6 +8,6 @@
 #define CTRL_H
 
 /* Prototypes for backend-level services exposed to APIs */
-int caam_get_era(u64 caam_id);
+int caam_get_era(void);
 
 #endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index cd5f678847ce..7eec20bb3849 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -155,21 +155,29 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
 	append_data(desc, data, len);
 }
 
-static inline u32 *append_jump(u32 *desc, u32 options)
-{
-	u32 *cmd = desc_end(desc);
-
-	PRINT_POS;
-	append_cmd(desc, CMD_JUMP | options);
-
-	return cmd;
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 *desc, u32 options) \
+{ \
+	u32 *cmd = desc_end(desc); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | options); \
+	return cmd; \
 }
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
 
 static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
 {
 	*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
 }
 
+static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+{
+	*move_cmd &= ~MOVE_OFFSET_MASK;
+	*move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
+				 MOVE_OFFSET_MASK);
+}
+
 #define APPEND_CMD(cmd, op) \
 static inline void append_##cmd(u32 *desc, u32 options) \
 { \
@@ -177,7 +185,6 @@ static inline void append_##cmd(u32 *desc, u32 options) \
 	append_cmd(desc, CMD_##op | options); \
 }
 APPEND_CMD(operation, OPERATION)
-APPEND_CMD(move, MOVE)
 
 #define APPEND_CMD_LEN(cmd, op) \
 static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
@@ -328,7 +335,7 @@ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
 do { \
 	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
 	append_cmd(desc, data); \
-} while (0);
+} while (0)
 
 #define append_math_add_imm_u32(desc, dest, src0, src1, data) \
 	APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
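For reference, the new APPEND_CMD_RET() generator expands to the same shape the open-coded append_jump() had; APPEND_CMD_RET(move, MOVE), for instance, produces (modulo whitespace):

	static inline u32 *append_move(u32 *desc, u32 options)
	{
		u32 *cmd = desc_end(desc);

		PRINT_POS;
		append_cmd(desc, CMD_MOVE | options);

		return cmd;
	}

which is why the plain APPEND_CMD(move, MOVE) instantiation, whose expansion returns void, is removed further down: the MOVE-patching helpers need the command's address back so set_move_tgt_here() can rewrite its offset field later.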
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index d50174f45b21..cbde8b95a6f8 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -74,10 +74,10 @@
 #endif
 #else
 #ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(reg, data)
+#define wr_reg32(reg, data) __raw_writel(data, reg)
 #define rd_reg32(reg) __raw_readl(reg)
 #ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(reg, data)
+#define wr_reg64(reg, data) __raw_writeq(data, reg)
 #define rd_reg64(reg) __raw_readq(reg)
 #endif
 #endif
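The two regs.h fixes are argument-order corrections: the kernel's raw MMIO writers take the value first and the I/O address second, e.g. (base and REG_OFFSET are hypothetical names):

	/* value first, __iomem address second */
	__raw_writel(0xdeadbeef, base + REG_OFFSET);

so a macro defined as wr_reg32(reg, data) must forward (data, reg), as the patched lines now do; the old little-endian definitions passed the arguments transposed.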
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 2636f044789d..20dc848481e7 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/ccp.h>
@@ -24,28 +25,33 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
 MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
 
+static unsigned int aes_disable;
+module_param(aes_disable, uint, 0444);
+MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");
+
+static unsigned int sha_disable;
+module_param(sha_disable, uint, 0444);
+MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
+
 
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
 
-/* For any tfm, requests for that tfm on the same CPU must be returned
- * in the order received. With multiple queues available, the CCP can
- * process more than one cmd at a time. Therefore we must maintain
- * a cmd list to insure the proper ordering of requests on a given tfm/cpu
- * combination.
+/* For any tfm, requests for that tfm must be returned in the order
+ * received. With multiple queues available, the CCP can process more
+ * than one cmd at a time. Therefore we must maintain a cmd list to ensure
+ * the proper ordering of requests on a given tfm.
  */
-struct ccp_crypto_cpu_queue {
+struct ccp_crypto_queue {
 	struct list_head cmds;
 	struct list_head *backlog;
 	unsigned int cmd_count;
 };
-#define CCP_CRYPTO_MAX_QLEN	50
+#define CCP_CRYPTO_MAX_QLEN	100
 
-struct ccp_crypto_percpu_queue {
-	struct ccp_crypto_cpu_queue __percpu *cpu_queue;
-};
-static struct ccp_crypto_percpu_queue req_queue;
+static struct ccp_crypto_queue req_queue;
+static spinlock_t req_queue_lock;
 
 struct ccp_crypto_cmd {
 	struct list_head entry;
@@ -62,8 +68,6 @@ struct ccp_crypto_cmd {
 
 	/* Used for held command processing to determine state */
 	int ret;
-
-	int cpu;
 };
 
 struct ccp_crypto_cpu {
@@ -82,25 +86,21 @@ static inline bool ccp_crypto_success(int err) | |||
82 | return true; | 86 | return true; |
83 | } | 87 | } |
84 | 88 | ||
85 | /* | ||
86 | * ccp_crypto_cmd_complete must be called while running on the appropriate | ||
87 | * cpu and the caller must have done a get_cpu to disable preemption | ||
88 | */ | ||
89 | static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( | 89 | static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( |
90 | struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) | 90 | struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) |
91 | { | 91 | { |
92 | struct ccp_crypto_cpu_queue *cpu_queue; | ||
93 | struct ccp_crypto_cmd *held = NULL, *tmp; | 92 | struct ccp_crypto_cmd *held = NULL, *tmp; |
93 | unsigned long flags; | ||
94 | 94 | ||
95 | *backlog = NULL; | 95 | *backlog = NULL; |
96 | 96 | ||
97 | cpu_queue = this_cpu_ptr(req_queue.cpu_queue); | 97 | spin_lock_irqsave(&req_queue_lock, flags); |
98 | 98 | ||
99 | /* Held cmds will be after the current cmd in the queue so start | 99 | /* Held cmds will be after the current cmd in the queue so start |
100 | * searching for a cmd with a matching tfm for submission. | 100 | * searching for a cmd with a matching tfm for submission. |
101 | */ | 101 | */ |
102 | tmp = crypto_cmd; | 102 | tmp = crypto_cmd; |
103 | list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) { | 103 | list_for_each_entry_continue(tmp, &req_queue.cmds, entry) { |
104 | if (crypto_cmd->tfm != tmp->tfm) | 104 | if (crypto_cmd->tfm != tmp->tfm) |
105 | continue; | 105 | continue; |
106 | held = tmp; | 106 | held = tmp; |
@@ -111,47 +111,45 @@ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( | |||
111 | * Because cmds can be executed from any point in the cmd list | 111 | * Because cmds can be executed from any point in the cmd list |
112 | * special precautions have to be taken when handling the backlog. | 112 | * special precautions have to be taken when handling the backlog. |
113 | */ | 113 | */ |
114 | if (cpu_queue->backlog != &cpu_queue->cmds) { | 114 | if (req_queue.backlog != &req_queue.cmds) { |
115 | /* Skip over this cmd if it is the next backlog cmd */ | 115 | /* Skip over this cmd if it is the next backlog cmd */ |
116 | if (cpu_queue->backlog == &crypto_cmd->entry) | 116 | if (req_queue.backlog == &crypto_cmd->entry) |
117 | cpu_queue->backlog = crypto_cmd->entry.next; | 117 | req_queue.backlog = crypto_cmd->entry.next; |
118 | 118 | ||
119 | *backlog = container_of(cpu_queue->backlog, | 119 | *backlog = container_of(req_queue.backlog, |
120 | struct ccp_crypto_cmd, entry); | 120 | struct ccp_crypto_cmd, entry); |
121 | cpu_queue->backlog = cpu_queue->backlog->next; | 121 | req_queue.backlog = req_queue.backlog->next; |
122 | 122 | ||
123 | /* Skip over this cmd if it is now the next backlog cmd */ | 123 | /* Skip over this cmd if it is now the next backlog cmd */ |
124 | if (cpu_queue->backlog == &crypto_cmd->entry) | 124 | if (req_queue.backlog == &crypto_cmd->entry) |
125 | cpu_queue->backlog = crypto_cmd->entry.next; | 125 | req_queue.backlog = crypto_cmd->entry.next; |
126 | } | 126 | } |
127 | 127 | ||
128 | /* Remove the cmd entry from the list of cmds */ | 128 | /* Remove the cmd entry from the list of cmds */ |
129 | cpu_queue->cmd_count--; | 129 | req_queue.cmd_count--; |
130 | list_del(&crypto_cmd->entry); | 130 | list_del(&crypto_cmd->entry); |
131 | 131 | ||
132 | spin_unlock_irqrestore(&req_queue_lock, flags); | ||
133 | |||
132 | return held; | 134 | return held; |
133 | } | 135 | } |
134 | 136 | ||
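req_queue.backlog is a cursor into the cmd list that marks the first backlogged entry, which is why the function steps it past crypto_cmd twice: once before the cursor is consumed and once after, since consuming one backlog entry can land the cursor right back on the node being deleted. The underlying invariant, as a sketch:

    /* Sketch: keep a list cursor valid while deleting 'node'. If the
     * cursor points at the entry being removed, advance it first;
     * otherwise list_del() would leave the cursor dangling.
     */
    if (cursor == &node->entry)
            cursor = node->entry.next;
    list_del(&node->entry);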
135 | static void ccp_crypto_complete_on_cpu(struct work_struct *work) | 137 | static void ccp_crypto_complete(void *data, int err) |
136 | { | 138 | { |
137 | struct ccp_crypto_cpu *cpu_work = | 139 | struct ccp_crypto_cmd *crypto_cmd = data; |
138 | container_of(work, struct ccp_crypto_cpu, work); | ||
139 | struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd; | ||
140 | struct ccp_crypto_cmd *held, *next, *backlog; | 140 | struct ccp_crypto_cmd *held, *next, *backlog; |
141 | struct crypto_async_request *req = crypto_cmd->req; | 141 | struct crypto_async_request *req = crypto_cmd->req; |
142 | struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); | 142 | struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); |
143 | int cpu, ret; | 143 | int ret; |
144 | |||
145 | cpu = get_cpu(); | ||
146 | 144 | ||
147 | if (cpu_work->err == -EINPROGRESS) { | 145 | if (err == -EINPROGRESS) { |
148 | /* Only propagate the -EINPROGRESS if necessary */ | 146 | /* Only propagate the -EINPROGRESS if necessary */ |
149 | if (crypto_cmd->ret == -EBUSY) { | 147 | if (crypto_cmd->ret == -EBUSY) { |
150 | crypto_cmd->ret = -EINPROGRESS; | 148 | crypto_cmd->ret = -EINPROGRESS; |
151 | req->complete(req, -EINPROGRESS); | 149 | req->complete(req, -EINPROGRESS); |
152 | } | 150 | } |
153 | 151 | ||
154 | goto e_cpu; | 152 | return; |
155 | } | 153 | } |
156 | 154 | ||
157 | /* Operation has completed - update the queue before invoking | 155 | /* Operation has completed - update the queue before invoking |
@@ -169,18 +167,25 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work) | |||
169 | req->complete(req, -EINPROGRESS); | 167 | req->complete(req, -EINPROGRESS); |
170 | 168 | ||
171 | /* Completion callbacks */ | 169 | /* Completion callbacks */ |
172 | ret = cpu_work->err; | 170 | ret = err; |
173 | if (ctx->complete) | 171 | if (ctx->complete) |
174 | ret = ctx->complete(req, ret); | 172 | ret = ctx->complete(req, ret); |
175 | req->complete(req, ret); | 173 | req->complete(req, ret); |
176 | 174 | ||
177 | /* Submit the next cmd */ | 175 | /* Submit the next cmd */ |
178 | while (held) { | 176 | while (held) { |
177 | /* Since we have already queued the cmd, we must indicate that | ||
178 | * we can backlog so as not to "lose" this request. | ||
179 | */ | ||
180 | held->cmd->flags |= CCP_CMD_MAY_BACKLOG; | ||
179 | ret = ccp_enqueue_cmd(held->cmd); | 181 | ret = ccp_enqueue_cmd(held->cmd); |
180 | if (ccp_crypto_success(ret)) | 182 | if (ccp_crypto_success(ret)) |
181 | break; | 183 | break; |
182 | 184 | ||
183 | /* Error occurred, report it and get the next entry */ | 185 | /* Error occurred, report it and get the next entry */ |
186 | ctx = crypto_tfm_ctx(held->req->tfm); | ||
187 | if (ctx->complete) | ||
188 | ret = ctx->complete(held->req, ret); | ||
184 | held->req->complete(held->req, ret); | 189 | held->req->complete(held->req, ret); |
185 | 190 | ||
186 | next = ccp_crypto_cmd_complete(held, &backlog); | 191 | next = ccp_crypto_cmd_complete(held, &backlog); |
@@ -194,52 +199,29 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work) | |||
194 | } | 199 | } |
195 | 200 | ||
196 | kfree(crypto_cmd); | 201 | kfree(crypto_cmd); |
197 | |||
198 | e_cpu: | ||
199 | put_cpu(); | ||
200 | |||
201 | complete(&cpu_work->completion); | ||
202 | } | ||
203 | |||
204 | static void ccp_crypto_complete(void *data, int err) | ||
205 | { | ||
206 | struct ccp_crypto_cmd *crypto_cmd = data; | ||
207 | struct ccp_crypto_cpu cpu_work; | ||
208 | |||
209 | INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu); | ||
210 | init_completion(&cpu_work.completion); | ||
211 | cpu_work.crypto_cmd = crypto_cmd; | ||
212 | cpu_work.err = err; | ||
213 | |||
214 | schedule_work_on(crypto_cmd->cpu, &cpu_work.work); | ||
215 | |||
216 | /* Keep the completion call synchronous */ | ||
217 | wait_for_completion(&cpu_work.completion); | ||
218 | } | 202 | } |
219 | 203 | ||
220 | static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) | 204 | static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) |
221 | { | 205 | { |
222 | struct ccp_crypto_cpu_queue *cpu_queue; | ||
223 | struct ccp_crypto_cmd *active = NULL, *tmp; | 206 | struct ccp_crypto_cmd *active = NULL, *tmp; |
224 | int cpu, ret; | 207 | unsigned long flags; |
225 | 208 | bool free_cmd = true; | |
226 | cpu = get_cpu(); | 209 | int ret; |
227 | crypto_cmd->cpu = cpu; | ||
228 | 210 | ||
229 | cpu_queue = this_cpu_ptr(req_queue.cpu_queue); | 211 | spin_lock_irqsave(&req_queue_lock, flags); |
230 | 212 | ||
231 | /* Check if the cmd can/should be queued */ | 213 | /* Check if the cmd can/should be queued */ |
232 | if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) { | 214 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { |
233 | ret = -EBUSY; | 215 | ret = -EBUSY; |
234 | if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) | 216 | if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) |
235 | goto e_cpu; | 217 | goto e_lock; |
236 | } | 218 | } |
237 | 219 | ||
238 | /* Look for an entry with the same tfm. If there is a cmd | 220 | /* Look for an entry with the same tfm. If there is a cmd |
239 | * with the same tfm in the list for this cpu then the current | 221 | * with the same tfm in the list then the current cmd cannot |
240 | * cmd cannot be submitted to the CCP yet. | 222 | * be submitted to the CCP yet. |
241 | */ | 223 | */ |
242 | list_for_each_entry(tmp, &cpu_queue->cmds, entry) { | 224 | list_for_each_entry(tmp, &req_queue.cmds, entry) { |
243 | if (crypto_cmd->tfm != tmp->tfm) | 225 | if (crypto_cmd->tfm != tmp->tfm) |
244 | continue; | 226 | continue; |
245 | active = tmp; | 227 | active = tmp; |
@@ -250,21 +232,29 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) | |||
250 | if (!active) { | 232 | if (!active) { |
251 | ret = ccp_enqueue_cmd(crypto_cmd->cmd); | 233 | ret = ccp_enqueue_cmd(crypto_cmd->cmd); |
252 | if (!ccp_crypto_success(ret)) | 234 | if (!ccp_crypto_success(ret)) |
253 | goto e_cpu; | 235 | goto e_lock; /* Error, don't queue it */ |
236 | if ((ret == -EBUSY) && | ||
237 | !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) | ||
238 | goto e_lock; /* Not backlogging, don't queue it */ | ||
254 | } | 239 | } |
255 | 240 | ||
256 | if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) { | 241 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { |
257 | ret = -EBUSY; | 242 | ret = -EBUSY; |
258 | if (cpu_queue->backlog == &cpu_queue->cmds) | 243 | if (req_queue.backlog == &req_queue.cmds) |
259 | cpu_queue->backlog = &crypto_cmd->entry; | 244 | req_queue.backlog = &crypto_cmd->entry; |
260 | } | 245 | } |
261 | crypto_cmd->ret = ret; | 246 | crypto_cmd->ret = ret; |
262 | 247 | ||
263 | cpu_queue->cmd_count++; | 248 | req_queue.cmd_count++; |
264 | list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds); | 249 | list_add_tail(&crypto_cmd->entry, &req_queue.cmds); |
250 | |||
251 | free_cmd = false; | ||
265 | 252 | ||
266 | e_cpu: | 253 | e_lock: |
267 | put_cpu(); | 254 | spin_unlock_irqrestore(&req_queue_lock, flags); |
255 | |||
256 | if (free_cmd) | ||
257 | kfree(crypto_cmd); | ||
268 | 258 | ||
269 | return ret; | 259 | return ret; |
270 | } | 260 | } |
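Two details in this function are easy to miss. First, -EBUSY is only a legitimate "queued as backlog" result when the submitter set CCP_CMD_MAY_BACKLOG (mirroring CRYPTO_TFM_REQ_MAY_BACKLOG); otherwise the cmd is rejected outright. Second, every path that fails to queue the cmd now frees it here via free_cmd, which is what lets the kfree() disappear from ccp_crypto_enqueue_request() below. For context, a sketch of how a caller of the async crypto API consumes this convention ('done' and 'final_status' are assumed to be filled in by the request callback):

    ret = crypto_ahash_digest(req);
    if (ret == -EINPROGRESS || ret == -EBUSY) {
            /* -EBUSY means backlogged: the callback fires once with
             * -EINPROGRESS when the request is accepted, then again
             * with the final status.
             */
            wait_for_completion(&done);
            ret = final_status;
    }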
@@ -281,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req, | |||
281 | { | 271 | { |
282 | struct ccp_crypto_cmd *crypto_cmd; | 272 | struct ccp_crypto_cmd *crypto_cmd; |
283 | gfp_t gfp; | 273 | gfp_t gfp; |
284 | int ret; | ||
285 | 274 | ||
286 | gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; | 275 | gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; |
287 | 276 | ||
@@ -306,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req, | |||
306 | else | 295 | else |
307 | cmd->flags &= ~CCP_CMD_MAY_BACKLOG; | 296 | cmd->flags &= ~CCP_CMD_MAY_BACKLOG; |
308 | 297 | ||
309 | ret = ccp_crypto_enqueue_cmd(crypto_cmd); | 298 | return ccp_crypto_enqueue_cmd(crypto_cmd); |
310 | if (!ccp_crypto_success(ret)) | ||
311 | kfree(crypto_cmd); | ||
312 | |||
313 | return ret; | ||
314 | } | 299 | } |
315 | 300 | ||
316 | struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, | 301 | struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, |
@@ -337,21 +322,25 @@ static int ccp_register_algs(void) | |||
337 | { | 322 | { |
338 | int ret; | 323 | int ret; |
339 | 324 | ||
340 | ret = ccp_register_aes_algs(&cipher_algs); | 325 | if (!aes_disable) { |
341 | if (ret) | 326 | ret = ccp_register_aes_algs(&cipher_algs); |
342 | return ret; | 327 | if (ret) |
328 | return ret; | ||
343 | 329 | ||
344 | ret = ccp_register_aes_cmac_algs(&hash_algs); | 330 | ret = ccp_register_aes_cmac_algs(&hash_algs); |
345 | if (ret) | 331 | if (ret) |
346 | return ret; | 332 | return ret; |
347 | 333 | ||
348 | ret = ccp_register_aes_xts_algs(&cipher_algs); | 334 | ret = ccp_register_aes_xts_algs(&cipher_algs); |
349 | if (ret) | 335 | if (ret) |
350 | return ret; | 336 | return ret; |
337 | } | ||
351 | 338 | ||
352 | ret = ccp_register_sha_algs(&hash_algs); | 339 | if (!sha_disable) { |
353 | if (ret) | 340 | ret = ccp_register_sha_algs(&hash_algs); |
354 | return ret; | 341 | if (ret) |
342 | return ret; | ||
343 | } | ||
355 | 344 | ||
356 | return 0; | 345 | return 0; |
357 | } | 346 | } |
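aes_disable and sha_disable are module parameters added earlier in this patch (outside the hunks shown here), letting an administrator keep the CCP from claiming either algorithm family. A hedged sketch of the usual shape of such a switch; the exact type and permissions come from the full patch, not these hunks:

    static unsigned int aes_disable;
    module_param(aes_disable, uint, 0444);
    MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");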
@@ -374,50 +363,18 @@ static void ccp_unregister_algs(void) | |||
374 | } | 363 | } |
375 | } | 364 | } |
376 | 365 | ||
377 | static int ccp_init_queues(void) | ||
378 | { | ||
379 | struct ccp_crypto_cpu_queue *cpu_queue; | ||
380 | int cpu; | ||
381 | |||
382 | req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue); | ||
383 | if (!req_queue.cpu_queue) | ||
384 | return -ENOMEM; | ||
385 | |||
386 | for_each_possible_cpu(cpu) { | ||
387 | cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu); | ||
388 | INIT_LIST_HEAD(&cpu_queue->cmds); | ||
389 | cpu_queue->backlog = &cpu_queue->cmds; | ||
390 | cpu_queue->cmd_count = 0; | ||
391 | } | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | static void ccp_fini_queue(void) | ||
397 | { | ||
398 | struct ccp_crypto_cpu_queue *cpu_queue; | ||
399 | int cpu; | ||
400 | |||
401 | for_each_possible_cpu(cpu) { | ||
402 | cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu); | ||
403 | BUG_ON(!list_empty(&cpu_queue->cmds)); | ||
404 | } | ||
405 | free_percpu(req_queue.cpu_queue); | ||
406 | } | ||
407 | |||
408 | static int ccp_crypto_init(void) | 366 | static int ccp_crypto_init(void) |
409 | { | 367 | { |
410 | int ret; | 368 | int ret; |
411 | 369 | ||
412 | ret = ccp_init_queues(); | 370 | spin_lock_init(&req_queue_lock); |
413 | if (ret) | 371 | INIT_LIST_HEAD(&req_queue.cmds); |
414 | return ret; | 372 | req_queue.backlog = &req_queue.cmds; |
373 | req_queue.cmd_count = 0; | ||
415 | 374 | ||
416 | ret = ccp_register_algs(); | 375 | ret = ccp_register_algs(); |
417 | if (ret) { | 376 | if (ret) |
418 | ccp_unregister_algs(); | 377 | ccp_unregister_algs(); |
419 | ccp_fini_queue(); | ||
420 | } | ||
421 | 378 | ||
422 | return ret; | 379 | return ret; |
423 | } | 380 | } |
@@ -425,7 +382,6 @@ static int ccp_crypto_init(void) | |||
425 | static void ccp_crypto_exit(void) | 382 | static void ccp_crypto_exit(void) |
426 | { | 383 | { |
427 | ccp_unregister_algs(); | 384 | ccp_unregister_algs(); |
428 | ccp_fini_queue(); | ||
429 | } | 385 | } |
430 | 386 | ||
431 | module_init(ccp_crypto_init); | 387 | module_init(ccp_crypto_init); |
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 3867290b3531..873f23425245 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c | |||
@@ -24,75 +24,10 @@ | |||
24 | #include "ccp-crypto.h" | 24 | #include "ccp-crypto.h" |
25 | 25 | ||
26 | 26 | ||
27 | struct ccp_sha_result { | ||
28 | struct completion completion; | ||
29 | int err; | ||
30 | }; | ||
31 | |||
32 | static void ccp_sync_hash_complete(struct crypto_async_request *req, int err) | ||
33 | { | ||
34 | struct ccp_sha_result *result = req->data; | ||
35 | |||
36 | if (err == -EINPROGRESS) | ||
37 | return; | ||
38 | |||
39 | result->err = err; | ||
40 | complete(&result->completion); | ||
41 | } | ||
42 | |||
43 | static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf, | ||
44 | struct scatterlist *sg, unsigned int len) | ||
45 | { | ||
46 | struct ccp_sha_result result; | ||
47 | struct ahash_request *req; | ||
48 | int ret; | ||
49 | |||
50 | init_completion(&result.completion); | ||
51 | |||
52 | req = ahash_request_alloc(tfm, GFP_KERNEL); | ||
53 | if (!req) | ||
54 | return -ENOMEM; | ||
55 | |||
56 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
57 | ccp_sync_hash_complete, &result); | ||
58 | ahash_request_set_crypt(req, sg, buf, len); | ||
59 | |||
60 | ret = crypto_ahash_digest(req); | ||
61 | if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { | ||
62 | ret = wait_for_completion_interruptible(&result.completion); | ||
63 | if (!ret) | ||
64 | ret = result.err; | ||
65 | } | ||
66 | |||
67 | ahash_request_free(req); | ||
68 | |||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | static int ccp_sha_finish_hmac(struct crypto_async_request *async_req) | ||
73 | { | ||
74 | struct ahash_request *req = ahash_request_cast(async_req); | ||
75 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
76 | struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); | ||
77 | struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); | ||
78 | struct scatterlist sg[2]; | ||
79 | unsigned int block_size = | ||
80 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
81 | unsigned int digest_size = crypto_ahash_digestsize(tfm); | ||
82 | |||
83 | sg_init_table(sg, ARRAY_SIZE(sg)); | ||
84 | sg_set_buf(&sg[0], ctx->u.sha.opad, block_size); | ||
85 | sg_set_buf(&sg[1], rctx->ctx, digest_size); | ||
86 | |||
87 | return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg, | ||
88 | block_size + digest_size); | ||
89 | } | ||
90 | |||
91 | static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) | 27 | static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) |
92 | { | 28 | { |
93 | struct ahash_request *req = ahash_request_cast(async_req); | 29 | struct ahash_request *req = ahash_request_cast(async_req); |
94 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 30 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
95 | struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); | ||
96 | struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); | 31 | struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); |
97 | unsigned int digest_size = crypto_ahash_digestsize(tfm); | 32 | unsigned int digest_size = crypto_ahash_digestsize(tfm); |
98 | 33 | ||
@@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) | |||
112 | if (req->result) | 47 | if (req->result) |
113 | memcpy(req->result, rctx->ctx, digest_size); | 48 | memcpy(req->result, rctx->ctx, digest_size); |
114 | 49 | ||
115 | /* If we're doing an HMAC, we need to perform that on the final op */ | ||
116 | if (rctx->final && ctx->u.sha.key_len) | ||
117 | ret = ccp_sha_finish_hmac(async_req); | ||
118 | |||
119 | e_free: | 50 | e_free: |
120 | sg_free_table(&rctx->data_sg); | 51 | sg_free_table(&rctx->data_sg); |
121 | 52 | ||
@@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, | |||
126 | unsigned int final) | 57 | unsigned int final) |
127 | { | 58 | { |
128 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 59 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
60 | struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); | ||
129 | struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); | 61 | struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); |
130 | struct scatterlist *sg; | 62 | struct scatterlist *sg; |
131 | unsigned int block_size = | 63 | unsigned int block_size = |
@@ -196,6 +128,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, | |||
196 | rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); | 128 | rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); |
197 | rctx->cmd.u.sha.src = sg; | 129 | rctx->cmd.u.sha.src = sg; |
198 | rctx->cmd.u.sha.src_len = rctx->hash_cnt; | 130 | rctx->cmd.u.sha.src_len = rctx->hash_cnt; |
131 | rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? | ||
132 | &ctx->u.sha.opad_sg : NULL; | ||
133 | rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? | ||
134 | ctx->u.sha.opad_count : 0; | ||
135 | rctx->cmd.u.sha.first = rctx->first; | ||
199 | rctx->cmd.u.sha.final = rctx->final; | 136 | rctx->cmd.u.sha.final = rctx->final; |
200 | rctx->cmd.u.sha.msg_bits = rctx->msg_bits; | 137 | rctx->cmd.u.sha.msg_bits = rctx->msg_bits; |
201 | 138 | ||
@@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req) | |||
218 | 155 | ||
219 | memset(rctx, 0, sizeof(*rctx)); | 156 | memset(rctx, 0, sizeof(*rctx)); |
220 | 157 | ||
221 | memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx)); | ||
222 | rctx->type = alg->type; | 158 | rctx->type = alg->type; |
223 | rctx->first = 1; | 159 | rctx->first = 1; |
224 | 160 | ||
@@ -261,10 +197,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
261 | unsigned int key_len) | 197 | unsigned int key_len) |
262 | { | 198 | { |
263 | struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 199 | struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); |
264 | struct scatterlist sg; | 200 | struct crypto_shash *shash = ctx->u.sha.hmac_tfm; |
265 | unsigned int block_size = | 201 | struct { |
266 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | 202 | struct shash_desc sdesc; |
267 | unsigned int digest_size = crypto_ahash_digestsize(tfm); | 203 | char ctx[crypto_shash_descsize(shash)]; |
204 | } desc; | ||
205 | unsigned int block_size = crypto_shash_blocksize(shash); | ||
206 | unsigned int digest_size = crypto_shash_digestsize(shash); | ||
268 | int i, ret; | 207 | int i, ret; |
269 | 208 | ||
270 | /* Set to zero until complete */ | 209 | /* Set to zero until complete */ |
@@ -277,8 +216,12 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
277 | 216 | ||
278 | if (key_len > block_size) { | 217 | if (key_len > block_size) { |
279 | /* Must hash the input key */ | 218 | /* Must hash the input key */ |
280 | sg_init_one(&sg, key, key_len); | 219 | desc.sdesc.tfm = shash; |
281 | ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len); | 220 | desc.sdesc.flags = crypto_ahash_get_flags(tfm) & |
221 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
222 | |||
223 | ret = crypto_shash_digest(&desc.sdesc, key, key_len, | ||
224 | ctx->u.sha.key); | ||
282 | if (ret) { | 225 | if (ret) { |
283 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 226 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
284 | return -EINVAL; | 227 | return -EINVAL; |
@@ -293,6 +236,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
293 | ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; | 236 | ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; |
294 | } | 237 | } |
295 | 238 | ||
239 | sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); | ||
240 | ctx->u.sha.opad_count = block_size; | ||
241 | |||
296 | ctx->u.sha.key_len = key_len; | 242 | ctx->u.sha.key_len = key_len; |
297 | 243 | ||
298 | return 0; | 244 | return 0; |
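Hashing an over-long HMAC key now uses the synchronous shash API instead of bouncing through a full ahash request; the unnamed struct around sdesc above is an open-coded variable-length shash descriptor sized at runtime via crypto_shash_descsize(). Later kernels wrap the same idiom in a helper macro; a sketch assuming that macro is available:

    /* SHASH_DESC_ON_STACK is a later addition that expands to the
     * descriptor-plus-context layout this patch builds by hand
     * (the flags field is omitted here for brevity).
     */
    SHASH_DESC_ON_STACK(sdesc, shash);
    int ret;

    sdesc->tfm = shash;
    ret = crypto_shash_digest(sdesc, key, key_len, ctx->u.sha.key);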
@@ -319,10 +265,9 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) | |||
319 | { | 265 | { |
320 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); | 266 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); |
321 | struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); | 267 | struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); |
322 | struct crypto_ahash *hmac_tfm; | 268 | struct crypto_shash *hmac_tfm; |
323 | 269 | ||
324 | hmac_tfm = crypto_alloc_ahash(alg->child_alg, | 270 | hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); |
325 | CRYPTO_ALG_TYPE_AHASH, 0); | ||
326 | if (IS_ERR(hmac_tfm)) { | 271 | if (IS_ERR(hmac_tfm)) { |
327 | pr_warn("could not load driver %s needed for HMAC support\n", | 272 | pr_warn("could not load driver %s needed for HMAC support\n", |
328 | alg->child_alg); | 273 | alg->child_alg); |
@@ -339,35 +284,14 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) | |||
339 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); | 284 | struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); |
340 | 285 | ||
341 | if (ctx->u.sha.hmac_tfm) | 286 | if (ctx->u.sha.hmac_tfm) |
342 | crypto_free_ahash(ctx->u.sha.hmac_tfm); | 287 | crypto_free_shash(ctx->u.sha.hmac_tfm); |
343 | 288 | ||
344 | ccp_sha_cra_exit(tfm); | 289 | ccp_sha_cra_exit(tfm); |
345 | } | 290 | } |
346 | 291 | ||
347 | static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | ||
348 | cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), | ||
349 | cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), | ||
350 | cpu_to_be32(SHA1_H4), 0, 0, 0, | ||
351 | }; | ||
352 | |||
353 | static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | ||
354 | cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), | ||
355 | cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), | ||
356 | cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), | ||
357 | cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), | ||
358 | }; | ||
359 | |||
360 | static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | ||
361 | cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), | ||
362 | cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), | ||
363 | cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), | ||
364 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), | ||
365 | }; | ||
366 | |||
367 | struct ccp_sha_def { | 292 | struct ccp_sha_def { |
368 | const char *name; | 293 | const char *name; |
369 | const char *drv_name; | 294 | const char *drv_name; |
370 | const __be32 *init; | ||
371 | enum ccp_sha_type type; | 295 | enum ccp_sha_type type; |
372 | u32 digest_size; | 296 | u32 digest_size; |
373 | u32 block_size; | 297 | u32 block_size; |
@@ -377,7 +301,6 @@ static struct ccp_sha_def sha_algs[] = { | |||
377 | { | 301 | { |
378 | .name = "sha1", | 302 | .name = "sha1", |
379 | .drv_name = "sha1-ccp", | 303 | .drv_name = "sha1-ccp", |
380 | .init = sha1_init, | ||
381 | .type = CCP_SHA_TYPE_1, | 304 | .type = CCP_SHA_TYPE_1, |
382 | .digest_size = SHA1_DIGEST_SIZE, | 305 | .digest_size = SHA1_DIGEST_SIZE, |
383 | .block_size = SHA1_BLOCK_SIZE, | 306 | .block_size = SHA1_BLOCK_SIZE, |
@@ -385,7 +308,6 @@ static struct ccp_sha_def sha_algs[] = { | |||
385 | { | 308 | { |
386 | .name = "sha224", | 309 | .name = "sha224", |
387 | .drv_name = "sha224-ccp", | 310 | .drv_name = "sha224-ccp", |
388 | .init = sha224_init, | ||
389 | .type = CCP_SHA_TYPE_224, | 311 | .type = CCP_SHA_TYPE_224, |
390 | .digest_size = SHA224_DIGEST_SIZE, | 312 | .digest_size = SHA224_DIGEST_SIZE, |
391 | .block_size = SHA224_BLOCK_SIZE, | 313 | .block_size = SHA224_BLOCK_SIZE, |
@@ -393,7 +315,6 @@ static struct ccp_sha_def sha_algs[] = { | |||
393 | { | 315 | { |
394 | .name = "sha256", | 316 | .name = "sha256", |
395 | .drv_name = "sha256-ccp", | 317 | .drv_name = "sha256-ccp", |
396 | .init = sha256_init, | ||
397 | .type = CCP_SHA_TYPE_256, | 318 | .type = CCP_SHA_TYPE_256, |
398 | .digest_size = SHA256_DIGEST_SIZE, | 319 | .digest_size = SHA256_DIGEST_SIZE, |
399 | .block_size = SHA256_BLOCK_SIZE, | 320 | .block_size = SHA256_BLOCK_SIZE, |
@@ -460,7 +381,6 @@ static int ccp_register_sha_alg(struct list_head *head, | |||
460 | 381 | ||
461 | INIT_LIST_HEAD(&ccp_alg->entry); | 382 | INIT_LIST_HEAD(&ccp_alg->entry); |
462 | 383 | ||
463 | ccp_alg->init = def->init; | ||
464 | ccp_alg->type = def->type; | 384 | ccp_alg->type = def->type; |
465 | 385 | ||
466 | alg = &ccp_alg->alg; | 386 | alg = &ccp_alg->alg; |
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index b222231b6169..9aa4ae184f7f 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h | |||
@@ -137,11 +137,14 @@ struct ccp_aes_cmac_req_ctx { | |||
137 | #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE | 137 | #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE |
138 | 138 | ||
139 | struct ccp_sha_ctx { | 139 | struct ccp_sha_ctx { |
140 | struct scatterlist opad_sg; | ||
141 | unsigned int opad_count; | ||
142 | |||
140 | unsigned int key_len; | 143 | unsigned int key_len; |
141 | u8 key[MAX_SHA_BLOCK_SIZE]; | 144 | u8 key[MAX_SHA_BLOCK_SIZE]; |
142 | u8 ipad[MAX_SHA_BLOCK_SIZE]; | 145 | u8 ipad[MAX_SHA_BLOCK_SIZE]; |
143 | u8 opad[MAX_SHA_BLOCK_SIZE]; | 146 | u8 opad[MAX_SHA_BLOCK_SIZE]; |
144 | struct crypto_ahash *hmac_tfm; | 147 | struct crypto_shash *hmac_tfm; |
145 | }; | 148 | }; |
146 | 149 | ||
147 | struct ccp_sha_req_ctx { | 150 | struct ccp_sha_req_ctx { |
@@ -167,9 +170,6 @@ struct ccp_sha_req_ctx { | |||
167 | unsigned int buf_count; | 170 | unsigned int buf_count; |
168 | u8 buf[MAX_SHA_BLOCK_SIZE]; | 171 | u8 buf[MAX_SHA_BLOCK_SIZE]; |
169 | 172 | ||
170 | /* HMAC support field */ | ||
171 | struct scatterlist pad_sg; | ||
172 | |||
173 | /* CCP driver command */ | 173 | /* CCP driver command */ |
174 | struct ccp_cmd cmd; | 174 | struct ccp_cmd cmd; |
175 | }; | 175 | }; |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index c3bc21264600..2c7816149b01 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -30,6 +30,11 @@ MODULE_LICENSE("GPL"); | |||
30 | MODULE_VERSION("1.0.0"); | 30 | MODULE_VERSION("1.0.0"); |
31 | MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); | 31 | MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); |
32 | 32 | ||
33 | struct ccp_tasklet_data { | ||
34 | struct completion completion; | ||
35 | struct ccp_cmd *cmd; | ||
36 | }; | ||
37 | |||
33 | 38 | ||
34 | static struct ccp_device *ccp_dev; | 39 | static struct ccp_device *ccp_dev; |
35 | static inline struct ccp_device *ccp_get_device(void) | 40 | static inline struct ccp_device *ccp_get_device(void) |
@@ -192,17 +197,23 @@ static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) | |||
192 | return cmd; | 197 | return cmd; |
193 | } | 198 | } |
194 | 199 | ||
195 | static void ccp_do_cmd_complete(struct work_struct *work) | 200 | static void ccp_do_cmd_complete(unsigned long data) |
196 | { | 201 | { |
197 | struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work); | 202 | struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data; |
203 | struct ccp_cmd *cmd = tdata->cmd; | ||
198 | 204 | ||
199 | cmd->callback(cmd->data, cmd->ret); | 205 | cmd->callback(cmd->data, cmd->ret); |
206 | complete(&tdata->completion); | ||
200 | } | 207 | } |
201 | 208 | ||
202 | static int ccp_cmd_queue_thread(void *data) | 209 | static int ccp_cmd_queue_thread(void *data) |
203 | { | 210 | { |
204 | struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; | 211 | struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; |
205 | struct ccp_cmd *cmd; | 212 | struct ccp_cmd *cmd; |
213 | struct ccp_tasklet_data tdata; | ||
214 | struct tasklet_struct tasklet; | ||
215 | |||
216 | tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata); | ||
206 | 217 | ||
207 | set_current_state(TASK_INTERRUPTIBLE); | 218 | set_current_state(TASK_INTERRUPTIBLE); |
208 | while (!kthread_should_stop()) { | 219 | while (!kthread_should_stop()) { |
@@ -220,8 +231,10 @@ static int ccp_cmd_queue_thread(void *data) | |||
220 | cmd->ret = ccp_run_cmd(cmd_q, cmd); | 231 | cmd->ret = ccp_run_cmd(cmd_q, cmd); |
221 | 232 | ||
222 | /* Schedule the completion callback */ | 233 | /* Schedule the completion callback */ |
223 | INIT_WORK(&cmd->work, ccp_do_cmd_complete); | 234 | tdata.cmd = cmd; |
224 | schedule_work(&cmd->work); | 235 | init_completion(&tdata.completion); |
236 | tasklet_schedule(&tasklet); | ||
237 | wait_for_completion(&tdata.completion); | ||
225 | } | 238 | } |
226 | 239 | ||
227 | __set_current_state(TASK_RUNNING); | 240 | __set_current_state(TASK_RUNNING); |
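Completion callbacks move from a scheduled work item to a tasklet owned by the queue thread: the callback now runs in softirq context, and the thread blocks until it finishes, so completions stay strictly serial per queue and tdata can safely live on the thread's stack. The pattern in isolation ('my_complete_fn' is an illustrative name):

    /* Sketch: run a callback in softirq context and wait for it. */
    struct tasklet_struct tasklet;
    struct ccp_tasklet_data tdata;

    tasklet_init(&tasklet, my_complete_fn, (unsigned long)&tdata);

    tdata.cmd = cmd;
    init_completion(&tdata.completion);
    tasklet_schedule(&tasklet);
    wait_for_completion(&tdata.completion);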
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 71ed3ade7e12..9ae006d69df4 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/ccp.h> | 23 | #include <linux/ccp.h> |
24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
25 | #include <crypto/scatterwalk.h> | 25 | #include <crypto/scatterwalk.h> |
26 | #include <crypto/sha.h> | ||
26 | 27 | ||
27 | #include "ccp-dev.h" | 28 | #include "ccp-dev.h" |
28 | 29 | ||
@@ -132,6 +133,27 @@ struct ccp_op { | |||
132 | } u; | 133 | } u; |
133 | }; | 134 | }; |
134 | 135 | ||
136 | /* SHA initial context values */ | ||
137 | static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | ||
138 | cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), | ||
139 | cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), | ||
140 | cpu_to_be32(SHA1_H4), 0, 0, 0, | ||
141 | }; | ||
142 | |||
143 | static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | ||
144 | cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), | ||
145 | cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), | ||
146 | cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), | ||
147 | cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), | ||
148 | }; | ||
149 | |||
150 | static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { | ||
151 | cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), | ||
152 | cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), | ||
153 | cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), | ||
154 | cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), | ||
155 | }; | ||
156 | |||
135 | /* The CCP cannot perform zero-length sha operations so the caller | 157 | /* The CCP cannot perform zero-length sha operations so the caller |
136 | * is required to buffer data for the final operation. However, a | 158 | * is required to buffer data for the final operation. However, a |
137 | * sha operation for a message with a total length of zero is valid | 159 | * sha operation for a message with a total length of zero is valid |
@@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1411 | if (ret) | 1433 | if (ret) |
1412 | return ret; | 1434 | return ret; |
1413 | 1435 | ||
1414 | ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1436 | if (sha->first) { |
1437 | const __be32 *init; | ||
1438 | |||
1439 | switch (sha->type) { | ||
1440 | case CCP_SHA_TYPE_1: | ||
1441 | init = ccp_sha1_init; | ||
1442 | break; | ||
1443 | case CCP_SHA_TYPE_224: | ||
1444 | init = ccp_sha224_init; | ||
1445 | break; | ||
1446 | case CCP_SHA_TYPE_256: | ||
1447 | init = ccp_sha256_init; | ||
1448 | break; | ||
1449 | default: | ||
1450 | ret = -EINVAL; | ||
1451 | goto e_ctx; | ||
1452 | } | ||
1453 | memcpy(ctx.address, init, CCP_SHA_CTXSIZE); | ||
1454 | } else | ||
1455 | ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | ||
1456 | |||
1415 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, | 1457 | ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, |
1416 | CCP_PASSTHRU_BYTESWAP_256BIT); | 1458 | CCP_PASSTHRU_BYTESWAP_256BIT); |
1417 | if (ret) { | 1459 | if (ret) { |
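With the initial hash values moved into the command processor, first-block commands no longer need the caller to preload the context: when sha->first is set, the engine context is seeded from the tables above. The constants themselves come straight from the SHA specification; for SHA-256 each H value is the first 32 bits of the fractional part of the square root of one of the first eight primes. A tiny standalone check for SHA256_H0:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* frac(sqrt(2)) * 2^32, truncated: expect 6a09e667 (SHA256_H0) */
            double frac = sqrt(2.0) - 1.0;
            uint32_t h0 = (uint32_t)(frac * 4294967296.0);

            printf("%08x\n", h0);
            return 0;
    }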
@@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1451 | 1493 | ||
1452 | ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); | 1494 | ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); |
1453 | 1495 | ||
1496 | if (sha->final && sha->opad) { | ||
1497 | /* HMAC operation, recursively perform final SHA */ | ||
1498 | struct ccp_cmd hmac_cmd; | ||
1499 | struct scatterlist sg; | ||
1500 | u64 block_size, digest_size; | ||
1501 | u8 *hmac_buf; | ||
1502 | |||
1503 | switch (sha->type) { | ||
1504 | case CCP_SHA_TYPE_1: | ||
1505 | block_size = SHA1_BLOCK_SIZE; | ||
1506 | digest_size = SHA1_DIGEST_SIZE; | ||
1507 | break; | ||
1508 | case CCP_SHA_TYPE_224: | ||
1509 | block_size = SHA224_BLOCK_SIZE; | ||
1510 | digest_size = SHA224_DIGEST_SIZE; | ||
1511 | break; | ||
1512 | case CCP_SHA_TYPE_256: | ||
1513 | block_size = SHA256_BLOCK_SIZE; | ||
1514 | digest_size = SHA256_DIGEST_SIZE; | ||
1515 | break; | ||
1516 | default: | ||
1517 | ret = -EINVAL; | ||
1518 | goto e_data; | ||
1519 | } | ||
1520 | |||
1521 | if (sha->opad_len != block_size) { | ||
1522 | ret = -EINVAL; | ||
1523 | goto e_data; | ||
1524 | } | ||
1525 | |||
1526 | hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); | ||
1527 | if (!hmac_buf) { | ||
1528 | ret = -ENOMEM; | ||
1529 | goto e_data; | ||
1530 | } | ||
1531 | sg_init_one(&sg, hmac_buf, block_size + digest_size); | ||
1532 | |||
1533 | scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); | ||
1534 | memcpy(hmac_buf + block_size, ctx.address, digest_size); | ||
1535 | |||
1536 | memset(&hmac_cmd, 0, sizeof(hmac_cmd)); | ||
1537 | hmac_cmd.engine = CCP_ENGINE_SHA; | ||
1538 | hmac_cmd.u.sha.type = sha->type; | ||
1539 | hmac_cmd.u.sha.ctx = sha->ctx; | ||
1540 | hmac_cmd.u.sha.ctx_len = sha->ctx_len; | ||
1541 | hmac_cmd.u.sha.src = &sg; | ||
1542 | hmac_cmd.u.sha.src_len = block_size + digest_size; | ||
1543 | hmac_cmd.u.sha.opad = NULL; | ||
1544 | hmac_cmd.u.sha.opad_len = 0; | ||
1545 | hmac_cmd.u.sha.first = 1; | ||
1546 | hmac_cmd.u.sha.final = 1; | ||
1547 | hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; | ||
1548 | |||
1549 | ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); | ||
1550 | if (ret) | ||
1551 | cmd->engine_error = hmac_cmd.engine_error; | ||
1552 | |||
1553 | kfree(hmac_buf); | ||
1554 | } | ||
1555 | |||
1454 | e_data: | 1556 | e_data: |
1455 | ccp_free_data(&src, cmd_q); | 1557 | ccp_free_data(&src, cmd_q); |
1456 | 1558 | ||
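This recursion is the outer half of HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)): the inner digest just produced is appended to the caller-supplied opad block and hashed as a single first+final message of block_size + digest_size bytes (hence msg_bits shifted left by 3). For comparison, a hedged software equivalent of the same outer pass using the kernel's synchronous hash API; 'hmac_outer' is a hypothetical helper, not the driver's code:

    static int hmac_outer(struct crypto_shash *tfm, const u8 *opad,
                          unsigned int block_size, const u8 *inner,
                          unsigned int digest_size, u8 *out)
    {
            SHASH_DESC_ON_STACK(desc, tfm);
            int ret;

            desc->tfm = tfm;
            ret = crypto_shash_init(desc);
            if (!ret)
                    ret = crypto_shash_update(desc, opad, block_size);
            if (!ret)
                    ret = crypto_shash_update(desc, inner, digest_size);
            if (!ret)
                    ret = crypto_shash_final(desc, out);
            return ret;
    }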
@@ -1666,8 +1768,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, | |||
1666 | 1768 | ||
1667 | op.dst.type = CCP_MEMTYPE_SYSTEM; | 1769 | op.dst.type = CCP_MEMTYPE_SYSTEM; |
1668 | op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); | 1770 | op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); |
1669 | op.src.u.dma.offset = dst.sg_wa.sg_used; | 1771 | op.dst.u.dma.offset = dst.sg_wa.sg_used; |
1670 | op.src.u.dma.length = op.src.u.dma.length; | 1772 | op.dst.u.dma.length = op.src.u.dma.length; |
1671 | 1773 | ||
1672 | ret = ccp_perform_passthru(&op); | 1774 | ret = ccp_perform_passthru(&op); |
1673 | if (ret) { | 1775 | if (ret) { |
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index a6db7fa6f891..7bbe0ab21eca 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #define DCP_MAX_CHANS 4 | 29 | #define DCP_MAX_CHANS 4 |
30 | #define DCP_BUF_SZ PAGE_SIZE | 30 | #define DCP_BUF_SZ PAGE_SIZE |
31 | 31 | ||
32 | #define DCP_ALIGNMENT 64 | ||
33 | |||
32 | /* DCP DMA descriptor. */ | 34 | /* DCP DMA descriptor. */ |
33 | struct dcp_dma_desc { | 35 | struct dcp_dma_desc { |
34 | uint32_t next_cmd_addr; | 36 | uint32_t next_cmd_addr; |
@@ -48,7 +50,6 @@ struct dcp_coherent_block { | |||
48 | uint8_t sha_in_buf[DCP_BUF_SZ]; | 50 | uint8_t sha_in_buf[DCP_BUF_SZ]; |
49 | 51 | ||
50 | uint8_t aes_key[2 * AES_KEYSIZE_128]; | 52 | uint8_t aes_key[2 * AES_KEYSIZE_128]; |
51 | uint8_t sha_digest[SHA256_DIGEST_SIZE]; | ||
52 | 53 | ||
53 | struct dcp_dma_desc desc[DCP_MAX_CHANS]; | 54 | struct dcp_dma_desc desc[DCP_MAX_CHANS]; |
54 | }; | 55 | }; |
@@ -83,13 +84,16 @@ struct dcp_async_ctx { | |||
83 | unsigned int hot:1; | 84 | unsigned int hot:1; |
84 | 85 | ||
85 | /* Crypto-specific context */ | 86 | /* Crypto-specific context */ |
86 | unsigned int enc:1; | ||
87 | unsigned int ecb:1; | ||
88 | struct crypto_ablkcipher *fallback; | 87 | struct crypto_ablkcipher *fallback; |
89 | unsigned int key_len; | 88 | unsigned int key_len; |
90 | uint8_t key[AES_KEYSIZE_128]; | 89 | uint8_t key[AES_KEYSIZE_128]; |
91 | }; | 90 | }; |
92 | 91 | ||
92 | struct dcp_aes_req_ctx { | ||
93 | unsigned int enc:1; | ||
94 | unsigned int ecb:1; | ||
95 | }; | ||
96 | |||
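enc and ecb describe a single request, not a key, so they move from the tfm-wide dcp_async_ctx into a per-request context: with the flags in the shared ctx, two concurrent requests on the same tfm could overwrite each other's mode between setup and execution. Per-request storage is carved out of each request via the reqsize set at init time (see the fallback-init hunk below); a sketch of the access pattern:

    /* Each request gets its own context area, so mode flags set here
     * cannot be clobbered by another request on the same tfm.
     */
    struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

    rctx->enc = enc;
    rctx->ecb = ecb;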
93 | struct dcp_sha_req_ctx { | 97 | struct dcp_sha_req_ctx { |
94 | unsigned int init:1; | 98 | unsigned int init:1; |
95 | unsigned int fini:1; | 99 | unsigned int fini:1; |
@@ -190,10 +194,12 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) | |||
190 | /* | 194 | /* |
191 | * Encryption (AES128) | 195 | * Encryption (AES128) |
192 | */ | 196 | */ |
193 | static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) | 197 | static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, |
198 | struct ablkcipher_request *req, int init) | ||
194 | { | 199 | { |
195 | struct dcp *sdcp = global_sdcp; | 200 | struct dcp *sdcp = global_sdcp; |
196 | struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; | 201 | struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; |
202 | struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); | ||
197 | int ret; | 203 | int ret; |
198 | 204 | ||
199 | dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, | 205 | dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, |
@@ -212,14 +218,14 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) | |||
212 | /* Payload contains the key. */ | 218 | /* Payload contains the key. */ |
213 | desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; | 219 | desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; |
214 | 220 | ||
215 | if (actx->enc) | 221 | if (rctx->enc) |
216 | desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; | 222 | desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; |
217 | if (init) | 223 | if (init) |
218 | desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; | 224 | desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; |
219 | 225 | ||
220 | desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; | 226 | desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; |
221 | 227 | ||
222 | if (actx->ecb) | 228 | if (rctx->ecb) |
223 | desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; | 229 | desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; |
224 | else | 230 | else |
225 | desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; | 231 | desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; |
@@ -247,6 +253,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) | |||
247 | 253 | ||
248 | struct ablkcipher_request *req = ablkcipher_request_cast(arq); | 254 | struct ablkcipher_request *req = ablkcipher_request_cast(arq); |
249 | struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); | 255 | struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); |
256 | struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); | ||
250 | 257 | ||
251 | struct scatterlist *dst = req->dst; | 258 | struct scatterlist *dst = req->dst; |
252 | struct scatterlist *src = req->src; | 259 | struct scatterlist *src = req->src; |
@@ -271,7 +278,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) | |||
271 | /* Copy the key from the temporary location. */ | 278 | /* Copy the key from the temporary location. */ |
272 | memcpy(key, actx->key, actx->key_len); | 279 | memcpy(key, actx->key, actx->key_len); |
273 | 280 | ||
274 | if (!actx->ecb) { | 281 | if (!rctx->ecb) { |
275 | /* Copy the CBC IV just past the key. */ | 282 | /* Copy the CBC IV just past the key. */ |
276 | memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); | 283 | memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); |
277 | /* CBC needs the INIT set. */ | 284 | /* CBC needs the INIT set. */ |
@@ -300,7 +307,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) | |||
300 | * submit the buffer. | 307 | * submit the buffer. |
301 | */ | 308 | */ |
302 | if (actx->fill == out_off || sg_is_last(src)) { | 309 | if (actx->fill == out_off || sg_is_last(src)) { |
303 | ret = mxs_dcp_run_aes(actx, init); | 310 | ret = mxs_dcp_run_aes(actx, req, init); |
304 | if (ret) | 311 | if (ret) |
305 | return ret; | 312 | return ret; |
306 | init = 0; | 313 | init = 0; |
@@ -391,13 +398,14 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb) | |||
391 | struct dcp *sdcp = global_sdcp; | 398 | struct dcp *sdcp = global_sdcp; |
392 | struct crypto_async_request *arq = &req->base; | 399 | struct crypto_async_request *arq = &req->base; |
393 | struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); | 400 | struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); |
401 | struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); | ||
394 | int ret; | 402 | int ret; |
395 | 403 | ||
396 | if (unlikely(actx->key_len != AES_KEYSIZE_128)) | 404 | if (unlikely(actx->key_len != AES_KEYSIZE_128)) |
397 | return mxs_dcp_block_fallback(req, enc); | 405 | return mxs_dcp_block_fallback(req, enc); |
398 | 406 | ||
399 | actx->enc = enc; | 407 | rctx->enc = enc; |
400 | actx->ecb = ecb; | 408 | rctx->ecb = ecb; |
401 | actx->chan = DCP_CHAN_CRYPTO; | 409 | actx->chan = DCP_CHAN_CRYPTO; |
402 | 410 | ||
403 | mutex_lock(&sdcp->mutex[actx->chan]); | 411 | mutex_lock(&sdcp->mutex[actx->chan]); |
@@ -484,7 +492,7 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm) | |||
484 | return PTR_ERR(blk); | 492 | return PTR_ERR(blk); |
485 | 493 | ||
486 | actx->fallback = blk; | 494 | actx->fallback = blk; |
487 | tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx); | 495 | tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx); |
488 | return 0; | 496 | return 0; |
489 | } | 497 | } |
490 | 498 | ||
@@ -507,13 +515,11 @@ static int mxs_dcp_run_sha(struct ahash_request *req) | |||
507 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 515 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
508 | struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); | 516 | struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); |
509 | struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); | 517 | struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); |
518 | struct hash_alg_common *halg = crypto_hash_alg_common(tfm); | ||
510 | 519 | ||
511 | struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; | 520 | struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; |
512 | dma_addr_t digest_phys = dma_map_single(sdcp->dev, | ||
513 | sdcp->coh->sha_digest, | ||
514 | SHA256_DIGEST_SIZE, | ||
515 | DMA_FROM_DEVICE); | ||
516 | 521 | ||
522 | dma_addr_t digest_phys = 0; | ||
517 | dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, | 523 | dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, |
518 | DCP_BUF_SZ, DMA_TO_DEVICE); | 524 | DCP_BUF_SZ, DMA_TO_DEVICE); |
519 | 525 | ||
@@ -534,14 +540,18 @@ static int mxs_dcp_run_sha(struct ahash_request *req) | |||
534 | 540 | ||
535 | /* Set HASH_TERM bit for last transfer block. */ | 541 | /* Set HASH_TERM bit for last transfer block. */ |
536 | if (rctx->fini) { | 542 | if (rctx->fini) { |
543 | digest_phys = dma_map_single(sdcp->dev, req->result, | ||
544 | halg->digestsize, DMA_FROM_DEVICE); | ||
537 | desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; | 545 | desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; |
538 | desc->payload = digest_phys; | 546 | desc->payload = digest_phys; |
539 | } | 547 | } |
540 | 548 | ||
541 | ret = mxs_dcp_start_dma(actx); | 549 | ret = mxs_dcp_start_dma(actx); |
542 | 550 | ||
543 | dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE, | 551 | if (rctx->fini) |
544 | DMA_FROM_DEVICE); | 552 | dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize, |
553 | DMA_FROM_DEVICE); | ||
554 | |||
545 | dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); | 555 | dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); |
546 | 556 | ||
547 | return ret; | 557 | return ret; |
@@ -558,7 +568,6 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) | |||
558 | struct hash_alg_common *halg = crypto_hash_alg_common(tfm); | 568 | struct hash_alg_common *halg = crypto_hash_alg_common(tfm); |
559 | const int nents = sg_nents(req->src); | 569 | const int nents = sg_nents(req->src); |
560 | 570 | ||
561 | uint8_t *digest = sdcp->coh->sha_digest; | ||
562 | uint8_t *in_buf = sdcp->coh->sha_in_buf; | 571 | uint8_t *in_buf = sdcp->coh->sha_in_buf; |
563 | 572 | ||
564 | uint8_t *src_buf; | 573 | uint8_t *src_buf; |
@@ -605,14 +614,20 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) | |||
605 | rctx->fini = 1; | 614 | rctx->fini = 1; |
606 | 615 | ||
607 | /* Submit whatever is left. */ | 616 | /* Submit whatever is left. */ |
617 | if (!req->result) | ||
618 | return -EINVAL; | ||
619 | |||
608 | ret = mxs_dcp_run_sha(req); | 620 | ret = mxs_dcp_run_sha(req); |
609 | if (ret || !req->result) | 621 | if (ret) |
610 | return ret; | 622 | return ret; |
623 | |||
611 | actx->fill = 0; | 624 | actx->fill = 0; |
612 | 625 | ||
613 | /* For some reason, the result is flipped. */ | 626 | /* For some reason, the result is flipped. */ |
614 | for (i = 0; i < halg->digestsize; i++) | 627 | for (i = 0; i < halg->digestsize / 2; i++) { |
615 | req->result[i] = digest[halg->digestsize - i - 1]; | 628 | swap(req->result[i], |
629 | req->result[halg->digestsize - i - 1]); | ||
630 | } | ||
616 | } | 631 | } |
617 | 632 | ||
618 | return 0; | 633 | return 0; |
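The digest is now DMA'd by the hardware directly into req->result and byte-reversed in place. Note the loop bound of digestsize / 2 with swap(): iterating over the full length would swap every pair twice and restore the original order. The same reversal as a standalone program:

    #include <stdio.h>

    /* In-place byte reversal: only len/2 iterations, or the second
     * half of the loop would undo the first.
     */
    static void reverse_bytes(unsigned char *buf, unsigned int len)
    {
            for (unsigned int i = 0; i < len / 2; i++) {
                    unsigned char t = buf[i];

                    buf[i] = buf[len - i - 1];
                    buf[len - i - 1] = t;
            }
    }

    int main(void)
    {
            unsigned char d[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

            reverse_bytes(d, sizeof(d));
            for (int i = 0; i < 8; i++)
                    printf("%d ", d[i]);    /* prints: 8 7 6 5 4 3 2 1 */
            printf("\n");
            return 0;
    }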
@@ -901,9 +916,14 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
901 | 916 | ||
902 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 917 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
903 | dcp_vmi_irq = platform_get_irq(pdev, 0); | 918 | dcp_vmi_irq = platform_get_irq(pdev, 0); |
919 | if (dcp_vmi_irq < 0) { | ||
920 | ret = dcp_vmi_irq; | ||
921 | goto err_mutex; | ||
922 | } | ||
923 | |||
904 | dcp_irq = platform_get_irq(pdev, 1); | 924 | dcp_irq = platform_get_irq(pdev, 1); |
905 | if (dcp_vmi_irq < 0 || dcp_irq < 0) { | 925 | if (dcp_irq < 0) { |
906 | ret = -EINVAL; | 926 | ret = dcp_irq; |
907 | goto err_mutex; | 927 | goto err_mutex; |
908 | } | 928 | } |
909 | 929 | ||
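Splitting the check also fixes the error value: platform_get_irq() returns a negative errno that should be propagated as-is (flattening it to -EINVAL would, among other things, break -EPROBE_DEFER handling). The idiom in isolation:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;     /* preserve the errno, e.g. -EPROBE_DEFER */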
@@ -935,15 +955,20 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
935 | } | 955 | } |
936 | 956 | ||
937 | /* Allocate coherent helper block. */ | 957 | /* Allocate coherent helper block. */ |
938 | sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL); | 958 | sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT, |
959 | GFP_KERNEL); | ||
939 | if (!sdcp->coh) { | 960 | if (!sdcp->coh) { |
940 | dev_err(dev, "Error allocating coherent block\n"); | ||
941 | ret = -ENOMEM; | 961 | ret = -ENOMEM; |
942 | goto err_mutex; | 962 | goto err_mutex; |
943 | } | 963 | } |
944 | 964 | ||
965 | /* Re-align the structure so it fits the DCP constraints. */ | ||
966 | sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT); | ||
967 | |||
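The coherent block is now managed by devres and over-allocated by DCP_ALIGNMENT so the pointer can be rounded up to a 64-byte boundary. Overwriting sdcp->coh with the aligned pointer is safe because devm_kzalloc() tracks its own allocation and frees it on detach, regardless of what the driver does with the returned pointer. The idiom in isolation, with illustrative names:

    /* Over-allocate by the alignment, then round the pointer up;
     * devres remembers the original allocation, so the raw pointer
     * need not be kept.
     */
    blk = devm_kzalloc(dev, sizeof(*blk) + ALIGNMENT, GFP_KERNEL);
    if (!blk)
            return -ENOMEM;
    blk = PTR_ALIGN(blk, ALIGNMENT);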
945 | /* Restart the DCP block. */ | 968 | /* Restart the DCP block. */ |
946 | stmp_reset_block(sdcp->base); | 969 | ret = stmp_reset_block(sdcp->base); |
970 | if (ret) | ||
971 | goto err_mutex; | ||
947 | 972 | ||
948 | /* Initialize control register. */ | 973 | /* Initialize control register. */ |
949 | writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | | 974 | writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES | |
@@ -982,7 +1007,7 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
982 | if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { | 1007 | if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) { |
983 | dev_err(dev, "Error starting SHA thread!\n"); | 1008 | dev_err(dev, "Error starting SHA thread!\n"); |
984 | ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); | 1009 | ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]); |
985 | goto err_free_coherent; | 1010 | goto err_mutex; |
986 | } | 1011 | } |
987 | 1012 | ||
988 | sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, | 1013 | sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes, |
@@ -1040,8 +1065,6 @@ err_destroy_aes_thread: | |||
1040 | err_destroy_sha_thread: | 1065 | err_destroy_sha_thread: |
1041 | kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); | 1066 | kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]); |
1042 | 1067 | ||
1043 | err_free_coherent: | ||
1044 | kfree(sdcp->coh); | ||
1045 | err_mutex: | 1068 | err_mutex: |
1046 | mutex_unlock(&global_mutex); | 1069 | mutex_unlock(&global_mutex); |
1047 | return ret; | 1070 | return ret; |
@@ -1051,8 +1074,6 @@ static int mxs_dcp_remove(struct platform_device *pdev) | |||
1051 | { | 1074 | { |
1052 | struct dcp *sdcp = platform_get_drvdata(pdev); | 1075 | struct dcp *sdcp = platform_get_drvdata(pdev); |
1053 | 1076 | ||
1054 | kfree(sdcp->coh); | ||
1055 | |||
1056 | if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) | 1077 | if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) |
1057 | crypto_unregister_ahash(&dcp_sha256_alg); | 1078 | crypto_unregister_ahash(&dcp_sha256_alg); |
1058 | 1079 | ||
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index dde41f1df608..cb98fa54573d 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -1307,9 +1307,7 @@ static int omap_aes_resume(struct device *dev) | |||
1307 | } | 1307 | } |
1308 | #endif | 1308 | #endif |
1309 | 1309 | ||
1310 | static const struct dev_pm_ops omap_aes_pm_ops = { | 1310 | static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume); |
1311 | SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume) | ||
1312 | }; | ||
1313 | 1311 | ||
1314 | static struct platform_driver omap_aes_driver = { | 1312 | static struct platform_driver omap_aes_driver = { |
1315 | .probe = omap_aes_probe, | 1313 | .probe = omap_aes_probe, |
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c new file mode 100644 index 000000000000..ec5f13162b73 --- /dev/null +++ b/drivers/crypto/omap-des.c | |||
@@ -0,0 +1,1216 @@ | |||
1 | /* | ||
2 | * Support for OMAP DES and Triple DES HW acceleration. | ||
3 | * | ||
4 | * Copyright (c) 2013 Texas Instruments Incorporated | ||
5 | * Author: Joel Fernandes <joelf@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
14 | |||
15 | #ifdef DEBUG | ||
16 | #define prn(num) printk(#num "=%d\n", num) | ||
17 | #define prx(num) printk(#num "=%x\n", num) | ||
18 | #else | ||
19 | #define prn(num) do { } while (0) | ||
20 | #define prx(num) do { } while (0) | ||
21 | #endif | ||
22 | |||
23 | #include <linux/err.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/scatterlist.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/dmaengine.h> | ||
32 | #include <linux/omap-dma.h> | ||
33 | #include <linux/pm_runtime.h> | ||
34 | #include <linux/of.h> | ||
35 | #include <linux/of_device.h> | ||
36 | #include <linux/of_address.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/crypto.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <crypto/scatterwalk.h> | ||
41 | #include <crypto/des.h> | ||
42 | |||
43 | #define DST_MAXBURST 2 | ||
44 | |||
45 | #define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2) | ||
46 | |||
47 | #define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) | ||
48 | |||
49 | #define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ | ||
50 | ((x ^ 0x01) * 0x04)) | ||
51 | |||
52 | #define DES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) | ||
53 | |||
54 | #define DES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) | ||
55 | #define DES_REG_CTRL_CBC BIT(4) | ||
56 | #define DES_REG_CTRL_TDES BIT(3) | ||
57 | #define DES_REG_CTRL_DIRECTION BIT(2) | ||
58 | #define DES_REG_CTRL_INPUT_READY BIT(1) | ||
59 | #define DES_REG_CTRL_OUTPUT_READY BIT(0) | ||
60 | |||
61 | #define DES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) | ||
62 | |||
63 | #define DES_REG_REV(dd) ((dd)->pdata->rev_ofs) | ||
64 | |||
65 | #define DES_REG_MASK(dd) ((dd)->pdata->mask_ofs) | ||
66 | |||
67 | #define DES_REG_LENGTH_N(x) (0x24 + ((x) * 0x04)) | ||
68 | |||
69 | #define DES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) | ||
70 | #define DES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) | ||
71 | #define DES_REG_IRQ_DATA_IN BIT(1) | ||
72 | #define DES_REG_IRQ_DATA_OUT BIT(2) | ||
73 | |||
74 | #define FLAGS_MODE_MASK 0x000f | ||
75 | #define FLAGS_ENCRYPT BIT(0) | ||
76 | #define FLAGS_CBC BIT(1) | ||
77 | #define FLAGS_INIT BIT(4) | ||
78 | #define FLAGS_BUSY BIT(6) | ||
79 | |||
80 | struct omap_des_ctx { | ||
81 | struct omap_des_dev *dd; | ||
82 | |||
83 | int keylen; | ||
84 | u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)]; | ||
85 | unsigned long flags; | ||
86 | }; | ||
87 | |||
88 | struct omap_des_reqctx { | ||
89 | unsigned long mode; | ||
90 | }; | ||
91 | |||
92 | #define OMAP_DES_QUEUE_LENGTH 1 | ||
93 | #define OMAP_DES_CACHE_SIZE 0 | ||
94 | |||
95 | struct omap_des_algs_info { | ||
96 | struct crypto_alg *algs_list; | ||
97 | unsigned int size; | ||
98 | unsigned int registered; | ||
99 | }; | ||
100 | |||
101 | struct omap_des_pdata { | ||
102 | struct omap_des_algs_info *algs_info; | ||
103 | unsigned int algs_info_size; | ||
104 | |||
105 | void (*trigger)(struct omap_des_dev *dd, int length); | ||
106 | |||
107 | u32 key_ofs; | ||
108 | u32 iv_ofs; | ||
109 | u32 ctrl_ofs; | ||
110 | u32 data_ofs; | ||
111 | u32 rev_ofs; | ||
112 | u32 mask_ofs; | ||
113 | u32 irq_enable_ofs; | ||
114 | u32 irq_status_ofs; | ||
115 | |||
116 | u32 dma_enable_in; | ||
117 | u32 dma_enable_out; | ||
118 | u32 dma_start; | ||
119 | |||
120 | u32 major_mask; | ||
121 | u32 major_shift; | ||
122 | u32 minor_mask; | ||
123 | u32 minor_shift; | ||
124 | }; | ||
125 | |||
126 | struct omap_des_dev { | ||
127 | struct list_head list; | ||
128 | unsigned long phys_base; | ||
129 | void __iomem *io_base; | ||
130 | struct omap_des_ctx *ctx; | ||
131 | struct device *dev; | ||
132 | unsigned long flags; | ||
133 | int err; | ||
134 | |||
135 | /* spinlock used for queues */ | ||
136 | spinlock_t lock; | ||
137 | struct crypto_queue queue; | ||
138 | |||
139 | struct tasklet_struct done_task; | ||
140 | struct tasklet_struct queue_task; | ||
141 | |||
142 | struct ablkcipher_request *req; | ||
143 | /* | ||
144 | * total is used by PIO mode for bookkeeping, so introduce the | ||
145 | * variable total_save as we need it to calculate page_order | ||
146 | */ | ||
147 | size_t total; | ||
148 | size_t total_save; | ||
149 | |||
150 | struct scatterlist *in_sg; | ||
151 | struct scatterlist *out_sg; | ||
152 | |||
153 | /* Buffers used for copying in unaligned cases */ | ||
154 | struct scatterlist in_sgl; | ||
155 | struct scatterlist out_sgl; | ||
156 | struct scatterlist *orig_out; | ||
157 | int sgs_copied; | ||
158 | |||
159 | struct scatter_walk in_walk; | ||
160 | struct scatter_walk out_walk; | ||
161 | int dma_in; | ||
162 | struct dma_chan *dma_lch_in; | ||
163 | int dma_out; | ||
164 | struct dma_chan *dma_lch_out; | ||
165 | int in_sg_len; | ||
166 | int out_sg_len; | ||
167 | int pio_only; | ||
168 | const struct omap_des_pdata *pdata; | ||
169 | }; | ||
170 | |||
171 | /* keep registered devices data here */ | ||
172 | static LIST_HEAD(dev_list); | ||
173 | static DEFINE_SPINLOCK(list_lock); | ||
174 | |||
175 | #ifdef DEBUG | ||
176 | #define omap_des_read(dd, offset) \ | ||
177 | ({ \ | ||
178 | int _read_ret; \ | ||
179 | _read_ret = __raw_readl(dd->io_base + offset); \ | ||
180 | pr_err("omap_des_read(" #offset "=%#x)= %#x\n", \ | ||
181 | offset, _read_ret); \ | ||
182 | _read_ret; \ | ||
183 | }) | ||
184 | #else | ||
185 | static inline u32 omap_des_read(struct omap_des_dev *dd, u32 offset) | ||
186 | { | ||
187 | return __raw_readl(dd->io_base + offset); | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | #ifdef DEBUG | ||
192 | #define omap_des_write(dd, offset, value) \ | ||
193 | do { \ | ||
194 | pr_err("omap_des_write(" #offset "=%#x) value=%#x\n", \ | ||
195 | offset, value); \ | ||
196 | __raw_writel(value, dd->io_base + offset); \ | ||
197 | } while (0) | ||
198 | #else | ||
199 | static inline void omap_des_write(struct omap_des_dev *dd, u32 offset, | ||
200 | u32 value) | ||
201 | { | ||
202 | __raw_writel(value, dd->io_base + offset); | ||
203 | } | ||
204 | #endif | ||
205 | |||
206 | static inline void omap_des_write_mask(struct omap_des_dev *dd, u32 offset, | ||
207 | u32 value, u32 mask) | ||
208 | { | ||
209 | u32 val; | ||
210 | |||
211 | val = omap_des_read(dd, offset); | ||
212 | val &= ~mask; | ||
213 | val |= value; | ||
214 | omap_des_write(dd, offset, val); | ||
215 | } | ||
216 | |||
217 | static void omap_des_write_n(struct omap_des_dev *dd, u32 offset, | ||
218 | u32 *value, int count) | ||
219 | { | ||
220 | for (; count--; value++, offset += 4) | ||
221 | omap_des_write(dd, offset, *value); | ||
222 | } | ||
223 | |||
224 | static int omap_des_hw_init(struct omap_des_dev *dd) | ||
225 | { | ||
226 | /* | ||
227 | * Clocks are enabled when a request starts and disabled when it | ||
228 | * finishes; there may be long delays between requests, and the | ||
229 | * device might go to off mode to save power in between. | ||
230 | */ | ||
231 | pm_runtime_get_sync(dd->dev); | ||
232 | |||
233 | if (!(dd->flags & FLAGS_INIT)) { | ||
234 | dd->flags |= FLAGS_INIT; | ||
235 | dd->err = 0; | ||
236 | } | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int omap_des_write_ctrl(struct omap_des_dev *dd) | ||
242 | { | ||
243 | unsigned int key32; | ||
244 | int i, err; | ||
245 | u32 val = 0, mask = 0; | ||
246 | |||
247 | err = omap_des_hw_init(dd); | ||
248 | if (err) | ||
249 | return err; | ||
250 | |||
251 | key32 = dd->ctx->keylen / sizeof(u32); | ||
252 | |||
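	/* keylen was validated in omap_des_setkey(): 8 bytes for plain DES
	 * (key32 == 2) or 24 bytes for 3DES (key32 == 6) */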
253 | /* it seems a key should always be set even if it has not changed */ | ||
254 | for (i = 0; i < key32; i++) { | ||
255 | omap_des_write(dd, DES_REG_KEY(dd, i), | ||
256 | __le32_to_cpu(dd->ctx->key[i])); | ||
257 | } | ||
258 | |||
259 | if ((dd->flags & FLAGS_CBC) && dd->req->info) | ||
260 | omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2); | ||
261 | |||
262 | if (dd->flags & FLAGS_CBC) | ||
263 | val |= DES_REG_CTRL_CBC; | ||
264 | if (dd->flags & FLAGS_ENCRYPT) | ||
265 | val |= DES_REG_CTRL_DIRECTION; | ||
266 | if (key32 == 6) | ||
267 | val |= DES_REG_CTRL_TDES; | ||
268 | |||
269 | mask |= DES_REG_CTRL_CBC | DES_REG_CTRL_DIRECTION | DES_REG_CTRL_TDES; | ||
270 | |||
271 | omap_des_write_mask(dd, DES_REG_CTRL(dd), val, mask); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static void omap_des_dma_trigger_omap4(struct omap_des_dev *dd, int length) | ||
277 | { | ||
278 | u32 mask, val; | ||
279 | |||
280 | omap_des_write(dd, DES_REG_LENGTH_N(0), length); | ||
281 | |||
282 | val = dd->pdata->dma_start; | ||
283 | |||
284 | if (dd->dma_lch_out != NULL) | ||
285 | val |= dd->pdata->dma_enable_out; | ||
286 | if (dd->dma_lch_in != NULL) | ||
287 | val |= dd->pdata->dma_enable_in; | ||
288 | |||
289 | mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | | ||
290 | dd->pdata->dma_start; | ||
291 | |||
292 | omap_des_write_mask(dd, DES_REG_MASK(dd), val, mask); | ||
293 | } | ||
294 | |||
295 | static void omap_des_dma_stop(struct omap_des_dev *dd) | ||
296 | { | ||
297 | u32 mask; | ||
298 | |||
299 | mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in | | ||
300 | dd->pdata->dma_start; | ||
301 | |||
302 | omap_des_write_mask(dd, DES_REG_MASK(dd), 0, mask); | ||
303 | } | ||
304 | |||
305 | static struct omap_des_dev *omap_des_find_dev(struct omap_des_ctx *ctx) | ||
306 | { | ||
307 | struct omap_des_dev *dd = NULL, *tmp; | ||
308 | |||
309 | spin_lock_bh(&list_lock); | ||
310 | if (!ctx->dd) { | ||
311 | list_for_each_entry(tmp, &dev_list, list) { | ||
312 | /* FIXME: take the first available DES core */ | ||
313 | dd = tmp; | ||
314 | break; | ||
315 | } | ||
316 | ctx->dd = dd; | ||
317 | } else { | ||
318 | /* already found before */ | ||
319 | dd = ctx->dd; | ||
320 | } | ||
321 | spin_unlock_bh(&list_lock); | ||
322 | |||
323 | return dd; | ||
324 | } | ||
325 | |||
326 | static void omap_des_dma_out_callback(void *data) | ||
327 | { | ||
328 | struct omap_des_dev *dd = data; | ||
329 | |||
330 | /* dma_lch_out - completed */ | ||
331 | tasklet_schedule(&dd->done_task); | ||
332 | } | ||
333 | |||
334 | static int omap_des_dma_init(struct omap_des_dev *dd) | ||
335 | { | ||
336 | int err = -ENOMEM; | ||
337 | dma_cap_mask_t mask; | ||
338 | |||
339 | dd->dma_lch_out = NULL; | ||
340 | dd->dma_lch_in = NULL; | ||
341 | |||
342 | dma_cap_zero(mask); | ||
343 | dma_cap_set(DMA_SLAVE, mask); | ||
344 | |||
345 | dd->dma_lch_in = dma_request_slave_channel_compat(mask, | ||
346 | omap_dma_filter_fn, | ||
347 | &dd->dma_in, | ||
348 | dd->dev, "rx"); | ||
349 | if (!dd->dma_lch_in) { | ||
350 | dev_err(dd->dev, "Unable to request in DMA channel\n"); | ||
351 | goto err_dma_in; | ||
352 | } | ||
353 | |||
354 | dd->dma_lch_out = dma_request_slave_channel_compat(mask, | ||
355 | omap_dma_filter_fn, | ||
356 | &dd->dma_out, | ||
357 | dd->dev, "tx"); | ||
358 | if (!dd->dma_lch_out) { | ||
359 | dev_err(dd->dev, "Unable to request out DMA channel\n"); | ||
360 | goto err_dma_out; | ||
361 | } | ||
362 | |||
363 | return 0; | ||
364 | |||
365 | err_dma_out: | ||
366 | dma_release_channel(dd->dma_lch_in); | ||
367 | err_dma_in: | ||
368 | if (err) | ||
369 | pr_err("error: %d\n", err); | ||
370 | return err; | ||
371 | } | ||
372 | |||
373 | static void omap_des_dma_cleanup(struct omap_des_dev *dd) | ||
374 | { | ||
375 | dma_release_channel(dd->dma_lch_out); | ||
376 | dma_release_channel(dd->dma_lch_in); | ||
377 | } | ||
378 | |||
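/*
 * Copy nbytes between a linear buffer and a scatterlist, starting at byte
 * offset 'start'. Following scatterwalk_copychunks() semantics, out == 0
 * copies sg data into buf and out == 1 copies buf back out into the sg.
 */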
379 | static void sg_copy_buf(void *buf, struct scatterlist *sg, | ||
380 | unsigned int start, unsigned int nbytes, int out) | ||
381 | { | ||
382 | struct scatter_walk walk; | ||
383 | |||
384 | if (!nbytes) | ||
385 | return; | ||
386 | |||
387 | scatterwalk_start(&walk, sg); | ||
388 | scatterwalk_advance(&walk, start); | ||
389 | scatterwalk_copychunks(buf, &walk, nbytes, out); | ||
390 | scatterwalk_done(&walk, out, 0); | ||
391 | } | ||
392 | |||
393 | static int omap_des_crypt_dma(struct crypto_tfm *tfm, | ||
394 | struct scatterlist *in_sg, struct scatterlist *out_sg, | ||
395 | int in_sg_len, int out_sg_len) | ||
396 | { | ||
397 | struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm); | ||
398 | struct omap_des_dev *dd = ctx->dd; | ||
399 | struct dma_async_tx_descriptor *tx_in, *tx_out; | ||
400 | struct dma_slave_config cfg; | ||
401 | int ret; | ||
402 | |||
403 | if (dd->pio_only) { | ||
404 | scatterwalk_start(&dd->in_walk, dd->in_sg); | ||
405 | scatterwalk_start(&dd->out_walk, dd->out_sg); | ||
406 | |||
407 | /* Enable the DATA_IN interrupt and let it take | ||
408 | * care of the rest */ | ||
409 | omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2); | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE); | ||
414 | |||
415 | memset(&cfg, 0, sizeof(cfg)); | ||
416 | |||
417 | cfg.src_addr = dd->phys_base + DES_REG_DATA_N(dd, 0); | ||
418 | cfg.dst_addr = dd->phys_base + DES_REG_DATA_N(dd, 0); | ||
419 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
420 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
421 | cfg.src_maxburst = DST_MAXBURST; | ||
422 | cfg.dst_maxburst = DST_MAXBURST; | ||
423 | |||
424 | /* IN */ | ||
425 | ret = dmaengine_slave_config(dd->dma_lch_in, &cfg); | ||
426 | if (ret) { | ||
427 | dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n", | ||
428 | ret); | ||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len, | ||
433 | DMA_MEM_TO_DEV, | ||
434 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
435 | if (!tx_in) { | ||
436 | dev_err(dd->dev, "IN prep_slave_sg() failed\n"); | ||
437 | return -EINVAL; | ||
438 | } | ||
439 | |||
440 | /* No callback necessary; completion is signalled by the OUT channel */ | ||
441 | tx_in->callback_param = dd; | ||
442 | |||
443 | /* OUT */ | ||
444 | ret = dmaengine_slave_config(dd->dma_lch_out, &cfg); | ||
445 | if (ret) { | ||
446 | dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", | ||
447 | ret); | ||
448 | return ret; | ||
449 | } | ||
450 | |||
451 | tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len, | ||
452 | DMA_DEV_TO_MEM, | ||
453 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
454 | if (!tx_out) { | ||
455 | dev_err(dd->dev, "OUT prep_slave_sg() failed\n"); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | tx_out->callback = omap_des_dma_out_callback; | ||
460 | tx_out->callback_param = dd; | ||
461 | |||
462 | dmaengine_submit(tx_in); | ||
463 | dmaengine_submit(tx_out); | ||
464 | |||
465 | dma_async_issue_pending(dd->dma_lch_in); | ||
466 | dma_async_issue_pending(dd->dma_lch_out); | ||
467 | |||
468 | /* start DMA */ | ||
469 | dd->pdata->trigger(dd, dd->total); | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static int omap_des_crypt_dma_start(struct omap_des_dev *dd) | ||
475 | { | ||
476 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | ||
477 | crypto_ablkcipher_reqtfm(dd->req)); | ||
478 | int err; | ||
479 | |||
480 | pr_debug("total: %d\n", dd->total); | ||
481 | |||
482 | if (!dd->pio_only) { | ||
483 | err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len, | ||
484 | DMA_TO_DEVICE); | ||
485 | if (!err) { | ||
486 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
487 | return -EINVAL; | ||
488 | } | ||
489 | |||
490 | err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, | ||
491 | DMA_FROM_DEVICE); | ||
492 | if (!err) { | ||
493 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
494 | return -EINVAL; | ||
495 | } | ||
496 | } | ||
497 | |||
498 | err = omap_des_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, | ||
499 | dd->out_sg_len); | ||
500 | if (err && !dd->pio_only) { | ||
501 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); | ||
502 | dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, | ||
503 | DMA_FROM_DEVICE); | ||
504 | } | ||
505 | |||
506 | return err; | ||
507 | } | ||
508 | |||
509 | static void omap_des_finish_req(struct omap_des_dev *dd, int err) | ||
510 | { | ||
511 | struct ablkcipher_request *req = dd->req; | ||
512 | |||
513 | pr_debug("err: %d\n", err); | ||
514 | |||
515 | pm_runtime_put(dd->dev); | ||
516 | dd->flags &= ~FLAGS_BUSY; | ||
517 | |||
518 | req->base.complete(&req->base, err); | ||
519 | } | ||
520 | |||
521 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) | ||
522 | { | ||
523 | int err = 0; | ||
524 | |||
525 | pr_debug("total: %d\n", dd->total); | ||
526 | |||
527 | omap_des_dma_stop(dd); | ||
528 | |||
529 | dmaengine_terminate_all(dd->dma_lch_in); | ||
530 | dmaengine_terminate_all(dd->dma_lch_out); | ||
531 | |||
532 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); | ||
533 | dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); | ||
534 | |||
535 | return err; | ||
536 | } | ||
537 | |||
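/*
 * Returns nonzero if any sg entry is unsuitable for the hardware (offset
 * not 32-bit aligned, or length not a whole number of DES blocks), in
 * which case the data must be bounced through contiguous pages.
 */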
538 | static int omap_des_copy_needed(struct scatterlist *sg) | ||
539 | { | ||
540 | while (sg) { | ||
541 | if (!IS_ALIGNED(sg->offset, 4)) | ||
542 | return -1; | ||
543 | if (!IS_ALIGNED(sg->length, DES_BLOCK_SIZE)) | ||
544 | return -1; | ||
545 | sg = sg_next(sg); | ||
546 | } | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static int omap_des_copy_sgs(struct omap_des_dev *dd) | ||
551 | { | ||
552 | void *buf_in, *buf_out; | ||
553 | int pages; | ||
554 | |||
555 | /* the allocation order must match free_pages(..., | ||
556 | * get_order(total_save)) in omap_des_done_task() */ | ||
557 | pages = get_order(dd->total); | ||
558 | |||
559 | BUG_ON(!dd->total); | ||
561 | |||
562 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
563 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
564 | |||
565 | if (!buf_in || !buf_out) { | ||
566 | pr_err("Couldn't allocated pages for unaligned cases.\n"); | ||
567 | return -1; | ||
568 | } | ||
569 | |||
570 | dd->orig_out = dd->out_sg; | ||
571 | |||
572 | sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); | ||
573 | |||
574 | sg_init_table(&dd->in_sgl, 1); | ||
575 | sg_set_buf(&dd->in_sgl, buf_in, dd->total); | ||
576 | dd->in_sg = &dd->in_sgl; | ||
577 | |||
578 | sg_init_table(&dd->out_sgl, 1); | ||
579 | sg_set_buf(&dd->out_sgl, buf_out, dd->total); | ||
580 | dd->out_sg = &dd->out_sgl; | ||
581 | |||
582 | return 0; | ||
583 | } | ||
584 | |||
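/*
 * Enqueue the new request (if any) and, unless the device is already
 * busy, dequeue the next one and start it: program key/IV/control and
 * kick off the DMA (or PIO) transfer. Returns the enqueue status.
 */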
585 | static int omap_des_handle_queue(struct omap_des_dev *dd, | ||
586 | struct ablkcipher_request *req) | ||
587 | { | ||
588 | struct crypto_async_request *async_req, *backlog; | ||
589 | struct omap_des_ctx *ctx; | ||
590 | struct omap_des_reqctx *rctx; | ||
591 | unsigned long flags; | ||
592 | int err, ret = 0; | ||
593 | |||
594 | spin_lock_irqsave(&dd->lock, flags); | ||
595 | if (req) | ||
596 | ret = ablkcipher_enqueue_request(&dd->queue, req); | ||
597 | if (dd->flags & FLAGS_BUSY) { | ||
598 | spin_unlock_irqrestore(&dd->lock, flags); | ||
599 | return ret; | ||
600 | } | ||
601 | backlog = crypto_get_backlog(&dd->queue); | ||
602 | async_req = crypto_dequeue_request(&dd->queue); | ||
603 | if (async_req) | ||
604 | dd->flags |= FLAGS_BUSY; | ||
605 | spin_unlock_irqrestore(&dd->lock, flags); | ||
606 | |||
607 | if (!async_req) | ||
608 | return ret; | ||
609 | |||
610 | if (backlog) | ||
611 | backlog->complete(backlog, -EINPROGRESS); | ||
612 | |||
613 | req = ablkcipher_request_cast(async_req); | ||
614 | |||
615 | /* assign new request to device */ | ||
616 | dd->req = req; | ||
617 | dd->total = req->nbytes; | ||
618 | dd->total_save = req->nbytes; | ||
619 | dd->in_sg = req->src; | ||
620 | dd->out_sg = req->dst; | ||
621 | |||
622 | if (omap_des_copy_needed(dd->in_sg) || | ||
623 | omap_des_copy_needed(dd->out_sg)) { | ||
624 | if (omap_des_copy_sgs(dd)) | ||
625 | pr_err("Failed to copy SGs for unaligned cases\n"); | ||
626 | dd->sgs_copied = 1; | ||
627 | } else { | ||
628 | dd->sgs_copied = 0; | ||
629 | } | ||
630 | |||
631 | dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); | ||
632 | dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); | ||
633 | BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); | ||
634 | |||
635 | rctx = ablkcipher_request_ctx(req); | ||
636 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
637 | rctx->mode &= FLAGS_MODE_MASK; | ||
638 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; | ||
639 | |||
640 | dd->ctx = ctx; | ||
641 | ctx->dd = dd; | ||
642 | |||
643 | err = omap_des_write_ctrl(dd); | ||
644 | if (!err) | ||
645 | err = omap_des_crypt_dma_start(dd); | ||
646 | if (err) { | ||
647 | /* done_task will not finish it, so do it here */ | ||
648 | omap_des_finish_req(dd, err); | ||
649 | tasklet_schedule(&dd->queue_task); | ||
650 | } | ||
651 | |||
652 | return ret; /* return ret, which is enqueue return value */ | ||
653 | } | ||
654 | |||
655 | static void omap_des_done_task(unsigned long data) | ||
656 | { | ||
657 | struct omap_des_dev *dd = (struct omap_des_dev *)data; | ||
658 | void *buf_in, *buf_out; | ||
659 | int pages; | ||
660 | |||
661 | pr_debug("enter done_task\n"); | ||
662 | |||
663 | if (!dd->pio_only) { | ||
664 | dma_sync_sg_for_cpu(dd->dev, dd->out_sg, dd->out_sg_len, | ||
665 | DMA_FROM_DEVICE); | ||
666 | /* omap_des_crypt_dma_stop() unmaps both sg lists; doing it | ||
667 | * here as well would be a double unmap */ | ||
668 | omap_des_crypt_dma_stop(dd); | ||
670 | } | ||
671 | |||
672 | if (dd->sgs_copied) { | ||
673 | buf_in = sg_virt(&dd->in_sgl); | ||
674 | buf_out = sg_virt(&dd->out_sgl); | ||
675 | |||
676 | sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); | ||
677 | |||
678 | pages = get_order(dd->total_save); | ||
679 | free_pages((unsigned long)buf_in, pages); | ||
680 | free_pages((unsigned long)buf_out, pages); | ||
681 | } | ||
682 | |||
683 | omap_des_finish_req(dd, 0); | ||
684 | omap_des_handle_queue(dd, NULL); | ||
685 | |||
686 | pr_debug("exit\n"); | ||
687 | } | ||
688 | |||
689 | static void omap_des_queue_task(unsigned long data) | ||
690 | { | ||
691 | struct omap_des_dev *dd = (struct omap_des_dev *)data; | ||
692 | |||
693 | omap_des_handle_queue(dd, NULL); | ||
694 | } | ||
695 | |||
696 | static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
697 | { | ||
698 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx( | ||
699 | crypto_ablkcipher_reqtfm(req)); | ||
700 | struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req); | ||
701 | struct omap_des_dev *dd; | ||
702 | |||
703 | pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, | ||
704 | !!(mode & FLAGS_ENCRYPT), | ||
705 | !!(mode & FLAGS_CBC)); | ||
706 | |||
707 | if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) { | ||
708 | pr_err("request size is not exact amount of DES blocks\n"); | ||
709 | return -EINVAL; | ||
710 | } | ||
711 | |||
712 | dd = omap_des_find_dev(ctx); | ||
713 | if (!dd) | ||
714 | return -ENODEV; | ||
715 | |||
716 | rctx->mode = mode; | ||
717 | |||
718 | return omap_des_handle_queue(dd, req); | ||
719 | } | ||
720 | |||
721 | /* ********************** ALG API ************************************ */ | ||
722 | |||
723 | static int omap_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
724 | unsigned int keylen) | ||
725 | { | ||
726 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
727 | |||
728 | if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE)) | ||
729 | return -EINVAL; | ||
730 | |||
731 | pr_debug("enter, keylen: %d\n", keylen); | ||
732 | |||
733 | memcpy(ctx->key, key, keylen); | ||
734 | ctx->keylen = keylen; | ||
735 | |||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | static int omap_des_ecb_encrypt(struct ablkcipher_request *req) | ||
740 | { | ||
741 | return omap_des_crypt(req, FLAGS_ENCRYPT); | ||
742 | } | ||
743 | |||
744 | static int omap_des_ecb_decrypt(struct ablkcipher_request *req) | ||
745 | { | ||
746 | return omap_des_crypt(req, 0); | ||
747 | } | ||
748 | |||
749 | static int omap_des_cbc_encrypt(struct ablkcipher_request *req) | ||
750 | { | ||
751 | return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); | ||
752 | } | ||
753 | |||
754 | static int omap_des_cbc_decrypt(struct ablkcipher_request *req) | ||
755 | { | ||
756 | return omap_des_crypt(req, FLAGS_CBC); | ||
757 | } | ||
758 | |||
759 | static int omap_des_cra_init(struct crypto_tfm *tfm) | ||
760 | { | ||
761 | pr_debug("enter\n"); | ||
762 | |||
763 | tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx); | ||
764 | |||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | static void omap_des_cra_exit(struct crypto_tfm *tfm) | ||
769 | { | ||
770 | pr_debug("enter\n"); | ||
771 | } | ||
772 | |||
773 | /* ********************** ALGS ************************************ */ | ||
774 | |||
775 | static struct crypto_alg algs_ecb_cbc[] = { | ||
776 | { | ||
777 | .cra_name = "ecb(des)", | ||
778 | .cra_driver_name = "ecb-des-omap", | ||
779 | .cra_priority = 100, | ||
780 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
781 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
782 | CRYPTO_ALG_ASYNC, | ||
783 | .cra_blocksize = DES_BLOCK_SIZE, | ||
784 | .cra_ctxsize = sizeof(struct omap_des_ctx), | ||
785 | .cra_alignmask = 0, | ||
786 | .cra_type = &crypto_ablkcipher_type, | ||
787 | .cra_module = THIS_MODULE, | ||
788 | .cra_init = omap_des_cra_init, | ||
789 | .cra_exit = omap_des_cra_exit, | ||
790 | .cra_u.ablkcipher = { | ||
791 | .min_keysize = DES_KEY_SIZE, | ||
792 | .max_keysize = DES_KEY_SIZE, | ||
793 | .setkey = omap_des_setkey, | ||
794 | .encrypt = omap_des_ecb_encrypt, | ||
795 | .decrypt = omap_des_ecb_decrypt, | ||
796 | } | ||
797 | }, | ||
798 | { | ||
799 | .cra_name = "cbc(des)", | ||
800 | .cra_driver_name = "cbc-des-omap", | ||
801 | .cra_priority = 100, | ||
802 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
803 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
804 | CRYPTO_ALG_ASYNC, | ||
805 | .cra_blocksize = DES_BLOCK_SIZE, | ||
806 | .cra_ctxsize = sizeof(struct omap_des_ctx), | ||
807 | .cra_alignmask = 0, | ||
808 | .cra_type = &crypto_ablkcipher_type, | ||
809 | .cra_module = THIS_MODULE, | ||
810 | .cra_init = omap_des_cra_init, | ||
811 | .cra_exit = omap_des_cra_exit, | ||
812 | .cra_u.ablkcipher = { | ||
813 | .min_keysize = DES_KEY_SIZE, | ||
814 | .max_keysize = DES_KEY_SIZE, | ||
815 | .ivsize = DES_BLOCK_SIZE, | ||
816 | .setkey = omap_des_setkey, | ||
817 | .encrypt = omap_des_cbc_encrypt, | ||
818 | .decrypt = omap_des_cbc_decrypt, | ||
819 | } | ||
820 | }, | ||
821 | { | ||
822 | .cra_name = "ecb(des3_ede)", | ||
823 | .cra_driver_name = "ecb-des3-omap", | ||
824 | .cra_priority = 100, | ||
825 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
826 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
827 | CRYPTO_ALG_ASYNC, | ||
828 | .cra_blocksize = DES_BLOCK_SIZE, | ||
829 | .cra_ctxsize = sizeof(struct omap_des_ctx), | ||
830 | .cra_alignmask = 0, | ||
831 | .cra_type = &crypto_ablkcipher_type, | ||
832 | .cra_module = THIS_MODULE, | ||
833 | .cra_init = omap_des_cra_init, | ||
834 | .cra_exit = omap_des_cra_exit, | ||
835 | .cra_u.ablkcipher = { | ||
836 | .min_keysize = 3*DES_KEY_SIZE, | ||
837 | .max_keysize = 3*DES_KEY_SIZE, | ||
838 | .setkey = omap_des_setkey, | ||
839 | .encrypt = omap_des_ecb_encrypt, | ||
840 | .decrypt = omap_des_ecb_decrypt, | ||
841 | } | ||
842 | }, | ||
843 | { | ||
844 | .cra_name = "cbc(des3_ede)", | ||
845 | .cra_driver_name = "cbc-des3-omap", | ||
846 | .cra_priority = 100, | ||
847 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
848 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
849 | CRYPTO_ALG_ASYNC, | ||
850 | .cra_blocksize = DES_BLOCK_SIZE, | ||
851 | .cra_ctxsize = sizeof(struct omap_des_ctx), | ||
852 | .cra_alignmask = 0, | ||
853 | .cra_type = &crypto_ablkcipher_type, | ||
854 | .cra_module = THIS_MODULE, | ||
855 | .cra_init = omap_des_cra_init, | ||
856 | .cra_exit = omap_des_cra_exit, | ||
857 | .cra_u.ablkcipher = { | ||
858 | .min_keysize = 3*DES_KEY_SIZE, | ||
859 | .max_keysize = 3*DES_KEY_SIZE, | ||
860 | .ivsize = DES_BLOCK_SIZE, | ||
861 | .setkey = omap_des_setkey, | ||
862 | .encrypt = omap_des_cbc_encrypt, | ||
863 | .decrypt = omap_des_cbc_decrypt, | ||
864 | } | ||
865 | } | ||
866 | }; | ||
867 | |||
868 | static struct omap_des_algs_info omap_des_algs_info_ecb_cbc[] = { | ||
869 | { | ||
870 | .algs_list = algs_ecb_cbc, | ||
871 | .size = ARRAY_SIZE(algs_ecb_cbc), | ||
872 | }, | ||
873 | }; | ||
874 | |||
875 | #ifdef CONFIG_OF | ||
876 | static const struct omap_des_pdata omap_des_pdata_omap4 = { | ||
877 | .algs_info = omap_des_algs_info_ecb_cbc, | ||
878 | .algs_info_size = ARRAY_SIZE(omap_des_algs_info_ecb_cbc), | ||
879 | .trigger = omap_des_dma_trigger_omap4, | ||
880 | .key_ofs = 0x14, | ||
881 | .iv_ofs = 0x18, | ||
882 | .ctrl_ofs = 0x20, | ||
883 | .data_ofs = 0x28, | ||
884 | .rev_ofs = 0x30, | ||
885 | .mask_ofs = 0x34, | ||
886 | .irq_status_ofs = 0x3c, | ||
887 | .irq_enable_ofs = 0x40, | ||
888 | .dma_enable_in = BIT(5), | ||
889 | .dma_enable_out = BIT(6), | ||
890 | .major_mask = 0x0700, | ||
891 | .major_shift = 8, | ||
892 | .minor_mask = 0x003f, | ||
893 | .minor_shift = 0, | ||
894 | }; | ||
895 | |||
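/*
 * PIO interrupt handler: on DATA_IN, feed one 64-bit block into the data
 * registers and switch to the DATA_OUT interrupt; on DATA_OUT, read one
 * block back, advance the scatterwalks and either schedule done_task
 * (all bytes processed) or re-enable DATA_IN for the next block.
 */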
896 | static irqreturn_t omap_des_irq(int irq, void *dev_id) | ||
897 | { | ||
898 | struct omap_des_dev *dd = dev_id; | ||
899 | u32 status, i; | ||
900 | u32 *src, *dst; | ||
901 | |||
902 | status = omap_des_read(dd, DES_REG_IRQ_STATUS(dd)); | ||
903 | if (status & DES_REG_IRQ_DATA_IN) { | ||
904 | omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0); | ||
905 | |||
906 | BUG_ON(!dd->in_sg); | ||
907 | |||
908 | BUG_ON(_calc_walked(in) > dd->in_sg->length); | ||
909 | |||
910 | src = sg_virt(dd->in_sg) + _calc_walked(in); | ||
911 | |||
912 | for (i = 0; i < DES_BLOCK_WORDS; i++) { | ||
913 | omap_des_write(dd, DES_REG_DATA_N(dd, i), *src); | ||
914 | |||
915 | scatterwalk_advance(&dd->in_walk, 4); | ||
916 | if (dd->in_sg->length == _calc_walked(in)) { | ||
917 | dd->in_sg = scatterwalk_sg_next(dd->in_sg); | ||
918 | if (dd->in_sg) { | ||
919 | scatterwalk_start(&dd->in_walk, | ||
920 | dd->in_sg); | ||
921 | src = sg_virt(dd->in_sg) + | ||
922 | _calc_walked(in); | ||
923 | } | ||
924 | } else { | ||
925 | src++; | ||
926 | } | ||
927 | } | ||
928 | |||
929 | /* Clear IRQ status */ | ||
930 | status &= ~DES_REG_IRQ_DATA_IN; | ||
931 | omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status); | ||
932 | |||
933 | /* Enable DATA_OUT interrupt */ | ||
934 | omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x4); | ||
935 | |||
936 | } else if (status & DES_REG_IRQ_DATA_OUT) { | ||
937 | omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0); | ||
938 | |||
939 | BUG_ON(!dd->out_sg); | ||
940 | |||
941 | BUG_ON(_calc_walked(out) > dd->out_sg->length); | ||
942 | |||
943 | dst = sg_virt(dd->out_sg) + _calc_walked(out); | ||
944 | |||
945 | for (i = 0; i < DES_BLOCK_WORDS; i++) { | ||
946 | *dst = omap_des_read(dd, DES_REG_DATA_N(dd, i)); | ||
947 | scatterwalk_advance(&dd->out_walk, 4); | ||
948 | if (dd->out_sg->length == _calc_walked(out)) { | ||
949 | dd->out_sg = scatterwalk_sg_next(dd->out_sg); | ||
950 | if (dd->out_sg) { | ||
951 | scatterwalk_start(&dd->out_walk, | ||
952 | dd->out_sg); | ||
953 | dst = sg_virt(dd->out_sg) + | ||
954 | _calc_walked(out); | ||
955 | } | ||
956 | } else { | ||
957 | dst++; | ||
958 | } | ||
959 | } | ||
960 | |||
961 | /* total is a size_t; check for underflow before subtracting */ | ||
962 | BUG_ON(dd->total < DES_BLOCK_SIZE); | ||
963 | dd->total -= DES_BLOCK_SIZE; | ||
964 | |||
965 | /* Clear IRQ status */ | ||
966 | status &= ~DES_REG_IRQ_DATA_OUT; | ||
967 | omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status); | ||
968 | |||
969 | if (!dd->total) | ||
970 | /* All bytes read! */ | ||
971 | tasklet_schedule(&dd->done_task); | ||
972 | else | ||
973 | /* Enable DATA_IN interrupt for next block */ | ||
974 | omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2); | ||
975 | } | ||
976 | |||
977 | return IRQ_HANDLED; | ||
978 | } | ||
979 | |||
980 | static const struct of_device_id omap_des_of_match[] = { | ||
981 | { | ||
982 | .compatible = "ti,omap4-des", | ||
983 | .data = &omap_des_pdata_omap4, | ||
984 | }, | ||
985 | {}, | ||
986 | }; | ||
987 | MODULE_DEVICE_TABLE(of, omap_des_of_match); | ||
988 | |||
989 | static int omap_des_get_of(struct omap_des_dev *dd, | ||
990 | struct platform_device *pdev) | ||
991 | { | ||
992 | const struct of_device_id *match; | ||
993 | |||
994 | match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev); | ||
995 | if (!match) { | ||
996 | dev_err(&pdev->dev, "no compatible OF match\n"); | ||
997 | return -EINVAL; | ||
998 | } | ||
999 | |||
1000 | dd->dma_out = -1; /* Dummy value that's unused */ | ||
1001 | dd->dma_in = -1; /* Dummy value that's unused */ | ||
1002 | dd->pdata = match->data; | ||
1003 | |||
1004 | return 0; | ||
1005 | } | ||
1006 | #else | ||
1007 | static int omap_des_get_of(struct omap_des_dev *dd, | ||
1008 | struct platform_device *pdev) | ||
1009 | { | ||
1010 | return -EINVAL; | ||
1011 | } | ||
1012 | #endif | ||
1013 | |||
1014 | static int omap_des_get_pdev(struct omap_des_dev *dd, | ||
1015 | struct platform_device *pdev) | ||
1016 | { | ||
1017 | struct device *dev = &pdev->dev; | ||
1018 | struct resource *r; | ||
1019 | int err = 0; | ||
1020 | |||
1021 | /* Get the DMA out channel */ | ||
1022 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1023 | if (!r) { | ||
1024 | dev_err(dev, "no DMA out resource info\n"); | ||
1025 | err = -ENODEV; | ||
1026 | goto err; | ||
1027 | } | ||
1028 | dd->dma_out = r->start; | ||
1029 | |||
1030 | /* Get the DMA in channel */ | ||
1031 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1032 | if (!r) { | ||
1033 | dev_err(dev, "no DMA in resource info\n"); | ||
1034 | err = -ENODEV; | ||
1035 | goto err; | ||
1036 | } | ||
1037 | dd->dma_in = r->start; | ||
1038 | |||
1039 | /* non-DT devices get pdata from pdev */ | ||
1040 | dd->pdata = pdev->dev.platform_data; | ||
1041 | |||
1042 | err: | ||
1043 | return err; | ||
1044 | } | ||
1045 | |||
1046 | static int omap_des_probe(struct platform_device *pdev) | ||
1047 | { | ||
1048 | struct device *dev = &pdev->dev; | ||
1049 | struct omap_des_dev *dd; | ||
1050 | struct crypto_alg *algp; | ||
1051 | struct resource *res; | ||
1052 | int err = -ENOMEM, i, j, irq = -1; | ||
1053 | u32 reg; | ||
1054 | |||
1055 | dd = devm_kzalloc(dev, sizeof(struct omap_des_dev), GFP_KERNEL); | ||
1056 | if (dd == NULL) { | ||
1057 | dev_err(dev, "unable to alloc data struct.\n"); | ||
1058 | goto err_data; | ||
1059 | } | ||
1060 | dd->dev = dev; | ||
1061 | platform_set_drvdata(pdev, dd); | ||
1062 | |||
1063 | spin_lock_init(&dd->lock); | ||
1064 | crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH); | ||
1065 | |||
1066 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1067 | if (!res) { | ||
1068 | dev_err(dev, "no MEM resource info\n"); | ||
1069 | goto err_res; | ||
1070 | } | ||
1071 | |||
1072 | err = (dev->of_node) ? omap_des_get_of(dd, pdev) : | ||
1073 | omap_des_get_pdev(dd, pdev); | ||
1074 | if (err) | ||
1075 | goto err_res; | ||
1076 | |||
1077 | dd->io_base = devm_request_and_ioremap(dev, res); | ||
1078 | if (!dd->io_base) { | ||
1079 | dev_err(dev, "can't ioremap\n"); | ||
1080 | err = -ENOMEM; | ||
1081 | goto err_res; | ||
1082 | } | ||
1083 | dd->phys_base = res->start; | ||
1084 | |||
1085 | pm_runtime_enable(dev); | ||
1086 | pm_runtime_get_sync(dev); | ||
1087 | |||
1088 | omap_des_dma_stop(dd); | ||
1089 | |||
1090 | reg = omap_des_read(dd, DES_REG_REV(dd)); | ||
1091 | |||
1092 | pm_runtime_put_sync(dev); | ||
1093 | |||
1094 | dev_info(dev, "OMAP DES hw accel rev: %u.%u\n", | ||
1095 | (reg & dd->pdata->major_mask) >> dd->pdata->major_shift, | ||
1096 | (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift); | ||
1097 | |||
1098 | tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd); | ||
1099 | tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd); | ||
1100 | |||
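	/*
	 * Prefer DMA; if the channels cannot be obtained, fall back to
	 * IRQ-driven PIO, provided this variant exposes the IRQ status
	 * and enable registers.
	 */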
1101 | err = omap_des_dma_init(dd); | ||
1102 | if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) { | ||
1103 | dd->pio_only = 1; | ||
1104 | |||
1105 | irq = platform_get_irq(pdev, 0); | ||
1106 | if (irq < 0) { | ||
1107 | dev_err(dev, "can't get IRQ resource\n"); | ||
1108 | goto err_irq; | ||
1109 | } | ||
1110 | |||
1111 | err = devm_request_irq(dev, irq, omap_des_irq, 0, | ||
1112 | dev_name(dev), dd); | ||
1113 | if (err) { | ||
1114 | dev_err(dev, "Unable to grab omap-des IRQ\n"); | ||
1115 | goto err_irq; | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | |||
1120 | INIT_LIST_HEAD(&dd->list); | ||
1121 | spin_lock(&list_lock); | ||
1122 | list_add_tail(&dd->list, &dev_list); | ||
1123 | spin_unlock(&list_lock); | ||
1124 | |||
1125 | for (i = 0; i < dd->pdata->algs_info_size; i++) { | ||
1126 | for (j = 0; j < dd->pdata->algs_info[i].size; j++) { | ||
1127 | algp = &dd->pdata->algs_info[i].algs_list[j]; | ||
1128 | |||
1129 | pr_debug("reg alg: %s\n", algp->cra_name); | ||
1130 | INIT_LIST_HEAD(&algp->cra_list); | ||
1131 | |||
1132 | err = crypto_register_alg(algp); | ||
1133 | if (err) | ||
1134 | goto err_algs; | ||
1135 | |||
1136 | dd->pdata->algs_info[i].registered++; | ||
1137 | } | ||
1138 | } | ||
1139 | |||
1140 | return 0; | ||
1141 | err_algs: | ||
1142 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) | ||
1143 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | ||
1144 | crypto_unregister_alg( | ||
1145 | &dd->pdata->algs_info[i].algs_list[j]); | ||
1146 | if (!dd->pio_only) | ||
1147 | omap_des_dma_cleanup(dd); | ||
1148 | err_irq: | ||
1149 | tasklet_kill(&dd->done_task); | ||
1150 | tasklet_kill(&dd->queue_task); | ||
1151 | pm_runtime_disable(dev); | ||
1152 | err_res: | ||
1153 | dd = NULL; | ||
1154 | err_data: | ||
1155 | dev_err(dev, "initialization failed.\n"); | ||
1156 | return err; | ||
1157 | } | ||
1158 | |||
1159 | static int omap_des_remove(struct platform_device *pdev) | ||
1160 | { | ||
1161 | struct omap_des_dev *dd = platform_get_drvdata(pdev); | ||
1162 | int i, j; | ||
1163 | |||
1164 | if (!dd) | ||
1165 | return -ENODEV; | ||
1166 | |||
1167 | spin_lock(&list_lock); | ||
1168 | list_del(&dd->list); | ||
1169 | spin_unlock(&list_lock); | ||
1170 | |||
1171 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) | ||
1172 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | ||
1173 | crypto_unregister_alg( | ||
1174 | &dd->pdata->algs_info[i].algs_list[j]); | ||
1175 | |||
1176 | tasklet_kill(&dd->done_task); | ||
1177 | tasklet_kill(&dd->queue_task); | ||
1178 | omap_des_dma_cleanup(dd); | ||
1179 | pm_runtime_disable(dd->dev); | ||
1180 | dd = NULL; | ||
1181 | |||
1182 | return 0; | ||
1183 | } | ||
1184 | |||
1185 | #ifdef CONFIG_PM_SLEEP | ||
1186 | static int omap_des_suspend(struct device *dev) | ||
1187 | { | ||
1188 | pm_runtime_put_sync(dev); | ||
1189 | return 0; | ||
1190 | } | ||
1191 | |||
1192 | static int omap_des_resume(struct device *dev) | ||
1193 | { | ||
1194 | pm_runtime_get_sync(dev); | ||
1195 | return 0; | ||
1196 | } | ||
1197 | #endif | ||
1198 | |||
1199 | static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume); | ||
1200 | |||
1201 | static struct platform_driver omap_des_driver = { | ||
1202 | .probe = omap_des_probe, | ||
1203 | .remove = omap_des_remove, | ||
1204 | .driver = { | ||
1205 | .name = "omap-des", | ||
1206 | .owner = THIS_MODULE, | ||
1207 | .pm = &omap_des_pm_ops, | ||
1208 | .of_match_table = of_match_ptr(omap_des_of_match), | ||
1209 | }, | ||
1210 | }; | ||
1211 | |||
1212 | module_platform_driver(omap_des_driver); | ||
1213 | |||
1214 | MODULE_DESCRIPTION("OMAP DES hw acceleration support."); | ||
1215 | MODULE_LICENSE("GPL v2"); | ||
1216 | MODULE_AUTHOR("Joel Fernandes <joelf@ti.com>"); | ||
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index a727a6a59653..710d86386965 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -636,11 +636,17 @@ static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, | |||
636 | static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) | 636 | static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) |
637 | { | 637 | { |
638 | size_t count; | 638 | size_t count; |
639 | const u8 *vaddr; | ||
639 | 640 | ||
640 | while (ctx->sg) { | 641 | while (ctx->sg) { |
642 | vaddr = kmap_atomic(sg_page(ctx->sg)); | ||
643 | |||
641 | count = omap_sham_append_buffer(ctx, | 644 | count = omap_sham_append_buffer(ctx, |
642 | sg_virt(ctx->sg) + ctx->offset, | 645 | vaddr + ctx->offset, |
643 | ctx->sg->length - ctx->offset); | 646 | ctx->sg->length - ctx->offset); |
647 | |||
648 | kunmap_atomic((void *)vaddr); | ||
649 | |||
644 | if (!count) | 650 | if (!count) |
645 | break; | 651 | break; |
646 | ctx->offset += count; | 652 | ctx->offset += count; |
@@ -2022,9 +2028,7 @@ static int omap_sham_resume(struct device *dev) | |||
2022 | } | 2028 | } |
2023 | #endif | 2029 | #endif |
2024 | 2030 | ||
2025 | static const struct dev_pm_ops omap_sham_pm_ops = { | 2031 | static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume); |
2026 | SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume) | ||
2027 | }; | ||
2028 | 2032 | ||
2029 | static struct platform_driver omap_sham_driver = { | 2033 | static struct platform_driver omap_sham_driver = { |
2030 | .probe = omap_sham_probe, | 2034 | .probe = omap_sham_probe, |
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index a6175ba6d238..5da5b98b8f29 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -1720,22 +1720,16 @@ static int spacc_probe(struct platform_device *pdev) | |||
1720 | engine->name = dev_name(&pdev->dev); | 1720 | engine->name = dev_name(&pdev->dev); |
1721 | 1721 | ||
1722 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1722 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1723 | engine->regs = devm_ioremap_resource(&pdev->dev, mem); | ||
1724 | if (IS_ERR(engine->regs)) | ||
1725 | return PTR_ERR(engine->regs); | ||
1726 | |||
1723 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1727 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1724 | if (!mem || !irq) { | 1728 | if (!irq) { |
1725 | dev_err(&pdev->dev, "no memory/irq resource for engine\n"); | 1729 | dev_err(&pdev->dev, "no memory/irq resource for engine\n"); |
1726 | return -ENXIO; | 1730 | return -ENXIO; |
1727 | } | 1731 | } |
1728 | 1732 | ||
1729 | if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), | ||
1730 | engine->name)) | ||
1731 | return -ENOMEM; | ||
1732 | |||
1733 | engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); | ||
1734 | if (!engine->regs) { | ||
1735 | dev_err(&pdev->dev, "memory map failed\n"); | ||
1736 | return -ENOMEM; | ||
1737 | } | ||
1738 | |||
1739 | if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, | 1733 | if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, |
1740 | engine->name, engine)) { | 1734 | engine->name, engine)) { |
1741 | dev_err(engine->dev, "failed to request IRQ\n"); | 1735 | dev_err(engine->dev, "failed to request IRQ\n"); |
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index cf149b19ff47..be45762f390a 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -568,17 +568,14 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
568 | if (s5p_dev) | 568 | if (s5p_dev) |
569 | return -EEXIST; | 569 | return -EEXIST; |
570 | 570 | ||
571 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
572 | if (!res) | ||
573 | return -ENODEV; | ||
574 | |||
575 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | 571 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
576 | if (!pdata) | 572 | if (!pdata) |
577 | return -ENOMEM; | 573 | return -ENOMEM; |
578 | 574 | ||
579 | if (!devm_request_mem_region(dev, res->start, | 575 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
580 | resource_size(res), pdev->name)) | 576 | pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); |
581 | return -EBUSY; | 577 | if (IS_ERR(pdata->ioaddr)) |
578 | return PTR_ERR(pdata->ioaddr); | ||
582 | 579 | ||
583 | pdata->clk = devm_clk_get(dev, "secss"); | 580 | pdata->clk = devm_clk_get(dev, "secss"); |
584 | if (IS_ERR(pdata->clk)) { | 581 | if (IS_ERR(pdata->clk)) { |
@@ -589,8 +586,6 @@ static int s5p_aes_probe(struct platform_device *pdev) | |||
589 | clk_enable(pdata->clk); | 586 | clk_enable(pdata->clk); |
590 | 587 | ||
591 | spin_lock_init(&pdata->lock); | 588 | spin_lock_init(&pdata->lock); |
592 | pdata->ioaddr = devm_ioremap(dev, res->start, | ||
593 | resource_size(res)); | ||
594 | 589 | ||
595 | pdata->irq_hash = platform_get_irq_byname(pdev, "hash"); | 590 | pdata->irq_hash = platform_get_irq_byname(pdev, "hash"); |
596 | if (pdata->irq_hash < 0) { | 591 | if (pdata->irq_hash < 0) { |
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 785a9ded7bdf..07a5987ce67d 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -885,22 +885,9 @@ static int sahara_probe(struct platform_device *pdev) | |||
885 | 885 | ||
886 | /* Get the base address */ | 886 | /* Get the base address */ |
887 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 887 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
888 | if (!res) { | 888 | dev->regs_base = devm_ioremap_resource(&pdev->dev, res); |
889 | dev_err(&pdev->dev, "failed to get memory region resource\n"); | 889 | if (IS_ERR(dev->regs_base)) |
890 | return -ENODEV; | 890 | return PTR_ERR(dev->regs_base); |
891 | } | ||
892 | |||
893 | if (devm_request_mem_region(&pdev->dev, res->start, | ||
894 | resource_size(res), SAHARA_NAME) == NULL) { | ||
895 | dev_err(&pdev->dev, "failed to request memory region\n"); | ||
896 | return -ENOENT; | ||
897 | } | ||
898 | dev->regs_base = devm_ioremap(&pdev->dev, res->start, | ||
899 | resource_size(res)); | ||
900 | if (!dev->regs_base) { | ||
901 | dev_err(&pdev->dev, "failed to ioremap address region\n"); | ||
902 | return -ENOENT; | ||
903 | } | ||
904 | 891 | ||
905 | /* Get the IRQ */ | 892 | /* Get the IRQ */ |
906 | irq = platform_get_irq(pdev, 0); | 893 | irq = platform_get_irq(pdev, 0); |
@@ -909,10 +896,11 @@ static int sahara_probe(struct platform_device *pdev) | |||
909 | return irq; | 896 | return irq; |
910 | } | 897 | } |
911 | 898 | ||
912 | if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler, | 899 | err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, |
913 | 0, SAHARA_NAME, dev) < 0) { | 900 | 0, dev_name(&pdev->dev), dev); |
901 | if (err) { | ||
914 | dev_err(&pdev->dev, "failed to request irq\n"); | 902 | dev_err(&pdev->dev, "failed to request irq\n"); |
915 | return -ENOENT; | 903 | return err; |
916 | } | 904 | } |
917 | 905 | ||
918 | /* clocks */ | 906 | /* clocks */ |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5967667e1a8f..624b8be0c365 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -2637,6 +2637,8 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2637 | if (!priv) | 2637 | if (!priv) |
2638 | return -ENOMEM; | 2638 | return -ENOMEM; |
2639 | 2639 | ||
2640 | INIT_LIST_HEAD(&priv->alg_list); | ||
2641 | |||
2640 | dev_set_drvdata(dev, priv); | 2642 | dev_set_drvdata(dev, priv); |
2641 | 2643 | ||
2642 | priv->ofdev = ofdev; | 2644 | priv->ofdev = ofdev; |
@@ -2657,8 +2659,6 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2657 | (unsigned long)dev); | 2659 | (unsigned long)dev); |
2658 | } | 2660 | } |
2659 | 2661 | ||
2660 | INIT_LIST_HEAD(&priv->alg_list); | ||
2661 | |||
2662 | priv->reg = of_iomap(np, 0); | 2662 | priv->reg = of_iomap(np, 0); |
2663 | if (!priv->reg) { | 2663 | if (!priv->reg) { |
2664 | dev_err(dev, "failed to of_iomap\n"); | 2664 | dev_err(dev, "failed to of_iomap\n"); |
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c deleted file mode 100644 index 060eecc5dbc3..000000000000 --- a/drivers/crypto/tegra-aes.c +++ /dev/null | |||
@@ -1,1087 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/crypto/tegra-aes.c | ||
3 | * | ||
4 | * Driver for NVIDIA Tegra AES hardware engine residing inside the | ||
5 | * Bit Stream Engine for Video (BSEV) hardware block. | ||
6 | * | ||
7 | * The programming sequence for this engine is with the help | ||
8 | * of commands which travel via a command queue residing between the | ||
9 | * CPU and the BSEV block. The BSEV engine has an internal RAM (VRAM) | ||
10 | * where the final input plaintext, keys and the IV have to be copied | ||
11 | * before starting the encrypt/decrypt operation. | ||
12 | * | ||
13 | * Copyright (c) 2010, NVIDIA Corporation. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
21 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
22 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
23 | * more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License along | ||
26 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
28 | */ | ||
29 | |||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/clk.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/scatterlist.h> | ||
39 | #include <linux/dma-mapping.h> | ||
40 | #include <linux/io.h> | ||
41 | #include <linux/mutex.h> | ||
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/completion.h> | ||
44 | #include <linux/workqueue.h> | ||
45 | |||
46 | #include <crypto/scatterwalk.h> | ||
47 | #include <crypto/aes.h> | ||
48 | #include <crypto/internal/rng.h> | ||
49 | |||
50 | #include "tegra-aes.h" | ||
51 | |||
52 | #define FLAGS_MODE_MASK 0x00FF | ||
53 | #define FLAGS_ENCRYPT BIT(0) | ||
54 | #define FLAGS_CBC BIT(1) | ||
55 | #define FLAGS_GIV BIT(2) | ||
56 | #define FLAGS_RNG BIT(3) | ||
57 | #define FLAGS_OFB BIT(4) | ||
58 | #define FLAGS_NEW_KEY BIT(5) | ||
59 | #define FLAGS_NEW_IV BIT(6) | ||
60 | #define FLAGS_INIT BIT(7) | ||
61 | #define FLAGS_FAST BIT(8) | ||
62 | #define FLAGS_BUSY 9 | ||
63 | |||
64 | /* | ||
65 | * Defines the maximum number of bytes the AES engine processes in one go, | ||
66 | * which takes about 1 msec. The engine spends about 176 cycles per 16-byte | ||
67 | * block, i.e. 11 cycles/byte. The CPU may use the BSE for up to 1 msec at | ||
68 | * a time, i.e. about 216K AVP/BSE cycles, so AES can process 216K/11 ~= 19KB. | ||
69 | * AES_HW_DMA_BUFFER_SIZE_BYTES is therefore configured to 16KB. | ||
70 | */ | ||
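/*
 * (Editorial aside, worked out from the figures above: 216,000 cycles /
 * 11 cycles-per-byte ~= 19,636 bytes; 0x4000 = 16,384 bytes is the
 * nearest power-of-two buffer size below that budget.)
 */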
71 | #define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000 | ||
72 | |||
73 | /* | ||
74 | * The key table length is 64 bytes | ||
75 | * (this comprises up to 32 bytes of key, 16 bytes of original initial | ||
76 | * vector and 16 bytes of updated initial vector) | ||
77 | */ | ||
78 | #define AES_HW_KEY_TABLE_LENGTH_BYTES 64 | ||
79 | |||
80 | /* | ||
81 | * The memory in use is divided as follows: | ||
82 | * 1. Key - 32 bytes | ||
83 | * 2. Original IV - 16 bytes | ||
84 | * 3. Updated IV - 16 bytes | ||
85 | * 4. Key schedule - 256 bytes | ||
86 | * | ||
87 | * 1+2+3 constitute the hw key table. | ||
88 | */ | ||
89 | #define AES_HW_IV_SIZE 16 | ||
90 | #define AES_HW_KEYSCHEDULE_LEN 256 | ||
91 | #define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN) | ||
92 | |||
93 | /* Define commands required for AES operation */ | ||
94 | enum { | ||
95 | CMD_BLKSTARTENGINE = 0x0E, | ||
96 | CMD_DMASETUP = 0x10, | ||
97 | CMD_DMACOMPLETE = 0x11, | ||
98 | CMD_SETTABLE = 0x15, | ||
99 | CMD_MEMDMAVD = 0x22, | ||
100 | }; | ||
101 | |||
102 | /* Define sub-commands */ | ||
103 | enum { | ||
104 | SUBCMD_VRAM_SEL = 0x1, | ||
105 | SUBCMD_CRYPTO_TABLE_SEL = 0x3, | ||
106 | SUBCMD_KEY_TABLE_SEL = 0x8, | ||
107 | }; | ||
108 | |||
109 | /* memdma_vd command */ | ||
110 | #define MEMDMA_DIR_DTOVRAM 0 /* sdram -> vram */ | ||
111 | #define MEMDMA_DIR_VTODRAM 1 /* vram -> sdram */ | ||
112 | #define MEMDMA_DIR_SHIFT 25 | ||
113 | #define MEMDMA_NUM_WORDS_SHIFT 12 | ||
114 | |||
115 | /* command queue bit shifts */ | ||
116 | enum { | ||
117 | CMDQ_KEYTABLEADDR_SHIFT = 0, | ||
118 | CMDQ_KEYTABLEID_SHIFT = 17, | ||
119 | CMDQ_VRAMSEL_SHIFT = 23, | ||
120 | CMDQ_TABLESEL_SHIFT = 24, | ||
121 | CMDQ_OPCODE_SHIFT = 26, | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * The secure key slot contains a unique secure key generated | ||
126 | * and loaded by the bootloader. This slot is marked as non-accessible | ||
127 | * to the kernel. | ||
128 | */ | ||
129 | #define SSK_SLOT_NUM 4 | ||
130 | |||
131 | #define AES_NR_KEYSLOTS 8 | ||
132 | #define TEGRA_AES_QUEUE_LENGTH 50 | ||
133 | #define DEFAULT_RNG_BLK_SZ 16 | ||
134 | |||
135 | /* The command queue depth */ | ||
136 | #define AES_HW_MAX_ICQ_LENGTH 5 | ||
137 | |||
138 | struct tegra_aes_slot { | ||
139 | struct list_head node; | ||
140 | int slot_num; | ||
141 | }; | ||
142 | |||
143 | static struct tegra_aes_slot ssk = { | ||
144 | .slot_num = SSK_SLOT_NUM, | ||
145 | }; | ||
146 | |||
147 | struct tegra_aes_reqctx { | ||
148 | unsigned long mode; | ||
149 | }; | ||
150 | |||
151 | struct tegra_aes_dev { | ||
152 | struct device *dev; | ||
153 | void __iomem *io_base; | ||
154 | dma_addr_t ivkey_phys_base; | ||
155 | void __iomem *ivkey_base; | ||
156 | struct clk *aes_clk; | ||
157 | struct tegra_aes_ctx *ctx; | ||
158 | int irq; | ||
159 | unsigned long flags; | ||
160 | struct completion op_complete; | ||
161 | u32 *buf_in; | ||
162 | dma_addr_t dma_buf_in; | ||
163 | u32 *buf_out; | ||
164 | dma_addr_t dma_buf_out; | ||
165 | u8 *iv; | ||
166 | u8 dt[DEFAULT_RNG_BLK_SZ]; | ||
167 | int ivlen; | ||
168 | u64 ctr; | ||
169 | spinlock_t lock; | ||
170 | struct crypto_queue queue; | ||
171 | struct tegra_aes_slot *slots; | ||
172 | struct ablkcipher_request *req; | ||
173 | size_t total; | ||
174 | struct scatterlist *in_sg; | ||
175 | size_t in_offset; | ||
176 | struct scatterlist *out_sg; | ||
177 | size_t out_offset; | ||
178 | }; | ||
179 | |||
180 | static struct tegra_aes_dev *aes_dev; | ||
181 | |||
182 | struct tegra_aes_ctx { | ||
183 | struct tegra_aes_dev *dd; | ||
184 | unsigned long flags; | ||
185 | struct tegra_aes_slot *slot; | ||
186 | u8 key[AES_MAX_KEY_SIZE]; | ||
187 | size_t keylen; | ||
188 | }; | ||
189 | |||
190 | static struct tegra_aes_ctx rng_ctx = { | ||
191 | .flags = FLAGS_NEW_KEY, | ||
192 | .keylen = AES_KEYSIZE_128, | ||
193 | }; | ||
194 | |||
195 | /* keep registered devices' data here */ | ||
196 | static struct list_head dev_list; | ||
197 | static DEFINE_SPINLOCK(list_lock); | ||
198 | static DEFINE_MUTEX(aes_lock); | ||
199 | |||
200 | static void aes_workqueue_handler(struct work_struct *work); | ||
201 | static DECLARE_WORK(aes_work, aes_workqueue_handler); | ||
202 | static struct workqueue_struct *aes_wq; | ||
203 | |||
204 | static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) | ||
205 | { | ||
206 | return readl(dd->io_base + offset); | ||
207 | } | ||
208 | |||
209 | static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset) | ||
210 | { | ||
211 | writel(val, dd->io_base + offset); | ||
212 | } | ||
213 | |||
214 | static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr, | ||
215 | int nblocks, int mode, bool upd_iv) | ||
216 | { | ||
217 | u32 cmdq[AES_HW_MAX_ICQ_LENGTH]; | ||
218 | int i, eng_busy, icq_empty, ret; | ||
219 | u32 value; | ||
220 | |||
221 | /* reset all the interrupt bits */ | ||
222 | aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS); | ||
223 | |||
224 | /* enable error, dma xfer complete interrupts */ | ||
225 | aes_writel(dd, 0x33, TEGRA_AES_INT_ENB); | ||
226 | |||
227 | cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT; | ||
228 | cmdq[1] = in_addr; | ||
229 | cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1); | ||
230 | cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT; | ||
231 | |||
232 | value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL); | ||
233 | /* access SDRAM through AHB */ | ||
234 | value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD; | ||
235 | value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD; | ||
236 | value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD | | ||
237 | TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD | | ||
238 | TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD; | ||
239 | aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL); | ||
240 | dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value); | ||
241 | |||
242 | value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) | | ||
243 | ((dd->ctx->keylen * 8) << | ||
244 | TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) | | ||
245 | ((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT); | ||
246 | |||
247 | if (mode & FLAGS_CBC) { | ||
248 | value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3) | ||
249 | << TEGRA_AES_SECURE_XOR_POS_SHIFT) | | ||
250 | (((mode & FLAGS_ENCRYPT) ? 2 : 3) | ||
251 | << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) | | ||
252 | ((mode & FLAGS_ENCRYPT) ? 1 : 0) | ||
253 | << TEGRA_AES_SECURE_CORE_SEL_SHIFT); | ||
254 | } else if (mode & FLAGS_OFB) { | ||
255 | value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) | | ||
256 | (2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) | | ||
257 | (TEGRA_AES_SECURE_CORE_SEL_FIELD)); | ||
258 | } else if (mode & FLAGS_RNG) { | ||
259 | value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0) | ||
260 | << TEGRA_AES_SECURE_CORE_SEL_SHIFT | | ||
261 | TEGRA_AES_SECURE_RNG_ENB_FIELD); | ||
262 | } else { | ||
263 | value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0) | ||
264 | << TEGRA_AES_SECURE_CORE_SEL_SHIFT); | ||
265 | } | ||
266 | |||
267 | dev_dbg(dd->dev, "secure_in_sel=0x%x", value); | ||
268 | aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT); | ||
269 | |||
270 | aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR); | ||
271 | reinit_completion(&dd->op_complete); | ||
272 | |||
273 | for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) { | ||
274 | do { | ||
275 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); | ||
276 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; | ||
277 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; | ||
278 | } while (eng_busy && !icq_empty); | ||
279 | aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR); | ||
280 | } | ||
281 | |||
282 | ret = wait_for_completion_timeout(&dd->op_complete, | ||
283 | msecs_to_jiffies(150)); | ||
284 | if (ret == 0) { | ||
285 | dev_err(dd->dev, "timed out (0x%x)\n", | ||
286 | aes_readl(dd, TEGRA_AES_INTR_STATUS)); | ||
287 | return -ETIMEDOUT; | ||
288 | } | ||
289 | |||
290 | aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR); | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | static void aes_release_key_slot(struct tegra_aes_slot *slot) | ||
295 | { | ||
296 | if (slot->slot_num == SSK_SLOT_NUM) | ||
297 | return; | ||
298 | |||
299 | spin_lock(&list_lock); | ||
300 | list_add_tail(&slot->node, &dev_list); | ||
301 | slot = NULL; | ||
302 | spin_unlock(&list_lock); | ||
303 | } | ||
304 | |||
305 | static struct tegra_aes_slot *aes_find_key_slot(void) | ||
306 | { | ||
307 | struct tegra_aes_slot *slot = NULL; | ||
308 | struct list_head *new_head; | ||
309 | int empty; | ||
310 | |||
311 | spin_lock(&list_lock); | ||
312 | empty = list_empty(&dev_list); | ||
313 | if (!empty) { | ||
314 | slot = list_entry(&dev_list, struct tegra_aes_slot, node); | ||
315 | new_head = dev_list.next; | ||
316 | list_del(&dev_list); | ||
317 | dev_list.next = new_head->next; | ||
318 | dev_list.prev = NULL; | ||
319 | } | ||
320 | spin_unlock(&list_lock); | ||
321 | |||
322 | return slot; | ||
323 | } | ||
324 | |||
325 | static int aes_set_key(struct tegra_aes_dev *dd) | ||
326 | { | ||
327 | u32 value, cmdq[2]; | ||
328 | struct tegra_aes_ctx *ctx = dd->ctx; | ||
329 | int eng_busy, icq_empty, dma_busy; | ||
330 | bool use_ssk = false; | ||
331 | |||
332 | /* use ssk? */ | ||
333 | if (!dd->ctx->slot) { | ||
334 | dev_dbg(dd->dev, "using ssk"); | ||
335 | dd->ctx->slot = &ssk; | ||
336 | use_ssk = true; | ||
337 | } | ||
338 | |||
339 | /* enable key schedule generation in hardware */ | ||
340 | value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT); | ||
341 | value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD; | ||
342 | aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT); | ||
343 | |||
344 | /* select the key slot */ | ||
345 | value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG); | ||
346 | value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD; | ||
347 | value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT); | ||
348 | aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG); | ||
349 | |||
350 | if (use_ssk) | ||
351 | return 0; | ||
352 | |||
353 | /* copy the key table from sdram to vram */ | ||
354 | cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT | | ||
355 | MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT | | ||
356 | AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) << | ||
357 | MEMDMA_NUM_WORDS_SHIFT; | ||
358 | cmdq[1] = (u32)dd->ivkey_phys_base; | ||
359 | |||
360 | aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR); | ||
361 | aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR); | ||
362 | |||
363 | do { | ||
364 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); | ||
365 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; | ||
366 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; | ||
367 | dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD; | ||
368 | } while (eng_busy && !icq_empty && dma_busy); | ||
369 | |||
370 | /* settable command to get key into internal registers */ | ||
371 | value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT | | ||
372 | SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT | | ||
373 | SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT | | ||
374 | (SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) << | ||
375 | CMDQ_KEYTABLEID_SHIFT; | ||
376 | aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR); | ||
377 | |||
378 | do { | ||
379 | value = aes_readl(dd, TEGRA_AES_INTR_STATUS); | ||
380 | eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD; | ||
381 | icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD; | ||
382 | } while (eng_busy && !icq_empty); | ||
383 | |||
384 | return 0; | ||
385 | } | ||
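The first command word written above packs an opcode, a DMA direction, and a word count into a single u32. A sketch of that composition in isolation, assuming the CMDQ and MEMDMA shift constants visible in aes_set_key(); the helper name is hypothetical:

	/* Build the MEMDMAVD command word that copies the key table
	 * (AES_HW_KEY_TABLE_LENGTH_BYTES, moved as 32-bit words) from
	 * DRAM into the engine's VRAM. */
	static u32 build_memdma_cmd(void)
	{
		u32 nwords = AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32);

		return (CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT) |
		       (MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT) |
		       (nwords << MEMDMA_NUM_WORDS_SHIFT);
	}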
386 | |||
387 | static int tegra_aes_handle_req(struct tegra_aes_dev *dd) | ||
388 | { | ||
389 | struct crypto_async_request *async_req, *backlog; | ||
390 | struct crypto_ablkcipher *tfm; | ||
391 | struct tegra_aes_ctx *ctx; | ||
392 | struct tegra_aes_reqctx *rctx; | ||
393 | struct ablkcipher_request *req; | ||
394 | unsigned long flags; | ||
395 | int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES; | ||
396 | int ret = 0, nblocks, total; | ||
397 | int count = 0; | ||
398 | dma_addr_t addr_in, addr_out; | ||
399 | struct scatterlist *in_sg, *out_sg; | ||
400 | |||
401 | if (!dd) | ||
402 | return -EINVAL; | ||
403 | |||
404 | spin_lock_irqsave(&dd->lock, flags); | ||
405 | backlog = crypto_get_backlog(&dd->queue); | ||
406 | async_req = crypto_dequeue_request(&dd->queue); | ||
407 | if (!async_req) | ||
408 | clear_bit(FLAGS_BUSY, &dd->flags); | ||
409 | spin_unlock_irqrestore(&dd->lock, flags); | ||
410 | |||
411 | if (!async_req) | ||
412 | return -ENODATA; | ||
413 | |||
414 | if (backlog) | ||
415 | backlog->complete(backlog, -EINPROGRESS); | ||
416 | |||
417 | req = ablkcipher_request_cast(async_req); | ||
418 | |||
419 | dev_dbg(dd->dev, "%s: get new req\n", __func__); | ||
420 | |||
421 | if (!req->src || !req->dst) | ||
422 | return -EINVAL; | ||
423 | |||
424 | /* take mutex to access the aes hw */ | ||
425 | mutex_lock(&aes_lock); | ||
426 | |||
427 | /* assign new request to device */ | ||
428 | dd->req = req; | ||
429 | dd->total = req->nbytes; | ||
430 | dd->in_offset = 0; | ||
431 | dd->in_sg = req->src; | ||
432 | dd->out_offset = 0; | ||
433 | dd->out_sg = req->dst; | ||
434 | |||
435 | in_sg = dd->in_sg; | ||
436 | out_sg = dd->out_sg; | ||
437 | |||
438 | total = dd->total; | ||
439 | |||
440 | tfm = crypto_ablkcipher_reqtfm(req); | ||
441 | rctx = ablkcipher_request_ctx(req); | ||
442 | ctx = crypto_ablkcipher_ctx(tfm); | ||
443 | rctx->mode &= FLAGS_MODE_MASK; | ||
444 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; | ||
445 | |||
446 | dd->iv = (u8 *)req->info; | ||
447 | dd->ivlen = crypto_ablkcipher_ivsize(tfm); | ||
448 | |||
449 | /* assign new context to device */ | ||
450 | ctx->dd = dd; | ||
451 | dd->ctx = ctx; | ||
452 | |||
453 | if (ctx->flags & FLAGS_NEW_KEY) { | ||
454 | /* copy the key */ | ||
455 | memcpy(dd->ivkey_base, ctx->key, ctx->keylen); | ||
456 | memset(dd->ivkey_base + ctx->keylen, 0, AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen); | ||
457 | aes_set_key(dd); | ||
458 | ctx->flags &= ~FLAGS_NEW_KEY; | ||
459 | } | ||
460 | |||
461 | if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) { | ||
462 | /* Set the IV in the AES hardware slot. The hardware | ||
463 | * generates an updated IV only after the IV is written to | ||
464 | * the slot, so the key and IV are passed asynchronously. | ||
465 | */ | ||
466 | memcpy(dd->buf_in, dd->iv, dd->ivlen); | ||
467 | |||
468 | ret = aes_start_crypt(dd, (u32)dd->dma_buf_in, | ||
469 | (u32)dd->dma_buf_out, 1, FLAGS_CBC, false); | ||
470 | if (ret < 0) { | ||
471 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
472 | goto out; | ||
473 | } | ||
474 | } | ||
475 | |||
476 | while (total) { | ||
477 | dev_dbg(dd->dev, "remain: %d\n", total); | ||
478 | ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE); | ||
479 | if (!ret) { | ||
480 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
481 | ret = -EINVAL; | ||
482 | goto out; | ||
483 | } | ||
484 | ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE); | ||
485 | if (!ret) { | ||
486 | dev_err(dd->dev, "dma_map_sg() error\n"); | ||
487 | dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE); | ||
488 | ret = -EINVAL; | ||
489 | goto out; | ||
490 | } | ||
491 | |||
492 | addr_in = sg_dma_address(in_sg); | ||
493 | addr_out = sg_dma_address(out_sg); | ||
494 | dd->flags |= FLAGS_FAST; | ||
495 | count = min_t(int, sg_dma_len(in_sg), dma_max); | ||
496 | WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg)); | ||
497 | nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE); | ||
498 | |||
499 | ret = aes_start_crypt(dd, addr_in, addr_out, nblocks, | ||
500 | dd->flags, true); | ||
501 | |||
502 | dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE); | ||
503 | dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE); | ||
504 | |||
505 | if (ret < 0) { | ||
506 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
507 | goto out; | ||
508 | } | ||
509 | dd->flags &= ~FLAGS_FAST; | ||
510 | |||
511 | dev_dbg(dd->dev, "out: copied %d\n", count); | ||
512 | total -= count; | ||
513 | in_sg = sg_next(in_sg); | ||
514 | out_sg = sg_next(out_sg); | ||
515 | WARN_ON(((total != 0) && (!in_sg || !out_sg))); | ||
516 | } | ||
517 | |||
518 | out: | ||
519 | mutex_unlock(&aes_lock); | ||
520 | |||
521 | dd->total = total; | ||
522 | |||
523 | if (dd->req->base.complete) | ||
524 | dd->req->base.complete(&dd->req->base, ret); | ||
525 | |||
526 | dev_dbg(dd->dev, "%s: exit\n", __func__); | ||
527 | return ret; | ||
528 | } | ||
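Note that the loop above maps one scatterlist entry per pass and caps each transfer at AES_HW_DMA_BUFFER_SIZE_BYTES, yet advances to the next entry unconditionally, so an entry longer than dma_max would leave its tail unprocessed. A hedged sketch of per-entry chunking, assuming the same aes_start_crypt() interface used above; the helper name is hypothetical:

	/* Consume one mapped sg entry in dma_max-sized pieces so entries
	 * larger than the DMA buffer are fully processed. */
	static int crypt_one_entry(struct tegra_aes_dev *dd, dma_addr_t in,
				   dma_addr_t out, unsigned int len)
	{
		while (len) {
			unsigned int count = min_t(unsigned int, len,
						   AES_HW_DMA_BUFFER_SIZE_BYTES);
			int nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);
			int ret = aes_start_crypt(dd, (u32)in, (u32)out,
						  nblocks, dd->flags, true);

			if (ret < 0)
				return ret;
			in += count;
			out += count;
			len -= count;
		}
		return 0;
	}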
529 | |||
530 | static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
531 | unsigned int keylen) | ||
532 | { | ||
533 | struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
534 | struct tegra_aes_dev *dd = aes_dev; | ||
535 | struct tegra_aes_slot *key_slot; | ||
536 | |||
537 | if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) && | ||
538 | (keylen != AES_KEYSIZE_256)) { | ||
539 | dev_err(dd->dev, "unsupported key size\n"); | ||
540 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
541 | return -EINVAL; | ||
542 | } | ||
543 | |||
544 | dev_dbg(dd->dev, "keylen: %d\n", keylen); | ||
545 | |||
546 | ctx->dd = dd; | ||
547 | |||
548 | if (key) { | ||
549 | if (!ctx->slot) { | ||
550 | key_slot = aes_find_key_slot(); | ||
551 | if (!key_slot) { | ||
552 | dev_err(dd->dev, "no empty slot\n"); | ||
553 | return -ENOMEM; | ||
554 | } | ||
555 | |||
556 | ctx->slot = key_slot; | ||
557 | } | ||
558 | |||
559 | memcpy(ctx->key, key, keylen); | ||
560 | ctx->keylen = keylen; | ||
561 | } | ||
562 | |||
563 | ctx->flags |= FLAGS_NEW_KEY; | ||
564 | dev_dbg(dd->dev, "done\n"); | ||
565 | return 0; | ||
566 | } | ||
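From a caller's perspective, this setkey hook is reached through the ablkcipher API of this kernel generation. A minimal sketch, assuming "cbc(aes)" resolves to cbc-aes-tegra when this driver wins on priority; the function name is hypothetical:

	#include <linux/crypto.h>
	#include <linux/err.h>

	/* Allocate a CBC-AES transform and program a key into it. */
	static int example_setkey(const u8 *key, unsigned int keylen)
	{
		struct crypto_ablkcipher *tfm;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, keylen);

		crypto_free_ablkcipher(tfm);
		return ret;
	}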
567 | |||
568 | static void aes_workqueue_handler(struct work_struct *work) | ||
569 | { | ||
570 | struct tegra_aes_dev *dd = aes_dev; | ||
571 | int ret; | ||
572 | |||
573 | ret = clk_prepare_enable(dd->aes_clk); | ||
574 | /* cannot make progress without the AES clock */ | ||
575 | BUG_ON(ret); | ||
576 | |||
577 | /* empty the crypto queue and then return */ | ||
578 | do { | ||
579 | ret = tegra_aes_handle_req(dd); | ||
580 | } while (!ret); | ||
581 | |||
582 | clk_disable_unprepare(dd->aes_clk); | ||
583 | } | ||
584 | |||
585 | static irqreturn_t aes_irq(int irq, void *dev_id) | ||
586 | { | ||
587 | struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id; | ||
588 | u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS); | ||
589 | int busy = test_bit(FLAGS_BUSY, &dd->flags); | ||
590 | |||
591 | if (!busy) { | ||
592 | dev_dbg(dd->dev, "spurious interrupt\n"); | ||
593 | return IRQ_NONE; | ||
594 | } | ||
595 | |||
596 | dev_dbg(dd->dev, "irq_stat: 0x%x\n", value); | ||
597 | if (value & TEGRA_AES_INT_ERROR_MASK) | ||
598 | aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS); | ||
599 | |||
600 | if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD)) | ||
601 | complete(&dd->op_complete); | ||
602 | else | ||
603 | return IRQ_NONE; | ||
604 | |||
605 | return IRQ_HANDLED; | ||
606 | } | ||
607 | |||
608 | static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
609 | { | ||
610 | struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
611 | struct tegra_aes_dev *dd = aes_dev; | ||
612 | unsigned long flags; | ||
613 | int err = 0; | ||
614 | int busy; | ||
615 | |||
616 | dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n", | ||
617 | req->nbytes, !!(mode & FLAGS_ENCRYPT), | ||
618 | !!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB)); | ||
619 | |||
620 | rctx->mode = mode; | ||
621 | |||
622 | spin_lock_irqsave(&dd->lock, flags); | ||
623 | err = ablkcipher_enqueue_request(&dd->queue, req); | ||
624 | busy = test_and_set_bit(FLAGS_BUSY, &dd->flags); | ||
625 | spin_unlock_irqrestore(&dd->lock, flags); | ||
626 | |||
627 | if (!busy) | ||
628 | queue_work(aes_wq, &aes_work); | ||
629 | |||
630 | return err; | ||
631 | } | ||
632 | |||
633 | static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
634 | { | ||
635 | return tegra_aes_crypt(req, FLAGS_ENCRYPT); | ||
636 | } | ||
637 | |||
638 | static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
639 | { | ||
640 | return tegra_aes_crypt(req, 0); | ||
641 | } | ||
642 | |||
643 | static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
644 | { | ||
645 | return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); | ||
646 | } | ||
647 | |||
648 | static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
649 | { | ||
650 | return tegra_aes_crypt(req, FLAGS_CBC); | ||
651 | } | ||
652 | |||
653 | static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req) | ||
654 | { | ||
655 | return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB); | ||
656 | } | ||
657 | |||
658 | static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req) | ||
659 | { | ||
660 | return tegra_aes_crypt(req, FLAGS_OFB); | ||
661 | } | ||
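The six wrappers above differ only in the mode bits they pass to tegra_aes_crypt(); the request itself arrives through the standard asynchronous path. A sketch of one synchronous-style caller, assuming this era's ablkcipher request API; the names example_done/example_cbc_encrypt are hypothetical, and real code would also propagate the error passed to the completion callback:

	#include <linux/crypto.h>
	#include <linux/completion.h>
	#include <linux/scatterlist.h>

	static void example_done(struct crypto_async_request *areq, int err)
	{
		complete(areq->data);	/* err handling elided for brevity */
	}

	/* Submit one in-place CBC encryption and wait for it to finish. */
	static int example_cbc_encrypt(struct crypto_ablkcipher *tfm,
				       u8 *buf, unsigned int len, u8 *iv)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct ablkcipher_request *req;
		struct scatterlist sg;
		int ret;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						example_done, &done);
		sg_init_one(&sg, buf, len);
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_ablkcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&done);
			ret = 0;
		}

		ablkcipher_request_free(req);
		return ret;
	}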
662 | |||
663 | static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata, | ||
664 | unsigned int dlen) | ||
665 | { | ||
666 | struct tegra_aes_dev *dd = aes_dev; | ||
667 | struct tegra_aes_ctx *ctx = &rng_ctx; | ||
668 | int ret, i; | ||
669 | u8 *dest = rdata, *dt = dd->dt; | ||
670 | |||
671 | /* take mutex to access the aes hw */ | ||
672 | mutex_lock(&aes_lock); | ||
673 | |||
674 | ret = clk_prepare_enable(dd->aes_clk); | ||
675 | if (ret) { | ||
676 | mutex_unlock(&aes_lock); | ||
677 | return ret; | ||
678 | } | ||
679 | |||
680 | ctx->dd = dd; | ||
681 | dd->ctx = ctx; | ||
682 | dd->flags = FLAGS_ENCRYPT | FLAGS_RNG; | ||
683 | |||
684 | memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ); | ||
685 | |||
686 | ret = aes_start_crypt(dd, (u32)dd->dma_buf_in, | ||
687 | (u32)dd->dma_buf_out, 1, dd->flags, true); | ||
688 | if (ret < 0) { | ||
689 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
690 | dlen = ret; /* negative errno survives the int return below */ | ||
691 | goto out; | ||
692 | } | ||
693 | memcpy(dest, dd->buf_out, dlen); | ||
694 | |||
695 | /* increment the big-endian DT vector, with carry */ | ||
696 | for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) { | ||
697 | dt[i] += 1; | ||
698 | if (dt[i] != 0) | ||
699 | break; | ||
700 | } | ||
701 | |||
702 | out: | ||
703 | clk_disable_unprepare(dd->aes_clk); | ||
704 | mutex_unlock(&aes_lock); | ||
705 | |||
706 | dev_dbg(dd->dev, "%s: done\n", __func__); | ||
707 | return dlen; | ||
708 | } | ||
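The hardware produces one DEFAULT_RNG_BLK_SZ-byte block per invocation here (buf_out only ever holds one fresh block, so requests larger than a block would need an outer loop), and the DT update is a big-endian increment with carry, as in ANSI X9.31-style generators. The increment in isolation, as a standalone sketch:

	/* Big-endian counter increment over an arbitrary-length vector:
	 * stop propagating the carry once a byte does not wrap to zero. */
	static void dt_increment(u8 *dt, size_t len)
	{
		int i;

		for (i = len - 1; i >= 0; i--)
			if (++dt[i] != 0)
				break;
	}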
709 | |||
710 | static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | ||
711 | unsigned int slen) | ||
712 | { | ||
713 | struct tegra_aes_dev *dd = aes_dev; | ||
714 | struct tegra_aes_ctx *ctx = &rng_ctx; | ||
715 | struct tegra_aes_slot *key_slot; | ||
716 | int ret = 0; | ||
717 | u8 tmp[16]; /* 16 bytes = 128 bits of entropy */ | ||
718 | u8 *dt; | ||
719 | |||
720 | if (!ctx || !dd) { | ||
721 | pr_err("ctx=%p, dd=%p\n", | ||
722 | ctx, dd); | ||
723 | return -EINVAL; | ||
724 | } | ||
725 | |||
726 | if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { | ||
727 | dev_err(dd->dev, "seed size invalid\n"); | ||
728 | return -EINVAL; | ||
729 | } | ||
730 | |||
731 | /* take mutex to access the aes hw */ | ||
732 | mutex_lock(&aes_lock); | ||
733 | |||
734 | if (!ctx->slot) { | ||
735 | key_slot = aes_find_key_slot(); | ||
736 | if (!key_slot) { | ||
737 | dev_err(dd->dev, "no empty slot\n"); | ||
738 | mutex_unlock(&aes_lock); | ||
739 | return -ENOMEM; | ||
740 | } | ||
741 | ctx->slot = key_slot; | ||
742 | } | ||
743 | |||
744 | ctx->dd = dd; | ||
745 | dd->ctx = ctx; | ||
746 | dd->ctr = 0; | ||
747 | |||
748 | ctx->keylen = AES_KEYSIZE_128; | ||
749 | ctx->flags |= FLAGS_NEW_KEY; | ||
750 | |||
751 | /* copy the key to the key slot */ | ||
752 | memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128); | ||
753 | memset(dd->ivkey_base + AES_KEYSIZE_128, 0, AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128); | ||
754 | |||
755 | dd->iv = seed; | ||
756 | dd->ivlen = slen; | ||
757 | |||
758 | dd->flags = FLAGS_ENCRYPT | FLAGS_RNG; | ||
759 | |||
760 | ret = clk_prepare_enable(dd->aes_clk); | ||
761 | if (ret) { | ||
762 | mutex_unlock(&aes_lock); | ||
763 | return ret; | ||
764 | } | ||
765 | |||
766 | aes_set_key(dd); | ||
767 | |||
768 | /* set seed to the aes hw slot */ | ||
769 | memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ); | ||
770 | ret = aes_start_crypt(dd, (u32)dd->dma_buf_in, | ||
771 | (u32)dd->dma_buf_out, 1, FLAGS_CBC, false); | ||
772 | if (ret < 0) { | ||
773 | dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret); | ||
774 | goto out; | ||
775 | } | ||
776 | |||
777 | if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { | ||
778 | dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; | ||
779 | } else { | ||
780 | get_random_bytes(tmp, sizeof(tmp)); | ||
781 | dt = tmp; | ||
782 | } | ||
783 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); | ||
784 | |||
785 | out: | ||
786 | clk_disable_unprepare(dd->aes_clk); | ||
787 | mutex_unlock(&aes_lock); | ||
788 | |||
789 | dev_dbg(dd->dev, "%s: done\n", __func__); | ||
790 | return ret; | ||
791 | } | ||
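The offsets used in tegra_aes_rng_reset() imply a fixed seed layout: the initial vector first, the AES-128 key next, and an optional DT block last (random bytes substitute for a missing DT). A documenting sketch of that layout; the struct name is hypothetical and exists only to make the offsets explicit:

	/* Seed layout consumed by tegra_aes_rng_reset():
	 *   v   [0 .. DEFAULT_RNG_BLK_SZ)            initial vector
	 *   key [.. + AES_KEYSIZE_128)               AES-128 key
	 *   dt  [.. + DEFAULT_RNG_BLK_SZ)            optional DT block
	 */
	struct tegra_rng_seed {
		u8 v[DEFAULT_RNG_BLK_SZ];
		u8 key[AES_KEYSIZE_128];
		u8 dt[DEFAULT_RNG_BLK_SZ];	/* only in full-size seeds */
	};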
792 | |||
793 | static int tegra_aes_cra_init(struct crypto_tfm *tfm) | ||
794 | { | ||
795 | tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx); | ||
796 | |||
797 | return 0; | ||
798 | } | ||
799 | |||
800 | static void tegra_aes_cra_exit(struct crypto_tfm *tfm) | ||
801 | { | ||
802 | struct tegra_aes_ctx *ctx = | ||
803 | crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); | ||
804 | |||
805 | if (ctx && ctx->slot) | ||
806 | aes_release_key_slot(ctx->slot); | ||
807 | } | ||
808 | |||
809 | static struct crypto_alg algs[] = { | ||
810 | { | ||
811 | .cra_name = "ecb(aes)", | ||
812 | .cra_driver_name = "ecb-aes-tegra", | ||
813 | .cra_priority = 300, | ||
814 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
815 | .cra_blocksize = AES_BLOCK_SIZE, | ||
816 | .cra_alignmask = 3, | ||
817 | .cra_type = &crypto_ablkcipher_type, | ||
818 | .cra_u.ablkcipher = { | ||
819 | .min_keysize = AES_MIN_KEY_SIZE, | ||
820 | .max_keysize = AES_MAX_KEY_SIZE, | ||
821 | .setkey = tegra_aes_setkey, | ||
822 | .encrypt = tegra_aes_ecb_encrypt, | ||
823 | .decrypt = tegra_aes_ecb_decrypt, | ||
824 | }, | ||
825 | }, { | ||
826 | .cra_name = "cbc(aes)", | ||
827 | .cra_driver_name = "cbc-aes-tegra", | ||
828 | .cra_priority = 300, | ||
829 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
830 | .cra_blocksize = AES_BLOCK_SIZE, | ||
831 | .cra_alignmask = 3, | ||
832 | .cra_type = &crypto_ablkcipher_type, | ||
833 | .cra_u.ablkcipher = { | ||
834 | .min_keysize = AES_MIN_KEY_SIZE, | ||
835 | .max_keysize = AES_MAX_KEY_SIZE, | ||
836 | .ivsize = AES_BLOCK_SIZE, | ||
837 | .setkey = tegra_aes_setkey, | ||
838 | .encrypt = tegra_aes_cbc_encrypt, | ||
839 | .decrypt = tegra_aes_cbc_decrypt, | ||
840 | } | ||
841 | }, { | ||
842 | .cra_name = "ofb(aes)", | ||
843 | .cra_driver_name = "ofb-aes-tegra", | ||
844 | .cra_priority = 300, | ||
845 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
846 | .cra_blocksize = AES_BLOCK_SIZE, | ||
847 | .cra_alignmask = 3, | ||
848 | .cra_type = &crypto_ablkcipher_type, | ||
849 | .cra_u.ablkcipher = { | ||
850 | .min_keysize = AES_MIN_KEY_SIZE, | ||
851 | .max_keysize = AES_MAX_KEY_SIZE, | ||
852 | .ivsize = AES_BLOCK_SIZE, | ||
853 | .setkey = tegra_aes_setkey, | ||
854 | .encrypt = tegra_aes_ofb_encrypt, | ||
855 | .decrypt = tegra_aes_ofb_decrypt, | ||
856 | } | ||
857 | }, { | ||
858 | .cra_name = "ansi_cprng", | ||
859 | .cra_driver_name = "rng-aes-tegra", | ||
860 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | ||
861 | .cra_ctxsize = sizeof(struct tegra_aes_ctx), | ||
862 | .cra_type = &crypto_rng_type, | ||
863 | .cra_u.rng = { | ||
864 | .rng_make_random = tegra_aes_get_random, | ||
865 | .rng_reset = tegra_aes_rng_reset, | ||
866 | .seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ), | ||
867 | } | ||
868 | } | ||
869 | }; | ||
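The last entry makes the engine reachable through the kernel RNG interface. A sketch of seeding and reading it, assuming the crypto_rng API of this kernel generation (declared in linux/crypto.h at the time); the function name is hypothetical, and the seed must follow the layout expected by tegra_aes_rng_reset():

	#include <linux/crypto.h>
	#include <linux/err.h>

	/* Seed the "ansi_cprng" implementation and pull len output bytes. */
	static int example_rng(u8 *out, unsigned int len,
			       u8 *seed, unsigned int slen)
	{
		struct crypto_rng *rng;
		int ret;

		rng = crypto_alloc_rng("ansi_cprng", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		ret = crypto_rng_reset(rng, seed, slen);
		if (!ret)
			ret = crypto_rng_get_bytes(rng, out, len);

		crypto_free_rng(rng);
		return ret;
	}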
870 | |||
871 | static int tegra_aes_probe(struct platform_device *pdev) | ||
872 | { | ||
873 | struct device *dev = &pdev->dev; | ||
874 | struct tegra_aes_dev *dd; | ||
875 | struct resource *res; | ||
876 | int err = -ENOMEM, i = 0, j; | ||
877 | |||
878 | dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL); | ||
879 | if (dd == NULL) { | ||
880 | dev_err(dev, "unable to alloc data struct.\n"); | ||
881 | return err; | ||
882 | } | ||
883 | |||
884 | dd->dev = dev; | ||
885 | platform_set_drvdata(pdev, dd); | ||
886 | |||
887 | dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) * | ||
888 | AES_NR_KEYSLOTS, GFP_KERNEL); | ||
889 | if (dd->slots == NULL) { | ||
890 | dev_err(dev, "unable to alloc slot struct.\n"); | ||
891 | goto out; | ||
892 | } | ||
893 | |||
894 | spin_lock_init(&dd->lock); | ||
895 | crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH); | ||
896 | |||
897 | /* Get the module base address */ | ||
898 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
899 | if (!res) { | ||
900 | dev_err(dev, "invalid resource type: base\n"); | ||
901 | err = -ENODEV; | ||
902 | goto out; | ||
903 | } | ||
904 | |||
905 | if (!devm_request_mem_region(&pdev->dev, res->start, | ||
906 | resource_size(res), | ||
907 | dev_name(&pdev->dev))) { | ||
908 | dev_err(&pdev->dev, "Couldn't request MEM resource\n"); | ||
909 | return -ENODEV; | ||
910 | } | ||
911 | |||
912 | dd->io_base = devm_ioremap(dev, res->start, resource_size(res)); | ||
913 | if (!dd->io_base) { | ||
914 | dev_err(dev, "can't ioremap register space\n"); | ||
915 | err = -ENOMEM; | ||
916 | goto out; | ||
917 | } | ||
918 | |||
919 | /* Initialize the vde clock */ | ||
920 | dd->aes_clk = devm_clk_get(dev, "vde"); | ||
921 | if (IS_ERR(dd->aes_clk)) { | ||
922 | dev_err(dev, "clock initialization failed\n"); | ||
923 | err = -ENODEV; | ||
924 | goto out; | ||
925 | } | ||
926 | |||
927 | err = clk_set_rate(dd->aes_clk, ULONG_MAX); | ||
928 | if (err) { | ||
929 | dev_err(dd->dev, "iclk set_rate fail(%d)\n", err); | ||
930 | goto out; | ||
931 | } | ||
932 | |||
933 | /* | ||
934 | * The following contiguous memory is allocated: | ||
935 | * - hardware key table | ||
936 | * - key schedule | ||
937 | */ | ||
938 | dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES, | ||
939 | &dd->ivkey_phys_base, | ||
940 | GFP_KERNEL); | ||
941 | if (!dd->ivkey_base) { | ||
942 | dev_err(dev, "can not allocate iv/key buffer\n"); | ||
943 | err = -ENOMEM; | ||
944 | goto out; | ||
945 | } | ||
946 | |||
947 | dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
948 | &dd->dma_buf_in, GFP_KERNEL); | ||
949 | if (!dd->buf_in) { | ||
950 | dev_err(dev, "can not allocate dma-in buffer\n"); | ||
951 | err = -ENOMEM; | ||
952 | goto out; | ||
953 | } | ||
954 | |||
955 | dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
956 | &dd->dma_buf_out, GFP_KERNEL); | ||
957 | if (!dd->buf_out) { | ||
958 | dev_err(dev, "can not allocate dma-out buffer\n"); | ||
959 | err = -ENOMEM; | ||
960 | goto out; | ||
961 | } | ||
962 | |||
963 | init_completion(&dd->op_complete); | ||
964 | aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1); | ||
965 | if (!aes_wq) { | ||
966 | dev_err(dev, "alloc_workqueue failed\n"); | ||
967 | err = -ENOMEM; | ||
968 | goto out; | ||
969 | } | ||
970 | |||
971 | /* get the irq */ | ||
972 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
973 | if (!res) { | ||
974 | dev_err(dev, "invalid resource type: irq\n"); | ||
975 | err = -ENODEV; | ||
976 | goto out; | ||
977 | } | ||
978 | dd->irq = res->start; | ||
979 | |||
980 | err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH | | ||
981 | IRQF_SHARED, "tegra-aes", dd); | ||
982 | if (err) { | ||
983 | dev_err(dev, "request_irq failed\n"); | ||
984 | goto out; | ||
985 | } | ||
986 | |||
987 | mutex_init(&aes_lock); | ||
988 | INIT_LIST_HEAD(&dev_list); | ||
989 | |||
990 | spin_lock_init(&list_lock); | ||
991 | spin_lock(&list_lock); | ||
992 | for (i = 0; i < AES_NR_KEYSLOTS; i++) { | ||
993 | if (i == SSK_SLOT_NUM) | ||
994 | continue; | ||
995 | dd->slots[i].slot_num = i; | ||
996 | INIT_LIST_HEAD(&dd->slots[i].node); | ||
997 | list_add_tail(&dd->slots[i].node, &dev_list); | ||
998 | } | ||
999 | spin_unlock(&list_lock); | ||
1000 | |||
1001 | aes_dev = dd; | ||
1002 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
1003 | algs[i].cra_priority = 300; | ||
1004 | algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx); | ||
1005 | algs[i].cra_module = THIS_MODULE; | ||
1006 | algs[i].cra_init = tegra_aes_cra_init; | ||
1007 | algs[i].cra_exit = tegra_aes_cra_exit; | ||
1008 | |||
1009 | err = crypto_register_alg(&algs[i]); | ||
1010 | if (err) | ||
1011 | goto out; | ||
1012 | } | ||
1013 | |||
1014 | dev_info(dev, "registered\n"); | ||
1015 | return 0; | ||
1016 | |||
1017 | out: | ||
1018 | for (j = 0; j < i; j++) | ||
1019 | crypto_unregister_alg(&algs[j]); | ||
1020 | if (dd->ivkey_base) | ||
1021 | dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES, | ||
1022 | dd->ivkey_base, dd->ivkey_phys_base); | ||
1023 | if (dd->buf_in) | ||
1024 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1025 | dd->buf_in, dd->dma_buf_in); | ||
1026 | if (dd->buf_out) | ||
1027 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1028 | dd->buf_out, dd->dma_buf_out); | ||
1029 | if (aes_wq) | ||
1030 | destroy_workqueue(aes_wq); | ||
1031 | spin_lock(&list_lock); | ||
1032 | list_del(&dev_list); | ||
1033 | spin_unlock(&list_lock); | ||
1034 | |||
1035 | aes_dev = NULL; | ||
1036 | |||
1037 | dev_err(dev, "%s: initialization failed.\n", __func__); | ||
1038 | return err; | ||
1039 | } | ||
1040 | |||
1041 | static int tegra_aes_remove(struct platform_device *pdev) | ||
1042 | { | ||
1043 | struct device *dev = &pdev->dev; | ||
1044 | struct tegra_aes_dev *dd = platform_get_drvdata(pdev); | ||
1045 | int i; | ||
1046 | |||
1047 | for (i = 0; i < ARRAY_SIZE(algs); i++) | ||
1048 | crypto_unregister_alg(&algs[i]); | ||
1049 | |||
1050 | cancel_work_sync(&aes_work); | ||
1051 | destroy_workqueue(aes_wq); | ||
1052 | spin_lock(&list_lock); | ||
1053 | list_del(&dev_list); | ||
1054 | spin_unlock(&list_lock); | ||
1055 | |||
1056 | dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES, | ||
1057 | dd->ivkey_base, dd->ivkey_phys_base); | ||
1058 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1059 | dd->buf_in, dd->dma_buf_in); | ||
1060 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | ||
1061 | dd->buf_out, dd->dma_buf_out); | ||
1062 | aes_dev = NULL; | ||
1063 | |||
1064 | return 0; | ||
1065 | } | ||
1066 | |||
1067 | static const struct of_device_id tegra_aes_of_match[] = { | ||
1068 | { .compatible = "nvidia,tegra20-aes", }, | ||
1069 | { .compatible = "nvidia,tegra30-aes", }, | ||
1070 | { }, | ||
1071 | }; | ||
1072 | |||
1073 | static struct platform_driver tegra_aes_driver = { | ||
1074 | .probe = tegra_aes_probe, | ||
1075 | .remove = tegra_aes_remove, | ||
1076 | .driver = { | ||
1077 | .name = "tegra-aes", | ||
1078 | .owner = THIS_MODULE, | ||
1079 | .of_match_table = tegra_aes_of_match, | ||
1080 | }, | ||
1081 | }; | ||
1082 | |||
1083 | module_platform_driver(tegra_aes_driver); | ||
1084 | |||
1085 | MODULE_DESCRIPTION("Tegra AES (ECB/CBC/OFB) and CPRNG hw acceleration support."); | ||
1086 | MODULE_AUTHOR("NVIDIA Corporation"); | ||
1087 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/crypto/tegra-aes.h b/drivers/crypto/tegra-aes.h deleted file mode 100644 index 6006333a8934..000000000000 --- a/drivers/crypto/tegra-aes.h +++ /dev/null | |||
@@ -1,103 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010, NVIDIA Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __CRYPTODEV_TEGRA_AES_H | ||
20 | #define __CRYPTODEV_TEGRA_AES_H | ||
21 | |||
22 | #define TEGRA_AES_ICMDQUE_WR 0x1000 | ||
23 | #define TEGRA_AES_CMDQUE_CONTROL 0x1008 | ||
24 | #define TEGRA_AES_INTR_STATUS 0x1018 | ||
25 | #define TEGRA_AES_INT_ENB 0x1040 | ||
26 | #define TEGRA_AES_CONFIG 0x1044 | ||
27 | #define TEGRA_AES_IRAM_ACCESS_CFG 0x10A0 | ||
28 | #define TEGRA_AES_SECURE_DEST_ADDR 0x1100 | ||
29 | #define TEGRA_AES_SECURE_INPUT_SELECT 0x1104 | ||
30 | #define TEGRA_AES_SECURE_CONFIG 0x1108 | ||
31 | #define TEGRA_AES_SECURE_CONFIG_EXT 0x110C | ||
32 | #define TEGRA_AES_SECURE_SECURITY 0x1110 | ||
33 | #define TEGRA_AES_SECURE_HASH_RESULT0 0x1120 | ||
34 | #define TEGRA_AES_SECURE_HASH_RESULT1 0x1124 | ||
35 | #define TEGRA_AES_SECURE_HASH_RESULT2 0x1128 | ||
36 | #define TEGRA_AES_SECURE_HASH_RESULT3 0x112C | ||
37 | #define TEGRA_AES_SECURE_SEC_SEL0 0x1140 | ||
38 | #define TEGRA_AES_SECURE_SEC_SEL1 0x1144 | ||
39 | #define TEGRA_AES_SECURE_SEC_SEL2 0x1148 | ||
40 | #define TEGRA_AES_SECURE_SEC_SEL3 0x114C | ||
41 | #define TEGRA_AES_SECURE_SEC_SEL4 0x1150 | ||
42 | #define TEGRA_AES_SECURE_SEC_SEL5 0x1154 | ||
43 | #define TEGRA_AES_SECURE_SEC_SEL6 0x1158 | ||
44 | #define TEGRA_AES_SECURE_SEC_SEL7 0x115C | ||
45 | |||
46 | /* interrupt status reg masks and shifts */ | ||
47 | #define TEGRA_AES_ENGINE_BUSY_FIELD BIT(0) | ||
48 | #define TEGRA_AES_ICQ_EMPTY_FIELD BIT(3) | ||
49 | #define TEGRA_AES_DMA_BUSY_FIELD BIT(23) | ||
50 | |||
51 | /* secure select reg masks and shifts */ | ||
52 | #define TEGRA_AES_SECURE_SEL0_KEYREAD_ENB0_FIELD BIT(0) | ||
53 | |||
54 | /* secure config ext masks and shifts */ | ||
55 | #define TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD BIT(15) | ||
56 | |||
57 | /* secure config masks and shifts */ | ||
58 | #define TEGRA_AES_SECURE_KEY_INDEX_SHIFT 20 | ||
59 | #define TEGRA_AES_SECURE_KEY_INDEX_FIELD (0x1F << TEGRA_AES_SECURE_KEY_INDEX_SHIFT) | ||
60 | #define TEGRA_AES_SECURE_BLOCK_CNT_SHIFT 0 | ||
61 | #define TEGRA_AES_SECURE_BLOCK_CNT_FIELD (0xFFFFF << TEGRA_AES_SECURE_BLOCK_CNT_SHIFT) | ||
62 | |||
63 | /* stream interface select masks and shifts */ | ||
64 | #define TEGRA_AES_CMDQ_CTRL_UCMDQEN_FIELD BIT(0) | ||
65 | #define TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD BIT(1) | ||
66 | #define TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD BIT(4) | ||
67 | #define TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD BIT(5) | ||
68 | |||
69 | /* config register masks and shifts */ | ||
70 | #define TEGRA_AES_CONFIG_ENDIAN_ENB_FIELD BIT(10) | ||
71 | #define TEGRA_AES_CONFIG_MODE_SEL_SHIFT 0 | ||
72 | #define TEGRA_AES_CONFIG_MODE_SEL_FIELD (0x1F << TEGRA_AES_CONFIG_MODE_SEL_SHIFT) | ||
73 | |||
74 | /* extended config */ | ||
75 | #define TEGRA_AES_SECURE_OFFSET_CNT_SHIFT 24 | ||
76 | #define TEGRA_AES_SECURE_OFFSET_CNT_FIELD (0xFF << TEGRA_AES_SECURE_OFFSET_CNT_SHIFT) | ||
77 | #define TEGRA_AES_SECURE_KEYSCHED_GEN_FIELD BIT(15) | ||
78 | |||
79 | /* init vector select */ | ||
80 | #define TEGRA_AES_SECURE_IV_SELECT_SHIFT 10 | ||
81 | #define TEGRA_AES_SECURE_IV_SELECT_FIELD BIT(10) | ||
82 | |||
83 | /* secure engine input */ | ||
84 | #define TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT 28 | ||
85 | #define TEGRA_AES_SECURE_INPUT_ALG_SEL_FIELD (0xF << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) | ||
86 | #define TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT 16 | ||
87 | #define TEGRA_AES_SECURE_INPUT_KEY_LEN_FIELD (0xFFF << TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) | ||
88 | #define TEGRA_AES_SECURE_RNG_ENB_FIELD BIT(11) | ||
89 | #define TEGRA_AES_SECURE_CORE_SEL_SHIFT 9 | ||
90 | #define TEGRA_AES_SECURE_CORE_SEL_FIELD BIT(9) | ||
91 | #define TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT 7 | ||
92 | #define TEGRA_AES_SECURE_VCTRAM_SEL_FIELD (0x3 << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) | ||
93 | #define TEGRA_AES_SECURE_INPUT_SEL_SHIFT 5 | ||
94 | #define TEGRA_AES_SECURE_INPUT_SEL_FIELD (0x3 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) | ||
95 | #define TEGRA_AES_SECURE_XOR_POS_SHIFT 3 | ||
96 | #define TEGRA_AES_SECURE_XOR_POS_FIELD (0x3 << TEGRA_AES_SECURE_XOR_POS_SHIFT) | ||
97 | #define TEGRA_AES_SECURE_HASH_ENB_FIELD BIT(2) | ||
98 | #define TEGRA_AES_SECURE_ON_THE_FLY_FIELD BIT(0) | ||
99 | |||
100 | /* interrupt error mask */ | ||
101 | #define TEGRA_AES_INT_ERROR_MASK 0xFFF000 | ||
102 | |||
103 | #endif | ||
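The SHIFT/FIELD pairs above are meant for read-modify-write register updates, as seen in aes_set_key() in the removed C file. A minimal sketch of one such update, using only constants from this header; the helper name is hypothetical:

	/* Select a hardware key slot in a TEGRA_AES_SECURE_CONFIG value:
	 * clear the index field, then insert the new slot number. */
	static inline u32 secure_config_set_slot(u32 reg, u32 slot)
	{
		reg &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
		reg |= (slot << TEGRA_AES_SECURE_KEY_INDEX_SHIFT) &
		       TEGRA_AES_SECURE_KEY_INDEX_FIELD;
		return reg;
	}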