diff options
Diffstat (limited to 'drivers/crypto/caam')
| -rw-r--r-- | drivers/crypto/caam/Kconfig | 46 | ||||
| -rw-r--r-- | drivers/crypto/caam/Makefile | 18 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamalg.c | 338 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamalg_desc.c | 147 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamalg_desc.h | 4 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamalg_qi.c | 267 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamalg_qi2.c | 202 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamhash.c | 329 | ||||
| -rw-r--r-- | drivers/crypto/caam/caampkc.c | 177 | ||||
| -rw-r--r-- | drivers/crypto/caam/caampkc.h | 9 | ||||
| -rw-r--r-- | drivers/crypto/caam/caamrng.c | 76 | ||||
| -rw-r--r-- | drivers/crypto/caam/ctrl.c | 56 | ||||
| -rw-r--r-- | drivers/crypto/caam/desc_constr.h | 11 | ||||
| -rw-r--r-- | drivers/crypto/caam/error.c | 8 | ||||
| -rw-r--r-- | drivers/crypto/caam/error.h | 2 | ||||
| -rw-r--r-- | drivers/crypto/caam/intern.h | 102 | ||||
| -rw-r--r-- | drivers/crypto/caam/jr.c | 43 | ||||
| -rw-r--r-- | drivers/crypto/caam/key_gen.c | 28 | ||||
| -rw-r--r-- | drivers/crypto/caam/qi.c | 52 | ||||
| -rw-r--r-- | drivers/crypto/caam/sg_sw_qm.h | 18 | ||||
| -rw-r--r-- | drivers/crypto/caam/sg_sw_qm2.h | 18 | ||||
| -rw-r--r-- | drivers/crypto/caam/sg_sw_sec4.h | 26 |
22 files changed, 999 insertions, 978 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 577c9844b322..3720ddabb507 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
| @@ -2,6 +2,12 @@ | |||
| 2 | config CRYPTO_DEV_FSL_CAAM_COMMON | 2 | config CRYPTO_DEV_FSL_CAAM_COMMON |
| 3 | tristate | 3 | tristate |
| 4 | 4 | ||
| 5 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
| 6 | tristate | ||
| 7 | |||
| 8 | config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
| 9 | tristate | ||
| 10 | |||
| 5 | config CRYPTO_DEV_FSL_CAAM | 11 | config CRYPTO_DEV_FSL_CAAM |
| 6 | tristate "Freescale CAAM-Multicore platform driver backend" | 12 | tristate "Freescale CAAM-Multicore platform driver backend" |
| 7 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE | 13 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE |
| @@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG | |||
| 25 | Selecting this will enable printing of various debug | 31 | Selecting this will enable printing of various debug |
| 26 | information in the CAAM driver. | 32 | information in the CAAM driver. |
| 27 | 33 | ||
| 28 | config CRYPTO_DEV_FSL_CAAM_JR | 34 | menuconfig CRYPTO_DEV_FSL_CAAM_JR |
| 29 | tristate "Freescale CAAM Job Ring driver backend" | 35 | tristate "Freescale CAAM Job Ring driver backend" |
| 30 | default y | 36 | default y |
| 31 | help | 37 | help |
| @@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD | |||
| 86 | threshold. Range is 1-65535. | 92 | threshold. Range is 1-65535. |
| 87 | 93 | ||
| 88 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | 94 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
| 89 | tristate "Register algorithm implementations with the Crypto API" | 95 | bool "Register algorithm implementations with the Crypto API" |
| 90 | default y | 96 | default y |
| 97 | select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
| 91 | select CRYPTO_AEAD | 98 | select CRYPTO_AEAD |
| 92 | select CRYPTO_AUTHENC | 99 | select CRYPTO_AUTHENC |
| 93 | select CRYPTO_BLKCIPHER | 100 | select CRYPTO_BLKCIPHER |
| @@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
| 97 | scatterlist crypto API (such as the linux native IPSec | 104 | scatterlist crypto API (such as the linux native IPSec |
| 98 | stack) to the SEC4 via job ring. | 105 | stack) to the SEC4 via job ring. |
| 99 | 106 | ||
| 100 | To compile this as a module, choose M here: the module | ||
| 101 | will be called caamalg. | ||
| 102 | |||
| 103 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI | 107 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI |
| 104 | tristate "Queue Interface as Crypto API backend" | 108 | bool "Queue Interface as Crypto API backend" |
| 105 | depends on FSL_DPAA && NET | 109 | depends on FSL_DPAA && NET |
| 106 | default y | 110 | default y |
| 111 | select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
| 107 | select CRYPTO_AUTHENC | 112 | select CRYPTO_AUTHENC |
| 108 | select CRYPTO_BLKCIPHER | 113 | select CRYPTO_BLKCIPHER |
| 109 | help | 114 | help |
| @@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI | |||
| 114 | assigned to the kernel should also be more than the number of | 119 | assigned to the kernel should also be more than the number of |
| 115 | job rings. | 120 | job rings. |
| 116 | 121 | ||
| 117 | To compile this as a module, choose M here: the module | ||
| 118 | will be called caamalg_qi. | ||
| 119 | |||
| 120 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | 122 | config CRYPTO_DEV_FSL_CAAM_AHASH_API |
| 121 | tristate "Register hash algorithm implementations with Crypto API" | 123 | bool "Register hash algorithm implementations with Crypto API" |
| 122 | default y | 124 | default y |
| 125 | select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
| 123 | select CRYPTO_HASH | 126 | select CRYPTO_HASH |
| 124 | help | 127 | help |
| 125 | Selecting this will offload ahash for users of the | 128 | Selecting this will offload ahash for users of the |
| 126 | scatterlist crypto API to the SEC4 via job ring. | 129 | scatterlist crypto API to the SEC4 via job ring. |
| 127 | 130 | ||
| 128 | To compile this as a module, choose M here: the module | ||
| 129 | will be called caamhash. | ||
| 130 | |||
| 131 | config CRYPTO_DEV_FSL_CAAM_PKC_API | 131 | config CRYPTO_DEV_FSL_CAAM_PKC_API |
| 132 | tristate "Register public key cryptography implementations with Crypto API" | 132 | bool "Register public key cryptography implementations with Crypto API" |
| 133 | default y | 133 | default y |
| 134 | select CRYPTO_RSA | 134 | select CRYPTO_RSA |
| 135 | help | 135 | help |
| 136 | Selecting this will allow SEC Public key support for RSA. | 136 | Selecting this will allow SEC Public key support for RSA. |
| 137 | Supported cryptographic primitives: encryption, decryption, | 137 | Supported cryptographic primitives: encryption, decryption, |
| 138 | signature and verification. | 138 | signature and verification. |
| 139 | To compile this as a module, choose M here: the module | ||
| 140 | will be called caam_pkc. | ||
| 141 | 139 | ||
| 142 | config CRYPTO_DEV_FSL_CAAM_RNG_API | 140 | config CRYPTO_DEV_FSL_CAAM_RNG_API |
| 143 | tristate "Register caam device for hwrng API" | 141 | bool "Register caam device for hwrng API" |
| 144 | default y | 142 | default y |
| 145 | select CRYPTO_RNG | 143 | select CRYPTO_RNG |
| 146 | select HW_RANDOM | 144 | select HW_RANDOM |
| @@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API | |||
| 148 | Selecting this will register the SEC4 hardware rng to | 146 | Selecting this will register the SEC4 hardware rng to |
| 149 | the hw_random API for suppying the kernel entropy pool. | 147 | the hw_random API for suppying the kernel entropy pool. |
| 150 | 148 | ||
| 151 | To compile this as a module, choose M here: the module | ||
| 152 | will be called caamrng. | ||
| 153 | |||
| 154 | endif # CRYPTO_DEV_FSL_CAAM_JR | 149 | endif # CRYPTO_DEV_FSL_CAAM_JR |
| 155 | 150 | ||
| 156 | endif # CRYPTO_DEV_FSL_CAAM | 151 | endif # CRYPTO_DEV_FSL_CAAM |
| @@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM | |||
| 160 | depends on FSL_MC_DPIO | 155 | depends on FSL_MC_DPIO |
| 161 | depends on NETDEVICES | 156 | depends on NETDEVICES |
| 162 | select CRYPTO_DEV_FSL_CAAM_COMMON | 157 | select CRYPTO_DEV_FSL_CAAM_COMMON |
| 158 | select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
| 159 | select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
| 163 | select CRYPTO_BLKCIPHER | 160 | select CRYPTO_BLKCIPHER |
| 164 | select CRYPTO_AUTHENC | 161 | select CRYPTO_AUTHENC |
| 165 | select CRYPTO_AEAD | 162 | select CRYPTO_AEAD |
| @@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM | |||
| 171 | 168 | ||
| 172 | To compile this as a module, choose M here: the module | 169 | To compile this as a module, choose M here: the module |
| 173 | will be called dpaa2_caam. | 170 | will be called dpaa2_caam. |
| 174 | |||
| 175 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
| 176 | def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ | ||
| 177 | CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \ | ||
| 178 | CRYPTO_DEV_FSL_DPAA2_CAAM) | ||
| 179 | |||
| 180 | config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
| 181 | def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \ | ||
| 182 | CRYPTO_DEV_FSL_DPAA2_CAAM) | ||
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index 7bbfd06a11ff..9ab4e81ea21e 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
| @@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\" | |||
| 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o | 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o |
| 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
| 13 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o | 13 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o |
| 14 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | ||
| 15 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o | ||
| 16 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o | 14 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o |
| 17 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | ||
| 18 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o | 15 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o |
| 19 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | ||
| 20 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o | ||
| 21 | 16 | ||
| 22 | caam-objs := ctrl.o | 17 | caam-y := ctrl.o |
| 23 | caam_jr-objs := jr.o key_gen.o | 18 | caam_jr-y := jr.o key_gen.o |
| 24 | caam_pkc-y := caampkc.o pkc_desc.o | 19 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
| 20 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o | ||
| 21 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | ||
| 22 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | ||
| 23 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o | ||
| 24 | |||
| 25 | caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o | ||
| 25 | ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) | 26 | ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) |
| 26 | ccflags-y += -DCONFIG_CAAM_QI | 27 | ccflags-y += -DCONFIG_CAAM_QI |
| 27 | caam-objs += qi.o | ||
| 28 | endif | 28 | endif |
| 29 | 29 | ||
| 30 | obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o | 30 | obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index c0ece44f303b..43f18253e5b6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -77,13 +77,6 @@ | |||
| 77 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) | 77 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) |
| 78 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) | 78 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) |
| 79 | 79 | ||
| 80 | #ifdef DEBUG | ||
| 81 | /* for print_hex_dumps with line references */ | ||
| 82 | #define debug(format, arg...) printk(format, arg) | ||
| 83 | #else | ||
| 84 | #define debug(format, arg...) | ||
| 85 | #endif | ||
| 86 | |||
| 87 | struct caam_alg_entry { | 80 | struct caam_alg_entry { |
| 88 | int class1_alg_type; | 81 | int class1_alg_type; |
| 89 | int class2_alg_type; | 82 | int class2_alg_type; |
| @@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead, | |||
| 583 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 576 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 584 | goto badkey; | 577 | goto badkey; |
| 585 | 578 | ||
| 586 | #ifdef DEBUG | 579 | dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", |
| 587 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", | ||
| 588 | keys.authkeylen + keys.enckeylen, keys.enckeylen, | 580 | keys.authkeylen + keys.enckeylen, keys.enckeylen, |
| 589 | keys.authkeylen); | 581 | keys.authkeylen); |
| 590 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 582 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
| 591 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 583 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 592 | #endif | ||
| 593 | 584 | ||
| 594 | /* | 585 | /* |
| 595 | * If DKP is supported, use it in the shared descriptor to generate | 586 | * If DKP is supported, use it in the shared descriptor to generate |
| @@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead, | |||
| 623 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); | 614 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
| 624 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + | 615 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + |
| 625 | keys.enckeylen, ctx->dir); | 616 | keys.enckeylen, ctx->dir); |
| 626 | #ifdef DEBUG | 617 | |
| 627 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 618 | print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", |
| 628 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 619 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
| 629 | ctx->adata.keylen_pad + keys.enckeylen, 1); | 620 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
| 630 | #endif | ||
| 631 | 621 | ||
| 632 | skip_split_key: | 622 | skip_split_key: |
| 633 | ctx->cdata.keylen = keys.enckeylen; | 623 | ctx->cdata.keylen = keys.enckeylen; |
| @@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
| 678 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 668 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 679 | struct device *jrdev = ctx->jrdev; | 669 | struct device *jrdev = ctx->jrdev; |
| 680 | 670 | ||
| 681 | #ifdef DEBUG | 671 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
| 682 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 672 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 683 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 684 | #endif | ||
| 685 | 673 | ||
| 686 | memcpy(ctx->key, key, keylen); | 674 | memcpy(ctx->key, key, keylen); |
| 687 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); | 675 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); |
| @@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
| 699 | if (keylen < 4) | 687 | if (keylen < 4) |
| 700 | return -EINVAL; | 688 | return -EINVAL; |
| 701 | 689 | ||
| 702 | #ifdef DEBUG | 690 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
| 703 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 691 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 704 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 705 | #endif | ||
| 706 | 692 | ||
| 707 | memcpy(ctx->key, key, keylen); | 693 | memcpy(ctx->key, key, keylen); |
| 708 | 694 | ||
| @@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
| 725 | if (keylen < 4) | 711 | if (keylen < 4) |
| 726 | return -EINVAL; | 712 | return -EINVAL; |
| 727 | 713 | ||
| 728 | #ifdef DEBUG | 714 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
| 729 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 715 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 730 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 731 | #endif | ||
| 732 | 716 | ||
| 733 | memcpy(ctx->key, key, keylen); | 717 | memcpy(ctx->key, key, keylen); |
| 734 | 718 | ||
| @@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
| 757 | OP_ALG_AAI_CTR_MOD128); | 741 | OP_ALG_AAI_CTR_MOD128); |
| 758 | const bool is_rfc3686 = alg->caam.rfc3686; | 742 | const bool is_rfc3686 = alg->caam.rfc3686; |
| 759 | 743 | ||
| 760 | #ifdef DEBUG | 744 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
| 761 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 745 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 762 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 763 | #endif | ||
| 764 | /* | 746 | /* |
| 765 | * AES-CTR needs to load IV in CONTEXT1 reg | 747 | * AES-CTR needs to load IV in CONTEXT1 reg |
| 766 | * at an offset of 128bits (16bytes) | 748 | * at an offset of 128bits (16bytes) |
| @@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, | |||
| 916 | } | 898 | } |
| 917 | 899 | ||
| 918 | if (iv_dma) | 900 | if (iv_dma) |
| 919 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 901 | dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL); |
| 920 | if (sec4_sg_bytes) | 902 | if (sec4_sg_bytes) |
| 921 | dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, | 903 | dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, |
| 922 | DMA_TO_DEVICE); | 904 | DMA_TO_DEVICE); |
| @@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 949 | struct aead_request *req = context; | 931 | struct aead_request *req = context; |
| 950 | struct aead_edesc *edesc; | 932 | struct aead_edesc *edesc; |
| 951 | 933 | ||
| 952 | #ifdef DEBUG | 934 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 953 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 954 | #endif | ||
| 955 | 935 | ||
| 956 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | 936 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); |
| 957 | 937 | ||
| @@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 971 | struct aead_request *req = context; | 951 | struct aead_request *req = context; |
| 972 | struct aead_edesc *edesc; | 952 | struct aead_edesc *edesc; |
| 973 | 953 | ||
| 974 | #ifdef DEBUG | 954 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 975 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 976 | #endif | ||
| 977 | 955 | ||
| 978 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | 956 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); |
| 979 | 957 | ||
| @@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 1001 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 979 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
| 1002 | int ivsize = crypto_skcipher_ivsize(skcipher); | 980 | int ivsize = crypto_skcipher_ivsize(skcipher); |
| 1003 | 981 | ||
| 1004 | #ifdef DEBUG | 982 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 1005 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 1006 | #endif | ||
| 1007 | 983 | ||
| 1008 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); | 984 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); |
| 1009 | 985 | ||
| 1010 | if (err) | 986 | if (err) |
| 1011 | caam_jr_strstatus(jrdev, err); | 987 | caam_jr_strstatus(jrdev, err); |
| 1012 | 988 | ||
| 1013 | #ifdef DEBUG | ||
| 1014 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | ||
| 1015 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
| 1016 | edesc->src_nents > 1 ? 100 : ivsize, 1); | ||
| 1017 | #endif | ||
| 1018 | caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", | ||
| 1019 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | ||
| 1020 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | ||
| 1021 | |||
| 1022 | skcipher_unmap(jrdev, edesc, req); | 989 | skcipher_unmap(jrdev, edesc, req); |
| 1023 | 990 | ||
| 1024 | /* | 991 | /* |
| 1025 | * The crypto API expects us to set the IV (req->iv) to the last | 992 | * The crypto API expects us to set the IV (req->iv) to the last |
| 1026 | * ciphertext block. This is used e.g. by the CTS mode. | 993 | * ciphertext block (CBC mode) or last counter (CTR mode). |
| 994 | * This is used e.g. by the CTS mode. | ||
| 1027 | */ | 995 | */ |
| 1028 | if (ivsize) | 996 | if (ivsize) { |
| 1029 | scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - | 997 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, |
| 1030 | ivsize, ivsize, 0); | 998 | ivsize); |
| 999 | |||
| 1000 | print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", | ||
| 1001 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
| 1002 | edesc->src_nents > 1 ? 100 : ivsize, 1); | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | caam_dump_sg("dst @" __stringify(__LINE__)": ", | ||
| 1006 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | ||
| 1007 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | ||
| 1031 | 1008 | ||
| 1032 | kfree(edesc); | 1009 | kfree(edesc); |
| 1033 | 1010 | ||
| @@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 1039 | { | 1016 | { |
| 1040 | struct skcipher_request *req = context; | 1017 | struct skcipher_request *req = context; |
| 1041 | struct skcipher_edesc *edesc; | 1018 | struct skcipher_edesc *edesc; |
| 1042 | #ifdef DEBUG | ||
| 1043 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1019 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
| 1044 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1020 | int ivsize = crypto_skcipher_ivsize(skcipher); |
| 1045 | 1021 | ||
| 1046 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 1022 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 1047 | #endif | ||
| 1048 | 1023 | ||
| 1049 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); | 1024 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); |
| 1050 | if (err) | 1025 | if (err) |
| 1051 | caam_jr_strstatus(jrdev, err); | 1026 | caam_jr_strstatus(jrdev, err); |
| 1052 | 1027 | ||
| 1053 | #ifdef DEBUG | 1028 | skcipher_unmap(jrdev, edesc, req); |
| 1054 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | 1029 | |
| 1055 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | 1030 | /* |
| 1056 | #endif | 1031 | * The crypto API expects us to set the IV (req->iv) to the last |
| 1057 | caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", | 1032 | * ciphertext block (CBC mode) or last counter (CTR mode). |
| 1033 | * This is used e.g. by the CTS mode. | ||
| 1034 | */ | ||
| 1035 | if (ivsize) { | ||
| 1036 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, | ||
| 1037 | ivsize); | ||
| 1038 | |||
| 1039 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | ||
| 1040 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
| 1041 | ivsize, 1); | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | caam_dump_sg("dst @" __stringify(__LINE__)": ", | ||
| 1058 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1045 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 1059 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1046 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
| 1060 | 1047 | ||
| 1061 | skcipher_unmap(jrdev, edesc, req); | ||
| 1062 | kfree(edesc); | 1048 | kfree(edesc); |
| 1063 | 1049 | ||
| 1064 | skcipher_request_complete(req, err); | 1050 | skcipher_request_complete(req, err); |
| @@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req, | |||
| 1106 | if (unlikely(req->src != req->dst)) { | 1092 | if (unlikely(req->src != req->dst)) { |
| 1107 | if (!edesc->mapped_dst_nents) { | 1093 | if (!edesc->mapped_dst_nents) { |
| 1108 | dst_dma = 0; | 1094 | dst_dma = 0; |
| 1095 | out_options = 0; | ||
| 1109 | } else if (edesc->mapped_dst_nents == 1) { | 1096 | } else if (edesc->mapped_dst_nents == 1) { |
| 1110 | dst_dma = sg_dma_address(req->dst); | 1097 | dst_dma = sg_dma_address(req->dst); |
| 1111 | out_options = 0; | 1098 | out_options = 0; |
| @@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
| 1249 | { | 1236 | { |
| 1250 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1237 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
| 1251 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1238 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
| 1239 | struct device *jrdev = ctx->jrdev; | ||
| 1252 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1240 | int ivsize = crypto_skcipher_ivsize(skcipher); |
| 1253 | u32 *desc = edesc->hw_desc; | 1241 | u32 *desc = edesc->hw_desc; |
| 1254 | u32 *sh_desc; | 1242 | u32 *sh_desc; |
| @@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
| 1256 | dma_addr_t src_dma, dst_dma, ptr; | 1244 | dma_addr_t src_dma, dst_dma, ptr; |
| 1257 | int len, sec4_sg_index = 0; | 1245 | int len, sec4_sg_index = 0; |
| 1258 | 1246 | ||
| 1259 | #ifdef DEBUG | 1247 | print_hex_dump_debug("presciv@"__stringify(__LINE__)": ", |
| 1260 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", | 1248 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); |
| 1261 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | 1249 | dev_dbg(jrdev, "asked=%d, cryptlen%d\n", |
| 1262 | pr_err("asked=%d, cryptlen%d\n", | ||
| 1263 | (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); | 1250 | (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); |
| 1264 | #endif | 1251 | |
| 1265 | caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", | 1252 | caam_dump_sg("src @" __stringify(__LINE__)": ", |
| 1266 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1253 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 1267 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); | 1254 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); |
| 1268 | 1255 | ||
| @@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
| 1285 | if (likely(req->src == req->dst)) { | 1272 | if (likely(req->src == req->dst)) { |
| 1286 | dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); | 1273 | dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); |
| 1287 | out_options = in_options; | 1274 | out_options = in_options; |
| 1288 | } else if (edesc->mapped_dst_nents == 1) { | 1275 | } else if (!ivsize && edesc->mapped_dst_nents == 1) { |
| 1289 | dst_dma = sg_dma_address(req->dst); | 1276 | dst_dma = sg_dma_address(req->dst); |
| 1290 | } else { | 1277 | } else { |
| 1291 | dst_dma = edesc->sec4_sg_dma + sec4_sg_index * | 1278 | dst_dma = edesc->sec4_sg_dma + sec4_sg_index * |
| @@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
| 1293 | out_options = LDST_SGF; | 1280 | out_options = LDST_SGF; |
| 1294 | } | 1281 | } |
| 1295 | 1282 | ||
| 1296 | append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); | 1283 | append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); |
| 1297 | } | 1284 | } |
| 1298 | 1285 | ||
| 1299 | /* | 1286 | /* |
| @@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1309 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 1296 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
| 1310 | GFP_KERNEL : GFP_ATOMIC; | 1297 | GFP_KERNEL : GFP_ATOMIC; |
| 1311 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 1298 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
| 1299 | int src_len, dst_len = 0; | ||
| 1312 | struct aead_edesc *edesc; | 1300 | struct aead_edesc *edesc; |
| 1313 | int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; | 1301 | int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; |
| 1314 | unsigned int authsize = ctx->authsize; | 1302 | unsigned int authsize = ctx->authsize; |
| 1315 | 1303 | ||
| 1316 | if (unlikely(req->dst != req->src)) { | 1304 | if (unlikely(req->dst != req->src)) { |
| 1317 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 1305 | src_len = req->assoclen + req->cryptlen; |
| 1318 | req->cryptlen); | 1306 | dst_len = src_len + (encrypt ? authsize : (-authsize)); |
| 1307 | |||
| 1308 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 1319 | if (unlikely(src_nents < 0)) { | 1309 | if (unlikely(src_nents < 0)) { |
| 1320 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", | 1310 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", |
| 1321 | req->assoclen + req->cryptlen); | 1311 | src_len); |
| 1322 | return ERR_PTR(src_nents); | 1312 | return ERR_PTR(src_nents); |
| 1323 | } | 1313 | } |
| 1324 | 1314 | ||
| 1325 | dst_nents = sg_nents_for_len(req->dst, req->assoclen + | 1315 | dst_nents = sg_nents_for_len(req->dst, dst_len); |
| 1326 | req->cryptlen + | ||
| 1327 | (encrypt ? authsize : | ||
| 1328 | (-authsize))); | ||
| 1329 | if (unlikely(dst_nents < 0)) { | 1316 | if (unlikely(dst_nents < 0)) { |
| 1330 | dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", | 1317 | dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", |
| 1331 | req->assoclen + req->cryptlen + | 1318 | dst_len); |
| 1332 | (encrypt ? authsize : (-authsize))); | ||
| 1333 | return ERR_PTR(dst_nents); | 1319 | return ERR_PTR(dst_nents); |
| 1334 | } | 1320 | } |
| 1335 | } else { | 1321 | } else { |
| 1336 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 1322 | src_len = req->assoclen + req->cryptlen + |
| 1337 | req->cryptlen + | 1323 | (encrypt ? authsize : 0); |
| 1338 | (encrypt ? authsize : 0)); | 1324 | |
| 1325 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 1339 | if (unlikely(src_nents < 0)) { | 1326 | if (unlikely(src_nents < 0)) { |
| 1340 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", | 1327 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", |
| 1341 | req->assoclen + req->cryptlen + | 1328 | src_len); |
| 1342 | (encrypt ? authsize : 0)); | ||
| 1343 | return ERR_PTR(src_nents); | 1329 | return ERR_PTR(src_nents); |
| 1344 | } | 1330 | } |
| 1345 | } | 1331 | } |
| @@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1380 | } | 1366 | } |
| 1381 | } | 1367 | } |
| 1382 | 1368 | ||
| 1369 | /* | ||
| 1370 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
| 1371 | * the end of the table by allocating more S/G entries. | ||
| 1372 | */ | ||
| 1383 | sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; | 1373 | sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; |
| 1384 | sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1374 | if (mapped_dst_nents > 1) |
| 1375 | sec4_sg_len += pad_sg_nents(mapped_dst_nents); | ||
| 1376 | else | ||
| 1377 | sec4_sg_len = pad_sg_nents(sec4_sg_len); | ||
| 1378 | |||
| 1385 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | 1379 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
| 1386 | 1380 | ||
| 1387 | /* allocate space for base edesc and hw desc commands, link tables */ | 1381 | /* allocate space for base edesc and hw desc commands, link tables */ |
| @@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1403 | 1397 | ||
| 1404 | sec4_sg_index = 0; | 1398 | sec4_sg_index = 0; |
| 1405 | if (mapped_src_nents > 1) { | 1399 | if (mapped_src_nents > 1) { |
| 1406 | sg_to_sec4_sg_last(req->src, mapped_src_nents, | 1400 | sg_to_sec4_sg_last(req->src, src_len, |
| 1407 | edesc->sec4_sg + sec4_sg_index, 0); | 1401 | edesc->sec4_sg + sec4_sg_index, 0); |
| 1408 | sec4_sg_index += mapped_src_nents; | 1402 | sec4_sg_index += mapped_src_nents; |
| 1409 | } | 1403 | } |
| 1410 | if (mapped_dst_nents > 1) { | 1404 | if (mapped_dst_nents > 1) { |
| 1411 | sg_to_sec4_sg_last(req->dst, mapped_dst_nents, | 1405 | sg_to_sec4_sg_last(req->dst, dst_len, |
| 1412 | edesc->sec4_sg + sec4_sg_index, 0); | 1406 | edesc->sec4_sg + sec4_sg_index, 0); |
| 1413 | } | 1407 | } |
| 1414 | 1408 | ||
| @@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req) | |||
| 1446 | 1440 | ||
| 1447 | /* Create and submit job descriptor */ | 1441 | /* Create and submit job descriptor */ |
| 1448 | init_gcm_job(req, edesc, all_contig, true); | 1442 | init_gcm_job(req, edesc, all_contig, true); |
| 1449 | #ifdef DEBUG | 1443 | |
| 1450 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1444 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
| 1451 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1445 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| 1452 | desc_bytes(edesc->hw_desc), 1); | 1446 | desc_bytes(edesc->hw_desc), 1); |
| 1453 | #endif | ||
| 1454 | 1447 | ||
| 1455 | desc = edesc->hw_desc; | 1448 | desc = edesc->hw_desc; |
| 1456 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | 1449 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
| @@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req) | |||
| 1556 | 1549 | ||
| 1557 | /* Create and submit job descriptor */ | 1550 | /* Create and submit job descriptor */ |
| 1558 | init_authenc_job(req, edesc, all_contig, true); | 1551 | init_authenc_job(req, edesc, all_contig, true); |
| 1559 | #ifdef DEBUG | 1552 | |
| 1560 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1553 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
| 1561 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1554 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| 1562 | desc_bytes(edesc->hw_desc), 1); | 1555 | desc_bytes(edesc->hw_desc), 1); |
| 1563 | #endif | ||
| 1564 | 1556 | ||
| 1565 | desc = edesc->hw_desc; | 1557 | desc = edesc->hw_desc; |
| 1566 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | 1558 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
| @@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req) | |||
| 1591 | 1583 | ||
| 1592 | /* Create and submit job descriptor*/ | 1584 | /* Create and submit job descriptor*/ |
| 1593 | init_gcm_job(req, edesc, all_contig, false); | 1585 | init_gcm_job(req, edesc, all_contig, false); |
| 1594 | #ifdef DEBUG | 1586 | |
| 1595 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1587 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
| 1596 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1588 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| 1597 | desc_bytes(edesc->hw_desc), 1); | 1589 | desc_bytes(edesc->hw_desc), 1); |
| 1598 | #endif | ||
| 1599 | 1590 | ||
| 1600 | desc = edesc->hw_desc; | 1591 | desc = edesc->hw_desc; |
| 1601 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | 1592 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); |
| @@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req) | |||
| 1627 | u32 *desc; | 1618 | u32 *desc; |
| 1628 | int ret = 0; | 1619 | int ret = 0; |
| 1629 | 1620 | ||
| 1630 | caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", | 1621 | caam_dump_sg("dec src@" __stringify(__LINE__)": ", |
| 1631 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1622 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 1632 | req->assoclen + req->cryptlen, 1); | 1623 | req->assoclen + req->cryptlen, 1); |
| 1633 | 1624 | ||
| @@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req) | |||
| 1639 | 1630 | ||
| 1640 | /* Create and submit job descriptor*/ | 1631 | /* Create and submit job descriptor*/ |
| 1641 | init_authenc_job(req, edesc, all_contig, false); | 1632 | init_authenc_job(req, edesc, all_contig, false); |
| 1642 | #ifdef DEBUG | 1633 | |
| 1643 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1634 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
| 1644 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1635 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| 1645 | desc_bytes(edesc->hw_desc), 1); | 1636 | desc_bytes(edesc->hw_desc), 1); |
| 1646 | #endif | ||
| 1647 | 1637 | ||
| 1648 | desc = edesc->hw_desc; | 1638 | desc = edesc->hw_desc; |
| 1649 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | 1639 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); |
| @@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1719 | else | 1709 | else |
| 1720 | sec4_sg_ents = mapped_src_nents + !!ivsize; | 1710 | sec4_sg_ents = mapped_src_nents + !!ivsize; |
| 1721 | dst_sg_idx = sec4_sg_ents; | 1711 | dst_sg_idx = sec4_sg_ents; |
| 1722 | sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1712 | |
| 1713 | /* | ||
| 1714 | * Input, output HW S/G tables: [IV, src][dst, IV] | ||
| 1715 | * IV entries point to the same buffer | ||
| 1716 | * If src == dst, S/G entries are reused (S/G tables overlap) | ||
| 1717 | * | ||
| 1718 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
| 1719 | * the end of the table by allocating more S/G entries. Logic: | ||
| 1720 | * if (output S/G) | ||
| 1721 | * pad output S/G, if needed | ||
| 1722 | * else if (input S/G) ... | ||
| 1723 | * pad input S/G, if needed | ||
| 1724 | */ | ||
| 1725 | if (ivsize || mapped_dst_nents > 1) { | ||
| 1726 | if (req->src == req->dst) | ||
| 1727 | sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents); | ||
| 1728 | else | ||
| 1729 | sec4_sg_ents += pad_sg_nents(mapped_dst_nents + | ||
| 1730 | !!ivsize); | ||
| 1731 | } else { | ||
| 1732 | sec4_sg_ents = pad_sg_nents(sec4_sg_ents); | ||
| 1733 | } | ||
| 1734 | |||
| 1723 | sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); | 1735 | sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); |
| 1724 | 1736 | ||
| 1725 | /* | 1737 | /* |
| @@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1744 | 1756 | ||
| 1745 | /* Make sure IV is located in a DMAable area */ | 1757 | /* Make sure IV is located in a DMAable area */ |
| 1746 | if (ivsize) { | 1758 | if (ivsize) { |
| 1747 | iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; | 1759 | iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes; |
| 1748 | memcpy(iv, req->iv, ivsize); | 1760 | memcpy(iv, req->iv, ivsize); |
| 1749 | 1761 | ||
| 1750 | iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); | 1762 | iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL); |
| 1751 | if (dma_mapping_error(jrdev, iv_dma)) { | 1763 | if (dma_mapping_error(jrdev, iv_dma)) { |
| 1752 | dev_err(jrdev, "unable to map IV\n"); | 1764 | dev_err(jrdev, "unable to map IV\n"); |
| 1753 | caam_unmap(jrdev, req->src, req->dst, src_nents, | 1765 | caam_unmap(jrdev, req->src, req->dst, src_nents, |
| @@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1759 | dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); | 1771 | dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); |
| 1760 | } | 1772 | } |
| 1761 | if (dst_sg_idx) | 1773 | if (dst_sg_idx) |
| 1762 | sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + | 1774 | sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg + |
| 1763 | !!ivsize, 0); | 1775 | !!ivsize, 0); |
| 1764 | 1776 | ||
| 1765 | if (mapped_dst_nents > 1) { | 1777 | if (req->src != req->dst && (ivsize || mapped_dst_nents > 1)) |
| 1766 | sg_to_sec4_sg_last(req->dst, mapped_dst_nents, | 1778 | sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg + |
| 1767 | edesc->sec4_sg + dst_sg_idx, 0); | 1779 | dst_sg_idx, 0); |
| 1768 | } | 1780 | |
| 1781 | if (ivsize) | ||
| 1782 | dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx + | ||
| 1783 | mapped_dst_nents, iv_dma, ivsize, 0); | ||
| 1784 | |||
| 1785 | if (ivsize || mapped_dst_nents > 1) | ||
| 1786 | sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + | ||
| 1787 | mapped_dst_nents); | ||
| 1769 | 1788 | ||
| 1770 | if (sec4_sg_bytes) { | 1789 | if (sec4_sg_bytes) { |
| 1771 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1790 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| @@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1782 | 1801 | ||
| 1783 | edesc->iv_dma = iv_dma; | 1802 | edesc->iv_dma = iv_dma; |
| 1784 | 1803 | ||
| 1785 | #ifdef DEBUG | 1804 | print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ", |
| 1786 | print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ", | 1805 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, |
| 1787 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, | 1806 | sec4_sg_bytes, 1); |
| 1788 | sec4_sg_bytes, 1); | ||
| 1789 | #endif | ||
| 1790 | 1807 | ||
| 1791 | return edesc; | 1808 | return edesc; |
| 1792 | } | 1809 | } |
| @@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req) | |||
| 1807 | 1824 | ||
| 1808 | /* Create and submit job descriptor*/ | 1825 | /* Create and submit job descriptor*/ |
| 1809 | init_skcipher_job(req, edesc, true); | 1826 | init_skcipher_job(req, edesc, true); |
| 1810 | #ifdef DEBUG | 1827 | |
| 1811 | print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", | 1828 | print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", |
| 1812 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1829 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| 1813 | desc_bytes(edesc->hw_desc), 1); | 1830 | desc_bytes(edesc->hw_desc), 1); |
| 1814 | #endif | 1831 | |
| 1815 | desc = edesc->hw_desc; | 1832 | desc = edesc->hw_desc; |
| 1816 | ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req); | 1833 | ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req); |
| 1817 | 1834 | ||
| @@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
| 1830 | struct skcipher_edesc *edesc; | 1847 | struct skcipher_edesc *edesc; |
| 1831 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1848 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
| 1832 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1849 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
| 1833 | int ivsize = crypto_skcipher_ivsize(skcipher); | ||
| 1834 | struct device *jrdev = ctx->jrdev; | 1850 | struct device *jrdev = ctx->jrdev; |
| 1835 | u32 *desc; | 1851 | u32 *desc; |
| 1836 | int ret = 0; | 1852 | int ret = 0; |
| @@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
| 1840 | if (IS_ERR(edesc)) | 1856 | if (IS_ERR(edesc)) |
| 1841 | return PTR_ERR(edesc); | 1857 | return PTR_ERR(edesc); |
| 1842 | 1858 | ||
| 1843 | /* | ||
| 1844 | * The crypto API expects us to set the IV (req->iv) to the last | ||
| 1845 | * ciphertext block. | ||
| 1846 | */ | ||
| 1847 | if (ivsize) | ||
| 1848 | scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - | ||
| 1849 | ivsize, ivsize, 0); | ||
| 1850 | |||
| 1851 | /* Create and submit job descriptor*/ | 1859 | /* Create and submit job descriptor*/ |
| 1852 | init_skcipher_job(req, edesc, false); | 1860 | init_skcipher_job(req, edesc, false); |
| 1853 | desc = edesc->hw_desc; | 1861 | desc = edesc->hw_desc; |
| 1854 | #ifdef DEBUG | 1862 | |
| 1855 | print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", | 1863 | print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", |
| 1856 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1864 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| 1857 | desc_bytes(edesc->hw_desc), 1); | 1865 | desc_bytes(edesc->hw_desc), 1); |
| 1858 | #endif | ||
| 1859 | 1866 | ||
| 1860 | ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req); | 1867 | ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req); |
| 1861 | if (!ret) { | 1868 | if (!ret) { |
| @@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm) | |||
| 3444 | caam_exit_common(crypto_aead_ctx(tfm)); | 3451 | caam_exit_common(crypto_aead_ctx(tfm)); |
| 3445 | } | 3452 | } |
| 3446 | 3453 | ||
| 3447 | static void __exit caam_algapi_exit(void) | 3454 | void caam_algapi_exit(void) |
| 3448 | { | 3455 | { |
| 3449 | int i; | 3456 | int i; |
| 3450 | 3457 | ||
| @@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) | |||
| 3489 | alg->exit = caam_aead_exit; | 3496 | alg->exit = caam_aead_exit; |
| 3490 | } | 3497 | } |
| 3491 | 3498 | ||
| 3492 | static int __init caam_algapi_init(void) | 3499 | int caam_algapi_init(struct device *ctrldev) |
| 3493 | { | 3500 | { |
| 3494 | struct device_node *dev_node; | 3501 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
| 3495 | struct platform_device *pdev; | ||
| 3496 | struct caam_drv_private *priv; | ||
| 3497 | int i = 0, err = 0; | 3502 | int i = 0, err = 0; |
| 3498 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; | 3503 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; |
| 3499 | u32 arc4_inst; | 3504 | u32 arc4_inst; |
| 3500 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 3505 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
| 3501 | bool registered = false, gcm_support; | 3506 | bool registered = false, gcm_support; |
| 3502 | 3507 | ||
| 3503 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 3504 | if (!dev_node) { | ||
| 3505 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 3506 | if (!dev_node) | ||
| 3507 | return -ENODEV; | ||
| 3508 | } | ||
| 3509 | |||
| 3510 | pdev = of_find_device_by_node(dev_node); | ||
| 3511 | if (!pdev) { | ||
| 3512 | of_node_put(dev_node); | ||
| 3513 | return -ENODEV; | ||
| 3514 | } | ||
| 3515 | |||
| 3516 | priv = dev_get_drvdata(&pdev->dev); | ||
| 3517 | of_node_put(dev_node); | ||
| 3518 | |||
| 3519 | /* | ||
| 3520 | * If priv is NULL, it's probably because the caam driver wasn't | ||
| 3521 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
| 3522 | */ | ||
| 3523 | if (!priv) { | ||
| 3524 | err = -ENODEV; | ||
| 3525 | goto out_put_dev; | ||
| 3526 | } | ||
| 3527 | |||
| 3528 | |||
| 3529 | /* | 3508 | /* |
| 3530 | * Register crypto algorithms the device supports. | 3509 | * Register crypto algorithms the device supports. |
| 3531 | * First, detect presence and attributes of DES, AES, and MD blocks. | 3510 | * First, detect presence and attributes of DES, AES, and MD blocks. |
| @@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void) | |||
| 3668 | if (registered) | 3647 | if (registered) |
| 3669 | pr_info("caam algorithms registered in /proc/crypto\n"); | 3648 | pr_info("caam algorithms registered in /proc/crypto\n"); |
| 3670 | 3649 | ||
| 3671 | out_put_dev: | ||
| 3672 | put_device(&pdev->dev); | ||
| 3673 | return err; | 3650 | return err; |
| 3674 | } | 3651 | } |
| 3675 | |||
| 3676 | module_init(caam_algapi_init); | ||
| 3677 | module_exit(caam_algapi_exit); | ||
| 3678 | |||
| 3679 | MODULE_LICENSE("GPL"); | ||
| 3680 | MODULE_DESCRIPTION("FSL CAAM support for crypto API"); | ||
| 3681 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 1e1a376edc2f..72531837571e 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c | |||
| @@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type) | |||
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | 35 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); |
| 36 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | 36 | append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT); |
| 37 | OP_ALG_DECRYPT); | ||
| 38 | uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); | 37 | uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); |
| 39 | set_jump_tgt_here(desc, jump_cmd); | 38 | set_jump_tgt_here(desc, jump_cmd); |
| 40 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | 39 | append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT | |
| 41 | OP_ALG_DECRYPT | OP_ALG_AAI_DK); | 40 | OP_ALG_AAI_DK); |
| 42 | set_jump_tgt_here(desc, uncond_jump_cmd); | 41 | set_jump_tgt_here(desc, uncond_jump_cmd); |
| 43 | } | 42 | } |
| 44 | 43 | ||
| @@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | |||
| 115 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | 114 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | |
| 116 | LDST_SRCDST_BYTE_CONTEXT); | 115 | LDST_SRCDST_BYTE_CONTEXT); |
| 117 | 116 | ||
| 118 | #ifdef DEBUG | 117 | print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ", |
| 119 | print_hex_dump(KERN_ERR, | 118 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 120 | "aead null enc shdesc@" __stringify(__LINE__)": ", | 119 | 1); |
| 121 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 122 | #endif | ||
| 123 | } | 120 | } |
| 124 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); | 121 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); |
| 125 | 122 | ||
| @@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | |||
| 204 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | | 201 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | |
| 205 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | 202 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); |
| 206 | 203 | ||
| 207 | #ifdef DEBUG | 204 | print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ", |
| 208 | print_hex_dump(KERN_ERR, | 205 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 209 | "aead null dec shdesc@" __stringify(__LINE__)": ", | 206 | 1); |
| 210 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 211 | #endif | ||
| 212 | } | 207 | } |
| 213 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); | 208 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); |
| 214 | 209 | ||
| @@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | |||
| 358 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | 353 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | |
| 359 | LDST_SRCDST_BYTE_CONTEXT); | 354 | LDST_SRCDST_BYTE_CONTEXT); |
| 360 | 355 | ||
| 361 | #ifdef DEBUG | 356 | print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ", |
| 362 | print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ", | 357 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 363 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 358 | 1); |
| 364 | #endif | ||
| 365 | } | 359 | } |
| 366 | EXPORT_SYMBOL(cnstr_shdsc_aead_encap); | 360 | EXPORT_SYMBOL(cnstr_shdsc_aead_encap); |
| 367 | 361 | ||
| @@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | |||
| 475 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | | 469 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | |
| 476 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | 470 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); |
| 477 | 471 | ||
| 478 | #ifdef DEBUG | 472 | print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ", |
| 479 | print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ", | 473 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 480 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 474 | 1); |
| 481 | #endif | ||
| 482 | } | 475 | } |
| 483 | EXPORT_SYMBOL(cnstr_shdsc_aead_decap); | 476 | EXPORT_SYMBOL(cnstr_shdsc_aead_decap); |
| 484 | 477 | ||
| @@ -613,11 +606,9 @@ copy_iv: | |||
| 613 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | 606 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | |
| 614 | LDST_SRCDST_BYTE_CONTEXT); | 607 | LDST_SRCDST_BYTE_CONTEXT); |
| 615 | 608 | ||
| 616 | #ifdef DEBUG | 609 | print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ", |
| 617 | print_hex_dump(KERN_ERR, | 610 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 618 | "aead givenc shdesc@" __stringify(__LINE__)": ", | 611 | 1); |
| 619 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 620 | #endif | ||
| 621 | } | 612 | } |
| 622 | EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); | 613 | EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); |
| 623 | 614 | ||
| @@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, | |||
| 742 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 733 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
| 743 | LDST_SRCDST_BYTE_CONTEXT); | 734 | LDST_SRCDST_BYTE_CONTEXT); |
| 744 | 735 | ||
| 745 | #ifdef DEBUG | 736 | print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ", |
| 746 | print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ", | 737 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 747 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 738 | 1); |
| 748 | #endif | ||
| 749 | } | 739 | } |
| 750 | EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); | 740 | EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); |
| 751 | 741 | ||
| @@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, | |||
| 838 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | 828 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | |
| 839 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 829 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
| 840 | 830 | ||
| 841 | #ifdef DEBUG | 831 | print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ", |
| 842 | print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ", | 832 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 843 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 833 | 1); |
| 844 | #endif | ||
| 845 | } | 834 | } |
| 846 | EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); | 835 | EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); |
| 847 | 836 | ||
| @@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | |||
| 933 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 922 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
| 934 | LDST_SRCDST_BYTE_CONTEXT); | 923 | LDST_SRCDST_BYTE_CONTEXT); |
| 935 | 924 | ||
| 936 | #ifdef DEBUG | 925 | print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ", |
| 937 | print_hex_dump(KERN_ERR, | 926 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 938 | "rfc4106 enc shdesc@" __stringify(__LINE__)": ", | 927 | 1); |
| 939 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 940 | #endif | ||
| 941 | } | 928 | } |
| 942 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); | 929 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); |
| 943 | 930 | ||
| @@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, | |||
| 1030 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | 1017 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | |
| 1031 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 1018 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
| 1032 | 1019 | ||
| 1033 | #ifdef DEBUG | 1020 | print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ", |
| 1034 | print_hex_dump(KERN_ERR, | 1021 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1035 | "rfc4106 dec shdesc@" __stringify(__LINE__)": ", | 1022 | 1); |
| 1036 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1037 | #endif | ||
| 1038 | } | 1023 | } |
| 1039 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); | 1024 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); |
| 1040 | 1025 | ||
| @@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, | |||
| 1115 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 1100 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
| 1116 | LDST_SRCDST_BYTE_CONTEXT); | 1101 | LDST_SRCDST_BYTE_CONTEXT); |
| 1117 | 1102 | ||
| 1118 | #ifdef DEBUG | 1103 | print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ", |
| 1119 | print_hex_dump(KERN_ERR, | 1104 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1120 | "rfc4543 enc shdesc@" __stringify(__LINE__)": ", | 1105 | 1); |
| 1121 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1122 | #endif | ||
| 1123 | } | 1106 | } |
| 1124 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); | 1107 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); |
| 1125 | 1108 | ||
| @@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, | |||
| 1205 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | 1188 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | |
| 1206 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 1189 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
| 1207 | 1190 | ||
| 1208 | #ifdef DEBUG | 1191 | print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ", |
| 1209 | print_hex_dump(KERN_ERR, | 1192 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1210 | "rfc4543 dec shdesc@" __stringify(__LINE__)": ", | 1193 | 1); |
| 1211 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1212 | #endif | ||
| 1213 | } | 1194 | } |
| 1214 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); | 1195 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); |
| 1215 | 1196 | ||
| @@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, | |||
| 1410 | LDST_OFFSET_SHIFT)); | 1391 | LDST_OFFSET_SHIFT)); |
| 1411 | 1392 | ||
| 1412 | /* Load operation */ | 1393 | /* Load operation */ |
| 1413 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | 1394 | append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | |
| 1414 | OP_ALG_ENCRYPT); | 1395 | OP_ALG_ENCRYPT); |
| 1415 | 1396 | ||
| 1416 | /* Perform operation */ | 1397 | /* Perform operation */ |
| 1417 | skcipher_append_src_dst(desc); | 1398 | skcipher_append_src_dst(desc); |
| 1418 | 1399 | ||
| 1419 | #ifdef DEBUG | 1400 | /* Store IV */ |
| 1420 | print_hex_dump(KERN_ERR, | 1401 | if (ivsize) |
| 1421 | "skcipher enc shdesc@" __stringify(__LINE__)": ", | 1402 | append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | |
| 1422 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1403 | LDST_CLASS_1_CCB | (ctx1_iv_off << |
| 1423 | #endif | 1404 | LDST_OFFSET_SHIFT)); |
| 1405 | |||
| 1406 | print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ", | ||
| 1407 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | ||
| 1408 | 1); | ||
| 1424 | } | 1409 | } |
| 1425 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); | 1410 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); |
| 1426 | 1411 | ||
| @@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, | |||
| 1479 | 1464 | ||
| 1480 | /* Choose operation */ | 1465 | /* Choose operation */ |
| 1481 | if (ctx1_iv_off) | 1466 | if (ctx1_iv_off) |
| 1482 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | 1467 | append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | |
| 1483 | OP_ALG_DECRYPT); | 1468 | OP_ALG_DECRYPT); |
| 1484 | else | 1469 | else |
| 1485 | append_dec_op1(desc, cdata->algtype); | 1470 | append_dec_op1(desc, cdata->algtype); |
| @@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, | |||
| 1487 | /* Perform operation */ | 1472 | /* Perform operation */ |
| 1488 | skcipher_append_src_dst(desc); | 1473 | skcipher_append_src_dst(desc); |
| 1489 | 1474 | ||
| 1490 | #ifdef DEBUG | 1475 | /* Store IV */ |
| 1491 | print_hex_dump(KERN_ERR, | 1476 | if (ivsize) |
| 1492 | "skcipher dec shdesc@" __stringify(__LINE__)": ", | 1477 | append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | |
| 1493 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1478 | LDST_CLASS_1_CCB | (ctx1_iv_off << |
| 1494 | #endif | 1479 | LDST_OFFSET_SHIFT)); |
| 1480 | |||
| 1481 | print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ", | ||
| 1482 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | ||
| 1483 | 1); | ||
| 1495 | } | 1484 | } |
| 1496 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); | 1485 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); |
| 1497 | 1486 | ||
| @@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata) | |||
| 1538 | /* Perform operation */ | 1527 | /* Perform operation */ |
| 1539 | skcipher_append_src_dst(desc); | 1528 | skcipher_append_src_dst(desc); |
| 1540 | 1529 | ||
| 1541 | #ifdef DEBUG | 1530 | /* Store upper 8B of IV */ |
| 1542 | print_hex_dump(KERN_ERR, | 1531 | append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | |
| 1543 | "xts skcipher enc shdesc@" __stringify(__LINE__) ": ", | 1532 | (0x20 << LDST_OFFSET_SHIFT)); |
| 1544 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1533 | |
| 1545 | #endif | 1534 | print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__) |
| 1535 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, | ||
| 1536 | desc, desc_bytes(desc), 1); | ||
| 1546 | } | 1537 | } |
| 1547 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); | 1538 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); |
| 1548 | 1539 | ||
| @@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata) | |||
| 1588 | /* Perform operation */ | 1579 | /* Perform operation */ |
| 1589 | skcipher_append_src_dst(desc); | 1580 | skcipher_append_src_dst(desc); |
| 1590 | 1581 | ||
| 1591 | #ifdef DEBUG | 1582 | /* Store upper 8B of IV */ |
| 1592 | print_hex_dump(KERN_ERR, | 1583 | append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | |
| 1593 | "xts skcipher dec shdesc@" __stringify(__LINE__) ": ", | 1584 | (0x20 << LDST_OFFSET_SHIFT)); |
| 1594 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1585 | |
| 1595 | #endif | 1586 | print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__) |
| 1587 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1588 | desc_bytes(desc), 1); | ||
| 1596 | } | 1589 | } |
| 1597 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); | 1590 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); |
| 1598 | 1591 | ||
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index d5ca42ff961a..da4a4ee60c80 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h | |||
| @@ -44,9 +44,9 @@ | |||
| 44 | 44 | ||
| 45 | #define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) | 45 | #define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) |
| 46 | #define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ | 46 | #define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ |
| 47 | 20 * CAAM_CMD_SZ) | 47 | 21 * CAAM_CMD_SZ) |
| 48 | #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ | 48 | #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ |
| 49 | 15 * CAAM_CMD_SZ) | 49 | 16 * CAAM_CMD_SZ) |
| 50 | 50 | ||
| 51 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | 51 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, |
| 52 | unsigned int icvsize, int era); | 52 | unsigned int icvsize, int era); |
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index d290d6b41825..32f0f8a72067 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * Based on caamalg.c | 4 | * Based on caamalg.c |
| 5 | * | 5 | * |
| 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. | 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. |
| 7 | * Copyright 2016-2018 NXP | 7 | * Copyright 2016-2019 NXP |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include "compat.h" | 10 | #include "compat.h" |
| @@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
| 214 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 214 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 215 | goto badkey; | 215 | goto badkey; |
| 216 | 216 | ||
| 217 | #ifdef DEBUG | 217 | dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", |
| 218 | dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", | ||
| 219 | keys.authkeylen + keys.enckeylen, keys.enckeylen, | 218 | keys.authkeylen + keys.enckeylen, keys.enckeylen, |
| 220 | keys.authkeylen); | 219 | keys.authkeylen); |
| 221 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 220 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
| 222 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 221 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 223 | #endif | ||
| 224 | 222 | ||
| 225 | /* | 223 | /* |
| 226 | * If DKP is supported, use it in the shared descriptor to generate | 224 | * If DKP is supported, use it in the shared descriptor to generate |
| @@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
| 237 | memcpy(ctx->key, keys.authkey, keys.authkeylen); | 235 | memcpy(ctx->key, keys.authkey, keys.authkeylen); |
| 238 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, | 236 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, |
| 239 | keys.enckeylen); | 237 | keys.enckeylen); |
| 240 | dma_sync_single_for_device(jrdev, ctx->key_dma, | 238 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
| 241 | ctx->adata.keylen_pad + | 239 | ctx->adata.keylen_pad + |
| 242 | keys.enckeylen, ctx->dir); | 240 | keys.enckeylen, ctx->dir); |
| 243 | goto skip_split_key; | 241 | goto skip_split_key; |
| @@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
| 251 | 249 | ||
| 252 | /* postpend encryption key to auth split key */ | 250 | /* postpend encryption key to auth split key */ |
| 253 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); | 251 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
| 254 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + | 252 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
| 255 | keys.enckeylen, ctx->dir); | 253 | ctx->adata.keylen_pad + keys.enckeylen, |
| 254 | ctx->dir); | ||
| 256 | #ifdef DEBUG | 255 | #ifdef DEBUG |
| 257 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | 256 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", |
| 258 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 257 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
| @@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
| 386 | struct device *jrdev = ctx->jrdev; | 385 | struct device *jrdev = ctx->jrdev; |
| 387 | int ret; | 386 | int ret; |
| 388 | 387 | ||
| 389 | #ifdef DEBUG | 388 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
| 390 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 389 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 391 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 392 | #endif | ||
| 393 | 390 | ||
| 394 | memcpy(ctx->key, key, keylen); | 391 | memcpy(ctx->key, key, keylen); |
| 395 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); | 392 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, |
| 393 | ctx->dir); | ||
| 396 | ctx->cdata.keylen = keylen; | 394 | ctx->cdata.keylen = keylen; |
| 397 | 395 | ||
| 398 | ret = gcm_set_sh_desc(aead); | 396 | ret = gcm_set_sh_desc(aead); |
| @@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
| 485 | if (keylen < 4) | 483 | if (keylen < 4) |
| 486 | return -EINVAL; | 484 | return -EINVAL; |
| 487 | 485 | ||
| 488 | #ifdef DEBUG | 486 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
| 489 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 487 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 490 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 491 | #endif | ||
| 492 | 488 | ||
| 493 | memcpy(ctx->key, key, keylen); | 489 | memcpy(ctx->key, key, keylen); |
| 494 | /* | 490 | /* |
| @@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
| 496 | * in the nonce. Update the AES key length. | 492 | * in the nonce. Update the AES key length. |
| 497 | */ | 493 | */ |
| 498 | ctx->cdata.keylen = keylen - 4; | 494 | ctx->cdata.keylen = keylen - 4; |
| 499 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, | 495 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
| 500 | ctx->dir); | 496 | ctx->cdata.keylen, ctx->dir); |
| 501 | 497 | ||
| 502 | ret = rfc4106_set_sh_desc(aead); | 498 | ret = rfc4106_set_sh_desc(aead); |
| 503 | if (ret) | 499 | if (ret) |
| @@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
| 589 | if (keylen < 4) | 585 | if (keylen < 4) |
| 590 | return -EINVAL; | 586 | return -EINVAL; |
| 591 | 587 | ||
| 592 | #ifdef DEBUG | 588 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
| 593 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 589 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 594 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 595 | #endif | ||
| 596 | 590 | ||
| 597 | memcpy(ctx->key, key, keylen); | 591 | memcpy(ctx->key, key, keylen); |
| 598 | /* | 592 | /* |
| @@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
| 600 | * in the nonce. Update the AES key length. | 594 | * in the nonce. Update the AES key length. |
| 601 | */ | 595 | */ |
| 602 | ctx->cdata.keylen = keylen - 4; | 596 | ctx->cdata.keylen = keylen - 4; |
| 603 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, | 597 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
| 604 | ctx->dir); | 598 | ctx->cdata.keylen, ctx->dir); |
| 605 | 599 | ||
| 606 | ret = rfc4543_set_sh_desc(aead); | 600 | ret = rfc4543_set_sh_desc(aead); |
| 607 | if (ret) | 601 | if (ret) |
| @@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
| 644 | const bool is_rfc3686 = alg->caam.rfc3686; | 638 | const bool is_rfc3686 = alg->caam.rfc3686; |
| 645 | int ret = 0; | 639 | int ret = 0; |
| 646 | 640 | ||
| 647 | #ifdef DEBUG | 641 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
| 648 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 642 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 649 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 643 | |
| 650 | #endif | ||
| 651 | /* | 644 | /* |
| 652 | * AES-CTR needs to load IV in CONTEXT1 reg | 645 | * AES-CTR needs to load IV in CONTEXT1 reg |
| 653 | * at an offset of 128bits (16bytes) | 646 | * at an offset of 128bits (16bytes) |
| @@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, | |||
| 838 | static void caam_unmap(struct device *dev, struct scatterlist *src, | 831 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
| 839 | struct scatterlist *dst, int src_nents, | 832 | struct scatterlist *dst, int src_nents, |
| 840 | int dst_nents, dma_addr_t iv_dma, int ivsize, | 833 | int dst_nents, dma_addr_t iv_dma, int ivsize, |
| 841 | dma_addr_t qm_sg_dma, int qm_sg_bytes) | 834 | enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, |
| 835 | int qm_sg_bytes) | ||
| 842 | { | 836 | { |
| 843 | if (dst != src) { | 837 | if (dst != src) { |
| 844 | if (src_nents) | 838 | if (src_nents) |
| @@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, | |||
| 850 | } | 844 | } |
| 851 | 845 | ||
| 852 | if (iv_dma) | 846 | if (iv_dma) |
| 853 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 847 | dma_unmap_single(dev, iv_dma, ivsize, iv_dir); |
| 854 | if (qm_sg_bytes) | 848 | if (qm_sg_bytes) |
| 855 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); | 849 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); |
| 856 | } | 850 | } |
| @@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev, | |||
| 863 | int ivsize = crypto_aead_ivsize(aead); | 857 | int ivsize = crypto_aead_ivsize(aead); |
| 864 | 858 | ||
| 865 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 859 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
| 866 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 860 | edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, |
| 861 | edesc->qm_sg_bytes); | ||
| 867 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 862 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
| 868 | } | 863 | } |
| 869 | 864 | ||
| @@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, | |||
| 874 | int ivsize = crypto_skcipher_ivsize(skcipher); | 869 | int ivsize = crypto_skcipher_ivsize(skcipher); |
| 875 | 870 | ||
| 876 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 871 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
| 877 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 872 | edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, |
| 873 | edesc->qm_sg_bytes); | ||
| 878 | } | 874 | } |
| 879 | 875 | ||
| 880 | static void aead_done(struct caam_drv_req *drv_req, u32 status) | 876 | static void aead_done(struct caam_drv_req *drv_req, u32 status) |
| @@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 924 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 920 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
| 925 | GFP_KERNEL : GFP_ATOMIC; | 921 | GFP_KERNEL : GFP_ATOMIC; |
| 926 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 922 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
| 923 | int src_len, dst_len = 0; | ||
| 927 | struct aead_edesc *edesc; | 924 | struct aead_edesc *edesc; |
| 928 | dma_addr_t qm_sg_dma, iv_dma = 0; | 925 | dma_addr_t qm_sg_dma, iv_dma = 0; |
| 929 | int ivsize = 0; | 926 | int ivsize = 0; |
| @@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 945 | } | 942 | } |
| 946 | 943 | ||
| 947 | if (likely(req->src == req->dst)) { | 944 | if (likely(req->src == req->dst)) { |
| 948 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 945 | src_len = req->assoclen + req->cryptlen + |
| 949 | req->cryptlen + | 946 | (encrypt ? authsize : 0); |
| 950 | (encrypt ? authsize : 0)); | 947 | |
| 948 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 951 | if (unlikely(src_nents < 0)) { | 949 | if (unlikely(src_nents < 0)) { |
| 952 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", | 950 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", |
| 953 | req->assoclen + req->cryptlen + | 951 | src_len); |
| 954 | (encrypt ? authsize : 0)); | ||
| 955 | qi_cache_free(edesc); | 952 | qi_cache_free(edesc); |
| 956 | return ERR_PTR(src_nents); | 953 | return ERR_PTR(src_nents); |
| 957 | } | 954 | } |
| @@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 964 | return ERR_PTR(-ENOMEM); | 961 | return ERR_PTR(-ENOMEM); |
| 965 | } | 962 | } |
| 966 | } else { | 963 | } else { |
| 967 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 964 | src_len = req->assoclen + req->cryptlen; |
| 968 | req->cryptlen); | 965 | dst_len = src_len + (encrypt ? authsize : (-authsize)); |
| 966 | |||
| 967 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 969 | if (unlikely(src_nents < 0)) { | 968 | if (unlikely(src_nents < 0)) { |
| 970 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", | 969 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", |
| 971 | req->assoclen + req->cryptlen); | 970 | src_len); |
| 972 | qi_cache_free(edesc); | 971 | qi_cache_free(edesc); |
| 973 | return ERR_PTR(src_nents); | 972 | return ERR_PTR(src_nents); |
| 974 | } | 973 | } |
| 975 | 974 | ||
| 976 | dst_nents = sg_nents_for_len(req->dst, req->assoclen + | 975 | dst_nents = sg_nents_for_len(req->dst, dst_len); |
| 977 | req->cryptlen + | ||
| 978 | (encrypt ? authsize : | ||
| 979 | (-authsize))); | ||
| 980 | if (unlikely(dst_nents < 0)) { | 976 | if (unlikely(dst_nents < 0)) { |
| 981 | dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", | 977 | dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", |
| 982 | req->assoclen + req->cryptlen + | 978 | dst_len); |
| 983 | (encrypt ? authsize : (-authsize))); | ||
| 984 | qi_cache_free(edesc); | 979 | qi_cache_free(edesc); |
| 985 | return ERR_PTR(dst_nents); | 980 | return ERR_PTR(dst_nents); |
| 986 | } | 981 | } |
| @@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1019 | /* | 1014 | /* |
| 1020 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. | 1015 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. |
| 1021 | * Input is not contiguous. | 1016 | * Input is not contiguous. |
| 1017 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
| 1018 | * the end of the table by allocating more S/G entries. Logic: | ||
| 1019 | * if (src != dst && output S/G) | ||
| 1020 | * pad output S/G, if needed | ||
| 1021 | * else if (src == dst && S/G) | ||
| 1022 | * overlapping S/Gs; pad one of them | ||
| 1023 | * else if (input S/G) ... | ||
| 1024 | * pad input S/G, if needed | ||
| 1022 | */ | 1025 | */ |
| 1023 | qm_sg_ents = 1 + !!ivsize + mapped_src_nents + | 1026 | qm_sg_ents = 1 + !!ivsize + mapped_src_nents; |
| 1024 | (mapped_dst_nents > 1 ? mapped_dst_nents : 0); | 1027 | if (mapped_dst_nents > 1) |
| 1028 | qm_sg_ents += pad_sg_nents(mapped_dst_nents); | ||
| 1029 | else if ((req->src == req->dst) && (mapped_src_nents > 1)) | ||
| 1030 | qm_sg_ents = max(pad_sg_nents(qm_sg_ents), | ||
| 1031 | 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); | ||
| 1032 | else | ||
| 1033 | qm_sg_ents = pad_sg_nents(qm_sg_ents); | ||
| 1034 | |||
| 1025 | sg_table = &edesc->sgt[0]; | 1035 | sg_table = &edesc->sgt[0]; |
| 1026 | qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); | 1036 | qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); |
| 1027 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > | 1037 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > |
| @@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1029 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", | 1039 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", |
| 1030 | qm_sg_ents, ivsize); | 1040 | qm_sg_ents, ivsize); |
| 1031 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1041 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1032 | 0, 0, 0); | 1042 | 0, DMA_NONE, 0, 0); |
| 1033 | qi_cache_free(edesc); | 1043 | qi_cache_free(edesc); |
| 1034 | return ERR_PTR(-ENOMEM); | 1044 | return ERR_PTR(-ENOMEM); |
| 1035 | } | 1045 | } |
| @@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1044 | if (dma_mapping_error(qidev, iv_dma)) { | 1054 | if (dma_mapping_error(qidev, iv_dma)) { |
| 1045 | dev_err(qidev, "unable to map IV\n"); | 1055 | dev_err(qidev, "unable to map IV\n"); |
| 1046 | caam_unmap(qidev, req->src, req->dst, src_nents, | 1056 | caam_unmap(qidev, req->src, req->dst, src_nents, |
| 1047 | dst_nents, 0, 0, 0, 0); | 1057 | dst_nents, 0, 0, DMA_NONE, 0, 0); |
| 1048 | qi_cache_free(edesc); | 1058 | qi_cache_free(edesc); |
| 1049 | return ERR_PTR(-ENOMEM); | 1059 | return ERR_PTR(-ENOMEM); |
| 1050 | } | 1060 | } |
| @@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1063 | if (dma_mapping_error(qidev, edesc->assoclen_dma)) { | 1073 | if (dma_mapping_error(qidev, edesc->assoclen_dma)) { |
| 1064 | dev_err(qidev, "unable to map assoclen\n"); | 1074 | dev_err(qidev, "unable to map assoclen\n"); |
| 1065 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1075 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
| 1066 | iv_dma, ivsize, 0, 0); | 1076 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
| 1067 | qi_cache_free(edesc); | 1077 | qi_cache_free(edesc); |
| 1068 | return ERR_PTR(-ENOMEM); | 1078 | return ERR_PTR(-ENOMEM); |
| 1069 | } | 1079 | } |
| @@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1074 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); | 1084 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); |
| 1075 | qm_sg_index++; | 1085 | qm_sg_index++; |
| 1076 | } | 1086 | } |
| 1077 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); | 1087 | sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); |
| 1078 | qm_sg_index += mapped_src_nents; | 1088 | qm_sg_index += mapped_src_nents; |
| 1079 | 1089 | ||
| 1080 | if (mapped_dst_nents > 1) | 1090 | if (mapped_dst_nents > 1) |
| 1081 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 1091 | sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); |
| 1082 | qm_sg_index, 0); | ||
| 1083 | 1092 | ||
| 1084 | qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); | 1093 | qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); |
| 1085 | if (dma_mapping_error(qidev, qm_sg_dma)) { | 1094 | if (dma_mapping_error(qidev, qm_sg_dma)) { |
| 1086 | dev_err(qidev, "unable to map S/G table\n"); | 1095 | dev_err(qidev, "unable to map S/G table\n"); |
| 1087 | dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 1096 | dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
| 1088 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1097 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
| 1089 | iv_dma, ivsize, 0, 0); | 1098 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
| 1090 | qi_cache_free(edesc); | 1099 | qi_cache_free(edesc); |
| 1091 | return ERR_PTR(-ENOMEM); | 1100 | return ERR_PTR(-ENOMEM); |
| 1092 | } | 1101 | } |
| @@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 1109 | dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + | 1118 | dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + |
| 1110 | (1 + !!ivsize) * sizeof(*sg_table), | 1119 | (1 + !!ivsize) * sizeof(*sg_table), |
| 1111 | out_len, 0); | 1120 | out_len, 0); |
| 1112 | } else if (mapped_dst_nents == 1) { | 1121 | } else if (mapped_dst_nents <= 1) { |
| 1113 | dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, | 1122 | dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, |
| 1114 | 0); | 1123 | 0); |
| 1115 | } else { | 1124 | } else { |
| @@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) | |||
| 1182 | struct device *qidev = caam_ctx->qidev; | 1191 | struct device *qidev = caam_ctx->qidev; |
| 1183 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1192 | int ivsize = crypto_skcipher_ivsize(skcipher); |
| 1184 | 1193 | ||
| 1185 | #ifdef DEBUG | 1194 | dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); |
| 1186 | dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); | ||
| 1187 | #endif | ||
| 1188 | 1195 | ||
| 1189 | edesc = container_of(drv_req, typeof(*edesc), drv_req); | 1196 | edesc = container_of(drv_req, typeof(*edesc), drv_req); |
| 1190 | 1197 | ||
| 1191 | if (status) | 1198 | if (status) |
| 1192 | caam_jr_strstatus(qidev, status); | 1199 | caam_jr_strstatus(qidev, status); |
| 1193 | 1200 | ||
| 1194 | #ifdef DEBUG | 1201 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
| 1195 | print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", | 1202 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
| 1196 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1203 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
| 1197 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1204 | caam_dump_sg("dst @" __stringify(__LINE__)": ", |
| 1198 | caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", | ||
| 1199 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1205 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 1200 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1206 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
| 1201 | #endif | ||
| 1202 | 1207 | ||
| 1203 | skcipher_unmap(qidev, edesc, req); | 1208 | skcipher_unmap(qidev, edesc, req); |
| 1204 | 1209 | ||
| 1205 | /* | 1210 | /* |
| 1206 | * The crypto API expects us to set the IV (req->iv) to the last | 1211 | * The crypto API expects us to set the IV (req->iv) to the last |
| 1207 | * ciphertext block. This is used e.g. by the CTS mode. | 1212 | * ciphertext block (CBC mode) or last counter (CTR mode). |
| 1213 | * This is used e.g. by the CTS mode. | ||
| 1208 | */ | 1214 | */ |
| 1209 | if (edesc->drv_req.drv_ctx->op_type == ENCRYPT) | 1215 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); |
| 1210 | scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - | ||
| 1211 | ivsize, ivsize, 0); | ||
| 1212 | 1216 | ||
| 1213 | qi_cache_free(edesc); | 1217 | qi_cache_free(edesc); |
| 1214 | skcipher_request_complete(req, status); | 1218 | skcipher_request_complete(req, status); |
| @@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1276 | qm_sg_ents = 1 + mapped_src_nents; | 1280 | qm_sg_ents = 1 + mapped_src_nents; |
| 1277 | dst_sg_idx = qm_sg_ents; | 1281 | dst_sg_idx = qm_sg_ents; |
| 1278 | 1282 | ||
| 1279 | qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1283 | /* |
| 1284 | * Input, output HW S/G tables: [IV, src][dst, IV] | ||
| 1285 | * IV entries point to the same buffer | ||
| 1286 | * If src == dst, S/G entries are reused (S/G tables overlap) | ||
| 1287 | * | ||
| 1288 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
| 1289 | * the end of the table by allocating more S/G entries. | ||
| 1290 | */ | ||
| 1291 | if (req->src != req->dst) | ||
| 1292 | qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); | ||
| 1293 | else | ||
| 1294 | qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); | ||
| 1295 | |||
| 1280 | qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); | 1296 | qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); |
| 1281 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + | 1297 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + |
| 1282 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { | 1298 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { |
| 1283 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", | 1299 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", |
| 1284 | qm_sg_ents, ivsize); | 1300 | qm_sg_ents, ivsize); |
| 1285 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1301 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1286 | 0, 0, 0); | 1302 | 0, DMA_NONE, 0, 0); |
| 1287 | return ERR_PTR(-ENOMEM); | 1303 | return ERR_PTR(-ENOMEM); |
| 1288 | } | 1304 | } |
| 1289 | 1305 | ||
| @@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1292 | if (unlikely(!edesc)) { | 1308 | if (unlikely(!edesc)) { |
| 1293 | dev_err(qidev, "could not allocate extended descriptor\n"); | 1309 | dev_err(qidev, "could not allocate extended descriptor\n"); |
| 1294 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1310 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1295 | 0, 0, 0); | 1311 | 0, DMA_NONE, 0, 0); |
| 1296 | return ERR_PTR(-ENOMEM); | 1312 | return ERR_PTR(-ENOMEM); |
| 1297 | } | 1313 | } |
| 1298 | 1314 | ||
| @@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1301 | iv = (u8 *)(sg_table + qm_sg_ents); | 1317 | iv = (u8 *)(sg_table + qm_sg_ents); |
| 1302 | memcpy(iv, req->iv, ivsize); | 1318 | memcpy(iv, req->iv, ivsize); |
| 1303 | 1319 | ||
| 1304 | iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); | 1320 | iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); |
| 1305 | if (dma_mapping_error(qidev, iv_dma)) { | 1321 | if (dma_mapping_error(qidev, iv_dma)) { |
| 1306 | dev_err(qidev, "unable to map IV\n"); | 1322 | dev_err(qidev, "unable to map IV\n"); |
| 1307 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1323 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1308 | 0, 0, 0); | 1324 | 0, DMA_NONE, 0, 0); |
| 1309 | qi_cache_free(edesc); | 1325 | qi_cache_free(edesc); |
| 1310 | return ERR_PTR(-ENOMEM); | 1326 | return ERR_PTR(-ENOMEM); |
| 1311 | } | 1327 | } |
| @@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1319 | edesc->drv_req.drv_ctx = drv_ctx; | 1335 | edesc->drv_req.drv_ctx = drv_ctx; |
| 1320 | 1336 | ||
| 1321 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); | 1337 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); |
| 1322 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); | 1338 | sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); |
| 1323 | 1339 | ||
| 1324 | if (mapped_dst_nents > 1) | 1340 | if (req->src != req->dst) |
| 1325 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 1341 | sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); |
| 1326 | dst_sg_idx, 0); | 1342 | |
| 1343 | dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, | ||
| 1344 | ivsize, 0); | ||
| 1327 | 1345 | ||
| 1328 | edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, | 1346 | edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, |
| 1329 | DMA_TO_DEVICE); | 1347 | DMA_TO_DEVICE); |
| 1330 | if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { | 1348 | if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { |
| 1331 | dev_err(qidev, "unable to map S/G table\n"); | 1349 | dev_err(qidev, "unable to map S/G table\n"); |
| 1332 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1350 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
| 1333 | iv_dma, ivsize, 0, 0); | 1351 | iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); |
| 1334 | qi_cache_free(edesc); | 1352 | qi_cache_free(edesc); |
| 1335 | return ERR_PTR(-ENOMEM); | 1353 | return ERR_PTR(-ENOMEM); |
| 1336 | } | 1354 | } |
| @@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
| 1340 | dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, | 1358 | dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, |
| 1341 | ivsize + req->cryptlen, 0); | 1359 | ivsize + req->cryptlen, 0); |
| 1342 | 1360 | ||
| 1343 | if (req->src == req->dst) { | 1361 | if (req->src == req->dst) |
| 1344 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + | 1362 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + |
| 1345 | sizeof(*sg_table), req->cryptlen, 0); | 1363 | sizeof(*sg_table), req->cryptlen + ivsize, |
| 1346 | } else if (mapped_dst_nents > 1) { | 1364 | 0); |
| 1365 | else | ||
| 1347 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * | 1366 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * |
| 1348 | sizeof(*sg_table), req->cryptlen, 0); | 1367 | sizeof(*sg_table), req->cryptlen + ivsize, |
| 1349 | } else { | 1368 | 0); |
| 1350 | dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), | ||
| 1351 | req->cryptlen, 0); | ||
| 1352 | } | ||
| 1353 | 1369 | ||
| 1354 | return edesc; | 1370 | return edesc; |
| 1355 | } | 1371 | } |
| @@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) | |||
| 1359 | struct skcipher_edesc *edesc; | 1375 | struct skcipher_edesc *edesc; |
| 1360 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1376 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
| 1361 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1377 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
| 1362 | int ivsize = crypto_skcipher_ivsize(skcipher); | ||
| 1363 | int ret; | 1378 | int ret; |
| 1364 | 1379 | ||
| 1365 | if (unlikely(caam_congested)) | 1380 | if (unlikely(caam_congested)) |
| @@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) | |||
| 1370 | if (IS_ERR(edesc)) | 1385 | if (IS_ERR(edesc)) |
| 1371 | return PTR_ERR(edesc); | 1386 | return PTR_ERR(edesc); |
| 1372 | 1387 | ||
| 1373 | /* | ||
| 1374 | * The crypto API expects us to set the IV (req->iv) to the last | ||
| 1375 | * ciphertext block. | ||
| 1376 | */ | ||
| 1377 | if (!encrypt) | ||
| 1378 | scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - | ||
| 1379 | ivsize, ivsize, 0); | ||
| 1380 | |||
| 1381 | ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); | 1388 | ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); |
| 1382 | if (!ret) { | 1389 | if (!ret) { |
| 1383 | ret = -EINPROGRESS; | 1390 | ret = -EINPROGRESS; |
| @@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, | |||
| 2382 | bool uses_dkp) | 2389 | bool uses_dkp) |
| 2383 | { | 2390 | { |
| 2384 | struct caam_drv_private *priv; | 2391 | struct caam_drv_private *priv; |
| 2392 | struct device *dev; | ||
| 2385 | 2393 | ||
| 2386 | /* | 2394 | /* |
| 2387 | * distribute tfms across job rings to ensure in-order | 2395 | * distribute tfms across job rings to ensure in-order |
| @@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, | |||
| 2393 | return PTR_ERR(ctx->jrdev); | 2401 | return PTR_ERR(ctx->jrdev); |
| 2394 | } | 2402 | } |
| 2395 | 2403 | ||
| 2396 | priv = dev_get_drvdata(ctx->jrdev->parent); | 2404 | dev = ctx->jrdev->parent; |
| 2405 | priv = dev_get_drvdata(dev); | ||
| 2397 | if (priv->era >= 6 && uses_dkp) | 2406 | if (priv->era >= 6 && uses_dkp) |
| 2398 | ctx->dir = DMA_BIDIRECTIONAL; | 2407 | ctx->dir = DMA_BIDIRECTIONAL; |
| 2399 | else | 2408 | else |
| 2400 | ctx->dir = DMA_TO_DEVICE; | 2409 | ctx->dir = DMA_TO_DEVICE; |
| 2401 | 2410 | ||
| 2402 | ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), | 2411 | ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), |
| 2403 | ctx->dir); | 2412 | ctx->dir); |
| 2404 | if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { | 2413 | if (dma_mapping_error(dev, ctx->key_dma)) { |
| 2405 | dev_err(ctx->jrdev, "unable to map key\n"); | 2414 | dev_err(dev, "unable to map key\n"); |
| 2406 | caam_jr_free(ctx->jrdev); | 2415 | caam_jr_free(ctx->jrdev); |
| 2407 | return -ENOMEM; | 2416 | return -ENOMEM; |
| 2408 | } | 2417 | } |
| @@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, | |||
| 2411 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; | 2420 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
| 2412 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; | 2421 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
| 2413 | 2422 | ||
| 2414 | ctx->qidev = priv->qidev; | 2423 | ctx->qidev = dev; |
| 2415 | 2424 | ||
| 2416 | spin_lock_init(&ctx->lock); | 2425 | spin_lock_init(&ctx->lock); |
| 2417 | ctx->drv_ctx[ENCRYPT] = NULL; | 2426 | ctx->drv_ctx[ENCRYPT] = NULL; |
| @@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx) | |||
| 2445 | caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); | 2454 | caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); |
| 2446 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); | 2455 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); |
| 2447 | 2456 | ||
| 2448 | dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); | 2457 | dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), |
| 2458 | ctx->dir); | ||
| 2449 | 2459 | ||
| 2450 | caam_jr_free(ctx->jrdev); | 2460 | caam_jr_free(ctx->jrdev); |
| 2451 | } | 2461 | } |
| @@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm) | |||
| 2460 | caam_exit_common(crypto_aead_ctx(tfm)); | 2470 | caam_exit_common(crypto_aead_ctx(tfm)); |
| 2461 | } | 2471 | } |
| 2462 | 2472 | ||
| 2463 | static void __exit caam_qi_algapi_exit(void) | 2473 | void caam_qi_algapi_exit(void) |
| 2464 | { | 2474 | { |
| 2465 | int i; | 2475 | int i; |
| 2466 | 2476 | ||
| @@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) | |||
| 2505 | alg->exit = caam_aead_exit; | 2515 | alg->exit = caam_aead_exit; |
| 2506 | } | 2516 | } |
| 2507 | 2517 | ||
| 2508 | static int __init caam_qi_algapi_init(void) | 2518 | int caam_qi_algapi_init(struct device *ctrldev) |
| 2509 | { | 2519 | { |
| 2510 | struct device_node *dev_node; | 2520 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
| 2511 | struct platform_device *pdev; | ||
| 2512 | struct device *ctrldev; | ||
| 2513 | struct caam_drv_private *priv; | ||
| 2514 | int i = 0, err = 0; | 2521 | int i = 0, err = 0; |
| 2515 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; | 2522 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; |
| 2516 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 2523 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
| 2517 | bool registered = false; | 2524 | bool registered = false; |
| 2518 | 2525 | ||
| 2519 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 2520 | if (!dev_node) { | ||
| 2521 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 2522 | if (!dev_node) | ||
| 2523 | return -ENODEV; | ||
| 2524 | } | ||
| 2525 | |||
| 2526 | pdev = of_find_device_by_node(dev_node); | ||
| 2527 | of_node_put(dev_node); | ||
| 2528 | if (!pdev) | ||
| 2529 | return -ENODEV; | ||
| 2530 | |||
| 2531 | ctrldev = &pdev->dev; | ||
| 2532 | priv = dev_get_drvdata(ctrldev); | ||
| 2533 | |||
| 2534 | /* | ||
| 2535 | * If priv is NULL, it's probably because the caam driver wasn't | ||
| 2536 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
| 2537 | */ | ||
| 2538 | if (!priv || !priv->qi_present) { | ||
| 2539 | err = -ENODEV; | ||
| 2540 | goto out_put_dev; | ||
| 2541 | } | ||
| 2542 | |||
| 2543 | if (caam_dpaa2) { | 2526 | if (caam_dpaa2) { |
| 2544 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); | 2527 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); |
| 2545 | err = -ENODEV; | 2528 | return -ENODEV; |
| 2546 | goto out_put_dev; | ||
| 2547 | } | 2529 | } |
| 2548 | 2530 | ||
| 2549 | /* | 2531 | /* |
| @@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void) | |||
| 2598 | 2580 | ||
| 2599 | err = crypto_register_skcipher(&t_alg->skcipher); | 2581 | err = crypto_register_skcipher(&t_alg->skcipher); |
| 2600 | if (err) { | 2582 | if (err) { |
| 2601 | dev_warn(priv->qidev, "%s alg registration failed\n", | 2583 | dev_warn(ctrldev, "%s alg registration failed\n", |
| 2602 | t_alg->skcipher.base.cra_driver_name); | 2584 | t_alg->skcipher.base.cra_driver_name); |
| 2603 | continue; | 2585 | continue; |
| 2604 | } | 2586 | } |
| @@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void) | |||
| 2654 | } | 2636 | } |
| 2655 | 2637 | ||
| 2656 | if (registered) | 2638 | if (registered) |
| 2657 | dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); | 2639 | dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); |
| 2658 | 2640 | ||
| 2659 | out_put_dev: | ||
| 2660 | put_device(ctrldev); | ||
| 2661 | return err; | 2641 | return err; |
| 2662 | } | 2642 | } |
| 2663 | |||
| 2664 | module_init(caam_qi_algapi_init); | ||
| 2665 | module_exit(caam_qi_algapi_exit); | ||
| 2666 | |||
| 2667 | MODULE_LICENSE("GPL"); | ||
| 2668 | MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend"); | ||
| 2669 | MODULE_AUTHOR("Freescale Semiconductor"); | ||
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 2b2980a8a9b9..06bf32c32cbd 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) | 1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
| 2 | /* | 2 | /* |
| 3 | * Copyright 2015-2016 Freescale Semiconductor Inc. | 3 | * Copyright 2015-2016 Freescale Semiconductor Inc. |
| 4 | * Copyright 2017-2018 NXP | 4 | * Copyright 2017-2019 NXP |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include "compat.h" | 7 | #include "compat.h" |
| @@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq) | |||
| 140 | static void caam_unmap(struct device *dev, struct scatterlist *src, | 140 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
| 141 | struct scatterlist *dst, int src_nents, | 141 | struct scatterlist *dst, int src_nents, |
| 142 | int dst_nents, dma_addr_t iv_dma, int ivsize, | 142 | int dst_nents, dma_addr_t iv_dma, int ivsize, |
| 143 | dma_addr_t qm_sg_dma, int qm_sg_bytes) | 143 | enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, |
| 144 | int qm_sg_bytes) | ||
| 144 | { | 145 | { |
| 145 | if (dst != src) { | 146 | if (dst != src) { |
| 146 | if (src_nents) | 147 | if (src_nents) |
| @@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, | |||
| 152 | } | 153 | } |
| 153 | 154 | ||
| 154 | if (iv_dma) | 155 | if (iv_dma) |
| 155 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 156 | dma_unmap_single(dev, iv_dma, ivsize, iv_dir); |
| 156 | 157 | ||
| 157 | if (qm_sg_bytes) | 158 | if (qm_sg_bytes) |
| 158 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); | 159 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); |
| @@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 371 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 372 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
| 372 | GFP_KERNEL : GFP_ATOMIC; | 373 | GFP_KERNEL : GFP_ATOMIC; |
| 373 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 374 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
| 375 | int src_len, dst_len = 0; | ||
| 374 | struct aead_edesc *edesc; | 376 | struct aead_edesc *edesc; |
| 375 | dma_addr_t qm_sg_dma, iv_dma = 0; | 377 | dma_addr_t qm_sg_dma, iv_dma = 0; |
| 376 | int ivsize = 0; | 378 | int ivsize = 0; |
| @@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 387 | } | 389 | } |
| 388 | 390 | ||
| 389 | if (unlikely(req->dst != req->src)) { | 391 | if (unlikely(req->dst != req->src)) { |
| 390 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 392 | src_len = req->assoclen + req->cryptlen; |
| 391 | req->cryptlen); | 393 | dst_len = src_len + (encrypt ? authsize : (-authsize)); |
| 394 | |||
| 395 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 392 | if (unlikely(src_nents < 0)) { | 396 | if (unlikely(src_nents < 0)) { |
| 393 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", | 397 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
| 394 | req->assoclen + req->cryptlen); | 398 | src_len); |
| 395 | qi_cache_free(edesc); | 399 | qi_cache_free(edesc); |
| 396 | return ERR_PTR(src_nents); | 400 | return ERR_PTR(src_nents); |
| 397 | } | 401 | } |
| 398 | 402 | ||
| 399 | dst_nents = sg_nents_for_len(req->dst, req->assoclen + | 403 | dst_nents = sg_nents_for_len(req->dst, dst_len); |
| 400 | req->cryptlen + | ||
| 401 | (encrypt ? authsize : | ||
| 402 | (-authsize))); | ||
| 403 | if (unlikely(dst_nents < 0)) { | 404 | if (unlikely(dst_nents < 0)) { |
| 404 | dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", | 405 | dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", |
| 405 | req->assoclen + req->cryptlen + | 406 | dst_len); |
| 406 | (encrypt ? authsize : (-authsize))); | ||
| 407 | qi_cache_free(edesc); | 407 | qi_cache_free(edesc); |
| 408 | return ERR_PTR(dst_nents); | 408 | return ERR_PTR(dst_nents); |
| 409 | } | 409 | } |
| @@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 434 | mapped_dst_nents = 0; | 434 | mapped_dst_nents = 0; |
| 435 | } | 435 | } |
| 436 | } else { | 436 | } else { |
| 437 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 437 | src_len = req->assoclen + req->cryptlen + |
| 438 | req->cryptlen + | 438 | (encrypt ? authsize : 0); |
| 439 | (encrypt ? authsize : 0)); | 439 | |
| 440 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 440 | if (unlikely(src_nents < 0)) { | 441 | if (unlikely(src_nents < 0)) { |
| 441 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", | 442 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
| 442 | req->assoclen + req->cryptlen + | 443 | src_len); |
| 443 | (encrypt ? authsize : 0)); | ||
| 444 | qi_cache_free(edesc); | 444 | qi_cache_free(edesc); |
| 445 | return ERR_PTR(src_nents); | 445 | return ERR_PTR(src_nents); |
| 446 | } | 446 | } |
| @@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 460 | /* | 460 | /* |
| 461 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. | 461 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. |
| 462 | * Input is not contiguous. | 462 | * Input is not contiguous. |
| 463 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
| 464 | * the end of the table by allocating more S/G entries. Logic: | ||
| 465 | * if (src != dst && output S/G) | ||
| 466 | * pad output S/G, if needed | ||
| 467 | * else if (src == dst && S/G) | ||
| 468 | * overlapping S/Gs; pad one of them | ||
| 469 | * else if (input S/G) ... | ||
| 470 | * pad input S/G, if needed | ||
| 463 | */ | 471 | */ |
| 464 | qm_sg_nents = 1 + !!ivsize + mapped_src_nents + | 472 | qm_sg_nents = 1 + !!ivsize + mapped_src_nents; |
| 465 | (mapped_dst_nents > 1 ? mapped_dst_nents : 0); | 473 | if (mapped_dst_nents > 1) |
| 474 | qm_sg_nents += pad_sg_nents(mapped_dst_nents); | ||
| 475 | else if ((req->src == req->dst) && (mapped_src_nents > 1)) | ||
| 476 | qm_sg_nents = max(pad_sg_nents(qm_sg_nents), | ||
| 477 | 1 + !!ivsize + | ||
| 478 | pad_sg_nents(mapped_src_nents)); | ||
| 479 | else | ||
| 480 | qm_sg_nents = pad_sg_nents(qm_sg_nents); | ||
| 481 | |||
| 466 | sg_table = &edesc->sgt[0]; | 482 | sg_table = &edesc->sgt[0]; |
| 467 | qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); | 483 | qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); |
| 468 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > | 484 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > |
| @@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 470 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", | 486 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", |
| 471 | qm_sg_nents, ivsize); | 487 | qm_sg_nents, ivsize); |
| 472 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 488 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
| 473 | 0, 0, 0); | 489 | 0, DMA_NONE, 0, 0); |
| 474 | qi_cache_free(edesc); | 490 | qi_cache_free(edesc); |
| 475 | return ERR_PTR(-ENOMEM); | 491 | return ERR_PTR(-ENOMEM); |
| 476 | } | 492 | } |
| @@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 485 | if (dma_mapping_error(dev, iv_dma)) { | 501 | if (dma_mapping_error(dev, iv_dma)) { |
| 486 | dev_err(dev, "unable to map IV\n"); | 502 | dev_err(dev, "unable to map IV\n"); |
| 487 | caam_unmap(dev, req->src, req->dst, src_nents, | 503 | caam_unmap(dev, req->src, req->dst, src_nents, |
| 488 | dst_nents, 0, 0, 0, 0); | 504 | dst_nents, 0, 0, DMA_NONE, 0, 0); |
| 489 | qi_cache_free(edesc); | 505 | qi_cache_free(edesc); |
| 490 | return ERR_PTR(-ENOMEM); | 506 | return ERR_PTR(-ENOMEM); |
| 491 | } | 507 | } |
| @@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 509 | if (dma_mapping_error(dev, edesc->assoclen_dma)) { | 525 | if (dma_mapping_error(dev, edesc->assoclen_dma)) { |
| 510 | dev_err(dev, "unable to map assoclen\n"); | 526 | dev_err(dev, "unable to map assoclen\n"); |
| 511 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, | 527 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
| 512 | iv_dma, ivsize, 0, 0); | 528 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
| 513 | qi_cache_free(edesc); | 529 | qi_cache_free(edesc); |
| 514 | return ERR_PTR(-ENOMEM); | 530 | return ERR_PTR(-ENOMEM); |
| 515 | } | 531 | } |
| @@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 520 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); | 536 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); |
| 521 | qm_sg_index++; | 537 | qm_sg_index++; |
| 522 | } | 538 | } |
| 523 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); | 539 | sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); |
| 524 | qm_sg_index += mapped_src_nents; | 540 | qm_sg_index += mapped_src_nents; |
| 525 | 541 | ||
| 526 | if (mapped_dst_nents > 1) | 542 | if (mapped_dst_nents > 1) |
| 527 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 543 | sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); |
| 528 | qm_sg_index, 0); | ||
| 529 | 544 | ||
| 530 | qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); | 545 | qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); |
| 531 | if (dma_mapping_error(dev, qm_sg_dma)) { | 546 | if (dma_mapping_error(dev, qm_sg_dma)) { |
| 532 | dev_err(dev, "unable to map S/G table\n"); | 547 | dev_err(dev, "unable to map S/G table\n"); |
| 533 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 548 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
| 534 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, | 549 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
| 535 | iv_dma, ivsize, 0, 0); | 550 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
| 536 | qi_cache_free(edesc); | 551 | qi_cache_free(edesc); |
| 537 | return ERR_PTR(-ENOMEM); | 552 | return ERR_PTR(-ENOMEM); |
| 538 | } | 553 | } |
| @@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 559 | dpaa2_fl_set_addr(out_fle, qm_sg_dma + | 574 | dpaa2_fl_set_addr(out_fle, qm_sg_dma + |
| 560 | (1 + !!ivsize) * sizeof(*sg_table)); | 575 | (1 + !!ivsize) * sizeof(*sg_table)); |
| 561 | } | 576 | } |
| 577 | } else if (!mapped_dst_nents) { | ||
| 578 | /* | ||
| 579 | * crypto engine requires the output entry to be present when | ||
| 580 | * "frame list" FD is used. | ||
| 581 | * Since engine does not support FMT=2'b11 (unused entry type), | ||
| 582 | * leaving out_fle zeroized is the best option. | ||
| 583 | */ | ||
| 584 | goto skip_out_fle; | ||
| 562 | } else if (mapped_dst_nents == 1) { | 585 | } else if (mapped_dst_nents == 1) { |
| 563 | dpaa2_fl_set_format(out_fle, dpaa2_fl_single); | 586 | dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
| 564 | dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); | 587 | dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); |
| @@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 570 | 593 | ||
| 571 | dpaa2_fl_set_len(out_fle, out_len); | 594 | dpaa2_fl_set_len(out_fle, out_len); |
| 572 | 595 | ||
| 596 | skip_out_fle: | ||
| 573 | return edesc; | 597 | return edesc; |
| 574 | } | 598 | } |
| 575 | 599 | ||
| @@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
| 1077 | qm_sg_ents = 1 + mapped_src_nents; | 1101 | qm_sg_ents = 1 + mapped_src_nents; |
| 1078 | dst_sg_idx = qm_sg_ents; | 1102 | dst_sg_idx = qm_sg_ents; |
| 1079 | 1103 | ||
| 1080 | qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1104 | /* |
| 1105 | * Input, output HW S/G tables: [IV, src][dst, IV] | ||
| 1106 | * IV entries point to the same buffer | ||
| 1107 | * If src == dst, S/G entries are reused (S/G tables overlap) | ||
| 1108 | * | ||
| 1109 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
| 1110 | * the end of the table by allocating more S/G entries. | ||
| 1111 | */ | ||
| 1112 | if (req->src != req->dst) | ||
| 1113 | qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); | ||
| 1114 | else | ||
| 1115 | qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); | ||
| 1116 | |||
| 1081 | qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); | 1117 | qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); |
| 1082 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + | 1118 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + |
| 1083 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { | 1119 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { |
| 1084 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", | 1120 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", |
| 1085 | qm_sg_ents, ivsize); | 1121 | qm_sg_ents, ivsize); |
| 1086 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 1122 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1087 | 0, 0, 0); | 1123 | 0, DMA_NONE, 0, 0); |
| 1088 | return ERR_PTR(-ENOMEM); | 1124 | return ERR_PTR(-ENOMEM); |
| 1089 | } | 1125 | } |
| 1090 | 1126 | ||
| @@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
| 1093 | if (unlikely(!edesc)) { | 1129 | if (unlikely(!edesc)) { |
| 1094 | dev_err(dev, "could not allocate extended descriptor\n"); | 1130 | dev_err(dev, "could not allocate extended descriptor\n"); |
| 1095 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 1131 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1096 | 0, 0, 0); | 1132 | 0, DMA_NONE, 0, 0); |
| 1097 | return ERR_PTR(-ENOMEM); | 1133 | return ERR_PTR(-ENOMEM); |
| 1098 | } | 1134 | } |
| 1099 | 1135 | ||
| @@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
| 1102 | iv = (u8 *)(sg_table + qm_sg_ents); | 1138 | iv = (u8 *)(sg_table + qm_sg_ents); |
| 1103 | memcpy(iv, req->iv, ivsize); | 1139 | memcpy(iv, req->iv, ivsize); |
| 1104 | 1140 | ||
| 1105 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | 1141 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL); |
| 1106 | if (dma_mapping_error(dev, iv_dma)) { | 1142 | if (dma_mapping_error(dev, iv_dma)) { |
| 1107 | dev_err(dev, "unable to map IV\n"); | 1143 | dev_err(dev, "unable to map IV\n"); |
| 1108 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 1144 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
| 1109 | 0, 0, 0); | 1145 | 0, DMA_NONE, 0, 0); |
| 1110 | qi_cache_free(edesc); | 1146 | qi_cache_free(edesc); |
| 1111 | return ERR_PTR(-ENOMEM); | 1147 | return ERR_PTR(-ENOMEM); |
| 1112 | } | 1148 | } |
| @@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
| 1117 | edesc->qm_sg_bytes = qm_sg_bytes; | 1153 | edesc->qm_sg_bytes = qm_sg_bytes; |
| 1118 | 1154 | ||
| 1119 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); | 1155 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); |
| 1120 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); | 1156 | sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); |
| 1121 | 1157 | ||
| 1122 | if (mapped_dst_nents > 1) | 1158 | if (req->src != req->dst) |
| 1123 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 1159 | sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); |
| 1124 | dst_sg_idx, 0); | 1160 | |
| 1161 | dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, | ||
| 1162 | ivsize, 0); | ||
| 1125 | 1163 | ||
| 1126 | edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, | 1164 | edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, |
| 1127 | DMA_TO_DEVICE); | 1165 | DMA_TO_DEVICE); |
| 1128 | if (dma_mapping_error(dev, edesc->qm_sg_dma)) { | 1166 | if (dma_mapping_error(dev, edesc->qm_sg_dma)) { |
| 1129 | dev_err(dev, "unable to map S/G table\n"); | 1167 | dev_err(dev, "unable to map S/G table\n"); |
| 1130 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, | 1168 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
| 1131 | iv_dma, ivsize, 0, 0); | 1169 | iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); |
| 1132 | qi_cache_free(edesc); | 1170 | qi_cache_free(edesc); |
| 1133 | return ERR_PTR(-ENOMEM); | 1171 | return ERR_PTR(-ENOMEM); |
| 1134 | } | 1172 | } |
| @@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
| 1136 | memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); | 1174 | memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
| 1137 | dpaa2_fl_set_final(in_fle, true); | 1175 | dpaa2_fl_set_final(in_fle, true); |
| 1138 | dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); | 1176 | dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); |
| 1139 | dpaa2_fl_set_len(out_fle, req->cryptlen); | 1177 | dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); |
| 1140 | 1178 | ||
| 1141 | dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); | 1179 | dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
| 1142 | dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); | 1180 | dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
| 1143 | 1181 | ||
| 1144 | if (req->src == req->dst) { | 1182 | dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
| 1145 | dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); | 1183 | |
| 1184 | if (req->src == req->dst) | ||
| 1146 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + | 1185 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + |
| 1147 | sizeof(*sg_table)); | 1186 | sizeof(*sg_table)); |
| 1148 | } else if (mapped_dst_nents > 1) { | 1187 | else |
| 1149 | dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); | ||
| 1150 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * | 1188 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * |
| 1151 | sizeof(*sg_table)); | 1189 | sizeof(*sg_table)); |
| 1152 | } else { | ||
| 1153 | dpaa2_fl_set_format(out_fle, dpaa2_fl_single); | ||
| 1154 | dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); | ||
| 1155 | } | ||
| 1156 | 1190 | ||
| 1157 | return edesc; | 1191 | return edesc; |
| 1158 | } | 1192 | } |
| @@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc, | |||
| 1164 | int ivsize = crypto_aead_ivsize(aead); | 1198 | int ivsize = crypto_aead_ivsize(aead); |
| 1165 | 1199 | ||
| 1166 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 1200 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
| 1167 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 1201 | edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, |
| 1202 | edesc->qm_sg_bytes); | ||
| 1168 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 1203 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
| 1169 | } | 1204 | } |
| 1170 | 1205 | ||
| @@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, | |||
| 1175 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1210 | int ivsize = crypto_skcipher_ivsize(skcipher); |
| 1176 | 1211 | ||
| 1177 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 1212 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
| 1178 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 1213 | edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, |
| 1214 | edesc->qm_sg_bytes); | ||
| 1179 | } | 1215 | } |
| 1180 | 1216 | ||
| 1181 | static void aead_encrypt_done(void *cbk_ctx, u32 status) | 1217 | static void aead_encrypt_done(void *cbk_ctx, u32 status) |
| @@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | |||
| 1324 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1360 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
| 1325 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1361 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
| 1326 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1362 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
| 1327 | caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", | 1363 | caam_dump_sg("dst @" __stringify(__LINE__)": ", |
| 1328 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1364 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 1329 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1365 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
| 1330 | 1366 | ||
| @@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | |||
| 1332 | 1368 | ||
| 1333 | /* | 1369 | /* |
| 1334 | * The crypto API expects us to set the IV (req->iv) to the last | 1370 | * The crypto API expects us to set the IV (req->iv) to the last |
| 1335 | * ciphertext block. This is used e.g. by the CTS mode. | 1371 | * ciphertext block (CBC mode) or last counter (CTR mode). |
| 1372 | * This is used e.g. by the CTS mode. | ||
| 1336 | */ | 1373 | */ |
| 1337 | scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, | 1374 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); |
| 1338 | ivsize, 0); | ||
| 1339 | 1375 | ||
| 1340 | qi_cache_free(edesc); | 1376 | qi_cache_free(edesc); |
| 1341 | skcipher_request_complete(req, ecode); | 1377 | skcipher_request_complete(req, ecode); |
| @@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) | |||
| 1362 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1398 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
| 1363 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1399 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
| 1364 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1400 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
| 1365 | caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", | 1401 | caam_dump_sg("dst @" __stringify(__LINE__)": ", |
| 1366 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1402 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 1367 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1403 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
| 1368 | 1404 | ||
| 1369 | skcipher_unmap(ctx->dev, edesc, req); | 1405 | skcipher_unmap(ctx->dev, edesc, req); |
| 1406 | |||
| 1407 | /* | ||
| 1408 | * The crypto API expects us to set the IV (req->iv) to the last | ||
| 1409 | * ciphertext block (CBC mode) or last counter (CTR mode). | ||
| 1410 | * This is used e.g. by the CTS mode. | ||
| 1411 | */ | ||
| 1412 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); | ||
| 1413 | |||
| 1370 | qi_cache_free(edesc); | 1414 | qi_cache_free(edesc); |
| 1371 | skcipher_request_complete(req, ecode); | 1415 | skcipher_request_complete(req, ecode); |
| 1372 | } | 1416 | } |
| @@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
| 1405 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1449 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
| 1406 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1450 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
| 1407 | struct caam_request *caam_req = skcipher_request_ctx(req); | 1451 | struct caam_request *caam_req = skcipher_request_ctx(req); |
| 1408 | int ivsize = crypto_skcipher_ivsize(skcipher); | ||
| 1409 | int ret; | 1452 | int ret; |
| 1410 | 1453 | ||
| 1411 | /* allocate extended descriptor */ | 1454 | /* allocate extended descriptor */ |
| @@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
| 1413 | if (IS_ERR(edesc)) | 1456 | if (IS_ERR(edesc)) |
| 1414 | return PTR_ERR(edesc); | 1457 | return PTR_ERR(edesc); |
| 1415 | 1458 | ||
| 1416 | /* | ||
| 1417 | * The crypto API expects us to set the IV (req->iv) to the last | ||
| 1418 | * ciphertext block. | ||
| 1419 | */ | ||
| 1420 | scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize, | ||
| 1421 | ivsize, 0); | ||
| 1422 | |||
| 1423 | caam_req->flc = &ctx->flc[DECRYPT]; | 1459 | caam_req->flc = &ctx->flc[DECRYPT]; |
| 1424 | caam_req->flc_dma = ctx->flc_dma[DECRYPT]; | 1460 | caam_req->flc_dma = ctx->flc_dma[DECRYPT]; |
| 1425 | caam_req->cbk = skcipher_decrypt_done; | 1461 | caam_req->cbk = skcipher_decrypt_done; |
| @@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 3380 | 3416 | ||
| 3381 | if (to_hash) { | 3417 | if (to_hash) { |
| 3382 | struct dpaa2_sg_entry *sg_table; | 3418 | struct dpaa2_sg_entry *sg_table; |
| 3419 | int src_len = req->nbytes - *next_buflen; | ||
| 3383 | 3420 | ||
| 3384 | src_nents = sg_nents_for_len(req->src, | 3421 | src_nents = sg_nents_for_len(req->src, src_len); |
| 3385 | req->nbytes - (*next_buflen)); | ||
| 3386 | if (src_nents < 0) { | 3422 | if (src_nents < 0) { |
| 3387 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | 3423 | dev_err(ctx->dev, "Invalid number of src SG.\n"); |
| 3388 | return src_nents; | 3424 | return src_nents; |
| @@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 3409 | 3445 | ||
| 3410 | edesc->src_nents = src_nents; | 3446 | edesc->src_nents = src_nents; |
| 3411 | qm_sg_src_index = 1 + (*buflen ? 1 : 0); | 3447 | qm_sg_src_index = 1 + (*buflen ? 1 : 0); |
| 3412 | qm_sg_bytes = (qm_sg_src_index + mapped_nents) * | 3448 | qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * |
| 3413 | sizeof(*sg_table); | 3449 | sizeof(*sg_table); |
| 3414 | sg_table = &edesc->sgt[0]; | 3450 | sg_table = &edesc->sgt[0]; |
| 3415 | 3451 | ||
| @@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 3423 | goto unmap_ctx; | 3459 | goto unmap_ctx; |
| 3424 | 3460 | ||
| 3425 | if (mapped_nents) { | 3461 | if (mapped_nents) { |
| 3426 | sg_to_qm_sg_last(req->src, mapped_nents, | 3462 | sg_to_qm_sg_last(req->src, src_len, |
| 3427 | sg_table + qm_sg_src_index, 0); | 3463 | sg_table + qm_sg_src_index, 0); |
| 3428 | if (*next_buflen) | 3464 | if (*next_buflen) |
| 3429 | scatterwalk_map_and_copy(next_buf, req->src, | 3465 | scatterwalk_map_and_copy(next_buf, req->src, |
| @@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 3494 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 3530 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
| 3495 | GFP_KERNEL : GFP_ATOMIC; | 3531 | GFP_KERNEL : GFP_ATOMIC; |
| 3496 | int buflen = *current_buflen(state); | 3532 | int buflen = *current_buflen(state); |
| 3497 | int qm_sg_bytes, qm_sg_src_index; | 3533 | int qm_sg_bytes; |
| 3498 | int digestsize = crypto_ahash_digestsize(ahash); | 3534 | int digestsize = crypto_ahash_digestsize(ahash); |
| 3499 | struct ahash_edesc *edesc; | 3535 | struct ahash_edesc *edesc; |
| 3500 | struct dpaa2_sg_entry *sg_table; | 3536 | struct dpaa2_sg_entry *sg_table; |
| @@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 3505 | if (!edesc) | 3541 | if (!edesc) |
| 3506 | return -ENOMEM; | 3542 | return -ENOMEM; |
| 3507 | 3543 | ||
| 3508 | qm_sg_src_index = 1 + (buflen ? 1 : 0); | 3544 | qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table); |
| 3509 | qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table); | ||
| 3510 | sg_table = &edesc->sgt[0]; | 3545 | sg_table = &edesc->sgt[0]; |
| 3511 | 3546 | ||
| 3512 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, | 3547 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
| @@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 3518 | if (ret) | 3553 | if (ret) |
| 3519 | goto unmap_ctx; | 3554 | goto unmap_ctx; |
| 3520 | 3555 | ||
| 3521 | dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); | 3556 | dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true); |
| 3522 | 3557 | ||
| 3523 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, | 3558 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
| 3524 | DMA_TO_DEVICE); | 3559 | DMA_TO_DEVICE); |
| @@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 3599 | 3634 | ||
| 3600 | edesc->src_nents = src_nents; | 3635 | edesc->src_nents = src_nents; |
| 3601 | qm_sg_src_index = 1 + (buflen ? 1 : 0); | 3636 | qm_sg_src_index = 1 + (buflen ? 1 : 0); |
| 3602 | qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table); | 3637 | qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * |
| 3638 | sizeof(*sg_table); | ||
| 3603 | sg_table = &edesc->sgt[0]; | 3639 | sg_table = &edesc->sgt[0]; |
| 3604 | 3640 | ||
| 3605 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, | 3641 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
| @@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 3611 | if (ret) | 3647 | if (ret) |
| 3612 | goto unmap_ctx; | 3648 | goto unmap_ctx; |
| 3613 | 3649 | ||
| 3614 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0); | 3650 | sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); |
| 3615 | 3651 | ||
| 3616 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, | 3652 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
| 3617 | DMA_TO_DEVICE); | 3653 | DMA_TO_DEVICE); |
| @@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req) | |||
| 3696 | int qm_sg_bytes; | 3732 | int qm_sg_bytes; |
| 3697 | struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; | 3733 | struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; |
| 3698 | 3734 | ||
| 3699 | qm_sg_bytes = mapped_nents * sizeof(*sg_table); | 3735 | qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table); |
| 3700 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); | 3736 | sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); |
| 3701 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, | 3737 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, |
| 3702 | qm_sg_bytes, DMA_TO_DEVICE); | 3738 | qm_sg_bytes, DMA_TO_DEVICE); |
| 3703 | if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { | 3739 | if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { |
| @@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 3840 | 3876 | ||
| 3841 | if (to_hash) { | 3877 | if (to_hash) { |
| 3842 | struct dpaa2_sg_entry *sg_table; | 3878 | struct dpaa2_sg_entry *sg_table; |
| 3879 | int src_len = req->nbytes - *next_buflen; | ||
| 3843 | 3880 | ||
| 3844 | src_nents = sg_nents_for_len(req->src, | 3881 | src_nents = sg_nents_for_len(req->src, src_len); |
| 3845 | req->nbytes - *next_buflen); | ||
| 3846 | if (src_nents < 0) { | 3882 | if (src_nents < 0) { |
| 3847 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | 3883 | dev_err(ctx->dev, "Invalid number of src SG.\n"); |
| 3848 | return src_nents; | 3884 | return src_nents; |
| @@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 3868 | } | 3904 | } |
| 3869 | 3905 | ||
| 3870 | edesc->src_nents = src_nents; | 3906 | edesc->src_nents = src_nents; |
| 3871 | qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table); | 3907 | qm_sg_bytes = pad_sg_nents(1 + mapped_nents) * |
| 3908 | sizeof(*sg_table); | ||
| 3872 | sg_table = &edesc->sgt[0]; | 3909 | sg_table = &edesc->sgt[0]; |
| 3873 | 3910 | ||
| 3874 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); | 3911 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); |
| 3875 | if (ret) | 3912 | if (ret) |
| 3876 | goto unmap_ctx; | 3913 | goto unmap_ctx; |
| 3877 | 3914 | ||
| 3878 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); | 3915 | sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); |
| 3879 | 3916 | ||
| 3880 | if (*next_buflen) | 3917 | if (*next_buflen) |
| 3881 | scatterwalk_map_and_copy(next_buf, req->src, | 3918 | scatterwalk_map_and_copy(next_buf, req->src, |
| @@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
| 3987 | } | 4024 | } |
| 3988 | 4025 | ||
| 3989 | edesc->src_nents = src_nents; | 4026 | edesc->src_nents = src_nents; |
| 3990 | qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table); | 4027 | qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table); |
| 3991 | sg_table = &edesc->sgt[0]; | 4028 | sg_table = &edesc->sgt[0]; |
| 3992 | 4029 | ||
| 3993 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); | 4030 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); |
| 3994 | if (ret) | 4031 | if (ret) |
| 3995 | goto unmap; | 4032 | goto unmap; |
| 3996 | 4033 | ||
| 3997 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); | 4034 | sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); |
| 3998 | 4035 | ||
| 3999 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, | 4036 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
| 4000 | DMA_TO_DEVICE); | 4037 | DMA_TO_DEVICE); |
| @@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 4064 | 4101 | ||
| 4065 | if (to_hash) { | 4102 | if (to_hash) { |
| 4066 | struct dpaa2_sg_entry *sg_table; | 4103 | struct dpaa2_sg_entry *sg_table; |
| 4104 | int src_len = req->nbytes - *next_buflen; | ||
| 4067 | 4105 | ||
| 4068 | src_nents = sg_nents_for_len(req->src, | 4106 | src_nents = sg_nents_for_len(req->src, src_len); |
| 4069 | req->nbytes - (*next_buflen)); | ||
| 4070 | if (src_nents < 0) { | 4107 | if (src_nents < 0) { |
| 4071 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | 4108 | dev_err(ctx->dev, "Invalid number of src SG.\n"); |
| 4072 | return src_nents; | 4109 | return src_nents; |
| @@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 4101 | if (mapped_nents > 1) { | 4138 | if (mapped_nents > 1) { |
| 4102 | int qm_sg_bytes; | 4139 | int qm_sg_bytes; |
| 4103 | 4140 | ||
| 4104 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); | 4141 | sg_to_qm_sg_last(req->src, src_len, sg_table, 0); |
| 4105 | qm_sg_bytes = mapped_nents * sizeof(*sg_table); | 4142 | qm_sg_bytes = pad_sg_nents(mapped_nents) * |
| 4143 | sizeof(*sg_table); | ||
| 4106 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, | 4144 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, |
| 4107 | qm_sg_bytes, | 4145 | qm_sg_bytes, |
| 4108 | DMA_TO_DEVICE); | 4146 | DMA_TO_DEVICE); |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 7205d9f4029e..e4ac5d591ad6 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -82,14 +82,6 @@ | |||
| 82 | #define HASH_MSG_LEN 8 | 82 | #define HASH_MSG_LEN 8 |
| 83 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) | 83 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) |
| 84 | 84 | ||
| 85 | #ifdef DEBUG | ||
| 86 | /* for print_hex_dumps with line references */ | ||
| 87 | #define debug(format, arg...) printk(format, arg) | ||
| 88 | #else | ||
| 89 | #define debug(format, arg...) | ||
| 90 | #endif | ||
| 91 | |||
| 92 | |||
| 93 | static struct list_head hash_list; | 85 | static struct list_head hash_list; |
| 94 | 86 | ||
| 95 | /* ahash per-session context */ | 87 | /* ahash per-session context */ |
| @@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 243 | ctx->ctx_len, true, ctrlpriv->era); | 235 | ctx->ctx_len, true, ctrlpriv->era); |
| 244 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, | 236 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
| 245 | desc_bytes(desc), ctx->dir); | 237 | desc_bytes(desc), ctx->dir); |
| 246 | #ifdef DEBUG | 238 | |
| 247 | print_hex_dump(KERN_ERR, | 239 | print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", |
| 248 | "ahash update shdesc@"__stringify(__LINE__)": ", | 240 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 249 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 241 | 1); |
| 250 | #endif | ||
| 251 | 242 | ||
| 252 | /* ahash_update_first shared descriptor */ | 243 | /* ahash_update_first shared descriptor */ |
| 253 | desc = ctx->sh_desc_update_first; | 244 | desc = ctx->sh_desc_update_first; |
| @@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 255 | ctx->ctx_len, false, ctrlpriv->era); | 246 | ctx->ctx_len, false, ctrlpriv->era); |
| 256 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 247 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
| 257 | desc_bytes(desc), ctx->dir); | 248 | desc_bytes(desc), ctx->dir); |
| 258 | #ifdef DEBUG | 249 | print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) |
| 259 | print_hex_dump(KERN_ERR, | 250 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 260 | "ahash update first shdesc@"__stringify(__LINE__)": ", | 251 | desc_bytes(desc), 1); |
| 261 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 262 | #endif | ||
| 263 | 252 | ||
| 264 | /* ahash_final shared descriptor */ | 253 | /* ahash_final shared descriptor */ |
| 265 | desc = ctx->sh_desc_fin; | 254 | desc = ctx->sh_desc_fin; |
| @@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 267 | ctx->ctx_len, true, ctrlpriv->era); | 256 | ctx->ctx_len, true, ctrlpriv->era); |
| 268 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, | 257 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
| 269 | desc_bytes(desc), ctx->dir); | 258 | desc_bytes(desc), ctx->dir); |
| 270 | #ifdef DEBUG | 259 | |
| 271 | print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", | 260 | print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", |
| 272 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 261 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 273 | desc_bytes(desc), 1); | 262 | desc_bytes(desc), 1); |
| 274 | #endif | ||
| 275 | 263 | ||
| 276 | /* ahash_digest shared descriptor */ | 264 | /* ahash_digest shared descriptor */ |
| 277 | desc = ctx->sh_desc_digest; | 265 | desc = ctx->sh_desc_digest; |
| @@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 279 | ctx->ctx_len, false, ctrlpriv->era); | 267 | ctx->ctx_len, false, ctrlpriv->era); |
| 280 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, | 268 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
| 281 | desc_bytes(desc), ctx->dir); | 269 | desc_bytes(desc), ctx->dir); |
| 282 | #ifdef DEBUG | 270 | |
| 283 | print_hex_dump(KERN_ERR, | 271 | print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", |
| 284 | "ahash digest shdesc@"__stringify(__LINE__)": ", | 272 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 285 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 273 | desc_bytes(desc), 1); |
| 286 | desc_bytes(desc), 1); | ||
| 287 | #endif | ||
| 288 | 274 | ||
| 289 | return 0; | 275 | return 0; |
| 290 | } | 276 | } |
| @@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) | |||
| 328 | ctx->ctx_len, ctx->key_dma); | 314 | ctx->ctx_len, ctx->key_dma); |
| 329 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 315 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
| 330 | desc_bytes(desc), ctx->dir); | 316 | desc_bytes(desc), ctx->dir); |
| 331 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", | 317 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) |
| 332 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | 318 | " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 333 | 1); | 319 | desc_bytes(desc), 1); |
| 334 | 320 | ||
| 335 | /* shared descriptor for ahash_digest */ | 321 | /* shared descriptor for ahash_digest */ |
| 336 | desc = ctx->sh_desc_digest; | 322 | desc = ctx->sh_desc_digest; |
| @@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |||
| 377 | ctx->ctx_len, 0); | 363 | ctx->ctx_len, 0); |
| 378 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 364 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
| 379 | desc_bytes(desc), ctx->dir); | 365 | desc_bytes(desc), ctx->dir); |
| 380 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", | 366 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) |
| 381 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 367 | " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 382 | desc_bytes(desc), 1); | 368 | desc_bytes(desc), 1); |
| 383 | 369 | ||
| 384 | /* shared descriptor for ahash_digest */ | 370 | /* shared descriptor for ahash_digest */ |
| @@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, | |||
| 429 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | 415 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | |
| 430 | LDST_SRCDST_BYTE_CONTEXT); | 416 | LDST_SRCDST_BYTE_CONTEXT); |
| 431 | 417 | ||
| 432 | #ifdef DEBUG | 418 | print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", |
| 433 | print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", | 419 | DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); |
| 434 | DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); | 420 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 435 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 421 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 436 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 422 | 1); |
| 437 | #endif | ||
| 438 | 423 | ||
| 439 | result.err = 0; | 424 | result.err = 0; |
| 440 | init_completion(&result.completion); | 425 | init_completion(&result.completion); |
| @@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, | |||
| 444 | /* in progress */ | 429 | /* in progress */ |
| 445 | wait_for_completion(&result.completion); | 430 | wait_for_completion(&result.completion); |
| 446 | ret = result.err; | 431 | ret = result.err; |
| 447 | #ifdef DEBUG | 432 | |
| 448 | print_hex_dump(KERN_ERR, | 433 | print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", |
| 449 | "digested key@"__stringify(__LINE__)": ", | 434 | DUMP_PREFIX_ADDRESS, 16, 4, key, |
| 450 | DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); | 435 | digestsize, 1); |
| 451 | #endif | ||
| 452 | } | 436 | } |
| 453 | dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); | 437 | dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); |
| 454 | 438 | ||
| @@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 463 | const u8 *key, unsigned int keylen) | 447 | const u8 *key, unsigned int keylen) |
| 464 | { | 448 | { |
| 465 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 449 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 450 | struct device *jrdev = ctx->jrdev; | ||
| 466 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | 451 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
| 467 | int digestsize = crypto_ahash_digestsize(ahash); | 452 | int digestsize = crypto_ahash_digestsize(ahash); |
| 468 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); | 453 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); |
| 469 | int ret; | 454 | int ret; |
| 470 | u8 *hashed_key = NULL; | 455 | u8 *hashed_key = NULL; |
| 471 | 456 | ||
| 472 | #ifdef DEBUG | 457 | dev_dbg(jrdev, "keylen %d\n", keylen); |
| 473 | printk(KERN_ERR "keylen %d\n", keylen); | ||
| 474 | #endif | ||
| 475 | 458 | ||
| 476 | if (keylen > blocksize) { | 459 | if (keylen > blocksize) { |
| 477 | hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); | 460 | hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); |
| @@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 600 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 583 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
| 601 | int digestsize = crypto_ahash_digestsize(ahash); | 584 | int digestsize = crypto_ahash_digestsize(ahash); |
| 602 | struct caam_hash_state *state = ahash_request_ctx(req); | 585 | struct caam_hash_state *state = ahash_request_ctx(req); |
| 603 | #ifdef DEBUG | ||
| 604 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 586 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 605 | 587 | ||
| 606 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 588 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 607 | #endif | ||
| 608 | 589 | ||
| 609 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 590 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 610 | if (err) | 591 | if (err) |
| @@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 614 | memcpy(req->result, state->caam_ctx, digestsize); | 595 | memcpy(req->result, state->caam_ctx, digestsize); |
| 615 | kfree(edesc); | 596 | kfree(edesc); |
| 616 | 597 | ||
| 617 | #ifdef DEBUG | 598 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
| 618 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 599 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
| 619 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 600 | ctx->ctx_len, 1); |
| 620 | ctx->ctx_len, 1); | ||
| 621 | #endif | ||
| 622 | 601 | ||
| 623 | req->base.complete(&req->base, err); | 602 | req->base.complete(&req->base, err); |
| 624 | } | 603 | } |
| @@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
| 631 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 610 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
| 632 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 611 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 633 | struct caam_hash_state *state = ahash_request_ctx(req); | 612 | struct caam_hash_state *state = ahash_request_ctx(req); |
| 634 | #ifdef DEBUG | ||
| 635 | int digestsize = crypto_ahash_digestsize(ahash); | 613 | int digestsize = crypto_ahash_digestsize(ahash); |
| 636 | 614 | ||
| 637 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 615 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 638 | #endif | ||
| 639 | 616 | ||
| 640 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 617 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 641 | if (err) | 618 | if (err) |
| @@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
| 645 | switch_buf(state); | 622 | switch_buf(state); |
| 646 | kfree(edesc); | 623 | kfree(edesc); |
| 647 | 624 | ||
| 648 | #ifdef DEBUG | 625 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
| 649 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 626 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
| 650 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 627 | ctx->ctx_len, 1); |
| 651 | ctx->ctx_len, 1); | ||
| 652 | if (req->result) | 628 | if (req->result) |
| 653 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", | 629 | print_hex_dump_debug("result@"__stringify(__LINE__)": ", |
| 654 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 630 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
| 655 | digestsize, 1); | 631 | digestsize, 1); |
| 656 | #endif | ||
| 657 | 632 | ||
| 658 | req->base.complete(&req->base, err); | 633 | req->base.complete(&req->base, err); |
| 659 | } | 634 | } |
| @@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
| 666 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 641 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
| 667 | int digestsize = crypto_ahash_digestsize(ahash); | 642 | int digestsize = crypto_ahash_digestsize(ahash); |
| 668 | struct caam_hash_state *state = ahash_request_ctx(req); | 643 | struct caam_hash_state *state = ahash_request_ctx(req); |
| 669 | #ifdef DEBUG | ||
| 670 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 644 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 671 | 645 | ||
| 672 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 646 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 673 | #endif | ||
| 674 | 647 | ||
| 675 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 648 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 676 | if (err) | 649 | if (err) |
| @@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
| 680 | memcpy(req->result, state->caam_ctx, digestsize); | 653 | memcpy(req->result, state->caam_ctx, digestsize); |
| 681 | kfree(edesc); | 654 | kfree(edesc); |
| 682 | 655 | ||
| 683 | #ifdef DEBUG | 656 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
| 684 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 657 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
| 685 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 658 | ctx->ctx_len, 1); |
| 686 | ctx->ctx_len, 1); | ||
| 687 | #endif | ||
| 688 | 659 | ||
| 689 | req->base.complete(&req->base, err); | 660 | req->base.complete(&req->base, err); |
| 690 | } | 661 | } |
| @@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
| 697 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 668 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
| 698 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 669 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 699 | struct caam_hash_state *state = ahash_request_ctx(req); | 670 | struct caam_hash_state *state = ahash_request_ctx(req); |
| 700 | #ifdef DEBUG | ||
| 701 | int digestsize = crypto_ahash_digestsize(ahash); | 671 | int digestsize = crypto_ahash_digestsize(ahash); |
| 702 | 672 | ||
| 703 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 673 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 704 | #endif | ||
| 705 | 674 | ||
| 706 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 675 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 707 | if (err) | 676 | if (err) |
| @@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
| 711 | switch_buf(state); | 680 | switch_buf(state); |
| 712 | kfree(edesc); | 681 | kfree(edesc); |
| 713 | 682 | ||
| 714 | #ifdef DEBUG | 683 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
| 715 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 684 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
| 716 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 685 | ctx->ctx_len, 1); |
| 717 | ctx->ctx_len, 1); | ||
| 718 | if (req->result) | 686 | if (req->result) |
| 719 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", | 687 | print_hex_dump_debug("result@"__stringify(__LINE__)": ", |
| 720 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 688 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
| 721 | digestsize, 1); | 689 | digestsize, 1); |
| 722 | #endif | ||
| 723 | 690 | ||
| 724 | req->base.complete(&req->base, err); | 691 | req->base.complete(&req->base, err); |
| 725 | } | 692 | } |
| @@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, | |||
| 759 | 726 | ||
| 760 | if (nents > 1 || first_sg) { | 727 | if (nents > 1 || first_sg) { |
| 761 | struct sec4_sg_entry *sg = edesc->sec4_sg; | 728 | struct sec4_sg_entry *sg = edesc->sec4_sg; |
| 762 | unsigned int sgsize = sizeof(*sg) * (first_sg + nents); | 729 | unsigned int sgsize = sizeof(*sg) * |
| 730 | pad_sg_nents(first_sg + nents); | ||
| 763 | 731 | ||
| 764 | sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); | 732 | sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); |
| 765 | 733 | ||
| 766 | src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); | 734 | src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); |
| 767 | if (dma_mapping_error(ctx->jrdev, src_dma)) { | 735 | if (dma_mapping_error(ctx->jrdev, src_dma)) { |
| @@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 819 | } | 787 | } |
| 820 | 788 | ||
| 821 | if (to_hash) { | 789 | if (to_hash) { |
| 822 | src_nents = sg_nents_for_len(req->src, | 790 | int pad_nents; |
| 823 | req->nbytes - (*next_buflen)); | 791 | int src_len = req->nbytes - *next_buflen; |
| 792 | |||
| 793 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 824 | if (src_nents < 0) { | 794 | if (src_nents < 0) { |
| 825 | dev_err(jrdev, "Invalid number of src SG.\n"); | 795 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 826 | return src_nents; | 796 | return src_nents; |
| @@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 838 | } | 808 | } |
| 839 | 809 | ||
| 840 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); | 810 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); |
| 841 | sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * | 811 | pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); |
| 842 | sizeof(struct sec4_sg_entry); | 812 | sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); |
| 843 | 813 | ||
| 844 | /* | 814 | /* |
| 845 | * allocate space for base edesc and hw desc commands, | 815 | * allocate space for base edesc and hw desc commands, |
| 846 | * link tables | 816 | * link tables |
| 847 | */ | 817 | */ |
| 848 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, | 818 | edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update, |
| 849 | ctx->sh_desc_update, | ||
| 850 | ctx->sh_desc_update_dma, flags); | 819 | ctx->sh_desc_update_dma, flags); |
| 851 | if (!edesc) { | 820 | if (!edesc) { |
| 852 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); | 821 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| @@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 866 | goto unmap_ctx; | 835 | goto unmap_ctx; |
| 867 | 836 | ||
| 868 | if (mapped_nents) | 837 | if (mapped_nents) |
| 869 | sg_to_sec4_sg_last(req->src, mapped_nents, | 838 | sg_to_sec4_sg_last(req->src, src_len, |
| 870 | edesc->sec4_sg + sec4_sg_src_index, | 839 | edesc->sec4_sg + sec4_sg_src_index, |
| 871 | 0); | 840 | 0); |
| 872 | else | 841 | else |
| @@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 893 | 862 | ||
| 894 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); | 863 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); |
| 895 | 864 | ||
| 896 | #ifdef DEBUG | 865 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 897 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 866 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 898 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 867 | desc_bytes(desc), 1); |
| 899 | desc_bytes(desc), 1); | ||
| 900 | #endif | ||
| 901 | 868 | ||
| 902 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); | 869 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); |
| 903 | if (ret) | 870 | if (ret) |
| @@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 910 | *buflen = *next_buflen; | 877 | *buflen = *next_buflen; |
| 911 | *next_buflen = last_buflen; | 878 | *next_buflen = last_buflen; |
| 912 | } | 879 | } |
| 913 | #ifdef DEBUG | 880 | |
| 914 | print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", | 881 | print_hex_dump_debug("buf@"__stringify(__LINE__)": ", |
| 915 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | 882 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
| 916 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", | 883 | print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", |
| 917 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 884 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
| 918 | *next_buflen, 1); | 885 | *next_buflen, 1); |
| 919 | #endif | ||
| 920 | 886 | ||
| 921 | return ret; | 887 | return ret; |
| 922 | unmap_ctx: | 888 | unmap_ctx: |
| @@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 935 | GFP_KERNEL : GFP_ATOMIC; | 901 | GFP_KERNEL : GFP_ATOMIC; |
| 936 | int buflen = *current_buflen(state); | 902 | int buflen = *current_buflen(state); |
| 937 | u32 *desc; | 903 | u32 *desc; |
| 938 | int sec4_sg_bytes, sec4_sg_src_index; | 904 | int sec4_sg_bytes; |
| 939 | int digestsize = crypto_ahash_digestsize(ahash); | 905 | int digestsize = crypto_ahash_digestsize(ahash); |
| 940 | struct ahash_edesc *edesc; | 906 | struct ahash_edesc *edesc; |
| 941 | int ret; | 907 | int ret; |
| 942 | 908 | ||
| 943 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | 909 | sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * |
| 944 | sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); | 910 | sizeof(struct sec4_sg_entry); |
| 945 | 911 | ||
| 946 | /* allocate space for base edesc and hw desc commands, link tables */ | 912 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 947 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, | 913 | edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin, |
| 948 | ctx->sh_desc_fin, ctx->sh_desc_fin_dma, | 914 | ctx->sh_desc_fin_dma, flags); |
| 949 | flags); | ||
| 950 | if (!edesc) | 915 | if (!edesc) |
| 951 | return -ENOMEM; | 916 | return -ENOMEM; |
| 952 | 917 | ||
| @@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 963 | if (ret) | 928 | if (ret) |
| 964 | goto unmap_ctx; | 929 | goto unmap_ctx; |
| 965 | 930 | ||
| 966 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); | 931 | sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); |
| 967 | 932 | ||
| 968 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 933 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
| 969 | sec4_sg_bytes, DMA_TO_DEVICE); | 934 | sec4_sg_bytes, DMA_TO_DEVICE); |
| @@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 977 | LDST_SGF); | 942 | LDST_SGF); |
| 978 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); | 943 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); |
| 979 | 944 | ||
| 980 | #ifdef DEBUG | 945 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 981 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 946 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 982 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 947 | 1); |
| 983 | #endif | ||
| 984 | 948 | ||
| 985 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | 949 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); |
| 986 | if (ret) | 950 | if (ret) |
| @@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 1058 | 1022 | ||
| 1059 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); | 1023 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); |
| 1060 | 1024 | ||
| 1061 | #ifdef DEBUG | 1025 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 1062 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1026 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1063 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1027 | 1); |
| 1064 | #endif | ||
| 1065 | 1028 | ||
| 1066 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | 1029 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); |
| 1067 | if (ret) | 1030 | if (ret) |
| @@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req) | |||
| 1135 | return -ENOMEM; | 1098 | return -ENOMEM; |
| 1136 | } | 1099 | } |
| 1137 | 1100 | ||
| 1138 | #ifdef DEBUG | 1101 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 1139 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1102 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1140 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1103 | 1); |
| 1141 | #endif | ||
| 1142 | 1104 | ||
| 1143 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | 1105 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); |
| 1144 | if (!ret) { | 1106 | if (!ret) { |
| @@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1190 | if (ret) | 1152 | if (ret) |
| 1191 | goto unmap; | 1153 | goto unmap; |
| 1192 | 1154 | ||
| 1193 | #ifdef DEBUG | 1155 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 1194 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1156 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1195 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1157 | 1); |
| 1196 | #endif | ||
| 1197 | 1158 | ||
| 1198 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | 1159 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); |
| 1199 | if (!ret) { | 1160 | if (!ret) { |
| @@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1246 | } | 1207 | } |
| 1247 | 1208 | ||
| 1248 | if (to_hash) { | 1209 | if (to_hash) { |
| 1249 | src_nents = sg_nents_for_len(req->src, | 1210 | int pad_nents; |
| 1250 | req->nbytes - *next_buflen); | 1211 | int src_len = req->nbytes - *next_buflen; |
| 1212 | |||
| 1213 | src_nents = sg_nents_for_len(req->src, src_len); | ||
| 1251 | if (src_nents < 0) { | 1214 | if (src_nents < 0) { |
| 1252 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1215 | dev_err(jrdev, "Invalid number of src SG.\n"); |
| 1253 | return src_nents; | 1216 | return src_nents; |
| @@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1264 | mapped_nents = 0; | 1227 | mapped_nents = 0; |
| 1265 | } | 1228 | } |
| 1266 | 1229 | ||
| 1267 | sec4_sg_bytes = (1 + mapped_nents) * | 1230 | pad_nents = pad_sg_nents(1 + mapped_nents); |
| 1268 | sizeof(struct sec4_sg_entry); | 1231 | sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); |
| 1269 | 1232 | ||
| 1270 | /* | 1233 | /* |
| 1271 | * allocate space for base edesc and hw desc commands, | 1234 | * allocate space for base edesc and hw desc commands, |
| 1272 | * link tables | 1235 | * link tables |
| 1273 | */ | 1236 | */ |
| 1274 | edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, | 1237 | edesc = ahash_edesc_alloc(ctx, pad_nents, |
| 1275 | ctx->sh_desc_update_first, | 1238 | ctx->sh_desc_update_first, |
| 1276 | ctx->sh_desc_update_first_dma, | 1239 | ctx->sh_desc_update_first_dma, |
| 1277 | flags); | 1240 | flags); |
| @@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1287 | if (ret) | 1250 | if (ret) |
| 1288 | goto unmap_ctx; | 1251 | goto unmap_ctx; |
| 1289 | 1252 | ||
| 1290 | sg_to_sec4_sg_last(req->src, mapped_nents, | 1253 | sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); |
| 1291 | edesc->sec4_sg + 1, 0); | ||
| 1292 | 1254 | ||
| 1293 | if (*next_buflen) { | 1255 | if (*next_buflen) { |
| 1294 | scatterwalk_map_and_copy(next_buf, req->src, | 1256 | scatterwalk_map_and_copy(next_buf, req->src, |
| @@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1313 | if (ret) | 1275 | if (ret) |
| 1314 | goto unmap_ctx; | 1276 | goto unmap_ctx; |
| 1315 | 1277 | ||
| 1316 | #ifdef DEBUG | 1278 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 1317 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1279 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 1318 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 1280 | desc_bytes(desc), 1); |
| 1319 | desc_bytes(desc), 1); | ||
| 1320 | #endif | ||
| 1321 | 1281 | ||
| 1322 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | 1282 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); |
| 1323 | if (ret) | 1283 | if (ret) |
| @@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1333 | *buflen = *next_buflen; | 1293 | *buflen = *next_buflen; |
| 1334 | *next_buflen = 0; | 1294 | *next_buflen = 0; |
| 1335 | } | 1295 | } |
| 1336 | #ifdef DEBUG | 1296 | |
| 1337 | print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", | 1297 | print_hex_dump_debug("buf@"__stringify(__LINE__)": ", |
| 1338 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | 1298 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
| 1339 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", | 1299 | print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", |
| 1340 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 1300 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, |
| 1341 | *next_buflen, 1); | 1301 | 1); |
| 1342 | #endif | ||
| 1343 | 1302 | ||
| 1344 | return ret; | 1303 | return ret; |
| 1345 | unmap_ctx: | 1304 | unmap_ctx: |
| @@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
| 1414 | if (ret) | 1373 | if (ret) |
| 1415 | goto unmap; | 1374 | goto unmap; |
| 1416 | 1375 | ||
| 1417 | #ifdef DEBUG | 1376 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 1418 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1377 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 1419 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1378 | 1); |
| 1420 | #endif | ||
| 1421 | 1379 | ||
| 1422 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | 1380 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); |
| 1423 | if (!ret) { | 1381 | if (!ret) { |
| @@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 1517 | if (ret) | 1475 | if (ret) |
| 1518 | goto unmap_ctx; | 1476 | goto unmap_ctx; |
| 1519 | 1477 | ||
| 1520 | #ifdef DEBUG | 1478 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 1521 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1479 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
| 1522 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 1480 | desc_bytes(desc), 1); |
| 1523 | desc_bytes(desc), 1); | ||
| 1524 | #endif | ||
| 1525 | 1481 | ||
| 1526 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | 1482 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); |
| 1527 | if (ret) | 1483 | if (ret) |
| @@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 1539 | req->nbytes, 0); | 1495 | req->nbytes, 0); |
| 1540 | switch_buf(state); | 1496 | switch_buf(state); |
| 1541 | } | 1497 | } |
| 1542 | #ifdef DEBUG | 1498 | |
| 1543 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", | 1499 | print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", |
| 1544 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 1500 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, |
| 1545 | *next_buflen, 1); | 1501 | 1); |
| 1546 | #endif | ||
| 1547 | 1502 | ||
| 1548 | return ret; | 1503 | return ret; |
| 1549 | unmap_ctx: | 1504 | unmap_ctx: |
| @@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
| 1930 | caam_jr_free(ctx->jrdev); | 1885 | caam_jr_free(ctx->jrdev); |
| 1931 | } | 1886 | } |
| 1932 | 1887 | ||
| 1933 | static void __exit caam_algapi_hash_exit(void) | 1888 | void caam_algapi_hash_exit(void) |
| 1934 | { | 1889 | { |
| 1935 | struct caam_hash_alg *t_alg, *n; | 1890 | struct caam_hash_alg *t_alg, *n; |
| 1936 | 1891 | ||
| @@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
| 1988 | return t_alg; | 1943 | return t_alg; |
| 1989 | } | 1944 | } |
| 1990 | 1945 | ||
| 1991 | static int __init caam_algapi_hash_init(void) | 1946 | int caam_algapi_hash_init(struct device *ctrldev) |
| 1992 | { | 1947 | { |
| 1993 | struct device_node *dev_node; | ||
| 1994 | struct platform_device *pdev; | ||
| 1995 | int i = 0, err = 0; | 1948 | int i = 0, err = 0; |
| 1996 | struct caam_drv_private *priv; | 1949 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
| 1997 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 1950 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
| 1998 | u32 md_inst, md_vid; | 1951 | u32 md_inst, md_vid; |
| 1999 | 1952 | ||
| 2000 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 2001 | if (!dev_node) { | ||
| 2002 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 2003 | if (!dev_node) | ||
| 2004 | return -ENODEV; | ||
| 2005 | } | ||
| 2006 | |||
| 2007 | pdev = of_find_device_by_node(dev_node); | ||
| 2008 | if (!pdev) { | ||
| 2009 | of_node_put(dev_node); | ||
| 2010 | return -ENODEV; | ||
| 2011 | } | ||
| 2012 | |||
| 2013 | priv = dev_get_drvdata(&pdev->dev); | ||
| 2014 | of_node_put(dev_node); | ||
| 2015 | |||
| 2016 | /* | ||
| 2017 | * If priv is NULL, it's probably because the caam driver wasn't | ||
| 2018 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
| 2019 | */ | ||
| 2020 | if (!priv) { | ||
| 2021 | err = -ENODEV; | ||
| 2022 | goto out_put_dev; | ||
| 2023 | } | ||
| 2024 | |||
| 2025 | /* | 1953 | /* |
| 2026 | * Register crypto algorithms the device supports. First, identify | 1954 | * Register crypto algorithms the device supports. First, identify |
| 2027 | * presence and attributes of MD block. | 1955 | * presence and attributes of MD block. |
| @@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void) | |||
| 2042 | * Skip registration of any hashing algorithms if MD block | 1970 | * Skip registration of any hashing algorithms if MD block |
| 2043 | * is not present. | 1971 | * is not present. |
| 2044 | */ | 1972 | */ |
| 2045 | if (!md_inst) { | 1973 | if (!md_inst) |
| 2046 | err = -ENODEV; | 1974 | return -ENODEV; |
| 2047 | goto out_put_dev; | ||
| 2048 | } | ||
| 2049 | 1975 | ||
| 2050 | /* Limit digest size based on LP256 */ | 1976 | /* Limit digest size based on LP256 */ |
| 2051 | if (md_vid == CHA_VER_VID_MD_LP256) | 1977 | if (md_vid == CHA_VER_VID_MD_LP256) |
| @@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void) | |||
| 2102 | list_add_tail(&t_alg->entry, &hash_list); | 2028 | list_add_tail(&t_alg->entry, &hash_list); |
| 2103 | } | 2029 | } |
| 2104 | 2030 | ||
| 2105 | out_put_dev: | ||
| 2106 | put_device(&pdev->dev); | ||
| 2107 | return err; | 2031 | return err; |
| 2108 | } | 2032 | } |
| 2109 | |||
| 2110 | module_init(caam_algapi_hash_init); | ||
| 2111 | module_exit(caam_algapi_hash_exit); | ||
| 2112 | |||
| 2113 | MODULE_LICENSE("GPL"); | ||
| 2114 | MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); | ||
| 2115 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index fe24485274e1..80574106af29 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * caam - Freescale FSL CAAM support for Public Key Cryptography | 3 | * caam - Freescale FSL CAAM support for Public Key Cryptography |
| 4 | * | 4 | * |
| 5 | * Copyright 2016 Freescale Semiconductor, Inc. | 5 | * Copyright 2016 Freescale Semiconductor, Inc. |
| 6 | * Copyright 2018 NXP | 6 | * Copyright 2018-2019 NXP |
| 7 | * | 7 | * |
| 8 | * There is no Shared Descriptor for PKC so that the Job Descriptor must carry | 8 | * There is no Shared Descriptor for PKC so that the Job Descriptor must carry |
| 9 | * all the desired key parameters, input and output pointers. | 9 | * all the desired key parameters, input and output pointers. |
| @@ -24,12 +24,18 @@ | |||
| 24 | sizeof(struct rsa_priv_f2_pdb)) | 24 | sizeof(struct rsa_priv_f2_pdb)) |
| 25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ | 25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ |
| 26 | sizeof(struct rsa_priv_f3_pdb)) | 26 | sizeof(struct rsa_priv_f3_pdb)) |
| 27 | #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ | ||
| 28 | |||
| 29 | /* buffer filled with zeros, used for padding */ | ||
| 30 | static u8 *zero_buffer; | ||
| 27 | 31 | ||
| 28 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, | 32 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, |
| 29 | struct akcipher_request *req) | 33 | struct akcipher_request *req) |
| 30 | { | 34 | { |
| 35 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); | ||
| 36 | |||
| 31 | dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); | 37 | dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); |
| 32 | dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); | 38 | dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); |
| 33 | 39 | ||
| 34 | if (edesc->sec4_sg_bytes) | 40 | if (edesc->sec4_sg_bytes) |
| 35 | dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, | 41 | dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, |
| @@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, | |||
| 168 | akcipher_request_complete(req, err); | 174 | akcipher_request_complete(req, err); |
| 169 | } | 175 | } |
| 170 | 176 | ||
| 177 | /** | ||
| 178 | * Count leading zeros, need it to strip, from a given scatterlist | ||
| 179 | * | ||
| 180 | * @sgl : scatterlist to count zeros from | ||
| 181 | * @nbytes: number of zeros, in bytes, to strip | ||
| 182 | * @flags : operation flags | ||
| 183 | */ | ||
| 171 | static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, | 184 | static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, |
| 172 | unsigned int nbytes, | 185 | unsigned int nbytes, |
| 173 | unsigned int flags) | 186 | unsigned int flags) |
| @@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, | |||
| 187 | lzeros = 0; | 200 | lzeros = 0; |
| 188 | len = 0; | 201 | len = 0; |
| 189 | while (nbytes > 0) { | 202 | while (nbytes > 0) { |
| 190 | while (len && !*buff) { | 203 | /* do not strip more than given bytes */ |
| 204 | while (len && !*buff && lzeros < nbytes) { | ||
| 191 | lzeros++; | 205 | lzeros++; |
| 192 | len--; | 206 | len--; |
| 193 | buff++; | 207 | buff++; |
| @@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
| 218 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 232 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
| 219 | struct device *dev = ctx->dev; | 233 | struct device *dev = ctx->dev; |
| 220 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); | 234 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
| 235 | struct caam_rsa_key *key = &ctx->key; | ||
| 221 | struct rsa_edesc *edesc; | 236 | struct rsa_edesc *edesc; |
| 222 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 237 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
| 223 | GFP_KERNEL : GFP_ATOMIC; | 238 | GFP_KERNEL : GFP_ATOMIC; |
| @@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
| 225 | int sgc; | 240 | int sgc; |
| 226 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | 241 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
| 227 | int src_nents, dst_nents; | 242 | int src_nents, dst_nents; |
| 243 | unsigned int diff_size = 0; | ||
| 228 | int lzeros; | 244 | int lzeros; |
| 229 | 245 | ||
| 230 | lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); | 246 | if (req->src_len > key->n_sz) { |
| 231 | if (lzeros < 0) | 247 | /* |
| 232 | return ERR_PTR(lzeros); | 248 | * strip leading zeros and |
| 233 | 249 | * return the number of zeros to skip | |
| 234 | req->src_len -= lzeros; | 250 | */ |
| 235 | req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); | 251 | lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - |
| 252 | key->n_sz, sg_flags); | ||
| 253 | if (lzeros < 0) | ||
| 254 | return ERR_PTR(lzeros); | ||
| 255 | |||
| 256 | req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, | ||
| 257 | lzeros); | ||
| 258 | req_ctx->fixup_src_len = req->src_len - lzeros; | ||
| 259 | } else { | ||
| 260 | /* | ||
| 261 | * input src is less then n key modulus, | ||
| 262 | * so there will be zero padding | ||
| 263 | */ | ||
| 264 | diff_size = key->n_sz - req->src_len; | ||
| 265 | req_ctx->fixup_src = req->src; | ||
| 266 | req_ctx->fixup_src_len = req->src_len; | ||
| 267 | } | ||
| 236 | 268 | ||
| 237 | src_nents = sg_nents_for_len(req->src, req->src_len); | 269 | src_nents = sg_nents_for_len(req_ctx->fixup_src, |
| 270 | req_ctx->fixup_src_len); | ||
| 238 | dst_nents = sg_nents_for_len(req->dst, req->dst_len); | 271 | dst_nents = sg_nents_for_len(req->dst, req->dst_len); |
| 239 | 272 | ||
| 240 | if (src_nents > 1) | 273 | if (!diff_size && src_nents == 1) |
| 241 | sec4_sg_len = src_nents; | 274 | sec4_sg_len = 0; /* no need for an input hw s/g table */ |
| 275 | else | ||
| 276 | sec4_sg_len = src_nents + !!diff_size; | ||
| 277 | sec4_sg_index = sec4_sg_len; | ||
| 242 | if (dst_nents > 1) | 278 | if (dst_nents > 1) |
| 243 | sec4_sg_len += dst_nents; | 279 | sec4_sg_len += pad_sg_nents(dst_nents); |
| 280 | else | ||
| 281 | sec4_sg_len = pad_sg_nents(sec4_sg_len); | ||
| 244 | 282 | ||
| 245 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | 283 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
| 246 | 284 | ||
| @@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
| 250 | if (!edesc) | 288 | if (!edesc) |
| 251 | return ERR_PTR(-ENOMEM); | 289 | return ERR_PTR(-ENOMEM); |
| 252 | 290 | ||
| 253 | sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); | 291 | sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); |
| 254 | if (unlikely(!sgc)) { | 292 | if (unlikely(!sgc)) { |
| 255 | dev_err(dev, "unable to map source\n"); | 293 | dev_err(dev, "unable to map source\n"); |
| 256 | goto src_fail; | 294 | goto src_fail; |
| @@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
| 263 | } | 301 | } |
| 264 | 302 | ||
| 265 | edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; | 303 | edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; |
| 304 | if (diff_size) | ||
| 305 | dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, | ||
| 306 | 0); | ||
| 307 | |||
| 308 | if (sec4_sg_index) | ||
| 309 | sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, | ||
| 310 | edesc->sec4_sg + !!diff_size, 0); | ||
| 266 | 311 | ||
| 267 | sec4_sg_index = 0; | ||
| 268 | if (src_nents > 1) { | ||
| 269 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); | ||
| 270 | sec4_sg_index += src_nents; | ||
| 271 | } | ||
| 272 | if (dst_nents > 1) | 312 | if (dst_nents > 1) |
| 273 | sg_to_sec4_sg_last(req->dst, dst_nents, | 313 | sg_to_sec4_sg_last(req->dst, req->dst_len, |
| 274 | edesc->sec4_sg + sec4_sg_index, 0); | 314 | edesc->sec4_sg + sec4_sg_index, 0); |
| 275 | 315 | ||
| 276 | /* Save nents for later use in Job Descriptor */ | 316 | /* Save nents for later use in Job Descriptor */ |
| @@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
| 289 | 329 | ||
| 290 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 330 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
| 291 | 331 | ||
| 332 | print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", | ||
| 333 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, | ||
| 334 | edesc->sec4_sg_bytes, 1); | ||
| 335 | |||
| 292 | return edesc; | 336 | return edesc; |
| 293 | 337 | ||
| 294 | sec4_sg_fail: | 338 | sec4_sg_fail: |
| 295 | dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); | 339 | dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); |
| 296 | dst_fail: | 340 | dst_fail: |
| 297 | dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); | 341 | dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); |
| 298 | src_fail: | 342 | src_fail: |
| 299 | kfree(edesc); | 343 | kfree(edesc); |
| 300 | return ERR_PTR(-ENOMEM); | 344 | return ERR_PTR(-ENOMEM); |
| @@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, | |||
| 304 | struct rsa_edesc *edesc) | 348 | struct rsa_edesc *edesc) |
| 305 | { | 349 | { |
| 306 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | 350 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
| 351 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); | ||
| 307 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 352 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
| 308 | struct caam_rsa_key *key = &ctx->key; | 353 | struct caam_rsa_key *key = &ctx->key; |
| 309 | struct device *dev = ctx->dev; | 354 | struct device *dev = ctx->dev; |
| @@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, | |||
| 328 | pdb->f_dma = edesc->sec4_sg_dma; | 373 | pdb->f_dma = edesc->sec4_sg_dma; |
| 329 | sec4_sg_index += edesc->src_nents; | 374 | sec4_sg_index += edesc->src_nents; |
| 330 | } else { | 375 | } else { |
| 331 | pdb->f_dma = sg_dma_address(req->src); | 376 | pdb->f_dma = sg_dma_address(req_ctx->fixup_src); |
| 332 | } | 377 | } |
| 333 | 378 | ||
| 334 | if (edesc->dst_nents > 1) { | 379 | if (edesc->dst_nents > 1) { |
| @@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, | |||
| 340 | } | 385 | } |
| 341 | 386 | ||
| 342 | pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; | 387 | pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; |
| 343 | pdb->f_len = req->src_len; | 388 | pdb->f_len = req_ctx->fixup_src_len; |
| 344 | 389 | ||
| 345 | return 0; | 390 | return 0; |
| 346 | } | 391 | } |
| @@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req, | |||
| 373 | pdb->g_dma = edesc->sec4_sg_dma; | 418 | pdb->g_dma = edesc->sec4_sg_dma; |
| 374 | sec4_sg_index += edesc->src_nents; | 419 | sec4_sg_index += edesc->src_nents; |
| 375 | } else { | 420 | } else { |
| 376 | pdb->g_dma = sg_dma_address(req->src); | 421 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
| 422 | |||
| 423 | pdb->g_dma = sg_dma_address(req_ctx->fixup_src); | ||
| 377 | } | 424 | } |
| 378 | 425 | ||
| 379 | if (edesc->dst_nents > 1) { | 426 | if (edesc->dst_nents > 1) { |
| @@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
| 436 | pdb->g_dma = edesc->sec4_sg_dma; | 483 | pdb->g_dma = edesc->sec4_sg_dma; |
| 437 | sec4_sg_index += edesc->src_nents; | 484 | sec4_sg_index += edesc->src_nents; |
| 438 | } else { | 485 | } else { |
| 439 | pdb->g_dma = sg_dma_address(req->src); | 486 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
| 487 | |||
| 488 | pdb->g_dma = sg_dma_address(req_ctx->fixup_src); | ||
| 440 | } | 489 | } |
| 441 | 490 | ||
| 442 | if (edesc->dst_nents > 1) { | 491 | if (edesc->dst_nents > 1) { |
| @@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
| 523 | pdb->g_dma = edesc->sec4_sg_dma; | 572 | pdb->g_dma = edesc->sec4_sg_dma; |
| 524 | sec4_sg_index += edesc->src_nents; | 573 | sec4_sg_index += edesc->src_nents; |
| 525 | } else { | 574 | } else { |
| 526 | pdb->g_dma = sg_dma_address(req->src); | 575 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
| 576 | |||
| 577 | pdb->g_dma = sg_dma_address(req_ctx->fixup_src); | ||
| 527 | } | 578 | } |
| 528 | 579 | ||
| 529 | if (edesc->dst_nents > 1) { | 580 | if (edesc->dst_nents > 1) { |
| @@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) | |||
| 978 | return PTR_ERR(ctx->dev); | 1029 | return PTR_ERR(ctx->dev); |
| 979 | } | 1030 | } |
| 980 | 1031 | ||
| 1032 | ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, | ||
| 1033 | CAAM_RSA_MAX_INPUT_SIZE - 1, | ||
| 1034 | DMA_TO_DEVICE); | ||
| 1035 | if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { | ||
| 1036 | dev_err(ctx->dev, "unable to map padding\n"); | ||
| 1037 | caam_jr_free(ctx->dev); | ||
| 1038 | return -ENOMEM; | ||
| 1039 | } | ||
| 1040 | |||
| 981 | return 0; | 1041 | return 0; |
| 982 | } | 1042 | } |
| 983 | 1043 | ||
| @@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) | |||
| 987 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 1047 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
| 988 | struct caam_rsa_key *key = &ctx->key; | 1048 | struct caam_rsa_key *key = &ctx->key; |
| 989 | 1049 | ||
| 1050 | dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - | ||
| 1051 | 1, DMA_TO_DEVICE); | ||
| 990 | caam_rsa_free_key(key); | 1052 | caam_rsa_free_key(key); |
| 991 | caam_jr_free(ctx->dev); | 1053 | caam_jr_free(ctx->dev); |
| 992 | } | 1054 | } |
| @@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = { | |||
| 1010 | }; | 1072 | }; |
| 1011 | 1073 | ||
| 1012 | /* Public Key Cryptography module initialization handler */ | 1074 | /* Public Key Cryptography module initialization handler */ |
| 1013 | static int __init caam_pkc_init(void) | 1075 | int caam_pkc_init(struct device *ctrldev) |
| 1014 | { | 1076 | { |
| 1015 | struct device_node *dev_node; | 1077 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
| 1016 | struct platform_device *pdev; | ||
| 1017 | struct device *ctrldev; | ||
| 1018 | struct caam_drv_private *priv; | ||
| 1019 | u32 pk_inst; | 1078 | u32 pk_inst; |
| 1020 | int err; | 1079 | int err; |
| 1021 | 1080 | ||
| 1022 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 1023 | if (!dev_node) { | ||
| 1024 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 1025 | if (!dev_node) | ||
| 1026 | return -ENODEV; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | pdev = of_find_device_by_node(dev_node); | ||
| 1030 | if (!pdev) { | ||
| 1031 | of_node_put(dev_node); | ||
| 1032 | return -ENODEV; | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | ctrldev = &pdev->dev; | ||
| 1036 | priv = dev_get_drvdata(ctrldev); | ||
| 1037 | of_node_put(dev_node); | ||
| 1038 | |||
| 1039 | /* | ||
| 1040 | * If priv is NULL, it's probably because the caam driver wasn't | ||
| 1041 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
| 1042 | */ | ||
| 1043 | if (!priv) { | ||
| 1044 | err = -ENODEV; | ||
| 1045 | goto out_put_dev; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | /* Determine public key hardware accelerator presence. */ | 1081 | /* Determine public key hardware accelerator presence. */ |
| 1049 | if (priv->era < 10) | 1082 | if (priv->era < 10) |
| 1050 | pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & | 1083 | pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & |
| @@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void) | |||
| 1053 | pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; | 1086 | pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; |
| 1054 | 1087 | ||
| 1055 | /* Do not register algorithms if PKHA is not present. */ | 1088 | /* Do not register algorithms if PKHA is not present. */ |
| 1056 | if (!pk_inst) { | 1089 | if (!pk_inst) |
| 1057 | err = -ENODEV; | 1090 | return 0; |
| 1058 | goto out_put_dev; | 1091 | |
| 1059 | } | 1092 | /* allocate zero buffer, used for padding input */ |
| 1093 | zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | | ||
| 1094 | GFP_KERNEL); | ||
| 1095 | if (!zero_buffer) | ||
| 1096 | return -ENOMEM; | ||
| 1060 | 1097 | ||
| 1061 | err = crypto_register_akcipher(&caam_rsa); | 1098 | err = crypto_register_akcipher(&caam_rsa); |
| 1062 | if (err) | 1099 | if (err) { |
| 1100 | kfree(zero_buffer); | ||
| 1063 | dev_warn(ctrldev, "%s alg registration failed\n", | 1101 | dev_warn(ctrldev, "%s alg registration failed\n", |
| 1064 | caam_rsa.base.cra_driver_name); | 1102 | caam_rsa.base.cra_driver_name); |
| 1065 | else | 1103 | } else { |
| 1066 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); | 1104 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); |
| 1105 | } | ||
| 1067 | 1106 | ||
| 1068 | out_put_dev: | ||
| 1069 | put_device(ctrldev); | ||
| 1070 | return err; | 1107 | return err; |
| 1071 | } | 1108 | } |
| 1072 | 1109 | ||
| 1073 | static void __exit caam_pkc_exit(void) | 1110 | void caam_pkc_exit(void) |
| 1074 | { | 1111 | { |
| 1112 | kfree(zero_buffer); | ||
| 1075 | crypto_unregister_akcipher(&caam_rsa); | 1113 | crypto_unregister_akcipher(&caam_rsa); |
| 1076 | } | 1114 | } |
| 1077 | |||
| 1078 | module_init(caam_pkc_init); | ||
| 1079 | module_exit(caam_pkc_exit); | ||
| 1080 | |||
| 1081 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 1082 | MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API"); | ||
| 1083 | MODULE_AUTHOR("Freescale Semiconductor"); | ||
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index 82645bcf8b27..2c488c9a3812 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h | |||
| @@ -89,18 +89,25 @@ struct caam_rsa_key { | |||
| 89 | * caam_rsa_ctx - per session context. | 89 | * caam_rsa_ctx - per session context. |
| 90 | * @key : RSA key in DMA zone | 90 | * @key : RSA key in DMA zone |
| 91 | * @dev : device structure | 91 | * @dev : device structure |
| 92 | * @padding_dma : dma address of padding, for adding it to the input | ||
| 92 | */ | 93 | */ |
| 93 | struct caam_rsa_ctx { | 94 | struct caam_rsa_ctx { |
| 94 | struct caam_rsa_key key; | 95 | struct caam_rsa_key key; |
| 95 | struct device *dev; | 96 | struct device *dev; |
| 97 | dma_addr_t padding_dma; | ||
| 98 | |||
| 96 | }; | 99 | }; |
| 97 | 100 | ||
| 98 | /** | 101 | /** |
| 99 | * caam_rsa_req_ctx - per request context. | 102 | * caam_rsa_req_ctx - per request context. |
| 100 | * @src: input scatterlist (stripped of leading zeros) | 103 | * @src : input scatterlist (stripped of leading zeros) |
| 104 | * @fixup_src : input scatterlist (that might be stripped of leading zeros) | ||
| 105 | * @fixup_src_len : length of the fixup_src input scatterlist | ||
| 101 | */ | 106 | */ |
| 102 | struct caam_rsa_req_ctx { | 107 | struct caam_rsa_req_ctx { |
| 103 | struct scatterlist src[2]; | 108 | struct scatterlist src[2]; |
| 109 | struct scatterlist *fixup_src; | ||
| 110 | unsigned int fixup_src_len; | ||
| 104 | }; | 111 | }; |
| 105 | 112 | ||
| 106 | /** | 113 | /** |
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 95eb5402c59f..561bcb535184 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * caam - Freescale FSL CAAM support for hw_random | 3 | * caam - Freescale FSL CAAM support for hw_random |
| 4 | * | 4 | * |
| 5 | * Copyright 2011 Freescale Semiconductor, Inc. | 5 | * Copyright 2011 Freescale Semiconductor, Inc. |
| 6 | * Copyright 2018 NXP | 6 | * Copyright 2018-2019 NXP |
| 7 | * | 7 | * |
| 8 | * Based on caamalg.c crypto API driver. | 8 | * Based on caamalg.c crypto API driver. |
| 9 | * | 9 | * |
| @@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) | |||
| 113 | /* Buffer refilled, invalidate cache */ | 113 | /* Buffer refilled, invalidate cache */ |
| 114 | dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); | 114 | dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); |
| 115 | 115 | ||
| 116 | #ifdef DEBUG | 116 | print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
| 117 | print_hex_dump(KERN_ERR, "rng refreshed buf@: ", | 117 | bd->buf, RN_BUF_SIZE, 1); |
| 118 | DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); | ||
| 119 | #endif | ||
| 120 | } | 118 | } |
| 121 | 119 | ||
| 122 | static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) | 120 | static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) |
| @@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) | |||
| 209 | dev_err(jrdev, "unable to map shared descriptor\n"); | 207 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 210 | return -ENOMEM; | 208 | return -ENOMEM; |
| 211 | } | 209 | } |
| 212 | #ifdef DEBUG | 210 | |
| 213 | print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | 211 | print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
| 214 | desc, desc_bytes(desc), 1); | 212 | desc, desc_bytes(desc), 1); |
| 215 | #endif | 213 | |
| 216 | return 0; | 214 | return 0; |
| 217 | } | 215 | } |
| 218 | 216 | ||
| @@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) | |||
| 233 | } | 231 | } |
| 234 | 232 | ||
| 235 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); | 233 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); |
| 236 | #ifdef DEBUG | 234 | |
| 237 | print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | 235 | print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
| 238 | desc, desc_bytes(desc), 1); | 236 | desc, desc_bytes(desc), 1); |
| 239 | #endif | 237 | |
| 240 | return 0; | 238 | return 0; |
| 241 | } | 239 | } |
| 242 | 240 | ||
| @@ -296,47 +294,20 @@ static struct hwrng caam_rng = { | |||
| 296 | .read = caam_read, | 294 | .read = caam_read, |
| 297 | }; | 295 | }; |
| 298 | 296 | ||
| 299 | static void __exit caam_rng_exit(void) | 297 | void caam_rng_exit(void) |
| 300 | { | 298 | { |
| 301 | caam_jr_free(rng_ctx->jrdev); | 299 | caam_jr_free(rng_ctx->jrdev); |
| 302 | hwrng_unregister(&caam_rng); | 300 | hwrng_unregister(&caam_rng); |
| 303 | kfree(rng_ctx); | 301 | kfree(rng_ctx); |
| 304 | } | 302 | } |
| 305 | 303 | ||
| 306 | static int __init caam_rng_init(void) | 304 | int caam_rng_init(struct device *ctrldev) |
| 307 | { | 305 | { |
| 308 | struct device *dev; | 306 | struct device *dev; |
| 309 | struct device_node *dev_node; | ||
| 310 | struct platform_device *pdev; | ||
| 311 | struct caam_drv_private *priv; | ||
| 312 | u32 rng_inst; | 307 | u32 rng_inst; |
| 308 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); | ||
| 313 | int err; | 309 | int err; |
| 314 | 310 | ||
| 315 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 316 | if (!dev_node) { | ||
| 317 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 318 | if (!dev_node) | ||
| 319 | return -ENODEV; | ||
| 320 | } | ||
| 321 | |||
| 322 | pdev = of_find_device_by_node(dev_node); | ||
| 323 | if (!pdev) { | ||
| 324 | of_node_put(dev_node); | ||
| 325 | return -ENODEV; | ||
| 326 | } | ||
| 327 | |||
| 328 | priv = dev_get_drvdata(&pdev->dev); | ||
| 329 | of_node_put(dev_node); | ||
| 330 | |||
| 331 | /* | ||
| 332 | * If priv is NULL, it's probably because the caam driver wasn't | ||
| 333 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
| 334 | */ | ||
| 335 | if (!priv) { | ||
| 336 | err = -ENODEV; | ||
| 337 | goto out_put_dev; | ||
| 338 | } | ||
| 339 | |||
| 340 | /* Check for an instantiated RNG before registration */ | 311 | /* Check for an instantiated RNG before registration */ |
| 341 | if (priv->era < 10) | 312 | if (priv->era < 10) |
| 342 | rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & | 313 | rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & |
| @@ -344,16 +315,13 @@ static int __init caam_rng_init(void) | |||
| 344 | else | 315 | else |
| 345 | rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; | 316 | rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; |
| 346 | 317 | ||
| 347 | if (!rng_inst) { | 318 | if (!rng_inst) |
| 348 | err = -ENODEV; | 319 | return 0; |
| 349 | goto out_put_dev; | ||
| 350 | } | ||
| 351 | 320 | ||
| 352 | dev = caam_jr_alloc(); | 321 | dev = caam_jr_alloc(); |
| 353 | if (IS_ERR(dev)) { | 322 | if (IS_ERR(dev)) { |
| 354 | pr_err("Job Ring Device allocation for transform failed\n"); | 323 | pr_err("Job Ring Device allocation for transform failed\n"); |
| 355 | err = PTR_ERR(dev); | 324 | return PTR_ERR(dev); |
| 356 | goto out_put_dev; | ||
| 357 | } | 325 | } |
| 358 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); | 326 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); |
| 359 | if (!rng_ctx) { | 327 | if (!rng_ctx) { |
| @@ -364,7 +332,6 @@ static int __init caam_rng_init(void) | |||
| 364 | if (err) | 332 | if (err) |
| 365 | goto free_rng_ctx; | 333 | goto free_rng_ctx; |
| 366 | 334 | ||
| 367 | put_device(&pdev->dev); | ||
| 368 | dev_info(dev, "registering rng-caam\n"); | 335 | dev_info(dev, "registering rng-caam\n"); |
| 369 | return hwrng_register(&caam_rng); | 336 | return hwrng_register(&caam_rng); |
| 370 | 337 | ||
| @@ -372,14 +339,5 @@ free_rng_ctx: | |||
| 372 | kfree(rng_ctx); | 339 | kfree(rng_ctx); |
| 373 | free_caam_alloc: | 340 | free_caam_alloc: |
| 374 | caam_jr_free(dev); | 341 | caam_jr_free(dev); |
| 375 | out_put_dev: | ||
| 376 | put_device(&pdev->dev); | ||
| 377 | return err; | 342 | return err; |
| 378 | } | 343 | } |
| 379 | |||
| 380 | module_init(caam_rng_init); | ||
| 381 | module_exit(caam_rng_exit); | ||
| 382 | |||
| 383 | MODULE_LICENSE("GPL"); | ||
| 384 | MODULE_DESCRIPTION("FSL CAAM support for hw_random API"); | ||
| 385 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index fec39c35c877..4e43ca4d3656 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * Controller-level driver, kernel property detection, initialization | 3 | * Controller-level driver, kernel property detection, initialization |
| 4 | * | 4 | * |
| 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
| 6 | * Copyright 2018 NXP | 6 | * Copyright 2018-2019 NXP |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
| @@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev) | |||
| 323 | of_platform_depopulate(ctrldev); | 323 | of_platform_depopulate(ctrldev); |
| 324 | 324 | ||
| 325 | #ifdef CONFIG_CAAM_QI | 325 | #ifdef CONFIG_CAAM_QI |
| 326 | if (ctrlpriv->qidev) | 326 | if (ctrlpriv->qi_init) |
| 327 | caam_qi_shutdown(ctrlpriv->qidev); | 327 | caam_qi_shutdown(ctrldev); |
| 328 | #endif | 328 | #endif |
| 329 | 329 | ||
| 330 | /* | 330 | /* |
| @@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev) | |||
| 540 | ctrlpriv->caam_ipg = clk; | 540 | ctrlpriv->caam_ipg = clk; |
| 541 | 541 | ||
| 542 | if (!of_machine_is_compatible("fsl,imx7d") && | 542 | if (!of_machine_is_compatible("fsl,imx7d") && |
| 543 | !of_machine_is_compatible("fsl,imx7s")) { | 543 | !of_machine_is_compatible("fsl,imx7s") && |
| 544 | !of_machine_is_compatible("fsl,imx7ulp")) { | ||
| 544 | clk = caam_drv_identify_clk(&pdev->dev, "mem"); | 545 | clk = caam_drv_identify_clk(&pdev->dev, "mem"); |
| 545 | if (IS_ERR(clk)) { | 546 | if (IS_ERR(clk)) { |
| 546 | ret = PTR_ERR(clk); | 547 | ret = PTR_ERR(clk); |
| @@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev) | |||
| 562 | 563 | ||
| 563 | if (!of_machine_is_compatible("fsl,imx6ul") && | 564 | if (!of_machine_is_compatible("fsl,imx6ul") && |
| 564 | !of_machine_is_compatible("fsl,imx7d") && | 565 | !of_machine_is_compatible("fsl,imx7d") && |
| 565 | !of_machine_is_compatible("fsl,imx7s")) { | 566 | !of_machine_is_compatible("fsl,imx7s") && |
| 567 | !of_machine_is_compatible("fsl,imx7ulp")) { | ||
| 566 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); | 568 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); |
| 567 | if (IS_ERR(clk)) { | 569 | if (IS_ERR(clk)) { |
| 568 | ret = PTR_ERR(clk); | 570 | ret = PTR_ERR(clk); |
| @@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 702 | } | 704 | } |
| 703 | 705 | ||
| 704 | ctrlpriv->era = caam_get_era(ctrl); | 706 | ctrlpriv->era = caam_get_era(ctrl); |
| 705 | 707 | ctrlpriv->domain = iommu_get_domain_for_dev(dev); | |
| 706 | ret = of_platform_populate(nprop, caam_match, NULL, dev); | ||
| 707 | if (ret) { | ||
| 708 | dev_err(dev, "JR platform devices creation error\n"); | ||
| 709 | goto iounmap_ctrl; | ||
| 710 | } | ||
| 711 | 708 | ||
| 712 | #ifdef CONFIG_DEBUG_FS | 709 | #ifdef CONFIG_DEBUG_FS |
| 713 | /* | 710 | /* |
| @@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev) | |||
| 721 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); | 718 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); |
| 722 | #endif | 719 | #endif |
| 723 | 720 | ||
| 724 | ring = 0; | ||
| 725 | for_each_available_child_of_node(nprop, np) | ||
| 726 | if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || | ||
| 727 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { | ||
| 728 | ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) | ||
| 729 | ((__force uint8_t *)ctrl + | ||
| 730 | (ring + JR_BLOCK_NUMBER) * | ||
| 731 | BLOCK_OFFSET | ||
| 732 | ); | ||
| 733 | ctrlpriv->total_jobrs++; | ||
| 734 | ring++; | ||
| 735 | } | ||
| 736 | |||
| 737 | /* Check to see if (DPAA 1.x) QI present. If so, enable */ | 721 | /* Check to see if (DPAA 1.x) QI present. If so, enable */ |
| 738 | ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); | 722 | ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); |
| 739 | if (ctrlpriv->qi_present && !caam_dpaa2) { | 723 | if (ctrlpriv->qi_present && !caam_dpaa2) { |
| @@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev) | |||
| 752 | #endif | 736 | #endif |
| 753 | } | 737 | } |
| 754 | 738 | ||
| 739 | ret = of_platform_populate(nprop, caam_match, NULL, dev); | ||
| 740 | if (ret) { | ||
| 741 | dev_err(dev, "JR platform devices creation error\n"); | ||
| 742 | goto shutdown_qi; | ||
| 743 | } | ||
| 744 | |||
| 745 | ring = 0; | ||
| 746 | for_each_available_child_of_node(nprop, np) | ||
| 747 | if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || | ||
| 748 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { | ||
| 749 | ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) | ||
| 750 | ((__force uint8_t *)ctrl + | ||
| 751 | (ring + JR_BLOCK_NUMBER) * | ||
| 752 | BLOCK_OFFSET | ||
| 753 | ); | ||
| 754 | ctrlpriv->total_jobrs++; | ||
| 755 | ring++; | ||
| 756 | } | ||
| 757 | |||
| 755 | /* If no QI and no rings specified, quit and go home */ | 758 | /* If no QI and no rings specified, quit and go home */ |
| 756 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { | 759 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { |
| 757 | dev_err(dev, "no queues configured, terminating\n"); | 760 | dev_err(dev, "no queues configured, terminating\n"); |
| @@ -898,6 +901,11 @@ caam_remove: | |||
| 898 | caam_remove(pdev); | 901 | caam_remove(pdev); |
| 899 | return ret; | 902 | return ret; |
| 900 | 903 | ||
| 904 | shutdown_qi: | ||
| 905 | #ifdef CONFIG_CAAM_QI | ||
| 906 | if (ctrlpriv->qi_init) | ||
| 907 | caam_qi_shutdown(dev); | ||
| 908 | #endif | ||
| 901 | iounmap_ctrl: | 909 | iounmap_ctrl: |
| 902 | iounmap(ctrl); | 910 | iounmap(ctrl); |
| 903 | disable_caam_emi_slow: | 911 | disable_caam_emi_slow: |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 2980b8ef1fb1..5988a26a2441 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * caam descriptor construction helper functions | 3 | * caam descriptor construction helper functions |
| 4 | * | 4 | * |
| 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
| 6 | * Copyright 2019 NXP | ||
| 6 | */ | 7 | */ |
| 7 | 8 | ||
| 8 | #ifndef DESC_CONSTR_H | 9 | #ifndef DESC_CONSTR_H |
| @@ -37,6 +38,16 @@ | |||
| 37 | 38 | ||
| 38 | extern bool caam_little_end; | 39 | extern bool caam_little_end; |
| 39 | 40 | ||
| 41 | /* | ||
| 42 | * HW fetches 4 S/G table entries at a time, irrespective of how many entries | ||
| 43 | * are in the table. It's SW's responsibility to make sure these accesses | ||
| 44 | * do not have side effects. | ||
| 45 | */ | ||
| 46 | static inline int pad_sg_nents(int sg_nents) | ||
| 47 | { | ||
| 48 | return ALIGN(sg_nents, 4); | ||
| 49 | } | ||
| 50 | |||
| 40 | static inline int desc_len(u32 * const desc) | 51 | static inline int desc_len(u32 * const desc) |
| 41 | { | 52 | { |
| 42 | return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; | 53 | return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; |
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 4da844e4b61d..4f0d45865aa2 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | #ifdef DEBUG | 13 | #ifdef DEBUG |
| 14 | #include <linux/highmem.h> | 14 | #include <linux/highmem.h> |
| 15 | 15 | ||
| 16 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 16 | void caam_dump_sg(const char *prefix_str, int prefix_type, |
| 17 | int rowsize, int groupsize, struct scatterlist *sg, | 17 | int rowsize, int groupsize, struct scatterlist *sg, |
| 18 | size_t tlen, bool ascii) | 18 | size_t tlen, bool ascii) |
| 19 | { | 19 | { |
| @@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | |||
| 35 | 35 | ||
| 36 | buf = it_page + it->offset; | 36 | buf = it_page + it->offset; |
| 37 | len = min_t(size_t, tlen, it->length); | 37 | len = min_t(size_t, tlen, it->length); |
| 38 | print_hex_dump(level, prefix_str, prefix_type, rowsize, | 38 | print_hex_dump_debug(prefix_str, prefix_type, rowsize, |
| 39 | groupsize, buf, len, ascii); | 39 | groupsize, buf, len, ascii); |
| 40 | tlen -= len; | 40 | tlen -= len; |
| 41 | 41 | ||
| 42 | kunmap_atomic(it_page); | 42 | kunmap_atomic(it_page); |
| 43 | } | 43 | } |
| 44 | } | 44 | } |
| 45 | #else | 45 | #else |
| 46 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 46 | void caam_dump_sg(const char *prefix_str, int prefix_type, |
| 47 | int rowsize, int groupsize, struct scatterlist *sg, | 47 | int rowsize, int groupsize, struct scatterlist *sg, |
| 48 | size_t tlen, bool ascii) | 48 | size_t tlen, bool ascii) |
| 49 | {} | 49 | {} |
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 8c6b83e02a70..d9726e66edbf 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h | |||
| @@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | |||
| 17 | #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) | 17 | #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) |
| 18 | #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) | 18 | #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) |
| 19 | 19 | ||
| 20 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 20 | void caam_dump_sg(const char *prefix_str, int prefix_type, |
| 21 | int rowsize, int groupsize, struct scatterlist *sg, | 21 | int rowsize, int groupsize, struct scatterlist *sg, |
| 22 | size_t tlen, bool ascii); | 22 | size_t tlen, bool ascii); |
| 23 | 23 | ||
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 3392615dc91b..6af84bbc612c 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * Private/internal definitions between modules | 4 | * Private/internal definitions between modules |
| 5 | * | 5 | * |
| 6 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 6 | * Copyright 2008-2011 Freescale Semiconductor, Inc. |
| 7 | * | 7 | * Copyright 2019 NXP |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #ifndef INTERN_H | 10 | #ifndef INTERN_H |
| @@ -63,10 +63,6 @@ struct caam_drv_private_jr { | |||
| 63 | * Driver-private storage for a single CAAM block instance | 63 | * Driver-private storage for a single CAAM block instance |
| 64 | */ | 64 | */ |
| 65 | struct caam_drv_private { | 65 | struct caam_drv_private { |
| 66 | #ifdef CONFIG_CAAM_QI | ||
| 67 | struct device *qidev; | ||
| 68 | #endif | ||
| 69 | |||
| 70 | /* Physical-presence section */ | 66 | /* Physical-presence section */ |
| 71 | struct caam_ctrl __iomem *ctrl; /* controller region */ | 67 | struct caam_ctrl __iomem *ctrl; /* controller region */ |
| 72 | struct caam_deco __iomem *deco; /* DECO/CCB views */ | 68 | struct caam_deco __iomem *deco; /* DECO/CCB views */ |
| @@ -74,12 +70,17 @@ struct caam_drv_private { | |||
| 74 | struct caam_queue_if __iomem *qi; /* QI control region */ | 70 | struct caam_queue_if __iomem *qi; /* QI control region */ |
| 75 | struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ | 71 | struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ |
| 76 | 72 | ||
| 73 | struct iommu_domain *domain; | ||
| 74 | |||
| 77 | /* | 75 | /* |
| 78 | * Detected geometry block. Filled in from device tree if powerpc, | 76 | * Detected geometry block. Filled in from device tree if powerpc, |
| 79 | * or from register-based version detection code | 77 | * or from register-based version detection code |
| 80 | */ | 78 | */ |
| 81 | u8 total_jobrs; /* Total Job Rings in device */ | 79 | u8 total_jobrs; /* Total Job Rings in device */ |
| 82 | u8 qi_present; /* Nonzero if QI present in device */ | 80 | u8 qi_present; /* Nonzero if QI present in device */ |
| 81 | #ifdef CONFIG_CAAM_QI | ||
| 82 | u8 qi_init; /* Nonzero if QI has been initialized */ | ||
| 83 | #endif | ||
| 83 | u8 mc_en; /* Nonzero if MC f/w is active */ | 84 | u8 mc_en; /* Nonzero if MC f/w is active */ |
| 84 | int secvio_irq; /* Security violation interrupt number */ | 85 | int secvio_irq; /* Security violation interrupt number */ |
| 85 | int virt_en; /* Virtualization enabled in CAAM */ | 86 | int virt_en; /* Virtualization enabled in CAAM */ |
| @@ -107,8 +108,95 @@ struct caam_drv_private { | |||
| 107 | #endif | 108 | #endif |
| 108 | }; | 109 | }; |
| 109 | 110 | ||
| 110 | void caam_jr_algapi_init(struct device *dev); | 111 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
| 111 | void caam_jr_algapi_remove(struct device *dev); | 112 | |
| 113 | int caam_algapi_init(struct device *dev); | ||
| 114 | void caam_algapi_exit(void); | ||
| 115 | |||
| 116 | #else | ||
| 117 | |||
| 118 | static inline int caam_algapi_init(struct device *dev) | ||
| 119 | { | ||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline void caam_algapi_exit(void) | ||
| 124 | { | ||
| 125 | } | ||
| 126 | |||
| 127 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */ | ||
| 128 | |||
| 129 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API | ||
| 130 | |||
| 131 | int caam_algapi_hash_init(struct device *dev); | ||
| 132 | void caam_algapi_hash_exit(void); | ||
| 133 | |||
| 134 | #else | ||
| 135 | |||
| 136 | static inline int caam_algapi_hash_init(struct device *dev) | ||
| 137 | { | ||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | static inline void caam_algapi_hash_exit(void) | ||
| 142 | { | ||
| 143 | } | ||
| 144 | |||
| 145 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */ | ||
| 146 | |||
| 147 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API | ||
| 148 | |||
| 149 | int caam_pkc_init(struct device *dev); | ||
| 150 | void caam_pkc_exit(void); | ||
| 151 | |||
| 152 | #else | ||
| 153 | |||
| 154 | static inline int caam_pkc_init(struct device *dev) | ||
| 155 | { | ||
| 156 | return 0; | ||
| 157 | } | ||
| 158 | |||
| 159 | static inline void caam_pkc_exit(void) | ||
| 160 | { | ||
| 161 | } | ||
| 162 | |||
| 163 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */ | ||
| 164 | |||
| 165 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API | ||
| 166 | |||
| 167 | int caam_rng_init(struct device *dev); | ||
| 168 | void caam_rng_exit(void); | ||
| 169 | |||
| 170 | #else | ||
| 171 | |||
| 172 | static inline int caam_rng_init(struct device *dev) | ||
| 173 | { | ||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | static inline void caam_rng_exit(void) | ||
| 178 | { | ||
| 179 | } | ||
| 180 | |||
| 181 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */ | ||
| 182 | |||
| 183 | #ifdef CONFIG_CAAM_QI | ||
| 184 | |||
| 185 | int caam_qi_algapi_init(struct device *dev); | ||
| 186 | void caam_qi_algapi_exit(void); | ||
| 187 | |||
| 188 | #else | ||
| 189 | |||
| 190 | static inline int caam_qi_algapi_init(struct device *dev) | ||
| 191 | { | ||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | static inline void caam_qi_algapi_exit(void) | ||
| 196 | { | ||
| 197 | } | ||
| 198 | |||
| 199 | #endif /* CONFIG_CAAM_QI */ | ||
| 112 | 200 | ||
| 113 | #ifdef CONFIG_DEBUG_FS | 201 | #ifdef CONFIG_DEBUG_FS |
| 114 | static int caam_debugfs_u64_get(void *data, u64 *val) | 202 | static int caam_debugfs_u64_get(void *data, u64 *val) |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1de2562d0982..cea811fed320 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * JobR backend functionality | 4 | * JobR backend functionality |
| 5 | * | 5 | * |
| 6 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 6 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
| 7 | * Copyright 2019 NXP | ||
| 7 | */ | 8 | */ |
| 8 | 9 | ||
| 9 | #include <linux/of_irq.h> | 10 | #include <linux/of_irq.h> |
| @@ -23,6 +24,43 @@ struct jr_driver_data { | |||
| 23 | } ____cacheline_aligned; | 24 | } ____cacheline_aligned; |
| 24 | 25 | ||
| 25 | static struct jr_driver_data driver_data; | 26 | static struct jr_driver_data driver_data; |
| 27 | static DEFINE_MUTEX(algs_lock); | ||
| 28 | static unsigned int active_devs; | ||
| 29 | |||
| 30 | static void register_algs(struct device *dev) | ||
| 31 | { | ||
| 32 | mutex_lock(&algs_lock); | ||
| 33 | |||
| 34 | if (++active_devs != 1) | ||
| 35 | goto algs_unlock; | ||
| 36 | |||
| 37 | caam_algapi_init(dev); | ||
| 38 | caam_algapi_hash_init(dev); | ||
| 39 | caam_pkc_init(dev); | ||
| 40 | caam_rng_init(dev); | ||
| 41 | caam_qi_algapi_init(dev); | ||
| 42 | |||
| 43 | algs_unlock: | ||
| 44 | mutex_unlock(&algs_lock); | ||
| 45 | } | ||
| 46 | |||
| 47 | static void unregister_algs(void) | ||
| 48 | { | ||
| 49 | mutex_lock(&algs_lock); | ||
| 50 | |||
| 51 | if (--active_devs != 0) | ||
| 52 | goto algs_unlock; | ||
| 53 | |||
| 54 | caam_qi_algapi_exit(); | ||
| 55 | |||
| 56 | caam_rng_exit(); | ||
| 57 | caam_pkc_exit(); | ||
| 58 | caam_algapi_hash_exit(); | ||
| 59 | caam_algapi_exit(); | ||
| 60 | |||
| 61 | algs_unlock: | ||
| 62 | mutex_unlock(&algs_lock); | ||
| 63 | } | ||
| 26 | 64 | ||
| 27 | static int caam_reset_hw_jr(struct device *dev) | 65 | static int caam_reset_hw_jr(struct device *dev) |
| 28 | { | 66 | { |
| @@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev) | |||
| 109 | return -EBUSY; | 147 | return -EBUSY; |
| 110 | } | 148 | } |
| 111 | 149 | ||
| 150 | /* Unregister JR-based RNG & crypto algorithms */ | ||
| 151 | unregister_algs(); | ||
| 152 | |||
| 112 | /* Remove the node from Physical JobR list maintained by driver */ | 153 | /* Remove the node from Physical JobR list maintained by driver */ |
| 113 | spin_lock(&driver_data.jr_alloc_lock); | 154 | spin_lock(&driver_data.jr_alloc_lock); |
| 114 | list_del(&jrpriv->list_node); | 155 | list_del(&jrpriv->list_node); |
| @@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
| 541 | 582 | ||
| 542 | atomic_set(&jrpriv->tfm_count, 0); | 583 | atomic_set(&jrpriv->tfm_count, 0); |
| 543 | 584 | ||
| 585 | register_algs(jrdev->parent); | ||
| 586 | |||
| 544 | return 0; | 587 | return 0; |
| 545 | } | 588 | } |
| 546 | 589 | ||
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 8d0713fae6ac..48dd3536060d 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
| @@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err, | |||
| 16 | { | 16 | { |
| 17 | struct split_key_result *res = context; | 17 | struct split_key_result *res = context; |
| 18 | 18 | ||
| 19 | #ifdef DEBUG | 19 | dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 20 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 21 | #endif | ||
| 22 | 20 | ||
| 23 | if (err) | 21 | if (err) |
| 24 | caam_jr_strstatus(dev, err); | 22 | caam_jr_strstatus(dev, err); |
| @@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
| 55 | adata->keylen_pad = split_key_pad_len(adata->algtype & | 53 | adata->keylen_pad = split_key_pad_len(adata->algtype & |
| 56 | OP_ALG_ALGSEL_MASK); | 54 | OP_ALG_ALGSEL_MASK); |
| 57 | 55 | ||
| 58 | #ifdef DEBUG | 56 | dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", |
| 59 | dev_err(jrdev, "split keylen %d split keylen padded %d\n", | ||
| 60 | adata->keylen, adata->keylen_pad); | 57 | adata->keylen, adata->keylen_pad); |
| 61 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | 58 | print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", |
| 62 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | 59 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); |
| 63 | #endif | ||
| 64 | 60 | ||
| 65 | if (adata->keylen_pad > max_keylen) | 61 | if (adata->keylen_pad > max_keylen) |
| 66 | return -EINVAL; | 62 | return -EINVAL; |
| @@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
| 102 | append_fifo_store(desc, dma_addr, adata->keylen, | 98 | append_fifo_store(desc, dma_addr, adata->keylen, |
| 103 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | 99 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); |
| 104 | 100 | ||
| 105 | #ifdef DEBUG | 101 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
| 106 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 102 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
| 107 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 103 | 1); |
| 108 | #endif | ||
| 109 | 104 | ||
| 110 | result.err = 0; | 105 | result.err = 0; |
| 111 | init_completion(&result.completion); | 106 | init_completion(&result.completion); |
| @@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
| 115 | /* in progress */ | 110 | /* in progress */ |
| 116 | wait_for_completion(&result.completion); | 111 | wait_for_completion(&result.completion); |
| 117 | ret = result.err; | 112 | ret = result.err; |
| 118 | #ifdef DEBUG | 113 | |
| 119 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 114 | print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", |
| 120 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | 115 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, |
| 121 | adata->keylen_pad, 1); | 116 | adata->keylen_pad, 1); |
| 122 | #endif | ||
| 123 | } | 117 | } |
| 124 | 118 | ||
| 125 | dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); | 119 | dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); |
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 9f08f84cca59..0fe618e3804a 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * Queue Interface backend functionality | 4 | * Queue Interface backend functionality |
| 5 | * | 5 | * |
| 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. | 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. |
| 7 | * Copyright 2016-2017 NXP | 7 | * Copyright 2016-2017, 2019 NXP |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
| @@ -18,6 +18,7 @@ | |||
| 18 | #include "desc_constr.h" | 18 | #include "desc_constr.h" |
| 19 | 19 | ||
| 20 | #define PREHDR_RSLS_SHIFT 31 | 20 | #define PREHDR_RSLS_SHIFT 31 |
| 21 | #define PREHDR_ABS BIT(25) | ||
| 21 | 22 | ||
| 22 | /* | 23 | /* |
| 23 | * Use a reasonable backlog of frames (per CPU) as congestion threshold, | 24 | * Use a reasonable backlog of frames (per CPU) as congestion threshold, |
| @@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu); | |||
| 58 | /* | 59 | /* |
| 59 | * caam_qi_priv - CAAM QI backend private params | 60 | * caam_qi_priv - CAAM QI backend private params |
| 60 | * @cgr: QMan congestion group | 61 | * @cgr: QMan congestion group |
| 61 | * @qi_pdev: platform device for QI backend | ||
| 62 | */ | 62 | */ |
| 63 | struct caam_qi_priv { | 63 | struct caam_qi_priv { |
| 64 | struct qman_cgr cgr; | 64 | struct qman_cgr cgr; |
| 65 | struct platform_device *qi_pdev; | ||
| 66 | }; | 65 | }; |
| 67 | 66 | ||
| 68 | static struct caam_qi_priv qipriv ____cacheline_aligned; | 67 | static struct caam_qi_priv qipriv ____cacheline_aligned; |
| @@ -95,6 +94,16 @@ static u64 times_congested; | |||
| 95 | */ | 94 | */ |
| 96 | static struct kmem_cache *qi_cache; | 95 | static struct kmem_cache *qi_cache; |
| 97 | 96 | ||
| 97 | static void *caam_iova_to_virt(struct iommu_domain *domain, | ||
| 98 | dma_addr_t iova_addr) | ||
| 99 | { | ||
| 100 | phys_addr_t phys_addr; | ||
| 101 | |||
| 102 | phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; | ||
| 103 | |||
| 104 | return phys_to_virt(phys_addr); | ||
| 105 | } | ||
| 106 | |||
| 98 | int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) | 107 | int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) |
| 99 | { | 108 | { |
| 100 | struct qm_fd fd; | 109 | struct qm_fd fd; |
| @@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, | |||
| 135 | const struct qm_fd *fd; | 144 | const struct qm_fd *fd; |
| 136 | struct caam_drv_req *drv_req; | 145 | struct caam_drv_req *drv_req; |
| 137 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); | 146 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
| 147 | struct caam_drv_private *priv = dev_get_drvdata(qidev); | ||
| 138 | 148 | ||
| 139 | fd = &msg->ern.fd; | 149 | fd = &msg->ern.fd; |
| 140 | 150 | ||
| @@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, | |||
| 143 | return; | 153 | return; |
| 144 | } | 154 | } |
| 145 | 155 | ||
| 146 | drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); | 156 | drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); |
| 147 | if (!drv_req) { | 157 | if (!drv_req) { |
| 148 | dev_err(qidev, | 158 | dev_err(qidev, |
| 149 | "Can't find original request for CAAM response\n"); | 159 | "Can't find original request for CAAM response\n"); |
| @@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) | |||
| 346 | */ | 356 | */ |
| 347 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | | 357 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | |
| 348 | num_words); | 358 | num_words); |
| 359 | drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); | ||
| 349 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); | 360 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); |
| 350 | dma_sync_single_for_device(qidev, drv_ctx->context_a, | 361 | dma_sync_single_for_device(qidev, drv_ctx->context_a, |
| 351 | sizeof(drv_ctx->sh_desc) + | 362 | sizeof(drv_ctx->sh_desc) + |
| @@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, | |||
| 401 | */ | 412 | */ |
| 402 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | | 413 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | |
| 403 | num_words); | 414 | num_words); |
| 415 | drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); | ||
| 404 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); | 416 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); |
| 405 | size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); | 417 | size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); |
| 406 | hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, | 418 | hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, |
| @@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel); | |||
| 488 | void caam_qi_shutdown(struct device *qidev) | 500 | void caam_qi_shutdown(struct device *qidev) |
| 489 | { | 501 | { |
| 490 | int i; | 502 | int i; |
| 491 | struct caam_qi_priv *priv = dev_get_drvdata(qidev); | 503 | struct caam_qi_priv *priv = &qipriv; |
| 492 | const cpumask_t *cpus = qman_affine_cpus(); | 504 | const cpumask_t *cpus = qman_affine_cpus(); |
| 493 | 505 | ||
| 494 | for_each_cpu(i, cpus) { | 506 | for_each_cpu(i, cpus) { |
| @@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev) | |||
| 506 | qman_release_cgrid(priv->cgr.cgrid); | 518 | qman_release_cgrid(priv->cgr.cgrid); |
| 507 | 519 | ||
| 508 | kmem_cache_destroy(qi_cache); | 520 | kmem_cache_destroy(qi_cache); |
| 509 | |||
| 510 | platform_device_unregister(priv->qi_pdev); | ||
| 511 | } | 521 | } |
| 512 | 522 | ||
| 513 | static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) | 523 | static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) |
| @@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, | |||
| 550 | struct caam_drv_req *drv_req; | 560 | struct caam_drv_req *drv_req; |
| 551 | const struct qm_fd *fd; | 561 | const struct qm_fd *fd; |
| 552 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); | 562 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
| 563 | struct caam_drv_private *priv = dev_get_drvdata(qidev); | ||
| 553 | u32 status; | 564 | u32 status; |
| 554 | 565 | ||
| 555 | if (caam_qi_napi_schedule(p, caam_napi)) | 566 | if (caam_qi_napi_schedule(p, caam_napi)) |
| @@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, | |||
| 572 | return qman_cb_dqrr_consume; | 583 | return qman_cb_dqrr_consume; |
| 573 | } | 584 | } |
| 574 | 585 | ||
| 575 | drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); | 586 | drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); |
| 576 | if (unlikely(!drv_req)) { | 587 | if (unlikely(!drv_req)) { |
| 577 | dev_err(qidev, | 588 | dev_err(qidev, |
| 578 | "Can't find original request for caam response\n"); | 589 | "Can't find original request for caam response\n"); |
| @@ -692,33 +703,17 @@ static void free_rsp_fqs(void) | |||
| 692 | int caam_qi_init(struct platform_device *caam_pdev) | 703 | int caam_qi_init(struct platform_device *caam_pdev) |
| 693 | { | 704 | { |
| 694 | int err, i; | 705 | int err, i; |
| 695 | struct platform_device *qi_pdev; | ||
| 696 | struct device *ctrldev = &caam_pdev->dev, *qidev; | 706 | struct device *ctrldev = &caam_pdev->dev, *qidev; |
| 697 | struct caam_drv_private *ctrlpriv; | 707 | struct caam_drv_private *ctrlpriv; |
| 698 | const cpumask_t *cpus = qman_affine_cpus(); | 708 | const cpumask_t *cpus = qman_affine_cpus(); |
| 699 | static struct platform_device_info qi_pdev_info = { | ||
| 700 | .name = "caam_qi", | ||
| 701 | .id = PLATFORM_DEVID_NONE | ||
| 702 | }; | ||
| 703 | |||
| 704 | qi_pdev_info.parent = ctrldev; | ||
| 705 | qi_pdev_info.dma_mask = dma_get_mask(ctrldev); | ||
| 706 | qi_pdev = platform_device_register_full(&qi_pdev_info); | ||
| 707 | if (IS_ERR(qi_pdev)) | ||
| 708 | return PTR_ERR(qi_pdev); | ||
| 709 | set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev)); | ||
| 710 | 709 | ||
| 711 | ctrlpriv = dev_get_drvdata(ctrldev); | 710 | ctrlpriv = dev_get_drvdata(ctrldev); |
| 712 | qidev = &qi_pdev->dev; | 711 | qidev = ctrldev; |
| 713 | |||
| 714 | qipriv.qi_pdev = qi_pdev; | ||
| 715 | dev_set_drvdata(qidev, &qipriv); | ||
| 716 | 712 | ||
| 717 | /* Initialize the congestion detection */ | 713 | /* Initialize the congestion detection */ |
| 718 | err = init_cgr(qidev); | 714 | err = init_cgr(qidev); |
| 719 | if (err) { | 715 | if (err) { |
| 720 | dev_err(qidev, "CGR initialization failed: %d\n", err); | 716 | dev_err(qidev, "CGR initialization failed: %d\n", err); |
| 721 | platform_device_unregister(qi_pdev); | ||
| 722 | return err; | 717 | return err; |
| 723 | } | 718 | } |
| 724 | 719 | ||
| @@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev) | |||
| 727 | if (err) { | 722 | if (err) { |
| 728 | dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); | 723 | dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); |
| 729 | free_rsp_fqs(); | 724 | free_rsp_fqs(); |
| 730 | platform_device_unregister(qi_pdev); | ||
| 731 | return err; | 725 | return err; |
| 732 | } | 726 | } |
| 733 | 727 | ||
| @@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev) | |||
| 750 | napi_enable(irqtask); | 744 | napi_enable(irqtask); |
| 751 | } | 745 | } |
| 752 | 746 | ||
| 753 | /* Hook up QI device to parent controlling caam device */ | ||
| 754 | ctrlpriv->qidev = qidev; | ||
| 755 | |||
| 756 | qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, | 747 | qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, |
| 757 | SLAB_CACHE_DMA, NULL); | 748 | SLAB_CACHE_DMA, NULL); |
| 758 | if (!qi_cache) { | 749 | if (!qi_cache) { |
| 759 | dev_err(qidev, "Can't allocate CAAM cache\n"); | 750 | dev_err(qidev, "Can't allocate CAAM cache\n"); |
| 760 | free_rsp_fqs(); | 751 | free_rsp_fqs(); |
| 761 | platform_device_unregister(qi_pdev); | ||
| 762 | return -ENOMEM; | 752 | return -ENOMEM; |
| 763 | } | 753 | } |
| 764 | 754 | ||
| @@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev) | |||
| 766 | debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, | 756 | debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, |
| 767 | 			    &times_congested, &caam_fops_u64_ro); | 757 | 			    &times_congested, &caam_fops_u64_ro); |
| 768 | #endif | 758 | #endif |
| 759 | |||
| 760 | ctrlpriv->qi_init = 1; | ||
| 769 | dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); | 761 | dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); |
| 770 | return 0; | 762 | return 0; |
| 771 | } | 763 | } |
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h index b3e1aaaeffea..d56cc7efbc13 100644 --- a/drivers/crypto/caam/sg_sw_qm.h +++ b/drivers/crypto/caam/sg_sw_qm.h | |||
| @@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr, | |||
| 54 | * but does not have final bit; instead, returns last entry | 54 | * but does not have final bit; instead, returns last entry |
| 55 | */ | 55 | */ |
| 56 | static inline struct qm_sg_entry * | 56 | static inline struct qm_sg_entry * |
| 57 | sg_to_qm_sg(struct scatterlist *sg, int sg_count, | 57 | sg_to_qm_sg(struct scatterlist *sg, int len, |
| 58 | struct qm_sg_entry *qm_sg_ptr, u16 offset) | 58 | struct qm_sg_entry *qm_sg_ptr, u16 offset) |
| 59 | { | 59 | { |
| 60 | while (sg_count && sg) { | 60 | int ent_len; |
| 61 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), | 61 | |
| 62 | sg_dma_len(sg), offset); | 62 | while (len) { |
| 63 | ent_len = min_t(int, sg_dma_len(sg), len); | ||
| 64 | |||
| 65 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, | ||
| 66 | offset); | ||
| 63 | qm_sg_ptr++; | 67 | qm_sg_ptr++; |
| 64 | sg = sg_next(sg); | 68 | sg = sg_next(sg); |
| 65 | sg_count--; | 69 | len -= ent_len; |
| 66 | } | 70 | } |
| 67 | return qm_sg_ptr - 1; | 71 | return qm_sg_ptr - 1; |
| 68 | } | 72 | } |
| @@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count, | |||
| 71 | * convert scatterlist to h/w link table format | 75 | * convert scatterlist to h/w link table format |
| 72 | * scatterlist must have been previously dma mapped | 76 | * scatterlist must have been previously dma mapped |
| 73 | */ | 77 | */ |
| 74 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, | 78 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, |
| 75 | struct qm_sg_entry *qm_sg_ptr, u16 offset) | 79 | struct qm_sg_entry *qm_sg_ptr, u16 offset) |
| 76 | { | 80 | { |
| 77 | qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); | 81 | qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); |
| 78 | qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); | 82 | qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); |
| 79 | } | 83 | } |
| 80 | 84 | ||
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h index c9378402a5f8..b8b737d2b0ea 100644 --- a/drivers/crypto/caam/sg_sw_qm2.h +++ b/drivers/crypto/caam/sg_sw_qm2.h | |||
| @@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr, | |||
| 25 | * but does not have final bit; instead, returns last entry | 25 | * but does not have final bit; instead, returns last entry |
| 26 | */ | 26 | */ |
| 27 | static inline struct dpaa2_sg_entry * | 27 | static inline struct dpaa2_sg_entry * |
| 28 | sg_to_qm_sg(struct scatterlist *sg, int sg_count, | 28 | sg_to_qm_sg(struct scatterlist *sg, int len, |
| 29 | struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) | 29 | struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) |
| 30 | { | 30 | { |
| 31 | while (sg_count && sg) { | 31 | int ent_len; |
| 32 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), | 32 | |
| 33 | sg_dma_len(sg), offset); | 33 | while (len) { |
| 34 | ent_len = min_t(int, sg_dma_len(sg), len); | ||
| 35 | |||
| 36 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, | ||
| 37 | offset); | ||
| 34 | qm_sg_ptr++; | 38 | qm_sg_ptr++; |
| 35 | sg = sg_next(sg); | 39 | sg = sg_next(sg); |
| 36 | sg_count--; | 40 | len -= ent_len; |
| 37 | } | 41 | } |
| 38 | return qm_sg_ptr - 1; | 42 | return qm_sg_ptr - 1; |
| 39 | } | 43 | } |
| @@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count, | |||
| 42 | * convert scatterlist to h/w link table format | 46 | * convert scatterlist to h/w link table format |
| 43 | * scatterlist must have been previously dma mapped | 47 | * scatterlist must have been previously dma mapped |
| 44 | */ | 48 | */ |
| 45 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, | 49 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, |
| 46 | struct dpaa2_sg_entry *qm_sg_ptr, | 50 | struct dpaa2_sg_entry *qm_sg_ptr, |
| 47 | u16 offset) | 51 | u16 offset) |
| 48 | { | 52 | { |
| 49 | qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); | 53 | qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); |
| 50 | dpaa2_sg_set_final(qm_sg_ptr, true); | 54 | dpaa2_sg_set_final(qm_sg_ptr, true); |
| 51 | } | 55 | } |
| 52 | 56 | ||
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index dbfa9fce33e0..07e1ee99273b 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | |||
| 35 | sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & | 35 | sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & |
| 36 | SEC4_SG_OFFSET_MASK); | 36 | SEC4_SG_OFFSET_MASK); |
| 37 | } | 37 | } |
| 38 | #ifdef DEBUG | 38 | |
| 39 | print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", | 39 | print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
| 40 | DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, | 40 | sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1); |
| 41 | sizeof(struct sec4_sg_entry), 1); | ||
| 42 | #endif | ||
| 43 | } | 41 | } |
| 44 | 42 | ||
| 45 | /* | 43 | /* |
| @@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | |||
| 47 | * but does not have final bit; instead, returns last entry | 45 | * but does not have final bit; instead, returns last entry |
| 48 | */ | 46 | */ |
| 49 | static inline struct sec4_sg_entry * | 47 | static inline struct sec4_sg_entry * |
| 50 | sg_to_sec4_sg(struct scatterlist *sg, int sg_count, | 48 | sg_to_sec4_sg(struct scatterlist *sg, int len, |
| 51 | struct sec4_sg_entry *sec4_sg_ptr, u16 offset) | 49 | struct sec4_sg_entry *sec4_sg_ptr, u16 offset) |
| 52 | { | 50 | { |
| 53 | while (sg_count) { | 51 | int ent_len; |
| 54 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), | 52 | |
| 55 | sg_dma_len(sg), offset); | 53 | while (len) { |
| 54 | ent_len = min_t(int, sg_dma_len(sg), len); | ||
| 55 | |||
| 56 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len, | ||
| 57 | offset); | ||
| 56 | sec4_sg_ptr++; | 58 | sec4_sg_ptr++; |
| 57 | sg = sg_next(sg); | 59 | sg = sg_next(sg); |
| 58 | sg_count--; | 60 | len -= ent_len; |
| 59 | } | 61 | } |
| 60 | return sec4_sg_ptr - 1; | 62 | return sec4_sg_ptr - 1; |
| 61 | } | 63 | } |
| @@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr) | |||
| 72 | * convert scatterlist to h/w link table format | 74 | * convert scatterlist to h/w link table format |
| 73 | * scatterlist must have been previously dma mapped | 75 | * scatterlist must have been previously dma mapped |
| 74 | */ | 76 | */ |
| 75 | static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, | 77 | static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len, |
| 76 | struct sec4_sg_entry *sec4_sg_ptr, | 78 | struct sec4_sg_entry *sec4_sg_ptr, |
| 77 | u16 offset) | 79 | u16 offset) |
| 78 | { | 80 | { |
| 79 | sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); | 81 | sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset); |
| 80 | sg_to_sec4_set_last(sec4_sg_ptr); | 82 | sg_to_sec4_set_last(sec4_sg_ptr); |
| 81 | } | 83 | } |
| 82 | 84 | ||
