 151 files changed, 15711 insertions(+), 4462 deletions(-)
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index beda682e8d77..45d943fcae5b 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -44,12 +44,9 @@ one block while the former can operate on an arbitrary amount of data,
 subject to block size requirements (i.e., non-stream ciphers can only
 process multiples of blocks).
 
-Support for hardware crypto devices via an asynchronous interface is
-under development.
-
 Here's an example of how to use the API:
 
 	#include <crypto/ahash.h>
+	#include <crypto/hash.h>
 	#include <linux/err.h>
 	#include <linux/scatterlist.h>
 
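For reference, <crypto/hash.h> is the front-end header for both the synchronous (shash) and asynchronous (ahash) hash interfaces. Below is a minimal sketch of a one-shot digest through that header; it is not part of the patch, and the "sha1" name and linear-buffer handling are illustrative only.

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	/* Compute a SHA-1 digest over a linear buffer (sketch). */
	static int demo_shash_digest(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		struct shash_desc *desc;
		int err;

		tfm = crypto_alloc_shash("sha1", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* The descriptor carries per-request state for this tfm. */
		desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
			       GFP_KERNEL);
		if (!desc) {
			crypto_free_shash(tfm);
			return -ENOMEM;
		}
		desc->tfm = tfm;

		err = crypto_shash_digest(desc, data, len, out);

		kfree(desc);
		crypto_free_shash(tfm);
		return err;
	}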
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index adeca34c5a33..10a425f451fc 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -123,6 +123,9 @@ PROPERTIES
 
 
 EXAMPLE
+
+iMX6QDL/SX requires four clocks
+
 	crypto@300000 {
 		compatible = "fsl,sec-v4.0";
 		fsl,sec-era = <2>;
@@ -139,6 +142,23 @@ EXAMPLE
 		clock-names = "mem", "aclk", "ipg", "emi_slow";
 	};
 
+
+iMX6UL requires only three clocks
+
+	crypto: caam@2140000 {
+		compatible = "fsl,sec-v4.0";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2140000 0x3c000>;
+		ranges = <0 0x2140000 0x3c000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&clks IMX6UL_CLK_CAAM_MEM>,
+			 <&clks IMX6UL_CLK_CAAM_ACLK>,
+			 <&clks IMX6UL_CLK_CAAM_IPG>;
+		clock-names = "mem", "aclk", "ipg";
+	};
+
 =====================================================================
 Job Ring (JR) Node
 
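The two examples above differ only in their clock lists. Here is a hedged sketch of how a probe routine could consume this binding, treating "emi_slow" as optional so that the same code covers both the i.MX6QDL/SX and i.MX6UL cases; the helpers are standard clk APIs, but the function itself is hypothetical and its error handling abbreviated:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/kernel.h>

	static int caam_clocks_enable(struct device *dev)
	{
		static const char * const names[] = { "mem", "aclk", "ipg" };
		struct clk *clk;
		int i, err;

		/* The three clocks every supported SoC provides. */
		for (i = 0; i < ARRAY_SIZE(names); i++) {
			clk = devm_clk_get(dev, names[i]);
			if (IS_ERR(clk))
				return PTR_ERR(clk);
			err = clk_prepare_enable(clk);
			if (err)
				return err;
		}

		/* "emi_slow" exists on i.MX6QDL/SX but not on i.MX6UL. */
		clk = devm_clk_get(dev, "emi_slow");
		if (!IS_ERR(clk))
			return clk_prepare_enable(clk);

		return 0;
	}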
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 6a62acd86953..471477299ece 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -1,4 +1,4 @@
-OMAP SoC HWRNG Module
+OMAP SoC and Inside-Secure HWRNG Module
 
 Required properties:
 
@@ -6,11 +6,13 @@ Required properties:
   RNG versions:
   - "ti,omap2-rng" for OMAP2.
   - "ti,omap4-rng" for OMAP4, OMAP5 and AM33XX.
+  - "inside-secure,safexcel-eip76" for SoCs with EIP76 IP block
   Note that these two versions are incompatible.
 - ti,hwmods: Name of the hwmod associated with the RNG module
 - reg : Offset and length of the register set for the module
 - interrupts : the interrupt number for the RNG module.
-		Only used for "ti,omap4-rng".
+		Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76".
+- clocks: the trng clock source
 
 Example:
 /* AM335x */
@@ -20,3 +22,11 @@ rng: rng@48310000 {
 	reg = <0x48310000 0x2000>;
 	interrupts = <111>;
 };
+
+/* SafeXcel IP-76 */
+trng: rng@f2760000 {
+	compatible = "inside-secure,safexcel-eip76";
+	reg = <0xf2760000 0x7d>;
+	interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&cpm_syscon0 1 25>;
+};
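On the driver side, a device matching this binding would typically register with the hwrng core. The sketch below shows only the shape of that registration; the register offset and the single-word read are placeholders, not EIP76 specifics:

	#include <linux/hw_random.h>
	#include <linux/io.h>

	#define RNG_OUTPUT_REG	0x0	/* placeholder offset */

	static void __iomem *rng_base;	/* mapped from the "reg" property */

	static int eip76_rng_read(struct hwrng *rng, void *buf, size_t max,
				  bool wait)
	{
		if (max < sizeof(u32))
			return 0;

		/* Real hardware needs a readiness check before this read. */
		*(u32 *)buf = readl(rng_base + RNG_OUTPUT_REG);
		return sizeof(u32);
	}

	static struct hwrng eip76_rng = {
		.name	= "eip76",
		.read	= eip76_rng_read,
	};

	/* registered from probe with devm_hwrng_register(dev, &eip76_rng) */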
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 98371753a08f..078834a5fd85 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -137,6 +137,7 @@ infineon	Infineon Technologies
 inforce	Inforce Computing
 ingenic	Ingenic Semiconductor
 innolux	Innolux Corporation
+inside-secure	INSIDE Secure
 intel	Intel Corporation
 intercontrol	Inter Control Group
 invensense	InvenSense Inc.
diff --git a/MAINTAINERS b/MAINTAINERS
index 60a01bdaf224..59c9895d73d5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3470,6 +3470,7 @@ F:	arch/*/crypto/
 F:	crypto/
 F:	drivers/crypto/
 F:	include/crypto/
+F:	include/linux/crypto*
 
 CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
 M:	Neil Horman <nhorman@tuxdriver.com>
@@ -5086,6 +5087,14 @@ F:	include/linux/fb.h
 F:	include/uapi/video/
 F:	include/uapi/linux/fb.h
 
+FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
+M:	Horia Geantă <horia.geanta@nxp.com>
+M:	Dan Douglass <dan.douglass@nxp.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/caam/
+F:	Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+
 FREESCALE DIU FRAMEBUFFER DRIVER
 M:	Timur Tabi <timur@tabi.org>
 L:	linux-fbdev@vger.kernel.org
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 27ed1b1cd1d7..13f1b4c289d4 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -88,9 +88,9 @@ config CRYPTO_AES_ARM
 config CRYPTO_AES_ARM_BS
 	tristate "Bit sliced AES using NEON instructions"
 	depends on KERNEL_MODE_NEON
-	select CRYPTO_ALGAPI
 	select CRYPTO_AES_ARM
-	select CRYPTO_ABLK_HELPER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_SIMD
 	help
 	  Use a faster and more secure NEON based implementation of AES in CBC,
 	  CTR and XTS modes
@@ -104,8 +104,8 @@ config CRYPTO_AES_ARM_BS
 config CRYPTO_AES_ARM_CE
 	tristate "Accelerated AES using ARMv8 Crypto Extensions"
 	depends on KERNEL_MODE_NEON
-	select CRYPTO_ALGAPI
-	select CRYPTO_ABLK_HELPER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_SIMD
 	help
 	  Use an implementation of AES in CBC, CTR and XTS modes that uses
 	  ARMv8 Crypto Extensions
@@ -120,4 +120,14 @@ config CRYPTO_GHASH_ARM_CE
 	  that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
 	  that is part of the ARMv8 Crypto Extensions
 
+config CRYPTO_CRCT10DIF_ARM_CE
+	tristate "CRCT10DIF digest algorithm using PMULL instructions"
+	depends on KERNEL_MODE_NEON && CRC_T10DIF
+	select CRYPTO_HASH
+
+config CRYPTO_CRC32_ARM_CE
+	tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
+	depends on KERNEL_MODE_NEON && CRC32
+	select CRYPTO_HASH
+
 endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index fc5150702b64..b578a1820ab1 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -13,6 +13,8 @@ ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
 
 ifneq ($(ce-obj-y)$(ce-obj-m),)
 ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y)
@@ -36,6 +38,8 @@ sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
 sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
 aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
 ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
+crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
+crc32-arm-ce-y := crc32-ce-core.o crc32-ce-glue.o
 
 quiet_cmd_perl = PERL $@
       cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index aef022a87c53..8857531915bf 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -12,8 +12,8 @@
 #include <asm/neon.h>
 #include <asm/hwcap.h>
 #include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 #include <crypto/xts.h>
 
@@ -88,8 +88,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;
 
+#ifndef CONFIG_CPU_BIG_ENDIAN
 		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
 		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
+#else
+		rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
+		rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
+#endif
 		rko[1] = rko[0] ^ rki[1];
 		rko[2] = rko[1] ^ rki[2];
 		rko[3] = rko[2] ^ rki[3];
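The new #ifdef branch is the big-endian fix folded into this range diff: when the key schedule words are loaded as 32-bit quantities on a big-endian CPU, the byte that the round constant must reach sits at the most significant end of the word, so the rotation flips from ror32 to rol32 and rcon is shifted up by 24 bits. A stand-alone illustration in plain C, with names of our own choosing:

	#include <stdint.h>

	/* Same byte-level RotWord + rcon step, expressed for each word order. */
	static uint32_t key_step_le(uint32_t sub, uint32_t rk0, uint32_t rcon)
	{
		return ((sub >> 8) | (sub << 24)) ^ rk0 ^ rcon;		/* ror32(sub, 8) */
	}

	static uint32_t key_step_be(uint32_t sub, uint32_t rk0, uint32_t rcon)
	{
		return ((sub << 8) | (sub >> 24)) ^ rk0 ^ (rcon << 24);	/* rol32(sub, 8) */
	}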
@@ -128,17 +133,17 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 	return 0;
 }
 
-static int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			 unsigned int key_len)
 {
-	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
 	ret = ce_aes_expandkey(ctx, in_key, key_len);
 	if (!ret)
 		return 0;
 
-	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
 
@@ -147,13 +152,13 @@ struct crypto_aes_xts_ctx {
 	struct crypto_aes_ctx __aligned(8) key2;
 };
 
-static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
-	ret = xts_check_key(tfm, in_key, key_len);
+	ret = xts_verify_key(tfm, in_key, key_len);
 	if (ret)
 		return ret;
 
@@ -164,130 +169,113 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	if (!ret)
 		return 0;
 
-	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
 
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
 				   walk.iv);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
 				   walk.iv);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ctr_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err, blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
 				   walk.iv);
-		nbytes -= blocks * AES_BLOCK_SIZE;
-		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
-			break;
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
-	if (walk.nbytes % AES_BLOCK_SIZE) {
-		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
-		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+		unsigned int nbytes = walk.nbytes;
+		u8 *tdst = walk.dst.virt.addr;
+		u8 *tsrc = walk.src.virt.addr;
 
 		/*
 		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
@@ -298,231 +286,172 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		ce_aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc,
 				   num_rounds(ctx), blocks, walk.iv);
 		memcpy(tdst, tail, nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = num_rounds(&ctx->key1);
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key1.key_enc, rounds, blocks,
 				   walk.iv, (u8 *)ctx->key2.key_enc, first);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = num_rounds(&ctx->key1);
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key1.key_dec, rounds, blocks,
 				   walk.iv, (u8 *)ctx->key2.key_enc, first);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static struct crypto_alg aes_algs[] = { {
-	.cra_name		= "__ecb-aes-ce",
-	.cra_driver_name	= "__driver-ecb-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= ce_aes_setkey,
-		.encrypt	= ecb_encrypt,
-		.decrypt	= ecb_decrypt,
+static struct skcipher_alg aes_algs[] = { {
+	.base = {
+		.cra_name		= "__ecb(aes)",
+		.cra_driver_name	= "__ecb-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.setkey		= ce_aes_setkey,
+	.encrypt	= ecb_encrypt,
+	.decrypt	= ecb_decrypt,
 }, {
-	.cra_name		= "__cbc-aes-ce",
-	.cra_driver_name	= "__driver-cbc-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ce_aes_setkey,
-		.encrypt	= cbc_encrypt,
-		.decrypt	= cbc_decrypt,
+	.base = {
+		.cra_name		= "__cbc(aes)",
+		.cra_driver_name	= "__cbc-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= ce_aes_setkey,
+	.encrypt	= cbc_encrypt,
+	.decrypt	= cbc_decrypt,
 }, {
-	.cra_name		= "__ctr-aes-ce",
-	.cra_driver_name	= "__driver-ctr-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ce_aes_setkey,
-		.encrypt	= ctr_encrypt,
-		.decrypt	= ctr_encrypt,
+	.base = {
+		.cra_name		= "__ctr(aes)",
+		.cra_driver_name	= "__ctr-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= ce_aes_setkey,
+	.encrypt	= ctr_encrypt,
+	.decrypt	= ctr_encrypt,
 }, {
-	.cra_name		= "__xts-aes-ce",
-	.cra_driver_name	= "__driver-xts-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= xts_set_key,
-		.encrypt	= xts_encrypt,
-		.decrypt	= xts_decrypt,
+	.base = {
+		.cra_name		= "__xts(aes)",
+		.cra_driver_name	= "__xts-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
+	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= xts_set_key,
+	.encrypt	= xts_encrypt,
+	.decrypt	= xts_decrypt,
 } };
 
+static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
+
+static void aes_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
+		simd_skcipher_free(aes_simd_algs[i]);
+
+	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
 static int __init aes_init(void)
 {
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
+	int err;
+	int i;
+
 	if (!(elf_hwcap2 & HWCAP2_AES))
 		return -ENODEV;
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
-}
 
-static void __exit aes_exit(void)
-{
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		algname = aes_algs[i].base.cra_name + 2;
+		drvname = aes_algs[i].base.cra_driver_name + 2;
+		basename = aes_algs[i].base.cra_driver_name;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aes_simd_algs[i] = simd;
+	}
+
+	return 0;
+
+unregister_simds:
+	aes_exit();
+	return err;
 }
 
 module_init(aes_init);
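After this conversion the accelerated modes are reached through the common skcipher interface, with the simd wrapper re-exporting the internal "__" algorithms under their public names. A minimal sketch of an in-kernel caller follows; it assumes nothing beyond the standard API, and the "ctr(aes)" name, 16-byte key and buffer size are illustrative. Masking CRYPTO_ALG_ASYNC requests a synchronous implementation, so no completion callback is needed:

	#include <crypto/skcipher.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Encrypt one 16-byte heap buffer in place with CTR(AES) (sketch). */
	static int demo_ctr_aes(u8 *buf, const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, 16);
		if (err)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		/* buf must be scatterlist-addressable, i.e. not on the stack. */
		sg_init_one(&sg, buf, 16);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
		err = crypto_skcipher_encrypt(req);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}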
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 0511a6cafe24..d8e06de72ef3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -10,8 +10,9 @@
 
 #include <asm/neon.h>
 #include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 #include <crypto/xts.h>
 
@@ -55,14 +56,14 @@ struct aesbs_xts_ctx {
 	struct AES_KEY	twkey;
 };
 
-static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_cbc_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			     unsigned int key_len)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int bits = key_len * 8;
 
 	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	ctx->dec.rk = ctx->enc;
@@ -71,33 +72,33 @@ static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_ctr_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			     unsigned int key_len)
 {
-	struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int bits = key_len * 8;
 
 	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	ctx->enc.converted = 0;
 	return 0;
 }
 
-static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			     unsigned int key_len)
 {
-	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int bits = key_len * 4;
 	int err;
 
-	err = xts_check_key(tfm, in_key, key_len);
+	err = xts_verify_key(tfm, in_key, key_len);
 	if (err)
 		return err;
 
 	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	ctx->dec.rk = ctx->enc.rk;
@@ -107,88 +108,52 @@ static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static inline void aesbs_encrypt_one(struct crypto_skcipher *tfm,
+				     const u8 *src, u8 *dst)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	int err;
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	AES_encrypt(src, dst, &ctx->enc);
+}
 
-	while (walk.nbytes) {
-		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
-		u8 *src = walk.src.virt.addr;
+static int aesbs_cbc_encrypt(struct skcipher_request *req)
+{
+	return crypto_cbc_encrypt_walk(req, aesbs_encrypt_one);
+}
 
-		if (walk.dst.virt.addr == walk.src.virt.addr) {
-			u8 *iv = walk.iv;
-
-			do {
-				crypto_xor(src, iv, AES_BLOCK_SIZE);
-				AES_encrypt(src, src, &ctx->enc);
-				iv = src;
-				src += AES_BLOCK_SIZE;
-			} while (--blocks);
-			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
-		} else {
-			u8 *dst = walk.dst.virt.addr;
-
-			do {
-				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
-				AES_encrypt(walk.iv, dst, &ctx->enc);
-				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
-				src += AES_BLOCK_SIZE;
-				dst += AES_BLOCK_SIZE;
-			} while (--blocks);
-		}
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
-	}
-	return err;
+static inline void aesbs_decrypt_one(struct crypto_skcipher *tfm,
+				     const u8 *src, u8 *dst)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	AES_decrypt(src, dst, &ctx->dec.rk);
 }
 
-static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static int aesbs_cbc_decrypt(struct skcipher_request *req)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
-
-	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
-		kernel_neon_begin();
-		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-				  walk.nbytes, &ctx->dec, walk.iv);
-		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
-	}
-	while (walk.nbytes) {
-		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+	for (err = skcipher_walk_virt(&walk, req, false);
+	     (nbytes = walk.nbytes); err = skcipher_walk_done(&walk, nbytes)) {
+		u32 blocks = nbytes / AES_BLOCK_SIZE;
 		u8 *dst = walk.dst.virt.addr;
 		u8 *src = walk.src.virt.addr;
-		u8 bk[2][AES_BLOCK_SIZE];
 		u8 *iv = walk.iv;
 
-		do {
-			if (walk.dst.virt.addr == walk.src.virt.addr)
-				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
-
-			AES_decrypt(src, dst, &ctx->dec.rk);
-			crypto_xor(dst, iv, AES_BLOCK_SIZE);
-
-			if (walk.dst.virt.addr == walk.src.virt.addr)
-				iv = bk[blocks & 1];
-			else
-				iv = src;
-
-			dst += AES_BLOCK_SIZE;
-			src += AES_BLOCK_SIZE;
-		} while (--blocks);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		if (blocks >= 8) {
+			kernel_neon_begin();
+			bsaes_cbc_encrypt(src, dst, nbytes, &ctx->dec, iv);
+			kernel_neon_end();
+			nbytes %= AES_BLOCK_SIZE;
+			continue;
+		}
+
+		nbytes = crypto_cbc_decrypt_blocks(&walk, tfm,
+						   aesbs_decrypt_one);
 	}
 	return err;
 }
@@ -206,17 +171,15 @@ static void inc_be128_ctr(__be32 ctr[], u32 addend)
 	}
 }
 
-static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst, struct scatterlist *src,
-			     unsigned int nbytes)
+static int aesbs_ctr_encrypt(struct skcipher_request *req)
 {
-	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	u32 blocks;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
 		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -235,11 +198,7 @@ static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
 		kernel_neon_end();
 		inc_be128_ctr(ctr, blocks);
 
-		nbytes -= blocks * AES_BLOCK_SIZE;
-		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
-			break;
-
-		err = blkcipher_walk_done(desc, &walk, tail);
+		err = skcipher_walk_done(&walk, tail);
 	}
 	if (walk.nbytes) {
 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -248,23 +207,21 @@ static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
 
 		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
 		if (tdst != tsrc)
-			memcpy(tdst, tsrc, nbytes);
-		crypto_xor(tdst, ks, nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+			memcpy(tdst, tsrc, walk.nbytes);
+		crypto_xor(tdst, ks, walk.nbytes);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	return err;
 }
 
-static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static int aesbs_xts_encrypt(struct skcipher_request *req)
 {
-	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	/* generate the initial tweak */
 	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -274,21 +231,19 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
 		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->enc, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static int aesbs_xts_decrypt(struct skcipher_request *req)
 {
-	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	/* generate the initial tweak */
 	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -298,141 +253,110 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
 		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static struct crypto_alg aesbs_algs[] = { {
-	.cra_name		= "__cbc-aes-neonbs",
-	.cra_driver_name	= "__driver-cbc-aes-neonbs",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aesbs_cbc_set_key,
-		.encrypt	= aesbs_cbc_encrypt,
-		.decrypt	= aesbs_cbc_decrypt,
+static struct skcipher_alg aesbs_algs[] = { {
+	.base = {
+		.cra_name		= "__cbc(aes)",
+		.cra_driver_name	= "__cbc-aes-neonbs",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= aesbs_cbc_set_key,
+	.encrypt	= aesbs_cbc_encrypt,
+	.decrypt	= aesbs_cbc_decrypt,
 }, {
-	.cra_name		= "__ctr-aes-neonbs",
-	.cra_driver_name	= "__driver-ctr-aes-neonbs",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aesbs_ctr_set_key,
-		.encrypt	= aesbs_ctr_encrypt,
-		.decrypt	= aesbs_ctr_encrypt,
+	.base = {
+		.cra_name		= "__ctr(aes)",
+		.cra_driver_name	= "__ctr-aes-neonbs",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= aesbs_ctr_set_key,
+	.encrypt	= aesbs_ctr_encrypt,
+	.decrypt	= aesbs_ctr_encrypt,
 }, {
-	.cra_name		= "__xts-aes-neonbs",
-	.cra_driver_name	= "__driver-xts-aes-neonbs",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aesbs_xts_set_key,
-		.encrypt	= aesbs_xts_encrypt,
-		.decrypt	= aesbs_xts_decrypt,
+	.base = {
+		.cra_name		= "__xts(aes)",
+		.cra_driver_name	= "__xts-aes-neonbs",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-neonbs",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= __ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-neonbs",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-neonbs",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
+	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= aesbs_xts_set_key,
+	.encrypt	= aesbs_xts_encrypt,
+	.decrypt	= aesbs_xts_decrypt,
 } };
 
+struct simd_skcipher_alg *aesbs_simd_algs[ARRAY_SIZE(aesbs_algs)];
+
+static void aesbs_mod_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aesbs_simd_algs) && aesbs_simd_algs[i]; i++)
+		simd_skcipher_free(aesbs_simd_algs[i]);
+
+	crypto_unregister_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
 static int __init aesbs_mod_init(void)
 {
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
+	int err;
+	int i;
+
 	if (!cpu_has_neon())
 		return -ENODEV;
 
-	return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
-}
+	err = crypto_register_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+	if (err)
+		return err;
 
| 433 | static void __exit aesbs_mod_exit(void) | 343 | for (i = 0; i < ARRAY_SIZE(aesbs_algs); i++) { |
| 434 | { | 344 | algname = aesbs_algs[i].base.cra_name + 2; |
| 435 | crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs)); | 345 | drvname = aesbs_algs[i].base.cra_driver_name + 2; |
| 346 | basename = aesbs_algs[i].base.cra_driver_name; | ||
| 347 | simd = simd_skcipher_create_compat(algname, drvname, basename); | ||
| 348 | err = PTR_ERR(simd); | ||
| 349 | if (IS_ERR(simd)) | ||
| 350 | goto unregister_simds; | ||
| 351 | |||
| 352 | aesbs_simd_algs[i] = simd; | ||
| 353 | } | ||
| 354 | |||
| 355 | return 0; | ||
| 356 | |||
| 357 | unregister_simds: | ||
| 358 | aesbs_mod_exit(); | ||
| 359 | return err; | ||
| 436 | } | 360 | } |
| 437 | 361 | ||
| 438 | module_init(aesbs_mod_init); | 362 | module_init(aesbs_mod_init); |
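Note on the conversion above: the table now registers only CRYPTO_ALG_INTERNAL skciphers whose cra_name/cra_driver_name carry a "__" prefix, and aesbs_mod_init() derives the user-visible names by skipping that prefix (the "+ 2" pointer arithmetic) before handing all three names to simd_skcipher_create_compat(). A minimal sketch of the same pattern for a single algorithm; the "mydrv" names are illustrative, not part of this patch:

    #include <crypto/internal/simd.h>
    #include <crypto/internal/skcipher.h>
    #include <linux/err.h>
    #include <linux/module.h>

    static struct skcipher_alg alg = {
            .base.cra_name          = "__cbc(aes)",   /* internal-only name */
            .base.cra_driver_name   = "__cbc-aes-mydrv",
            .base.cra_flags         = CRYPTO_ALG_INTERNAL,
            /* ... block size, ctx size, key sizes, setkey/encrypt/decrypt ... */
    };

    static struct simd_skcipher_alg *simd_alg;

    static int __init mod_init(void)
    {
            int err = crypto_register_skcipher(&alg);

            if (err)
                    return err;

            /* expose "cbc(aes)" as an async wrapper around the internal
             * alg; the wrapper defers to cryptd when NEON is unusable */
            simd_alg = simd_skcipher_create_compat("cbc(aes)",
                                                   "cbc-aes-mydrv",
                                                   "__cbc-aes-mydrv");
            if (IS_ERR(simd_alg)) {
                    crypto_unregister_skcipher(&alg);
                    return PTR_ERR(simd_alg);
            }
            return 0;
    }

Because the wrapper registered under the unprefixed name handles the SIMD context itself, the old CRYPTO_ALG_ASYNC ablkcipher entries and their ablk_* helpers could be dropped.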
diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S new file mode 100644 index 000000000000..e63d400dc5c1 --- /dev/null +++ b/arch/arm/crypto/crc32-ce-core.S | |||
| @@ -0,0 +1,306 @@ | |||
| 1 | /* | ||
| 2 | * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | /* GPL HEADER START | ||
| 12 | * | ||
| 13 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | ||
| 14 | * | ||
| 15 | * This program is free software; you can redistribute it and/or modify | ||
| 16 | * it under the terms of the GNU General Public License version 2 only, | ||
| 17 | * as published by the Free Software Foundation. | ||
| 18 | * | ||
| 19 | * This program is distributed in the hope that it will be useful, but | ||
| 20 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 22 | * General Public License version 2 for more details (a copy is included | ||
| 23 | * in the LICENSE file that accompanied this code). | ||
| 24 | * | ||
| 25 | * You should have received a copy of the GNU General Public License | ||
| 26 | * version 2 along with this program; If not, see http://www.gnu.org/licenses | ||
| 27 | * | ||
| 28 | * Please visit http://www.xyratex.com/contact if you need additional | ||
| 29 | * information or have any questions. | ||
| 30 | * | ||
| 31 | * GPL HEADER END | ||
| 32 | */ | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Copyright 2012 Xyratex Technology Limited | ||
| 36 | * | ||
| 37 | * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32 | ||
| 38 | * calculation. | ||
| 39 | * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) | ||
| 40 | * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found | ||
| 41 | * at: | ||
| 42 | * http://www.intel.com/products/processor/manuals/ | ||
| 43 | * Intel(R) 64 and IA-32 Architectures Software Developer's Manual | ||
| 44 | * Volume 2B: Instruction Set Reference, N-Z | ||
| 45 | * | ||
| 46 | * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com> | ||
| 47 | * Alexander Boyko <Alexander_Boyko@xyratex.com> | ||
| 48 | */ | ||
| 49 | |||
| 50 | #include <linux/linkage.h> | ||
| 51 | #include <asm/assembler.h> | ||
| 52 | |||
| 53 | .text | ||
| 54 | .align 6 | ||
| 55 | .arch armv8-a | ||
| 56 | .arch_extension crc | ||
| 57 | .fpu crypto-neon-fp-armv8 | ||
| 58 | |||
| 59 | .Lcrc32_constants: | ||
| 60 | /* | ||
| 61 | * [(x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4 | ||
| 62 | * #define CONSTANT_R1 0x154442bd4LL | ||
| 63 | * | ||
| 64 | * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596 | ||
| 65 | * #define CONSTANT_R2 0x1c6e41596LL | ||
| 66 | */ | ||
| 67 | .quad 0x0000000154442bd4 | ||
| 68 | .quad 0x00000001c6e41596 | ||
| 69 | |||
| 70 | /* | ||
| 71 | * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0 | ||
| 72 | * #define CONSTANT_R3 0x1751997d0LL | ||
| 73 | * | ||
| 74 | * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e | ||
| 75 | * #define CONSTANT_R4 0x0ccaa009eLL | ||
| 76 | */ | ||
| 77 | .quad 0x00000001751997d0 | ||
| 78 | .quad 0x00000000ccaa009e | ||
| 79 | |||
| 80 | /* | ||
| 81 | * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124 | ||
| 82 | * #define CONSTANT_R5 0x163cd6124LL | ||
| 83 | */ | ||
| 84 | .quad 0x0000000163cd6124 | ||
| 85 | .quad 0x00000000FFFFFFFF | ||
| 86 | |||
| 87 | /* | ||
| 88 | * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL | ||
| 89 | * | ||
| 90 | * Barrett Reduction constant (u64') = u' = (x**64 / P(x))' | ||
| 91 | * = 0x1F7011641LL | ||
| 92 | * #define CONSTANT_RU 0x1F7011641LL | ||
| 93 | */ | ||
| 94 | .quad 0x00000001DB710641 | ||
| 95 | .quad 0x00000001F7011641 | ||
| 96 | |||
| 97 | .Lcrc32c_constants: | ||
| 98 | .quad 0x00000000740eef02 | ||
| 99 | .quad 0x000000009e4addf8 | ||
| 100 | .quad 0x00000000f20c0dfe | ||
| 101 | .quad 0x000000014cd00bd6 | ||
| 102 | .quad 0x00000000dd45aab8 | ||
| 103 | .quad 0x00000000FFFFFFFF | ||
| 104 | .quad 0x0000000105ec76f0 | ||
| 105 | .quad 0x00000000dea713f1 | ||
| 106 | |||
| 107 | dCONSTANTl .req d0 | ||
| 108 | dCONSTANTh .req d1 | ||
| 109 | qCONSTANT .req q0 | ||
| 110 | |||
| 111 | BUF .req r0 | ||
| 112 | LEN .req r1 | ||
| 113 | CRC .req r2 | ||
| 114 | |||
| 115 | qzr .req q9 | ||
| 116 | |||
| 117 | /** | ||
| 118 | * Calculate crc32 | ||
| 119 | * BUF - buffer | ||
| 120 | * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63 | ||
| 121 | * CRC - initial crc32 | ||
| 122 | * returns crc32 in r0 | ||
| 123 | * uint crc32_pmull_le(unsigned char const *buffer, | ||
| 124 | * size_t len, uint crc32) | ||
| 125 | */ | ||
| 126 | ENTRY(crc32_pmull_le) | ||
| 127 | adr r3, .Lcrc32_constants | ||
| 128 | b 0f | ||
| 129 | |||
| 130 | ENTRY(crc32c_pmull_le) | ||
| 131 | adr r3, .Lcrc32c_constants | ||
| 132 | |||
| 133 | 0: bic LEN, LEN, #15 | ||
| 134 | vld1.8 {q1-q2}, [BUF, :128]! | ||
| 135 | vld1.8 {q3-q4}, [BUF, :128]! | ||
| 136 | vmov.i8 qzr, #0 | ||
| 137 | vmov.i8 qCONSTANT, #0 | ||
| 138 | vmov dCONSTANTl[0], CRC | ||
| 139 | veor.8 d2, d2, dCONSTANTl | ||
| 140 | sub LEN, LEN, #0x40 | ||
| 141 | cmp LEN, #0x40 | ||
| 142 | blt less_64 | ||
| 143 | |||
| 144 | vld1.64 {qCONSTANT}, [r3] | ||
| 145 | |||
| 146 | loop_64: /* 64 bytes Full cache line folding */ | ||
| 147 | sub LEN, LEN, #0x40 | ||
| 148 | |||
| 149 | vmull.p64 q5, d3, dCONSTANTh | ||
| 150 | vmull.p64 q6, d5, dCONSTANTh | ||
| 151 | vmull.p64 q7, d7, dCONSTANTh | ||
| 152 | vmull.p64 q8, d9, dCONSTANTh | ||
| 153 | |||
| 154 | vmull.p64 q1, d2, dCONSTANTl | ||
| 155 | vmull.p64 q2, d4, dCONSTANTl | ||
| 156 | vmull.p64 q3, d6, dCONSTANTl | ||
| 157 | vmull.p64 q4, d8, dCONSTANTl | ||
| 158 | |||
| 159 | veor.8 q1, q1, q5 | ||
| 160 | vld1.8 {q5}, [BUF, :128]! | ||
| 161 | veor.8 q2, q2, q6 | ||
| 162 | vld1.8 {q6}, [BUF, :128]! | ||
| 163 | veor.8 q3, q3, q7 | ||
| 164 | vld1.8 {q7}, [BUF, :128]! | ||
| 165 | veor.8 q4, q4, q8 | ||
| 166 | vld1.8 {q8}, [BUF, :128]! | ||
| 167 | |||
| 168 | veor.8 q1, q1, q5 | ||
| 169 | veor.8 q2, q2, q6 | ||
| 170 | veor.8 q3, q3, q7 | ||
| 171 | veor.8 q4, q4, q8 | ||
| 172 | |||
| 173 | cmp LEN, #0x40 | ||
| 174 | bge loop_64 | ||
| 175 | |||
| 176 | less_64: /* Folding cache line into 128bit */ | ||
| 177 | vldr dCONSTANTl, [r3, #16] | ||
| 178 | vldr dCONSTANTh, [r3, #24] | ||
| 179 | |||
| 180 | vmull.p64 q5, d3, dCONSTANTh | ||
| 181 | vmull.p64 q1, d2, dCONSTANTl | ||
| 182 | veor.8 q1, q1, q5 | ||
| 183 | veor.8 q1, q1, q2 | ||
| 184 | |||
| 185 | vmull.p64 q5, d3, dCONSTANTh | ||
| 186 | vmull.p64 q1, d2, dCONSTANTl | ||
| 187 | veor.8 q1, q1, q5 | ||
| 188 | veor.8 q1, q1, q3 | ||
| 189 | |||
| 190 | vmull.p64 q5, d3, dCONSTANTh | ||
| 191 | vmull.p64 q1, d2, dCONSTANTl | ||
| 192 | veor.8 q1, q1, q5 | ||
| 193 | veor.8 q1, q1, q4 | ||
| 194 | |||
| 195 | teq LEN, #0 | ||
| 196 | beq fold_64 | ||
| 197 | |||
| 198 | loop_16: /* Folding rest buffer into 128bit */ | ||
| 199 | subs LEN, LEN, #0x10 | ||
| 200 | |||
| 201 | vld1.8 {q2}, [BUF, :128]! | ||
| 202 | vmull.p64 q5, d3, dCONSTANTh | ||
| 203 | vmull.p64 q1, d2, dCONSTANTl | ||
| 204 | veor.8 q1, q1, q5 | ||
| 205 | veor.8 q1, q1, q2 | ||
| 206 | |||
| 207 | bne loop_16 | ||
| 208 | |||
| 209 | fold_64: | ||
| 210 | /* perform the last 64 bit fold, also adds 32 zeroes | ||
| 211 | * to the input stream */ | ||
| 212 | vmull.p64 q2, d2, dCONSTANTh | ||
| 213 | vext.8 q1, q1, qzr, #8 | ||
| 214 | veor.8 q1, q1, q2 | ||
| 215 | |||
| 216 | /* final 32-bit fold */ | ||
| 217 | vldr dCONSTANTl, [r3, #32] | ||
| 218 | vldr d6, [r3, #40] | ||
| 219 | vmov.i8 d7, #0 | ||
| 220 | |||
| 221 | vext.8 q2, q1, qzr, #4 | ||
| 222 | vand.8 d2, d2, d6 | ||
| 223 | vmull.p64 q1, d2, dCONSTANTl | ||
| 224 | veor.8 q1, q1, q2 | ||
| 225 | |||
| 226 | /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ | ||
| 227 | vldr dCONSTANTl, [r3, #48] | ||
| 228 | vldr dCONSTANTh, [r3, #56] | ||
| 229 | |||
| 230 | vand.8 q2, q1, q3 | ||
| 231 | vext.8 q2, qzr, q2, #8 | ||
| 232 | vmull.p64 q2, d5, dCONSTANTh | ||
| 233 | vand.8 q2, q2, q3 | ||
| 234 | vmull.p64 q2, d4, dCONSTANTl | ||
| 235 | veor.8 q1, q1, q2 | ||
| 236 | vmov r0, s5 | ||
| 237 | |||
| 238 | bx lr | ||
| 239 | ENDPROC(crc32_pmull_le) | ||
| 240 | ENDPROC(crc32c_pmull_le) | ||
| 241 | |||
| 242 | .macro __crc32, c | ||
| 243 | subs ip, r2, #8 | ||
| 244 | bmi .Ltail\c | ||
| 245 | |||
| 246 | tst r1, #3 | ||
| 247 | bne .Lunaligned\c | ||
| 248 | |||
| 249 | teq ip, #0 | ||
| 250 | .Laligned8\c: | ||
| 251 | ldrd r2, r3, [r1], #8 | ||
| 252 | ARM_BE8(rev r2, r2 ) | ||
| 253 | ARM_BE8(rev r3, r3 ) | ||
| 254 | crc32\c\()w r0, r0, r2 | ||
| 255 | crc32\c\()w r0, r0, r3 | ||
| 256 | bxeq lr | ||
| 257 | subs ip, ip, #8 | ||
| 258 | bpl .Laligned8\c | ||
| 259 | |||
| 260 | .Ltail\c: | ||
| 261 | tst ip, #4 | ||
| 262 | beq 2f | ||
| 263 | ldr r3, [r1], #4 | ||
| 264 | ARM_BE8(rev r3, r3 ) | ||
| 265 | crc32\c\()w r0, r0, r3 | ||
| 266 | |||
| 267 | 2: tst ip, #2 | ||
| 268 | beq 1f | ||
| 269 | ldrh r3, [r1], #2 | ||
| 270 | ARM_BE8(rev16 r3, r3 ) | ||
| 271 | crc32\c\()h r0, r0, r3 | ||
| 272 | |||
| 273 | 1: tst ip, #1 | ||
| 274 | bxeq lr | ||
| 275 | ldrb r3, [r1] | ||
| 276 | crc32\c\()b r0, r0, r3 | ||
| 277 | bx lr | ||
| 278 | |||
| 279 | .Lunaligned\c: | ||
| 280 | tst r1, #1 | ||
| 281 | beq 2f | ||
| 282 | ldrb r3, [r1], #1 | ||
| 283 | subs r2, r2, #1 | ||
| 284 | crc32\c\()b r0, r0, r3 | ||
| 285 | |||
| 286 | tst r1, #2 | ||
| 287 | beq 0f | ||
| 288 | 2: ldrh r3, [r1], #2 | ||
| 289 | subs r2, r2, #2 | ||
| 290 | ARM_BE8(rev16 r3, r3 ) | ||
| 291 | crc32\c\()h r0, r0, r3 | ||
| 292 | |||
| 293 | 0: subs ip, r2, #8 | ||
| 294 | bpl .Laligned8\c | ||
| 295 | b .Ltail\c | ||
| 296 | .endm | ||
| 297 | |||
| 298 | .align 5 | ||
| 299 | ENTRY(crc32_armv8_le) | ||
| 300 | __crc32 | ||
| 301 | ENDPROC(crc32_armv8_le) | ||
| 302 | |||
| 303 | .align 5 | ||
| 304 | ENTRY(crc32c_armv8_le) | ||
| 305 | __crc32 c | ||
| 306 | ENDPROC(crc32c_armv8_le) | ||
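Both entry points above must agree with the bit-reflected generic implementations: crc32_pmull_le() uses the 0xEDB88320 (LE) polynomial named in the constants comment, and crc32c_pmull_le() the Castagnoli polynomial 0x82F63B78 behind the .Lcrc32c_constants block. A scalar reference sketch, for checking only, not kernel code:

    #include <stddef.h>
    #include <stdint.h>

    /* bitwise little-endian CRC32; pass poly = 0x82F63B78 for CRC32C */
    static uint32_t crc32_le_ref(uint32_t crc, uint32_t poly,
                                 const uint8_t *buf, size_t len)
    {
            while (len--) {
                    crc ^= *buf++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (poly & -(crc & 1));
            }
            return crc;
    }

The glue code in the next file layers the usual crc32c convention on top of this: the context is seeded with ~0 and the final digest is inverted again.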
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c new file mode 100644 index 000000000000..e1566bec1016 --- /dev/null +++ b/arch/arm/crypto/crc32-ce-glue.c | |||
| @@ -0,0 +1,242 @@ | |||
| 1 | /* | ||
| 2 | * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/crc32.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/string.h> | ||
| 16 | |||
| 17 | #include <crypto/internal/hash.h> | ||
| 18 | |||
| 19 | #include <asm/hwcap.h> | ||
| 20 | #include <asm/neon.h> | ||
| 21 | #include <asm/simd.h> | ||
| 22 | #include <asm/unaligned.h> | ||
| 23 | |||
| 24 | #define PMULL_MIN_LEN 64L /* minimum size of buffer | ||
| 25 | * for crc32_pmull_le() */ | ||
| 26 | #define SCALE_F 16L /* size of NEON register */ | ||
| 27 | |||
| 28 | asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc); | ||
| 29 | asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len); | ||
| 30 | |||
| 31 | asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc); | ||
| 32 | asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len); | ||
| 33 | |||
| 34 | static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], u32 len); | ||
| 35 | static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], u32 len); | ||
| 36 | |||
| 37 | static int crc32_cra_init(struct crypto_tfm *tfm) | ||
| 38 | { | ||
| 39 | u32 *key = crypto_tfm_ctx(tfm); | ||
| 40 | |||
| 41 | *key = 0; | ||
| 42 | return 0; | ||
| 43 | } | ||
| 44 | |||
| 45 | static int crc32c_cra_init(struct crypto_tfm *tfm) | ||
| 46 | { | ||
| 47 | u32 *key = crypto_tfm_ctx(tfm); | ||
| 48 | |||
| 49 | *key = ~0; | ||
| 50 | return 0; | ||
| 51 | } | ||
| 52 | |||
| 53 | static int crc32_setkey(struct crypto_shash *hash, const u8 *key, | ||
| 54 | unsigned int keylen) | ||
| 55 | { | ||
| 56 | u32 *mctx = crypto_shash_ctx(hash); | ||
| 57 | |||
| 58 | if (keylen != sizeof(u32)) { | ||
| 59 | crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 60 | return -EINVAL; | ||
| 61 | } | ||
| 62 | *mctx = le32_to_cpup((__le32 *)key); | ||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | |||
| 66 | static int crc32_init(struct shash_desc *desc) | ||
| 67 | { | ||
| 68 | u32 *mctx = crypto_shash_ctx(desc->tfm); | ||
| 69 | u32 *crc = shash_desc_ctx(desc); | ||
| 70 | |||
| 71 | *crc = *mctx; | ||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | static int crc32_update(struct shash_desc *desc, const u8 *data, | ||
| 76 | unsigned int length) | ||
| 77 | { | ||
| 78 | u32 *crc = shash_desc_ctx(desc); | ||
| 79 | |||
| 80 | *crc = crc32_armv8_le(*crc, data, length); | ||
| 81 | return 0; | ||
| 82 | } | ||
| 83 | |||
| 84 | static int crc32c_update(struct shash_desc *desc, const u8 *data, | ||
| 85 | unsigned int length) | ||
| 86 | { | ||
| 87 | u32 *crc = shash_desc_ctx(desc); | ||
| 88 | |||
| 89 | *crc = crc32c_armv8_le(*crc, data, length); | ||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | static int crc32_final(struct shash_desc *desc, u8 *out) | ||
| 94 | { | ||
| 95 | u32 *crc = shash_desc_ctx(desc); | ||
| 96 | |||
| 97 | put_unaligned_le32(*crc, out); | ||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static int crc32c_final(struct shash_desc *desc, u8 *out) | ||
| 102 | { | ||
| 103 | u32 *crc = shash_desc_ctx(desc); | ||
| 104 | |||
| 105 | put_unaligned_le32(~*crc, out); | ||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | |||
| 109 | static int crc32_pmull_update(struct shash_desc *desc, const u8 *data, | ||
| 110 | unsigned int length) | ||
| 111 | { | ||
| 112 | u32 *crc = shash_desc_ctx(desc); | ||
| 113 | unsigned int l; | ||
| 114 | |||
| 115 | if (may_use_simd()) { | ||
| 116 | if ((u32)data % SCALE_F) { | ||
| 117 | l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F)); | ||
| 118 | |||
| 119 | *crc = fallback_crc32(*crc, data, l); | ||
| 120 | |||
| 121 | data += l; | ||
| 122 | length -= l; | ||
| 123 | } | ||
| 124 | |||
| 125 | if (length >= PMULL_MIN_LEN) { | ||
| 126 | l = round_down(length, SCALE_F); | ||
| 127 | |||
| 128 | kernel_neon_begin(); | ||
| 129 | *crc = crc32_pmull_le(data, l, *crc); | ||
| 130 | kernel_neon_end(); | ||
| 131 | |||
| 132 | data += l; | ||
| 133 | length -= l; | ||
| 134 | } | ||
| 135 | } | ||
| 136 | |||
| 137 | if (length > 0) | ||
| 138 | *crc = fallback_crc32(*crc, data, length); | ||
| 139 | |||
| 140 | return 0; | ||
| 141 | } | ||
| 142 | |||
| 143 | static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data, | ||
| 144 | unsigned int length) | ||
| 145 | { | ||
| 146 | u32 *crc = shash_desc_ctx(desc); | ||
| 147 | unsigned int l; | ||
| 148 | |||
| 149 | if (may_use_simd()) { | ||
| 150 | if ((u32)data % SCALE_F) { | ||
| 151 | l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F)); | ||
| 152 | |||
| 153 | *crc = fallback_crc32c(*crc, data, l); | ||
| 154 | |||
| 155 | data += l; | ||
| 156 | length -= l; | ||
| 157 | } | ||
| 158 | |||
| 159 | if (length >= PMULL_MIN_LEN) { | ||
| 160 | l = round_down(length, SCALE_F); | ||
| 161 | |||
| 162 | kernel_neon_begin(); | ||
| 163 | *crc = crc32c_pmull_le(data, l, *crc); | ||
| 164 | kernel_neon_end(); | ||
| 165 | |||
| 166 | data += l; | ||
| 167 | length -= l; | ||
| 168 | } | ||
| 169 | } | ||
| 170 | |||
| 171 | if (length > 0) | ||
| 172 | *crc = fallback_crc32c(*crc, data, length); | ||
| 173 | |||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | static struct shash_alg crc32_pmull_algs[] = { { | ||
| 178 | .setkey = crc32_setkey, | ||
| 179 | .init = crc32_init, | ||
| 180 | .update = crc32_update, | ||
| 181 | .final = crc32_final, | ||
| 182 | .descsize = sizeof(u32), | ||
| 183 | .digestsize = sizeof(u32), | ||
| 184 | |||
| 185 | .base.cra_ctxsize = sizeof(u32), | ||
| 186 | .base.cra_init = crc32_cra_init, | ||
| 187 | .base.cra_name = "crc32", | ||
| 188 | .base.cra_driver_name = "crc32-arm-ce", | ||
| 189 | .base.cra_priority = 200, | ||
| 190 | .base.cra_blocksize = 1, | ||
| 191 | .base.cra_module = THIS_MODULE, | ||
| 192 | }, { | ||
| 193 | .setkey = crc32_setkey, | ||
| 194 | .init = crc32_init, | ||
| 195 | .update = crc32c_update, | ||
| 196 | .final = crc32c_final, | ||
| 197 | .descsize = sizeof(u32), | ||
| 198 | .digestsize = sizeof(u32), | ||
| 199 | |||
| 200 | .base.cra_ctxsize = sizeof(u32), | ||
| 201 | .base.cra_init = crc32c_cra_init, | ||
| 202 | .base.cra_name = "crc32c", | ||
| 203 | .base.cra_driver_name = "crc32c-arm-ce", | ||
| 204 | .base.cra_priority = 200, | ||
| 205 | .base.cra_blocksize = 1, | ||
| 206 | .base.cra_module = THIS_MODULE, | ||
| 207 | } }; | ||
| 208 | |||
| 209 | static int __init crc32_pmull_mod_init(void) | ||
| 210 | { | ||
| 211 | if (elf_hwcap2 & HWCAP2_PMULL) { | ||
| 212 | crc32_pmull_algs[0].update = crc32_pmull_update; | ||
| 213 | crc32_pmull_algs[1].update = crc32c_pmull_update; | ||
| 214 | |||
| 215 | if (elf_hwcap2 & HWCAP2_CRC32) { | ||
| 216 | fallback_crc32 = crc32_armv8_le; | ||
| 217 | fallback_crc32c = crc32c_armv8_le; | ||
| 218 | } else { | ||
| 219 | fallback_crc32 = crc32_le; | ||
| 220 | fallback_crc32c = __crc32c_le; | ||
| 221 | } | ||
| 222 | } else if (!(elf_hwcap2 & HWCAP2_CRC32)) { | ||
| 223 | return -ENODEV; | ||
| 224 | } | ||
| 225 | |||
| 226 | return crypto_register_shashes(crc32_pmull_algs, | ||
| 227 | ARRAY_SIZE(crc32_pmull_algs)); | ||
| 228 | } | ||
| 229 | |||
| 230 | static void __exit crc32_pmull_mod_exit(void) | ||
| 231 | { | ||
| 232 | crypto_unregister_shashes(crc32_pmull_algs, | ||
| 233 | ARRAY_SIZE(crc32_pmull_algs)); | ||
| 234 | } | ||
| 235 | |||
| 236 | module_init(crc32_pmull_mod_init); | ||
| 237 | module_exit(crc32_pmull_mod_exit); | ||
| 238 | |||
| 239 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
| 240 | MODULE_LICENSE("GPL v2"); | ||
| 241 | MODULE_ALIAS_CRYPTO("crc32"); | ||
| 242 | MODULE_ALIAS_CRYPTO("crc32c"); | ||
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S new file mode 100644 index 000000000000..ce45ba0c0687 --- /dev/null +++ b/arch/arm/crypto/crct10dif-ce-core.S | |||
| @@ -0,0 +1,427 @@ | |||
| 1 | // | ||
| 2 | // Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions | ||
| 3 | // | ||
| 4 | // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | // | ||
| 6 | // This program is free software; you can redistribute it and/or modify | ||
| 7 | // it under the terms of the GNU General Public License version 2 as | ||
| 8 | // published by the Free Software Foundation. | ||
| 9 | // | ||
| 10 | |||
| 11 | // | ||
| 12 | // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions | ||
| 13 | // | ||
| 14 | // Copyright (c) 2013, Intel Corporation | ||
| 15 | // | ||
| 16 | // Authors: | ||
| 17 | // Erdinc Ozturk <erdinc.ozturk@intel.com> | ||
| 18 | // Vinodh Gopal <vinodh.gopal@intel.com> | ||
| 19 | // James Guilford <james.guilford@intel.com> | ||
| 20 | // Tim Chen <tim.c.chen@linux.intel.com> | ||
| 21 | // | ||
| 22 | // This software is available to you under a choice of one of two | ||
| 23 | // licenses. You may choose to be licensed under the terms of the GNU | ||
| 24 | // General Public License (GPL) Version 2, available from the file | ||
| 25 | // COPYING in the main directory of this source tree, or the | ||
| 26 | // OpenIB.org BSD license below: | ||
| 27 | // | ||
| 28 | // Redistribution and use in source and binary forms, with or without | ||
| 29 | // modification, are permitted provided that the following conditions are | ||
| 30 | // met: | ||
| 31 | // | ||
| 32 | // * Redistributions of source code must retain the above copyright | ||
| 33 | // notice, this list of conditions and the following disclaimer. | ||
| 34 | // | ||
| 35 | // * Redistributions in binary form must reproduce the above copyright | ||
| 36 | // notice, this list of conditions and the following disclaimer in the | ||
| 37 | // documentation and/or other materials provided with the | ||
| 38 | // distribution. | ||
| 39 | // | ||
| 40 | // * Neither the name of the Intel Corporation nor the names of its | ||
| 41 | // contributors may be used to endorse or promote products derived from | ||
| 42 | // this software without specific prior written permission. | ||
| 43 | // | ||
| 44 | // | ||
| 45 | // THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY | ||
| 46 | // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 47 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
| 48 | // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR | ||
| 49 | // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | ||
| 50 | // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | ||
| 51 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | ||
| 52 | // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 53 | // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
| 54 | // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 55 | // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 56 | // | ||
| 57 | // Function API: | ||
| 58 | // UINT16 crc_t10dif_pcl( | ||
| 59 | // UINT16 init_crc, //initial CRC value, 16 bits | ||
| 60 | // const unsigned char *buf, //buffer pointer to calculate CRC on | ||
| 61 | // UINT64 len //buffer length in bytes (64-bit data) | ||
| 62 | // ); | ||
| 63 | // | ||
| 64 | // Reference paper titled "Fast CRC Computation for Generic | ||
| 65 | // Polynomials Using PCLMULQDQ Instruction" | ||
| 66 | // URL: http://www.intel.com/content/dam/www/public/us/en/documents | ||
| 67 | // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf | ||
| 68 | // | ||
| 69 | // | ||
| 70 | |||
| 71 | #include <linux/linkage.h> | ||
| 72 | #include <asm/assembler.h> | ||
| 73 | |||
| 74 | #ifdef CONFIG_CPU_ENDIAN_BE8 | ||
| 75 | #define CPU_LE(code...) | ||
| 76 | #else | ||
| 77 | #define CPU_LE(code...) code | ||
| 78 | #endif | ||
| 79 | |||
| 80 | .text | ||
| 81 | .fpu crypto-neon-fp-armv8 | ||
| 82 | |||
| 83 | arg1_low32 .req r0 | ||
| 84 | arg2 .req r1 | ||
| 85 | arg3 .req r2 | ||
| 86 | |||
| 87 | qzr .req q13 | ||
| 88 | |||
| 89 | q0l .req d0 | ||
| 90 | q0h .req d1 | ||
| 91 | q1l .req d2 | ||
| 92 | q1h .req d3 | ||
| 93 | q2l .req d4 | ||
| 94 | q2h .req d5 | ||
| 95 | q3l .req d6 | ||
| 96 | q3h .req d7 | ||
| 97 | q4l .req d8 | ||
| 98 | q4h .req d9 | ||
| 99 | q5l .req d10 | ||
| 100 | q5h .req d11 | ||
| 101 | q6l .req d12 | ||
| 102 | q6h .req d13 | ||
| 103 | q7l .req d14 | ||
| 104 | q7h .req d15 | ||
| 105 | |||
| 106 | ENTRY(crc_t10dif_pmull) | ||
| 107 | vmov.i8 qzr, #0 // init zero register | ||
| 108 | |||
| 109 | // adjust the 16-bit initial_crc value, scale it to 32 bits | ||
| 110 | lsl arg1_low32, arg1_low32, #16 | ||
| 111 | |||
| 112 | // check if smaller than 256 | ||
| 113 | cmp arg3, #256 | ||
| 114 | |||
| 115 | // for sizes less than 256, we can't fold 64B at a time... | ||
| 116 | blt _less_than_128 | ||
| 117 | |||
| 118 | // load the initial crc value | ||
| 119 | // crc value does not need to be byte-reflected, but it needs | ||
| 120 | // to be moved to the high part of the register. | ||
| 121 | // because data will be byte-reflected and will align with | ||
| 122 | // initial crc at correct place. | ||
| 123 | vmov s0, arg1_low32 // initial crc | ||
| 124 | vext.8 q10, qzr, q0, #4 | ||
| 125 | |||
| 126 | // receive the initial 64B data, xor the initial crc value | ||
| 127 | vld1.64 {q0-q1}, [arg2, :128]! | ||
| 128 | vld1.64 {q2-q3}, [arg2, :128]! | ||
| 129 | vld1.64 {q4-q5}, [arg2, :128]! | ||
| 130 | vld1.64 {q6-q7}, [arg2, :128]! | ||
| 131 | CPU_LE( vrev64.8 q0, q0 ) | ||
| 132 | CPU_LE( vrev64.8 q1, q1 ) | ||
| 133 | CPU_LE( vrev64.8 q2, q2 ) | ||
| 134 | CPU_LE( vrev64.8 q3, q3 ) | ||
| 135 | CPU_LE( vrev64.8 q4, q4 ) | ||
| 136 | CPU_LE( vrev64.8 q5, q5 ) | ||
| 137 | CPU_LE( vrev64.8 q6, q6 ) | ||
| 138 | CPU_LE( vrev64.8 q7, q7 ) | ||
| 139 | |||
| 140 | vswp d0, d1 | ||
| 141 | vswp d2, d3 | ||
| 142 | vswp d4, d5 | ||
| 143 | vswp d6, d7 | ||
| 144 | vswp d8, d9 | ||
| 145 | vswp d10, d11 | ||
| 146 | vswp d12, d13 | ||
| 147 | vswp d14, d15 | ||
| 148 | |||
| 149 | // XOR the initial_crc value | ||
| 150 | veor.8 q0, q0, q10 | ||
| 151 | |||
| 152 | adr ip, rk3 | ||
| 153 | vld1.64 {q10}, [ip, :128] // xmm10 has rk3 and rk4 | ||
| 154 | |||
| 155 | // | ||
| 156 | // we subtract 256 instead of 128 to save one instruction from the loop | ||
| 157 | // | ||
| 158 | sub arg3, arg3, #256 | ||
| 159 | |||
| 160 | // at this section of the code, there is 64*x+y (0<=y<64) bytes of | ||
| 161 | // buffer. The _fold_64_B_loop will fold 64B at a time | ||
| 162 | // until we have 64+y Bytes of buffer | ||
| 163 | |||
| 164 | |||
| 165 | // fold 64B at a time. This section of the code folds 4 vector | ||
| 166 | // registers in parallel | ||
| 167 | _fold_64_B_loop: | ||
| 168 | |||
| 169 | .macro fold64, reg1, reg2 | ||
| 170 | vld1.64 {q11-q12}, [arg2, :128]! | ||
| 171 | |||
| 172 | vmull.p64 q8, \reg1\()h, d21 | ||
| 173 | vmull.p64 \reg1, \reg1\()l, d20 | ||
| 174 | vmull.p64 q9, \reg2\()h, d21 | ||
| 175 | vmull.p64 \reg2, \reg2\()l, d20 | ||
| 176 | |||
| 177 | CPU_LE( vrev64.8 q11, q11 ) | ||
| 178 | CPU_LE( vrev64.8 q12, q12 ) | ||
| 179 | vswp d22, d23 | ||
| 180 | vswp d24, d25 | ||
| 181 | |||
| 182 | veor.8 \reg1, \reg1, q8 | ||
| 183 | veor.8 \reg2, \reg2, q9 | ||
| 184 | veor.8 \reg1, \reg1, q11 | ||
| 185 | veor.8 \reg2, \reg2, q12 | ||
| 186 | .endm | ||
| 187 | |||
| 188 | fold64 q0, q1 | ||
| 189 | fold64 q2, q3 | ||
| 190 | fold64 q4, q5 | ||
| 191 | fold64 q6, q7 | ||
| 192 | |||
| 193 | subs arg3, arg3, #128 | ||
| 194 | |||
| 195 | // check if there is another 64B in the buffer to be able to fold | ||
| 196 | bge _fold_64_B_loop | ||
| 197 | |||
| 198 | // at this point, the buffer pointer is pointing at the last y Bytes | ||
| 199 | // of the buffer. The 64B of folded data is in 4 of the vector | ||
| 200 | // registers: v0, v1, v2, v3 | ||
| 201 | |||
| 202 | // fold the 8 vector registers to 1 vector register with different | ||
| 203 | // constants | ||
| 204 | |||
| 205 | adr ip, rk9 | ||
| 206 | vld1.64 {q10}, [ip, :128]! | ||
| 207 | |||
| 208 | .macro fold16, reg, rk | ||
| 209 | vmull.p64 q8, \reg\()l, d20 | ||
| 210 | vmull.p64 \reg, \reg\()h, d21 | ||
| 211 | .ifnb \rk | ||
| 212 | vld1.64 {q10}, [ip, :128]! | ||
| 213 | .endif | ||
| 214 | veor.8 q7, q7, q8 | ||
| 215 | veor.8 q7, q7, \reg | ||
| 216 | .endm | ||
| 217 | |||
| 218 | fold16 q0, rk11 | ||
| 219 | fold16 q1, rk13 | ||
| 220 | fold16 q2, rk15 | ||
| 221 | fold16 q3, rk17 | ||
| 222 | fold16 q4, rk19 | ||
| 223 | fold16 q5, rk1 | ||
| 224 | fold16 q6 | ||
| 225 | |||
| 226 | // instead of 64, we add 48 to the loop counter to save 1 instruction | ||
| 227 | // from the loop. Instead of a cmp instruction, we use the negative | ||
| 228 | // flag with the blt instruction | ||
| 229 | adds arg3, arg3, #(128-16) | ||
| 230 | blt _final_reduction_for_128 | ||
| 231 | |||
| 232 | // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 | ||
| 233 | // and the rest is in memory. We can fold 16 bytes at a time if y>=16 | ||
| 234 | // continue folding 16B at a time | ||
| 235 | |||
| 236 | _16B_reduction_loop: | ||
| 237 | vmull.p64 q8, d14, d20 | ||
| 238 | vmull.p64 q7, d15, d21 | ||
| 239 | veor.8 q7, q7, q8 | ||
| 240 | |||
| 241 | vld1.64 {q0}, [arg2, :128]! | ||
| 242 | CPU_LE( vrev64.8 q0, q0 ) | ||
| 243 | vswp d0, d1 | ||
| 244 | veor.8 q7, q7, q0 | ||
| 245 | subs arg3, arg3, #16 | ||
| 246 | |||
| 247 | // instead of a cmp instruction, we utilize the flags with the | ||
| 248 | // bge instruction, equivalent of: cmp arg3, #(16-16) | ||
| 249 | // check if there is any more 16B in the buffer to be able to fold | ||
| 250 | bge _16B_reduction_loop | ||
| 251 | |||
| 252 | // now we have 16+z bytes left to reduce, where 0<= z < 16. | ||
| 253 | // first, we reduce the data in the xmm7 register | ||
| 254 | |||
| 255 | _final_reduction_for_128: | ||
| 256 | // check if any more data to fold. If not, compute the CRC of | ||
| 257 | // the final 128 bits | ||
| 258 | adds arg3, arg3, #16 | ||
| 259 | beq _128_done | ||
| 260 | |||
| 261 | // here we are getting data that is less than 16 bytes. | ||
| 262 | // since we know that there was data before the pointer, we can | ||
| 263 | // offset the input pointer before the actual point, to receive | ||
| 264 | // exactly 16 bytes. after that the registers need to be adjusted. | ||
| 265 | _get_last_two_regs: | ||
| 266 | add arg2, arg2, arg3 | ||
| 267 | sub arg2, arg2, #16 | ||
| 268 | vld1.64 {q1}, [arg2] | ||
| 269 | CPU_LE( vrev64.8 q1, q1 ) | ||
| 270 | vswp d2, d3 | ||
| 271 | |||
| 272 | // get rid of the extra data that was loaded before | ||
| 273 | // load the shift constant | ||
| 274 | adr ip, tbl_shf_table + 16 | ||
| 275 | sub ip, ip, arg3 | ||
| 276 | vld1.8 {q0}, [ip] | ||
| 277 | |||
| 278 | // shift v2 to the left by arg3 bytes | ||
| 279 | vtbl.8 d4, {d14-d15}, d0 | ||
| 280 | vtbl.8 d5, {d14-d15}, d1 | ||
| 281 | |||
| 282 | // shift v7 to the right by 16-arg3 bytes | ||
| 283 | vmov.i8 q9, #0x80 | ||
| 284 | veor.8 q0, q0, q9 | ||
| 285 | vtbl.8 d18, {d14-d15}, d0 | ||
| 286 | vtbl.8 d19, {d14-d15}, d1 | ||
| 287 | |||
| 288 | // blend | ||
| 289 | vshr.s8 q0, q0, #7 // convert to 8-bit mask | ||
| 290 | vbsl.8 q0, q2, q1 | ||
| 291 | |||
| 292 | // fold 16 Bytes | ||
| 293 | vmull.p64 q8, d18, d20 | ||
| 294 | vmull.p64 q7, d19, d21 | ||
| 295 | veor.8 q7, q7, q8 | ||
| 296 | veor.8 q7, q7, q0 | ||
| 297 | |||
| 298 | _128_done: | ||
| 299 | // compute crc of a 128-bit value | ||
| 300 | vldr d20, rk5 | ||
| 301 | vldr d21, rk6 // rk5 and rk6 in xmm10 | ||
| 302 | |||
| 303 | // 64b fold | ||
| 304 | vext.8 q0, qzr, q7, #8 | ||
| 305 | vmull.p64 q7, d15, d20 | ||
| 306 | veor.8 q7, q7, q0 | ||
| 307 | |||
| 308 | // 32b fold | ||
| 309 | vext.8 q0, q7, qzr, #12 | ||
| 310 | vmov s31, s3 | ||
| 311 | vmull.p64 q0, d0, d21 | ||
| 312 | veor.8 q7, q0, q7 | ||
| 313 | |||
| 314 | // barrett reduction | ||
| 315 | _barrett: | ||
| 316 | vldr d20, rk7 | ||
| 317 | vldr d21, rk8 | ||
| 318 | |||
| 319 | vmull.p64 q0, d15, d20 | ||
| 320 | vext.8 q0, qzr, q0, #12 | ||
| 321 | vmull.p64 q0, d1, d21 | ||
| 322 | vext.8 q0, qzr, q0, #12 | ||
| 323 | veor.8 q7, q7, q0 | ||
| 324 | vmov r0, s29 | ||
| 325 | |||
| 326 | _cleanup: | ||
| 327 | // scale the result back to 16 bits | ||
| 328 | lsr r0, r0, #16 | ||
| 329 | bx lr | ||
| 330 | |||
| 331 | _less_than_128: | ||
| 332 | teq arg3, #0 | ||
| 333 | beq _cleanup | ||
| 334 | |||
| 335 | vmov.i8 q0, #0 | ||
| 336 | vmov s3, arg1_low32 // get the initial crc value | ||
| 337 | |||
| 338 | vld1.64 {q7}, [arg2, :128]! | ||
| 339 | CPU_LE( vrev64.8 q7, q7 ) | ||
| 340 | vswp d14, d15 | ||
| 341 | veor.8 q7, q7, q0 | ||
| 342 | |||
| 343 | cmp arg3, #16 | ||
| 344 | beq _128_done // exactly 16 left | ||
| 345 | blt _less_than_16_left | ||
| 346 | |||
| 347 | // now if there is, load the constants | ||
| 348 | vldr d20, rk1 | ||
| 349 | vldr d21, rk2 // rk1 and rk2 in xmm10 | ||
| 350 | |||
| 351 | // check if there is enough buffer to be able to fold 16B at a time | ||
| 352 | subs arg3, arg3, #32 | ||
| 353 | addlt arg3, arg3, #16 | ||
| 354 | blt _get_last_two_regs | ||
| 355 | b _16B_reduction_loop | ||
| 356 | |||
| 357 | _less_than_16_left: | ||
| 358 | // shl r9, 4 | ||
| 359 | adr ip, tbl_shf_table + 16 | ||
| 360 | sub ip, ip, arg3 | ||
| 361 | vld1.8 {q0}, [ip] | ||
| 362 | vmov.i8 q9, #0x80 | ||
| 363 | veor.8 q0, q0, q9 | ||
| 364 | vtbl.8 d18, {d14-d15}, d0 | ||
| 365 | vtbl.8 d15, {d14-d15}, d1 | ||
| 366 | vmov d14, d18 | ||
| 367 | b _128_done | ||
| 368 | ENDPROC(crc_t10dif_pmull) | ||
| 369 | |||
| 370 | // precomputed constants | ||
| 371 | // these constants are precomputed from the poly: | ||
| 372 | // 0x8bb70000 (0x8bb7 scaled to 32 bits) | ||
| 373 | .align 4 | ||
| 374 | // Q = 0x18BB70000 | ||
| 375 | // rk1 = 2^(32*3) mod Q << 32 | ||
| 376 | // rk2 = 2^(32*5) mod Q << 32 | ||
| 377 | // rk3 = 2^(32*15) mod Q << 32 | ||
| 378 | // rk4 = 2^(32*17) mod Q << 32 | ||
| 379 | // rk5 = 2^(32*3) mod Q << 32 | ||
| 380 | // rk6 = 2^(32*2) mod Q << 32 | ||
| 381 | // rk7 = floor(2^64/Q) | ||
| 382 | // rk8 = Q | ||
| 383 | |||
| 384 | rk3: .quad 0x9d9d000000000000 | ||
| 385 | rk4: .quad 0x7cf5000000000000 | ||
| 386 | rk5: .quad 0x2d56000000000000 | ||
| 387 | rk6: .quad 0x1368000000000000 | ||
| 388 | rk7: .quad 0x00000001f65a57f8 | ||
| 389 | rk8: .quad 0x000000018bb70000 | ||
| 390 | rk9: .quad 0xceae000000000000 | ||
| 391 | rk10: .quad 0xbfd6000000000000 | ||
| 392 | rk11: .quad 0x1e16000000000000 | ||
| 393 | rk12: .quad 0x713c000000000000 | ||
| 394 | rk13: .quad 0xf7f9000000000000 | ||
| 395 | rk14: .quad 0x80a6000000000000 | ||
| 396 | rk15: .quad 0x044c000000000000 | ||
| 397 | rk16: .quad 0xe658000000000000 | ||
| 398 | rk17: .quad 0xad18000000000000 | ||
| 399 | rk18: .quad 0xa497000000000000 | ||
| 400 | rk19: .quad 0x6ee3000000000000 | ||
| 401 | rk20: .quad 0xe7b5000000000000 | ||
| 402 | rk1: .quad 0x2d56000000000000 | ||
| 403 | rk2: .quad 0x06df000000000000 | ||
| 404 | |||
| 405 | tbl_shf_table: | ||
| 406 | // use these values for shift constants for the tbl/tbx instruction | ||
| 407 | // different alignments result in values as shown: | ||
| 408 | // DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 | ||
| 409 | // DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2 | ||
| 410 | // DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3 | ||
| 411 | // DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 | ||
| 412 | // DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 | ||
| 413 | // DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 | ||
| 414 | // DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 | ||
| 415 | // DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 | ||
| 416 | // DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 | ||
| 417 | // DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 | ||
| 418 | // DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 | ||
| 419 | // DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 | ||
| 420 | // DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 | ||
| 421 | // DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 | ||
| 422 | // DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 | ||
| 423 | |||
| 424 | .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 | ||
| 425 | .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f | ||
| 426 | .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 | ||
| 427 | .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0x0 | ||
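The rk values above are the fold multipliers listed in the preceding comment block (powers of x modulo Q = 0x18BB70000, scaled as described there). Their correctness can be checked against the plain bitwise CRC-T10DIF, sketched here for reference; polynomial 0x8BB7, init 0, no bit reflection, and the check value for the ASCII string "123456789" is 0xD0DB:

    #include <stddef.h>
    #include <stdint.h>

    static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf,
                                   size_t len)
    {
            while (len--) {
                    crc ^= (uint16_t)(*buf++) << 8;   /* MSB-first */
                    for (int i = 0; i < 8; i++)
                            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7
                                                 : crc << 1;
            }
            return crc;
    }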
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c new file mode 100644 index 000000000000..d428355cf38d --- /dev/null +++ b/arch/arm/crypto/crct10dif-ce-glue.c | |||
| @@ -0,0 +1,101 @@ | |||
| 1 | /* | ||
| 2 | * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/crc-t10dif.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/string.h> | ||
| 16 | |||
| 17 | #include <crypto/internal/hash.h> | ||
| 18 | |||
| 19 | #include <asm/neon.h> | ||
| 20 | #include <asm/simd.h> | ||
| 21 | |||
| 22 | #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U | ||
| 23 | |||
| 24 | asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len); | ||
| 25 | |||
| 26 | static int crct10dif_init(struct shash_desc *desc) | ||
| 27 | { | ||
| 28 | u16 *crc = shash_desc_ctx(desc); | ||
| 29 | |||
| 30 | *crc = 0; | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static int crct10dif_update(struct shash_desc *desc, const u8 *data, | ||
| 35 | unsigned int length) | ||
| 36 | { | ||
| 37 | u16 *crc = shash_desc_ctx(desc); | ||
| 38 | unsigned int l; | ||
| 39 | |||
| 40 | if (!may_use_simd()) { | ||
| 41 | *crc = crc_t10dif_generic(*crc, data, length); | ||
| 42 | } else { | ||
| 43 | if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { | ||
| 44 | l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - | ||
| 45 | ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); | ||
| 46 | |||
| 47 | *crc = crc_t10dif_generic(*crc, data, l); | ||
| 48 | |||
| 49 | length -= l; | ||
| 50 | data += l; | ||
| 51 | } | ||
| 52 | if (length > 0) { | ||
| 53 | kernel_neon_begin(); | ||
| 54 | *crc = crc_t10dif_pmull(*crc, data, length); | ||
| 55 | kernel_neon_end(); | ||
| 56 | } | ||
| 57 | } | ||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | |||
| 61 | static int crct10dif_final(struct shash_desc *desc, u8 *out) | ||
| 62 | { | ||
| 63 | u16 *crc = shash_desc_ctx(desc); | ||
| 64 | |||
| 65 | *(u16 *)out = *crc; | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | |||
| 69 | static struct shash_alg crc_t10dif_alg = { | ||
| 70 | .digestsize = CRC_T10DIF_DIGEST_SIZE, | ||
| 71 | .init = crct10dif_init, | ||
| 72 | .update = crct10dif_update, | ||
| 73 | .final = crct10dif_final, | ||
| 74 | .descsize = CRC_T10DIF_DIGEST_SIZE, | ||
| 75 | |||
| 76 | .base.cra_name = "crct10dif", | ||
| 77 | .base.cra_driver_name = "crct10dif-arm-ce", | ||
| 78 | .base.cra_priority = 200, | ||
| 79 | .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, | ||
| 80 | .base.cra_module = THIS_MODULE, | ||
| 81 | }; | ||
| 82 | |||
| 83 | static int __init crc_t10dif_mod_init(void) | ||
| 84 | { | ||
| 85 | if (!(elf_hwcap2 & HWCAP2_PMULL)) | ||
| 86 | return -ENODEV; | ||
| 87 | |||
| 88 | return crypto_register_shash(&crc_t10dif_alg); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void __exit crc_t10dif_mod_exit(void) | ||
| 92 | { | ||
| 93 | crypto_unregister_shash(&crc_t10dif_alg); | ||
| 94 | } | ||
| 95 | |||
| 96 | module_init(crc_t10dif_mod_init); | ||
| 97 | module_exit(crc_t10dif_mod_exit); | ||
| 98 | |||
| 99 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
| 100 | MODULE_LICENSE("GPL v2"); | ||
| 101 | MODULE_ALIAS_CRYPTO("crct10dif"); | ||
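crct10dif_update() above shows the dispatch shape shared with the CRC32 glue: use the generic scalar code outright when may_use_simd() fails, consume unaligned head bytes scalar-wise so the NEON loads (issued with :128 alignment hints in the core file) see a 16-byte boundary, and wrap only the aligned bulk in kernel_neon_begin()/kernel_neon_end(). Condensed as a sketch, with placeholder function pointers standing in for crc_t10dif_generic() and crc_t10dif_pmull():

    #include <linux/kernel.h>   /* min() */
    #include <asm/neon.h>       /* kernel_neon_begin/end */

    /* sketch only; call when may_use_simd() has already succeeded */
    static u16 crc_update_shape(u16 crc, const u8 *data, unsigned int len,
                                u16 (*scalar_fn)(u16, const u8 *, unsigned int),
                                u16 (*simd_fn)(u16, const u8 *, u32))
    {
            unsigned int head = (16 - ((unsigned long)data & 15)) & 15;

            if (head) {
                    head = min(head, len);
                    crc = scalar_fn(crc, data, head);   /* align the head */
                    data += head;
                    len -= head;
            }
            if (len) {
                    kernel_neon_begin();
                    crc = simd_fn(crc, data, len);      /* aligned bulk */
                    kernel_neon_end();
            }
            return crc;
    }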
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index 602e2c2e9a4d..93ec8fef82a1 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | |||
| @@ -164,6 +164,14 @@ | |||
| 164 | clocks = <&cpm_syscon0 1 21>; | 164 | clocks = <&cpm_syscon0 1 21>; |
| 165 | status = "disabled"; | 165 | status = "disabled"; |
| 166 | }; | 166 | }; |
| 167 | |||
| 168 | cpm_trng: trng@760000 { | ||
| 169 | compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76"; | ||
| 170 | reg = <0x760000 0x7d>; | ||
| 171 | interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>; | ||
| 172 | clocks = <&cpm_syscon0 1 25>; | ||
| 173 | status = "okay"; | ||
| 174 | }; | ||
| 167 | }; | 175 | }; |
| 168 | 176 | ||
| 169 | cpm_pcie0: pcie@f2600000 { | 177 | cpm_pcie0: pcie@f2600000 { |
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 6bf9e241179b..ee8db0556791 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | |||
| @@ -164,6 +164,14 @@ | |||
| 164 | clocks = <&cps_syscon0 1 21>; | 164 | clocks = <&cps_syscon0 1 21>; |
| 165 | status = "disabled"; | 165 | status = "disabled"; |
| 166 | }; | 166 | }; |
| 167 | |||
| 168 | cps_trng: trng@760000 { | ||
| 169 | compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76"; | ||
| 170 | reg = <0x760000 0x7d>; | ||
| 171 | interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>; | ||
| 172 | clocks = <&cps_syscon0 1 25>; | ||
| 173 | status = "okay"; | ||
| 174 | }; | ||
| 167 | }; | 175 | }; |
| 168 | 176 | ||
| 169 | cps_pcie0: pcie@f4600000 { | 177 | cps_pcie0: pcie@f4600000 { |
diff --git a/arch/arm64/crypto/.gitignore b/arch/arm64/crypto/.gitignore new file mode 100644 index 000000000000..879df8781ed5 --- /dev/null +++ b/arch/arm64/crypto/.gitignore | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | sha256-core.S | ||
| 2 | sha512-core.S | ||
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 2cf32e9887e1..450a85df041a 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig | |||
| @@ -8,6 +8,14 @@ menuconfig ARM64_CRYPTO | |||
| 8 | 8 | ||
| 9 | if ARM64_CRYPTO | 9 | if ARM64_CRYPTO |
| 10 | 10 | ||
| 11 | config CRYPTO_SHA256_ARM64 | ||
| 12 | tristate "SHA-224/SHA-256 digest algorithm for arm64" | ||
| 13 | select CRYPTO_HASH | ||
| 14 | |||
| 15 | config CRYPTO_SHA512_ARM64 | ||
| 16 | tristate "SHA-384/SHA-512 digest algorithm for arm64" | ||
| 17 | select CRYPTO_HASH | ||
| 18 | |||
| 11 | config CRYPTO_SHA1_ARM64_CE | 19 | config CRYPTO_SHA1_ARM64_CE |
| 12 | tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)" | 20 | tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)" |
| 13 | depends on ARM64 && KERNEL_MODE_NEON | 21 | depends on ARM64 && KERNEL_MODE_NEON |
| @@ -23,6 +31,16 @@ config CRYPTO_GHASH_ARM64_CE | |||
| 23 | depends on ARM64 && KERNEL_MODE_NEON | 31 | depends on ARM64 && KERNEL_MODE_NEON |
| 24 | select CRYPTO_HASH | 32 | select CRYPTO_HASH |
| 25 | 33 | ||
| 34 | config CRYPTO_CRCT10DIF_ARM64_CE | ||
| 35 | tristate "CRCT10DIF digest algorithm using PMULL instructions" | ||
| 36 | depends on KERNEL_MODE_NEON && CRC_T10DIF | ||
| 37 | select CRYPTO_HASH | ||
| 38 | |||
| 39 | config CRYPTO_CRC32_ARM64_CE | ||
| 40 | tristate "CRC32 and CRC32C digest algorithms using PMULL instructions" | ||
| 41 | depends on KERNEL_MODE_NEON && CRC32 | ||
| 42 | select CRYPTO_HASH | ||
| 43 | |||
| 26 | config CRYPTO_AES_ARM64_CE | 44 | config CRYPTO_AES_ARM64_CE |
| 27 | tristate "AES core cipher using ARMv8 Crypto Extensions" | 45 | tristate "AES core cipher using ARMv8 Crypto Extensions" |
| 28 | depends on ARM64 && KERNEL_MODE_NEON | 46 | depends on ARM64 && KERNEL_MODE_NEON |
| @@ -40,17 +58,18 @@ config CRYPTO_AES_ARM64_CE_BLK | |||
| 40 | depends on ARM64 && KERNEL_MODE_NEON | 58 | depends on ARM64 && KERNEL_MODE_NEON |
| 41 | select CRYPTO_BLKCIPHER | 59 | select CRYPTO_BLKCIPHER |
| 42 | select CRYPTO_AES_ARM64_CE | 60 | select CRYPTO_AES_ARM64_CE |
| 43 | select CRYPTO_ABLK_HELPER | 61 | select CRYPTO_SIMD |
| 44 | 62 | ||
| 45 | config CRYPTO_AES_ARM64_NEON_BLK | 63 | config CRYPTO_AES_ARM64_NEON_BLK |
| 46 | tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" | 64 | tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" |
| 47 | depends on ARM64 && KERNEL_MODE_NEON | 65 | depends on ARM64 && KERNEL_MODE_NEON |
| 48 | select CRYPTO_BLKCIPHER | 66 | select CRYPTO_BLKCIPHER |
| 49 | select CRYPTO_AES | 67 | select CRYPTO_AES |
| 50 | select CRYPTO_ABLK_HELPER | 68 | select CRYPTO_SIMD |
| 51 | 69 | ||
| 52 | config CRYPTO_CRC32_ARM64 | 70 | config CRYPTO_CRC32_ARM64 |
| 53 | tristate "CRC32 and CRC32C using optional ARMv8 instructions" | 71 | tristate "CRC32 and CRC32C using optional ARMv8 instructions" |
| 54 | depends on ARM64 | 72 | depends on ARM64 |
| 55 | select CRYPTO_HASH | 73 | select CRYPTO_HASH |
| 74 | |||
| 56 | endif | 75 | endif |
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index abb79b3cfcfe..aa8888d7b744 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile | |||
| @@ -17,6 +17,12 @@ sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o | |||
| 17 | obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o | 17 | obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o |
| 18 | ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o | 18 | ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o |
| 19 | 19 | ||
| 20 | obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o | ||
| 21 | crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o | ||
| 22 | |||
| 23 | obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o | ||
| 24 | crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o | ||
| 25 | |||
| 20 | obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o | 26 | obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o |
| 21 | CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto | 27 | CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto |
| 22 | 28 | ||
| @@ -29,6 +35,12 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o | |||
| 29 | obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o | 35 | obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o |
| 30 | aes-neon-blk-y := aes-glue-neon.o aes-neon.o | 36 | aes-neon-blk-y := aes-glue-neon.o aes-neon.o |
| 31 | 37 | ||
| 38 | obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o | ||
| 39 | sha256-arm64-y := sha256-glue.o sha256-core.o | ||
| 40 | |||
| 41 | obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o | ||
| 42 | sha512-arm64-y := sha512-glue.o sha512-core.o | ||
| 43 | |||
| 32 | AFLAGS_aes-ce.o := -DINTERLEAVE=4 | 44 | AFLAGS_aes-ce.o := -DINTERLEAVE=4 |
| 33 | AFLAGS_aes-neon.o := -DINTERLEAVE=4 | 45 | AFLAGS_aes-neon.o := -DINTERLEAVE=4 |
| 34 | 46 | ||
| @@ -40,3 +52,14 @@ CFLAGS_crc32-arm64.o := -mcpu=generic+crc | |||
| 40 | 52 | ||
| 41 | $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE | 53 | $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE |
| 42 | $(call if_changed_rule,cc_o_c) | 54 | $(call if_changed_rule,cc_o_c) |
| 55 | |||
| 56 | quiet_cmd_perlasm = PERLASM $@ | ||
| 57 | cmd_perlasm = $(PERL) $(<) void $(@) | ||
| 58 | |||
| 59 | $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl | ||
| 60 | $(call cmd,perlasm) | ||
| 61 | |||
| 62 | $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl | ||
| 63 | $(call cmd,perlasm) | ||
| 64 | |||
| 65 | .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S | ||
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index a2a7fbcacc14..3363560c79b7 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
| 12 | #include <asm/assembler.h> | ||
| 12 | 13 | ||
| 13 | .text | 14 | .text |
| 14 | .arch armv8-a+crypto | 15 | .arch armv8-a+crypto |
| @@ -19,7 +20,7 @@ | |||
| 19 | */ | 20 | */ |
| 20 | ENTRY(ce_aes_ccm_auth_data) | 21 | ENTRY(ce_aes_ccm_auth_data) |
| 21 | ldr w8, [x3] /* leftover from prev round? */ | 22 | ldr w8, [x3] /* leftover from prev round? */ |
| 22 | ld1 {v0.2d}, [x0] /* load mac */ | 23 | ld1 {v0.16b}, [x0] /* load mac */ |
| 23 | cbz w8, 1f | 24 | cbz w8, 1f |
| 24 | sub w8, w8, #16 | 25 | sub w8, w8, #16 |
| 25 | eor v1.16b, v1.16b, v1.16b | 26 | eor v1.16b, v1.16b, v1.16b |
| @@ -31,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data) | |||
| 31 | beq 8f /* out of input? */ | 32 | beq 8f /* out of input? */ |
| 32 | cbnz w8, 0b | 33 | cbnz w8, 0b |
| 33 | eor v0.16b, v0.16b, v1.16b | 34 | eor v0.16b, v0.16b, v1.16b |
| 34 | 1: ld1 {v3.2d}, [x4] /* load first round key */ | 35 | 1: ld1 {v3.16b}, [x4] /* load first round key */ |
| 35 | prfm pldl1strm, [x1] | 36 | prfm pldl1strm, [x1] |
| 36 | cmp w5, #12 /* which key size? */ | 37 | cmp w5, #12 /* which key size? */ |
| 37 | add x6, x4, #16 | 38 | add x6, x4, #16 |
| @@ -41,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data) | |||
| 41 | mov v5.16b, v3.16b | 42 | mov v5.16b, v3.16b |
| 42 | b 4f | 43 | b 4f |
| 43 | 2: mov v4.16b, v3.16b | 44 | 2: mov v4.16b, v3.16b |
| 44 | ld1 {v5.2d}, [x6], #16 /* load 2nd round key */ | 45 | ld1 {v5.16b}, [x6], #16 /* load 2nd round key */ |
| 45 | 3: aese v0.16b, v4.16b | 46 | 3: aese v0.16b, v4.16b |
| 46 | aesmc v0.16b, v0.16b | 47 | aesmc v0.16b, v0.16b |
| 47 | 4: ld1 {v3.2d}, [x6], #16 /* load next round key */ | 48 | 4: ld1 {v3.16b}, [x6], #16 /* load next round key */ |
| 48 | aese v0.16b, v5.16b | 49 | aese v0.16b, v5.16b |
| 49 | aesmc v0.16b, v0.16b | 50 | aesmc v0.16b, v0.16b |
| 50 | 5: ld1 {v4.2d}, [x6], #16 /* load next round key */ | 51 | 5: ld1 {v4.16b}, [x6], #16 /* load next round key */ |
| 51 | subs w7, w7, #3 | 52 | subs w7, w7, #3 |
| 52 | aese v0.16b, v3.16b | 53 | aese v0.16b, v3.16b |
| 53 | aesmc v0.16b, v0.16b | 54 | aesmc v0.16b, v0.16b |
| 54 | ld1 {v5.2d}, [x6], #16 /* load next round key */ | 55 | ld1 {v5.16b}, [x6], #16 /* load next round key */ |
| 55 | bpl 3b | 56 | bpl 3b |
| 56 | aese v0.16b, v4.16b | 57 | aese v0.16b, v4.16b |
| 57 | subs w2, w2, #16 /* last data? */ | 58 | subs w2, w2, #16 /* last data? */ |
| @@ -60,7 +61,7 @@ ENTRY(ce_aes_ccm_auth_data) | |||
| 60 | ld1 {v1.16b}, [x1], #16 /* load next input block */ | 61 | ld1 {v1.16b}, [x1], #16 /* load next input block */ |
| 61 | eor v0.16b, v0.16b, v1.16b /* xor with mac */ | 62 | eor v0.16b, v0.16b, v1.16b /* xor with mac */ |
| 62 | bne 1b | 63 | bne 1b |
| 63 | 6: st1 {v0.2d}, [x0] /* store mac */ | 64 | 6: st1 {v0.16b}, [x0] /* store mac */ |
| 64 | beq 10f | 65 | beq 10f |
| 65 | adds w2, w2, #16 | 66 | adds w2, w2, #16 |
| 66 | beq 10f | 67 | beq 10f |
| @@ -79,7 +80,7 @@ ENTRY(ce_aes_ccm_auth_data) | |||
| 79 | adds w7, w7, #1 | 80 | adds w7, w7, #1 |
| 80 | bne 9b | 81 | bne 9b |
| 81 | eor v0.16b, v0.16b, v1.16b | 82 | eor v0.16b, v0.16b, v1.16b |
| 82 | st1 {v0.2d}, [x0] | 83 | st1 {v0.16b}, [x0] |
| 83 | 10: str w8, [x3] | 84 | 10: str w8, [x3] |
| 84 | ret | 85 | ret |
| 85 | ENDPROC(ce_aes_ccm_auth_data) | 86 | ENDPROC(ce_aes_ccm_auth_data) |
| @@ -89,27 +90,27 @@ ENDPROC(ce_aes_ccm_auth_data) | |||
| 89 | * u32 rounds); | 90 | * u32 rounds); |
| 90 | */ | 91 | */ |
| 91 | ENTRY(ce_aes_ccm_final) | 92 | ENTRY(ce_aes_ccm_final) |
| 92 | ld1 {v3.2d}, [x2], #16 /* load first round key */ | 93 | ld1 {v3.16b}, [x2], #16 /* load first round key */ |
| 93 | ld1 {v0.2d}, [x0] /* load mac */ | 94 | ld1 {v0.16b}, [x0] /* load mac */ |
| 94 | cmp w3, #12 /* which key size? */ | 95 | cmp w3, #12 /* which key size? */ |
| 95 | sub w3, w3, #2 /* modified # of rounds */ | 96 | sub w3, w3, #2 /* modified # of rounds */ |
| 96 | ld1 {v1.2d}, [x1] /* load 1st ctriv */ | 97 | ld1 {v1.16b}, [x1] /* load 1st ctriv */ |
| 97 | bmi 0f | 98 | bmi 0f |
| 98 | bne 3f | 99 | bne 3f |
| 99 | mov v5.16b, v3.16b | 100 | mov v5.16b, v3.16b |
| 100 | b 2f | 101 | b 2f |
| 101 | 0: mov v4.16b, v3.16b | 102 | 0: mov v4.16b, v3.16b |
| 102 | 1: ld1 {v5.2d}, [x2], #16 /* load next round key */ | 103 | 1: ld1 {v5.16b}, [x2], #16 /* load next round key */ |
| 103 | aese v0.16b, v4.16b | 104 | aese v0.16b, v4.16b |
| 104 | aesmc v0.16b, v0.16b | 105 | aesmc v0.16b, v0.16b |
| 105 | aese v1.16b, v4.16b | 106 | aese v1.16b, v4.16b |
| 106 | aesmc v1.16b, v1.16b | 107 | aesmc v1.16b, v1.16b |
| 107 | 2: ld1 {v3.2d}, [x2], #16 /* load next round key */ | 108 | 2: ld1 {v3.16b}, [x2], #16 /* load next round key */ |
| 108 | aese v0.16b, v5.16b | 109 | aese v0.16b, v5.16b |
| 109 | aesmc v0.16b, v0.16b | 110 | aesmc v0.16b, v0.16b |
| 110 | aese v1.16b, v5.16b | 111 | aese v1.16b, v5.16b |
| 111 | aesmc v1.16b, v1.16b | 112 | aesmc v1.16b, v1.16b |
| 112 | 3: ld1 {v4.2d}, [x2], #16 /* load next round key */ | 113 | 3: ld1 {v4.16b}, [x2], #16 /* load next round key */ |
| 113 | subs w3, w3, #3 | 114 | subs w3, w3, #3 |
| 114 | aese v0.16b, v3.16b | 115 | aese v0.16b, v3.16b |
| 115 | aesmc v0.16b, v0.16b | 116 | aesmc v0.16b, v0.16b |
| @@ -120,47 +121,47 @@ ENTRY(ce_aes_ccm_final) | |||
| 120 | aese v1.16b, v4.16b | 121 | aese v1.16b, v4.16b |
| 121 | /* final round key cancels out */ | 122 | /* final round key cancels out */ |
| 122 | eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ | 123 | eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ |
| 123 | st1 {v0.2d}, [x0] /* store result */ | 124 | st1 {v0.16b}, [x0] /* store result */ |
| 124 | ret | 125 | ret |
| 125 | ENDPROC(ce_aes_ccm_final) | 126 | ENDPROC(ce_aes_ccm_final) |
| 126 | 127 | ||
| 127 | .macro aes_ccm_do_crypt,enc | 128 | .macro aes_ccm_do_crypt,enc |
| 128 | ldr x8, [x6, #8] /* load lower ctr */ | 129 | ldr x8, [x6, #8] /* load lower ctr */ |
| 129 | ld1 {v0.2d}, [x5] /* load mac */ | 130 | ld1 {v0.16b}, [x5] /* load mac */ |
| 130 | rev x8, x8 /* keep swabbed ctr in reg */ | 131 | CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ |
| 131 | 0: /* outer loop */ | 132 | 0: /* outer loop */ |
| 132 | ld1 {v1.1d}, [x6] /* load upper ctr */ | 133 | ld1 {v1.8b}, [x6] /* load upper ctr */ |
| 133 | prfm pldl1strm, [x1] | 134 | prfm pldl1strm, [x1] |
| 134 | add x8, x8, #1 | 135 | add x8, x8, #1 |
| 135 | rev x9, x8 | 136 | rev x9, x8 |
| 136 | cmp w4, #12 /* which key size? */ | 137 | cmp w4, #12 /* which key size? */ |
| 137 | sub w7, w4, #2 /* get modified # of rounds */ | 138 | sub w7, w4, #2 /* get modified # of rounds */ |
| 138 | ins v1.d[1], x9 /* no carry in lower ctr */ | 139 | ins v1.d[1], x9 /* no carry in lower ctr */ |
| 139 | ld1 {v3.2d}, [x3] /* load first round key */ | 140 | ld1 {v3.16b}, [x3] /* load first round key */ |
| 140 | add x10, x3, #16 | 141 | add x10, x3, #16 |
| 141 | bmi 1f | 142 | bmi 1f |
| 142 | bne 4f | 143 | bne 4f |
| 143 | mov v5.16b, v3.16b | 144 | mov v5.16b, v3.16b |
| 144 | b 3f | 145 | b 3f |
| 145 | 1: mov v4.16b, v3.16b | 146 | 1: mov v4.16b, v3.16b |
| 146 | ld1 {v5.2d}, [x10], #16 /* load 2nd round key */ | 147 | ld1 {v5.16b}, [x10], #16 /* load 2nd round key */ |
| 147 | 2: /* inner loop: 3 rounds, 2x interleaved */ | 148 | 2: /* inner loop: 3 rounds, 2x interleaved */ |
| 148 | aese v0.16b, v4.16b | 149 | aese v0.16b, v4.16b |
| 149 | aesmc v0.16b, v0.16b | 150 | aesmc v0.16b, v0.16b |
| 150 | aese v1.16b, v4.16b | 151 | aese v1.16b, v4.16b |
| 151 | aesmc v1.16b, v1.16b | 152 | aesmc v1.16b, v1.16b |
| 152 | 3: ld1 {v3.2d}, [x10], #16 /* load next round key */ | 153 | 3: ld1 {v3.16b}, [x10], #16 /* load next round key */ |
| 153 | aese v0.16b, v5.16b | 154 | aese v0.16b, v5.16b |
| 154 | aesmc v0.16b, v0.16b | 155 | aesmc v0.16b, v0.16b |
| 155 | aese v1.16b, v5.16b | 156 | aese v1.16b, v5.16b |
| 156 | aesmc v1.16b, v1.16b | 157 | aesmc v1.16b, v1.16b |
| 157 | 4: ld1 {v4.2d}, [x10], #16 /* load next round key */ | 158 | 4: ld1 {v4.16b}, [x10], #16 /* load next round key */ |
| 158 | subs w7, w7, #3 | 159 | subs w7, w7, #3 |
| 159 | aese v0.16b, v3.16b | 160 | aese v0.16b, v3.16b |
| 160 | aesmc v0.16b, v0.16b | 161 | aesmc v0.16b, v0.16b |
| 161 | aese v1.16b, v3.16b | 162 | aese v1.16b, v3.16b |
| 162 | aesmc v1.16b, v1.16b | 163 | aesmc v1.16b, v1.16b |
| 163 | ld1 {v5.2d}, [x10], #16 /* load next round key */ | 164 | ld1 {v5.16b}, [x10], #16 /* load next round key */ |
| 164 | bpl 2b | 165 | bpl 2b |
| 165 | aese v0.16b, v4.16b | 166 | aese v0.16b, v4.16b |
| 166 | aese v1.16b, v4.16b | 167 | aese v1.16b, v4.16b |
| @@ -177,14 +178,14 @@ ENDPROC(ce_aes_ccm_final) | |||
| 177 | eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ | 178 | eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ |
| 178 | st1 {v1.16b}, [x0], #16 /* write output block */ | 179 | st1 {v1.16b}, [x0], #16 /* write output block */ |
| 179 | bne 0b | 180 | bne 0b |
| 180 | rev x8, x8 | 181 | CPU_LE( rev x8, x8 ) |
| 181 | st1 {v0.2d}, [x5] /* store mac */ | 182 | st1 {v0.16b}, [x5] /* store mac */ |
| 182 | str x8, [x6, #8] /* store lsb end of ctr (BE) */ | 183 | str x8, [x6, #8] /* store lsb end of ctr (BE) */ |
| 183 | 5: ret | 184 | 5: ret |
| 184 | 185 | ||
| 185 | 6: eor v0.16b, v0.16b, v5.16b /* final round mac */ | 186 | 6: eor v0.16b, v0.16b, v5.16b /* final round mac */ |
| 186 | eor v1.16b, v1.16b, v5.16b /* final round enc */ | 187 | eor v1.16b, v1.16b, v5.16b /* final round enc */ |
| 187 | st1 {v0.2d}, [x5] /* store mac */ | 188 | st1 {v0.16b}, [x5] /* store mac */ |
| 188 | add w2, w2, #16 /* process partial tail block */ | 189 | add w2, w2, #16 /* process partial tail block */ |
| 189 | 7: ldrb w9, [x1], #1 /* get 1 byte of input */ | 190 | 7: ldrb w9, [x1], #1 /* get 1 byte of input */ |
| 190 | umov w6, v1.b[0] /* get top crypted ctr byte */ | 191 | umov w6, v1.b[0] /* get top crypted ctr byte */ |
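
A note on the pattern of changes above: `ld1 {vN.2d}` loads two 64-bit elements, which a big-endian kernel byte-swaps individually on the way into the register, whereas `ld1 {vN.16b}` always fills the register in memory byte order. Round keys, the MAC and the counter block are plain byte arrays, so `.16b` is the endian-safe form; on little-endian the two are identical, which is why this only broke BE builds. Likewise, the `rev x8, x8` swab of the counter is now wrapped in CPU_LE() because it is only needed on little-endian. A minimal C model of the load difference, with hypothetical helpers standing in for the two instruction forms:

    #include <stdint.h>
    #include <string.h>

    /* Model of ld1 {v.2d} on a big-endian CPU: each 64-bit element is
     * assembled most-significant-byte first, so register contents no
     * longer match memory byte order. */
    static void load_2d_be(uint64_t out[2], const uint8_t *p)
    {
            for (int i = 0; i < 2; i++) {
                    uint64_t v = 0;
                    for (int j = 0; j < 8; j++)
                            v = (v << 8) | p[i * 8 + j];
                    out[i] = v;
            }
    }

    /* Model of ld1 {v.16b}: sixteen byte-sized elements, memory order
     * preserved regardless of CPU endianness. */
    static void load_16b(uint8_t out[16], const uint8_t *p)
    {
            memcpy(out, p, 16);
    }
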
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index f4bf2f2a014c..cc5515dac74a 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c | |||
| @@ -11,9 +11,9 @@ | |||
| 11 | #include <asm/neon.h> | 11 | #include <asm/neon.h> |
| 12 | #include <asm/unaligned.h> | 12 | #include <asm/unaligned.h> |
| 13 | #include <crypto/aes.h> | 13 | #include <crypto/aes.h> |
| 14 | #include <crypto/algapi.h> | ||
| 15 | #include <crypto/scatterwalk.h> | 14 | #include <crypto/scatterwalk.h> |
| 16 | #include <crypto/internal/aead.h> | 15 | #include <crypto/internal/aead.h> |
| 16 | #include <crypto/internal/skcipher.h> | ||
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | 18 | ||
| 19 | #include "aes-ce-setkey.h" | 19 | #include "aes-ce-setkey.h" |
| @@ -149,12 +149,7 @@ static int ccm_encrypt(struct aead_request *req) | |||
| 149 | { | 149 | { |
| 150 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 150 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 151 | struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); | 151 | struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); |
| 152 | struct blkcipher_desc desc = { .info = req->iv }; | 152 | struct skcipher_walk walk; |
| 153 | struct blkcipher_walk walk; | ||
| 154 | struct scatterlist srcbuf[2]; | ||
| 155 | struct scatterlist dstbuf[2]; | ||
| 156 | struct scatterlist *src; | ||
| 157 | struct scatterlist *dst; | ||
| 158 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; | 153 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; |
| 159 | u8 buf[AES_BLOCK_SIZE]; | 154 | u8 buf[AES_BLOCK_SIZE]; |
| 160 | u32 len = req->cryptlen; | 155 | u32 len = req->cryptlen; |
| @@ -172,27 +167,19 @@ static int ccm_encrypt(struct aead_request *req) | |||
| 172 | /* preserve the original iv for the final round */ | 167 | /* preserve the original iv for the final round */ |
| 173 | memcpy(buf, req->iv, AES_BLOCK_SIZE); | 168 | memcpy(buf, req->iv, AES_BLOCK_SIZE); |
| 174 | 169 | ||
| 175 | src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); | 170 | err = skcipher_walk_aead_encrypt(&walk, req, true); |
| 176 | dst = src; | ||
| 177 | if (req->src != req->dst) | ||
| 178 | dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); | ||
| 179 | |||
| 180 | blkcipher_walk_init(&walk, dst, src, len); | ||
| 181 | err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, | ||
| 182 | AES_BLOCK_SIZE); | ||
| 183 | 171 | ||
| 184 | while (walk.nbytes) { | 172 | while (walk.nbytes) { |
| 185 | u32 tail = walk.nbytes % AES_BLOCK_SIZE; | 173 | u32 tail = walk.nbytes % AES_BLOCK_SIZE; |
| 186 | 174 | ||
| 187 | if (walk.nbytes == len) | 175 | if (walk.nbytes == walk.total) |
| 188 | tail = 0; | 176 | tail = 0; |
| 189 | 177 | ||
| 190 | ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr, | 178 | ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 191 | walk.nbytes - tail, ctx->key_enc, | 179 | walk.nbytes - tail, ctx->key_enc, |
| 192 | num_rounds(ctx), mac, walk.iv); | 180 | num_rounds(ctx), mac, walk.iv); |
| 193 | 181 | ||
| 194 | len -= walk.nbytes - tail; | 182 | err = skcipher_walk_done(&walk, tail); |
| 195 | err = blkcipher_walk_done(&desc, &walk, tail); | ||
| 196 | } | 183 | } |
| 197 | if (!err) | 184 | if (!err) |
| 198 | ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); | 185 | ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); |
| @@ -203,7 +190,7 @@ static int ccm_encrypt(struct aead_request *req) | |||
| 203 | return err; | 190 | return err; |
| 204 | 191 | ||
| 205 | /* copy authtag to end of dst */ | 192 | /* copy authtag to end of dst */ |
| 206 | scatterwalk_map_and_copy(mac, dst, req->cryptlen, | 193 | scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen, |
| 207 | crypto_aead_authsize(aead), 1); | 194 | crypto_aead_authsize(aead), 1); |
| 208 | 195 | ||
| 209 | return 0; | 196 | return 0; |
| @@ -214,12 +201,7 @@ static int ccm_decrypt(struct aead_request *req) | |||
| 214 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 201 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 215 | struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); | 202 | struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); |
| 216 | unsigned int authsize = crypto_aead_authsize(aead); | 203 | unsigned int authsize = crypto_aead_authsize(aead); |
| 217 | struct blkcipher_desc desc = { .info = req->iv }; | 204 | struct skcipher_walk walk; |
| 218 | struct blkcipher_walk walk; | ||
| 219 | struct scatterlist srcbuf[2]; | ||
| 220 | struct scatterlist dstbuf[2]; | ||
| 221 | struct scatterlist *src; | ||
| 222 | struct scatterlist *dst; | ||
| 223 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; | 205 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; |
| 224 | u8 buf[AES_BLOCK_SIZE]; | 206 | u8 buf[AES_BLOCK_SIZE]; |
| 225 | u32 len = req->cryptlen - authsize; | 207 | u32 len = req->cryptlen - authsize; |
| @@ -237,27 +219,19 @@ static int ccm_decrypt(struct aead_request *req) | |||
| 237 | /* preserve the original iv for the final round */ | 219 | /* preserve the original iv for the final round */ |
| 238 | memcpy(buf, req->iv, AES_BLOCK_SIZE); | 220 | memcpy(buf, req->iv, AES_BLOCK_SIZE); |
| 239 | 221 | ||
| 240 | src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); | 222 | err = skcipher_walk_aead_decrypt(&walk, req, true); |
| 241 | dst = src; | ||
| 242 | if (req->src != req->dst) | ||
| 243 | dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); | ||
| 244 | |||
| 245 | blkcipher_walk_init(&walk, dst, src, len); | ||
| 246 | err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, | ||
| 247 | AES_BLOCK_SIZE); | ||
| 248 | 223 | ||
| 249 | while (walk.nbytes) { | 224 | while (walk.nbytes) { |
| 250 | u32 tail = walk.nbytes % AES_BLOCK_SIZE; | 225 | u32 tail = walk.nbytes % AES_BLOCK_SIZE; |
| 251 | 226 | ||
| 252 | if (walk.nbytes == len) | 227 | if (walk.nbytes == walk.total) |
| 253 | tail = 0; | 228 | tail = 0; |
| 254 | 229 | ||
| 255 | ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr, | 230 | ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 256 | walk.nbytes - tail, ctx->key_enc, | 231 | walk.nbytes - tail, ctx->key_enc, |
| 257 | num_rounds(ctx), mac, walk.iv); | 232 | num_rounds(ctx), mac, walk.iv); |
| 258 | 233 | ||
| 259 | len -= walk.nbytes - tail; | 234 | err = skcipher_walk_done(&walk, tail); |
| 260 | err = blkcipher_walk_done(&desc, &walk, tail); | ||
| 261 | } | 235 | } |
| 262 | if (!err) | 236 | if (!err) |
| 263 | ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); | 237 | ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx)); |
| @@ -268,7 +242,8 @@ static int ccm_decrypt(struct aead_request *req) | |||
| 268 | return err; | 242 | return err; |
| 269 | 243 | ||
| 270 | /* compare calculated auth tag with the stored one */ | 244 | /* compare calculated auth tag with the stored one */ |
| 271 | scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize, | 245 | scatterwalk_map_and_copy(buf, req->src, |
| 246 | req->assoclen + req->cryptlen - authsize, | ||
| 272 | authsize, 0); | 247 | authsize, 0); |
| 273 | 248 | ||
| 274 | if (crypto_memneq(mac, buf, authsize)) | 249 | if (crypto_memneq(mac, buf, authsize)) |
| @@ -287,6 +262,7 @@ static struct aead_alg ccm_aes_alg = { | |||
| 287 | .cra_module = THIS_MODULE, | 262 | .cra_module = THIS_MODULE, |
| 288 | }, | 263 | }, |
| 289 | .ivsize = AES_BLOCK_SIZE, | 264 | .ivsize = AES_BLOCK_SIZE, |
| 265 | .chunksize = AES_BLOCK_SIZE, | ||
| 290 | .maxauthsize = AES_BLOCK_SIZE, | 266 | .maxauthsize = AES_BLOCK_SIZE, |
| 291 | .setkey = ccm_setkey, | 267 | .setkey = ccm_setkey, |
| 292 | .setauthsize = ccm_setauthsize, | 268 | .setauthsize = ccm_setauthsize, |
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c index f7bd9bf0bbb3..50d9fe11d0c8 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-cipher.c | |||
| @@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) | |||
| 47 | kernel_neon_begin_partial(4); | 47 | kernel_neon_begin_partial(4); |
| 48 | 48 | ||
| 49 | __asm__(" ld1 {v0.16b}, %[in] ;" | 49 | __asm__(" ld1 {v0.16b}, %[in] ;" |
| 50 | " ld1 {v1.2d}, [%[key]], #16 ;" | 50 | " ld1 {v1.16b}, [%[key]], #16 ;" |
| 51 | " cmp %w[rounds], #10 ;" | 51 | " cmp %w[rounds], #10 ;" |
| 52 | " bmi 0f ;" | 52 | " bmi 0f ;" |
| 53 | " bne 3f ;" | 53 | " bne 3f ;" |
| 54 | " mov v3.16b, v1.16b ;" | 54 | " mov v3.16b, v1.16b ;" |
| 55 | " b 2f ;" | 55 | " b 2f ;" |
| 56 | "0: mov v2.16b, v1.16b ;" | 56 | "0: mov v2.16b, v1.16b ;" |
| 57 | " ld1 {v3.2d}, [%[key]], #16 ;" | 57 | " ld1 {v3.16b}, [%[key]], #16 ;" |
| 58 | "1: aese v0.16b, v2.16b ;" | 58 | "1: aese v0.16b, v2.16b ;" |
| 59 | " aesmc v0.16b, v0.16b ;" | 59 | " aesmc v0.16b, v0.16b ;" |
| 60 | "2: ld1 {v1.2d}, [%[key]], #16 ;" | 60 | "2: ld1 {v1.16b}, [%[key]], #16 ;" |
| 61 | " aese v0.16b, v3.16b ;" | 61 | " aese v0.16b, v3.16b ;" |
| 62 | " aesmc v0.16b, v0.16b ;" | 62 | " aesmc v0.16b, v0.16b ;" |
| 63 | "3: ld1 {v2.2d}, [%[key]], #16 ;" | 63 | "3: ld1 {v2.16b}, [%[key]], #16 ;" |
| 64 | " subs %w[rounds], %w[rounds], #3 ;" | 64 | " subs %w[rounds], %w[rounds], #3 ;" |
| 65 | " aese v0.16b, v1.16b ;" | 65 | " aese v0.16b, v1.16b ;" |
| 66 | " aesmc v0.16b, v0.16b ;" | 66 | " aesmc v0.16b, v0.16b ;" |
| 67 | " ld1 {v3.2d}, [%[key]], #16 ;" | 67 | " ld1 {v3.16b}, [%[key]], #16 ;" |
| 68 | " bpl 1b ;" | 68 | " bpl 1b ;" |
| 69 | " aese v0.16b, v2.16b ;" | 69 | " aese v0.16b, v2.16b ;" |
| 70 | " eor v0.16b, v0.16b, v3.16b ;" | 70 | " eor v0.16b, v0.16b, v3.16b ;" |
| @@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) | |||
| 92 | kernel_neon_begin_partial(4); | 92 | kernel_neon_begin_partial(4); |
| 93 | 93 | ||
| 94 | __asm__(" ld1 {v0.16b}, %[in] ;" | 94 | __asm__(" ld1 {v0.16b}, %[in] ;" |
| 95 | " ld1 {v1.2d}, [%[key]], #16 ;" | 95 | " ld1 {v1.16b}, [%[key]], #16 ;" |
| 96 | " cmp %w[rounds], #10 ;" | 96 | " cmp %w[rounds], #10 ;" |
| 97 | " bmi 0f ;" | 97 | " bmi 0f ;" |
| 98 | " bne 3f ;" | 98 | " bne 3f ;" |
| 99 | " mov v3.16b, v1.16b ;" | 99 | " mov v3.16b, v1.16b ;" |
| 100 | " b 2f ;" | 100 | " b 2f ;" |
| 101 | "0: mov v2.16b, v1.16b ;" | 101 | "0: mov v2.16b, v1.16b ;" |
| 102 | " ld1 {v3.2d}, [%[key]], #16 ;" | 102 | " ld1 {v3.16b}, [%[key]], #16 ;" |
| 103 | "1: aesd v0.16b, v2.16b ;" | 103 | "1: aesd v0.16b, v2.16b ;" |
| 104 | " aesimc v0.16b, v0.16b ;" | 104 | " aesimc v0.16b, v0.16b ;" |
| 105 | "2: ld1 {v1.2d}, [%[key]], #16 ;" | 105 | "2: ld1 {v1.16b}, [%[key]], #16 ;" |
| 106 | " aesd v0.16b, v3.16b ;" | 106 | " aesd v0.16b, v3.16b ;" |
| 107 | " aesimc v0.16b, v0.16b ;" | 107 | " aesimc v0.16b, v0.16b ;" |
| 108 | "3: ld1 {v2.2d}, [%[key]], #16 ;" | 108 | "3: ld1 {v2.16b}, [%[key]], #16 ;" |
| 109 | " subs %w[rounds], %w[rounds], #3 ;" | 109 | " subs %w[rounds], %w[rounds], #3 ;" |
| 110 | " aesd v0.16b, v1.16b ;" | 110 | " aesd v0.16b, v1.16b ;" |
| 111 | " aesimc v0.16b, v0.16b ;" | 111 | " aesimc v0.16b, v0.16b ;" |
| 112 | " ld1 {v3.2d}, [%[key]], #16 ;" | 112 | " ld1 {v3.16b}, [%[key]], #16 ;" |
| 113 | " bpl 1b ;" | 113 | " bpl 1b ;" |
| 114 | " aesd v0.16b, v2.16b ;" | 114 | " aesd v0.16b, v2.16b ;" |
| 115 | " eor v0.16b, v0.16b, v3.16b ;" | 115 | " eor v0.16b, v0.16b, v3.16b ;" |
| @@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, | |||
| 173 | u32 *rki = ctx->key_enc + (i * kwords); | 173 | u32 *rki = ctx->key_enc + (i * kwords); |
| 174 | u32 *rko = rki + kwords; | 174 | u32 *rko = rki + kwords; |
| 175 | 175 | ||
| 176 | #ifndef CONFIG_CPU_BIG_ENDIAN | ||
| 176 | rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; | 177 | rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; |
| 178 | #else | ||
| 179 | rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^ | ||
| 180 | rki[0]; | ||
| 181 | #endif | ||
| 177 | rko[1] = rko[0] ^ rki[1]; | 182 | rko[1] = rko[0] ^ rki[1]; |
| 178 | rko[2] = rko[1] ^ rki[2]; | 183 | rko[2] = rko[1] ^ rki[2]; |
| 179 | rko[3] = rko[2] ^ rki[3]; | 184 | rko[3] = rko[2] ^ rki[3]; |
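
The #ifdef added to ce_aes_expandkey() fixes key expansion on big-endian kernels: the schedule is built from 32-bit words in native endianness, so when the bytes within each word are reversed, RotWord must rotate the other way and the round constant moves to the most significant byte. Both variants as standalone C, with aes_sub() (SubWord) assumed to be the helper defined elsewhere in this file:

    #include <stdint.h>

    uint32_t aes_sub(uint32_t in);          /* SubWord, per aes-ce-cipher.c */

    static inline uint32_t ror32(uint32_t v, unsigned int n)
    {
            return (v >> n) | (v << (32 - n));
    }

    static inline uint32_t rol32(uint32_t v, unsigned int n)
    {
            return (v << n) | (v >> (32 - n));
    }

    /* First word of the next round key, little-endian word layout: */
    static uint32_t next_rk0_le(uint32_t last, uint32_t rcon_i, uint32_t first)
    {
            return ror32(aes_sub(last), 8) ^ rcon_i ^ first;
    }

    /* Same computation when every word carries its bytes reversed: */
    static uint32_t next_rk0_be(uint32_t last, uint32_t rcon_i, uint32_t first)
    {
            return rol32(aes_sub(last), 8) ^ (rcon_i << 24) ^ first;
    }
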
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S index 78f3cfe92c08..b46093d567e5 100644 --- a/arch/arm64/crypto/aes-ce.S +++ b/arch/arm64/crypto/aes-ce.S | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
| 13 | #include <asm/assembler.h> | ||
| 13 | 14 | ||
| 14 | #define AES_ENTRY(func) ENTRY(ce_ ## func) | 15 | #define AES_ENTRY(func) ENTRY(ce_ ## func) |
| 15 | #define AES_ENDPROC(func) ENDPROC(ce_ ## func) | 16 | #define AES_ENDPROC(func) ENDPROC(ce_ ## func) |
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 6b2aa0fd6cd0..4e3f8adb1793 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c | |||
| @@ -11,8 +11,8 @@ | |||
| 11 | #include <asm/neon.h> | 11 | #include <asm/neon.h> |
| 12 | #include <asm/hwcap.h> | 12 | #include <asm/hwcap.h> |
| 13 | #include <crypto/aes.h> | 13 | #include <crypto/aes.h> |
| 14 | #include <crypto/ablk_helper.h> | 14 | #include <crypto/internal/simd.h> |
| 15 | #include <crypto/algapi.h> | 15 | #include <crypto/internal/skcipher.h> |
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <linux/cpufeature.h> | 17 | #include <linux/cpufeature.h> |
| 18 | #include <crypto/xts.h> | 18 | #include <crypto/xts.h> |
| @@ -80,13 +80,19 @@ struct crypto_aes_xts_ctx { | |||
| 80 | struct crypto_aes_ctx __aligned(8) key2; | 80 | struct crypto_aes_ctx __aligned(8) key2; |
| 81 | }; | 81 | }; |
| 82 | 82 | ||
| 83 | static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key, | 83 | static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, |
| 84 | unsigned int key_len) | ||
| 85 | { | ||
| 86 | return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len); | ||
| 87 | } | ||
| 88 | |||
| 89 | static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key, | ||
| 84 | unsigned int key_len) | 90 | unsigned int key_len) |
| 85 | { | 91 | { |
| 86 | struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); | 92 | struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 87 | int ret; | 93 | int ret; |
| 88 | 94 | ||
| 89 | ret = xts_check_key(tfm, in_key, key_len); | 95 | ret = xts_verify_key(tfm, in_key, key_len); |
| 90 | if (ret) | 96 | if (ret) |
| 91 | return ret; | 97 | return ret; |
| 92 | 98 | ||
| @@ -97,111 +103,101 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
| 97 | if (!ret) | 103 | if (!ret) |
| 98 | return 0; | 104 | return 0; |
| 99 | 105 | ||
| 100 | tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 106 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 101 | return -EINVAL; | 107 | return -EINVAL; |
| 102 | } | 108 | } |
| 103 | 109 | ||
| 104 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 110 | static int ecb_encrypt(struct skcipher_request *req) |
| 105 | struct scatterlist *src, unsigned int nbytes) | ||
| 106 | { | 111 | { |
| 107 | struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 112 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 113 | struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 108 | int err, first, rounds = 6 + ctx->key_length / 4; | 114 | int err, first, rounds = 6 + ctx->key_length / 4; |
| 109 | struct blkcipher_walk walk; | 115 | struct skcipher_walk walk; |
| 110 | unsigned int blocks; | 116 | unsigned int blocks; |
| 111 | 117 | ||
| 112 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 118 | err = skcipher_walk_virt(&walk, req, true); |
| 113 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 114 | err = blkcipher_walk_virt(desc, &walk); | ||
| 115 | 119 | ||
| 116 | kernel_neon_begin(); | 120 | kernel_neon_begin(); |
| 117 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { | 121 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { |
| 118 | aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, | 122 | aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 119 | (u8 *)ctx->key_enc, rounds, blocks, first); | 123 | (u8 *)ctx->key_enc, rounds, blocks, first); |
| 120 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); | 124 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); |
| 121 | } | 125 | } |
| 122 | kernel_neon_end(); | 126 | kernel_neon_end(); |
| 123 | return err; | 127 | return err; |
| 124 | } | 128 | } |
| 125 | 129 | ||
| 126 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 130 | static int ecb_decrypt(struct skcipher_request *req) |
| 127 | struct scatterlist *src, unsigned int nbytes) | ||
| 128 | { | 131 | { |
| 129 | struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 132 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 133 | struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 130 | int err, first, rounds = 6 + ctx->key_length / 4; | 134 | int err, first, rounds = 6 + ctx->key_length / 4; |
| 131 | struct blkcipher_walk walk; | 135 | struct skcipher_walk walk; |
| 132 | unsigned int blocks; | 136 | unsigned int blocks; |
| 133 | 137 | ||
| 134 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 138 | err = skcipher_walk_virt(&walk, req, true); |
| 135 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 136 | err = blkcipher_walk_virt(desc, &walk); | ||
| 137 | 139 | ||
| 138 | kernel_neon_begin(); | 140 | kernel_neon_begin(); |
| 139 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { | 141 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { |
| 140 | aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, | 142 | aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 141 | (u8 *)ctx->key_dec, rounds, blocks, first); | 143 | (u8 *)ctx->key_dec, rounds, blocks, first); |
| 142 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); | 144 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); |
| 143 | } | 145 | } |
| 144 | kernel_neon_end(); | 146 | kernel_neon_end(); |
| 145 | return err; | 147 | return err; |
| 146 | } | 148 | } |
| 147 | 149 | ||
| 148 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 150 | static int cbc_encrypt(struct skcipher_request *req) |
| 149 | struct scatterlist *src, unsigned int nbytes) | ||
| 150 | { | 151 | { |
| 151 | struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 152 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 153 | struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 152 | int err, first, rounds = 6 + ctx->key_length / 4; | 154 | int err, first, rounds = 6 + ctx->key_length / 4; |
| 153 | struct blkcipher_walk walk; | 155 | struct skcipher_walk walk; |
| 154 | unsigned int blocks; | 156 | unsigned int blocks; |
| 155 | 157 | ||
| 156 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 158 | err = skcipher_walk_virt(&walk, req, true); |
| 157 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 158 | err = blkcipher_walk_virt(desc, &walk); | ||
| 159 | 159 | ||
| 160 | kernel_neon_begin(); | 160 | kernel_neon_begin(); |
| 161 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { | 161 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { |
| 162 | aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, | 162 | aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 163 | (u8 *)ctx->key_enc, rounds, blocks, walk.iv, | 163 | (u8 *)ctx->key_enc, rounds, blocks, walk.iv, |
| 164 | first); | 164 | first); |
| 165 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); | 165 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); |
| 166 | } | 166 | } |
| 167 | kernel_neon_end(); | 167 | kernel_neon_end(); |
| 168 | return err; | 168 | return err; |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 171 | static int cbc_decrypt(struct skcipher_request *req) |
| 172 | struct scatterlist *src, unsigned int nbytes) | ||
| 173 | { | 172 | { |
| 174 | struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 173 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 174 | struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 175 | int err, first, rounds = 6 + ctx->key_length / 4; | 175 | int err, first, rounds = 6 + ctx->key_length / 4; |
| 176 | struct blkcipher_walk walk; | 176 | struct skcipher_walk walk; |
| 177 | unsigned int blocks; | 177 | unsigned int blocks; |
| 178 | 178 | ||
| 179 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 179 | err = skcipher_walk_virt(&walk, req, true); |
| 180 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 181 | err = blkcipher_walk_virt(desc, &walk); | ||
| 182 | 180 | ||
| 183 | kernel_neon_begin(); | 181 | kernel_neon_begin(); |
| 184 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { | 182 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { |
| 185 | aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, | 183 | aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 186 | (u8 *)ctx->key_dec, rounds, blocks, walk.iv, | 184 | (u8 *)ctx->key_dec, rounds, blocks, walk.iv, |
| 187 | first); | 185 | first); |
| 188 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); | 186 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); |
| 189 | } | 187 | } |
| 190 | kernel_neon_end(); | 188 | kernel_neon_end(); |
| 191 | return err; | 189 | return err; |
| 192 | } | 190 | } |
| 193 | 191 | ||
| 194 | static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 192 | static int ctr_encrypt(struct skcipher_request *req) |
| 195 | struct scatterlist *src, unsigned int nbytes) | ||
| 196 | { | 193 | { |
| 197 | struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 194 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 195 | struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 198 | int err, first, rounds = 6 + ctx->key_length / 4; | 196 | int err, first, rounds = 6 + ctx->key_length / 4; |
| 199 | struct blkcipher_walk walk; | 197 | struct skcipher_walk walk; |
| 200 | int blocks; | 198 | int blocks; |
| 201 | 199 | ||
| 202 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 200 | err = skcipher_walk_virt(&walk, req, true); |
| 203 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 204 | err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); | ||
| 205 | 201 | ||
| 206 | first = 1; | 202 | first = 1; |
| 207 | kernel_neon_begin(); | 203 | kernel_neon_begin(); |
| @@ -209,17 +205,14 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
| 209 | aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, | 205 | aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 210 | (u8 *)ctx->key_enc, rounds, blocks, walk.iv, | 206 | (u8 *)ctx->key_enc, rounds, blocks, walk.iv, |
| 211 | first); | 207 | first); |
| 208 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); | ||
| 212 | first = 0; | 209 | first = 0; |
| 213 | nbytes -= blocks * AES_BLOCK_SIZE; | ||
| 214 | if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE) | ||
| 215 | break; | ||
| 216 | err = blkcipher_walk_done(desc, &walk, | ||
| 217 | walk.nbytes % AES_BLOCK_SIZE); | ||
| 218 | } | 210 | } |
| 219 | if (walk.nbytes % AES_BLOCK_SIZE) { | 211 | if (walk.nbytes) { |
| 220 | u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; | ||
| 221 | u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; | ||
| 222 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; | 212 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; |
| 213 | unsigned int nbytes = walk.nbytes; | ||
| 214 | u8 *tdst = walk.dst.virt.addr; | ||
| 215 | u8 *tsrc = walk.src.virt.addr; | ||
| 223 | 216 | ||
| 224 | /* | 217 | /* |
| 225 | * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need | 218 | * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need |
| @@ -230,227 +223,169 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
| 230 | aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds, | 223 | aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds, |
| 231 | blocks, walk.iv, first); | 224 | blocks, walk.iv, first); |
| 232 | memcpy(tdst, tail, nbytes); | 225 | memcpy(tdst, tail, nbytes); |
| 233 | err = blkcipher_walk_done(desc, &walk, 0); | 226 | err = skcipher_walk_done(&walk, 0); |
| 234 | } | 227 | } |
| 235 | kernel_neon_end(); | 228 | kernel_neon_end(); |
| 236 | 229 | ||
| 237 | return err; | 230 | return err; |
| 238 | } | 231 | } |
| 239 | 232 | ||
| 240 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 233 | static int xts_encrypt(struct skcipher_request *req) |
| 241 | struct scatterlist *src, unsigned int nbytes) | ||
| 242 | { | 234 | { |
| 243 | struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 235 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 236 | struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 244 | int err, first, rounds = 6 + ctx->key1.key_length / 4; | 237 | int err, first, rounds = 6 + ctx->key1.key_length / 4; |
| 245 | struct blkcipher_walk walk; | 238 | struct skcipher_walk walk; |
| 246 | unsigned int blocks; | 239 | unsigned int blocks; |
| 247 | 240 | ||
| 248 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 241 | err = skcipher_walk_virt(&walk, req, true); |
| 249 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 250 | err = blkcipher_walk_virt(desc, &walk); | ||
| 251 | 242 | ||
| 252 | kernel_neon_begin(); | 243 | kernel_neon_begin(); |
| 253 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { | 244 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { |
| 254 | aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, | 245 | aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 255 | (u8 *)ctx->key1.key_enc, rounds, blocks, | 246 | (u8 *)ctx->key1.key_enc, rounds, blocks, |
| 256 | (u8 *)ctx->key2.key_enc, walk.iv, first); | 247 | (u8 *)ctx->key2.key_enc, walk.iv, first); |
| 257 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); | 248 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); |
| 258 | } | 249 | } |
| 259 | kernel_neon_end(); | 250 | kernel_neon_end(); |
| 260 | 251 | ||
| 261 | return err; | 252 | return err; |
| 262 | } | 253 | } |
| 263 | 254 | ||
| 264 | static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 255 | static int xts_decrypt(struct skcipher_request *req) |
| 265 | struct scatterlist *src, unsigned int nbytes) | ||
| 266 | { | 256 | { |
| 267 | struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 257 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 258 | struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 268 | int err, first, rounds = 6 + ctx->key1.key_length / 4; | 259 | int err, first, rounds = 6 + ctx->key1.key_length / 4; |
| 269 | struct blkcipher_walk walk; | 260 | struct skcipher_walk walk; |
| 270 | unsigned int blocks; | 261 | unsigned int blocks; |
| 271 | 262 | ||
| 272 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | 263 | err = skcipher_walk_virt(&walk, req, true); |
| 273 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 274 | err = blkcipher_walk_virt(desc, &walk); | ||
| 275 | 264 | ||
| 276 | kernel_neon_begin(); | 265 | kernel_neon_begin(); |
| 277 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { | 266 | for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { |
| 278 | aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, | 267 | aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, |
| 279 | (u8 *)ctx->key1.key_dec, rounds, blocks, | 268 | (u8 *)ctx->key1.key_dec, rounds, blocks, |
| 280 | (u8 *)ctx->key2.key_enc, walk.iv, first); | 269 | (u8 *)ctx->key2.key_enc, walk.iv, first); |
| 281 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); | 270 | err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); |
| 282 | } | 271 | } |
| 283 | kernel_neon_end(); | 272 | kernel_neon_end(); |
| 284 | 273 | ||
| 285 | return err; | 274 | return err; |
| 286 | } | 275 | } |
| 287 | 276 | ||
| 288 | static struct crypto_alg aes_algs[] = { { | 277 | static struct skcipher_alg aes_algs[] = { { |
| 289 | .cra_name = "__ecb-aes-" MODE, | 278 | .base = { |
| 290 | .cra_driver_name = "__driver-ecb-aes-" MODE, | 279 | .cra_name = "__ecb(aes)", |
| 291 | .cra_priority = 0, | 280 | .cra_driver_name = "__ecb-aes-" MODE, |
| 292 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 281 | .cra_priority = PRIO, |
| 293 | CRYPTO_ALG_INTERNAL, | 282 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 294 | .cra_blocksize = AES_BLOCK_SIZE, | 283 | .cra_blocksize = AES_BLOCK_SIZE, |
| 295 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), | 284 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), |
| 296 | .cra_alignmask = 7, | 285 | .cra_alignmask = 7, |
| 297 | .cra_type = &crypto_blkcipher_type, | 286 | .cra_module = THIS_MODULE, |
| 298 | .cra_module = THIS_MODULE, | ||
| 299 | .cra_blkcipher = { | ||
| 300 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 301 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 302 | .ivsize = 0, | ||
| 303 | .setkey = aes_setkey, | ||
| 304 | .encrypt = ecb_encrypt, | ||
| 305 | .decrypt = ecb_decrypt, | ||
| 306 | }, | 287 | }, |
| 288 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 289 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 290 | .setkey = skcipher_aes_setkey, | ||
| 291 | .encrypt = ecb_encrypt, | ||
| 292 | .decrypt = ecb_decrypt, | ||
| 307 | }, { | 293 | }, { |
| 308 | .cra_name = "__cbc-aes-" MODE, | 294 | .base = { |
| 309 | .cra_driver_name = "__driver-cbc-aes-" MODE, | 295 | .cra_name = "__cbc(aes)", |
| 310 | .cra_priority = 0, | 296 | .cra_driver_name = "__cbc-aes-" MODE, |
| 311 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 297 | .cra_priority = PRIO, |
| 312 | CRYPTO_ALG_INTERNAL, | 298 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 313 | .cra_blocksize = AES_BLOCK_SIZE, | 299 | .cra_blocksize = AES_BLOCK_SIZE, |
| 314 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), | 300 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), |
| 315 | .cra_alignmask = 7, | 301 | .cra_alignmask = 7, |
| 316 | .cra_type = &crypto_blkcipher_type, | 302 | .cra_module = THIS_MODULE, |
| 317 | .cra_module = THIS_MODULE, | ||
| 318 | .cra_blkcipher = { | ||
| 319 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 320 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 321 | .ivsize = AES_BLOCK_SIZE, | ||
| 322 | .setkey = aes_setkey, | ||
| 323 | .encrypt = cbc_encrypt, | ||
| 324 | .decrypt = cbc_decrypt, | ||
| 325 | }, | 303 | }, |
| 304 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 305 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 306 | .ivsize = AES_BLOCK_SIZE, | ||
| 307 | .setkey = skcipher_aes_setkey, | ||
| 308 | .encrypt = cbc_encrypt, | ||
| 309 | .decrypt = cbc_decrypt, | ||
| 326 | }, { | 310 | }, { |
| 327 | .cra_name = "__ctr-aes-" MODE, | 311 | .base = { |
| 328 | .cra_driver_name = "__driver-ctr-aes-" MODE, | 312 | .cra_name = "__ctr(aes)", |
| 329 | .cra_priority = 0, | 313 | .cra_driver_name = "__ctr-aes-" MODE, |
| 330 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 314 | .cra_priority = PRIO, |
| 331 | CRYPTO_ALG_INTERNAL, | 315 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 332 | .cra_blocksize = 1, | 316 | .cra_blocksize = 1, |
| 333 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), | 317 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), |
| 334 | .cra_alignmask = 7, | 318 | .cra_alignmask = 7, |
| 335 | .cra_type = &crypto_blkcipher_type, | 319 | .cra_module = THIS_MODULE, |
| 336 | .cra_module = THIS_MODULE, | ||
| 337 | .cra_blkcipher = { | ||
| 338 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 339 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 340 | .ivsize = AES_BLOCK_SIZE, | ||
| 341 | .setkey = aes_setkey, | ||
| 342 | .encrypt = ctr_encrypt, | ||
| 343 | .decrypt = ctr_encrypt, | ||
| 344 | }, | 320 | }, |
| 321 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 322 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 323 | .ivsize = AES_BLOCK_SIZE, | ||
| 324 | .chunksize = AES_BLOCK_SIZE, | ||
| 325 | .setkey = skcipher_aes_setkey, | ||
| 326 | .encrypt = ctr_encrypt, | ||
| 327 | .decrypt = ctr_encrypt, | ||
| 345 | }, { | 328 | }, { |
| 346 | .cra_name = "__xts-aes-" MODE, | 329 | .base = { |
| 347 | .cra_driver_name = "__driver-xts-aes-" MODE, | 330 | .cra_name = "__xts(aes)", |
| 348 | .cra_priority = 0, | 331 | .cra_driver_name = "__xts-aes-" MODE, |
| 349 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 332 | .cra_priority = PRIO, |
| 350 | CRYPTO_ALG_INTERNAL, | 333 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 351 | .cra_blocksize = AES_BLOCK_SIZE, | 334 | .cra_blocksize = AES_BLOCK_SIZE, |
| 352 | .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx), | 335 | .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx), |
| 353 | .cra_alignmask = 7, | 336 | .cra_alignmask = 7, |
| 354 | .cra_type = &crypto_blkcipher_type, | 337 | .cra_module = THIS_MODULE, |
| 355 | .cra_module = THIS_MODULE, | ||
| 356 | .cra_blkcipher = { | ||
| 357 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
| 358 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
| 359 | .ivsize = AES_BLOCK_SIZE, | ||
| 360 | .setkey = xts_set_key, | ||
| 361 | .encrypt = xts_encrypt, | ||
| 362 | .decrypt = xts_decrypt, | ||
| 363 | }, | 338 | }, |
| 364 | }, { | 339 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
| 365 | .cra_name = "ecb(aes)", | 340 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
| 366 | .cra_driver_name = "ecb-aes-" MODE, | 341 | .ivsize = AES_BLOCK_SIZE, |
| 367 | .cra_priority = PRIO, | 342 | .setkey = xts_set_key, |
| 368 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | 343 | .encrypt = xts_encrypt, |
| 369 | .cra_blocksize = AES_BLOCK_SIZE, | 344 | .decrypt = xts_decrypt, |
| 370 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 371 | .cra_alignmask = 7, | ||
| 372 | .cra_type = &crypto_ablkcipher_type, | ||
| 373 | .cra_module = THIS_MODULE, | ||
| 374 | .cra_init = ablk_init, | ||
| 375 | .cra_exit = ablk_exit, | ||
| 376 | .cra_ablkcipher = { | ||
| 377 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 378 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 379 | .ivsize = 0, | ||
| 380 | .setkey = ablk_set_key, | ||
| 381 | .encrypt = ablk_encrypt, | ||
| 382 | .decrypt = ablk_decrypt, | ||
| 383 | } | ||
| 384 | }, { | ||
| 385 | .cra_name = "cbc(aes)", | ||
| 386 | .cra_driver_name = "cbc-aes-" MODE, | ||
| 387 | .cra_priority = PRIO, | ||
| 388 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
| 389 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 390 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 391 | .cra_alignmask = 7, | ||
| 392 | .cra_type = &crypto_ablkcipher_type, | ||
| 393 | .cra_module = THIS_MODULE, | ||
| 394 | .cra_init = ablk_init, | ||
| 395 | .cra_exit = ablk_exit, | ||
| 396 | .cra_ablkcipher = { | ||
| 397 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 398 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 399 | .ivsize = AES_BLOCK_SIZE, | ||
| 400 | .setkey = ablk_set_key, | ||
| 401 | .encrypt = ablk_encrypt, | ||
| 402 | .decrypt = ablk_decrypt, | ||
| 403 | } | ||
| 404 | }, { | ||
| 405 | .cra_name = "ctr(aes)", | ||
| 406 | .cra_driver_name = "ctr-aes-" MODE, | ||
| 407 | .cra_priority = PRIO, | ||
| 408 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
| 409 | .cra_blocksize = 1, | ||
| 410 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 411 | .cra_alignmask = 7, | ||
| 412 | .cra_type = &crypto_ablkcipher_type, | ||
| 413 | .cra_module = THIS_MODULE, | ||
| 414 | .cra_init = ablk_init, | ||
| 415 | .cra_exit = ablk_exit, | ||
| 416 | .cra_ablkcipher = { | ||
| 417 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 418 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 419 | .ivsize = AES_BLOCK_SIZE, | ||
| 420 | .setkey = ablk_set_key, | ||
| 421 | .encrypt = ablk_encrypt, | ||
| 422 | .decrypt = ablk_decrypt, | ||
| 423 | } | ||
| 424 | }, { | ||
| 425 | .cra_name = "xts(aes)", | ||
| 426 | .cra_driver_name = "xts-aes-" MODE, | ||
| 427 | .cra_priority = PRIO, | ||
| 428 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, | ||
| 429 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 430 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 431 | .cra_alignmask = 7, | ||
| 432 | .cra_type = &crypto_ablkcipher_type, | ||
| 433 | .cra_module = THIS_MODULE, | ||
| 434 | .cra_init = ablk_init, | ||
| 435 | .cra_exit = ablk_exit, | ||
| 436 | .cra_ablkcipher = { | ||
| 437 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
| 438 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
| 439 | .ivsize = AES_BLOCK_SIZE, | ||
| 440 | .setkey = ablk_set_key, | ||
| 441 | .encrypt = ablk_encrypt, | ||
| 442 | .decrypt = ablk_decrypt, | ||
| 443 | } | ||
| 444 | } }; | 345 | } }; |
| 445 | 346 | ||
| 446 | static int __init aes_init(void) | 347 | static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; |
| 348 | |||
| 349 | static void aes_exit(void) | ||
| 447 | { | 350 | { |
| 448 | return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs)); | 351 | int i; |
| 352 | |||
| 353 | for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++) | ||
| 354 | simd_skcipher_free(aes_simd_algs[i]); | ||
| 355 | |||
| 356 | crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); | ||
| 449 | } | 357 | } |
| 450 | 358 | ||
| 451 | static void __exit aes_exit(void) | 359 | static int __init aes_init(void) |
| 452 | { | 360 | { |
| 453 | crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs)); | 361 | struct simd_skcipher_alg *simd; |
| 362 | const char *basename; | ||
| 363 | const char *algname; | ||
| 364 | const char *drvname; | ||
| 365 | int err; | ||
| 366 | int i; | ||
| 367 | |||
| 368 | err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); | ||
| 369 | if (err) | ||
| 370 | return err; | ||
| 371 | |||
| 372 | for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { | ||
| 373 | algname = aes_algs[i].base.cra_name + 2; | ||
| 374 | drvname = aes_algs[i].base.cra_driver_name + 2; | ||
| 375 | basename = aes_algs[i].base.cra_driver_name; | ||
| 376 | simd = simd_skcipher_create_compat(algname, drvname, basename); | ||
| 377 | err = PTR_ERR(simd); | ||
| 378 | if (IS_ERR(simd)) | ||
| 379 | goto unregister_simds; | ||
| 380 | |||
| 381 | aes_simd_algs[i] = simd; | ||
| 382 | } | ||
| 383 | |||
| 384 | return 0; | ||
| 385 | |||
| 386 | unregister_simds: | ||
| 387 | aes_exit(); | ||
| 388 | return err; | ||
| 454 | } | 389 | } |
| 455 | 390 | ||
| 456 | #ifdef USE_V8_CRYPTO_EXTENSIONS | 391 | #ifdef USE_V8_CRYPTO_EXTENSIONS |
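
aes_init() above follows the usual internal/SIMD split: the synchronous skciphers are registered under "__"-prefixed names with CRYPTO_ALG_INTERNAL, so the crypto API never hands them to ordinary users (they may only run where the NEON register file is usable), and simd_skcipher_create_compat() then publishes an async wrapper under the plain name. The `+ 2` pointer arithmetic just strips the prefix; schematically, for the first table entry and assuming MODE expands to "ce" in this build:

    algname  = "__ecb(aes)"   + 2;  /* public name:    "ecb(aes)"   */
    drvname  = "__ecb-aes-ce" + 2;  /* wrapper driver: "ecb-aes-ce" */
    basename = "__ecb-aes-ce";      /* inner synchronous driver     */

    simd = simd_skcipher_create_compat(algname, drvname, basename);
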
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index f6e372c528eb..c53dbeae79f2 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S | |||
| @@ -386,7 +386,8 @@ AES_ENDPROC(aes_ctr_encrypt) | |||
| 386 | .endm | 386 | .endm |
| 387 | 387 | ||
| 388 | .Lxts_mul_x: | 388 | .Lxts_mul_x: |
| 389 | .word 1, 0, 0x87, 0 | 389 | CPU_LE( .quad 1, 0x87 ) |
| 390 | CPU_BE( .quad 0x87, 1 ) | ||
| 390 | 391 | ||
| 391 | AES_ENTRY(aes_xts_encrypt) | 392 | AES_ENTRY(aes_xts_encrypt) |
| 392 | FRAME_PUSH | 393 | FRAME_PUSH |
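
The .Lxts_mul_x change above is another endianness fix-up: CPU_BE() swaps the two quadwords of the {1, 0x87} constant so an endian-sensitive load still produces the intended register contents. The constant drives the XTS tweak update, multiplication by x in GF(2^128): shift the 128-bit tweak left one bit and, if a bit falls off the top, fold in the reduction term 0x87. A scalar C model of that operation (a sketch, not the kernel's helper):

    #include <stdint.h>

    /* Multiply an XTS tweak by x in GF(2^128). lo/hi hold the tweak as
     * little-endian 64-bit halves; 0x87 is the reduction term kept in
     * .Lxts_mul_x. */
    static void xts_mul_x(uint64_t *lo, uint64_t *hi)
    {
            uint64_t carry = *hi >> 63;     /* bit 127 about to shift out */

            *hi = (*hi << 1) | (*lo >> 63);
            *lo = (*lo << 1) ^ (carry ? 0x87 : 0);
    }
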
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S index b93170e1cc93..85f07ead7c5c 100644 --- a/arch/arm64/crypto/aes-neon.S +++ b/arch/arm64/crypto/aes-neon.S | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
| 12 | #include <asm/assembler.h> | ||
| 12 | 13 | ||
| 13 | #define AES_ENTRY(func) ENTRY(neon_ ## func) | 14 | #define AES_ENTRY(func) ENTRY(neon_ ## func) |
| 14 | #define AES_ENDPROC(func) ENDPROC(neon_ ## func) | 15 | #define AES_ENDPROC(func) ENDPROC(neon_ ## func) |
| @@ -83,13 +84,13 @@ | |||
| 83 | .endm | 84 | .endm |
| 84 | 85 | ||
| 85 | .macro do_block, enc, in, rounds, rk, rkp, i | 86 | .macro do_block, enc, in, rounds, rk, rkp, i |
| 86 | ld1 {v15.16b}, [\rk] | 87 | ld1 {v15.4s}, [\rk] |
| 87 | add \rkp, \rk, #16 | 88 | add \rkp, \rk, #16 |
| 88 | mov \i, \rounds | 89 | mov \i, \rounds |
| 89 | 1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ | 90 | 1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */ |
| 90 | tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ | 91 | tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ |
| 91 | sub_bytes \in | 92 | sub_bytes \in |
| 92 | ld1 {v15.16b}, [\rkp], #16 | 93 | ld1 {v15.4s}, [\rkp], #16 |
| 93 | subs \i, \i, #1 | 94 | subs \i, \i, #1 |
| 94 | beq 2222f | 95 | beq 2222f |
| 95 | .if \enc == 1 | 96 | .if \enc == 1 |
| @@ -229,7 +230,7 @@ | |||
| 229 | .endm | 230 | .endm |
| 230 | 231 | ||
| 231 | .macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i | 232 | .macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i |
| 232 | ld1 {v15.16b}, [\rk] | 233 | ld1 {v15.4s}, [\rk] |
| 233 | add \rkp, \rk, #16 | 234 | add \rkp, \rk, #16 |
| 234 | mov \i, \rounds | 235 | mov \i, \rounds |
| 235 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ | 236 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ |
| @@ -237,7 +238,7 @@ | |||
| 237 | sub_bytes_2x \in0, \in1 | 238 | sub_bytes_2x \in0, \in1 |
| 238 | tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ | 239 | tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ |
| 239 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ | 240 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ |
| 240 | ld1 {v15.16b}, [\rkp], #16 | 241 | ld1 {v15.4s}, [\rkp], #16 |
| 241 | subs \i, \i, #1 | 242 | subs \i, \i, #1 |
| 242 | beq 2222f | 243 | beq 2222f |
| 243 | .if \enc == 1 | 244 | .if \enc == 1 |
| @@ -254,7 +255,7 @@ | |||
| 254 | .endm | 255 | .endm |
| 255 | 256 | ||
| 256 | .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i | 257 | .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i |
| 257 | ld1 {v15.16b}, [\rk] | 258 | ld1 {v15.4s}, [\rk] |
| 258 | add \rkp, \rk, #16 | 259 | add \rkp, \rk, #16 |
| 259 | mov \i, \rounds | 260 | mov \i, \rounds |
| 260 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ | 261 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ |
| @@ -266,7 +267,7 @@ | |||
| 266 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ | 267 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ |
| 267 | tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */ | 268 | tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */ |
| 268 | tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */ | 269 | tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */ |
| 269 | ld1 {v15.16b}, [\rkp], #16 | 270 | ld1 {v15.4s}, [\rkp], #16 |
| 270 | subs \i, \i, #1 | 271 | subs \i, \i, #1 |
| 271 | beq 2222f | 272 | beq 2222f |
| 272 | .if \enc == 1 | 273 | .if \enc == 1 |
| @@ -306,12 +307,16 @@ | |||
| 306 | .text | 307 | .text |
| 307 | .align 4 | 308 | .align 4 |
| 308 | .LForward_ShiftRows: | 309 | .LForward_ShiftRows: |
| 309 | .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 | 310 | CPU_LE( .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 ) |
| 310 | .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb | 311 | CPU_LE( .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb ) |
| 312 | CPU_BE( .byte 0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8 ) | ||
| 313 | CPU_BE( .byte 0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0 ) | ||
| 311 | 314 | ||
| 312 | .LReverse_ShiftRows: | 315 | .LReverse_ShiftRows: |
| 313 | .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb | 316 | CPU_LE( .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb ) |
| 314 | .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 | 317 | CPU_LE( .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 ) |
| 318 | CPU_BE( .byte 0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8 ) | ||
| 319 | CPU_BE( .byte 0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0 ) | ||
| 315 | 320 | ||
| 316 | .LForward_Sbox: | 321 | .LForward_Sbox: |
| 317 | .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 | 322 | .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 |
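
Two distinct big-endian fixes appear in aes-neon.S above. The round keys come from the generic key schedule as arrays of 32-bit words, so they are now loaded with `ld1 {v15.4s}` (four 32-bit elements, swabbed per word on BE) rather than `.16b`. The ShiftRows tables, by contrast, are fetched with an endian-sensitive 128-bit load, so CPU_BE() emits the same permutation in reversed byte order. The tables are `tbl` indices, out[i] = in[table[i]], implementing AES ShiftRows on the column-major state; a C model using the little-endian values from above:

    #include <stdint.h>

    /* Forward ShiftRows as a byte permutation: row r of the 4x4
     * column-major state rotates left by r positions. */
    static const uint8_t fwd_shiftrows[16] = {
            0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
            0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
    };

    static void shift_rows(uint8_t out[16], const uint8_t in[16])
    {
            for (int i = 0; i < 16; i++)
                    out[i] = in[fwd_shiftrows[i]];  /* tbl semantics */
    }
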
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S new file mode 100644 index 000000000000..18f5a8442276 --- /dev/null +++ b/arch/arm64/crypto/crc32-ce-core.S | |||
| @@ -0,0 +1,266 @@ | |||
| 1 | /* | ||
| 2 | * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | /* GPL HEADER START | ||
| 12 | * | ||
| 13 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | ||
| 14 | * | ||
| 15 | * This program is free software; you can redistribute it and/or modify | ||
| 16 | * it under the terms of the GNU General Public License version 2 only, | ||
| 17 | * as published by the Free Software Foundation. | ||
| 18 | * | ||
| 19 | * This program is distributed in the hope that it will be useful, but | ||
| 20 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 22 | * General Public License version 2 for more details (a copy is included | ||
| 23 | * in the LICENSE file that accompanied this code). | ||
| 24 | * | ||
| 25 | * You should have received a copy of the GNU General Public License | ||
| 26 | * version 2 along with this program; If not, see http://www.gnu.org/licenses | ||
| 27 | * | ||
| 28 | * Please visit http://www.xyratex.com/contact if you need additional | ||
| 29 | * information or have any questions. | ||
| 30 | * | ||
| 31 | * GPL HEADER END | ||
| 32 | */ | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Copyright 2012 Xyratex Technology Limited | ||
| 36 | * | ||
| 37 | * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32 | ||
| 38 | * calculation. | ||
| 39 | * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE) | ||
| 40 | * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found | ||
| 41 | * at: | ||
| 42 | * http://www.intel.com/products/processor/manuals/ | ||
| 43 | * Intel(R) 64 and IA-32 Architectures Software Developer's Manual | ||
| 44 | * Volume 2B: Instruction Set Reference, N-Z | ||
| 45 | * | ||
| 46 | * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com> | ||
| 47 | * Alexander Boyko <Alexander_Boyko@xyratex.com> | ||
| 48 | */ | ||
| 49 | |||
| 50 | #include <linux/linkage.h> | ||
| 51 | #include <asm/assembler.h> | ||
| 52 | |||
| 53 | .text | ||
| 54 | .align 6 | ||
| 55 | .cpu generic+crypto+crc | ||
| 56 | |||
| 57 | .Lcrc32_constants: | ||
| 58 | /* | ||
| 59 | * [(x^(4*128+32) mod P(x) << 32)]' << 1 = 0x154442bd4 | ||
| 60 | * #define CONSTANT_R1 0x154442bd4LL | ||
| 61 | * | ||
| 62 | * [(x^(4*128-32) mod P(x) << 32)]' << 1 = 0x1c6e41596 | ||
| 63 | * #define CONSTANT_R2 0x1c6e41596LL | ||
| 64 | */ | ||
| 65 | .octa 0x00000001c6e415960000000154442bd4 | ||
| 66 | |||
| 67 | /* | ||
| 68 | * [(x^(128+32) mod P(x) << 32)]' << 1 = 0x1751997d0 | ||
| 69 | * #define CONSTANT_R3 0x1751997d0LL | ||
| 70 | * | ||
| 71 | * [(x^(128-32) mod P(x) << 32)]' << 1 = 0x0ccaa009e | ||
| 72 | * #define CONSTANT_R4 0x0ccaa009eLL | ||
| 73 | */ | ||
| 74 | .octa 0x00000000ccaa009e00000001751997d0 | ||
| 75 | |||
| 76 | /* | ||
| 77 | * [(x^64 mod P(x) << 32)]' << 1 = 0x163cd6124 | ||
| 78 | * #define CONSTANT_R5 0x163cd6124LL | ||
| 79 | */ | ||
| 80 | .quad 0x0000000163cd6124 | ||
| 81 | .quad 0x00000000FFFFFFFF | ||
| 82 | |||
| 83 | /* | ||
| 84 | * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL | ||
| 85 | * | ||
| 86 | * Barrett Reduction constant (u64)' = u' = (x^64 / P(x))' | ||
| 87 | * = 0x1F7011641LL | ||
| 88 | * #define CONSTANT_RU 0x1F7011641LL | ||
| 89 | */ | ||
| 90 | .octa 0x00000001F701164100000001DB710641 | ||
| 91 | |||
| 92 | .Lcrc32c_constants: | ||
| 93 | .octa 0x000000009e4addf800000000740eef02 | ||
| 94 | .octa 0x000000014cd00bd600000000f20c0dfe | ||
| 95 | .quad 0x00000000dd45aab8 | ||
| 96 | .quad 0x00000000FFFFFFFF | ||
| 97 | .octa 0x00000000dea713f10000000105ec76f0 | ||
| 98 | |||
| 99 | vCONSTANT .req v0 | ||
| 100 | dCONSTANT .req d0 | ||
| 101 | qCONSTANT .req q0 | ||
| 102 | |||
| 103 | BUF .req x0 | ||
| 104 | LEN .req x1 | ||
| 105 | CRC .req x2 | ||
| 106 | |||
| 107 | vzr .req v9 | ||
| 108 | |||
| 109 | /** | ||
| 110 | * Calculate crc32 | ||
| 111 | * BUF - buffer | ||
| 112 | * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63 | ||
| 113 | * CRC - initial crc32 | ||
| 114 | * return w0 crc32 | ||
| 115 | * u32 crc32_pmull_le(const u8 buffer[], | ||
| 116 | * u64 len, u32 init_crc) | ||
| 117 | */ | ||
| 118 | ENTRY(crc32_pmull_le) | ||
| 119 | adr x3, .Lcrc32_constants | ||
| 120 | b 0f | ||
| 121 | |||
| 122 | ENTRY(crc32c_pmull_le) | ||
| 123 | adr x3, .Lcrc32c_constants | ||
| 124 | |||
| 125 | 0: bic LEN, LEN, #15 | ||
| 126 | ld1 {v1.16b-v4.16b}, [BUF], #0x40 | ||
| 127 | movi vzr.16b, #0 | ||
| 128 | fmov dCONSTANT, CRC | ||
| 129 | eor v1.16b, v1.16b, vCONSTANT.16b | ||
| 130 | sub LEN, LEN, #0x40 | ||
| 131 | cmp LEN, #0x40 | ||
| 132 | b.lt less_64 | ||
| 133 | |||
| 134 | ldr qCONSTANT, [x3] | ||
| 135 | |||
| 136 | loop_64: /* 64 bytes Full cache line folding */ | ||
| 137 | sub LEN, LEN, #0x40 | ||
| 138 | |||
| 139 | pmull2 v5.1q, v1.2d, vCONSTANT.2d | ||
| 140 | pmull2 v6.1q, v2.2d, vCONSTANT.2d | ||
| 141 | pmull2 v7.1q, v3.2d, vCONSTANT.2d | ||
| 142 | pmull2 v8.1q, v4.2d, vCONSTANT.2d | ||
| 143 | |||
| 144 | pmull v1.1q, v1.1d, vCONSTANT.1d | ||
| 145 | pmull v2.1q, v2.1d, vCONSTANT.1d | ||
| 146 | pmull v3.1q, v3.1d, vCONSTANT.1d | ||
| 147 | pmull v4.1q, v4.1d, vCONSTANT.1d | ||
| 148 | |||
| 149 | eor v1.16b, v1.16b, v5.16b | ||
| 150 | ld1 {v5.16b}, [BUF], #0x10 | ||
| 151 | eor v2.16b, v2.16b, v6.16b | ||
| 152 | ld1 {v6.16b}, [BUF], #0x10 | ||
| 153 | eor v3.16b, v3.16b, v7.16b | ||
| 154 | ld1 {v7.16b}, [BUF], #0x10 | ||
| 155 | eor v4.16b, v4.16b, v8.16b | ||
| 156 | ld1 {v8.16b}, [BUF], #0x10 | ||
| 157 | |||
| 158 | eor v1.16b, v1.16b, v5.16b | ||
| 159 | eor v2.16b, v2.16b, v6.16b | ||
| 160 | eor v3.16b, v3.16b, v7.16b | ||
| 161 | eor v4.16b, v4.16b, v8.16b | ||
| 162 | |||
| 163 | cmp LEN, #0x40 | ||
| 164 | b.ge loop_64 | ||
| 165 | |||
| 166 | less_64: /* Folding cache line into 128bit */ | ||
| 167 | ldr qCONSTANT, [x3, #16] | ||
| 168 | |||
| 169 | pmull2 v5.1q, v1.2d, vCONSTANT.2d | ||
| 170 | pmull v1.1q, v1.1d, vCONSTANT.1d | ||
| 171 | eor v1.16b, v1.16b, v5.16b | ||
| 172 | eor v1.16b, v1.16b, v2.16b | ||
| 173 | |||
| 174 | pmull2 v5.1q, v1.2d, vCONSTANT.2d | ||
| 175 | pmull v1.1q, v1.1d, vCONSTANT.1d | ||
| 176 | eor v1.16b, v1.16b, v5.16b | ||
| 177 | eor v1.16b, v1.16b, v3.16b | ||
| 178 | |||
| 179 | pmull2 v5.1q, v1.2d, vCONSTANT.2d | ||
| 180 | pmull v1.1q, v1.1d, vCONSTANT.1d | ||
| 181 | eor v1.16b, v1.16b, v5.16b | ||
| 182 | eor v1.16b, v1.16b, v4.16b | ||
| 183 | |||
| 184 | cbz LEN, fold_64 | ||
| 185 | |||
| 186 | loop_16: /* Folding rest buffer into 128bit */ | ||
| 187 | subs LEN, LEN, #0x10 | ||
| 188 | |||
| 189 | ld1 {v2.16b}, [BUF], #0x10 | ||
| 190 | pmull2 v5.1q, v1.2d, vCONSTANT.2d | ||
| 191 | pmull v1.1q, v1.1d, vCONSTANT.1d | ||
| 192 | eor v1.16b, v1.16b, v5.16b | ||
| 193 | eor v1.16b, v1.16b, v2.16b | ||
| 194 | |||
| 195 | b.ne loop_16 | ||
| 196 | |||
| 197 | fold_64: | ||
| 198 | /* perform the last 64 bit fold, also adds 32 zeroes | ||
| 199 | * to the input stream */ | ||
| 200 | ext v2.16b, v1.16b, v1.16b, #8 | ||
| 201 | pmull2 v2.1q, v2.2d, vCONSTANT.2d | ||
| 202 | ext v1.16b, v1.16b, vzr.16b, #8 | ||
| 203 | eor v1.16b, v1.16b, v2.16b | ||
| 204 | |||
| 205 | /* final 32-bit fold */ | ||
| 206 | ldr dCONSTANT, [x3, #32] | ||
| 207 | ldr d3, [x3, #40] | ||
| 208 | |||
| 209 | ext v2.16b, v1.16b, vzr.16b, #4 | ||
| 210 | and v1.16b, v1.16b, v3.16b | ||
| 211 | pmull v1.1q, v1.1d, vCONSTANT.1d | ||
| 212 | eor v1.16b, v1.16b, v2.16b | ||
| 213 | |||
| 214 | /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ | ||
| 215 | ldr qCONSTANT, [x3, #48] | ||
| 216 | |||
| 217 | and v2.16b, v1.16b, v3.16b | ||
| 218 | ext v2.16b, vzr.16b, v2.16b, #8 | ||
| 219 | pmull2 v2.1q, v2.2d, vCONSTANT.2d | ||
| 220 | and v2.16b, v2.16b, v3.16b | ||
| 221 | pmull v2.1q, v2.1d, vCONSTANT.1d | ||
| 222 | eor v1.16b, v1.16b, v2.16b | ||
| 223 | mov w0, v1.s[1] | ||
| 224 | |||
| 225 | ret | ||
| 226 | ENDPROC(crc32_pmull_le) | ||
| 227 | ENDPROC(crc32c_pmull_le) | ||
| 228 | |||
| 229 | .macro __crc32, c | ||
| 230 | 0: subs x2, x2, #16 | ||
| 231 | b.mi 8f | ||
| 232 | ldp x3, x4, [x1], #16 | ||
| 233 | CPU_BE( rev x3, x3 ) | ||
| 234 | CPU_BE( rev x4, x4 ) | ||
| 235 | crc32\c\()x w0, w0, x3 | ||
| 236 | crc32\c\()x w0, w0, x4 | ||
| 237 | b.ne 0b | ||
| 238 | ret | ||
| 239 | |||
| 240 | 8: tbz x2, #3, 4f | ||
| 241 | ldr x3, [x1], #8 | ||
| 242 | CPU_BE( rev x3, x3 ) | ||
| 243 | crc32\c\()x w0, w0, x3 | ||
| 244 | 4: tbz x2, #2, 2f | ||
| 245 | ldr w3, [x1], #4 | ||
| 246 | CPU_BE( rev w3, w3 ) | ||
| 247 | crc32\c\()w w0, w0, w3 | ||
| 248 | 2: tbz x2, #1, 1f | ||
| 249 | ldrh w3, [x1], #2 | ||
| 250 | CPU_BE( rev16 w3, w3 ) | ||
| 251 | crc32\c\()h w0, w0, w3 | ||
| 252 | 1: tbz x2, #0, 0f | ||
| 253 | ldrb w3, [x1] | ||
| 254 | crc32\c\()b w0, w0, w3 | ||
| 255 | 0: ret | ||
| 256 | .endm | ||
| 257 | |||
| 258 | .align 5 | ||
| 259 | ENTRY(crc32_armv8_le) | ||
| 260 | __crc32 | ||
| 261 | ENDPROC(crc32_armv8_le) | ||
| 262 | |||
| 263 | .align 5 | ||
| 264 | ENTRY(crc32c_armv8_le) | ||
| 265 | __crc32 c | ||
| 266 | ENDPROC(crc32c_armv8_le) | ||
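
The __crc32 macro above consumes the buffer sixteen bytes at a time and then
tests one bit of the residual length per step (tbz on bits 3, 2, 1, 0), so
every tail size from 0 to 15 is handled without a byte loop. A minimal C
sketch of the same dispatch, with a plain bitwise CRC-32 step standing in
for the crc32x/w/h/b instructions (seed and final-inversion conventions
omitted; crc_step and crc32_sketch are illustrative names, not kernel APIs):

        #include <stddef.h>
        #include <stdint.h>

        /* bitwise CRC-32 (reflected polynomial 0xEDB88320) over n bytes; a
         * stand-in for crc32b/h/w/x, which consume 1/2/4/8 bytes per step */
        static uint32_t crc_step(uint32_t crc, const uint8_t *p, size_t n)
        {
                while (n--) {
                        crc ^= *p++;
                        for (int i = 0; i < 8; i++)
                                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
                }
                return crc;
        }

        /* same control flow as __crc32: 16-byte chunks while they last, then
         * one test per bit of the remaining length (8, 4, 2, 1) */
        static uint32_t crc32_sketch(uint32_t crc, const uint8_t *buf, size_t len)
        {
                for (; len >= 16; len -= 16, buf += 16)
                        crc = crc_step(crc, buf, 16);
                if (len & 8) { crc = crc_step(crc, buf, 8); buf += 8; }
                if (len & 4) { crc = crc_step(crc, buf, 4); buf += 4; }
                if (len & 2) { crc = crc_step(crc, buf, 2); buf += 2; }
                if (len & 1)
                        crc = crc_step(crc, buf, 1);
                return crc;
        }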
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c new file mode 100644 index 000000000000..8594127d5e01 --- /dev/null +++ b/arch/arm64/crypto/crc32-ce-glue.c | |||
| @@ -0,0 +1,212 @@ | |||
| 1 | /* | ||
| 2 | * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/cpufeature.h> | ||
| 12 | #include <linux/crc32.h> | ||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/string.h> | ||
| 17 | |||
| 18 | #include <crypto/internal/hash.h> | ||
| 19 | |||
| 20 | #include <asm/hwcap.h> | ||
| 21 | #include <asm/neon.h> | ||
| 22 | #include <asm/unaligned.h> | ||
| 23 | |||
| 24 | #define PMULL_MIN_LEN 64L /* minimum size of buffer | ||
| 25 | * for crc32_pmull_le */ | ||
| 26 | #define SCALE_F 16L /* size of NEON register */ | ||
| 27 | |||
| 28 | asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc); | ||
| 29 | asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len); | ||
| 30 | |||
| 31 | asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc); | ||
| 32 | asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len); | ||
| 33 | |||
| 34 | static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len); | ||
| 35 | static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len); | ||
| 36 | |||
| 37 | static int crc32_pmull_cra_init(struct crypto_tfm *tfm) | ||
| 38 | { | ||
| 39 | u32 *key = crypto_tfm_ctx(tfm); | ||
| 40 | |||
| 41 | *key = 0; | ||
| 42 | return 0; | ||
| 43 | } | ||
| 44 | |||
| 45 | static int crc32c_pmull_cra_init(struct crypto_tfm *tfm) | ||
| 46 | { | ||
| 47 | u32 *key = crypto_tfm_ctx(tfm); | ||
| 48 | |||
| 49 | *key = ~0; | ||
| 50 | return 0; | ||
| 51 | } | ||
| 52 | |||
| 53 | static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key, | ||
| 54 | unsigned int keylen) | ||
| 55 | { | ||
| 56 | u32 *mctx = crypto_shash_ctx(hash); | ||
| 57 | |||
| 58 | if (keylen != sizeof(u32)) { | ||
| 59 | crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 60 | return -EINVAL; | ||
| 61 | } | ||
| 62 | *mctx = le32_to_cpup((__le32 *)key); | ||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | |||
| 66 | static int crc32_pmull_init(struct shash_desc *desc) | ||
| 67 | { | ||
| 68 | u32 *mctx = crypto_shash_ctx(desc->tfm); | ||
| 69 | u32 *crc = shash_desc_ctx(desc); | ||
| 70 | |||
| 71 | *crc = *mctx; | ||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | static int crc32_pmull_update(struct shash_desc *desc, const u8 *data, | ||
| 76 | unsigned int length) | ||
| 77 | { | ||
| 78 | u32 *crc = shash_desc_ctx(desc); | ||
| 79 | unsigned int l; | ||
| 80 | |||
| 81 | if ((u64)data % SCALE_F) { | ||
| 82 | l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F)); | ||
| 83 | |||
| 84 | *crc = fallback_crc32(*crc, data, l); | ||
| 85 | |||
| 86 | data += l; | ||
| 87 | length -= l; | ||
| 88 | } | ||
| 89 | |||
| 90 | if (length >= PMULL_MIN_LEN) { | ||
| 91 | l = round_down(length, SCALE_F); | ||
| 92 | |||
| 93 | kernel_neon_begin_partial(10); | ||
| 94 | *crc = crc32_pmull_le(data, l, *crc); | ||
| 95 | kernel_neon_end(); | ||
| 96 | |||
| 97 | data += l; | ||
| 98 | length -= l; | ||
| 99 | } | ||
| 100 | |||
| 101 | if (length > 0) | ||
| 102 | *crc = fallback_crc32(*crc, data, length); | ||
| 103 | |||
| 104 | return 0; | ||
| 105 | } | ||
| 106 | |||
| 107 | static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data, | ||
| 108 | unsigned int length) | ||
| 109 | { | ||
| 110 | u32 *crc = shash_desc_ctx(desc); | ||
| 111 | unsigned int l; | ||
| 112 | |||
| 113 | if ((u64)data % SCALE_F) { | ||
| 114 | l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F)); | ||
| 115 | |||
| 116 | *crc = fallback_crc32c(*crc, data, l); | ||
| 117 | |||
| 118 | data += l; | ||
| 119 | length -= l; | ||
| 120 | } | ||
| 121 | |||
| 122 | if (length >= PMULL_MIN_LEN) { | ||
| 123 | l = round_down(length, SCALE_F); | ||
| 124 | |||
| 125 | kernel_neon_begin_partial(10); | ||
| 126 | *crc = crc32c_pmull_le(data, l, *crc); | ||
| 127 | kernel_neon_end(); | ||
| 128 | |||
| 129 | data += l; | ||
| 130 | length -= l; | ||
| 131 | } | ||
| 132 | |||
| 133 | if (length > 0) { | ||
| 134 | *crc = fallback_crc32c(*crc, data, length); | ||
| 135 | } | ||
| 136 | |||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 140 | static int crc32_pmull_final(struct shash_desc *desc, u8 *out) | ||
| 141 | { | ||
| 142 | u32 *crc = shash_desc_ctx(desc); | ||
| 143 | |||
| 144 | put_unaligned_le32(*crc, out); | ||
| 145 | return 0; | ||
| 146 | } | ||
| 147 | |||
| 148 | static int crc32c_pmull_final(struct shash_desc *desc, u8 *out) | ||
| 149 | { | ||
| 150 | u32 *crc = shash_desc_ctx(desc); | ||
| 151 | |||
| 152 | put_unaligned_le32(~*crc, out); | ||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | |||
| 156 | static struct shash_alg crc32_pmull_algs[] = { { | ||
| 157 | .setkey = crc32_pmull_setkey, | ||
| 158 | .init = crc32_pmull_init, | ||
| 159 | .update = crc32_pmull_update, | ||
| 160 | .final = crc32_pmull_final, | ||
| 161 | .descsize = sizeof(u32), | ||
| 162 | .digestsize = sizeof(u32), | ||
| 163 | |||
| 164 | .base.cra_ctxsize = sizeof(u32), | ||
| 165 | .base.cra_init = crc32_pmull_cra_init, | ||
| 166 | .base.cra_name = "crc32", | ||
| 167 | .base.cra_driver_name = "crc32-arm64-ce", | ||
| 168 | .base.cra_priority = 200, | ||
| 169 | .base.cra_blocksize = 1, | ||
| 170 | .base.cra_module = THIS_MODULE, | ||
| 171 | }, { | ||
| 172 | .setkey = crc32_pmull_setkey, | ||
| 173 | .init = crc32_pmull_init, | ||
| 174 | .update = crc32c_pmull_update, | ||
| 175 | .final = crc32c_pmull_final, | ||
| 176 | .descsize = sizeof(u32), | ||
| 177 | .digestsize = sizeof(u32), | ||
| 178 | |||
| 179 | .base.cra_ctxsize = sizeof(u32), | ||
| 180 | .base.cra_init = crc32c_pmull_cra_init, | ||
| 181 | .base.cra_name = "crc32c", | ||
| 182 | .base.cra_driver_name = "crc32c-arm64-ce", | ||
| 183 | .base.cra_priority = 200, | ||
| 184 | .base.cra_blocksize = 1, | ||
| 185 | .base.cra_module = THIS_MODULE, | ||
| 186 | } }; | ||
| 187 | |||
| 188 | static int __init crc32_pmull_mod_init(void) | ||
| 189 | { | ||
| 190 | if (elf_hwcap & HWCAP_CRC32) { | ||
| 191 | fallback_crc32 = crc32_armv8_le; | ||
| 192 | fallback_crc32c = crc32c_armv8_le; | ||
| 193 | } else { | ||
| 194 | fallback_crc32 = crc32_le; | ||
| 195 | fallback_crc32c = __crc32c_le; | ||
| 196 | } | ||
| 197 | |||
| 198 | return crypto_register_shashes(crc32_pmull_algs, | ||
| 199 | ARRAY_SIZE(crc32_pmull_algs)); | ||
| 200 | } | ||
| 201 | |||
| 202 | static void __exit crc32_pmull_mod_exit(void) | ||
| 203 | { | ||
| 204 | crypto_unregister_shashes(crc32_pmull_algs, | ||
| 205 | ARRAY_SIZE(crc32_pmull_algs)); | ||
| 206 | } | ||
| 207 | |||
| 208 | module_cpu_feature_match(PMULL, crc32_pmull_mod_init); | ||
| 209 | module_exit(crc32_pmull_mod_exit); | ||
| 210 | |||
| 211 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
| 212 | MODULE_LICENSE("GPL v2"); | ||
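
Since the driver only registers shash transforms, kernel code reaches it
through the generic crypto API rather than by calling crc32_pmull_le()
directly. A minimal sketch of a one-shot digest over the registered "crc32"
algorithm (crc32_oneshot is an illustrative name; the seed travels as a
4-byte little-endian "key" per crc32_pmull_setkey(), so a big-endian caller
would cpu_to_le32() it first):

        #include <crypto/hash.h>
        #include <linux/err.h>

        static int crc32_oneshot(const u8 *data, unsigned int len, u32 seed,
                                 u8 out[4])
        {
                struct crypto_shash *tfm;
                int ret;

                /* resolves to the highest-priority "crc32" implementation */
                tfm = crypto_alloc_shash("crc32", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                ret = crypto_shash_setkey(tfm, (u8 *)&seed, sizeof(seed));
                if (!ret) {
                        SHASH_DESC_ON_STACK(desc, tfm);

                        desc->tfm = tfm;
                        desc->flags = 0;
                        ret = crypto_shash_digest(desc, data, len, out);
                }

                crypto_free_shash(tfm);
                return ret;
        }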
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S new file mode 100644 index 000000000000..d5b5a8c038c8 --- /dev/null +++ b/arch/arm64/crypto/crct10dif-ce-core.S | |||
| @@ -0,0 +1,392 @@ | |||
| 1 | // | ||
| 2 | // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions | ||
| 3 | // | ||
| 4 | // Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | // | ||
| 6 | // This program is free software; you can redistribute it and/or modify | ||
| 7 | // it under the terms of the GNU General Public License version 2 as | ||
| 8 | // published by the Free Software Foundation. | ||
| 9 | // | ||
| 10 | |||
| 11 | // | ||
| 12 | // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions | ||
| 13 | // | ||
| 14 | // Copyright (c) 2013, Intel Corporation | ||
| 15 | // | ||
| 16 | // Authors: | ||
| 17 | // Erdinc Ozturk <erdinc.ozturk@intel.com> | ||
| 18 | // Vinodh Gopal <vinodh.gopal@intel.com> | ||
| 19 | // James Guilford <james.guilford@intel.com> | ||
| 20 | // Tim Chen <tim.c.chen@linux.intel.com> | ||
| 21 | // | ||
| 22 | // This software is available to you under a choice of one of two | ||
| 23 | // licenses. You may choose to be licensed under the terms of the GNU | ||
| 24 | // General Public License (GPL) Version 2, available from the file | ||
| 25 | // COPYING in the main directory of this source tree, or the | ||
| 26 | // OpenIB.org BSD license below: | ||
| 27 | // | ||
| 28 | // Redistribution and use in source and binary forms, with or without | ||
| 29 | // modification, are permitted provided that the following conditions are | ||
| 30 | // met: | ||
| 31 | // | ||
| 32 | // * Redistributions of source code must retain the above copyright | ||
| 33 | // notice, this list of conditions and the following disclaimer. | ||
| 34 | // | ||
| 35 | // * Redistributions in binary form must reproduce the above copyright | ||
| 36 | // notice, this list of conditions and the following disclaimer in the | ||
| 37 | // documentation and/or other materials provided with the | ||
| 38 | // distribution. | ||
| 39 | // | ||
| 40 | // * Neither the name of the Intel Corporation nor the names of its | ||
| 41 | // contributors may be used to endorse or promote products derived from | ||
| 42 | // this software without specific prior written permission. | ||
| 43 | // | ||
| 44 | // | ||
| 45 | // THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY | ||
| 46 | // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 47 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | ||
| 48 | // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR | ||
| 49 | // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | ||
| 50 | // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | ||
| 51 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | ||
| 52 | // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
| 53 | // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
| 54 | // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 55 | // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 56 | // | ||
| 57 | // Function API: | ||
| 58 | // UINT16 crc_t10dif_pmull( | ||
| 59 | // UINT16 init_crc, //initial CRC value, 16 bits | ||
| 60 | // const unsigned char *buf, //buffer pointer to calculate CRC on | ||
| 61 | // UINT64 len //buffer length in bytes (64-bit data) | ||
| 62 | // ); | ||
| 63 | // | ||
| 64 | // Reference paper titled "Fast CRC Computation for Generic | ||
| 65 | // Polynomials Using PCLMULQDQ Instruction" | ||
| 66 | // URL: http://www.intel.com/content/dam/www/public/us/en/documents | ||
| 67 | // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf | ||
| 68 | // | ||
| 69 | // | ||
| 70 | |||
| 71 | #include <linux/linkage.h> | ||
| 72 | #include <asm/assembler.h> | ||
| 73 | |||
| 74 | .text | ||
| 75 | .cpu generic+crypto | ||
| 76 | |||
| 77 | arg1_low32 .req w0 | ||
| 78 | arg2 .req x1 | ||
| 79 | arg3 .req x2 | ||
| 80 | |||
| 81 | vzr .req v13 | ||
| 82 | |||
| 83 | ENTRY(crc_t10dif_pmull) | ||
| 84 | movi vzr.16b, #0 // init zero register | ||
| 85 | |||
| 86 | // adjust the 16-bit initial_crc value, scale it to 32 bits | ||
| 87 | lsl arg1_low32, arg1_low32, #16 | ||
| 88 | |||
| 89 | // check if smaller than 256 | ||
| 90 | cmp arg3, #256 | ||
| 91 | |||
| 92 | // for sizes less than 256, we can't fold 128B at a time... | ||
| 93 | b.lt _less_than_128 | ||
| 94 | |||
| 95 | // load the initial crc value | ||
| 96 | // crc value does not need to be byte-reflected, but it needs | ||
| 97 | // to be moved to the high part of the register, because | ||
| 98 | // the data will be byte-reflected and will align with the | ||
| 99 | // initial crc in the correct place. | ||
| 100 | movi v10.16b, #0 | ||
| 101 | mov v10.s[3], arg1_low32 // initial crc | ||
| 102 | |||
| 103 | // receive the initial 64B data, xor the initial crc value | ||
| 104 | ldp q0, q1, [arg2] | ||
| 105 | ldp q2, q3, [arg2, #0x20] | ||
| 106 | ldp q4, q5, [arg2, #0x40] | ||
| 107 | ldp q6, q7, [arg2, #0x60] | ||
| 108 | add arg2, arg2, #0x80 | ||
| 109 | |||
| 110 | CPU_LE( rev64 v0.16b, v0.16b ) | ||
| 111 | CPU_LE( rev64 v1.16b, v1.16b ) | ||
| 112 | CPU_LE( rev64 v2.16b, v2.16b ) | ||
| 113 | CPU_LE( rev64 v3.16b, v3.16b ) | ||
| 114 | CPU_LE( rev64 v4.16b, v4.16b ) | ||
| 115 | CPU_LE( rev64 v5.16b, v5.16b ) | ||
| 116 | CPU_LE( rev64 v6.16b, v6.16b ) | ||
| 117 | CPU_LE( rev64 v7.16b, v7.16b ) | ||
| 118 | |||
| 119 | CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) | ||
| 120 | CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) | ||
| 121 | CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 ) | ||
| 122 | CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 ) | ||
| 123 | CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 ) | ||
| 124 | CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 ) | ||
| 125 | CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 ) | ||
| 126 | CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) | ||
| 127 | |||
| 128 | // XOR the initial_crc value | ||
| 129 | eor v0.16b, v0.16b, v10.16b | ||
| 130 | |||
| 131 | ldr q10, rk3 // q10 has rk3 and rk4 | ||
| 132 | // type of pmull instruction | ||
| 133 | // will determine which constant to use | ||
| 134 | |||
| 135 | // | ||
| 136 | // we subtract 256 instead of 128 to save one instruction from the loop | ||
| 137 | // | ||
| 138 | sub arg3, arg3, #256 | ||
| 139 | |||
| 140 | // at this section of the code, there are 128*x+y (0<=y<128) bytes of | ||
| 141 | // buffer. The _fold_64_B_loop will fold 128B at a time | ||
| 142 | // until we have 128+y bytes of buffer | ||
| 143 | |||
| 144 | |||
| 145 | // fold 128B at a time. This section of the code folds 8 vector | ||
| 146 | // registers in parallel | ||
| 147 | _fold_64_B_loop: | ||
| 148 | |||
| 149 | .macro fold64, reg1, reg2 | ||
| 150 | ldp q11, q12, [arg2], #0x20 | ||
| 151 | |||
| 152 | pmull2 v8.1q, \reg1\().2d, v10.2d | ||
| 153 | pmull \reg1\().1q, \reg1\().1d, v10.1d | ||
| 154 | |||
| 155 | CPU_LE( rev64 v11.16b, v11.16b ) | ||
| 156 | CPU_LE( rev64 v12.16b, v12.16b ) | ||
| 157 | |||
| 158 | pmull2 v9.1q, \reg2\().2d, v10.2d | ||
| 159 | pmull \reg2\().1q, \reg2\().1d, v10.1d | ||
| 160 | |||
| 161 | CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) | ||
| 162 | CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) | ||
| 163 | |||
| 164 | eor \reg1\().16b, \reg1\().16b, v8.16b | ||
| 165 | eor \reg2\().16b, \reg2\().16b, v9.16b | ||
| 166 | eor \reg1\().16b, \reg1\().16b, v11.16b | ||
| 167 | eor \reg2\().16b, \reg2\().16b, v12.16b | ||
| 168 | .endm | ||
| 169 | |||
| 170 | fold64 v0, v1 | ||
| 171 | fold64 v2, v3 | ||
| 172 | fold64 v4, v5 | ||
| 173 | fold64 v6, v7 | ||
| 174 | |||
| 175 | subs arg3, arg3, #128 | ||
| 176 | |||
| 177 | // check if there is another 64B in the buffer to be able to fold | ||
| 178 | b.ge _fold_64_B_loop | ||
| 179 | |||
| 180 | // at this point, the buffer pointer is pointing at the last y bytes | ||
| 181 | // of the buffer; the 128B of folded data is in 8 of the vector | ||
| 182 | // registers: v0-v7 | ||
| 183 | |||
| 184 | // fold the 8 vector registers to 1 vector register with different | ||
| 185 | // constants | ||
| 186 | |||
| 187 | ldr q10, rk9 | ||
| 188 | |||
| 189 | .macro fold16, reg, rk | ||
| 190 | pmull v8.1q, \reg\().1d, v10.1d | ||
| 191 | pmull2 \reg\().1q, \reg\().2d, v10.2d | ||
| 192 | .ifnb \rk | ||
| 193 | ldr q10, \rk | ||
| 194 | .endif | ||
| 195 | eor v7.16b, v7.16b, v8.16b | ||
| 196 | eor v7.16b, v7.16b, \reg\().16b | ||
| 197 | .endm | ||
| 198 | |||
| 199 | fold16 v0, rk11 | ||
| 200 | fold16 v1, rk13 | ||
| 201 | fold16 v2, rk15 | ||
| 202 | fold16 v3, rk17 | ||
| 203 | fold16 v4, rk19 | ||
| 204 | fold16 v5, rk1 | ||
| 205 | fold16 v6 | ||
| 206 | |||
| 207 | // instead of 128, we add 112 (128-16) to the loop counter to save | ||
| 208 | // one instruction from the loop; instead of a cmp instruction, we | ||
| 209 | // use the negative flag with the b.lt instruction | ||
| 210 | adds arg3, arg3, #(128-16) | ||
| 211 | b.lt _final_reduction_for_128 | ||
| 212 | |||
| 213 | // now we have 16+y bytes left to reduce. 16 bytes are in register v7 | ||
| 214 | // and the rest is in memory. We can fold 16 bytes at a time if y>=16, | ||
| 215 | // continuing to fold 16B at a time | ||
| 216 | |||
| 217 | _16B_reduction_loop: | ||
| 218 | pmull v8.1q, v7.1d, v10.1d | ||
| 219 | pmull2 v7.1q, v7.2d, v10.2d | ||
| 220 | eor v7.16b, v7.16b, v8.16b | ||
| 221 | |||
| 222 | ldr q0, [arg2], #16 | ||
| 223 | CPU_LE( rev64 v0.16b, v0.16b ) | ||
| 224 | CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) | ||
| 225 | eor v7.16b, v7.16b, v0.16b | ||
| 226 | subs arg3, arg3, #16 | ||
| 227 | |||
| 228 | // instead of a cmp instruction, we utilize the flags with the | ||
| 229 | // b.ge instruction; equivalent of: cmp arg3, 16-16 | ||
| 230 | // check if there is any more 16B in the buffer to be able to fold | ||
| 231 | b.ge _16B_reduction_loop | ||
| 232 | |||
| 233 | // now we have 16+z bytes left to reduce, where 0<= z < 16. | ||
| 234 | // first, we reduce the data in the v7 register | ||
| 235 | |||
| 236 | _final_reduction_for_128: | ||
| 237 | // check if any more data to fold. If not, compute the CRC of | ||
| 238 | // the final 128 bits | ||
| 239 | adds arg3, arg3, #16 | ||
| 240 | b.eq _128_done | ||
| 241 | |||
| 242 | // here we are getting data that is less than 16 bytes. | ||
| 243 | // since we know that there was data before the pointer, we can | ||
| 244 | // offset the input pointer before the actual point, to receive | ||
| 245 | // exactly 16 bytes. after that the registers need to be adjusted. | ||
| 246 | _get_last_two_regs: | ||
| 247 | add arg2, arg2, arg3 | ||
| 248 | ldr q1, [arg2, #-16] | ||
| 249 | CPU_LE( rev64 v1.16b, v1.16b ) | ||
| 250 | CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) | ||
| 251 | |||
| 252 | // get rid of the extra data that was loaded before | ||
| 253 | // load the shift constant | ||
| 254 | adr x4, tbl_shf_table + 16 | ||
| 255 | sub x4, x4, arg3 | ||
| 256 | ld1 {v0.16b}, [x4] | ||
| 257 | |||
| 258 | // shift v2 to the left by arg3 bytes | ||
| 259 | tbl v2.16b, {v7.16b}, v0.16b | ||
| 260 | |||
| 261 | // shift v7 to the right by 16-arg3 bytes | ||
| 262 | movi v9.16b, #0x80 | ||
| 263 | eor v0.16b, v0.16b, v9.16b | ||
| 264 | tbl v7.16b, {v7.16b}, v0.16b | ||
| 265 | |||
| 266 | // blend | ||
| 267 | sshr v0.16b, v0.16b, #7 // convert to 8-bit mask | ||
| 268 | bsl v0.16b, v2.16b, v1.16b | ||
| 269 | |||
| 270 | // fold 16 Bytes | ||
| 271 | pmull v8.1q, v7.1d, v10.1d | ||
| 272 | pmull2 v7.1q, v7.2d, v10.2d | ||
| 273 | eor v7.16b, v7.16b, v8.16b | ||
| 274 | eor v7.16b, v7.16b, v0.16b | ||
| 275 | |||
| 276 | _128_done: | ||
| 277 | // compute crc of a 128-bit value | ||
| 278 | ldr q10, rk5 // rk5 and rk6 in q10 | ||
| 279 | |||
| 280 | // 64b fold | ||
| 281 | ext v0.16b, vzr.16b, v7.16b, #8 | ||
| 282 | mov v7.d[0], v7.d[1] | ||
| 283 | pmull v7.1q, v7.1d, v10.1d | ||
| 284 | eor v7.16b, v7.16b, v0.16b | ||
| 285 | |||
| 286 | // 32b fold | ||
| 287 | ext v0.16b, v7.16b, vzr.16b, #4 | ||
| 288 | mov v7.s[3], vzr.s[0] | ||
| 289 | pmull2 v0.1q, v0.2d, v10.2d | ||
| 290 | eor v7.16b, v7.16b, v0.16b | ||
| 291 | |||
| 292 | // barrett reduction | ||
| 293 | _barrett: | ||
| 294 | ldr q10, rk7 | ||
| 295 | mov v0.d[0], v7.d[1] | ||
| 296 | |||
| 297 | pmull v0.1q, v0.1d, v10.1d | ||
| 298 | ext v0.16b, vzr.16b, v0.16b, #12 | ||
| 299 | pmull2 v0.1q, v0.2d, v10.2d | ||
| 300 | ext v0.16b, vzr.16b, v0.16b, #12 | ||
| 301 | eor v7.16b, v7.16b, v0.16b | ||
| 302 | mov w0, v7.s[1] | ||
| 303 | |||
| 304 | _cleanup: | ||
| 305 | // scale the result back to 16 bits | ||
| 306 | lsr x0, x0, #16 | ||
| 307 | ret | ||
| 308 | |||
| 309 | _less_than_128: | ||
| 310 | cbz arg3, _cleanup | ||
| 311 | |||
| 312 | movi v0.16b, #0 | ||
| 313 | mov v0.s[3], arg1_low32 // get the initial crc value | ||
| 314 | |||
| 315 | ldr q7, [arg2], #0x10 | ||
| 316 | CPU_LE( rev64 v7.16b, v7.16b ) | ||
| 317 | CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) | ||
| 318 | eor v7.16b, v7.16b, v0.16b // xor the initial crc value | ||
| 319 | |||
| 320 | cmp arg3, #16 | ||
| 321 | b.eq _128_done // exactly 16 left | ||
| 322 | b.lt _less_than_16_left | ||
| 323 | |||
| 324 | ldr q10, rk1 // rk1 and rk2 in q10 | ||
| 325 | |||
| 326 | // update the counter. subtract 32 instead of 16 to save one | ||
| 327 | // instruction from the loop | ||
| 328 | subs arg3, arg3, #32 | ||
| 329 | b.ge _16B_reduction_loop | ||
| 330 | |||
| 331 | add arg3, arg3, #16 | ||
| 332 | b _get_last_two_regs | ||
| 333 | |||
| 334 | _less_than_16_left: | ||
| 335 | // index tbl_shf_table with (16 - arg3) | ||
| 336 | adr x0, tbl_shf_table + 16 | ||
| 337 | sub x0, x0, arg3 | ||
| 338 | ld1 {v0.16b}, [x0] | ||
| 339 | movi v9.16b, #0x80 | ||
| 340 | eor v0.16b, v0.16b, v9.16b | ||
| 341 | tbl v7.16b, {v7.16b}, v0.16b | ||
| 342 | b _128_done | ||
| 343 | ENDPROC(crc_t10dif_pmull) | ||
| 344 | |||
| 345 | // precomputed constants | ||
| 346 | // these constants are precomputed from the poly: | ||
| 347 | // 0x8bb70000 (0x8bb7 scaled to 32 bits) | ||
| 348 | .align 4 | ||
| 349 | // Q = 0x18BB70000 | ||
| 350 | // rk1 = 2^(32*3) mod Q << 32 | ||
| 351 | // rk2 = 2^(32*5) mod Q << 32 | ||
| 352 | // rk3 = 2^(32*15) mod Q << 32 | ||
| 353 | // rk4 = 2^(32*17) mod Q << 32 | ||
| 354 | // rk5 = 2^(32*3) mod Q << 32 | ||
| 355 | // rk6 = 2^(32*2) mod Q << 32 | ||
| 356 | // rk7 = floor(2^64/Q) | ||
| 357 | // rk8 = Q | ||
| 358 | |||
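All of the rk values follow one identity: reduction in GF(2)[x] commutes
with splitting the message, so the high half of the working state can be
pre-multiplied by a fixed power of x instead of being carried at full
width. Writing the message polynomial as

        M(x) = A(x) \cdot x^{T} + B(x)

gives

        M(x) \bmod Q(x) = \big( A(x) \cdot (x^{T} \bmod Q(x)) + B(x) \big) \bmod Q(x)

so each fold step pmull-multiplies the older lanes by a precomputed
x^T mod Q (the rk constants below, for the various fold distances T),
XORing the product into fresher data, while rk7 = floor(2^64 / Q) and
rk8 = Q feed the final Barrett reduction.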
| 359 | rk1: .octa 0x06df0000000000002d56000000000000 | ||
| 360 | rk3: .octa 0x7cf50000000000009d9d000000000000 | ||
| 361 | rk5: .octa 0x13680000000000002d56000000000000 | ||
| 362 | rk7: .octa 0x000000018bb7000000000001f65a57f8 | ||
| 363 | rk9: .octa 0xbfd6000000000000ceae000000000000 | ||
| 364 | rk11: .octa 0x713c0000000000001e16000000000000 | ||
| 365 | rk13: .octa 0x80a6000000000000f7f9000000000000 | ||
| 366 | rk15: .octa 0xe658000000000000044c000000000000 | ||
| 367 | rk17: .octa 0xa497000000000000ad18000000000000 | ||
| 368 | rk19: .octa 0xe7b50000000000006ee3000000000000 | ||
| 369 | |||
| 370 | tbl_shf_table: | ||
| 371 | // use these values for shift constants for the tbl/tbx instruction | ||
| 372 | // different alignments result in values as shown: | ||
| 373 | // DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 | ||
| 374 | // DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2 | ||
| 375 | // DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3 | ||
| 376 | // DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 | ||
| 377 | // DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 | ||
| 378 | // DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 | ||
| 379 | // DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 | ||
| 380 | // DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 | ||
| 381 | // DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 | ||
| 382 | // DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 | ||
| 383 | // DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 | ||
| 384 | // DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 | ||
| 385 | // DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 | ||
| 386 | // DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 | ||
| 387 | // DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 | ||
| 388 | |||
| 389 | .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 | ||
| 390 | .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f | ||
| 391 | .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 | ||
| 392 | .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0 | ||
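
tbl_shf_table exists so that a 1..15 byte tail never needs a byte loop:
_get_last_two_regs backs the pointer up, loads one full 16-byte vector that
overlaps data already processed, and uses tbl to discard the stale lanes
(indices the table maps out of range select zero). A rough C model of just
the load-and-select part, under the same precondition that at least 16 bytes
precede the tail (the real code additionally blends the shifted v7 state
into the vacated lanes; last_partial_block is an illustrative name):

        #include <stddef.h>
        #include <stdint.h>
        #include <string.h>

        /* tail points at the remaining bytes, left is in [1, 15] */
        static void last_partial_block(const uint8_t *tail, size_t left,
                                       uint8_t block[16])
        {
                uint8_t raw[16];

                /* one wide, possibly overlapping load: the equivalent of
                 * add arg2, arg2, arg3 followed by ldr q1, [arg2, #-16] */
                memcpy(raw, tail + left - 16, 16);

                /* keep only the 'left' fresh bytes, dropping the lanes the
                 * assembly masks out via tbl_shf_table */
                memset(block, 0, 16);
                memcpy(block, raw + 16 - left, left);
        }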
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c new file mode 100644 index 000000000000..60cb590c2590 --- /dev/null +++ b/arch/arm64/crypto/crct10dif-ce-glue.c | |||
| @@ -0,0 +1,95 @@ | |||
| 1 | /* | ||
| 2 | * Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/cpufeature.h> | ||
| 12 | #include <linux/crc-t10dif.h> | ||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/string.h> | ||
| 17 | |||
| 18 | #include <crypto/internal/hash.h> | ||
| 19 | |||
| 20 | #include <asm/neon.h> | ||
| 21 | |||
| 22 | #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U | ||
| 23 | |||
| 24 | asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len); | ||
| 25 | |||
| 26 | static int crct10dif_init(struct shash_desc *desc) | ||
| 27 | { | ||
| 28 | u16 *crc = shash_desc_ctx(desc); | ||
| 29 | |||
| 30 | *crc = 0; | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static int crct10dif_update(struct shash_desc *desc, const u8 *data, | ||
| 35 | unsigned int length) | ||
| 36 | { | ||
| 37 | u16 *crc = shash_desc_ctx(desc); | ||
| 38 | unsigned int l; | ||
| 39 | |||
| 40 | if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { | ||
| 41 | l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - | ||
| 42 | ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); | ||
| 43 | |||
| 44 | *crc = crc_t10dif_generic(*crc, data, l); | ||
| 45 | |||
| 46 | length -= l; | ||
| 47 | data += l; | ||
| 48 | } | ||
| 49 | |||
| 50 | if (length > 0) { | ||
| 51 | kernel_neon_begin_partial(14); | ||
| 52 | *crc = crc_t10dif_pmull(*crc, data, length); | ||
| 53 | kernel_neon_end(); | ||
| 54 | } | ||
| 55 | |||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | static int crct10dif_final(struct shash_desc *desc, u8 *out) | ||
| 60 | { | ||
| 61 | u16 *crc = shash_desc_ctx(desc); | ||
| 62 | |||
| 63 | *(u16 *)out = *crc; | ||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | |||
| 67 | static struct shash_alg crc_t10dif_alg = { | ||
| 68 | .digestsize = CRC_T10DIF_DIGEST_SIZE, | ||
| 69 | .init = crct10dif_init, | ||
| 70 | .update = crct10dif_update, | ||
| 71 | .final = crct10dif_final, | ||
| 72 | .descsize = CRC_T10DIF_DIGEST_SIZE, | ||
| 73 | |||
| 74 | .base.cra_name = "crct10dif", | ||
| 75 | .base.cra_driver_name = "crct10dif-arm64-ce", | ||
| 76 | .base.cra_priority = 200, | ||
| 77 | .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, | ||
| 78 | .base.cra_module = THIS_MODULE, | ||
| 79 | }; | ||
| 80 | |||
| 81 | static int __init crc_t10dif_mod_init(void) | ||
| 82 | { | ||
| 83 | return crypto_register_shash(&crc_t10dif_alg); | ||
| 84 | } | ||
| 85 | |||
| 86 | static void __exit crc_t10dif_mod_exit(void) | ||
| 87 | { | ||
| 88 | crypto_unregister_shash(&crc_t10dif_alg); | ||
| 89 | } | ||
| 90 | |||
| 91 | module_cpu_feature_match(PMULL, crc_t10dif_mod_init); | ||
| 92 | module_exit(crc_t10dif_mod_exit); | ||
| 93 | |||
| 94 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
| 95 | MODULE_LICENSE("GPL v2"); | ||
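
As with the CRC32 driver, nothing is expected to call crc_t10dif_pmull()
directly: registering a higher-priority "crct10dif" shash is enough, because
the crc_t10dif() library helper resolves through the crypto API and will
transparently land on crct10dif-arm64-ce once this module is loaded. A
sketch of a typical caller (dif_guard is an illustrative name; the SCSI DIF
path does essentially this per sector):

        #include <linux/crc-t10dif.h>

        static u16 dif_guard(const u8 *buf, size_t len)
        {
                return crc_t10dif(buf, len);
        }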
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S index dc457015884e..f0bb9f0b524f 100644 --- a/arch/arm64/crypto/ghash-ce-core.S +++ b/arch/arm64/crypto/ghash-ce-core.S | |||
| @@ -29,8 +29,8 @@ | |||
| 29 | * struct ghash_key const *k, const char *head) | 29 | * struct ghash_key const *k, const char *head) |
| 30 | */ | 30 | */ |
| 31 | ENTRY(pmull_ghash_update) | 31 | ENTRY(pmull_ghash_update) |
| 32 | ld1 {SHASH.16b}, [x3] | 32 | ld1 {SHASH.2d}, [x3] |
| 33 | ld1 {XL.16b}, [x1] | 33 | ld1 {XL.2d}, [x1] |
| 34 | movi MASK.16b, #0xe1 | 34 | movi MASK.16b, #0xe1 |
| 35 | ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 | 35 | ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 |
| 36 | shl MASK.2d, MASK.2d, #57 | 36 | shl MASK.2d, MASK.2d, #57 |
| @@ -74,6 +74,6 @@ CPU_LE( rev64 T1.16b, T1.16b ) | |||
| 74 | 74 | ||
| 75 | cbnz w0, 0b | 75 | cbnz w0, 0b |
| 76 | 76 | ||
| 77 | st1 {XL.16b}, [x1] | 77 | st1 {XL.2d}, [x1] |
| 78 | ret | 78 | ret |
| 79 | ENDPROC(pmull_ghash_update) | 79 | ENDPROC(pmull_ghash_update) |
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S index 033aae6d732a..c98e7e849f06 100644 --- a/arch/arm64/crypto/sha1-ce-core.S +++ b/arch/arm64/crypto/sha1-ce-core.S | |||
| @@ -78,7 +78,7 @@ ENTRY(sha1_ce_transform) | |||
| 78 | ld1r {k3.4s}, [x6] | 78 | ld1r {k3.4s}, [x6] |
| 79 | 79 | ||
| 80 | /* load state */ | 80 | /* load state */ |
| 81 | ldr dga, [x0] | 81 | ld1 {dgav.4s}, [x0] |
| 82 | ldr dgb, [x0, #16] | 82 | ldr dgb, [x0, #16] |
| 83 | 83 | ||
| 84 | /* load sha1_ce_state::finalize */ | 84 | /* load sha1_ce_state::finalize */ |
| @@ -144,7 +144,7 @@ CPU_LE( rev32 v11.16b, v11.16b ) | |||
| 144 | b 1b | 144 | b 1b |
| 145 | 145 | ||
| 146 | /* store new state */ | 146 | /* store new state */ |
| 147 | 3: str dga, [x0] | 147 | 3: st1 {dgav.4s}, [x0] |
| 148 | str dgb, [x0, #16] | 148 | str dgb, [x0, #16] |
| 149 | ret | 149 | ret |
| 150 | ENDPROC(sha1_ce_transform) | 150 | ENDPROC(sha1_ce_transform) |
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S index 5df9d9d470ad..01cfee066837 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/crypto/sha2-ce-core.S | |||
| @@ -85,7 +85,7 @@ ENTRY(sha2_ce_transform) | |||
| 85 | ld1 {v12.4s-v15.4s}, [x8] | 85 | ld1 {v12.4s-v15.4s}, [x8] |
| 86 | 86 | ||
| 87 | /* load state */ | 87 | /* load state */ |
| 88 | ldp dga, dgb, [x0] | 88 | ld1 {dgav.4s, dgbv.4s}, [x0] |
| 89 | 89 | ||
| 90 | /* load sha256_ce_state::finalize */ | 90 | /* load sha256_ce_state::finalize */ |
| 91 | ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize] | 91 | ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize] |
| @@ -148,6 +148,6 @@ CPU_LE( rev32 v19.16b, v19.16b ) | |||
| 148 | b 1b | 148 | b 1b |
| 149 | 149 | ||
| 150 | /* store new state */ | 150 | /* store new state */ |
| 151 | 3: stp dga, dgb, [x0] | 151 | 3: st1 {dgav.4s, dgbv.4s}, [x0] |
| 152 | ret | 152 | ret |
| 153 | ENDPROC(sha2_ce_transform) | 153 | ENDPROC(sha2_ce_transform) |
diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped new file mode 100644 index 000000000000..3ce82cc860bc --- /dev/null +++ b/arch/arm64/crypto/sha256-core.S_shipped | |||
| @@ -0,0 +1,2061 @@ | |||
| 1 | // Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. | ||
| 2 | // | ||
| 3 | // Licensed under the OpenSSL license (the "License"). You may not use | ||
| 4 | // this file except in compliance with the License. You can obtain a copy | ||
| 5 | // in the file LICENSE in the source distribution or at | ||
| 6 | // https://www.openssl.org/source/license.html | ||
| 7 | |||
| 8 | // ==================================================================== | ||
| 9 | // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
| 10 | // project. The module is, however, dual licensed under OpenSSL and | ||
| 11 | // CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 12 | // details see http://www.openssl.org/~appro/cryptogams/. | ||
| 13 | // | ||
| 14 | // Permission to use under GPLv2 terms is granted. | ||
| 15 | // ==================================================================== | ||
| 16 | // | ||
| 17 | // SHA256/512 for ARMv8. | ||
| 18 | // | ||
| 19 | // Performance in cycles per processed byte and improvement coefficient | ||
| 20 | // over code generated with "default" compiler: | ||
| 21 | // | ||
| 22 | // SHA256-hw SHA256(*) SHA512 | ||
| 23 | // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) | ||
| 24 | // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) | ||
| 25 | // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) | ||
| 26 | // Denver 2.01 10.5 (+26%) 6.70 (+8%) | ||
| 27 | // X-Gene 20.0 (+100%) 12.8 (+300%(***)) | ||
| 28 | // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) | ||
| 29 | // | ||
| 30 | // (*) Software SHA256 results are of lesser relevance, presented | ||
| 31 | // mostly for informational purposes. | ||
| 32 | // (**) The result is a trade-off: it's possible to improve it by | ||
| 33 | // 10% (or by 1 cycle per round), but at the cost of 20% loss | ||
| 34 | // on Cortex-A53 (or by 4 cycles per round). | ||
| 35 | // (***) Super-impressive coefficients over gcc-generated code are | ||
| 36 | // indication of some compiler "pathology", most notably code | ||
| 37 | // generated with -mgeneral-regs-only is significantly faster | ||
| 38 | // and the gap is only 40-90%. | ||
| 39 | // | ||
| 40 | // October 2016. | ||
| 41 | // | ||
| 42 | // Originally it was reckoned that it makes no sense to implement NEON | ||
| 43 | // version of SHA256 for 64-bit processors. This is because performance | ||
| 44 | // improvement on most wide-spread Cortex-A5x processors was observed | ||
| 45 | // to be marginal, same on Cortex-A53 and ~10% on A57. But then it was | ||
| 46 | // observed that 32-bit NEON SHA256 performs significantly better than | ||
| 47 | // 64-bit scalar version on *some* of the more recent processors. As a | ||
| 48 | // result, a 64-bit NEON version of SHA256 was added to provide the best | ||
| 49 | // all-round performance. For example it executes ~30% faster on X-Gene | ||
| 50 | // and Mongoose. [For reference, NEON version of SHA512 is bound to | ||
| 51 | // deliver much less improvement, likely *negative* on Cortex-A5x. | ||
| 52 | // Which is why NEON support is limited to SHA256.] | ||
| 53 | |||
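The scalar code below is a fully unrolled rendering of the standard FIPS
180-4 SHA-256 round; each block of roughly twenty instructions, annotated
with Ch/Maj/Sigma comments, computes one step of the following recurrence.
A compact C sketch of a single round for reference (sha256_round and ror32
are illustrative helpers, not part of this file):

        #include <stdint.h>

        static inline uint32_t ror32(uint32_t x, int n)
        {
                return (x >> n) | (x << (32 - n));
        }

        /* one SHA-256 round over state s[0..7] = a..h, with round
         * constant k and (already schedule-expanded) message word w */
        static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
        {
                uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
                uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

                uint32_t S1  = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25); /* Sigma1(e) */
                uint32_t ch  = (e & f) ^ (~e & g);                        /* Ch(e,f,g) */
                uint32_t t1  = h + S1 + ch + k + w;
                uint32_t S0  = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22); /* Sigma0(a) */
                uint32_t maj = (a & b) ^ (a & c) ^ (b & c);               /* Maj(a,b,c) */
                uint32_t t2  = S0 + maj;

                s[7] = g; s[6] = f; s[5] = e;
                s[4] = d + t1;          /* the listing's "d+=h" */
                s[3] = c; s[2] = b; s[1] = a;
                s[0] = t1 + t2;         /* the listing's h += Sigma1+Ch+K+X, +Sigma0+Maj */
        }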
| 54 | #ifndef __KERNEL__ | ||
| 55 | # include "arm_arch.h" | ||
| 56 | #endif | ||
| 57 | |||
| 58 | .text | ||
| 59 | |||
| 60 | .extern OPENSSL_armcap_P | ||
| 61 | .globl sha256_block_data_order | ||
| 62 | .type sha256_block_data_order,%function | ||
| 63 | .align 6 | ||
| 64 | sha256_block_data_order: | ||
| 65 | #ifndef __KERNEL__ | ||
| 66 | # ifdef __ILP32__ | ||
| 67 | ldrsw x16,.LOPENSSL_armcap_P | ||
| 68 | # else | ||
| 69 | ldr x16,.LOPENSSL_armcap_P | ||
| 70 | # endif | ||
| 71 | adr x17,.LOPENSSL_armcap_P | ||
| 72 | add x16,x16,x17 | ||
| 73 | ldr w16,[x16] | ||
| 74 | tst w16,#ARMV8_SHA256 | ||
| 75 | b.ne .Lv8_entry | ||
| 76 | tst w16,#ARMV7_NEON | ||
| 77 | b.ne .Lneon_entry | ||
| 78 | #endif | ||
| 79 | stp x29,x30,[sp,#-128]! | ||
| 80 | add x29,sp,#0 | ||
| 81 | |||
| 82 | stp x19,x20,[sp,#16] | ||
| 83 | stp x21,x22,[sp,#32] | ||
| 84 | stp x23,x24,[sp,#48] | ||
| 85 | stp x25,x26,[sp,#64] | ||
| 86 | stp x27,x28,[sp,#80] | ||
| 87 | sub sp,sp,#4*4 | ||
| 88 | |||
| 89 | ldp w20,w21,[x0] // load context | ||
| 90 | ldp w22,w23,[x0,#2*4] | ||
| 91 | ldp w24,w25,[x0,#4*4] | ||
| 92 | add x2,x1,x2,lsl#6 // end of input | ||
| 93 | ldp w26,w27,[x0,#6*4] | ||
| 94 | adr x30,.LK256 | ||
| 95 | stp x0,x2,[x29,#96] | ||
| 96 | |||
| 97 | .Loop: | ||
| 98 | ldp w3,w4,[x1],#2*4 | ||
| 99 | ldr w19,[x30],#4 // *K++ | ||
| 100 | eor w28,w21,w22 // magic seed | ||
| 101 | str x1,[x29,#112] | ||
| 102 | #ifndef __AARCH64EB__ | ||
| 103 | rev w3,w3 // 0 | ||
| 104 | #endif | ||
| 105 | ror w16,w24,#6 | ||
| 106 | add w27,w27,w19 // h+=K[i] | ||
| 107 | eor w6,w24,w24,ror#14 | ||
| 108 | and w17,w25,w24 | ||
| 109 | bic w19,w26,w24 | ||
| 110 | add w27,w27,w3 // h+=X[i] | ||
| 111 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 112 | eor w19,w20,w21 // a^b, b^c in next round | ||
| 113 | eor w16,w16,w6,ror#11 // Sigma1(e) | ||
| 114 | ror w6,w20,#2 | ||
| 115 | add w27,w27,w17 // h+=Ch(e,f,g) | ||
| 116 | eor w17,w20,w20,ror#9 | ||
| 117 | add w27,w27,w16 // h+=Sigma1(e) | ||
| 118 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 119 | add w23,w23,w27 // d+=h | ||
| 120 | eor w28,w28,w21 // Maj(a,b,c) | ||
| 121 | eor w17,w6,w17,ror#13 // Sigma0(a) | ||
| 122 | add w27,w27,w28 // h+=Maj(a,b,c) | ||
| 123 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 124 | //add w27,w27,w17 // h+=Sigma0(a) | ||
| 125 | #ifndef __AARCH64EB__ | ||
| 126 | rev w4,w4 // 1 | ||
| 127 | #endif | ||
| 128 | ldp w5,w6,[x1],#2*4 | ||
| 129 | add w27,w27,w17 // h+=Sigma0(a) | ||
| 130 | ror w16,w23,#6 | ||
| 131 | add w26,w26,w28 // h+=K[i] | ||
| 132 | eor w7,w23,w23,ror#14 | ||
| 133 | and w17,w24,w23 | ||
| 134 | bic w28,w25,w23 | ||
| 135 | add w26,w26,w4 // h+=X[i] | ||
| 136 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 137 | eor w28,w27,w20 // a^b, b^c in next round | ||
| 138 | eor w16,w16,w7,ror#11 // Sigma1(e) | ||
| 139 | ror w7,w27,#2 | ||
| 140 | add w26,w26,w17 // h+=Ch(e,f,g) | ||
| 141 | eor w17,w27,w27,ror#9 | ||
| 142 | add w26,w26,w16 // h+=Sigma1(e) | ||
| 143 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 144 | add w22,w22,w26 // d+=h | ||
| 145 | eor w19,w19,w20 // Maj(a,b,c) | ||
| 146 | eor w17,w7,w17,ror#13 // Sigma0(a) | ||
| 147 | add w26,w26,w19 // h+=Maj(a,b,c) | ||
| 148 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 149 | //add w26,w26,w17 // h+=Sigma0(a) | ||
| 150 | #ifndef __AARCH64EB__ | ||
| 151 | rev w5,w5 // 2 | ||
| 152 | #endif | ||
| 153 | add w26,w26,w17 // h+=Sigma0(a) | ||
| 154 | ror w16,w22,#6 | ||
| 155 | add w25,w25,w19 // h+=K[i] | ||
| 156 | eor w8,w22,w22,ror#14 | ||
| 157 | and w17,w23,w22 | ||
| 158 | bic w19,w24,w22 | ||
| 159 | add w25,w25,w5 // h+=X[i] | ||
| 160 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 161 | eor w19,w26,w27 // a^b, b^c in next round | ||
| 162 | eor w16,w16,w8,ror#11 // Sigma1(e) | ||
| 163 | ror w8,w26,#2 | ||
| 164 | add w25,w25,w17 // h+=Ch(e,f,g) | ||
| 165 | eor w17,w26,w26,ror#9 | ||
| 166 | add w25,w25,w16 // h+=Sigma1(e) | ||
| 167 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 168 | add w21,w21,w25 // d+=h | ||
| 169 | eor w28,w28,w27 // Maj(a,b,c) | ||
| 170 | eor w17,w8,w17,ror#13 // Sigma0(a) | ||
| 171 | add w25,w25,w28 // h+=Maj(a,b,c) | ||
| 172 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 173 | //add w25,w25,w17 // h+=Sigma0(a) | ||
| 174 | #ifndef __AARCH64EB__ | ||
| 175 | rev w6,w6 // 3 | ||
| 176 | #endif | ||
| 177 | ldp w7,w8,[x1],#2*4 | ||
| 178 | add w25,w25,w17 // h+=Sigma0(a) | ||
| 179 | ror w16,w21,#6 | ||
| 180 | add w24,w24,w28 // h+=K[i] | ||
| 181 | eor w9,w21,w21,ror#14 | ||
| 182 | and w17,w22,w21 | ||
| 183 | bic w28,w23,w21 | ||
| 184 | add w24,w24,w6 // h+=X[i] | ||
| 185 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 186 | eor w28,w25,w26 // a^b, b^c in next round | ||
| 187 | eor w16,w16,w9,ror#11 // Sigma1(e) | ||
| 188 | ror w9,w25,#2 | ||
| 189 | add w24,w24,w17 // h+=Ch(e,f,g) | ||
| 190 | eor w17,w25,w25,ror#9 | ||
| 191 | add w24,w24,w16 // h+=Sigma1(e) | ||
| 192 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 193 | add w20,w20,w24 // d+=h | ||
| 194 | eor w19,w19,w26 // Maj(a,b,c) | ||
| 195 | eor w17,w9,w17,ror#13 // Sigma0(a) | ||
| 196 | add w24,w24,w19 // h+=Maj(a,b,c) | ||
| 197 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 198 | //add w24,w24,w17 // h+=Sigma0(a) | ||
| 199 | #ifndef __AARCH64EB__ | ||
| 200 | rev w7,w7 // 4 | ||
| 201 | #endif | ||
| 202 | add w24,w24,w17 // h+=Sigma0(a) | ||
| 203 | ror w16,w20,#6 | ||
| 204 | add w23,w23,w19 // h+=K[i] | ||
| 205 | eor w10,w20,w20,ror#14 | ||
| 206 | and w17,w21,w20 | ||
| 207 | bic w19,w22,w20 | ||
| 208 | add w23,w23,w7 // h+=X[i] | ||
| 209 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 210 | eor w19,w24,w25 // a^b, b^c in next round | ||
| 211 | eor w16,w16,w10,ror#11 // Sigma1(e) | ||
| 212 | ror w10,w24,#2 | ||
| 213 | add w23,w23,w17 // h+=Ch(e,f,g) | ||
| 214 | eor w17,w24,w24,ror#9 | ||
| 215 | add w23,w23,w16 // h+=Sigma1(e) | ||
| 216 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 217 | add w27,w27,w23 // d+=h | ||
| 218 | eor w28,w28,w25 // Maj(a,b,c) | ||
| 219 | eor w17,w10,w17,ror#13 // Sigma0(a) | ||
| 220 | add w23,w23,w28 // h+=Maj(a,b,c) | ||
| 221 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 222 | //add w23,w23,w17 // h+=Sigma0(a) | ||
| 223 | #ifndef __AARCH64EB__ | ||
| 224 | rev w8,w8 // 5 | ||
| 225 | #endif | ||
| 226 | ldp w9,w10,[x1],#2*4 | ||
| 227 | add w23,w23,w17 // h+=Sigma0(a) | ||
| 228 | ror w16,w27,#6 | ||
| 229 | add w22,w22,w28 // h+=K[i] | ||
| 230 | eor w11,w27,w27,ror#14 | ||
| 231 | and w17,w20,w27 | ||
| 232 | bic w28,w21,w27 | ||
| 233 | add w22,w22,w8 // h+=X[i] | ||
| 234 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 235 | eor w28,w23,w24 // a^b, b^c in next round | ||
| 236 | eor w16,w16,w11,ror#11 // Sigma1(e) | ||
| 237 | ror w11,w23,#2 | ||
| 238 | add w22,w22,w17 // h+=Ch(e,f,g) | ||
| 239 | eor w17,w23,w23,ror#9 | ||
| 240 | add w22,w22,w16 // h+=Sigma1(e) | ||
| 241 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 242 | add w26,w26,w22 // d+=h | ||
| 243 | eor w19,w19,w24 // Maj(a,b,c) | ||
| 244 | eor w17,w11,w17,ror#13 // Sigma0(a) | ||
| 245 | add w22,w22,w19 // h+=Maj(a,b,c) | ||
| 246 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 247 | //add w22,w22,w17 // h+=Sigma0(a) | ||
| 248 | #ifndef __AARCH64EB__ | ||
| 249 | rev w9,w9 // 6 | ||
| 250 | #endif | ||
| 251 | add w22,w22,w17 // h+=Sigma0(a) | ||
| 252 | ror w16,w26,#6 | ||
| 253 | add w21,w21,w19 // h+=K[i] | ||
| 254 | eor w12,w26,w26,ror#14 | ||
| 255 | and w17,w27,w26 | ||
| 256 | bic w19,w20,w26 | ||
| 257 | add w21,w21,w9 // h+=X[i] | ||
| 258 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 259 | eor w19,w22,w23 // a^b, b^c in next round | ||
| 260 | eor w16,w16,w12,ror#11 // Sigma1(e) | ||
| 261 | ror w12,w22,#2 | ||
| 262 | add w21,w21,w17 // h+=Ch(e,f,g) | ||
| 263 | eor w17,w22,w22,ror#9 | ||
| 264 | add w21,w21,w16 // h+=Sigma1(e) | ||
| 265 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 266 | add w25,w25,w21 // d+=h | ||
| 267 | eor w28,w28,w23 // Maj(a,b,c) | ||
| 268 | eor w17,w12,w17,ror#13 // Sigma0(a) | ||
| 269 | add w21,w21,w28 // h+=Maj(a,b,c) | ||
| 270 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 271 | //add w21,w21,w17 // h+=Sigma0(a) | ||
| 272 | #ifndef __AARCH64EB__ | ||
| 273 | rev w10,w10 // 7 | ||
| 274 | #endif | ||
| 275 | ldp w11,w12,[x1],#2*4 | ||
| 276 | add w21,w21,w17 // h+=Sigma0(a) | ||
| 277 | ror w16,w25,#6 | ||
| 278 | add w20,w20,w28 // h+=K[i] | ||
| 279 | eor w13,w25,w25,ror#14 | ||
| 280 | and w17,w26,w25 | ||
| 281 | bic w28,w27,w25 | ||
| 282 | add w20,w20,w10 // h+=X[i] | ||
| 283 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 284 | eor w28,w21,w22 // a^b, b^c in next round | ||
| 285 | eor w16,w16,w13,ror#11 // Sigma1(e) | ||
| 286 | ror w13,w21,#2 | ||
| 287 | add w20,w20,w17 // h+=Ch(e,f,g) | ||
| 288 | eor w17,w21,w21,ror#9 | ||
| 289 | add w20,w20,w16 // h+=Sigma1(e) | ||
| 290 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 291 | add w24,w24,w20 // d+=h | ||
| 292 | eor w19,w19,w22 // Maj(a,b,c) | ||
| 293 | eor w17,w13,w17,ror#13 // Sigma0(a) | ||
| 294 | add w20,w20,w19 // h+=Maj(a,b,c) | ||
| 295 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 296 | //add w20,w20,w17 // h+=Sigma0(a) | ||
| 297 | #ifndef __AARCH64EB__ | ||
| 298 | rev w11,w11 // 8 | ||
| 299 | #endif | ||
| 300 | add w20,w20,w17 // h+=Sigma0(a) | ||
| 301 | ror w16,w24,#6 | ||
| 302 | add w27,w27,w19 // h+=K[i] | ||
| 303 | eor w14,w24,w24,ror#14 | ||
| 304 | and w17,w25,w24 | ||
| 305 | bic w19,w26,w24 | ||
| 306 | add w27,w27,w11 // h+=X[i] | ||
| 307 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 308 | eor w19,w20,w21 // a^b, b^c in next round | ||
| 309 | eor w16,w16,w14,ror#11 // Sigma1(e) | ||
| 310 | ror w14,w20,#2 | ||
| 311 | add w27,w27,w17 // h+=Ch(e,f,g) | ||
| 312 | eor w17,w20,w20,ror#9 | ||
| 313 | add w27,w27,w16 // h+=Sigma1(e) | ||
| 314 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 315 | add w23,w23,w27 // d+=h | ||
| 316 | eor w28,w28,w21 // Maj(a,b,c) | ||
| 317 | eor w17,w14,w17,ror#13 // Sigma0(a) | ||
| 318 | add w27,w27,w28 // h+=Maj(a,b,c) | ||
| 319 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 320 | //add w27,w27,w17 // h+=Sigma0(a) | ||
| 321 | #ifndef __AARCH64EB__ | ||
| 322 | rev w12,w12 // 9 | ||
| 323 | #endif | ||
| 324 | ldp w13,w14,[x1],#2*4 | ||
| 325 | add w27,w27,w17 // h+=Sigma0(a) | ||
| 326 | ror w16,w23,#6 | ||
| 327 | add w26,w26,w28 // h+=K[i] | ||
| 328 | eor w15,w23,w23,ror#14 | ||
| 329 | and w17,w24,w23 | ||
| 330 | bic w28,w25,w23 | ||
| 331 | add w26,w26,w12 // h+=X[i] | ||
| 332 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 333 | eor w28,w27,w20 // a^b, b^c in next round | ||
| 334 | eor w16,w16,w15,ror#11 // Sigma1(e) | ||
| 335 | ror w15,w27,#2 | ||
| 336 | add w26,w26,w17 // h+=Ch(e,f,g) | ||
| 337 | eor w17,w27,w27,ror#9 | ||
| 338 | add w26,w26,w16 // h+=Sigma1(e) | ||
| 339 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 340 | add w22,w22,w26 // d+=h | ||
| 341 | eor w19,w19,w20 // Maj(a,b,c) | ||
| 342 | eor w17,w15,w17,ror#13 // Sigma0(a) | ||
| 343 | add w26,w26,w19 // h+=Maj(a,b,c) | ||
| 344 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 345 | //add w26,w26,w17 // h+=Sigma0(a) | ||
| 346 | #ifndef __AARCH64EB__ | ||
| 347 | rev w13,w13 // 10 | ||
| 348 | #endif | ||
| 349 | add w26,w26,w17 // h+=Sigma0(a) | ||
| 350 | ror w16,w22,#6 | ||
| 351 | add w25,w25,w19 // h+=K[i] | ||
| 352 | eor w0,w22,w22,ror#14 | ||
| 353 | and w17,w23,w22 | ||
| 354 | bic w19,w24,w22 | ||
| 355 | add w25,w25,w13 // h+=X[i] | ||
| 356 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 357 | eor w19,w26,w27 // a^b, b^c in next round | ||
| 358 | eor w16,w16,w0,ror#11 // Sigma1(e) | ||
| 359 | ror w0,w26,#2 | ||
| 360 | add w25,w25,w17 // h+=Ch(e,f,g) | ||
| 361 | eor w17,w26,w26,ror#9 | ||
| 362 | add w25,w25,w16 // h+=Sigma1(e) | ||
| 363 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 364 | add w21,w21,w25 // d+=h | ||
| 365 | eor w28,w28,w27 // Maj(a,b,c) | ||
| 366 | eor w17,w0,w17,ror#13 // Sigma0(a) | ||
| 367 | add w25,w25,w28 // h+=Maj(a,b,c) | ||
| 368 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 369 | //add w25,w25,w17 // h+=Sigma0(a) | ||
| 370 | #ifndef __AARCH64EB__ | ||
| 371 | rev w14,w14 // 11 | ||
| 372 | #endif | ||
| 373 | ldp w15,w0,[x1],#2*4 | ||
| 374 | add w25,w25,w17 // h+=Sigma0(a) | ||
| 375 | str w6,[sp,#12] | ||
| 376 | ror w16,w21,#6 | ||
| 377 | add w24,w24,w28 // h+=K[i] | ||
| 378 | eor w6,w21,w21,ror#14 | ||
| 379 | and w17,w22,w21 | ||
| 380 | bic w28,w23,w21 | ||
| 381 | add w24,w24,w14 // h+=X[i] | ||
| 382 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 383 | eor w28,w25,w26 // a^b, b^c in next round | ||
| 384 | eor w16,w16,w6,ror#11 // Sigma1(e) | ||
| 385 | ror w6,w25,#2 | ||
| 386 | add w24,w24,w17 // h+=Ch(e,f,g) | ||
| 387 | eor w17,w25,w25,ror#9 | ||
| 388 | add w24,w24,w16 // h+=Sigma1(e) | ||
| 389 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 390 | add w20,w20,w24 // d+=h | ||
| 391 | eor w19,w19,w26 // Maj(a,b,c) | ||
| 392 | eor w17,w6,w17,ror#13 // Sigma0(a) | ||
| 393 | add w24,w24,w19 // h+=Maj(a,b,c) | ||
| 394 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 395 | //add w24,w24,w17 // h+=Sigma0(a) | ||
| 396 | #ifndef __AARCH64EB__ | ||
| 397 | rev w15,w15 // 12 | ||
| 398 | #endif | ||
| 399 | add w24,w24,w17 // h+=Sigma0(a) | ||
| 400 | str w7,[sp,#0] | ||
| 401 | ror w16,w20,#6 | ||
| 402 | add w23,w23,w19 // h+=K[i] | ||
| 403 | eor w7,w20,w20,ror#14 | ||
| 404 | and w17,w21,w20 | ||
| 405 | bic w19,w22,w20 | ||
| 406 | add w23,w23,w15 // h+=X[i] | ||
| 407 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 408 | eor w19,w24,w25 // a^b, b^c in next round | ||
| 409 | eor w16,w16,w7,ror#11 // Sigma1(e) | ||
| 410 | ror w7,w24,#2 | ||
| 411 | add w23,w23,w17 // h+=Ch(e,f,g) | ||
| 412 | eor w17,w24,w24,ror#9 | ||
| 413 | add w23,w23,w16 // h+=Sigma1(e) | ||
| 414 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 415 | add w27,w27,w23 // d+=h | ||
| 416 | eor w28,w28,w25 // Maj(a,b,c) | ||
| 417 | eor w17,w7,w17,ror#13 // Sigma0(a) | ||
| 418 | add w23,w23,w28 // h+=Maj(a,b,c) | ||
| 419 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 420 | //add w23,w23,w17 // h+=Sigma0(a) | ||
| 421 | #ifndef __AARCH64EB__ | ||
| 422 | rev w0,w0 // 13 | ||
| 423 | #endif | ||
| 424 | ldp w1,w2,[x1] | ||
| 425 | add w23,w23,w17 // h+=Sigma0(a) | ||
| 426 | str w8,[sp,#4] | ||
| 427 | ror w16,w27,#6 | ||
| 428 | add w22,w22,w28 // h+=K[i] | ||
| 429 | eor w8,w27,w27,ror#14 | ||
| 430 | and w17,w20,w27 | ||
| 431 | bic w28,w21,w27 | ||
| 432 | add w22,w22,w0 // h+=X[i] | ||
| 433 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 434 | eor w28,w23,w24 // a^b, b^c in next round | ||
| 435 | eor w16,w16,w8,ror#11 // Sigma1(e) | ||
| 436 | ror w8,w23,#2 | ||
| 437 | add w22,w22,w17 // h+=Ch(e,f,g) | ||
| 438 | eor w17,w23,w23,ror#9 | ||
| 439 | add w22,w22,w16 // h+=Sigma1(e) | ||
| 440 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 441 | add w26,w26,w22 // d+=h | ||
| 442 | eor w19,w19,w24 // Maj(a,b,c) | ||
| 443 | eor w17,w8,w17,ror#13 // Sigma0(a) | ||
| 444 | add w22,w22,w19 // h+=Maj(a,b,c) | ||
| 445 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 446 | //add w22,w22,w17 // h+=Sigma0(a) | ||
| 447 | #ifndef __AARCH64EB__ | ||
| 448 | rev w1,w1 // 14 | ||
| 449 | #endif | ||
| 450 | ldr w6,[sp,#12] | ||
| 451 | add w22,w22,w17 // h+=Sigma0(a) | ||
| 452 | str w9,[sp,#8] | ||
| 453 | ror w16,w26,#6 | ||
| 454 | add w21,w21,w19 // h+=K[i] | ||
| 455 | eor w9,w26,w26,ror#14 | ||
| 456 | and w17,w27,w26 | ||
| 457 | bic w19,w20,w26 | ||
| 458 | add w21,w21,w1 // h+=X[i] | ||
| 459 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 460 | eor w19,w22,w23 // a^b, b^c in next round | ||
| 461 | eor w16,w16,w9,ror#11 // Sigma1(e) | ||
| 462 | ror w9,w22,#2 | ||
| 463 | add w21,w21,w17 // h+=Ch(e,f,g) | ||
| 464 | eor w17,w22,w22,ror#9 | ||
| 465 | add w21,w21,w16 // h+=Sigma1(e) | ||
| 466 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 467 | add w25,w25,w21 // d+=h | ||
| 468 | eor w28,w28,w23 // Maj(a,b,c) | ||
| 469 | eor w17,w9,w17,ror#13 // Sigma0(a) | ||
| 470 | add w21,w21,w28 // h+=Maj(a,b,c) | ||
| 471 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 472 | //add w21,w21,w17 // h+=Sigma0(a) | ||
| 473 | #ifndef __AARCH64EB__ | ||
| 474 | rev w2,w2 // 15 | ||
| 475 | #endif | ||
| 476 | ldr w7,[sp,#0] | ||
| 477 | add w21,w21,w17 // h+=Sigma0(a) | ||
| 478 | str w10,[sp,#12] | ||
| 479 | ror w16,w25,#6 | ||
| 480 | add w20,w20,w28 // h+=K[i] | ||
| 481 | ror w9,w4,#7 | ||
| 482 | and w17,w26,w25 | ||
| 483 | ror w8,w1,#17 | ||
| 484 | bic w28,w27,w25 | ||
| 485 | ror w10,w21,#2 | ||
| 486 | add w20,w20,w2 // h+=X[i] | ||
| 487 | eor w16,w16,w25,ror#11 | ||
| 488 | eor w9,w9,w4,ror#18 | ||
| 489 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 490 | eor w28,w21,w22 // a^b, b^c in next round | ||
| 491 | eor w16,w16,w25,ror#25 // Sigma1(e) | ||
| 492 | eor w10,w10,w21,ror#13 | ||
| 493 | add w20,w20,w17 // h+=Ch(e,f,g) | ||
| 494 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 495 | eor w8,w8,w1,ror#19 | ||
| 496 | eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) | ||
| 497 | add w20,w20,w16 // h+=Sigma1(e) | ||
| 498 | eor w19,w19,w22 // Maj(a,b,c) | ||
| 499 | eor w17,w10,w21,ror#22 // Sigma0(a) | ||
| 500 | eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) | ||
| 501 | add w3,w3,w12 | ||
| 502 | add w24,w24,w20 // d+=h | ||
| 503 | add w20,w20,w19 // h+=Maj(a,b,c) | ||
| 504 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 505 | add w3,w3,w9 | ||
| 506 | add w20,w20,w17 // h+=Sigma0(a) | ||
| 507 | add w3,w3,w8 | ||
| 508 | .Loop_16_xx: | ||
| 509 | ldr w8,[sp,#4] | ||
| 510 | str w11,[sp,#0] | ||
| 511 | ror w16,w24,#6 | ||
| 512 | add w27,w27,w19 // h+=K[i] | ||
| 513 | ror w10,w5,#7 | ||
| 514 | and w17,w25,w24 | ||
| 515 | ror w9,w2,#17 | ||
| 516 | bic w19,w26,w24 | ||
| 517 | ror w11,w20,#2 | ||
| 518 | add w27,w27,w3 // h+=X[i] | ||
| 519 | eor w16,w16,w24,ror#11 | ||
| 520 | eor w10,w10,w5,ror#18 | ||
| 521 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 522 | eor w19,w20,w21 // a^b, b^c in next round | ||
| 523 | eor w16,w16,w24,ror#25 // Sigma1(e) | ||
| 524 | eor w11,w11,w20,ror#13 | ||
| 525 | add w27,w27,w17 // h+=Ch(e,f,g) | ||
| 526 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 527 | eor w9,w9,w2,ror#19 | ||
| 528 | eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) | ||
| 529 | add w27,w27,w16 // h+=Sigma1(e) | ||
| 530 | eor w28,w28,w21 // Maj(a,b,c) | ||
| 531 | eor w17,w11,w20,ror#22 // Sigma0(a) | ||
| 532 | eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) | ||
| 533 | add w4,w4,w13 | ||
| 534 | add w23,w23,w27 // d+=h | ||
| 535 | add w27,w27,w28 // h+=Maj(a,b,c) | ||
| 536 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 537 | add w4,w4,w10 | ||
| 538 | add w27,w27,w17 // h+=Sigma0(a) | ||
| 539 | add w4,w4,w9 | ||
| 540 | ldr w9,[sp,#8] | ||
| 541 | str w12,[sp,#4] | ||
| 542 | ror w16,w23,#6 | ||
| 543 | add w26,w26,w28 // h+=K[i] | ||
| 544 | ror w11,w6,#7 | ||
| 545 | and w17,w24,w23 | ||
| 546 | ror w10,w3,#17 | ||
| 547 | bic w28,w25,w23 | ||
| 548 | ror w12,w27,#2 | ||
| 549 | add w26,w26,w4 // h+=X[i] | ||
| 550 | eor w16,w16,w23,ror#11 | ||
| 551 | eor w11,w11,w6,ror#18 | ||
| 552 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 553 | eor w28,w27,w20 // a^b, b^c in next round | ||
| 554 | eor w16,w16,w23,ror#25 // Sigma1(e) | ||
| 555 | eor w12,w12,w27,ror#13 | ||
| 556 | add w26,w26,w17 // h+=Ch(e,f,g) | ||
| 557 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 558 | eor w10,w10,w3,ror#19 | ||
| 559 | eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) | ||
| 560 | add w26,w26,w16 // h+=Sigma1(e) | ||
| 561 | eor w19,w19,w20 // Maj(a,b,c) | ||
| 562 | eor w17,w12,w27,ror#22 // Sigma0(a) | ||
| 563 | eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) | ||
| 564 | add w5,w5,w14 | ||
| 565 | add w22,w22,w26 // d+=h | ||
| 566 | add w26,w26,w19 // h+=Maj(a,b,c) | ||
| 567 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 568 | add w5,w5,w11 | ||
| 569 | add w26,w26,w17 // h+=Sigma0(a) | ||
| 570 | add w5,w5,w10 | ||
| 571 | ldr w10,[sp,#12] | ||
| 572 | str w13,[sp,#8] | ||
| 573 | ror w16,w22,#6 | ||
| 574 | add w25,w25,w19 // h+=K[i] | ||
| 575 | ror w12,w7,#7 | ||
| 576 | and w17,w23,w22 | ||
| 577 | ror w11,w4,#17 | ||
| 578 | bic w19,w24,w22 | ||
| 579 | ror w13,w26,#2 | ||
| 580 | add w25,w25,w5 // h+=X[i] | ||
| 581 | eor w16,w16,w22,ror#11 | ||
| 582 | eor w12,w12,w7,ror#18 | ||
| 583 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 584 | eor w19,w26,w27 // a^b, b^c in next round | ||
| 585 | eor w16,w16,w22,ror#25 // Sigma1(e) | ||
| 586 | eor w13,w13,w26,ror#13 | ||
| 587 | add w25,w25,w17 // h+=Ch(e,f,g) | ||
| 588 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 589 | eor w11,w11,w4,ror#19 | ||
| 590 | eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) | ||
| 591 | add w25,w25,w16 // h+=Sigma1(e) | ||
| 592 | eor w28,w28,w27 // Maj(a,b,c) | ||
| 593 | eor w17,w13,w26,ror#22 // Sigma0(a) | ||
| 594 | eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) | ||
| 595 | add w6,w6,w15 | ||
| 596 | add w21,w21,w25 // d+=h | ||
| 597 | add w25,w25,w28 // h+=Maj(a,b,c) | ||
| 598 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 599 | add w6,w6,w12 | ||
| 600 | add w25,w25,w17 // h+=Sigma0(a) | ||
| 601 | add w6,w6,w11 | ||
| 602 | ldr w11,[sp,#0] | ||
| 603 | str w14,[sp,#12] | ||
| 604 | ror w16,w21,#6 | ||
| 605 | add w24,w24,w28 // h+=K[i] | ||
| 606 | ror w13,w8,#7 | ||
| 607 | and w17,w22,w21 | ||
| 608 | ror w12,w5,#17 | ||
| 609 | bic w28,w23,w21 | ||
| 610 | ror w14,w25,#2 | ||
| 611 | add w24,w24,w6 // h+=X[i] | ||
| 612 | eor w16,w16,w21,ror#11 | ||
| 613 | eor w13,w13,w8,ror#18 | ||
| 614 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 615 | eor w28,w25,w26 // a^b, b^c in next round | ||
| 616 | eor w16,w16,w21,ror#25 // Sigma1(e) | ||
| 617 | eor w14,w14,w25,ror#13 | ||
| 618 | add w24,w24,w17 // h+=Ch(e,f,g) | ||
| 619 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 620 | eor w12,w12,w5,ror#19 | ||
| 621 | eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) | ||
| 622 | add w24,w24,w16 // h+=Sigma1(e) | ||
| 623 | eor w19,w19,w26 // Maj(a,b,c) | ||
| 624 | eor w17,w14,w25,ror#22 // Sigma0(a) | ||
| 625 | eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) | ||
| 626 | add w7,w7,w0 | ||
| 627 | add w20,w20,w24 // d+=h | ||
| 628 | add w24,w24,w19 // h+=Maj(a,b,c) | ||
| 629 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 630 | add w7,w7,w13 | ||
| 631 | add w24,w24,w17 // h+=Sigma0(a) | ||
| 632 | add w7,w7,w12 | ||
| 633 | ldr w12,[sp,#4] | ||
| 634 | str w15,[sp,#0] | ||
| 635 | ror w16,w20,#6 | ||
| 636 | add w23,w23,w19 // h+=K[i] | ||
| 637 | ror w14,w9,#7 | ||
| 638 | and w17,w21,w20 | ||
| 639 | ror w13,w6,#17 | ||
| 640 | bic w19,w22,w20 | ||
| 641 | ror w15,w24,#2 | ||
| 642 | add w23,w23,w7 // h+=X[i] | ||
| 643 | eor w16,w16,w20,ror#11 | ||
| 644 | eor w14,w14,w9,ror#18 | ||
| 645 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 646 | eor w19,w24,w25 // a^b, b^c in next round | ||
| 647 | eor w16,w16,w20,ror#25 // Sigma1(e) | ||
| 648 | eor w15,w15,w24,ror#13 | ||
| 649 | add w23,w23,w17 // h+=Ch(e,f,g) | ||
| 650 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 651 | eor w13,w13,w6,ror#19 | ||
| 652 | eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) | ||
| 653 | add w23,w23,w16 // h+=Sigma1(e) | ||
| 654 | eor w28,w28,w25 // Maj(a,b,c) | ||
| 655 | eor w17,w15,w24,ror#22 // Sigma0(a) | ||
| 656 | eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) | ||
| 657 | add w8,w8,w1 | ||
| 658 | add w27,w27,w23 // d+=h | ||
| 659 | add w23,w23,w28 // h+=Maj(a,b,c) | ||
| 660 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 661 | add w8,w8,w14 | ||
| 662 | add w23,w23,w17 // h+=Sigma0(a) | ||
| 663 | add w8,w8,w13 | ||
| 664 | ldr w13,[sp,#8] | ||
| 665 | str w0,[sp,#4] | ||
| 666 | ror w16,w27,#6 | ||
| 667 | add w22,w22,w28 // h+=K[i] | ||
| 668 | ror w15,w10,#7 | ||
| 669 | and w17,w20,w27 | ||
| 670 | ror w14,w7,#17 | ||
| 671 | bic w28,w21,w27 | ||
| 672 | ror w0,w23,#2 | ||
| 673 | add w22,w22,w8 // h+=X[i] | ||
| 674 | eor w16,w16,w27,ror#11 | ||
| 675 | eor w15,w15,w10,ror#18 | ||
| 676 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 677 | eor w28,w23,w24 // a^b, b^c in next round | ||
| 678 | eor w16,w16,w27,ror#25 // Sigma1(e) | ||
| 679 | eor w0,w0,w23,ror#13 | ||
| 680 | add w22,w22,w17 // h+=Ch(e,f,g) | ||
| 681 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 682 | eor w14,w14,w7,ror#19 | ||
| 683 | eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) | ||
| 684 | add w22,w22,w16 // h+=Sigma1(e) | ||
| 685 | eor w19,w19,w24 // Maj(a,b,c) | ||
| 686 | eor w17,w0,w23,ror#22 // Sigma0(a) | ||
| 687 | eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) | ||
| 688 | add w9,w9,w2 | ||
| 689 | add w26,w26,w22 // d+=h | ||
| 690 | add w22,w22,w19 // h+=Maj(a,b,c) | ||
| 691 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 692 | add w9,w9,w15 | ||
| 693 | add w22,w22,w17 // h+=Sigma0(a) | ||
| 694 | add w9,w9,w14 | ||
| 695 | ldr w14,[sp,#12] | ||
| 696 | str w1,[sp,#8] | ||
| 697 | ror w16,w26,#6 | ||
| 698 | add w21,w21,w19 // h+=K[i] | ||
| 699 | ror w0,w11,#7 | ||
| 700 | and w17,w27,w26 | ||
| 701 | ror w15,w8,#17 | ||
| 702 | bic w19,w20,w26 | ||
| 703 | ror w1,w22,#2 | ||
| 704 | add w21,w21,w9 // h+=X[i] | ||
| 705 | eor w16,w16,w26,ror#11 | ||
| 706 | eor w0,w0,w11,ror#18 | ||
| 707 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 708 | eor w19,w22,w23 // a^b, b^c in next round | ||
| 709 | eor w16,w16,w26,ror#25 // Sigma1(e) | ||
| 710 | eor w1,w1,w22,ror#13 | ||
| 711 | add w21,w21,w17 // h+=Ch(e,f,g) | ||
| 712 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 713 | eor w15,w15,w8,ror#19 | ||
| 714 | eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) | ||
| 715 | add w21,w21,w16 // h+=Sigma1(e) | ||
| 716 | eor w28,w28,w23 // Maj(a,b,c) | ||
| 717 | eor w17,w1,w22,ror#22 // Sigma0(a) | ||
| 718 | eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) | ||
| 719 | add w10,w10,w3 | ||
| 720 | add w25,w25,w21 // d+=h | ||
| 721 | add w21,w21,w28 // h+=Maj(a,b,c) | ||
| 722 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 723 | add w10,w10,w0 | ||
| 724 | add w21,w21,w17 // h+=Sigma0(a) | ||
| 725 | add w10,w10,w15 | ||
| 726 | ldr w15,[sp,#0] | ||
| 727 | str w2,[sp,#12] | ||
| 728 | ror w16,w25,#6 | ||
| 729 | add w20,w20,w28 // h+=K[i] | ||
| 730 | ror w1,w12,#7 | ||
| 731 | and w17,w26,w25 | ||
| 732 | ror w0,w9,#17 | ||
| 733 | bic w28,w27,w25 | ||
| 734 | ror w2,w21,#2 | ||
| 735 | add w20,w20,w10 // h+=X[i] | ||
| 736 | eor w16,w16,w25,ror#11 | ||
| 737 | eor w1,w1,w12,ror#18 | ||
| 738 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 739 | eor w28,w21,w22 // a^b, b^c in next round | ||
| 740 | eor w16,w16,w25,ror#25 // Sigma1(e) | ||
| 741 | eor w2,w2,w21,ror#13 | ||
| 742 | add w20,w20,w17 // h+=Ch(e,f,g) | ||
| 743 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 744 | eor w0,w0,w9,ror#19 | ||
| 745 | eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) | ||
| 746 | add w20,w20,w16 // h+=Sigma1(e) | ||
| 747 | eor w19,w19,w22 // Maj(a,b,c) | ||
| 748 | eor w17,w2,w21,ror#22 // Sigma0(a) | ||
| 749 | eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) | ||
| 750 | add w11,w11,w4 | ||
| 751 | add w24,w24,w20 // d+=h | ||
| 752 | add w20,w20,w19 // h+=Maj(a,b,c) | ||
| 753 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 754 | add w11,w11,w1 | ||
| 755 | add w20,w20,w17 // h+=Sigma0(a) | ||
| 756 | add w11,w11,w0 | ||
| 757 | ldr w0,[sp,#4] | ||
| 758 | str w3,[sp,#0] | ||
| 759 | ror w16,w24,#6 | ||
| 760 | add w27,w27,w19 // h+=K[i] | ||
| 761 | ror w2,w13,#7 | ||
| 762 | and w17,w25,w24 | ||
| 763 | ror w1,w10,#17 | ||
| 764 | bic w19,w26,w24 | ||
| 765 | ror w3,w20,#2 | ||
| 766 | add w27,w27,w11 // h+=X[i] | ||
| 767 | eor w16,w16,w24,ror#11 | ||
| 768 | eor w2,w2,w13,ror#18 | ||
| 769 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 770 | eor w19,w20,w21 // a^b, b^c in next round | ||
| 771 | eor w16,w16,w24,ror#25 // Sigma1(e) | ||
| 772 | eor w3,w3,w20,ror#13 | ||
| 773 | add w27,w27,w17 // h+=Ch(e,f,g) | ||
| 774 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 775 | eor w1,w1,w10,ror#19 | ||
| 776 | eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) | ||
| 777 | add w27,w27,w16 // h+=Sigma1(e) | ||
| 778 | eor w28,w28,w21 // Maj(a,b,c) | ||
| 779 | eor w17,w3,w20,ror#22 // Sigma0(a) | ||
| 780 | eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) | ||
| 781 | add w12,w12,w5 | ||
| 782 | add w23,w23,w27 // d+=h | ||
| 783 | add w27,w27,w28 // h+=Maj(a,b,c) | ||
| 784 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 785 | add w12,w12,w2 | ||
| 786 | add w27,w27,w17 // h+=Sigma0(a) | ||
| 787 | add w12,w12,w1 | ||
| 788 | ldr w1,[sp,#8] | ||
| 789 | str w4,[sp,#4] | ||
| 790 | ror w16,w23,#6 | ||
| 791 | add w26,w26,w28 // h+=K[i] | ||
| 792 | ror w3,w14,#7 | ||
| 793 | and w17,w24,w23 | ||
| 794 | ror w2,w11,#17 | ||
| 795 | bic w28,w25,w23 | ||
| 796 | ror w4,w27,#2 | ||
| 797 | add w26,w26,w12 // h+=X[i] | ||
| 798 | eor w16,w16,w23,ror#11 | ||
| 799 | eor w3,w3,w14,ror#18 | ||
| 800 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 801 | eor w28,w27,w20 // a^b, b^c in next round | ||
| 802 | eor w16,w16,w23,ror#25 // Sigma1(e) | ||
| 803 | eor w4,w4,w27,ror#13 | ||
| 804 | add w26,w26,w17 // h+=Ch(e,f,g) | ||
| 805 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 806 | eor w2,w2,w11,ror#19 | ||
| 807 | eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) | ||
| 808 | add w26,w26,w16 // h+=Sigma1(e) | ||
| 809 | eor w19,w19,w20 // Maj(a,b,c) | ||
| 810 | eor w17,w4,w27,ror#22 // Sigma0(a) | ||
| 811 | eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) | ||
| 812 | add w13,w13,w6 | ||
| 813 | add w22,w22,w26 // d+=h | ||
| 814 | add w26,w26,w19 // h+=Maj(a,b,c) | ||
| 815 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 816 | add w13,w13,w3 | ||
| 817 | add w26,w26,w17 // h+=Sigma0(a) | ||
| 818 | add w13,w13,w2 | ||
| 819 | ldr w2,[sp,#12] | ||
| 820 | str w5,[sp,#8] | ||
| 821 | ror w16,w22,#6 | ||
| 822 | add w25,w25,w19 // h+=K[i] | ||
| 823 | ror w4,w15,#7 | ||
| 824 | and w17,w23,w22 | ||
| 825 | ror w3,w12,#17 | ||
| 826 | bic w19,w24,w22 | ||
| 827 | ror w5,w26,#2 | ||
| 828 | add w25,w25,w13 // h+=X[i] | ||
| 829 | eor w16,w16,w22,ror#11 | ||
| 830 | eor w4,w4,w15,ror#18 | ||
| 831 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 832 | eor w19,w26,w27 // a^b, b^c in next round | ||
| 833 | eor w16,w16,w22,ror#25 // Sigma1(e) | ||
| 834 | eor w5,w5,w26,ror#13 | ||
| 835 | add w25,w25,w17 // h+=Ch(e,f,g) | ||
| 836 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 837 | eor w3,w3,w12,ror#19 | ||
| 838 | eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) | ||
| 839 | add w25,w25,w16 // h+=Sigma1(e) | ||
| 840 | eor w28,w28,w27 // Maj(a,b,c) | ||
| 841 | eor w17,w5,w26,ror#22 // Sigma0(a) | ||
| 842 | eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) | ||
| 843 | add w14,w14,w7 | ||
| 844 | add w21,w21,w25 // d+=h | ||
| 845 | add w25,w25,w28 // h+=Maj(a,b,c) | ||
| 846 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 847 | add w14,w14,w4 | ||
| 848 | add w25,w25,w17 // h+=Sigma0(a) | ||
| 849 | add w14,w14,w3 | ||
| 850 | ldr w3,[sp,#0] | ||
| 851 | str w6,[sp,#12] | ||
| 852 | ror w16,w21,#6 | ||
| 853 | add w24,w24,w28 // h+=K[i] | ||
| 854 | ror w5,w0,#7 | ||
| 855 | and w17,w22,w21 | ||
| 856 | ror w4,w13,#17 | ||
| 857 | bic w28,w23,w21 | ||
| 858 | ror w6,w25,#2 | ||
| 859 | add w24,w24,w14 // h+=X[i] | ||
| 860 | eor w16,w16,w21,ror#11 | ||
| 861 | eor w5,w5,w0,ror#18 | ||
| 862 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 863 | eor w28,w25,w26 // a^b, b^c in next round | ||
| 864 | eor w16,w16,w21,ror#25 // Sigma1(e) | ||
| 865 | eor w6,w6,w25,ror#13 | ||
| 866 | add w24,w24,w17 // h+=Ch(e,f,g) | ||
| 867 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 868 | eor w4,w4,w13,ror#19 | ||
| 869 | eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) | ||
| 870 | add w24,w24,w16 // h+=Sigma1(e) | ||
| 871 | eor w19,w19,w26 // Maj(a,b,c) | ||
| 872 | eor w17,w6,w25,ror#22 // Sigma0(a) | ||
| 873 | eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) | ||
| 874 | add w15,w15,w8 | ||
| 875 | add w20,w20,w24 // d+=h | ||
| 876 | add w24,w24,w19 // h+=Maj(a,b,c) | ||
| 877 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 878 | add w15,w15,w5 | ||
| 879 | add w24,w24,w17 // h+=Sigma0(a) | ||
| 880 | add w15,w15,w4 | ||
| 881 | ldr w4,[sp,#4] | ||
| 882 | str w7,[sp,#0] | ||
| 883 | ror w16,w20,#6 | ||
| 884 | add w23,w23,w19 // h+=K[i] | ||
| 885 | ror w6,w1,#7 | ||
| 886 | and w17,w21,w20 | ||
| 887 | ror w5,w14,#17 | ||
| 888 | bic w19,w22,w20 | ||
| 889 | ror w7,w24,#2 | ||
| 890 | add w23,w23,w15 // h+=X[i] | ||
| 891 | eor w16,w16,w20,ror#11 | ||
| 892 | eor w6,w6,w1,ror#18 | ||
| 893 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 894 | eor w19,w24,w25 // a^b, b^c in next round | ||
| 895 | eor w16,w16,w20,ror#25 // Sigma1(e) | ||
| 896 | eor w7,w7,w24,ror#13 | ||
| 897 | add w23,w23,w17 // h+=Ch(e,f,g) | ||
| 898 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 899 | eor w5,w5,w14,ror#19 | ||
| 900 | eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) | ||
| 901 | add w23,w23,w16 // h+=Sigma1(e) | ||
| 902 | eor w28,w28,w25 // Maj(a,b,c) | ||
| 903 | eor w17,w7,w24,ror#22 // Sigma0(a) | ||
| 904 | eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) | ||
| 905 | add w0,w0,w9 | ||
| 906 | add w27,w27,w23 // d+=h | ||
| 907 | add w23,w23,w28 // h+=Maj(a,b,c) | ||
| 908 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 909 | add w0,w0,w6 | ||
| 910 | add w23,w23,w17 // h+=Sigma0(a) | ||
| 911 | add w0,w0,w5 | ||
| 912 | ldr w5,[sp,#8] | ||
| 913 | str w8,[sp,#4] | ||
| 914 | ror w16,w27,#6 | ||
| 915 | add w22,w22,w28 // h+=K[i] | ||
| 916 | ror w7,w2,#7 | ||
| 917 | and w17,w20,w27 | ||
| 918 | ror w6,w15,#17 | ||
| 919 | bic w28,w21,w27 | ||
| 920 | ror w8,w23,#2 | ||
| 921 | add w22,w22,w0 // h+=X[i] | ||
| 922 | eor w16,w16,w27,ror#11 | ||
| 923 | eor w7,w7,w2,ror#18 | ||
| 924 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 925 | eor w28,w23,w24 // a^b, b^c in next round | ||
| 926 | eor w16,w16,w27,ror#25 // Sigma1(e) | ||
| 927 | eor w8,w8,w23,ror#13 | ||
| 928 | add w22,w22,w17 // h+=Ch(e,f,g) | ||
| 929 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 930 | eor w6,w6,w15,ror#19 | ||
| 931 | eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) | ||
| 932 | add w22,w22,w16 // h+=Sigma1(e) | ||
| 933 | eor w19,w19,w24 // Maj(a,b,c) | ||
| 934 | eor w17,w8,w23,ror#22 // Sigma0(a) | ||
| 935 | eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) | ||
| 936 | add w1,w1,w10 | ||
| 937 | add w26,w26,w22 // d+=h | ||
| 938 | add w22,w22,w19 // h+=Maj(a,b,c) | ||
| 939 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 940 | add w1,w1,w7 | ||
| 941 | add w22,w22,w17 // h+=Sigma0(a) | ||
| 942 | add w1,w1,w6 | ||
| 943 | ldr w6,[sp,#12] | ||
| 944 | str w9,[sp,#8] | ||
| 945 | ror w16,w26,#6 | ||
| 946 | add w21,w21,w19 // h+=K[i] | ||
| 947 | ror w8,w3,#7 | ||
| 948 | and w17,w27,w26 | ||
| 949 | ror w7,w0,#17 | ||
| 950 | bic w19,w20,w26 | ||
| 951 | ror w9,w22,#2 | ||
| 952 | add w21,w21,w1 // h+=X[i] | ||
| 953 | eor w16,w16,w26,ror#11 | ||
| 954 | eor w8,w8,w3,ror#18 | ||
| 955 | orr w17,w17,w19 // Ch(e,f,g) | ||
| 956 | eor w19,w22,w23 // a^b, b^c in next round | ||
| 957 | eor w16,w16,w26,ror#25 // Sigma1(e) | ||
| 958 | eor w9,w9,w22,ror#13 | ||
| 959 | add w21,w21,w17 // h+=Ch(e,f,g) | ||
| 960 | and w28,w28,w19 // (b^c)&=(a^b) | ||
| 961 | eor w7,w7,w0,ror#19 | ||
| 962 | eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) | ||
| 963 | add w21,w21,w16 // h+=Sigma1(e) | ||
| 964 | eor w28,w28,w23 // Maj(a,b,c) | ||
| 965 | eor w17,w9,w22,ror#22 // Sigma0(a) | ||
| 966 | eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) | ||
| 967 | add w2,w2,w11 | ||
| 968 | add w25,w25,w21 // d+=h | ||
| 969 | add w21,w21,w28 // h+=Maj(a,b,c) | ||
| 970 | ldr w28,[x30],#4 // *K++, w19 in next round | ||
| 971 | add w2,w2,w8 | ||
| 972 | add w21,w21,w17 // h+=Sigma0(a) | ||
| 973 | add w2,w2,w7 | ||
| 974 | ldr w7,[sp,#0] | ||
| 975 | str w10,[sp,#12] | ||
| 976 | ror w16,w25,#6 | ||
| 977 | add w20,w20,w28 // h+=K[i] | ||
| 978 | ror w9,w4,#7 | ||
| 979 | and w17,w26,w25 | ||
| 980 | ror w8,w1,#17 | ||
| 981 | bic w28,w27,w25 | ||
| 982 | ror w10,w21,#2 | ||
| 983 | add w20,w20,w2 // h+=X[i] | ||
| 984 | eor w16,w16,w25,ror#11 | ||
| 985 | eor w9,w9,w4,ror#18 | ||
| 986 | orr w17,w17,w28 // Ch(e,f,g) | ||
| 987 | eor w28,w21,w22 // a^b, b^c in next round | ||
| 988 | eor w16,w16,w25,ror#25 // Sigma1(e) | ||
| 989 | eor w10,w10,w21,ror#13 | ||
| 990 | add w20,w20,w17 // h+=Ch(e,f,g) | ||
| 991 | and w19,w19,w28 // (b^c)&=(a^b) | ||
| 992 | eor w8,w8,w1,ror#19 | ||
| 993 | eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) | ||
| 994 | add w20,w20,w16 // h+=Sigma1(e) | ||
| 995 | eor w19,w19,w22 // Maj(a,b,c) | ||
| 996 | eor w17,w10,w21,ror#22 // Sigma0(a) | ||
| 997 | eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) | ||
| 998 | add w3,w3,w12 | ||
| 999 | add w24,w24,w20 // d+=h | ||
| 1000 | add w20,w20,w19 // h+=Maj(a,b,c) | ||
| 1001 | ldr w19,[x30],#4 // *K++, w28 in next round | ||
| 1002 | add w3,w3,w9 | ||
| 1003 | add w20,w20,w17 // h+=Sigma0(a) | ||
| 1004 | add w3,w3,w8 | ||
| 1005 | cbnz w19,.Loop_16_xx | ||
| 1006 | |||
| 1007 | ldp x0,x2,[x29,#96] | ||
| 1008 | ldr x1,[x29,#112] | ||
| 1009 | sub x30,x30,#260 // rewind | ||
| 1010 | |||
| 1011 | ldp w3,w4,[x0] | ||
| 1012 | ldp w5,w6,[x0,#2*4] | ||
| 1013 | add x1,x1,#14*4 // advance input pointer | ||
| 1014 | ldp w7,w8,[x0,#4*4] | ||
| 1015 | add w20,w20,w3 | ||
| 1016 | ldp w9,w10,[x0,#6*4] | ||
| 1017 | add w21,w21,w4 | ||
| 1018 | add w22,w22,w5 | ||
| 1019 | add w23,w23,w6 | ||
| 1020 | stp w20,w21,[x0] | ||
| 1021 | add w24,w24,w7 | ||
| 1022 | add w25,w25,w8 | ||
| 1023 | stp w22,w23,[x0,#2*4] | ||
| 1024 | add w26,w26,w9 | ||
| 1025 | add w27,w27,w10 | ||
| 1026 | cmp x1,x2 | ||
| 1027 | stp w24,w25,[x0,#4*4] | ||
| 1028 | stp w26,w27,[x0,#6*4] | ||
| 1029 | b.ne .Loop | ||
| 1030 | |||
| 1031 | ldp x19,x20,[x29,#16] | ||
| 1032 | add sp,sp,#4*4 | ||
| 1033 | ldp x21,x22,[x29,#32] | ||
| 1034 | ldp x23,x24,[x29,#48] | ||
| 1035 | ldp x25,x26,[x29,#64] | ||
| 1036 | ldp x27,x28,[x29,#80] | ||
| 1037 | ldp x29,x30,[sp],#128 | ||
| 1038 | ret | ||
| 1039 | .size sha256_block_data_order,.-sha256_block_data_order | ||
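
The round comments in the scalar code above (h+=K[i], Ch(e,f,g), Sigma1(e), Maj(a,b,c), sigma0/sigma1) follow FIPS 180-4 notation, with Maj computed incrementally via the (b^c)&(a^b) trick and Ch via and/bic/orr. For cross-checking them against the register traffic, here is a minimal, self-contained C sketch of one round and one message-schedule step; it is an illustration only, not the kernel's code, and the helper names are made up:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, int n)
    {
            return (x >> n) | (x << (32 - n));
    }

    /* One SHA-256 round in FIPS 180-4 notation, as annotated above. */
    static void sha256_round(uint32_t s[8], uint32_t Ki, uint32_t Xi)
    {
            uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
            uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

            uint32_t Sigma1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
            uint32_t Ch     = (e & f) | (~e & g);        /* and + bic + orr    */
            uint32_t Sigma0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
            uint32_t Maj    = ((a ^ b) & (b ^ c)) ^ b;   /* (b^c)&(a^b), ^b    */
            uint32_t T1     = h + Ki + Xi + Sigma1 + Ch; /* h+=K[i]; h+=X[i].. */

            s[7] = g; s[6] = f; s[5] = e;
            s[4] = d + T1;                               /* d+=h               */
            s[3] = c; s[2] = b; s[1] = a;
            s[0] = T1 + Sigma0 + Maj;                    /* h+=Sigma0(a), Maj  */
    }

    /* Schedule step: X[i] += sigma0(X[i+1]) + X[i+9] + sigma1(X[i+14]) */
    static uint32_t sha256_sched(uint32_t xi, uint32_t x1,
                                 uint32_t x9, uint32_t x14)
    {
            uint32_t sigma0 = ror32(x1, 7)   ^ ror32(x1, 18)  ^ (x1 >> 3);
            uint32_t sigma1 = ror32(x14, 17) ^ ror32(x14, 19) ^ (x14 >> 10);

            return xi + sigma0 + x9 + sigma1;
    }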
| 1040 | |||
| 1041 | .align 6 | ||
| 1042 | .type .LK256,%object | ||
| 1043 | .LK256: | ||
| 1044 | .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 | ||
| 1045 | .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 | ||
| 1046 | .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 | ||
| 1047 | .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 | ||
| 1048 | .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc | ||
| 1049 | .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da | ||
| 1050 | .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 | ||
| 1051 | .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 | ||
| 1052 | .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 | ||
| 1053 | .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 | ||
| 1054 | .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 | ||
| 1055 | .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 | ||
| 1056 | .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 | ||
| 1057 | .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 | ||
| 1058 | .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 | ||
| 1059 | .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 | ||
| 1060 | .long 0 //terminator | ||
| 1061 | .size .LK256,.-.LK256 | ||
| 1062 | #ifndef __KERNEL__ | ||
| 1063 | .align 3 | ||
| 1064 | .LOPENSSL_armcap_P: | ||
| 1065 | # ifdef __ILP32__ | ||
| 1066 | .long OPENSSL_armcap_P-. | ||
| 1067 | # else | ||
| 1068 | .quad OPENSSL_armcap_P-. | ||
| 1069 | # endif | ||
| 1070 | #endif | ||
| 1071 | .asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>" | ||
| 1072 | .align 2 | ||
| 1073 | #ifndef __KERNEL__ | ||
| 1074 | .type sha256_block_armv8,%function | ||
| 1075 | .align 6 | ||
| 1076 | sha256_block_armv8: | ||
| 1077 | .Lv8_entry: | ||
| 1078 | stp x29,x30,[sp,#-16]! | ||
| 1079 | add x29,sp,#0 | ||
| 1080 | |||
| 1081 | ld1 {v0.4s,v1.4s},[x0] | ||
| 1082 | adr x3,.LK256 | ||
| 1083 | |||
| 1084 | .Loop_hw: | ||
| 1085 | ld1 {v4.16b-v7.16b},[x1],#64 | ||
| 1086 | sub x2,x2,#1 | ||
| 1087 | ld1 {v16.4s},[x3],#16 | ||
| 1088 | rev32 v4.16b,v4.16b | ||
| 1089 | rev32 v5.16b,v5.16b | ||
| 1090 | rev32 v6.16b,v6.16b | ||
| 1091 | rev32 v7.16b,v7.16b | ||
| 1092 | orr v18.16b,v0.16b,v0.16b // offload | ||
| 1093 | orr v19.16b,v1.16b,v1.16b | ||
| 1094 | ld1 {v17.4s},[x3],#16 | ||
| 1095 | add v16.4s,v16.4s,v4.4s | ||
| 1096 | .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b | ||
| 1097 | orr v2.16b,v0.16b,v0.16b | ||
| 1098 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1099 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1100 | .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b | ||
| 1101 | ld1 {v16.4s},[x3],#16 | ||
| 1102 | add v17.4s,v17.4s,v5.4s | ||
| 1103 | .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b | ||
| 1104 | orr v2.16b,v0.16b,v0.16b | ||
| 1105 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1106 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1107 | .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b | ||
| 1108 | ld1 {v17.4s},[x3],#16 | ||
| 1109 | add v16.4s,v16.4s,v6.4s | ||
| 1110 | .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b | ||
| 1111 | orr v2.16b,v0.16b,v0.16b | ||
| 1112 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1113 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1114 | .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b | ||
| 1115 | ld1 {v16.4s},[x3],#16 | ||
| 1116 | add v17.4s,v17.4s,v7.4s | ||
| 1117 | .inst 0x5e282887 //sha256su0 v7.16b,v4.16b | ||
| 1118 | orr v2.16b,v0.16b,v0.16b | ||
| 1119 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1120 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1121 | .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b | ||
| 1122 | ld1 {v17.4s},[x3],#16 | ||
| 1123 | add v16.4s,v16.4s,v4.4s | ||
| 1124 | .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b | ||
| 1125 | orr v2.16b,v0.16b,v0.16b | ||
| 1126 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1127 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1128 | .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b | ||
| 1129 | ld1 {v16.4s},[x3],#16 | ||
| 1130 | add v17.4s,v17.4s,v5.4s | ||
| 1131 | .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b | ||
| 1132 | orr v2.16b,v0.16b,v0.16b | ||
| 1133 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1134 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1135 | .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b | ||
| 1136 | ld1 {v17.4s},[x3],#16 | ||
| 1137 | add v16.4s,v16.4s,v6.4s | ||
| 1138 | .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b | ||
| 1139 | orr v2.16b,v0.16b,v0.16b | ||
| 1140 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1141 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1142 | .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b | ||
| 1143 | ld1 {v16.4s},[x3],#16 | ||
| 1144 | add v17.4s,v17.4s,v7.4s | ||
| 1145 | .inst 0x5e282887 //sha256su0 v7.16b,v4.16b | ||
| 1146 | orr v2.16b,v0.16b,v0.16b | ||
| 1147 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1148 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1149 | .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b | ||
| 1150 | ld1 {v17.4s},[x3],#16 | ||
| 1151 | add v16.4s,v16.4s,v4.4s | ||
| 1152 | .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b | ||
| 1153 | orr v2.16b,v0.16b,v0.16b | ||
| 1154 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1155 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1156 | .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b | ||
| 1157 | ld1 {v16.4s},[x3],#16 | ||
| 1158 | add v17.4s,v17.4s,v5.4s | ||
| 1159 | .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b | ||
| 1160 | orr v2.16b,v0.16b,v0.16b | ||
| 1161 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1162 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1163 | .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b | ||
| 1164 | ld1 {v17.4s},[x3],#16 | ||
| 1165 | add v16.4s,v16.4s,v6.4s | ||
| 1166 | .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b | ||
| 1167 | orr v2.16b,v0.16b,v0.16b | ||
| 1168 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1169 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1170 | .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b | ||
| 1171 | ld1 {v16.4s},[x3],#16 | ||
| 1172 | add v17.4s,v17.4s,v7.4s | ||
| 1173 | .inst 0x5e282887 //sha256su0 v7.16b,v4.16b | ||
| 1174 | orr v2.16b,v0.16b,v0.16b | ||
| 1175 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1176 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1177 | .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b | ||
| 1178 | ld1 {v17.4s},[x3],#16 | ||
| 1179 | add v16.4s,v16.4s,v4.4s | ||
| 1180 | orr v2.16b,v0.16b,v0.16b | ||
| 1181 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1182 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1183 | |||
| 1184 | ld1 {v16.4s},[x3],#16 | ||
| 1185 | add v17.4s,v17.4s,v5.4s | ||
| 1186 | orr v2.16b,v0.16b,v0.16b | ||
| 1187 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1188 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1189 | |||
| 1190 | ld1 {v17.4s},[x3] | ||
| 1191 | add v16.4s,v16.4s,v6.4s | ||
| 1192 | sub x3,x3,#64*4-16 // rewind | ||
| 1193 | orr v2.16b,v0.16b,v0.16b | ||
| 1194 | .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s | ||
| 1195 | .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s | ||
| 1196 | |||
| 1197 | add v17.4s,v17.4s,v7.4s | ||
| 1198 | orr v2.16b,v0.16b,v0.16b | ||
| 1199 | .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s | ||
| 1200 | .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s | ||
| 1201 | |||
| 1202 | add v0.4s,v0.4s,v18.4s | ||
| 1203 | add v1.4s,v1.4s,v19.4s | ||
| 1204 | |||
| 1205 | cbnz x2,.Loop_hw | ||
| 1206 | |||
| 1207 | st1 {v0.4s,v1.4s},[x0] | ||
| 1208 | |||
| 1209 | ldr x29,[sp],#16 | ||
| 1210 | ret | ||
| 1211 | .size sha256_block_armv8,.-sha256_block_armv8 | ||
| 1212 | #endif | ||
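
The .inst 0x5e... words in sha256_block_armv8 above hand-encode the ARMv8 Crypto Extensions instructions (each word's mnemonic appears in its trailing comment), so the file assembles even with toolchains that lack crypto-extension support. For reference, one schedule-update plus quad-round step of that loop corresponds roughly to the following ACLE-intrinsics sketch; this assumes a compiler providing __ARM_FEATURE_CRYPTO, and the function and variable names are illustrative, not from the kernel:

    #include <arm_neon.h>

    /* One step of the .Loop_hw body: update four schedule words and run
     * four rounds. abcd/efgh are the two state halves, w0..w3 hold
     * W[0..15], k is the next four K256 constants. */
    static inline void quad_round(uint32x4_t *abcd, uint32x4_t *efgh,
                                  uint32x4_t *w0, uint32x4_t w1,
                                  uint32x4_t w2, uint32x4_t w3,
                                  uint32x4_t k)
    {
            uint32x4_t wk  = vaddq_u32(k, *w0);       /* add vN.4s,vN.4s,v4.4s  */
            uint32x4_t tmp = *abcd;                   /* orr v2.16b,v0.16b,...  */

            *w0   = vsha256su0q_u32(*w0, w1);         /* sha256su0              */
            *abcd = vsha256hq_u32(*abcd, *efgh, wk);  /* sha256h                */
            *efgh = vsha256h2q_u32(*efgh, tmp, wk);   /* sha256h2               */
            *w0   = vsha256su1q_u32(*w0, w2, w3);     /* sha256su1              */
    }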
| 1213 | #ifdef __KERNEL__ | ||
| 1214 | .globl sha256_block_neon | ||
| 1215 | #endif | ||
| 1216 | .type sha256_block_neon,%function | ||
| 1217 | .align 4 | ||
| 1218 | sha256_block_neon: | ||
| 1219 | .Lneon_entry: | ||
| 1220 | stp x29, x30, [sp, #-16]! | ||
| 1221 | mov x29, sp | ||
| 1222 | sub sp,sp,#16*4 | ||
| 1223 | |||
| 1224 | adr x16,.LK256 | ||
| 1225 | add x2,x1,x2,lsl#6 // len to point at the end of inp | ||
| 1226 | |||
| 1227 | ld1 {v0.16b},[x1], #16 | ||
| 1228 | ld1 {v1.16b},[x1], #16 | ||
| 1229 | ld1 {v2.16b},[x1], #16 | ||
| 1230 | ld1 {v3.16b},[x1], #16 | ||
| 1231 | ld1 {v4.4s},[x16], #16 | ||
| 1232 | ld1 {v5.4s},[x16], #16 | ||
| 1233 | ld1 {v6.4s},[x16], #16 | ||
| 1234 | ld1 {v7.4s},[x16], #16 | ||
| 1235 | rev32 v0.16b,v0.16b // yes, even on | ||
| 1236 | rev32 v1.16b,v1.16b // big-endian | ||
| 1237 | rev32 v2.16b,v2.16b | ||
| 1238 | rev32 v3.16b,v3.16b | ||
| 1239 | mov x17,sp | ||
| 1240 | add v4.4s,v4.4s,v0.4s | ||
| 1241 | add v5.4s,v5.4s,v1.4s | ||
| 1242 | add v6.4s,v6.4s,v2.4s | ||
| 1243 | st1 {v4.4s-v5.4s},[x17], #32 | ||
| 1244 | add v7.4s,v7.4s,v3.4s | ||
| 1245 | st1 {v6.4s-v7.4s},[x17] | ||
| 1246 | sub x17,x17,#32 | ||
| 1247 | |||
| 1248 | ldp w3,w4,[x0] | ||
| 1249 | ldp w5,w6,[x0,#8] | ||
| 1250 | ldp w7,w8,[x0,#16] | ||
| 1251 | ldp w9,w10,[x0,#24] | ||
| 1252 | ldr w12,[sp,#0] | ||
| 1253 | mov w13,wzr | ||
| 1254 | eor w14,w4,w5 | ||
| 1255 | mov w15,wzr | ||
| 1256 | b .L_00_48 | ||
| 1257 | |||
| 1258 | .align 4 | ||
| 1259 | .L_00_48: | ||
| 1260 | ext v4.16b,v0.16b,v1.16b,#4 | ||
| 1261 | add w10,w10,w12 | ||
| 1262 | add w3,w3,w15 | ||
| 1263 | and w12,w8,w7 | ||
| 1264 | bic w15,w9,w7 | ||
| 1265 | ext v7.16b,v2.16b,v3.16b,#4 | ||
| 1266 | eor w11,w7,w7,ror#5 | ||
| 1267 | add w3,w3,w13 | ||
| 1268 | mov d19,v3.d[1] | ||
| 1269 | orr w12,w12,w15 | ||
| 1270 | eor w11,w11,w7,ror#19 | ||
| 1271 | ushr v6.4s,v4.4s,#7 | ||
| 1272 | eor w15,w3,w3,ror#11 | ||
| 1273 | ushr v5.4s,v4.4s,#3 | ||
| 1274 | add w10,w10,w12 | ||
| 1275 | add v0.4s,v0.4s,v7.4s | ||
| 1276 | ror w11,w11,#6 | ||
| 1277 | sli v6.4s,v4.4s,#25 | ||
| 1278 | eor w13,w3,w4 | ||
| 1279 | eor w15,w15,w3,ror#20 | ||
| 1280 | ushr v7.4s,v4.4s,#18 | ||
| 1281 | add w10,w10,w11 | ||
| 1282 | ldr w12,[sp,#4] | ||
| 1283 | and w14,w14,w13 | ||
| 1284 | eor v5.16b,v5.16b,v6.16b | ||
| 1285 | ror w15,w15,#2 | ||
| 1286 | add w6,w6,w10 | ||
| 1287 | sli v7.4s,v4.4s,#14 | ||
| 1288 | eor w14,w14,w4 | ||
| 1289 | ushr v16.4s,v19.4s,#17 | ||
| 1290 | add w9,w9,w12 | ||
| 1291 | add w10,w10,w15 | ||
| 1292 | and w12,w7,w6 | ||
| 1293 | eor v5.16b,v5.16b,v7.16b | ||
| 1294 | bic w15,w8,w6 | ||
| 1295 | eor w11,w6,w6,ror#5 | ||
| 1296 | sli v16.4s,v19.4s,#15 | ||
| 1297 | add w10,w10,w14 | ||
| 1298 | orr w12,w12,w15 | ||
| 1299 | ushr v17.4s,v19.4s,#10 | ||
| 1300 | eor w11,w11,w6,ror#19 | ||
| 1301 | eor w15,w10,w10,ror#11 | ||
| 1302 | ushr v7.4s,v19.4s,#19 | ||
| 1303 | add w9,w9,w12 | ||
| 1304 | ror w11,w11,#6 | ||
| 1305 | add v0.4s,v0.4s,v5.4s | ||
| 1306 | eor w14,w10,w3 | ||
| 1307 | eor w15,w15,w10,ror#20 | ||
| 1308 | sli v7.4s,v19.4s,#13 | ||
| 1309 | add w9,w9,w11 | ||
| 1310 | ldr w12,[sp,#8] | ||
| 1311 | and w13,w13,w14 | ||
| 1312 | eor v17.16b,v17.16b,v16.16b | ||
| 1313 | ror w15,w15,#2 | ||
| 1314 | add w5,w5,w9 | ||
| 1315 | eor w13,w13,w3 | ||
| 1316 | eor v17.16b,v17.16b,v7.16b | ||
| 1317 | add w8,w8,w12 | ||
| 1318 | add w9,w9,w15 | ||
| 1319 | and w12,w6,w5 | ||
| 1320 | add v0.4s,v0.4s,v17.4s | ||
| 1321 | bic w15,w7,w5 | ||
| 1322 | eor w11,w5,w5,ror#5 | ||
| 1323 | add w9,w9,w13 | ||
| 1324 | ushr v18.4s,v0.4s,#17 | ||
| 1325 | orr w12,w12,w15 | ||
| 1326 | ushr v19.4s,v0.4s,#10 | ||
| 1327 | eor w11,w11,w5,ror#19 | ||
| 1328 | eor w15,w9,w9,ror#11 | ||
| 1329 | sli v18.4s,v0.4s,#15 | ||
| 1330 | add w8,w8,w12 | ||
| 1331 | ushr v17.4s,v0.4s,#19 | ||
| 1332 | ror w11,w11,#6 | ||
| 1333 | eor w13,w9,w10 | ||
| 1334 | eor v19.16b,v19.16b,v18.16b | ||
| 1335 | eor w15,w15,w9,ror#20 | ||
| 1336 | add w8,w8,w11 | ||
| 1337 | sli v17.4s,v0.4s,#13 | ||
| 1338 | ldr w12,[sp,#12] | ||
| 1339 | and w14,w14,w13 | ||
| 1340 | ror w15,w15,#2 | ||
| 1341 | ld1 {v4.4s},[x16], #16 | ||
| 1342 | add w4,w4,w8 | ||
| 1343 | eor v19.16b,v19.16b,v17.16b | ||
| 1344 | eor w14,w14,w10 | ||
| 1345 | eor v17.16b,v17.16b,v17.16b | ||
| 1346 | add w7,w7,w12 | ||
| 1347 | add w8,w8,w15 | ||
| 1348 | and w12,w5,w4 | ||
| 1349 | mov v17.d[1],v19.d[0] | ||
| 1350 | bic w15,w6,w4 | ||
| 1351 | eor w11,w4,w4,ror#5 | ||
| 1352 | add w8,w8,w14 | ||
| 1353 | add v0.4s,v0.4s,v17.4s | ||
| 1354 | orr w12,w12,w15 | ||
| 1355 | eor w11,w11,w4,ror#19 | ||
| 1356 | eor w15,w8,w8,ror#11 | ||
| 1357 | add v4.4s,v4.4s,v0.4s | ||
| 1358 | add w7,w7,w12 | ||
| 1359 | ror w11,w11,#6 | ||
| 1360 | eor w14,w8,w9 | ||
| 1361 | eor w15,w15,w8,ror#20 | ||
| 1362 | add w7,w7,w11 | ||
| 1363 | ldr w12,[sp,#16] | ||
| 1364 | and w13,w13,w14 | ||
| 1365 | ror w15,w15,#2 | ||
| 1366 | add w3,w3,w7 | ||
| 1367 | eor w13,w13,w9 | ||
| 1368 | st1 {v4.4s},[x17], #16 | ||
| 1369 | ext v4.16b,v1.16b,v2.16b,#4 | ||
| 1370 | add w6,w6,w12 | ||
| 1371 | add w7,w7,w15 | ||
| 1372 | and w12,w4,w3 | ||
| 1373 | bic w15,w5,w3 | ||
| 1374 | ext v7.16b,v3.16b,v0.16b,#4 | ||
| 1375 | eor w11,w3,w3,ror#5 | ||
| 1376 | add w7,w7,w13 | ||
| 1377 | mov d19,v0.d[1] | ||
| 1378 | orr w12,w12,w15 | ||
| 1379 | eor w11,w11,w3,ror#19 | ||
| 1380 | ushr v6.4s,v4.4s,#7 | ||
| 1381 | eor w15,w7,w7,ror#11 | ||
| 1382 | ushr v5.4s,v4.4s,#3 | ||
| 1383 | add w6,w6,w12 | ||
| 1384 | add v1.4s,v1.4s,v7.4s | ||
| 1385 | ror w11,w11,#6 | ||
| 1386 | sli v6.4s,v4.4s,#25 | ||
| 1387 | eor w13,w7,w8 | ||
| 1388 | eor w15,w15,w7,ror#20 | ||
| 1389 | ushr v7.4s,v4.4s,#18 | ||
| 1390 | add w6,w6,w11 | ||
| 1391 | ldr w12,[sp,#20] | ||
| 1392 | and w14,w14,w13 | ||
| 1393 | eor v5.16b,v5.16b,v6.16b | ||
| 1394 | ror w15,w15,#2 | ||
| 1395 | add w10,w10,w6 | ||
| 1396 | sli v7.4s,v4.4s,#14 | ||
| 1397 | eor w14,w14,w8 | ||
| 1398 | ushr v16.4s,v19.4s,#17 | ||
| 1399 | add w5,w5,w12 | ||
| 1400 | add w6,w6,w15 | ||
| 1401 | and w12,w3,w10 | ||
| 1402 | eor v5.16b,v5.16b,v7.16b | ||
| 1403 | bic w15,w4,w10 | ||
| 1404 | eor w11,w10,w10,ror#5 | ||
| 1405 | sli v16.4s,v19.4s,#15 | ||
| 1406 | add w6,w6,w14 | ||
| 1407 | orr w12,w12,w15 | ||
| 1408 | ushr v17.4s,v19.4s,#10 | ||
| 1409 | eor w11,w11,w10,ror#19 | ||
| 1410 | eor w15,w6,w6,ror#11 | ||
| 1411 | ushr v7.4s,v19.4s,#19 | ||
| 1412 | add w5,w5,w12 | ||
| 1413 | ror w11,w11,#6 | ||
| 1414 | add v1.4s,v1.4s,v5.4s | ||
| 1415 | eor w14,w6,w7 | ||
| 1416 | eor w15,w15,w6,ror#20 | ||
| 1417 | sli v7.4s,v19.4s,#13 | ||
| 1418 | add w5,w5,w11 | ||
| 1419 | ldr w12,[sp,#24] | ||
| 1420 | and w13,w13,w14 | ||
| 1421 | eor v17.16b,v17.16b,v16.16b | ||
| 1422 | ror w15,w15,#2 | ||
| 1423 | add w9,w9,w5 | ||
| 1424 | eor w13,w13,w7 | ||
| 1425 | eor v17.16b,v17.16b,v7.16b | ||
| 1426 | add w4,w4,w12 | ||
| 1427 | add w5,w5,w15 | ||
| 1428 | and w12,w10,w9 | ||
| 1429 | add v1.4s,v1.4s,v17.4s | ||
| 1430 | bic w15,w3,w9 | ||
| 1431 | eor w11,w9,w9,ror#5 | ||
| 1432 | add w5,w5,w13 | ||
| 1433 | ushr v18.4s,v1.4s,#17 | ||
| 1434 | orr w12,w12,w15 | ||
| 1435 | ushr v19.4s,v1.4s,#10 | ||
| 1436 | eor w11,w11,w9,ror#19 | ||
| 1437 | eor w15,w5,w5,ror#11 | ||
| 1438 | sli v18.4s,v1.4s,#15 | ||
| 1439 | add w4,w4,w12 | ||
| 1440 | ushr v17.4s,v1.4s,#19 | ||
| 1441 | ror w11,w11,#6 | ||
| 1442 | eor w13,w5,w6 | ||
| 1443 | eor v19.16b,v19.16b,v18.16b | ||
| 1444 | eor w15,w15,w5,ror#20 | ||
| 1445 | add w4,w4,w11 | ||
| 1446 | sli v17.4s,v1.4s,#13 | ||
| 1447 | ldr w12,[sp,#28] | ||
| 1448 | and w14,w14,w13 | ||
| 1449 | ror w15,w15,#2 | ||
| 1450 | ld1 {v4.4s},[x16], #16 | ||
| 1451 | add w8,w8,w4 | ||
| 1452 | eor v19.16b,v19.16b,v17.16b | ||
| 1453 | eor w14,w14,w6 | ||
| 1454 | eor v17.16b,v17.16b,v17.16b | ||
| 1455 | add w3,w3,w12 | ||
| 1456 | add w4,w4,w15 | ||
| 1457 | and w12,w9,w8 | ||
| 1458 | mov v17.d[1],v19.d[0] | ||
| 1459 | bic w15,w10,w8 | ||
| 1460 | eor w11,w8,w8,ror#5 | ||
| 1461 | add w4,w4,w14 | ||
| 1462 | add v1.4s,v1.4s,v17.4s | ||
| 1463 | orr w12,w12,w15 | ||
| 1464 | eor w11,w11,w8,ror#19 | ||
| 1465 | eor w15,w4,w4,ror#11 | ||
| 1466 | add v4.4s,v4.4s,v1.4s | ||
| 1467 | add w3,w3,w12 | ||
| 1468 | ror w11,w11,#6 | ||
| 1469 | eor w14,w4,w5 | ||
| 1470 | eor w15,w15,w4,ror#20 | ||
| 1471 | add w3,w3,w11 | ||
| 1472 | ldr w12,[sp,#32] | ||
| 1473 | and w13,w13,w14 | ||
| 1474 | ror w15,w15,#2 | ||
| 1475 | add w7,w7,w3 | ||
| 1476 | eor w13,w13,w5 | ||
| 1477 | st1 {v4.4s},[x17], #16 | ||
| 1478 | ext v4.16b,v2.16b,v3.16b,#4 | ||
| 1479 | add w10,w10,w12 | ||
| 1480 | add w3,w3,w15 | ||
| 1481 | and w12,w8,w7 | ||
| 1482 | bic w15,w9,w7 | ||
| 1483 | ext v7.16b,v0.16b,v1.16b,#4 | ||
| 1484 | eor w11,w7,w7,ror#5 | ||
| 1485 | add w3,w3,w13 | ||
| 1486 | mov d19,v1.d[1] | ||
| 1487 | orr w12,w12,w15 | ||
| 1488 | eor w11,w11,w7,ror#19 | ||
| 1489 | ushr v6.4s,v4.4s,#7 | ||
| 1490 | eor w15,w3,w3,ror#11 | ||
| 1491 | ushr v5.4s,v4.4s,#3 | ||
| 1492 | add w10,w10,w12 | ||
| 1493 | add v2.4s,v2.4s,v7.4s | ||
| 1494 | ror w11,w11,#6 | ||
| 1495 | sli v6.4s,v4.4s,#25 | ||
| 1496 | eor w13,w3,w4 | ||
| 1497 | eor w15,w15,w3,ror#20 | ||
| 1498 | ushr v7.4s,v4.4s,#18 | ||
| 1499 | add w10,w10,w11 | ||
| 1500 | ldr w12,[sp,#36] | ||
| 1501 | and w14,w14,w13 | ||
| 1502 | eor v5.16b,v5.16b,v6.16b | ||
| 1503 | ror w15,w15,#2 | ||
| 1504 | add w6,w6,w10 | ||
| 1505 | sli v7.4s,v4.4s,#14 | ||
| 1506 | eor w14,w14,w4 | ||
| 1507 | ushr v16.4s,v19.4s,#17 | ||
| 1508 | add w9,w9,w12 | ||
| 1509 | add w10,w10,w15 | ||
| 1510 | and w12,w7,w6 | ||
| 1511 | eor v5.16b,v5.16b,v7.16b | ||
| 1512 | bic w15,w8,w6 | ||
| 1513 | eor w11,w6,w6,ror#5 | ||
| 1514 | sli v16.4s,v19.4s,#15 | ||
| 1515 | add w10,w10,w14 | ||
| 1516 | orr w12,w12,w15 | ||
| 1517 | ushr v17.4s,v19.4s,#10 | ||
| 1518 | eor w11,w11,w6,ror#19 | ||
| 1519 | eor w15,w10,w10,ror#11 | ||
| 1520 | ushr v7.4s,v19.4s,#19 | ||
| 1521 | add w9,w9,w12 | ||
| 1522 | ror w11,w11,#6 | ||
| 1523 | add v2.4s,v2.4s,v5.4s | ||
| 1524 | eor w14,w10,w3 | ||
| 1525 | eor w15,w15,w10,ror#20 | ||
| 1526 | sli v7.4s,v19.4s,#13 | ||
| 1527 | add w9,w9,w11 | ||
| 1528 | ldr w12,[sp,#40] | ||
| 1529 | and w13,w13,w14 | ||
| 1530 | eor v17.16b,v17.16b,v16.16b | ||
| 1531 | ror w15,w15,#2 | ||
| 1532 | add w5,w5,w9 | ||
| 1533 | eor w13,w13,w3 | ||
| 1534 | eor v17.16b,v17.16b,v7.16b | ||
| 1535 | add w8,w8,w12 | ||
| 1536 | add w9,w9,w15 | ||
| 1537 | and w12,w6,w5 | ||
| 1538 | add v2.4s,v2.4s,v17.4s | ||
| 1539 | bic w15,w7,w5 | ||
| 1540 | eor w11,w5,w5,ror#5 | ||
| 1541 | add w9,w9,w13 | ||
| 1542 | ushr v18.4s,v2.4s,#17 | ||
| 1543 | orr w12,w12,w15 | ||
| 1544 | ushr v19.4s,v2.4s,#10 | ||
| 1545 | eor w11,w11,w5,ror#19 | ||
| 1546 | eor w15,w9,w9,ror#11 | ||
| 1547 | sli v18.4s,v2.4s,#15 | ||
| 1548 | add w8,w8,w12 | ||
| 1549 | ushr v17.4s,v2.4s,#19 | ||
| 1550 | ror w11,w11,#6 | ||
| 1551 | eor w13,w9,w10 | ||
| 1552 | eor v19.16b,v19.16b,v18.16b | ||
| 1553 | eor w15,w15,w9,ror#20 | ||
| 1554 | add w8,w8,w11 | ||
| 1555 | sli v17.4s,v2.4s,#13 | ||
| 1556 | ldr w12,[sp,#44] | ||
| 1557 | and w14,w14,w13 | ||
| 1558 | ror w15,w15,#2 | ||
| 1559 | ld1 {v4.4s},[x16], #16 | ||
| 1560 | add w4,w4,w8 | ||
| 1561 | eor v19.16b,v19.16b,v17.16b | ||
| 1562 | eor w14,w14,w10 | ||
| 1563 | eor v17.16b,v17.16b,v17.16b | ||
| 1564 | add w7,w7,w12 | ||
| 1565 | add w8,w8,w15 | ||
| 1566 | and w12,w5,w4 | ||
| 1567 | mov v17.d[1],v19.d[0] | ||
| 1568 | bic w15,w6,w4 | ||
| 1569 | eor w11,w4,w4,ror#5 | ||
| 1570 | add w8,w8,w14 | ||
| 1571 | add v2.4s,v2.4s,v17.4s | ||
| 1572 | orr w12,w12,w15 | ||
| 1573 | eor w11,w11,w4,ror#19 | ||
| 1574 | eor w15,w8,w8,ror#11 | ||
| 1575 | add v4.4s,v4.4s,v2.4s | ||
| 1576 | add w7,w7,w12 | ||
| 1577 | ror w11,w11,#6 | ||
| 1578 | eor w14,w8,w9 | ||
| 1579 | eor w15,w15,w8,ror#20 | ||
| 1580 | add w7,w7,w11 | ||
| 1581 | ldr w12,[sp,#48] | ||
| 1582 | and w13,w13,w14 | ||
| 1583 | ror w15,w15,#2 | ||
| 1584 | add w3,w3,w7 | ||
| 1585 | eor w13,w13,w9 | ||
| 1586 | st1 {v4.4s},[x17], #16 | ||
| 1587 | ext v4.16b,v3.16b,v0.16b,#4 | ||
| 1588 | add w6,w6,w12 | ||
| 1589 | add w7,w7,w15 | ||
| 1590 | and w12,w4,w3 | ||
| 1591 | bic w15,w5,w3 | ||
| 1592 | ext v7.16b,v1.16b,v2.16b,#4 | ||
| 1593 | eor w11,w3,w3,ror#5 | ||
| 1594 | add w7,w7,w13 | ||
| 1595 | mov d19,v2.d[1] | ||
| 1596 | orr w12,w12,w15 | ||
| 1597 | eor w11,w11,w3,ror#19 | ||
| 1598 | ushr v6.4s,v4.4s,#7 | ||
| 1599 | eor w15,w7,w7,ror#11 | ||
| 1600 | ushr v5.4s,v4.4s,#3 | ||
| 1601 | add w6,w6,w12 | ||
| 1602 | add v3.4s,v3.4s,v7.4s | ||
| 1603 | ror w11,w11,#6 | ||
| 1604 | sli v6.4s,v4.4s,#25 | ||
| 1605 | eor w13,w7,w8 | ||
| 1606 | eor w15,w15,w7,ror#20 | ||
| 1607 | ushr v7.4s,v4.4s,#18 | ||
| 1608 | add w6,w6,w11 | ||
| 1609 | ldr w12,[sp,#52] | ||
| 1610 | and w14,w14,w13 | ||
| 1611 | eor v5.16b,v5.16b,v6.16b | ||
| 1612 | ror w15,w15,#2 | ||
| 1613 | add w10,w10,w6 | ||
| 1614 | sli v7.4s,v4.4s,#14 | ||
| 1615 | eor w14,w14,w8 | ||
| 1616 | ushr v16.4s,v19.4s,#17 | ||
| 1617 | add w5,w5,w12 | ||
| 1618 | add w6,w6,w15 | ||
| 1619 | and w12,w3,w10 | ||
| 1620 | eor v5.16b,v5.16b,v7.16b | ||
| 1621 | bic w15,w4,w10 | ||
| 1622 | eor w11,w10,w10,ror#5 | ||
| 1623 | sli v16.4s,v19.4s,#15 | ||
| 1624 | add w6,w6,w14 | ||
| 1625 | orr w12,w12,w15 | ||
| 1626 | ushr v17.4s,v19.4s,#10 | ||
| 1627 | eor w11,w11,w10,ror#19 | ||
| 1628 | eor w15,w6,w6,ror#11 | ||
| 1629 | ushr v7.4s,v19.4s,#19 | ||
| 1630 | add w5,w5,w12 | ||
| 1631 | ror w11,w11,#6 | ||
| 1632 | add v3.4s,v3.4s,v5.4s | ||
| 1633 | eor w14,w6,w7 | ||
| 1634 | eor w15,w15,w6,ror#20 | ||
| 1635 | sli v7.4s,v19.4s,#13 | ||
| 1636 | add w5,w5,w11 | ||
| 1637 | ldr w12,[sp,#56] | ||
| 1638 | and w13,w13,w14 | ||
| 1639 | eor v17.16b,v17.16b,v16.16b | ||
| 1640 | ror w15,w15,#2 | ||
| 1641 | add w9,w9,w5 | ||
| 1642 | eor w13,w13,w7 | ||
| 1643 | eor v17.16b,v17.16b,v7.16b | ||
| 1644 | add w4,w4,w12 | ||
| 1645 | add w5,w5,w15 | ||
| 1646 | and w12,w10,w9 | ||
| 1647 | add v3.4s,v3.4s,v17.4s | ||
| 1648 | bic w15,w3,w9 | ||
| 1649 | eor w11,w9,w9,ror#5 | ||
| 1650 | add w5,w5,w13 | ||
| 1651 | ushr v18.4s,v3.4s,#17 | ||
| 1652 | orr w12,w12,w15 | ||
| 1653 | ushr v19.4s,v3.4s,#10 | ||
| 1654 | eor w11,w11,w9,ror#19 | ||
| 1655 | eor w15,w5,w5,ror#11 | ||
| 1656 | sli v18.4s,v3.4s,#15 | ||
| 1657 | add w4,w4,w12 | ||
| 1658 | ushr v17.4s,v3.4s,#19 | ||
| 1659 | ror w11,w11,#6 | ||
| 1660 | eor w13,w5,w6 | ||
| 1661 | eor v19.16b,v19.16b,v18.16b | ||
| 1662 | eor w15,w15,w5,ror#20 | ||
| 1663 | add w4,w4,w11 | ||
| 1664 | sli v17.4s,v3.4s,#13 | ||
| 1665 | ldr w12,[sp,#60] | ||
| 1666 | and w14,w14,w13 | ||
| 1667 | ror w15,w15,#2 | ||
| 1668 | ld1 {v4.4s},[x16], #16 | ||
| 1669 | add w8,w8,w4 | ||
| 1670 | eor v19.16b,v19.16b,v17.16b | ||
| 1671 | eor w14,w14,w6 | ||
| 1672 | eor v17.16b,v17.16b,v17.16b | ||
| 1673 | add w3,w3,w12 | ||
| 1674 | add w4,w4,w15 | ||
| 1675 | and w12,w9,w8 | ||
| 1676 | mov v17.d[1],v19.d[0] | ||
| 1677 | bic w15,w10,w8 | ||
| 1678 | eor w11,w8,w8,ror#5 | ||
| 1679 | add w4,w4,w14 | ||
| 1680 | add v3.4s,v3.4s,v17.4s | ||
| 1681 | orr w12,w12,w15 | ||
| 1682 | eor w11,w11,w8,ror#19 | ||
| 1683 | eor w15,w4,w4,ror#11 | ||
| 1684 | add v4.4s,v4.4s,v3.4s | ||
| 1685 | add w3,w3,w12 | ||
| 1686 | ror w11,w11,#6 | ||
| 1687 | eor w14,w4,w5 | ||
| 1688 | eor w15,w15,w4,ror#20 | ||
| 1689 | add w3,w3,w11 | ||
| 1690 | ldr w12,[x16] | ||
| 1691 | and w13,w13,w14 | ||
| 1692 | ror w15,w15,#2 | ||
| 1693 | add w7,w7,w3 | ||
| 1694 | eor w13,w13,w5 | ||
| 1695 | st1 {v4.4s},[x17], #16 | ||
| 1696 | cmp w12,#0 // check for K256 terminator | ||
| 1697 | ldr w12,[sp,#0] | ||
| 1698 | sub x17,x17,#64 | ||
| 1699 | bne .L_00_48 | ||
| 1700 | |||
| 1701 | sub x16,x16,#256 // rewind x16 | ||
| 1702 | cmp x1,x2 | ||
| 1703 | mov x17, #64 | ||
| 1704 | csel x17, x17, xzr, eq | ||
| 1705 | sub x1,x1,x17 // avoid SEGV | ||
| 1706 | mov x17,sp | ||
| 1707 | add w10,w10,w12 | ||
| 1708 | add w3,w3,w15 | ||
| 1709 | and w12,w8,w7 | ||
| 1710 | ld1 {v0.16b},[x1],#16 | ||
| 1711 | bic w15,w9,w7 | ||
| 1712 | eor w11,w7,w7,ror#5 | ||
| 1713 | ld1 {v4.4s},[x16],#16 | ||
| 1714 | add w3,w3,w13 | ||
| 1715 | orr w12,w12,w15 | ||
| 1716 | eor w11,w11,w7,ror#19 | ||
| 1717 | eor w15,w3,w3,ror#11 | ||
| 1718 | rev32 v0.16b,v0.16b | ||
| 1719 | add w10,w10,w12 | ||
| 1720 | ror w11,w11,#6 | ||
| 1721 | eor w13,w3,w4 | ||
| 1722 | eor w15,w15,w3,ror#20 | ||
| 1723 | add v4.4s,v4.4s,v0.4s | ||
| 1724 | add w10,w10,w11 | ||
| 1725 | ldr w12,[sp,#4] | ||
| 1726 | and w14,w14,w13 | ||
| 1727 | ror w15,w15,#2 | ||
| 1728 | add w6,w6,w10 | ||
| 1729 | eor w14,w14,w4 | ||
| 1730 | add w9,w9,w12 | ||
| 1731 | add w10,w10,w15 | ||
| 1732 | and w12,w7,w6 | ||
| 1733 | bic w15,w8,w6 | ||
| 1734 | eor w11,w6,w6,ror#5 | ||
| 1735 | add w10,w10,w14 | ||
| 1736 | orr w12,w12,w15 | ||
| 1737 | eor w11,w11,w6,ror#19 | ||
| 1738 | eor w15,w10,w10,ror#11 | ||
| 1739 | add w9,w9,w12 | ||
| 1740 | ror w11,w11,#6 | ||
| 1741 | eor w14,w10,w3 | ||
| 1742 | eor w15,w15,w10,ror#20 | ||
| 1743 | add w9,w9,w11 | ||
| 1744 | ldr w12,[sp,#8] | ||
| 1745 | and w13,w13,w14 | ||
| 1746 | ror w15,w15,#2 | ||
| 1747 | add w5,w5,w9 | ||
| 1748 | eor w13,w13,w3 | ||
| 1749 | add w8,w8,w12 | ||
| 1750 | add w9,w9,w15 | ||
| 1751 | and w12,w6,w5 | ||
| 1752 | bic w15,w7,w5 | ||
| 1753 | eor w11,w5,w5,ror#5 | ||
| 1754 | add w9,w9,w13 | ||
| 1755 | orr w12,w12,w15 | ||
| 1756 | eor w11,w11,w5,ror#19 | ||
| 1757 | eor w15,w9,w9,ror#11 | ||
| 1758 | add w8,w8,w12 | ||
| 1759 | ror w11,w11,#6 | ||
| 1760 | eor w13,w9,w10 | ||
| 1761 | eor w15,w15,w9,ror#20 | ||
| 1762 | add w8,w8,w11 | ||
| 1763 | ldr w12,[sp,#12] | ||
| 1764 | and w14,w14,w13 | ||
| 1765 | ror w15,w15,#2 | ||
| 1766 | add w4,w4,w8 | ||
| 1767 | eor w14,w14,w10 | ||
| 1768 | add w7,w7,w12 | ||
| 1769 | add w8,w8,w15 | ||
| 1770 | and w12,w5,w4 | ||
| 1771 | bic w15,w6,w4 | ||
| 1772 | eor w11,w4,w4,ror#5 | ||
| 1773 | add w8,w8,w14 | ||
| 1774 | orr w12,w12,w15 | ||
| 1775 | eor w11,w11,w4,ror#19 | ||
| 1776 | eor w15,w8,w8,ror#11 | ||
| 1777 | add w7,w7,w12 | ||
| 1778 | ror w11,w11,#6 | ||
| 1779 | eor w14,w8,w9 | ||
| 1780 | eor w15,w15,w8,ror#20 | ||
| 1781 | add w7,w7,w11 | ||
| 1782 | ldr w12,[sp,#16] | ||
| 1783 | and w13,w13,w14 | ||
| 1784 | ror w15,w15,#2 | ||
| 1785 | add w3,w3,w7 | ||
| 1786 | eor w13,w13,w9 | ||
| 1787 | st1 {v4.4s},[x17], #16 | ||
| 1788 | add w6,w6,w12 | ||
| 1789 | add w7,w7,w15 | ||
| 1790 | and w12,w4,w3 | ||
| 1791 | ld1 {v1.16b},[x1],#16 | ||
| 1792 | bic w15,w5,w3 | ||
| 1793 | eor w11,w3,w3,ror#5 | ||
| 1794 | ld1 {v4.4s},[x16],#16 | ||
| 1795 | add w7,w7,w13 | ||
| 1796 | orr w12,w12,w15 | ||
| 1797 | eor w11,w11,w3,ror#19 | ||
| 1798 | eor w15,w7,w7,ror#11 | ||
| 1799 | rev32 v1.16b,v1.16b | ||
| 1800 | add w6,w6,w12 | ||
| 1801 | ror w11,w11,#6 | ||
| 1802 | eor w13,w7,w8 | ||
| 1803 | eor w15,w15,w7,ror#20 | ||
| 1804 | add v4.4s,v4.4s,v1.4s | ||
| 1805 | add w6,w6,w11 | ||
| 1806 | ldr w12,[sp,#20] | ||
| 1807 | and w14,w14,w13 | ||
| 1808 | ror w15,w15,#2 | ||
| 1809 | add w10,w10,w6 | ||
| 1810 | eor w14,w14,w8 | ||
| 1811 | add w5,w5,w12 | ||
| 1812 | add w6,w6,w15 | ||
| 1813 | and w12,w3,w10 | ||
| 1814 | bic w15,w4,w10 | ||
| 1815 | eor w11,w10,w10,ror#5 | ||
| 1816 | add w6,w6,w14 | ||
| 1817 | orr w12,w12,w15 | ||
| 1818 | eor w11,w11,w10,ror#19 | ||
| 1819 | eor w15,w6,w6,ror#11 | ||
| 1820 | add w5,w5,w12 | ||
| 1821 | ror w11,w11,#6 | ||
| 1822 | eor w14,w6,w7 | ||
| 1823 | eor w15,w15,w6,ror#20 | ||
| 1824 | add w5,w5,w11 | ||
| 1825 | ldr w12,[sp,#24] | ||
| 1826 | and w13,w13,w14 | ||
| 1827 | ror w15,w15,#2 | ||
| 1828 | add w9,w9,w5 | ||
| 1829 | eor w13,w13,w7 | ||
| 1830 | add w4,w4,w12 | ||
| 1831 | add w5,w5,w15 | ||
| 1832 | and w12,w10,w9 | ||
| 1833 | bic w15,w3,w9 | ||
| 1834 | eor w11,w9,w9,ror#5 | ||
| 1835 | add w5,w5,w13 | ||
| 1836 | orr w12,w12,w15 | ||
| 1837 | eor w11,w11,w9,ror#19 | ||
| 1838 | eor w15,w5,w5,ror#11 | ||
| 1839 | add w4,w4,w12 | ||
| 1840 | ror w11,w11,#6 | ||
| 1841 | eor w13,w5,w6 | ||
| 1842 | eor w15,w15,w5,ror#20 | ||
| 1843 | add w4,w4,w11 | ||
| 1844 | ldr w12,[sp,#28] | ||
| 1845 | and w14,w14,w13 | ||
| 1846 | ror w15,w15,#2 | ||
| 1847 | add w8,w8,w4 | ||
| 1848 | eor w14,w14,w6 | ||
| 1849 | add w3,w3,w12 | ||
| 1850 | add w4,w4,w15 | ||
| 1851 | and w12,w9,w8 | ||
| 1852 | bic w15,w10,w8 | ||
| 1853 | eor w11,w8,w8,ror#5 | ||
| 1854 | add w4,w4,w14 | ||
| 1855 | orr w12,w12,w15 | ||
| 1856 | eor w11,w11,w8,ror#19 | ||
| 1857 | eor w15,w4,w4,ror#11 | ||
| 1858 | add w3,w3,w12 | ||
| 1859 | ror w11,w11,#6 | ||
| 1860 | eor w14,w4,w5 | ||
| 1861 | eor w15,w15,w4,ror#20 | ||
| 1862 | add w3,w3,w11 | ||
| 1863 | ldr w12,[sp,#32] | ||
| 1864 | and w13,w13,w14 | ||
| 1865 | ror w15,w15,#2 | ||
| 1866 | add w7,w7,w3 | ||
| 1867 | eor w13,w13,w5 | ||
| 1868 | st1 {v4.4s},[x17], #16 | ||
| 1869 | add w10,w10,w12 | ||
| 1870 | add w3,w3,w15 | ||
| 1871 | and w12,w8,w7 | ||
| 1872 | ld1 {v2.16b},[x1],#16 | ||
| 1873 | bic w15,w9,w7 | ||
| 1874 | eor w11,w7,w7,ror#5 | ||
| 1875 | ld1 {v4.4s},[x16],#16 | ||
| 1876 | add w3,w3,w13 | ||
| 1877 | orr w12,w12,w15 | ||
| 1878 | eor w11,w11,w7,ror#19 | ||
| 1879 | eor w15,w3,w3,ror#11 | ||
| 1880 | rev32 v2.16b,v2.16b | ||
| 1881 | add w10,w10,w12 | ||
| 1882 | ror w11,w11,#6 | ||
| 1883 | eor w13,w3,w4 | ||
| 1884 | eor w15,w15,w3,ror#20 | ||
| 1885 | add v4.4s,v4.4s,v2.4s | ||
| 1886 | add w10,w10,w11 | ||
| 1887 | ldr w12,[sp,#36] | ||
| 1888 | and w14,w14,w13 | ||
| 1889 | ror w15,w15,#2 | ||
| 1890 | add w6,w6,w10 | ||
| 1891 | eor w14,w14,w4 | ||
| 1892 | add w9,w9,w12 | ||
| 1893 | add w10,w10,w15 | ||
| 1894 | and w12,w7,w6 | ||
| 1895 | bic w15,w8,w6 | ||
| 1896 | eor w11,w6,w6,ror#5 | ||
| 1897 | add w10,w10,w14 | ||
| 1898 | orr w12,w12,w15 | ||
| 1899 | eor w11,w11,w6,ror#19 | ||
| 1900 | eor w15,w10,w10,ror#11 | ||
| 1901 | add w9,w9,w12 | ||
| 1902 | ror w11,w11,#6 | ||
| 1903 | eor w14,w10,w3 | ||
| 1904 | eor w15,w15,w10,ror#20 | ||
| 1905 | add w9,w9,w11 | ||
| 1906 | ldr w12,[sp,#40] | ||
| 1907 | and w13,w13,w14 | ||
| 1908 | ror w15,w15,#2 | ||
| 1909 | add w5,w5,w9 | ||
| 1910 | eor w13,w13,w3 | ||
| 1911 | add w8,w8,w12 | ||
| 1912 | add w9,w9,w15 | ||
| 1913 | and w12,w6,w5 | ||
| 1914 | bic w15,w7,w5 | ||
| 1915 | eor w11,w5,w5,ror#5 | ||
| 1916 | add w9,w9,w13 | ||
| 1917 | orr w12,w12,w15 | ||
| 1918 | eor w11,w11,w5,ror#19 | ||
| 1919 | eor w15,w9,w9,ror#11 | ||
| 1920 | add w8,w8,w12 | ||
| 1921 | ror w11,w11,#6 | ||
| 1922 | eor w13,w9,w10 | ||
| 1923 | eor w15,w15,w9,ror#20 | ||
| 1924 | add w8,w8,w11 | ||
| 1925 | ldr w12,[sp,#44] | ||
| 1926 | and w14,w14,w13 | ||
| 1927 | ror w15,w15,#2 | ||
| 1928 | add w4,w4,w8 | ||
| 1929 | eor w14,w14,w10 | ||
| 1930 | add w7,w7,w12 | ||
| 1931 | add w8,w8,w15 | ||
| 1932 | and w12,w5,w4 | ||
| 1933 | bic w15,w6,w4 | ||
| 1934 | eor w11,w4,w4,ror#5 | ||
| 1935 | add w8,w8,w14 | ||
| 1936 | orr w12,w12,w15 | ||
| 1937 | eor w11,w11,w4,ror#19 | ||
| 1938 | eor w15,w8,w8,ror#11 | ||
| 1939 | add w7,w7,w12 | ||
| 1940 | ror w11,w11,#6 | ||
| 1941 | eor w14,w8,w9 | ||
| 1942 | eor w15,w15,w8,ror#20 | ||
| 1943 | add w7,w7,w11 | ||
| 1944 | ldr w12,[sp,#48] | ||
| 1945 | and w13,w13,w14 | ||
| 1946 | ror w15,w15,#2 | ||
| 1947 | add w3,w3,w7 | ||
| 1948 | eor w13,w13,w9 | ||
| 1949 | st1 {v4.4s},[x17], #16 | ||
| 1950 | add w6,w6,w12 | ||
| 1951 | add w7,w7,w15 | ||
| 1952 | and w12,w4,w3 | ||
| 1953 | ld1 {v3.16b},[x1],#16 | ||
| 1954 | bic w15,w5,w3 | ||
| 1955 | eor w11,w3,w3,ror#5 | ||
| 1956 | ld1 {v4.4s},[x16],#16 | ||
| 1957 | add w7,w7,w13 | ||
| 1958 | orr w12,w12,w15 | ||
| 1959 | eor w11,w11,w3,ror#19 | ||
| 1960 | eor w15,w7,w7,ror#11 | ||
| 1961 | rev32 v3.16b,v3.16b | ||
| 1962 | add w6,w6,w12 | ||
| 1963 | ror w11,w11,#6 | ||
| 1964 | eor w13,w7,w8 | ||
| 1965 | eor w15,w15,w7,ror#20 | ||
| 1966 | add v4.4s,v4.4s,v3.4s | ||
| 1967 | add w6,w6,w11 | ||
| 1968 | ldr w12,[sp,#52] | ||
| 1969 | and w14,w14,w13 | ||
| 1970 | ror w15,w15,#2 | ||
| 1971 | add w10,w10,w6 | ||
| 1972 | eor w14,w14,w8 | ||
| 1973 | add w5,w5,w12 | ||
| 1974 | add w6,w6,w15 | ||
| 1975 | and w12,w3,w10 | ||
| 1976 | bic w15,w4,w10 | ||
| 1977 | eor w11,w10,w10,ror#5 | ||
| 1978 | add w6,w6,w14 | ||
| 1979 | orr w12,w12,w15 | ||
| 1980 | eor w11,w11,w10,ror#19 | ||
| 1981 | eor w15,w6,w6,ror#11 | ||
| 1982 | add w5,w5,w12 | ||
| 1983 | ror w11,w11,#6 | ||
| 1984 | eor w14,w6,w7 | ||
| 1985 | eor w15,w15,w6,ror#20 | ||
| 1986 | add w5,w5,w11 | ||
| 1987 | ldr w12,[sp,#56] | ||
| 1988 | and w13,w13,w14 | ||
| 1989 | ror w15,w15,#2 | ||
| 1990 | add w9,w9,w5 | ||
| 1991 | eor w13,w13,w7 | ||
| 1992 | add w4,w4,w12 | ||
| 1993 | add w5,w5,w15 | ||
| 1994 | and w12,w10,w9 | ||
| 1995 | bic w15,w3,w9 | ||
| 1996 | eor w11,w9,w9,ror#5 | ||
| 1997 | add w5,w5,w13 | ||
| 1998 | orr w12,w12,w15 | ||
| 1999 | eor w11,w11,w9,ror#19 | ||
| 2000 | eor w15,w5,w5,ror#11 | ||
| 2001 | add w4,w4,w12 | ||
| 2002 | ror w11,w11,#6 | ||
| 2003 | eor w13,w5,w6 | ||
| 2004 | eor w15,w15,w5,ror#20 | ||
| 2005 | add w4,w4,w11 | ||
| 2006 | ldr w12,[sp,#60] | ||
| 2007 | and w14,w14,w13 | ||
| 2008 | ror w15,w15,#2 | ||
| 2009 | add w8,w8,w4 | ||
| 2010 | eor w14,w14,w6 | ||
| 2011 | add w3,w3,w12 | ||
| 2012 | add w4,w4,w15 | ||
| 2013 | and w12,w9,w8 | ||
| 2014 | bic w15,w10,w8 | ||
| 2015 | eor w11,w8,w8,ror#5 | ||
| 2016 | add w4,w4,w14 | ||
| 2017 | orr w12,w12,w15 | ||
| 2018 | eor w11,w11,w8,ror#19 | ||
| 2019 | eor w15,w4,w4,ror#11 | ||
| 2020 | add w3,w3,w12 | ||
| 2021 | ror w11,w11,#6 | ||
| 2022 | eor w14,w4,w5 | ||
| 2023 | eor w15,w15,w4,ror#20 | ||
| 2024 | add w3,w3,w11 | ||
| 2025 | and w13,w13,w14 | ||
| 2026 | ror w15,w15,#2 | ||
| 2027 | add w7,w7,w3 | ||
| 2028 | eor w13,w13,w5 | ||
| 2029 | st1 {v4.4s},[x17], #16 | ||
| 2030 | add w3,w3,w15 // h+=Sigma0(a) from the past | ||
| 2031 | ldp w11,w12,[x0,#0] | ||
| 2032 | add w3,w3,w13 // h+=Maj(a,b,c) from the past | ||
| 2033 | ldp w13,w14,[x0,#8] | ||
| 2034 | add w3,w3,w11 // accumulate | ||
| 2035 | add w4,w4,w12 | ||
| 2036 | ldp w11,w12,[x0,#16] | ||
| 2037 | add w5,w5,w13 | ||
| 2038 | add w6,w6,w14 | ||
| 2039 | ldp w13,w14,[x0,#24] | ||
| 2040 | add w7,w7,w11 | ||
| 2041 | add w8,w8,w12 | ||
| 2042 | ldr w12,[sp,#0] | ||
| 2043 | stp w3,w4,[x0,#0] | ||
| 2044 | add w9,w9,w13 | ||
| 2045 | mov w13,wzr | ||
| 2046 | stp w5,w6,[x0,#8] | ||
| 2047 | add w10,w10,w14 | ||
| 2048 | stp w7,w8,[x0,#16] | ||
| 2049 | eor w14,w4,w5 | ||
| 2050 | stp w9,w10,[x0,#24] | ||
| 2051 | mov w15,wzr | ||
| 2052 | mov x17,sp | ||
| 2053 | b.ne .L_00_48 | ||
| 2054 | |||
| 2055 | ldr x29,[x29] | ||
| 2056 | add sp,sp,#16*4+16 | ||
| 2057 | ret | ||
| 2058 | .size sha256_block_neon,.-sha256_block_neon | ||
| 2059 | #ifndef __KERNEL__ | ||
| 2060 | .comm OPENSSL_armcap_P,4,4 | ||
| 2061 | #endif | ||
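
sha256_block_neon above vectorizes the message schedule with plain NEON rather than the crypto extensions. AArch64 NEON has no vector rotate, so the code synthesizes each 32-bit rotate from an unsigned shift right (ushr) plus a shift-left-and-insert (sli): ushr #7 followed by sli #25 yields ror #7. A hedged intrinsics rendering of the sigma0 computation, for illustration only:

    #include <arm_neon.h>

    /* ror(x, n) per 32-bit lane: USHR #n then SLI #(32-n), as in the
     * assembly (e.g. ushr v6.4s,v4.4s,#7 / sli v6.4s,v4.4s,#25). */
    #define VROR32(x, n) vsliq_n_u32(vshrq_n_u32((x), (n)), (x), 32 - (n))

    /* sigma0(x) = ror(x,7) ^ ror(x,18) ^ (x >> 3) on four lanes at once */
    static inline uint32x4_t sigma0_x4(uint32x4_t x)
    {
            return veorq_u32(veorq_u32(VROR32(x, 7), VROR32(x, 18)),
                             vshrq_n_u32(x, 3));
    }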
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c new file mode 100644 index 000000000000..a2226f841960 --- /dev/null +++ b/arch/arm64/crypto/sha256-glue.c | |||
| @@ -0,0 +1,185 @@ | |||
| 1 | /* | ||
| 2 | * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64 | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the Free | ||
| 8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 9 | * any later version. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <asm/hwcap.h> | ||
| 14 | #include <asm/neon.h> | ||
| 15 | #include <asm/simd.h> | ||
| 16 | #include <crypto/internal/hash.h> | ||
| 17 | #include <crypto/sha.h> | ||
| 18 | #include <crypto/sha256_base.h> | ||
| 19 | #include <linux/cryptohash.h> | ||
| 20 | #include <linux/types.h> | ||
| 21 | #include <linux/string.h> | ||
| 22 | |||
| 23 | MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64"); | ||
| 24 | MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>"); | ||
| 25 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
| 26 | MODULE_LICENSE("GPL v2"); | ||
| 27 | MODULE_ALIAS_CRYPTO("sha224"); | ||
| 28 | MODULE_ALIAS_CRYPTO("sha256"); | ||
| 29 | |||
| 30 | asmlinkage void sha256_block_data_order(u32 *digest, const void *data, | ||
| 31 | unsigned int num_blks); | ||
| 32 | |||
| 33 | asmlinkage void sha256_block_neon(u32 *digest, const void *data, | ||
| 34 | unsigned int num_blks); | ||
| 35 | |||
| 36 | static int sha256_update(struct shash_desc *desc, const u8 *data, | ||
| 37 | unsigned int len) | ||
| 38 | { | ||
| 39 | return sha256_base_do_update(desc, data, len, | ||
| 40 | (sha256_block_fn *)sha256_block_data_order); | ||
| 41 | } | ||
| 42 | |||
| 43 | static int sha256_finup(struct shash_desc *desc, const u8 *data, | ||
| 44 | unsigned int len, u8 *out) | ||
| 45 | { | ||
| 46 | if (len) | ||
| 47 | sha256_base_do_update(desc, data, len, | ||
| 48 | (sha256_block_fn *)sha256_block_data_order); | ||
| 49 | sha256_base_do_finalize(desc, | ||
| 50 | (sha256_block_fn *)sha256_block_data_order); | ||
| 51 | |||
| 52 | return sha256_base_finish(desc, out); | ||
| 53 | } | ||
| 54 | |||
| 55 | static int sha256_final(struct shash_desc *desc, u8 *out) | ||
| 56 | { | ||
| 57 | return sha256_finup(desc, NULL, 0, out); | ||
| 58 | } | ||
| 59 | |||
| 60 | static struct shash_alg algs[] = { { | ||
| 61 | .digestsize = SHA256_DIGEST_SIZE, | ||
| 62 | .init = sha256_base_init, | ||
| 63 | .update = sha256_update, | ||
| 64 | .final = sha256_final, | ||
| 65 | .finup = sha256_finup, | ||
| 66 | .descsize = sizeof(struct sha256_state), | ||
| 67 | .base.cra_name = "sha256", | ||
| 68 | .base.cra_driver_name = "sha256-arm64", | ||
| 69 | .base.cra_priority = 100, | ||
| 70 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 71 | .base.cra_blocksize = SHA256_BLOCK_SIZE, | ||
| 72 | .base.cra_module = THIS_MODULE, | ||
| 73 | }, { | ||
| 74 | .digestsize = SHA224_DIGEST_SIZE, | ||
| 75 | .init = sha224_base_init, | ||
| 76 | .update = sha256_update, | ||
| 77 | .final = sha256_final, | ||
| 78 | .finup = sha256_finup, | ||
| 79 | .descsize = sizeof(struct sha256_state), | ||
| 80 | .base.cra_name = "sha224", | ||
| 81 | .base.cra_driver_name = "sha224-arm64", | ||
| 82 | .base.cra_priority = 100, | ||
| 83 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 84 | .base.cra_blocksize = SHA224_BLOCK_SIZE, | ||
| 85 | .base.cra_module = THIS_MODULE, | ||
| 86 | } }; | ||
| 87 | |||
| 88 | static int sha256_update_neon(struct shash_desc *desc, const u8 *data, | ||
| 89 | unsigned int len) | ||
| 90 | { | ||
| 91 | /* | ||
| 92 | * Stacking and unstacking a substantial slice of the NEON register | ||
| 93 | * file may significantly affect performance for small updates when | ||
| 94 | * executing in interrupt context, so fall back to the scalar code | ||
| 95 | * in that case. | ||
| 96 | */ | ||
| 97 | if (!may_use_simd()) | ||
| 98 | return sha256_base_do_update(desc, data, len, | ||
| 99 | (sha256_block_fn *)sha256_block_data_order); | ||
| 100 | |||
| 101 | kernel_neon_begin(); | ||
| 102 | sha256_base_do_update(desc, data, len, | ||
| 103 | (sha256_block_fn *)sha256_block_neon); | ||
| 104 | kernel_neon_end(); | ||
| 105 | |||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | |||
| 109 | static int sha256_finup_neon(struct shash_desc *desc, const u8 *data, | ||
| 110 | unsigned int len, u8 *out) | ||
| 111 | { | ||
| 112 | if (!may_use_simd()) { | ||
| 113 | if (len) | ||
| 114 | sha256_base_do_update(desc, data, len, | ||
| 115 | (sha256_block_fn *)sha256_block_data_order); | ||
| 116 | sha256_base_do_finalize(desc, | ||
| 117 | (sha256_block_fn *)sha256_block_data_order); | ||
| 118 | } else { | ||
| 119 | kernel_neon_begin(); | ||
| 120 | if (len) | ||
| 121 | sha256_base_do_update(desc, data, len, | ||
| 122 | (sha256_block_fn *)sha256_block_neon); | ||
| 123 | sha256_base_do_finalize(desc, | ||
| 124 | (sha256_block_fn *)sha256_block_neon); | ||
| 125 | kernel_neon_end(); | ||
| 126 | } | ||
| 127 | return sha256_base_finish(desc, out); | ||
| 128 | } | ||
| 129 | |||
| 130 | static int sha256_final_neon(struct shash_desc *desc, u8 *out) | ||
| 131 | { | ||
| 132 | return sha256_finup_neon(desc, NULL, 0, out); | ||
| 133 | } | ||
| 134 | |||
| 135 | static struct shash_alg neon_algs[] = { { | ||
| 136 | .digestsize = SHA256_DIGEST_SIZE, | ||
| 137 | .init = sha256_base_init, | ||
| 138 | .update = sha256_update_neon, | ||
| 139 | .final = sha256_final_neon, | ||
| 140 | .finup = sha256_finup_neon, | ||
| 141 | .descsize = sizeof(struct sha256_state), | ||
| 142 | .base.cra_name = "sha256", | ||
| 143 | .base.cra_driver_name = "sha256-arm64-neon", | ||
| 144 | .base.cra_priority = 150, | ||
| 145 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 146 | .base.cra_blocksize = SHA256_BLOCK_SIZE, | ||
| 147 | .base.cra_module = THIS_MODULE, | ||
| 148 | }, { | ||
| 149 | .digestsize = SHA224_DIGEST_SIZE, | ||
| 150 | .init = sha224_base_init, | ||
| 151 | .update = sha256_update_neon, | ||
| 152 | .final = sha256_final_neon, | ||
| 153 | .finup = sha256_finup_neon, | ||
| 154 | .descsize = sizeof(struct sha256_state), | ||
| 155 | .base.cra_name = "sha224", | ||
| 156 | .base.cra_driver_name = "sha224-arm64-neon", | ||
| 157 | .base.cra_priority = 150, | ||
| 158 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 159 | .base.cra_blocksize = SHA224_BLOCK_SIZE, | ||
| 160 | .base.cra_module = THIS_MODULE, | ||
| 161 | } }; | ||
| 162 | |||
| 163 | static int __init sha256_mod_init(void) | ||
| 164 | { | ||
| 165 | int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); | ||
| 166 | if (ret) | ||
| 167 | return ret; | ||
| 168 | |||
| 169 | if (elf_hwcap & HWCAP_ASIMD) { | ||
| 170 | ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs)); | ||
| 171 | if (ret) | ||
| 172 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | ||
| 173 | } | ||
| 174 | return ret; | ||
| 175 | } | ||
| 176 | |||
| 177 | static void __exit sha256_mod_fini(void) | ||
| 178 | { | ||
| 179 | if (elf_hwcap & HWCAP_ASIMD) | ||
| 180 | crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs)); | ||
| 181 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | ||
| 182 | } | ||
| 183 | |||
| 184 | module_init(sha256_mod_init); | ||
| 185 | module_exit(sha256_mod_fini); | ||
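Both variants register under the generic names "sha256"/"sha224", and the crypto core resolves those names by cra_priority, so the NEON entries (150) win over the scalar ones (100) whenever HWCAP_ASIMD was detected at init. As a minimal sketch (not part of the patch, assuming the usual shash API of this kernel generation), a caller would reach whichever implementation wins like so:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int sha256_demo(const u8 *data, unsigned int len, u8 *digest)
    {
            struct crypto_shash *tfm;
            int err;

            /* resolves to "sha256-arm64-neon" or "sha256-arm64" by priority */
            tfm = crypto_alloc_shash("sha256", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    desc->flags = 0;
                    err = crypto_shash_digest(desc, data, len, digest);
            }

            crypto_free_shash(tfm);
            return err;
    }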
diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/crypto/sha512-armv8.pl new file mode 100644 index 000000000000..c55efb308544 --- /dev/null +++ b/arch/arm64/crypto/sha512-armv8.pl | |||
| @@ -0,0 +1,778 @@ | |||
| 1 | #! /usr/bin/env perl | ||
| 2 | # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. | ||
| 3 | # | ||
| 4 | # Licensed under the OpenSSL license (the "License"). You may not use | ||
| 5 | # this file except in compliance with the License. You can obtain a copy | ||
| 6 | # in the file LICENSE in the source distribution or at | ||
| 7 | # https://www.openssl.org/source/license.html | ||
| 8 | |||
| 9 | # ==================================================================== | ||
| 10 | # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
| 11 | # project. The module is, however, dual licensed under OpenSSL and | ||
| 12 | # CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 13 | # details see http://www.openssl.org/~appro/cryptogams/. | ||
| 14 | # | ||
| 15 | # Permission to use under GPLv2 terms is granted. | ||
| 16 | # ==================================================================== | ||
| 17 | # | ||
| 18 | # SHA256/512 for ARMv8. | ||
| 19 | # | ||
| 20 | # Performance in cycles per processed byte and improvement coefficient | ||
| 21 | # over code generated with "default" compiler: | ||
| 22 | # | ||
| 23 | # SHA256-hw SHA256(*) SHA512 | ||
| 24 | # Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) | ||
| 25 | # Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) | ||
| 26 | # Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) | ||
| 27 | # Denver 2.01 10.5 (+26%) 6.70 (+8%) | ||
| 28 | # X-Gene 20.0 (+100%) 12.8 (+300%(***)) | ||
| 29 | # Mongoose 2.36 13.0 (+50%) 8.36 (+33%) | ||
| 30 | # | ||
| 31 | # (*) Software SHA256 results are of lesser relevance, presented | ||
| 32 | # mostly for informational purposes. | ||
| 33 | # (**) The result is a trade-off: it's possible to improve it by | ||
| 34 | # 10% (or by 1 cycle per round), but at the cost of 20% loss | ||
| 35 | # on Cortex-A53 (or by 4 cycles per round). | ||
| 36 | # (***) Super-impressive coefficients over gcc-generated code are an | ||
| 37 | # indication of some compiler "pathology"; most notably, code | ||
| 38 | # generated with -mgeneral-regs-only is significantly faster, | ||
| 39 | # and the gap is then only 40-90%. | ||
| 40 | # | ||
| 41 | # October 2016. | ||
| 42 | # | ||
| 43 | # Originally it was reckoned that it made no sense to implement a NEON | ||
| 44 | # version of SHA256 for 64-bit processors, because the performance | ||
| 45 | # improvement on the most widespread Cortex-A5x processors was observed | ||
| 46 | # to be marginal: no change on Cortex-A53 and ~10% on A57. But it was | ||
| 47 | # then observed that 32-bit NEON SHA256 performs significantly better | ||
| 48 | # than the 64-bit scalar version on *some* of the more recent | ||
| 49 | # processors. As a result, a 64-bit NEON version of SHA256 was added to | ||
| 50 | # provide the best all-round performance; for example, it executes ~30% | ||
| 51 | # faster on X-Gene and Mongoose. [For reference, a NEON version of | ||
| 52 | # SHA512 is bound to deliver much less improvement, likely *negative* | ||
| 53 | # on Cortex-A5x, which is why NEON support is limited to SHA256.] | ||
| 54 | |||
| 55 | $output=pop; | ||
| 56 | $flavour=pop; | ||
| 57 | |||
| 58 | if ($flavour && $flavour ne "void") { | ||
| 59 | $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; | ||
| 60 | ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or | ||
| 61 | ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or | ||
| 62 | die "can't locate arm-xlate.pl"; | ||
| 63 | |||
| 64 | open OUT,"| \"$^X\" $xlate $flavour $output"; | ||
| 65 | *STDOUT=*OUT; | ||
| 66 | } else { | ||
| 67 | open STDOUT,">$output"; | ||
| 68 | } | ||
| 69 | |||
| 70 | if ($output =~ /512/) { | ||
| 71 | $BITS=512; | ||
| 72 | $SZ=8; | ||
| 73 | @Sigma0=(28,34,39); | ||
| 74 | @Sigma1=(14,18,41); | ||
| 75 | @sigma0=(1, 8, 7); | ||
| 76 | @sigma1=(19,61, 6); | ||
| 77 | $rounds=80; | ||
| 78 | $reg_t="x"; | ||
| 79 | } else { | ||
| 80 | $BITS=256; | ||
| 81 | $SZ=4; | ||
| 82 | @Sigma0=( 2,13,22); | ||
| 83 | @Sigma1=( 6,11,25); | ||
| 84 | @sigma0=( 7,18, 3); | ||
| 85 | @sigma1=(17,19,10); | ||
| 86 | $rounds=64; | ||
| 87 | $reg_t="w"; | ||
| 88 | } | ||
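The @Sigma/@sigma arrays above are just the rotate/shift counts of the FIPS 180-4 big and small sigma functions: the first two elements are always rotates, and the third element of @sigma0/@sigma1 is a plain shift (hence the ror/ror/lsr pattern in the generated code). For the $SZ==8 case they map, roughly in C (a hedged illustration, not taken from the patch), to:

    #include <stdint.h>

    #define ROTR64(x, n)  (((x) >> (n)) | ((x) << (64 - (n))))

    /* @Sigma0=(28,34,39) and @Sigma1=(14,18,41): all rotates */
    #define Sigma0(x)  (ROTR64(x, 28) ^ ROTR64(x, 34) ^ ROTR64(x, 39))
    #define Sigma1(x)  (ROTR64(x, 14) ^ ROTR64(x, 18) ^ ROTR64(x, 41))

    /* @sigma0=(1,8,7) and @sigma1=(19,61,6): the last element is a shift */
    #define sigma0(x)  (ROTR64(x, 1) ^ ROTR64(x, 8) ^ ((x) >> 7))
    #define sigma1(x)  (ROTR64(x, 19) ^ ROTR64(x, 61) ^ ((x) >> 6))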
| 89 | |||
| 90 | $func="sha${BITS}_block_data_order"; | ||
| 91 | |||
| 92 | ($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30)); | ||
| 93 | |||
| 94 | @X=map("$reg_t$_",(3..15,0..2)); | ||
| 95 | @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27)); | ||
| 96 | ($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28)); | ||
| 97 | |||
| 98 | sub BODY_00_xx { | ||
| 99 | my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_; | ||
| 100 | my $j=($i+1)&15; | ||
| 101 | my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]); | ||
| 102 | $T0=@X[$i+3] if ($i<11); | ||
| 103 | |||
| 104 | $code.=<<___ if ($i<16); | ||
| 105 | #ifndef __AARCH64EB__ | ||
| 106 | rev @X[$i],@X[$i] // $i | ||
| 107 | #endif | ||
| 108 | ___ | ||
| 109 | $code.=<<___ if ($i<13 && ($i&1)); | ||
| 110 | ldp @X[$i+1],@X[$i+2],[$inp],#2*$SZ | ||
| 111 | ___ | ||
| 112 | $code.=<<___ if ($i==13); | ||
| 113 | ldp @X[14],@X[15],[$inp] | ||
| 114 | ___ | ||
| 115 | $code.=<<___ if ($i>=14); | ||
| 116 | ldr @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`] | ||
| 117 | ___ | ||
| 118 | $code.=<<___ if ($i>0 && $i<16); | ||
| 119 | add $a,$a,$t1 // h+=Sigma0(a) | ||
| 120 | ___ | ||
| 121 | $code.=<<___ if ($i>=11); | ||
| 122 | str @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`] | ||
| 123 | ___ | ||
| 124 | # While ARMv8 provides merged rotate-and-logical operations such as | ||
| 125 | # 'eor x,y,z,ror#n', they were found to negatively affect performance | ||
| 126 | # on Apple A7. The reason seems to be that the merged form requires | ||
| 127 | # even 'y' to be available earlier, so such an instruction is not | ||
| 128 | # necessarily the best choice on the critical path... On the other | ||
| 129 | # hand, Cortex-A5x handles merged instructions much better than | ||
| 130 | # disjoint rotate and logical... See the (**) footnote above. | ||
| 131 | $code.=<<___ if ($i<15); | ||
| 132 | ror $t0,$e,#$Sigma1[0] | ||
| 133 | add $h,$h,$t2 // h+=K[i] | ||
| 134 | eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]` | ||
| 135 | and $t1,$f,$e | ||
| 136 | bic $t2,$g,$e | ||
| 137 | add $h,$h,@X[$i&15] // h+=X[i] | ||
| 138 | orr $t1,$t1,$t2 // Ch(e,f,g) | ||
| 139 | eor $t2,$a,$b // a^b, b^c in next round | ||
| 140 | eor $t0,$t0,$T0,ror#$Sigma1[1] // Sigma1(e) | ||
| 141 | ror $T0,$a,#$Sigma0[0] | ||
| 142 | add $h,$h,$t1 // h+=Ch(e,f,g) | ||
| 143 | eor $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]` | ||
| 144 | add $h,$h,$t0 // h+=Sigma1(e) | ||
| 145 | and $t3,$t3,$t2 // (b^c)&=(a^b) | ||
| 146 | add $d,$d,$h // d+=h | ||
| 147 | eor $t3,$t3,$b // Maj(a,b,c) | ||
| 148 | eor $t1,$T0,$t1,ror#$Sigma0[1] // Sigma0(a) | ||
| 149 | add $h,$h,$t3 // h+=Maj(a,b,c) | ||
| 150 | ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round | ||
| 151 | //add $h,$h,$t1 // h+=Sigma0(a) | ||
| 152 | ___ | ||
| 153 | $code.=<<___ if ($i>=15); | ||
| 154 | ror $t0,$e,#$Sigma1[0] | ||
| 155 | add $h,$h,$t2 // h+=K[i] | ||
| 156 | ror $T1,@X[($j+1)&15],#$sigma0[0] | ||
| 157 | and $t1,$f,$e | ||
| 158 | ror $T2,@X[($j+14)&15],#$sigma1[0] | ||
| 159 | bic $t2,$g,$e | ||
| 160 | ror $T0,$a,#$Sigma0[0] | ||
| 161 | add $h,$h,@X[$i&15] // h+=X[i] | ||
| 162 | eor $t0,$t0,$e,ror#$Sigma1[1] | ||
| 163 | eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1] | ||
| 164 | orr $t1,$t1,$t2 // Ch(e,f,g) | ||
| 165 | eor $t2,$a,$b // a^b, b^c in next round | ||
| 166 | eor $t0,$t0,$e,ror#$Sigma1[2] // Sigma1(e) | ||
| 167 | eor $T0,$T0,$a,ror#$Sigma0[1] | ||
| 168 | add $h,$h,$t1 // h+=Ch(e,f,g) | ||
| 169 | and $t3,$t3,$t2 // (b^c)&=(a^b) | ||
| 170 | eor $T2,$T2,@X[($j+14)&15],ror#$sigma1[1] | ||
| 171 | eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1]) | ||
| 172 | add $h,$h,$t0 // h+=Sigma1(e) | ||
| 173 | eor $t3,$t3,$b // Maj(a,b,c) | ||
| 174 | eor $t1,$T0,$a,ror#$Sigma0[2] // Sigma0(a) | ||
| 175 | eor $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2] // sigma1(X[i+14]) | ||
| 176 | add @X[$j],@X[$j],@X[($j+9)&15] | ||
| 177 | add $d,$d,$h // d+=h | ||
| 178 | add $h,$h,$t3 // h+=Maj(a,b,c) | ||
| 179 | ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round | ||
| 180 | add @X[$j],@X[$j],$T1 | ||
| 181 | add $h,$h,$t1 // h+=Sigma0(a) | ||
| 182 | add @X[$j],@X[$j],$T2 | ||
| 183 | ___ | ||
| 184 | ($t2,$t3)=($t3,$t2); | ||
| 185 | } | ||
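Stripped of the software pipelining (the deferred "h+=Sigma0(a)" that BODY_00_xx pushes into the next round), each iteration is one standard compression round. A plain-C rendering of the 32-bit case, assuming nothing beyond FIPS 180-4, may help decode the comments; note that Maj(a,b,c) is computed as ((a^b)&(b^c))^b, which is what the "a^b, b^c in next round" bookkeeping is for:

    #include <stdint.h>

    #define ROTR32(x, n)  (((x) >> (n)) | ((x) << (32 - (n))))
    #define Sigma0(x)     (ROTR32(x, 2) ^ ROTR32(x, 13) ^ ROTR32(x, 22))
    #define Sigma1(x)     (ROTR32(x, 6) ^ ROTR32(x, 11) ^ ROTR32(x, 25))
    #define Ch(e, f, g)   (((e) & (f)) | (~(e) & (g)))           /* and/bic/orr */
    #define Maj(a, b, c)  ((((a) ^ (b)) & ((b) ^ (c))) ^ (b))    /* eor/and/eor */

    /* s[0..7] = a..h; one round with constant k and message word w */
    static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
    {
            uint32_t t1 = s[7] + Sigma1(s[4]) + Ch(s[4], s[5], s[6]) + k + w;
            uint32_t t2 = Sigma0(s[0]) + Maj(s[0], s[1], s[2]);

            s[7] = s[6]; s[6] = s[5]; s[5] = s[4]; s[4] = s[3] + t1;
            s[3] = s[2]; s[2] = s[1]; s[1] = s[0]; s[0] = t1 + t2;
    }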
| 186 | |||
| 187 | $code.=<<___; | ||
| 188 | #ifndef __KERNEL__ | ||
| 189 | # include "arm_arch.h" | ||
| 190 | #endif | ||
| 191 | |||
| 192 | .text | ||
| 193 | |||
| 194 | .extern OPENSSL_armcap_P | ||
| 195 | .globl $func | ||
| 196 | .type $func,%function | ||
| 197 | .align 6 | ||
| 198 | $func: | ||
| 199 | ___ | ||
| 200 | $code.=<<___ if ($SZ==4); | ||
| 201 | #ifndef __KERNEL__ | ||
| 202 | # ifdef __ILP32__ | ||
| 203 | ldrsw x16,.LOPENSSL_armcap_P | ||
| 204 | # else | ||
| 205 | ldr x16,.LOPENSSL_armcap_P | ||
| 206 | # endif | ||
| 207 | adr x17,.LOPENSSL_armcap_P | ||
| 208 | add x16,x16,x17 | ||
| 209 | ldr w16,[x16] | ||
| 210 | tst w16,#ARMV8_SHA256 | ||
| 211 | b.ne .Lv8_entry | ||
| 212 | tst w16,#ARMV7_NEON | ||
| 213 | b.ne .Lneon_entry | ||
| 214 | #endif | ||
| 215 | ___ | ||
| 216 | $code.=<<___; | ||
| 217 | stp x29,x30,[sp,#-128]! | ||
| 218 | add x29,sp,#0 | ||
| 219 | |||
| 220 | stp x19,x20,[sp,#16] | ||
| 221 | stp x21,x22,[sp,#32] | ||
| 222 | stp x23,x24,[sp,#48] | ||
| 223 | stp x25,x26,[sp,#64] | ||
| 224 | stp x27,x28,[sp,#80] | ||
| 225 | sub sp,sp,#4*$SZ | ||
| 226 | |||
| 227 | ldp $A,$B,[$ctx] // load context | ||
| 228 | ldp $C,$D,[$ctx,#2*$SZ] | ||
| 229 | ldp $E,$F,[$ctx,#4*$SZ] | ||
| 230 | add $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input | ||
| 231 | ldp $G,$H,[$ctx,#6*$SZ] | ||
| 232 | adr $Ktbl,.LK$BITS | ||
| 233 | stp $ctx,$num,[x29,#96] | ||
| 234 | |||
| 235 | .Loop: | ||
| 236 | ldp @X[0],@X[1],[$inp],#2*$SZ | ||
| 237 | ldr $t2,[$Ktbl],#$SZ // *K++ | ||
| 238 | eor $t3,$B,$C // magic seed | ||
| 239 | str $inp,[x29,#112] | ||
| 240 | ___ | ||
| 241 | for ($i=0;$i<16;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); } | ||
| 242 | $code.=".Loop_16_xx:\n"; | ||
| 243 | for (;$i<32;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); } | ||
| 244 | $code.=<<___; | ||
| 245 | cbnz $t2,.Loop_16_xx | ||
| 246 | |||
| 247 | ldp $ctx,$num,[x29,#96] | ||
| 248 | ldr $inp,[x29,#112] | ||
| 249 | sub $Ktbl,$Ktbl,#`$SZ*($rounds+1)` // rewind | ||
| 250 | |||
| 251 | ldp @X[0],@X[1],[$ctx] | ||
| 252 | ldp @X[2],@X[3],[$ctx,#2*$SZ] | ||
| 253 | add $inp,$inp,#14*$SZ // advance input pointer | ||
| 254 | ldp @X[4],@X[5],[$ctx,#4*$SZ] | ||
| 255 | add $A,$A,@X[0] | ||
| 256 | ldp @X[6],@X[7],[$ctx,#6*$SZ] | ||
| 257 | add $B,$B,@X[1] | ||
| 258 | add $C,$C,@X[2] | ||
| 259 | add $D,$D,@X[3] | ||
| 260 | stp $A,$B,[$ctx] | ||
| 261 | add $E,$E,@X[4] | ||
| 262 | add $F,$F,@X[5] | ||
| 263 | stp $C,$D,[$ctx,#2*$SZ] | ||
| 264 | add $G,$G,@X[6] | ||
| 265 | add $H,$H,@X[7] | ||
| 266 | cmp $inp,$num | ||
| 267 | stp $E,$F,[$ctx,#4*$SZ] | ||
| 268 | stp $G,$H,[$ctx,#6*$SZ] | ||
| 269 | b.ne .Loop | ||
| 270 | |||
| 271 | ldp x19,x20,[x29,#16] | ||
| 272 | add sp,sp,#4*$SZ | ||
| 273 | ldp x21,x22,[x29,#32] | ||
| 274 | ldp x23,x24,[x29,#48] | ||
| 275 | ldp x25,x26,[x29,#64] | ||
| 276 | ldp x27,x28,[x29,#80] | ||
| 277 | ldp x29,x30,[sp],#128 | ||
| 278 | ret | ||
| 279 | .size $func,.-$func | ||
| 280 | |||
| 281 | .align 6 | ||
| 282 | .type .LK$BITS,%object | ||
| 283 | .LK$BITS: | ||
| 284 | ___ | ||
| 285 | $code.=<<___ if ($SZ==8); | ||
| 286 | .quad 0x428a2f98d728ae22,0x7137449123ef65cd | ||
| 287 | .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc | ||
| 288 | .quad 0x3956c25bf348b538,0x59f111f1b605d019 | ||
| 289 | .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 | ||
| 290 | .quad 0xd807aa98a3030242,0x12835b0145706fbe | ||
| 291 | .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 | ||
| 292 | .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 | ||
| 293 | .quad 0x9bdc06a725c71235,0xc19bf174cf692694 | ||
| 294 | .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 | ||
| 295 | .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 | ||
| 296 | .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 | ||
| 297 | .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 | ||
| 298 | .quad 0x983e5152ee66dfab,0xa831c66d2db43210 | ||
| 299 | .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 | ||
| 300 | .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 | ||
| 301 | .quad 0x06ca6351e003826f,0x142929670a0e6e70 | ||
| 302 | .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 | ||
| 303 | .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df | ||
| 304 | .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 | ||
| 305 | .quad 0x81c2c92e47edaee6,0x92722c851482353b | ||
| 306 | .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 | ||
| 307 | .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 | ||
| 308 | .quad 0xd192e819d6ef5218,0xd69906245565a910 | ||
| 309 | .quad 0xf40e35855771202a,0x106aa07032bbd1b8 | ||
| 310 | .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 | ||
| 311 | .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 | ||
| 312 | .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb | ||
| 313 | .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 | ||
| 314 | .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 | ||
| 315 | .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec | ||
| 316 | .quad 0x90befffa23631e28,0xa4506cebde82bde9 | ||
| 317 | .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b | ||
| 318 | .quad 0xca273eceea26619c,0xd186b8c721c0c207 | ||
| 319 | .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 | ||
| 320 | .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 | ||
| 321 | .quad 0x113f9804bef90dae,0x1b710b35131c471b | ||
| 322 | .quad 0x28db77f523047d84,0x32caab7b40c72493 | ||
| 323 | .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c | ||
| 324 | .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a | ||
| 325 | .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 | ||
| 326 | .quad 0 // terminator | ||
| 327 | ___ | ||
| 328 | $code.=<<___ if ($SZ==4); | ||
| 329 | .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 | ||
| 330 | .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 | ||
| 331 | .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 | ||
| 332 | .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 | ||
| 333 | .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc | ||
| 334 | .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da | ||
| 335 | .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 | ||
| 336 | .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 | ||
| 337 | .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 | ||
| 338 | .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 | ||
| 339 | .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 | ||
| 340 | .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 | ||
| 341 | .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 | ||
| 342 | .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 | ||
| 343 | .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 | ||
| 344 | .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 | ||
| 345 | .long 0 //terminator | ||
| 346 | ___ | ||
| 347 | $code.=<<___; | ||
| 348 | .size .LK$BITS,.-.LK$BITS | ||
| 349 | #ifndef __KERNEL__ | ||
| 350 | .align 3 | ||
| 351 | .LOPENSSL_armcap_P: | ||
| 352 | # ifdef __ILP32__ | ||
| 353 | .long OPENSSL_armcap_P-. | ||
| 354 | # else | ||
| 355 | .quad OPENSSL_armcap_P-. | ||
| 356 | # endif | ||
| 357 | #endif | ||
| 358 | .asciz "SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>" | ||
| 359 | .align 2 | ||
| 360 | ___ | ||
| 361 | |||
| 362 | if ($SZ==4) { | ||
| 363 | my $Ktbl="x3"; | ||
| 364 | |||
| 365 | my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2)); | ||
| 366 | my @MSG=map("v$_.16b",(4..7)); | ||
| 367 | my ($W0,$W1)=("v16.4s","v17.4s"); | ||
| 368 | my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b"); | ||
| 369 | |||
| 370 | $code.=<<___; | ||
| 371 | #ifndef __KERNEL__ | ||
| 372 | .type sha256_block_armv8,%function | ||
| 373 | .align 6 | ||
| 374 | sha256_block_armv8: | ||
| 375 | .Lv8_entry: | ||
| 376 | stp x29,x30,[sp,#-16]! | ||
| 377 | add x29,sp,#0 | ||
| 378 | |||
| 379 | ld1.32 {$ABCD,$EFGH},[$ctx] | ||
| 380 | adr $Ktbl,.LK256 | ||
| 381 | |||
| 382 | .Loop_hw: | ||
| 383 | ld1 {@MSG[0]-@MSG[3]},[$inp],#64 | ||
| 384 | sub $num,$num,#1 | ||
| 385 | ld1.32 {$W0},[$Ktbl],#16 | ||
| 386 | rev32 @MSG[0],@MSG[0] | ||
| 387 | rev32 @MSG[1],@MSG[1] | ||
| 388 | rev32 @MSG[2],@MSG[2] | ||
| 389 | rev32 @MSG[3],@MSG[3] | ||
| 390 | orr $ABCD_SAVE,$ABCD,$ABCD // offload | ||
| 391 | orr $EFGH_SAVE,$EFGH,$EFGH | ||
| 392 | ___ | ||
| 393 | for($i=0;$i<12;$i++) { | ||
| 394 | $code.=<<___; | ||
| 395 | ld1.32 {$W1},[$Ktbl],#16 | ||
| 396 | add.i32 $W0,$W0,@MSG[0] | ||
| 397 | sha256su0 @MSG[0],@MSG[1] | ||
| 398 | orr $abcd,$ABCD,$ABCD | ||
| 399 | sha256h $ABCD,$EFGH,$W0 | ||
| 400 | sha256h2 $EFGH,$abcd,$W0 | ||
| 401 | sha256su1 @MSG[0],@MSG[2],@MSG[3] | ||
| 402 | ___ | ||
| 403 | ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG)); | ||
| 404 | } | ||
| 405 | $code.=<<___; | ||
| 406 | ld1.32 {$W1},[$Ktbl],#16 | ||
| 407 | add.i32 $W0,$W0,@MSG[0] | ||
| 408 | orr $abcd,$ABCD,$ABCD | ||
| 409 | sha256h $ABCD,$EFGH,$W0 | ||
| 410 | sha256h2 $EFGH,$abcd,$W0 | ||
| 411 | |||
| 412 | ld1.32 {$W0},[$Ktbl],#16 | ||
| 413 | add.i32 $W1,$W1,@MSG[1] | ||
| 414 | orr $abcd,$ABCD,$ABCD | ||
| 415 | sha256h $ABCD,$EFGH,$W1 | ||
| 416 | sha256h2 $EFGH,$abcd,$W1 | ||
| 417 | |||
| 418 | ld1.32 {$W1},[$Ktbl] | ||
| 419 | add.i32 $W0,$W0,@MSG[2] | ||
| 420 | sub $Ktbl,$Ktbl,#$rounds*$SZ-16 // rewind | ||
| 421 | orr $abcd,$ABCD,$ABCD | ||
| 422 | sha256h $ABCD,$EFGH,$W0 | ||
| 423 | sha256h2 $EFGH,$abcd,$W0 | ||
| 424 | |||
| 425 | add.i32 $W1,$W1,@MSG[3] | ||
| 426 | orr $abcd,$ABCD,$ABCD | ||
| 427 | sha256h $ABCD,$EFGH,$W1 | ||
| 428 | sha256h2 $EFGH,$abcd,$W1 | ||
| 429 | |||
| 430 | add.i32 $ABCD,$ABCD,$ABCD_SAVE | ||
| 431 | add.i32 $EFGH,$EFGH,$EFGH_SAVE | ||
| 432 | |||
| 433 | cbnz $num,.Loop_hw | ||
| 434 | |||
| 435 | st1.32 {$ABCD,$EFGH},[$ctx] | ||
| 436 | |||
| 437 | ldr x29,[sp],#16 | ||
| 438 | ret | ||
| 439 | .size sha256_block_armv8,.-sha256_block_armv8 | ||
| 440 | #endif | ||
| 441 | ___ | ||
| 442 | } | ||
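Each iteration of the unrolled loop above retires four rounds: sha256su0/sha256su1 extend the message schedule while sha256h/sha256h2 advance the two state halves, with the old ABCD copied aside first ("orr $abcd,$ABCD,$ABCD") because sha256h2 needs the pre-update value. In ACLE intrinsics, one such step looks roughly like the hedged sketch below (not from the patch; assumes a compiler with the crypto extensions enabled):

    #include <arm_neon.h>

    /* One 4-round step: wk holds four message words already added to K */
    static void sha256_hw_step(uint32x4_t *abcd, uint32x4_t *efgh,
                               uint32x4_t wk)
    {
            uint32x4_t abcd_prev = *abcd;   /* the "orr $abcd,..." copy */

            *abcd = vsha256hq_u32(*abcd, *efgh, wk);
            *efgh = vsha256h2q_u32(*efgh, abcd_prev, wk);
    }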
| 443 | |||
| 444 | if ($SZ==4) { ######################################### NEON stuff # | ||
| 445 | # You'll surely note a lot of similarities with the sha256-armv4 | ||
| 446 | # module, and of course that's no coincidence: sha256-armv4 was used | ||
| 447 | # as the initial template, but adapted to the ARMv8 instruction set | ||
| 448 | # and extensively re-tuned for all-round performance. | ||
| 449 | |||
| 450 | my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10)); | ||
| 451 | my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15)); | ||
| 452 | my $Ktbl="x16"; | ||
| 453 | my $Xfer="x17"; | ||
| 454 | my @X = map("q$_",(0..3)); | ||
| 455 | my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19)); | ||
| 456 | my $j=0; | ||
| 457 | |||
| 458 | sub AUTOLOAD() # thunk [simplified] x86-style perlasm | ||
| 459 | { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; | ||
| 460 | my $arg = pop; | ||
| 461 | $arg = "#$arg" if ($arg*1 eq $arg); | ||
| 462 | $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; | ||
| 463 | } | ||
| 464 | |||
| 465 | sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; } | ||
| 466 | sub Dlo { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; } | ||
| 467 | sub Dhi { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; } | ||
| 468 | |||
| 469 | sub Xupdate() | ||
| 470 | { use integer; | ||
| 471 | my $body = shift; | ||
| 472 | my @insns = (&$body,&$body,&$body,&$body); | ||
| 473 | my ($a,$b,$c,$d,$e,$f,$g,$h); | ||
| 474 | |||
| 475 | &ext_8 ($T0,@X[0],@X[1],4); # X[1..4] | ||
| 476 | eval(shift(@insns)); | ||
| 477 | eval(shift(@insns)); | ||
| 478 | eval(shift(@insns)); | ||
| 479 | &ext_8 ($T3,@X[2],@X[3],4); # X[9..12] | ||
| 480 | eval(shift(@insns)); | ||
| 481 | eval(shift(@insns)); | ||
| 482 | &mov (&Dscalar($T7),&Dhi(@X[3])); # X[14..15] | ||
| 483 | eval(shift(@insns)); | ||
| 484 | eval(shift(@insns)); | ||
| 485 | &ushr_32 ($T2,$T0,$sigma0[0]); | ||
| 486 | eval(shift(@insns)); | ||
| 487 | &ushr_32 ($T1,$T0,$sigma0[2]); | ||
| 488 | eval(shift(@insns)); | ||
| 489 | &add_32 (@X[0],@X[0],$T3); # X[0..3] += X[9..12] | ||
| 490 | eval(shift(@insns)); | ||
| 491 | &sli_32 ($T2,$T0,32-$sigma0[0]); | ||
| 492 | eval(shift(@insns)); | ||
| 493 | eval(shift(@insns)); | ||
| 494 | &ushr_32 ($T3,$T0,$sigma0[1]); | ||
| 495 | eval(shift(@insns)); | ||
| 496 | eval(shift(@insns)); | ||
| 497 | &eor_8 ($T1,$T1,$T2); | ||
| 498 | eval(shift(@insns)); | ||
| 499 | eval(shift(@insns)); | ||
| 500 | &sli_32 ($T3,$T0,32-$sigma0[1]); | ||
| 501 | eval(shift(@insns)); | ||
| 502 | eval(shift(@insns)); | ||
| 503 | &ushr_32 ($T4,$T7,$sigma1[0]); | ||
| 504 | eval(shift(@insns)); | ||
| 505 | eval(shift(@insns)); | ||
| 506 | &eor_8 ($T1,$T1,$T3); # sigma0(X[1..4]) | ||
| 507 | eval(shift(@insns)); | ||
| 508 | eval(shift(@insns)); | ||
| 509 | &sli_32 ($T4,$T7,32-$sigma1[0]); | ||
| 510 | eval(shift(@insns)); | ||
| 511 | eval(shift(@insns)); | ||
| 512 | &ushr_32 ($T5,$T7,$sigma1[2]); | ||
| 513 | eval(shift(@insns)); | ||
| 514 | eval(shift(@insns)); | ||
| 515 | &ushr_32 ($T3,$T7,$sigma1[1]); | ||
| 516 | eval(shift(@insns)); | ||
| 517 | eval(shift(@insns)); | ||
| 518 | &add_32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4]) | ||
| 519 | eval(shift(@insns)); | ||
| 520 | eval(shift(@insns)); | ||
| 521 | &sli_u32 ($T3,$T7,32-$sigma1[1]); | ||
| 522 | eval(shift(@insns)); | ||
| 523 | eval(shift(@insns)); | ||
| 524 | &eor_8 ($T5,$T5,$T4); | ||
| 525 | eval(shift(@insns)); | ||
| 526 | eval(shift(@insns)); | ||
| 527 | eval(shift(@insns)); | ||
| 528 | &eor_8 ($T5,$T5,$T3); # sigma1(X[14..15]) | ||
| 529 | eval(shift(@insns)); | ||
| 530 | eval(shift(@insns)); | ||
| 531 | eval(shift(@insns)); | ||
| 532 | &add_32 (@X[0],@X[0],$T5); # X[0..1] += sigma1(X[14..15]) | ||
| 533 | eval(shift(@insns)); | ||
| 534 | eval(shift(@insns)); | ||
| 535 | eval(shift(@insns)); | ||
| 536 | &ushr_32 ($T6,@X[0],$sigma1[0]); | ||
| 537 | eval(shift(@insns)); | ||
| 538 | &ushr_32 ($T7,@X[0],$sigma1[2]); | ||
| 539 | eval(shift(@insns)); | ||
| 540 | eval(shift(@insns)); | ||
| 541 | &sli_32 ($T6,@X[0],32-$sigma1[0]); | ||
| 542 | eval(shift(@insns)); | ||
| 543 | &ushr_32 ($T5,@X[0],$sigma1[1]); | ||
| 544 | eval(shift(@insns)); | ||
| 545 | eval(shift(@insns)); | ||
| 546 | &eor_8 ($T7,$T7,$T6); | ||
| 547 | eval(shift(@insns)); | ||
| 548 | eval(shift(@insns)); | ||
| 549 | &sli_32 ($T5,@X[0],32-$sigma1[1]); | ||
| 550 | eval(shift(@insns)); | ||
| 551 | eval(shift(@insns)); | ||
| 552 | &ld1_32 ("{$T0}","[$Ktbl], #16"); | ||
| 553 | eval(shift(@insns)); | ||
| 554 | &eor_8 ($T7,$T7,$T5); # sigma1(X[16..17]) | ||
| 555 | eval(shift(@insns)); | ||
| 556 | eval(shift(@insns)); | ||
| 557 | &eor_8 ($T5,$T5,$T5); | ||
| 558 | eval(shift(@insns)); | ||
| 559 | eval(shift(@insns)); | ||
| 560 | &mov (&Dhi($T5), &Dlo($T7)); | ||
| 561 | eval(shift(@insns)); | ||
| 562 | eval(shift(@insns)); | ||
| 563 | eval(shift(@insns)); | ||
| 564 | &add_32 (@X[0],@X[0],$T5); # X[2..3] += sigma1(X[16..17]) | ||
| 565 | eval(shift(@insns)); | ||
| 566 | eval(shift(@insns)); | ||
| 567 | eval(shift(@insns)); | ||
| 568 | &add_32 ($T0,$T0,@X[0]); | ||
| 569 | while($#insns>=1) { eval(shift(@insns)); } | ||
| 570 | &st1_32 ("{$T0}","[$Xfer], #16"); | ||
| 571 | eval(shift(@insns)); | ||
| 572 | |||
| 573 | push(@X,shift(@X)); # "rotate" X[] | ||
| 574 | } | ||
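Xupdate is the vectorized message schedule: each call produces four new W[] words, which is exactly the scalar recurrence below computed four lanes at a time (the Dlo/Dhi half-register shuffling exists because sigma1 of W[14..15] feeds W[16..17], which in turn feed W[18..19] within the same quad). For reference, assuming nothing beyond FIPS 180-4:

    #include <stdint.h>

    #define ROTR32(x, n)  (((x) >> (n)) | ((x) << (32 - (n))))
    #define s0(x)  (ROTR32(x, 7) ^ ROTR32(x, 18) ^ ((x) >> 3))    /* @sigma0 */
    #define s1(x)  (ROTR32(x, 17) ^ ROTR32(x, 19) ^ ((x) >> 10))  /* @sigma1 */

    /* Scalar form of what Xupdate computes four lanes at a time */
    static void sha256_schedule(uint32_t w[64])
    {
            for (int t = 16; t < 64; t++)
                    w[t] = s1(w[t - 2]) + w[t - 7] + s0(w[t - 15]) + w[t - 16];
    }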
| 575 | |||
| 576 | sub Xpreload() | ||
| 577 | { use integer; | ||
| 578 | my $body = shift; | ||
| 579 | my @insns = (&$body,&$body,&$body,&$body); | ||
| 580 | my ($a,$b,$c,$d,$e,$f,$g,$h); | ||
| 581 | |||
| 582 | eval(shift(@insns)); | ||
| 583 | eval(shift(@insns)); | ||
| 584 | &ld1_8 ("{@X[0]}","[$inp],#16"); | ||
| 585 | eval(shift(@insns)); | ||
| 586 | eval(shift(@insns)); | ||
| 587 | &ld1_32 ("{$T0}","[$Ktbl],#16"); | ||
| 588 | eval(shift(@insns)); | ||
| 589 | eval(shift(@insns)); | ||
| 590 | eval(shift(@insns)); | ||
| 591 | eval(shift(@insns)); | ||
| 592 | &rev32 (@X[0],@X[0]); | ||
| 593 | eval(shift(@insns)); | ||
| 594 | eval(shift(@insns)); | ||
| 595 | eval(shift(@insns)); | ||
| 596 | eval(shift(@insns)); | ||
| 597 | &add_32 ($T0,$T0,@X[0]); | ||
| 598 | foreach (@insns) { eval; } # remaining instructions | ||
| 599 | &st1_32 ("{$T0}","[$Xfer], #16"); | ||
| 600 | |||
| 601 | push(@X,shift(@X)); # "rotate" X[] | ||
| 602 | } | ||
| 603 | |||
| 604 | sub body_00_15 () { | ||
| 605 | ( | ||
| 606 | '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'. | ||
| 607 | '&add ($h,$h,$t1)', # h+=X[i]+K[i] | ||
| 608 | '&add ($a,$a,$t4);'. # h+=Sigma0(a) from the past | ||
| 609 | '&and ($t1,$f,$e)', | ||
| 610 | '&bic ($t4,$g,$e)', | ||
| 611 | '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))', | ||
| 612 | '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past | ||
| 613 | '&orr ($t1,$t1,$t4)', # Ch(e,f,g) | ||
| 614 | '&eor ($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e) | ||
| 615 | '&eor ($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))', | ||
| 616 | '&add ($h,$h,$t1)', # h+=Ch(e,f,g) | ||
| 617 | '&ror ($t0,$t0,"#$Sigma1[0]")', | ||
| 618 | '&eor ($t2,$a,$b)', # a^b, b^c in next round | ||
| 619 | '&eor ($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a) | ||
| 620 | '&add ($h,$h,$t0)', # h+=Sigma1(e) | ||
| 621 | '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'. | ||
| 622 | '&ldr ($t1,"[$Ktbl]") if ($j==15);'. | ||
| 623 | '&and ($t3,$t3,$t2)', # (b^c)&=(a^b) | ||
| 624 | '&ror ($t4,$t4,"#$Sigma0[0]")', | ||
| 625 | '&add ($d,$d,$h)', # d+=h | ||
| 626 | '&eor ($t3,$t3,$b)', # Maj(a,b,c) | ||
| 627 | '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);' | ||
| 628 | ) | ||
| 629 | } | ||
| 630 | |||
| 631 | $code.=<<___; | ||
| 632 | #ifdef __KERNEL__ | ||
| 633 | .globl sha256_block_neon | ||
| 634 | #endif | ||
| 635 | .type sha256_block_neon,%function | ||
| 636 | .align 4 | ||
| 637 | sha256_block_neon: | ||
| 638 | .Lneon_entry: | ||
| 639 | stp x29, x30, [sp, #-16]! | ||
| 640 | mov x29, sp | ||
| 641 | sub sp,sp,#16*4 | ||
| 642 | |||
| 643 | adr $Ktbl,.LK256 | ||
| 644 | add $num,$inp,$num,lsl#6 // len to point at the end of inp | ||
| 645 | |||
| 646 | ld1.8 {@X[0]},[$inp], #16 | ||
| 647 | ld1.8 {@X[1]},[$inp], #16 | ||
| 648 | ld1.8 {@X[2]},[$inp], #16 | ||
| 649 | ld1.8 {@X[3]},[$inp], #16 | ||
| 650 | ld1.32 {$T0},[$Ktbl], #16 | ||
| 651 | ld1.32 {$T1},[$Ktbl], #16 | ||
| 652 | ld1.32 {$T2},[$Ktbl], #16 | ||
| 653 | ld1.32 {$T3},[$Ktbl], #16 | ||
| 654 | rev32 @X[0],@X[0] // yes, even on | ||
| 655 | rev32 @X[1],@X[1] // big-endian | ||
| 656 | rev32 @X[2],@X[2] | ||
| 657 | rev32 @X[3],@X[3] | ||
| 658 | mov $Xfer,sp | ||
| 659 | add.32 $T0,$T0,@X[0] | ||
| 660 | add.32 $T1,$T1,@X[1] | ||
| 661 | add.32 $T2,$T2,@X[2] | ||
| 662 | st1.32 {$T0-$T1},[$Xfer], #32 | ||
| 663 | add.32 $T3,$T3,@X[3] | ||
| 664 | st1.32 {$T2-$T3},[$Xfer] | ||
| 665 | sub $Xfer,$Xfer,#32 | ||
| 666 | |||
| 667 | ldp $A,$B,[$ctx] | ||
| 668 | ldp $C,$D,[$ctx,#8] | ||
| 669 | ldp $E,$F,[$ctx,#16] | ||
| 670 | ldp $G,$H,[$ctx,#24] | ||
| 671 | ldr $t1,[sp,#0] | ||
| 672 | mov $t2,wzr | ||
| 673 | eor $t3,$B,$C | ||
| 674 | mov $t4,wzr | ||
| 675 | b .L_00_48 | ||
| 676 | |||
| 677 | .align 4 | ||
| 678 | .L_00_48: | ||
| 679 | ___ | ||
| 680 | &Xupdate(\&body_00_15); | ||
| 681 | &Xupdate(\&body_00_15); | ||
| 682 | &Xupdate(\&body_00_15); | ||
| 683 | &Xupdate(\&body_00_15); | ||
| 684 | $code.=<<___; | ||
| 685 | cmp $t1,#0 // check for K256 terminator | ||
| 686 | ldr $t1,[sp,#0] | ||
| 687 | sub $Xfer,$Xfer,#64 | ||
| 688 | bne .L_00_48 | ||
| 689 | |||
| 690 | sub $Ktbl,$Ktbl,#256 // rewind $Ktbl | ||
| 691 | cmp $inp,$num | ||
| 692 | mov $Xfer, #64 | ||
| 693 | csel $Xfer, $Xfer, xzr, eq | ||
| 694 | sub $inp,$inp,$Xfer // avoid SEGV | ||
| 695 | mov $Xfer,sp | ||
| 696 | ___ | ||
| 697 | &Xpreload(\&body_00_15); | ||
| 698 | &Xpreload(\&body_00_15); | ||
| 699 | &Xpreload(\&body_00_15); | ||
| 700 | &Xpreload(\&body_00_15); | ||
| 701 | $code.=<<___; | ||
| 702 | add $A,$A,$t4 // h+=Sigma0(a) from the past | ||
| 703 | ldp $t0,$t1,[$ctx,#0] | ||
| 704 | add $A,$A,$t2 // h+=Maj(a,b,c) from the past | ||
| 705 | ldp $t2,$t3,[$ctx,#8] | ||
| 706 | add $A,$A,$t0 // accumulate | ||
| 707 | add $B,$B,$t1 | ||
| 708 | ldp $t0,$t1,[$ctx,#16] | ||
| 709 | add $C,$C,$t2 | ||
| 710 | add $D,$D,$t3 | ||
| 711 | ldp $t2,$t3,[$ctx,#24] | ||
| 712 | add $E,$E,$t0 | ||
| 713 | add $F,$F,$t1 | ||
| 714 | ldr $t1,[sp,#0] | ||
| 715 | stp $A,$B,[$ctx,#0] | ||
| 716 | add $G,$G,$t2 | ||
| 717 | mov $t2,wzr | ||
| 718 | stp $C,$D,[$ctx,#8] | ||
| 719 | add $H,$H,$t3 | ||
| 720 | stp $E,$F,[$ctx,#16] | ||
| 721 | eor $t3,$B,$C | ||
| 722 | stp $G,$H,[$ctx,#24] | ||
| 723 | mov $t4,wzr | ||
| 724 | mov $Xfer,sp | ||
| 725 | b.ne .L_00_48 | ||
| 726 | |||
| 727 | ldr x29,[x29] | ||
| 728 | add sp,sp,#16*4+16 | ||
| 729 | ret | ||
| 730 | .size sha256_block_neon,.-sha256_block_neon | ||
| 731 | ___ | ||
| 732 | } | ||
| 733 | |||
| 734 | $code.=<<___; | ||
| 735 | #ifndef __KERNEL__ | ||
| 736 | .comm OPENSSL_armcap_P,4,4 | ||
| 737 | #endif | ||
| 738 | ___ | ||
| 739 | |||
| 740 | { my %opcode = ( | ||
| 741 | "sha256h" => 0x5e004000, "sha256h2" => 0x5e005000, | ||
| 742 | "sha256su0" => 0x5e282800, "sha256su1" => 0x5e006000 ); | ||
| 743 | |||
| 744 | sub unsha256 { | ||
| 745 | my ($mnemonic,$arg)=@_; | ||
| 746 | |||
| 747 | $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o | ||
| 748 | && | ||
| 749 | sprintf ".inst\t0x%08x\t//%s %s", | ||
| 750 | $opcode{$mnemonic}|$1|($2<<5)|($3<<16), | ||
| 751 | $mnemonic,$arg; | ||
| 752 | } | ||
| 753 | } | ||
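unsha256() is the fallback for assemblers that don't know the SHA-256 instructions: it rebuilds the 32-bit encoding by OR-ing the register numbers into the base opcode (Rd in bits 4:0, Rn in bits 9:5, Rm in bits 20:16, per the $1|($2<<5)|($3<<16) expression) and emits it as a raw .inst word. The same arithmetic worked through in C, purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t base = 0x5e004000;      /* "sha256h" from %opcode */
            unsigned rd = 0, rn = 1, rm = 2; /* sha256h q0, q1, v2.4s */

            /* prints: .inst  0x5e024020  //sha256h q0, q1, v2.4s */
            printf(".inst\t0x%08x\t//sha256h q0, q1, v2.4s\n",
                   base | rd | (rn << 5) | (rm << 16));
            return 0;
    }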
| 754 | |||
| 755 | open SELF,$0; | ||
| 756 | while(<SELF>) { | ||
| 757 | next if (/^#!/); | ||
| 758 | last if (!s/^#/\/\// and !/^$/); | ||
| 759 | print; | ||
| 760 | } | ||
| 761 | close SELF; | ||
| 762 | |||
| 763 | foreach(split("\n",$code)) { | ||
| 764 | |||
| 765 | s/\`([^\`]*)\`/eval($1)/ge; | ||
| 766 | |||
| 767 | s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge; | ||
| 768 | |||
| 769 | s/\bq([0-9]+)\b/v$1.16b/g; # old->new registers | ||
| 770 | |||
| 771 | s/\.[ui]?8(\s)/$1/; | ||
| 772 | s/\.\w?32\b// and s/\.16b/\.4s/g; | ||
| 773 | m/(ld|st)1[^\[]+\[0\]/ and s/\.4s/\.s/g; | ||
| 774 | |||
| 775 | print $_,"\n"; | ||
| 776 | } | ||
| 777 | |||
| 778 | close STDOUT; | ||
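The trailing open SELF loop re-emits the script's own leading comment block (rewriting '#' to '//'), which is why the generated .S_shipped file below begins with the same header. Note also that the same script produces both digest sizes: per the '$output =~ /512/' check, the SHA-512 parameters are selected purely by the requested output file name, so (hedged, using the script's "void" flavour path that writes straight to the output file) the shipped sources would be regenerated along the lines of:

    perl sha512-armv8.pl void sha256-core.S_shipped
    perl sha512-armv8.pl void sha512-core.S_shipped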
diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped new file mode 100644 index 000000000000..bd0f59f06c9d --- /dev/null +++ b/arch/arm64/crypto/sha512-core.S_shipped | |||
| @@ -0,0 +1,1085 @@ | |||
| 1 | // Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. | ||
| 2 | // | ||
| 3 | // Licensed under the OpenSSL license (the "License"). You may not use | ||
| 4 | // this file except in compliance with the License. You can obtain a copy | ||
| 5 | // in the file LICENSE in the source distribution or at | ||
| 6 | // https://www.openssl.org/source/license.html | ||
| 7 | |||
| 8 | // ==================================================================== | ||
| 9 | // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
| 10 | // project. The module is, however, dual licensed under OpenSSL and | ||
| 11 | // CRYPTOGAMS licenses depending on where you obtain it. For further | ||
| 12 | // details see http://www.openssl.org/~appro/cryptogams/. | ||
| 13 | // | ||
| 14 | // Permission to use under GPLv2 terms is granted. | ||
| 15 | // ==================================================================== | ||
| 16 | // | ||
| 17 | // SHA256/512 for ARMv8. | ||
| 18 | // | ||
| 19 | // Performance in cycles per processed byte and improvement coefficient | ||
| 20 | // over code generated with "default" compiler: | ||
| 21 | // | ||
| 22 | // SHA256-hw SHA256(*) SHA512 | ||
| 23 | // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) | ||
| 24 | // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) | ||
| 25 | // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) | ||
| 26 | // Denver 2.01 10.5 (+26%) 6.70 (+8%) | ||
| 27 | // X-Gene 20.0 (+100%) 12.8 (+300%(***)) | ||
| 28 | // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) | ||
| 29 | // | ||
| 30 | // (*) Software SHA256 results are of lesser relevance, presented | ||
| 31 | // mostly for informational purposes. | ||
| 32 | // (**) The result is a trade-off: it's possible to improve it by | ||
| 33 | // 10% (or by 1 cycle per round), but at the cost of 20% loss | ||
| 34 | // on Cortex-A53 (or by 4 cycles per round). | ||
| 35 | // (***) Super-impressive coefficients over gcc-generated code are an | ||
| 36 | // indication of some compiler "pathology"; most notably, code | ||
| 37 | // generated with -mgeneral-regs-only is significantly faster, | ||
| 38 | // and the gap is then only 40-90%. | ||
| 39 | // | ||
| 40 | // October 2016. | ||
| 41 | // | ||
| 42 | // Originally it was reckoned that it made no sense to implement a NEON | ||
| 43 | // version of SHA256 for 64-bit processors, because the performance | ||
| 44 | // improvement on the most widespread Cortex-A5x processors was observed | ||
| 45 | // to be marginal: no change on Cortex-A53 and ~10% on A57. But it was | ||
| 46 | // then observed that 32-bit NEON SHA256 performs significantly better | ||
| 47 | // than the 64-bit scalar version on *some* of the more recent | ||
| 48 | // processors. As a result, a 64-bit NEON version of SHA256 was added to | ||
| 49 | // provide the best all-round performance; for example, it executes ~30% | ||
| 50 | // faster on X-Gene and Mongoose. [For reference, a NEON version of | ||
| 51 | // SHA512 is bound to deliver much less improvement, likely *negative* | ||
| 52 | // on Cortex-A5x, which is why NEON support is limited to SHA256.] | ||
| 53 | |||
| 54 | #ifndef __KERNEL__ | ||
| 55 | # include "arm_arch.h" | ||
| 56 | #endif | ||
| 57 | |||
| 58 | .text | ||
| 59 | |||
| 60 | .extern OPENSSL_armcap_P | ||
| 61 | .globl sha512_block_data_order | ||
| 62 | .type sha512_block_data_order,%function | ||
| 63 | .align 6 | ||
| 64 | sha512_block_data_order: | ||
| 65 | stp x29,x30,[sp,#-128]! | ||
| 66 | add x29,sp,#0 | ||
| 67 | |||
| 68 | stp x19,x20,[sp,#16] | ||
| 69 | stp x21,x22,[sp,#32] | ||
| 70 | stp x23,x24,[sp,#48] | ||
| 71 | stp x25,x26,[sp,#64] | ||
| 72 | stp x27,x28,[sp,#80] | ||
| 73 | sub sp,sp,#4*8 | ||
| 74 | |||
| 75 | ldp x20,x21,[x0] // load context | ||
| 76 | ldp x22,x23,[x0,#2*8] | ||
| 77 | ldp x24,x25,[x0,#4*8] | ||
| 78 | add x2,x1,x2,lsl#7 // end of input | ||
| 79 | ldp x26,x27,[x0,#6*8] | ||
| 80 | adr x30,.LK512 | ||
| 81 | stp x0,x2,[x29,#96] | ||
| 82 | |||
| 83 | .Loop: | ||
| 84 | ldp x3,x4,[x1],#2*8 | ||
| 85 | ldr x19,[x30],#8 // *K++ | ||
| 86 | eor x28,x21,x22 // magic seed | ||
| 87 | str x1,[x29,#112] | ||
| 88 | #ifndef __AARCH64EB__ | ||
| 89 | rev x3,x3 // 0 | ||
| 90 | #endif | ||
| 91 | ror x16,x24,#14 | ||
| 92 | add x27,x27,x19 // h+=K[i] | ||
| 93 | eor x6,x24,x24,ror#23 | ||
| 94 | and x17,x25,x24 | ||
| 95 | bic x19,x26,x24 | ||
| 96 | add x27,x27,x3 // h+=X[i] | ||
| 97 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 98 | eor x19,x20,x21 // a^b, b^c in next round | ||
| 99 | eor x16,x16,x6,ror#18 // Sigma1(e) | ||
| 100 | ror x6,x20,#28 | ||
| 101 | add x27,x27,x17 // h+=Ch(e,f,g) | ||
| 102 | eor x17,x20,x20,ror#5 | ||
| 103 | add x27,x27,x16 // h+=Sigma1(e) | ||
| 104 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 105 | add x23,x23,x27 // d+=h | ||
| 106 | eor x28,x28,x21 // Maj(a,b,c) | ||
| 107 | eor x17,x6,x17,ror#34 // Sigma0(a) | ||
| 108 | add x27,x27,x28 // h+=Maj(a,b,c) | ||
| 109 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 110 | //add x27,x27,x17 // h+=Sigma0(a) | ||
| 111 | #ifndef __AARCH64EB__ | ||
| 112 | rev x4,x4 // 1 | ||
| 113 | #endif | ||
| 114 | ldp x5,x6,[x1],#2*8 | ||
| 115 | add x27,x27,x17 // h+=Sigma0(a) | ||
| 116 | ror x16,x23,#14 | ||
| 117 | add x26,x26,x28 // h+=K[i] | ||
| 118 | eor x7,x23,x23,ror#23 | ||
| 119 | and x17,x24,x23 | ||
| 120 | bic x28,x25,x23 | ||
| 121 | add x26,x26,x4 // h+=X[i] | ||
| 122 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 123 | eor x28,x27,x20 // a^b, b^c in next round | ||
| 124 | eor x16,x16,x7,ror#18 // Sigma1(e) | ||
| 125 | ror x7,x27,#28 | ||
| 126 | add x26,x26,x17 // h+=Ch(e,f,g) | ||
| 127 | eor x17,x27,x27,ror#5 | ||
| 128 | add x26,x26,x16 // h+=Sigma1(e) | ||
| 129 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 130 | add x22,x22,x26 // d+=h | ||
| 131 | eor x19,x19,x20 // Maj(a,b,c) | ||
| 132 | eor x17,x7,x17,ror#34 // Sigma0(a) | ||
| 133 | add x26,x26,x19 // h+=Maj(a,b,c) | ||
| 134 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 135 | //add x26,x26,x17 // h+=Sigma0(a) | ||
| 136 | #ifndef __AARCH64EB__ | ||
| 137 | rev x5,x5 // 2 | ||
| 138 | #endif | ||
| 139 | add x26,x26,x17 // h+=Sigma0(a) | ||
| 140 | ror x16,x22,#14 | ||
| 141 | add x25,x25,x19 // h+=K[i] | ||
| 142 | eor x8,x22,x22,ror#23 | ||
| 143 | and x17,x23,x22 | ||
| 144 | bic x19,x24,x22 | ||
| 145 | add x25,x25,x5 // h+=X[i] | ||
| 146 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 147 | eor x19,x26,x27 // a^b, b^c in next round | ||
| 148 | eor x16,x16,x8,ror#18 // Sigma1(e) | ||
| 149 | ror x8,x26,#28 | ||
| 150 | add x25,x25,x17 // h+=Ch(e,f,g) | ||
| 151 | eor x17,x26,x26,ror#5 | ||
| 152 | add x25,x25,x16 // h+=Sigma1(e) | ||
| 153 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 154 | add x21,x21,x25 // d+=h | ||
| 155 | eor x28,x28,x27 // Maj(a,b,c) | ||
| 156 | eor x17,x8,x17,ror#34 // Sigma0(a) | ||
| 157 | add x25,x25,x28 // h+=Maj(a,b,c) | ||
| 158 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 159 | //add x25,x25,x17 // h+=Sigma0(a) | ||
| 160 | #ifndef __AARCH64EB__ | ||
| 161 | rev x6,x6 // 3 | ||
| 162 | #endif | ||
| 163 | ldp x7,x8,[x1],#2*8 | ||
| 164 | add x25,x25,x17 // h+=Sigma0(a) | ||
| 165 | ror x16,x21,#14 | ||
| 166 | add x24,x24,x28 // h+=K[i] | ||
| 167 | eor x9,x21,x21,ror#23 | ||
| 168 | and x17,x22,x21 | ||
| 169 | bic x28,x23,x21 | ||
| 170 | add x24,x24,x6 // h+=X[i] | ||
| 171 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 172 | eor x28,x25,x26 // a^b, b^c in next round | ||
| 173 | eor x16,x16,x9,ror#18 // Sigma1(e) | ||
| 174 | ror x9,x25,#28 | ||
| 175 | add x24,x24,x17 // h+=Ch(e,f,g) | ||
| 176 | eor x17,x25,x25,ror#5 | ||
| 177 | add x24,x24,x16 // h+=Sigma1(e) | ||
| 178 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 179 | add x20,x20,x24 // d+=h | ||
| 180 | eor x19,x19,x26 // Maj(a,b,c) | ||
| 181 | eor x17,x9,x17,ror#34 // Sigma0(a) | ||
| 182 | add x24,x24,x19 // h+=Maj(a,b,c) | ||
| 183 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 184 | //add x24,x24,x17 // h+=Sigma0(a) | ||
| 185 | #ifndef __AARCH64EB__ | ||
| 186 | rev x7,x7 // 4 | ||
| 187 | #endif | ||
| 188 | add x24,x24,x17 // h+=Sigma0(a) | ||
| 189 | ror x16,x20,#14 | ||
| 190 | add x23,x23,x19 // h+=K[i] | ||
| 191 | eor x10,x20,x20,ror#23 | ||
| 192 | and x17,x21,x20 | ||
| 193 | bic x19,x22,x20 | ||
| 194 | add x23,x23,x7 // h+=X[i] | ||
| 195 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 196 | eor x19,x24,x25 // a^b, b^c in next round | ||
| 197 | eor x16,x16,x10,ror#18 // Sigma1(e) | ||
| 198 | ror x10,x24,#28 | ||
| 199 | add x23,x23,x17 // h+=Ch(e,f,g) | ||
| 200 | eor x17,x24,x24,ror#5 | ||
| 201 | add x23,x23,x16 // h+=Sigma1(e) | ||
| 202 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 203 | add x27,x27,x23 // d+=h | ||
| 204 | eor x28,x28,x25 // Maj(a,b,c) | ||
| 205 | eor x17,x10,x17,ror#34 // Sigma0(a) | ||
| 206 | add x23,x23,x28 // h+=Maj(a,b,c) | ||
| 207 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 208 | //add x23,x23,x17 // h+=Sigma0(a) | ||
| 209 | #ifndef __AARCH64EB__ | ||
| 210 | rev x8,x8 // 5 | ||
| 211 | #endif | ||
| 212 | ldp x9,x10,[x1],#2*8 | ||
| 213 | add x23,x23,x17 // h+=Sigma0(a) | ||
| 214 | ror x16,x27,#14 | ||
| 215 | add x22,x22,x28 // h+=K[i] | ||
| 216 | eor x11,x27,x27,ror#23 | ||
| 217 | and x17,x20,x27 | ||
| 218 | bic x28,x21,x27 | ||
| 219 | add x22,x22,x8 // h+=X[i] | ||
| 220 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 221 | eor x28,x23,x24 // a^b, b^c in next round | ||
| 222 | eor x16,x16,x11,ror#18 // Sigma1(e) | ||
| 223 | ror x11,x23,#28 | ||
| 224 | add x22,x22,x17 // h+=Ch(e,f,g) | ||
| 225 | eor x17,x23,x23,ror#5 | ||
| 226 | add x22,x22,x16 // h+=Sigma1(e) | ||
| 227 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 228 | add x26,x26,x22 // d+=h | ||
| 229 | eor x19,x19,x24 // Maj(a,b,c) | ||
| 230 | eor x17,x11,x17,ror#34 // Sigma0(a) | ||
| 231 | add x22,x22,x19 // h+=Maj(a,b,c) | ||
| 232 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 233 | //add x22,x22,x17 // h+=Sigma0(a) | ||
| 234 | #ifndef __AARCH64EB__ | ||
| 235 | rev x9,x9 // 6 | ||
| 236 | #endif | ||
| 237 | add x22,x22,x17 // h+=Sigma0(a) | ||
| 238 | ror x16,x26,#14 | ||
| 239 | add x21,x21,x19 // h+=K[i] | ||
| 240 | eor x12,x26,x26,ror#23 | ||
| 241 | and x17,x27,x26 | ||
| 242 | bic x19,x20,x26 | ||
| 243 | add x21,x21,x9 // h+=X[i] | ||
| 244 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 245 | eor x19,x22,x23 // a^b, b^c in next round | ||
| 246 | eor x16,x16,x12,ror#18 // Sigma1(e) | ||
| 247 | ror x12,x22,#28 | ||
| 248 | add x21,x21,x17 // h+=Ch(e,f,g) | ||
| 249 | eor x17,x22,x22,ror#5 | ||
| 250 | add x21,x21,x16 // h+=Sigma1(e) | ||
| 251 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 252 | add x25,x25,x21 // d+=h | ||
| 253 | eor x28,x28,x23 // Maj(a,b,c) | ||
| 254 | eor x17,x12,x17,ror#34 // Sigma0(a) | ||
| 255 | add x21,x21,x28 // h+=Maj(a,b,c) | ||
| 256 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 257 | //add x21,x21,x17 // h+=Sigma0(a) | ||
| 258 | #ifndef __AARCH64EB__ | ||
| 259 | rev x10,x10 // 7 | ||
| 260 | #endif | ||
| 261 | ldp x11,x12,[x1],#2*8 | ||
| 262 | add x21,x21,x17 // h+=Sigma0(a) | ||
| 263 | ror x16,x25,#14 | ||
| 264 | add x20,x20,x28 // h+=K[i] | ||
| 265 | eor x13,x25,x25,ror#23 | ||
| 266 | and x17,x26,x25 | ||
| 267 | bic x28,x27,x25 | ||
| 268 | add x20,x20,x10 // h+=X[i] | ||
| 269 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 270 | eor x28,x21,x22 // a^b, b^c in next round | ||
| 271 | eor x16,x16,x13,ror#18 // Sigma1(e) | ||
| 272 | ror x13,x21,#28 | ||
| 273 | add x20,x20,x17 // h+=Ch(e,f,g) | ||
| 274 | eor x17,x21,x21,ror#5 | ||
| 275 | add x20,x20,x16 // h+=Sigma1(e) | ||
| 276 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 277 | add x24,x24,x20 // d+=h | ||
| 278 | eor x19,x19,x22 // Maj(a,b,c) | ||
| 279 | eor x17,x13,x17,ror#34 // Sigma0(a) | ||
| 280 | add x20,x20,x19 // h+=Maj(a,b,c) | ||
| 281 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 282 | //add x20,x20,x17 // h+=Sigma0(a) | ||
| 283 | #ifndef __AARCH64EB__ | ||
| 284 | rev x11,x11 // 8 | ||
| 285 | #endif | ||
| 286 | add x20,x20,x17 // h+=Sigma0(a) | ||
| 287 | ror x16,x24,#14 | ||
| 288 | add x27,x27,x19 // h+=K[i] | ||
| 289 | eor x14,x24,x24,ror#23 | ||
| 290 | and x17,x25,x24 | ||
| 291 | bic x19,x26,x24 | ||
| 292 | add x27,x27,x11 // h+=X[i] | ||
| 293 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 294 | eor x19,x20,x21 // a^b, b^c in next round | ||
| 295 | eor x16,x16,x14,ror#18 // Sigma1(e) | ||
| 296 | ror x14,x20,#28 | ||
| 297 | add x27,x27,x17 // h+=Ch(e,f,g) | ||
| 298 | eor x17,x20,x20,ror#5 | ||
| 299 | add x27,x27,x16 // h+=Sigma1(e) | ||
| 300 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 301 | add x23,x23,x27 // d+=h | ||
| 302 | eor x28,x28,x21 // Maj(a,b,c) | ||
| 303 | eor x17,x14,x17,ror#34 // Sigma0(a) | ||
| 304 | add x27,x27,x28 // h+=Maj(a,b,c) | ||
| 305 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 306 | //add x27,x27,x17 // h+=Sigma0(a) | ||
| 307 | #ifndef __AARCH64EB__ | ||
| 308 | rev x12,x12 // 9 | ||
| 309 | #endif | ||
| 310 | ldp x13,x14,[x1],#2*8 | ||
| 311 | add x27,x27,x17 // h+=Sigma0(a) | ||
| 312 | ror x16,x23,#14 | ||
| 313 | add x26,x26,x28 // h+=K[i] | ||
| 314 | eor x15,x23,x23,ror#23 | ||
| 315 | and x17,x24,x23 | ||
| 316 | bic x28,x25,x23 | ||
| 317 | add x26,x26,x12 // h+=X[i] | ||
| 318 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 319 | eor x28,x27,x20 // a^b, b^c in next round | ||
| 320 | eor x16,x16,x15,ror#18 // Sigma1(e) | ||
| 321 | ror x15,x27,#28 | ||
| 322 | add x26,x26,x17 // h+=Ch(e,f,g) | ||
| 323 | eor x17,x27,x27,ror#5 | ||
| 324 | add x26,x26,x16 // h+=Sigma1(e) | ||
| 325 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 326 | add x22,x22,x26 // d+=h | ||
| 327 | eor x19,x19,x20 // Maj(a,b,c) | ||
| 328 | eor x17,x15,x17,ror#34 // Sigma0(a) | ||
| 329 | add x26,x26,x19 // h+=Maj(a,b,c) | ||
| 330 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 331 | //add x26,x26,x17 // h+=Sigma0(a) | ||
| 332 | #ifndef __AARCH64EB__ | ||
| 333 | rev x13,x13 // 10 | ||
| 334 | #endif | ||
| 335 | add x26,x26,x17 // h+=Sigma0(a) | ||
| 336 | ror x16,x22,#14 | ||
| 337 | add x25,x25,x19 // h+=K[i] | ||
| 338 | eor x0,x22,x22,ror#23 | ||
| 339 | and x17,x23,x22 | ||
| 340 | bic x19,x24,x22 | ||
| 341 | add x25,x25,x13 // h+=X[i] | ||
| 342 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 343 | eor x19,x26,x27 // a^b, b^c in next round | ||
| 344 | eor x16,x16,x0,ror#18 // Sigma1(e) | ||
| 345 | ror x0,x26,#28 | ||
| 346 | add x25,x25,x17 // h+=Ch(e,f,g) | ||
| 347 | eor x17,x26,x26,ror#5 | ||
| 348 | add x25,x25,x16 // h+=Sigma1(e) | ||
| 349 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 350 | add x21,x21,x25 // d+=h | ||
| 351 | eor x28,x28,x27 // Maj(a,b,c) | ||
| 352 | eor x17,x0,x17,ror#34 // Sigma0(a) | ||
| 353 | add x25,x25,x28 // h+=Maj(a,b,c) | ||
| 354 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 355 | //add x25,x25,x17 // h+=Sigma0(a) | ||
| 356 | #ifndef __AARCH64EB__ | ||
| 357 | rev x14,x14 // 11 | ||
| 358 | #endif | ||
| 359 | ldp x15,x0,[x1],#2*8 | ||
| 360 | add x25,x25,x17 // h+=Sigma0(a) | ||
| 361 | str x6,[sp,#24] | ||
| 362 | ror x16,x21,#14 | ||
| 363 | add x24,x24,x28 // h+=K[i] | ||
| 364 | eor x6,x21,x21,ror#23 | ||
| 365 | and x17,x22,x21 | ||
| 366 | bic x28,x23,x21 | ||
| 367 | add x24,x24,x14 // h+=X[i] | ||
| 368 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 369 | eor x28,x25,x26 // a^b, b^c in next round | ||
| 370 | eor x16,x16,x6,ror#18 // Sigma1(e) | ||
| 371 | ror x6,x25,#28 | ||
| 372 | add x24,x24,x17 // h+=Ch(e,f,g) | ||
| 373 | eor x17,x25,x25,ror#5 | ||
| 374 | add x24,x24,x16 // h+=Sigma1(e) | ||
| 375 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 376 | add x20,x20,x24 // d+=h | ||
| 377 | eor x19,x19,x26 // Maj(a,b,c) | ||
| 378 | eor x17,x6,x17,ror#34 // Sigma0(a) | ||
| 379 | add x24,x24,x19 // h+=Maj(a,b,c) | ||
| 380 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 381 | //add x24,x24,x17 // h+=Sigma0(a) | ||
| 382 | #ifndef __AARCH64EB__ | ||
| 383 | rev x15,x15 // 12 | ||
| 384 | #endif | ||
| 385 | add x24,x24,x17 // h+=Sigma0(a) | ||
| 386 | str x7,[sp,#0] | ||
| 387 | ror x16,x20,#14 | ||
| 388 | add x23,x23,x19 // h+=K[i] | ||
| 389 | eor x7,x20,x20,ror#23 | ||
| 390 | and x17,x21,x20 | ||
| 391 | bic x19,x22,x20 | ||
| 392 | add x23,x23,x15 // h+=X[i] | ||
| 393 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 394 | eor x19,x24,x25 // a^b, b^c in next round | ||
| 395 | eor x16,x16,x7,ror#18 // Sigma1(e) | ||
| 396 | ror x7,x24,#28 | ||
| 397 | add x23,x23,x17 // h+=Ch(e,f,g) | ||
| 398 | eor x17,x24,x24,ror#5 | ||
| 399 | add x23,x23,x16 // h+=Sigma1(e) | ||
| 400 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 401 | add x27,x27,x23 // d+=h | ||
| 402 | eor x28,x28,x25 // Maj(a,b,c) | ||
| 403 | eor x17,x7,x17,ror#34 // Sigma0(a) | ||
| 404 | add x23,x23,x28 // h+=Maj(a,b,c) | ||
| 405 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 406 | //add x23,x23,x17 // h+=Sigma0(a) | ||
| 407 | #ifndef __AARCH64EB__ | ||
| 408 | rev x0,x0 // 13 | ||
| 409 | #endif | ||
| 410 | ldp x1,x2,[x1] | ||
| 411 | add x23,x23,x17 // h+=Sigma0(a) | ||
| 412 | str x8,[sp,#8] | ||
| 413 | ror x16,x27,#14 | ||
| 414 | add x22,x22,x28 // h+=K[i] | ||
| 415 | eor x8,x27,x27,ror#23 | ||
| 416 | and x17,x20,x27 | ||
| 417 | bic x28,x21,x27 | ||
| 418 | add x22,x22,x0 // h+=X[i] | ||
| 419 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 420 | eor x28,x23,x24 // a^b, b^c in next round | ||
| 421 | eor x16,x16,x8,ror#18 // Sigma1(e) | ||
| 422 | ror x8,x23,#28 | ||
| 423 | add x22,x22,x17 // h+=Ch(e,f,g) | ||
| 424 | eor x17,x23,x23,ror#5 | ||
| 425 | add x22,x22,x16 // h+=Sigma1(e) | ||
| 426 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 427 | add x26,x26,x22 // d+=h | ||
| 428 | eor x19,x19,x24 // Maj(a,b,c) | ||
| 429 | eor x17,x8,x17,ror#34 // Sigma0(a) | ||
| 430 | add x22,x22,x19 // h+=Maj(a,b,c) | ||
| 431 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 432 | //add x22,x22,x17 // h+=Sigma0(a) | ||
| 433 | #ifndef __AARCH64EB__ | ||
| 434 | rev x1,x1 // 14 | ||
| 435 | #endif | ||
| 436 | ldr x6,[sp,#24] | ||
| 437 | add x22,x22,x17 // h+=Sigma0(a) | ||
| 438 | str x9,[sp,#16] | ||
| 439 | ror x16,x26,#14 | ||
| 440 | add x21,x21,x19 // h+=K[i] | ||
| 441 | eor x9,x26,x26,ror#23 | ||
| 442 | and x17,x27,x26 | ||
| 443 | bic x19,x20,x26 | ||
| 444 | add x21,x21,x1 // h+=X[i] | ||
| 445 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 446 | eor x19,x22,x23 // a^b, b^c in next round | ||
| 447 | eor x16,x16,x9,ror#18 // Sigma1(e) | ||
| 448 | ror x9,x22,#28 | ||
| 449 | add x21,x21,x17 // h+=Ch(e,f,g) | ||
| 450 | eor x17,x22,x22,ror#5 | ||
| 451 | add x21,x21,x16 // h+=Sigma1(e) | ||
| 452 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 453 | add x25,x25,x21 // d+=h | ||
| 454 | eor x28,x28,x23 // Maj(a,b,c) | ||
| 455 | eor x17,x9,x17,ror#34 // Sigma0(a) | ||
| 456 | add x21,x21,x28 // h+=Maj(a,b,c) | ||
| 457 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 458 | //add x21,x21,x17 // h+=Sigma0(a) | ||
| 459 | #ifndef __AARCH64EB__ | ||
| 460 | rev x2,x2 // 15 | ||
| 461 | #endif | ||
| 462 | ldr x7,[sp,#0] | ||
| 463 | add x21,x21,x17 // h+=Sigma0(a) | ||
| 464 | str x10,[sp,#24] | ||
| 465 | ror x16,x25,#14 | ||
| 466 | add x20,x20,x28 // h+=K[i] | ||
| 467 | ror x9,x4,#1 | ||
| 468 | and x17,x26,x25 | ||
| 469 | ror x8,x1,#19 | ||
| 470 | bic x28,x27,x25 | ||
| 471 | ror x10,x21,#28 | ||
| 472 | add x20,x20,x2 // h+=X[i] | ||
| 473 | eor x16,x16,x25,ror#18 | ||
| 474 | eor x9,x9,x4,ror#8 | ||
| 475 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 476 | eor x28,x21,x22 // a^b, b^c in next round | ||
| 477 | eor x16,x16,x25,ror#41 // Sigma1(e) | ||
| 478 | eor x10,x10,x21,ror#34 | ||
| 479 | add x20,x20,x17 // h+=Ch(e,f,g) | ||
| 480 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 481 | eor x8,x8,x1,ror#61 | ||
| 482 | eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) | ||
| 483 | add x20,x20,x16 // h+=Sigma1(e) | ||
| 484 | eor x19,x19,x22 // Maj(a,b,c) | ||
| 485 | eor x17,x10,x21,ror#39 // Sigma0(a) | ||
| 486 | eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) | ||
| 487 | add x3,x3,x12 | ||
| 488 | add x24,x24,x20 // d+=h | ||
| 489 | add x20,x20,x19 // h+=Maj(a,b,c) | ||
| 490 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 491 | add x3,x3,x9 | ||
| 492 | add x20,x20,x17 // h+=Sigma0(a) | ||
| 493 | add x3,x3,x8 | ||
| 494 | .Loop_16_xx: | ||
| 495 | ldr x8,[sp,#8] | ||
| 496 | str x11,[sp,#0] | ||
| 497 | ror x16,x24,#14 | ||
| 498 | add x27,x27,x19 // h+=K[i] | ||
| 499 | ror x10,x5,#1 | ||
| 500 | and x17,x25,x24 | ||
| 501 | ror x9,x2,#19 | ||
| 502 | bic x19,x26,x24 | ||
| 503 | ror x11,x20,#28 | ||
| 504 | add x27,x27,x3 // h+=X[i] | ||
| 505 | eor x16,x16,x24,ror#18 | ||
| 506 | eor x10,x10,x5,ror#8 | ||
| 507 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 508 | eor x19,x20,x21 // a^b, b^c in next round | ||
| 509 | eor x16,x16,x24,ror#41 // Sigma1(e) | ||
| 510 | eor x11,x11,x20,ror#34 | ||
| 511 | add x27,x27,x17 // h+=Ch(e,f,g) | ||
| 512 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 513 | eor x9,x9,x2,ror#61 | ||
| 514 | eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) | ||
| 515 | add x27,x27,x16 // h+=Sigma1(e) | ||
| 516 | eor x28,x28,x21 // Maj(a,b,c) | ||
| 517 | eor x17,x11,x20,ror#39 // Sigma0(a) | ||
| 518 | eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) | ||
| 519 | add x4,x4,x13 | ||
| 520 | add x23,x23,x27 // d+=h | ||
| 521 | add x27,x27,x28 // h+=Maj(a,b,c) | ||
| 522 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 523 | add x4,x4,x10 | ||
| 524 | add x27,x27,x17 // h+=Sigma0(a) | ||
| 525 | add x4,x4,x9 | ||
| 526 | ldr x9,[sp,#16] | ||
| 527 | str x12,[sp,#8] | ||
| 528 | ror x16,x23,#14 | ||
| 529 | add x26,x26,x28 // h+=K[i] | ||
| 530 | ror x11,x6,#1 | ||
| 531 | and x17,x24,x23 | ||
| 532 | ror x10,x3,#19 | ||
| 533 | bic x28,x25,x23 | ||
| 534 | ror x12,x27,#28 | ||
| 535 | add x26,x26,x4 // h+=X[i] | ||
| 536 | eor x16,x16,x23,ror#18 | ||
| 537 | eor x11,x11,x6,ror#8 | ||
| 538 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 539 | eor x28,x27,x20 // a^b, b^c in next round | ||
| 540 | eor x16,x16,x23,ror#41 // Sigma1(e) | ||
| 541 | eor x12,x12,x27,ror#34 | ||
| 542 | add x26,x26,x17 // h+=Ch(e,f,g) | ||
| 543 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 544 | eor x10,x10,x3,ror#61 | ||
| 545 | eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) | ||
| 546 | add x26,x26,x16 // h+=Sigma1(e) | ||
| 547 | eor x19,x19,x20 // Maj(a,b,c) | ||
| 548 | eor x17,x12,x27,ror#39 // Sigma0(a) | ||
| 549 | eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) | ||
| 550 | add x5,x5,x14 | ||
| 551 | add x22,x22,x26 // d+=h | ||
| 552 | add x26,x26,x19 // h+=Maj(a,b,c) | ||
| 553 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 554 | add x5,x5,x11 | ||
| 555 | add x26,x26,x17 // h+=Sigma0(a) | ||
| 556 | add x5,x5,x10 | ||
| 557 | ldr x10,[sp,#24] | ||
| 558 | str x13,[sp,#16] | ||
| 559 | ror x16,x22,#14 | ||
| 560 | add x25,x25,x19 // h+=K[i] | ||
| 561 | ror x12,x7,#1 | ||
| 562 | and x17,x23,x22 | ||
| 563 | ror x11,x4,#19 | ||
| 564 | bic x19,x24,x22 | ||
| 565 | ror x13,x26,#28 | ||
| 566 | add x25,x25,x5 // h+=X[i] | ||
| 567 | eor x16,x16,x22,ror#18 | ||
| 568 | eor x12,x12,x7,ror#8 | ||
| 569 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 570 | eor x19,x26,x27 // a^b, b^c in next round | ||
| 571 | eor x16,x16,x22,ror#41 // Sigma1(e) | ||
| 572 | eor x13,x13,x26,ror#34 | ||
| 573 | add x25,x25,x17 // h+=Ch(e,f,g) | ||
| 574 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 575 | eor x11,x11,x4,ror#61 | ||
| 576 | eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) | ||
| 577 | add x25,x25,x16 // h+=Sigma1(e) | ||
| 578 | eor x28,x28,x27 // Maj(a,b,c) | ||
| 579 | eor x17,x13,x26,ror#39 // Sigma0(a) | ||
| 580 | eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) | ||
| 581 | add x6,x6,x15 | ||
| 582 | add x21,x21,x25 // d+=h | ||
| 583 | add x25,x25,x28 // h+=Maj(a,b,c) | ||
| 584 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 585 | add x6,x6,x12 | ||
| 586 | add x25,x25,x17 // h+=Sigma0(a) | ||
| 587 | add x6,x6,x11 | ||
| 588 | ldr x11,[sp,#0] | ||
| 589 | str x14,[sp,#24] | ||
| 590 | ror x16,x21,#14 | ||
| 591 | add x24,x24,x28 // h+=K[i] | ||
| 592 | ror x13,x8,#1 | ||
| 593 | and x17,x22,x21 | ||
| 594 | ror x12,x5,#19 | ||
| 595 | bic x28,x23,x21 | ||
| 596 | ror x14,x25,#28 | ||
| 597 | add x24,x24,x6 // h+=X[i] | ||
| 598 | eor x16,x16,x21,ror#18 | ||
| 599 | eor x13,x13,x8,ror#8 | ||
| 600 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 601 | eor x28,x25,x26 // a^b, b^c in next round | ||
| 602 | eor x16,x16,x21,ror#41 // Sigma1(e) | ||
| 603 | eor x14,x14,x25,ror#34 | ||
| 604 | add x24,x24,x17 // h+=Ch(e,f,g) | ||
| 605 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 606 | eor x12,x12,x5,ror#61 | ||
| 607 | eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) | ||
| 608 | add x24,x24,x16 // h+=Sigma1(e) | ||
| 609 | eor x19,x19,x26 // Maj(a,b,c) | ||
| 610 | eor x17,x14,x25,ror#39 // Sigma0(a) | ||
| 611 | eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) | ||
| 612 | add x7,x7,x0 | ||
| 613 | add x20,x20,x24 // d+=h | ||
| 614 | add x24,x24,x19 // h+=Maj(a,b,c) | ||
| 615 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 616 | add x7,x7,x13 | ||
| 617 | add x24,x24,x17 // h+=Sigma0(a) | ||
| 618 | add x7,x7,x12 | ||
| 619 | ldr x12,[sp,#8] | ||
| 620 | str x15,[sp,#0] | ||
| 621 | ror x16,x20,#14 | ||
| 622 | add x23,x23,x19 // h+=K[i] | ||
| 623 | ror x14,x9,#1 | ||
| 624 | and x17,x21,x20 | ||
| 625 | ror x13,x6,#19 | ||
| 626 | bic x19,x22,x20 | ||
| 627 | ror x15,x24,#28 | ||
| 628 | add x23,x23,x7 // h+=X[i] | ||
| 629 | eor x16,x16,x20,ror#18 | ||
| 630 | eor x14,x14,x9,ror#8 | ||
| 631 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 632 | eor x19,x24,x25 // a^b, b^c in next round | ||
| 633 | eor x16,x16,x20,ror#41 // Sigma1(e) | ||
| 634 | eor x15,x15,x24,ror#34 | ||
| 635 | add x23,x23,x17 // h+=Ch(e,f,g) | ||
| 636 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 637 | eor x13,x13,x6,ror#61 | ||
| 638 | eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) | ||
| 639 | add x23,x23,x16 // h+=Sigma1(e) | ||
| 640 | eor x28,x28,x25 // Maj(a,b,c) | ||
| 641 | eor x17,x15,x24,ror#39 // Sigma0(a) | ||
| 642 | eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) | ||
| 643 | add x8,x8,x1 | ||
| 644 | add x27,x27,x23 // d+=h | ||
| 645 | add x23,x23,x28 // h+=Maj(a,b,c) | ||
| 646 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 647 | add x8,x8,x14 | ||
| 648 | add x23,x23,x17 // h+=Sigma0(a) | ||
| 649 | add x8,x8,x13 | ||
| 650 | ldr x13,[sp,#16] | ||
| 651 | str x0,[sp,#8] | ||
| 652 | ror x16,x27,#14 | ||
| 653 | add x22,x22,x28 // h+=K[i] | ||
| 654 | ror x15,x10,#1 | ||
| 655 | and x17,x20,x27 | ||
| 656 | ror x14,x7,#19 | ||
| 657 | bic x28,x21,x27 | ||
| 658 | ror x0,x23,#28 | ||
| 659 | add x22,x22,x8 // h+=X[i] | ||
| 660 | eor x16,x16,x27,ror#18 | ||
| 661 | eor x15,x15,x10,ror#8 | ||
| 662 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 663 | eor x28,x23,x24 // a^b, b^c in next round | ||
| 664 | eor x16,x16,x27,ror#41 // Sigma1(e) | ||
| 665 | eor x0,x0,x23,ror#34 | ||
| 666 | add x22,x22,x17 // h+=Ch(e,f,g) | ||
| 667 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 668 | eor x14,x14,x7,ror#61 | ||
| 669 | eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) | ||
| 670 | add x22,x22,x16 // h+=Sigma1(e) | ||
| 671 | eor x19,x19,x24 // Maj(a,b,c) | ||
| 672 | eor x17,x0,x23,ror#39 // Sigma0(a) | ||
| 673 | eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) | ||
| 674 | add x9,x9,x2 | ||
| 675 | add x26,x26,x22 // d+=h | ||
| 676 | add x22,x22,x19 // h+=Maj(a,b,c) | ||
| 677 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 678 | add x9,x9,x15 | ||
| 679 | add x22,x22,x17 // h+=Sigma0(a) | ||
| 680 | add x9,x9,x14 | ||
| 681 | ldr x14,[sp,#24] | ||
| 682 | str x1,[sp,#16] | ||
| 683 | ror x16,x26,#14 | ||
| 684 | add x21,x21,x19 // h+=K[i] | ||
| 685 | ror x0,x11,#1 | ||
| 686 | and x17,x27,x26 | ||
| 687 | ror x15,x8,#19 | ||
| 688 | bic x19,x20,x26 | ||
| 689 | ror x1,x22,#28 | ||
| 690 | add x21,x21,x9 // h+=X[i] | ||
| 691 | eor x16,x16,x26,ror#18 | ||
| 692 | eor x0,x0,x11,ror#8 | ||
| 693 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 694 | eor x19,x22,x23 // a^b, b^c in next round | ||
| 695 | eor x16,x16,x26,ror#41 // Sigma1(e) | ||
| 696 | eor x1,x1,x22,ror#34 | ||
| 697 | add x21,x21,x17 // h+=Ch(e,f,g) | ||
| 698 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 699 | eor x15,x15,x8,ror#61 | ||
| 700 | eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) | ||
| 701 | add x21,x21,x16 // h+=Sigma1(e) | ||
| 702 | eor x28,x28,x23 // Maj(a,b,c) | ||
| 703 | eor x17,x1,x22,ror#39 // Sigma0(a) | ||
| 704 | eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) | ||
| 705 | add x10,x10,x3 | ||
| 706 | add x25,x25,x21 // d+=h | ||
| 707 | add x21,x21,x28 // h+=Maj(a,b,c) | ||
| 708 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 709 | add x10,x10,x0 | ||
| 710 | add x21,x21,x17 // h+=Sigma0(a) | ||
| 711 | add x10,x10,x15 | ||
| 712 | ldr x15,[sp,#0] | ||
| 713 | str x2,[sp,#24] | ||
| 714 | ror x16,x25,#14 | ||
| 715 | add x20,x20,x28 // h+=K[i] | ||
| 716 | ror x1,x12,#1 | ||
| 717 | and x17,x26,x25 | ||
| 718 | ror x0,x9,#19 | ||
| 719 | bic x28,x27,x25 | ||
| 720 | ror x2,x21,#28 | ||
| 721 | add x20,x20,x10 // h+=X[i] | ||
| 722 | eor x16,x16,x25,ror#18 | ||
| 723 | eor x1,x1,x12,ror#8 | ||
| 724 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 725 | eor x28,x21,x22 // a^b, b^c in next round | ||
| 726 | eor x16,x16,x25,ror#41 // Sigma1(e) | ||
| 727 | eor x2,x2,x21,ror#34 | ||
| 728 | add x20,x20,x17 // h+=Ch(e,f,g) | ||
| 729 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 730 | eor x0,x0,x9,ror#61 | ||
| 731 | eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) | ||
| 732 | add x20,x20,x16 // h+=Sigma1(e) | ||
| 733 | eor x19,x19,x22 // Maj(a,b,c) | ||
| 734 | eor x17,x2,x21,ror#39 // Sigma0(a) | ||
| 735 | eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) | ||
| 736 | add x11,x11,x4 | ||
| 737 | add x24,x24,x20 // d+=h | ||
| 738 | add x20,x20,x19 // h+=Maj(a,b,c) | ||
| 739 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 740 | add x11,x11,x1 | ||
| 741 | add x20,x20,x17 // h+=Sigma0(a) | ||
| 742 | add x11,x11,x0 | ||
| 743 | ldr x0,[sp,#8] | ||
| 744 | str x3,[sp,#0] | ||
| 745 | ror x16,x24,#14 | ||
| 746 | add x27,x27,x19 // h+=K[i] | ||
| 747 | ror x2,x13,#1 | ||
| 748 | and x17,x25,x24 | ||
| 749 | ror x1,x10,#19 | ||
| 750 | bic x19,x26,x24 | ||
| 751 | ror x3,x20,#28 | ||
| 752 | add x27,x27,x11 // h+=X[i] | ||
| 753 | eor x16,x16,x24,ror#18 | ||
| 754 | eor x2,x2,x13,ror#8 | ||
| 755 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 756 | eor x19,x20,x21 // a^b, b^c in next round | ||
| 757 | eor x16,x16,x24,ror#41 // Sigma1(e) | ||
| 758 | eor x3,x3,x20,ror#34 | ||
| 759 | add x27,x27,x17 // h+=Ch(e,f,g) | ||
| 760 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 761 | eor x1,x1,x10,ror#61 | ||
| 762 | eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) | ||
| 763 | add x27,x27,x16 // h+=Sigma1(e) | ||
| 764 | eor x28,x28,x21 // Maj(a,b,c) | ||
| 765 | eor x17,x3,x20,ror#39 // Sigma0(a) | ||
| 766 | eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) | ||
| 767 | add x12,x12,x5 | ||
| 768 | add x23,x23,x27 // d+=h | ||
| 769 | add x27,x27,x28 // h+=Maj(a,b,c) | ||
| 770 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 771 | add x12,x12,x2 | ||
| 772 | add x27,x27,x17 // h+=Sigma0(a) | ||
| 773 | add x12,x12,x1 | ||
| 774 | ldr x1,[sp,#16] | ||
| 775 | str x4,[sp,#8] | ||
| 776 | ror x16,x23,#14 | ||
| 777 | add x26,x26,x28 // h+=K[i] | ||
| 778 | ror x3,x14,#1 | ||
| 779 | and x17,x24,x23 | ||
| 780 | ror x2,x11,#19 | ||
| 781 | bic x28,x25,x23 | ||
| 782 | ror x4,x27,#28 | ||
| 783 | add x26,x26,x12 // h+=X[i] | ||
| 784 | eor x16,x16,x23,ror#18 | ||
| 785 | eor x3,x3,x14,ror#8 | ||
| 786 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 787 | eor x28,x27,x20 // a^b, b^c in next round | ||
| 788 | eor x16,x16,x23,ror#41 // Sigma1(e) | ||
| 789 | eor x4,x4,x27,ror#34 | ||
| 790 | add x26,x26,x17 // h+=Ch(e,f,g) | ||
| 791 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 792 | eor x2,x2,x11,ror#61 | ||
| 793 | eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) | ||
| 794 | add x26,x26,x16 // h+=Sigma1(e) | ||
| 795 | eor x19,x19,x20 // Maj(a,b,c) | ||
| 796 | eor x17,x4,x27,ror#39 // Sigma0(a) | ||
| 797 | eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) | ||
| 798 | add x13,x13,x6 | ||
| 799 | add x22,x22,x26 // d+=h | ||
| 800 | add x26,x26,x19 // h+=Maj(a,b,c) | ||
| 801 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 802 | add x13,x13,x3 | ||
| 803 | add x26,x26,x17 // h+=Sigma0(a) | ||
| 804 | add x13,x13,x2 | ||
| 805 | ldr x2,[sp,#24] | ||
| 806 | str x5,[sp,#16] | ||
| 807 | ror x16,x22,#14 | ||
| 808 | add x25,x25,x19 // h+=K[i] | ||
| 809 | ror x4,x15,#1 | ||
| 810 | and x17,x23,x22 | ||
| 811 | ror x3,x12,#19 | ||
| 812 | bic x19,x24,x22 | ||
| 813 | ror x5,x26,#28 | ||
| 814 | add x25,x25,x13 // h+=X[i] | ||
| 815 | eor x16,x16,x22,ror#18 | ||
| 816 | eor x4,x4,x15,ror#8 | ||
| 817 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 818 | eor x19,x26,x27 // a^b, b^c in next round | ||
| 819 | eor x16,x16,x22,ror#41 // Sigma1(e) | ||
| 820 | eor x5,x5,x26,ror#34 | ||
| 821 | add x25,x25,x17 // h+=Ch(e,f,g) | ||
| 822 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 823 | eor x3,x3,x12,ror#61 | ||
| 824 | eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) | ||
| 825 | add x25,x25,x16 // h+=Sigma1(e) | ||
| 826 | eor x28,x28,x27 // Maj(a,b,c) | ||
| 827 | eor x17,x5,x26,ror#39 // Sigma0(a) | ||
| 828 | eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) | ||
| 829 | add x14,x14,x7 | ||
| 830 | add x21,x21,x25 // d+=h | ||
| 831 | add x25,x25,x28 // h+=Maj(a,b,c) | ||
| 832 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 833 | add x14,x14,x4 | ||
| 834 | add x25,x25,x17 // h+=Sigma0(a) | ||
| 835 | add x14,x14,x3 | ||
| 836 | ldr x3,[sp,#0] | ||
| 837 | str x6,[sp,#24] | ||
| 838 | ror x16,x21,#14 | ||
| 839 | add x24,x24,x28 // h+=K[i] | ||
| 840 | ror x5,x0,#1 | ||
| 841 | and x17,x22,x21 | ||
| 842 | ror x4,x13,#19 | ||
| 843 | bic x28,x23,x21 | ||
| 844 | ror x6,x25,#28 | ||
| 845 | add x24,x24,x14 // h+=X[i] | ||
| 846 | eor x16,x16,x21,ror#18 | ||
| 847 | eor x5,x5,x0,ror#8 | ||
| 848 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 849 | eor x28,x25,x26 // a^b, b^c in next round | ||
| 850 | eor x16,x16,x21,ror#41 // Sigma1(e) | ||
| 851 | eor x6,x6,x25,ror#34 | ||
| 852 | add x24,x24,x17 // h+=Ch(e,f,g) | ||
| 853 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 854 | eor x4,x4,x13,ror#61 | ||
| 855 | eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) | ||
| 856 | add x24,x24,x16 // h+=Sigma1(e) | ||
| 857 | eor x19,x19,x26 // Maj(a,b,c) | ||
| 858 | eor x17,x6,x25,ror#39 // Sigma0(a) | ||
| 859 | eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) | ||
| 860 | add x15,x15,x8 | ||
| 861 | add x20,x20,x24 // d+=h | ||
| 862 | add x24,x24,x19 // h+=Maj(a,b,c) | ||
| 863 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 864 | add x15,x15,x5 | ||
| 865 | add x24,x24,x17 // h+=Sigma0(a) | ||
| 866 | add x15,x15,x4 | ||
| 867 | ldr x4,[sp,#8] | ||
| 868 | str x7,[sp,#0] | ||
| 869 | ror x16,x20,#14 | ||
| 870 | add x23,x23,x19 // h+=K[i] | ||
| 871 | ror x6,x1,#1 | ||
| 872 | and x17,x21,x20 | ||
| 873 | ror x5,x14,#19 | ||
| 874 | bic x19,x22,x20 | ||
| 875 | ror x7,x24,#28 | ||
| 876 | add x23,x23,x15 // h+=X[i] | ||
| 877 | eor x16,x16,x20,ror#18 | ||
| 878 | eor x6,x6,x1,ror#8 | ||
| 879 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 880 | eor x19,x24,x25 // a^b, b^c in next round | ||
| 881 | eor x16,x16,x20,ror#41 // Sigma1(e) | ||
| 882 | eor x7,x7,x24,ror#34 | ||
| 883 | add x23,x23,x17 // h+=Ch(e,f,g) | ||
| 884 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 885 | eor x5,x5,x14,ror#61 | ||
| 886 | eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) | ||
| 887 | add x23,x23,x16 // h+=Sigma1(e) | ||
| 888 | eor x28,x28,x25 // Maj(a,b,c) | ||
| 889 | eor x17,x7,x24,ror#39 // Sigma0(a) | ||
| 890 | eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) | ||
| 891 | add x0,x0,x9 | ||
| 892 | add x27,x27,x23 // d+=h | ||
| 893 | add x23,x23,x28 // h+=Maj(a,b,c) | ||
| 894 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 895 | add x0,x0,x6 | ||
| 896 | add x23,x23,x17 // h+=Sigma0(a) | ||
| 897 | add x0,x0,x5 | ||
| 898 | ldr x5,[sp,#16] | ||
| 899 | str x8,[sp,#8] | ||
| 900 | ror x16,x27,#14 | ||
| 901 | add x22,x22,x28 // h+=K[i] | ||
| 902 | ror x7,x2,#1 | ||
| 903 | and x17,x20,x27 | ||
| 904 | ror x6,x15,#19 | ||
| 905 | bic x28,x21,x27 | ||
| 906 | ror x8,x23,#28 | ||
| 907 | add x22,x22,x0 // h+=X[i] | ||
| 908 | eor x16,x16,x27,ror#18 | ||
| 909 | eor x7,x7,x2,ror#8 | ||
| 910 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 911 | eor x28,x23,x24 // a^b, b^c in next round | ||
| 912 | eor x16,x16,x27,ror#41 // Sigma1(e) | ||
| 913 | eor x8,x8,x23,ror#34 | ||
| 914 | add x22,x22,x17 // h+=Ch(e,f,g) | ||
| 915 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 916 | eor x6,x6,x15,ror#61 | ||
| 917 | eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) | ||
| 918 | add x22,x22,x16 // h+=Sigma1(e) | ||
| 919 | eor x19,x19,x24 // Maj(a,b,c) | ||
| 920 | eor x17,x8,x23,ror#39 // Sigma0(a) | ||
| 921 | eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) | ||
| 922 | add x1,x1,x10 | ||
| 923 | add x26,x26,x22 // d+=h | ||
| 924 | add x22,x22,x19 // h+=Maj(a,b,c) | ||
| 925 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 926 | add x1,x1,x7 | ||
| 927 | add x22,x22,x17 // h+=Sigma0(a) | ||
| 928 | add x1,x1,x6 | ||
| 929 | ldr x6,[sp,#24] | ||
| 930 | str x9,[sp,#16] | ||
| 931 | ror x16,x26,#14 | ||
| 932 | add x21,x21,x19 // h+=K[i] | ||
| 933 | ror x8,x3,#1 | ||
| 934 | and x17,x27,x26 | ||
| 935 | ror x7,x0,#19 | ||
| 936 | bic x19,x20,x26 | ||
| 937 | ror x9,x22,#28 | ||
| 938 | add x21,x21,x1 // h+=X[i] | ||
| 939 | eor x16,x16,x26,ror#18 | ||
| 940 | eor x8,x8,x3,ror#8 | ||
| 941 | orr x17,x17,x19 // Ch(e,f,g) | ||
| 942 | eor x19,x22,x23 // a^b, b^c in next round | ||
| 943 | eor x16,x16,x26,ror#41 // Sigma1(e) | ||
| 944 | eor x9,x9,x22,ror#34 | ||
| 945 | add x21,x21,x17 // h+=Ch(e,f,g) | ||
| 946 | and x28,x28,x19 // (b^c)&=(a^b) | ||
| 947 | eor x7,x7,x0,ror#61 | ||
| 948 | eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) | ||
| 949 | add x21,x21,x16 // h+=Sigma1(e) | ||
| 950 | eor x28,x28,x23 // Maj(a,b,c) | ||
| 951 | eor x17,x9,x22,ror#39 // Sigma0(a) | ||
| 952 | eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) | ||
| 953 | add x2,x2,x11 | ||
| 954 | add x25,x25,x21 // d+=h | ||
| 955 | add x21,x21,x28 // h+=Maj(a,b,c) | ||
| 956 | ldr x28,[x30],#8 // *K++, x19 in next round | ||
| 957 | add x2,x2,x8 | ||
| 958 | add x21,x21,x17 // h+=Sigma0(a) | ||
| 959 | add x2,x2,x7 | ||
| 960 | ldr x7,[sp,#0] | ||
| 961 | str x10,[sp,#24] | ||
| 962 | ror x16,x25,#14 | ||
| 963 | add x20,x20,x28 // h+=K[i] | ||
| 964 | ror x9,x4,#1 | ||
| 965 | and x17,x26,x25 | ||
| 966 | ror x8,x1,#19 | ||
| 967 | bic x28,x27,x25 | ||
| 968 | ror x10,x21,#28 | ||
| 969 | add x20,x20,x2 // h+=X[i] | ||
| 970 | eor x16,x16,x25,ror#18 | ||
| 971 | eor x9,x9,x4,ror#8 | ||
| 972 | orr x17,x17,x28 // Ch(e,f,g) | ||
| 973 | eor x28,x21,x22 // a^b, b^c in next round | ||
| 974 | eor x16,x16,x25,ror#41 // Sigma1(e) | ||
| 975 | eor x10,x10,x21,ror#34 | ||
| 976 | add x20,x20,x17 // h+=Ch(e,f,g) | ||
| 977 | and x19,x19,x28 // (b^c)&=(a^b) | ||
| 978 | eor x8,x8,x1,ror#61 | ||
| 979 | eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) | ||
| 980 | add x20,x20,x16 // h+=Sigma1(e) | ||
| 981 | eor x19,x19,x22 // Maj(a,b,c) | ||
| 982 | eor x17,x10,x21,ror#39 // Sigma0(a) | ||
| 983 | eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) | ||
| 984 | add x3,x3,x12 | ||
| 985 | add x24,x24,x20 // d+=h | ||
| 986 | add x20,x20,x19 // h+=Maj(a,b,c) | ||
| 987 | ldr x19,[x30],#8 // *K++, x28 in next round | ||
| 988 | add x3,x3,x9 | ||
| 989 | add x20,x20,x17 // h+=Sigma0(a) | ||
| 990 | add x3,x3,x8 | ||
| 991 | cbnz x19,.Loop_16_xx | ||
| 992 | |||
| 993 | ldp x0,x2,[x29,#96] | ||
| 994 | ldr x1,[x29,#112] | ||
| 995 | sub x30,x30,#648 // rewind | ||
| 996 | |||
| 997 | ldp x3,x4,[x0] | ||
| 998 | ldp x5,x6,[x0,#2*8] | ||
| 999 | add x1,x1,#14*8 // advance input pointer | ||
| 1000 | ldp x7,x8,[x0,#4*8] | ||
| 1001 | add x20,x20,x3 | ||
| 1002 | ldp x9,x10,[x0,#6*8] | ||
| 1003 | add x21,x21,x4 | ||
| 1004 | add x22,x22,x5 | ||
| 1005 | add x23,x23,x6 | ||
| 1006 | stp x20,x21,[x0] | ||
| 1007 | add x24,x24,x7 | ||
| 1008 | add x25,x25,x8 | ||
| 1009 | stp x22,x23,[x0,#2*8] | ||
| 1010 | add x26,x26,x9 | ||
| 1011 | add x27,x27,x10 | ||
| 1012 | cmp x1,x2 | ||
| 1013 | stp x24,x25,[x0,#4*8] | ||
| 1014 | stp x26,x27,[x0,#6*8] | ||
| 1015 | b.ne .Loop | ||
| 1016 | |||
| 1017 | ldp x19,x20,[x29,#16] | ||
| 1018 | add sp,sp,#4*8 | ||
| 1019 | ldp x21,x22,[x29,#32] | ||
| 1020 | ldp x23,x24,[x29,#48] | ||
| 1021 | ldp x25,x26,[x29,#64] | ||
| 1022 | ldp x27,x28,[x29,#80] | ||
| 1023 | ldp x29,x30,[sp],#128 | ||
| 1024 | ret | ||
| 1025 | .size sha512_block_data_order,.-sha512_block_data_order | ||
| 1026 | |||
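For reference, the Sigma1/Sigma0/Ch/Maj and sigma0/sigma1 annotations in the unrolled rounds above are the standard FIPS 180-4 SHA-512 primitives; the rotate counts in the assembly (ror#14/18/41 for Sigma1, ror#28/34/39 for Sigma0, ror#1/8 plus lsr#7 for sigma0, ror#19/61 plus lsr#6 for sigma1) match them exactly. A minimal C sketch of one round, purely illustrative since the assembly interleaves each round with the message-schedule update for a later round to hide latency:

    #include <linux/bitops.h>	/* ror64() */
    #include <linux/types.h>

    /* FIPS 180-4 SHA-512 round primitives, as named in the comments above */
    static inline u64 Ch(u64 e, u64 f, u64 g)  { return (e & f) ^ (~e & g); }
    static inline u64 Maj(u64 a, u64 b, u64 c) { return (a & b) ^ (a & c) ^ (b & c); }
    static inline u64 Sigma0(u64 a) { return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39); }
    static inline u64 Sigma1(u64 e) { return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41); }
    static inline u64 sigma0(u64 x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
    static inline u64 sigma1(u64 x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

    /*
     * One round over the working state s[8] = { a, b, c, d, e, f, g, h }.
     * The schedule is extended as W[i] = sigma1(W[i-2]) + W[i-7] +
     * sigma0(W[i-15]) + W[i-16], which is what the sigma0(X[i+1]) and
     * sigma1(X[i+14]) lines above compute.
     */
    static void sha512_round(u64 s[8], u64 k, u64 w)
    {
    	u64 t1 = s[7] + Sigma1(s[4]) + Ch(s[4], s[5], s[6]) + k + w;
    	u64 t2 = Sigma0(s[0]) + Maj(s[0], s[1], s[2]);

    	s[7] = s[6]; s[6] = s[5]; s[5] = s[4]; s[4] = s[3] + t1;
    	s[3] = s[2]; s[2] = s[1]; s[1] = s[0]; s[0] = t1 + t2;
    }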
| 1027 | .align 6 | ||
| 1028 | .type .LK512,%object | ||
| 1029 | .LK512: | ||
| 1030 | .quad 0x428a2f98d728ae22,0x7137449123ef65cd | ||
| 1031 | .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc | ||
| 1032 | .quad 0x3956c25bf348b538,0x59f111f1b605d019 | ||
| 1033 | .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 | ||
| 1034 | .quad 0xd807aa98a3030242,0x12835b0145706fbe | ||
| 1035 | .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 | ||
| 1036 | .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 | ||
| 1037 | .quad 0x9bdc06a725c71235,0xc19bf174cf692694 | ||
| 1038 | .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 | ||
| 1039 | .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 | ||
| 1040 | .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 | ||
| 1041 | .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 | ||
| 1042 | .quad 0x983e5152ee66dfab,0xa831c66d2db43210 | ||
| 1043 | .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 | ||
| 1044 | .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 | ||
| 1045 | .quad 0x06ca6351e003826f,0x142929670a0e6e70 | ||
| 1046 | .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 | ||
| 1047 | .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df | ||
| 1048 | .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 | ||
| 1049 | .quad 0x81c2c92e47edaee6,0x92722c851482353b | ||
| 1050 | .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 | ||
| 1051 | .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 | ||
| 1052 | .quad 0xd192e819d6ef5218,0xd69906245565a910 | ||
| 1053 | .quad 0xf40e35855771202a,0x106aa07032bbd1b8 | ||
| 1054 | .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 | ||
| 1055 | .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 | ||
| 1056 | .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb | ||
| 1057 | .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 | ||
| 1058 | .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 | ||
| 1059 | .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec | ||
| 1060 | .quad 0x90befffa23631e28,0xa4506cebde82bde9 | ||
| 1061 | .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b | ||
| 1062 | .quad 0xca273eceea26619c,0xd186b8c721c0c207 | ||
| 1063 | .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 | ||
| 1064 | .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 | ||
| 1065 | .quad 0x113f9804bef90dae,0x1b710b35131c471b | ||
| 1066 | .quad 0x28db77f523047d84,0x32caab7b40c72493 | ||
| 1067 | .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c | ||
| 1068 | .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a | ||
| 1069 | .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 | ||
| 1070 | .quad 0 // terminator | ||
| 1071 | .size .LK512,.-.LK512 | ||
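.LK512 is the table of eighty 64-bit round constants (the first 64 bits of the fractional parts of the cube roots of the first eighty primes). The trailing .quad 0 is not a constant: x30 walks the table through the post-incremented ldr x19/x28,[x30],#8 loads, cbnz x19,.Loop_16_xx above keeps looping while the fetched value is non-zero, and the "sub x30,x30,#648 // rewind" (80 constants plus the terminator, times 8 bytes) resets the pointer for the next input block. A simplified C analogue of the sentinel-terminated loop (a sketch; the assembly actually tests only once per block of sixteen unrolled rounds):

    const u64 *k = K512;		/* x30 walks .LK512 */
    u64 kval;

    while ((kval = *k++) != 0) {	/* cbnz x19,.Loop_16_xx */
    	/* ... one round using constant kval ... */
    }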
| 1072 | #ifndef __KERNEL__ | ||
| 1073 | .align 3 | ||
| 1074 | .LOPENSSL_armcap_P: | ||
| 1075 | # ifdef __ILP32__ | ||
| 1076 | .long OPENSSL_armcap_P-. | ||
| 1077 | # else | ||
| 1078 | .quad OPENSSL_armcap_P-. | ||
| 1079 | # endif | ||
| 1080 | #endif | ||
| 1081 | .asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>" | ||
| 1082 | .align 2 | ||
| 1083 | #ifndef __KERNEL__ | ||
| 1084 | .comm OPENSSL_armcap_P,4,4 | ||
| 1085 | #endif | ||
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c new file mode 100644 index 000000000000..aff35c9992a4 --- /dev/null +++ b/arch/arm64/crypto/sha512-glue.c | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | /* | ||
| 2 | * Linux/arm64 port of the OpenSSL SHA512 implementation for AArch64 | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the Free | ||
| 8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 9 | * any later version. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <crypto/internal/hash.h> | ||
| 14 | #include <linux/cryptohash.h> | ||
| 15 | #include <linux/types.h> | ||
| 16 | #include <linux/string.h> | ||
| 17 | #include <crypto/sha.h> | ||
| 18 | #include <crypto/sha512_base.h> | ||
| 19 | #include <asm/neon.h> | ||
| 20 | |||
| 21 | MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash for arm64"); | ||
| 22 | MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>"); | ||
| 23 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
| 24 | MODULE_LICENSE("GPL v2"); | ||
| 25 | MODULE_ALIAS_CRYPTO("sha384"); | ||
| 26 | MODULE_ALIAS_CRYPTO("sha512"); | ||
| 27 | |||
| 28 | asmlinkage void sha512_block_data_order(u64 *digest, const void *data, | ||
| 29 | unsigned int num_blks); | ||
| 30 | |||
| 31 | static int sha512_update(struct shash_desc *desc, const u8 *data, | ||
| 32 | unsigned int len) | ||
| 33 | { | ||
| 34 | return sha512_base_do_update(desc, data, len, | ||
| 35 | (sha512_block_fn *)sha512_block_data_order); | ||
| 36 | } | ||
| 37 | |||
| 38 | static int sha512_finup(struct shash_desc *desc, const u8 *data, | ||
| 39 | unsigned int len, u8 *out) | ||
| 40 | { | ||
| 41 | if (len) | ||
| 42 | sha512_base_do_update(desc, data, len, | ||
| 43 | (sha512_block_fn *)sha512_block_data_order); | ||
| 44 | sha512_base_do_finalize(desc, | ||
| 45 | (sha512_block_fn *)sha512_block_data_order); | ||
| 46 | |||
| 47 | return sha512_base_finish(desc, out); | ||
| 48 | } | ||
| 49 | |||
| 50 | static int sha512_final(struct shash_desc *desc, u8 *out) | ||
| 51 | { | ||
| 52 | return sha512_finup(desc, NULL, 0, out); | ||
| 53 | } | ||
| 54 | |||
| 55 | static struct shash_alg algs[] = { { | ||
| 56 | .digestsize = SHA512_DIGEST_SIZE, | ||
| 57 | .init = sha512_base_init, | ||
| 58 | .update = sha512_update, | ||
| 59 | .final = sha512_final, | ||
| 60 | .finup = sha512_finup, | ||
| 61 | .descsize = sizeof(struct sha512_state), | ||
| 62 | .base.cra_name = "sha512", | ||
| 63 | .base.cra_driver_name = "sha512-arm64", | ||
| 64 | .base.cra_priority = 150, | ||
| 65 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 66 | .base.cra_blocksize = SHA512_BLOCK_SIZE, | ||
| 67 | .base.cra_module = THIS_MODULE, | ||
| 68 | }, { | ||
| 69 | .digestsize = SHA384_DIGEST_SIZE, | ||
| 70 | .init = sha384_base_init, | ||
| 71 | .update = sha512_update, | ||
| 72 | .final = sha512_final, | ||
| 73 | .finup = sha512_finup, | ||
| 74 | .descsize = sizeof(struct sha512_state), | ||
| 75 | .base.cra_name = "sha384", | ||
| 76 | .base.cra_driver_name = "sha384-arm64", | ||
| 77 | .base.cra_priority = 150, | ||
| 78 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 79 | .base.cra_blocksize = SHA384_BLOCK_SIZE, | ||
| 80 | .base.cra_module = THIS_MODULE, | ||
| 81 | } }; | ||
| 82 | |||
| 83 | static int __init sha512_mod_init(void) | ||
| 84 | { | ||
| 85 | return crypto_register_shashes(algs, ARRAY_SIZE(algs)); | ||
| 86 | } | ||
| 87 | |||
| 88 | static void __exit sha512_mod_fini(void) | ||
| 89 | { | ||
| 90 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | ||
| 91 | } | ||
| 92 | |||
| 93 | module_init(sha512_mod_init); | ||
| 94 | module_exit(sha512_mod_fini); | ||
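Once registered, the driver is reached through the ordinary synchronous hash API, and its cra_priority of 150 makes "sha512-arm64" win over the generic C implementation when a caller asks for "sha512". A minimal usage sketch; demo_sha512() is a hypothetical caller, not part of this patch:

    #include <crypto/hash.h>
    #include <crypto/sha.h>
    #include <linux/err.h>

    static int demo_sha512(const u8 *data, unsigned int len,
    		       u8 out[SHA512_DIGEST_SIZE])
    {
    	struct crypto_shash *tfm;
    	int err;

    	tfm = crypto_alloc_shash("sha512", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	{
    		/* descsize is only known once the tfm exists */
    		SHASH_DESC_ON_STACK(desc, tfm);

    		desc->tfm = tfm;
    		desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
    		err = crypto_shash_digest(desc, data, len, out);
    	}

    	crypto_free_shash(tfm);
    	return err;
    }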
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 7998c177f0a2..87f40454bad3 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile | |||
| @@ -9,7 +9,7 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o | |||
| 9 | obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o | 9 | obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o |
| 10 | obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o | 10 | obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o |
| 11 | obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o | 11 | obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o |
| 12 | obj-$(CONFIG_CRYPT_CRC32C_VPMSUM) += crc32c-vpmsum.o | 12 | obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o |
| 13 | 13 | ||
| 14 | aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o | 14 | aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o |
| 15 | md5-ppc-y := md5-asm.o md5-glue.o | 15 | md5-ppc-y := md5-asm.o md5-glue.o |
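The hunk above is a one-character Kconfig spelling fix with a real effect: kbuild expands obj-$(CONFIG_...) from the named symbol, and since no CONFIG_CRYPT_CRC32C_VPMSUM symbol exists, crc32c-vpmsum.o landed on the never-built "obj-" list and was silently dropped. Spelling it CONFIG_CRYPTO_CRC32C_VPMSUM, matching the Kconfig entry, lets the object actually build.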
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index aa8b0672f87a..31c34ee131f3 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
| @@ -21,7 +21,6 @@ | |||
| 21 | 21 | ||
| 22 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
| 23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 24 | #include <linux/crypto.h> | ||
| 25 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 26 | #include <linux/err.h> | 25 | #include <linux/err.h> |
| 27 | #include <crypto/algapi.h> | 26 | #include <crypto/algapi.h> |
| @@ -29,14 +28,14 @@ | |||
| 29 | #include <crypto/cryptd.h> | 28 | #include <crypto/cryptd.h> |
| 30 | #include <crypto/ctr.h> | 29 | #include <crypto/ctr.h> |
| 31 | #include <crypto/b128ops.h> | 30 | #include <crypto/b128ops.h> |
| 32 | #include <crypto/lrw.h> | ||
| 33 | #include <crypto/xts.h> | 31 | #include <crypto/xts.h> |
| 34 | #include <asm/cpu_device_id.h> | 32 | #include <asm/cpu_device_id.h> |
| 35 | #include <asm/fpu/api.h> | 33 | #include <asm/fpu/api.h> |
| 36 | #include <asm/crypto/aes.h> | 34 | #include <asm/crypto/aes.h> |
| 37 | #include <crypto/ablk_helper.h> | ||
| 38 | #include <crypto/scatterwalk.h> | 35 | #include <crypto/scatterwalk.h> |
| 39 | #include <crypto/internal/aead.h> | 36 | #include <crypto/internal/aead.h> |
| 37 | #include <crypto/internal/simd.h> | ||
| 38 | #include <crypto/internal/skcipher.h> | ||
| 40 | #include <linux/workqueue.h> | 39 | #include <linux/workqueue.h> |
| 41 | #include <linux/spinlock.h> | 40 | #include <linux/spinlock.h> |
| 42 | #ifdef CONFIG_X86_64 | 41 | #ifdef CONFIG_X86_64 |
| @@ -45,28 +44,26 @@ | |||
| 45 | 44 | ||
| 46 | 45 | ||
| 47 | #define AESNI_ALIGN 16 | 46 | #define AESNI_ALIGN 16 |
| 47 | #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN))) | ||
| 48 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1)) | 48 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1)) |
| 49 | #define RFC4106_HASH_SUBKEY_SIZE 16 | 49 | #define RFC4106_HASH_SUBKEY_SIZE 16 |
| 50 | #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1)) | ||
| 51 | #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA) | ||
| 52 | #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA) | ||
| 50 | 53 | ||
| 51 | /* This data is stored at the end of the crypto_tfm struct. | 54 | /* This data is stored at the end of the crypto_tfm struct. |
| 52 | * It's a type of per "session" data storage location. | 55 | * It's a type of per "session" data storage location. |
| 53 | * This needs to be 16 byte aligned. | 56 | * This needs to be 16 byte aligned. |
| 54 | */ | 57 | */ |
| 55 | struct aesni_rfc4106_gcm_ctx { | 58 | struct aesni_rfc4106_gcm_ctx { |
| 56 | u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | 59 | u8 hash_subkey[16] AESNI_ALIGN_ATTR; |
| 57 | struct crypto_aes_ctx aes_key_expanded | 60 | struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR; |
| 58 | __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
| 59 | u8 nonce[4]; | 61 | u8 nonce[4]; |
| 60 | }; | 62 | }; |
| 61 | 63 | ||
| 62 | struct aesni_lrw_ctx { | ||
| 63 | struct lrw_table_ctx lrw_table; | ||
| 64 | u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; | ||
| 65 | }; | ||
| 66 | |||
| 67 | struct aesni_xts_ctx { | 64 | struct aesni_xts_ctx { |
| 68 | u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; | 65 | u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; |
| 69 | u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; | 66 | u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; |
| 70 | }; | 67 | }; |
| 71 | 68 | ||
| 72 | asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, | 69 | asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, |
| @@ -360,96 +357,95 @@ static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
| 360 | aesni_dec(ctx, dst, src); | 357 | aesni_dec(ctx, dst, src); |
| 361 | } | 358 | } |
| 362 | 359 | ||
| 363 | static int ecb_encrypt(struct blkcipher_desc *desc, | 360 | static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, |
| 364 | struct scatterlist *dst, struct scatterlist *src, | 361 | unsigned int len) |
| 365 | unsigned int nbytes) | 362 | { |
| 363 | return aes_set_key_common(crypto_skcipher_tfm(tfm), | ||
| 364 | crypto_skcipher_ctx(tfm), key, len); | ||
| 365 | } | ||
| 366 | |||
| 367 | static int ecb_encrypt(struct skcipher_request *req) | ||
| 366 | { | 368 | { |
| 367 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | 369 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 368 | struct blkcipher_walk walk; | 370 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); |
| 371 | struct skcipher_walk walk; | ||
| 372 | unsigned int nbytes; | ||
| 369 | int err; | 373 | int err; |
| 370 | 374 | ||
| 371 | blkcipher_walk_init(&walk, dst, src, nbytes); | 375 | err = skcipher_walk_virt(&walk, req, true); |
| 372 | err = blkcipher_walk_virt(desc, &walk); | ||
| 373 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 374 | 376 | ||
| 375 | kernel_fpu_begin(); | 377 | kernel_fpu_begin(); |
| 376 | while ((nbytes = walk.nbytes)) { | 378 | while ((nbytes = walk.nbytes)) { |
| 377 | aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 379 | aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| 378 | nbytes & AES_BLOCK_MASK); | 380 | nbytes & AES_BLOCK_MASK); |
| 379 | nbytes &= AES_BLOCK_SIZE - 1; | 381 | nbytes &= AES_BLOCK_SIZE - 1; |
| 380 | err = blkcipher_walk_done(desc, &walk, nbytes); | 382 | err = skcipher_walk_done(&walk, nbytes); |
| 381 | } | 383 | } |
| 382 | kernel_fpu_end(); | 384 | kernel_fpu_end(); |
| 383 | 385 | ||
| 384 | return err; | 386 | return err; |
| 385 | } | 387 | } |
| 386 | 388 | ||
| 387 | static int ecb_decrypt(struct blkcipher_desc *desc, | 389 | static int ecb_decrypt(struct skcipher_request *req) |
| 388 | struct scatterlist *dst, struct scatterlist *src, | ||
| 389 | unsigned int nbytes) | ||
| 390 | { | 390 | { |
| 391 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | 391 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 392 | struct blkcipher_walk walk; | 392 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); |
| 393 | struct skcipher_walk walk; | ||
| 394 | unsigned int nbytes; | ||
| 393 | int err; | 395 | int err; |
| 394 | 396 | ||
| 395 | blkcipher_walk_init(&walk, dst, src, nbytes); | 397 | err = skcipher_walk_virt(&walk, req, true); |
| 396 | err = blkcipher_walk_virt(desc, &walk); | ||
| 397 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 398 | 398 | ||
| 399 | kernel_fpu_begin(); | 399 | kernel_fpu_begin(); |
| 400 | while ((nbytes = walk.nbytes)) { | 400 | while ((nbytes = walk.nbytes)) { |
| 401 | aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 401 | aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| 402 | nbytes & AES_BLOCK_MASK); | 402 | nbytes & AES_BLOCK_MASK); |
| 403 | nbytes &= AES_BLOCK_SIZE - 1; | 403 | nbytes &= AES_BLOCK_SIZE - 1; |
| 404 | err = blkcipher_walk_done(desc, &walk, nbytes); | 404 | err = skcipher_walk_done(&walk, nbytes); |
| 405 | } | 405 | } |
| 406 | kernel_fpu_end(); | 406 | kernel_fpu_end(); |
| 407 | 407 | ||
| 408 | return err; | 408 | return err; |
| 409 | } | 409 | } |
| 410 | 410 | ||
| 411 | static int cbc_encrypt(struct blkcipher_desc *desc, | 411 | static int cbc_encrypt(struct skcipher_request *req) |
| 412 | struct scatterlist *dst, struct scatterlist *src, | ||
| 413 | unsigned int nbytes) | ||
| 414 | { | 412 | { |
| 415 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | 413 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 416 | struct blkcipher_walk walk; | 414 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); |
| 415 | struct skcipher_walk walk; | ||
| 416 | unsigned int nbytes; | ||
| 417 | int err; | 417 | int err; |
| 418 | 418 | ||
| 419 | blkcipher_walk_init(&walk, dst, src, nbytes); | 419 | err = skcipher_walk_virt(&walk, req, true); |
| 420 | err = blkcipher_walk_virt(desc, &walk); | ||
| 421 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 422 | 420 | ||
| 423 | kernel_fpu_begin(); | 421 | kernel_fpu_begin(); |
| 424 | while ((nbytes = walk.nbytes)) { | 422 | while ((nbytes = walk.nbytes)) { |
| 425 | aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 423 | aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| 426 | nbytes & AES_BLOCK_MASK, walk.iv); | 424 | nbytes & AES_BLOCK_MASK, walk.iv); |
| 427 | nbytes &= AES_BLOCK_SIZE - 1; | 425 | nbytes &= AES_BLOCK_SIZE - 1; |
| 428 | err = blkcipher_walk_done(desc, &walk, nbytes); | 426 | err = skcipher_walk_done(&walk, nbytes); |
| 429 | } | 427 | } |
| 430 | kernel_fpu_end(); | 428 | kernel_fpu_end(); |
| 431 | 429 | ||
| 432 | return err; | 430 | return err; |
| 433 | } | 431 | } |
| 434 | 432 | ||
| 435 | static int cbc_decrypt(struct blkcipher_desc *desc, | 433 | static int cbc_decrypt(struct skcipher_request *req) |
| 436 | struct scatterlist *dst, struct scatterlist *src, | ||
| 437 | unsigned int nbytes) | ||
| 438 | { | 434 | { |
| 439 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | 435 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 440 | struct blkcipher_walk walk; | 436 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); |
| 437 | struct skcipher_walk walk; | ||
| 438 | unsigned int nbytes; | ||
| 441 | int err; | 439 | int err; |
| 442 | 440 | ||
| 443 | blkcipher_walk_init(&walk, dst, src, nbytes); | 441 | err = skcipher_walk_virt(&walk, req, true); |
| 444 | err = blkcipher_walk_virt(desc, &walk); | ||
| 445 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 446 | 442 | ||
| 447 | kernel_fpu_begin(); | 443 | kernel_fpu_begin(); |
| 448 | while ((nbytes = walk.nbytes)) { | 444 | while ((nbytes = walk.nbytes)) { |
| 449 | aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 445 | aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| 450 | nbytes & AES_BLOCK_MASK, walk.iv); | 446 | nbytes & AES_BLOCK_MASK, walk.iv); |
| 451 | nbytes &= AES_BLOCK_SIZE - 1; | 447 | nbytes &= AES_BLOCK_SIZE - 1; |
| 452 | err = blkcipher_walk_done(desc, &walk, nbytes); | 448 | err = skcipher_walk_done(&walk, nbytes); |
| 453 | } | 449 | } |
| 454 | kernel_fpu_end(); | 450 | kernel_fpu_end(); |
| 455 | 451 | ||
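The four converted helpers above share one shape: fetch the context from the request's tfm, start a virtual-address skcipher walk, and process whole blocks between kernel_fpu_begin()/kernel_fpu_end(). The old "desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP" dance is replaced by passing atomic=true to skcipher_walk_virt(). A composite sketch of the ECB pair (aesni_walk_crypt() is a hypothetical factoring for illustration, not a helper in this patch; the CBC pair additionally passes walk.iv):

    static int aesni_walk_crypt(struct skcipher_request *req,
    			    void (*process)(struct crypto_aes_ctx *ctx,
    					    u8 *dst, const u8 *src,
    					    unsigned int len))
    {
    	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
    	struct skcipher_walk walk;
    	unsigned int nbytes;
    	int err;

    	/* atomic == true: we may not sleep while the FPU is claimed */
    	err = skcipher_walk_virt(&walk, req, true);

    	kernel_fpu_begin();
    	while ((nbytes = walk.nbytes)) {
    		process(ctx, walk.dst.virt.addr, walk.src.virt.addr,
    			nbytes & AES_BLOCK_MASK);
    		/* hand the sub-block tail back to the walk */
    		err = skcipher_walk_done(&walk, nbytes & (AES_BLOCK_SIZE - 1));
    	}
    	kernel_fpu_end();

    	return err;
    }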
| @@ -458,7 +454,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, | |||
| 458 | 454 | ||
| 459 | #ifdef CONFIG_X86_64 | 455 | #ifdef CONFIG_X86_64 |
| 460 | static void ctr_crypt_final(struct crypto_aes_ctx *ctx, | 456 | static void ctr_crypt_final(struct crypto_aes_ctx *ctx, |
| 461 | struct blkcipher_walk *walk) | 457 | struct skcipher_walk *walk) |
| 462 | { | 458 | { |
| 463 | u8 *ctrblk = walk->iv; | 459 | u8 *ctrblk = walk->iv; |
| 464 | u8 keystream[AES_BLOCK_SIZE]; | 460 | u8 keystream[AES_BLOCK_SIZE]; |
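Only the walk type changes in ctr_crypt_final(); its body is unchanged context, so the hunk ends here. Assuming the elided lines follow the stock CTR tail pattern, the remainder is roughly:

    /* sketch of the elided tail (assumed, not shown in this hunk) */
    aesni_enc(ctx, keystream, walk->iv);	/* keystream = E_K(counter) */
    crypto_xor(keystream, walk->src.virt.addr, walk->nbytes);
    memcpy(walk->dst.virt.addr, keystream, walk->nbytes);
    crypto_inc(walk->iv, AES_BLOCK_SIZE);	/* bump the big-endian counter */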
| @@ -491,157 +487,53 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, | |||
| 491 | } | 487 | } |
| 492 | #endif | 488 | #endif |
| 493 | 489 | ||
| 494 | static int ctr_crypt(struct blkcipher_desc *desc, | 490 | static int ctr_crypt(struct skcipher_request *req) |
| 495 | struct scatterlist *dst, struct scatterlist *src, | ||
| 496 | unsigned int nbytes) | ||
| 497 | { | 491 | { |
| 498 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | 492 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 499 | struct blkcipher_walk walk; | 493 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); |
| 494 | struct skcipher_walk walk; | ||
| 495 | unsigned int nbytes; | ||
| 500 | int err; | 496 | int err; |
| 501 | 497 | ||
| 502 | blkcipher_walk_init(&walk, dst, src, nbytes); | 498 | err = skcipher_walk_virt(&walk, req, true); |
| 503 | err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); | ||
| 504 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 505 | 499 | ||
| 506 | kernel_fpu_begin(); | 500 | kernel_fpu_begin(); |
| 507 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | 501 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
| 508 | aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 502 | aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
| 509 | nbytes & AES_BLOCK_MASK, walk.iv); | 503 | nbytes & AES_BLOCK_MASK, walk.iv); |
| 510 | nbytes &= AES_BLOCK_SIZE - 1; | 504 | nbytes &= AES_BLOCK_SIZE - 1; |
| 511 | err = blkcipher_walk_done(desc, &walk, nbytes); | 505 | err = skcipher_walk_done(&walk, nbytes); |
| 512 | } | 506 | } |
| 513 | if (walk.nbytes) { | 507 | if (walk.nbytes) { |
| 514 | ctr_crypt_final(ctx, &walk); | 508 | ctr_crypt_final(ctx, &walk); |
| 515 | err = blkcipher_walk_done(desc, &walk, 0); | 509 | err = skcipher_walk_done(&walk, 0); |
| 516 | } | 510 | } |
| 517 | kernel_fpu_end(); | 511 | kernel_fpu_end(); |
| 518 | 512 | ||
| 519 | return err; | 513 | return err; |
| 520 | } | 514 | } |
| 521 | #endif | ||
| 522 | |||
| 523 | static int ablk_ecb_init(struct crypto_tfm *tfm) | ||
| 524 | { | ||
| 525 | return ablk_init_common(tfm, "__driver-ecb-aes-aesni"); | ||
| 526 | } | ||
| 527 | |||
| 528 | static int ablk_cbc_init(struct crypto_tfm *tfm) | ||
| 529 | { | ||
| 530 | return ablk_init_common(tfm, "__driver-cbc-aes-aesni"); | ||
| 531 | } | ||
| 532 | |||
| 533 | #ifdef CONFIG_X86_64 | ||
| 534 | static int ablk_ctr_init(struct crypto_tfm *tfm) | ||
| 535 | { | ||
| 536 | return ablk_init_common(tfm, "__driver-ctr-aes-aesni"); | ||
| 537 | } | ||
| 538 | |||
| 539 | #endif | ||
| 540 | |||
| 541 | #if IS_ENABLED(CONFIG_CRYPTO_PCBC) | ||
| 542 | static int ablk_pcbc_init(struct crypto_tfm *tfm) | ||
| 543 | { | ||
| 544 | return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))"); | ||
| 545 | } | ||
| 546 | #endif | ||
| 547 | |||
| 548 | static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) | ||
| 549 | { | ||
| 550 | aesni_ecb_enc(ctx, blks, blks, nbytes); | ||
| 551 | } | ||
| 552 | 515 | ||
| 553 | static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) | 516 | static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, |
| 554 | { | ||
| 555 | aesni_ecb_dec(ctx, blks, blks, nbytes); | ||
| 556 | } | ||
| 557 | |||
| 558 | static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 559 | unsigned int keylen) | 517 | unsigned int keylen) |
| 560 | { | 518 | { |
| 561 | struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); | 519 | struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 562 | int err; | 520 | int err; |
| 563 | 521 | ||
| 564 | err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key, | 522 | err = xts_verify_key(tfm, key, keylen); |
| 565 | keylen - AES_BLOCK_SIZE); | ||
| 566 | if (err) | 523 | if (err) |
| 567 | return err; | 524 | return err; |
| 568 | 525 | ||
| 569 | return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE); | 526 | keylen /= 2; |
| 570 | } | ||
| 571 | |||
| 572 | static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm) | ||
| 573 | { | ||
| 574 | struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 575 | |||
| 576 | lrw_free_table(&ctx->lrw_table); | ||
| 577 | } | ||
| 578 | |||
| 579 | static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 580 | struct scatterlist *src, unsigned int nbytes) | ||
| 581 | { | ||
| 582 | struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 583 | be128 buf[8]; | ||
| 584 | struct lrw_crypt_req req = { | ||
| 585 | .tbuf = buf, | ||
| 586 | .tbuflen = sizeof(buf), | ||
| 587 | |||
| 588 | .table_ctx = &ctx->lrw_table, | ||
| 589 | .crypt_ctx = aes_ctx(ctx->raw_aes_ctx), | ||
| 590 | .crypt_fn = lrw_xts_encrypt_callback, | ||
| 591 | }; | ||
| 592 | int ret; | ||
| 593 | |||
| 594 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 595 | |||
| 596 | kernel_fpu_begin(); | ||
| 597 | ret = lrw_crypt(desc, dst, src, nbytes, &req); | ||
| 598 | kernel_fpu_end(); | ||
| 599 | |||
| 600 | return ret; | ||
| 601 | } | ||
| 602 | |||
| 603 | static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 604 | struct scatterlist *src, unsigned int nbytes) | ||
| 605 | { | ||
| 606 | struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 607 | be128 buf[8]; | ||
| 608 | struct lrw_crypt_req req = { | ||
| 609 | .tbuf = buf, | ||
| 610 | .tbuflen = sizeof(buf), | ||
| 611 | |||
| 612 | .table_ctx = &ctx->lrw_table, | ||
| 613 | .crypt_ctx = aes_ctx(ctx->raw_aes_ctx), | ||
| 614 | .crypt_fn = lrw_xts_decrypt_callback, | ||
| 615 | }; | ||
| 616 | int ret; | ||
| 617 | |||
| 618 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 619 | |||
| 620 | kernel_fpu_begin(); | ||
| 621 | ret = lrw_crypt(desc, dst, src, nbytes, &req); | ||
| 622 | kernel_fpu_end(); | ||
| 623 | |||
| 624 | return ret; | ||
| 625 | } | ||
| 626 | |||
| 627 | static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 628 | unsigned int keylen) | ||
| 629 | { | ||
| 630 | struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 631 | int err; | ||
| 632 | |||
| 633 | err = xts_check_key(tfm, key, keylen); | ||
| 634 | if (err) | ||
| 635 | return err; | ||
| 636 | 527 | ||
| 637 | /* first half of xts-key is for crypt */ | 528 | /* first half of xts-key is for crypt */ |
| 638 | err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2); | 529 | err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, |
| 530 | key, keylen); | ||
| 639 | if (err) | 531 | if (err) |
| 640 | return err; | 532 | return err; |
| 641 | 533 | ||
| 642 | /* second half of xts-key is for tweak */ | 534 | /* second half of xts-key is for tweak */ |
| 643 | return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2, | 535 | return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, |
| 644 | keylen / 2); | 536 | key + keylen, keylen); |
| 645 | } | 537 | } |
| 646 | 538 | ||
| 647 | 539 | ||
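xts_verify_key() is the skcipher-native counterpart of the old xts_check_key(): it rejects odd key lengths and, under FIPS, identical key halves. After "keylen /= 2" the first half keys the data cipher and the second half keys the tweak cipher, so callers simply concatenate two AES keys. A sketch of the caller's side (tfm allocation elided; the variable names are illustrative):

    #include <crypto/aes.h>
    #include <linux/random.h>

    u8 xts_key[2 * AES_KEYSIZE_256];	/* data key || tweak key */
    int err;

    get_random_bytes(xts_key, sizeof(xts_key));
    err = crypto_skcipher_setkey(tfm, xts_key, sizeof(xts_key));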
| @@ -650,8 +542,6 @@ static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) | |||
| 650 | aesni_enc(ctx, out, in); | 542 | aesni_enc(ctx, out, in); |
| 651 | } | 543 | } |
| 652 | 544 | ||
| 653 | #ifdef CONFIG_X86_64 | ||
| 654 | |||
| 655 | static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) | 545 | static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
| 656 | { | 546 | { |
| 657 | glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); | 547 | glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); |
| @@ -698,83 +588,28 @@ static const struct common_glue_ctx aesni_dec_xts = { | |||
| 698 | } } | 588 | } } |
| 699 | }; | 589 | }; |
| 700 | 590 | ||
| 701 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 591 | static int xts_encrypt(struct skcipher_request *req) |
| 702 | struct scatterlist *src, unsigned int nbytes) | ||
| 703 | { | 592 | { |
| 704 | struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 593 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 594 | struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 705 | 595 | ||
| 706 | return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes, | 596 | return glue_xts_req_128bit(&aesni_enc_xts, req, |
| 707 | XTS_TWEAK_CAST(aesni_xts_tweak), | 597 | XTS_TWEAK_CAST(aesni_xts_tweak), |
| 708 | aes_ctx(ctx->raw_tweak_ctx), | 598 | aes_ctx(ctx->raw_tweak_ctx), |
| 709 | aes_ctx(ctx->raw_crypt_ctx)); | 599 | aes_ctx(ctx->raw_crypt_ctx)); |
| 710 | } | 600 | } |
| 711 | 601 | ||
| 712 | static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 602 | static int xts_decrypt(struct skcipher_request *req) |
| 713 | struct scatterlist *src, unsigned int nbytes) | ||
| 714 | { | 603 | { |
| 715 | struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | 604 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 716 | 605 | struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); | |
| 717 | return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes, | ||
| 718 | XTS_TWEAK_CAST(aesni_xts_tweak), | ||
| 719 | aes_ctx(ctx->raw_tweak_ctx), | ||
| 720 | aes_ctx(ctx->raw_crypt_ctx)); | ||
| 721 | } | ||
| 722 | |||
| 723 | #else | ||
| 724 | |||
| 725 | static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 726 | struct scatterlist *src, unsigned int nbytes) | ||
| 727 | { | ||
| 728 | struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 729 | be128 buf[8]; | ||
| 730 | struct xts_crypt_req req = { | ||
| 731 | .tbuf = buf, | ||
| 732 | .tbuflen = sizeof(buf), | ||
| 733 | |||
| 734 | .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), | ||
| 735 | .tweak_fn = aesni_xts_tweak, | ||
| 736 | .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), | ||
| 737 | .crypt_fn = lrw_xts_encrypt_callback, | ||
| 738 | }; | ||
| 739 | int ret; | ||
| 740 | |||
| 741 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 742 | |||
| 743 | kernel_fpu_begin(); | ||
| 744 | ret = xts_crypt(desc, dst, src, nbytes, &req); | ||
| 745 | kernel_fpu_end(); | ||
| 746 | |||
| 747 | return ret; | ||
| 748 | } | ||
| 749 | |||
| 750 | static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 751 | struct scatterlist *src, unsigned int nbytes) | ||
| 752 | { | ||
| 753 | struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 754 | be128 buf[8]; | ||
| 755 | struct xts_crypt_req req = { | ||
| 756 | .tbuf = buf, | ||
| 757 | .tbuflen = sizeof(buf), | ||
| 758 | |||
| 759 | .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), | ||
| 760 | .tweak_fn = aesni_xts_tweak, | ||
| 761 | .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), | ||
| 762 | .crypt_fn = lrw_xts_decrypt_callback, | ||
| 763 | }; | ||
| 764 | int ret; | ||
| 765 | |||
| 766 | desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 767 | |||
| 768 | kernel_fpu_begin(); | ||
| 769 | ret = xts_crypt(desc, dst, src, nbytes, &req); | ||
| 770 | kernel_fpu_end(); | ||
| 771 | 606 | ||
| 772 | return ret; | 607 | return glue_xts_req_128bit(&aesni_dec_xts, req, |
| 608 | XTS_TWEAK_CAST(aesni_xts_tweak), | ||
| 609 | aes_ctx(ctx->raw_tweak_ctx), | ||
| 610 | aes_ctx(ctx->raw_crypt_ctx)); | ||
| 773 | } | 611 | } |
| 774 | 612 | ||
| 775 | #endif | ||
| 776 | |||
| 777 | #ifdef CONFIG_X86_64 | ||
| 778 | static int rfc4106_init(struct crypto_aead *aead) | 613 | static int rfc4106_init(struct crypto_aead *aead) |
| 779 | { | 614 | { |
| 780 | struct cryptd_aead *cryptd_tfm; | 615 | struct cryptd_aead *cryptd_tfm; |
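Both xts_encrypt() and xts_decrypt() now funnel into glue_xts_req_128bit(), with aesni_xts_tweak() encrypting the IV under the tweak key to produce the first tweak; each later block's tweak is the previous one multiplied by x (alpha) in GF(2^128), which is just a shift and a conditional XOR. An illustrative stand-alone version of that multiplication, using XTS's LSB-first byte order (a sketch, not the kernel's helper):

    /* multiply a 16-byte XTS tweak by x in GF(2^128), little-endian bytes */
    static void xts_double_tweak(u8 t[16])
    {
    	unsigned int i;
    	u8 carry = 0;

    	for (i = 0; i < 16; i++) {
    		u8 c = t[i] >> 7;

    		t[i] = (t[i] << 1) | carry;
    		carry = c;
    	}
    	if (carry)
    		t[0] ^= 0x87;	/* reduce by x^128 + x^7 + x^2 + x + 1 */
    }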
| @@ -1077,9 +912,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1077 | .cra_priority = 300, | 912 | .cra_priority = 300, |
| 1078 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 913 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
| 1079 | .cra_blocksize = AES_BLOCK_SIZE, | 914 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1080 | .cra_ctxsize = sizeof(struct crypto_aes_ctx) + | 915 | .cra_ctxsize = CRYPTO_AES_CTX_SIZE, |
| 1081 | AESNI_ALIGN - 1, | ||
| 1082 | .cra_alignmask = 0, | ||
| 1083 | .cra_module = THIS_MODULE, | 916 | .cra_module = THIS_MODULE, |
| 1084 | .cra_u = { | 917 | .cra_u = { |
| 1085 | .cipher = { | 918 | .cipher = { |
| @@ -1091,14 +924,12 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1091 | } | 924 | } |
| 1092 | } | 925 | } |
| 1093 | }, { | 926 | }, { |
| 1094 | .cra_name = "__aes-aesni", | 927 | .cra_name = "__aes", |
| 1095 | .cra_driver_name = "__driver-aes-aesni", | 928 | .cra_driver_name = "__aes-aesni", |
| 1096 | .cra_priority = 0, | 929 | .cra_priority = 300, |
| 1097 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL, | 930 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL, |
| 1098 | .cra_blocksize = AES_BLOCK_SIZE, | 931 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1099 | .cra_ctxsize = sizeof(struct crypto_aes_ctx) + | 932 | .cra_ctxsize = CRYPTO_AES_CTX_SIZE, |
| 1100 | AESNI_ALIGN - 1, | ||
| 1101 | .cra_alignmask = 0, | ||
| 1102 | .cra_module = THIS_MODULE, | 933 | .cra_module = THIS_MODULE, |
| 1103 | .cra_u = { | 934 | .cra_u = { |
| 1104 | .cipher = { | 935 | .cipher = { |
| @@ -1109,250 +940,94 @@ static struct crypto_alg aesni_algs[] = { { | |||
| 1109 | .cia_decrypt = __aes_decrypt | 940 | .cia_decrypt = __aes_decrypt |
| 1110 | } | 941 | } |
| 1111 | } | 942 | } |
| 1112 | }, { | 943 | } }; |
| 1113 | .cra_name = "__ecb-aes-aesni", | 944 | |
| 1114 | .cra_driver_name = "__driver-ecb-aes-aesni", | 945 | static struct skcipher_alg aesni_skciphers[] = { |
| 1115 | .cra_priority = 0, | 946 | { |
| 1116 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 947 | .base = { |
| 1117 | CRYPTO_ALG_INTERNAL, | 948 | .cra_name = "__ecb(aes)", |
| 1118 | .cra_blocksize = AES_BLOCK_SIZE, | 949 | .cra_driver_name = "__ecb-aes-aesni", |
| 1119 | .cra_ctxsize = sizeof(struct crypto_aes_ctx) + | 950 | .cra_priority = 400, |
| 1120 | AESNI_ALIGN - 1, | 951 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 1121 | .cra_alignmask = 0, | 952 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1122 | .cra_type = &crypto_blkcipher_type, | 953 | .cra_ctxsize = CRYPTO_AES_CTX_SIZE, |
| 1123 | .cra_module = THIS_MODULE, | 954 | .cra_module = THIS_MODULE, |
| 1124 | .cra_u = { | ||
| 1125 | .blkcipher = { | ||
| 1126 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1127 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1128 | .setkey = aes_set_key, | ||
| 1129 | .encrypt = ecb_encrypt, | ||
| 1130 | .decrypt = ecb_decrypt, | ||
| 1131 | }, | ||
| 1132 | }, | ||
| 1133 | }, { | ||
| 1134 | .cra_name = "__cbc-aes-aesni", | ||
| 1135 | .cra_driver_name = "__driver-cbc-aes-aesni", | ||
| 1136 | .cra_priority = 0, | ||
| 1137 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | ||
| 1138 | CRYPTO_ALG_INTERNAL, | ||
| 1139 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1140 | .cra_ctxsize = sizeof(struct crypto_aes_ctx) + | ||
| 1141 | AESNI_ALIGN - 1, | ||
| 1142 | .cra_alignmask = 0, | ||
| 1143 | .cra_type = &crypto_blkcipher_type, | ||
| 1144 | .cra_module = THIS_MODULE, | ||
| 1145 | .cra_u = { | ||
| 1146 | .blkcipher = { | ||
| 1147 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1148 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1149 | .setkey = aes_set_key, | ||
| 1150 | .encrypt = cbc_encrypt, | ||
| 1151 | .decrypt = cbc_decrypt, | ||
| 1152 | }, | ||
| 1153 | }, | ||
| 1154 | }, { | ||
| 1155 | .cra_name = "ecb(aes)", | ||
| 1156 | .cra_driver_name = "ecb-aes-aesni", | ||
| 1157 | .cra_priority = 400, | ||
| 1158 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 1159 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1160 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 1161 | .cra_alignmask = 0, | ||
| 1162 | .cra_type = &crypto_ablkcipher_type, | ||
| 1163 | .cra_module = THIS_MODULE, | ||
| 1164 | .cra_init = ablk_ecb_init, | ||
| 1165 | .cra_exit = ablk_exit, | ||
| 1166 | .cra_u = { | ||
| 1167 | .ablkcipher = { | ||
| 1168 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1169 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1170 | .setkey = ablk_set_key, | ||
| 1171 | .encrypt = ablk_encrypt, | ||
| 1172 | .decrypt = ablk_decrypt, | ||
| 1173 | }, | 955 | }, |
| 1174 | }, | 956 | .min_keysize = AES_MIN_KEY_SIZE, |
| 1175 | }, { | 957 | .max_keysize = AES_MAX_KEY_SIZE, |
| 1176 | .cra_name = "cbc(aes)", | 958 | .setkey = aesni_skcipher_setkey, |
| 1177 | .cra_driver_name = "cbc-aes-aesni", | 959 | .encrypt = ecb_encrypt, |
| 1178 | .cra_priority = 400, | 960 | .decrypt = ecb_decrypt, |
| 1179 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 961 | }, { |
| 1180 | .cra_blocksize = AES_BLOCK_SIZE, | 962 | .base = { |
| 1181 | .cra_ctxsize = sizeof(struct async_helper_ctx), | 963 | .cra_name = "__cbc(aes)", |
| 1182 | .cra_alignmask = 0, | 964 | .cra_driver_name = "__cbc-aes-aesni", |
| 1183 | .cra_type = &crypto_ablkcipher_type, | 965 | .cra_priority = 400, |
| 1184 | .cra_module = THIS_MODULE, | 966 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 1185 | .cra_init = ablk_cbc_init, | 967 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1186 | .cra_exit = ablk_exit, | 968 | .cra_ctxsize = CRYPTO_AES_CTX_SIZE, |
| 1187 | .cra_u = { | 969 | .cra_module = THIS_MODULE, |
| 1188 | .ablkcipher = { | ||
| 1189 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1190 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1191 | .ivsize = AES_BLOCK_SIZE, | ||
| 1192 | .setkey = ablk_set_key, | ||
| 1193 | .encrypt = ablk_encrypt, | ||
| 1194 | .decrypt = ablk_decrypt, | ||
| 1195 | }, | 970 | }, |
| 1196 | }, | 971 | .min_keysize = AES_MIN_KEY_SIZE, |
| 972 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 973 | .ivsize = AES_BLOCK_SIZE, | ||
| 974 | .setkey = aesni_skcipher_setkey, | ||
| 975 | .encrypt = cbc_encrypt, | ||
| 976 | .decrypt = cbc_decrypt, | ||
| 1197 | #ifdef CONFIG_X86_64 | 977 | #ifdef CONFIG_X86_64 |
| 1198 | }, { | 978 | }, { |
| 1199 | .cra_name = "__ctr-aes-aesni", | 979 | .base = { |
| 1200 | .cra_driver_name = "__driver-ctr-aes-aesni", | 980 | .cra_name = "__ctr(aes)", |
| 1201 | .cra_priority = 0, | 981 | .cra_driver_name = "__ctr-aes-aesni", |
| 1202 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 982 | .cra_priority = 400, |
| 1203 | CRYPTO_ALG_INTERNAL, | 983 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 1204 | .cra_blocksize = 1, | 984 | .cra_blocksize = 1, |
| 1205 | .cra_ctxsize = sizeof(struct crypto_aes_ctx) + | 985 | .cra_ctxsize = CRYPTO_AES_CTX_SIZE, |
| 1206 | AESNI_ALIGN - 1, | 986 | .cra_module = THIS_MODULE, |
| 1207 | .cra_alignmask = 0, | ||
| 1208 | .cra_type = &crypto_blkcipher_type, | ||
| 1209 | .cra_module = THIS_MODULE, | ||
| 1210 | .cra_u = { | ||
| 1211 | .blkcipher = { | ||
| 1212 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1213 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1214 | .ivsize = AES_BLOCK_SIZE, | ||
| 1215 | .setkey = aes_set_key, | ||
| 1216 | .encrypt = ctr_crypt, | ||
| 1217 | .decrypt = ctr_crypt, | ||
| 1218 | }, | 987 | }, |
| 1219 | }, | 988 | .min_keysize = AES_MIN_KEY_SIZE, |
| 1220 | }, { | 989 | .max_keysize = AES_MAX_KEY_SIZE, |
| 1221 | .cra_name = "ctr(aes)", | 990 | .ivsize = AES_BLOCK_SIZE, |
| 1222 | .cra_driver_name = "ctr-aes-aesni", | 991 | .chunksize = AES_BLOCK_SIZE, |
| 1223 | .cra_priority = 400, | 992 | .setkey = aesni_skcipher_setkey, |
| 1224 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 993 | .encrypt = ctr_crypt, |
| 1225 | .cra_blocksize = 1, | 994 | .decrypt = ctr_crypt, |
| 1226 | .cra_ctxsize = sizeof(struct async_helper_ctx), | 995 | }, { |
| 1227 | .cra_alignmask = 0, | 996 | .base = { |
| 1228 | .cra_type = &crypto_ablkcipher_type, | 997 | .cra_name = "__xts(aes)", |
| 1229 | .cra_module = THIS_MODULE, | 998 | .cra_driver_name = "__xts-aes-aesni", |
| 1230 | .cra_init = ablk_ctr_init, | 999 | .cra_priority = 401, |
| 1231 | .cra_exit = ablk_exit, | 1000 | .cra_flags = CRYPTO_ALG_INTERNAL, |
| 1232 | .cra_u = { | 1001 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1233 | .ablkcipher = { | 1002 | .cra_ctxsize = XTS_AES_CTX_SIZE, |
| 1234 | .min_keysize = AES_MIN_KEY_SIZE, | 1003 | .cra_module = THIS_MODULE, |
| 1235 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1236 | .ivsize = AES_BLOCK_SIZE, | ||
| 1237 | .setkey = ablk_set_key, | ||
| 1238 | .encrypt = ablk_encrypt, | ||
| 1239 | .decrypt = ablk_encrypt, | ||
| 1240 | .geniv = "chainiv", | ||
| 1241 | }, | 1004 | }, |
| 1242 | }, | 1005 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
| 1006 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
| 1007 | .ivsize = AES_BLOCK_SIZE, | ||
| 1008 | .setkey = xts_aesni_setkey, | ||
| 1009 | .encrypt = xts_encrypt, | ||
| 1010 | .decrypt = xts_decrypt, | ||
| 1243 | #endif | 1011 | #endif |
| 1012 | } | ||
| 1013 | }; | ||
| 1014 | |||
| 1015 | struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; | ||
| 1016 | |||
| 1017 | struct { | ||
| 1018 | const char *algname; | ||
| 1019 | const char *drvname; | ||
| 1020 | const char *basename; | ||
| 1021 | struct simd_skcipher_alg *simd; | ||
| 1022 | } aesni_simd_skciphers2[] = { | ||
| 1244 | #if IS_ENABLED(CONFIG_CRYPTO_PCBC) | 1023 | #if IS_ENABLED(CONFIG_CRYPTO_PCBC) |
| 1245 | }, { | 1024 | { |
| 1246 | .cra_name = "pcbc(aes)", | 1025 | .algname = "pcbc(aes)", |
| 1247 | .cra_driver_name = "pcbc-aes-aesni", | 1026 | .drvname = "pcbc-aes-aesni", |
| 1248 | .cra_priority = 400, | 1027 | .basename = "fpu(pcbc(__aes-aesni))", |
| 1249 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 1250 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1251 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 1252 | .cra_alignmask = 0, | ||
| 1253 | .cra_type = &crypto_ablkcipher_type, | ||
| 1254 | .cra_module = THIS_MODULE, | ||
| 1255 | .cra_init = ablk_pcbc_init, | ||
| 1256 | .cra_exit = ablk_exit, | ||
| 1257 | .cra_u = { | ||
| 1258 | .ablkcipher = { | ||
| 1259 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 1260 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 1261 | .ivsize = AES_BLOCK_SIZE, | ||
| 1262 | .setkey = ablk_set_key, | ||
| 1263 | .encrypt = ablk_encrypt, | ||
| 1264 | .decrypt = ablk_decrypt, | ||
| 1265 | }, | ||
| 1266 | }, | 1028 | }, |
| 1267 | #endif | 1029 | #endif |
| 1268 | }, { | 1030 | }; |
| 1269 | .cra_name = "__lrw-aes-aesni", | ||
| 1270 | .cra_driver_name = "__driver-lrw-aes-aesni", | ||
| 1271 | .cra_priority = 0, | ||
| 1272 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | ||
| 1273 | CRYPTO_ALG_INTERNAL, | ||
| 1274 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1275 | .cra_ctxsize = sizeof(struct aesni_lrw_ctx), | ||
| 1276 | .cra_alignmask = 0, | ||
| 1277 | .cra_type = &crypto_blkcipher_type, | ||
| 1278 | .cra_module = THIS_MODULE, | ||
| 1279 | .cra_exit = lrw_aesni_exit_tfm, | ||
| 1280 | .cra_u = { | ||
| 1281 | .blkcipher = { | ||
| 1282 | .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, | ||
| 1283 | .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, | ||
| 1284 | .ivsize = AES_BLOCK_SIZE, | ||
| 1285 | .setkey = lrw_aesni_setkey, | ||
| 1286 | .encrypt = lrw_encrypt, | ||
| 1287 | .decrypt = lrw_decrypt, | ||
| 1288 | }, | ||
| 1289 | }, | ||
| 1290 | }, { | ||
| 1291 | .cra_name = "__xts-aes-aesni", | ||
| 1292 | .cra_driver_name = "__driver-xts-aes-aesni", | ||
| 1293 | .cra_priority = 0, | ||
| 1294 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | ||
| 1295 | CRYPTO_ALG_INTERNAL, | ||
| 1296 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1297 | .cra_ctxsize = sizeof(struct aesni_xts_ctx), | ||
| 1298 | .cra_alignmask = 0, | ||
| 1299 | .cra_type = &crypto_blkcipher_type, | ||
| 1300 | .cra_module = THIS_MODULE, | ||
| 1301 | .cra_u = { | ||
| 1302 | .blkcipher = { | ||
| 1303 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
| 1304 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
| 1305 | .ivsize = AES_BLOCK_SIZE, | ||
| 1306 | .setkey = xts_aesni_setkey, | ||
| 1307 | .encrypt = xts_encrypt, | ||
| 1308 | .decrypt = xts_decrypt, | ||
| 1309 | }, | ||
| 1310 | }, | ||
| 1311 | }, { | ||
| 1312 | .cra_name = "lrw(aes)", | ||
| 1313 | .cra_driver_name = "lrw-aes-aesni", | ||
| 1314 | .cra_priority = 400, | ||
| 1315 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 1316 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1317 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 1318 | .cra_alignmask = 0, | ||
| 1319 | .cra_type = &crypto_ablkcipher_type, | ||
| 1320 | .cra_module = THIS_MODULE, | ||
| 1321 | .cra_init = ablk_init, | ||
| 1322 | .cra_exit = ablk_exit, | ||
| 1323 | .cra_u = { | ||
| 1324 | .ablkcipher = { | ||
| 1325 | .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE, | ||
| 1326 | .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE, | ||
| 1327 | .ivsize = AES_BLOCK_SIZE, | ||
| 1328 | .setkey = ablk_set_key, | ||
| 1329 | .encrypt = ablk_encrypt, | ||
| 1330 | .decrypt = ablk_decrypt, | ||
| 1331 | }, | ||
| 1332 | }, | ||
| 1333 | }, { | ||
| 1334 | .cra_name = "xts(aes)", | ||
| 1335 | .cra_driver_name = "xts-aes-aesni", | ||
| 1336 | .cra_priority = 400, | ||
| 1337 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 1338 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1339 | .cra_ctxsize = sizeof(struct async_helper_ctx), | ||
| 1340 | .cra_alignmask = 0, | ||
| 1341 | .cra_type = &crypto_ablkcipher_type, | ||
| 1342 | .cra_module = THIS_MODULE, | ||
| 1343 | .cra_init = ablk_init, | ||
| 1344 | .cra_exit = ablk_exit, | ||
| 1345 | .cra_u = { | ||
| 1346 | .ablkcipher = { | ||
| 1347 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
| 1348 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
| 1349 | .ivsize = AES_BLOCK_SIZE, | ||
| 1350 | .setkey = ablk_set_key, | ||
| 1351 | .encrypt = ablk_encrypt, | ||
| 1352 | .decrypt = ablk_decrypt, | ||
| 1353 | }, | ||
| 1354 | }, | ||
| 1355 | } }; | ||
| 1356 | 1031 | ||
| 1357 | #ifdef CONFIG_X86_64 | 1032 | #ifdef CONFIG_X86_64 |
| 1358 | static struct aead_alg aesni_aead_algs[] = { { | 1033 | static struct aead_alg aesni_aead_algs[] = { { |
| @@ -1401,9 +1076,27 @@ static const struct x86_cpu_id aesni_cpu_id[] = { | |||
| 1401 | }; | 1076 | }; |
| 1402 | MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); | 1077 | MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); |
| 1403 | 1078 | ||
| 1079 | static void aesni_free_simds(void) | ||
| 1080 | { | ||
| 1081 | int i; | ||
| 1082 | |||
| 1083 | for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) && | ||
| 1084 | aesni_simd_skciphers[i]; i++) | ||
| 1085 | simd_skcipher_free(aesni_simd_skciphers[i]); | ||
| 1086 | |||
| 1087 | for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) && | ||
| 1088 | aesni_simd_skciphers2[i].simd; i++) | ||
| 1089 | simd_skcipher_free(aesni_simd_skciphers2[i].simd); | ||
| 1090 | } | ||
| 1091 | |||
| 1404 | static int __init aesni_init(void) | 1092 | static int __init aesni_init(void) |
| 1405 | { | 1093 | { |
| 1094 | struct simd_skcipher_alg *simd; | ||
| 1095 | const char *basename; | ||
| 1096 | const char *algname; | ||
| 1097 | const char *drvname; | ||
| 1406 | int err; | 1098 | int err; |
| 1099 | int i; | ||
| 1407 | 1100 | ||
| 1408 | if (!x86_match_cpu(aesni_cpu_id)) | 1101 | if (!x86_match_cpu(aesni_cpu_id)) |
| 1409 | return -ENODEV; | 1102 | return -ENODEV; |
| @@ -1445,13 +1138,48 @@ static int __init aesni_init(void) | |||
| 1445 | if (err) | 1138 | if (err) |
| 1446 | goto fpu_exit; | 1139 | goto fpu_exit; |
| 1447 | 1140 | ||
| 1141 | err = crypto_register_skciphers(aesni_skciphers, | ||
| 1142 | ARRAY_SIZE(aesni_skciphers)); | ||
| 1143 | if (err) | ||
| 1144 | goto unregister_algs; | ||
| 1145 | |||
| 1448 | err = crypto_register_aeads(aesni_aead_algs, | 1146 | err = crypto_register_aeads(aesni_aead_algs, |
| 1449 | ARRAY_SIZE(aesni_aead_algs)); | 1147 | ARRAY_SIZE(aesni_aead_algs)); |
| 1450 | if (err) | 1148 | if (err) |
| 1451 | goto unregister_algs; | 1149 | goto unregister_skciphers; |
| 1150 | |||
| 1151 | for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) { | ||
| 1152 | algname = aesni_skciphers[i].base.cra_name + 2; | ||
| 1153 | drvname = aesni_skciphers[i].base.cra_driver_name + 2; | ||
| 1154 | basename = aesni_skciphers[i].base.cra_driver_name; | ||
| 1155 | simd = simd_skcipher_create_compat(algname, drvname, basename); | ||
| 1156 | err = PTR_ERR(simd); | ||
| 1157 | if (IS_ERR(simd)) | ||
| 1158 | goto unregister_simds; | ||
| 1159 | |||
| 1160 | aesni_simd_skciphers[i] = simd; | ||
| 1161 | } | ||
| 1452 | 1162 | ||
| 1453 | return err; | 1163 | for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) { |
| 1164 | algname = aesni_simd_skciphers2[i].algname; | ||
| 1165 | drvname = aesni_simd_skciphers2[i].drvname; | ||
| 1166 | basename = aesni_simd_skciphers2[i].basename; | ||
| 1167 | simd = simd_skcipher_create_compat(algname, drvname, basename); | ||
| 1168 | err = PTR_ERR(simd); | ||
| 1169 | if (IS_ERR(simd)) | ||
| 1170 | goto unregister_simds; | ||
| 1454 | 1171 | ||
| 1172 | aesni_simd_skciphers2[i].simd = simd; | ||
| 1173 | } | ||
| 1174 | |||
| 1175 | return 0; | ||
| 1176 | |||
| 1177 | unregister_simds: | ||
| 1178 | aesni_free_simds(); | ||
| 1179 | crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs)); | ||
| 1180 | unregister_skciphers: | ||
| 1181 | crypto_unregister_skciphers(aesni_skciphers, | ||
| 1182 | ARRAY_SIZE(aesni_skciphers)); | ||
| 1455 | unregister_algs: | 1183 | unregister_algs: |
| 1456 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1184 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); |
| 1457 | fpu_exit: | 1185 | fpu_exit: |
| @@ -1461,7 +1189,10 @@ fpu_exit: | |||
| 1461 | 1189 | ||
| 1462 | static void __exit aesni_exit(void) | 1190 | static void __exit aesni_exit(void) |
| 1463 | { | 1191 | { |
| 1192 | aesni_free_simds(); | ||
| 1464 | crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs)); | 1193 | crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs)); |
| 1194 | crypto_unregister_skciphers(aesni_skciphers, | ||
| 1195 | ARRAY_SIZE(aesni_skciphers)); | ||
| 1465 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1196 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); |
| 1466 | 1197 | ||
| 1467 | crypto_fpu_exit(); | 1198 | crypto_fpu_exit(); |
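
The aesni-intel_glue.c hunks above replace the old ablkcipher/blkcipher table entries with internal skcipher algorithms and expose them through SIMD wrappers. Below is a hedged sketch of that registration pattern, using only the calls visible in the diff; my_skciphers and my_simds are hypothetical stand-ins for the driver's arrays.

static int __init register_internal_and_simd(void)
{
        struct simd_skcipher_alg *simd;
        int err, i;

        /* register the CRYPTO_ALG_INTERNAL implementations first */
        err = crypto_register_skciphers(my_skciphers,
                                        ARRAY_SIZE(my_skciphers));
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(my_skciphers); i++) {
                /* strip the "__" prefix: "__ctr(aes)" -> "ctr(aes)" */
                simd = simd_skcipher_create_compat(
                                my_skciphers[i].base.cra_name + 2,
                                my_skciphers[i].base.cra_driver_name + 2,
                                my_skciphers[i].base.cra_driver_name);
                if (IS_ERR(simd)) {
                        err = PTR_ERR(simd);
                        goto unwind;
                }
                my_simds[i] = simd;
        }
        return 0;

unwind:
        while (i-- > 0)
                simd_skcipher_free(my_simds[i]);
        crypto_unregister_skciphers(my_skciphers, ARRAY_SIZE(my_skciphers));
        return err;
}

The wrapper is what gives the user-visible names such as "ctr-aes-aesni" an async, FPU-safe entry point, while the "__"-prefixed internals stay hidden from general lookups.
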
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c index e7d679e2a018..406680476c52 100644 --- a/arch/x86/crypto/fpu.c +++ b/arch/x86/crypto/fpu.c | |||
| @@ -11,143 +11,186 @@ | |||
| 11 | * | 11 | * |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <crypto/algapi.h> | 14 | #include <crypto/internal/skcipher.h> |
| 15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/crypto.h> | ||
| 21 | #include <asm/fpu/api.h> | 20 | #include <asm/fpu/api.h> |
| 22 | 21 | ||
| 23 | struct crypto_fpu_ctx { | 22 | struct crypto_fpu_ctx { |
| 24 | struct crypto_blkcipher *child; | 23 | struct crypto_skcipher *child; |
| 25 | }; | 24 | }; |
| 26 | 25 | ||
| 27 | static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key, | 26 | static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key, |
| 28 | unsigned int keylen) | 27 | unsigned int keylen) |
| 29 | { | 28 | { |
| 30 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent); | 29 | struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent); |
| 31 | struct crypto_blkcipher *child = ctx->child; | 30 | struct crypto_skcipher *child = ctx->child; |
| 32 | int err; | 31 | int err; |
| 33 | 32 | ||
| 34 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 33 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
| 35 | crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & | 34 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
| 36 | CRYPTO_TFM_REQ_MASK); | 35 | CRYPTO_TFM_REQ_MASK); |
| 37 | err = crypto_blkcipher_setkey(child, key, keylen); | 36 | err = crypto_skcipher_setkey(child, key, keylen); |
| 38 | crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & | 37 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & |
| 39 | CRYPTO_TFM_RES_MASK); | 38 | CRYPTO_TFM_RES_MASK); |
| 40 | return err; | 39 | return err; |
| 41 | } | 40 | } |
| 42 | 41 | ||
| 43 | static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in, | 42 | static int crypto_fpu_encrypt(struct skcipher_request *req) |
| 44 | struct scatterlist *dst, struct scatterlist *src, | ||
| 45 | unsigned int nbytes) | ||
| 46 | { | 43 | { |
| 44 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 45 | struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 46 | struct crypto_skcipher *child = ctx->child; | ||
| 47 | SKCIPHER_REQUEST_ON_STACK(subreq, child); | ||
| 47 | int err; | 48 | int err; |
| 48 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); | 49 | |
| 49 | struct crypto_blkcipher *child = ctx->child; | 50 | skcipher_request_set_tfm(subreq, child); |
| 50 | struct blkcipher_desc desc = { | 51 | skcipher_request_set_callback(subreq, 0, NULL, NULL); |
| 51 | .tfm = child, | 52 | skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, |
| 52 | .info = desc_in->info, | 53 | req->iv); |
| 53 | .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 54 | }; | ||
| 55 | 54 | ||
| 56 | kernel_fpu_begin(); | 55 | kernel_fpu_begin(); |
| 57 | err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes); | 56 | err = crypto_skcipher_encrypt(subreq); |
| 58 | kernel_fpu_end(); | 57 | kernel_fpu_end(); |
| 58 | |||
| 59 | skcipher_request_zero(subreq); | ||
| 59 | return err; | 60 | return err; |
| 60 | } | 61 | } |
| 61 | 62 | ||
| 62 | static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in, | 63 | static int crypto_fpu_decrypt(struct skcipher_request *req) |
| 63 | struct scatterlist *dst, struct scatterlist *src, | ||
| 64 | unsigned int nbytes) | ||
| 65 | { | 64 | { |
| 65 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 66 | struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 67 | struct crypto_skcipher *child = ctx->child; | ||
| 68 | SKCIPHER_REQUEST_ON_STACK(subreq, child); | ||
| 66 | int err; | 69 | int err; |
| 67 | struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); | 70 | |
| 68 | struct crypto_blkcipher *child = ctx->child; | 71 | skcipher_request_set_tfm(subreq, child); |
| 69 | struct blkcipher_desc desc = { | 72 | skcipher_request_set_callback(subreq, 0, NULL, NULL); |
| 70 | .tfm = child, | 73 | skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, |
| 71 | .info = desc_in->info, | 74 | req->iv); |
| 72 | .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 73 | }; | ||
| 74 | 75 | ||
| 75 | kernel_fpu_begin(); | 76 | kernel_fpu_begin(); |
| 76 | err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes); | 77 | err = crypto_skcipher_decrypt(subreq); |
| 77 | kernel_fpu_end(); | 78 | kernel_fpu_end(); |
| 79 | |||
| 80 | skcipher_request_zero(subreq); | ||
| 78 | return err; | 81 | return err; |
| 79 | } | 82 | } |
| 80 | 83 | ||
| 81 | static int crypto_fpu_init_tfm(struct crypto_tfm *tfm) | 84 | static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm) |
| 82 | { | 85 | { |
| 83 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 86 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
| 84 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 87 | struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 85 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); | 88 | struct crypto_skcipher_spawn *spawn; |
| 86 | struct crypto_blkcipher *cipher; | 89 | struct crypto_skcipher *cipher; |
| 87 | 90 | ||
| 88 | cipher = crypto_spawn_blkcipher(spawn); | 91 | spawn = skcipher_instance_ctx(inst); |
| 92 | cipher = crypto_spawn_skcipher(spawn); | ||
| 89 | if (IS_ERR(cipher)) | 93 | if (IS_ERR(cipher)) |
| 90 | return PTR_ERR(cipher); | 94 | return PTR_ERR(cipher); |
| 91 | 95 | ||
| 92 | ctx->child = cipher; | 96 | ctx->child = cipher; |
| 97 | |||
| 93 | return 0; | 98 | return 0; |
| 94 | } | 99 | } |
| 95 | 100 | ||
| 96 | static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm) | 101 | static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm) |
| 102 | { | ||
| 103 | struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 104 | |||
| 105 | crypto_free_skcipher(ctx->child); | ||
| 106 | } | ||
| 107 | |||
| 108 | static void crypto_fpu_free(struct skcipher_instance *inst) | ||
| 97 | { | 109 | { |
| 98 | struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); | 110 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); |
| 99 | crypto_free_blkcipher(ctx->child); | 111 | kfree(inst); |
| 100 | } | 112 | } |
| 101 | 113 | ||
| 102 | static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb) | 114 | static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 103 | { | 115 | { |
| 104 | struct crypto_instance *inst; | 116 | struct crypto_skcipher_spawn *spawn; |
| 105 | struct crypto_alg *alg; | 117 | struct skcipher_instance *inst; |
| 118 | struct crypto_attr_type *algt; | ||
| 119 | struct skcipher_alg *alg; | ||
| 120 | const char *cipher_name; | ||
| 106 | int err; | 121 | int err; |
| 107 | 122 | ||
| 108 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 123 | algt = crypto_get_attr_type(tb); |
| 124 | if (IS_ERR(algt)) | ||
| 125 | return PTR_ERR(algt); | ||
| 126 | |||
| 127 | if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) & | ||
| 128 | algt->mask) | ||
| 129 | return -EINVAL; | ||
| 130 | |||
| 131 | if (!(algt->mask & CRYPTO_ALG_INTERNAL)) | ||
| 132 | return -EINVAL; | ||
| 133 | |||
| 134 | cipher_name = crypto_attr_alg_name(tb[1]); | ||
| 135 | if (IS_ERR(cipher_name)) | ||
| 136 | return PTR_ERR(cipher_name); | ||
| 137 | |||
| 138 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | ||
| 139 | if (!inst) | ||
| 140 | return -ENOMEM; | ||
| 141 | |||
| 142 | spawn = skcipher_instance_ctx(inst); | ||
| 143 | |||
| 144 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | ||
| 145 | err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL, | ||
| 146 | CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); | ||
| 109 | if (err) | 147 | if (err) |
| 110 | return ERR_PTR(err); | 148 | goto out_free_inst; |
| 111 | |||
| 112 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 113 | CRYPTO_ALG_TYPE_MASK); | ||
| 114 | if (IS_ERR(alg)) | ||
| 115 | return ERR_CAST(alg); | ||
| 116 | |||
| 117 | inst = crypto_alloc_instance("fpu", alg); | ||
| 118 | if (IS_ERR(inst)) | ||
| 119 | goto out_put_alg; | ||
| 120 | |||
| 121 | inst->alg.cra_flags = alg->cra_flags; | ||
| 122 | inst->alg.cra_priority = alg->cra_priority; | ||
| 123 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
| 124 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
| 125 | inst->alg.cra_type = alg->cra_type; | ||
| 126 | inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize; | ||
| 127 | inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | ||
| 128 | inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | ||
| 129 | inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx); | ||
| 130 | inst->alg.cra_init = crypto_fpu_init_tfm; | ||
| 131 | inst->alg.cra_exit = crypto_fpu_exit_tfm; | ||
| 132 | inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey; | ||
| 133 | inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt; | ||
| 134 | inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt; | ||
| 135 | |||
| 136 | out_put_alg: | ||
| 137 | crypto_mod_put(alg); | ||
| 138 | return inst; | ||
| 139 | } | ||
| 140 | 149 | ||
| 141 | static void crypto_fpu_free(struct crypto_instance *inst) | 150 | alg = crypto_skcipher_spawn_alg(spawn); |
| 142 | { | 151 | |
| 143 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 152 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu", |
| 153 | &alg->base); | ||
| 154 | if (err) | ||
| 155 | goto out_drop_skcipher; | ||
| 156 | |||
| 157 | inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL; | ||
| 158 | inst->alg.base.cra_priority = alg->base.cra_priority; | ||
| 159 | inst->alg.base.cra_blocksize = alg->base.cra_blocksize; | ||
| 160 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; | ||
| 161 | |||
| 162 | inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); | ||
| 163 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); | ||
| 164 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); | ||
| 165 | |||
| 166 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx); | ||
| 167 | |||
| 168 | inst->alg.init = crypto_fpu_init_tfm; | ||
| 169 | inst->alg.exit = crypto_fpu_exit_tfm; | ||
| 170 | |||
| 171 | inst->alg.setkey = crypto_fpu_setkey; | ||
| 172 | inst->alg.encrypt = crypto_fpu_encrypt; | ||
| 173 | inst->alg.decrypt = crypto_fpu_decrypt; | ||
| 174 | |||
| 175 | inst->free = crypto_fpu_free; | ||
| 176 | |||
| 177 | err = skcipher_register_instance(tmpl, inst); | ||
| 178 | if (err) | ||
| 179 | goto out_drop_skcipher; | ||
| 180 | |||
| 181 | out: | ||
| 182 | return err; | ||
| 183 | |||
| 184 | out_drop_skcipher: | ||
| 185 | crypto_drop_skcipher(spawn); | ||
| 186 | out_free_inst: | ||
| 144 | kfree(inst); | 187 | kfree(inst); |
| 188 | goto out; | ||
| 145 | } | 189 | } |
| 146 | 190 | ||
| 147 | static struct crypto_template crypto_fpu_tmpl = { | 191 | static struct crypto_template crypto_fpu_tmpl = { |
| 148 | .name = "fpu", | 192 | .name = "fpu", |
| 149 | .alloc = crypto_fpu_alloc, | 193 | .create = crypto_fpu_create, |
| 150 | .free = crypto_fpu_free, | ||
| 151 | .module = THIS_MODULE, | 194 | .module = THIS_MODULE, |
| 152 | }; | 195 | }; |
| 153 | 196 | ||
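
After the fpu.c conversion, callers of the wrapped modes never see the template or the SIMD layer; they go through the ordinary skcipher API. A hedged usage sketch follows: it deliberately requests a synchronous implementation (mask CRYPTO_ALG_ASYNC) so no completion callback is needed, whereas using the async aesni-backed instance would require a callback and a completion wait. pcbc_aes_encrypt_buf is a made-up helper name.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int pcbc_aes_encrypt_buf(u8 *buf, unsigned int len,
                                const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        /* mask out ASYNC so the encrypt call completes synchronously */
        tfm = crypto_alloc_skcipher("pcbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);
        err = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}
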
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index 6a85598931b5..260a060d7275 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c | |||
| @@ -27,10 +27,10 @@ | |||
| 27 | 27 | ||
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <crypto/b128ops.h> | 29 | #include <crypto/b128ops.h> |
| 30 | #include <crypto/internal/skcipher.h> | ||
| 30 | #include <crypto/lrw.h> | 31 | #include <crypto/lrw.h> |
| 31 | #include <crypto/xts.h> | 32 | #include <crypto/xts.h> |
| 32 | #include <asm/crypto/glue_helper.h> | 33 | #include <asm/crypto/glue_helper.h> |
| 33 | #include <crypto/scatterwalk.h> | ||
| 34 | 34 | ||
| 35 | static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, | 35 | static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, |
| 36 | struct blkcipher_desc *desc, | 36 | struct blkcipher_desc *desc, |
| @@ -339,6 +339,41 @@ done: | |||
| 339 | return nbytes; | 339 | return nbytes; |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx, | ||
| 343 | void *ctx, | ||
| 344 | struct skcipher_walk *walk) | ||
| 345 | { | ||
| 346 | const unsigned int bsize = 128 / 8; | ||
| 347 | unsigned int nbytes = walk->nbytes; | ||
| 348 | u128 *src = walk->src.virt.addr; | ||
| 349 | u128 *dst = walk->dst.virt.addr; | ||
| 350 | unsigned int num_blocks, func_bytes; | ||
| 351 | unsigned int i; | ||
| 352 | |||
| 353 | /* Process multi-block batch */ | ||
| 354 | for (i = 0; i < gctx->num_funcs; i++) { | ||
| 355 | num_blocks = gctx->funcs[i].num_blocks; | ||
| 356 | func_bytes = bsize * num_blocks; | ||
| 357 | |||
| 358 | if (nbytes >= func_bytes) { | ||
| 359 | do { | ||
| 360 | gctx->funcs[i].fn_u.xts(ctx, dst, src, | ||
| 361 | walk->iv); | ||
| 362 | |||
| 363 | src += num_blocks; | ||
| 364 | dst += num_blocks; | ||
| 365 | nbytes -= func_bytes; | ||
| 366 | } while (nbytes >= func_bytes); | ||
| 367 | |||
| 368 | if (nbytes < bsize) | ||
| 369 | goto done; | ||
| 370 | } | ||
| 371 | } | ||
| 372 | |||
| 373 | done: | ||
| 374 | return nbytes; | ||
| 375 | } | ||
| 376 | |||
| 342 | /* for implementations implementing faster XTS IV generator */ | 377 | /* for implementations implementing faster XTS IV generator */ |
| 343 | int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, | 378 | int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, |
| 344 | struct blkcipher_desc *desc, struct scatterlist *dst, | 379 | struct blkcipher_desc *desc, struct scatterlist *dst, |
| @@ -379,6 +414,43 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, | |||
| 379 | } | 414 | } |
| 380 | EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); | 415 | EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); |
| 381 | 416 | ||
| 417 | int glue_xts_req_128bit(const struct common_glue_ctx *gctx, | ||
| 418 | struct skcipher_request *req, | ||
| 419 | common_glue_func_t tweak_fn, void *tweak_ctx, | ||
| 420 | void *crypt_ctx) | ||
| 421 | { | ||
| 422 | const unsigned int bsize = 128 / 8; | ||
| 423 | struct skcipher_walk walk; | ||
| 424 | bool fpu_enabled = false; | ||
| 425 | unsigned int nbytes; | ||
| 426 | int err; | ||
| 427 | |||
| 428 | err = skcipher_walk_virt(&walk, req, false); | ||
| 429 | nbytes = walk.nbytes; | ||
| 430 | if (!nbytes) | ||
| 431 | return err; | ||
| 432 | |||
| 433 | /* set minimum length to bsize, for tweak_fn */ | ||
| 434 | fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit, | ||
| 435 | &walk, fpu_enabled, | ||
| 436 | nbytes < bsize ? bsize : nbytes); | ||
| 437 | |||
| 438 | /* calculate first value of T */ | ||
| 439 | tweak_fn(tweak_ctx, walk.iv, walk.iv); | ||
| 440 | |||
| 441 | while (nbytes) { | ||
| 442 | nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk); | ||
| 443 | |||
| 444 | err = skcipher_walk_done(&walk, nbytes); | ||
| 445 | nbytes = walk.nbytes; | ||
| 446 | } | ||
| 447 | |||
| 448 | glue_fpu_end(fpu_enabled); | ||
| 449 | |||
| 450 | return err; | ||
| 451 | } | ||
| 452 | EXPORT_SYMBOL_GPL(glue_xts_req_128bit); | ||
| 453 | |||
| 382 | void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, | 454 | void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, |
| 383 | common_glue_func_t fn) | 455 | common_glue_func_t fn) |
| 384 | { | 456 | { |
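
glue_xts_req_128bit() gives the x86 glue modules a request-based XTS entry point to migrate to. A hedged sketch of a caller is below; the glue table, context layout, and function names are assumptions modelled on the existing blkcipher-based callers of glue_xts_crypt_128bit().

static int xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct my_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* tweak_ctx encrypts the IV once; crypt_ctx processes the data */
        return glue_xts_req_128bit(&my_enc_xts_glue_ctx, req,
                                   GLUE_FUNC_CAST(my_encrypt_one_block),
                                   &ctx->tweak_ctx, &ctx->crypt_ctx);
}

A matching xts_decrypt() would differ only in passing the decryption glue table as the first argument.
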
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c index 9e5b67127a09..acf9fdf01671 100644 --- a/arch/x86/crypto/sha1-mb/sha1_mb.c +++ b/arch/x86/crypto/sha1-mb/sha1_mb.c | |||
| @@ -114,7 +114,7 @@ static inline void sha1_init_digest(uint32_t *digest) | |||
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], | 116 | static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], |
| 117 | uint32_t total_len) | 117 | uint64_t total_len) |
| 118 | { | 118 | { |
| 119 | uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1); | 119 | uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1); |
| 120 | 120 | ||
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h index 98a35bcc6f4a..13590ccf965c 100644 --- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h +++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h | |||
| @@ -125,7 +125,7 @@ struct sha1_hash_ctx { | |||
| 125 | /* error flag */ | 125 | /* error flag */ |
| 126 | int error; | 126 | int error; |
| 127 | 127 | ||
| 128 | uint32_t total_length; | 128 | uint64_t total_length; |
| 129 | const void *incoming_buffer; | 129 | const void *incoming_buffer; |
| 130 | uint32_t incoming_buffer_length; | 130 | uint32_t incoming_buffer_length; |
| 131 | uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2]; | 131 | uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2]; |
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c index 6f97fb33ae21..7926a226b120 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb.c +++ b/arch/x86/crypto/sha256-mb/sha256_mb.c | |||
| @@ -115,7 +115,7 @@ inline void sha256_init_digest(uint32_t *digest) | |||
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], | 117 | inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], |
| 118 | uint32_t total_len) | 118 | uint64_t total_len) |
| 119 | { | 119 | { |
| 120 | uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1); | 120 | uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1); |
| 121 | 121 | ||
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h index edd252b73206..aabb30320af0 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h +++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h | |||
| @@ -125,7 +125,7 @@ struct sha256_hash_ctx { | |||
| 125 | /* error flag */ | 125 | /* error flag */ |
| 126 | int error; | 126 | int error; |
| 127 | 127 | ||
| 128 | uint32_t total_length; | 128 | uint64_t total_length; |
| 129 | const void *incoming_buffer; | 129 | const void *incoming_buffer; |
| 130 | uint32_t incoming_buffer_length; | 130 | uint32_t incoming_buffer_length; |
| 131 | uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2]; | 131 | uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2]; |
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c index d210174a52b0..9c1bb6d58141 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb.c +++ b/arch/x86/crypto/sha512-mb/sha512_mb.c | |||
| @@ -117,7 +117,7 @@ inline void sha512_init_digest(uint64_t *digest) | |||
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2], | 119 | inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2], |
| 120 | uint32_t total_len) | 120 | uint64_t total_len) |
| 121 | { | 121 | { |
| 122 | uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1); | 122 | uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1); |
| 123 | 123 | ||
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h index 9d4b2c8208d5..e4653f5eec3f 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h +++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h | |||
| @@ -119,7 +119,7 @@ struct sha512_hash_ctx { | |||
| 119 | /* error flag */ | 119 | /* error flag */ |
| 120 | int error; | 120 | int error; |
| 121 | 121 | ||
| 122 | uint32_t total_length; | 122 | uint64_t total_length; |
| 123 | const void *incoming_buffer; | 123 | const void *incoming_buffer; |
| 124 | uint32_t incoming_buffer_length; | 124 | uint32_t incoming_buffer_length; |
| 125 | uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2]; | 125 | uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2]; |
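
The uint32_t to uint64_t widening across the three multibuffer drivers matters because SHA padding encodes the total message length in bits: with a 32-bit byte counter, any input of 4 GiB or more wraps and the pad block carries a truncated count. A small standalone C illustration of the wraparound (the length value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t total_len = (1ULL << 32) + 64;   /* 4 GiB + 64 bytes */
        uint32_t old_field = (uint32_t)total_len; /* former field width */

        /* the pad block stores length * 8; the 32-bit copy loses 4 GiB */
        printf("real bit count:      %llu\n",
               (unsigned long long)total_len * 8);
        printf("truncated bit count: %llu\n",
               (unsigned long long)old_field * 8);
        return 0;
}
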
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h index 03bb1065c335..29e53ea7d764 100644 --- a/arch/x86/include/asm/crypto/glue_helper.h +++ b/arch/x86/include/asm/crypto/glue_helper.h | |||
| @@ -5,8 +5,8 @@ | |||
| 5 | #ifndef _CRYPTO_GLUE_HELPER_H | 5 | #ifndef _CRYPTO_GLUE_HELPER_H |
| 6 | #define _CRYPTO_GLUE_HELPER_H | 6 | #define _CRYPTO_GLUE_HELPER_H |
| 7 | 7 | ||
| 8 | #include <crypto/internal/skcipher.h> | ||
| 8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 9 | #include <linux/crypto.h> | ||
| 10 | #include <asm/fpu/api.h> | 10 | #include <asm/fpu/api.h> |
| 11 | #include <crypto/b128ops.h> | 11 | #include <crypto/b128ops.h> |
| 12 | 12 | ||
| @@ -69,6 +69,31 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit, | |||
| 69 | return true; | 69 | return true; |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static inline bool glue_skwalk_fpu_begin(unsigned int bsize, | ||
| 73 | int fpu_blocks_limit, | ||
| 74 | struct skcipher_walk *walk, | ||
| 75 | bool fpu_enabled, unsigned int nbytes) | ||
| 76 | { | ||
| 77 | if (likely(fpu_blocks_limit < 0)) | ||
| 78 | return false; | ||
| 79 | |||
| 80 | if (fpu_enabled) | ||
| 81 | return true; | ||
| 82 | |||
| 83 | /* | ||
| 84 | * Vector registers are only used when the chunk to be processed is | ||
| 85 | * large enough, so do not enable the FPU until it is necessary. | ||
| 86 | */ | ||
| 87 | if (nbytes < bsize * (unsigned int)fpu_blocks_limit) | ||
| 88 | return false; | ||
| 89 | |||
| 90 | /* prevent sleeping if FPU is in use */ | ||
| 91 | skcipher_walk_atomise(walk); | ||
| 92 | |||
| 93 | kernel_fpu_begin(); | ||
| 94 | return true; | ||
| 95 | } | ||
| 96 | |||
| 72 | static inline void glue_fpu_end(bool fpu_enabled) | 97 | static inline void glue_fpu_end(bool fpu_enabled) |
| 73 | { | 98 | { |
| 74 | if (fpu_enabled) | 99 | if (fpu_enabled) |
| @@ -139,6 +164,18 @@ extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, | |||
| 139 | common_glue_func_t tweak_fn, void *tweak_ctx, | 164 | common_glue_func_t tweak_fn, void *tweak_ctx, |
| 140 | void *crypt_ctx); | 165 | void *crypt_ctx); |
| 141 | 166 | ||
| 167 | extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, | ||
| 168 | struct blkcipher_desc *desc, | ||
| 169 | struct scatterlist *dst, | ||
| 170 | struct scatterlist *src, unsigned int nbytes, | ||
| 171 | common_glue_func_t tweak_fn, void *tweak_ctx, | ||
| 172 | void *crypt_ctx); | ||
| 173 | |||
| 174 | extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, | ||
| 175 | struct skcipher_request *req, | ||
| 176 | common_glue_func_t tweak_fn, void *tweak_ctx, | ||
| 177 | void *crypt_ctx); | ||
| 178 | |||
| 142 | extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, | 179 | extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, |
| 143 | le128 *iv, common_glue_func_t fn); | 180 | le128 *iv, common_glue_func_t fn); |
| 144 | 181 | ||
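
glue_skwalk_fpu_begin() lets a request-based walk hold the FPU only across chunks large enough to amortise the save/restore cost, atomising the walk so it cannot sleep in the meantime. A hedged sketch of the loop shape it enables (my_ctx, my_ecb_one_block, and the limit of 8 blocks are assumptions):

static int my_ecb_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
        const unsigned int bsize = 16;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;

                /* only worth enabling the FPU for chunks >= 8 blocks */
                fpu_enabled = glue_skwalk_fpu_begin(bsize, 8, &walk,
                                                    fpu_enabled, nbytes);
                while (nbytes >= bsize) {
                        my_ecb_one_block(ctx, dst, src);
                        src += bsize;
                        dst += bsize;
                        nbytes -= bsize;
                }
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
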
diff --git a/crypto/842.c b/crypto/842.c index 98e387efb8c8..bc26dc942821 100644 --- a/crypto/842.c +++ b/crypto/842.c | |||
| @@ -31,11 +31,46 @@ | |||
| 31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
| 32 | #include <linux/crypto.h> | 32 | #include <linux/crypto.h> |
| 33 | #include <linux/sw842.h> | 33 | #include <linux/sw842.h> |
| 34 | #include <crypto/internal/scompress.h> | ||
| 34 | 35 | ||
| 35 | struct crypto842_ctx { | 36 | struct crypto842_ctx { |
| 36 | char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */ | 37 | void *wmem; /* working memory for compress */ |
| 37 | }; | 38 | }; |
| 38 | 39 | ||
| 40 | static void *crypto842_alloc_ctx(struct crypto_scomp *tfm) | ||
| 41 | { | ||
| 42 | void *ctx; | ||
| 43 | |||
| 44 | ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL); | ||
| 45 | if (!ctx) | ||
| 46 | return ERR_PTR(-ENOMEM); | ||
| 47 | |||
| 48 | return ctx; | ||
| 49 | } | ||
| 50 | |||
| 51 | static int crypto842_init(struct crypto_tfm *tfm) | ||
| 52 | { | ||
| 53 | struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 54 | |||
| 55 | ctx->wmem = crypto842_alloc_ctx(NULL); | ||
| 56 | if (IS_ERR(ctx->wmem)) | ||
| 57 | return -ENOMEM; | ||
| 58 | |||
| 59 | return 0; | ||
| 60 | } | ||
| 61 | |||
| 62 | static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
| 63 | { | ||
| 64 | kfree(ctx); | ||
| 65 | } | ||
| 66 | |||
| 67 | static void crypto842_exit(struct crypto_tfm *tfm) | ||
| 68 | { | ||
| 69 | struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 70 | |||
| 71 | crypto842_free_ctx(NULL, ctx->wmem); | ||
| 72 | } | ||
| 73 | |||
| 39 | static int crypto842_compress(struct crypto_tfm *tfm, | 74 | static int crypto842_compress(struct crypto_tfm *tfm, |
| 40 | const u8 *src, unsigned int slen, | 75 | const u8 *src, unsigned int slen, |
| 41 | u8 *dst, unsigned int *dlen) | 76 | u8 *dst, unsigned int *dlen) |
| @@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm, | |||
| 45 | return sw842_compress(src, slen, dst, dlen, ctx->wmem); | 80 | return sw842_compress(src, slen, dst, dlen, ctx->wmem); |
| 46 | } | 81 | } |
| 47 | 82 | ||
| 83 | static int crypto842_scompress(struct crypto_scomp *tfm, | ||
| 84 | const u8 *src, unsigned int slen, | ||
| 85 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 86 | { | ||
| 87 | return sw842_compress(src, slen, dst, dlen, ctx); | ||
| 88 | } | ||
| 89 | |||
| 48 | static int crypto842_decompress(struct crypto_tfm *tfm, | 90 | static int crypto842_decompress(struct crypto_tfm *tfm, |
| 49 | const u8 *src, unsigned int slen, | 91 | const u8 *src, unsigned int slen, |
| 50 | u8 *dst, unsigned int *dlen) | 92 | u8 *dst, unsigned int *dlen) |
| @@ -52,6 +94,13 @@ static int crypto842_decompress(struct crypto_tfm *tfm, | |||
| 52 | return sw842_decompress(src, slen, dst, dlen); | 94 | return sw842_decompress(src, slen, dst, dlen); |
| 53 | } | 95 | } |
| 54 | 96 | ||
| 97 | static int crypto842_sdecompress(struct crypto_scomp *tfm, | ||
| 98 | const u8 *src, unsigned int slen, | ||
| 99 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 100 | { | ||
| 101 | return sw842_decompress(src, slen, dst, dlen); | ||
| 102 | } | ||
| 103 | |||
| 55 | static struct crypto_alg alg = { | 104 | static struct crypto_alg alg = { |
| 56 | .cra_name = "842", | 105 | .cra_name = "842", |
| 57 | .cra_driver_name = "842-generic", | 106 | .cra_driver_name = "842-generic", |
| @@ -59,20 +108,48 @@ static struct crypto_alg alg = { | |||
| 59 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 108 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
| 60 | .cra_ctxsize = sizeof(struct crypto842_ctx), | 109 | .cra_ctxsize = sizeof(struct crypto842_ctx), |
| 61 | .cra_module = THIS_MODULE, | 110 | .cra_module = THIS_MODULE, |
| 111 | .cra_init = crypto842_init, | ||
| 112 | .cra_exit = crypto842_exit, | ||
| 62 | .cra_u = { .compress = { | 113 | .cra_u = { .compress = { |
| 63 | .coa_compress = crypto842_compress, | 114 | .coa_compress = crypto842_compress, |
| 64 | .coa_decompress = crypto842_decompress } } | 115 | .coa_decompress = crypto842_decompress } } |
| 65 | }; | 116 | }; |
| 66 | 117 | ||
| 118 | static struct scomp_alg scomp = { | ||
| 119 | .alloc_ctx = crypto842_alloc_ctx, | ||
| 120 | .free_ctx = crypto842_free_ctx, | ||
| 121 | .compress = crypto842_scompress, | ||
| 122 | .decompress = crypto842_sdecompress, | ||
| 123 | .base = { | ||
| 124 | .cra_name = "842", | ||
| 125 | .cra_driver_name = "842-scomp", | ||
| 126 | .cra_priority = 100, | ||
| 127 | .cra_module = THIS_MODULE, | ||
| 128 | } | ||
| 129 | }; | ||
| 130 | |||
| 67 | static int __init crypto842_mod_init(void) | 131 | static int __init crypto842_mod_init(void) |
| 68 | { | 132 | { |
| 69 | return crypto_register_alg(&alg); | 133 | int ret; |
| 134 | |||
| 135 | ret = crypto_register_alg(&alg); | ||
| 136 | if (ret) | ||
| 137 | return ret; | ||
| 138 | |||
| 139 | ret = crypto_register_scomp(&scomp); | ||
| 140 | if (ret) { | ||
| 141 | crypto_unregister_alg(&alg); | ||
| 142 | return ret; | ||
| 143 | } | ||
| 144 | |||
| 145 | return ret; | ||
| 70 | } | 146 | } |
| 71 | module_init(crypto842_mod_init); | 147 | module_init(crypto842_mod_init); |
| 72 | 148 | ||
| 73 | static void __exit crypto842_mod_exit(void) | 149 | static void __exit crypto842_mod_exit(void) |
| 74 | { | 150 | { |
| 75 | crypto_unregister_alg(&alg); | 151 | crypto_unregister_alg(&alg); |
| 152 | crypto_unregister_scomp(&scomp); | ||
| 76 | } | 153 | } |
| 77 | module_exit(crypto842_mod_exit); | 154 | module_exit(crypto842_mod_exit); |
| 78 | 155 | ||
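
With the scomp backend registered alongside the legacy compression alg, kernel users reach 842 through the new acomp front end. A hedged usage sketch for linear buffers follows; it assumes synchronous completion, since the scomp-backed instance finishes inline, and compress_842 is a made-up helper name.

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int compress_842(const void *src, unsigned int slen,
                        void *dst, unsigned int *dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        struct scatterlist sg_src, sg_dst;
        int err;

        tfm = crypto_alloc_acomp("842", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg_src, src, slen);
        sg_init_one(&sg_dst, dst, *dlen);
        acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);

        err = crypto_acomp_compress(req);
        if (!err)
                *dlen = req->dlen;   /* bytes actually produced */

        acomp_request_free(req);
out_free_tfm:
        crypto_free_acomp(tfm);
        return err;
}
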
diff --git a/crypto/Kconfig b/crypto/Kconfig index 84d71482bf08..160f08e721cc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -24,7 +24,7 @@ comment "Crypto core or helper" | |||
| 24 | config CRYPTO_FIPS | 24 | config CRYPTO_FIPS |
| 25 | bool "FIPS 200 compliance" | 25 | bool "FIPS 200 compliance" |
| 26 | depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS | 26 | depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS |
| 27 | depends on MODULE_SIG | 27 | depends on (MODULE_SIG || !MODULES) |
| 28 | help | 28 | help |
| 29 | This option enables the fips boot option which is | 29 | This option enables the fips boot option which is |
| 30 | required if you want the system to operate in a FIPS 200 | 30 | required if you want the system to operate in a FIPS 200 |
| @@ -102,6 +102,15 @@ config CRYPTO_KPP | |||
| 102 | select CRYPTO_ALGAPI | 102 | select CRYPTO_ALGAPI |
| 103 | select CRYPTO_KPP2 | 103 | select CRYPTO_KPP2 |
| 104 | 104 | ||
| 105 | config CRYPTO_ACOMP2 | ||
| 106 | tristate | ||
| 107 | select CRYPTO_ALGAPI2 | ||
| 108 | |||
| 109 | config CRYPTO_ACOMP | ||
| 110 | tristate | ||
| 111 | select CRYPTO_ALGAPI | ||
| 112 | select CRYPTO_ACOMP2 | ||
| 113 | |||
| 105 | config CRYPTO_RSA | 114 | config CRYPTO_RSA |
| 106 | tristate "RSA algorithm" | 115 | tristate "RSA algorithm" |
| 107 | select CRYPTO_AKCIPHER | 116 | select CRYPTO_AKCIPHER |
| @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2 | |||
| 138 | select CRYPTO_BLKCIPHER2 | 147 | select CRYPTO_BLKCIPHER2 |
| 139 | select CRYPTO_AKCIPHER2 | 148 | select CRYPTO_AKCIPHER2 |
| 140 | select CRYPTO_KPP2 | 149 | select CRYPTO_KPP2 |
| 150 | select CRYPTO_ACOMP2 | ||
| 141 | 151 | ||
| 142 | config CRYPTO_USER | 152 | config CRYPTO_USER |
| 143 | tristate "Userspace cryptographic algorithm configuration" | 153 | tristate "Userspace cryptographic algorithm configuration" |
| @@ -236,10 +246,14 @@ config CRYPTO_ABLK_HELPER | |||
| 236 | tristate | 246 | tristate |
| 237 | select CRYPTO_CRYPTD | 247 | select CRYPTO_CRYPTD |
| 238 | 248 | ||
| 249 | config CRYPTO_SIMD | ||
| 250 | tristate | ||
| 251 | select CRYPTO_CRYPTD | ||
| 252 | |||
| 239 | config CRYPTO_GLUE_HELPER_X86 | 253 | config CRYPTO_GLUE_HELPER_X86 |
| 240 | tristate | 254 | tristate |
| 241 | depends on X86 | 255 | depends on X86 |
| 242 | select CRYPTO_ALGAPI | 256 | select CRYPTO_BLKCIPHER |
| 243 | 257 | ||
| 244 | config CRYPTO_ENGINE | 258 | config CRYPTO_ENGINE |
| 245 | tristate | 259 | tristate |
| @@ -437,7 +451,7 @@ config CRYPTO_CRC32C_INTEL | |||
| 437 | gain performance compared with software implementation. | 451 | gain performance compared with software implementation. |
| 438 | Module will be crc32c-intel. | 452 | Module will be crc32c-intel. |
| 439 | 453 | ||
| 440 | config CRYPT_CRC32C_VPMSUM | 454 | config CRYPTO_CRC32C_VPMSUM |
| 441 | tristate "CRC32c CRC algorithm (powerpc64)" | 455 | tristate "CRC32c CRC algorithm (powerpc64)" |
| 442 | depends on PPC64 && ALTIVEC | 456 | depends on PPC64 && ALTIVEC |
| 443 | select CRYPTO_HASH | 457 | select CRYPTO_HASH |
| @@ -928,14 +942,13 @@ config CRYPTO_AES_X86_64 | |||
| 928 | config CRYPTO_AES_NI_INTEL | 942 | config CRYPTO_AES_NI_INTEL |
| 929 | tristate "AES cipher algorithms (AES-NI)" | 943 | tristate "AES cipher algorithms (AES-NI)" |
| 930 | depends on X86 | 944 | depends on X86 |
| 945 | select CRYPTO_AEAD | ||
| 931 | select CRYPTO_AES_X86_64 if 64BIT | 946 | select CRYPTO_AES_X86_64 if 64BIT |
| 932 | select CRYPTO_AES_586 if !64BIT | 947 | select CRYPTO_AES_586 if !64BIT |
| 933 | select CRYPTO_CRYPTD | ||
| 934 | select CRYPTO_ABLK_HELPER | ||
| 935 | select CRYPTO_ALGAPI | 948 | select CRYPTO_ALGAPI |
| 949 | select CRYPTO_BLKCIPHER | ||
| 936 | select CRYPTO_GLUE_HELPER_X86 if 64BIT | 950 | select CRYPTO_GLUE_HELPER_X86 if 64BIT |
| 937 | select CRYPTO_LRW | 951 | select CRYPTO_SIMD |
| 938 | select CRYPTO_XTS | ||
| 939 | help | 952 | help |
| 940 | Use Intel AES-NI instructions for AES algorithm. | 953 | Use Intel AES-NI instructions for AES algorithm. |
| 941 | 954 | ||
| @@ -1568,6 +1581,7 @@ comment "Compression" | |||
| 1568 | config CRYPTO_DEFLATE | 1581 | config CRYPTO_DEFLATE |
| 1569 | tristate "Deflate compression algorithm" | 1582 | tristate "Deflate compression algorithm" |
| 1570 | select CRYPTO_ALGAPI | 1583 | select CRYPTO_ALGAPI |
| 1584 | select CRYPTO_ACOMP2 | ||
| 1571 | select ZLIB_INFLATE | 1585 | select ZLIB_INFLATE |
| 1572 | select ZLIB_DEFLATE | 1586 | select ZLIB_DEFLATE |
| 1573 | help | 1587 | help |
| @@ -1579,6 +1593,7 @@ config CRYPTO_DEFLATE | |||
| 1579 | config CRYPTO_LZO | 1593 | config CRYPTO_LZO |
| 1580 | tristate "LZO compression algorithm" | 1594 | tristate "LZO compression algorithm" |
| 1581 | select CRYPTO_ALGAPI | 1595 | select CRYPTO_ALGAPI |
| 1596 | select CRYPTO_ACOMP2 | ||
| 1582 | select LZO_COMPRESS | 1597 | select LZO_COMPRESS |
| 1583 | select LZO_DECOMPRESS | 1598 | select LZO_DECOMPRESS |
| 1584 | help | 1599 | help |
| @@ -1587,6 +1602,7 @@ config CRYPTO_LZO | |||
| 1587 | config CRYPTO_842 | 1602 | config CRYPTO_842 |
| 1588 | tristate "842 compression algorithm" | 1603 | tristate "842 compression algorithm" |
| 1589 | select CRYPTO_ALGAPI | 1604 | select CRYPTO_ALGAPI |
| 1605 | select CRYPTO_ACOMP2 | ||
| 1590 | select 842_COMPRESS | 1606 | select 842_COMPRESS |
| 1591 | select 842_DECOMPRESS | 1607 | select 842_DECOMPRESS |
| 1592 | help | 1608 | help |
| @@ -1595,6 +1611,7 @@ config CRYPTO_842 | |||
| 1595 | config CRYPTO_LZ4 | 1611 | config CRYPTO_LZ4 |
| 1596 | tristate "LZ4 compression algorithm" | 1612 | tristate "LZ4 compression algorithm" |
| 1597 | select CRYPTO_ALGAPI | 1613 | select CRYPTO_ALGAPI |
| 1614 | select CRYPTO_ACOMP2 | ||
| 1598 | select LZ4_COMPRESS | 1615 | select LZ4_COMPRESS |
| 1599 | select LZ4_DECOMPRESS | 1616 | select LZ4_DECOMPRESS |
| 1600 | help | 1617 | help |
| @@ -1603,6 +1620,7 @@ config CRYPTO_LZ4 | |||
| 1603 | config CRYPTO_LZ4HC | 1620 | config CRYPTO_LZ4HC |
| 1604 | tristate "LZ4HC compression algorithm" | 1621 | tristate "LZ4HC compression algorithm" |
| 1605 | select CRYPTO_ALGAPI | 1622 | select CRYPTO_ALGAPI |
| 1623 | select CRYPTO_ACOMP2 | ||
| 1606 | select LZ4HC_COMPRESS | 1624 | select LZ4HC_COMPRESS |
| 1607 | select LZ4_DECOMPRESS | 1625 | select LZ4_DECOMPRESS |
| 1608 | help | 1626 | help |
diff --git a/crypto/Makefile b/crypto/Makefile index bd6a029094e6..b8f0e3eb0791 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
| @@ -51,6 +51,10 @@ rsa_generic-y += rsa_helper.o | |||
| 51 | rsa_generic-y += rsa-pkcs1pad.o | 51 | rsa_generic-y += rsa-pkcs1pad.o |
| 52 | obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o | 52 | obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o |
| 53 | 53 | ||
| 54 | crypto_acompress-y := acompress.o | ||
| 55 | crypto_acompress-y += scompress.o | ||
| 56 | obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o | ||
| 57 | |||
| 54 | cryptomgr-y := algboss.o testmgr.o | 58 | cryptomgr-y := algboss.o testmgr.o |
| 55 | 59 | ||
| 56 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o | 60 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o |
| @@ -139,3 +143,5 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx/ | |||
| 139 | obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ | 143 | obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ |
| 140 | obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o | 144 | obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o |
| 141 | obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o | 145 | obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o |
| 146 | crypto_simd-y := simd.o | ||
| 147 | obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o | ||
diff --git a/crypto/acompress.c b/crypto/acompress.c new file mode 100644 index 000000000000..887783d8e9a9 --- /dev/null +++ b/crypto/acompress.c | |||
| @@ -0,0 +1,169 @@ | |||
| 1 | /* | ||
| 2 | * Asynchronous Compression operations | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Weigang Li <weigang.li@intel.com> | ||
| 6 | * Giovanni Cabiddu <giovanni.cabiddu@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #include <linux/errno.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/seq_file.h> | ||
| 18 | #include <linux/slab.h> | ||
| 19 | #include <linux/string.h> | ||
| 20 | #include <linux/crypto.h> | ||
| 21 | #include <crypto/algapi.h> | ||
| 22 | #include <linux/cryptouser.h> | ||
| 23 | #include <net/netlink.h> | ||
| 24 | #include <crypto/internal/acompress.h> | ||
| 25 | #include <crypto/internal/scompress.h> | ||
| 26 | #include "internal.h" | ||
| 27 | |||
| 28 | static const struct crypto_type crypto_acomp_type; | ||
| 29 | |||
| 30 | #ifdef CONFIG_NET | ||
| 31 | static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 32 | { | ||
| 33 | struct crypto_report_acomp racomp; | ||
| 34 | |||
| 35 | strncpy(racomp.type, "acomp", sizeof(racomp.type)); | ||
| 36 | |||
| 37 | if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, | ||
| 38 | sizeof(struct crypto_report_acomp), &racomp)) | ||
| 39 | goto nla_put_failure; | ||
| 40 | return 0; | ||
| 41 | |||
| 42 | nla_put_failure: | ||
| 43 | return -EMSGSIZE; | ||
| 44 | } | ||
| 45 | #else | ||
| 46 | static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 47 | { | ||
| 48 | return -ENOSYS; | ||
| 49 | } | ||
| 50 | #endif | ||
| 51 | |||
| 52 | static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 53 | __attribute__ ((unused)); | ||
| 54 | |||
| 55 | static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 56 | { | ||
| 57 | seq_puts(m, "type : acomp\n"); | ||
| 58 | } | ||
| 59 | |||
| 60 | static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) | ||
| 61 | { | ||
| 62 | struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); | ||
| 63 | struct acomp_alg *alg = crypto_acomp_alg(acomp); | ||
| 64 | |||
| 65 | alg->exit(acomp); | ||
| 66 | } | ||
| 67 | |||
| 68 | static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) | ||
| 69 | { | ||
| 70 | struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); | ||
| 71 | struct acomp_alg *alg = crypto_acomp_alg(acomp); | ||
| 72 | |||
| 73 | if (tfm->__crt_alg->cra_type != &crypto_acomp_type) | ||
| 74 | return crypto_init_scomp_ops_async(tfm); | ||
| 75 | |||
| 76 | acomp->compress = alg->compress; | ||
| 77 | acomp->decompress = alg->decompress; | ||
| 78 | acomp->dst_free = alg->dst_free; | ||
| 79 | acomp->reqsize = alg->reqsize; | ||
| 80 | |||
| 81 | if (alg->exit) | ||
| 82 | acomp->base.exit = crypto_acomp_exit_tfm; | ||
| 83 | |||
| 84 | if (alg->init) | ||
| 85 | return alg->init(acomp); | ||
| 86 | |||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) | ||
| 91 | { | ||
| 92 | int extsize = crypto_alg_extsize(alg); | ||
| 93 | |||
| 94 | if (alg->cra_type != &crypto_acomp_type) | ||
| 95 | extsize += sizeof(struct crypto_scomp *); | ||
| 96 | |||
| 97 | return extsize; | ||
| 98 | } | ||
| 99 | |||
| 100 | static const struct crypto_type crypto_acomp_type = { | ||
| 101 | .extsize = crypto_acomp_extsize, | ||
| 102 | .init_tfm = crypto_acomp_init_tfm, | ||
| 103 | #ifdef CONFIG_PROC_FS | ||
| 104 | .show = crypto_acomp_show, | ||
| 105 | #endif | ||
| 106 | .report = crypto_acomp_report, | ||
| 107 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
| 108 | .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, | ||
| 109 | .type = CRYPTO_ALG_TYPE_ACOMPRESS, | ||
| 110 | .tfmsize = offsetof(struct crypto_acomp, base), | ||
| 111 | }; | ||
| 112 | |||
| 113 | struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, | ||
| 114 | u32 mask) | ||
| 115 | { | ||
| 116 | return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask); | ||
| 117 | } | ||
| 118 | EXPORT_SYMBOL_GPL(crypto_alloc_acomp); | ||
| 119 | |||
| 120 | struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) | ||
| 121 | { | ||
| 122 | struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); | ||
| 123 | struct acomp_req *req; | ||
| 124 | |||
| 125 | req = __acomp_request_alloc(acomp); | ||
| 126 | if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type)) | ||
| 127 | return crypto_acomp_scomp_alloc_ctx(req); | ||
| 128 | |||
| 129 | return req; | ||
| 130 | } | ||
| 131 | EXPORT_SYMBOL_GPL(acomp_request_alloc); | ||
| 132 | |||
| 133 | void acomp_request_free(struct acomp_req *req) | ||
| 134 | { | ||
| 135 | struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); | ||
| 136 | struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); | ||
| 137 | |||
| 138 | if (tfm->__crt_alg->cra_type != &crypto_acomp_type) | ||
| 139 | crypto_acomp_scomp_free_ctx(req); | ||
| 140 | |||
| 141 | if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) { | ||
| 142 | acomp->dst_free(req->dst); | ||
| 143 | req->dst = NULL; | ||
| 144 | } | ||
| 145 | |||
| 146 | __acomp_request_free(req); | ||
| 147 | } | ||
| 148 | EXPORT_SYMBOL_GPL(acomp_request_free); | ||
| 149 | |||
| 150 | int crypto_register_acomp(struct acomp_alg *alg) | ||
| 151 | { | ||
| 152 | struct crypto_alg *base = &alg->base; | ||
| 153 | |||
| 154 | base->cra_type = &crypto_acomp_type; | ||
| 155 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
| 156 | base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS; | ||
| 157 | |||
| 158 | return crypto_register_alg(base); | ||
| 159 | } | ||
| 160 | EXPORT_SYMBOL_GPL(crypto_register_acomp); | ||
| 161 | |||
| 162 | int crypto_unregister_acomp(struct acomp_alg *alg) | ||
| 163 | { | ||
| 164 | return crypto_unregister_alg(&alg->base); | ||
| 165 | } | ||
| 166 | EXPORT_SYMBOL_GPL(crypto_unregister_acomp); | ||
| 167 | |||
| 168 | MODULE_LICENSE("GPL"); | ||
| 169 | MODULE_DESCRIPTION("Asynchronous compression type"); | ||
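
For a device that implements compression natively, the provider side is symmetrical: fill in struct acomp_alg and register it with the new crypto_register_acomp(). A hedged sketch with placeholder names and handler bodies:

static int my_acomp_compress(struct acomp_req *req)
{
        /* walk req->src / req->dst scatterlists, then set req->dlen */
        return -ENOSYS; /* placeholder */
}

static int my_acomp_decompress(struct acomp_req *req)
{
        return -ENOSYS; /* placeholder */
}

static struct acomp_alg my_acomp = {
        .compress       = my_acomp_compress,
        .decompress     = my_acomp_decompress,
        .base           = {
                .cra_name        = "842",
                .cra_driver_name = "842-mydev",
                .cra_priority    = 300,
                .cra_module      = THIS_MODULE,
        },
};

static int __init my_acomp_init(void)
{
        return crypto_register_acomp(&my_acomp);
}

static void __exit my_acomp_exit(void)
{
        crypto_unregister_acomp(&my_acomp);
}
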
diff --git a/crypto/algboss.c b/crypto/algboss.c index 6e39d9c05b98..ccb85e1798f2 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
| @@ -247,12 +247,8 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) | |||
| 247 | memcpy(param->alg, alg->cra_name, sizeof(param->alg)); | 247 | memcpy(param->alg, alg->cra_name, sizeof(param->alg)); |
| 248 | type = alg->cra_flags; | 248 | type = alg->cra_flags; |
| 249 | 249 | ||
| 250 | /* This piece of crap needs to disappear into per-type test hooks. */ | 250 | /* Do not test internal algorithms. */ |
| 251 | if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & | 251 | if (type & CRYPTO_ALG_INTERNAL) |
| 252 | CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) && | ||
| 253 | ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
| 254 | CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : | ||
| 255 | alg->cra_ablkcipher.ivsize)) | ||
| 256 | type |= CRYPTO_ALG_TESTED; | 252 | type |= CRYPTO_ALG_TESTED; |
| 257 | 253 | ||
| 258 | param->type = type; | 254 | param->type = type; |
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 235f54d4f8a9..668ef402c6eb 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
| @@ -454,12 +454,13 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, | |||
| 454 | used -= ctx->aead_assoclen; | 454 | used -= ctx->aead_assoclen; |
| 455 | 455 | ||
| 456 | /* take over all tx sgls from ctx */ | 456 | /* take over all tx sgls from ctx */ |
| 457 | areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur, | 457 | areq->tsgl = sock_kmalloc(sk, |
| 458 | sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1), | ||
| 458 | GFP_KERNEL); | 459 | GFP_KERNEL); |
| 459 | if (unlikely(!areq->tsgl)) | 460 | if (unlikely(!areq->tsgl)) |
| 460 | goto free; | 461 | goto free; |
| 461 | 462 | ||
| 462 | sg_init_table(areq->tsgl, sgl->cur); | 463 | sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1)); |
| 463 | for (i = 0; i < sgl->cur; i++) | 464 | for (i = 0; i < sgl->cur; i++) |
| 464 | sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]), | 465 | sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]), |
| 465 | sgl->sg[i].length, sgl->sg[i].offset); | 466 | sgl->sg[i].length, sgl->sg[i].offset); |
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 1e38aaa8303e..a9e79d8eff87 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c | |||
| @@ -566,8 +566,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, | |||
| 566 | * need to expand */ | 566 | * need to expand */ |
| 567 | tmp = kcalloc(tx_nents * 2, sizeof(*tmp), | 567 | tmp = kcalloc(tx_nents * 2, sizeof(*tmp), |
| 568 | GFP_KERNEL); | 568 | GFP_KERNEL); |
| 569 | if (!tmp) | 569 | if (!tmp) { |
| 570 | err = -ENOMEM; | ||
| 570 | goto free; | 571 | goto free; |
| 572 | } | ||
| 571 | 573 | ||
| 572 | sg_init_table(tmp, tx_nents * 2); | 574 | sg_init_table(tmp, tx_nents * 2); |
| 573 | for (x = 0; x < tx_nents; x++) | 575 | for (x = 0; x < tx_nents; x++) |
diff --git a/crypto/api.c b/crypto/api.c index bbc147cb5dec..b16ce1653284 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
| @@ -211,8 +211,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) | |||
| 211 | if (!name) | 211 | if (!name) |
| 212 | return ERR_PTR(-ENOENT); | 212 | return ERR_PTR(-ENOENT); |
| 213 | 213 | ||
| 214 | type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); | ||
| 214 | mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); | 215 | mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); |
| 215 | type &= mask; | ||
| 216 | 216 | ||
| 217 | alg = crypto_alg_lookup(name, type, mask); | 217 | alg = crypto_alg_lookup(name, type, mask); |
| 218 | if (!alg) { | 218 | if (!alg) { |
| @@ -310,24 +310,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) | |||
| 310 | { | 310 | { |
| 311 | const struct crypto_type *type = tfm->__crt_alg->cra_type; | 311 | const struct crypto_type *type = tfm->__crt_alg->cra_type; |
| 312 | 312 | ||
| 313 | if (type) { | 313 | if (type && tfm->exit) |
| 314 | if (tfm->exit) | 314 | tfm->exit(tfm); |
| 315 | tfm->exit(tfm); | ||
| 316 | return; | ||
| 317 | } | ||
| 318 | |||
| 319 | switch (crypto_tfm_alg_type(tfm)) { | ||
| 320 | case CRYPTO_ALG_TYPE_CIPHER: | ||
| 321 | crypto_exit_cipher_ops(tfm); | ||
| 322 | break; | ||
| 323 | |||
| 324 | case CRYPTO_ALG_TYPE_COMPRESS: | ||
| 325 | crypto_exit_compress_ops(tfm); | ||
| 326 | break; | ||
| 327 | |||
| 328 | default: | ||
| 329 | BUG(); | ||
| 330 | } | ||
| 331 | } | 315 | } |
| 332 | 316 | ||
| 333 | static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) | 317 | static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) |
diff --git a/crypto/authenc.c b/crypto/authenc.c index a7e1ac786c5d..875470b0e026 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
| @@ -324,7 +324,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) | |||
| 324 | if (IS_ERR(auth)) | 324 | if (IS_ERR(auth)) |
| 325 | return PTR_ERR(auth); | 325 | return PTR_ERR(auth); |
| 326 | 326 | ||
| 327 | enc = crypto_spawn_skcipher2(&ictx->enc); | 327 | enc = crypto_spawn_skcipher(&ictx->enc); |
| 328 | err = PTR_ERR(enc); | 328 | err = PTR_ERR(enc); |
| 329 | if (IS_ERR(enc)) | 329 | if (IS_ERR(enc)) |
| 330 | goto err_free_ahash; | 330 | goto err_free_ahash; |
| @@ -420,9 +420,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, | |||
| 420 | goto err_free_inst; | 420 | goto err_free_inst; |
| 421 | 421 | ||
| 422 | crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); | 422 | crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); |
| 423 | err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0, | 423 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, |
| 424 | crypto_requires_sync(algt->type, | 424 | crypto_requires_sync(algt->type, |
| 425 | algt->mask)); | 425 | algt->mask)); |
| 426 | if (err) | 426 | if (err) |
| 427 | goto err_drop_auth; | 427 | goto err_drop_auth; |
| 428 | 428 | ||
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 121010ac9962..6f8f6b86bfe2 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
| @@ -342,7 +342,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) | |||
| 342 | if (IS_ERR(auth)) | 342 | if (IS_ERR(auth)) |
| 343 | return PTR_ERR(auth); | 343 | return PTR_ERR(auth); |
| 344 | 344 | ||
| 345 | enc = crypto_spawn_skcipher2(&ictx->enc); | 345 | enc = crypto_spawn_skcipher(&ictx->enc); |
| 346 | err = PTR_ERR(enc); | 346 | err = PTR_ERR(enc); |
| 347 | if (IS_ERR(enc)) | 347 | if (IS_ERR(enc)) |
| 348 | goto err_free_ahash; | 348 | goto err_free_ahash; |
| @@ -441,9 +441,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, | |||
| 441 | goto err_free_inst; | 441 | goto err_free_inst; |
| 442 | 442 | ||
| 443 | crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); | 443 | crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); |
| 444 | err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0, | 444 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, |
| 445 | crypto_requires_sync(algt->type, | 445 | crypto_requires_sync(algt->type, |
| 446 | algt->mask)); | 446 | algt->mask)); |
| 447 | if (err) | 447 | if (err) |
| 448 | goto err_drop_auth; | 448 | goto err_drop_auth; |
| 449 | 449 | ||
diff --git a/crypto/cbc.c b/crypto/cbc.c index 780ee27b2d43..68f751a41a84 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * CBC: Cipher Block Chaining mode | 2 | * CBC: Cipher Block Chaining mode |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
| @@ -10,191 +10,78 @@ | |||
| 10 | * | 10 | * |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <crypto/algapi.h> | 13 | #include <crypto/cbc.h> |
| 14 | #include <crypto/internal/skcipher.h> | ||
| 14 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 17 | #include <linux/log2.h> | 18 | #include <linux/log2.h> |
| 18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 19 | #include <linux/scatterlist.h> | ||
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | 21 | ||
| 22 | struct crypto_cbc_ctx { | 22 | struct crypto_cbc_ctx { |
| 23 | struct crypto_cipher *child; | 23 | struct crypto_cipher *child; |
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, | 26 | static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key, |
| 27 | unsigned int keylen) | 27 | unsigned int keylen) |
| 28 | { | 28 | { |
| 29 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); | 29 | struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent); |
| 30 | struct crypto_cipher *child = ctx->child; | 30 | struct crypto_cipher *child = ctx->child; |
| 31 | int err; | 31 | int err; |
| 32 | 32 | ||
| 33 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 33 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
| 34 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 34 | crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
| 35 | CRYPTO_TFM_REQ_MASK); | 35 | CRYPTO_TFM_REQ_MASK); |
| 36 | err = crypto_cipher_setkey(child, key, keylen); | 36 | err = crypto_cipher_setkey(child, key, keylen); |
| 37 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 37 | crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & |
| 38 | CRYPTO_TFM_RES_MASK); | 38 | CRYPTO_TFM_RES_MASK); |
| 39 | return err; | 39 | return err; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, | 42 | static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm, |
| 43 | struct blkcipher_walk *walk, | 43 | const u8 *src, u8 *dst) |
| 44 | struct crypto_cipher *tfm) | ||
| 45 | { | 44 | { |
| 46 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 45 | struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 47 | crypto_cipher_alg(tfm)->cia_encrypt; | ||
| 48 | int bsize = crypto_cipher_blocksize(tfm); | ||
| 49 | unsigned int nbytes = walk->nbytes; | ||
| 50 | u8 *src = walk->src.virt.addr; | ||
| 51 | u8 *dst = walk->dst.virt.addr; | ||
| 52 | u8 *iv = walk->iv; | ||
| 53 | |||
| 54 | do { | ||
| 55 | crypto_xor(iv, src, bsize); | ||
| 56 | fn(crypto_cipher_tfm(tfm), dst, iv); | ||
| 57 | memcpy(iv, dst, bsize); | ||
| 58 | |||
| 59 | src += bsize; | ||
| 60 | dst += bsize; | ||
| 61 | } while ((nbytes -= bsize) >= bsize); | ||
| 62 | |||
| 63 | return nbytes; | ||
| 64 | } | ||
| 65 | |||
| 66 | static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, | ||
| 67 | struct blkcipher_walk *walk, | ||
| 68 | struct crypto_cipher *tfm) | ||
| 69 | { | ||
| 70 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
| 71 | crypto_cipher_alg(tfm)->cia_encrypt; | ||
| 72 | int bsize = crypto_cipher_blocksize(tfm); | ||
| 73 | unsigned int nbytes = walk->nbytes; | ||
| 74 | u8 *src = walk->src.virt.addr; | ||
| 75 | u8 *iv = walk->iv; | ||
| 76 | |||
| 77 | do { | ||
| 78 | crypto_xor(src, iv, bsize); | ||
| 79 | fn(crypto_cipher_tfm(tfm), src, src); | ||
| 80 | iv = src; | ||
| 81 | |||
| 82 | src += bsize; | ||
| 83 | } while ((nbytes -= bsize) >= bsize); | ||
| 84 | 46 | ||
| 85 | memcpy(walk->iv, iv, bsize); | 47 | crypto_cipher_encrypt_one(ctx->child, dst, src); |
| 86 | |||
| 87 | return nbytes; | ||
| 88 | } | 48 | } |
| 89 | 49 | ||
| 90 | static int crypto_cbc_encrypt(struct blkcipher_desc *desc, | 50 | static int crypto_cbc_encrypt(struct skcipher_request *req) |
| 91 | struct scatterlist *dst, struct scatterlist *src, | ||
| 92 | unsigned int nbytes) | ||
| 93 | { | 51 | { |
| 94 | struct blkcipher_walk walk; | 52 | return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one); |
| 95 | struct crypto_blkcipher *tfm = desc->tfm; | ||
| 96 | struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
| 97 | struct crypto_cipher *child = ctx->child; | ||
| 98 | int err; | ||
| 99 | |||
| 100 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 101 | err = blkcipher_walk_virt(desc, &walk); | ||
| 102 | |||
| 103 | while ((nbytes = walk.nbytes)) { | ||
| 104 | if (walk.src.virt.addr == walk.dst.virt.addr) | ||
| 105 | nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child); | ||
| 106 | else | ||
| 107 | nbytes = crypto_cbc_encrypt_segment(desc, &walk, child); | ||
| 108 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 109 | } | ||
| 110 | |||
| 111 | return err; | ||
| 112 | } | 53 | } |
| 113 | 54 | ||
| 114 | static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, | 55 | static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm, |
| 115 | struct blkcipher_walk *walk, | 56 | const u8 *src, u8 *dst) |
| 116 | struct crypto_cipher *tfm) | ||
| 117 | { | 57 | { |
| 118 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | 58 | struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 119 | crypto_cipher_alg(tfm)->cia_decrypt; | ||
| 120 | int bsize = crypto_cipher_blocksize(tfm); | ||
| 121 | unsigned int nbytes = walk->nbytes; | ||
| 122 | u8 *src = walk->src.virt.addr; | ||
| 123 | u8 *dst = walk->dst.virt.addr; | ||
| 124 | u8 *iv = walk->iv; | ||
| 125 | |||
| 126 | do { | ||
| 127 | fn(crypto_cipher_tfm(tfm), dst, src); | ||
| 128 | crypto_xor(dst, iv, bsize); | ||
| 129 | iv = src; | ||
| 130 | |||
| 131 | src += bsize; | ||
| 132 | dst += bsize; | ||
| 133 | } while ((nbytes -= bsize) >= bsize); | ||
| 134 | |||
| 135 | memcpy(walk->iv, iv, bsize); | ||
| 136 | |||
| 137 | return nbytes; | ||
| 138 | } | ||
| 139 | 59 | ||
| 140 | static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, | 60 | crypto_cipher_decrypt_one(ctx->child, dst, src); |
| 141 | struct blkcipher_walk *walk, | ||
| 142 | struct crypto_cipher *tfm) | ||
| 143 | { | ||
| 144 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
| 145 | crypto_cipher_alg(tfm)->cia_decrypt; | ||
| 146 | int bsize = crypto_cipher_blocksize(tfm); | ||
| 147 | unsigned int nbytes = walk->nbytes; | ||
| 148 | u8 *src = walk->src.virt.addr; | ||
| 149 | u8 last_iv[bsize]; | ||
| 150 | |||
| 151 | /* Start of the last block. */ | ||
| 152 | src += nbytes - (nbytes & (bsize - 1)) - bsize; | ||
| 153 | memcpy(last_iv, src, bsize); | ||
| 154 | |||
| 155 | for (;;) { | ||
| 156 | fn(crypto_cipher_tfm(tfm), src, src); | ||
| 157 | if ((nbytes -= bsize) < bsize) | ||
| 158 | break; | ||
| 159 | crypto_xor(src, src - bsize, bsize); | ||
| 160 | src -= bsize; | ||
| 161 | } | ||
| 162 | |||
| 163 | crypto_xor(src, walk->iv, bsize); | ||
| 164 | memcpy(walk->iv, last_iv, bsize); | ||
| 165 | |||
| 166 | return nbytes; | ||
| 167 | } | 61 | } |
| 168 | 62 | ||
| 169 | static int crypto_cbc_decrypt(struct blkcipher_desc *desc, | 63 | static int crypto_cbc_decrypt(struct skcipher_request *req) |
| 170 | struct scatterlist *dst, struct scatterlist *src, | ||
| 171 | unsigned int nbytes) | ||
| 172 | { | 64 | { |
| 173 | struct blkcipher_walk walk; | 65 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 174 | struct crypto_blkcipher *tfm = desc->tfm; | 66 | struct skcipher_walk walk; |
| 175 | struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
| 176 | struct crypto_cipher *child = ctx->child; | ||
| 177 | int err; | 67 | int err; |
| 178 | 68 | ||
| 179 | blkcipher_walk_init(&walk, dst, src, nbytes); | 69 | err = skcipher_walk_virt(&walk, req, false); |
| 180 | err = blkcipher_walk_virt(desc, &walk); | ||
| 181 | 70 | ||
| 182 | while ((nbytes = walk.nbytes)) { | 71 | while (walk.nbytes) { |
| 183 | if (walk.src.virt.addr == walk.dst.virt.addr) | 72 | err = crypto_cbc_decrypt_blocks(&walk, tfm, |
| 184 | nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child); | 73 | crypto_cbc_decrypt_one); |
| 185 | else | 74 | err = skcipher_walk_done(&walk, err); |
| 186 | nbytes = crypto_cbc_decrypt_segment(desc, &walk, child); | ||
| 187 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
| 188 | } | 75 | } |
| 189 | 76 | ||
| 190 | return err; | 77 | return err; |
| 191 | } | 78 | } |
| 192 | 79 | ||
| 193 | static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) | 80 | static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm) |
| 194 | { | 81 | { |
| 195 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 82 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
| 196 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 83 | struct crypto_spawn *spawn = skcipher_instance_ctx(inst); |
| 197 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 84 | struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 198 | struct crypto_cipher *cipher; | 85 | struct crypto_cipher *cipher; |
| 199 | 86 | ||
| 200 | cipher = crypto_spawn_cipher(spawn); | 87 | cipher = crypto_spawn_cipher(spawn); |
| @@ -205,72 +92,94 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) | |||
| 205 | return 0; | 92 | return 0; |
| 206 | } | 93 | } |
| 207 | 94 | ||
| 208 | static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) | 95 | static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm) |
| 209 | { | 96 | { |
| 210 | struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 97 | struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 98 | |||
| 211 | crypto_free_cipher(ctx->child); | 99 | crypto_free_cipher(ctx->child); |
| 212 | } | 100 | } |
| 213 | 101 | ||
| 214 | static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) | 102 | static void crypto_cbc_free(struct skcipher_instance *inst) |
| 103 | { | ||
| 104 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
| 105 | kfree(inst); | ||
| 106 | } | ||
| 107 | |||
| 108 | static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
| 215 | { | 109 | { |
| 216 | struct crypto_instance *inst; | 110 | struct skcipher_instance *inst; |
| 111 | struct crypto_spawn *spawn; | ||
| 217 | struct crypto_alg *alg; | 112 | struct crypto_alg *alg; |
| 218 | int err; | 113 | int err; |
| 219 | 114 | ||
| 220 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 115 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); |
| 221 | if (err) | 116 | if (err) |
| 222 | return ERR_PTR(err); | 117 | return err; |
| 118 | |||
| 119 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | ||
| 120 | if (!inst) | ||
| 121 | return -ENOMEM; | ||
| 223 | 122 | ||
| 224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 123 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
| 225 | CRYPTO_ALG_TYPE_MASK); | 124 | CRYPTO_ALG_TYPE_MASK); |
| 125 | err = PTR_ERR(alg); | ||
| 226 | if (IS_ERR(alg)) | 126 | if (IS_ERR(alg)) |
| 227 | return ERR_CAST(alg); | 127 | goto err_free_inst; |
| 228 | 128 | ||
| 229 | inst = ERR_PTR(-EINVAL); | 129 | spawn = skcipher_instance_ctx(inst); |
| 230 | if (!is_power_of_2(alg->cra_blocksize)) | 130 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), |
| 231 | goto out_put_alg; | 131 | CRYPTO_ALG_TYPE_MASK); |
| 132 | crypto_mod_put(alg); | ||
| 133 | if (err) | ||
| 134 | goto err_free_inst; | ||
| 232 | 135 | ||
| 233 | inst = crypto_alloc_instance("cbc", alg); | 136 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); |
| 234 | if (IS_ERR(inst)) | 137 | if (err) |
| 235 | goto out_put_alg; | 138 | goto err_drop_spawn; |
| 236 | 139 | ||
| 237 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 140 | err = -EINVAL; |
| 238 | inst->alg.cra_priority = alg->cra_priority; | 141 | if (!is_power_of_2(alg->cra_blocksize)) |
| 239 | inst->alg.cra_blocksize = alg->cra_blocksize; | 142 | goto err_drop_spawn; |
| 240 | inst->alg.cra_alignmask = alg->cra_alignmask; | 143 | |
| 241 | inst->alg.cra_type = &crypto_blkcipher_type; | 144 | inst->alg.base.cra_priority = alg->cra_priority; |
| 145 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | ||
| 146 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | ||
| 242 | 147 | ||
| 243 | /* We access the data as u32s when xoring. */ | 148 | /* We access the data as u32s when xoring. */ |
| 244 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 149 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; |
| 245 | 150 | ||
| 246 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 151 | inst->alg.ivsize = alg->cra_blocksize; |
| 247 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; | 152 | inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; |
| 248 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; | 153 | inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; |
| 249 | 154 | ||
| 250 | inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); | 155 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx); |
| 251 | 156 | ||
| 252 | inst->alg.cra_init = crypto_cbc_init_tfm; | 157 | inst->alg.init = crypto_cbc_init_tfm; |
| 253 | inst->alg.cra_exit = crypto_cbc_exit_tfm; | 158 | inst->alg.exit = crypto_cbc_exit_tfm; |
| 254 | 159 | ||
| 255 | inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; | 160 | inst->alg.setkey = crypto_cbc_setkey; |
| 256 | inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; | 161 | inst->alg.encrypt = crypto_cbc_encrypt; |
| 257 | inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; | 162 | inst->alg.decrypt = crypto_cbc_decrypt; |
| 258 | 163 | ||
| 259 | out_put_alg: | 164 | inst->free = crypto_cbc_free; |
| 260 | crypto_mod_put(alg); | ||
| 261 | return inst; | ||
| 262 | } | ||
| 263 | 165 | ||
| 264 | static void crypto_cbc_free(struct crypto_instance *inst) | 166 | err = skcipher_register_instance(tmpl, inst); |
| 265 | { | 167 | if (err) |
| 266 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 168 | goto err_drop_spawn; |
| 169 | |||
| 170 | out: | ||
| 171 | return err; | ||
| 172 | |||
| 173 | err_drop_spawn: | ||
| 174 | crypto_drop_spawn(spawn); | ||
| 175 | err_free_inst: | ||
| 267 | kfree(inst); | 176 | kfree(inst); |
| 177 | goto out; | ||
| 268 | } | 178 | } |
| 269 | 179 | ||
| 270 | static struct crypto_template crypto_cbc_tmpl = { | 180 | static struct crypto_template crypto_cbc_tmpl = { |
| 271 | .name = "cbc", | 181 | .name = "cbc", |
| 272 | .alloc = crypto_cbc_alloc, | 182 | .create = crypto_cbc_create, |
| 273 | .free = crypto_cbc_free, | ||
| 274 | .module = THIS_MODULE, | 183 | .module = THIS_MODULE, |
| 275 | }; | 184 | }; |
| 276 | 185 | ||
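
The rewrite above moves the cbc template from the legacy blkcipher interface to the skcipher API, delegating the per-block loops to inline helpers in the new crypto/cbc.h (crypto_cbc_encrypt_walk() and crypto_cbc_decrypt_blocks(), each parameterized by a one-block callback). Consumers of the resulting "cbc(...)" instances see no change; for orientation, a minimal synchronous caller could look like this (hypothetical sketch: AES-128, one in-place block, no async handling, and buf must not live on the stack since it is mapped through a scatterlist):

	#include <crypto/skcipher.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int cbc_aes_encrypt_one_block(u8 *buf, const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct scatterlist sg;
		int err;

		/* type 0, mask CRYPTO_ALG_ASYNC: ask for a sync tfm */
		tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, 16);
		if (err)
			goto out;

		sg_init_one(&sg, buf, 16);

		{
			SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
			err = crypto_skcipher_encrypt(req);
			skcipher_request_zero(req);
		}
	out:
		crypto_free_skcipher(tfm);
		return err;
	}
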
diff --git a/crypto/ccm.c b/crypto/ccm.c index 006d8575ef5c..26b924d1e582 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c | |||
| @@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm) | |||
| 462 | if (IS_ERR(cipher)) | 462 | if (IS_ERR(cipher)) |
| 463 | return PTR_ERR(cipher); | 463 | return PTR_ERR(cipher); |
| 464 | 464 | ||
| 465 | ctr = crypto_spawn_skcipher2(&ictx->ctr); | 465 | ctr = crypto_spawn_skcipher(&ictx->ctr); |
| 466 | err = PTR_ERR(ctr); | 466 | err = PTR_ERR(ctr); |
| 467 | if (IS_ERR(ctr)) | 467 | if (IS_ERR(ctr)) |
| 468 | goto err_free_cipher; | 468 | goto err_free_cipher; |
| @@ -544,9 +544,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, | |||
| 544 | goto err_free_inst; | 544 | goto err_free_inst; |
| 545 | 545 | ||
| 546 | crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); | 546 | crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); |
| 547 | err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0, | 547 | err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, |
| 548 | crypto_requires_sync(algt->type, | 548 | crypto_requires_sync(algt->type, |
| 549 | algt->mask)); | 549 | algt->mask)); |
| 550 | if (err) | 550 | if (err) |
| 551 | goto err_drop_cipher; | 551 | goto err_drop_cipher; |
| 552 | 552 | ||
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index e899ef51dc8e..db1bc3147bc4 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c | |||
| @@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm) | |||
| 532 | if (IS_ERR(poly)) | 532 | if (IS_ERR(poly)) |
| 533 | return PTR_ERR(poly); | 533 | return PTR_ERR(poly); |
| 534 | 534 | ||
| 535 | chacha = crypto_spawn_skcipher2(&ictx->chacha); | 535 | chacha = crypto_spawn_skcipher(&ictx->chacha); |
| 536 | if (IS_ERR(chacha)) { | 536 | if (IS_ERR(chacha)) { |
| 537 | crypto_free_ahash(poly); | 537 | crypto_free_ahash(poly); |
| 538 | return PTR_ERR(chacha); | 538 | return PTR_ERR(chacha); |
| @@ -625,9 +625,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, | |||
| 625 | goto err_free_inst; | 625 | goto err_free_inst; |
| 626 | 626 | ||
| 627 | crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); | 627 | crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); |
| 628 | err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0, | 628 | err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, |
| 629 | crypto_requires_sync(algt->type, | 629 | crypto_requires_sync(algt->type, |
| 630 | algt->mask)); | 630 | algt->mask)); |
| 631 | if (err) | 631 | if (err) |
| 632 | goto err_drop_poly; | 632 | goto err_drop_poly; |
| 633 | 633 | ||
diff --git a/crypto/cipher.c b/crypto/cipher.c index 39541e0e537d..94fa3551476b 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c | |||
| @@ -116,7 +116,3 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm) | |||
| 116 | 116 | ||
| 117 | return 0; | 117 | return 0; |
| 118 | } | 118 | } |
| 119 | |||
| 120 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm) | ||
| 121 | { | ||
| 122 | } | ||
diff --git a/crypto/cmac.c b/crypto/cmac.c index 7a8bfbd548f6..04080dca8f0c 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c | |||
| @@ -57,7 +57,8 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent, | |||
| 57 | unsigned long alignmask = crypto_shash_alignmask(parent); | 57 | unsigned long alignmask = crypto_shash_alignmask(parent); |
| 58 | struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); | 58 | struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); |
| 59 | unsigned int bs = crypto_shash_blocksize(parent); | 59 | unsigned int bs = crypto_shash_blocksize(parent); |
| 60 | __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); | 60 | __be64 *consts = PTR_ALIGN((void *)ctx->ctx, |
| 61 | (alignmask | (__alignof__(__be64) - 1)) + 1); | ||
| 61 | u64 _const[2]; | 62 | u64 _const[2]; |
| 62 | int i, err = 0; | 63 | int i, err = 0; |
| 63 | u8 msb_mask, gfmask; | 64 | u8 msb_mask, gfmask; |
| @@ -173,7 +174,8 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) | |||
| 173 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); | 174 | struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); |
| 174 | struct crypto_cipher *tfm = tctx->child; | 175 | struct crypto_cipher *tfm = tctx->child; |
| 175 | int bs = crypto_shash_blocksize(parent); | 176 | int bs = crypto_shash_blocksize(parent); |
| 176 | u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1); | 177 | u8 *consts = PTR_ALIGN((void *)tctx->ctx, |
| 178 | (alignmask | (__alignof__(__be64) - 1)) + 1); | ||
| 177 | u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); | 179 | u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); |
| 178 | u8 *prev = odds + bs; | 180 | u8 *prev = odds + bs; |
| 179 | unsigned int offset = 0; | 181 | unsigned int offset = 0; |
| @@ -243,6 +245,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 243 | case 8: | 245 | case 8: |
| 244 | break; | 246 | break; |
| 245 | default: | 247 | default: |
| 248 | err = -EINVAL; | ||
| 246 | goto out_put_alg; | 249 | goto out_put_alg; |
| 247 | } | 250 | } |
| 248 | 251 | ||
| @@ -257,7 +260,8 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 257 | if (err) | 260 | if (err) |
| 258 | goto out_free_inst; | 261 | goto out_free_inst; |
| 259 | 262 | ||
| 260 | alignmask = alg->cra_alignmask | (sizeof(long) - 1); | 263 | /* We access the data as u32s when xoring. */ |
| 264 | alignmask = alg->cra_alignmask | (__alignof__(u32) - 1); | ||
| 261 | inst->alg.base.cra_alignmask = alignmask; | 265 | inst->alg.base.cra_alignmask = alignmask; |
| 262 | inst->alg.base.cra_priority = alg->cra_priority; | 266 | inst->alg.base.cra_priority = alg->cra_priority; |
| 263 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | 267 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
| @@ -269,7 +273,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 269 | + alg->cra_blocksize * 2; | 273 | + alg->cra_blocksize * 2; |
| 270 | 274 | ||
| 271 | inst->alg.base.cra_ctxsize = | 275 | inst->alg.base.cra_ctxsize = |
| 272 | ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1) | 276 | ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment()) |
| 277 | + ((alignmask | (__alignof__(__be64) - 1)) & | ||
| 278 | ~(crypto_tfm_ctx_alignment() - 1)) | ||
| 273 | + alg->cra_blocksize * 2; | 279 | + alg->cra_blocksize * 2; |
| 274 | 280 | ||
| 275 | inst->alg.base.cra_init = cmac_init_tfm; | 281 | inst->alg.base.cra_init = cmac_init_tfm; |
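
The cmac change fixes an alignment assumption: the cached constants are read through __be64 pointers but were only aligned to the cipher's alignmask padded with sizeof(long) - 1, which may be smaller than what __be64 accesses require on some 32-bit configurations. The consts pointer is now aligned for __be64 explicitly, and cra_ctxsize reserves room for the worst-case padding. The mask arithmetic in isolation, as a standalone sketch:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* An alignmask of (n - 1) encodes n-byte alignment, so or-ing
	 * two masks keeps the stricter requirement: with alignmask 3
	 * (4-byte) this gives (3 | 7) + 1 == 8-byte alignment. */
	static inline __be64 *align_consts(void *p, unsigned long alignmask)
	{
		return PTR_ALIGN(p, (alignmask | (__alignof__(__be64) - 1)) + 1);
	}
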
diff --git a/crypto/compress.c b/crypto/compress.c index c33f0763a956..f2d522924a07 100644 --- a/crypto/compress.c +++ b/crypto/compress.c | |||
| @@ -42,7 +42,3 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm) | |||
| 42 | 42 | ||
| 43 | return 0; | 43 | return 0; |
| 44 | } | 44 | } |
| 45 | |||
| 46 | void crypto_exit_compress_ops(struct crypto_tfm *tfm) | ||
| 47 | { | ||
| 48 | } | ||
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0c654e59f215..0508c48a45c4 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
| @@ -17,9 +17,9 @@ | |||
| 17 | * | 17 | * |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <crypto/algapi.h> | ||
| 21 | #include <crypto/internal/hash.h> | 20 | #include <crypto/internal/hash.h> |
| 22 | #include <crypto/internal/aead.h> | 21 | #include <crypto/internal/aead.h> |
| 22 | #include <crypto/internal/skcipher.h> | ||
| 23 | #include <crypto/cryptd.h> | 23 | #include <crypto/cryptd.h> |
| 24 | #include <crypto/crypto_wq.h> | 24 | #include <crypto/crypto_wq.h> |
| 25 | #include <linux/atomic.h> | 25 | #include <linux/atomic.h> |
| @@ -48,6 +48,11 @@ struct cryptd_instance_ctx { | |||
| 48 | struct cryptd_queue *queue; | 48 | struct cryptd_queue *queue; |
| 49 | }; | 49 | }; |
| 50 | 50 | ||
| 51 | struct skcipherd_instance_ctx { | ||
| 52 | struct crypto_skcipher_spawn spawn; | ||
| 53 | struct cryptd_queue *queue; | ||
| 54 | }; | ||
| 55 | |||
| 51 | struct hashd_instance_ctx { | 56 | struct hashd_instance_ctx { |
| 52 | struct crypto_shash_spawn spawn; | 57 | struct crypto_shash_spawn spawn; |
| 53 | struct cryptd_queue *queue; | 58 | struct cryptd_queue *queue; |
| @@ -67,6 +72,15 @@ struct cryptd_blkcipher_request_ctx { | |||
| 67 | crypto_completion_t complete; | 72 | crypto_completion_t complete; |
| 68 | }; | 73 | }; |
| 69 | 74 | ||
| 75 | struct cryptd_skcipher_ctx { | ||
| 76 | atomic_t refcnt; | ||
| 77 | struct crypto_skcipher *child; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct cryptd_skcipher_request_ctx { | ||
| 81 | crypto_completion_t complete; | ||
| 82 | }; | ||
| 83 | |||
| 70 | struct cryptd_hash_ctx { | 84 | struct cryptd_hash_ctx { |
| 71 | atomic_t refcnt; | 85 | atomic_t refcnt; |
| 72 | struct crypto_shash *child; | 86 | struct crypto_shash *child; |
| @@ -122,7 +136,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, | |||
| 122 | { | 136 | { |
| 123 | int cpu, err; | 137 | int cpu, err; |
| 124 | struct cryptd_cpu_queue *cpu_queue; | 138 | struct cryptd_cpu_queue *cpu_queue; |
| 125 | struct crypto_tfm *tfm; | ||
| 126 | atomic_t *refcnt; | 139 | atomic_t *refcnt; |
| 127 | bool may_backlog; | 140 | bool may_backlog; |
| 128 | 141 | ||
| @@ -141,7 +154,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, | |||
| 141 | if (!atomic_read(refcnt)) | 154 | if (!atomic_read(refcnt)) |
| 142 | goto out_put_cpu; | 155 | goto out_put_cpu; |
| 143 | 156 | ||
| 144 | tfm = request->tfm; | ||
| 145 | atomic_inc(refcnt); | 157 | atomic_inc(refcnt); |
| 146 | 158 | ||
| 147 | out_put_cpu: | 159 | out_put_cpu: |
| @@ -432,6 +444,216 @@ out_put_alg: | |||
| 432 | return err; | 444 | return err; |
| 433 | } | 445 | } |
| 434 | 446 | ||
| 447 | static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, | ||
| 448 | const u8 *key, unsigned int keylen) | ||
| 449 | { | ||
| 450 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); | ||
| 451 | struct crypto_skcipher *child = ctx->child; | ||
| 452 | int err; | ||
| 453 | |||
| 454 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
| 455 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & | ||
| 456 | CRYPTO_TFM_REQ_MASK); | ||
| 457 | err = crypto_skcipher_setkey(child, key, keylen); | ||
| 458 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & | ||
| 459 | CRYPTO_TFM_RES_MASK); | ||
| 460 | return err; | ||
| 461 | } | ||
| 462 | |||
| 463 | static void cryptd_skcipher_complete(struct skcipher_request *req, int err) | ||
| 464 | { | ||
| 465 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 466 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 467 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | ||
| 468 | int refcnt = atomic_read(&ctx->refcnt); | ||
| 469 | |||
| 470 | local_bh_disable(); | ||
| 471 | rctx->complete(&req->base, err); | ||
| 472 | local_bh_enable(); | ||
| 473 | |||
| 474 | if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) | ||
| 475 | crypto_free_skcipher(tfm); | ||
| 476 | } | ||
| 477 | |||
| 478 | static void cryptd_skcipher_encrypt(struct crypto_async_request *base, | ||
| 479 | int err) | ||
| 480 | { | ||
| 481 | struct skcipher_request *req = skcipher_request_cast(base); | ||
| 482 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | ||
| 483 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 484 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 485 | struct crypto_skcipher *child = ctx->child; | ||
| 486 | SKCIPHER_REQUEST_ON_STACK(subreq, child); | ||
| 487 | |||
| 488 | if (unlikely(err == -EINPROGRESS)) | ||
| 489 | goto out; | ||
| 490 | |||
| 491 | skcipher_request_set_tfm(subreq, child); | ||
| 492 | skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 493 | NULL, NULL); | ||
| 494 | skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | ||
| 495 | req->iv); | ||
| 496 | |||
| 497 | err = crypto_skcipher_encrypt(subreq); | ||
| 498 | skcipher_request_zero(subreq); | ||
| 499 | |||
| 500 | req->base.complete = rctx->complete; | ||
| 501 | |||
| 502 | out: | ||
| 503 | cryptd_skcipher_complete(req, err); | ||
| 504 | } | ||
| 505 | |||
| 506 | static void cryptd_skcipher_decrypt(struct crypto_async_request *base, | ||
| 507 | int err) | ||
| 508 | { | ||
| 509 | struct skcipher_request *req = skcipher_request_cast(base); | ||
| 510 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | ||
| 511 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 512 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 513 | struct crypto_skcipher *child = ctx->child; | ||
| 514 | SKCIPHER_REQUEST_ON_STACK(subreq, child); | ||
| 515 | |||
| 516 | if (unlikely(err == -EINPROGRESS)) | ||
| 517 | goto out; | ||
| 518 | |||
| 519 | skcipher_request_set_tfm(subreq, child); | ||
| 520 | skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 521 | NULL, NULL); | ||
| 522 | skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | ||
| 523 | req->iv); | ||
| 524 | |||
| 525 | err = crypto_skcipher_decrypt(subreq); | ||
| 526 | skcipher_request_zero(subreq); | ||
| 527 | |||
| 528 | req->base.complete = rctx->complete; | ||
| 529 | |||
| 530 | out: | ||
| 531 | cryptd_skcipher_complete(req, err); | ||
| 532 | } | ||
| 533 | |||
| 534 | static int cryptd_skcipher_enqueue(struct skcipher_request *req, | ||
| 535 | crypto_completion_t compl) | ||
| 536 | { | ||
| 537 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | ||
| 538 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 539 | struct cryptd_queue *queue; | ||
| 540 | |||
| 541 | queue = cryptd_get_queue(crypto_skcipher_tfm(tfm)); | ||
| 542 | rctx->complete = req->base.complete; | ||
| 543 | req->base.complete = compl; | ||
| 544 | |||
| 545 | return cryptd_enqueue_request(queue, &req->base); | ||
| 546 | } | ||
| 547 | |||
| 548 | static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req) | ||
| 549 | { | ||
| 550 | return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt); | ||
| 551 | } | ||
| 552 | |||
| 553 | static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req) | ||
| 554 | { | ||
| 555 | return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt); | ||
| 556 | } | ||
| 557 | |||
| 558 | static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) | ||
| 559 | { | ||
| 560 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); | ||
| 561 | struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst); | ||
| 562 | struct crypto_skcipher_spawn *spawn = &ictx->spawn; | ||
| 563 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 564 | struct crypto_skcipher *cipher; | ||
| 565 | |||
| 566 | cipher = crypto_spawn_skcipher(spawn); | ||
| 567 | if (IS_ERR(cipher)) | ||
| 568 | return PTR_ERR(cipher); | ||
| 569 | |||
| 570 | ctx->child = cipher; | ||
| 571 | crypto_skcipher_set_reqsize( | ||
| 572 | tfm, sizeof(struct cryptd_skcipher_request_ctx)); | ||
| 573 | return 0; | ||
| 574 | } | ||
| 575 | |||
| 576 | static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) | ||
| 577 | { | ||
| 578 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 579 | |||
| 580 | crypto_free_skcipher(ctx->child); | ||
| 581 | } | ||
| 582 | |||
| 583 | static void cryptd_skcipher_free(struct skcipher_instance *inst) | ||
| 584 | { | ||
| 585 | struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); | ||
| 586 | |||
| 587 | crypto_drop_skcipher(&ctx->spawn); | ||
| 588 | } | ||
| 589 | |||
| 590 | static int cryptd_create_skcipher(struct crypto_template *tmpl, | ||
| 591 | struct rtattr **tb, | ||
| 592 | struct cryptd_queue *queue) | ||
| 593 | { | ||
| 594 | struct skcipherd_instance_ctx *ctx; | ||
| 595 | struct skcipher_instance *inst; | ||
| 596 | struct skcipher_alg *alg; | ||
| 597 | const char *name; | ||
| 598 | u32 type; | ||
| 599 | u32 mask; | ||
| 600 | int err; | ||
| 601 | |||
| 602 | type = 0; | ||
| 603 | mask = CRYPTO_ALG_ASYNC; | ||
| 604 | |||
| 605 | cryptd_check_internal(tb, &type, &mask); | ||
| 606 | |||
| 607 | name = crypto_attr_alg_name(tb[1]); | ||
| 608 | if (IS_ERR(name)) | ||
| 609 | return PTR_ERR(name); | ||
| 610 | |||
| 611 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
| 612 | if (!inst) | ||
| 613 | return -ENOMEM; | ||
| 614 | |||
| 615 | ctx = skcipher_instance_ctx(inst); | ||
| 616 | ctx->queue = queue; | ||
| 617 | |||
| 618 | crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); | ||
| 619 | err = crypto_grab_skcipher(&ctx->spawn, name, type, mask); | ||
| 620 | if (err) | ||
| 621 | goto out_free_inst; | ||
| 622 | |||
| 623 | alg = crypto_spawn_skcipher_alg(&ctx->spawn); | ||
| 624 | err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); | ||
| 625 | if (err) | ||
| 626 | goto out_drop_skcipher; | ||
| 627 | |||
| 628 | inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | | ||
| 629 | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); | ||
| 630 | |||
| 631 | inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); | ||
| 632 | inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); | ||
| 633 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); | ||
| 634 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); | ||
| 635 | |||
| 636 | inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); | ||
| 637 | |||
| 638 | inst->alg.init = cryptd_skcipher_init_tfm; | ||
| 639 | inst->alg.exit = cryptd_skcipher_exit_tfm; | ||
| 640 | |||
| 641 | inst->alg.setkey = cryptd_skcipher_setkey; | ||
| 642 | inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue; | ||
| 643 | inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue; | ||
| 644 | |||
| 645 | inst->free = cryptd_skcipher_free; | ||
| 646 | |||
| 647 | err = skcipher_register_instance(tmpl, inst); | ||
| 648 | if (err) { | ||
| 649 | out_drop_skcipher: | ||
| 650 | crypto_drop_skcipher(&ctx->spawn); | ||
| 651 | out_free_inst: | ||
| 652 | kfree(inst); | ||
| 653 | } | ||
| 654 | return err; | ||
| 655 | } | ||
| 656 | |||
| 435 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 657 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
| 436 | { | 658 | { |
| 437 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 659 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
| @@ -895,7 +1117,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 895 | 1117 | ||
| 896 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 1118 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
| 897 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 1119 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
| 898 | return cryptd_create_blkcipher(tmpl, tb, &queue); | 1120 | if ((algt->type & CRYPTO_ALG_TYPE_MASK) == |
| 1121 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
| 1122 | return cryptd_create_blkcipher(tmpl, tb, &queue); | ||
| 1123 | |||
| 1124 | return cryptd_create_skcipher(tmpl, tb, &queue); | ||
| 899 | case CRYPTO_ALG_TYPE_DIGEST: | 1125 | case CRYPTO_ALG_TYPE_DIGEST: |
| 900 | return cryptd_create_hash(tmpl, tb, &queue); | 1126 | return cryptd_create_hash(tmpl, tb, &queue); |
| 901 | case CRYPTO_ALG_TYPE_AEAD: | 1127 | case CRYPTO_ALG_TYPE_AEAD: |
| @@ -985,6 +1211,58 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | |||
| 985 | } | 1211 | } |
| 986 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 1212 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
| 987 | 1213 | ||
| 1214 | struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, | ||
| 1215 | u32 type, u32 mask) | ||
| 1216 | { | ||
| 1217 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
| 1218 | struct cryptd_skcipher_ctx *ctx; | ||
| 1219 | struct crypto_skcipher *tfm; | ||
| 1220 | |||
| 1221 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
| 1222 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
| 1223 | return ERR_PTR(-EINVAL); | ||
| 1224 | |||
| 1225 | tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask); | ||
| 1226 | if (IS_ERR(tfm)) | ||
| 1227 | return ERR_CAST(tfm); | ||
| 1228 | |||
| 1229 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
| 1230 | crypto_free_skcipher(tfm); | ||
| 1231 | return ERR_PTR(-EINVAL); | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | ctx = crypto_skcipher_ctx(tfm); | ||
| 1235 | atomic_set(&ctx->refcnt, 1); | ||
| 1236 | |||
| 1237 | return container_of(tfm, struct cryptd_skcipher, base); | ||
| 1238 | } | ||
| 1239 | EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher); | ||
| 1240 | |||
| 1241 | struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) | ||
| 1242 | { | ||
| 1243 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | ||
| 1244 | |||
| 1245 | return ctx->child; | ||
| 1246 | } | ||
| 1247 | EXPORT_SYMBOL_GPL(cryptd_skcipher_child); | ||
| 1248 | |||
| 1249 | bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) | ||
| 1250 | { | ||
| 1251 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | ||
| 1252 | |||
| 1253 | return atomic_read(&ctx->refcnt) - 1; | ||
| 1254 | } | ||
| 1255 | EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); | ||
| 1256 | |||
| 1257 | void cryptd_free_skcipher(struct cryptd_skcipher *tfm) | ||
| 1258 | { | ||
| 1259 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | ||
| 1260 | |||
| 1261 | if (atomic_dec_and_test(&ctx->refcnt)) | ||
| 1262 | crypto_free_skcipher(&tfm->base); | ||
| 1263 | } | ||
| 1264 | EXPORT_SYMBOL_GPL(cryptd_free_skcipher); | ||
| 1265 | |||
| 988 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | 1266 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, |
| 989 | u32 type, u32 mask) | 1267 | u32 type, u32 mask) |
| 990 | { | 1268 | { |
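
Taken together, the cryptd additions above give skcipher the same deferral machinery ablkcipher already had: a cryptd(...) instance queues requests to the per-CPU cryptd queue, and the exported helpers let a driver hold the async wrapper while talking to the child tfm directly when that is safe. A hedged usage sketch (names are illustrative; assumes the matching declarations land in crypto/cryptd.h as part of this series):

	#include <crypto/cryptd.h>
	#include <linux/err.h>

	/* A SIMD-style driver wraps its internal implementation in
	 * cryptd: requests issued where the FPU is unusable get
	 * queued; elsewhere the child tfm can be driven directly. */
	static struct cryptd_skcipher *example_setup(const u8 *key,
						     unsigned int keylen)
	{
		struct cryptd_skcipher *ctfm;
		int err;

		ctfm = cryptd_alloc_skcipher("__cbc-aes-example",
					     CRYPTO_ALG_INTERNAL,
					     CRYPTO_ALG_INTERNAL);
		if (IS_ERR(ctfm))
			return ctfm;

		/* forwards to the child via cryptd_skcipher_setkey() */
		err = crypto_skcipher_setkey(&ctfm->base, key, keylen);
		if (err) {
			cryptd_free_skcipher(ctfm);
			return ERR_PTR(err);
		}

		return ctfm;
	}
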
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 6989ba0046df..f1bf3418d968 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c | |||
| @@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
| 47 | 47 | ||
| 48 | /* If another context is idling then defer */ | 48 | /* If another context is idling then defer */ |
| 49 | if (engine->idling) { | 49 | if (engine->idling) { |
| 50 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 50 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
| 51 | goto out; | 51 | goto out; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| @@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
| 58 | 58 | ||
| 59 | /* Only do teardown in the thread */ | 59 | /* Only do teardown in the thread */ |
| 60 | if (!in_kthread) { | 60 | if (!in_kthread) { |
| 61 | kthread_queue_work(&engine->kworker, | 61 | kthread_queue_work(engine->kworker, |
| 62 | &engine->pump_requests); | 62 | &engine->pump_requests); |
| 63 | goto out; | 63 | goto out; |
| 64 | } | 64 | } |
| @@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine, | |||
| 189 | ret = ablkcipher_enqueue_request(&engine->queue, req); | 189 | ret = ablkcipher_enqueue_request(&engine->queue, req); |
| 190 | 190 | ||
| 191 | if (!engine->busy && need_pump) | 191 | if (!engine->busy && need_pump) |
| 192 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 192 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
| 193 | 193 | ||
| 194 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 194 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
| 195 | return ret; | 195 | return ret; |
| @@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine, | |||
| 231 | ret = ahash_enqueue_request(&engine->queue, req); | 231 | ret = ahash_enqueue_request(&engine->queue, req); |
| 232 | 232 | ||
| 233 | if (!engine->busy && need_pump) | 233 | if (!engine->busy && need_pump) |
| 234 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 234 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
| 235 | 235 | ||
| 236 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 236 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
| 237 | return ret; | 237 | return ret; |
| @@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine, | |||
| 284 | 284 | ||
| 285 | req->base.complete(&req->base, err); | 285 | req->base.complete(&req->base, err); |
| 286 | 286 | ||
| 287 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 287 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
| 288 | } | 288 | } |
| 289 | EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); | 289 | EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); |
| 290 | 290 | ||
| @@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, | |||
| 321 | 321 | ||
| 322 | req->base.complete(&req->base, err); | 322 | req->base.complete(&req->base, err); |
| 323 | 323 | ||
| 324 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 324 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
| 325 | } | 325 | } |
| 326 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); | 326 | EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); |
| 327 | 327 | ||
| @@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine) | |||
| 345 | engine->running = true; | 345 | engine->running = true; |
| 346 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 346 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
| 347 | 347 | ||
| 348 | kthread_queue_work(&engine->kworker, &engine->pump_requests); | 348 | kthread_queue_work(engine->kworker, &engine->pump_requests); |
| 349 | 349 | ||
| 350 | return 0; | 350 | return 0; |
| 351 | } | 351 | } |
| @@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
| 422 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); | 422 | crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); |
| 423 | spin_lock_init(&engine->queue_lock); | 423 | spin_lock_init(&engine->queue_lock); |
| 424 | 424 | ||
| 425 | kthread_init_worker(&engine->kworker); | 425 | engine->kworker = kthread_create_worker(0, "%s", engine->name); |
| 426 | engine->kworker_task = kthread_run(kthread_worker_fn, | 426 | if (IS_ERR(engine->kworker)) { |
| 427 | &engine->kworker, "%s", | ||
| 428 | engine->name); | ||
| 429 | if (IS_ERR(engine->kworker_task)) { | ||
| 430 | dev_err(dev, "failed to create crypto request pump task\n"); | 427 | dev_err(dev, "failed to create crypto request pump task\n"); |
| 431 | return NULL; | 428 | return NULL; |
| 432 | } | 429 | } |
| @@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
| 434 | 431 | ||
| 435 | if (engine->rt) { | 432 | if (engine->rt) { |
| 436 | dev_info(dev, "will run requests pump with realtime priority\n"); | 433 | dev_info(dev, "will run requests pump with realtime priority\n"); |
| 437 | sched_setscheduler(engine->kworker_task, SCHED_FIFO, ¶m); | 434 | sched_setscheduler(engine->kworker->task, SCHED_FIFO, ¶m); |
| 438 | } | 435 | } |
| 439 | 436 | ||
| 440 | return engine; | 437 | return engine; |
| @@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine) | |||
| 455 | if (ret) | 452 | if (ret) |
| 456 | return ret; | 453 | return ret; |
| 457 | 454 | ||
| 458 | kthread_flush_worker(&engine->kworker); | 455 | kthread_destroy_worker(engine->kworker); |
| 459 | kthread_stop(engine->kworker_task); | ||
| 460 | 456 | ||
| 461 | return 0; | 457 | return 0; |
| 462 | } | 458 | } |
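
The crypto_engine conversion replaces the embedded worker plus the kthread_init_worker()/kthread_run(kthread_worker_fn, ...) pair with the dedicated kthread_worker API: the engine now holds a struct kthread_worker * and teardown collapses into kthread_destroy_worker(), which flushes pending work and stops the thread. The new API in isolation, as a minimal sketch (generic kernel code, not specific to crypto_engine):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static void pump_fn(struct kthread_work *work)
	{
		/* deferred processing runs in the worker's thread */
	}

	static int worker_demo(void)
	{
		struct kthread_worker *worker;
		struct kthread_work work;

		worker = kthread_create_worker(0, "demo-pump");
		if (IS_ERR(worker))
			return PTR_ERR(worker);

		kthread_init_work(&work, pump_fn);
		kthread_queue_work(worker, &work);

		/* flushes outstanding work, then stops the thread */
		kthread_destroy_worker(worker);
		return 0;
	}
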
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 1c5705481c69..a90404a0c5ff 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c | |||
| @@ -112,6 +112,21 @@ nla_put_failure: | |||
| 112 | return -EMSGSIZE; | 112 | return -EMSGSIZE; |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 116 | { | ||
| 117 | struct crypto_report_acomp racomp; | ||
| 118 | |||
| 119 | strncpy(racomp.type, "acomp", sizeof(racomp.type)); | ||
| 120 | |||
| 121 | if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, | ||
| 122 | sizeof(struct crypto_report_acomp), &racomp)) | ||
| 123 | goto nla_put_failure; | ||
| 124 | return 0; | ||
| 125 | |||
| 126 | nla_put_failure: | ||
| 127 | return -EMSGSIZE; | ||
| 128 | } | ||
| 129 | |||
| 115 | static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) | 130 | static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) |
| 116 | { | 131 | { |
| 117 | struct crypto_report_akcipher rakcipher; | 132 | struct crypto_report_akcipher rakcipher; |
| @@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg, | |||
| 186 | goto nla_put_failure; | 201 | goto nla_put_failure; |
| 187 | 202 | ||
| 188 | break; | 203 | break; |
| 204 | case CRYPTO_ALG_TYPE_ACOMPRESS: | ||
| 205 | if (crypto_report_acomp(skb, alg)) | ||
| 206 | goto nla_put_failure; | ||
| 189 | 207 | ||
| 208 | break; | ||
| 190 | case CRYPTO_ALG_TYPE_AKCIPHER: | 209 | case CRYPTO_ALG_TYPE_AKCIPHER: |
| 191 | if (crypto_report_akcipher(skb, alg)) | 210 | if (crypto_report_akcipher(skb, alg)) |
| 192 | goto nla_put_failure; | 211 | goto nla_put_failure; |
diff --git a/crypto/ctr.c b/crypto/ctr.c index ff4d21eddb83..a9a7a44f2783 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c | |||
| @@ -312,7 +312,7 @@ static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm) | |||
| 312 | unsigned long align; | 312 | unsigned long align; |
| 313 | unsigned int reqsize; | 313 | unsigned int reqsize; |
| 314 | 314 | ||
| 315 | cipher = crypto_spawn_skcipher2(spawn); | 315 | cipher = crypto_spawn_skcipher(spawn); |
| 316 | if (IS_ERR(cipher)) | 316 | if (IS_ERR(cipher)) |
| 317 | return PTR_ERR(cipher); | 317 | return PTR_ERR(cipher); |
| 318 | 318 | ||
| @@ -370,9 +370,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, | |||
| 370 | spawn = skcipher_instance_ctx(inst); | 370 | spawn = skcipher_instance_ctx(inst); |
| 371 | 371 | ||
| 372 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | 372 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); |
| 373 | err = crypto_grab_skcipher2(spawn, cipher_name, 0, | 373 | err = crypto_grab_skcipher(spawn, cipher_name, 0, |
| 374 | crypto_requires_sync(algt->type, | 374 | crypto_requires_sync(algt->type, |
| 375 | algt->mask)); | 375 | algt->mask)); |
| 376 | if (err) | 376 | if (err) |
| 377 | goto err_free_inst; | 377 | goto err_free_inst; |
| 378 | 378 | ||
diff --git a/crypto/cts.c b/crypto/cts.c index 51976187b2bf..00254d76b21b 100644 --- a/crypto/cts.c +++ b/crypto/cts.c | |||
| @@ -290,7 +290,7 @@ static int crypto_cts_init_tfm(struct crypto_skcipher *tfm) | |||
| 290 | unsigned bsize; | 290 | unsigned bsize; |
| 291 | unsigned align; | 291 | unsigned align; |
| 292 | 292 | ||
| 293 | cipher = crypto_spawn_skcipher2(spawn); | 293 | cipher = crypto_spawn_skcipher(spawn); |
| 294 | if (IS_ERR(cipher)) | 294 | if (IS_ERR(cipher)) |
| 295 | return PTR_ERR(cipher); | 295 | return PTR_ERR(cipher); |
| 296 | 296 | ||
| @@ -348,9 +348,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 348 | spawn = skcipher_instance_ctx(inst); | 348 | spawn = skcipher_instance_ctx(inst); |
| 349 | 349 | ||
| 350 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | 350 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); |
| 351 | err = crypto_grab_skcipher2(spawn, cipher_name, 0, | 351 | err = crypto_grab_skcipher(spawn, cipher_name, 0, |
| 352 | crypto_requires_sync(algt->type, | 352 | crypto_requires_sync(algt->type, |
| 353 | algt->mask)); | 353 | algt->mask)); |
| 354 | if (err) | 354 | if (err) |
| 355 | goto err_free_inst; | 355 | goto err_free_inst; |
| 356 | 356 | ||
diff --git a/crypto/deflate.c b/crypto/deflate.c index 95d8d37c5021..f942cb391890 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
| 33 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
| 34 | #include <linux/net.h> | 34 | #include <linux/net.h> |
| 35 | #include <crypto/internal/scompress.h> | ||
| 35 | 36 | ||
| 36 | #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION | 37 | #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION |
| 37 | #define DEFLATE_DEF_WINBITS 11 | 38 | #define DEFLATE_DEF_WINBITS 11 |
| @@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx) | |||
| 101 | vfree(ctx->decomp_stream.workspace); | 102 | vfree(ctx->decomp_stream.workspace); |
| 102 | } | 103 | } |
| 103 | 104 | ||
| 104 | static int deflate_init(struct crypto_tfm *tfm) | 105 | static int __deflate_init(void *ctx) |
| 105 | { | 106 | { |
| 106 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 107 | int ret; | 107 | int ret; |
| 108 | 108 | ||
| 109 | ret = deflate_comp_init(ctx); | 109 | ret = deflate_comp_init(ctx); |
| @@ -116,19 +116,55 @@ out: | |||
| 116 | return ret; | 116 | return ret; |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | static void deflate_exit(struct crypto_tfm *tfm) | 119 | static void *deflate_alloc_ctx(struct crypto_scomp *tfm) |
| 120 | { | ||
| 121 | struct deflate_ctx *ctx; | ||
| 122 | int ret; | ||
| 123 | |||
| 124 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | ||
| 125 | if (!ctx) | ||
| 126 | return ERR_PTR(-ENOMEM); | ||
| 127 | |||
| 128 | ret = __deflate_init(ctx); | ||
| 129 | if (ret) { | ||
| 130 | kfree(ctx); | ||
| 131 | return ERR_PTR(ret); | ||
| 132 | } | ||
| 133 | |||
| 134 | return ctx; | ||
| 135 | } | ||
| 136 | |||
| 137 | static int deflate_init(struct crypto_tfm *tfm) | ||
| 120 | { | 138 | { |
| 121 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | 139 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); |
| 122 | 140 | ||
| 141 | return __deflate_init(ctx); | ||
| 142 | } | ||
| 143 | |||
| 144 | static void __deflate_exit(void *ctx) | ||
| 145 | { | ||
| 123 | deflate_comp_exit(ctx); | 146 | deflate_comp_exit(ctx); |
| 124 | deflate_decomp_exit(ctx); | 147 | deflate_decomp_exit(ctx); |
| 125 | } | 148 | } |
| 126 | 149 | ||
| 127 | static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, | 150 | static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) |
| 128 | unsigned int slen, u8 *dst, unsigned int *dlen) | 151 | { |
| 152 | __deflate_exit(ctx); | ||
| 153 | kzfree(ctx); | ||
| 154 | } | ||
| 155 | |||
| 156 | static void deflate_exit(struct crypto_tfm *tfm) | ||
| 157 | { | ||
| 158 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 159 | |||
| 160 | __deflate_exit(ctx); | ||
| 161 | } | ||
| 162 | |||
| 163 | static int __deflate_compress(const u8 *src, unsigned int slen, | ||
| 164 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 129 | { | 165 | { |
| 130 | int ret = 0; | 166 | int ret = 0; |
| 131 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | 167 | struct deflate_ctx *dctx = ctx; |
| 132 | struct z_stream_s *stream = &dctx->comp_stream; | 168 | struct z_stream_s *stream = &dctx->comp_stream; |
| 133 | 169 | ||
| 134 | ret = zlib_deflateReset(stream); | 170 | ret = zlib_deflateReset(stream); |
| @@ -153,12 +189,27 @@ out: | |||
| 153 | return ret; | 189 | return ret; |
| 154 | } | 190 | } |
| 155 | 191 | ||
| 156 | static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, | 192 | static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, |
| 157 | unsigned int slen, u8 *dst, unsigned int *dlen) | 193 | unsigned int slen, u8 *dst, unsigned int *dlen) |
| 194 | { | ||
| 195 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | ||
| 196 | |||
| 197 | return __deflate_compress(src, slen, dst, dlen, dctx); | ||
| 198 | } | ||
| 199 | |||
| 200 | static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, | ||
| 201 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 202 | void *ctx) | ||
| 203 | { | ||
| 204 | return __deflate_compress(src, slen, dst, dlen, ctx); | ||
| 205 | } | ||
| 206 | |||
| 207 | static int __deflate_decompress(const u8 *src, unsigned int slen, | ||
| 208 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 158 | { | 209 | { |
| 159 | 210 | ||
| 160 | int ret = 0; | 211 | int ret = 0; |
| 161 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | 212 | struct deflate_ctx *dctx = ctx; |
| 162 | struct z_stream_s *stream = &dctx->decomp_stream; | 213 | struct z_stream_s *stream = &dctx->decomp_stream; |
| 163 | 214 | ||
| 164 | ret = zlib_inflateReset(stream); | 215 | ret = zlib_inflateReset(stream); |
| @@ -194,6 +245,21 @@ out: | |||
| 194 | return ret; | 245 | return ret; |
| 195 | } | 246 | } |
| 196 | 247 | ||
| 248 | static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, | ||
| 249 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
| 250 | { | ||
| 251 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | ||
| 252 | |||
| 253 | return __deflate_decompress(src, slen, dst, dlen, dctx); | ||
| 254 | } | ||
| 255 | |||
| 256 | static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
| 257 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 258 | void *ctx) | ||
| 259 | { | ||
| 260 | return __deflate_decompress(src, slen, dst, dlen, ctx); | ||
| 261 | } | ||
| 262 | |||
| 197 | static struct crypto_alg alg = { | 263 | static struct crypto_alg alg = { |
| 198 | .cra_name = "deflate", | 264 | .cra_name = "deflate", |
| 199 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 265 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
| @@ -206,14 +272,39 @@ static struct crypto_alg alg = { | |||
| 206 | .coa_decompress = deflate_decompress } } | 272 | .coa_decompress = deflate_decompress } } |
| 207 | }; | 273 | }; |
| 208 | 274 | ||
| 275 | static struct scomp_alg scomp = { | ||
| 276 | .alloc_ctx = deflate_alloc_ctx, | ||
| 277 | .free_ctx = deflate_free_ctx, | ||
| 278 | .compress = deflate_scompress, | ||
| 279 | .decompress = deflate_sdecompress, | ||
| 280 | .base = { | ||
| 281 | .cra_name = "deflate", | ||
| 282 | .cra_driver_name = "deflate-scomp", | ||
| 283 | .cra_module = THIS_MODULE, | ||
| 284 | } | ||
| 285 | }; | ||
| 286 | |||
| 209 | static int __init deflate_mod_init(void) | 287 | static int __init deflate_mod_init(void) |
| 210 | { | 288 | { |
| 211 | return crypto_register_alg(&alg); | 289 | int ret; |
| 290 | |||
| 291 | ret = crypto_register_alg(&alg); | ||
| 292 | if (ret) | ||
| 293 | return ret; | ||
| 294 | |||
| 295 | ret = crypto_register_scomp(&scomp); | ||
| 296 | if (ret) { | ||
| 297 | crypto_unregister_alg(&alg); | ||
| 298 | return ret; | ||
| 299 | } | ||
| 300 | |||
| 301 | return ret; | ||
| 212 | } | 302 | } |
| 213 | 303 | ||
| 214 | static void __exit deflate_mod_fini(void) | 304 | static void __exit deflate_mod_fini(void) |
| 215 | { | 305 | { |
| 216 | crypto_unregister_alg(&alg); | 306 | crypto_unregister_alg(&alg); |
| 307 | crypto_unregister_scomp(&scomp); | ||
| 217 | } | 308 | } |
| 218 | 309 | ||
| 219 | module_init(deflate_mod_init); | 310 | module_init(deflate_mod_init); |
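
After this change deflate is registered twice: through the legacy crypto_comp interface as before, and as "deflate-scomp", a synchronous scomp_alg whose working state is supplied per call through alloc_ctx()/free_ctx() instead of living in the tfm. The scomp registration is what lets deflate back the new acomp user-facing interface; a hedged caller sketch (assumes the crypto/acompress.h API from the same series and synchronous completion):

	#include <crypto/acompress.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int deflate_compress_once(struct scatterlist *src,
					 unsigned int slen,
					 struct scatterlist *dst,
					 unsigned int dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		int err;

		tfm = crypto_alloc_acomp("deflate", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		acomp_request_set_params(req, src, dst, slen, dlen);
		err = crypto_acomp_compress(req);

		acomp_request_free(req);
	out_tfm:
		crypto_free_acomp(tfm);
		return err;
	}
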
diff --git a/crypto/dh.c b/crypto/dh.c index 9d19360e7189..ddcb528ab2cc 100644 --- a/crypto/dh.c +++ b/crypto/dh.c | |||
| @@ -118,7 +118,7 @@ static int dh_compute_value(struct kpp_request *req) | |||
| 118 | if (req->src) { | 118 | if (req->src) { |
| 119 | base = mpi_read_raw_from_sgl(req->src, req->src_len); | 119 | base = mpi_read_raw_from_sgl(req->src, req->src_len); |
| 120 | if (!base) { | 120 | if (!base) { |
| 121 | ret = EINVAL; | 121 | ret = -EINVAL; |
| 122 | goto err_free_val; | 122 | goto err_free_val; |
| 123 | } | 123 | } |
| 124 | } else { | 124 | } else { |
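
The dh.c hunk is a one-character fix with outsized consequences: kernel error codes are negative, so returning a bare EINVAL (the positive value 22) looks like success to any caller that only tests ret < 0, and the MPI allocation failure would go unnoticed. A userspace sketch of why the sign matters; broken() and fixed() are illustrative names:

    #include <errno.h>
    #include <stdio.h>

    static int broken(void) { return EINVAL; }   /* positive: the bug */
    static int fixed(void)  { return -EINVAL; }  /* negative: the fix */

    int main(void)
    {
            /* Callers conventionally test "ret < 0", so the positive
             * code slips through as if the call had succeeded. */
            printf("broken() flagged as error: %s\n",
                   broken() < 0 ? "yes" : "no");   /* no  */
            printf("fixed() flagged as error:  %s\n",
                   fixed() < 0 ? "yes" : "no");    /* yes */
            return 0;
    }
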
diff --git a/crypto/drbg.c b/crypto/drbg.c index 053035b5c8f8..8a4d98b4adba 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
| @@ -1782,6 +1782,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, | |||
| 1782 | memcpy(outbuf, drbg->outscratchpad, cryptlen); | 1782 | memcpy(outbuf, drbg->outscratchpad, cryptlen); |
| 1783 | 1783 | ||
| 1784 | outlen -= cryptlen; | 1784 | outlen -= cryptlen; |
| 1785 | outbuf += cryptlen; | ||
| 1785 | } | 1786 | } |
| 1786 | ret = 0; | 1787 | ret = 0; |
| 1787 | 1788 | ||
diff --git a/crypto/gcm.c b/crypto/gcm.c index f624ac98c94e..b7ad808be3d4 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
| @@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm) | |||
| 575 | if (IS_ERR(ghash)) | 575 | if (IS_ERR(ghash)) |
| 576 | return PTR_ERR(ghash); | 576 | return PTR_ERR(ghash); |
| 577 | 577 | ||
| 578 | ctr = crypto_spawn_skcipher2(&ictx->ctr); | 578 | ctr = crypto_spawn_skcipher(&ictx->ctr); |
| 579 | err = PTR_ERR(ctr); | 579 | err = PTR_ERR(ctr); |
| 580 | if (IS_ERR(ctr)) | 580 | if (IS_ERR(ctr)) |
| 581 | goto err_free_hash; | 581 | goto err_free_hash; |
| @@ -663,20 +663,20 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, | |||
| 663 | goto err_drop_ghash; | 663 | goto err_drop_ghash; |
| 664 | 664 | ||
| 665 | crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); | 665 | crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); |
| 666 | err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0, | 666 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
| 667 | crypto_requires_sync(algt->type, | 667 | crypto_requires_sync(algt->type, |
| 668 | algt->mask)); | 668 | algt->mask)); |
| 669 | if (err) | 669 | if (err) |
| 670 | goto err_drop_ghash; | 670 | goto err_drop_ghash; |
| 671 | 671 | ||
| 672 | ctr = crypto_spawn_skcipher_alg(&ctx->ctr); | 672 | ctr = crypto_spawn_skcipher_alg(&ctx->ctr); |
| 673 | 673 | ||
| 674 | /* We only support 16-byte blocks. */ | 674 | /* We only support 16-byte blocks. */ |
| 675 | err = -EINVAL; | ||
| 675 | if (crypto_skcipher_alg_ivsize(ctr) != 16) | 676 | if (crypto_skcipher_alg_ivsize(ctr) != 16) |
| 676 | goto out_put_ctr; | 677 | goto out_put_ctr; |
| 677 | 678 | ||
| 678 | /* Not a stream cipher? */ | 679 | /* Not a stream cipher? */ |
| 679 | err = -EINVAL; | ||
| 680 | if (ctr->base.cra_blocksize != 1) | 680 | if (ctr->base.cra_blocksize != 1) |
| 681 | goto out_put_ctr; | 681 | goto out_put_ctr; |
| 682 | 682 | ||
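
The gcm.c reordering matters because err still held 0 from the successful crypto_grab_skcipher() call: if the IV-size check fired first, the function could bail out reporting success while the instance was being torn down. Presetting err = -EINVAL above the first check makes both sanity checks fail loudly. A compact sketch of the preset-then-goto idiom; validate() is an illustrative name:

    #include <errno.h>
    #include <stdio.h>

    static int validate(int ivsize, int blocksize)
    {
            int err;

            /* Preset the failure code so every early exit below
             * reports -EINVAL rather than a stale value. */
            err = -EINVAL;
            if (ivsize != 16)
                    goto out;       /* not a 16-byte IV    */
            if (blocksize != 1)
                    goto out;       /* not a stream cipher */

            err = 0;
    out:
            return err;
    }

    int main(void)
    {
            printf("%d %d\n", validate(16, 1), validate(12, 1)); /* 0 -22 */
            return 0;
    }
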
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 5276607c72d0..72015fee533d 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c | |||
| @@ -263,48 +263,6 @@ EXPORT_SYMBOL(gf128mul_bbe); | |||
| 263 | * t[1][BYTE] contains g*x^8*BYTE | 263 | * t[1][BYTE] contains g*x^8*BYTE |
| 264 | * .. | 264 | * .. |
| 265 | * t[15][BYTE] contains g*x^120*BYTE */ | 265 | * t[15][BYTE] contains g*x^120*BYTE */ |
| 266 | struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g) | ||
| 267 | { | ||
| 268 | struct gf128mul_64k *t; | ||
| 269 | int i, j, k; | ||
| 270 | |||
| 271 | t = kzalloc(sizeof(*t), GFP_KERNEL); | ||
| 272 | if (!t) | ||
| 273 | goto out; | ||
| 274 | |||
| 275 | for (i = 0; i < 16; i++) { | ||
| 276 | t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL); | ||
| 277 | if (!t->t[i]) { | ||
| 278 | gf128mul_free_64k(t); | ||
| 279 | t = NULL; | ||
| 280 | goto out; | ||
| 281 | } | ||
| 282 | } | ||
| 283 | |||
| 284 | t->t[0]->t[128] = *g; | ||
| 285 | for (j = 64; j > 0; j >>= 1) | ||
| 286 | gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]); | ||
| 287 | |||
| 288 | for (i = 0;;) { | ||
| 289 | for (j = 2; j < 256; j += j) | ||
| 290 | for (k = 1; k < j; ++k) | ||
| 291 | be128_xor(&t->t[i]->t[j + k], | ||
| 292 | &t->t[i]->t[j], &t->t[i]->t[k]); | ||
| 293 | |||
| 294 | if (++i >= 16) | ||
| 295 | break; | ||
| 296 | |||
| 297 | for (j = 128; j > 0; j >>= 1) { | ||
| 298 | t->t[i]->t[j] = t->t[i - 1]->t[j]; | ||
| 299 | gf128mul_x8_lle(&t->t[i]->t[j]); | ||
| 300 | } | ||
| 301 | } | ||
| 302 | |||
| 303 | out: | ||
| 304 | return t; | ||
| 305 | } | ||
| 306 | EXPORT_SYMBOL(gf128mul_init_64k_lle); | ||
| 307 | |||
| 308 | struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g) | 266 | struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g) |
| 309 | { | 267 | { |
| 310 | struct gf128mul_64k *t; | 268 | struct gf128mul_64k *t; |
| @@ -352,24 +310,11 @@ void gf128mul_free_64k(struct gf128mul_64k *t) | |||
| 352 | int i; | 310 | int i; |
| 353 | 311 | ||
| 354 | for (i = 0; i < 16; i++) | 312 | for (i = 0; i < 16; i++) |
| 355 | kfree(t->t[i]); | 313 | kzfree(t->t[i]); |
| 356 | kfree(t); | 314 | kzfree(t); |
| 357 | } | 315 | } |
| 358 | EXPORT_SYMBOL(gf128mul_free_64k); | 316 | EXPORT_SYMBOL(gf128mul_free_64k); |
| 359 | 317 | ||
| 360 | void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t) | ||
| 361 | { | ||
| 362 | u8 *ap = (u8 *)a; | ||
| 363 | be128 r[1]; | ||
| 364 | int i; | ||
| 365 | |||
| 366 | *r = t->t[0]->t[ap[0]]; | ||
| 367 | for (i = 1; i < 16; ++i) | ||
| 368 | be128_xor(r, r, &t->t[i]->t[ap[i]]); | ||
| 369 | *a = *r; | ||
| 370 | } | ||
| 371 | EXPORT_SYMBOL(gf128mul_64k_lle); | ||
| 372 | |||
| 373 | void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t) | 318 | void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t) |
| 374 | { | 319 | { |
| 375 | u8 *ap = (u8 *)a; | 320 | u8 *ap = (u8 *)a; |
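
Two independent gf128mul.c cleanups land here: the unused _lle 64k variants are deleted outright, and the surviving tables are released with kzfree() rather than kfree(). The tables are derived directly from the key, so wiping them before the pages return to the allocator keeps key material from lingering in freed memory. A userspace sketch of the wipe-before-free idea; wipe_free() is an illustrative stand-in for kzfree(), and note that a plain memset before free can be elided by the compiler, which is why hardened userspace code reaches for explicit_bzero() instead:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative kzfree() analogue: scrub, then release. */
    static void wipe_free(void *p, size_t len)
    {
            if (!p)
                    return;
            memset(p, 0, len);  /* scrub key-derived contents */
            free(p);
    }

    int main(void)
    {
            unsigned char *table = malloc(4096);

            if (!table)
                    return 1;
            memset(table, 0xAA, 4096);  /* pretend: multiplication table */
            wipe_free(table, 4096);
            return 0;
    }
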
diff --git a/crypto/internal.h b/crypto/internal.h index 7eefcdb00227..f07320423191 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
| @@ -76,9 +76,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); | |||
| 76 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); | 76 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); |
| 77 | int crypto_init_compress_ops(struct crypto_tfm *tfm); | 77 | int crypto_init_compress_ops(struct crypto_tfm *tfm); |
| 78 | 78 | ||
| 79 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); | ||
| 80 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); | ||
| 81 | |||
| 82 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); | 79 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); |
| 83 | void crypto_larval_kill(struct crypto_alg *alg); | 80 | void crypto_larval_kill(struct crypto_alg *alg); |
| 84 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); | 81 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); |
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index c4938497eedb..787dccca3715 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | 39 | ||
| 40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
| 41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
| 42 | #include <linux/module.h> | ||
| 43 | #include <linux/fips.h> | 42 | #include <linux/fips.h> |
| 44 | #include <linux/time.h> | 43 | #include <linux/time.h> |
| 45 | #include <linux/crypto.h> | 44 | #include <linux/crypto.h> |
diff --git a/crypto/lrw.c b/crypto/lrw.c index 6f9908a7ebcb..ecd8474018e3 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c | |||
| @@ -17,7 +17,8 @@ | |||
| 17 | * | 17 | * |
| 18 | * The test vectors are included in the testing module tcrypt.[ch] */ | 18 | * The test vectors are included in the testing module tcrypt.[ch] */ |
| 19 | 19 | ||
| 20 | #include <crypto/algapi.h> | 20 | #include <crypto/internal/skcipher.h> |
| 21 | #include <crypto/scatterwalk.h> | ||
| 21 | #include <linux/err.h> | 22 | #include <linux/err.h> |
| 22 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
| @@ -29,11 +30,30 @@ | |||
| 29 | #include <crypto/gf128mul.h> | 30 | #include <crypto/gf128mul.h> |
| 30 | #include <crypto/lrw.h> | 31 | #include <crypto/lrw.h> |
| 31 | 32 | ||
| 33 | #define LRW_BUFFER_SIZE 128u | ||
| 34 | |||
| 32 | struct priv { | 35 | struct priv { |
| 33 | struct crypto_cipher *child; | 36 | struct crypto_skcipher *child; |
| 34 | struct lrw_table_ctx table; | 37 | struct lrw_table_ctx table; |
| 35 | }; | 38 | }; |
| 36 | 39 | ||
| 40 | struct rctx { | ||
| 41 | be128 buf[LRW_BUFFER_SIZE / sizeof(be128)]; | ||
| 42 | |||
| 43 | be128 t; | ||
| 44 | |||
| 45 | be128 *ext; | ||
| 46 | |||
| 47 | struct scatterlist srcbuf[2]; | ||
| 48 | struct scatterlist dstbuf[2]; | ||
| 49 | struct scatterlist *src; | ||
| 50 | struct scatterlist *dst; | ||
| 51 | |||
| 52 | unsigned int left; | ||
| 53 | |||
| 54 | struct skcipher_request subreq; | ||
| 55 | }; | ||
| 56 | |||
| 37 | static inline void setbit128_bbe(void *b, int bit) | 57 | static inline void setbit128_bbe(void *b, int bit) |
| 38 | { | 58 | { |
| 39 | __set_bit(bit ^ (0x80 - | 59 | __set_bit(bit ^ (0x80 - |
| @@ -76,32 +96,26 @@ void lrw_free_table(struct lrw_table_ctx *ctx) | |||
| 76 | } | 96 | } |
| 77 | EXPORT_SYMBOL_GPL(lrw_free_table); | 97 | EXPORT_SYMBOL_GPL(lrw_free_table); |
| 78 | 98 | ||
| 79 | static int setkey(struct crypto_tfm *parent, const u8 *key, | 99 | static int setkey(struct crypto_skcipher *parent, const u8 *key, |
| 80 | unsigned int keylen) | 100 | unsigned int keylen) |
| 81 | { | 101 | { |
| 82 | struct priv *ctx = crypto_tfm_ctx(parent); | 102 | struct priv *ctx = crypto_skcipher_ctx(parent); |
| 83 | struct crypto_cipher *child = ctx->child; | 103 | struct crypto_skcipher *child = ctx->child; |
| 84 | int err, bsize = LRW_BLOCK_SIZE; | 104 | int err, bsize = LRW_BLOCK_SIZE; |
| 85 | const u8 *tweak = key + keylen - bsize; | 105 | const u8 *tweak = key + keylen - bsize; |
| 86 | 106 | ||
| 87 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 107 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
| 88 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 108 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
| 89 | CRYPTO_TFM_REQ_MASK); | 109 | CRYPTO_TFM_REQ_MASK); |
| 90 | err = crypto_cipher_setkey(child, key, keylen - bsize); | 110 | err = crypto_skcipher_setkey(child, key, keylen - bsize); |
| 111 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & | ||
| 112 | CRYPTO_TFM_RES_MASK); | ||
| 91 | if (err) | 113 | if (err) |
| 92 | return err; | 114 | return err; |
| 93 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | ||
| 94 | CRYPTO_TFM_RES_MASK); | ||
| 95 | 115 | ||
| 96 | return lrw_init_table(&ctx->table, tweak); | 116 | return lrw_init_table(&ctx->table, tweak); |
| 97 | } | 117 | } |
| 98 | 118 | ||
| 99 | struct sinfo { | ||
| 100 | be128 t; | ||
| 101 | struct crypto_tfm *tfm; | ||
| 102 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); | ||
| 103 | }; | ||
| 104 | |||
| 105 | static inline void inc(be128 *iv) | 119 | static inline void inc(be128 *iv) |
| 106 | { | 120 | { |
| 107 | be64_add_cpu(&iv->b, 1); | 121 | be64_add_cpu(&iv->b, 1); |
| @@ -109,13 +123,6 @@ static inline void inc(be128 *iv) | |||
| 109 | be64_add_cpu(&iv->a, 1); | 123 | be64_add_cpu(&iv->a, 1); |
| 110 | } | 124 | } |
| 111 | 125 | ||
| 112 | static inline void lrw_round(struct sinfo *s, void *dst, const void *src) | ||
| 113 | { | ||
| 114 | be128_xor(dst, &s->t, src); /* PP <- T xor P */ | ||
| 115 | s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ | ||
| 116 | be128_xor(dst, dst, &s->t); /* C <- T xor CC */ | ||
| 117 | } | ||
| 118 | |||
| 119 | /* this returns the number of consecutive 1 bits starting | 126 | /* this returns the number of consecutive 1 bits starting |
| 120 | * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ | 127 | * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ |
| 121 | static inline int get_index128(be128 *block) | 128 | static inline int get_index128(be128 *block) |
| @@ -135,83 +142,263 @@ static inline int get_index128(be128 *block) | |||
| 135 | return x; | 142 | return x; |
| 136 | } | 143 | } |
| 137 | 144 | ||
| 138 | static int crypt(struct blkcipher_desc *d, | 145 | static int post_crypt(struct skcipher_request *req) |
| 139 | struct blkcipher_walk *w, struct priv *ctx, | ||
| 140 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) | ||
| 141 | { | 146 | { |
| 147 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 148 | be128 *buf = rctx->ext ?: rctx->buf; | ||
| 149 | struct skcipher_request *subreq; | ||
| 150 | const int bs = LRW_BLOCK_SIZE; | ||
| 151 | struct skcipher_walk w; | ||
| 152 | struct scatterlist *sg; | ||
| 153 | unsigned offset; | ||
| 142 | int err; | 154 | int err; |
| 143 | unsigned int avail; | 155 | |
| 156 | subreq = &rctx->subreq; | ||
| 157 | err = skcipher_walk_virt(&w, subreq, false); | ||
| 158 | |||
| 159 | while (w.nbytes) { | ||
| 160 | unsigned int avail = w.nbytes; | ||
| 161 | be128 *wdst; | ||
| 162 | |||
| 163 | wdst = w.dst.virt.addr; | ||
| 164 | |||
| 165 | do { | ||
| 166 | be128_xor(wdst, buf++, wdst); | ||
| 167 | wdst++; | ||
| 168 | } while ((avail -= bs) >= bs); | ||
| 169 | |||
| 170 | err = skcipher_walk_done(&w, avail); | ||
| 171 | } | ||
| 172 | |||
| 173 | rctx->left -= subreq->cryptlen; | ||
| 174 | |||
| 175 | if (err || !rctx->left) | ||
| 176 | goto out; | ||
| 177 | |||
| 178 | rctx->dst = rctx->dstbuf; | ||
| 179 | |||
| 180 | scatterwalk_done(&w.out, 0, 1); | ||
| 181 | sg = w.out.sg; | ||
| 182 | offset = w.out.offset; | ||
| 183 | |||
| 184 | if (rctx->dst != sg) { | ||
| 185 | rctx->dst[0] = *sg; | ||
| 186 | sg_unmark_end(rctx->dst); | ||
| 187 | scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); | ||
| 188 | } | ||
| 189 | rctx->dst[0].length -= offset - sg->offset; | ||
| 190 | rctx->dst[0].offset = offset; | ||
| 191 | |||
| 192 | out: | ||
| 193 | return err; | ||
| 194 | } | ||
| 195 | |||
| 196 | static int pre_crypt(struct skcipher_request *req) | ||
| 197 | { | ||
| 198 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 199 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 200 | struct priv *ctx = crypto_skcipher_ctx(tfm); | ||
| 201 | be128 *buf = rctx->ext ?: rctx->buf; | ||
| 202 | struct skcipher_request *subreq; | ||
| 144 | const int bs = LRW_BLOCK_SIZE; | 203 | const int bs = LRW_BLOCK_SIZE; |
| 145 | struct sinfo s = { | 204 | struct skcipher_walk w; |
| 146 | .tfm = crypto_cipher_tfm(ctx->child), | 205 | struct scatterlist *sg; |
| 147 | .fn = fn | 206 | unsigned cryptlen; |
| 148 | }; | 207 | unsigned offset; |
| 149 | be128 *iv; | 208 | be128 *iv; |
| 150 | u8 *wsrc; | 209 | bool more; |
| 151 | u8 *wdst; | 210 | int err; |
| 152 | 211 | ||
| 153 | err = blkcipher_walk_virt(d, w); | 212 | subreq = &rctx->subreq; |
| 154 | if (!(avail = w->nbytes)) | 213 | skcipher_request_set_tfm(subreq, tfm); |
| 155 | return err; | ||
| 156 | 214 | ||
| 157 | wsrc = w->src.virt.addr; | 215 | cryptlen = subreq->cryptlen; |
| 158 | wdst = w->dst.virt.addr; | 216 | more = rctx->left > cryptlen; |
| 217 | if (!more) | ||
| 218 | cryptlen = rctx->left; | ||
| 159 | 219 | ||
| 160 | /* calculate first value of T */ | 220 | skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, |
| 161 | iv = (be128 *)w->iv; | 221 | cryptlen, req->iv); |
| 162 | s.t = *iv; | ||
| 163 | 222 | ||
| 164 | /* T <- I*Key2 */ | 223 | err = skcipher_walk_virt(&w, subreq, false); |
| 165 | gf128mul_64k_bbe(&s.t, ctx->table.table); | 224 | iv = w.iv; |
| 166 | 225 | ||
| 167 | goto first; | 226 | while (w.nbytes) { |
| 227 | unsigned int avail = w.nbytes; | ||
| 228 | be128 *wsrc; | ||
| 229 | be128 *wdst; | ||
| 230 | |||
| 231 | wsrc = w.src.virt.addr; | ||
| 232 | wdst = w.dst.virt.addr; | ||
| 168 | 233 | ||
| 169 | for (;;) { | ||
| 170 | do { | 234 | do { |
| 235 | *buf++ = rctx->t; | ||
| 236 | be128_xor(wdst++, &rctx->t, wsrc++); | ||
| 237 | |||
| 171 | /* T <- I*Key2, using the optimization | 238 | /* T <- I*Key2, using the optimization |
| 172 | * discussed in the specification */ | 239 | * discussed in the specification */ |
| 173 | be128_xor(&s.t, &s.t, | 240 | be128_xor(&rctx->t, &rctx->t, |
| 174 | &ctx->table.mulinc[get_index128(iv)]); | 241 | &ctx->table.mulinc[get_index128(iv)]); |
| 175 | inc(iv); | 242 | inc(iv); |
| 243 | } while ((avail -= bs) >= bs); | ||
| 176 | 244 | ||
| 177 | first: | 245 | err = skcipher_walk_done(&w, avail); |
| 178 | lrw_round(&s, wdst, wsrc); | 246 | } |
| 179 | 247 | ||
| 180 | wsrc += bs; | 248 | skcipher_request_set_tfm(subreq, ctx->child); |
| 181 | wdst += bs; | 249 | skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, |
| 182 | } while ((avail -= bs) >= bs); | 250 | cryptlen, NULL); |
| 183 | 251 | ||
| 184 | err = blkcipher_walk_done(d, w, avail); | 252 | if (err || !more) |
| 185 | if (!(avail = w->nbytes)) | 253 | goto out; |
| 186 | break; | 254 | |
| 255 | rctx->src = rctx->srcbuf; | ||
| 256 | |||
| 257 | scatterwalk_done(&w.in, 0, 1); | ||
| 258 | sg = w.in.sg; | ||
| 259 | offset = w.in.offset; | ||
| 260 | |||
| 261 | if (rctx->src != sg) { | ||
| 262 | rctx->src[0] = *sg; | ||
| 263 | sg_unmark_end(rctx->src); | ||
| 264 | scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); | ||
| 265 | } | ||
| 266 | rctx->src[0].length -= offset - sg->offset; | ||
| 267 | rctx->src[0].offset = offset; | ||
| 268 | |||
| 269 | out: | ||
| 270 | return err; | ||
| 271 | } | ||
| 272 | |||
| 273 | static int init_crypt(struct skcipher_request *req, crypto_completion_t done) | ||
| 274 | { | ||
| 275 | struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); | ||
| 276 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 277 | struct skcipher_request *subreq; | ||
| 278 | gfp_t gfp; | ||
| 279 | |||
| 280 | subreq = &rctx->subreq; | ||
| 281 | skcipher_request_set_callback(subreq, req->base.flags, done, req); | ||
| 282 | |||
| 283 | gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 284 | GFP_ATOMIC; | ||
| 285 | rctx->ext = NULL; | ||
| 286 | |||
| 287 | subreq->cryptlen = LRW_BUFFER_SIZE; | ||
| 288 | if (req->cryptlen > LRW_BUFFER_SIZE) { | ||
| 289 | subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); | ||
| 290 | rctx->ext = kmalloc(subreq->cryptlen, gfp); | ||
| 291 | } | ||
| 292 | |||
| 293 | rctx->src = req->src; | ||
| 294 | rctx->dst = req->dst; | ||
| 295 | rctx->left = req->cryptlen; | ||
| 296 | |||
| 297 | /* calculate first value of T */ | ||
| 298 | memcpy(&rctx->t, req->iv, sizeof(rctx->t)); | ||
| 299 | |||
| 300 | /* T <- I*Key2 */ | ||
| 301 | gf128mul_64k_bbe(&rctx->t, ctx->table.table); | ||
| 187 | 302 | ||
| 188 | wsrc = w->src.virt.addr; | 303 | return 0; |
| 189 | wdst = w->dst.virt.addr; | 304 | } |
| 305 | |||
| 306 | static void exit_crypt(struct skcipher_request *req) | ||
| 307 | { | ||
| 308 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 309 | |||
| 310 | rctx->left = 0; | ||
| 311 | |||
| 312 | if (rctx->ext) | ||
| 313 | kfree(rctx->ext); | ||
| 314 | } | ||
| 315 | |||
| 316 | static int do_encrypt(struct skcipher_request *req, int err) | ||
| 317 | { | ||
| 318 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 319 | struct skcipher_request *subreq; | ||
| 320 | |||
| 321 | subreq = &rctx->subreq; | ||
| 322 | |||
| 323 | while (!err && rctx->left) { | ||
| 324 | err = pre_crypt(req) ?: | ||
| 325 | crypto_skcipher_encrypt(subreq) ?: | ||
| 326 | post_crypt(req); | ||
| 327 | |||
| 328 | if (err == -EINPROGRESS || | ||
| 329 | (err == -EBUSY && | ||
| 330 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
| 331 | return err; | ||
| 190 | } | 332 | } |
| 191 | 333 | ||
| 334 | exit_crypt(req); | ||
| 192 | return err; | 335 | return err; |
| 193 | } | 336 | } |
| 194 | 337 | ||
| 195 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 338 | static void encrypt_done(struct crypto_async_request *areq, int err) |
| 196 | struct scatterlist *src, unsigned int nbytes) | 339 | { |
| 340 | struct skcipher_request *req = areq->data; | ||
| 341 | struct skcipher_request *subreq; | ||
| 342 | struct rctx *rctx; | ||
| 343 | |||
| 344 | rctx = skcipher_request_ctx(req); | ||
| 345 | subreq = &rctx->subreq; | ||
| 346 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
| 347 | |||
| 348 | err = do_encrypt(req, err ?: post_crypt(req)); | ||
| 349 | if (rctx->left) | ||
| 350 | return; | ||
| 351 | |||
| 352 | skcipher_request_complete(req, err); | ||
| 353 | } | ||
| 354 | |||
| 355 | static int encrypt(struct skcipher_request *req) | ||
| 356 | { | ||
| 357 | return do_encrypt(req, init_crypt(req, encrypt_done)); | ||
| 358 | } | ||
| 359 | |||
| 360 | static int do_decrypt(struct skcipher_request *req, int err) | ||
| 197 | { | 361 | { |
| 198 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 362 | struct rctx *rctx = skcipher_request_ctx(req); |
| 199 | struct blkcipher_walk w; | 363 | struct skcipher_request *subreq; |
| 364 | |||
| 365 | subreq = &rctx->subreq; | ||
| 366 | |||
| 367 | while (!err && rctx->left) { | ||
| 368 | err = pre_crypt(req) ?: | ||
| 369 | crypto_skcipher_decrypt(subreq) ?: | ||
| 370 | post_crypt(req); | ||
| 371 | |||
| 372 | if (err == -EINPROGRESS || | ||
| 373 | (err == -EBUSY && | ||
| 374 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
| 375 | return err; | ||
| 376 | } | ||
| 200 | 377 | ||
| 201 | blkcipher_walk_init(&w, dst, src, nbytes); | 378 | exit_crypt(req); |
| 202 | return crypt(desc, &w, ctx, | 379 | return err; |
| 203 | crypto_cipher_alg(ctx->child)->cia_encrypt); | ||
| 204 | } | 380 | } |
| 205 | 381 | ||
| 206 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 382 | static void decrypt_done(struct crypto_async_request *areq, int err) |
| 207 | struct scatterlist *src, unsigned int nbytes) | ||
| 208 | { | 383 | { |
| 209 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 384 | struct skcipher_request *req = areq->data; |
| 210 | struct blkcipher_walk w; | 385 | struct skcipher_request *subreq; |
| 386 | struct rctx *rctx; | ||
| 387 | |||
| 388 | rctx = skcipher_request_ctx(req); | ||
| 389 | subreq = &rctx->subreq; | ||
| 390 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
| 391 | |||
| 392 | err = do_decrypt(req, err ?: post_crypt(req)); | ||
| 393 | if (rctx->left) | ||
| 394 | return; | ||
| 211 | 395 | ||
| 212 | blkcipher_walk_init(&w, dst, src, nbytes); | 396 | skcipher_request_complete(req, err); |
| 213 | return crypt(desc, &w, ctx, | 397 | } |
| 214 | crypto_cipher_alg(ctx->child)->cia_decrypt); | 398 | |
| 399 | static int decrypt(struct skcipher_request *req) | ||
| 400 | { | ||
| 401 | return do_decrypt(req, init_crypt(req, decrypt_done)); | ||
| 215 | } | 402 | } |
| 216 | 403 | ||
| 217 | int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, | 404 | int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, |
| @@ -293,95 +480,161 @@ first: | |||
| 293 | } | 480 | } |
| 294 | EXPORT_SYMBOL_GPL(lrw_crypt); | 481 | EXPORT_SYMBOL_GPL(lrw_crypt); |
| 295 | 482 | ||
| 296 | static int init_tfm(struct crypto_tfm *tfm) | 483 | static int init_tfm(struct crypto_skcipher *tfm) |
| 297 | { | 484 | { |
| 298 | struct crypto_cipher *cipher; | 485 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
| 299 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 486 | struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); |
| 300 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 487 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
| 301 | struct priv *ctx = crypto_tfm_ctx(tfm); | 488 | struct crypto_skcipher *cipher; |
| 302 | u32 *flags = &tfm->crt_flags; | ||
| 303 | 489 | ||
| 304 | cipher = crypto_spawn_cipher(spawn); | 490 | cipher = crypto_spawn_skcipher(spawn); |
| 305 | if (IS_ERR(cipher)) | 491 | if (IS_ERR(cipher)) |
| 306 | return PTR_ERR(cipher); | 492 | return PTR_ERR(cipher); |
| 307 | 493 | ||
| 308 | if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { | ||
| 309 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
| 310 | crypto_free_cipher(cipher); | ||
| 311 | return -EINVAL; | ||
| 312 | } | ||
| 313 | |||
| 314 | ctx->child = cipher; | 494 | ctx->child = cipher; |
| 495 | |||
| 496 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + | ||
| 497 | sizeof(struct rctx)); | ||
| 498 | |||
| 315 | return 0; | 499 | return 0; |
| 316 | } | 500 | } |
| 317 | 501 | ||
| 318 | static void exit_tfm(struct crypto_tfm *tfm) | 502 | static void exit_tfm(struct crypto_skcipher *tfm) |
| 319 | { | 503 | { |
| 320 | struct priv *ctx = crypto_tfm_ctx(tfm); | 504 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
| 321 | 505 | ||
| 322 | lrw_free_table(&ctx->table); | 506 | lrw_free_table(&ctx->table); |
| 323 | crypto_free_cipher(ctx->child); | 507 | crypto_free_skcipher(ctx->child); |
| 508 | } | ||
| 509 | |||
| 510 | static void free(struct skcipher_instance *inst) | ||
| 511 | { | ||
| 512 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
| 513 | kfree(inst); | ||
| 324 | } | 514 | } |
| 325 | 515 | ||
| 326 | static struct crypto_instance *alloc(struct rtattr **tb) | 516 | static int create(struct crypto_template *tmpl, struct rtattr **tb) |
| 327 | { | 517 | { |
| 328 | struct crypto_instance *inst; | 518 | struct crypto_skcipher_spawn *spawn; |
| 329 | struct crypto_alg *alg; | 519 | struct skcipher_instance *inst; |
| 520 | struct crypto_attr_type *algt; | ||
| 521 | struct skcipher_alg *alg; | ||
| 522 | const char *cipher_name; | ||
| 523 | char ecb_name[CRYPTO_MAX_ALG_NAME]; | ||
| 330 | int err; | 524 | int err; |
| 331 | 525 | ||
| 332 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 526 | algt = crypto_get_attr_type(tb); |
| 527 | if (IS_ERR(algt)) | ||
| 528 | return PTR_ERR(algt); | ||
| 529 | |||
| 530 | if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) | ||
| 531 | return -EINVAL; | ||
| 532 | |||
| 533 | cipher_name = crypto_attr_alg_name(tb[1]); | ||
| 534 | if (IS_ERR(cipher_name)) | ||
| 535 | return PTR_ERR(cipher_name); | ||
| 536 | |||
| 537 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | ||
| 538 | if (!inst) | ||
| 539 | return -ENOMEM; | ||
| 540 | |||
| 541 | spawn = skcipher_instance_ctx(inst); | ||
| 542 | |||
| 543 | crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); | ||
| 544 | err = crypto_grab_skcipher(spawn, cipher_name, 0, | ||
| 545 | crypto_requires_sync(algt->type, | ||
| 546 | algt->mask)); | ||
| 547 | if (err == -ENOENT) { | ||
| 548 | err = -ENAMETOOLONG; | ||
| 549 | if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", | ||
| 550 | cipher_name) >= CRYPTO_MAX_ALG_NAME) | ||
| 551 | goto err_free_inst; | ||
| 552 | |||
| 553 | err = crypto_grab_skcipher(spawn, ecb_name, 0, | ||
| 554 | crypto_requires_sync(algt->type, | ||
| 555 | algt->mask)); | ||
| 556 | } | ||
| 557 | |||
| 333 | if (err) | 558 | if (err) |
| 334 | return ERR_PTR(err); | 559 | goto err_free_inst; |
| 335 | 560 | ||
| 336 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 561 | alg = crypto_skcipher_spawn_alg(spawn); |
| 337 | CRYPTO_ALG_TYPE_MASK); | ||
| 338 | if (IS_ERR(alg)) | ||
| 339 | return ERR_CAST(alg); | ||
| 340 | 562 | ||
| 341 | inst = crypto_alloc_instance("lrw", alg); | 563 | err = -EINVAL; |
| 342 | if (IS_ERR(inst)) | 564 | if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) |
| 343 | goto out_put_alg; | 565 | goto err_drop_spawn; |
| 344 | 566 | ||
| 345 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 567 | if (crypto_skcipher_alg_ivsize(alg)) |
| 346 | inst->alg.cra_priority = alg->cra_priority; | 568 | goto err_drop_spawn; |
| 347 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
| 348 | 569 | ||
| 349 | if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; | 570 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", |
| 350 | else inst->alg.cra_alignmask = alg->cra_alignmask; | 571 | &alg->base); |
| 351 | inst->alg.cra_type = &crypto_blkcipher_type; | 572 | if (err) |
| 573 | goto err_drop_spawn; | ||
| 352 | 574 | ||
| 353 | if (!(alg->cra_blocksize % 4)) | 575 | err = -EINVAL; |
| 354 | inst->alg.cra_alignmask |= 3; | 576 | cipher_name = alg->base.cra_name; |
| 355 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | ||
| 356 | inst->alg.cra_blkcipher.min_keysize = | ||
| 357 | alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; | ||
| 358 | inst->alg.cra_blkcipher.max_keysize = | ||
| 359 | alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; | ||
| 360 | 577 | ||
| 361 | inst->alg.cra_ctxsize = sizeof(struct priv); | 578 | /* Alas we screwed up the naming so we have to mangle the |
| 579 | * cipher name. | ||
| 580 | */ | ||
| 581 | if (!strncmp(cipher_name, "ecb(", 4)) { | ||
| 582 | unsigned len; | ||
| 362 | 583 | ||
| 363 | inst->alg.cra_init = init_tfm; | 584 | len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); |
| 364 | inst->alg.cra_exit = exit_tfm; | 585 | if (len < 2 || len >= sizeof(ecb_name)) |
| 586 | goto err_drop_spawn; | ||
| 365 | 587 | ||
| 366 | inst->alg.cra_blkcipher.setkey = setkey; | 588 | if (ecb_name[len - 1] != ')') |
| 367 | inst->alg.cra_blkcipher.encrypt = encrypt; | 589 | goto err_drop_spawn; |
| 368 | inst->alg.cra_blkcipher.decrypt = decrypt; | ||
| 369 | 590 | ||
| 370 | out_put_alg: | 591 | ecb_name[len - 1] = 0; |
| 371 | crypto_mod_put(alg); | ||
| 372 | return inst; | ||
| 373 | } | ||
| 374 | 592 | ||
| 375 | static void free(struct crypto_instance *inst) | 593 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 376 | { | 594 | "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) |
| 377 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 595 | return -ENAMETOOLONG; |
| 596 | } | ||
| 597 | |||
| 598 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; | ||
| 599 | inst->alg.base.cra_priority = alg->base.cra_priority; | ||
| 600 | inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; | ||
| 601 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | | ||
| 602 | (__alignof__(u64) - 1); | ||
| 603 | |||
| 604 | inst->alg.ivsize = LRW_BLOCK_SIZE; | ||
| 605 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + | ||
| 606 | LRW_BLOCK_SIZE; | ||
| 607 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + | ||
| 608 | LRW_BLOCK_SIZE; | ||
| 609 | |||
| 610 | inst->alg.base.cra_ctxsize = sizeof(struct priv); | ||
| 611 | |||
| 612 | inst->alg.init = init_tfm; | ||
| 613 | inst->alg.exit = exit_tfm; | ||
| 614 | |||
| 615 | inst->alg.setkey = setkey; | ||
| 616 | inst->alg.encrypt = encrypt; | ||
| 617 | inst->alg.decrypt = decrypt; | ||
| 618 | |||
| 619 | inst->free = free; | ||
| 620 | |||
| 621 | err = skcipher_register_instance(tmpl, inst); | ||
| 622 | if (err) | ||
| 623 | goto err_drop_spawn; | ||
| 624 | |||
| 625 | out: | ||
| 626 | return err; | ||
| 627 | |||
| 628 | err_drop_spawn: | ||
| 629 | crypto_drop_skcipher(spawn); | ||
| 630 | err_free_inst: | ||
| 378 | kfree(inst); | 631 | kfree(inst); |
| 632 | goto out; | ||
| 379 | } | 633 | } |
| 380 | 634 | ||
| 381 | static struct crypto_template crypto_tmpl = { | 635 | static struct crypto_template crypto_tmpl = { |
| 382 | .name = "lrw", | 636 | .name = "lrw", |
| 383 | .alloc = alloc, | 637 | .create = create, |
| 384 | .free = free, | ||
| 385 | .module = THIS_MODULE, | 638 | .module = THIS_MODULE, |
| 386 | }; | 639 | }; |
| 387 | 640 | ||
diff --git a/crypto/lz4.c b/crypto/lz4.c index aefbceaf3104..99c1b2cc2976 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c | |||
| @@ -23,36 +23,53 @@ | |||
| 23 | #include <linux/crypto.h> | 23 | #include <linux/crypto.h> |
| 24 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
| 25 | #include <linux/lz4.h> | 25 | #include <linux/lz4.h> |
| 26 | #include <crypto/internal/scompress.h> | ||
| 26 | 27 | ||
| 27 | struct lz4_ctx { | 28 | struct lz4_ctx { |
| 28 | void *lz4_comp_mem; | 29 | void *lz4_comp_mem; |
| 29 | }; | 30 | }; |
| 30 | 31 | ||
| 32 | static void *lz4_alloc_ctx(struct crypto_scomp *tfm) | ||
| 33 | { | ||
| 34 | void *ctx; | ||
| 35 | |||
| 36 | ctx = vmalloc(LZ4_MEM_COMPRESS); | ||
| 37 | if (!ctx) | ||
| 38 | return ERR_PTR(-ENOMEM); | ||
| 39 | |||
| 40 | return ctx; | ||
| 41 | } | ||
| 42 | |||
| 31 | static int lz4_init(struct crypto_tfm *tfm) | 43 | static int lz4_init(struct crypto_tfm *tfm) |
| 32 | { | 44 | { |
| 33 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | 45 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); |
| 34 | 46 | ||
| 35 | ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS); | 47 | ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); |
| 36 | if (!ctx->lz4_comp_mem) | 48 | if (IS_ERR(ctx->lz4_comp_mem)) |
| 37 | return -ENOMEM; | 49 | return -ENOMEM; |
| 38 | 50 | ||
| 39 | return 0; | 51 | return 0; |
| 40 | } | 52 | } |
| 41 | 53 | ||
| 54 | static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
| 55 | { | ||
| 56 | vfree(ctx); | ||
| 57 | } | ||
| 58 | |||
| 42 | static void lz4_exit(struct crypto_tfm *tfm) | 59 | static void lz4_exit(struct crypto_tfm *tfm) |
| 43 | { | 60 | { |
| 44 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | 61 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); |
| 45 | vfree(ctx->lz4_comp_mem); | 62 | |
| 63 | lz4_free_ctx(NULL, ctx->lz4_comp_mem); | ||
| 46 | } | 64 | } |
| 47 | 65 | ||
| 48 | static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | 66 | static int __lz4_compress_crypto(const u8 *src, unsigned int slen, |
| 49 | unsigned int slen, u8 *dst, unsigned int *dlen) | 67 | u8 *dst, unsigned int *dlen, void *ctx) |
| 50 | { | 68 | { |
| 51 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 52 | size_t tmp_len = *dlen; | 69 | size_t tmp_len = *dlen; |
| 53 | int err; | 70 | int err; |
| 54 | 71 | ||
| 55 | err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem); | 72 | err = lz4_compress(src, slen, dst, &tmp_len, ctx); |
| 56 | 73 | ||
| 57 | if (err < 0) | 74 | if (err < 0) |
| 58 | return -EINVAL; | 75 | return -EINVAL; |
| @@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
| 61 | return 0; | 78 | return 0; |
| 62 | } | 79 | } |
| 63 | 80 | ||
| 64 | static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | 81 | static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src, |
| 65 | unsigned int slen, u8 *dst, unsigned int *dlen) | 82 | unsigned int slen, u8 *dst, unsigned int *dlen, |
| 83 | void *ctx) | ||
| 84 | { | ||
| 85 | return __lz4_compress_crypto(src, slen, dst, dlen, ctx); | ||
| 86 | } | ||
| 87 | |||
| 88 | static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
| 89 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
| 90 | { | ||
| 91 | struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 92 | |||
| 93 | return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem); | ||
| 94 | } | ||
| 95 | |||
| 96 | static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, | ||
| 97 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 66 | { | 98 | { |
| 67 | int err; | 99 | int err; |
| 68 | size_t tmp_len = *dlen; | 100 | size_t tmp_len = *dlen; |
| @@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
| 76 | return err; | 108 | return err; |
| 77 | } | 109 | } |
| 78 | 110 | ||
| 111 | static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
| 112 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 113 | void *ctx) | ||
| 114 | { | ||
| 115 | return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); | ||
| 116 | } | ||
| 117 | |||
| 118 | static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
| 119 | unsigned int slen, u8 *dst, | ||
| 120 | unsigned int *dlen) | ||
| 121 | { | ||
| 122 | return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); | ||
| 123 | } | ||
| 124 | |||
| 79 | static struct crypto_alg alg_lz4 = { | 125 | static struct crypto_alg alg_lz4 = { |
| 80 | .cra_name = "lz4", | 126 | .cra_name = "lz4", |
| 81 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 127 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
| @@ -89,14 +135,39 @@ static struct crypto_alg alg_lz4 = { | |||
| 89 | .coa_decompress = lz4_decompress_crypto } } | 135 | .coa_decompress = lz4_decompress_crypto } } |
| 90 | }; | 136 | }; |
| 91 | 137 | ||
| 138 | static struct scomp_alg scomp = { | ||
| 139 | .alloc_ctx = lz4_alloc_ctx, | ||
| 140 | .free_ctx = lz4_free_ctx, | ||
| 141 | .compress = lz4_scompress, | ||
| 142 | .decompress = lz4_sdecompress, | ||
| 143 | .base = { | ||
| 144 | .cra_name = "lz4", | ||
| 145 | .cra_driver_name = "lz4-scomp", | ||
| 146 | .cra_module = THIS_MODULE, | ||
| 147 | } | ||
| 148 | }; | ||
| 149 | |||
| 92 | static int __init lz4_mod_init(void) | 150 | static int __init lz4_mod_init(void) |
| 93 | { | 151 | { |
| 94 | return crypto_register_alg(&alg_lz4); | 152 | int ret; |
| 153 | |||
| 154 | ret = crypto_register_alg(&alg_lz4); | ||
| 155 | if (ret) | ||
| 156 | return ret; | ||
| 157 | |||
| 158 | ret = crypto_register_scomp(&scomp); | ||
| 159 | if (ret) { | ||
| 160 | crypto_unregister_alg(&alg_lz4); | ||
| 161 | return ret; | ||
| 162 | } | ||
| 163 | |||
| 164 | return ret; | ||
| 95 | } | 165 | } |
| 96 | 166 | ||
| 97 | static void __exit lz4_mod_fini(void) | 167 | static void __exit lz4_mod_fini(void) |
| 98 | { | 168 | { |
| 99 | crypto_unregister_alg(&alg_lz4); | 169 | crypto_unregister_alg(&alg_lz4); |
| 170 | crypto_unregister_scomp(&scomp); | ||
| 100 | } | 171 | } |
| 101 | 172 | ||
| 102 | module_init(lz4_mod_init); | 173 | module_init(lz4_mod_init); |
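
Besides repeating the dual-registration pattern, lz4's new lz4_alloc_ctx() shows the convention the scomp interface expects from allocation hooks: return a valid pointer or an ERR_PTR-encoded negative errno, never NULL, so callers can test with IS_ERR(). A userspace re-creation of the encoding trick; the real macros live in include/linux/err.h and rely on the top page of the address space never being mapped:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* Illustrative re-implementations of the linux/err.h helpers. */
    static void *ERR_PTR(long err)     { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_ctx(size_t len)
    {
            void *ctx = malloc(len);

            return ctx ? ctx : ERR_PTR(-ENOMEM);  /* never NULL */
    }

    int main(void)
    {
            void *ctx = alloc_ctx(1 << 20);

            if (IS_ERR(ctx)) {
                    printf("alloc failed: %ld\n", PTR_ERR(ctx));
                    return 1;
            }
            free(ctx);
            return 0;
    }
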
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index a1d3b5bd3d85..75ffc4a3f786 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c | |||
| @@ -22,37 +22,53 @@ | |||
| 22 | #include <linux/crypto.h> | 22 | #include <linux/crypto.h> |
| 23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
| 24 | #include <linux/lz4.h> | 24 | #include <linux/lz4.h> |
| 25 | #include <crypto/internal/scompress.h> | ||
| 25 | 26 | ||
| 26 | struct lz4hc_ctx { | 27 | struct lz4hc_ctx { |
| 27 | void *lz4hc_comp_mem; | 28 | void *lz4hc_comp_mem; |
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| 31 | static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) | ||
| 32 | { | ||
| 33 | void *ctx; | ||
| 34 | |||
| 35 | ctx = vmalloc(LZ4HC_MEM_COMPRESS); | ||
| 36 | if (!ctx) | ||
| 37 | return ERR_PTR(-ENOMEM); | ||
| 38 | |||
| 39 | return ctx; | ||
| 40 | } | ||
| 41 | |||
| 30 | static int lz4hc_init(struct crypto_tfm *tfm) | 42 | static int lz4hc_init(struct crypto_tfm *tfm) |
| 31 | { | 43 | { |
| 32 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | 44 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); |
| 33 | 45 | ||
| 34 | ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS); | 46 | ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); |
| 35 | if (!ctx->lz4hc_comp_mem) | 47 | if (IS_ERR(ctx->lz4hc_comp_mem)) |
| 36 | return -ENOMEM; | 48 | return -ENOMEM; |
| 37 | 49 | ||
| 38 | return 0; | 50 | return 0; |
| 39 | } | 51 | } |
| 40 | 52 | ||
| 53 | static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
| 54 | { | ||
| 55 | vfree(ctx); | ||
| 56 | } | ||
| 57 | |||
| 41 | static void lz4hc_exit(struct crypto_tfm *tfm) | 58 | static void lz4hc_exit(struct crypto_tfm *tfm) |
| 42 | { | 59 | { |
| 43 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | 60 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); |
| 44 | 61 | ||
| 45 | vfree(ctx->lz4hc_comp_mem); | 62 | lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); |
| 46 | } | 63 | } |
| 47 | 64 | ||
| 48 | static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | 65 | static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, |
| 49 | unsigned int slen, u8 *dst, unsigned int *dlen) | 66 | u8 *dst, unsigned int *dlen, void *ctx) |
| 50 | { | 67 | { |
| 51 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 52 | size_t tmp_len = *dlen; | 68 | size_t tmp_len = *dlen; |
| 53 | int err; | 69 | int err; |
| 54 | 70 | ||
| 55 | err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem); | 71 | err = lz4hc_compress(src, slen, dst, &tmp_len, ctx); |
| 56 | 72 | ||
| 57 | if (err < 0) | 73 | if (err < 0) |
| 58 | return -EINVAL; | 74 | return -EINVAL; |
| @@ -61,8 +77,25 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
| 61 | return 0; | 77 | return 0; |
| 62 | } | 78 | } |
| 63 | 79 | ||
| 64 | static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | 80 | static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src, |
| 65 | unsigned int slen, u8 *dst, unsigned int *dlen) | 81 | unsigned int slen, u8 *dst, unsigned int *dlen, |
| 82 | void *ctx) | ||
| 83 | { | ||
| 84 | return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx); | ||
| 85 | } | ||
| 86 | |||
| 87 | static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
| 88 | unsigned int slen, u8 *dst, | ||
| 89 | unsigned int *dlen) | ||
| 90 | { | ||
| 91 | struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 92 | |||
| 93 | return __lz4hc_compress_crypto(src, slen, dst, dlen, | ||
| 94 | ctx->lz4hc_comp_mem); | ||
| 95 | } | ||
| 96 | |||
| 97 | static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, | ||
| 98 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 66 | { | 99 | { |
| 67 | int err; | 100 | int err; |
| 68 | size_t tmp_len = *dlen; | 101 | size_t tmp_len = *dlen; |
| @@ -76,6 +109,20 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
| 76 | return err; | 109 | return err; |
| 77 | } | 110 | } |
| 78 | 111 | ||
| 112 | static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
| 113 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 114 | void *ctx) | ||
| 115 | { | ||
| 116 | return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); | ||
| 117 | } | ||
| 118 | |||
| 119 | static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | ||
| 120 | unsigned int slen, u8 *dst, | ||
| 121 | unsigned int *dlen) | ||
| 122 | { | ||
| 123 | return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); | ||
| 124 | } | ||
| 125 | |||
| 79 | static struct crypto_alg alg_lz4hc = { | 126 | static struct crypto_alg alg_lz4hc = { |
| 80 | .cra_name = "lz4hc", | 127 | .cra_name = "lz4hc", |
| 81 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 128 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
| @@ -89,14 +136,39 @@ static struct crypto_alg alg_lz4hc = { | |||
| 89 | .coa_decompress = lz4hc_decompress_crypto } } | 136 | .coa_decompress = lz4hc_decompress_crypto } } |
| 90 | }; | 137 | }; |
| 91 | 138 | ||
| 139 | static struct scomp_alg scomp = { | ||
| 140 | .alloc_ctx = lz4hc_alloc_ctx, | ||
| 141 | .free_ctx = lz4hc_free_ctx, | ||
| 142 | .compress = lz4hc_scompress, | ||
| 143 | .decompress = lz4hc_sdecompress, | ||
| 144 | .base = { | ||
| 145 | .cra_name = "lz4hc", | ||
| 146 | .cra_driver_name = "lz4hc-scomp", | ||
| 147 | .cra_module = THIS_MODULE, | ||
| 148 | } | ||
| 149 | }; | ||
| 150 | |||
| 92 | static int __init lz4hc_mod_init(void) | 151 | static int __init lz4hc_mod_init(void) |
| 93 | { | 152 | { |
| 94 | return crypto_register_alg(&alg_lz4hc); | 153 | int ret; |
| 154 | |||
| 155 | ret = crypto_register_alg(&alg_lz4hc); | ||
| 156 | if (ret) | ||
| 157 | return ret; | ||
| 158 | |||
| 159 | ret = crypto_register_scomp(&scomp); | ||
| 160 | if (ret) { | ||
| 161 | crypto_unregister_alg(&alg_lz4hc); | ||
| 162 | return ret; | ||
| 163 | } | ||
| 164 | |||
| 165 | return ret; | ||
| 95 | } | 166 | } |
| 96 | 167 | ||
| 97 | static void __exit lz4hc_mod_fini(void) | 168 | static void __exit lz4hc_mod_fini(void) |
| 98 | { | 169 | { |
| 99 | crypto_unregister_alg(&alg_lz4hc); | 170 | crypto_unregister_alg(&alg_lz4hc); |
| 171 | crypto_unregister_scomp(&scomp); | ||
| 100 | } | 172 | } |
| 101 | 173 | ||
| 102 | module_init(lz4hc_mod_init); | 174 | module_init(lz4hc_mod_init); |
diff --git a/crypto/lzo.c b/crypto/lzo.c index c3f3dd9a28c5..168df784da84 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c | |||
| @@ -22,40 +22,55 @@ | |||
| 22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
| 23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
| 24 | #include <linux/lzo.h> | 24 | #include <linux/lzo.h> |
| 25 | #include <crypto/internal/scompress.h> | ||
| 25 | 26 | ||
| 26 | struct lzo_ctx { | 27 | struct lzo_ctx { |
| 27 | void *lzo_comp_mem; | 28 | void *lzo_comp_mem; |
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| 31 | static void *lzo_alloc_ctx(struct crypto_scomp *tfm) | ||
| 32 | { | ||
| 33 | void *ctx; | ||
| 34 | |||
| 35 | ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN); | ||
| 36 | if (!ctx) | ||
| 37 | ctx = vmalloc(LZO1X_MEM_COMPRESS); | ||
| 38 | if (!ctx) | ||
| 39 | return ERR_PTR(-ENOMEM); | ||
| 40 | |||
| 41 | return ctx; | ||
| 42 | } | ||
| 43 | |||
| 30 | static int lzo_init(struct crypto_tfm *tfm) | 44 | static int lzo_init(struct crypto_tfm *tfm) |
| 31 | { | 45 | { |
| 32 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); |
| 33 | 47 | ||
| 34 | ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS, | 48 | ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); |
| 35 | GFP_KERNEL | __GFP_NOWARN); | 49 | if (IS_ERR(ctx->lzo_comp_mem)) |
| 36 | if (!ctx->lzo_comp_mem) | ||
| 37 | ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); | ||
| 38 | if (!ctx->lzo_comp_mem) | ||
| 39 | return -ENOMEM; | 50 | return -ENOMEM; |
| 40 | 51 | ||
| 41 | return 0; | 52 | return 0; |
| 42 | } | 53 | } |
| 43 | 54 | ||
| 55 | static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) | ||
| 56 | { | ||
| 57 | kvfree(ctx); | ||
| 58 | } | ||
| 59 | |||
| 44 | static void lzo_exit(struct crypto_tfm *tfm) | 60 | static void lzo_exit(struct crypto_tfm *tfm) |
| 45 | { | 61 | { |
| 46 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | 62 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); |
| 47 | 63 | ||
| 48 | kvfree(ctx->lzo_comp_mem); | 64 | lzo_free_ctx(NULL, ctx->lzo_comp_mem); |
| 49 | } | 65 | } |
| 50 | 66 | ||
| 51 | static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, | 67 | static int __lzo_compress(const u8 *src, unsigned int slen, |
| 52 | unsigned int slen, u8 *dst, unsigned int *dlen) | 68 | u8 *dst, unsigned int *dlen, void *ctx) |
| 53 | { | 69 | { |
| 54 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 55 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ | 70 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ |
| 56 | int err; | 71 | int err; |
| 57 | 72 | ||
| 58 | err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem); | 73 | err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); |
| 59 | 74 | ||
| 60 | if (err != LZO_E_OK) | 75 | if (err != LZO_E_OK) |
| 61 | return -EINVAL; | 76 | return -EINVAL; |
| @@ -64,8 +79,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, | |||
| 64 | return 0; | 79 | return 0; |
| 65 | } | 80 | } |
| 66 | 81 | ||
| 67 | static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, | 82 | static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, |
| 68 | unsigned int slen, u8 *dst, unsigned int *dlen) | 83 | unsigned int slen, u8 *dst, unsigned int *dlen) |
| 84 | { | ||
| 85 | struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 86 | |||
| 87 | return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem); | ||
| 88 | } | ||
| 89 | |||
| 90 | static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src, | ||
| 91 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 92 | void *ctx) | ||
| 93 | { | ||
| 94 | return __lzo_compress(src, slen, dst, dlen, ctx); | ||
| 95 | } | ||
| 96 | |||
| 97 | static int __lzo_decompress(const u8 *src, unsigned int slen, | ||
| 98 | u8 *dst, unsigned int *dlen) | ||
| 69 | { | 99 | { |
| 70 | int err; | 100 | int err; |
| 71 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ | 101 | size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ |
| @@ -77,7 +107,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, | |||
| 77 | 107 | ||
| 78 | *dlen = tmp_len; | 108 | *dlen = tmp_len; |
| 79 | return 0; | 109 | return 0; |
| 110 | } | ||
| 80 | 111 | ||
| 112 | static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, | ||
| 113 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
| 114 | { | ||
| 115 | return __lzo_decompress(src, slen, dst, dlen); | ||
| 116 | } | ||
| 117 | |||
| 118 | static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, | ||
| 119 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 120 | void *ctx) | ||
| 121 | { | ||
| 122 | return __lzo_decompress(src, slen, dst, dlen); | ||
| 81 | } | 123 | } |
| 82 | 124 | ||
| 83 | static struct crypto_alg alg = { | 125 | static struct crypto_alg alg = { |
| @@ -88,18 +130,43 @@ static struct crypto_alg alg = { | |||
| 88 | .cra_init = lzo_init, | 130 | .cra_init = lzo_init, |
| 89 | .cra_exit = lzo_exit, | 131 | .cra_exit = lzo_exit, |
| 90 | .cra_u = { .compress = { | 132 | .cra_u = { .compress = { |
| 91 | .coa_compress = lzo_compress, | 133 | .coa_compress = lzo_compress, |
| 92 | .coa_decompress = lzo_decompress } } | 134 | .coa_decompress = lzo_decompress } } |
| 135 | }; | ||
| 136 | |||
| 137 | static struct scomp_alg scomp = { | ||
| 138 | .alloc_ctx = lzo_alloc_ctx, | ||
| 139 | .free_ctx = lzo_free_ctx, | ||
| 140 | .compress = lzo_scompress, | ||
| 141 | .decompress = lzo_sdecompress, | ||
| 142 | .base = { | ||
| 143 | .cra_name = "lzo", | ||
| 144 | .cra_driver_name = "lzo-scomp", | ||
| 145 | .cra_module = THIS_MODULE, | ||
| 146 | } | ||
| 93 | }; | 147 | }; |
| 94 | 148 | ||
| 95 | static int __init lzo_mod_init(void) | 149 | static int __init lzo_mod_init(void) |
| 96 | { | 150 | { |
| 97 | return crypto_register_alg(&alg); | 151 | int ret; |
| 152 | |||
| 153 | ret = crypto_register_alg(&alg); | ||
| 154 | if (ret) | ||
| 155 | return ret; | ||
| 156 | |||
| 157 | ret = crypto_register_scomp(&scomp); | ||
| 158 | if (ret) { | ||
| 159 | crypto_unregister_alg(&alg); | ||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | return ret; | ||
| 98 | } | 164 | } |
| 99 | 165 | ||
| 100 | static void __exit lzo_mod_fini(void) | 166 | static void __exit lzo_mod_fini(void) |
| 101 | { | 167 | { |
| 102 | crypto_unregister_alg(&alg); | 168 | crypto_unregister_alg(&alg); |
| 169 | crypto_unregister_scomp(&scomp); | ||
| 103 | } | 170 | } |
| 104 | 171 | ||
| 105 | module_init(lzo_mod_init); | 172 | module_init(lzo_mod_init); |
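
lzo's helper keeps the allocation strategy its init path already used: try kmalloc first (physically contiguous and cheap) with __GFP_NOWARN so a large-order failure stays quiet, fall back to vmalloc for the sizeable LZO1X workspace, and free with kvfree(), which dispatches to kfree() or vfree() based on the address. A kernel-style sketch of the same shape under those assumptions; alloc_workspace() and free_workspace() are illustrative names:

    #include <linux/mm.h>       /* kvfree()  */
    #include <linux/slab.h>     /* kmalloc() */
    #include <linux/vmalloc.h>  /* vmalloc() */

    /* Prefer contiguous pages, quietly fall back to vmalloc. */
    static void *alloc_workspace(size_t len)
    {
            void *ws;

            ws = kmalloc(len, GFP_KERNEL | __GFP_NOWARN);
            if (!ws)
                    ws = vmalloc(len);  /* virtually contiguous will do */
            return ws;
    }

    /* kvfree() picks the matching free routine, so the caller need
     * not remember which allocator succeeded. */
    static void free_workspace(void *ws)
    {
            kvfree(ws);
    }
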
diff --git a/crypto/pcbc.c b/crypto/pcbc.c index f654965f0933..e4538e07f7ca 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c | |||
| @@ -14,40 +14,37 @@ | |||
| 14 | * | 14 | * |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <crypto/algapi.h> | 17 | #include <crypto/internal/skcipher.h> |
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/scatterlist.h> | ||
| 23 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 24 | 23 | ||
| 25 | struct crypto_pcbc_ctx { | 24 | struct crypto_pcbc_ctx { |
| 26 | struct crypto_cipher *child; | 25 | struct crypto_cipher *child; |
| 27 | }; | 26 | }; |
| 28 | 27 | ||
| 29 | static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, | 28 | static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key, |
| 30 | unsigned int keylen) | 29 | unsigned int keylen) |
| 31 | { | 30 | { |
| 32 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent); | 31 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent); |
| 33 | struct crypto_cipher *child = ctx->child; | 32 | struct crypto_cipher *child = ctx->child; |
| 34 | int err; | 33 | int err; |
| 35 | 34 | ||
| 36 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 35 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
| 37 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 36 | crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & |
| 38 | CRYPTO_TFM_REQ_MASK); | 37 | CRYPTO_TFM_REQ_MASK); |
| 39 | err = crypto_cipher_setkey(child, key, keylen); | 38 | err = crypto_cipher_setkey(child, key, keylen); |
| 40 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 39 | crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & |
| 41 | CRYPTO_TFM_RES_MASK); | 40 | CRYPTO_TFM_RES_MASK); |
| 42 | return err; | 41 | return err; |
| 43 | } | 42 | } |
| 44 | 43 | ||
| 45 | static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | 44 | static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, |
| 46 | struct blkcipher_walk *walk, | 45 | struct skcipher_walk *walk, |
| 47 | struct crypto_cipher *tfm) | 46 | struct crypto_cipher *tfm) |
| 48 | { | 47 | { |
| 49 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
| 50 | crypto_cipher_alg(tfm)->cia_encrypt; | ||
| 51 | int bsize = crypto_cipher_blocksize(tfm); | 48 | int bsize = crypto_cipher_blocksize(tfm); |
| 52 | unsigned int nbytes = walk->nbytes; | 49 | unsigned int nbytes = walk->nbytes; |
| 53 | u8 *src = walk->src.virt.addr; | 50 | u8 *src = walk->src.virt.addr; |
| @@ -56,7 +53,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | |||
| 56 | 53 | ||
| 57 | do { | 54 | do { |
| 58 | crypto_xor(iv, src, bsize); | 55 | crypto_xor(iv, src, bsize); |
| 59 | fn(crypto_cipher_tfm(tfm), dst, iv); | 56 | crypto_cipher_encrypt_one(tfm, dst, iv); |
| 60 | memcpy(iv, dst, bsize); | 57 | memcpy(iv, dst, bsize); |
| 61 | crypto_xor(iv, src, bsize); | 58 | crypto_xor(iv, src, bsize); |
| 62 | 59 | ||
| @@ -67,12 +64,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, | |||
| 67 | return nbytes; | 64 | return nbytes; |
| 68 | } | 65 | } |
| 69 | 66 | ||
| 70 | static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | 67 | static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, |
| 71 | struct blkcipher_walk *walk, | 68 | struct skcipher_walk *walk, |
| 72 | struct crypto_cipher *tfm) | 69 | struct crypto_cipher *tfm) |
| 73 | { | 70 | { |
| 74 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
| 75 | crypto_cipher_alg(tfm)->cia_encrypt; | ||
| 76 | int bsize = crypto_cipher_blocksize(tfm); | 71 | int bsize = crypto_cipher_blocksize(tfm); |
| 77 | unsigned int nbytes = walk->nbytes; | 72 | unsigned int nbytes = walk->nbytes; |
| 78 | u8 *src = walk->src.virt.addr; | 73 | u8 *src = walk->src.virt.addr; |
| @@ -82,7 +77,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | |||
| 82 | do { | 77 | do { |
| 83 | memcpy(tmpbuf, src, bsize); | 78 | memcpy(tmpbuf, src, bsize); |
| 84 | crypto_xor(iv, src, bsize); | 79 | crypto_xor(iv, src, bsize); |
| 85 | fn(crypto_cipher_tfm(tfm), src, iv); | 80 | crypto_cipher_encrypt_one(tfm, src, iv); |
| 86 | memcpy(iv, tmpbuf, bsize); | 81 | memcpy(iv, tmpbuf, bsize); |
| 87 | crypto_xor(iv, src, bsize); | 82 | crypto_xor(iv, src, bsize); |
| 88 | 83 | ||
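
The xor/encrypt/copy choreography in the pcbc.c hunks above encodes the mode's chaining rule: C_n = E(K, P_n xor IV_n) and IV_{n+1} = P_n xor C_n, which is why the in-place path must stash the plaintext in tmpbuf before encryption clobbers it. A toy standalone rendering of one in-place step; toy_encrypt() is a stand-in permutation, not a real cipher:

    #include <string.h>

    #define BS 16

    /* Illustrative stand-in block cipher. */
    static void toy_encrypt(unsigned char b[BS])
    {
            for (int i = 0; i < BS; i++)
                    b[i] = (unsigned char)~b[i];
    }

    static void xor_in(unsigned char *d, const unsigned char *s)
    {
            for (int i = 0; i < BS; i++)
                    d[i] ^= s[i];
    }

    /* One in-place PCBC step, mirroring crypto_pcbc_encrypt_inplace():
     * buf holds P on entry, C on exit; iv advances to P xor C. */
    static void pcbc_step(unsigned char buf[BS], unsigned char iv[BS])
    {
            unsigned char tmp[BS];

            memcpy(tmp, buf, BS);  /* save P before it is clobbered */
            xor_in(iv, buf);       /* iv  <- IV xor P               */
            memcpy(buf, iv, BS);
            toy_encrypt(buf);      /* buf <- C = E(IV xor P)        */
            memcpy(iv, tmp, BS);   /* iv  <- P ...                  */
            xor_in(iv, buf);       /* iv  <- P xor C = next IV      */
    }

    int main(void)
    {
            unsigned char buf[BS] = "fifteen bytes!!";
            unsigned char iv[BS] = { 0 };

            pcbc_step(buf, iv);
            return 0;
    }
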
| @@ -94,38 +89,34 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, | |||
| 94 | return nbytes; | 89 | return nbytes; |
| 95 | } | 90 | } |
| 96 | 91 | ||
| 97 | static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, | 92 | static int crypto_pcbc_encrypt(struct skcipher_request *req) |
| 98 | struct scatterlist *dst, struct scatterlist *src, | ||
| 99 | unsigned int nbytes) | ||
| 100 | { | 93 | { |
| 101 | struct blkcipher_walk walk; | 94 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 102 | struct crypto_blkcipher *tfm = desc->tfm; | 95 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 103 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
| 104 | struct crypto_cipher *child = ctx->child; | 96 | struct crypto_cipher *child = ctx->child; |
| 97 | struct skcipher_walk walk; | ||
| 98 | unsigned int nbytes; | ||
| 105 | int err; | 99 | int err; |
| 106 | 100 | ||
| 107 | blkcipher_walk_init(&walk, dst, src, nbytes); | 101 | err = skcipher_walk_virt(&walk, req, false); |
| 108 | err = blkcipher_walk_virt(desc, &walk); | ||
| 109 | 102 | ||
| 110 | while ((nbytes = walk.nbytes)) { | 103 | while ((nbytes = walk.nbytes)) { |
| 111 | if (walk.src.virt.addr == walk.dst.virt.addr) | 104 | if (walk.src.virt.addr == walk.dst.virt.addr) |
| 112 | nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, | 105 | nbytes = crypto_pcbc_encrypt_inplace(req, &walk, |
| 113 | child); | 106 | child); |
| 114 | else | 107 | else |
| 115 | nbytes = crypto_pcbc_encrypt_segment(desc, &walk, | 108 | nbytes = crypto_pcbc_encrypt_segment(req, &walk, |
| 116 | child); | 109 | child); |
| 117 | err = blkcipher_walk_done(desc, &walk, nbytes); | 110 | err = skcipher_walk_done(&walk, nbytes); |
| 118 | } | 111 | } |
| 119 | 112 | ||
| 120 | return err; | 113 | return err; |
| 121 | } | 114 | } |
| 122 | 115 | ||
| 123 | static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | 116 | static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, |
| 124 | struct blkcipher_walk *walk, | 117 | struct skcipher_walk *walk, |
| 125 | struct crypto_cipher *tfm) | 118 | struct crypto_cipher *tfm) |
| 126 | { | 119 | { |
| 127 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
| 128 | crypto_cipher_alg(tfm)->cia_decrypt; | ||
| 129 | int bsize = crypto_cipher_blocksize(tfm); | 120 | int bsize = crypto_cipher_blocksize(tfm); |
| 130 | unsigned int nbytes = walk->nbytes; | 121 | unsigned int nbytes = walk->nbytes; |
| 131 | u8 *src = walk->src.virt.addr; | 122 | u8 *src = walk->src.virt.addr; |
| @@ -133,7 +124,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | |||
| 133 | u8 *iv = walk->iv; | 124 | u8 *iv = walk->iv; |
| 134 | 125 | ||
| 135 | do { | 126 | do { |
| 136 | fn(crypto_cipher_tfm(tfm), dst, src); | 127 | crypto_cipher_decrypt_one(tfm, dst, src); |
| 137 | crypto_xor(dst, iv, bsize); | 128 | crypto_xor(dst, iv, bsize); |
| 138 | memcpy(iv, src, bsize); | 129 | memcpy(iv, src, bsize); |
| 139 | crypto_xor(iv, dst, bsize); | 130 | crypto_xor(iv, dst, bsize); |
| @@ -147,21 +138,19 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, | |||
| 147 | return nbytes; | 138 | return nbytes; |
| 148 | } | 139 | } |
| 149 | 140 | ||
| 150 | static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, | 141 | static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, |
| 151 | struct blkcipher_walk *walk, | 142 | struct skcipher_walk *walk, |
| 152 | struct crypto_cipher *tfm) | 143 | struct crypto_cipher *tfm) |
| 153 | { | 144 | { |
| 154 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = | ||
| 155 | crypto_cipher_alg(tfm)->cia_decrypt; | ||
| 156 | int bsize = crypto_cipher_blocksize(tfm); | 145 | int bsize = crypto_cipher_blocksize(tfm); |
| 157 | unsigned int nbytes = walk->nbytes; | 146 | unsigned int nbytes = walk->nbytes; |
| 158 | u8 *src = walk->src.virt.addr; | 147 | u8 *src = walk->src.virt.addr; |
| 159 | u8 *iv = walk->iv; | 148 | u8 *iv = walk->iv; |
| 160 | u8 tmpbuf[bsize]; | 149 | u8 tmpbuf[bsize] __attribute__ ((aligned(__alignof__(u32)))); |
| 161 | 150 | ||
| 162 | do { | 151 | do { |
| 163 | memcpy(tmpbuf, src, bsize); | 152 | memcpy(tmpbuf, src, bsize); |
| 164 | fn(crypto_cipher_tfm(tfm), src, src); | 153 | crypto_cipher_decrypt_one(tfm, src, src); |
| 165 | crypto_xor(src, iv, bsize); | 154 | crypto_xor(src, iv, bsize); |
| 166 | memcpy(iv, tmpbuf, bsize); | 155 | memcpy(iv, tmpbuf, bsize); |
| 167 | crypto_xor(iv, src, bsize); | 156 | crypto_xor(iv, src, bsize); |
| @@ -174,37 +163,35 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, | |||
| 174 | return nbytes; | 163 | return nbytes; |
| 175 | } | 164 | } |
| 176 | 165 | ||
| 177 | static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, | 166 | static int crypto_pcbc_decrypt(struct skcipher_request *req) |
| 178 | struct scatterlist *dst, struct scatterlist *src, | ||
| 179 | unsigned int nbytes) | ||
| 180 | { | 167 | { |
| 181 | struct blkcipher_walk walk; | 168 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 182 | struct crypto_blkcipher *tfm = desc->tfm; | 169 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 183 | struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
| 184 | struct crypto_cipher *child = ctx->child; | 170 | struct crypto_cipher *child = ctx->child; |
| 171 | struct skcipher_walk walk; | ||
| 172 | unsigned int nbytes; | ||
| 185 | int err; | 173 | int err; |
| 186 | 174 | ||
| 187 | blkcipher_walk_init(&walk, dst, src, nbytes); | 175 | err = skcipher_walk_virt(&walk, req, false); |
| 188 | err = blkcipher_walk_virt(desc, &walk); | ||
| 189 | 176 | ||
| 190 | while ((nbytes = walk.nbytes)) { | 177 | while ((nbytes = walk.nbytes)) { |
| 191 | if (walk.src.virt.addr == walk.dst.virt.addr) | 178 | if (walk.src.virt.addr == walk.dst.virt.addr) |
| 192 | nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, | 179 | nbytes = crypto_pcbc_decrypt_inplace(req, &walk, |
| 193 | child); | 180 | child); |
| 194 | else | 181 | else |
| 195 | nbytes = crypto_pcbc_decrypt_segment(desc, &walk, | 182 | nbytes = crypto_pcbc_decrypt_segment(req, &walk, |
| 196 | child); | 183 | child); |
| 197 | err = blkcipher_walk_done(desc, &walk, nbytes); | 184 | err = skcipher_walk_done(&walk, nbytes); |
| 198 | } | 185 | } |
| 199 | 186 | ||
| 200 | return err; | 187 | return err; |
| 201 | } | 188 | } |
| 202 | 189 | ||
| 203 | static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) | 190 | static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm) |
| 204 | { | 191 | { |
| 205 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 192 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
| 206 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 193 | struct crypto_spawn *spawn = skcipher_instance_ctx(inst); |
| 207 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); | 194 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 208 | struct crypto_cipher *cipher; | 195 | struct crypto_cipher *cipher; |
| 209 | 196 | ||
| 210 | cipher = crypto_spawn_cipher(spawn); | 197 | cipher = crypto_spawn_cipher(spawn); |
| @@ -215,68 +202,98 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) | |||
| 215 | return 0; | 202 | return 0; |
| 216 | } | 203 | } |
| 217 | 204 | ||
| 218 | static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) | 205 | static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm) |
| 219 | { | 206 | { |
| 220 | struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); | 207 | struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
| 208 | |||
| 221 | crypto_free_cipher(ctx->child); | 209 | crypto_free_cipher(ctx->child); |
| 222 | } | 210 | } |
| 223 | 211 | ||
| 224 | static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) | 212 | static void crypto_pcbc_free(struct skcipher_instance *inst) |
| 213 | { | ||
| 214 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
| 215 | kfree(inst); | ||
| 216 | } | ||
| 217 | |||
| 218 | static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
| 225 | { | 219 | { |
| 226 | struct crypto_instance *inst; | 220 | struct skcipher_instance *inst; |
| 221 | struct crypto_attr_type *algt; | ||
| 222 | struct crypto_spawn *spawn; | ||
| 227 | struct crypto_alg *alg; | 223 | struct crypto_alg *alg; |
| 228 | int err; | 224 | int err; |
| 229 | 225 | ||
| 230 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 226 | algt = crypto_get_attr_type(tb); |
| 231 | if (err) | 227 | if (IS_ERR(algt)) |
| 232 | return ERR_PTR(err); | 228 | return PTR_ERR(algt); |
| 229 | |||
| 230 | if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) & | ||
| 231 | ~CRYPTO_ALG_INTERNAL) | ||
| 232 | return -EINVAL; | ||
| 233 | 233 | ||
| 234 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 234 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); |
| 235 | CRYPTO_ALG_TYPE_MASK); | 235 | if (!inst) |
| 236 | return -ENOMEM; | ||
| 237 | |||
| 238 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER | | ||
| 239 | (algt->type & CRYPTO_ALG_INTERNAL), | ||
| 240 | CRYPTO_ALG_TYPE_MASK | | ||
| 241 | (algt->mask & CRYPTO_ALG_INTERNAL)); | ||
| 242 | err = PTR_ERR(alg); | ||
| 236 | if (IS_ERR(alg)) | 243 | if (IS_ERR(alg)) |
| 237 | return ERR_CAST(alg); | 244 | goto err_free_inst; |
| 245 | |||
| 246 | spawn = skcipher_instance_ctx(inst); | ||
| 247 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), | ||
| 248 | CRYPTO_ALG_TYPE_MASK); | ||
| 249 | crypto_mod_put(alg); | ||
| 250 | if (err) | ||
| 251 | goto err_free_inst; | ||
| 238 | 252 | ||
| 239 | inst = crypto_alloc_instance("pcbc", alg); | 253 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); |
| 240 | if (IS_ERR(inst)) | 254 | if (err) |
| 241 | goto out_put_alg; | 255 | goto err_drop_spawn; |
| 242 | 256 | ||
| 243 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 257 | inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL; |
| 244 | inst->alg.cra_priority = alg->cra_priority; | 258 | inst->alg.base.cra_priority = alg->cra_priority; |
| 245 | inst->alg.cra_blocksize = alg->cra_blocksize; | 259 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
| 246 | inst->alg.cra_alignmask = alg->cra_alignmask; | 260 | inst->alg.base.cra_alignmask = alg->cra_alignmask; |
| 247 | inst->alg.cra_type = &crypto_blkcipher_type; | ||
| 248 | 261 | ||
| 249 | /* We access the data as u32s when xoring. */ | 262 | /* We access the data as u32s when xoring. */ |
| 250 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 263 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; |
| 251 | 264 | ||
| 252 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 265 | inst->alg.ivsize = alg->cra_blocksize; |
| 253 | inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; | 266 | inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; |
| 254 | inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; | 267 | inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; |
| 255 | 268 | ||
| 256 | inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); | 269 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); |
| 257 | 270 | ||
| 258 | inst->alg.cra_init = crypto_pcbc_init_tfm; | 271 | inst->alg.init = crypto_pcbc_init_tfm; |
| 259 | inst->alg.cra_exit = crypto_pcbc_exit_tfm; | 272 | inst->alg.exit = crypto_pcbc_exit_tfm; |
| 260 | 273 | ||
| 261 | inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey; | 274 | inst->alg.setkey = crypto_pcbc_setkey; |
| 262 | inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt; | 275 | inst->alg.encrypt = crypto_pcbc_encrypt; |
| 263 | inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt; | 276 | inst->alg.decrypt = crypto_pcbc_decrypt; |
| 264 | 277 | ||
| 265 | out_put_alg: | 278 | inst->free = crypto_pcbc_free; |
| 266 | crypto_mod_put(alg); | ||
| 267 | return inst; | ||
| 268 | } | ||
| 269 | 279 | ||
| 270 | static void crypto_pcbc_free(struct crypto_instance *inst) | 280 | err = skcipher_register_instance(tmpl, inst); |
| 271 | { | 281 | if (err) |
| 272 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 282 | goto err_drop_spawn; |
| 283 | |||
| 284 | out: | ||
| 285 | return err; | ||
| 286 | |||
| 287 | err_drop_spawn: | ||
| 288 | crypto_drop_spawn(spawn); | ||
| 289 | err_free_inst: | ||
| 273 | kfree(inst); | 290 | kfree(inst); |
| 291 | goto out; | ||
| 274 | } | 292 | } |
| 275 | 293 | ||
| 276 | static struct crypto_template crypto_pcbc_tmpl = { | 294 | static struct crypto_template crypto_pcbc_tmpl = { |
| 277 | .name = "pcbc", | 295 | .name = "pcbc", |
| 278 | .alloc = crypto_pcbc_alloc, | 296 | .create = crypto_pcbc_create, |
| 279 | .free = crypto_pcbc_free, | ||
| 280 | .module = THIS_MODULE, | 297 | .module = THIS_MODULE, |
| 281 | }; | 298 | }; |
| 282 | 299 | ||
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 2df9835dfbc0..b1c2d57dc734 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
| 18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 20 | #include <asm/unaligned.h> | ||
| 20 | 21 | ||
| 21 | static inline u64 mlt(u64 a, u64 b) | 22 | static inline u64 mlt(u64 a, u64 b) |
| 22 | { | 23 | { |
| @@ -33,11 +34,6 @@ static inline u32 and(u32 v, u32 mask) | |||
| 33 | return v & mask; | 34 | return v & mask; |
| 34 | } | 35 | } |
| 35 | 36 | ||
| 36 | static inline u32 le32_to_cpuvp(const void *p) | ||
| 37 | { | ||
| 38 | return le32_to_cpup(p); | ||
| 39 | } | ||
| 40 | |||
| 41 | int crypto_poly1305_init(struct shash_desc *desc) | 37 | int crypto_poly1305_init(struct shash_desc *desc) |
| 42 | { | 38 | { |
| 43 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | 39 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); |
| @@ -65,19 +61,19 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); | |||
| 65 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) | 61 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) |
| 66 | { | 62 | { |
| 67 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ | 63 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ |
| 68 | dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff; | 64 | dctx->r[0] = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff; |
| 69 | dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03; | 65 | dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03; |
| 70 | dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff; | 66 | dctx->r[2] = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff; |
| 71 | dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff; | 67 | dctx->r[3] = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff; |
| 72 | dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff; | 68 | dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff; |
| 73 | } | 69 | } |
| 74 | 70 | ||
| 75 | static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) | 71 | static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) |
| 76 | { | 72 | { |
| 77 | dctx->s[0] = le32_to_cpuvp(key + 0); | 73 | dctx->s[0] = get_unaligned_le32(key + 0); |
| 78 | dctx->s[1] = le32_to_cpuvp(key + 4); | 74 | dctx->s[1] = get_unaligned_le32(key + 4); |
| 79 | dctx->s[2] = le32_to_cpuvp(key + 8); | 75 | dctx->s[2] = get_unaligned_le32(key + 8); |
| 80 | dctx->s[3] = le32_to_cpuvp(key + 12); | 76 | dctx->s[3] = get_unaligned_le32(key + 12); |
| 81 | } | 77 | } |
| 82 | 78 | ||
| 83 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | 79 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
| @@ -137,11 +133,11 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | |||
| 137 | while (likely(srclen >= POLY1305_BLOCK_SIZE)) { | 133 | while (likely(srclen >= POLY1305_BLOCK_SIZE)) { |
| 138 | 134 | ||
| 139 | /* h += m[i] */ | 135 | /* h += m[i] */ |
| 140 | h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff; | 136 | h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; |
| 141 | h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff; | 137 | h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; |
| 142 | h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff; | 138 | h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; |
| 143 | h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff; | 139 | h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; |
| 144 | h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit; | 140 | h4 += (get_unaligned_le32(src + 12) >> 8) | hibit; |
| 145 | 141 | ||
| 146 | /* h *= r */ | 142 | /* h *= r */ |
| 147 | d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + | 143 | d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + |
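
The poly1305 change above replaces the le32_to_cpuvp() helper (a thin wrapper around le32_to_cpup(), which dereferences a u32 pointer and therefore assumes alignment) with get_unaligned_le32(). That matters here because the limbs are loaded from odd offsets such as key + 3 and key + 9, which can never all be 4-byte aligned. A portable byte-wise equivalent of the unaligned load, as a standalone sketch:

/* Userspace model of get_unaligned_le32(): assemble a little-endian
 * 32-bit value byte by byte, so the pointer may have any alignment.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t load_le32_unaligned(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* Deliberately read at offset 3, as poly1305_setrkey() does. */
	uint8_t key[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
			    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };

	assert(load_le32_unaligned(key + 3) == 0x07060504u);
	printf("r[1] limb = 0x%07x\n",
	       (load_le32_unaligned(key + 3) >> 2) & 0x3ffff03);
	return 0;
}
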
diff --git a/crypto/scompress.c b/crypto/scompress.c new file mode 100644 index 000000000000..35e396d154b7 --- /dev/null +++ b/crypto/scompress.c | |||
| @@ -0,0 +1,356 @@ | |||
| 1 | /* | ||
| 2 | * Synchronous Compression operations | ||
| 3 | * | ||
| 4 | * Copyright 2015 LG Electronics Inc. | ||
| 5 | * Copyright (c) 2016, Intel Corporation | ||
| 6 | * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #include <linux/errno.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/seq_file.h> | ||
| 18 | #include <linux/slab.h> | ||
| 19 | #include <linux/string.h> | ||
| 20 | #include <linux/crypto.h> | ||
| 21 | #include <linux/vmalloc.h> | ||
| 22 | #include <crypto/algapi.h> | ||
| 23 | #include <linux/cryptouser.h> | ||
| 24 | #include <net/netlink.h> | ||
| 25 | #include <linux/scatterlist.h> | ||
| 26 | #include <crypto/scatterwalk.h> | ||
| 27 | #include <crypto/internal/acompress.h> | ||
| 28 | #include <crypto/internal/scompress.h> | ||
| 29 | #include "internal.h" | ||
| 30 | |||
| 31 | static const struct crypto_type crypto_scomp_type; | ||
| 32 | static void * __percpu *scomp_src_scratches; | ||
| 33 | static void * __percpu *scomp_dst_scratches; | ||
| 34 | static int scomp_scratch_users; | ||
| 35 | static DEFINE_MUTEX(scomp_lock); | ||
| 36 | |||
| 37 | #ifdef CONFIG_NET | ||
| 38 | static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 39 | { | ||
| 40 | struct crypto_report_comp rscomp; | ||
| 41 | |||
| 42 | strncpy(rscomp.type, "scomp", sizeof(rscomp.type)); | ||
| 43 | |||
| 44 | if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, | ||
| 45 | sizeof(struct crypto_report_comp), &rscomp)) | ||
| 46 | goto nla_put_failure; | ||
| 47 | return 0; | ||
| 48 | |||
| 49 | nla_put_failure: | ||
| 50 | return -EMSGSIZE; | ||
| 51 | } | ||
| 52 | #else | ||
| 53 | static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 54 | { | ||
| 55 | return -ENOSYS; | ||
| 56 | } | ||
| 57 | #endif | ||
| 58 | |||
| 59 | static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 60 | __attribute__ ((unused)); | ||
| 61 | |||
| 62 | static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 63 | { | ||
| 64 | seq_puts(m, "type : scomp\n"); | ||
| 65 | } | ||
| 66 | |||
| 67 | static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) | ||
| 68 | { | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static void crypto_scomp_free_scratches(void * __percpu *scratches) | ||
| 73 | { | ||
| 74 | int i; | ||
| 75 | |||
| 76 | if (!scratches) | ||
| 77 | return; | ||
| 78 | |||
| 79 | for_each_possible_cpu(i) | ||
| 80 | vfree(*per_cpu_ptr(scratches, i)); | ||
| 81 | |||
| 82 | free_percpu(scratches); | ||
| 83 | } | ||
| 84 | |||
| 85 | static void * __percpu *crypto_scomp_alloc_scratches(void) | ||
| 86 | { | ||
| 87 | void * __percpu *scratches; | ||
| 88 | int i; | ||
| 89 | |||
| 90 | scratches = alloc_percpu(void *); | ||
| 91 | if (!scratches) | ||
| 92 | return NULL; | ||
| 93 | |||
| 94 | for_each_possible_cpu(i) { | ||
| 95 | void *scratch; | ||
| 96 | |||
| 97 | scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); | ||
| 98 | if (!scratch) | ||
| 99 | goto error; | ||
| 100 | *per_cpu_ptr(scratches, i) = scratch; | ||
| 101 | } | ||
| 102 | |||
| 103 | return scratches; | ||
| 104 | |||
| 105 | error: | ||
| 106 | crypto_scomp_free_scratches(scratches); | ||
| 107 | return NULL; | ||
| 108 | } | ||
| 109 | |||
| 110 | static void crypto_scomp_free_all_scratches(void) | ||
| 111 | { | ||
| 112 | if (!--scomp_scratch_users) { | ||
| 113 | crypto_scomp_free_scratches(scomp_src_scratches); | ||
| 114 | crypto_scomp_free_scratches(scomp_dst_scratches); | ||
| 115 | scomp_src_scratches = NULL; | ||
| 116 | scomp_dst_scratches = NULL; | ||
| 117 | } | ||
| 118 | } | ||
| 119 | |||
| 120 | static int crypto_scomp_alloc_all_scratches(void) | ||
| 121 | { | ||
| 122 | if (!scomp_scratch_users++) { | ||
| 123 | scomp_src_scratches = crypto_scomp_alloc_scratches(); | ||
| 124 | if (!scomp_src_scratches) | ||
| 125 | return -ENOMEM; | ||
| 126 | scomp_dst_scratches = crypto_scomp_alloc_scratches(); | ||
| 127 | if (!scomp_dst_scratches) | ||
| 128 | return -ENOMEM; | ||
| 129 | } | ||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
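
crypto_scomp_alloc_all_scratches()/crypto_scomp_free_all_scratches() above implement a use-counted lazy-init pattern: the first registered scomp allocates the per-CPU scratch pages, the last one frees them, and scomp_lock (taken in the register/unregister paths further down) serializes the scomp_scratch_users counter. A minimal userspace sketch of the pattern; the pthread mutex and plain malloc() are stand-ins for the kernel's mutex and vmalloc'd per-CPU buffers.

/* Use-counted lazy init/teardown, modelled on scomp_scratch_users.
 * pthread_mutex_t stands in for the kernel's scomp_lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int users;
static void *scratch;

static int get_scratch(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!users++) {
		scratch = malloc(128 * 1024);	/* "SCOMP_SCRATCH_SIZE" */
		if (!scratch) {
			users--;
			ret = -1;
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

static void put_scratch(void)
{
	pthread_mutex_lock(&lock);
	if (!--users) {
		free(scratch);
		scratch = NULL;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	if (get_scratch())
		return 1;
	if (get_scratch())	/* second user: no new allocation */
		return 1;
	put_scratch();		/* one user remains: scratch kept */
	put_scratch();		/* last user gone: scratch freed */
	printf("users=%d scratch=%p\n", users, scratch);
	return 0;
}
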
| 133 | static void crypto_scomp_sg_free(struct scatterlist *sgl) | ||
| 134 | { | ||
| 135 | int i, n; | ||
| 136 | struct page *page; | ||
| 137 | |||
| 138 | if (!sgl) | ||
| 139 | return; | ||
| 140 | |||
| 141 | n = sg_nents(sgl); | ||
| 142 | for_each_sg(sgl, sgl, n, i) { | ||
| 143 | page = sg_page(sgl); | ||
| 144 | if (page) | ||
| 145 | __free_page(page); | ||
| 146 | } | ||
| 147 | |||
| 148 | kfree(sgl); | ||
| 149 | } | ||
| 150 | |||
| 151 | static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp) | ||
| 152 | { | ||
| 153 | struct scatterlist *sgl; | ||
| 154 | struct page *page; | ||
| 155 | int i, n; | ||
| 156 | |||
| 157 | n = ((size - 1) >> PAGE_SHIFT) + 1; | ||
| 158 | |||
| 159 | sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp); | ||
| 160 | if (!sgl) | ||
| 161 | return NULL; | ||
| 162 | |||
| 163 | sg_init_table(sgl, n); | ||
| 164 | |||
| 165 | for (i = 0; i < n; i++) { | ||
| 166 | page = alloc_page(gfp); | ||
| 167 | if (!page) | ||
| 168 | goto err; | ||
| 169 | sg_set_page(sgl + i, page, PAGE_SIZE, 0); | ||
| 170 | } | ||
| 171 | |||
| 172 | return sgl; | ||
| 173 | |||
| 174 | err: | ||
| 175 | sg_mark_end(sgl + i); | ||
| 176 | crypto_scomp_sg_free(sgl); | ||
| 177 | return NULL; | ||
| 178 | } | ||
| 179 | |||
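
crypto_scomp_sg_alloc() sizes its scatterlist with n = ((size - 1) >> PAGE_SHIFT) + 1, i.e. a round-up division by the page size: the number of whole pages needed to hold size bytes (size must be non-zero). A quick standalone check of that identity, assuming 4096-byte pages for the sketch:

/* ((size - 1) >> PAGE_SHIFT) + 1 equals DIV_ROUND_UP(size, PAGE_SIZE)
 * for size > 0: the page count needed to hold `size` bytes.
 */
#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static size_t pages_for(size_t size)
{
	return ((size - 1) >> PAGE_SHIFT) + 1;
}

int main(void)
{
	assert(pages_for(1) == 1);
	assert(pages_for(PAGE_SIZE) == 1);
	assert(pages_for(PAGE_SIZE + 1) == 2);
	assert(pages_for(3 * PAGE_SIZE - 1) == 3);
	return 0;
}
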
| 180 | static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) | ||
| 181 | { | ||
| 182 | struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); | ||
| 183 | void **tfm_ctx = acomp_tfm_ctx(tfm); | ||
| 184 | struct crypto_scomp *scomp = *tfm_ctx; | ||
| 185 | void **ctx = acomp_request_ctx(req); | ||
| 186 | const int cpu = get_cpu(); | ||
| 187 | u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); | ||
| 188 | u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); | ||
| 189 | int ret; | ||
| 190 | |||
| 191 | if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) { | ||
| 192 | ret = -EINVAL; | ||
| 193 | goto out; | ||
| 194 | } | ||
| 195 | |||
| 196 | if (req->dst && !req->dlen) { | ||
| 197 | ret = -EINVAL; | ||
| 198 | goto out; | ||
| 199 | } | ||
| 200 | |||
| 201 | if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) | ||
| 202 | req->dlen = SCOMP_SCRATCH_SIZE; | ||
| 203 | |||
| 204 | scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0); | ||
| 205 | if (dir) | ||
| 206 | ret = crypto_scomp_compress(scomp, scratch_src, req->slen, | ||
| 207 | scratch_dst, &req->dlen, *ctx); | ||
| 208 | else | ||
| 209 | ret = crypto_scomp_decompress(scomp, scratch_src, req->slen, | ||
| 210 | scratch_dst, &req->dlen, *ctx); | ||
| 211 | if (!ret) { | ||
| 212 | if (!req->dst) { | ||
| 213 | req->dst = crypto_scomp_sg_alloc(req->dlen, | ||
| 214 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | ||
| 215 | GFP_KERNEL : GFP_ATOMIC); | ||
| 216 | if (!req->dst) { | ||
| 217 | ret = -ENOMEM; | ||
| 218 | goto out; | ||
| 219 | } | ||
| 218 | } | ||
| 219 | scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, | ||
| 220 | 1); | ||
| 221 | } | ||
| 222 | out: | ||
| 223 | put_cpu(); | ||
| 224 | return ret; | ||
| 225 | } | ||
| 226 | |||
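
scomp_acomp_comp_decomp() is the glue that lets a synchronous, linear-buffer scomp algorithm service the scatterlist-based acomp API: the source scatterlist is staged into the per-CPU source scratch, the algorithm runs scratch to scratch, and the result is copied out (allocating a destination scatterlist on the fly when the caller passed none, and failing with -ENOMEM if that allocation fails). A sketch of that staging shape with plain buffers standing in for scatterlists and a trivial identity transform standing in for crypto_scomp_compress(); both stand-ins are assumptions for illustration only.

/* Staging shape of scomp_acomp_comp_decomp(): copy in, transform
 * scratch-to-scratch, copy out. memcpy stands in for the scatterlist
 * walks; toy_compress() for crypto_scomp_compress().
 */
#include <stdio.h>
#include <string.h>

#define SCRATCH_SIZE 256

static unsigned char src_scratch[SCRATCH_SIZE];
static unsigned char dst_scratch[SCRATCH_SIZE];

/* Toy transform: identity copy that reports the output length. */
static int toy_compress(const unsigned char *in, unsigned int ilen,
			unsigned char *out, unsigned int *olen)
{
	if (*olen < ilen)
		return -1;
	memcpy(out, in, ilen);
	*olen = ilen;
	return 0;
}

static int staged_op(const unsigned char *src, unsigned int slen,
		     unsigned char *dst, unsigned int *dlen)
{
	if (!slen || slen > SCRATCH_SIZE)
		return -1;		/* -EINVAL in the kernel code */
	if (!*dlen || *dlen > SCRATCH_SIZE)
		*dlen = SCRATCH_SIZE;

	memcpy(src_scratch, src, slen);	  /* scatterwalk_map_and_copy(.., 0) */
	if (toy_compress(src_scratch, slen, dst_scratch, dlen))
		return -1;
	memcpy(dst, dst_scratch, *dlen);  /* scatterwalk_map_and_copy(.., 1) */
	return 0;
}

int main(void)
{
	unsigned char out[SCRATCH_SIZE];
	unsigned int dlen = sizeof(out);

	if (staged_op((const unsigned char *)"hello", 5, out, &dlen))
		return 1;
	printf("dlen=%u\n", dlen);
	return 0;
}
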
| 227 | static int scomp_acomp_compress(struct acomp_req *req) | ||
| 228 | { | ||
| 229 | return scomp_acomp_comp_decomp(req, 1); | ||
| 230 | } | ||
| 231 | |||
| 232 | static int scomp_acomp_decompress(struct acomp_req *req) | ||
| 233 | { | ||
| 234 | return scomp_acomp_comp_decomp(req, 0); | ||
| 235 | } | ||
| 236 | |||
| 237 | static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) | ||
| 238 | { | ||
| 239 | struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); | ||
| 240 | |||
| 241 | crypto_free_scomp(*ctx); | ||
| 242 | } | ||
| 243 | |||
| 244 | int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) | ||
| 245 | { | ||
| 246 | struct crypto_alg *calg = tfm->__crt_alg; | ||
| 247 | struct crypto_acomp *crt = __crypto_acomp_tfm(tfm); | ||
| 248 | struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); | ||
| 249 | struct crypto_scomp *scomp; | ||
| 250 | |||
| 251 | if (!crypto_mod_get(calg)) | ||
| 252 | return -EAGAIN; | ||
| 253 | |||
| 254 | scomp = crypto_create_tfm(calg, &crypto_scomp_type); | ||
| 255 | if (IS_ERR(scomp)) { | ||
| 256 | crypto_mod_put(calg); | ||
| 257 | return PTR_ERR(scomp); | ||
| 258 | } | ||
| 259 | |||
| 260 | *ctx = scomp; | ||
| 261 | tfm->exit = crypto_exit_scomp_ops_async; | ||
| 262 | |||
| 263 | crt->compress = scomp_acomp_compress; | ||
| 264 | crt->decompress = scomp_acomp_decompress; | ||
| 265 | crt->dst_free = crypto_scomp_sg_free; | ||
| 266 | crt->reqsize = sizeof(void *); | ||
| 267 | |||
| 268 | return 0; | ||
| 269 | } | ||
| 270 | |||
| 271 | struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) | ||
| 272 | { | ||
| 273 | struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); | ||
| 274 | struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); | ||
| 275 | struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); | ||
| 276 | struct crypto_scomp *scomp = *tfm_ctx; | ||
| 277 | void *ctx; | ||
| 278 | |||
| 279 | ctx = crypto_scomp_alloc_ctx(scomp); | ||
| 280 | if (IS_ERR(ctx)) { | ||
| 281 | kfree(req); | ||
| 282 | return NULL; | ||
| 283 | } | ||
| 284 | |||
| 285 | *req->__ctx = ctx; | ||
| 286 | |||
| 287 | return req; | ||
| 288 | } | ||
| 289 | |||
| 290 | void crypto_acomp_scomp_free_ctx(struct acomp_req *req) | ||
| 291 | { | ||
| 292 | struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); | ||
| 293 | struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); | ||
| 294 | struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); | ||
| 295 | struct crypto_scomp *scomp = *tfm_ctx; | ||
| 296 | void *ctx = *req->__ctx; | ||
| 297 | |||
| 298 | if (ctx) | ||
| 299 | crypto_scomp_free_ctx(scomp, ctx); | ||
| 300 | } | ||
| 301 | |||
| 302 | static const struct crypto_type crypto_scomp_type = { | ||
| 303 | .extsize = crypto_alg_extsize, | ||
| 304 | .init_tfm = crypto_scomp_init_tfm, | ||
| 305 | #ifdef CONFIG_PROC_FS | ||
| 306 | .show = crypto_scomp_show, | ||
| 307 | #endif | ||
| 308 | .report = crypto_scomp_report, | ||
| 309 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
| 310 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
| 311 | .type = CRYPTO_ALG_TYPE_SCOMPRESS, | ||
| 312 | .tfmsize = offsetof(struct crypto_scomp, base), | ||
| 313 | }; | ||
| 314 | |||
| 315 | int crypto_register_scomp(struct scomp_alg *alg) | ||
| 316 | { | ||
| 317 | struct crypto_alg *base = &alg->base; | ||
| 318 | int ret = -ENOMEM; | ||
| 319 | |||
| 320 | mutex_lock(&scomp_lock); | ||
| 321 | if (crypto_scomp_alloc_all_scratches()) | ||
| 322 | goto error; | ||
| 323 | |||
| 324 | base->cra_type = &crypto_scomp_type; | ||
| 325 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
| 326 | base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; | ||
| 327 | |||
| 328 | ret = crypto_register_alg(base); | ||
| 329 | if (ret) | ||
| 330 | goto error; | ||
| 331 | |||
| 332 | mutex_unlock(&scomp_lock); | ||
| 333 | return ret; | ||
| 334 | |||
| 335 | error: | ||
| 336 | crypto_scomp_free_all_scratches(); | ||
| 337 | mutex_unlock(&scomp_lock); | ||
| 338 | return ret; | ||
| 339 | } | ||
| 340 | EXPORT_SYMBOL_GPL(crypto_register_scomp); | ||
| 341 | |||
| 342 | int crypto_unregister_scomp(struct scomp_alg *alg) | ||
| 343 | { | ||
| 344 | int ret; | ||
| 345 | |||
| 346 | mutex_lock(&scomp_lock); | ||
| 347 | ret = crypto_unregister_alg(&alg->base); | ||
| 348 | crypto_scomp_free_all_scratches(); | ||
| 349 | mutex_unlock(&scomp_lock); | ||
| 350 | |||
| 351 | return ret; | ||
| 352 | } | ||
| 353 | EXPORT_SYMBOL_GPL(crypto_unregister_scomp); | ||
| 354 | |||
| 355 | MODULE_LICENSE("GPL"); | ||
| 356 | MODULE_DESCRIPTION("Synchronous compression type"); | ||
diff --git a/crypto/simd.c b/crypto/simd.c new file mode 100644 index 000000000000..88203370a62f --- /dev/null +++ b/crypto/simd.c | |||
| @@ -0,0 +1,226 @@ | |||
| 1 | /* | ||
| 2 | * Shared crypto simd helpers | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
| 5 | * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 6 | * | ||
| 7 | * Based on aesni-intel_glue.c by: | ||
| 8 | * Copyright (C) 2008, Intel Corp. | ||
| 9 | * Author: Huang Ying <ying.huang@intel.com> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License as published by | ||
| 13 | * the Free Software Foundation; either version 2 of the License, or | ||
| 14 | * (at your option) any later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | * | ||
| 21 | * You should have received a copy of the GNU General Public License | ||
| 22 | * along with this program; if not, write to the Free Software | ||
| 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 24 | * USA | ||
| 25 | * | ||
| 26 | */ | ||
| 27 | |||
| 28 | #include <crypto/cryptd.h> | ||
| 29 | #include <crypto/internal/simd.h> | ||
| 30 | #include <crypto/internal/skcipher.h> | ||
| 31 | #include <linux/kernel.h> | ||
| 32 | #include <linux/module.h> | ||
| 33 | #include <linux/preempt.h> | ||
| 34 | #include <asm/simd.h> | ||
| 35 | |||
| 36 | struct simd_skcipher_alg { | ||
| 37 | const char *ialg_name; | ||
| 38 | struct skcipher_alg alg; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct simd_skcipher_ctx { | ||
| 42 | struct cryptd_skcipher *cryptd_tfm; | ||
| 43 | }; | ||
| 44 | |||
| 45 | static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | ||
| 46 | unsigned int key_len) | ||
| 47 | { | ||
| 48 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 49 | struct crypto_skcipher *child = &ctx->cryptd_tfm->base; | ||
| 50 | int err; | ||
| 51 | |||
| 52 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
| 53 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) & | ||
| 54 | CRYPTO_TFM_REQ_MASK); | ||
| 55 | err = crypto_skcipher_setkey(child, key, key_len); | ||
| 56 | crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) & | ||
| 57 | CRYPTO_TFM_RES_MASK); | ||
| 58 | return err; | ||
| 59 | } | ||
| 60 | |||
| 61 | static int simd_skcipher_encrypt(struct skcipher_request *req) | ||
| 62 | { | ||
| 63 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 64 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 65 | struct skcipher_request *subreq; | ||
| 66 | struct crypto_skcipher *child; | ||
| 67 | |||
| 68 | subreq = skcipher_request_ctx(req); | ||
| 69 | *subreq = *req; | ||
| 70 | |||
| 71 | if (!may_use_simd() || | ||
| 72 | (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) | ||
| 73 | child = &ctx->cryptd_tfm->base; | ||
| 74 | else | ||
| 75 | child = cryptd_skcipher_child(ctx->cryptd_tfm); | ||
| 76 | |||
| 77 | skcipher_request_set_tfm(subreq, child); | ||
| 78 | |||
| 79 | return crypto_skcipher_encrypt(subreq); | ||
| 80 | } | ||
| 81 | |||
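
The child-selection rule in simd_skcipher_encrypt() (mirrored in simd_skcipher_decrypt() below) is the heart of this wrapper: if SIMD registers cannot be used in the current context, or we are atomic and cryptd already has requests queued (so jumping the queue would reorder them), the request is forwarded to the async cryptd wrapper; otherwise the inner SIMD implementation is called directly. A toy model of just that decision, with the kernel predicates stubbed out as booleans (an assumption for illustration):

/* Toy model of the child-selection rule in simd_skcipher_encrypt().
 * The three booleans stand in for may_use_simd(), in_atomic() and
 * cryptd_skcipher_queued().
 */
#include <stdbool.h>
#include <stdio.h>

enum child { CRYPTD_ASYNC, SIMD_DIRECT };

static enum child pick_child(bool may_use_simd, bool in_atomic, bool queued)
{
	/* Mirrors: !may_use_simd() || (in_atomic() && queued) */
	if (!may_use_simd || (in_atomic && queued))
		return CRYPTD_ASYNC;	/* &ctx->cryptd_tfm->base */
	return SIMD_DIRECT;		/* cryptd_skcipher_child() */
}

int main(void)
{
	printf("%d\n", pick_child(true, false, false));	 /* SIMD_DIRECT */
	printf("%d\n", pick_child(false, false, false)); /* CRYPTD_ASYNC */
	printf("%d\n", pick_child(true, true, true));	 /* CRYPTD_ASYNC */
	return 0;
}
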
| 82 | static int simd_skcipher_decrypt(struct skcipher_request *req) | ||
| 83 | { | ||
| 84 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 85 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 86 | struct skcipher_request *subreq; | ||
| 87 | struct crypto_skcipher *child; | ||
| 88 | |||
| 89 | subreq = skcipher_request_ctx(req); | ||
| 90 | *subreq = *req; | ||
| 91 | |||
| 92 | if (!may_use_simd() || | ||
| 93 | (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) | ||
| 94 | child = &ctx->cryptd_tfm->base; | ||
| 95 | else | ||
| 96 | child = cryptd_skcipher_child(ctx->cryptd_tfm); | ||
| 97 | |||
| 98 | skcipher_request_set_tfm(subreq, child); | ||
| 99 | |||
| 100 | return crypto_skcipher_decrypt(subreq); | ||
| 101 | } | ||
| 102 | |||
| 103 | static void simd_skcipher_exit(struct crypto_skcipher *tfm) | ||
| 104 | { | ||
| 105 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 106 | |||
| 107 | cryptd_free_skcipher(ctx->cryptd_tfm); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int simd_skcipher_init(struct crypto_skcipher *tfm) | ||
| 111 | { | ||
| 112 | struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
| 113 | struct cryptd_skcipher *cryptd_tfm; | ||
| 114 | struct simd_skcipher_alg *salg; | ||
| 115 | struct skcipher_alg *alg; | ||
| 116 | unsigned reqsize; | ||
| 117 | |||
| 118 | alg = crypto_skcipher_alg(tfm); | ||
| 119 | salg = container_of(alg, struct simd_skcipher_alg, alg); | ||
| 120 | |||
| 121 | cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name, | ||
| 122 | CRYPTO_ALG_INTERNAL, | ||
| 123 | CRYPTO_ALG_INTERNAL); | ||
| 124 | if (IS_ERR(cryptd_tfm)) | ||
| 125 | return PTR_ERR(cryptd_tfm); | ||
| 126 | |||
| 127 | ctx->cryptd_tfm = cryptd_tfm; | ||
| 128 | |||
| 129 | reqsize = sizeof(struct skcipher_request); | ||
| 130 | reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base); | ||
| 131 | |||
| 132 | crypto_skcipher_set_reqsize(tfm, reqsize); | ||
| 133 | |||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, | ||
| 138 | const char *drvname, | ||
| 139 | const char *basename) | ||
| 140 | { | ||
| 141 | struct simd_skcipher_alg *salg; | ||
| 142 | struct crypto_skcipher *tfm; | ||
| 143 | struct skcipher_alg *ialg; | ||
| 144 | struct skcipher_alg *alg; | ||
| 145 | int err; | ||
| 146 | |||
| 147 | tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, | ||
| 148 | CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); | ||
| 149 | if (IS_ERR(tfm)) | ||
| 150 | return ERR_CAST(tfm); | ||
| 151 | |||
| 152 | ialg = crypto_skcipher_alg(tfm); | ||
| 153 | |||
| 154 | salg = kzalloc(sizeof(*salg), GFP_KERNEL); | ||
| 155 | if (!salg) { | ||
| 156 | salg = ERR_PTR(-ENOMEM); | ||
| 157 | goto out_put_tfm; | ||
| 158 | } | ||
| 159 | |||
| 160 | salg->ialg_name = basename; | ||
| 161 | alg = &salg->alg; | ||
| 162 | |||
| 163 | err = -ENAMETOOLONG; | ||
| 164 | if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >= | ||
| 165 | CRYPTO_MAX_ALG_NAME) | ||
| 166 | goto out_free_salg; | ||
| 167 | |||
| 168 | if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | ||
| 169 | drvname) >= CRYPTO_MAX_ALG_NAME) | ||
| 170 | goto out_free_salg; | ||
| 171 | |||
| 172 | alg->base.cra_flags = CRYPTO_ALG_ASYNC; | ||
| 173 | alg->base.cra_priority = ialg->base.cra_priority; | ||
| 174 | alg->base.cra_blocksize = ialg->base.cra_blocksize; | ||
| 175 | alg->base.cra_alignmask = ialg->base.cra_alignmask; | ||
| 176 | alg->base.cra_module = ialg->base.cra_module; | ||
| 177 | alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx); | ||
| 178 | |||
| 179 | alg->ivsize = ialg->ivsize; | ||
| 180 | alg->chunksize = ialg->chunksize; | ||
| 181 | alg->min_keysize = ialg->min_keysize; | ||
| 182 | alg->max_keysize = ialg->max_keysize; | ||
| 183 | |||
| 184 | alg->init = simd_skcipher_init; | ||
| 185 | alg->exit = simd_skcipher_exit; | ||
| 186 | |||
| 187 | alg->setkey = simd_skcipher_setkey; | ||
| 188 | alg->encrypt = simd_skcipher_encrypt; | ||
| 189 | alg->decrypt = simd_skcipher_decrypt; | ||
| 190 | |||
| 191 | err = crypto_register_skcipher(alg); | ||
| 192 | if (err) | ||
| 193 | goto out_free_salg; | ||
| 194 | |||
| 195 | out_put_tfm: | ||
| 196 | crypto_free_skcipher(tfm); | ||
| 197 | return salg; | ||
| 198 | |||
| 199 | out_free_salg: | ||
| 200 | kfree(salg); | ||
| 201 | salg = ERR_PTR(err); | ||
| 202 | goto out_put_tfm; | ||
| 203 | } | ||
| 204 | EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); | ||
| 205 | |||
| 206 | struct simd_skcipher_alg *simd_skcipher_create(const char *algname, | ||
| 207 | const char *basename) | ||
| 208 | { | ||
| 209 | char drvname[CRYPTO_MAX_ALG_NAME]; | ||
| 210 | |||
| 211 | if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= | ||
| 212 | CRYPTO_MAX_ALG_NAME) | ||
| 213 | return ERR_PTR(-ENAMETOOLONG); | ||
| 214 | |||
| 215 | return simd_skcipher_create_compat(algname, drvname, basename); | ||
| 216 | } | ||
| 217 | EXPORT_SYMBOL_GPL(simd_skcipher_create); | ||
| 218 | |||
| 219 | void simd_skcipher_free(struct simd_skcipher_alg *salg) | ||
| 220 | { | ||
| 221 | crypto_unregister_skcipher(&salg->alg); | ||
| 222 | kfree(salg); | ||
| 223 | } | ||
| 224 | EXPORT_SYMBOL_GPL(simd_skcipher_free); | ||
| 225 | |||
| 226 | MODULE_LICENSE("GPL"); | ||
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index f7d0018dcaee..aca07c643d41 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
| @@ -14,9 +14,12 @@ | |||
| 14 | * | 14 | * |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <crypto/internal/aead.h> | ||
| 17 | #include <crypto/internal/skcipher.h> | 18 | #include <crypto/internal/skcipher.h> |
| 19 | #include <crypto/scatterwalk.h> | ||
| 18 | #include <linux/bug.h> | 20 | #include <linux/bug.h> |
| 19 | #include <linux/cryptouser.h> | 21 | #include <linux/cryptouser.h> |
| 22 | #include <linux/list.h> | ||
| 20 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 21 | #include <linux/rtnetlink.h> | 24 | #include <linux/rtnetlink.h> |
| 22 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
| @@ -24,6 +27,543 @@ | |||
| 24 | 27 | ||
| 25 | #include "internal.h" | 28 | #include "internal.h" |
| 26 | 29 | ||
| 30 | enum { | ||
| 31 | SKCIPHER_WALK_PHYS = 1 << 0, | ||
| 32 | SKCIPHER_WALK_SLOW = 1 << 1, | ||
| 33 | SKCIPHER_WALK_COPY = 1 << 2, | ||
| 34 | SKCIPHER_WALK_DIFF = 1 << 3, | ||
| 35 | SKCIPHER_WALK_SLEEP = 1 << 4, | ||
| 36 | }; | ||
| 37 | |||
| 38 | struct skcipher_walk_buffer { | ||
| 39 | struct list_head entry; | ||
| 40 | struct scatter_walk dst; | ||
| 41 | unsigned int len; | ||
| 42 | u8 *data; | ||
| 43 | u8 buffer[]; | ||
| 44 | }; | ||
| 45 | |||
| 46 | static int skcipher_walk_next(struct skcipher_walk *walk); | ||
| 47 | |||
| 48 | static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) | ||
| 49 | { | ||
| 50 | if (PageHighMem(scatterwalk_page(walk))) | ||
| 51 | kunmap_atomic(vaddr); | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void *skcipher_map(struct scatter_walk *walk) | ||
| 55 | { | ||
| 56 | struct page *page = scatterwalk_page(walk); | ||
| 57 | |||
| 58 | return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) + | ||
| 59 | offset_in_page(walk->offset); | ||
| 60 | } | ||
| 61 | |||
| 62 | static inline void skcipher_map_src(struct skcipher_walk *walk) | ||
| 63 | { | ||
| 64 | walk->src.virt.addr = skcipher_map(&walk->in); | ||
| 65 | } | ||
| 66 | |||
| 67 | static inline void skcipher_map_dst(struct skcipher_walk *walk) | ||
| 68 | { | ||
| 69 | walk->dst.virt.addr = skcipher_map(&walk->out); | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline void skcipher_unmap_src(struct skcipher_walk *walk) | ||
| 73 | { | ||
| 74 | skcipher_unmap(&walk->in, walk->src.virt.addr); | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline void skcipher_unmap_dst(struct skcipher_walk *walk) | ||
| 78 | { | ||
| 79 | skcipher_unmap(&walk->out, walk->dst.virt.addr); | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) | ||
| 83 | { | ||
| 84 | return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* Get a spot of the specified length that does not straddle a page. | ||
| 88 | * The caller needs to ensure that there is enough space for this operation. | ||
| 89 | */ | ||
| 90 | static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) | ||
| 91 | { | ||
| 92 | u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); | ||
| 93 | |||
| 94 | return max(start, end_page); | ||
| 95 | } | ||
| 96 | |||
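
skcipher_get_spot() picks an address for a len-byte buffer that is guaranteed not to cross a page boundary: if [start, start + len) would straddle one, it returns the start of the page holding the last byte instead. A standalone check of that arithmetic, assuming 4096-byte pages:

/* Standalone check of the skcipher_get_spot() arithmetic: the result
 * plus len never crosses a page boundary (given slack after start).
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static uintptr_t get_spot(uintptr_t start, unsigned int len)
{
	uintptr_t end_page = (start + len - 1) & PAGE_MASK;

	return end_page > start ? end_page : start;	/* max(start, end_page) */
}

int main(void)
{
	/* Fits within the current page: unchanged. */
	assert(get_spot(0x1000, 16) == 0x1000);
	/* Would straddle 0x2000: bumped to the next page start. */
	assert(get_spot(0x1ff8, 16) == 0x2000);
	/* The result never straddles. */
	uintptr_t p = get_spot(0x1ff8, 16);
	assert((p & PAGE_MASK) == ((p + 16 - 1) & PAGE_MASK));
	return 0;
}
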
| 97 | static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) | ||
| 98 | { | ||
| 99 | u8 *addr; | ||
| 100 | |||
| 101 | addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); | ||
| 102 | addr = skcipher_get_spot(addr, bsize); | ||
| 103 | scatterwalk_copychunks(addr, &walk->out, bsize, | ||
| 104 | (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); | ||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | int skcipher_walk_done(struct skcipher_walk *walk, int err) | ||
| 109 | { | ||
| 110 | unsigned int n = walk->nbytes - err; | ||
| 111 | unsigned int nbytes; | ||
| 112 | |||
| 113 | nbytes = walk->total - n; | ||
| 114 | |||
| 115 | if (unlikely(err < 0)) { | ||
| 116 | nbytes = 0; | ||
| 117 | n = 0; | ||
| 118 | } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | | ||
| 119 | SKCIPHER_WALK_SLOW | | ||
| 120 | SKCIPHER_WALK_COPY | | ||
| 121 | SKCIPHER_WALK_DIFF)))) { | ||
| 122 | unmap_src: | ||
| 123 | skcipher_unmap_src(walk); | ||
| 124 | } else if (walk->flags & SKCIPHER_WALK_DIFF) { | ||
| 125 | skcipher_unmap_dst(walk); | ||
| 126 | goto unmap_src; | ||
| 127 | } else if (walk->flags & SKCIPHER_WALK_COPY) { | ||
| 128 | skcipher_map_dst(walk); | ||
| 129 | memcpy(walk->dst.virt.addr, walk->page, n); | ||
| 130 | skcipher_unmap_dst(walk); | ||
| 131 | } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { | ||
| 132 | if (WARN_ON(err)) { | ||
| 133 | err = -EINVAL; | ||
| 134 | nbytes = 0; | ||
| 135 | } else | ||
| 136 | n = skcipher_done_slow(walk, n); | ||
| 137 | } | ||
| 138 | |||
| 139 | if (err > 0) | ||
| 140 | err = 0; | ||
| 141 | |||
| 142 | walk->total = nbytes; | ||
| 143 | walk->nbytes = nbytes; | ||
| 144 | |||
| 145 | scatterwalk_advance(&walk->in, n); | ||
| 146 | scatterwalk_advance(&walk->out, n); | ||
| 147 | scatterwalk_done(&walk->in, 0, nbytes); | ||
| 148 | scatterwalk_done(&walk->out, 1, nbytes); | ||
| 149 | |||
| 150 | if (nbytes) { | ||
| 151 | crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? | ||
| 152 | CRYPTO_TFM_REQ_MAY_SLEEP : 0); | ||
| 153 | return skcipher_walk_next(walk); | ||
| 154 | } | ||
| 155 | |||
| 156 | /* Short-circuit for the common/fast path. */ | ||
| 157 | if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) | ||
| 158 | goto out; | ||
| 159 | |||
| 160 | if (walk->flags & SKCIPHER_WALK_PHYS) | ||
| 161 | goto out; | ||
| 162 | |||
| 163 | if (walk->iv != walk->oiv) | ||
| 164 | memcpy(walk->oiv, walk->iv, walk->ivsize); | ||
| 165 | if (walk->buffer != walk->page) | ||
| 166 | kfree(walk->buffer); | ||
| 167 | if (walk->page) | ||
| 168 | free_page((unsigned long)walk->page); | ||
| 169 | |||
| 170 | out: | ||
| 171 | return err; | ||
| 172 | } | ||
| 173 | EXPORT_SYMBOL_GPL(skcipher_walk_done); | ||
| 174 | |||
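
The caller contract of skcipher_walk_done() matches the old blkcipher one: the caller loops while walk.nbytes is non-zero, processes each chunk, and reports back how many bytes it left unprocessed (or a negative error); crypto_pcbc_encrypt() earlier in this patch is a canonical caller. A toy model of just that loop shape over a linear buffer; all names here are illustrative, only the shape matches the kernel API:

/* Toy model of the skcipher_walk caller contract: walk_start() hands
 * out chunks, walk_done() takes "bytes left unprocessed" and advances.
 */
#include <stdio.h>

struct toy_walk {
	unsigned char *cur;
	unsigned int total;
	unsigned int nbytes;	/* size of the chunk handed to the caller */
};

static void toy_walk_start(struct toy_walk *w, unsigned char *buf,
			   unsigned int total, unsigned int chunk)
{
	w->cur = buf;
	w->total = total;
	w->nbytes = total < chunk ? total : chunk;
}

static void toy_walk_done(struct toy_walk *w, unsigned int left,
			  unsigned int chunk)
{
	unsigned int done = w->nbytes - left;

	w->cur += done;
	w->total -= done;
	w->nbytes = w->total < chunk ? w->total : chunk;
}

int main(void)
{
	unsigned char data[10] = "abcdefghi";
	struct toy_walk w;

	toy_walk_start(&w, data, 9, 4);
	while (w.nbytes) {
		for (unsigned int i = 0; i < w.nbytes; i++)
			w.cur[i] ^= 0x20;	/* "process" the chunk */
		toy_walk_done(&w, 0, 4);	/* nothing left over */
	}
	printf("%s\n", data);			/* ABCDEFGHI */
	return 0;
}
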
| 175 | void skcipher_walk_complete(struct skcipher_walk *walk, int err) | ||
| 176 | { | ||
| 177 | struct skcipher_walk_buffer *p, *tmp; | ||
| 178 | |||
| 179 | list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { | ||
| 180 | u8 *data; | ||
| 181 | |||
| 182 | if (err) | ||
| 183 | goto done; | ||
| 184 | |||
| 185 | data = p->data; | ||
| 186 | if (!data) { | ||
| 187 | data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); | ||
| 188 | data = skcipher_get_spot(data, walk->chunksize); | ||
| 189 | } | ||
| 190 | |||
| 191 | scatterwalk_copychunks(data, &p->dst, p->len, 1); | ||
| 192 | |||
| 193 | if (offset_in_page(p->data) + p->len + walk->chunksize > | ||
| 194 | PAGE_SIZE) | ||
| 195 | free_page((unsigned long)p->data); | ||
| 196 | |||
| 197 | done: | ||
| 198 | list_del(&p->entry); | ||
| 199 | kfree(p); | ||
| 200 | } | ||
| 201 | |||
| 202 | if (!err && walk->iv != walk->oiv) | ||
| 203 | memcpy(walk->oiv, walk->iv, walk->ivsize); | ||
| 204 | if (walk->buffer != walk->page) | ||
| 205 | kfree(walk->buffer); | ||
| 206 | if (walk->page) | ||
| 207 | free_page((unsigned long)walk->page); | ||
| 208 | } | ||
| 209 | EXPORT_SYMBOL_GPL(skcipher_walk_complete); | ||
| 210 | |||
| 211 | static void skcipher_queue_write(struct skcipher_walk *walk, | ||
| 212 | struct skcipher_walk_buffer *p) | ||
| 213 | { | ||
| 214 | p->dst = walk->out; | ||
| 215 | list_add_tail(&p->entry, &walk->buffers); | ||
| 216 | } | ||
| 217 | |||
| 218 | static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) | ||
| 219 | { | ||
| 220 | bool phys = walk->flags & SKCIPHER_WALK_PHYS; | ||
| 221 | unsigned alignmask = walk->alignmask; | ||
| 222 | struct skcipher_walk_buffer *p; | ||
| 223 | unsigned a; | ||
| 224 | unsigned n; | ||
| 225 | u8 *buffer; | ||
| 226 | void *v; | ||
| 227 | |||
| 228 | if (!phys) { | ||
| 229 | buffer = walk->buffer ?: walk->page; | ||
| 230 | if (buffer) | ||
| 231 | goto ok; | ||
| 232 | } | ||
| 233 | |||
| 234 | /* Start with the minimum alignment of kmalloc. */ | ||
| 235 | a = crypto_tfm_ctx_alignment() - 1; | ||
| 236 | n = bsize; | ||
| 237 | |||
| 238 | if (phys) { | ||
| 239 | /* Calculate the minimum alignment of p->buffer. */ | ||
| 240 | a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; | ||
| 241 | n += sizeof(*p); | ||
| 242 | } | ||
| 243 | |||
| 244 | /* Minimum size to align p->buffer by alignmask. */ | ||
| 245 | n += alignmask & ~a; | ||
| 246 | |||
| 247 | /* Minimum size to ensure p->buffer does not straddle a page. */ | ||
| 248 | n += (bsize - 1) & ~(alignmask | a); | ||
| 249 | |||
| 250 | v = kzalloc(n, skcipher_walk_gfp(walk)); | ||
| 251 | if (!v) | ||
| 252 | return skcipher_walk_done(walk, -ENOMEM); | ||
| 253 | |||
| 254 | if (phys) { | ||
| 255 | p = v; | ||
| 256 | p->len = bsize; | ||
| 257 | skcipher_queue_write(walk, p); | ||
| 258 | buffer = p->buffer; | ||
| 259 | } else { | ||
| 260 | walk->buffer = v; | ||
| 261 | buffer = v; | ||
| 262 | } | ||
| 263 | |||
| 264 | ok: | ||
| 265 | walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); | ||
| 266 | walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); | ||
| 267 | walk->src.virt.addr = walk->dst.virt.addr; | ||
| 268 | |||
| 269 | scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); | ||
| 270 | |||
| 271 | walk->nbytes = bsize; | ||
| 272 | walk->flags |= SKCIPHER_WALK_SLOW; | ||
| 273 | |||
| 274 | return 0; | ||
| 275 | } | ||
| 276 | |||
| 277 | static int skcipher_next_copy(struct skcipher_walk *walk) | ||
| 278 | { | ||
| 279 | struct skcipher_walk_buffer *p; | ||
| 280 | u8 *tmp = walk->page; | ||
| 281 | |||
| 282 | skcipher_map_src(walk); | ||
| 283 | memcpy(tmp, walk->src.virt.addr, walk->nbytes); | ||
| 284 | skcipher_unmap_src(walk); | ||
| 285 | |||
| 286 | walk->src.virt.addr = tmp; | ||
| 287 | walk->dst.virt.addr = tmp; | ||
| 288 | |||
| 289 | if (!(walk->flags & SKCIPHER_WALK_PHYS)) | ||
| 290 | return 0; | ||
| 291 | |||
| 292 | p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); | ||
| 293 | if (!p) | ||
| 294 | return -ENOMEM; | ||
| 295 | |||
| 296 | p->data = walk->page; | ||
| 297 | p->len = walk->nbytes; | ||
| 298 | skcipher_queue_write(walk, p); | ||
| 299 | |||
| 300 | if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize > | ||
| 301 | PAGE_SIZE) | ||
| 302 | walk->page = NULL; | ||
| 303 | else | ||
| 304 | walk->page += walk->nbytes; | ||
| 305 | |||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | static int skcipher_next_fast(struct skcipher_walk *walk) | ||
| 310 | { | ||
| 311 | unsigned long diff; | ||
| 312 | |||
| 313 | walk->src.phys.page = scatterwalk_page(&walk->in); | ||
| 314 | walk->src.phys.offset = offset_in_page(walk->in.offset); | ||
| 315 | walk->dst.phys.page = scatterwalk_page(&walk->out); | ||
| 316 | walk->dst.phys.offset = offset_in_page(walk->out.offset); | ||
| 317 | |||
| 318 | if (walk->flags & SKCIPHER_WALK_PHYS) | ||
| 319 | return 0; | ||
| 320 | |||
| 321 | diff = walk->src.phys.offset - walk->dst.phys.offset; | ||
| 322 | diff |= walk->src.virt.page - walk->dst.virt.page; | ||
| 323 | |||
| 324 | skcipher_map_src(walk); | ||
| 325 | walk->dst.virt.addr = walk->src.virt.addr; | ||
| 326 | |||
| 327 | if (diff) { | ||
| 328 | walk->flags |= SKCIPHER_WALK_DIFF; | ||
| 329 | skcipher_map_dst(walk); | ||
| 330 | } | ||
| 331 | |||
| 332 | return 0; | ||
| 333 | } | ||
| 334 | |||
| 335 | static int skcipher_walk_next(struct skcipher_walk *walk) | ||
| 336 | { | ||
| 337 | unsigned int bsize; | ||
| 338 | unsigned int n; | ||
| 339 | int err; | ||
| 340 | |||
| 341 | walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | | ||
| 342 | SKCIPHER_WALK_DIFF); | ||
| 343 | |||
| 344 | n = walk->total; | ||
| 345 | bsize = min(walk->chunksize, max(n, walk->blocksize)); | ||
| 346 | n = scatterwalk_clamp(&walk->in, n); | ||
| 347 | n = scatterwalk_clamp(&walk->out, n); | ||
| 348 | |||
| 349 | if (unlikely(n < bsize)) { | ||
| 350 | if (unlikely(walk->total < walk->blocksize)) | ||
| 351 | return skcipher_walk_done(walk, -EINVAL); | ||
| 352 | |||
| 353 | slow_path: | ||
| 354 | err = skcipher_next_slow(walk, bsize); | ||
| 355 | goto set_phys_lowmem; | ||
| 356 | } | ||
| 357 | |||
| 358 | if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { | ||
| 359 | if (!walk->page) { | ||
| 360 | gfp_t gfp = skcipher_walk_gfp(walk); | ||
| 361 | |||
| 362 | walk->page = (void *)__get_free_page(gfp); | ||
| 363 | if (!walk->page) | ||
| 364 | goto slow_path; | ||
| 365 | } | ||
| 366 | |||
| 367 | walk->nbytes = min_t(unsigned, n, | ||
| 368 | PAGE_SIZE - offset_in_page(walk->page)); | ||
| 369 | walk->flags |= SKCIPHER_WALK_COPY; | ||
| 370 | err = skcipher_next_copy(walk); | ||
| 371 | goto set_phys_lowmem; | ||
| 372 | } | ||
| 373 | |||
| 374 | walk->nbytes = n; | ||
| 375 | |||
| 376 | return skcipher_next_fast(walk); | ||
| 377 | |||
| 378 | set_phys_lowmem: | ||
| 379 | if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { | ||
| 380 | walk->src.phys.page = virt_to_page(walk->src.virt.addr); | ||
| 381 | walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); | ||
| 382 | walk->src.phys.offset &= PAGE_SIZE - 1; | ||
| 383 | walk->dst.phys.offset &= PAGE_SIZE - 1; | ||
| 384 | } | ||
| 385 | return err; | ||
| 386 | } | ||
| 387 | EXPORT_SYMBOL_GPL(skcipher_walk_next); | ||
| 388 | |||
| 389 | static int skcipher_copy_iv(struct skcipher_walk *walk) | ||
| 390 | { | ||
| 391 | unsigned a = crypto_tfm_ctx_alignment() - 1; | ||
| 392 | unsigned alignmask = walk->alignmask; | ||
| 393 | unsigned ivsize = walk->ivsize; | ||
| 394 | unsigned bs = walk->chunksize; | ||
| 395 | unsigned aligned_bs; | ||
| 396 | unsigned size; | ||
| 397 | u8 *iv; | ||
| 398 | |||
| 399 | aligned_bs = ALIGN(bs, alignmask + 1); | ||
| 400 | |||
| 401 | /* Minimum size to align buffer by alignmask. */ | ||
| 402 | size = alignmask & ~a; | ||
| 403 | |||
| 404 | if (walk->flags & SKCIPHER_WALK_PHYS) | ||
| 405 | size += ivsize; | ||
| 406 | else { | ||
| 407 | size += aligned_bs + ivsize; | ||
| 408 | |||
| 409 | /* Minimum size to ensure buffer does not straddle a page. */ | ||
| 410 | size += (bs - 1) & ~(alignmask | a); | ||
| 411 | } | ||
| 412 | |||
| 413 | walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); | ||
| 414 | if (!walk->buffer) | ||
| 415 | return -ENOMEM; | ||
| 416 | |||
| 417 | iv = PTR_ALIGN(walk->buffer, alignmask + 1); | ||
| 418 | iv = skcipher_get_spot(iv, bs) + aligned_bs; | ||
| 419 | |||
| 420 | walk->iv = memcpy(iv, walk->iv, walk->ivsize); | ||
| 421 | return 0; | ||
| 422 | } | ||
| 423 | |||
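
skcipher_copy_iv() handles callers whose IV pointer does not satisfy the algorithm's alignmask: it allocates a buffer with enough slack, rounds the pointer up with PTR_ALIGN() (note ALIGN()/PTR_ALIGN() take an alignment, alignmask + 1, not the mask itself), and works on the copy; skcipher_walk_done() later copies the IV back through walk->oiv. The round-up itself is plain mask arithmetic; a standalone demo under the assumption of a 16-byte alignment requirement:

/* PTR_ALIGN-style round-up used when re-homing a misaligned IV:
 * the pointer is rounded up to the next multiple of (alignmask + 1).
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *ptr_align(void *p, uintptr_t alignment)
{
	return (void *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
}

int main(void)
{
	unsigned int alignmask = 15;		/* 16-byte alignment */
	unsigned int ivsize = 16;
	unsigned char iv_store[16 + 1];
	unsigned char *iv = iv_store + 1;	/* deliberately misaligned */
	/* Slack so the aligned copy still fits, as in skcipher_copy_iv(). */
	unsigned char *buf = malloc(ivsize + alignmask);

	if (!buf)
		return 1;
	memset(iv, 0xab, ivsize);

	unsigned char *aligned = ptr_align(buf, alignmask + 1);
	memcpy(aligned, iv, ivsize);

	assert(((uintptr_t)aligned & alignmask) == 0);
	assert(aligned + ivsize <= buf + ivsize + alignmask);
	free(buf);
	return 0;
}
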
| 424 | static int skcipher_walk_first(struct skcipher_walk *walk) | ||
| 425 | { | ||
| 426 | walk->nbytes = 0; | ||
| 427 | |||
| 428 | if (WARN_ON_ONCE(in_irq())) | ||
| 429 | return -EDEADLK; | ||
| 430 | |||
| 431 | if (unlikely(!walk->total)) | ||
| 432 | return 0; | ||
| 433 | |||
| 434 | walk->buffer = NULL; | ||
| 435 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { | ||
| 436 | int err = skcipher_copy_iv(walk); | ||
| 437 | if (err) | ||
| 438 | return err; | ||
| 439 | } | ||
| 440 | |||
| 441 | walk->page = NULL; | ||
| 442 | walk->nbytes = walk->total; | ||
| 443 | |||
| 444 | return skcipher_walk_next(walk); | ||
| 445 | } | ||
| 446 | |||
| 447 | static int skcipher_walk_skcipher(struct skcipher_walk *walk, | ||
| 448 | struct skcipher_request *req) | ||
| 449 | { | ||
| 450 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 451 | |||
| 452 | scatterwalk_start(&walk->in, req->src); | ||
| 453 | scatterwalk_start(&walk->out, req->dst); | ||
| 454 | |||
| 455 | walk->total = req->cryptlen; | ||
| 456 | walk->iv = req->iv; | ||
| 457 | walk->oiv = req->iv; | ||
| 458 | |||
| 459 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
| 460 | walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | ||
| 461 | SKCIPHER_WALK_SLEEP : 0; | ||
| 462 | |||
| 463 | walk->blocksize = crypto_skcipher_blocksize(tfm); | ||
| 464 | walk->chunksize = crypto_skcipher_chunksize(tfm); | ||
| 465 | walk->ivsize = crypto_skcipher_ivsize(tfm); | ||
| 466 | walk->alignmask = crypto_skcipher_alignmask(tfm); | ||
| 467 | |||
| 468 | return skcipher_walk_first(walk); | ||
| 469 | } | ||
| 470 | |||
| 471 | int skcipher_walk_virt(struct skcipher_walk *walk, | ||
| 472 | struct skcipher_request *req, bool atomic) | ||
| 473 | { | ||
| 474 | int err; | ||
| 475 | |||
| 476 | walk->flags &= ~SKCIPHER_WALK_PHYS; | ||
| 477 | |||
| 478 | err = skcipher_walk_skcipher(walk, req); | ||
| 479 | |||
| 480 | walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0; | ||
| 481 | |||
| 482 | return err; | ||
| 483 | } | ||
| 484 | EXPORT_SYMBOL_GPL(skcipher_walk_virt); | ||
| 485 | |||
| 486 | void skcipher_walk_atomise(struct skcipher_walk *walk) | ||
| 487 | { | ||
| 488 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
| 489 | } | ||
| 490 | EXPORT_SYMBOL_GPL(skcipher_walk_atomise); | ||
| 491 | |||
| 492 | int skcipher_walk_async(struct skcipher_walk *walk, | ||
| 493 | struct skcipher_request *req) | ||
| 494 | { | ||
| 495 | walk->flags |= SKCIPHER_WALK_PHYS; | ||
| 496 | |||
| 497 | INIT_LIST_HEAD(&walk->buffers); | ||
| 498 | |||
| 499 | return skcipher_walk_skcipher(walk, req); | ||
| 500 | } | ||
| 501 | EXPORT_SYMBOL_GPL(skcipher_walk_async); | ||
| 502 | |||
| 503 | static int skcipher_walk_aead_common(struct skcipher_walk *walk, | ||
| 504 | struct aead_request *req, bool atomic) | ||
| 505 | { | ||
| 506 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 507 | int err; | ||
| 508 | |||
| 509 | walk->flags &= ~SKCIPHER_WALK_PHYS; | ||
| 510 | |||
| 511 | scatterwalk_start(&walk->in, req->src); | ||
| 512 | scatterwalk_start(&walk->out, req->dst); | ||
| 513 | |||
| 514 | scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); | ||
| 515 | scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); | ||
| 516 | |||
| 517 | walk->iv = req->iv; | ||
| 518 | walk->oiv = req->iv; | ||
| 519 | |||
| 520 | if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) | ||
| 521 | walk->flags |= SKCIPHER_WALK_SLEEP; | ||
| 522 | else | ||
| 523 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
| 524 | |||
| 525 | walk->blocksize = crypto_aead_blocksize(tfm); | ||
| 526 | walk->chunksize = crypto_aead_chunksize(tfm); | ||
| 527 | walk->ivsize = crypto_aead_ivsize(tfm); | ||
| 528 | walk->alignmask = crypto_aead_alignmask(tfm); | ||
| 529 | |||
| 530 | err = skcipher_walk_first(walk); | ||
| 531 | |||
| 532 | if (atomic) | ||
| 533 | walk->flags &= ~SKCIPHER_WALK_SLEEP; | ||
| 534 | |||
| 535 | return err; | ||
| 536 | } | ||
| 537 | |||
| 538 | int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, | ||
| 539 | bool atomic) | ||
| 540 | { | ||
| 541 | walk->total = req->cryptlen; | ||
| 542 | |||
| 543 | return skcipher_walk_aead_common(walk, req, atomic); | ||
| 544 | } | ||
| 545 | EXPORT_SYMBOL_GPL(skcipher_walk_aead); | ||
| 546 | |||
| 547 | int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, | ||
| 548 | struct aead_request *req, bool atomic) | ||
| 549 | { | ||
| 550 | walk->total = req->cryptlen; | ||
| 551 | |||
| 552 | return skcipher_walk_aead_common(walk, req, atomic); | ||
| 553 | } | ||
| 554 | EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); | ||
| 555 | |||
| 556 | int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, | ||
| 557 | struct aead_request *req, bool atomic) | ||
| 558 | { | ||
| 559 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 560 | |||
| 561 | walk->total = req->cryptlen - crypto_aead_authsize(tfm); | ||
| 562 | |||
| 563 | return skcipher_walk_aead_common(walk, req, atomic); | ||
| 564 | } | ||
| 565 | EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt); | ||
| 566 | |||
| 27 | static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) | 567 | static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) |
| 28 | { | 568 | { |
| 29 | if (alg->cra_type == &crypto_blkcipher_type) | 569 | if (alg->cra_type == &crypto_blkcipher_type) |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 62dffa0028ac..f616ad74cce7 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <crypto/drbg.h> | 33 | #include <crypto/drbg.h> |
| 34 | #include <crypto/akcipher.h> | 34 | #include <crypto/akcipher.h> |
| 35 | #include <crypto/kpp.h> | 35 | #include <crypto/kpp.h> |
| 36 | #include <crypto/acompress.h> | ||
| 36 | 37 | ||
| 37 | #include "internal.h" | 38 | #include "internal.h" |
| 38 | 39 | ||
| @@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
| 62 | */ | 63 | */ |
| 63 | #define IDX1 32 | 64 | #define IDX1 32 |
| 64 | #define IDX2 32400 | 65 | #define IDX2 32400 |
| 65 | #define IDX3 1 | 66 | #define IDX3 1511 |
| 66 | #define IDX4 8193 | 67 | #define IDX4 8193 |
| 67 | #define IDX5 22222 | 68 | #define IDX5 22222 |
| 68 | #define IDX6 17101 | 69 | #define IDX6 17101 |
| @@ -1442,6 +1443,126 @@ out: | |||
| 1442 | return ret; | 1443 | return ret; |
| 1443 | } | 1444 | } |
| 1444 | 1445 | ||
| 1446 | static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, | ||
| 1447 | struct comp_testvec *dtemplate, int ctcount, int dtcount) | ||
| 1448 | { | ||
| 1449 | const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); | ||
| 1450 | unsigned int i; | ||
| 1451 | char *output; | ||
| 1452 | int ret; | ||
| 1453 | struct scatterlist src, dst; | ||
| 1454 | struct acomp_req *req; | ||
| 1455 | struct tcrypt_result result; | ||
| 1456 | |||
| 1457 | output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); | ||
| 1458 | if (!output) | ||
| 1459 | return -ENOMEM; | ||
| 1460 | |||
| 1461 | for (i = 0; i < ctcount; i++) { | ||
| 1462 | unsigned int dlen = COMP_BUF_SIZE; | ||
| 1463 | int ilen = ctemplate[i].inlen; | ||
| 1464 | |||
| 1465 | memset(output, 0, dlen); | ||
| 1466 | init_completion(&result.completion); | ||
| 1467 | sg_init_one(&src, ctemplate[i].input, ilen); | ||
| 1468 | sg_init_one(&dst, output, dlen); | ||
| 1469 | |||
| 1470 | req = acomp_request_alloc(tfm); | ||
| 1471 | if (!req) { | ||
| 1472 | pr_err("alg: acomp: request alloc failed for %s\n", | ||
| 1473 | algo); | ||
| 1474 | ret = -ENOMEM; | ||
| 1475 | goto out; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | ||
| 1479 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
| 1480 | tcrypt_complete, &result); | ||
| 1481 | |||
| 1482 | ret = wait_async_op(&result, crypto_acomp_compress(req)); | ||
| 1483 | if (ret) { | ||
| 1484 | pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", | ||
| 1485 | i + 1, algo, -ret); | ||
| 1486 | acomp_request_free(req); | ||
| 1487 | goto out; | ||
| 1488 | } | ||
| 1489 | |||
| 1490 | if (req->dlen != ctemplate[i].outlen) { | ||
| 1491 | pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", | ||
| 1492 | i + 1, algo, req->dlen); | ||
| 1493 | ret = -EINVAL; | ||
| 1494 | acomp_request_free(req); | ||
| 1495 | goto out; | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | if (memcmp(output, ctemplate[i].output, req->dlen)) { | ||
| 1499 | pr_err("alg: acomp: Compression test %d failed for %s\n", | ||
| 1500 | i + 1, algo); | ||
| 1501 | hexdump(output, req->dlen); | ||
| 1502 | ret = -EINVAL; | ||
| 1503 | acomp_request_free(req); | ||
| 1504 | goto out; | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | acomp_request_free(req); | ||
| 1508 | } | ||
| 1509 | |||
| 1510 | for (i = 0; i < dtcount; i++) { | ||
| 1511 | unsigned int dlen = COMP_BUF_SIZE; | ||
| 1512 | int ilen = dtemplate[i].inlen; | ||
| 1513 | |||
| 1514 | memset(output, 0, dlen); | ||
| 1515 | init_completion(&result.completion); | ||
| 1516 | sg_init_one(&src, dtemplate[i].input, ilen); | ||
| 1517 | sg_init_one(&dst, output, dlen); | ||
| 1518 | |||
| 1519 | req = acomp_request_alloc(tfm); | ||
| 1520 | if (!req) { | ||
| 1521 | pr_err("alg: acomp: request alloc failed for %s\n", | ||
| 1522 | algo); | ||
| 1523 | ret = -ENOMEM; | ||
| 1524 | goto out; | ||
| 1525 | } | ||
| 1526 | |||
| 1527 | acomp_request_set_params(req, &src, &dst, ilen, dlen); | ||
| 1528 | acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
| 1529 | tcrypt_complete, &result); | ||
| 1530 | |||
| 1531 | ret = wait_async_op(&result, crypto_acomp_decompress(req)); | ||
| 1532 | if (ret) { | ||
| 1533 | pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", | ||
| 1534 | i + 1, algo, -ret); | ||
| 1535 | acomp_request_free(req); | ||
| 1536 | goto out; | ||
| 1537 | } | ||
| 1538 | |||
| 1539 | if (req->dlen != dtemplate[i].outlen) { | ||
| 1540 | pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", | ||
| 1541 | i + 1, algo, req->dlen); | ||
| 1542 | ret = -EINVAL; | ||
| 1543 | acomp_request_free(req); | ||
| 1544 | goto out; | ||
| 1545 | } | ||
| 1546 | |||
| 1547 | if (memcmp(output, dtemplate[i].output, req->dlen)) { | ||
| 1548 | pr_err("alg: acomp: Decompression test %d failed for %s\n", | ||
| 1549 | i + 1, algo); | ||
| 1550 | hexdump(output, req->dlen); | ||
| 1551 | ret = -EINVAL; | ||
| 1552 | acomp_request_free(req); | ||
| 1553 | goto out; | ||
| 1554 | } | ||
| 1555 | |||
| 1556 | acomp_request_free(req); | ||
| 1557 | } | ||
| 1558 | |||
| 1559 | ret = 0; | ||
| 1560 | |||
| 1561 | out: | ||
| 1562 | kfree(output); | ||
| 1563 | return ret; | ||
| 1564 | } | ||
| 1565 | |||
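For reference, the calling convention test_acomp() exercises is the same one
any user of the new acompress interface would follow. A hedged sketch of a
single compression call (error handling trimmed; wait_async_op() and
tcrypt_complete() are testmgr's own helpers from the code above, and the
"deflate" algorithm name is only an example):

    struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
    struct acomp_req *req = acomp_request_alloc(tfm);
    struct scatterlist src, dst;
    struct tcrypt_result result;

    init_completion(&result.completion);
    sg_init_one(&src, input, inlen);
    sg_init_one(&dst, output, outlen);

    acomp_request_set_params(req, &src, &dst, inlen, outlen);
    acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               tcrypt_complete, &result);

    err = wait_async_op(&result, crypto_acomp_compress(req));
    /* on success, req->dlen holds the number of bytes produced */

    acomp_request_free(req);
    crypto_free_acomp(tfm);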
| 1445 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, | 1566 | static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, |
| 1446 | unsigned int tcount) | 1567 | unsigned int tcount) |
| 1447 | { | 1568 | { |
| @@ -1509,7 +1630,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, | |||
| 1509 | struct crypto_aead *tfm; | 1630 | struct crypto_aead *tfm; |
| 1510 | int err = 0; | 1631 | int err = 0; |
| 1511 | 1632 | ||
| 1512 | tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1633 | tfm = crypto_alloc_aead(driver, type, mask); |
| 1513 | if (IS_ERR(tfm)) { | 1634 | if (IS_ERR(tfm)) { |
| 1514 | printk(KERN_ERR "alg: aead: Failed to load transform for %s: " | 1635 | printk(KERN_ERR "alg: aead: Failed to load transform for %s: " |
| 1515 | "%ld\n", driver, PTR_ERR(tfm)); | 1636 | "%ld\n", driver, PTR_ERR(tfm)); |
| @@ -1538,7 +1659,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc, | |||
| 1538 | struct crypto_cipher *tfm; | 1659 | struct crypto_cipher *tfm; |
| 1539 | int err = 0; | 1660 | int err = 0; |
| 1540 | 1661 | ||
| 1541 | tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1662 | tfm = crypto_alloc_cipher(driver, type, mask); |
| 1542 | if (IS_ERR(tfm)) { | 1663 | if (IS_ERR(tfm)) { |
| 1543 | printk(KERN_ERR "alg: cipher: Failed to load transform for " | 1664 | printk(KERN_ERR "alg: cipher: Failed to load transform for " |
| 1544 | "%s: %ld\n", driver, PTR_ERR(tfm)); | 1665 | "%s: %ld\n", driver, PTR_ERR(tfm)); |
| @@ -1567,7 +1688,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, | |||
| 1567 | struct crypto_skcipher *tfm; | 1688 | struct crypto_skcipher *tfm; |
| 1568 | int err = 0; | 1689 | int err = 0; |
| 1569 | 1690 | ||
| 1570 | tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1691 | tfm = crypto_alloc_skcipher(driver, type, mask); |
| 1571 | if (IS_ERR(tfm)) { | 1692 | if (IS_ERR(tfm)) { |
| 1572 | printk(KERN_ERR "alg: skcipher: Failed to load transform for " | 1693 | printk(KERN_ERR "alg: skcipher: Failed to load transform for " |
| 1573 | "%s: %ld\n", driver, PTR_ERR(tfm)); | 1694 | "%s: %ld\n", driver, PTR_ERR(tfm)); |
| @@ -1593,22 +1714,38 @@ out: | |||
| 1593 | static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, | 1714 | static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, |
| 1594 | u32 type, u32 mask) | 1715 | u32 type, u32 mask) |
| 1595 | { | 1716 | { |
| 1596 | struct crypto_comp *tfm; | 1717 | struct crypto_comp *comp; |
| 1718 | struct crypto_acomp *acomp; | ||
| 1597 | int err; | 1719 | int err; |
| 1720 | u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; | ||
| 1721 | |||
| 1722 | if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { | ||
| 1723 | acomp = crypto_alloc_acomp(driver, type, mask); | ||
| 1724 | if (IS_ERR(acomp)) { | ||
| 1725 | pr_err("alg: acomp: Failed to load transform for %s: %ld\n", | ||
| 1726 | driver, PTR_ERR(acomp)); | ||
| 1727 | return PTR_ERR(acomp); | ||
| 1728 | } | ||
| 1729 | err = test_acomp(acomp, desc->suite.comp.comp.vecs, | ||
| 1730 | desc->suite.comp.decomp.vecs, | ||
| 1731 | desc->suite.comp.comp.count, | ||
| 1732 | desc->suite.comp.decomp.count); | ||
| 1733 | crypto_free_acomp(acomp); | ||
| 1734 | } else { | ||
| 1735 | comp = crypto_alloc_comp(driver, type, mask); | ||
| 1736 | if (IS_ERR(comp)) { | ||
| 1737 | pr_err("alg: comp: Failed to load transform for %s: %ld\n", | ||
| 1738 | driver, PTR_ERR(comp)); | ||
| 1739 | return PTR_ERR(comp); | ||
| 1740 | } | ||
| 1598 | 1741 | ||
| 1599 | tfm = crypto_alloc_comp(driver, type, mask); | 1742 | err = test_comp(comp, desc->suite.comp.comp.vecs, |
| 1600 | if (IS_ERR(tfm)) { | 1743 | desc->suite.comp.decomp.vecs, |
| 1601 | printk(KERN_ERR "alg: comp: Failed to load transform for %s: " | 1744 | desc->suite.comp.comp.count, |
| 1602 | "%ld\n", driver, PTR_ERR(tfm)); | 1745 | desc->suite.comp.decomp.count); |
| 1603 | return PTR_ERR(tfm); | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | err = test_comp(tfm, desc->suite.comp.comp.vecs, | ||
| 1607 | desc->suite.comp.decomp.vecs, | ||
| 1608 | desc->suite.comp.comp.count, | ||
| 1609 | desc->suite.comp.decomp.count); | ||
| 1610 | 1746 | ||
| 1611 | crypto_free_comp(tfm); | 1747 | crypto_free_comp(comp); |
| 1748 | } | ||
| 1612 | return err; | 1749 | return err; |
| 1613 | } | 1750 | } |
| 1614 | 1751 | ||
| @@ -1618,7 +1755,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, | |||
| 1618 | struct crypto_ahash *tfm; | 1755 | struct crypto_ahash *tfm; |
| 1619 | int err; | 1756 | int err; |
| 1620 | 1757 | ||
| 1621 | tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1758 | tfm = crypto_alloc_ahash(driver, type, mask); |
| 1622 | if (IS_ERR(tfm)) { | 1759 | if (IS_ERR(tfm)) { |
| 1623 | printk(KERN_ERR "alg: hash: Failed to load transform for %s: " | 1760 | printk(KERN_ERR "alg: hash: Failed to load transform for %s: " |
| 1624 | "%ld\n", driver, PTR_ERR(tfm)); | 1761 | "%ld\n", driver, PTR_ERR(tfm)); |
| @@ -1646,7 +1783,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, | |||
| 1646 | if (err) | 1783 | if (err) |
| 1647 | goto out; | 1784 | goto out; |
| 1648 | 1785 | ||
| 1649 | tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1786 | tfm = crypto_alloc_shash(driver, type, mask); |
| 1650 | if (IS_ERR(tfm)) { | 1787 | if (IS_ERR(tfm)) { |
| 1651 | printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " | 1788 | printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " |
| 1652 | "%ld\n", driver, PTR_ERR(tfm)); | 1789 | "%ld\n", driver, PTR_ERR(tfm)); |
| @@ -1688,7 +1825,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, | |||
| 1688 | struct crypto_rng *rng; | 1825 | struct crypto_rng *rng; |
| 1689 | int err; | 1826 | int err; |
| 1690 | 1827 | ||
| 1691 | rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1828 | rng = crypto_alloc_rng(driver, type, mask); |
| 1692 | if (IS_ERR(rng)) { | 1829 | if (IS_ERR(rng)) { |
| 1693 | printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " | 1830 | printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " |
| 1694 | "%ld\n", driver, PTR_ERR(rng)); | 1831 | "%ld\n", driver, PTR_ERR(rng)); |
| @@ -1715,7 +1852,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr, | |||
| 1715 | if (!buf) | 1852 | if (!buf) |
| 1716 | return -ENOMEM; | 1853 | return -ENOMEM; |
| 1717 | 1854 | ||
| 1718 | drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1855 | drng = crypto_alloc_rng(driver, type, mask); |
| 1719 | if (IS_ERR(drng)) { | 1856 | if (IS_ERR(drng)) { |
| 1720 | printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " | 1857 | printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " |
| 1721 | "%s\n", driver); | 1858 | "%s\n", driver); |
| @@ -1909,7 +2046,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver, | |||
| 1909 | struct crypto_kpp *tfm; | 2046 | struct crypto_kpp *tfm; |
| 1910 | int err = 0; | 2047 | int err = 0; |
| 1911 | 2048 | ||
| 1912 | tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask); | 2049 | tfm = crypto_alloc_kpp(driver, type, mask); |
| 1913 | if (IS_ERR(tfm)) { | 2050 | if (IS_ERR(tfm)) { |
| 1914 | pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", | 2051 | pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", |
| 1915 | driver, PTR_ERR(tfm)); | 2052 | driver, PTR_ERR(tfm)); |
| @@ -2068,7 +2205,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc, | |||
| 2068 | struct crypto_akcipher *tfm; | 2205 | struct crypto_akcipher *tfm; |
| 2069 | int err = 0; | 2206 | int err = 0; |
| 2070 | 2207 | ||
| 2071 | tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 2208 | tfm = crypto_alloc_akcipher(driver, type, mask); |
| 2072 | if (IS_ERR(tfm)) { | 2209 | if (IS_ERR(tfm)) { |
| 2073 | pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", | 2210 | pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", |
| 2074 | driver, PTR_ERR(tfm)); | 2211 | driver, PTR_ERR(tfm)); |
| @@ -2091,88 +2228,6 @@ static int alg_test_null(const struct alg_test_desc *desc, | |||
| 2091 | /* Please keep this list sorted by algorithm name. */ | 2228 | /* Please keep this list sorted by algorithm name. */ |
| 2092 | static const struct alg_test_desc alg_test_descs[] = { | 2229 | static const struct alg_test_desc alg_test_descs[] = { |
| 2093 | { | 2230 | { |
| 2094 | .alg = "__cbc-cast5-avx", | ||
| 2095 | .test = alg_test_null, | ||
| 2096 | }, { | ||
| 2097 | .alg = "__cbc-cast6-avx", | ||
| 2098 | .test = alg_test_null, | ||
| 2099 | }, { | ||
| 2100 | .alg = "__cbc-serpent-avx", | ||
| 2101 | .test = alg_test_null, | ||
| 2102 | }, { | ||
| 2103 | .alg = "__cbc-serpent-avx2", | ||
| 2104 | .test = alg_test_null, | ||
| 2105 | }, { | ||
| 2106 | .alg = "__cbc-serpent-sse2", | ||
| 2107 | .test = alg_test_null, | ||
| 2108 | }, { | ||
| 2109 | .alg = "__cbc-twofish-avx", | ||
| 2110 | .test = alg_test_null, | ||
| 2111 | }, { | ||
| 2112 | .alg = "__driver-cbc-aes-aesni", | ||
| 2113 | .test = alg_test_null, | ||
| 2114 | .fips_allowed = 1, | ||
| 2115 | }, { | ||
| 2116 | .alg = "__driver-cbc-camellia-aesni", | ||
| 2117 | .test = alg_test_null, | ||
| 2118 | }, { | ||
| 2119 | .alg = "__driver-cbc-camellia-aesni-avx2", | ||
| 2120 | .test = alg_test_null, | ||
| 2121 | }, { | ||
| 2122 | .alg = "__driver-cbc-cast5-avx", | ||
| 2123 | .test = alg_test_null, | ||
| 2124 | }, { | ||
| 2125 | .alg = "__driver-cbc-cast6-avx", | ||
| 2126 | .test = alg_test_null, | ||
| 2127 | }, { | ||
| 2128 | .alg = "__driver-cbc-serpent-avx", | ||
| 2129 | .test = alg_test_null, | ||
| 2130 | }, { | ||
| 2131 | .alg = "__driver-cbc-serpent-avx2", | ||
| 2132 | .test = alg_test_null, | ||
| 2133 | }, { | ||
| 2134 | .alg = "__driver-cbc-serpent-sse2", | ||
| 2135 | .test = alg_test_null, | ||
| 2136 | }, { | ||
| 2137 | .alg = "__driver-cbc-twofish-avx", | ||
| 2138 | .test = alg_test_null, | ||
| 2139 | }, { | ||
| 2140 | .alg = "__driver-ecb-aes-aesni", | ||
| 2141 | .test = alg_test_null, | ||
| 2142 | .fips_allowed = 1, | ||
| 2143 | }, { | ||
| 2144 | .alg = "__driver-ecb-camellia-aesni", | ||
| 2145 | .test = alg_test_null, | ||
| 2146 | }, { | ||
| 2147 | .alg = "__driver-ecb-camellia-aesni-avx2", | ||
| 2148 | .test = alg_test_null, | ||
| 2149 | }, { | ||
| 2150 | .alg = "__driver-ecb-cast5-avx", | ||
| 2151 | .test = alg_test_null, | ||
| 2152 | }, { | ||
| 2153 | .alg = "__driver-ecb-cast6-avx", | ||
| 2154 | .test = alg_test_null, | ||
| 2155 | }, { | ||
| 2156 | .alg = "__driver-ecb-serpent-avx", | ||
| 2157 | .test = alg_test_null, | ||
| 2158 | }, { | ||
| 2159 | .alg = "__driver-ecb-serpent-avx2", | ||
| 2160 | .test = alg_test_null, | ||
| 2161 | }, { | ||
| 2162 | .alg = "__driver-ecb-serpent-sse2", | ||
| 2163 | .test = alg_test_null, | ||
| 2164 | }, { | ||
| 2165 | .alg = "__driver-ecb-twofish-avx", | ||
| 2166 | .test = alg_test_null, | ||
| 2167 | }, { | ||
| 2168 | .alg = "__driver-gcm-aes-aesni", | ||
| 2169 | .test = alg_test_null, | ||
| 2170 | .fips_allowed = 1, | ||
| 2171 | }, { | ||
| 2172 | .alg = "__ghash-pclmulqdqni", | ||
| 2173 | .test = alg_test_null, | ||
| 2174 | .fips_allowed = 1, | ||
| 2175 | }, { | ||
| 2176 | .alg = "ansi_cprng", | 2231 | .alg = "ansi_cprng", |
| 2177 | .test = alg_test_cprng, | 2232 | .test = alg_test_cprng, |
| 2178 | .suite = { | 2233 | .suite = { |
| @@ -2659,55 +2714,6 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 2659 | } | 2714 | } |
| 2660 | } | 2715 | } |
| 2661 | }, { | 2716 | }, { |
| 2662 | .alg = "cryptd(__driver-cbc-aes-aesni)", | ||
| 2663 | .test = alg_test_null, | ||
| 2664 | .fips_allowed = 1, | ||
| 2665 | }, { | ||
| 2666 | .alg = "cryptd(__driver-cbc-camellia-aesni)", | ||
| 2667 | .test = alg_test_null, | ||
| 2668 | }, { | ||
| 2669 | .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)", | ||
| 2670 | .test = alg_test_null, | ||
| 2671 | }, { | ||
| 2672 | .alg = "cryptd(__driver-cbc-serpent-avx2)", | ||
| 2673 | .test = alg_test_null, | ||
| 2674 | }, { | ||
| 2675 | .alg = "cryptd(__driver-ecb-aes-aesni)", | ||
| 2676 | .test = alg_test_null, | ||
| 2677 | .fips_allowed = 1, | ||
| 2678 | }, { | ||
| 2679 | .alg = "cryptd(__driver-ecb-camellia-aesni)", | ||
| 2680 | .test = alg_test_null, | ||
| 2681 | }, { | ||
| 2682 | .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)", | ||
| 2683 | .test = alg_test_null, | ||
| 2684 | }, { | ||
| 2685 | .alg = "cryptd(__driver-ecb-cast5-avx)", | ||
| 2686 | .test = alg_test_null, | ||
| 2687 | }, { | ||
| 2688 | .alg = "cryptd(__driver-ecb-cast6-avx)", | ||
| 2689 | .test = alg_test_null, | ||
| 2690 | }, { | ||
| 2691 | .alg = "cryptd(__driver-ecb-serpent-avx)", | ||
| 2692 | .test = alg_test_null, | ||
| 2693 | }, { | ||
| 2694 | .alg = "cryptd(__driver-ecb-serpent-avx2)", | ||
| 2695 | .test = alg_test_null, | ||
| 2696 | }, { | ||
| 2697 | .alg = "cryptd(__driver-ecb-serpent-sse2)", | ||
| 2698 | .test = alg_test_null, | ||
| 2699 | }, { | ||
| 2700 | .alg = "cryptd(__driver-ecb-twofish-avx)", | ||
| 2701 | .test = alg_test_null, | ||
| 2702 | }, { | ||
| 2703 | .alg = "cryptd(__driver-gcm-aes-aesni)", | ||
| 2704 | .test = alg_test_null, | ||
| 2705 | .fips_allowed = 1, | ||
| 2706 | }, { | ||
| 2707 | .alg = "cryptd(__ghash-pclmulqdqni)", | ||
| 2708 | .test = alg_test_null, | ||
| 2709 | .fips_allowed = 1, | ||
| 2710 | }, { | ||
| 2711 | .alg = "ctr(aes)", | 2717 | .alg = "ctr(aes)", |
| 2712 | .test = alg_test_skcipher, | 2718 | .test = alg_test_skcipher, |
| 2713 | .fips_allowed = 1, | 2719 | .fips_allowed = 1, |
| @@ -3034,10 +3040,6 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 3034 | .fips_allowed = 1, | 3040 | .fips_allowed = 1, |
| 3035 | .test = alg_test_null, | 3041 | .test = alg_test_null, |
| 3036 | }, { | 3042 | }, { |
| 3037 | .alg = "ecb(__aes-aesni)", | ||
| 3038 | .test = alg_test_null, | ||
| 3039 | .fips_allowed = 1, | ||
| 3040 | }, { | ||
| 3041 | .alg = "ecb(aes)", | 3043 | .alg = "ecb(aes)", |
| 3042 | .test = alg_test_skcipher, | 3044 | .test = alg_test_skcipher, |
| 3043 | .fips_allowed = 1, | 3045 | .fips_allowed = 1, |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index e64a4ef9d8ca..9b656be7f52f 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
| @@ -1334,36 +1334,50 @@ static struct hash_testvec rmd320_tv_template[] = { | |||
| 1334 | } | 1334 | } |
| 1335 | }; | 1335 | }; |
| 1336 | 1336 | ||
| 1337 | #define CRCT10DIF_TEST_VECTORS 3 | 1337 | #define CRCT10DIF_TEST_VECTORS ARRAY_SIZE(crct10dif_tv_template) |
| 1338 | static struct hash_testvec crct10dif_tv_template[] = { | 1338 | static struct hash_testvec crct10dif_tv_template[] = { |
| 1339 | { | 1339 | { |
| 1340 | .plaintext = "abc", | 1340 | .plaintext = "abc", |
| 1341 | .psize = 3, | 1341 | .psize = 3, |
| 1342 | #ifdef __LITTLE_ENDIAN | 1342 | .digest = (u8 *)(u16 []){ 0x443b }, |
| 1343 | .digest = "\x3b\x44", | 1343 | }, { |
| 1344 | #else | 1344 | .plaintext = "1234567890123456789012345678901234567890" |
| 1345 | .digest = "\x44\x3b", | 1345 | "123456789012345678901234567890123456789", |
| 1346 | #endif | 1346 | .psize = 79, |
| 1347 | }, { | 1347 | .digest = (u8 *)(u16 []){ 0x4b70 }, |
| 1348 | .plaintext = "1234567890123456789012345678901234567890" | 1348 | .np = 2, |
| 1349 | "123456789012345678901234567890123456789", | 1349 | .tap = { 63, 16 }, |
| 1350 | .psize = 79, | 1350 | }, { |
| 1351 | #ifdef __LITTLE_ENDIAN | 1351 | .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" |
| 1352 | .digest = "\x70\x4b", | 1352 | "ddddddddddddd", |
| 1353 | #else | 1353 | .psize = 56, |
| 1354 | .digest = "\x4b\x70", | 1354 | .digest = (u8 *)(u16 []){ 0x9ce3 }, |
| 1355 | #endif | 1355 | .np = 8, |
| 1356 | }, { | 1356 | .tap = { 1, 2, 28, 7, 6, 5, 4, 3 }, |
| 1357 | .plaintext = | 1357 | }, { |
| 1358 | "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd", | 1358 | .plaintext = "1234567890123456789012345678901234567890" |
| 1359 | .psize = 56, | 1359 | "1234567890123456789012345678901234567890" |
| 1360 | #ifdef __LITTLE_ENDIAN | 1360 | "1234567890123456789012345678901234567890" |
| 1361 | .digest = "\xe3\x9c", | 1361 | "1234567890123456789012345678901234567890" |
| 1362 | #else | 1362 | "1234567890123456789012345678901234567890" |
| 1363 | .digest = "\x9c\xe3", | 1363 | "1234567890123456789012345678901234567890" |
| 1364 | #endif | 1364 | "1234567890123456789012345678901234567890" |
| 1365 | .np = 2, | 1365 | "123456789012345678901234567890123456789", |
| 1366 | .tap = { 28, 28 } | 1366 | .psize = 319, |
| 1367 | .digest = (u8 *)(u16 []){ 0x44c6 }, | ||
| 1368 | }, { | ||
| 1369 | .plaintext = "1234567890123456789012345678901234567890" | ||
| 1370 | "1234567890123456789012345678901234567890" | ||
| 1371 | "1234567890123456789012345678901234567890" | ||
| 1372 | "1234567890123456789012345678901234567890" | ||
| 1373 | "1234567890123456789012345678901234567890" | ||
| 1374 | "1234567890123456789012345678901234567890" | ||
| 1375 | "1234567890123456789012345678901234567890" | ||
| 1376 | "123456789012345678901234567890123456789", | ||
| 1377 | .psize = 319, | ||
| 1378 | .digest = (u8 *)(u16 []){ 0x44c6 }, | ||
| 1379 | .np = 4, | ||
| 1380 | .tap = { 1, 255, 57, 6 }, | ||
| 1367 | } | 1381 | } |
| 1368 | }; | 1382 | }; |
| 1369 | 1383 | ||
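The rewritten CRC-T10DIF vectors drop the per-endianness #ifdef because the
compound-literal idiom encodes the expected digest in host byte order, which
is how the shash layer emits the 16-bit value. Put another way (an
illustration, not code from this patch):

    /* A host-order u16 viewed as bytes: {0x3b, 0x44} on little-endian,
     * {0x44, 0x3b} on big-endian -- precisely the two branches of the
     * #ifdef __LITTLE_ENDIAN block this replaces. */
    u8 *digest = (u8 *)(u16 []){ 0x443b };

The longer 319-byte vectors and the uneven tap patterns such as
{ 1, 255, 57, 6 } also exercise misaligned, multi-chunk updates that the old
three vectors never hit.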
diff --git a/crypto/xts.c b/crypto/xts.c index 305343f22a02..410a2e299085 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
| @@ -13,7 +13,8 @@ | |||
| 13 | * Software Foundation; either version 2 of the License, or (at your option) | 13 | * Software Foundation; either version 2 of the License, or (at your option) |
| 14 | * any later version. | 14 | * any later version. |
| 15 | */ | 15 | */ |
| 16 | #include <crypto/algapi.h> | 16 | #include <crypto/internal/skcipher.h> |
| 17 | #include <crypto/scatterwalk.h> | ||
| 17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| @@ -25,140 +26,320 @@ | |||
| 25 | #include <crypto/b128ops.h> | 26 | #include <crypto/b128ops.h> |
| 26 | #include <crypto/gf128mul.h> | 27 | #include <crypto/gf128mul.h> |
| 27 | 28 | ||
| 29 | #define XTS_BUFFER_SIZE 128u | ||
| 30 | |||
| 28 | struct priv { | 31 | struct priv { |
| 29 | struct crypto_cipher *child; | 32 | struct crypto_skcipher *child; |
| 30 | struct crypto_cipher *tweak; | 33 | struct crypto_cipher *tweak; |
| 31 | }; | 34 | }; |
| 32 | 35 | ||
| 33 | static int setkey(struct crypto_tfm *parent, const u8 *key, | 36 | struct xts_instance_ctx { |
| 37 | struct crypto_skcipher_spawn spawn; | ||
| 38 | char name[CRYPTO_MAX_ALG_NAME]; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct rctx { | ||
| 42 | be128 buf[XTS_BUFFER_SIZE / sizeof(be128)]; | ||
| 43 | |||
| 44 | be128 t; | ||
| 45 | |||
| 46 | be128 *ext; | ||
| 47 | |||
| 48 | struct scatterlist srcbuf[2]; | ||
| 49 | struct scatterlist dstbuf[2]; | ||
| 50 | struct scatterlist *src; | ||
| 51 | struct scatterlist *dst; | ||
| 52 | |||
| 53 | unsigned int left; | ||
| 54 | |||
| 55 | struct skcipher_request subreq; | ||
| 56 | }; | ||
| 57 | |||
| 58 | static int setkey(struct crypto_skcipher *parent, const u8 *key, | ||
| 34 | unsigned int keylen) | 59 | unsigned int keylen) |
| 35 | { | 60 | { |
| 36 | struct priv *ctx = crypto_tfm_ctx(parent); | 61 | struct priv *ctx = crypto_skcipher_ctx(parent); |
| 37 | struct crypto_cipher *child = ctx->tweak; | 62 | struct crypto_skcipher *child; |
| 63 | struct crypto_cipher *tweak; | ||
| 38 | int err; | 64 | int err; |
| 39 | 65 | ||
| 40 | err = xts_check_key(parent, key, keylen); | 66 | err = xts_verify_key(parent, key, keylen); |
| 41 | if (err) | 67 | if (err) |
| 42 | return err; | 68 | return err; |
| 43 | 69 | ||
| 70 | keylen /= 2; | ||
| 71 | |||
| 44 | /* we need two cipher instances: one to compute the initial 'tweak' | 72 | /* we need two cipher instances: one to compute the initial 'tweak' |
| 45 | * by encrypting the IV (usually the 'plain' iv) and the other | 73 | * by encrypting the IV (usually the 'plain' iv) and the other |
| 46 | * one to encrypt and decrypt the data */ | 74 | * one to encrypt and decrypt the data */ |
| 47 | 75 | ||
| 48 | /* tweak cipher, uses Key2 i.e. the second half of *key */ | 76 | /* tweak cipher, uses Key2 i.e. the second half of *key */ |
| 49 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 77 | tweak = ctx->tweak; |
| 50 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | 78 | crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK); |
| 79 | crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) & | ||
| 51 | CRYPTO_TFM_REQ_MASK); | 80 | CRYPTO_TFM_REQ_MASK); |
| 52 | err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); | 81 | err = crypto_cipher_setkey(tweak, key + keylen, keylen); |
| 82 | crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) & | ||
| 83 | CRYPTO_TFM_RES_MASK); | ||
| 53 | if (err) | 84 | if (err) |
| 54 | return err; | 85 | return err; |
| 55 | 86 | ||
| 56 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 87 | /* data cipher, uses Key1 i.e. the first half of *key */ |
| 57 | CRYPTO_TFM_RES_MASK); | ||
| 58 | |||
| 59 | child = ctx->child; | 88 | child = ctx->child; |
| 89 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
| 90 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & | ||
| 91 | CRYPTO_TFM_REQ_MASK); | ||
| 92 | err = crypto_skcipher_setkey(child, key, keylen); | ||
| 93 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & | ||
| 94 | CRYPTO_TFM_RES_MASK); | ||
| 60 | 95 | ||
| 61 | /* data cipher, uses Key1 i.e. the first half of *key */ | 96 | return err; |
| 62 | crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 97 | } |
| 63 | crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & | ||
| 64 | CRYPTO_TFM_REQ_MASK); | ||
| 65 | err = crypto_cipher_setkey(child, key, keylen/2); | ||
| 66 | if (err) | ||
| 67 | return err; | ||
| 68 | 98 | ||
| 69 | crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & | 99 | static int post_crypt(struct skcipher_request *req) |
| 70 | CRYPTO_TFM_RES_MASK); | 100 | { |
| 101 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 102 | be128 *buf = rctx->ext ?: rctx->buf; | ||
| 103 | struct skcipher_request *subreq; | ||
| 104 | const int bs = XTS_BLOCK_SIZE; | ||
| 105 | struct skcipher_walk w; | ||
| 106 | struct scatterlist *sg; | ||
| 107 | unsigned offset; | ||
| 108 | int err; | ||
| 71 | 109 | ||
| 72 | return 0; | 110 | subreq = &rctx->subreq; |
| 73 | } | 111 | err = skcipher_walk_virt(&w, subreq, false); |
| 74 | 112 | ||
| 75 | struct sinfo { | 113 | while (w.nbytes) { |
| 76 | be128 *t; | 114 | unsigned int avail = w.nbytes; |
| 77 | struct crypto_tfm *tfm; | 115 | be128 *wdst; |
| 78 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); | ||
| 79 | }; | ||
| 80 | 116 | ||
| 81 | static inline void xts_round(struct sinfo *s, void *dst, const void *src) | 117 | wdst = w.dst.virt.addr; |
| 82 | { | 118 | |
| 83 | be128_xor(dst, s->t, src); /* PP <- T xor P */ | 119 | do { |
| 84 | s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ | 120 | be128_xor(wdst, buf++, wdst); |
| 85 | be128_xor(dst, dst, s->t); /* C <- T xor CC */ | 121 | wdst++; |
| 122 | } while ((avail -= bs) >= bs); | ||
| 123 | |||
| 124 | err = skcipher_walk_done(&w, avail); | ||
| 125 | } | ||
| 126 | |||
| 127 | rctx->left -= subreq->cryptlen; | ||
| 128 | |||
| 129 | if (err || !rctx->left) | ||
| 130 | goto out; | ||
| 131 | |||
| 132 | rctx->dst = rctx->dstbuf; | ||
| 133 | |||
| 134 | scatterwalk_done(&w.out, 0, 1); | ||
| 135 | sg = w.out.sg; | ||
| 136 | offset = w.out.offset; | ||
| 137 | |||
| 138 | if (rctx->dst != sg) { | ||
| 139 | rctx->dst[0] = *sg; | ||
| 140 | sg_unmark_end(rctx->dst); | ||
| 141 | scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); | ||
| 142 | } | ||
| 143 | rctx->dst[0].length -= offset - sg->offset; | ||
| 144 | rctx->dst[0].offset = offset; | ||
| 145 | |||
| 146 | out: | ||
| 147 | return err; | ||
| 86 | } | 148 | } |
| 87 | 149 | ||
| 88 | static int crypt(struct blkcipher_desc *d, | 150 | static int pre_crypt(struct skcipher_request *req) |
| 89 | struct blkcipher_walk *w, struct priv *ctx, | ||
| 90 | void (*tw)(struct crypto_tfm *, u8 *, const u8 *), | ||
| 91 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) | ||
| 92 | { | 151 | { |
| 93 | int err; | 152 | struct rctx *rctx = skcipher_request_ctx(req); |
| 94 | unsigned int avail; | 153 | be128 *buf = rctx->ext ?: rctx->buf; |
| 154 | struct skcipher_request *subreq; | ||
| 95 | const int bs = XTS_BLOCK_SIZE; | 155 | const int bs = XTS_BLOCK_SIZE; |
| 96 | struct sinfo s = { | 156 | struct skcipher_walk w; |
| 97 | .tfm = crypto_cipher_tfm(ctx->child), | 157 | struct scatterlist *sg; |
| 98 | .fn = fn | 158 | unsigned cryptlen; |
| 99 | }; | 159 | unsigned offset; |
| 100 | u8 *wsrc; | 160 | bool more; |
| 101 | u8 *wdst; | 161 | int err; |
| 102 | |||
| 103 | err = blkcipher_walk_virt(d, w); | ||
| 104 | if (!w->nbytes) | ||
| 105 | return err; | ||
| 106 | 162 | ||
| 107 | s.t = (be128 *)w->iv; | 163 | subreq = &rctx->subreq; |
| 108 | avail = w->nbytes; | 164 | cryptlen = subreq->cryptlen; |
| 109 | 165 | ||
| 110 | wsrc = w->src.virt.addr; | 166 | more = rctx->left > cryptlen; |
| 111 | wdst = w->dst.virt.addr; | 167 | if (!more) |
| 168 | cryptlen = rctx->left; | ||
| 112 | 169 | ||
| 113 | /* calculate first value of T */ | 170 | skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, |
| 114 | tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv); | 171 | cryptlen, NULL); |
| 115 | 172 | ||
| 116 | goto first; | 173 | err = skcipher_walk_virt(&w, subreq, false); |
| 117 | 174 | ||
| 118 | for (;;) { | 175 | while (w.nbytes) { |
| 119 | do { | 176 | unsigned int avail = w.nbytes; |
| 120 | gf128mul_x_ble(s.t, s.t); | 177 | be128 *wsrc; |
| 178 | be128 *wdst; | ||
| 121 | 179 | ||
| 122 | first: | 180 | wsrc = w.src.virt.addr; |
| 123 | xts_round(&s, wdst, wsrc); | 181 | wdst = w.dst.virt.addr; |
| 124 | 182 | ||
| 125 | wsrc += bs; | 183 | do { |
| 126 | wdst += bs; | 184 | *buf++ = rctx->t; |
| 185 | be128_xor(wdst++, &rctx->t, wsrc++); | ||
| 186 | gf128mul_x_ble(&rctx->t, &rctx->t); | ||
| 127 | } while ((avail -= bs) >= bs); | 187 | } while ((avail -= bs) >= bs); |
| 128 | 188 | ||
| 129 | err = blkcipher_walk_done(d, w, avail); | 189 | err = skcipher_walk_done(&w, avail); |
| 130 | if (!w->nbytes) | 190 | } |
| 131 | break; | 191 | |
| 192 | skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, | ||
| 193 | cryptlen, NULL); | ||
| 132 | 194 | ||
| 133 | avail = w->nbytes; | 195 | if (err || !more) |
| 196 | goto out; | ||
| 134 | 197 | ||
| 135 | wsrc = w->src.virt.addr; | 198 | rctx->src = rctx->srcbuf; |
| 136 | wdst = w->dst.virt.addr; | 199 | |
| 200 | scatterwalk_done(&w.in, 0, 1); | ||
| 201 | sg = w.in.sg; | ||
| 202 | offset = w.in.offset; | ||
| 203 | |||
| 204 | if (rctx->src != sg) { | ||
| 205 | rctx->src[0] = *sg; | ||
| 206 | sg_unmark_end(rctx->src); | ||
| 207 | scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); | ||
| 137 | } | 208 | } |
| 209 | rctx->src[0].length -= offset - sg->offset; | ||
| 210 | rctx->src[0].offset = offset; | ||
| 138 | 211 | ||
| 212 | out: | ||
| 139 | return err; | 213 | return err; |
| 140 | } | 214 | } |
| 141 | 215 | ||
| 142 | static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 216 | static int init_crypt(struct skcipher_request *req, crypto_completion_t done) |
| 143 | struct scatterlist *src, unsigned int nbytes) | ||
| 144 | { | 217 | { |
| 145 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 218 | struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); |
| 146 | struct blkcipher_walk w; | 219 | struct rctx *rctx = skcipher_request_ctx(req); |
| 220 | struct skcipher_request *subreq; | ||
| 221 | gfp_t gfp; | ||
| 222 | |||
| 223 | subreq = &rctx->subreq; | ||
| 224 | skcipher_request_set_tfm(subreq, ctx->child); | ||
| 225 | skcipher_request_set_callback(subreq, req->base.flags, done, req); | ||
| 226 | |||
| 227 | gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 228 | GFP_ATOMIC; | ||
| 229 | rctx->ext = NULL; | ||
| 230 | |||
| 231 | subreq->cryptlen = XTS_BUFFER_SIZE; | ||
| 232 | if (req->cryptlen > XTS_BUFFER_SIZE) { | ||
| 233 | subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); | ||
| 234 | rctx->ext = kmalloc(subreq->cryptlen, gfp); | ||
| 235 | } | ||
| 236 | |||
| 237 | rctx->src = req->src; | ||
| 238 | rctx->dst = req->dst; | ||
| 239 | rctx->left = req->cryptlen; | ||
| 147 | 240 | ||
| 148 | blkcipher_walk_init(&w, dst, src, nbytes); | 241 | /* calculate first value of T */ |
| 149 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, | 242 | crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv); |
| 150 | crypto_cipher_alg(ctx->child)->cia_encrypt); | 243 | |
| 244 | return 0; | ||
| 151 | } | 245 | } |
| 152 | 246 | ||
| 153 | static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 247 | static void exit_crypt(struct skcipher_request *req) |
| 154 | struct scatterlist *src, unsigned int nbytes) | ||
| 155 | { | 248 | { |
| 156 | struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); | 249 | struct rctx *rctx = skcipher_request_ctx(req); |
| 157 | struct blkcipher_walk w; | 250 | |
| 251 | rctx->left = 0; | ||
| 158 | 252 | ||
| 159 | blkcipher_walk_init(&w, dst, src, nbytes); | 253 | if (rctx->ext) |
| 160 | return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, | 254 | kzfree(rctx->ext); |
| 161 | crypto_cipher_alg(ctx->child)->cia_decrypt); | 255 | } |
| 256 | |||
| 257 | static int do_encrypt(struct skcipher_request *req, int err) | ||
| 258 | { | ||
| 259 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 260 | struct skcipher_request *subreq; | ||
| 261 | |||
| 262 | subreq = &rctx->subreq; | ||
| 263 | |||
| 264 | while (!err && rctx->left) { | ||
| 265 | err = pre_crypt(req) ?: | ||
| 266 | crypto_skcipher_encrypt(subreq) ?: | ||
| 267 | post_crypt(req); | ||
| 268 | |||
| 269 | if (err == -EINPROGRESS || | ||
| 270 | (err == -EBUSY && | ||
| 271 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
| 272 | return err; | ||
| 273 | } | ||
| 274 | |||
| 275 | exit_crypt(req); | ||
| 276 | return err; | ||
| 277 | } | ||
| 278 | |||
| 279 | static void encrypt_done(struct crypto_async_request *areq, int err) | ||
| 280 | { | ||
| 281 | struct skcipher_request *req = areq->data; | ||
| 282 | struct skcipher_request *subreq; | ||
| 283 | struct rctx *rctx; | ||
| 284 | |||
| 285 | rctx = skcipher_request_ctx(req); | ||
| 286 | subreq = &rctx->subreq; | ||
| 287 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
| 288 | |||
| 289 | err = do_encrypt(req, err ?: post_crypt(req)); | ||
| 290 | if (rctx->left) | ||
| 291 | return; | ||
| 292 | |||
| 293 | skcipher_request_complete(req, err); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int encrypt(struct skcipher_request *req) | ||
| 297 | { | ||
| 298 | return do_encrypt(req, init_crypt(req, encrypt_done)); | ||
| 299 | } | ||
| 300 | |||
| 301 | static int do_decrypt(struct skcipher_request *req, int err) | ||
| 302 | { | ||
| 303 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 304 | struct skcipher_request *subreq; | ||
| 305 | |||
| 306 | subreq = &rctx->subreq; | ||
| 307 | |||
| 308 | while (!err && rctx->left) { | ||
| 309 | err = pre_crypt(req) ?: | ||
| 310 | crypto_skcipher_decrypt(subreq) ?: | ||
| 311 | post_crypt(req); | ||
| 312 | |||
| 313 | if (err == -EINPROGRESS || | ||
| 314 | (err == -EBUSY && | ||
| 315 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
| 316 | return err; | ||
| 317 | } | ||
| 318 | |||
| 319 | exit_crypt(req); | ||
| 320 | return err; | ||
| 321 | } | ||
| 322 | |||
| 323 | static void decrypt_done(struct crypto_async_request *areq, int err) | ||
| 324 | { | ||
| 325 | struct skcipher_request *req = areq->data; | ||
| 326 | struct skcipher_request *subreq; | ||
| 327 | struct rctx *rctx; | ||
| 328 | |||
| 329 | rctx = skcipher_request_ctx(req); | ||
| 330 | subreq = &rctx->subreq; | ||
| 331 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | ||
| 332 | |||
| 333 | err = do_decrypt(req, err ?: post_crypt(req)); | ||
| 334 | if (rctx->left) | ||
| 335 | return; | ||
| 336 | |||
| 337 | skcipher_request_complete(req, err); | ||
| 338 | } | ||
| 339 | |||
| 340 | static int decrypt(struct skcipher_request *req) | ||
| 341 | { | ||
| 342 | return do_decrypt(req, init_crypt(req, decrypt_done)); | ||
| 162 | } | 343 | } |
| 163 | 344 | ||
| 164 | int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, | 345 | int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, |
| @@ -233,112 +414,168 @@ first: | |||
| 233 | } | 414 | } |
| 234 | EXPORT_SYMBOL_GPL(xts_crypt); | 415 | EXPORT_SYMBOL_GPL(xts_crypt); |
| 235 | 416 | ||
| 236 | static int init_tfm(struct crypto_tfm *tfm) | 417 | static int init_tfm(struct crypto_skcipher *tfm) |
| 237 | { | 418 | { |
| 238 | struct crypto_cipher *cipher; | 419 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
| 239 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 420 | struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); |
| 240 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 421 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
| 241 | struct priv *ctx = crypto_tfm_ctx(tfm); | 422 | struct crypto_skcipher *child; |
| 242 | u32 *flags = &tfm->crt_flags; | 423 | struct crypto_cipher *tweak; |
| 243 | |||
| 244 | cipher = crypto_spawn_cipher(spawn); | ||
| 245 | if (IS_ERR(cipher)) | ||
| 246 | return PTR_ERR(cipher); | ||
| 247 | |||
| 248 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { | ||
| 249 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
| 250 | crypto_free_cipher(cipher); | ||
| 251 | return -EINVAL; | ||
| 252 | } | ||
| 253 | 424 | ||
| 254 | ctx->child = cipher; | 425 | child = crypto_spawn_skcipher(&ictx->spawn); |
| 426 | if (IS_ERR(child)) | ||
| 427 | return PTR_ERR(child); | ||
| 255 | 428 | ||
| 256 | cipher = crypto_spawn_cipher(spawn); | 429 | ctx->child = child; |
| 257 | if (IS_ERR(cipher)) { | ||
| 258 | crypto_free_cipher(ctx->child); | ||
| 259 | return PTR_ERR(cipher); | ||
| 260 | } | ||
| 261 | 430 | ||
| 262 | /* this check isn't really needed, leave it here just in case */ | 431 | tweak = crypto_alloc_cipher(ictx->name, 0, 0); |
| 263 | if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) { | 432 | if (IS_ERR(tweak)) { |
| 264 | crypto_free_cipher(cipher); | 433 | crypto_free_skcipher(ctx->child); |
| 265 | crypto_free_cipher(ctx->child); | 434 | return PTR_ERR(tweak); |
| 266 | *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
| 267 | return -EINVAL; | ||
| 268 | } | 435 | } |
| 269 | 436 | ||
| 270 | ctx->tweak = cipher; | 437 | ctx->tweak = tweak; |
| 438 | |||
| 439 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + | ||
| 440 | sizeof(struct rctx)); | ||
| 271 | 441 | ||
| 272 | return 0; | 442 | return 0; |
| 273 | } | 443 | } |
| 274 | 444 | ||
| 275 | static void exit_tfm(struct crypto_tfm *tfm) | 445 | static void exit_tfm(struct crypto_skcipher *tfm) |
| 276 | { | 446 | { |
| 277 | struct priv *ctx = crypto_tfm_ctx(tfm); | 447 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
| 278 | crypto_free_cipher(ctx->child); | 448 | |
| 449 | crypto_free_skcipher(ctx->child); | ||
| 279 | crypto_free_cipher(ctx->tweak); | 450 | crypto_free_cipher(ctx->tweak); |
| 280 | } | 451 | } |
| 281 | 452 | ||
| 282 | static struct crypto_instance *alloc(struct rtattr **tb) | 453 | static void free(struct skcipher_instance *inst) |
| 454 | { | ||
| 455 | crypto_drop_skcipher(skcipher_instance_ctx(inst)); | ||
| 456 | kfree(inst); | ||
| 457 | } | ||
| 458 | |||
| 459 | static int create(struct crypto_template *tmpl, struct rtattr **tb) | ||
| 283 | { | 460 | { |
| 284 | struct crypto_instance *inst; | 461 | struct skcipher_instance *inst; |
| 285 | struct crypto_alg *alg; | 462 | struct crypto_attr_type *algt; |
| 463 | struct xts_instance_ctx *ctx; | ||
| 464 | struct skcipher_alg *alg; | ||
| 465 | const char *cipher_name; | ||
| 286 | int err; | 466 | int err; |
| 287 | 467 | ||
| 288 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); | 468 | algt = crypto_get_attr_type(tb); |
| 469 | if (IS_ERR(algt)) | ||
| 470 | return PTR_ERR(algt); | ||
| 471 | |||
| 472 | if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) | ||
| 473 | return -EINVAL; | ||
| 474 | |||
| 475 | cipher_name = crypto_attr_alg_name(tb[1]); | ||
| 476 | if (IS_ERR(cipher_name)) | ||
| 477 | return PTR_ERR(cipher_name); | ||
| 478 | |||
| 479 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
| 480 | if (!inst) | ||
| 481 | return -ENOMEM; | ||
| 482 | |||
| 483 | ctx = skcipher_instance_ctx(inst); | ||
| 484 | |||
| 485 | crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); | ||
| 486 | err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, | ||
| 487 | crypto_requires_sync(algt->type, | ||
| 488 | algt->mask)); | ||
| 489 | if (err == -ENOENT) { | ||
| 490 | err = -ENAMETOOLONG; | ||
| 491 | if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", | ||
| 492 | cipher_name) >= CRYPTO_MAX_ALG_NAME) | ||
| 493 | goto err_free_inst; | ||
| 494 | |||
| 495 | err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, | ||
| 496 | crypto_requires_sync(algt->type, | ||
| 497 | algt->mask)); | ||
| 498 | } | ||
| 499 | |||
| 289 | if (err) | 500 | if (err) |
| 290 | return ERR_PTR(err); | 501 | goto err_free_inst; |
| 291 | 502 | ||
| 292 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 503 | alg = crypto_skcipher_spawn_alg(&ctx->spawn); |
| 293 | CRYPTO_ALG_TYPE_MASK); | ||
| 294 | if (IS_ERR(alg)) | ||
| 295 | return ERR_CAST(alg); | ||
| 296 | 504 | ||
| 297 | inst = crypto_alloc_instance("xts", alg); | 505 | err = -EINVAL; |
| 298 | if (IS_ERR(inst)) | 506 | if (alg->base.cra_blocksize != XTS_BLOCK_SIZE) |
| 299 | goto out_put_alg; | 507 | goto err_drop_spawn; |
| 300 | 508 | ||
| 301 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; | 509 | if (crypto_skcipher_alg_ivsize(alg)) |
| 302 | inst->alg.cra_priority = alg->cra_priority; | 510 | goto err_drop_spawn; |
| 303 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
| 304 | 511 | ||
| 305 | if (alg->cra_alignmask < 7) | 512 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts", |
| 306 | inst->alg.cra_alignmask = 7; | 513 | &alg->base); |
| 307 | else | 514 | if (err) |
| 308 | inst->alg.cra_alignmask = alg->cra_alignmask; | 515 | goto err_drop_spawn; |
| 309 | 516 | ||
| 310 | inst->alg.cra_type = &crypto_blkcipher_type; | 517 | err = -EINVAL; |
| 518 | cipher_name = alg->base.cra_name; | ||
| 311 | 519 | ||
| 312 | inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; | 520 | /* Alas we screwed up the naming so we have to mangle the |
| 313 | inst->alg.cra_blkcipher.min_keysize = | 521 | * cipher name. |
| 314 | 2 * alg->cra_cipher.cia_min_keysize; | 522 | */ |
| 315 | inst->alg.cra_blkcipher.max_keysize = | 523 | if (!strncmp(cipher_name, "ecb(", 4)) { |
| 316 | 2 * alg->cra_cipher.cia_max_keysize; | 524 | unsigned len; |
| 317 | 525 | ||
| 318 | inst->alg.cra_ctxsize = sizeof(struct priv); | 526 | len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); |
| 527 | if (len < 2 || len >= sizeof(ctx->name)) | ||
| 528 | goto err_drop_spawn; | ||
| 319 | 529 | ||
| 320 | inst->alg.cra_init = init_tfm; | 530 | if (ctx->name[len - 1] != ')') |
| 321 | inst->alg.cra_exit = exit_tfm; | 531 | goto err_drop_spawn; |
| 322 | 532 | ||
| 323 | inst->alg.cra_blkcipher.setkey = setkey; | 533 | ctx->name[len - 1] = 0; |
| 324 | inst->alg.cra_blkcipher.encrypt = encrypt; | ||
| 325 | inst->alg.cra_blkcipher.decrypt = decrypt; | ||
| 326 | 534 | ||
| 327 | out_put_alg: | 535 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 328 | crypto_mod_put(alg); | 536 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) |
| 329 | return inst; | 537 | return -ENAMETOOLONG; |
| 330 | } | 538 | } else |
| 539 | goto err_drop_spawn; | ||
| 331 | 540 | ||
| 332 | static void free(struct crypto_instance *inst) | 541 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
| 333 | { | 542 | inst->alg.base.cra_priority = alg->base.cra_priority; |
| 334 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 543 | inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; |
| 544 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | | ||
| 545 | (__alignof__(u64) - 1); | ||
| 546 | |||
| 547 | inst->alg.ivsize = XTS_BLOCK_SIZE; | ||
| 548 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; | ||
| 549 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; | ||
| 550 | |||
| 551 | inst->alg.base.cra_ctxsize = sizeof(struct priv); | ||
| 552 | |||
| 553 | inst->alg.init = init_tfm; | ||
| 554 | inst->alg.exit = exit_tfm; | ||
| 555 | |||
| 556 | inst->alg.setkey = setkey; | ||
| 557 | inst->alg.encrypt = encrypt; | ||
| 558 | inst->alg.decrypt = decrypt; | ||
| 559 | |||
| 560 | inst->free = free; | ||
| 561 | |||
| 562 | err = skcipher_register_instance(tmpl, inst); | ||
| 563 | if (err) | ||
| 564 | goto err_drop_spawn; | ||
| 565 | |||
| 566 | out: | ||
| 567 | return err; | ||
| 568 | |||
| 569 | err_drop_spawn: | ||
| 570 | crypto_drop_skcipher(&ctx->spawn); | ||
| 571 | err_free_inst: | ||
| 335 | kfree(inst); | 572 | kfree(inst); |
| 573 | goto out; | ||
| 336 | } | 574 | } |
| 337 | 575 | ||
| 338 | static struct crypto_template crypto_tmpl = { | 576 | static struct crypto_template crypto_tmpl = { |
| 339 | .name = "xts", | 577 | .name = "xts", |
| 340 | .alloc = alloc, | 578 | .create = create, |
| 341 | .free = free, | ||
| 342 | .module = THIS_MODULE, | 579 | .module = THIS_MODULE, |
| 343 | }; | 580 | }; |
| 344 | 581 | ||
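With the template converted to the skcipher interface, xts(aes) instances are
allocated and driven like any other skcipher. A minimal usage sketch, assuming
a synchronous implementation is requested by masking out CRYPTO_ALG_ASYNC
(error handling elided; key, data and len are placeholders):

    struct crypto_skcipher *tfm;
    struct skcipher_request *req;
    struct scatterlist sg;
    u8 iv[XTS_BLOCK_SIZE];

    tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ASYNC);
    crypto_skcipher_setkey(tfm, key, 64);   /* two 256-bit AES keys */

    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    sg_init_one(&sg, data, len);            /* len: multiple of 16 */
    skcipher_request_set_crypt(req, &sg, &sg, len, iv);

    err = crypto_skcipher_encrypt(req);

    skcipher_request_free(req);
    crypto_free_skcipher(tfm);

Note the doubled key size: after xts_verify_key() checks the buffer, setkey()
halves keylen and feeds the first half (Key1) to the data cipher and the
second half (Key2) to the tweak cipher, as the hunk above shows.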
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 200dab5136a7..ceff2fc524b1 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
| @@ -168,7 +168,7 @@ config HW_RANDOM_IXP4XX | |||
| 168 | 168 | ||
| 169 | config HW_RANDOM_OMAP | 169 | config HW_RANDOM_OMAP |
| 170 | tristate "OMAP Random Number Generator support" | 170 | tristate "OMAP Random Number Generator support" |
| 171 | depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS | 171 | depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU |
| 172 | default HW_RANDOM | 172 | default HW_RANDOM |
| 173 | ---help--- | 173 | ---help--- |
| 174 | This driver provides kernel-side support for the Random Number | 174 | This driver provides kernel-side support for the Random Number |
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index 0fcc9e69a346..661c82cde0f2 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c | |||
| @@ -48,6 +48,16 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, | |||
| 48 | return 0; | 48 | return 0; |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static void atmel_trng_enable(struct atmel_trng *trng) | ||
| 52 | { | ||
| 53 | writel(TRNG_KEY | 1, trng->base + TRNG_CR); | ||
| 54 | } | ||
| 55 | |||
| 56 | static void atmel_trng_disable(struct atmel_trng *trng) | ||
| 57 | { | ||
| 58 | writel(TRNG_KEY, trng->base + TRNG_CR); | ||
| 59 | } | ||
| 60 | |||
| 51 | static int atmel_trng_probe(struct platform_device *pdev) | 61 | static int atmel_trng_probe(struct platform_device *pdev) |
| 52 | { | 62 | { |
| 53 | struct atmel_trng *trng; | 63 | struct atmel_trng *trng; |
| @@ -71,7 +81,7 @@ static int atmel_trng_probe(struct platform_device *pdev) | |||
| 71 | if (ret) | 81 | if (ret) |
| 72 | return ret; | 82 | return ret; |
| 73 | 83 | ||
| 74 | writel(TRNG_KEY | 1, trng->base + TRNG_CR); | 84 | atmel_trng_enable(trng); |
| 75 | trng->rng.name = pdev->name; | 85 | trng->rng.name = pdev->name; |
| 76 | trng->rng.read = atmel_trng_read; | 86 | trng->rng.read = atmel_trng_read; |
| 77 | 87 | ||
| @@ -84,7 +94,7 @@ static int atmel_trng_probe(struct platform_device *pdev) | |||
| 84 | return 0; | 94 | return 0; |
| 85 | 95 | ||
| 86 | err_register: | 96 | err_register: |
| 87 | clk_disable(trng->clk); | 97 | clk_disable_unprepare(trng->clk); |
| 88 | return ret; | 98 | return ret; |
| 89 | } | 99 | } |
| 90 | 100 | ||
| @@ -94,7 +104,7 @@ static int atmel_trng_remove(struct platform_device *pdev) | |||
| 94 | 104 | ||
| 95 | hwrng_unregister(&trng->rng); | 105 | hwrng_unregister(&trng->rng); |
| 96 | 106 | ||
| 97 | writel(TRNG_KEY, trng->base + TRNG_CR); | 107 | atmel_trng_disable(trng); |
| 98 | clk_disable_unprepare(trng->clk); | 108 | clk_disable_unprepare(trng->clk); |
| 99 | 109 | ||
| 100 | return 0; | 110 | return 0; |
| @@ -105,6 +115,7 @@ static int atmel_trng_suspend(struct device *dev) | |||
| 105 | { | 115 | { |
| 106 | struct atmel_trng *trng = dev_get_drvdata(dev); | 116 | struct atmel_trng *trng = dev_get_drvdata(dev); |
| 107 | 117 | ||
| 118 | atmel_trng_disable(trng); | ||
| 108 | clk_disable_unprepare(trng->clk); | 119 | clk_disable_unprepare(trng->clk); |
| 109 | 120 | ||
| 110 | return 0; | 121 | return 0; |
| @@ -113,8 +124,15 @@ static int atmel_trng_suspend(struct device *dev) | |||
| 113 | static int atmel_trng_resume(struct device *dev) | 124 | static int atmel_trng_resume(struct device *dev) |
| 114 | { | 125 | { |
| 115 | struct atmel_trng *trng = dev_get_drvdata(dev); | 126 | struct atmel_trng *trng = dev_get_drvdata(dev); |
| 127 | int ret; | ||
| 116 | 128 | ||
| 117 | return clk_prepare_enable(trng->clk); | 129 | ret = clk_prepare_enable(trng->clk); |
| 130 | if (ret) | ||
| 131 | return ret; | ||
| 132 | |||
| 133 | atmel_trng_enable(trng); | ||
| 134 | |||
| 135 | return 0; | ||
| 118 | } | 136 | } |
| 119 | 137 | ||
| 120 | static const struct dev_pm_ops atmel_trng_pm_ops = { | 138 | static const struct dev_pm_ops atmel_trng_pm_ops = { |
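Two distinct fixes land in atmel-rng. First, the probe error path now calls
clk_disable_unprepare() to match the earlier clk_prepare_enable(); the old
bare clk_disable() left the clock prepared on failure. Second, suspend and
resume now quiesce and re-arm the TRNG itself rather than only gating its
clock. The clk API pairing rule, as a sketch:

    ret = clk_prepare_enable(clk);      /* prepare + enable in one call */
    if (ret)
            return ret;
    ...
    clk_disable_unprepare(clk);         /* the matching disable + unprepare */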
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index d2d2c89de5b4..f9766415ff10 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
| @@ -92,6 +92,7 @@ static void add_early_randomness(struct hwrng *rng) | |||
| 92 | mutex_unlock(&reading_mutex); | 92 | mutex_unlock(&reading_mutex); |
| 93 | if (bytes_read > 0) | 93 | if (bytes_read > 0) |
| 94 | add_device_randomness(rng_buffer, bytes_read); | 94 | add_device_randomness(rng_buffer, bytes_read); |
| 95 | memset(rng_buffer, 0, size); | ||
| 95 | } | 96 | } |
| 96 | 97 | ||
| 97 | static inline void cleanup_rng(struct kref *kref) | 98 | static inline void cleanup_rng(struct kref *kref) |
| @@ -287,6 +288,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, | |||
| 287 | } | 288 | } |
| 288 | } | 289 | } |
| 289 | out: | 290 | out: |
| 291 | memset(rng_buffer, 0, rng_buffer_size()); | ||
| 290 | return ret ? : err; | 292 | return ret ? : err; |
| 291 | 293 | ||
| 292 | out_unlock_reading: | 294 | out_unlock_reading: |
| @@ -425,6 +427,7 @@ static int hwrng_fillfn(void *unused) | |||
| 425 | /* Outside lock, sure, but y'know: randomness. */ | 427 | /* Outside lock, sure, but y'know: randomness. */ |
| 426 | add_hwgenerator_randomness((void *)rng_fillbuf, rc, | 428 | add_hwgenerator_randomness((void *)rng_fillbuf, rc, |
| 427 | rc * current_quality * 8 >> 10); | 429 | rc * current_quality * 8 >> 10); |
| 430 | memset(rng_fillbuf, 0, rng_buffer_size()); | ||
| 428 | } | 431 | } |
| 429 | hwrng_fill = NULL; | 432 | hwrng_fill = NULL; |
| 430 | return 0; | 433 | return 0; |
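The three memset() calls wipe the staging buffers as soon as their contents
have been handed to the entropy pool or to userspace, so harvested random
bytes do not linger in kernel memory. A more defensive variant -- an
alternative, not what this patch uses -- would be memzero_explicit() from
<linux/string.h>, which the compiler is not allowed to optimise away:

    /* hedged alternative: a wipe guaranteed not to be elided */
    memzero_explicit(rng_buffer, rng_buffer_size());

Plain memset() arguably suffices here because the buffers are long-lived
globals whose contents the compiler cannot prove dead after the call.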
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c index 58bef39f7286..119d698439ae 100644 --- a/drivers/char/hw_random/meson-rng.c +++ b/drivers/char/hw_random/meson-rng.c | |||
| @@ -110,6 +110,7 @@ static const struct of_device_id meson_rng_of_match[] = { | |||
| 110 | { .compatible = "amlogic,meson-rng", }, | 110 | { .compatible = "amlogic,meson-rng", }, |
| 111 | {}, | 111 | {}, |
| 112 | }; | 112 | }; |
| 113 | MODULE_DEVICE_TABLE(of, meson_rng_of_match); | ||
| 113 | 114 | ||
| 114 | static struct platform_driver meson_rng_driver = { | 115 | static struct platform_driver meson_rng_driver = { |
| 115 | .probe = meson_rng_probe, | 116 | .probe = meson_rng_probe, |
| @@ -121,7 +122,6 @@ static struct platform_driver meson_rng_driver = { | |||
| 121 | 122 | ||
| 122 | module_platform_driver(meson_rng_driver); | 123 | module_platform_driver(meson_rng_driver); |
| 123 | 124 | ||
| 124 | MODULE_ALIAS("platform:meson-rng"); | ||
| 125 | MODULE_DESCRIPTION("Meson H/W Random Number Generator driver"); | 125 | MODULE_DESCRIPTION("Meson H/W Random Number Generator driver"); |
| 126 | MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>"); | 126 | MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>"); |
| 127 | MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); | 127 | MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); |
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c index 96fb986402eb..841fee845ec9 100644 --- a/drivers/char/hw_random/msm-rng.c +++ b/drivers/char/hw_random/msm-rng.c | |||
| @@ -90,10 +90,6 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) | |||
| 90 | /* calculate max size bytes to transfer back to caller */ | 90 | /* calculate max size bytes to transfer back to caller */ |
| 91 | maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); | 91 | maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); |
| 92 | 92 | ||
| 93 | /* no room for word data */ | ||
| 94 | if (maxsize < WORD_SZ) | ||
| 95 | return 0; | ||
| 96 | |||
| 97 | ret = clk_prepare_enable(rng->clk); | 93 | ret = clk_prepare_enable(rng->clk); |
| 98 | if (ret) | 94 | if (ret) |
| 99 | return ret; | 95 | return ret; |
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index f5c26a5f6875..3ad86fdf954e 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/of_device.h> | 28 | #include <linux/of_device.h> |
| 29 | #include <linux/of_address.h> | 29 | #include <linux/of_address.h> |
| 30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
| 31 | #include <linux/clk.h> | ||
| 31 | 32 | ||
| 32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
| 33 | 34 | ||
| @@ -63,10 +64,13 @@ | |||
| 63 | 64 | ||
| 64 | #define OMAP2_RNG_OUTPUT_SIZE 0x4 | 65 | #define OMAP2_RNG_OUTPUT_SIZE 0x4 |
| 65 | #define OMAP4_RNG_OUTPUT_SIZE 0x8 | 66 | #define OMAP4_RNG_OUTPUT_SIZE 0x8 |
| 67 | #define EIP76_RNG_OUTPUT_SIZE 0x10 | ||
| 66 | 68 | ||
| 67 | enum { | 69 | enum { |
| 68 | RNG_OUTPUT_L_REG = 0, | 70 | RNG_OUTPUT_0_REG = 0, |
| 69 | RNG_OUTPUT_H_REG, | 71 | RNG_OUTPUT_1_REG, |
| 72 | RNG_OUTPUT_2_REG, | ||
| 73 | RNG_OUTPUT_3_REG, | ||
| 70 | RNG_STATUS_REG, | 74 | RNG_STATUS_REG, |
| 71 | RNG_INTMASK_REG, | 75 | RNG_INTMASK_REG, |
| 72 | RNG_INTACK_REG, | 76 | RNG_INTACK_REG, |
| @@ -82,7 +86,7 @@ enum { | |||
| 82 | }; | 86 | }; |
| 83 | 87 | ||
| 84 | static const u16 reg_map_omap2[] = { | 88 | static const u16 reg_map_omap2[] = { |
| 85 | [RNG_OUTPUT_L_REG] = 0x0, | 89 | [RNG_OUTPUT_0_REG] = 0x0, |
| 86 | [RNG_STATUS_REG] = 0x4, | 90 | [RNG_STATUS_REG] = 0x4, |
| 87 | [RNG_CONFIG_REG] = 0x28, | 91 | [RNG_CONFIG_REG] = 0x28, |
| 88 | [RNG_REV_REG] = 0x3c, | 92 | [RNG_REV_REG] = 0x3c, |
| @@ -90,8 +94,8 @@ static const u16 reg_map_omap2[] = { | |||
| 90 | }; | 94 | }; |
| 91 | 95 | ||
| 92 | static const u16 reg_map_omap4[] = { | 96 | static const u16 reg_map_omap4[] = { |
| 93 | [RNG_OUTPUT_L_REG] = 0x0, | 97 | [RNG_OUTPUT_0_REG] = 0x0, |
| 94 | [RNG_OUTPUT_H_REG] = 0x4, | 98 | [RNG_OUTPUT_1_REG] = 0x4, |
| 95 | [RNG_STATUS_REG] = 0x8, | 99 | [RNG_STATUS_REG] = 0x8, |
| 96 | [RNG_INTMASK_REG] = 0xc, | 100 | [RNG_INTMASK_REG] = 0xc, |
| 97 | [RNG_INTACK_REG] = 0x10, | 101 | [RNG_INTACK_REG] = 0x10, |
| @@ -106,6 +110,23 @@ static const u16 reg_map_omap4[] = { | |||
| 106 | [RNG_SYSCONFIG_REG] = 0x1FE4, | 110 | [RNG_SYSCONFIG_REG] = 0x1FE4, |
| 107 | }; | 111 | }; |
| 108 | 112 | ||
| 113 | static const u16 reg_map_eip76[] = { | ||
| 114 | [RNG_OUTPUT_0_REG] = 0x0, | ||
| 115 | [RNG_OUTPUT_1_REG] = 0x4, | ||
| 116 | [RNG_OUTPUT_2_REG] = 0x8, | ||
| 117 | [RNG_OUTPUT_3_REG] = 0xc, | ||
| 118 | [RNG_STATUS_REG] = 0x10, | ||
| 119 | [RNG_INTACK_REG] = 0x10, | ||
| 120 | [RNG_CONTROL_REG] = 0x14, | ||
| 121 | [RNG_CONFIG_REG] = 0x18, | ||
| 122 | [RNG_ALARMCNT_REG] = 0x1c, | ||
| 123 | [RNG_FROENABLE_REG] = 0x20, | ||
| 124 | [RNG_FRODETUNE_REG] = 0x24, | ||
| 125 | [RNG_ALARMMASK_REG] = 0x28, | ||
| 126 | [RNG_ALARMSTOP_REG] = 0x2c, | ||
| 127 | [RNG_REV_REG] = 0x7c, | ||
| 128 | }; | ||
| 129 | |||
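Worth noting: in reg_map_eip76 above, RNG_STATUS_REG and RNG_INTACK_REG intentionally share offset 0x10. Assuming the usual EIP76 register layout, the same hardware word is polled for the ready bit on read and acknowledged on write, which lets the generic code paths keep using two distinct logical indices unchanged. Illustration only, using the masks that appear elsewhere in this file:

static inline bool eip76_data_ready(struct omap_rng_dev *priv)
{
	return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
}

static inline void eip76_ack_ready(struct omap_rng_dev *priv)
{
	omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
}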
| 109 | struct omap_rng_dev; | 130 | struct omap_rng_dev; |
| 110 | /** | 131 | /** |
| 111 | * struct omap_rng_pdata - RNG IP block-specific data | 132 | * struct omap_rng_pdata - RNG IP block-specific data |
| @@ -127,6 +148,8 @@ struct omap_rng_dev { | |||
| 127 | void __iomem *base; | 148 | void __iomem *base; |
| 128 | struct device *dev; | 149 | struct device *dev; |
| 129 | const struct omap_rng_pdata *pdata; | 150 | const struct omap_rng_pdata *pdata; |
| 151 | struct hwrng rng; | ||
| 152 | struct clk *clk; | ||
| 130 | }; | 153 | }; |
| 131 | 154 | ||
| 132 | static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) | 155 | static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) |
| @@ -140,41 +163,35 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg, | |||
| 140 | __raw_writel(val, priv->base + priv->pdata->regs[reg]); | 163 | __raw_writel(val, priv->base + priv->pdata->regs[reg]); |
| 141 | } | 164 | } |
| 142 | 165 | ||
| 143 | static int omap_rng_data_present(struct hwrng *rng, int wait) | 166 | |
| 167 | static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max, | ||
| 168 | bool wait) | ||
| 144 | { | 169 | { |
| 145 | struct omap_rng_dev *priv; | 170 | struct omap_rng_dev *priv; |
| 146 | int data, i; | 171 | int i, present; |
| 147 | 172 | ||
| 148 | priv = (struct omap_rng_dev *)rng->priv; | 173 | priv = (struct omap_rng_dev *)rng->priv; |
| 149 | 174 | ||
| 175 | if (max < priv->pdata->data_size) | ||
| 176 | return 0; | ||
| 177 | |||
| 150 | for (i = 0; i < 20; i++) { | 178 | for (i = 0; i < 20; i++) { |
| 151 | data = priv->pdata->data_present(priv); | 179 | present = priv->pdata->data_present(priv); |
| 152 | if (data || !wait) | 180 | if (present || !wait) |
| 153 | break; | 181 | break; |
| 154 | /* RNG produces data fast enough (2+ MBit/sec, even | 182 | |
| 155 | * during "rngtest" loads, that these delays don't | ||
| 156 | * seem to trigger. We *could* use the RNG IRQ, but | ||
| 157 | * that'd be higher overhead ... so why bother? | ||
| 158 | */ | ||
| 159 | udelay(10); | 183 | udelay(10); |
| 160 | } | 184 | } |
| 161 | return data; | 185 | if (!present) |
| 162 | } | 186 | return 0; |
| 163 | |||
| 164 | static int omap_rng_data_read(struct hwrng *rng, u32 *data) | ||
| 165 | { | ||
| 166 | struct omap_rng_dev *priv; | ||
| 167 | u32 data_size, i; | ||
| 168 | |||
| 169 | priv = (struct omap_rng_dev *)rng->priv; | ||
| 170 | data_size = priv->pdata->data_size; | ||
| 171 | 187 | ||
| 172 | for (i = 0; i < data_size / sizeof(u32); i++) | 188 | memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG], |
| 173 | data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i); | 189 | priv->pdata->data_size); |
| 174 | 190 | ||
| 175 | if (priv->pdata->regs[RNG_INTACK_REG]) | 191 | if (priv->pdata->regs[RNG_INTACK_REG]) |
| 176 | omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK); | 192 | omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK); |
| 177 | return data_size; | 193 | |
| 194 | return priv->pdata->data_size; | ||
| 178 | } | 195 | } |
| 179 | 196 | ||
| 180 | static int omap_rng_init(struct hwrng *rng) | 197 | static int omap_rng_init(struct hwrng *rng) |
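The rename from OUTPUT_L/OUTPUT_H to OUTPUT_0..3 matters here: the OUTPUT registers are contiguous on all three variants, so a single memcpy_fromio() of pdata->data_size bytes (4 on OMAP2, 8 on OMAP4, 16 on the EIP76) replaces the old per-word read loop. The copy step in isolation, as a hypothetical helper whose body matches the code above:

static int omap_rng_copy_out(struct omap_rng_dev *priv, void *data)
{
	memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
		      priv->pdata->data_size);
	return priv->pdata->data_size;
}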
| @@ -193,13 +210,6 @@ static void omap_rng_cleanup(struct hwrng *rng) | |||
| 193 | priv->pdata->cleanup(priv); | 210 | priv->pdata->cleanup(priv); |
| 194 | } | 211 | } |
| 195 | 212 | ||
| 196 | static struct hwrng omap_rng_ops = { | ||
| 197 | .name = "omap", | ||
| 198 | .data_present = omap_rng_data_present, | ||
| 199 | .data_read = omap_rng_data_read, | ||
| 200 | .init = omap_rng_init, | ||
| 201 | .cleanup = omap_rng_cleanup, | ||
| 202 | }; | ||
| 203 | 213 | ||
| 204 | static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) | 214 | static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv) |
| 205 | { | 215 | { |
| @@ -231,6 +241,38 @@ static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv) | |||
| 231 | return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; | 241 | return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY; |
| 232 | } | 242 | } |
| 233 | 243 | ||
| 244 | static int eip76_rng_init(struct omap_rng_dev *priv) | ||
| 245 | { | ||
| 246 | u32 val; | ||
| 247 | |||
| 248 | /* Return if RNG is already running. */ | ||
| 249 | if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK) | ||
| 250 | return 0; | ||
| 251 | |||
| 252 | /* Number of 512 bit blocks of raw Noise Source output data that must | ||
| 253 | * be processed by either the Conditioning Function or the | ||
| 254 | * SP 800-90 DRBG ‘BC_DF’ functionality to yield a ‘full entropy’ | ||
| 255 | * output value. | ||
| 256 | */ | ||
| 257 | val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT; | ||
| 258 | |||
| 259 | /* Number of FRO samples that are XOR-ed together into one bit to be | ||
| 260 | * shifted into the main shift register | ||
| 261 | */ | ||
| 262 | val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT; | ||
| 263 | omap_rng_write(priv, RNG_CONFIG_REG, val); | ||
| 264 | |||
| 265 | /* Enable all available FROs */ | ||
| 266 | omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0); | ||
| 267 | omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK); | ||
| 268 | |||
| 269 | /* Enable TRNG */ | ||
| 270 | val = RNG_CONTROL_ENABLE_TRNG_MASK; | ||
| 271 | omap_rng_write(priv, RNG_CONTROL_REG, val); | ||
| 272 | |||
| 273 | return 0; | ||
| 274 | } | ||
| 275 | |||
| 234 | static int omap4_rng_init(struct omap_rng_dev *priv) | 276 | static int omap4_rng_init(struct omap_rng_dev *priv) |
| 235 | { | 277 | { |
| 236 | u32 val; | 278 | u32 val; |
| @@ -300,6 +342,14 @@ static struct omap_rng_pdata omap4_rng_pdata = { | |||
| 300 | .cleanup = omap4_rng_cleanup, | 342 | .cleanup = omap4_rng_cleanup, |
| 301 | }; | 343 | }; |
| 302 | 344 | ||
| 345 | static struct omap_rng_pdata eip76_rng_pdata = { | ||
| 346 | .regs = (u16 *)reg_map_eip76, | ||
| 347 | .data_size = EIP76_RNG_OUTPUT_SIZE, | ||
| 348 | .data_present = omap4_rng_data_present, | ||
| 349 | .init = eip76_rng_init, | ||
| 350 | .cleanup = omap4_rng_cleanup, | ||
| 351 | }; | ||
| 352 | |||
| 303 | static const struct of_device_id omap_rng_of_match[] = { | 353 | static const struct of_device_id omap_rng_of_match[] = { |
| 304 | { | 354 | { |
| 305 | .compatible = "ti,omap2-rng", | 355 | .compatible = "ti,omap2-rng", |
| @@ -309,6 +359,10 @@ static const struct of_device_id omap_rng_of_match[] = { | |||
| 309 | .compatible = "ti,omap4-rng", | 359 | .compatible = "ti,omap4-rng", |
| 310 | .data = &omap4_rng_pdata, | 360 | .data = &omap4_rng_pdata, |
| 311 | }, | 361 | }, |
| 362 | { | ||
| 363 | .compatible = "inside-secure,safexcel-eip76", | ||
| 364 | .data = &eip76_rng_pdata, | ||
| 365 | }, | ||
| 312 | {}, | 366 | {}, |
| 313 | }; | 367 | }; |
| 314 | MODULE_DEVICE_TABLE(of, omap_rng_of_match); | 368 | MODULE_DEVICE_TABLE(of, omap_rng_of_match); |
| @@ -327,7 +381,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, | |||
| 327 | } | 381 | } |
| 328 | priv->pdata = match->data; | 382 | priv->pdata = match->data; |
| 329 | 383 | ||
| 330 | if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) { | 384 | if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") || |
| 385 | of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) { | ||
| 331 | irq = platform_get_irq(pdev, 0); | 386 | irq = platform_get_irq(pdev, 0); |
| 332 | if (irq < 0) { | 387 | if (irq < 0) { |
| 333 | dev_err(dev, "%s: error getting IRQ resource - %d\n", | 388 | dev_err(dev, "%s: error getting IRQ resource - %d\n", |
| @@ -343,6 +398,16 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, | |||
| 343 | return err; | 398 | return err; |
| 344 | } | 399 | } |
| 345 | omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); | 400 | omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK); |
| 401 | |||
| 402 | priv->clk = of_clk_get(pdev->dev.of_node, 0); | ||
| 403 | if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER) | ||
| 404 | return -EPROBE_DEFER; | ||
| 405 | if (!IS_ERR(priv->clk)) { | ||
| 406 | err = clk_prepare_enable(priv->clk); | ||
| 407 | if (err) | ||
| 408 | dev_err(&pdev->dev, "unable to enable the clk, err = %d\n", | ||
| 409 | err); | ||
| 410 | } | ||
| 346 | } | 411 | } |
| 347 | return 0; | 412 | return 0; |
| 348 | } | 413 | } |
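The clock handling above treats the clock as optional: only -EPROBE_DEFER is propagated, a missing clock (the OMAP case) is tolerated, and even a clk_prepare_enable() failure is merely logged. The same idiom in isolation, as a sketch with a hypothetical helper name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *example_get_optional_clk(struct device *dev)
{
	struct clk *clk = of_clk_get(dev->of_node, 0);

	/* Missing clock is fine (OMAP); the caller only needs to treat
	 * -EPROBE_DEFER as fatal, mirroring the driver above. */
	if (IS_ERR(clk))
		return clk;

	if (clk_prepare_enable(clk))
		dev_err(dev, "unable to enable the clk\n");

	return clk;
}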
| @@ -372,7 +437,11 @@ static int omap_rng_probe(struct platform_device *pdev) | |||
| 372 | if (!priv) | 437 | if (!priv) |
| 373 | return -ENOMEM; | 438 | return -ENOMEM; |
| 374 | 439 | ||
| 375 | omap_rng_ops.priv = (unsigned long)priv; | 440 | priv->rng.read = omap_rng_do_read; |
| 441 | priv->rng.init = omap_rng_init; | ||
| 442 | priv->rng.cleanup = omap_rng_cleanup; | ||
| 443 | |||
| 444 | priv->rng.priv = (unsigned long)priv; | ||
| 376 | platform_set_drvdata(pdev, priv); | 445 | platform_set_drvdata(pdev, priv); |
| 377 | priv->dev = dev; | 446 | priv->dev = dev; |
| 378 | 447 | ||
| @@ -383,6 +452,12 @@ static int omap_rng_probe(struct platform_device *pdev) | |||
| 383 | goto err_ioremap; | 452 | goto err_ioremap; |
| 384 | } | 453 | } |
| 385 | 454 | ||
| 455 | priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); | ||
| 456 | if (!priv->rng.name) { | ||
| 457 | ret = -ENOMEM; | ||
| 458 | goto err_ioremap; | ||
| 459 | } | ||
| 460 | |||
| 386 | pm_runtime_enable(&pdev->dev); | 461 | pm_runtime_enable(&pdev->dev); |
| 387 | ret = pm_runtime_get_sync(&pdev->dev); | 462 | ret = pm_runtime_get_sync(&pdev->dev); |
| 388 | if (ret < 0) { | 463 | if (ret < 0) { |
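Deriving the hwrng name from dev_name() with devm_kstrdup() (instead of the fixed "omap" string in the old static ops) gives every probed device a unique registration name, which is what allows more than one RNG instance, e.g. several EIP76 blocks, to coexist; the devm_ allocation also frees the name automatically on unbind.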
| @@ -394,20 +469,24 @@ static int omap_rng_probe(struct platform_device *pdev) | |||
| 394 | ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : | 469 | ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : |
| 395 | get_omap_rng_device_details(priv); | 470 | get_omap_rng_device_details(priv); |
| 396 | if (ret) | 471 | if (ret) |
| 397 | goto err_ioremap; | 472 | goto err_register; |
| 398 | 473 | ||
| 399 | ret = hwrng_register(&omap_rng_ops); | 474 | ret = hwrng_register(&priv->rng); |
| 400 | if (ret) | 475 | if (ret) |
| 401 | goto err_register; | 476 | goto err_register; |
| 402 | 477 | ||
| 403 | dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n", | 478 | dev_info(&pdev->dev, "Random Number Generator ver. %02x\n", |
| 404 | omap_rng_read(priv, RNG_REV_REG)); | 479 | omap_rng_read(priv, RNG_REV_REG)); |
| 405 | 480 | ||
| 406 | return 0; | 481 | return 0; |
| 407 | 482 | ||
| 408 | err_register: | 483 | err_register: |
| 409 | priv->base = NULL; | 484 | priv->base = NULL; |
| 485 | pm_runtime_put_sync(&pdev->dev); | ||
| 410 | pm_runtime_disable(&pdev->dev); | 486 | pm_runtime_disable(&pdev->dev); |
| 487 | |||
| 488 | if (!IS_ERR(priv->clk)) | ||
| 489 | clk_disable_unprepare(priv->clk); | ||
| 411 | err_ioremap: | 490 | err_ioremap: |
| 412 | dev_err(dev, "initialization failed.\n"); | 491 | dev_err(dev, "initialization failed.\n"); |
| 413 | return ret; | 492 | return ret; |
| @@ -417,13 +496,16 @@ static int omap_rng_remove(struct platform_device *pdev) | |||
| 417 | { | 496 | { |
| 418 | struct omap_rng_dev *priv = platform_get_drvdata(pdev); | 497 | struct omap_rng_dev *priv = platform_get_drvdata(pdev); |
| 419 | 498 | ||
| 420 | hwrng_unregister(&omap_rng_ops); | 499 | hwrng_unregister(&priv->rng); |
| 421 | 500 | ||
| 422 | priv->pdata->cleanup(priv); | 501 | priv->pdata->cleanup(priv); |
| 423 | 502 | ||
| 424 | pm_runtime_put_sync(&pdev->dev); | 503 | pm_runtime_put_sync(&pdev->dev); |
| 425 | pm_runtime_disable(&pdev->dev); | 504 | pm_runtime_disable(&pdev->dev); |
| 426 | 505 | ||
| 506 | if (!IS_ERR(priv->clk)) | ||
| 507 | clk_disable_unprepare(priv->clk); | ||
| 508 | |||
| 427 | return 0; | 509 | return 0; |
| 428 | } | 510 | } |
| 429 | 511 | ||
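Taken together, the omap-rng changes replace the file-scope omap_rng_ops, whose single .priv field meant a second probe would clobber the first device's context, with a struct hwrng embedded in each omap_rng_dev, and move the driver from the legacy data_present/data_read pair to the current .read interface. A self-contained sketch of the embedded-ops pattern; the names here are illustrative:

#include <linux/hw_random.h>
#include <linux/kernel.h>

struct example_rng_dev {
	void __iomem *base;
	struct hwrng rng;	/* per-instance, not file-scope */
};

/*
 * container_of() is the other way back from the hwrng to the driver
 * state; the driver above keeps the equivalent .priv scheme instead.
 */
static inline struct example_rng_dev *to_example(struct hwrng *rng)
{
	return container_of(rng, struct example_rng_dev, rng);
}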
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 11dc9b7c09ce..9b5e68a71d01 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c | |||
| @@ -62,9 +62,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, | |||
| 62 | u32 t; | 62 | u32 t; |
| 63 | unsigned int timeout = RNG_TIMEOUT; | 63 | unsigned int timeout = RNG_TIMEOUT; |
| 64 | 64 | ||
| 65 | if (max < 8) | ||
| 66 | return 0; | ||
| 67 | |||
| 68 | do { | 65 | do { |
| 69 | t = readl(priv->base + RNGRCNT) & RCNT_MASK; | 66 | t = readl(priv->base + RNGRCNT) & RCNT_MASK; |
| 70 | if (t == 64) { | 67 | if (t == 64) { |
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c index 63ce51d09af1..d9f46b437cc2 100644 --- a/drivers/char/hw_random/pseries-rng.c +++ b/drivers/char/hw_random/pseries-rng.c | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) | 28 | static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) |
| 29 | { | 29 | { |
| 30 | u64 buffer[PLPAR_HCALL_BUFSIZE]; | 30 | u64 buffer[PLPAR_HCALL_BUFSIZE]; |
| 31 | size_t size = max < 8 ? max : 8; | ||
| 32 | int rc; | 31 | int rc; |
| 33 | 32 | ||
| 34 | rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); | 33 | rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); |
| @@ -36,10 +35,10 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait | |||
| 36 | pr_err_ratelimited("H_RANDOM call failed %d\n", rc); | 35 | pr_err_ratelimited("H_RANDOM call failed %d\n", rc); |
| 37 | return -EIO; | 36 | return -EIO; |
| 38 | } | 37 | } |
| 39 | memcpy(data, buffer, size); | 38 | memcpy(data, buffer, 8); |
| 40 | 39 | ||
| 41 | /* The hypervisor interface returns 64 bits */ | 40 | /* The hypervisor interface returns 64 bits */ |
| 42 | return size; | 41 | return 8; |
| 43 | } | 42 | } |
| 44 | 43 | ||
| 45 | /** | 44 | /** |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index dae1e39139e9..d10b4ae5e0da 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
| @@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) | |||
| 135 | ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, | 135 | ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, |
| 136 | &ctx->sa_out_dma_addr, GFP_ATOMIC); | 136 | &ctx->sa_out_dma_addr, GFP_ATOMIC); |
| 137 | if (ctx->sa_out == NULL) { | 137 | if (ctx->sa_out == NULL) { |
| 138 | dma_free_coherent(ctx->dev->core_dev->device, | 138 | dma_free_coherent(ctx->dev->core_dev->device, size * 4, |
| 139 | ctx->sa_len * 4, | ||
| 140 | ctx->sa_in, ctx->sa_in_dma_addr); | 139 | ctx->sa_in, ctx->sa_in_dma_addr); |
| 141 | return -ENOMEM; | 140 | return -ENOMEM; |
| 142 | } | 141 | } |
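The crypto4xx fix above pairs the error-path dma_free_coherent() with the size actually used for the allocation (size * 4): ctx->sa_len is only updated once both buffers exist, so the old expression could free sa_in with a stale length. The invariant, as a sketch with a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: dma_free_coherent() must be given the exact size that was
 * passed to dma_alloc_coherent() for that same buffer. */
static void *alloc_second_or_free_first(struct device *dev, size_t sz,
					void *first, dma_addr_t first_dma,
					dma_addr_t *second_dma)
{
	void *second = dma_alloc_coherent(dev, sz, second_dma, GFP_ATOMIC);

	if (!second)
		dma_free_coherent(dev, sz, first, first_dma); /* same sz */

	return second;
}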
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h index 6c2951bb70b1..0ec04407b533 100644 --- a/drivers/crypto/atmel-aes-regs.h +++ b/drivers/crypto/atmel-aes-regs.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #define AES_MR_OPMOD_CFB (0x3 << 12) | 28 | #define AES_MR_OPMOD_CFB (0x3 << 12) |
| 29 | #define AES_MR_OPMOD_CTR (0x4 << 12) | 29 | #define AES_MR_OPMOD_CTR (0x4 << 12) |
| 30 | #define AES_MR_OPMOD_GCM (0x5 << 12) | 30 | #define AES_MR_OPMOD_GCM (0x5 << 12) |
| 31 | #define AES_MR_OPMOD_XTS (0x6 << 12) | ||
| 31 | #define AES_MR_LOD (0x1 << 15) | 32 | #define AES_MR_LOD (0x1 << 15) |
| 32 | #define AES_MR_CFBS_MASK (0x7 << 16) | 33 | #define AES_MR_CFBS_MASK (0x7 << 16) |
| 33 | #define AES_MR_CFBS_128b (0x0 << 16) | 34 | #define AES_MR_CFBS_128b (0x0 << 16) |
| @@ -67,6 +68,9 @@ | |||
| 67 | #define AES_CTRR 0x98 | 68 | #define AES_CTRR 0x98 |
| 68 | #define AES_GCMHR(x) (0x9c + ((x) * 0x04)) | 69 | #define AES_GCMHR(x) (0x9c + ((x) * 0x04)) |
| 69 | 70 | ||
| 71 | #define AES_TWR(x) (0xc0 + ((x) * 0x04)) | ||
| 72 | #define AES_ALPHAR(x) (0xd0 + ((x) * 0x04)) | ||
| 73 | |||
| 70 | #define AES_HW_VERSION 0xFC | 74 | #define AES_HW_VERSION 0xFC |
| 71 | 75 | ||
| 72 | #endif /* __ATMEL_AES_REGS_H__ */ | 76 | #endif /* __ATMEL_AES_REGS_H__ */ |
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index e3d40a8dfffb..0e3d0d655b96 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <crypto/scatterwalk.h> | 36 | #include <crypto/scatterwalk.h> |
| 37 | #include <crypto/algapi.h> | 37 | #include <crypto/algapi.h> |
| 38 | #include <crypto/aes.h> | 38 | #include <crypto/aes.h> |
| 39 | #include <crypto/xts.h> | ||
| 39 | #include <crypto/internal/aead.h> | 40 | #include <crypto/internal/aead.h> |
| 40 | #include <linux/platform_data/crypto-atmel.h> | 41 | #include <linux/platform_data/crypto-atmel.h> |
| 41 | #include <dt-bindings/dma/at91.h> | 42 | #include <dt-bindings/dma/at91.h> |
| @@ -68,6 +69,7 @@ | |||
| 68 | #define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) | 69 | #define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) |
| 69 | #define AES_FLAGS_CTR AES_MR_OPMOD_CTR | 70 | #define AES_FLAGS_CTR AES_MR_OPMOD_CTR |
| 70 | #define AES_FLAGS_GCM AES_MR_OPMOD_GCM | 71 | #define AES_FLAGS_GCM AES_MR_OPMOD_GCM |
| 72 | #define AES_FLAGS_XTS AES_MR_OPMOD_XTS | ||
| 71 | 73 | ||
| 72 | #define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \ | 74 | #define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \ |
| 73 | AES_FLAGS_ENCRYPT | \ | 75 | AES_FLAGS_ENCRYPT | \ |
| @@ -89,6 +91,7 @@ struct atmel_aes_caps { | |||
| 89 | bool has_cfb64; | 91 | bool has_cfb64; |
| 90 | bool has_ctr32; | 92 | bool has_ctr32; |
| 91 | bool has_gcm; | 93 | bool has_gcm; |
| 94 | bool has_xts; | ||
| 92 | u32 max_burst_size; | 95 | u32 max_burst_size; |
| 93 | }; | 96 | }; |
| 94 | 97 | ||
| @@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx { | |||
| 135 | atmel_aes_fn_t ghash_resume; | 138 | atmel_aes_fn_t ghash_resume; |
| 136 | }; | 139 | }; |
| 137 | 140 | ||
| 141 | struct atmel_aes_xts_ctx { | ||
| 142 | struct atmel_aes_base_ctx base; | ||
| 143 | |||
| 144 | u32 key2[AES_KEYSIZE_256 / sizeof(u32)]; | ||
| 145 | }; | ||
| 146 | |||
| 138 | struct atmel_aes_reqctx { | 147 | struct atmel_aes_reqctx { |
| 139 | unsigned long mode; | 148 | unsigned long mode; |
| 140 | }; | 149 | }; |
| @@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz) | |||
| 282 | snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); | 291 | snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); |
| 283 | break; | 292 | break; |
| 284 | 293 | ||
| 294 | case AES_TWR(0): | ||
| 295 | case AES_TWR(1): | ||
| 296 | case AES_TWR(2): | ||
| 297 | case AES_TWR(3): | ||
| 298 | snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2); | ||
| 299 | break; | ||
| 300 | |||
| 301 | case AES_ALPHAR(0): | ||
| 302 | case AES_ALPHAR(1): | ||
| 303 | case AES_ALPHAR(2): | ||
| 304 | case AES_ALPHAR(3): | ||
| 305 | snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2); | ||
| 306 | break; | ||
| 307 | |||
| 285 | default: | 308 | default: |
| 286 | snprintf(tmp, sz, "0x%02x", offset); | 309 | snprintf(tmp, sz, "0x%02x", offset); |
| 287 | break; | 310 | break; |
| @@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd, | |||
| 317 | char tmp[16]; | 340 | char tmp[16]; |
| 318 | 341 | ||
| 319 | dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, | 342 | dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, |
| 320 | atmel_aes_reg_name(offset, tmp)); | 343 | atmel_aes_reg_name(offset, tmp, sizeof(tmp))); |
| 321 | } | 344 | } |
| 322 | #endif /* VERBOSE_DEBUG */ | 345 | #endif /* VERBOSE_DEBUG */ |
| 323 | 346 | ||
| @@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) | |||
| 453 | return err; | 476 | return err; |
| 454 | } | 477 | } |
| 455 | 478 | ||
| 456 | static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, | 479 | static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma, |
| 457 | const u32 *iv) | 480 | const u32 *iv, const u32 *key, int keylen) |
| 458 | { | 481 | { |
| 459 | u32 valmr = 0; | 482 | u32 valmr = 0; |
| 460 | 483 | ||
| 461 | /* MR register must be set before IV registers */ | 484 | /* MR register must be set before IV registers */ |
| 462 | if (dd->ctx->keylen == AES_KEYSIZE_128) | 485 | if (keylen == AES_KEYSIZE_128) |
| 463 | valmr |= AES_MR_KEYSIZE_128; | 486 | valmr |= AES_MR_KEYSIZE_128; |
| 464 | else if (dd->ctx->keylen == AES_KEYSIZE_192) | 487 | else if (keylen == AES_KEYSIZE_192) |
| 465 | valmr |= AES_MR_KEYSIZE_192; | 488 | valmr |= AES_MR_KEYSIZE_192; |
| 466 | else | 489 | else |
| 467 | valmr |= AES_MR_KEYSIZE_256; | 490 | valmr |= AES_MR_KEYSIZE_256; |
| @@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, | |||
| 478 | 501 | ||
| 479 | atmel_aes_write(dd, AES_MR, valmr); | 502 | atmel_aes_write(dd, AES_MR, valmr); |
| 480 | 503 | ||
| 481 | atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, | 504 | atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen)); |
| 482 | SIZE_IN_WORDS(dd->ctx->keylen)); | ||
| 483 | 505 | ||
| 484 | if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) | 506 | if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) |
| 485 | atmel_aes_write_block(dd, AES_IVR(0), iv); | 507 | atmel_aes_write_block(dd, AES_IVR(0), iv); |
| 486 | } | 508 | } |
| 487 | 509 | ||
| 510 | static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, | ||
| 511 | const u32 *iv) | ||
| 512 | |||
| 513 | { | ||
| 514 | atmel_aes_write_ctrl_key(dd, use_dma, iv, | ||
| 515 | dd->ctx->key, dd->ctx->keylen); | ||
| 516 | } | ||
| 488 | 517 | ||
| 489 | /* CPU transfer */ | 518 | /* CPU transfer */ |
| 490 | 519 | ||
| @@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = { | |||
| 1769 | }; | 1798 | }; |
| 1770 | 1799 | ||
| 1771 | 1800 | ||
| 1801 | /* xts functions */ | ||
| 1802 | |||
| 1803 | static inline struct atmel_aes_xts_ctx * | ||
| 1804 | atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx) | ||
| 1805 | { | ||
| 1806 | return container_of(ctx, struct atmel_aes_xts_ctx, base); | ||
| 1807 | } | ||
| 1808 | |||
| 1809 | static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd); | ||
| 1810 | |||
| 1811 | static int atmel_aes_xts_start(struct atmel_aes_dev *dd) | ||
| 1812 | { | ||
| 1813 | struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx); | ||
| 1814 | struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); | ||
| 1815 | struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); | ||
| 1816 | unsigned long flags; | ||
| 1817 | int err; | ||
| 1818 | |||
| 1819 | atmel_aes_set_mode(dd, rctx); | ||
| 1820 | |||
| 1821 | err = atmel_aes_hw_init(dd); | ||
| 1822 | if (err) | ||
| 1823 | return atmel_aes_complete(dd, err); | ||
| 1824 | |||
| 1825 | /* Compute the tweak value from req->info with ecb(aes). */ | ||
| 1826 | flags = dd->flags; | ||
| 1827 | dd->flags &= ~AES_FLAGS_MODE_MASK; | ||
| 1828 | dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); | ||
| 1829 | atmel_aes_write_ctrl_key(dd, false, NULL, | ||
| 1830 | ctx->key2, ctx->base.keylen); | ||
| 1831 | dd->flags = flags; | ||
| 1832 | |||
| 1833 | atmel_aes_write_block(dd, AES_IDATAR(0), req->info); | ||
| 1834 | return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data); | ||
| 1835 | } | ||
| 1836 | |||
| 1837 | static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd) | ||
| 1838 | { | ||
| 1839 | struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); | ||
| 1840 | bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD); | ||
| 1841 | u32 tweak[AES_BLOCK_SIZE / sizeof(u32)]; | ||
| 1842 | static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), }; | ||
| 1843 | u8 *tweak_bytes = (u8 *)tweak; | ||
| 1844 | int i; | ||
| 1845 | |||
| 1846 | /* Read the computed ciphered tweak value. */ | ||
| 1847 | atmel_aes_read_block(dd, AES_ODATAR(0), tweak); | ||
| 1848 | /* | ||
| 1849 | * Hardware quirk: | ||
| 1850 | * the order of the ciphered tweak bytes needs to be reversed before | ||
| 1851 | * writing them into the TWRx registers. | ||
| 1852 | */ | ||
| 1853 | for (i = 0; i < AES_BLOCK_SIZE/2; ++i) { | ||
| 1854 | u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i]; | ||
| 1855 | |||
| 1856 | tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i]; | ||
| 1857 | tweak_bytes[i] = tmp; | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | /* Process the data. */ | ||
| 1861 | atmel_aes_write_ctrl(dd, use_dma, NULL); | ||
| 1862 | atmel_aes_write_block(dd, AES_TWR(0), tweak); | ||
| 1863 | atmel_aes_write_block(dd, AES_ALPHAR(0), one); | ||
| 1864 | if (use_dma) | ||
| 1865 | return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes, | ||
| 1866 | atmel_aes_transfer_complete); | ||
| 1867 | |||
| 1868 | return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes, | ||
| 1869 | atmel_aes_transfer_complete); | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 1873 | unsigned int keylen) | ||
| 1874 | { | ||
| 1875 | struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 1876 | int err; | ||
| 1877 | |||
| 1878 | err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen); | ||
| 1879 | if (err) | ||
| 1880 | return err; | ||
| 1881 | |||
| 1882 | memcpy(ctx->base.key, key, keylen/2); | ||
| 1883 | memcpy(ctx->key2, key + keylen/2, keylen/2); | ||
| 1884 | ctx->base.keylen = keylen/2; | ||
| 1885 | |||
| 1886 | return 0; | ||
| 1887 | } | ||
| 1888 | |||
| 1889 | static int atmel_aes_xts_encrypt(struct ablkcipher_request *req) | ||
| 1890 | { | ||
| 1891 | return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT); | ||
| 1892 | } | ||
| 1893 | |||
| 1894 | static int atmel_aes_xts_decrypt(struct ablkcipher_request *req) | ||
| 1895 | { | ||
| 1896 | return atmel_aes_crypt(req, AES_FLAGS_XTS); | ||
| 1897 | } | ||
| 1898 | |||
| 1899 | static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm) | ||
| 1900 | { | ||
| 1901 | struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 1902 | |||
| 1903 | tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); | ||
| 1904 | ctx->base.start = atmel_aes_xts_start; | ||
| 1905 | |||
| 1906 | return 0; | ||
| 1907 | } | ||
| 1908 | |||
| 1909 | static struct crypto_alg aes_xts_alg = { | ||
| 1910 | .cra_name = "xts(aes)", | ||
| 1911 | .cra_driver_name = "atmel-xts-aes", | ||
| 1912 | .cra_priority = ATMEL_AES_PRIORITY, | ||
| 1913 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
| 1914 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 1915 | .cra_ctxsize = sizeof(struct atmel_aes_xts_ctx), | ||
| 1916 | .cra_alignmask = 0xf, | ||
| 1917 | .cra_type = &crypto_ablkcipher_type, | ||
| 1918 | .cra_module = THIS_MODULE, | ||
| 1919 | .cra_init = atmel_aes_xts_cra_init, | ||
| 1920 | .cra_exit = atmel_aes_cra_exit, | ||
| 1921 | .cra_u.ablkcipher = { | ||
| 1922 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
| 1923 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
| 1924 | .ivsize = AES_BLOCK_SIZE, | ||
| 1925 | .setkey = atmel_aes_xts_setkey, | ||
| 1926 | .encrypt = atmel_aes_xts_encrypt, | ||
| 1927 | .decrypt = atmel_aes_xts_decrypt, | ||
| 1928 | } | ||
| 1929 | }; | ||
| 1930 | |||
| 1931 | |||
| 1772 | /* Probe functions */ | 1932 | /* Probe functions */ |
| 1773 | 1933 | ||
| 1774 | static int atmel_aes_buff_init(struct atmel_aes_dev *dd) | 1934 | static int atmel_aes_buff_init(struct atmel_aes_dev *dd) |
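With aes_xts_alg registered, in-kernel users reach the accelerated mode through the generic crypto API. A minimal sketch using the skcipher interface of this kernel generation; the helper name and the synchronous shortcut are illustrative only (a real caller installs a completion callback, since this is an async implementation, and must not pass stack buffers to a DMA-capable driver):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* One 16-byte block through xts(aes); key is key1 || key2, exactly as
 * split by the setkey above. */
static int example_xts_one_block(const u8 key[64], u8 iv[16], u8 *buf)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 64);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, 16);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
	/* May return -EINPROGRESS; a real caller waits for completion. */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return err;
}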
| @@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) | |||
| 1877 | { | 2037 | { |
| 1878 | int i; | 2038 | int i; |
| 1879 | 2039 | ||
| 2040 | if (dd->caps.has_xts) | ||
| 2041 | crypto_unregister_alg(&aes_xts_alg); | ||
| 2042 | |||
| 1880 | if (dd->caps.has_gcm) | 2043 | if (dd->caps.has_gcm) |
| 1881 | crypto_unregister_aead(&aes_gcm_alg); | 2044 | crypto_unregister_aead(&aes_gcm_alg); |
| 1882 | 2045 | ||
| @@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) | |||
| 1909 | goto err_aes_gcm_alg; | 2072 | goto err_aes_gcm_alg; |
| 1910 | } | 2073 | } |
| 1911 | 2074 | ||
| 2075 | if (dd->caps.has_xts) { | ||
| 2076 | err = crypto_register_alg(&aes_xts_alg); | ||
| 2077 | if (err) | ||
| 2078 | goto err_aes_xts_alg; | ||
| 2079 | } | ||
| 2080 | |||
| 1912 | return 0; | 2081 | return 0; |
| 1913 | 2082 | ||
| 2083 | err_aes_xts_alg: | ||
| 2084 | crypto_unregister_aead(&aes_gcm_alg); | ||
| 1914 | err_aes_gcm_alg: | 2085 | err_aes_gcm_alg: |
| 1915 | crypto_unregister_alg(&aes_cfb64_alg); | 2086 | crypto_unregister_alg(&aes_cfb64_alg); |
| 1916 | err_aes_cfb64_alg: | 2087 | err_aes_cfb64_alg: |
| @@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) | |||
| 1928 | dd->caps.has_cfb64 = 0; | 2099 | dd->caps.has_cfb64 = 0; |
| 1929 | dd->caps.has_ctr32 = 0; | 2100 | dd->caps.has_ctr32 = 0; |
| 1930 | dd->caps.has_gcm = 0; | 2101 | dd->caps.has_gcm = 0; |
| 2102 | dd->caps.has_xts = 0; | ||
| 1931 | dd->caps.max_burst_size = 1; | 2103 | dd->caps.max_burst_size = 1; |
| 1932 | 2104 | ||
| 1933 | /* keep only major version number */ | 2105 | /* keep only major version number */ |
| @@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) | |||
| 1937 | dd->caps.has_cfb64 = 1; | 2109 | dd->caps.has_cfb64 = 1; |
| 1938 | dd->caps.has_ctr32 = 1; | 2110 | dd->caps.has_ctr32 = 1; |
| 1939 | dd->caps.has_gcm = 1; | 2111 | dd->caps.has_gcm = 1; |
| 2112 | dd->caps.has_xts = 1; | ||
| 1940 | dd->caps.max_burst_size = 4; | 2113 | dd->caps.max_burst_size = 4; |
| 1941 | break; | 2114 | break; |
| 1942 | case 0x200: | 2115 | case 0x200: |
| @@ -2138,7 +2311,7 @@ aes_dd_err: | |||
| 2138 | 2311 | ||
| 2139 | static int atmel_aes_remove(struct platform_device *pdev) | 2312 | static int atmel_aes_remove(struct platform_device *pdev) |
| 2140 | { | 2313 | { |
| 2141 | static struct atmel_aes_dev *aes_dd; | 2314 | struct atmel_aes_dev *aes_dd; |
| 2142 | 2315 | ||
| 2143 | aes_dd = platform_get_drvdata(pdev); | 2316 | aes_dd = platform_get_drvdata(pdev); |
| 2144 | if (!aes_dd) | 2317 | if (!aes_dd) |
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 64bf3024b680..bc0d3569f8d9 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
| @@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD | |||
| 74 | 74 | ||
| 75 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | 75 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
| 76 | tristate "Register algorithm implementations with the Crypto API" | 76 | tristate "Register algorithm implementations with the Crypto API" |
| 77 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR | 77 | depends on CRYPTO_DEV_FSL_CAAM_JR |
| 78 | default y | 78 | default y |
| 79 | select CRYPTO_AEAD | 79 | select CRYPTO_AEAD |
| 80 | select CRYPTO_AUTHENC | 80 | select CRYPTO_AUTHENC |
| @@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
| 89 | 89 | ||
| 90 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | 90 | config CRYPTO_DEV_FSL_CAAM_AHASH_API |
| 91 | tristate "Register hash algorithm implementations with Crypto API" | 91 | tristate "Register hash algorithm implementations with Crypto API" |
| 92 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR | 92 | depends on CRYPTO_DEV_FSL_CAAM_JR |
| 93 | default y | 93 | default y |
| 94 | select CRYPTO_HASH | 94 | select CRYPTO_HASH |
| 95 | help | 95 | help |
| @@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API | |||
| 101 | 101 | ||
| 102 | config CRYPTO_DEV_FSL_CAAM_PKC_API | 102 | config CRYPTO_DEV_FSL_CAAM_PKC_API |
| 103 | tristate "Register public key cryptography implementations with Crypto API" | 103 | tristate "Register public key cryptography implementations with Crypto API" |
| 104 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR | 104 | depends on CRYPTO_DEV_FSL_CAAM_JR |
| 105 | default y | 105 | default y |
| 106 | select CRYPTO_RSA | 106 | select CRYPTO_RSA |
| 107 | help | 107 | help |
| @@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API | |||
| 113 | 113 | ||
| 114 | config CRYPTO_DEV_FSL_CAAM_RNG_API | 114 | config CRYPTO_DEV_FSL_CAAM_RNG_API |
| 115 | tristate "Register caam device for hwrng API" | 115 | tristate "Register caam device for hwrng API" |
| 116 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR | 116 | depends on CRYPTO_DEV_FSL_CAAM_JR |
| 117 | default y | 117 | default y |
| 118 | select CRYPTO_RNG | 118 | select CRYPTO_RNG |
| 119 | select HW_RANDOM | 119 | select HW_RANDOM |
| @@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG | |||
| 134 | help | 134 | help |
| 135 | Selecting this will enable printing of various debug | 135 | Selecting this will enable printing of various debug |
| 136 | information in the CAAM driver. | 136 | information in the CAAM driver. |
| 137 | |||
| 138 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
| 139 | def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API | ||
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index 08bf5515ae8a..6554742f357e 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
| @@ -8,6 +8,7 @@ endif | |||
| 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
| 9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o | 9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o |
| 10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | 10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
| 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o | ||
| 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o |
| 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | 13 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o |
| 13 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o | 14 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 954a64c7757b..662fe94cb2f8 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | * caam - Freescale FSL CAAM support for crypto API | 2 | * caam - Freescale FSL CAAM support for crypto API |
| 3 | * | 3 | * |
| 4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. |
| 5 | * Copyright 2016 NXP | ||
| 5 | * | 6 | * |
| 6 | * Based on talitos crypto API driver. | 7 | * Based on talitos crypto API driver. |
| 7 | * | 8 | * |
| @@ -53,6 +54,7 @@ | |||
| 53 | #include "error.h" | 54 | #include "error.h" |
| 54 | #include "sg_sw_sec4.h" | 55 | #include "sg_sw_sec4.h" |
| 55 | #include "key_gen.h" | 56 | #include "key_gen.h" |
| 57 | #include "caamalg_desc.h" | ||
| 56 | 58 | ||
| 57 | /* | 59 | /* |
| 58 | * crypto alg | 60 | * crypto alg |
| @@ -62,8 +64,6 @@ | |||
| 62 | #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ | 64 | #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ |
| 63 | CTR_RFC3686_NONCE_SIZE + \ | 65 | CTR_RFC3686_NONCE_SIZE + \ |
| 64 | SHA512_DIGEST_SIZE * 2) | 66 | SHA512_DIGEST_SIZE * 2) |
| 65 | /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | ||
| 66 | #define CAAM_MAX_IV_LENGTH 16 | ||
| 67 | 67 | ||
| 68 | #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) | 68 | #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) |
| 69 | #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ | 69 | #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ |
| @@ -71,37 +71,6 @@ | |||
| 71 | #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ | 71 | #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ |
| 72 | CAAM_CMD_SZ * 5) | 72 | CAAM_CMD_SZ * 5) |
| 73 | 73 | ||
| 74 | /* length of descriptors text */ | ||
| 75 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | ||
| 76 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) | ||
| 77 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) | ||
| 78 | #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ) | ||
| 79 | |||
| 80 | /* Note: Nonce is counted in enckeylen */ | ||
| 81 | #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) | ||
| 82 | |||
| 83 | #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) | ||
| 84 | #define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) | ||
| 85 | #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) | ||
| 86 | |||
| 87 | #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) | ||
| 88 | #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) | ||
| 89 | #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) | ||
| 90 | |||
| 91 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) | ||
| 92 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) | ||
| 93 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) | ||
| 94 | |||
| 95 | #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) | ||
| 96 | #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) | ||
| 97 | #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) | ||
| 98 | |||
| 99 | #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) | ||
| 100 | #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ | ||
| 101 | 20 * CAAM_CMD_SZ) | ||
| 102 | #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ | ||
| 103 | 15 * CAAM_CMD_SZ) | ||
| 104 | |||
| 105 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) | 74 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) |
| 106 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) | 75 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) |
| 107 | 76 | ||
| @@ -117,8 +86,7 @@ | |||
| 117 | 86 | ||
| 118 | static void dbg_dump_sg(const char *level, const char *prefix_str, | 87 | static void dbg_dump_sg(const char *level, const char *prefix_str, |
| 119 | int prefix_type, int rowsize, int groupsize, | 88 | int prefix_type, int rowsize, int groupsize, |
| 120 | struct scatterlist *sg, size_t tlen, bool ascii, | 89 | struct scatterlist *sg, size_t tlen, bool ascii) |
| 121 | bool may_sleep) | ||
| 122 | { | 90 | { |
| 123 | struct scatterlist *it; | 91 | struct scatterlist *it; |
| 124 | void *it_page; | 92 | void *it_page; |
| @@ -152,7 +120,6 @@ static struct list_head alg_list; | |||
| 152 | struct caam_alg_entry { | 120 | struct caam_alg_entry { |
| 153 | int class1_alg_type; | 121 | int class1_alg_type; |
| 154 | int class2_alg_type; | 122 | int class2_alg_type; |
| 155 | int alg_op; | ||
| 156 | bool rfc3686; | 123 | bool rfc3686; |
| 157 | bool geniv; | 124 | bool geniv; |
| 158 | }; | 125 | }; |
| @@ -163,52 +130,6 @@ struct caam_aead_alg { | |||
| 163 | bool registered; | 130 | bool registered; |
| 164 | }; | 131 | }; |
| 165 | 132 | ||
| 166 | /* Set DK bit in class 1 operation if shared */ | ||
| 167 | static inline void append_dec_op1(u32 *desc, u32 type) | ||
| 168 | { | ||
| 169 | u32 *jump_cmd, *uncond_jump_cmd; | ||
| 170 | |||
| 171 | /* DK bit is valid only for AES */ | ||
| 172 | if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { | ||
| 173 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
| 174 | OP_ALG_DECRYPT); | ||
| 175 | return; | ||
| 176 | } | ||
| 177 | |||
| 178 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | ||
| 179 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
| 180 | OP_ALG_DECRYPT); | ||
| 181 | uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); | ||
| 182 | set_jump_tgt_here(desc, jump_cmd); | ||
| 183 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
| 184 | OP_ALG_DECRYPT | OP_ALG_AAI_DK); | ||
| 185 | set_jump_tgt_here(desc, uncond_jump_cmd); | ||
| 186 | } | ||
| 187 | |||
| 188 | /* | ||
| 189 | * For aead functions, read payload and write payload, | ||
| 190 | * both of which are specified in req->src and req->dst | ||
| 191 | */ | ||
| 192 | static inline void aead_append_src_dst(u32 *desc, u32 msg_type) | ||
| 193 | { | ||
| 194 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); | ||
| 195 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | | ||
| 196 | KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); | ||
| 197 | } | ||
| 198 | |||
| 199 | /* | ||
| 200 | * For ablkcipher encrypt and decrypt, read from req->src and | ||
| 201 | * write to req->dst | ||
| 202 | */ | ||
| 203 | static inline void ablkcipher_append_src_dst(u32 *desc) | ||
| 204 | { | ||
| 205 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 206 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 207 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | | ||
| 208 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
| 209 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); | ||
| 210 | } | ||
| 211 | |||
| 212 | /* | 133 | /* |
| 213 | * per-session context | 134 | * per-session context |
| 214 | */ | 135 | */ |
| @@ -220,147 +141,36 @@ struct caam_ctx { | |||
| 220 | dma_addr_t sh_desc_enc_dma; | 141 | dma_addr_t sh_desc_enc_dma; |
| 221 | dma_addr_t sh_desc_dec_dma; | 142 | dma_addr_t sh_desc_dec_dma; |
| 222 | dma_addr_t sh_desc_givenc_dma; | 143 | dma_addr_t sh_desc_givenc_dma; |
| 223 | u32 class1_alg_type; | ||
| 224 | u32 class2_alg_type; | ||
| 225 | u32 alg_op; | ||
| 226 | u8 key[CAAM_MAX_KEY_SIZE]; | 144 | u8 key[CAAM_MAX_KEY_SIZE]; |
| 227 | dma_addr_t key_dma; | 145 | dma_addr_t key_dma; |
| 228 | unsigned int enckeylen; | 146 | struct alginfo adata; |
| 229 | unsigned int split_key_len; | 147 | struct alginfo cdata; |
| 230 | unsigned int split_key_pad_len; | ||
| 231 | unsigned int authsize; | 148 | unsigned int authsize; |
| 232 | }; | 149 | }; |
| 233 | 150 | ||
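The two struct alginfo members replace six loose fields of the old caam_ctx. The definition is not part of this hunk (it lives in the series' descriptor-construction headers), but from the usages below it carries roughly the following, so authentication (adata) and encryption (cdata) each get one self-describing bundle:

/* Reconstructed from the call sites in this file; field names match the
 * usages, the exact layout in the series' headers may differ. */
struct alginfo {
	u32 algtype;		 /* OP_ALG_ALGSEL_* | OP_ALG_AAI_* */
	unsigned int keylen;	 /* raw key length */
	unsigned int keylen_pad; /* split-key length, padded */
	dma_addr_t key_dma;	 /* used when the key is referenced */
	void *key_virt;		 /* used when the key is inlined */
	bool key_inline;	 /* inline into descriptor vs. pointer */
};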
| 234 | static void append_key_aead(u32 *desc, struct caam_ctx *ctx, | ||
| 235 | int keys_fit_inline, bool is_rfc3686) | ||
| 236 | { | ||
| 237 | u32 *nonce; | ||
| 238 | unsigned int enckeylen = ctx->enckeylen; | ||
| 239 | |||
| 240 | /* | ||
| 241 | * RFC3686 specific: | ||
| 242 | * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE} | ||
| 243 | * | enckeylen = encryption key size + nonce size | ||
| 244 | */ | ||
| 245 | if (is_rfc3686) | ||
| 246 | enckeylen -= CTR_RFC3686_NONCE_SIZE; | ||
| 247 | |||
| 248 | if (keys_fit_inline) { | ||
| 249 | append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, | ||
| 250 | ctx->split_key_len, CLASS_2 | | ||
| 251 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 252 | append_key_as_imm(desc, (void *)ctx->key + | ||
| 253 | ctx->split_key_pad_len, enckeylen, | ||
| 254 | enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 255 | } else { | ||
| 256 | append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | | ||
| 257 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 258 | append_key(desc, ctx->key_dma + ctx->split_key_pad_len, | ||
| 259 | enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 260 | } | ||
| 261 | |||
| 262 | /* Load Counter into CONTEXT1 reg */ | ||
| 263 | if (is_rfc3686) { | ||
| 264 | nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len + | ||
| 265 | enckeylen); | ||
| 266 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 267 | LDST_CLASS_IND_CCB | | ||
| 268 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 269 | append_move(desc, | ||
| 270 | MOVE_SRC_OUTFIFO | | ||
| 271 | MOVE_DEST_CLASS1CTX | | ||
| 272 | (16 << MOVE_OFFSET_SHIFT) | | ||
| 273 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 274 | } | ||
| 275 | } | ||
| 276 | |||
| 277 | static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, | ||
| 278 | int keys_fit_inline, bool is_rfc3686) | ||
| 279 | { | ||
| 280 | u32 *key_jump_cmd; | ||
| 281 | |||
| 282 | /* Note: Context registers are saved. */ | ||
| 283 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 284 | |||
| 285 | /* Skip if already shared */ | ||
| 286 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 287 | JUMP_COND_SHRD); | ||
| 288 | |||
| 289 | append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | ||
| 290 | |||
| 291 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 292 | } | ||
| 293 | |||
| 294 | static int aead_null_set_sh_desc(struct crypto_aead *aead) | 151 | static int aead_null_set_sh_desc(struct crypto_aead *aead) |
| 295 | { | 152 | { |
| 296 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 153 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 297 | struct device *jrdev = ctx->jrdev; | 154 | struct device *jrdev = ctx->jrdev; |
| 298 | bool keys_fit_inline = false; | ||
| 299 | u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; | ||
| 300 | u32 *desc; | 155 | u32 *desc; |
| 156 | int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - | ||
| 157 | ctx->adata.keylen_pad; | ||
| 301 | 158 | ||
| 302 | /* | 159 | /* |
| 303 | * Job Descriptor and Shared Descriptors | 160 | * Job Descriptor and Shared Descriptors |
| 304 | * must all fit into the 64-word Descriptor h/w Buffer | 161 | * must all fit into the 64-word Descriptor h/w Buffer |
| 305 | */ | 162 | */ |
| 306 | if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN + | 163 | if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) { |
| 307 | ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) | 164 | ctx->adata.key_inline = true; |
| 308 | keys_fit_inline = true; | 165 | ctx->adata.key_virt = ctx->key; |
| 166 | } else { | ||
| 167 | ctx->adata.key_inline = false; | ||
| 168 | ctx->adata.key_dma = ctx->key_dma; | ||
| 169 | } | ||
| 309 | 170 | ||
| 310 | /* aead_encrypt shared descriptor */ | 171 | /* aead_encrypt shared descriptor */ |
| 311 | desc = ctx->sh_desc_enc; | 172 | desc = ctx->sh_desc_enc; |
| 312 | 173 | cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); | |
| 313 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 314 | |||
| 315 | /* Skip if already shared */ | ||
| 316 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 317 | JUMP_COND_SHRD); | ||
| 318 | if (keys_fit_inline) | ||
| 319 | append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, | ||
| 320 | ctx->split_key_len, CLASS_2 | | ||
| 321 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 322 | else | ||
| 323 | append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | | ||
| 324 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 325 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 326 | |||
| 327 | /* assoclen + cryptlen = seqinlen */ | ||
| 328 | append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 329 | |||
| 330 | /* Prepare to read and write cryptlen + assoclen bytes */ | ||
| 331 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 332 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 333 | |||
| 334 | /* | ||
| 335 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 336 | * thus need to do some magic, i.e. self-patch the descriptor | ||
| 337 | * buffer. | ||
| 338 | */ | ||
| 339 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | | ||
| 340 | MOVE_DEST_MATH3 | | ||
| 341 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 342 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | | ||
| 343 | MOVE_DEST_DESCBUF | | ||
| 344 | MOVE_WAITCOMP | | ||
| 345 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 346 | |||
| 347 | /* Class 2 operation */ | ||
| 348 | append_operation(desc, ctx->class2_alg_type | | ||
| 349 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 350 | |||
| 351 | /* Read and write cryptlen bytes */ | ||
| 352 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 353 | |||
| 354 | set_move_tgt_here(desc, read_move_cmd); | ||
| 355 | set_move_tgt_here(desc, write_move_cmd); | ||
| 356 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 357 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | | ||
| 358 | MOVE_AUX_LS); | ||
| 359 | |||
| 360 | /* Write ICV */ | ||
| 361 | append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | | ||
| 362 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 363 | |||
| 364 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 174 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 365 | desc_bytes(desc), | 175 | desc_bytes(desc), |
| 366 | DMA_TO_DEVICE); | 176 | DMA_TO_DEVICE); |
| @@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 368 | dev_err(jrdev, "unable to map shared descriptor\n"); | 178 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 369 | return -ENOMEM; | 179 | return -ENOMEM; |
| 370 | } | 180 | } |
| 371 | #ifdef DEBUG | ||
| 372 | print_hex_dump(KERN_ERR, | ||
| 373 | "aead null enc shdesc@"__stringify(__LINE__)": ", | ||
| 374 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 375 | desc_bytes(desc), 1); | ||
| 376 | #endif | ||
| 377 | 181 | ||
| 378 | /* | 182 | /* |
| 379 | * Job Descriptor and Shared Descriptors | 183 | * Job Descriptor and Shared Descriptors |
| 380 | * must all fit into the 64-word Descriptor h/w Buffer | 184 | * must all fit into the 64-word Descriptor h/w Buffer |
| 381 | */ | 185 | */ |
| 382 | keys_fit_inline = false; | 186 | if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) { |
| 383 | if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN + | 187 | ctx->adata.key_inline = true; |
| 384 | ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) | 188 | ctx->adata.key_virt = ctx->key; |
| 385 | keys_fit_inline = true; | 189 | } else { |
| 386 | 190 | ctx->adata.key_inline = false; | |
| 387 | desc = ctx->sh_desc_dec; | 191 | ctx->adata.key_dma = ctx->key_dma; |
| 192 | } | ||
| 388 | 193 | ||
| 389 | /* aead_decrypt shared descriptor */ | 194 | /* aead_decrypt shared descriptor */ |
| 390 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 195 | desc = ctx->sh_desc_dec; |
| 391 | 196 | cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); | |
| 392 | /* Skip if already shared */ | ||
| 393 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 394 | JUMP_COND_SHRD); | ||
| 395 | if (keys_fit_inline) | ||
| 396 | append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, | ||
| 397 | ctx->split_key_len, CLASS_2 | | ||
| 398 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 399 | else | ||
| 400 | append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | | ||
| 401 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 402 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 403 | |||
| 404 | /* Class 2 operation */ | ||
| 405 | append_operation(desc, ctx->class2_alg_type | | ||
| 406 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 407 | |||
| 408 | /* assoclen + cryptlen = seqoutlen */ | ||
| 409 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 410 | |||
| 411 | /* Prepare to read and write cryptlen + assoclen bytes */ | ||
| 412 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | ||
| 413 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); | ||
| 414 | |||
| 415 | /* | ||
| 416 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 417 | * thus need to do some magic, i.e. self-patch the descriptor | ||
| 418 | * buffer. | ||
| 419 | */ | ||
| 420 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | | ||
| 421 | MOVE_DEST_MATH2 | | ||
| 422 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 423 | write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | | ||
| 424 | MOVE_DEST_DESCBUF | | ||
| 425 | MOVE_WAITCOMP | | ||
| 426 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 427 | |||
| 428 | /* Read and write cryptlen bytes */ | ||
| 429 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 430 | |||
| 431 | /* | ||
| 432 | * Insert a NOP here, since we need at least 4 instructions between | ||
| 433 | * code patching the descriptor buffer and the location being patched. | ||
| 434 | */ | ||
| 435 | jump_cmd = append_jump(desc, JUMP_TEST_ALL); | ||
| 436 | set_jump_tgt_here(desc, jump_cmd); | ||
| 437 | |||
| 438 | set_move_tgt_here(desc, read_move_cmd); | ||
| 439 | set_move_tgt_here(desc, write_move_cmd); | ||
| 440 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 441 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | | ||
| 442 | MOVE_AUX_LS); | ||
| 443 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 444 | |||
| 445 | /* Load ICV */ | ||
| 446 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | | ||
| 447 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | ||
| 448 | |||
| 449 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 197 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
| 450 | desc_bytes(desc), | 198 | desc_bytes(desc), |
| 451 | DMA_TO_DEVICE); | 199 | DMA_TO_DEVICE); |
| @@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 453 | dev_err(jrdev, "unable to map shared descriptor\n"); | 201 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 454 | return -ENOMEM; | 202 | return -ENOMEM; |
| 455 | } | 203 | } |
| 456 | #ifdef DEBUG | ||
| 457 | print_hex_dump(KERN_ERR, | ||
| 458 | "aead null dec shdesc@"__stringify(__LINE__)": ", | ||
| 459 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 460 | desc_bytes(desc), 1); | ||
| 461 | #endif | ||
| 462 | 204 | ||
| 463 | return 0; | 205 | return 0; |
| 464 | } | 206 | } |
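Each open-coded construction sequence becomes a single call into the new caamalg_desc.c (built through CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC above, so it can be shared by other CAAM front-ends). From the call sites in this patch the relevant helpers look roughly like this; parameter names are inferred, the authoritative prototypes are in caamalg_desc.h:

void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
				 unsigned int icvsize);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
				 unsigned int icvsize);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
			    struct alginfo *adata, unsigned int icvsize,
			    const bool is_rfc3686, u32 *nonce,
			    const u32 ctx1_iv_off);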
| @@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 470 | unsigned int ivsize = crypto_aead_ivsize(aead); | 212 | unsigned int ivsize = crypto_aead_ivsize(aead); |
| 471 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 213 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 472 | struct device *jrdev = ctx->jrdev; | 214 | struct device *jrdev = ctx->jrdev; |
| 473 | bool keys_fit_inline; | ||
| 474 | u32 geniv, moveiv; | ||
| 475 | u32 ctx1_iv_off = 0; | 215 | u32 ctx1_iv_off = 0; |
| 476 | u32 *desc; | 216 | u32 *desc, *nonce = NULL; |
| 477 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == | 217 | u32 inl_mask; |
| 218 | unsigned int data_len[2]; | ||
| 219 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == | ||
| 478 | OP_ALG_AAI_CTR_MOD128); | 220 | OP_ALG_AAI_CTR_MOD128); |
| 479 | const bool is_rfc3686 = alg->caam.rfc3686; | 221 | const bool is_rfc3686 = alg->caam.rfc3686; |
| 480 | 222 | ||
| @@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 482 | return 0; | 224 | return 0; |
| 483 | 225 | ||
| 484 | /* NULL encryption / decryption */ | 226 | /* NULL encryption / decryption */ |
| 485 | if (!ctx->enckeylen) | 227 | if (!ctx->cdata.keylen) |
| 486 | return aead_null_set_sh_desc(aead); | 228 | return aead_null_set_sh_desc(aead); |
| 487 | 229 | ||
| 488 | /* | 230 | /* |
| @@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 497 | * RFC3686 specific: | 239 | * RFC3686 specific: |
| 498 | * CONTEXT1[255:128] = {NONCE, IV, COUNTER} | 240 | * CONTEXT1[255:128] = {NONCE, IV, COUNTER} |
| 499 | */ | 241 | */ |
| 500 | if (is_rfc3686) | 242 | if (is_rfc3686) { |
| 501 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; | 243 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; |
| 244 | nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + | ||
| 245 | ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); | ||
| 246 | } | ||
| 247 | |||
| 248 | data_len[0] = ctx->adata.keylen_pad; | ||
| 249 | data_len[1] = ctx->cdata.keylen; | ||
| 502 | 250 | ||
| 503 | if (alg->caam.geniv) | 251 | if (alg->caam.geniv) |
| 504 | goto skip_enc; | 252 | goto skip_enc; |
| @@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 507 | * Job Descriptor and Shared Descriptors | 255 | * Job Descriptor and Shared Descriptors |
| 508 | * must all fit into the 64-word Descriptor h/w Buffer | 256 | * must all fit into the 64-word Descriptor h/w Buffer |
| 509 | */ | 257 | */ |
| 510 | keys_fit_inline = false; | 258 | if (desc_inline_query(DESC_AEAD_ENC_LEN + |
| 511 | if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN + | 259 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), |
| 512 | ctx->split_key_pad_len + ctx->enckeylen + | 260 | AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, |
| 513 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= | 261 | ARRAY_SIZE(data_len)) < 0) |
| 514 | CAAM_DESC_BYTES_MAX) | 262 | return -EINVAL; |
| 515 | keys_fit_inline = true; | ||
| 516 | |||
| 517 | /* aead_encrypt shared descriptor */ | ||
| 518 | desc = ctx->sh_desc_enc; | ||
| 519 | |||
| 520 | /* Note: Context registers are saved. */ | ||
| 521 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | ||
| 522 | |||
| 523 | /* Class 2 operation */ | ||
| 524 | append_operation(desc, ctx->class2_alg_type | | ||
| 525 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 526 | |||
| 527 | /* Read and write assoclen bytes */ | ||
| 528 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 529 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 530 | |||
| 531 | /* Skip assoc data */ | ||
| 532 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 533 | |||
| 534 | /* read assoc before reading payload */ | ||
| 535 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 536 | FIFOLDST_VLF); | ||
| 537 | |||
| 538 | /* Load Counter into CONTEXT1 reg */ | ||
| 539 | if (is_rfc3686) | ||
| 540 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 541 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 542 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 543 | LDST_OFFSET_SHIFT)); | ||
| 544 | 263 | ||
| 545 | /* Class 1 operation */ | 264 | if (inl_mask & 1) |
| 546 | append_operation(desc, ctx->class1_alg_type | | 265 | ctx->adata.key_virt = ctx->key; |
| 547 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 266 | else |
| 267 | ctx->adata.key_dma = ctx->key_dma; | ||
| 548 | 268 | ||
| 549 | /* Read and write cryptlen bytes */ | 269 | if (inl_mask & 2) |
| 550 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 270 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; |
| 551 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 271 | else |
| 552 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); | 272 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; |
| 553 | 273 | ||
| 554 | /* Write ICV */ | 274 | ctx->adata.key_inline = !!(inl_mask & 1); |
| 555 | append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | | 275 | ctx->cdata.key_inline = !!(inl_mask & 2); |
| 556 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 557 | 276 | ||
| 277 | /* aead_encrypt shared descriptor */ | ||
| 278 | desc = ctx->sh_desc_enc; | ||
| 279 | cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize, | ||
| 280 | is_rfc3686, nonce, ctx1_iv_off); | ||
| 558 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 281 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 559 | desc_bytes(desc), | 282 | desc_bytes(desc), |
| 560 | DMA_TO_DEVICE); | 283 | DMA_TO_DEVICE); |
| @@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 562 | dev_err(jrdev, "unable to map shared descriptor\n"); | 285 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 563 | return -ENOMEM; | 286 | return -ENOMEM; |
| 564 | } | 287 | } |
| 565 | #ifdef DEBUG | ||
| 566 | print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ", | ||
| 567 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 568 | desc_bytes(desc), 1); | ||
| 569 | #endif | ||
| 570 | 288 | ||
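
desc_inline_query() replaces the old all-or-nothing keys_fit_inline flag with a per-key verdict: bit 0 of inl_mask covers data_len[0] (the split authentication key), bit 1 covers data_len[1] (the cipher key). A rough user-space model of that contract — simplified in that it ignores the pointer words a non-inlined key still occupies in the descriptor:

#include <stdint.h>
#include <stdio.h>

#define CAAM_DESC_BYTES_MAX 256 /* 64 descriptor words x 4 bytes */

static int inline_query(unsigned int sd_base_len, unsigned int jd_len,
                        const unsigned int *data_len, uint32_t *inl_mask,
                        unsigned int count)
{
        int rem_bytes = (int)CAAM_DESC_BYTES_MAX - (int)sd_base_len -
                        (int)jd_len;
        unsigned int i;

        *inl_mask = 0;
        for (i = 0; i < count; i++) {
                if (rem_bytes >= (int)data_len[i]) {
                        rem_bytes -= data_len[i];   /* key i fits inline */
                        *inl_mask |= 1u << i;
                }
        }
        return rem_bytes >= 0 ? 0 : -1;
}

int main(void)
{
        unsigned int data_len[2] = { 64, 36 }; /* split key, cipher key */
        uint32_t inl_mask;

        if (!inline_query(100, 40, data_len, &inl_mask, 2))
                printf("inl_mask = %x\n", inl_mask); /* prints 3 */
        return 0;
}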
| 571 | skip_enc: | 289 | skip_enc: |
| 572 | /* | 290 | /* |
| 573 | * Job Descriptor and Shared Descriptors | 291 | * Job Descriptor and Shared Descriptors |
| 574 | * must all fit into the 64-word Descriptor h/w Buffer | 292 | * must all fit into the 64-word Descriptor h/w Buffer |
| 575 | */ | 293 | */ |
| 576 | keys_fit_inline = false; | 294 | if (desc_inline_query(DESC_AEAD_DEC_LEN + |
| 577 | if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN + | 295 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), |
| 578 | ctx->split_key_pad_len + ctx->enckeylen + | 296 | AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, |
| 579 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= | 297 | ARRAY_SIZE(data_len)) < 0) |
| 580 | CAAM_DESC_BYTES_MAX) | 298 | return -EINVAL; |
| 581 | keys_fit_inline = true; | ||
| 582 | |||
| 583 | /* aead_decrypt shared descriptor */ | ||
| 584 | desc = ctx->sh_desc_dec; | ||
| 585 | |||
| 586 | /* Note: Context registers are saved. */ | ||
| 587 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | ||
| 588 | |||
| 589 | /* Class 2 operation */ | ||
| 590 | append_operation(desc, ctx->class2_alg_type | | ||
| 591 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 592 | 299 | ||
| 593 | /* Read and write assoclen bytes */ | 300 | if (inl_mask & 1) |
| 594 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 301 | ctx->adata.key_virt = ctx->key; |
| 595 | if (alg->caam.geniv) | ||
| 596 | append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); | ||
| 597 | else | 302 | else |
| 598 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 303 | ctx->adata.key_dma = ctx->key_dma; |
| 599 | |||
| 600 | /* Skip assoc data */ | ||
| 601 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 602 | |||
| 603 | /* read assoc before reading payload */ | ||
| 604 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 605 | KEY_VLF); | ||
| 606 | 304 | ||
| 607 | if (alg->caam.geniv) { | 305 | if (inl_mask & 2) |
| 608 | append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | | 306 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; |
| 609 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 610 | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 611 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | | ||
| 612 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); | ||
| 613 | } | ||
| 614 | |||
| 615 | /* Load Counter into CONTEXT1 reg */ | ||
| 616 | if (is_rfc3686) | ||
| 617 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 618 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 619 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 620 | LDST_OFFSET_SHIFT)); | ||
| 621 | |||
| 622 | /* Choose operation */ | ||
| 623 | if (ctr_mode) | ||
| 624 | append_operation(desc, ctx->class1_alg_type | | ||
| 625 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); | ||
| 626 | else | 307 | else |
| 627 | append_dec_op1(desc, ctx->class1_alg_type); | 308 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; |
| 628 | |||
| 629 | /* Read and write cryptlen bytes */ | ||
| 630 | append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 631 | append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 632 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG); | ||
| 633 | 309 | ||
| 634 | /* Load ICV */ | 310 | ctx->adata.key_inline = !!(inl_mask & 1); |
| 635 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | | 311 | ctx->cdata.key_inline = !!(inl_mask & 2); |
| 636 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | ||
| 637 | 312 | ||
| 313 | /* aead_decrypt shared descriptor */ | ||
| 314 | desc = ctx->sh_desc_dec; | ||
| 315 | cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, | ||
| 316 | ctx->authsize, alg->caam.geniv, is_rfc3686, | ||
| 317 | nonce, ctx1_iv_off); | ||
| 638 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 318 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
| 639 | desc_bytes(desc), | 319 | desc_bytes(desc), |
| 640 | DMA_TO_DEVICE); | 320 | DMA_TO_DEVICE); |
| @@ -642,11 +322,6 @@ skip_enc: | |||
| 642 | dev_err(jrdev, "unable to map shared descriptor\n"); | 322 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 643 | return -ENOMEM; | 323 | return -ENOMEM; |
| 644 | } | 324 | } |
| 645 | #ifdef DEBUG | ||
| 646 | print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ", | ||
| 647 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 648 | desc_bytes(desc), 1); | ||
| 649 | #endif | ||
| 650 | 325 | ||
| 651 | if (!alg->caam.geniv) | 326 | if (!alg->caam.geniv) |
| 652 | goto skip_givenc; | 327 | goto skip_givenc; |
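
The two-bit fan-out that follows each query is identical in the encap, decap, and givencap paths. Written once as a hypothetical helper (the struct types here are minimal stand-ins for the driver's, just wide enough to show the assignments):

#include <stdbool.h>
#include <stdint.h>

struct alginfo_sketch {
        const void *key_virt;        /* key inlined from this address */
        uint64_t key_dma;            /* or referenced by bus address */
        unsigned int keylen_pad;
        bool key_inline;
};

struct ctx_sketch {
        uint8_t key[164];
        uint64_t key_dma;
        struct alginfo_sketch adata; /* authentication key info */
        struct alginfo_sketch cdata; /* cipher key info */
};

/* bit 0: auth key at the start of ctx->key; bit 1: cipher key placed
 * right behind the padded split key */
void apply_inl_mask(struct ctx_sketch *ctx, uint32_t inl_mask)
{
        if (inl_mask & 1)
                ctx->adata.key_virt = ctx->key;
        else
                ctx->adata.key_dma = ctx->key_dma;

        if (inl_mask & 2)
                ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
        else
                ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

        ctx->adata.key_inline = !!(inl_mask & 1);
        ctx->cdata.key_inline = !!(inl_mask & 2);
}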
| @@ -655,93 +330,30 @@ skip_enc: | |||
| 655 | * Job Descriptor and Shared Descriptors | 330 | * Job Descriptor and Shared Descriptors |
| 656 | * must all fit into the 64-word Descriptor h/w Buffer | 331 | * must all fit into the 64-word Descriptor h/w Buffer |
| 657 | */ | 332 | */ |
| 658 | keys_fit_inline = false; | 333 | if (desc_inline_query(DESC_AEAD_GIVENC_LEN + |
| 659 | if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN + | 334 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), |
| 660 | ctx->split_key_pad_len + ctx->enckeylen + | 335 | AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, |
| 661 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= | 336 | ARRAY_SIZE(data_len)) < 0) |
| 662 | CAAM_DESC_BYTES_MAX) | 337 | return -EINVAL; |
| 663 | keys_fit_inline = true; | ||
| 664 | |||
| 665 | /* aead_givencrypt shared descriptor */ | ||
| 666 | desc = ctx->sh_desc_enc; | ||
| 667 | |||
| 668 | /* Note: Context registers are saved. */ | ||
| 669 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | ||
| 670 | |||
| 671 | if (is_rfc3686) | ||
| 672 | goto copy_iv; | ||
| 673 | |||
| 674 | /* Generate IV */ | ||
| 675 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | ||
| 676 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | ||
| 677 | NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
| 678 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | ||
| 679 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
| 680 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 681 | append_move(desc, MOVE_WAITCOMP | | ||
| 682 | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | | ||
| 683 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | ||
| 684 | (ivsize << MOVE_LEN_SHIFT)); | ||
| 685 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 686 | |||
| 687 | copy_iv: | ||
| 688 | /* Copy IV to class 1 context */ | ||
| 689 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | | ||
| 690 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | ||
| 691 | (ivsize << MOVE_LEN_SHIFT)); | ||
| 692 | |||
| 693 | /* Return to encryption */ | ||
| 694 | append_operation(desc, ctx->class2_alg_type | | ||
| 695 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 696 | |||
| 697 | /* Read and write assoclen bytes */ | ||
| 698 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 699 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 700 | |||
| 701 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
| 702 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
| 703 | |||
| 704 | /* Skip assoc data */ | ||
| 705 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 706 | |||
| 707 | /* read assoc before reading payload */ | ||
| 708 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 709 | KEY_VLF); | ||
| 710 | |||
| 711 | /* Copy iv from outfifo to class 2 fifo */ | ||
| 712 | moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | | ||
| 713 | NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
| 714 | append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | | ||
| 715 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
| 716 | append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | | ||
| 717 | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); | ||
| 718 | |||
| 719 | /* Load Counter into CONTEXT1 reg */ | ||
| 720 | if (is_rfc3686) | ||
| 721 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 722 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 723 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 724 | LDST_OFFSET_SHIFT)); | ||
| 725 | |||
| 726 | /* Class 1 operation */ | ||
| 727 | append_operation(desc, ctx->class1_alg_type | | ||
| 728 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 729 | |||
| 730 | /* Will write ivsize + cryptlen */ | ||
| 731 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 732 | 338 | ||
| 733 | /* No need to reload iv */ | 339 | if (inl_mask & 1) |
| 734 | append_seq_fifo_load(desc, ivsize, | 340 | ctx->adata.key_virt = ctx->key; |
| 735 | FIFOLD_CLASS_SKIP); | 341 | else |
| 342 | ctx->adata.key_dma = ctx->key_dma; | ||
| 736 | 343 | ||
| 737 | /* Will read cryptlen */ | 344 | if (inl_mask & 2) |
| 738 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 345 | ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; |
| 739 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); | 346 | else |
| 347 | ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; | ||
| 740 | 348 | ||
| 741 | /* Write ICV */ | 349 | ctx->adata.key_inline = !!(inl_mask & 1); |
| 742 | append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | | 350 | ctx->cdata.key_inline = !!(inl_mask & 2); |
| 743 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 744 | 351 | ||
| 352 | /* aead_givencrypt shared descriptor */ | ||
| 353 | desc = ctx->sh_desc_enc; | ||
| 354 | cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, | ||
| 355 | ctx->authsize, is_rfc3686, nonce, | ||
| 356 | ctx1_iv_off); | ||
| 745 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 357 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 746 | desc_bytes(desc), | 358 | desc_bytes(desc), |
| 747 | DMA_TO_DEVICE); | 359 | DMA_TO_DEVICE); |
| @@ -749,11 +361,6 @@ copy_iv: | |||
| 749 | dev_err(jrdev, "unable to map shared descriptor\n"); | 361 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 750 | return -ENOMEM; | 362 | return -ENOMEM; |
| 751 | } | 363 | } |
| 752 | #ifdef DEBUG | ||
| 753 | print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ", | ||
| 754 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 755 | desc_bytes(desc), 1); | ||
| 756 | #endif | ||
| 757 | 364 | ||
| 758 | skip_givenc: | 365 | skip_givenc: |
| 759 | return 0; | 366 | return 0; |
| @@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
| 774 | { | 381 | { |
| 775 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 382 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 776 | struct device *jrdev = ctx->jrdev; | 383 | struct device *jrdev = ctx->jrdev; |
| 777 | bool keys_fit_inline = false; | ||
| 778 | u32 *key_jump_cmd, *zero_payload_jump_cmd, | ||
| 779 | *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2; | ||
| 780 | u32 *desc; | 384 | u32 *desc; |
| 385 | int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - | ||
| 386 | ctx->cdata.keylen; | ||
| 781 | 387 | ||
| 782 | if (!ctx->enckeylen || !ctx->authsize) | 388 | if (!ctx->cdata.keylen || !ctx->authsize) |
| 783 | return 0; | 389 | return 0; |
| 784 | 390 | ||
| 785 | /* | 391 | /* |
| @@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
| 787 | * Job Descriptor and Shared Descriptor | 393 | * Job Descriptor and Shared Descriptor |
| 788 | * must fit into the 64-word Descriptor h/w Buffer | 394 | * must fit into the 64-word Descriptor h/w Buffer |
| 789 | */ | 395 | */ |
| 790 | if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN + | 396 | if (rem_bytes >= DESC_GCM_ENC_LEN) { |
| 791 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 397 | ctx->cdata.key_inline = true; |
| 792 | keys_fit_inline = true; | 398 | ctx->cdata.key_virt = ctx->key; |
| 399 | } else { | ||
| 400 | ctx->cdata.key_inline = false; | ||
| 401 | ctx->cdata.key_dma = ctx->key_dma; | ||
| 402 | } | ||
| 793 | 403 | ||
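
With a single key, the GCM paths skip the mask machinery: rem_bytes is computed once from the descriptor budget and compared against each descriptor's base length, as in the if/else just above. A runnable sketch of the decision (both macro values below are assumptions chosen for illustration, not the driver's constants):

#include <stdbool.h>
#include <stdio.h>

#define CAAM_DESC_BYTES_MAX 256 /* 64 descriptor words x 4 bytes */
#define GCM_DESC_JOB_IO_LEN  48 /* assumed job-descriptor I/O overhead */
#define DESC_GCM_ENC_LEN    120 /* assumed encap base length */

int main(void)
{
        unsigned int keylen = 32; /* AES-256 */
        int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
                        (int)keylen;
        bool key_inline = rem_bytes >= DESC_GCM_ENC_LEN;

        printf("rem_bytes=%d -> key %s\n", rem_bytes,
               key_inline ? "inlined" : "referenced via DMA");
        return 0;
}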
| 794 | desc = ctx->sh_desc_enc; | 404 | desc = ctx->sh_desc_enc; |
| 795 | 405 | cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); | |
| 796 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 797 | |||
| 798 | /* skip key loading if they are loaded due to sharing */ | ||
| 799 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 800 | JUMP_COND_SHRD | JUMP_COND_SELF); | ||
| 801 | if (keys_fit_inline) | ||
| 802 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 803 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 804 | else | ||
| 805 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
| 806 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 807 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 808 | |||
| 809 | /* class 1 operation */ | ||
| 810 | append_operation(desc, ctx->class1_alg_type | | ||
| 811 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 812 | |||
| 813 | /* if assoclen + cryptlen is ZERO, skip to ICV write */ | ||
| 814 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 815 | zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | | ||
| 816 | JUMP_COND_MATH_Z); | ||
| 817 | |||
| 818 | /* if assoclen is ZERO, skip reading the assoc data */ | ||
| 819 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 820 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
| 821 | JUMP_COND_MATH_Z); | ||
| 822 | |||
| 823 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 824 | |||
| 825 | /* skip assoc data */ | ||
| 826 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 827 | |||
| 828 | /* cryptlen = seqinlen - assoclen */ | ||
| 829 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
| 830 | |||
| 831 | /* if cryptlen is ZERO jump to zero-payload commands */ | ||
| 832 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | ||
| 833 | JUMP_COND_MATH_Z); | ||
| 834 | |||
| 835 | /* read assoc data */ | ||
| 836 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 837 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 838 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); | ||
| 839 | |||
| 840 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 841 | |||
| 842 | /* write encrypted data */ | ||
| 843 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 844 | |||
| 845 | /* read payload data */ | ||
| 846 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 847 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
| 848 | |||
| 849 | /* jump the zero-payload commands */ | ||
| 850 | append_jump(desc, JUMP_TEST_ALL | 2); | ||
| 851 | |||
| 852 | /* zero-payload commands */ | ||
| 853 | set_jump_tgt_here(desc, zero_payload_jump_cmd); | ||
| 854 | |||
| 855 | /* read assoc data */ | ||
| 856 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 857 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); | ||
| 858 | |||
| 859 | /* There is no input data */ | ||
| 860 | set_jump_tgt_here(desc, zero_assoc_jump_cmd2); | ||
| 861 | |||
| 862 | /* write ICV */ | ||
| 863 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | | ||
| 864 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 865 | |||
| 866 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 406 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 867 | desc_bytes(desc), | 407 | desc_bytes(desc), |
| 868 | DMA_TO_DEVICE); | 408 | DMA_TO_DEVICE); |
| @@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
| 870 | dev_err(jrdev, "unable to map shared descriptor\n"); | 410 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 871 | return -ENOMEM; | 411 | return -ENOMEM; |
| 872 | } | 412 | } |
| 873 | #ifdef DEBUG | ||
| 874 | print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ", | ||
| 875 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 876 | desc_bytes(desc), 1); | ||
| 877 | #endif | ||
| 878 | 413 | ||
| 879 | /* | 414 | /* |
| 880 | * Job Descriptor and Shared Descriptors | 415 | * Job Descriptor and Shared Descriptors |
| 881 | * must all fit into the 64-word Descriptor h/w Buffer | 416 | * must all fit into the 64-word Descriptor h/w Buffer |
| 882 | */ | 417 | */ |
| 883 | keys_fit_inline = false; | 418 | if (rem_bytes >= DESC_GCM_DEC_LEN) { |
| 884 | if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN + | 419 | ctx->cdata.key_inline = true; |
| 885 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 420 | ctx->cdata.key_virt = ctx->key; |
| 886 | keys_fit_inline = true; | 421 | } else { |
| 422 | ctx->cdata.key_inline = false; | ||
| 423 | ctx->cdata.key_dma = ctx->key_dma; | ||
| 424 | } | ||
| 887 | 425 | ||
| 888 | desc = ctx->sh_desc_dec; | 426 | desc = ctx->sh_desc_dec; |
| 889 | 427 | cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); | |
| 890 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 891 | |||
| 892 | /* skip key loading if they are loaded due to sharing */ | ||
| 893 | key_jump_cmd = append_jump(desc, JUMP_JSL | | ||
| 894 | JUMP_TEST_ALL | JUMP_COND_SHRD | | ||
| 895 | JUMP_COND_SELF); | ||
| 896 | if (keys_fit_inline) | ||
| 897 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 898 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 899 | else | ||
| 900 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
| 901 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 902 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 903 | |||
| 904 | /* class 1 operation */ | ||
| 905 | append_operation(desc, ctx->class1_alg_type | | ||
| 906 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 907 | |||
| 908 | /* if assoclen is ZERO, skip reading the assoc data */ | ||
| 909 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 910 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
| 911 | JUMP_COND_MATH_Z); | ||
| 912 | |||
| 913 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 914 | |||
| 915 | /* skip assoc data */ | ||
| 916 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 917 | |||
| 918 | /* read assoc data */ | ||
| 919 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 920 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 921 | |||
| 922 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); | ||
| 923 | |||
| 924 | /* cryptlen = seqoutlen - assoclen */ | ||
| 925 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 926 | |||
| 927 | /* jump to zero-payload command if cryptlen is zero */ | ||
| 928 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | ||
| 929 | JUMP_COND_MATH_Z); | ||
| 930 | |||
| 931 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 932 | |||
| 933 | /* store encrypted data */ | ||
| 934 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 935 | |||
| 936 | /* read payload data */ | ||
| 937 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 938 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 939 | |||
| 940 | /* zero-payload command */ | ||
| 941 | set_jump_tgt_here(desc, zero_payload_jump_cmd); | ||
| 942 | |||
| 943 | /* read ICV */ | ||
| 944 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | | ||
| 945 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | ||
| 946 | |||
| 947 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 428 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
| 948 | desc_bytes(desc), | 429 | desc_bytes(desc), |
| 949 | DMA_TO_DEVICE); | 430 | DMA_TO_DEVICE); |
| @@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
| 951 | dev_err(jrdev, "unable to map shared descriptor\n"); | 432 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 952 | return -ENOMEM; | 433 | return -ENOMEM; |
| 953 | } | 434 | } |
| 954 | #ifdef DEBUG | ||
| 955 | print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ", | ||
| 956 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 957 | desc_bytes(desc), 1); | ||
| 958 | #endif | ||
| 959 | 435 | ||
| 960 | return 0; | 436 | return 0; |
| 961 | } | 437 | } |
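
The descriptor bodies deleted above now live behind cnstr_shdsc_gcm_encap()/cnstr_shdsc_gcm_decap(). Judging only from the call sites, their prototypes look roughly like this (a sketch — the authoritative declarations belong to the driver's shared descriptor library; u32 and struct alginfo are the driver's types):

void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
                           unsigned int icvsize);
void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
                           unsigned int icvsize);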
| @@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
| 974 | { | 450 | { |
| 975 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 451 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 976 | struct device *jrdev = ctx->jrdev; | 452 | struct device *jrdev = ctx->jrdev; |
| 977 | bool keys_fit_inline = false; | ||
| 978 | u32 *key_jump_cmd; | ||
| 979 | u32 *desc; | 453 | u32 *desc; |
| 454 | int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - | ||
| 455 | ctx->cdata.keylen; | ||
| 980 | 456 | ||
| 981 | if (!ctx->enckeylen || !ctx->authsize) | 457 | if (!ctx->cdata.keylen || !ctx->authsize) |
| 982 | return 0; | 458 | return 0; |
| 983 | 459 | ||
| 984 | /* | 460 | /* |
| @@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
| 986 | * Job Descriptor and Shared Descriptor | 462 | * Job Descriptor and Shared Descriptor |
| 987 | * must fit into the 64-word Descriptor h/w Buffer | 463 | * must fit into the 64-word Descriptor h/w Buffer |
| 988 | */ | 464 | */ |
| 989 | if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN + | 465 | if (rem_bytes >= DESC_RFC4106_ENC_LEN) { |
| 990 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 466 | ctx->cdata.key_inline = true; |
| 991 | keys_fit_inline = true; | 467 | ctx->cdata.key_virt = ctx->key; |
| 468 | } else { | ||
| 469 | ctx->cdata.key_inline = false; | ||
| 470 | ctx->cdata.key_dma = ctx->key_dma; | ||
| 471 | } | ||
| 992 | 472 | ||
| 993 | desc = ctx->sh_desc_enc; | 473 | desc = ctx->sh_desc_enc; |
| 994 | 474 | cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); | |
| 995 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 996 | |||
| 997 | /* Skip key loading if it is loaded due to sharing */ | ||
| 998 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 999 | JUMP_COND_SHRD); | ||
| 1000 | if (keys_fit_inline) | ||
| 1001 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1002 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1003 | else | ||
| 1004 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
| 1005 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1006 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1007 | |||
| 1008 | /* Class 1 operation */ | ||
| 1009 | append_operation(desc, ctx->class1_alg_type | | ||
| 1010 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 1011 | |||
| 1012 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); | ||
| 1013 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 1014 | |||
| 1015 | /* Read assoc data */ | ||
| 1016 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 1017 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 1018 | |||
| 1019 | /* Skip IV */ | ||
| 1020 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 1021 | |||
| 1022 | /* Will read cryptlen bytes */ | ||
| 1023 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 1024 | |||
| 1025 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | ||
| 1026 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | ||
| 1027 | |||
| 1028 | /* Skip assoc data */ | ||
| 1029 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 1030 | |||
| 1031 | /* cryptlen = seqoutlen - assoclen */ | ||
| 1032 | append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 1033 | |||
| 1034 | /* Write encrypted data */ | ||
| 1035 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 1036 | |||
| 1037 | /* Read payload data */ | ||
| 1038 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 1039 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
| 1040 | |||
| 1041 | /* Write ICV */ | ||
| 1042 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | | ||
| 1043 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 1044 | |||
| 1045 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 475 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 1046 | desc_bytes(desc), | 476 | desc_bytes(desc), |
| 1047 | DMA_TO_DEVICE); | 477 | DMA_TO_DEVICE); |
| @@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
| 1049 | dev_err(jrdev, "unable to map shared descriptor\n"); | 479 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1050 | return -ENOMEM; | 480 | return -ENOMEM; |
| 1051 | } | 481 | } |
| 1052 | #ifdef DEBUG | ||
| 1053 | print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ", | ||
| 1054 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1055 | desc_bytes(desc), 1); | ||
| 1056 | #endif | ||
| 1057 | 482 | ||
| 1058 | /* | 483 | /* |
| 1059 | * Job Descriptor and Shared Descriptors | 484 | * Job Descriptor and Shared Descriptors |
| 1060 | * must all fit into the 64-word Descriptor h/w Buffer | 485 | * must all fit into the 64-word Descriptor h/w Buffer |
| 1061 | */ | 486 | */ |
| 1062 | keys_fit_inline = false; | 487 | if (rem_bytes >= DESC_RFC4106_DEC_LEN) { |
| 1063 | if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN + | 488 | ctx->cdata.key_inline = true; |
| 1064 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 489 | ctx->cdata.key_virt = ctx->key; |
| 1065 | keys_fit_inline = true; | 490 | } else { |
| 491 | ctx->cdata.key_inline = false; | ||
| 492 | ctx->cdata.key_dma = ctx->key_dma; | ||
| 493 | } | ||
| 1066 | 494 | ||
| 1067 | desc = ctx->sh_desc_dec; | 495 | desc = ctx->sh_desc_dec; |
| 1068 | 496 | cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); | |
| 1069 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 1070 | |||
| 1071 | /* Skip key loading if it is loaded due to sharing */ | ||
| 1072 | key_jump_cmd = append_jump(desc, JUMP_JSL | | ||
| 1073 | JUMP_TEST_ALL | JUMP_COND_SHRD); | ||
| 1074 | if (keys_fit_inline) | ||
| 1075 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1076 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1077 | else | ||
| 1078 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
| 1079 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1080 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1081 | |||
| 1082 | /* Class 1 operation */ | ||
| 1083 | append_operation(desc, ctx->class1_alg_type | | ||
| 1084 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 1085 | |||
| 1086 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); | ||
| 1087 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 1088 | |||
| 1089 | /* Read assoc data */ | ||
| 1090 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 1091 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 1092 | |||
| 1093 | /* Skip IV */ | ||
| 1094 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 1095 | |||
| 1096 | /* Will read cryptlen bytes */ | ||
| 1097 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); | ||
| 1098 | |||
| 1099 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | ||
| 1100 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | ||
| 1101 | |||
| 1102 | /* Skip assoc data */ | ||
| 1103 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 1104 | |||
| 1105 | /* Will write cryptlen bytes */ | ||
| 1106 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 1107 | |||
| 1108 | /* Store payload data */ | ||
| 1109 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 1110 | |||
| 1111 | /* Read encrypted data */ | ||
| 1112 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 1113 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 1114 | |||
| 1115 | /* Read ICV */ | ||
| 1116 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | | ||
| 1117 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | ||
| 1118 | |||
| 1119 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 497 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
| 1120 | desc_bytes(desc), | 498 | desc_bytes(desc), |
| 1121 | DMA_TO_DEVICE); | 499 | DMA_TO_DEVICE); |
| @@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
| 1123 | dev_err(jrdev, "unable to map shared descriptor\n"); | 501 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1124 | return -ENOMEM; | 502 | return -ENOMEM; |
| 1125 | } | 503 | } |
| 1126 | #ifdef DEBUG | ||
| 1127 | print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ", | ||
| 1128 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1129 | desc_bytes(desc), 1); | ||
| 1130 | #endif | ||
| 1131 | 504 | ||
| 1132 | return 0; | 505 | return 0; |
| 1133 | } | 506 | } |
| @@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
| 1147 | { | 520 | { |
| 1148 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 521 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 1149 | struct device *jrdev = ctx->jrdev; | 522 | struct device *jrdev = ctx->jrdev; |
| 1150 | bool keys_fit_inline = false; | ||
| 1151 | u32 *key_jump_cmd; | ||
| 1152 | u32 *read_move_cmd, *write_move_cmd; | ||
| 1153 | u32 *desc; | 523 | u32 *desc; |
| 524 | int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - | ||
| 525 | ctx->cdata.keylen; | ||
| 1154 | 526 | ||
| 1155 | if (!ctx->enckeylen || !ctx->authsize) | 527 | if (!ctx->cdata.keylen || !ctx->authsize) |
| 1156 | return 0; | 528 | return 0; |
| 1157 | 529 | ||
| 1158 | /* | 530 | /* |
| @@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
| 1160 | * Job Descriptor and Shared Descriptor | 532 | * Job Descriptor and Shared Descriptor |
| 1161 | * must fit into the 64-word Descriptor h/w Buffer | 533 | * must fit into the 64-word Descriptor h/w Buffer |
| 1162 | */ | 534 | */ |
| 1163 | if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN + | 535 | if (rem_bytes >= DESC_RFC4543_ENC_LEN) { |
| 1164 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 536 | ctx->cdata.key_inline = true; |
| 1165 | keys_fit_inline = true; | 537 | ctx->cdata.key_virt = ctx->key; |
| 538 | } else { | ||
| 539 | ctx->cdata.key_inline = false; | ||
| 540 | ctx->cdata.key_dma = ctx->key_dma; | ||
| 541 | } | ||
| 1166 | 542 | ||
| 1167 | desc = ctx->sh_desc_enc; | 543 | desc = ctx->sh_desc_enc; |
| 1168 | 544 | cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); | |
| 1169 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 1170 | |||
| 1171 | /* Skip key loading if it is loaded due to sharing */ | ||
| 1172 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1173 | JUMP_COND_SHRD); | ||
| 1174 | if (keys_fit_inline) | ||
| 1175 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1176 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1177 | else | ||
| 1178 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
| 1179 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1180 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1181 | |||
| 1182 | /* Class 1 operation */ | ||
| 1183 | append_operation(desc, ctx->class1_alg_type | | ||
| 1184 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 1185 | |||
| 1186 | /* assoclen + cryptlen = seqinlen */ | ||
| 1187 | append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 1188 | |||
| 1189 | /* | ||
| 1190 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 1191 | * so the code self-patches the descriptor | ||
| 1192 | * buffer. | ||
| 1193 | */ | ||
| 1194 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | | ||
| 1195 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 1196 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | ||
| 1197 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 1198 | |||
| 1199 | /* Will read assoclen + cryptlen bytes */ | ||
| 1200 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 1201 | |||
| 1202 | /* Will write assoclen + cryptlen bytes */ | ||
| 1203 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 1204 | |||
| 1205 | /* Read and write assoclen + cryptlen bytes */ | ||
| 1206 | aead_append_src_dst(desc, FIFOLD_TYPE_AAD); | ||
| 1207 | |||
| 1208 | set_move_tgt_here(desc, read_move_cmd); | ||
| 1209 | set_move_tgt_here(desc, write_move_cmd); | ||
| 1210 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 1211 | /* Move payload data to OFIFO */ | ||
| 1212 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); | ||
| 1213 | |||
| 1214 | /* Write ICV */ | ||
| 1215 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | | ||
| 1216 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 1217 | |||
| 1218 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 545 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 1219 | desc_bytes(desc), | 546 | desc_bytes(desc), |
| 1220 | DMA_TO_DEVICE); | 547 | DMA_TO_DEVICE); |
| @@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
| 1222 | dev_err(jrdev, "unable to map shared descriptor\n"); | 549 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1223 | return -ENOMEM; | 550 | return -ENOMEM; |
| 1224 | } | 551 | } |
| 1225 | #ifdef DEBUG | ||
| 1226 | print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ", | ||
| 1227 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1228 | desc_bytes(desc), 1); | ||
| 1229 | #endif | ||
| 1230 | 552 | ||
| 1231 | /* | 553 | /* |
| 1232 | * Job Descriptor and Shared Descriptors | 554 | * Job Descriptor and Shared Descriptors |
| 1233 | * must all fit into the 64-word Descriptor h/w Buffer | 555 | * must all fit into the 64-word Descriptor h/w Buffer |
| 1234 | */ | 556 | */ |
| 1235 | keys_fit_inline = false; | 557 | if (rem_bytes >= DESC_RFC4543_DEC_LEN) { |
| 1236 | if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN + | 558 | ctx->cdata.key_inline = true; |
| 1237 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 559 | ctx->cdata.key_virt = ctx->key; |
| 1238 | keys_fit_inline = true; | 560 | } else { |
| 561 | ctx->cdata.key_inline = false; | ||
| 562 | ctx->cdata.key_dma = ctx->key_dma; | ||
| 563 | } | ||
| 1239 | 564 | ||
| 1240 | desc = ctx->sh_desc_dec; | 565 | desc = ctx->sh_desc_dec; |
| 1241 | 566 | cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); | |
| 1242 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 1243 | |||
| 1244 | /* Skip key loading if it is loaded due to sharing */ | ||
| 1245 | key_jump_cmd = append_jump(desc, JUMP_JSL | | ||
| 1246 | JUMP_TEST_ALL | JUMP_COND_SHRD); | ||
| 1247 | if (keys_fit_inline) | ||
| 1248 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1249 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1250 | else | ||
| 1251 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
| 1252 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1253 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1254 | |||
| 1255 | /* Class 1 operation */ | ||
| 1256 | append_operation(desc, ctx->class1_alg_type | | ||
| 1257 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 1258 | |||
| 1259 | /* assoclen + cryptlen = seqoutlen */ | ||
| 1260 | append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 1261 | |||
| 1262 | /* | ||
| 1263 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 1264 | * so the code self-patches the descriptor | ||
| 1265 | * buffer. | ||
| 1266 | */ | ||
| 1267 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | | ||
| 1268 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 1269 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | ||
| 1270 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 1271 | |||
| 1272 | /* Will read assoclen + cryptlen bytes */ | ||
| 1273 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 1274 | |||
| 1275 | /* Will write assoclen + cryptlen bytes */ | ||
| 1276 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 1277 | |||
| 1278 | /* Store payload data */ | ||
| 1279 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 1280 | |||
| 1281 | /* In-snoop assoclen + cryptlen data */ | ||
| 1282 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | | ||
| 1283 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); | ||
| 1284 | |||
| 1285 | set_move_tgt_here(desc, read_move_cmd); | ||
| 1286 | set_move_tgt_here(desc, write_move_cmd); | ||
| 1287 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 1288 | /* Move payload data to OFIFO */ | ||
| 1289 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); | ||
| 1290 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 1291 | |||
| 1292 | /* Read ICV */ | ||
| 1293 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | | ||
| 1294 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | ||
| 1295 | |||
| 1296 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 567 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
| 1297 | desc_bytes(desc), | 568 | desc_bytes(desc), |
| 1298 | DMA_TO_DEVICE); | 569 | DMA_TO_DEVICE); |
| @@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
| 1300 | dev_err(jrdev, "unable to map shared descriptor\n"); | 571 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1301 | return -ENOMEM; | 572 | return -ENOMEM; |
| 1302 | } | 573 | } |
| 1303 | #ifdef DEBUG | ||
| 1304 | print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ", | ||
| 1305 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1306 | desc_bytes(desc), 1); | ||
| 1307 | #endif | ||
| 1308 | 574 | ||
| 1309 | return 0; | 575 | return 0; |
| 1310 | } | 576 | } |
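
Among the code moved out of this function is the MOVE_LEN workaround for older SEC revisions. The idiom, reassembled from the deleted lines above (and presumably carried into the rfc4543 constructors unchanged): emit the two MOVE commands with placeholder targets, append the commands that fix the final layout, then patch the MOVEs to point at the current position:

u32 *read_move_cmd, *write_move_cmd;

read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
                            (0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
                             (0x8 << MOVE_LEN_SHIFT));

/* ... data-path commands that determine the final layout ... */

set_move_tgt_here(desc, read_move_cmd);
set_move_tgt_here(desc, write_move_cmd);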
| @@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc, | |||
| 1320 | return 0; | 586 | return 0; |
| 1321 | } | 587 | } |
| 1322 | 588 | ||
| 1323 | static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, | ||
| 1324 | u32 authkeylen) | ||
| 1325 | { | ||
| 1326 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, | ||
| 1327 | ctx->split_key_pad_len, key_in, authkeylen, | ||
| 1328 | ctx->alg_op); | ||
| 1329 | } | ||
| 1330 | |||
| 1331 | static int aead_setkey(struct crypto_aead *aead, | 589 | static int aead_setkey(struct crypto_aead *aead, |
| 1332 | const u8 *key, unsigned int keylen) | 590 | const u8 *key, unsigned int keylen) |
| 1333 | { | 591 | { |
| 1334 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
| 1335 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
| 1336 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 592 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 1337 | struct device *jrdev = ctx->jrdev; | 593 | struct device *jrdev = ctx->jrdev; |
| 1338 | struct crypto_authenc_keys keys; | 594 | struct crypto_authenc_keys keys; |
| @@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead, | |||
| 1341 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 597 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 1342 | goto badkey; | 598 | goto badkey; |
| 1343 | 599 | ||
| 1344 | /* Pick class 2 key length from algorithm submask */ | ||
| 1345 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | ||
| 1346 | OP_ALG_ALGSEL_SHIFT] * 2; | ||
| 1347 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | ||
| 1348 | |||
| 1349 | if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE) | ||
| 1350 | goto badkey; | ||
| 1351 | |||
| 1352 | #ifdef DEBUG | 600 | #ifdef DEBUG |
| 1353 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", | 601 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", |
| 1354 | keys.authkeylen + keys.enckeylen, keys.enckeylen, | 602 | keys.authkeylen + keys.enckeylen, keys.enckeylen, |
| 1355 | keys.authkeylen); | 603 | keys.authkeylen); |
| 1356 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | ||
| 1357 | ctx->split_key_len, ctx->split_key_pad_len); | ||
| 1358 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 604 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", |
| 1359 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 605 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| 1360 | #endif | 606 | #endif |
| 1361 | 607 | ||
| 1362 | ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); | 608 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, |
| 609 | keys.authkeylen, CAAM_MAX_KEY_SIZE - | ||
| 610 | keys.enckeylen); | ||
| 1363 | if (ret) { | 611 | if (ret) { |
| 1364 | goto badkey; | 612 | goto badkey; |
| 1365 | } | 613 | } |
| 1366 | 614 | ||
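
gen_split_key() now receives the adata descriptor plus an explicit output bound, CAAM_MAX_KEY_SIZE - keys.enckeylen, replacing the removed mdpadlen bookkeeping. Per the deleted lines, the split key is twice the digest size rounded up to a 16-byte boundary; a standalone sketch of the resulting ctx->key budget (the CAAM_MAX_KEY_SIZE value is assumed for the example):

#include <assert.h>
#include <stdio.h>

#define CAAM_MAX_KEY_SIZE 164 /* assumed for the sketch */

/* keylen_pad as the removed code computed it:
 * split_key_len = 2 * digest size, padded to a 16-byte boundary */
static unsigned int split_key_pad_len(unsigned int digestsize)
{
        return (2 * digestsize + 15) & ~15u;
}

int main(void)
{
        unsigned int keylen_pad = split_key_pad_len(32); /* SHA-256 */
        unsigned int enckeylen = 16;                     /* AES-128 */

        /* gen_split_key() gets the leftover space as its bound, so an
         * oversized HMAC key cannot clobber the cipher key that is
         * appended at ctx->key + keylen_pad */
        assert(keylen_pad + enckeylen <= CAAM_MAX_KEY_SIZE);
        printf("split key pad %u, cipher key at offset %u\n",
               keylen_pad, keylen_pad);
        return 0;
}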
| 1367 | /* append the encryption key after the auth split key */ | 615 | /* append the encryption key after the auth split key */ |
| 1368 | memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen); | 616 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
| 1369 | 617 | ||
| 1370 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + | 618 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad + |
| 1371 | keys.enckeylen, DMA_TO_DEVICE); | 619 | keys.enckeylen, DMA_TO_DEVICE); |
| 1372 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | 620 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
| 1373 | dev_err(jrdev, "unable to map key i/o memory\n"); | 621 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| @@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead, | |||
| 1376 | #ifdef DEBUG | 624 | #ifdef DEBUG |
| 1377 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 625 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
| 1378 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 626 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
| 1379 | ctx->split_key_pad_len + keys.enckeylen, 1); | 627 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
| 1380 | #endif | 628 | #endif |
| 1381 | 629 | ||
| 1382 | ctx->enckeylen = keys.enckeylen; | 630 | ctx->cdata.keylen = keys.enckeylen; |
| 1383 | 631 | ||
| 1384 | ret = aead_set_sh_desc(aead); | 632 | ret = aead_set_sh_desc(aead); |
| 1385 | if (ret) { | 633 | if (ret) { |
| 1386 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + | 634 | dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad + |
| 1387 | keys.enckeylen, DMA_TO_DEVICE); | 635 | keys.enckeylen, DMA_TO_DEVICE); |
| 1388 | } | 636 | } |
| 1389 | 637 | ||
| @@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
| 1412 | dev_err(jrdev, "unable to map key i/o memory\n"); | 660 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| 1413 | return -ENOMEM; | 661 | return -ENOMEM; |
| 1414 | } | 662 | } |
| 1415 | ctx->enckeylen = keylen; | 663 | ctx->cdata.keylen = keylen; |
| 1416 | 664 | ||
| 1417 | ret = gcm_set_sh_desc(aead); | 665 | ret = gcm_set_sh_desc(aead); |
| 1418 | if (ret) { | 666 | if (ret) { |
| 1419 | dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, | 667 | dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen, |
| 1420 | DMA_TO_DEVICE); | 668 | DMA_TO_DEVICE); |
| 1421 | } | 669 | } |
| 1422 | 670 | ||
| @@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
| 1444 | * The last four bytes of the key material are used as the salt value | 692 | * The last four bytes of the key material are used as the salt value |
| 1445 | * in the nonce. Update the AES key length. | 693 | * in the nonce. Update the AES key length. |
| 1446 | */ | 694 | */ |
| 1447 | ctx->enckeylen = keylen - 4; | 695 | ctx->cdata.keylen = keylen - 4; |
| 1448 | 696 | ||
| 1449 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, | 697 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen, |
| 1450 | DMA_TO_DEVICE); | 698 | DMA_TO_DEVICE); |
| 1451 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | 699 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
| 1452 | dev_err(jrdev, "unable to map key i/o memory\n"); | 700 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| @@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
| 1455 | 703 | ||
| 1456 | ret = rfc4106_set_sh_desc(aead); | 704 | ret = rfc4106_set_sh_desc(aead); |
| 1457 | if (ret) { | 705 | if (ret) { |
| 1458 | dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, | 706 | dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen, |
| 1459 | DMA_TO_DEVICE); | 707 | DMA_TO_DEVICE); |
| 1460 | } | 708 | } |
| 1461 | 709 | ||
| @@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
| 1483 | * The last four bytes of the key material are used as the salt value | 731 | * The last four bytes of the key material are used as the salt value |
| 1484 | * in the nonce. Update the AES key length. | 732 | * in the nonce. Update the AES key length. |
| 1485 | */ | 733 | */ |
| 1486 | ctx->enckeylen = keylen - 4; | 734 | ctx->cdata.keylen = keylen - 4; |
| 1487 | 735 | ||
| 1488 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, | 736 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen, |
| 1489 | DMA_TO_DEVICE); | 737 | DMA_TO_DEVICE); |
| 1490 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | 738 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
| 1491 | dev_err(jrdev, "unable to map key i/o memory\n"); | 739 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| @@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
| 1494 | 742 | ||
| 1495 | ret = rfc4543_set_sh_desc(aead); | 743 | ret = rfc4543_set_sh_desc(aead); |
| 1496 | if (ret) { | 744 | if (ret) { |
| 1497 | dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, | 745 | dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen, |
| 1498 | DMA_TO_DEVICE); | 746 | DMA_TO_DEVICE); |
| 1499 | } | 747 | } |
| 1500 | 748 | ||
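
Both rfc4106_setkey() and rfc4543_setkey() treat the last four bytes of the supplied key material as the nonce salt: the salt stays in ctx->key, but cdata.keylen is trimmed so the descriptors load only the AES key. A standalone sketch of the split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 16-byte AES key followed by the 4-byte salt (dummy bytes) */
        uint8_t keymat[20] = { 0 };
        unsigned int keylen = sizeof(keymat);
        unsigned int aes_keylen = keylen - 4;   /* becomes cdata.keylen */
        const uint8_t *salt = keymat + aes_keylen;

        printf("AES key %u bytes, salt at offset %td\n",
               aes_keylen, salt - keymat);
        return 0;
}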
| @@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1505 | const u8 *key, unsigned int keylen) | 753 | const u8 *key, unsigned int keylen) |
| 1506 | { | 754 | { |
| 1507 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | 755 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); |
| 1508 | struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher; | ||
| 1509 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); | 756 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); |
| 1510 | const char *alg_name = crypto_tfm_alg_name(tfm); | 757 | const char *alg_name = crypto_tfm_alg_name(tfm); |
| 1511 | struct device *jrdev = ctx->jrdev; | 758 | struct device *jrdev = ctx->jrdev; |
| 1512 | int ret = 0; | 759 | unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); |
| 1513 | u32 *key_jump_cmd; | ||
| 1514 | u32 *desc; | 760 | u32 *desc; |
| 1515 | u8 *nonce; | ||
| 1516 | u32 geniv; | ||
| 1517 | u32 ctx1_iv_off = 0; | 761 | u32 ctx1_iv_off = 0; |
| 1518 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == | 762 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
| 1519 | OP_ALG_AAI_CTR_MOD128); | 763 | OP_ALG_AAI_CTR_MOD128); |
| 1520 | const bool is_rfc3686 = (ctr_mode && | 764 | const bool is_rfc3686 = (ctr_mode && |
| 1521 | (strstr(alg_name, "rfc3686") != NULL)); | 765 | (strstr(alg_name, "rfc3686") != NULL)); |
| 1522 | 766 | ||
| 767 | memcpy(ctx->key, key, keylen); | ||
| 1523 | #ifdef DEBUG | 768 | #ifdef DEBUG |
| 1524 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 769 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", |
| 1525 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 770 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
| @@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1542 | keylen -= CTR_RFC3686_NONCE_SIZE; | 787 | keylen -= CTR_RFC3686_NONCE_SIZE; |
| 1543 | } | 788 | } |
| 1544 | 789 | ||
| 1545 | memcpy(ctx->key, key, keylen); | ||
| 1546 | ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, | 790 | ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, |
| 1547 | DMA_TO_DEVICE); | 791 | DMA_TO_DEVICE); |
| 1548 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | 792 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
| 1549 | dev_err(jrdev, "unable to map key i/o memory\n"); | 793 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| 1550 | return -ENOMEM; | 794 | return -ENOMEM; |
| 1551 | } | 795 | } |
| 1552 | ctx->enckeylen = keylen; | 796 | ctx->cdata.keylen = keylen; |
| 797 | ctx->cdata.key_virt = ctx->key; | ||
| 798 | ctx->cdata.key_inline = true; | ||
| 1553 | 799 | ||
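
Note the ordering above: memcpy(ctx->key, key, keylen) now runs before the rfc3686 branch trims keylen, so the full material (AES key plus trailing nonce) lands in ctx->key while cdata.keylen names only the AES portion, leaving the nonce in the buffer right behind the key for the constructors to pick up. A standalone sketch of that ordering:

#include <stdint.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4

struct sketch_ctx {
        uint8_t key[64];
        unsigned int keylen;
};

/* caller guarantees keylen <= sizeof(ctx->key) */
void rfc3686_store_key(struct sketch_ctx *ctx,
                       const uint8_t *key, unsigned int keylen)
{
        memcpy(ctx->key, key, keylen);    /* AES key + trailing nonce */
        keylen -= CTR_RFC3686_NONCE_SIZE; /* nonce excluded from... */
        ctx->keylen = keylen;             /* ...the cipher key length */
        /* the nonce remains at ctx->key + ctx->keylen */
}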
| 1554 | /* ablkcipher_encrypt shared descriptor */ | 800 | /* ablkcipher_encrypt shared descriptor */ |
| 1555 | desc = ctx->sh_desc_enc; | 801 | desc = ctx->sh_desc_enc; |
| 1556 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | 802 | cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, |
| 1557 | /* Skip if already shared */ | 803 | ctx1_iv_off); |
| 1558 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1559 | JUMP_COND_SHRD); | ||
| 1560 | |||
| 1561 | /* Load class1 key only */ | ||
| 1562 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1563 | ctx->enckeylen, CLASS_1 | | ||
| 1564 | KEY_DEST_CLASS_REG); | ||
| 1565 | |||
| 1566 | /* Load nonce into CONTEXT1 reg */ | ||
| 1567 | if (is_rfc3686) { | ||
| 1568 | nonce = (u8 *)key + keylen; | ||
| 1569 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 1570 | LDST_CLASS_IND_CCB | | ||
| 1571 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1572 | append_move(desc, MOVE_WAITCOMP | | ||
| 1573 | MOVE_SRC_OUTFIFO | | ||
| 1574 | MOVE_DEST_CLASS1CTX | | ||
| 1575 | (16 << MOVE_OFFSET_SHIFT) | | ||
| 1576 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 1577 | } | ||
| 1578 | |||
| 1579 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1580 | |||
| 1581 | /* Load iv */ | ||
| 1582 | append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | | ||
| 1583 | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 1584 | |||
| 1585 | /* Load counter into CONTEXT1 reg */ | ||
| 1586 | if (is_rfc3686) | ||
| 1587 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 1588 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1589 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 1590 | LDST_OFFSET_SHIFT)); | ||
| 1591 | |||
| 1592 | /* Load operation */ | ||
| 1593 | append_operation(desc, ctx->class1_alg_type | | ||
| 1594 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 1595 | |||
| 1596 | /* Perform operation */ | ||
| 1597 | ablkcipher_append_src_dst(desc); | ||
| 1598 | |||
| 1599 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 804 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 1600 | desc_bytes(desc), | 805 | desc_bytes(desc), |
| 1601 | DMA_TO_DEVICE); | 806 | DMA_TO_DEVICE); |
| @@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1603 | dev_err(jrdev, "unable to map shared descriptor\n"); | 808 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1604 | return -ENOMEM; | 809 | return -ENOMEM; |
| 1605 | } | 810 | } |
| 1606 | #ifdef DEBUG | 811 | |
| 1607 | print_hex_dump(KERN_ERR, | ||
| 1608 | "ablkcipher enc shdesc@"__stringify(__LINE__)": ", | ||
| 1609 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1610 | desc_bytes(desc), 1); | ||
| 1611 | #endif | ||
| 1612 | /* ablkcipher_decrypt shared descriptor */ | 812 | /* ablkcipher_decrypt shared descriptor */ |
| 1613 | desc = ctx->sh_desc_dec; | 813 | desc = ctx->sh_desc_dec; |
| 1614 | 814 | cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, | |
| 1615 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | 815 | ctx1_iv_off); |
| 1616 | /* Skip if already shared */ | ||
| 1617 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1618 | JUMP_COND_SHRD); | ||
| 1619 | |||
| 1620 | /* Load class1 key only */ | ||
| 1621 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1622 | ctx->enckeylen, CLASS_1 | | ||
| 1623 | KEY_DEST_CLASS_REG); | ||
| 1624 | |||
| 1625 | /* Load nonce into CONTEXT1 reg */ | ||
| 1626 | if (is_rfc3686) { | ||
| 1627 | nonce = (u8 *)key + keylen; | ||
| 1628 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 1629 | LDST_CLASS_IND_CCB | | ||
| 1630 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1631 | append_move(desc, MOVE_WAITCOMP | | ||
| 1632 | MOVE_SRC_OUTFIFO | | ||
| 1633 | MOVE_DEST_CLASS1CTX | | ||
| 1634 | (16 << MOVE_OFFSET_SHIFT) | | ||
| 1635 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 1636 | } | ||
| 1637 | |||
| 1638 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1639 | |||
| 1640 | /* load IV */ | ||
| 1641 | append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | | ||
| 1642 | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 1643 | |||
| 1644 | /* Load counter into CONTEXT1 reg */ | ||
| 1645 | if (is_rfc3686) | ||
| 1646 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 1647 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1648 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 1649 | LDST_OFFSET_SHIFT)); | ||
| 1650 | |||
| 1651 | /* Choose operation */ | ||
| 1652 | if (ctr_mode) | ||
| 1653 | append_operation(desc, ctx->class1_alg_type | | ||
| 1654 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); | ||
| 1655 | else | ||
| 1656 | append_dec_op1(desc, ctx->class1_alg_type); | ||
| 1657 | |||
| 1658 | /* Perform operation */ | ||
| 1659 | ablkcipher_append_src_dst(desc); | ||
| 1660 | |||
| 1661 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | 816 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, |
| 1662 | desc_bytes(desc), | 817 | desc_bytes(desc), |
| 1663 | DMA_TO_DEVICE); | 818 | DMA_TO_DEVICE); |
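Reviewer note: the command sequences deleted above move into the new cnstr_shdsc_ablkcipher_*() helpers in caamalg_desc.c. A condensed sketch of what such a helper contains, built only from primitives visible in this diff (RFC3686 nonce/counter handling and debug dumps omitted; not the exact helper body):

        static void cnstr_shdsc_ablkcipher_decap_sketch(u32 *desc,
                                                        struct alginfo *cdata,
                                                        unsigned int ivsize,
                                                        u32 ctx1_iv_off)
        {
                u32 *key_jump_cmd;

                init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
                /* Skip key loading if the descriptor is already shared */
                key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                           JUMP_COND_SHRD);
                append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
                                  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
                set_jump_tgt_here(desc, key_jump_cmd);

                /* Load the IV into the class 1 context register */
                append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
                                LDST_CLASS_1_CCB |
                                (ctx1_iv_off << LDST_OFFSET_SHIFT));

                append_dec_op1(desc, cdata->algtype);
                ablkcipher_append_src_dst(desc);
        }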
| @@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1666 | return -ENOMEM; | 821 | return -ENOMEM; |
| 1667 | } | 822 | } |
| 1668 | 823 | ||
| 1669 | #ifdef DEBUG | ||
| 1670 | print_hex_dump(KERN_ERR, | ||
| 1671 | "ablkcipher dec shdesc@"__stringify(__LINE__)": ", | ||
| 1672 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1673 | desc_bytes(desc), 1); | ||
| 1674 | #endif | ||
| 1675 | /* ablkcipher_givencrypt shared descriptor */ | 824 | /* ablkcipher_givencrypt shared descriptor */ |
| 1676 | desc = ctx->sh_desc_givenc; | 825 | desc = ctx->sh_desc_givenc; |
| 1677 | 826 | cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, | |
| 1678 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | 827 | ctx1_iv_off); |
| 1679 | /* Skip if already shared */ | ||
| 1680 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1681 | JUMP_COND_SHRD); | ||
| 1682 | |||
| 1683 | /* Load class1 key only */ | ||
| 1684 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1685 | ctx->enckeylen, CLASS_1 | | ||
| 1686 | KEY_DEST_CLASS_REG); | ||
| 1687 | |||
| 1688 | /* Load Nonce into CONTEXT1 reg */ | ||
| 1689 | if (is_rfc3686) { | ||
| 1690 | nonce = (u8 *)key + keylen; | ||
| 1691 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 1692 | LDST_CLASS_IND_CCB | | ||
| 1693 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1694 | append_move(desc, MOVE_WAITCOMP | | ||
| 1695 | MOVE_SRC_OUTFIFO | | ||
| 1696 | MOVE_DEST_CLASS1CTX | | ||
| 1697 | (16 << MOVE_OFFSET_SHIFT) | | ||
| 1698 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 1699 | } | ||
| 1700 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1701 | |||
| 1702 | /* Generate IV */ | ||
| 1703 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | ||
| 1704 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | ||
| 1705 | NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
| 1706 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | ||
| 1707 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
| 1708 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 1709 | append_move(desc, MOVE_WAITCOMP | | ||
| 1710 | MOVE_SRC_INFIFO | | ||
| 1711 | MOVE_DEST_CLASS1CTX | | ||
| 1712 | (crt->ivsize << MOVE_LEN_SHIFT) | | ||
| 1713 | (ctx1_iv_off << MOVE_OFFSET_SHIFT)); | ||
| 1714 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 1715 | |||
| 1716 | /* Copy generated IV to memory */ | ||
| 1717 | append_seq_store(desc, crt->ivsize, | ||
| 1718 | LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | | ||
| 1719 | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 1720 | |||
| 1721 | /* Load Counter into CONTEXT1 reg */ | ||
| 1722 | if (is_rfc3686) | ||
| 1723 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 1724 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1725 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 1726 | LDST_OFFSET_SHIFT)); | ||
| 1727 | |||
| 1728 | if (ctx1_iv_off) | ||
| 1729 | append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | | ||
| 1730 | (1 << JUMP_OFFSET_SHIFT)); | ||
| 1731 | |||
| 1732 | /* Load operation */ | ||
| 1733 | append_operation(desc, ctx->class1_alg_type | | ||
| 1734 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
| 1735 | |||
| 1736 | /* Perform operation */ | ||
| 1737 | ablkcipher_append_src_dst(desc); | ||
| 1738 | |||
| 1739 | ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, | 828 | ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, |
| 1740 | desc_bytes(desc), | 829 | desc_bytes(desc), |
| 1741 | DMA_TO_DEVICE); | 830 | DMA_TO_DEVICE); |
| @@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1743 | dev_err(jrdev, "unable to map shared descriptor\n"); | 832 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1744 | return -ENOMEM; | 833 | return -ENOMEM; |
| 1745 | } | 834 | } |
| 1746 | #ifdef DEBUG | ||
| 1747 | print_hex_dump(KERN_ERR, | ||
| 1748 | "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ", | ||
| 1749 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 1750 | desc_bytes(desc), 1); | ||
| 1751 | #endif | ||
| 1752 | 835 | ||
| 1753 | return ret; | 836 | return 0; |
| 1754 | } | 837 | } |
| 1755 | 838 | ||
| 1756 | static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | 839 | static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, |
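For orientation, a hypothetical caller of this setkey path through the legacy ablkcipher API of this era; the function name encrypt_one() and the cbc(aes) choice are illustrative, not from this patch:

        /* Hypothetical caller sketch; a real one must also handle
         * -EINPROGRESS/-EBUSY from async implementations such as caam. */
        static int encrypt_one(const u8 *key, unsigned int keylen,
                               struct scatterlist *src, struct scatterlist *dst,
                               unsigned int nbytes, u8 *iv)
        {
                struct crypto_ablkcipher *tfm;
                struct ablkcipher_request *req;
                int err;

                tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                err = crypto_ablkcipher_setkey(tfm, key, keylen);
                if (err)
                        goto out_tfm;

                req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
                if (!req) {
                        err = -ENOMEM;
                        goto out_tfm;
                }

                ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
                err = crypto_ablkcipher_encrypt(req);

                ablkcipher_request_free(req);
        out_tfm:
                crypto_free_ablkcipher(tfm);
                return err;
        }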
| @@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1758 | { | 841 | { |
| 1759 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | 842 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); |
| 1760 | struct device *jrdev = ctx->jrdev; | 843 | struct device *jrdev = ctx->jrdev; |
| 1761 | u32 *key_jump_cmd, *desc; | 844 | u32 *desc; |
| 1762 | __be64 sector_size = cpu_to_be64(512); | ||
| 1763 | 845 | ||
| 1764 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { | 846 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
| 1765 | crypto_ablkcipher_set_flags(ablkcipher, | 847 | crypto_ablkcipher_set_flags(ablkcipher, |
| @@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1774 | dev_err(jrdev, "unable to map key i/o memory\n"); | 856 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| 1775 | return -ENOMEM; | 857 | return -ENOMEM; |
| 1776 | } | 858 | } |
| 1777 | ctx->enckeylen = keylen; | 859 | ctx->cdata.keylen = keylen; |
| 860 | ctx->cdata.key_virt = ctx->key; | ||
| 861 | ctx->cdata.key_inline = true; | ||
| 1778 | 862 | ||
| 1779 | /* xts_ablkcipher_encrypt shared descriptor */ | 863 | /* xts_ablkcipher_encrypt shared descriptor */ |
| 1780 | desc = ctx->sh_desc_enc; | 864 | desc = ctx->sh_desc_enc; |
| 1781 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | 865 | cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); |
| 1782 | /* Skip if already shared */ | ||
| 1783 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1784 | JUMP_COND_SHRD); | ||
| 1785 | |||
| 1786 | /* Load class1 keys only */ | ||
| 1787 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1788 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1789 | |||
| 1790 | /* Load sector size with index 40 bytes (0x28) */ | ||
| 1791 | append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1792 | LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8); | ||
| 1793 | append_data(desc, (void *)§or_size, 8); | ||
| 1794 | |||
| 1795 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1796 | |||
| 1797 | /* | ||
| 1798 | * create sequence for loading the sector index | ||
| 1799 | * Upper 8B of IV - will be used as sector index | ||
| 1800 | * Lower 8B of IV - will be discarded | ||
| 1801 | */ | ||
| 1802 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1803 | LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8); | ||
| 1804 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 1805 | |||
| 1806 | /* Load operation */ | ||
| 1807 | append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | | ||
| 1808 | OP_ALG_ENCRYPT); | ||
| 1809 | |||
| 1810 | /* Perform operation */ | ||
| 1811 | ablkcipher_append_src_dst(desc); | ||
| 1812 | |||
| 1813 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | 866 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), |
| 1814 | DMA_TO_DEVICE); | 867 | DMA_TO_DEVICE); |
| 1815 | if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { | 868 | if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { |
| 1816 | dev_err(jrdev, "unable to map shared descriptor\n"); | 869 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1817 | return -ENOMEM; | 870 | return -ENOMEM; |
| 1818 | } | 871 | } |
| 1819 | #ifdef DEBUG | ||
| 1820 | print_hex_dump(KERN_ERR, | ||
| 1821 | "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ", | ||
| 1822 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1823 | #endif | ||
| 1824 | 872 | ||
| 1825 | /* xts_ablkcipher_decrypt shared descriptor */ | 873 | /* xts_ablkcipher_decrypt shared descriptor */ |
| 1826 | desc = ctx->sh_desc_dec; | 874 | desc = ctx->sh_desc_dec; |
| 1827 | 875 | cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); | |
| 1828 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 1829 | /* Skip if already shared */ | ||
| 1830 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1831 | JUMP_COND_SHRD); | ||
| 1832 | |||
| 1833 | /* Load class1 key only */ | ||
| 1834 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
| 1835 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1836 | |||
| 1837 | /* Load sector size with index 40 bytes (0x28) */ | ||
| 1838 | append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1839 | LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8); | ||
| 1840 | append_data(desc, (void *)§or_size, 8); | ||
| 1841 | |||
| 1842 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1843 | |||
| 1844 | /* | ||
| 1845 | * create sequence for loading the sector index | ||
| 1846 | * Upper 8B of IV - will be used as sector index | ||
| 1847 | * Lower 8B of IV - will be discarded | ||
| 1848 | */ | ||
| 1849 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1850 | LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8); | ||
| 1851 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 1852 | |||
| 1853 | /* Load operation */ | ||
| 1854 | append_dec_op1(desc, ctx->class1_alg_type); | ||
| 1855 | |||
| 1856 | /* Perform operation */ | ||
| 1857 | ablkcipher_append_src_dst(desc); | ||
| 1858 | |||
| 1859 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | 876 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), |
| 1860 | DMA_TO_DEVICE); | 877 | DMA_TO_DEVICE); |
| 1861 | if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { | 878 | if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { |
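The keylen check above reflects the XTS key format: two equal-size AES keys concatenated, hence only 32- or 64-byte keys are accepted. Illustratively (the hardware consumes the whole blob directly, so this split never appears in the driver):

        /* XTS key blob: key = key1 || key2, keylen = 2 * AES key size */
        const u8 *key1 = key;                /* data-encryption key  */
        const u8 *key2 = key + keylen / 2;   /* tweak-encryption key */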
| @@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 1864 | dev_err(jrdev, "unable to map shared descriptor\n"); | 881 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 1865 | return -ENOMEM; | 882 | return -ENOMEM; |
| 1866 | } | 883 | } |
| 1867 | #ifdef DEBUG | ||
| 1868 | print_hex_dump(KERN_ERR, | ||
| 1869 | "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ", | ||
| 1870 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1871 | #endif | ||
| 1872 | 884 | ||
| 1873 | return 0; | 885 | return 0; |
| 1874 | } | 886 | } |
| 1875 | 887 | ||
| 1876 | /* | 888 | /* |
| 1877 | * aead_edesc - s/w-extended aead descriptor | 889 | * aead_edesc - s/w-extended aead descriptor |
| 1878 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | ||
| 1879 | * @src_nents: number of segments in input scatterlist | 890 | * @src_nents: number of segments in input scatterlist |
| 1880 | * @dst_nents: number of segments in output scatterlist | 891 | * @dst_nents: number of segments in output scatterlist |
| 1881 | * @iv_dma: dma address of iv for checking continuity and link table | ||
| 1882 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | ||
| 1883 | * @sec4_sg_bytes: length of dma mapped sec4_sg space | 892 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
| 1884 | * @sec4_sg_dma: bus physical mapped address of h/w link table | 893 | * @sec4_sg_dma: bus physical mapped address of h/w link table |
| 894 | * @sec4_sg: pointer to h/w link table | ||
| 1885 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 895 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
| 1886 | */ | 896 | */ |
| 1887 | struct aead_edesc { | 897 | struct aead_edesc { |
| 1888 | int assoc_nents; | ||
| 1889 | int src_nents; | 898 | int src_nents; |
| 1890 | int dst_nents; | 899 | int dst_nents; |
| 1891 | dma_addr_t iv_dma; | ||
| 1892 | int sec4_sg_bytes; | 900 | int sec4_sg_bytes; |
| 1893 | dma_addr_t sec4_sg_dma; | 901 | dma_addr_t sec4_sg_dma; |
| 1894 | struct sec4_sg_entry *sec4_sg; | 902 | struct sec4_sg_entry *sec4_sg; |
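Both edesc structures deliberately keep hw_desc[] last: the software state, the hardware job descriptor and the sec4 link table come out of a single allocation. A sketch of the arithmetic, with desc_bytes and sec4_sg_bytes computed by the callers as in this file:

        /* One allocation: edesc, then the job descriptor, then the S/G table */
        edesc = kmalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
                        GFP_DMA | flags);
        if (!edesc)
                return ERR_PTR(-ENOMEM);
        edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
                                                  desc_bytes);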
| @@ -1900,9 +908,9 @@ struct aead_edesc { | |||
| 1900 | * @src_nents: number of segments in input scatterlist | 908 | * @src_nents: number of segments in input scatterlist |
| 1901 | * @dst_nents: number of segments in output scatterlist | 909 | * @dst_nents: number of segments in output scatterlist |
| 1902 | * @iv_dma: dma address of iv for checking continuity and link table | 910 | * @iv_dma: dma address of iv for checking continuity and link table |
| 1903 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | ||
| 1904 | * @sec4_sg_bytes: length of dma mapped sec4_sg space | 911 | * @sec4_sg_bytes: length of dma mapped sec4_sg space |
| 1905 | * @sec4_sg_dma: bus physical mapped address of h/w link table | 912 | * @sec4_sg_dma: bus physical mapped address of h/w link table |
| 913 | * @sec4_sg: pointer to h/w link table | ||
| 1906 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 914 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
| 1907 | */ | 915 | */ |
| 1908 | struct ablkcipher_edesc { | 916 | struct ablkcipher_edesc { |
| @@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 2019 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 1027 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 2020 | #endif | 1028 | #endif |
| 2021 | 1029 | ||
| 2022 | edesc = (struct ablkcipher_edesc *)((char *)desc - | 1030 | edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); |
| 2023 | offsetof(struct ablkcipher_edesc, hw_desc)); | ||
| 2024 | 1031 | ||
| 2025 | if (err) | 1032 | if (err) |
| 2026 | caam_jr_strstatus(jrdev, err); | 1033 | caam_jr_strstatus(jrdev, err); |
| @@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 2031 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1038 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
| 2032 | dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", | 1039 | dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
| 2033 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1040 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 2034 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); | 1041 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); |
| 2035 | #endif | 1042 | #endif |
| 2036 | 1043 | ||
| 2037 | ablkcipher_unmap(jrdev, edesc, req); | 1044 | ablkcipher_unmap(jrdev, edesc, req); |
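The switch to container_of() is behavior-preserving: it computes the same address as the removed open-coded arithmetic, while the kernel macro additionally checks (via typeof) that desc really points at the hw_desc member. Expanded by hand:

        /* What container_of() computes here, minus the type checking */
        edesc = (struct ablkcipher_edesc *)((char *)desc -
                offsetof(struct ablkcipher_edesc, hw_desc[0]));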
| @@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 2052 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 1059 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 2053 | #endif | 1060 | #endif |
| 2054 | 1061 | ||
| 2055 | edesc = (struct ablkcipher_edesc *)((char *)desc - | 1062 | edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); |
| 2056 | offsetof(struct ablkcipher_edesc, hw_desc)); | ||
| 2057 | if (err) | 1063 | if (err) |
| 2058 | caam_jr_strstatus(jrdev, err); | 1064 | caam_jr_strstatus(jrdev, err); |
| 2059 | 1065 | ||
| @@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 2063 | ivsize, 1); | 1069 | ivsize, 1); |
| 2064 | dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", | 1070 | dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", |
| 2065 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1071 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
| 2066 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); | 1072 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); |
| 2067 | #endif | 1073 | #endif |
| 2068 | 1074 | ||
| 2069 | ablkcipher_unmap(jrdev, edesc, req); | 1075 | ablkcipher_unmap(jrdev, edesc, req); |
| @@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req, | |||
| 2157 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); | 1163 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); |
| 2158 | /* Append Salt */ | 1164 | /* Append Salt */ |
| 2159 | if (!generic_gcm) | 1165 | if (!generic_gcm) |
| 2160 | append_data(desc, ctx->key + ctx->enckeylen, 4); | 1166 | append_data(desc, ctx->key + ctx->cdata.keylen, 4); |
| 2161 | /* Append IV */ | 1167 | /* Append IV */ |
| 2162 | append_data(desc, req->iv, ivsize); | 1168 | append_data(desc, req->iv, ivsize); |
| 2163 | /* End of blank commands */ | 1169 | /* End of blank commands */ |
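The salt append works because rfc4106-style setkey stores the 4-byte salt directly behind the AES key in ctx->key, with cdata.keylen counting only the key proper. A sketch consistent with that access (the actual setkey lives elsewhere in this file):

        /* rfc4106 key blob: AES key followed by 4-byte salt */
        memcpy(ctx->key, key, keylen);    /* copy key material incl. salt */
        ctx->cdata.keylen = keylen - 4;   /* keylen counts only the AES key */
        /* ... later, in init_gcm_job(): */
        append_data(desc, ctx->key + ctx->cdata.keylen, 4);   /* the salt */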
| @@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req, | |||
| 2172 | struct caam_aead_alg, aead); | 1178 | struct caam_aead_alg, aead); |
| 2173 | unsigned int ivsize = crypto_aead_ivsize(aead); | 1179 | unsigned int ivsize = crypto_aead_ivsize(aead); |
| 2174 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1180 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 2175 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == | 1181 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
| 2176 | OP_ALG_AAI_CTR_MOD128); | 1182 | OP_ALG_AAI_CTR_MOD128); |
| 2177 | const bool is_rfc3686 = alg->caam.rfc3686; | 1183 | const bool is_rfc3686 = alg->caam.rfc3686; |
| 2178 | u32 *desc = edesc->hw_desc; | 1184 | u32 *desc = edesc->hw_desc; |
| @@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 2218 | int len, sec4_sg_index = 0; | 1224 | int len, sec4_sg_index = 0; |
| 2219 | 1225 | ||
| 2220 | #ifdef DEBUG | 1226 | #ifdef DEBUG |
| 2221 | bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2222 | CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); | ||
| 2223 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", | 1227 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", |
| 2224 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 1228 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
| 2225 | ivsize, 1); | 1229 | ivsize, 1); |
| 2226 | printk(KERN_ERR "asked=%d, nbytes=%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes); | 1230 | printk(KERN_ERR "asked=%d, nbytes=%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes); |
| 2227 | dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", | 1231 | dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", |
| 2228 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1232 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 2229 | edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); | 1233 | edesc->src_nents ? 100 : req->nbytes, 1); |
| 2230 | #endif | 1234 | #endif |
| 2231 | 1235 | ||
| 2232 | len = desc_len(sh_desc); | 1236 | len = desc_len(sh_desc); |
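dbg_dump_sg() loses its may_sleep argument throughout this series. A hypothetical, context-agnostic way to build such a helper is to bounce the scatterlist through a small stack buffer; this is a sketch, not the driver's implementation:

        /* Hypothetical context-agnostic S/G dump helper (sketch) */
        static void sg_hexdump(const char *prefix, struct scatterlist *sg,
                               size_t len)
        {
                u8 buf[256];
                size_t n = min(len, sizeof(buf));

                /* copy out of the S/G list, then dump the linear buffer */
                sg_copy_to_buffer(sg, sg_nents(sg), buf, n);
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_ADDRESS, 16, 4,
                               buf, n, 1);
        }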
| @@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 2278 | int len, sec4_sg_index = 0; | 1282 | int len, sec4_sg_index = 0; |
| 2279 | 1283 | ||
| 2280 | #ifdef DEBUG | 1284 | #ifdef DEBUG |
| 2281 | bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2282 | CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); | ||
| 2283 | print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", | 1285 | print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", |
| 2284 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | 1286 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
| 2285 | ivsize, 1); | 1287 | ivsize, 1); |
| 2286 | dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", | 1288 | dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", |
| 2287 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1289 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 2288 | edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); | 1290 | edesc->src_nents ? 100 : req->nbytes, 1); |
| 2289 | #endif | 1291 | #endif |
| 2290 | 1292 | ||
| 2291 | len = desc_len(sh_desc); | 1293 | len = desc_len(sh_desc); |
| @@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 2344 | 1346 | ||
| 2345 | /* Check if data are contiguous. */ | 1347 | /* Check if data are contiguous. */ |
| 2346 | all_contig = !src_nents; | 1348 | all_contig = !src_nents; |
| 2347 | if (!all_contig) { | 1349 | if (!all_contig) |
| 2348 | src_nents = src_nents ? : 1; | ||
| 2349 | sec4_sg_len = src_nents; | 1350 | sec4_sg_len = src_nents; |
| 2350 | } | ||
| 2351 | 1351 | ||
| 2352 | sec4_sg_len += dst_nents; | 1352 | sec4_sg_len += dst_nents; |
| 2353 | 1353 | ||
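The simplification relies on the counting convention that src_nents is 0 for a single flat buffer, so no S/G table entries are needed for the source. A hypothetical illustration of that convention (the helper choice here is assumed, not taken from this patch):

        /* Hypothetical counting convention behind the check above */
        int nents = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);

        src_nents = (nents == 1) ? 0 : nents;   /* 0 means "contiguous" */
        all_contig = !src_nents;
        if (!all_contig)
                sec4_sg_len = src_nents;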
| @@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req) | |||
| 2556 | int ret = 0; | 1556 | int ret = 0; |
| 2557 | 1557 | ||
| 2558 | #ifdef DEBUG | 1558 | #ifdef DEBUG |
| 2559 | bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2560 | CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); | ||
| 2561 | dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", | 1559 | dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", |
| 2562 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1560 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
| 2563 | req->assoclen + req->cryptlen, 1, may_sleep); | 1561 | req->assoclen + req->cryptlen, 1); |
| 2564 | #endif | 1562 | #endif |
| 2565 | 1563 | ||
| 2566 | /* allocate extended descriptor */ | 1564 | /* allocate extended descriptor */ |
| @@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
| 2618 | if (likely(req->src == req->dst)) { | 1616 | if (likely(req->src == req->dst)) { |
| 2619 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1617 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, |
| 2620 | DMA_BIDIRECTIONAL); | 1618 | DMA_BIDIRECTIONAL); |
| 1619 | if (unlikely(!sgc)) { | ||
| 1620 | dev_err(jrdev, "unable to map source\n"); | ||
| 1621 | return ERR_PTR(-ENOMEM); | ||
| 1622 | } | ||
| 2621 | } else { | 1623 | } else { |
| 2622 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1624 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, |
| 2623 | DMA_TO_DEVICE); | 1625 | DMA_TO_DEVICE); |
| 1626 | if (unlikely(!sgc)) { | ||
| 1627 | dev_err(jrdev, "unable to map source\n"); | ||
| 1628 | return ERR_PTR(-ENOMEM); | ||
| 1629 | } | ||
| 1630 | |||
| 2624 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1631 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, |
| 2625 | DMA_FROM_DEVICE); | 1632 | DMA_FROM_DEVICE); |
| 1633 | if (unlikely(!sgc)) { | ||
| 1634 | dev_err(jrdev, "unable to map destination\n"); | ||
| 1635 | dma_unmap_sg(jrdev, req->src, src_nents ? : 1, | ||
| 1636 | DMA_TO_DEVICE); | ||
| 1637 | return ERR_PTR(-ENOMEM); | ||
| 1638 | } | ||
| 2626 | } | 1639 | } |
| 2627 | 1640 | ||
| 2628 | iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); | 1641 | iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); |
| 2629 | if (dma_mapping_error(jrdev, iv_dma)) { | 1642 | if (dma_mapping_error(jrdev, iv_dma)) { |
| 2630 | dev_err(jrdev, "unable to map IV\n"); | 1643 | dev_err(jrdev, "unable to map IV\n"); |
| 1644 | caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, | ||
| 1645 | 0, 0, 0); | ||
| 2631 | return ERR_PTR(-ENOMEM); | 1646 | return ERR_PTR(-ENOMEM); |
| 2632 | } | 1647 | } |
| 2633 | 1648 | ||
| @@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
| 2647 | GFP_DMA | flags); | 1662 | GFP_DMA | flags); |
| 2648 | if (!edesc) { | 1663 | if (!edesc) { |
| 2649 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1664 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1665 | caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, | ||
| 1666 | iv_dma, ivsize, 0, 0); | ||
| 2650 | return ERR_PTR(-ENOMEM); | 1667 | return ERR_PTR(-ENOMEM); |
| 2651 | } | 1668 | } |
| 2652 | 1669 | ||
| @@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
| 2673 | sec4_sg_bytes, DMA_TO_DEVICE); | 1690 | sec4_sg_bytes, DMA_TO_DEVICE); |
| 2674 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | 1691 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
| 2675 | dev_err(jrdev, "unable to map S/G table\n"); | 1692 | dev_err(jrdev, "unable to map S/G table\n"); |
| 1693 | caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, | ||
| 1694 | iv_dma, ivsize, 0, 0); | ||
| 1695 | kfree(edesc); | ||
| 2676 | return ERR_PTR(-ENOMEM); | 1696 | return ERR_PTR(-ENOMEM); |
| 2677 | } | 1697 | } |
| 2678 | 1698 | ||
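Each later failure point now releases everything acquired so far: caam_unmap() for the DMA mappings, plus kfree() of the edesc once it exists. The same unwinding could be written with the kernel's usual goto ladder; a sketch, not what the driver does:

        /* Alternative goto-based unwinding (sketch only) */
        edesc = kmalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
                        GFP_DMA | flags);
        if (!edesc)
                goto unmap;

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma))
                goto free_edesc;

        return edesc;

free_edesc:
        kfree(edesc);
unmap:
        caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
                   iv_dma, ivsize, 0, 0);
        return ERR_PTR(-ENOMEM);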
| @@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
| 2794 | if (likely(req->src == req->dst)) { | 1814 | if (likely(req->src == req->dst)) { |
| 2795 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1815 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, |
| 2796 | DMA_BIDIRECTIONAL); | 1816 | DMA_BIDIRECTIONAL); |
| 1817 | if (unlikely(!sgc)) { | ||
| 1818 | dev_err(jrdev, "unable to map source\n"); | ||
| 1819 | return ERR_PTR(-ENOMEM); | ||
| 1820 | } | ||
| 2797 | } else { | 1821 | } else { |
| 2798 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | 1822 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, |
| 2799 | DMA_TO_DEVICE); | 1823 | DMA_TO_DEVICE); |
| 1824 | if (unlikely(!sgc)) { | ||
| 1825 | dev_err(jrdev, "unable to map source\n"); | ||
| 1826 | return ERR_PTR(-ENOMEM); | ||
| 1827 | } | ||
| 1828 | |||
| 2800 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | 1829 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, |
| 2801 | DMA_FROM_DEVICE); | 1830 | DMA_FROM_DEVICE); |
| 1831 | if (unlikely(!sgc)) { | ||
| 1832 | dev_err(jrdev, "unable to map destination\n"); | ||
| 1833 | dma_unmap_sg(jrdev, req->src, src_nents ? : 1, | ||
| 1834 | DMA_TO_DEVICE); | ||
| 1835 | return ERR_PTR(-ENOMEM); | ||
| 1836 | } | ||
| 2802 | } | 1837 | } |
| 2803 | 1838 | ||
| 2804 | /* | 1839 | /* |
| @@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
| 2808 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); | 1843 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); |
| 2809 | if (dma_mapping_error(jrdev, iv_dma)) { | 1844 | if (dma_mapping_error(jrdev, iv_dma)) { |
| 2810 | dev_err(jrdev, "unable to map IV\n"); | 1845 | dev_err(jrdev, "unable to map IV\n"); |
| 1846 | caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, | ||
| 1847 | 0, 0, 0); | ||
| 2811 | return ERR_PTR(-ENOMEM); | 1848 | return ERR_PTR(-ENOMEM); |
| 2812 | } | 1849 | } |
| 2813 | 1850 | ||
| @@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
| 2823 | GFP_DMA | flags); | 1860 | GFP_DMA | flags); |
| 2824 | if (!edesc) { | 1861 | if (!edesc) { |
| 2825 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1862 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1863 | caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, | ||
| 1864 | iv_dma, ivsize, 0, 0); | ||
| 2826 | return ERR_PTR(-ENOMEM); | 1865 | return ERR_PTR(-ENOMEM); |
| 2827 | } | 1866 | } |
| 2828 | 1867 | ||
| @@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
| 2850 | sec4_sg_bytes, DMA_TO_DEVICE); | 1889 | sec4_sg_bytes, DMA_TO_DEVICE); |
| 2851 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | 1890 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { |
| 2852 | dev_err(jrdev, "unable to map S/G table\n"); | 1891 | dev_err(jrdev, "unable to map S/G table\n"); |
| 1892 | caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, | ||
| 1893 | iv_dma, ivsize, 0, 0); | ||
| 1894 | kfree(edesc); | ||
| 2853 | return ERR_PTR(-ENOMEM); | 1895 | return ERR_PTR(-ENOMEM); |
| 2854 | } | 1896 | } |
| 2855 | edesc->iv_dma = iv_dma; | 1897 | edesc->iv_dma = iv_dma; |
| @@ -2916,7 +1958,6 @@ struct caam_alg_template { | |||
| 2916 | } template_u; | 1958 | } template_u; |
| 2917 | u32 class1_alg_type; | 1959 | u32 class1_alg_type; |
| 2918 | u32 class2_alg_type; | 1960 | u32 class2_alg_type; |
| 2919 | u32 alg_op; | ||
| 2920 | }; | 1961 | }; |
| 2921 | 1962 | ||
| 2922 | static struct caam_alg_template driver_algs[] = { | 1963 | static struct caam_alg_template driver_algs[] = { |
| @@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3101 | .caam = { | 2142 | .caam = { |
| 3102 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2143 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3103 | OP_ALG_AAI_HMAC_PRECOMP, | 2144 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3104 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3105 | }, | 2145 | }, |
| 3106 | }, | 2146 | }, |
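Every template below loses its .alg_op field the same way: alg_op only carried the OP_ALG_ALGSEL_* | OP_ALG_AAI_HMAC word used for split-key generation, and that information is now derivable from the class 2 algorithm type stored in adata. A sketch of the consolidated alginfo fields as they are used in this diff (the authoritative definition lives in the driver headers and may differ in detail):

        /* Sketch of the consolidated algorithm info used via ctx->cdata/adata */
        struct alginfo {
                u32 algtype;              /* OP_TYPE_* | ALGSEL | AAI bits  */
                unsigned int keylen;      /* key length                     */
                unsigned int keylen_pad;  /* padded split-key length (auth) */
                void *key_virt;           /* key material, virtual address  */
                bool key_inline;          /* embed key in the descriptor?   */
        };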
| 3107 | { | 2147 | { |
| @@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3123 | .caam = { | 2163 | .caam = { |
| 3124 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2164 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3125 | OP_ALG_AAI_HMAC_PRECOMP, | 2165 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3126 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3127 | }, | 2166 | }, |
| 3128 | }, | 2167 | }, |
| 3129 | { | 2168 | { |
| @@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3145 | .caam = { | 2184 | .caam = { |
| 3146 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2185 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3147 | OP_ALG_AAI_HMAC_PRECOMP, | 2186 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3148 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3149 | }, | 2187 | }, |
| 3150 | }, | 2188 | }, |
| 3151 | { | 2189 | { |
| @@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3167 | .caam = { | 2205 | .caam = { |
| 3168 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2206 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3169 | OP_ALG_AAI_HMAC_PRECOMP, | 2207 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3170 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3171 | }, | 2208 | }, |
| 3172 | }, | 2209 | }, |
| 3173 | { | 2210 | { |
| @@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3189 | .caam = { | 2226 | .caam = { |
| 3190 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 2227 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3191 | OP_ALG_AAI_HMAC_PRECOMP, | 2228 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3192 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3193 | }, | 2229 | }, |
| 3194 | }, | 2230 | }, |
| 3195 | { | 2231 | { |
| @@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3211 | .caam = { | 2247 | .caam = { |
| 3212 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 2248 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3213 | OP_ALG_AAI_HMAC_PRECOMP, | 2249 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3214 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3215 | }, | 2250 | }, |
| 3216 | }, | 2251 | }, |
| 3217 | { | 2252 | { |
| @@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3233 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2268 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3234 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2269 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3235 | OP_ALG_AAI_HMAC_PRECOMP, | 2270 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3236 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3237 | }, | 2271 | }, |
| 3238 | }, | 2272 | }, |
| 3239 | { | 2273 | { |
| @@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3256 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2290 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3257 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2291 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3258 | OP_ALG_AAI_HMAC_PRECOMP, | 2292 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3259 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3260 | .geniv = true, | 2293 | .geniv = true, |
| 3261 | }, | 2294 | }, |
| 3262 | }, | 2295 | }, |
| @@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3279 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2312 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3280 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2313 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3281 | OP_ALG_AAI_HMAC_PRECOMP, | 2314 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3282 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3283 | }, | 2315 | }, |
| 3284 | }, | 2316 | }, |
| 3285 | { | 2317 | { |
| @@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3302 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2334 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3303 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2335 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3304 | OP_ALG_AAI_HMAC_PRECOMP, | 2336 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3305 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3306 | .geniv = true, | 2337 | .geniv = true, |
| 3307 | }, | 2338 | }, |
| 3308 | }, | 2339 | }, |
| @@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3325 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2356 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3326 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2357 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3327 | OP_ALG_AAI_HMAC_PRECOMP, | 2358 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3328 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3329 | }, | 2359 | }, |
| 3330 | }, | 2360 | }, |
| 3331 | { | 2361 | { |
| @@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3348 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2378 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3349 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2379 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3350 | OP_ALG_AAI_HMAC_PRECOMP, | 2380 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3351 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3352 | .geniv = true, | 2381 | .geniv = true, |
| 3353 | }, | 2382 | }, |
| 3354 | }, | 2383 | }, |
| @@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3371 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2400 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3372 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2401 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3373 | OP_ALG_AAI_HMAC_PRECOMP, | 2402 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3374 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3375 | }, | 2403 | }, |
| 3376 | }, | 2404 | }, |
| 3377 | { | 2405 | { |
| @@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3394 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2422 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3395 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2423 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3396 | OP_ALG_AAI_HMAC_PRECOMP, | 2424 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3397 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3398 | .geniv = true, | 2425 | .geniv = true, |
| 3399 | }, | 2426 | }, |
| 3400 | }, | 2427 | }, |
| @@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3417 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2444 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3418 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 2445 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3419 | OP_ALG_AAI_HMAC_PRECOMP, | 2446 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3420 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3421 | }, | 2447 | }, |
| 3422 | }, | 2448 | }, |
| 3423 | { | 2449 | { |
| @@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3440 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2466 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3441 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 2467 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3442 | OP_ALG_AAI_HMAC_PRECOMP, | 2468 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3443 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3444 | .geniv = true, | 2469 | .geniv = true, |
| 3445 | }, | 2470 | }, |
| 3446 | }, | 2471 | }, |
| @@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3463 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2488 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3464 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 2489 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3465 | OP_ALG_AAI_HMAC_PRECOMP, | 2490 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3466 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3467 | }, | 2491 | }, |
| 3468 | }, | 2492 | }, |
| 3469 | { | 2493 | { |
| @@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3486 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 2510 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
| 3487 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 2511 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3488 | OP_ALG_AAI_HMAC_PRECOMP, | 2512 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3489 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3490 | .geniv = true, | 2513 | .geniv = true, |
| 3491 | }, | 2514 | }, |
| 3492 | }, | 2515 | }, |
| @@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3509 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2532 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3510 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2533 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3511 | OP_ALG_AAI_HMAC_PRECOMP, | 2534 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3512 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3513 | } | 2535 | } |
| 3514 | }, | 2536 | }, |
| 3515 | { | 2537 | { |
| @@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3532 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2554 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3533 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2555 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3534 | OP_ALG_AAI_HMAC_PRECOMP, | 2556 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3535 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3536 | .geniv = true, | 2557 | .geniv = true, |
| 3537 | } | 2558 | } |
| 3538 | }, | 2559 | }, |
| @@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3556 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2577 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3557 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2578 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3558 | OP_ALG_AAI_HMAC_PRECOMP, | 2579 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3559 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3560 | }, | 2580 | }, |
| 3561 | }, | 2581 | }, |
| 3562 | { | 2582 | { |
| @@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3580 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2600 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3581 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2601 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3582 | OP_ALG_AAI_HMAC_PRECOMP, | 2602 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3583 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3584 | .geniv = true, | 2603 | .geniv = true, |
| 3585 | }, | 2604 | }, |
| 3586 | }, | 2605 | }, |
| @@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3604 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2623 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3605 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2624 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3606 | OP_ALG_AAI_HMAC_PRECOMP, | 2625 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3607 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3608 | }, | 2626 | }, |
| 3609 | }, | 2627 | }, |
| 3610 | { | 2628 | { |
| @@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3628 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2646 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3629 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2647 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3630 | OP_ALG_AAI_HMAC_PRECOMP, | 2648 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3631 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3632 | .geniv = true, | 2649 | .geniv = true, |
| 3633 | }, | 2650 | }, |
| 3634 | }, | 2651 | }, |
| @@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3652 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2669 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3653 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2670 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3654 | OP_ALG_AAI_HMAC_PRECOMP, | 2671 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3655 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3656 | }, | 2672 | }, |
| 3657 | }, | 2673 | }, |
| 3658 | { | 2674 | { |
| @@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3676 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2692 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3677 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2693 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3678 | OP_ALG_AAI_HMAC_PRECOMP, | 2694 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3679 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3680 | .geniv = true, | 2695 | .geniv = true, |
| 3681 | }, | 2696 | }, |
| 3682 | }, | 2697 | }, |
| @@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3700 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2715 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3701 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 2716 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3702 | OP_ALG_AAI_HMAC_PRECOMP, | 2717 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3703 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3704 | }, | 2718 | }, |
| 3705 | }, | 2719 | }, |
| 3706 | { | 2720 | { |
| @@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3724 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2738 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3725 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 2739 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3726 | OP_ALG_AAI_HMAC_PRECOMP, | 2740 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3727 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3728 | .geniv = true, | 2741 | .geniv = true, |
| 3729 | }, | 2742 | }, |
| 3730 | }, | 2743 | }, |
| @@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3748 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2761 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3749 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 2762 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3750 | OP_ALG_AAI_HMAC_PRECOMP, | 2763 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3751 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3752 | }, | 2764 | }, |
| 3753 | }, | 2765 | }, |
| 3754 | { | 2766 | { |
| @@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3772 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 2784 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
| 3773 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 2785 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3774 | OP_ALG_AAI_HMAC_PRECOMP, | 2786 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3775 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3776 | .geniv = true, | 2787 | .geniv = true, |
| 3777 | }, | 2788 | }, |
| 3778 | }, | 2789 | }, |
| @@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3795 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2806 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3796 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2807 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3797 | OP_ALG_AAI_HMAC_PRECOMP, | 2808 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3798 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3799 | }, | 2809 | }, |
| 3800 | }, | 2810 | }, |
| 3801 | { | 2811 | { |
| @@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3818 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2828 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3819 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 2829 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3820 | OP_ALG_AAI_HMAC_PRECOMP, | 2830 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3821 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3822 | .geniv = true, | 2831 | .geniv = true, |
| 3823 | }, | 2832 | }, |
| 3824 | }, | 2833 | }, |
| @@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3841 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2850 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3842 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2851 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3843 | OP_ALG_AAI_HMAC_PRECOMP, | 2852 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3844 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3845 | }, | 2853 | }, |
| 3846 | }, | 2854 | }, |
| 3847 | { | 2855 | { |
| @@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3864 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2872 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3865 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 2873 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3866 | OP_ALG_AAI_HMAC_PRECOMP, | 2874 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3867 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3868 | .geniv = true, | 2875 | .geniv = true, |
| 3869 | }, | 2876 | }, |
| 3870 | }, | 2877 | }, |
| @@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3887 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2894 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3888 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2895 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3889 | OP_ALG_AAI_HMAC_PRECOMP, | 2896 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3890 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3891 | }, | 2897 | }, |
| 3892 | }, | 2898 | }, |
| 3893 | { | 2899 | { |
| @@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3910 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2916 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3911 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2917 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3912 | OP_ALG_AAI_HMAC_PRECOMP, | 2918 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3913 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3914 | .geniv = true, | 2919 | .geniv = true, |
| 3915 | }, | 2920 | }, |
| 3916 | }, | 2921 | }, |
| @@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3933 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2938 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3934 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2939 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3935 | OP_ALG_AAI_HMAC_PRECOMP, | 2940 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3936 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3937 | }, | 2941 | }, |
| 3938 | }, | 2942 | }, |
| 3939 | { | 2943 | { |
| @@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3956 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2960 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3957 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 2961 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3958 | OP_ALG_AAI_HMAC_PRECOMP, | 2962 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3959 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3960 | .geniv = true, | 2963 | .geniv = true, |
| 3961 | }, | 2964 | }, |
| 3962 | }, | 2965 | }, |
| @@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 3979 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 2982 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3980 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 2983 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3981 | OP_ALG_AAI_HMAC_PRECOMP, | 2984 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3982 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3983 | }, | 2985 | }, |
| 3984 | }, | 2986 | }, |
| 3985 | { | 2987 | { |
| @@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4002 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3004 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 4003 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3005 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 4004 | OP_ALG_AAI_HMAC_PRECOMP, | 3006 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4005 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 4006 | .geniv = true, | 3007 | .geniv = true, |
| 4007 | }, | 3008 | }, |
| 4008 | }, | 3009 | }, |
| @@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4025 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3026 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 4026 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3027 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 4027 | OP_ALG_AAI_HMAC_PRECOMP, | 3028 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4028 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 4029 | }, | 3029 | }, |
| 4030 | }, | 3030 | }, |
| 4031 | { | 3031 | { |
| @@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4048 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3048 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 4049 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3049 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 4050 | OP_ALG_AAI_HMAC_PRECOMP, | 3050 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4051 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 4052 | .geniv = true, | 3051 | .geniv = true, |
| 4053 | }, | 3052 | }, |
| 4054 | }, | 3053 | }, |
| @@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4073 | OP_ALG_AAI_CTR_MOD128, | 3072 | OP_ALG_AAI_CTR_MOD128, |
| 4074 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 3073 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 4075 | OP_ALG_AAI_HMAC_PRECOMP, | 3074 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4076 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 4077 | .rfc3686 = true, | 3075 | .rfc3686 = true, |
| 4078 | }, | 3076 | }, |
| 4079 | }, | 3077 | }, |
| @@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4098 | OP_ALG_AAI_CTR_MOD128, | 3096 | OP_ALG_AAI_CTR_MOD128, |
| 4099 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | 3097 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 4100 | OP_ALG_AAI_HMAC_PRECOMP, | 3098 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4101 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 4102 | .rfc3686 = true, | 3099 | .rfc3686 = true, |
| 4103 | .geniv = true, | 3100 | .geniv = true, |
| 4104 | }, | 3101 | }, |
| @@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4124 | OP_ALG_AAI_CTR_MOD128, | 3121 | OP_ALG_AAI_CTR_MOD128, |
| 4125 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 3122 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 4126 | OP_ALG_AAI_HMAC_PRECOMP, | 3123 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4127 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 4128 | .rfc3686 = true, | 3124 | .rfc3686 = true, |
| 4129 | }, | 3125 | }, |
| 4130 | }, | 3126 | }, |
| @@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4149 | OP_ALG_AAI_CTR_MOD128, | 3145 | OP_ALG_AAI_CTR_MOD128, |
| 4150 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | 3146 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 4151 | OP_ALG_AAI_HMAC_PRECOMP, | 3147 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4152 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 4153 | .rfc3686 = true, | 3148 | .rfc3686 = true, |
| 4154 | .geniv = true, | 3149 | .geniv = true, |
| 4155 | }, | 3150 | }, |
| @@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4175 | OP_ALG_AAI_CTR_MOD128, | 3170 | OP_ALG_AAI_CTR_MOD128, |
| 4176 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 3171 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 4177 | OP_ALG_AAI_HMAC_PRECOMP, | 3172 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4178 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 4179 | .rfc3686 = true, | 3173 | .rfc3686 = true, |
| 4180 | }, | 3174 | }, |
| 4181 | }, | 3175 | }, |
| @@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4200 | OP_ALG_AAI_CTR_MOD128, | 3194 | OP_ALG_AAI_CTR_MOD128, |
| 4201 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 3195 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 4202 | OP_ALG_AAI_HMAC_PRECOMP, | 3196 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4203 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 4204 | .rfc3686 = true, | 3197 | .rfc3686 = true, |
| 4205 | .geniv = true, | 3198 | .geniv = true, |
| 4206 | }, | 3199 | }, |
| @@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4226 | OP_ALG_AAI_CTR_MOD128, | 3219 | OP_ALG_AAI_CTR_MOD128, |
| 4227 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3220 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 4228 | OP_ALG_AAI_HMAC_PRECOMP, | 3221 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4229 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 4230 | .rfc3686 = true, | 3222 | .rfc3686 = true, |
| 4231 | }, | 3223 | }, |
| 4232 | }, | 3224 | }, |
| @@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4251 | OP_ALG_AAI_CTR_MOD128, | 3243 | OP_ALG_AAI_CTR_MOD128, |
| 4252 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3244 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 4253 | OP_ALG_AAI_HMAC_PRECOMP, | 3245 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4254 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 4255 | .rfc3686 = true, | 3246 | .rfc3686 = true, |
| 4256 | .geniv = true, | 3247 | .geniv = true, |
| 4257 | }, | 3248 | }, |
| @@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4277 | OP_ALG_AAI_CTR_MOD128, | 3268 | OP_ALG_AAI_CTR_MOD128, |
| 4278 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3269 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 4279 | OP_ALG_AAI_HMAC_PRECOMP, | 3270 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4280 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 4281 | .rfc3686 = true, | 3271 | .rfc3686 = true, |
| 4282 | }, | 3272 | }, |
| 4283 | }, | 3273 | }, |
| @@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4302 | OP_ALG_AAI_CTR_MOD128, | 3292 | OP_ALG_AAI_CTR_MOD128, |
| 4303 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3293 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 4304 | OP_ALG_AAI_HMAC_PRECOMP, | 3294 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4305 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 4306 | .rfc3686 = true, | 3295 | .rfc3686 = true, |
| 4307 | .geniv = true, | 3296 | .geniv = true, |
| 4308 | }, | 3297 | }, |
| @@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4328 | OP_ALG_AAI_CTR_MOD128, | 3317 | OP_ALG_AAI_CTR_MOD128, |
| 4329 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3318 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 4330 | OP_ALG_AAI_HMAC_PRECOMP, | 3319 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4331 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 4332 | .rfc3686 = true, | 3320 | .rfc3686 = true, |
| 4333 | }, | 3321 | }, |
| 4334 | }, | 3322 | }, |
| @@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = { | |||
| 4353 | OP_ALG_AAI_CTR_MOD128, | 3341 | OP_ALG_AAI_CTR_MOD128, |
| 4354 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3342 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 4355 | OP_ALG_AAI_HMAC_PRECOMP, | 3343 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4356 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 4357 | .rfc3686 = true, | 3344 | .rfc3686 = true, |
| 4358 | .geniv = true, | 3345 | .geniv = true, |
| 4359 | }, | 3346 | }, |
| @@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) | |||
| 4375 | } | 3362 | } |
| 4376 | 3363 | ||
| 4377 | /* copy descriptor header template value */ | 3364 | /* copy descriptor header template value */ |
| 4378 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; | 3365 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
| 4379 | ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; | 3366 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
| 4380 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op; | ||
| 4381 | 3367 | ||
| 4382 | return 0; | 3368 | return 0; |
| 4383 | } | 3369 | } |
| @@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx) | |||
| 4420 | if (ctx->key_dma && | 3406 | if (ctx->key_dma && |
| 4421 | !dma_mapping_error(ctx->jrdev, ctx->key_dma)) | 3407 | !dma_mapping_error(ctx->jrdev, ctx->key_dma)) |
| 4422 | dma_unmap_single(ctx->jrdev, ctx->key_dma, | 3408 | dma_unmap_single(ctx->jrdev, ctx->key_dma, |
| 4423 | ctx->enckeylen + ctx->split_key_pad_len, | 3409 | ctx->cdata.keylen + ctx->adata.keylen_pad, |
| 4424 | DMA_TO_DEVICE); | 3410 | DMA_TO_DEVICE); |
| 4425 | 3411 | ||
| 4426 | caam_jr_free(ctx->jrdev); | 3412 | caam_jr_free(ctx->jrdev); |
| @@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template | |||
| 4498 | 3484 | ||
| 4499 | t_alg->caam.class1_alg_type = template->class1_alg_type; | 3485 | t_alg->caam.class1_alg_type = template->class1_alg_type; |
| 4500 | t_alg->caam.class2_alg_type = template->class2_alg_type; | 3486 | t_alg->caam.class2_alg_type = template->class2_alg_type; |
| 4501 | t_alg->caam.alg_op = template->alg_op; | ||
| 4502 | 3487 | ||
| 4503 | return t_alg; | 3488 | return t_alg; |
| 4504 | } | 3489 | } |
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c new file mode 100644 index 000000000000..f3f48c10b9d6 --- /dev/null +++ b/drivers/crypto/caam/caamalg_desc.c | |||
| @@ -0,0 +1,1306 @@ | |||
| 1 | /* | ||
| 2 | * Shared descriptors for aead, ablkcipher algorithms | ||
| 3 | * | ||
| 4 | * Copyright 2016 NXP | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include "compat.h" | ||
| 8 | #include "desc_constr.h" | ||
| 9 | #include "caamalg_desc.h" | ||
| 10 | |||
| 11 | /* | ||
| 12 | * For aead functions, read payload and write payload, | ||
| 13 | * both of which are specified in req->src and req->dst | ||
| 14 | */ | ||
| 15 | static inline void aead_append_src_dst(u32 *desc, u32 msg_type) | ||
| 16 | { | ||
| 17 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); | ||
| 18 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | | ||
| 19 | KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); | ||
| 20 | } | ||
| 21 | |||
| 22 | /* Set DK bit in class 1 operation if shared */ | ||
| 23 | static inline void append_dec_op1(u32 *desc, u32 type) | ||
| 24 | { | ||
| 25 | u32 *jump_cmd, *uncond_jump_cmd; | ||
| 26 | |||
| 27 | /* DK bit is valid only for AES */ | ||
| 28 | if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { | ||
| 29 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
| 30 | OP_ALG_DECRYPT); | ||
| 31 | return; | ||
| 32 | } | ||
| 33 | |||
| 34 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | ||
| 35 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
| 36 | OP_ALG_DECRYPT); | ||
| 37 | uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); | ||
| 38 | set_jump_tgt_here(desc, jump_cmd); | ||
| 39 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | ||
| 40 | OP_ALG_DECRYPT | OP_ALG_AAI_DK); | ||
| 41 | set_jump_tgt_here(desc, uncond_jump_cmd); | ||
| 42 | } | ||
| 43 | |||
| 44 | /** | ||
| 45 | * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor | ||
| 46 | * (non-protocol) with no (null) encryption. | ||
| 47 | * @desc: pointer to buffer used for descriptor construction | ||
| 48 | * @adata: pointer to authentication transform definitions. Note that since a | ||
| 49 | * split key is to be used, the size of the split key itself is | ||
| 50 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | ||
| 51 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | ||
| 52 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 53 | * | ||
| 54 | * Note: Requires an MDHA split key. | ||
| 55 | */ | ||
| 56 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | ||
| 57 | unsigned int icvsize) | ||
| 58 | { | ||
| 59 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; | ||
| 60 | |||
| 61 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 62 | |||
| 63 | /* Skip if already shared */ | ||
| 64 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 65 | JUMP_COND_SHRD); | ||
| 66 | if (adata->key_inline) | ||
| 67 | append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, | ||
| 68 | adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | | ||
| 69 | KEY_ENC); | ||
| 70 | else | ||
| 71 | append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | | ||
| 72 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 73 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 74 | |||
| 75 | /* assoclen + cryptlen = seqinlen */ | ||
| 76 | append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 77 | |||
| 78 | /* Prepare to read and write cryptlen + assoclen bytes */ | ||
| 79 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 80 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 81 | |||
| 82 | /* | ||
| 83 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 84 | * thus need to do some magic, i.e. self-patch the descriptor | ||
| 85 | * buffer. | ||
| 86 | */ | ||
| 87 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | | ||
| 88 | MOVE_DEST_MATH3 | | ||
| 89 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 90 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | | ||
| 91 | MOVE_DEST_DESCBUF | | ||
| 92 | MOVE_WAITCOMP | | ||
| 93 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 94 | |||
| 95 | /* Class 2 operation */ | ||
| 96 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | ||
| 97 | OP_ALG_ENCRYPT); | ||
| 98 | |||
| 99 | /* Read and write cryptlen bytes */ | ||
| 100 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 101 | |||
| 102 | set_move_tgt_here(desc, read_move_cmd); | ||
| 103 | set_move_tgt_here(desc, write_move_cmd); | ||
| 104 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 105 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | | ||
| 106 | MOVE_AUX_LS); | ||
| 107 | |||
| 108 | /* Write ICV */ | ||
| 109 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | ||
| 110 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 111 | |||
| 112 | #ifdef DEBUG | ||
| 113 | print_hex_dump(KERN_ERR, | ||
| 114 | "aead null enc shdesc@" __stringify(__LINE__)": ", | ||
| 115 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 116 | #endif | ||
| 117 | } | ||
| 118 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); | ||
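As a rough usage sketch, a caller fills a struct alginfo with the MDHA split key and the class 2 algorithm type, then points the constructor at a descriptor buffer. The split_key* names and the SHA256_DIGEST_SIZE truncation below are assumptions for illustration, not taken from this patch:

	u32 desc[CAAM_DESC_BYTES_MAX / sizeof(u32)];	/* assumed sizing */
	struct alginfo adata = { 0 };

	adata.algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256 |
			OP_ALG_AAI_HMAC_PRECOMP;
	adata.keylen = split_key_len;		/* hypothetical MDHA split key size */
	adata.keylen_pad = split_key_pad_len;	/* hypothetical padded size */
	adata.key_virt = split_key;		/* or key_dma with key_inline = false */
	adata.key_inline = true;

	cnstr_shdsc_aead_null_encap(desc, &adata, SHA256_DIGEST_SIZE);
	/* the buffer must be DMA-mapped/synced before a job ring uses it */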
| 119 | |||
| 120 | /** | ||
| 121 | * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor | ||
| 122 | * (non-protocol) with no (null) decryption. | ||
| 123 | * @desc: pointer to buffer used for descriptor construction | ||
| 124 | * @adata: pointer to authentication transform definitions. Note that since a | ||
| 125 | * split key is to be used, the size of the split key itself is | ||
| 126 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | ||
| 127 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | ||
| 128 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 129 | * | ||
| 130 | * Note: Requires an MDHA split key. | ||
| 131 | */ | ||
| 132 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | ||
| 133 | unsigned int icvsize) | ||
| 134 | { | ||
| 135 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; | ||
| 136 | |||
| 137 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 138 | |||
| 139 | /* Skip if already shared */ | ||
| 140 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 141 | JUMP_COND_SHRD); | ||
| 142 | if (adata->key_inline) | ||
| 143 | append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, | ||
| 144 | adata->keylen, CLASS_2 | | ||
| 145 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 146 | else | ||
| 147 | append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | | ||
| 148 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 149 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 150 | |||
| 151 | /* Class 2 operation */ | ||
| 152 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | ||
| 153 | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 154 | |||
| 155 | /* assoclen + cryptlen = seqoutlen */ | ||
| 156 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 157 | |||
| 158 | /* Prepare to read and write cryptlen + assoclen bytes */ | ||
| 159 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | ||
| 160 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); | ||
| 161 | |||
| 162 | /* | ||
| 163 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 164 | * thus need to do some magic, i.e. self-patch the descriptor | ||
| 165 | * buffer. | ||
| 166 | */ | ||
| 167 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | | ||
| 168 | MOVE_DEST_MATH2 | | ||
| 169 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 170 | write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | | ||
| 171 | MOVE_DEST_DESCBUF | | ||
| 172 | MOVE_WAITCOMP | | ||
| 173 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 174 | |||
| 175 | /* Read and write cryptlen bytes */ | ||
| 176 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 177 | |||
| 178 | /* | ||
| 179 | * Insert a NOP here, since we need at least 4 instructions between | ||
| 180 | * code patching the descriptor buffer and the location being patched. | ||
| 181 | */ | ||
| 182 | jump_cmd = append_jump(desc, JUMP_TEST_ALL); | ||
| 183 | set_jump_tgt_here(desc, jump_cmd); | ||
| 184 | |||
| 185 | set_move_tgt_here(desc, read_move_cmd); | ||
| 186 | set_move_tgt_here(desc, write_move_cmd); | ||
| 187 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 188 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | | ||
| 189 | MOVE_AUX_LS); | ||
| 190 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 191 | |||
| 192 | /* Load ICV */ | ||
| 193 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | | ||
| 194 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | ||
| 195 | |||
| 196 | #ifdef DEBUG | ||
| 197 | print_hex_dump(KERN_ERR, | ||
| 198 | "aead null dec shdesc@" __stringify(__LINE__)": ", | ||
| 199 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 200 | #endif | ||
| 201 | } | ||
| 202 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); | ||
| 203 | |||
| 204 | static void init_sh_desc_key_aead(u32 * const desc, | ||
| 205 | struct alginfo * const cdata, | ||
| 206 | struct alginfo * const adata, | ||
| 207 | const bool is_rfc3686, u32 *nonce) | ||
| 208 | { | ||
| 209 | u32 *key_jump_cmd; | ||
| 210 | unsigned int enckeylen = cdata->keylen; | ||
| 211 | |||
| 212 | /* Note: Context registers are saved. */ | ||
| 213 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 214 | |||
| 215 | /* Skip if already shared */ | ||
| 216 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 217 | JUMP_COND_SHRD); | ||
| 218 | |||
| 219 | /* | ||
| 220 | * RFC3686 specific: | ||
| 221 | * | key = {AUTH_KEY, ENC_KEY, NONCE} | ||
| 222 | * | enckeylen = encryption key size + nonce size | ||
| 223 | */ | ||
| 224 | if (is_rfc3686) | ||
| 225 | enckeylen -= CTR_RFC3686_NONCE_SIZE; | ||
| 226 | |||
| 227 | if (adata->key_inline) | ||
| 228 | append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, | ||
| 229 | adata->keylen, CLASS_2 | | ||
| 230 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 231 | else | ||
| 232 | append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | | ||
| 233 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 234 | |||
| 235 | if (cdata->key_inline) | ||
| 236 | append_key_as_imm(desc, cdata->key_virt, enckeylen, | ||
| 237 | enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 238 | else | ||
| 239 | append_key(desc, cdata->key_dma, enckeylen, CLASS_1 | | ||
| 240 | KEY_DEST_CLASS_REG); | ||
| 241 | |||
| 242 | /* Load Counter into CONTEXT1 reg */ | ||
| 243 | if (is_rfc3686) { | ||
| 244 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 245 | LDST_CLASS_IND_CCB | | ||
| 246 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 247 | append_move(desc, | ||
| 248 | MOVE_SRC_OUTFIFO | | ||
| 249 | MOVE_DEST_CLASS1CTX | | ||
| 250 | (16 << MOVE_OFFSET_SHIFT) | | ||
| 251 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 252 | } | ||
| 253 | |||
| 254 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 255 | } | ||
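For the rfc3686 case, the key blob handed down by the crypto API carries the authentication key, the encryption key and the 4-byte nonce back to back, as the comment above notes. A setkey-side sketch of locating the pieces with crypto_authenc_extractkeys() (local names are assumptions):

	struct crypto_authenc_keys keys;
	const u8 *nonce;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/* keys.enckeylen still counts the trailing 4-byte nonce;
	 * init_sh_desc_key_aead() subtracts it again before loading */
	nonce = keys.enckey + keys.enckeylen - CTR_RFC3686_NONCE_SIZE;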
| 256 | |||
| 257 | /** | ||
| 258 | * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor | ||
| 259 | * (non-protocol). | ||
| 260 | * @desc: pointer to buffer used for descriptor construction | ||
| 261 | * @cdata: pointer to block cipher transform definitions | ||
| 262 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | ||
| 263 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | ||
| 264 | * @adata: pointer to authentication transform definitions. Note that since a | ||
| 265 | * split key is to be used, the size of the split key itself is | ||
| 266 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | ||
| 267 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | ||
| 268 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 269 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | ||
| 270 | * @nonce: pointer to rfc3686 nonce | ||
| 271 | * @ctx1_iv_off: IV offset in CONTEXT1 register | ||
| 272 | * | ||
| 273 | * Note: Requires an MDHA split key. | ||
| 274 | */ | ||
| 275 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | ||
| 276 | struct alginfo *adata, unsigned int icvsize, | ||
| 277 | const bool is_rfc3686, u32 *nonce, | ||
| 278 | const u32 ctx1_iv_off) | ||
| 279 | { | ||
| 280 | /* Note: Context registers are saved. */ | ||
| 281 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); | ||
| 282 | |||
| 283 | /* Class 2 operation */ | ||
| 284 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | ||
| 285 | OP_ALG_ENCRYPT); | ||
| 286 | |||
| 287 | /* Read and write assoclen bytes */ | ||
| 288 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 289 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 290 | |||
| 291 | /* Skip assoc data */ | ||
| 292 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 293 | |||
| 294 | /* read assoc before reading payload */ | ||
| 295 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 296 | FIFOLDST_VLF); | ||
| 297 | |||
| 298 | /* Load Counter into CONTEXT1 reg */ | ||
| 299 | if (is_rfc3686) | ||
| 300 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 301 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 302 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 303 | LDST_OFFSET_SHIFT)); | ||
| 304 | |||
| 305 | /* Class 1 operation */ | ||
| 306 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 307 | OP_ALG_ENCRYPT); | ||
| 308 | |||
| 309 | /* Read and write cryptlen bytes */ | ||
| 310 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 311 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 312 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); | ||
| 313 | |||
| 314 | /* Write ICV */ | ||
| 315 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | ||
| 316 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 317 | |||
| 318 | #ifdef DEBUG | ||
| 319 | print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ", | ||
| 320 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 321 | #endif | ||
| 322 | } | ||
| 323 | EXPORT_SYMBOL(cnstr_shdsc_aead_encap); | ||
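Whether each key travels inline in the descriptor or by DMA reference is the caller's decision through key_inline. A plausible policy, sketched here with DESC_AEAD_ENC_LEN standing in for a caller-side worst-case descriptor length (an assumption, not defined in this file), is to inline only while the result still fits the hardware limit:

	if (DESC_AEAD_ENC_LEN + adata.keylen_pad + cdata.keylen <=
	    CAAM_DESC_BYTES_MAX) {
		adata.key_inline = true;
		cdata.key_inline = true;
	} else {
		adata.key_inline = false;	/* descriptor uses adata.key_dma */
		cdata.key_inline = false;	/* descriptor uses cdata.key_dma */
	}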
| 324 | |||
| 325 | /** | ||
| 326 | * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor | ||
| 327 | * (non-protocol). | ||
| 328 | * @desc: pointer to buffer used for descriptor construction | ||
| 329 | * @cdata: pointer to block cipher transform definitions | ||
| 330 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | ||
| 331 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | ||
| 332 | * @adata: pointer to authentication transform definitions. Note that since a | ||
| 333 | * split key is to be used, the size of the split key itself is | ||
| 334 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | ||
| 335 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | ||
| 336 | * @ivsize: initialization vector size | ||
| 337 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
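| | * @geniv: whether to generate Encrypted Chain IV | ||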
| 338 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | ||
| 339 | * @nonce: pointer to rfc3686 nonce | ||
| 340 | * @ctx1_iv_off: IV offset in CONTEXT1 register | ||
| 341 | * | ||
| 342 | * Note: Requires an MDHA split key. | ||
| 343 | */ | ||
| 344 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | ||
| 345 | struct alginfo *adata, unsigned int ivsize, | ||
| 346 | unsigned int icvsize, const bool geniv, | ||
| 347 | const bool is_rfc3686, u32 *nonce, | ||
| 348 | const u32 ctx1_iv_off) | ||
| 349 | { | ||
| 350 | /* Note: Context registers are saved. */ | ||
| 351 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); | ||
| 352 | |||
| 353 | /* Class 2 operation */ | ||
| 354 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | ||
| 355 | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 356 | |||
| 357 | /* Read and write assoclen bytes */ | ||
| 358 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 359 | if (geniv) | ||
| 360 | append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); | ||
| 361 | else | ||
| 362 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 363 | |||
| 364 | /* Skip assoc data */ | ||
| 365 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 366 | |||
| 367 | /* read assoc before reading payload */ | ||
| 368 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 369 | KEY_VLF); | ||
| 370 | |||
| 371 | if (geniv) { | ||
| 372 | append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | | ||
| 373 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 374 | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 375 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | | ||
| 376 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); | ||
| 377 | } | ||
| 378 | |||
| 379 | /* Load Counter into CONTEXT1 reg */ | ||
| 380 | if (is_rfc3686) | ||
| 381 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 382 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 383 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 384 | LDST_OFFSET_SHIFT)); | ||
| 385 | |||
| 386 | /* Choose operation */ | ||
| 387 | if (ctx1_iv_off) | ||
| 388 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 389 | OP_ALG_DECRYPT); | ||
| 390 | else | ||
| 391 | append_dec_op1(desc, cdata->algtype); | ||
| 392 | |||
| 393 | /* Read and write cryptlen bytes */ | ||
| 394 | append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 395 | append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 396 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG); | ||
| 397 | |||
| 398 | /* Load ICV */ | ||
| 399 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | | ||
| 400 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | ||
| 401 | |||
| 402 | #ifdef DEBUG | ||
| 403 | print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ", | ||
| 404 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 405 | #endif | ||
| 406 | } | ||
| 407 | EXPORT_SYMBOL(cnstr_shdsc_aead_decap); | ||
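Since the descriptor ends by loading the ICV with FIFOLD_TYPE_ICV under OP_ALG_ICV_ON, the hardware performs the tag comparison itself and reports a mismatch in the job status word. A completion-callback fragment would then translate that into -EBADMSG, roughly (a sketch; the surrounding status handling is assumed):

	if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;	/* authentication (ICV) check failed */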
| 408 | |||
| 409 | /** | ||
| 410 | * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor | ||
| 411 | * (non-protocol) with HW-generated initialization | ||
| 412 | * vector. | ||
| 413 | * @desc: pointer to buffer used for descriptor construction | ||
| 414 | * @cdata: pointer to block cipher transform definitions | ||
| 415 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | ||
| 416 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | ||
| 417 | * @adata: pointer to authentication transform definitions. Note that since a | ||
| 418 | * split key is to be used, the size of the split key itself is | ||
| 419 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | ||
| 420 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | ||
| 421 | * @ivsize: initialization vector size | ||
| 422 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 423 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | ||
| 424 | * @nonce: pointer to rfc3686 nonce | ||
| 425 | * @ctx1_iv_off: IV offset in CONTEXT1 register | ||
| 426 | * | ||
| 427 | * Note: Requires an MDHA split key. | ||
| 428 | */ | ||
| 429 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, | ||
| 430 | struct alginfo *adata, unsigned int ivsize, | ||
| 431 | unsigned int icvsize, const bool is_rfc3686, | ||
| 432 | u32 *nonce, const u32 ctx1_iv_off) | ||
| 433 | { | ||
| 434 | u32 geniv, moveiv; | ||
| 435 | |||
| 436 | /* Note: Context registers are saved. */ | ||
| 437 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); | ||
| 438 | |||
| 439 | if (is_rfc3686) | ||
| 440 | goto copy_iv; | ||
| 441 | |||
| 442 | /* Generate IV */ | ||
| 443 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | ||
| 444 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | ||
| 445 | NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
| 446 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | ||
| 447 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
| 448 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 449 | append_move(desc, MOVE_WAITCOMP | | ||
| 450 | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | | ||
| 451 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | ||
| 452 | (ivsize << MOVE_LEN_SHIFT)); | ||
| 453 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 454 | |||
| 455 | copy_iv: | ||
| 456 | /* Copy IV from class 1 context to output FIFO */ | ||
| 457 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | | ||
| 458 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | ||
| 459 | (ivsize << MOVE_LEN_SHIFT)); | ||
| 460 | |||
| 461 | /* Class 2 operation (return to the encryption flow) */ | ||
| 462 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | ||
| 463 | OP_ALG_ENCRYPT); | ||
| 464 | |||
| 465 | /* Read and write assoclen bytes */ | ||
| 466 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 467 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 468 | |||
| 469 | /* Skip assoc data */ | ||
| 470 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 471 | |||
| 472 | /* read assoc before reading payload */ | ||
| 473 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 474 | KEY_VLF); | ||
| 475 | |||
| 476 | /* Copy iv from outfifo to class 2 fifo */ | ||
| 477 | moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | | ||
| 478 | NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
| 479 | append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | | ||
| 480 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
| 481 | append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | | ||
| 482 | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); | ||
| 483 | |||
| 484 | /* Load Counter into CONTEXT1 reg */ | ||
| 485 | if (is_rfc3686) | ||
| 486 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 487 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 488 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 489 | LDST_OFFSET_SHIFT)); | ||
| 490 | |||
| 491 | /* Class 1 operation */ | ||
| 492 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 493 | OP_ALG_ENCRYPT); | ||
| 494 | |||
| 495 | /* Will write ivsize + cryptlen */ | ||
| 496 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 497 | |||
| 498 | /* No need to reload the IV */ | ||
| 499 | append_seq_fifo_load(desc, ivsize, | ||
| 500 | FIFOLD_CLASS_SKIP); | ||
| 501 | |||
| 502 | /* Will read cryptlen */ | ||
| 503 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 504 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | | ||
| 505 | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); | ||
| 506 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); | ||
| 507 | |||
| 508 | /* Write ICV */ | ||
| 509 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | ||
| 510 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 511 | |||
| 512 | #ifdef DEBUG | ||
| 513 | print_hex_dump(KERN_ERR, | ||
| 514 | "aead givenc shdesc@" __stringify(__LINE__)": ", | ||
| 515 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 516 | #endif | ||
| 517 | } | ||
| 518 | EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); | ||
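Which encapsulation constructor a transform ends up with follows the template's geniv flag, i.e. the .geniv = true entries in driver_aeads earlier in this patch. Schematically, with variable names mirroring caamalg.c as an assumption:

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata,
					  &ctx->adata, ivsize, ctx->authsize,
					  is_rfc3686, nonce, ctx1_iv_off);
	else
		cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata,
				       &ctx->adata, ctx->authsize,
				       is_rfc3686, nonce, ctx1_iv_off);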
| 519 | |||
| 520 | /** | ||
| 521 | * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor | ||
| 522 | * @desc: pointer to buffer used for descriptor construction | ||
| 523 | * @cdata: pointer to block cipher transform definitions | ||
| 524 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. | ||
| 525 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 526 | */ | ||
| 527 | void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, | ||
| 528 | unsigned int icvsize) | ||
| 529 | { | ||
| 530 | u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1, | ||
| 531 | *zero_assoc_jump_cmd2; | ||
| 532 | |||
| 533 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 534 | |||
| 535 | /* skip key loading if the key is already loaded due to sharing */ | ||
| 536 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 537 | JUMP_COND_SHRD | JUMP_COND_SELF); | ||
| 538 | if (cdata->key_inline) | ||
| 539 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 540 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 541 | else | ||
| 542 | append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | | ||
| 543 | KEY_DEST_CLASS_REG); | ||
| 544 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 545 | |||
| 546 | /* class 1 operation */ | ||
| 547 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 548 | OP_ALG_ENCRYPT); | ||
| 549 | |||
| 550 | /* if assoclen + cryptlen is ZERO, skip to ICV write */ | ||
| 551 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 552 | zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | | ||
| 553 | JUMP_COND_MATH_Z); | ||
| 554 | |||
| 555 | /* if assoclen is ZERO, skip reading the assoc data */ | ||
| 556 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 557 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
| 558 | JUMP_COND_MATH_Z); | ||
| 559 | |||
| 560 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 561 | |||
| 562 | /* skip assoc data */ | ||
| 563 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 564 | |||
| 565 | /* cryptlen = seqinlen - assoclen */ | ||
| 566 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
| 567 | |||
| 568 | /* if cryptlen is ZERO jump to zero-payload commands */ | ||
| 569 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | ||
| 570 | JUMP_COND_MATH_Z); | ||
| 571 | |||
| 572 | /* read assoc data */ | ||
| 573 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 574 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 575 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); | ||
| 576 | |||
| 577 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 578 | |||
| 579 | /* write encrypted data */ | ||
| 580 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 581 | |||
| 582 | /* read payload data */ | ||
| 583 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 584 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
| 585 | |||
| 586 | /* jump over the zero-payload commands */ | ||
| 587 | append_jump(desc, JUMP_TEST_ALL | 2); | ||
| 588 | |||
| 589 | /* zero-payload commands */ | ||
| 590 | set_jump_tgt_here(desc, zero_payload_jump_cmd); | ||
| 591 | |||
| 592 | /* read assoc data */ | ||
| 593 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 594 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); | ||
| 595 | |||
| 596 | /* There is no input data */ | ||
| 597 | set_jump_tgt_here(desc, zero_assoc_jump_cmd2); | ||
| 598 | |||
| 599 | /* write ICV */ | ||
| 600 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | ||
| 601 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 602 | |||
| 603 | #ifdef DEBUG | ||
| 604 | print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ", | ||
| 605 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 606 | #endif | ||
| 607 | } | ||
| 608 | EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); | ||
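GCM uses a raw class 1 AES key rather than an MDHA split key, so populating cdata is simpler. A minimal sketch, assuming a 128-bit key and the full 16-byte tag:

	struct alginfo cdata = { 0 };

	cdata.algtype = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			OP_ALG_AAI_GCM;
	cdata.keylen = AES_KEYSIZE_128;
	cdata.key_virt = key;			/* assumed raw key pointer */
	cdata.key_inline = true;

	cnstr_shdsc_gcm_encap(desc, &cdata, AES_BLOCK_SIZE);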
| 609 | |||
| 610 | /** | ||
| 611 | * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor | ||
| 612 | * @desc: pointer to buffer used for descriptor construction | ||
| 613 | * @cdata: pointer to block cipher transform definitions | ||
| 614 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. | ||
| 615 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 616 | */ | ||
| 617 | void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, | ||
| 618 | unsigned int icvsize) | ||
| 619 | { | ||
| 620 | u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1; | ||
| 621 | |||
| 622 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 623 | |||
| 624 | /* skip key loading if the key is already loaded due to sharing */ | ||
| 625 | key_jump_cmd = append_jump(desc, JUMP_JSL | | ||
| 626 | JUMP_TEST_ALL | JUMP_COND_SHRD | | ||
| 627 | JUMP_COND_SELF); | ||
| 628 | if (cdata->key_inline) | ||
| 629 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 630 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 631 | else | ||
| 632 | append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | | ||
| 633 | KEY_DEST_CLASS_REG); | ||
| 634 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 635 | |||
| 636 | /* class 1 operation */ | ||
| 637 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 638 | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 639 | |||
| 640 | /* if assoclen is ZERO, skip reading the assoc data */ | ||
| 641 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 642 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
| 643 | JUMP_COND_MATH_Z); | ||
| 644 | |||
| 645 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 646 | |||
| 647 | /* skip assoc data */ | ||
| 648 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 649 | |||
| 650 | /* read assoc data */ | ||
| 651 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 652 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 653 | |||
| 654 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); | ||
| 655 | |||
| 656 | /* cryptlen = seqoutlen - assoclen */ | ||
| 657 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 658 | |||
| 659 | /* jump to zero-payload command if cryptlen is zero */ | ||
| 660 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | ||
| 661 | JUMP_COND_MATH_Z); | ||
| 662 | |||
| 663 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 664 | |||
| 665 | /* store encrypted data */ | ||
| 666 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 667 | |||
| 668 | /* read payload data */ | ||
| 669 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 670 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 671 | |||
| 672 | /* zero-payload command */ | ||
| 673 | set_jump_tgt_here(desc, zero_payload_jump_cmd); | ||
| 674 | |||
| 675 | /* read ICV */ | ||
| 676 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | ||
| 677 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | ||
| 678 | |||
| 679 | #ifdef DEBUG | ||
| 680 | print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ", | ||
| 681 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 682 | #endif | ||
| 683 | } | ||
| 684 | EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); | ||
| 685 | |||
| 686 | /** | ||
| 687 | * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor | ||
| 688 | * (non-protocol). | ||
| 689 | * @desc: pointer to buffer used for descriptor construction | ||
| 690 | * @cdata: pointer to block cipher transform definitions | ||
| 691 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. | ||
| 692 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 693 | */ | ||
| 694 | void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | ||
| 695 | unsigned int icvsize) | ||
| 696 | { | ||
| 697 | u32 *key_jump_cmd; | ||
| 698 | |||
| 699 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 700 | |||
| 701 | /* Skip key loading if it is loaded due to sharing */ | ||
| 702 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 703 | JUMP_COND_SHRD); | ||
| 704 | if (cdata->key_inline) | ||
| 705 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 706 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 707 | else | ||
| 708 | append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | | ||
| 709 | KEY_DEST_CLASS_REG); | ||
| 710 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 711 | |||
| 712 | /* Class 1 operation */ | ||
| 713 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 714 | OP_ALG_ENCRYPT); | ||
| 715 | |||
| 716 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); | ||
| 717 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 718 | |||
| 719 | /* Read assoc data */ | ||
| 720 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 721 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 722 | |||
| 723 | /* Skip IV */ | ||
| 724 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 725 | |||
| 726 | /* Will read cryptlen bytes */ | ||
| 727 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 728 | |||
| 729 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | ||
| 730 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | ||
| 731 | |||
| 732 | /* Skip assoc data */ | ||
| 733 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 734 | |||
| 735 | /* cryptlen = seqoutlen - assoclen */ | ||
| 736 | append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 737 | |||
| 738 | /* Write encrypted data */ | ||
| 739 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 740 | |||
| 741 | /* Read payload data */ | ||
| 742 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 743 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
| 744 | |||
| 745 | /* Write ICV */ | ||
| 746 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | ||
| 747 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 748 | |||
| 749 | #ifdef DEBUG | ||
| 750 | print_hex_dump(KERN_ERR, | ||
| 751 | "rfc4106 enc shdesc@" __stringify(__LINE__)": ", | ||
| 752 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 753 | #endif | ||
| 754 | } | ||
| 755 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); | ||
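The subtract-8-then-skip-8 sequence above reflects how rfc4106 presents its input: req->assoclen counts the 8-byte IV that trails the associated data, but only the real AAD may be authenticated. Schematically (an illustration, not code from this patch):

	/*
	 * Input sequence as the descriptor consumes it:
	 *
	 *   | AAD (assoclen - 8) | IV (8 bytes, skipped) | payload ... |
	 *
	 * hence VARSEQINLEN = assoclen - 8 before the AAD FIFO LOAD.
	 */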
| 756 | |||
| 757 | /** | ||
| 758 | * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor | ||
| 759 | * (non-protocol). | ||
| 760 | * @desc: pointer to buffer used for descriptor construction | ||
| 761 | * @cdata: pointer to block cipher transform definitions | ||
| 762 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. | ||
| 763 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 764 | */ | ||
| 765 | void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, | ||
| 766 | unsigned int icvsize) | ||
| 767 | { | ||
| 768 | u32 *key_jump_cmd; | ||
| 769 | |||
| 770 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 771 | |||
| 772 | /* Skip key loading if it is loaded due to sharing */ | ||
| 773 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 774 | JUMP_COND_SHRD); | ||
| 775 | if (cdata->key_inline) | ||
| 776 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 777 | cdata->keylen, CLASS_1 | | ||
| 778 | KEY_DEST_CLASS_REG); | ||
| 779 | else | ||
| 780 | append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | | ||
| 781 | KEY_DEST_CLASS_REG); | ||
| 782 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 783 | |||
| 784 | /* Class 1 operation */ | ||
| 785 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 786 | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 787 | |||
| 788 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); | ||
| 789 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 790 | |||
| 791 | /* Read assoc data */ | ||
| 792 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 793 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
| 794 | |||
| 795 | /* Skip IV */ | ||
| 796 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 797 | |||
| 798 | /* Will read cryptlen bytes */ | ||
| 799 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); | ||
| 800 | |||
| 801 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | ||
| 802 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | ||
| 803 | |||
| 804 | /* Skip assoc data */ | ||
| 805 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 806 | |||
| 807 | /* Will write cryptlen bytes */ | ||
| 808 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 809 | |||
| 810 | /* Store payload data */ | ||
| 811 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 812 | |||
| 813 | /* Read encrypted data */ | ||
| 814 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
| 815 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | ||
| 816 | |||
| 817 | /* Read ICV */ | ||
| 818 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | ||
| 819 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | ||
| 820 | |||
| 821 | #ifdef DEBUG | ||
| 822 | print_hex_dump(KERN_ERR, | ||
| 823 | "rfc4106 dec shdesc@" __stringify(__LINE__)": ", | ||
| 824 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 825 | #endif | ||
| 826 | } | ||
| 827 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); | ||
| 828 | |||
| 829 | /** | ||
| 830 | * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor | ||
| 831 | * (non-protocol). | ||
| 832 | * @desc: pointer to buffer used for descriptor construction | ||
| 833 | * @cdata: pointer to block cipher transform definitions | ||
| 834 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. | ||
| 835 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 836 | */ | ||
| 837 | void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, | ||
| 838 | unsigned int icvsize) | ||
| 839 | { | ||
| 840 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; | ||
| 841 | |||
| 842 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 843 | |||
| 844 | /* Skip key loading if it is loaded due to sharing */ | ||
| 845 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 846 | JUMP_COND_SHRD); | ||
| 847 | if (cdata->key_inline) | ||
| 848 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 849 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 850 | else | ||
| 851 | append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | | ||
| 852 | KEY_DEST_CLASS_REG); | ||
| 853 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 854 | |||
| 855 | /* Class 1 operation */ | ||
| 856 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 857 | OP_ALG_ENCRYPT); | ||
| 858 | |||
| 859 | /* assoclen + cryptlen = seqinlen */ | ||
| 860 | append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 861 | |||
| 862 | /* | ||
| 863 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 864 | * thus need to do some magic, i.e. self-patch the descriptor | ||
| 865 | * buffer. | ||
| 866 | */ | ||
| 867 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | | ||
| 868 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 869 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | ||
| 870 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 871 | |||
| 872 | /* Will read assoclen + cryptlen bytes */ | ||
| 873 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 874 | |||
| 875 | /* Will write assoclen + cryptlen bytes */ | ||
| 876 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 877 | |||
| 878 | /* Read and write assoclen + cryptlen bytes */ | ||
| 879 | aead_append_src_dst(desc, FIFOLD_TYPE_AAD); | ||
| 880 | |||
| 881 | set_move_tgt_here(desc, read_move_cmd); | ||
| 882 | set_move_tgt_here(desc, write_move_cmd); | ||
| 883 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 884 | /* Move payload data to OFIFO */ | ||
| 885 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); | ||
| 886 | |||
| 887 | /* Write ICV */ | ||
| 888 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | ||
| 889 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 890 | |||
| 891 | #ifdef DEBUG | ||
| 892 | print_hex_dump(KERN_ERR, | ||
| 893 | "rfc4543 enc shdesc@" __stringify(__LINE__)": ", | ||
| 894 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 895 | #endif | ||
| 896 | } | ||
| 897 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); | ||
| 898 | |||
| 899 | /** | ||
| 900 | * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor | ||
| 901 | * (non-protocol). | ||
| 902 | * @desc: pointer to buffer used for descriptor construction | ||
| 903 | * @cdata: pointer to block cipher transform definitions | ||
| 904 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. | ||
| 905 | * @icvsize: integrity check value (ICV) size (truncated or full) | ||
| 906 | */ | ||
| 907 | void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, | ||
| 908 | unsigned int icvsize) | ||
| 909 | { | ||
| 910 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; | ||
| 911 | |||
| 912 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 913 | |||
| 914 | /* Skip key loading if it is loaded due to sharing */ | ||
| 915 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 916 | JUMP_COND_SHRD); | ||
| 917 | if (cdata->key_inline) | ||
| 918 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 919 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 920 | else | ||
| 921 | append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | | ||
| 922 | KEY_DEST_CLASS_REG); | ||
| 923 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 924 | |||
| 925 | /* Class 1 operation */ | ||
| 926 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 927 | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | ||
| 928 | |||
| 929 | /* assoclen + cryptlen = seqoutlen */ | ||
| 930 | append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 931 | |||
| 932 | /* | ||
| 933 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
| 934 | * thus need to do some magic, i.e. self-patch the descriptor | ||
| 935 | * buffer. | ||
| 936 | */ | ||
| 937 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | | ||
| 938 | (0x6 << MOVE_LEN_SHIFT)); | ||
| 939 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | ||
| 940 | (0x8 << MOVE_LEN_SHIFT)); | ||
| 941 | |||
| 942 | /* Will read assoclen + cryptlen bytes */ | ||
| 943 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 944 | |||
| 945 | /* Will write assoclen + cryptlen bytes */ | ||
| 946 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 947 | |||
| 948 | /* Store payload data */ | ||
| 949 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
| 950 | |||
| 951 | /* In-snoop assoclen + cryptlen data */ | ||
| 952 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | | ||
| 953 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); | ||
| 954 | |||
| 955 | set_move_tgt_here(desc, read_move_cmd); | ||
| 956 | set_move_tgt_here(desc, write_move_cmd); | ||
| 957 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 958 | /* Move payload data to OFIFO */ | ||
| 959 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); | ||
| 960 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 961 | |||
| 962 | /* Read ICV */ | ||
| 963 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | ||
| 964 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | ||
| 965 | |||
| 966 | #ifdef DEBUG | ||
| 967 | print_hex_dump(KERN_ERR, | ||
| 968 | "rfc4543 dec shdesc@" __stringify(__LINE__)": ", | ||
| 969 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 970 | #endif | ||
| 971 | } | ||
| 972 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); | ||
| 973 | |||
| 974 | /* | ||
| 975 | * For ablkcipher encrypt and decrypt, read from req->src and | ||
| 976 | * write to req->dst | ||
| 977 | */ | ||
| 978 | static inline void ablkcipher_append_src_dst(u32 *desc) | ||
| 979 | { | ||
| 980 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 981 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 982 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | | ||
| 983 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
| 984 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); | ||
| 985 | } | ||
| 986 | |||
| 987 | /** | ||
| 988 | * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor | ||
| 989 | * @desc: pointer to buffer used for descriptor construction | ||
| 990 | * @cdata: pointer to block cipher transform definitions | ||
| 991 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | ||
| 992 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | ||
| 993 | * @ivsize: initialization vector size | ||
| 994 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | ||
| 995 | * @ctx1_iv_off: IV offset in CONTEXT1 register | ||
| 996 | */ | ||
| 997 | void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, | ||
| 998 | unsigned int ivsize, const bool is_rfc3686, | ||
| 999 | const u32 ctx1_iv_off) | ||
| 1000 | { | ||
| 1001 | u32 *key_jump_cmd; | ||
| 1002 | |||
| 1003 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 1004 | /* Skip if already shared */ | ||
| 1005 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1006 | JUMP_COND_SHRD); | ||
| 1007 | |||
| 1008 | /* Load class1 key only */ | ||
| 1009 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 1010 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1011 | |||
| 1012 | /* Load nonce into CONTEXT1 reg */ | ||
| 1013 | if (is_rfc3686) { | ||
| 1014 | u8 *nonce = cdata->key_virt + cdata->keylen; | ||
| 1015 | |||
| 1016 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 1017 | LDST_CLASS_IND_CCB | | ||
| 1018 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1019 | append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | | ||
| 1020 | MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | | ||
| 1021 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1025 | |||
| 1026 | /* Load iv */ | ||
| 1027 | append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | | ||
| 1028 | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 1029 | |||
| 1030 | /* Load counter into CONTEXT1 reg */ | ||
| 1031 | if (is_rfc3686) | ||
| 1032 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 1033 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1034 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 1035 | LDST_OFFSET_SHIFT)); | ||
| 1036 | |||
| 1037 | /* Load operation */ | ||
| 1038 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 1039 | OP_ALG_ENCRYPT); | ||
| 1040 | |||
| 1041 | /* Perform operation */ | ||
| 1042 | ablkcipher_append_src_dst(desc); | ||
| 1043 | |||
| 1044 | #ifdef DEBUG | ||
| 1045 | print_hex_dump(KERN_ERR, | ||
| 1046 | "ablkcipher enc shdesc@" __stringify(__LINE__)": ", | ||
| 1047 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1048 | #endif | ||
| 1049 | } | ||
| 1050 | EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap); | ||
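The ctx1_iv_off argument tells the constructor where the IV sits inside the CONTEXT1 register. The convention the caamalg.c callers follow, reproduced here as a sketch:

	u32 ctx1_iv_off = 0;			/* CBC: IV at offset 0 */

	if (ctr_mode)
		ctx1_iv_off = 16;		/* CTR: CONTEXT1[255:128] = IV */
	if (is_rfc3686)				/* {NONCE, IV, COUNTER} layout */
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	cnstr_shdsc_ablkcipher_encap(desc, &cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);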
| 1051 | |||
| 1052 | /** | ||
| 1053 | * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor | ||
| 1054 | * @desc: pointer to buffer used for descriptor construction | ||
| 1055 | * @cdata: pointer to block cipher transform definitions | ||
| 1056 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | ||
| 1057 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | ||
| 1058 | * @ivsize: initialization vector size | ||
| 1059 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | ||
| 1060 | * @ctx1_iv_off: IV offset in CONTEXT1 register | ||
| 1061 | */ | ||
| 1062 | void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, | ||
| 1063 | unsigned int ivsize, const bool is_rfc3686, | ||
| 1064 | const u32 ctx1_iv_off) | ||
| 1065 | { | ||
| 1066 | u32 *key_jump_cmd; | ||
| 1067 | |||
| 1068 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 1069 | /* Skip if already shared */ | ||
| 1070 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1071 | JUMP_COND_SHRD); | ||
| 1072 | |||
| 1073 | /* Load class1 key only */ | ||
| 1074 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 1075 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1076 | |||
| 1077 | /* Load nonce into CONTEXT1 reg */ | ||
| 1078 | if (is_rfc3686) { | ||
| 1079 | u8 *nonce = cdata->key_virt + cdata->keylen; | ||
| 1080 | |||
| 1081 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 1082 | LDST_CLASS_IND_CCB | | ||
| 1083 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1084 | append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | | ||
| 1085 | MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | | ||
| 1086 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1090 | |||
| 1091 | /* load IV */ | ||
| 1092 | append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | | ||
| 1093 | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 1094 | |||
| 1095 | /* Load counter into CONTEXT1 reg */ | ||
| 1096 | if (is_rfc3686) | ||
| 1097 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 1098 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1099 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 1100 | LDST_OFFSET_SHIFT)); | ||
| 1101 | |||
| 1102 | /* Choose operation */ | ||
| 1103 | if (ctx1_iv_off) | ||
| 1104 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 1105 | OP_ALG_DECRYPT); | ||
| 1106 | else | ||
| 1107 | append_dec_op1(desc, cdata->algtype); | ||
| 1108 | |||
| 1109 | /* Perform operation */ | ||
| 1110 | ablkcipher_append_src_dst(desc); | ||
| 1111 | |||
| 1112 | #ifdef DEBUG | ||
| 1113 | print_hex_dump(KERN_ERR, | ||
| 1114 | "ablkcipher dec shdesc@" __stringify(__LINE__)": ", | ||
| 1115 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1116 | #endif | ||
| 1117 | } | ||
| 1118 | EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap); | ||
| 1119 | |||
| 1120 | /** | ||
| 1121 | * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor | ||
| 1122 | * with HW-generated initialization vector. | ||
| 1123 | * @desc: pointer to buffer used for descriptor construction | ||
| 1124 | * @cdata: pointer to block cipher transform definitions | ||
| 1125 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | ||
| 1126 | * with OP_ALG_AAI_CBC. | ||
| 1127 | * @ivsize: initialization vector size | ||
| 1128 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | ||
| 1129 | * @ctx1_iv_off: IV offset in CONTEXT1 register | ||
| 1130 | */ | ||
| 1131 | void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata, | ||
| 1132 | unsigned int ivsize, const bool is_rfc3686, | ||
| 1133 | const u32 ctx1_iv_off) | ||
| 1134 | { | ||
| 1135 | u32 *key_jump_cmd, geniv; | ||
| 1136 | |||
| 1137 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 1138 | /* Skip if already shared */ | ||
| 1139 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1140 | JUMP_COND_SHRD); | ||
| 1141 | |||
| 1142 | /* Load class1 key only */ | ||
| 1143 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 1144 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1145 | |||
| 1146 | /* Load Nonce into CONTEXT1 reg */ | ||
| 1147 | if (is_rfc3686) { | ||
| 1148 | u8 *nonce = cdata->key_virt + cdata->keylen; | ||
| 1149 | |||
| 1150 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | ||
| 1151 | LDST_CLASS_IND_CCB | | ||
| 1152 | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); | ||
| 1153 | append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | | ||
| 1154 | MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | | ||
| 1155 | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); | ||
| 1156 | } | ||
| 1157 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1158 | |||
| 1159 | /* Generate IV */ | ||
| 1160 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | ||
| 1161 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND | | ||
| 1162 | (ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
| 1163 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | ||
| 1164 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
| 1165 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
| 1166 | append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO | | ||
| 1167 | MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) | | ||
| 1168 | (ctx1_iv_off << MOVE_OFFSET_SHIFT)); | ||
| 1169 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
| 1170 | |||
| 1171 | /* Copy generated IV to memory */ | ||
| 1172 | append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | | ||
| 1173 | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
| 1174 | |||
| 1175 | /* Load Counter into CONTEXT1 reg */ | ||
| 1176 | if (is_rfc3686) | ||
| 1177 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | ||
| 1178 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1179 | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << | ||
| 1180 | LDST_OFFSET_SHIFT)); | ||
| 1181 | |||
| 1182 | if (ctx1_iv_off) | ||
| 1183 | append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | | ||
| 1184 | (1 << JUMP_OFFSET_SHIFT)); | ||
| 1185 | |||
| 1186 | /* Load operation */ | ||
| 1187 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 1188 | OP_ALG_ENCRYPT); | ||
| 1189 | |||
| 1190 | /* Perform operation */ | ||
| 1191 | ablkcipher_append_src_dst(desc); | ||
| 1192 | |||
| 1193 | #ifdef DEBUG | ||
| 1194 | print_hex_dump(KERN_ERR, | ||
| 1195 | "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ", | ||
| 1196 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1197 | #endif | ||
| 1198 | } | ||
| 1199 | EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap); | ||
| 1200 | |||
| 1201 | /** | ||
| 1202 | * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared | ||
| 1203 | * descriptor | ||
| 1204 | * @desc: pointer to buffer used for descriptor construction | ||
| 1205 | * @cdata: pointer to block cipher transform definitions | ||
| 1206 | * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. | ||
| 1207 | */ | ||
| 1208 | void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata) | ||
| 1209 | { | ||
| 1210 | __be64 sector_size = cpu_to_be64(512); | ||
| 1211 | u32 *key_jump_cmd; | ||
| 1212 | |||
| 1213 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 1214 | /* Skip if already shared */ | ||
| 1215 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1216 | JUMP_COND_SHRD); | ||
| 1217 | |||
| 1218 | /* Load class1 keys only */ | ||
| 1219 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 1220 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1221 | |||
| 1222 | /* Load sector size with index 40 bytes (0x28) */ | ||
| 1223 | append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB | | ||
| 1224 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1225 | (0x28 << LDST_OFFSET_SHIFT)); | ||
| 1226 | |||
| 1227 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1228 | |||
| 1229 | /* | ||
| 1230 | * Create sequence for loading the sector index: | ||
| 1231 | * upper 8B of IV - will be used as the sector index, | ||
| 1232 | * lower 8B of IV - will be discarded | ||
| 1233 | */ | ||
| 1234 | append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | | ||
| 1235 | (0x20 << LDST_OFFSET_SHIFT)); | ||
| 1236 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 1237 | |||
| 1238 | /* Load operation */ | ||
| 1239 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | ||
| 1240 | OP_ALG_ENCRYPT); | ||
| 1241 | |||
| 1242 | /* Perform operation */ | ||
| 1243 | ablkcipher_append_src_dst(desc); | ||
| 1244 | |||
| 1245 | #ifdef DEBUG | ||
| 1246 | print_hex_dump(KERN_ERR, | ||
| 1247 | "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ", | ||
| 1248 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1249 | #endif | ||
| 1250 | } | ||
| 1251 | EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap); | ||
| 1252 | |||
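As the comments above spell out, only the first 8 bytes of the 16-byte XTS IV reach the hardware as the sector index. A hypothetical user of the resulting xts(aes) transform (names are assumptions for illustration) would set up the request IV like this:

	u8 iv[16] = { 0 };
	__be64 sector = cpu_to_be64(sector_index);

	memcpy(iv, &sector, 8);	/* upper 8B: consumed at ctx offset 0x20 */
				/* lower 8B: dropped by FIFOLD_CLASS_SKIP */
	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);

The byte order of the sector index is an assumption here; the descriptor loads those bytes verbatim into the class 1 context.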
| 1253 | /** | ||
| 1254 | * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared | ||
| 1255 | * descriptor | ||
| 1256 | * @desc: pointer to buffer used for descriptor construction | ||
| 1257 | * @cdata: pointer to block cipher transform definitions | ||
| 1258 | * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS. | ||
| 1259 | */ | ||
| 1260 | void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata) | ||
| 1261 | { | ||
| 1262 | __be64 sector_size = cpu_to_be64(512); | ||
| 1263 | u32 *key_jump_cmd; | ||
| 1264 | |||
| 1265 | init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); | ||
| 1266 | /* Skip if already shared */ | ||
| 1267 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 1268 | JUMP_COND_SHRD); | ||
| 1269 | |||
| 1270 | /* Load class1 key only */ | ||
| 1271 | append_key_as_imm(desc, cdata->key_virt, cdata->keylen, | ||
| 1272 | cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
| 1273 | |||
| 1274 | /* Load sector size with index 40 bytes (0x28) */ | ||
| 1275 | append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB | | ||
| 1276 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 1277 | (0x28 << LDST_OFFSET_SHIFT)); | ||
| 1278 | |||
| 1279 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 1280 | |||
| 1281 | /* | ||
| 1282 | * Create sequence for loading the sector index: | ||
| 1283 | * upper 8B of IV - will be used as the sector index, | ||
| 1284 | * lower 8B of IV - will be discarded | ||
| 1285 | */ | ||
| 1286 | append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | | ||
| 1287 | (0x20 << LDST_OFFSET_SHIFT)); | ||
| 1288 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); | ||
| 1289 | |||
| 1290 | /* Load operation */ | ||
| 1291 | append_dec_op1(desc, cdata->algtype); | ||
| 1292 | |||
| 1293 | /* Perform operation */ | ||
| 1294 | ablkcipher_append_src_dst(desc); | ||
| 1295 | |||
| 1296 | #ifdef DEBUG | ||
| 1297 | print_hex_dump(KERN_ERR, | ||
| 1298 | "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ", | ||
| 1299 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
| 1300 | #endif | ||
| 1301 | } | ||
| 1302 | EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap); | ||
| 1303 | |||
| 1304 | MODULE_LICENSE("GPL"); | ||
| 1305 | MODULE_DESCRIPTION("FSL CAAM descriptor support"); | ||
| 1306 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h new file mode 100644 index 000000000000..95551737333a --- /dev/null +++ b/drivers/crypto/caam/caamalg_desc.h | |||
| @@ -0,0 +1,97 @@ | |||
| 1 | /* | ||
| 2 | * Shared descriptors for aead, ablkcipher algorithms | ||
| 3 | * | ||
| 4 | * Copyright 2016 NXP | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef _CAAMALG_DESC_H_ | ||
| 8 | #define _CAAMALG_DESC_H_ | ||
| 9 | |||
| 10 | /* length of descriptors text */ | ||
| 11 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | ||
| 12 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) | ||
| 13 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) | ||
| 14 | #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) | ||
| 15 | |||
| 16 | /* Note: Nonce is counted in cdata.keylen */ | ||
| 17 | #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) | ||
| 18 | |||
| 19 | #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) | ||
| 20 | #define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) | ||
| 21 | #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) | ||
| 22 | |||
| 23 | #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) | ||
| 24 | #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) | ||
| 25 | #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) | ||
| 26 | |||
| 27 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) | ||
| 28 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) | ||
| 29 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) | ||
| 30 | |||
| 31 | #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) | ||
| 32 | #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) | ||
| 33 | #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) | ||
| 34 | |||
| 35 | #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) | ||
| 36 | #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ | ||
| 37 | 20 * CAAM_CMD_SZ) | ||
| 38 | #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ | ||
| 39 | 15 * CAAM_CMD_SZ) | ||
| 40 | |||
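These lengths are byte counts built from 4-byte command words (CAAM_CMD_SZ). A compile-time sanity check of the following kind (illustrative only, not part of the patch) would confirm the worst-case descriptors still fit the 64-word limit (MAX_CAAM_DESCSIZE) from desc.h:

	BUILD_BUG_ON(DESC_ABLKCIPHER_ENC_LEN + DESC_AEAD_CTR_RFC3686_LEN >
		     MAX_CAAM_DESCSIZE * CAAM_CMD_SZ);
	BUILD_BUG_ON(DESC_AEAD_GIVENC_LEN + DESC_AEAD_CTR_RFC3686_LEN >
		     MAX_CAAM_DESCSIZE * CAAM_CMD_SZ);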
| 41 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | ||
| 42 | unsigned int icvsize); | ||
| 43 | |||
| 44 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | ||
| 45 | unsigned int icvsize); | ||
| 46 | |||
| 47 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | ||
| 48 | struct alginfo *adata, unsigned int icvsize, | ||
| 49 | const bool is_rfc3686, u32 *nonce, | ||
| 50 | const u32 ctx1_iv_off); | ||
| 51 | |||
| 52 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | ||
| 53 | struct alginfo *adata, unsigned int ivsize, | ||
| 54 | unsigned int icvsize, const bool geniv, | ||
| 55 | const bool is_rfc3686, u32 *nonce, | ||
| 56 | const u32 ctx1_iv_off); | ||
| 57 | |||
| 58 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, | ||
| 59 | struct alginfo *adata, unsigned int ivsize, | ||
| 60 | unsigned int icvsize, const bool is_rfc3686, | ||
| 61 | u32 *nonce, const u32 ctx1_iv_off); | ||
| 62 | |||
| 63 | void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, | ||
| 64 | unsigned int icvsize); | ||
| 65 | |||
| 66 | void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, | ||
| 67 | unsigned int icvsize); | ||
| 68 | |||
| 69 | void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | ||
| 70 | unsigned int icvsize); | ||
| 71 | |||
| 72 | void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, | ||
| 73 | unsigned int icvsize); | ||
| 74 | |||
| 75 | void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, | ||
| 76 | unsigned int icvsize); | ||
| 77 | |||
| 78 | void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, | ||
| 79 | unsigned int icvsize); | ||
| 80 | |||
| 81 | void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, | ||
| 82 | unsigned int ivsize, const bool is_rfc3686, | ||
| 83 | const u32 ctx1_iv_off); | ||
| 84 | |||
| 85 | void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, | ||
| 86 | unsigned int ivsize, const bool is_rfc3686, | ||
| 87 | const u32 ctx1_iv_off); | ||
| 88 | |||
| 89 | void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata, | ||
| 90 | unsigned int ivsize, const bool is_rfc3686, | ||
| 91 | const u32 ctx1_iv_off); | ||
| 92 | |||
| 93 | void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata); | ||
| 94 | |||
| 95 | void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata); | ||
| 96 | |||
| 97 | #endif /* _CAAMALG_DESC_H_ */ | ||
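A sketch of how a driver consumes this header for XTS; the ctx layout is an assumption borrowed from caamalg.c:

	#include "caamalg_desc.h"

	struct alginfo cdata = {
		.algtype  = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
		.key_virt = ctx->key,
		.keylen   = keylen,		/* double-length XTS key */
	};

	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &cdata);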
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 660dc206969f..e58639ea53b1 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -72,7 +72,7 @@ | |||
| 72 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE | 72 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE |
| 73 | 73 | ||
| 74 | /* length of descriptors text */ | 74 | /* length of descriptors text */ |
| 75 | #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) | 75 | #define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) |
| 76 | #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) | 76 | #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) |
| 77 | #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) | 77 | #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) |
| 78 | #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) | 78 | #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) |
| @@ -103,20 +103,15 @@ struct caam_hash_ctx { | |||
| 103 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | 103 | u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 104 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | 104 | u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 105 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | 105 | u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; |
| 106 | u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; | ||
| 107 | dma_addr_t sh_desc_update_dma ____cacheline_aligned; | 106 | dma_addr_t sh_desc_update_dma ____cacheline_aligned; |
| 108 | dma_addr_t sh_desc_update_first_dma; | 107 | dma_addr_t sh_desc_update_first_dma; |
| 109 | dma_addr_t sh_desc_fin_dma; | 108 | dma_addr_t sh_desc_fin_dma; |
| 110 | dma_addr_t sh_desc_digest_dma; | 109 | dma_addr_t sh_desc_digest_dma; |
| 111 | dma_addr_t sh_desc_finup_dma; | ||
| 112 | struct device *jrdev; | 110 | struct device *jrdev; |
| 113 | u32 alg_type; | ||
| 114 | u32 alg_op; | ||
| 115 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; | 111 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; |
| 116 | dma_addr_t key_dma; | 112 | dma_addr_t key_dma; |
| 117 | int ctx_len; | 113 | int ctx_len; |
| 118 | unsigned int split_key_len; | 114 | struct alginfo adata; |
| 119 | unsigned int split_key_pad_len; | ||
| 120 | }; | 115 | }; |
| 121 | 116 | ||
| 122 | /* ahash state */ | 117 | /* ahash state */ |
| @@ -222,89 +217,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, | |||
| 222 | return 0; | 217 | return 0; |
| 223 | } | 218 | } |
| 224 | 219 | ||
| 225 | /* Common shared descriptor commands */ | ||
| 226 | static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) | ||
| 227 | { | ||
| 228 | append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, | ||
| 229 | ctx->split_key_len, CLASS_2 | | ||
| 230 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
| 231 | } | ||
| 232 | |||
| 233 | /* Append key if it has been set */ | ||
| 234 | static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) | ||
| 235 | { | ||
| 236 | u32 *key_jump_cmd; | ||
| 237 | |||
| 238 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 239 | |||
| 240 | if (ctx->split_key_len) { | ||
| 241 | /* Skip if already shared */ | ||
| 242 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
| 243 | JUMP_COND_SHRD); | ||
| 244 | |||
| 245 | append_key_ahash(desc, ctx); | ||
| 246 | |||
| 247 | set_jump_tgt_here(desc, key_jump_cmd); | ||
| 248 | } | ||
| 249 | |||
| 250 | /* Propagate errors from shared to job descriptor */ | ||
| 251 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | ||
| 252 | } | ||
| 253 | |||
| 254 | /* | 220 | /* |
| 255 | * For ahash read data from seqin following state->caam_ctx, | 221 | * For ahash update, final and finup (import_ctx = true) |
| 256 | * and write resulting class2 context to seqout, which may be state->caam_ctx | 222 | * import context, read and write to seqout |
| 257 | * or req->result | 223 | * For ahash firsts and digest (import_ctx = false) |
| 224 | * read and write to seqout | ||
| 258 | */ | 225 | */ |
| 259 | static inline void ahash_append_load_str(u32 *desc, int digestsize) | 226 | static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, |
| 227 | struct caam_hash_ctx *ctx, bool import_ctx) | ||
| 260 | { | 228 | { |
| 261 | /* Calculate remaining bytes to read */ | 229 | u32 op = ctx->adata.algtype; |
| 262 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 230 | u32 *skip_key_load; |
| 263 | |||
| 264 | /* Read remaining bytes */ | ||
| 265 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | | ||
| 266 | FIFOLD_TYPE_MSG | KEY_VLF); | ||
| 267 | 231 | ||
| 268 | /* Store class2 context bytes */ | 232 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
| 269 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | ||
| 270 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 271 | } | ||
| 272 | 233 | ||
| 273 | /* | 234 | /* Append key if it has been set; ahash update excluded */ |
| 274 | * For ahash update, final and finup, import context, read and write to seqout | 235 | if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) { |
| 275 | */ | 236 | /* Skip key loading if already shared */ |
| 276 | static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, | 237 | skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
| 277 | int digestsize, | 238 | JUMP_COND_SHRD); |
| 278 | struct caam_hash_ctx *ctx) | ||
| 279 | { | ||
| 280 | init_sh_desc_key_ahash(desc, ctx); | ||
| 281 | 239 | ||
| 282 | /* Import context from software */ | 240 | append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, |
| 283 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | 241 | ctx->adata.keylen, CLASS_2 | |
| 284 | LDST_CLASS_2_CCB | ctx->ctx_len); | 242 | KEY_DEST_MDHA_SPLIT | KEY_ENC); |
| 285 | 243 | ||
| 286 | /* Class 2 operation */ | 244 | set_jump_tgt_here(desc, skip_key_load); |
| 287 | append_operation(desc, op | state | OP_ALG_ENCRYPT); | ||
| 288 | 245 | ||
| 289 | /* | 246 | op |= OP_ALG_AAI_HMAC_PRECOMP; |
| 290 | * Load from buf and/or src and write to req->result or state->context | 247 | } |
| 291 | */ | ||
| 292 | ahash_append_load_str(desc, digestsize); | ||
| 293 | } | ||
| 294 | 248 | ||
| 295 | /* For ahash firsts and digest, read and write to seqout */ | 249 | /* If needed, import context from software */ |
| 296 | static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, | 250 | if (import_ctx) |
| 297 | int digestsize, struct caam_hash_ctx *ctx) | 251 | append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB | |
| 298 | { | 252 | LDST_SRCDST_BYTE_CONTEXT); |
| 299 | init_sh_desc_key_ahash(desc, ctx); | ||
| 300 | 253 | ||
| 301 | /* Class 2 operation */ | 254 | /* Class 2 operation */ |
| 302 | append_operation(desc, op | state | OP_ALG_ENCRYPT); | 255 | append_operation(desc, op | state | OP_ALG_ENCRYPT); |
| 303 | 256 | ||
| 304 | /* | 257 | /* |
| 305 | * Load from buf and/or src and write to req->result or state->context | 258 | * Load from buf and/or src and write to req->result or state->context |
| 259 | * Calculate remaining bytes to read | ||
| 306 | */ | 260 | */ |
| 307 | ahash_append_load_str(desc, digestsize); | 261 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 262 | /* Read remaining bytes */ | ||
| 263 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | | ||
| 264 | FIFOLD_TYPE_MSG | KEY_VLF); | ||
| 265 | /* Store class2 context bytes */ | ||
| 266 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | ||
| 267 | LDST_SRCDST_BYTE_CONTEXT); | ||
| 308 | } | 268 | } |
| 309 | 269 | ||
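The consolidated helper is driven by just two parameters; the four shared descriptors built in ahash_set_sh_desc() below reduce to these calls (a summary of the patch, not new behavior):

	ahash_gen_sh_desc(ctx->sh_desc_update, OP_ALG_AS_UPDATE,
			  ctx->ctx_len, ctx, true);	/* import running ctx */
	ahash_gen_sh_desc(ctx->sh_desc_update_first, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx, false);
	ahash_gen_sh_desc(ctx->sh_desc_fin, OP_ALG_AS_FINALIZE,
			  digestsize, ctx, true);	/* now also serves finup */
	ahash_gen_sh_desc(ctx->sh_desc_digest, OP_ALG_AS_INITFINAL,
			  digestsize, ctx, false);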
| 310 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) | 270 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) |
| @@ -312,28 +272,11 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 312 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 272 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 313 | int digestsize = crypto_ahash_digestsize(ahash); | 273 | int digestsize = crypto_ahash_digestsize(ahash); |
| 314 | struct device *jrdev = ctx->jrdev; | 274 | struct device *jrdev = ctx->jrdev; |
| 315 | u32 have_key = 0; | ||
| 316 | u32 *desc; | 275 | u32 *desc; |
| 317 | 276 | ||
| 318 | if (ctx->split_key_len) | ||
| 319 | have_key = OP_ALG_AAI_HMAC_PRECOMP; | ||
| 320 | |||
| 321 | /* ahash_update shared descriptor */ | 277 | /* ahash_update shared descriptor */ |
| 322 | desc = ctx->sh_desc_update; | 278 | desc = ctx->sh_desc_update; |
| 323 | 279 | ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); | |
| 324 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
| 325 | |||
| 326 | /* Import context from software */ | ||
| 327 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
| 328 | LDST_CLASS_2_CCB | ctx->ctx_len); | ||
| 329 | |||
| 330 | /* Class 2 operation */ | ||
| 331 | append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | | ||
| 332 | OP_ALG_ENCRYPT); | ||
| 333 | |||
| 334 | /* Load data and write to result or context */ | ||
| 335 | ahash_append_load_str(desc, ctx->ctx_len); | ||
| 336 | |||
| 337 | ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | 280 | ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), |
| 338 | DMA_TO_DEVICE); | 281 | DMA_TO_DEVICE); |
| 339 | if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { | 282 | if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { |
| @@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 348 | 291 | ||
| 349 | /* ahash_update_first shared descriptor */ | 292 | /* ahash_update_first shared descriptor */ |
| 350 | desc = ctx->sh_desc_update_first; | 293 | desc = ctx->sh_desc_update_first; |
| 351 | 294 | ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); | |
| 352 | ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, | ||
| 353 | ctx->ctx_len, ctx); | ||
| 354 | |||
| 355 | ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, | 295 | ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, |
| 356 | desc_bytes(desc), | 296 | desc_bytes(desc), |
| 357 | DMA_TO_DEVICE); | 297 | DMA_TO_DEVICE); |
| @@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 367 | 307 | ||
| 368 | /* ahash_final shared descriptor */ | 308 | /* ahash_final shared descriptor */ |
| 369 | desc = ctx->sh_desc_fin; | 309 | desc = ctx->sh_desc_fin; |
| 370 | 310 | ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); | |
| 371 | ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, | ||
| 372 | OP_ALG_AS_FINALIZE, digestsize, ctx); | ||
| 373 | |||
| 374 | ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | 311 | ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), |
| 375 | DMA_TO_DEVICE); | 312 | DMA_TO_DEVICE); |
| 376 | if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { | 313 | if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { |
| @@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 383 | desc_bytes(desc), 1); | 320 | desc_bytes(desc), 1); |
| 384 | #endif | 321 | #endif |
| 385 | 322 | ||
| 386 | /* ahash_finup shared descriptor */ | ||
| 387 | desc = ctx->sh_desc_finup; | ||
| 388 | |||
| 389 | ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, | ||
| 390 | OP_ALG_AS_FINALIZE, digestsize, ctx); | ||
| 391 | |||
| 392 | ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), | ||
| 393 | DMA_TO_DEVICE); | ||
| 394 | if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { | ||
| 395 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
| 396 | return -ENOMEM; | ||
| 397 | } | ||
| 398 | #ifdef DEBUG | ||
| 399 | print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", | ||
| 400 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
| 401 | desc_bytes(desc), 1); | ||
| 402 | #endif | ||
| 403 | |||
| 404 | /* ahash_digest shared descriptor */ | 323 | /* ahash_digest shared descriptor */ |
| 405 | desc = ctx->sh_desc_digest; | 324 | desc = ctx->sh_desc_digest; |
| 406 | 325 | ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); | |
| 407 | ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, | ||
| 408 | digestsize, ctx); | ||
| 409 | |||
| 410 | ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, | 326 | ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, |
| 411 | desc_bytes(desc), | 327 | desc_bytes(desc), |
| 412 | DMA_TO_DEVICE); | 328 | DMA_TO_DEVICE); |
| @@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
| 424 | return 0; | 340 | return 0; |
| 425 | } | 341 | } |
| 426 | 342 | ||
| 427 | static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, | ||
| 428 | u32 keylen) | ||
| 429 | { | ||
| 430 | return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, | ||
| 431 | ctx->split_key_pad_len, key_in, keylen, | ||
| 432 | ctx->alg_op); | ||
| 433 | } | ||
| 434 | |||
| 435 | /* Digest the key if it is too large */ | 343 | /* Digest the key if it is too large */ |
| 436 | static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | 344 | static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, |
| 437 | u32 *keylen, u8 *key_out, u32 digestsize) | 345 | u32 *keylen, u8 *key_out, u32 digestsize) |
| @@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
| 467 | } | 375 | } |
| 468 | 376 | ||
| 469 | /* Job descriptor to perform unkeyed hash on key_in */ | 377 | /* Job descriptor to perform unkeyed hash on key_in */ |
| 470 | append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | | 378 | append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | |
| 471 | OP_ALG_AS_INITFINAL); | 379 | OP_ALG_AS_INITFINAL); |
| 472 | append_seq_in_ptr(desc, src_dma, *keylen, 0); | 380 | append_seq_in_ptr(desc, src_dma, *keylen, 0); |
| 473 | append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | | 381 | append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | |
| @@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, | |||
| 511 | static int ahash_setkey(struct crypto_ahash *ahash, | 419 | static int ahash_setkey(struct crypto_ahash *ahash, |
| 512 | const u8 *key, unsigned int keylen) | 420 | const u8 *key, unsigned int keylen) |
| 513 | { | 421 | { |
| 514 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
| 515 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
| 516 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 422 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
| 517 | struct device *jrdev = ctx->jrdev; | 423 | struct device *jrdev = ctx->jrdev; |
| 518 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | 424 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
| @@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 537 | key = hashed_key; | 443 | key = hashed_key; |
| 538 | } | 444 | } |
| 539 | 445 | ||
| 540 | /* Pick class 2 key length from algorithm submask */ | 446 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, |
| 541 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | 447 | CAAM_MAX_HASH_KEY_SIZE); |
| 542 | OP_ALG_ALGSEL_SHIFT] * 2; | ||
| 543 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | ||
| 544 | |||
| 545 | #ifdef DEBUG | ||
| 546 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | ||
| 547 | ctx->split_key_len, ctx->split_key_pad_len); | ||
| 548 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | ||
| 549 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
| 550 | #endif | ||
| 551 | |||
| 552 | ret = gen_split_hash_key(ctx, key, keylen); | ||
| 553 | if (ret) | 448 | if (ret) |
| 554 | goto bad_free_key; | 449 | goto bad_free_key; |
| 555 | 450 | ||
| 556 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | 451 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad, |
| 557 | DMA_TO_DEVICE); | 452 | DMA_TO_DEVICE); |
| 558 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | 453 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
| 559 | dev_err(jrdev, "unable to map key i/o memory\n"); | 454 | dev_err(jrdev, "unable to map key i/o memory\n"); |
| @@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
| 563 | #ifdef DEBUG | 458 | #ifdef DEBUG |
| 564 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 459 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
| 565 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 460 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
| 566 | ctx->split_key_pad_len, 1); | 461 | ctx->adata.keylen_pad, 1); |
| 567 | #endif | 462 | #endif |
| 568 | 463 | ||
| 569 | ret = ahash_set_sh_desc(ahash); | 464 | ret = ahash_set_sh_desc(ahash); |
| 570 | if (ret) { | 465 | if (ret) { |
| 571 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, | 466 | dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad, |
| 572 | DMA_TO_DEVICE); | 467 | DMA_TO_DEVICE); |
| 573 | } | 468 | } |
| 469 | |||
| 574 | error_free_key: | 470 | error_free_key: |
| 575 | kfree(hashed_key); | 471 | kfree(hashed_key); |
| 576 | return ret; | 472 | return ret; |
| @@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 639 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 535 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 640 | #endif | 536 | #endif |
| 641 | 537 | ||
| 642 | edesc = (struct ahash_edesc *)((char *)desc - | 538 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 643 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 644 | if (err) | 539 | if (err) |
| 645 | caam_jr_strstatus(jrdev, err); | 540 | caam_jr_strstatus(jrdev, err); |
| 646 | 541 | ||
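The container_of() conversion is behavior-preserving: it performs exactly the pointer arithmetic the old cast spelled out, with the member type checked by the compiler. Side by side (sketch):

	/* old form: manual pointer arithmetic */
	edesc = (struct ahash_edesc *)((char *)desc -
				       offsetof(struct ahash_edesc, hw_desc));

	/* new form: same address, derived from the hw_desc[0] member */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);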
| @@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
| 674 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 569 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 675 | #endif | 570 | #endif |
| 676 | 571 | ||
| 677 | edesc = (struct ahash_edesc *)((char *)desc - | 572 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 678 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 679 | if (err) | 573 | if (err) |
| 680 | caam_jr_strstatus(jrdev, err); | 574 | caam_jr_strstatus(jrdev, err); |
| 681 | 575 | ||
| @@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
| 709 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 603 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 710 | #endif | 604 | #endif |
| 711 | 605 | ||
| 712 | edesc = (struct ahash_edesc *)((char *)desc - | 606 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 713 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 714 | if (err) | 607 | if (err) |
| 715 | caam_jr_strstatus(jrdev, err); | 608 | caam_jr_strstatus(jrdev, err); |
| 716 | 609 | ||
| @@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
| 744 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 637 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
| 745 | #endif | 638 | #endif |
| 746 | 639 | ||
| 747 | edesc = (struct ahash_edesc *)((char *)desc - | 640 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
| 748 | offsetof(struct ahash_edesc, hw_desc)); | ||
| 749 | if (err) | 641 | if (err) |
| 750 | caam_jr_strstatus(jrdev, err); | 642 | caam_jr_strstatus(jrdev, err); |
| 751 | 643 | ||
| @@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 1078 | 970 | ||
| 1079 | /* allocate space for base edesc and hw desc commands, link tables */ | 971 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1080 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, | 972 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, |
| 1081 | ctx->sh_desc_finup, ctx->sh_desc_finup_dma, | 973 | ctx->sh_desc_fin, ctx->sh_desc_fin_dma, |
| 1082 | flags); | 974 | flags); |
| 1083 | if (!edesc) { | 975 | if (!edesc) { |
| 1084 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); | 976 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
| @@ -1683,7 +1575,6 @@ struct caam_hash_template { | |||
| 1683 | unsigned int blocksize; | 1575 | unsigned int blocksize; |
| 1684 | struct ahash_alg template_ahash; | 1576 | struct ahash_alg template_ahash; |
| 1685 | u32 alg_type; | 1577 | u32 alg_type; |
| 1686 | u32 alg_op; | ||
| 1687 | }; | 1578 | }; |
| 1688 | 1579 | ||
| 1689 | /* ahash descriptors */ | 1580 | /* ahash descriptors */ |
| @@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1709 | }, | 1600 | }, |
| 1710 | }, | 1601 | }, |
| 1711 | .alg_type = OP_ALG_ALGSEL_SHA1, | 1602 | .alg_type = OP_ALG_ALGSEL_SHA1, |
| 1712 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 1713 | }, { | 1603 | }, { |
| 1714 | .name = "sha224", | 1604 | .name = "sha224", |
| 1715 | .driver_name = "sha224-caam", | 1605 | .driver_name = "sha224-caam", |
| @@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1731 | }, | 1621 | }, |
| 1732 | }, | 1622 | }, |
| 1733 | .alg_type = OP_ALG_ALGSEL_SHA224, | 1623 | .alg_type = OP_ALG_ALGSEL_SHA224, |
| 1734 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 1735 | }, { | 1624 | }, { |
| 1736 | .name = "sha256", | 1625 | .name = "sha256", |
| 1737 | .driver_name = "sha256-caam", | 1626 | .driver_name = "sha256-caam", |
| @@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1753 | }, | 1642 | }, |
| 1754 | }, | 1643 | }, |
| 1755 | .alg_type = OP_ALG_ALGSEL_SHA256, | 1644 | .alg_type = OP_ALG_ALGSEL_SHA256, |
| 1756 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 1757 | }, { | 1645 | }, { |
| 1758 | .name = "sha384", | 1646 | .name = "sha384", |
| 1759 | .driver_name = "sha384-caam", | 1647 | .driver_name = "sha384-caam", |
| @@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1775 | }, | 1663 | }, |
| 1776 | }, | 1664 | }, |
| 1777 | .alg_type = OP_ALG_ALGSEL_SHA384, | 1665 | .alg_type = OP_ALG_ALGSEL_SHA384, |
| 1778 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 1779 | }, { | 1666 | }, { |
| 1780 | .name = "sha512", | 1667 | .name = "sha512", |
| 1781 | .driver_name = "sha512-caam", | 1668 | .driver_name = "sha512-caam", |
| @@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1797 | }, | 1684 | }, |
| 1798 | }, | 1685 | }, |
| 1799 | .alg_type = OP_ALG_ALGSEL_SHA512, | 1686 | .alg_type = OP_ALG_ALGSEL_SHA512, |
| 1800 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 1801 | }, { | 1687 | }, { |
| 1802 | .name = "md5", | 1688 | .name = "md5", |
| 1803 | .driver_name = "md5-caam", | 1689 | .driver_name = "md5-caam", |
| @@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1819 | }, | 1705 | }, |
| 1820 | }, | 1706 | }, |
| 1821 | .alg_type = OP_ALG_ALGSEL_MD5, | 1707 | .alg_type = OP_ALG_ALGSEL_MD5, |
| 1822 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 1823 | }, | 1708 | }, |
| 1824 | }; | 1709 | }; |
| 1825 | 1710 | ||
| 1826 | struct caam_hash_alg { | 1711 | struct caam_hash_alg { |
| 1827 | struct list_head entry; | 1712 | struct list_head entry; |
| 1828 | int alg_type; | 1713 | int alg_type; |
| 1829 | int alg_op; | ||
| 1830 | struct ahash_alg ahash_alg; | 1714 | struct ahash_alg ahash_alg; |
| 1831 | }; | 1715 | }; |
| 1832 | 1716 | ||
| @@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
| 1859 | return PTR_ERR(ctx->jrdev); | 1743 | return PTR_ERR(ctx->jrdev); |
| 1860 | } | 1744 | } |
| 1861 | /* copy descriptor header template value */ | 1745 | /* copy descriptor header template value */ |
| 1862 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | 1746 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; |
| 1863 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; | ||
| 1864 | 1747 | ||
| 1865 | ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | 1748 | ctx->ctx_len = runninglen[(ctx->adata.algtype & |
| 1749 | OP_ALG_ALGSEL_SUBMASK) >> | ||
| 1866 | OP_ALG_ALGSEL_SHIFT]; | 1750 | OP_ALG_ALGSEL_SHIFT]; |
| 1867 | 1751 | ||
| 1868 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 1752 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| @@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
| 1893 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, | 1777 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, |
| 1894 | desc_bytes(ctx->sh_desc_digest), | 1778 | desc_bytes(ctx->sh_desc_digest), |
| 1895 | DMA_TO_DEVICE); | 1779 | DMA_TO_DEVICE); |
| 1896 | if (ctx->sh_desc_finup_dma && | ||
| 1897 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) | ||
| 1898 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, | ||
| 1899 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); | ||
| 1900 | 1780 | ||
| 1901 | caam_jr_free(ctx->jrdev); | 1781 | caam_jr_free(ctx->jrdev); |
| 1902 | } | 1782 | } |
| @@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
| 1956 | alg->cra_type = &crypto_ahash_type; | 1836 | alg->cra_type = &crypto_ahash_type; |
| 1957 | 1837 | ||
| 1958 | t_alg->alg_type = template->alg_type; | 1838 | t_alg->alg_type = template->alg_type; |
| 1959 | t_alg->alg_op = template->alg_op; | ||
| 1960 | 1839 | ||
| 1961 | return t_alg; | 1840 | return t_alg; |
| 1962 | } | 1841 | } |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 851015e652b8..32100c4851dd 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
| @@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, | |||
| 395 | unsigned int keylen) | 395 | unsigned int keylen) |
| 396 | { | 396 | { |
| 397 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 397 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
| 398 | struct rsa_key raw_key = {0}; | 398 | struct rsa_key raw_key = {NULL}; |
| 399 | struct caam_rsa_key *rsa_key = &ctx->key; | 399 | struct caam_rsa_key *rsa_key = &ctx->key; |
| 400 | int ret; | 400 | int ret; |
| 401 | 401 | ||
| @@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, | |||
| 441 | unsigned int keylen) | 441 | unsigned int keylen) |
| 442 | { | 442 | { |
| 443 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 443 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
| 444 | struct rsa_key raw_key = {0}; | 444 | struct rsa_key raw_key = {NULL}; |
| 445 | struct caam_rsa_key *rsa_key = &ctx->key; | 445 | struct caam_rsa_key *rsa_key = &ctx->key; |
| 446 | int ret; | 446 | int ret; |
| 447 | 447 | ||
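The {0} -> {NULL} change silences sparse's "Using plain integer as NULL pointer" warning, since the first member of struct rsa_key is a pointer (the modulus n, per include/crypto/internal/rsa.h). A designated initializer (illustrative alternative) would make that explicit and survive member reordering:

	struct rsa_key raw_key = { .n = NULL };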
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 9b92af2c7241..41398da3edf4 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
| @@ -52,7 +52,7 @@ | |||
| 52 | 52 | ||
| 53 | /* length of descriptors */ | 53 | /* length of descriptors */ |
| 54 | #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) | 54 | #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) |
| 55 | #define DESC_RNG_LEN (4 * CAAM_CMD_SZ) | 55 | #define DESC_RNG_LEN (3 * CAAM_CMD_SZ) |
| 56 | 56 | ||
| 57 | /* Buffer, its dma address and lock */ | 57 | /* Buffer, its dma address and lock */ |
| 58 | struct buf_data { | 58 | struct buf_data { |
| @@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) | |||
| 100 | { | 100 | { |
| 101 | struct buf_data *bd; | 101 | struct buf_data *bd; |
| 102 | 102 | ||
| 103 | bd = (struct buf_data *)((char *)desc - | 103 | bd = container_of(desc, struct buf_data, hw_desc[0]); |
| 104 | offsetof(struct buf_data, hw_desc)); | ||
| 105 | 104 | ||
| 106 | if (err) | 105 | if (err) |
| 107 | caam_jr_strstatus(jrdev, err); | 106 | caam_jr_strstatus(jrdev, err); |
| @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) | |||
| 196 | 195 | ||
| 197 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 196 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
| 198 | 197 | ||
| 199 | /* Propagate errors from shared to job descriptor */ | ||
| 200 | append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); | ||
| 201 | |||
| 202 | /* Generate random bytes */ | 198 | /* Generate random bytes */ |
| 203 | append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); | 199 | append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); |
| 204 | 200 | ||
| @@ -351,7 +347,7 @@ static int __init caam_rng_init(void) | |||
| 351 | pr_err("Job Ring Device allocation for transform failed\n"); | 347 | pr_err("Job Ring Device allocation for transform failed\n"); |
| 352 | return PTR_ERR(dev); | 348 | return PTR_ERR(dev); |
| 353 | } | 349 | } |
| 354 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA); | 350 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); |
| 355 | if (!rng_ctx) { | 351 | if (!rng_ctx) { |
| 356 | err = -ENOMEM; | 352 | err = -ENOMEM; |
| 357 | goto free_caam_alloc; | 353 | goto free_caam_alloc; |
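GFP_DMA is only a zone modifier and carries no allocation policy of its own, so it must be combined with a base type such as GFP_KERNEL; the fixed call above is the canonical pattern (error path abbreviated for illustration):

	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
	if (!rng_ctx)
		return -ENOMEM;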
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index e483b78c6343..755109841cfd 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev) | |||
| 330 | clk_disable_unprepare(ctrlpriv->caam_ipg); | 330 | clk_disable_unprepare(ctrlpriv->caam_ipg); |
| 331 | clk_disable_unprepare(ctrlpriv->caam_mem); | 331 | clk_disable_unprepare(ctrlpriv->caam_mem); |
| 332 | clk_disable_unprepare(ctrlpriv->caam_aclk); | 332 | clk_disable_unprepare(ctrlpriv->caam_aclk); |
| 333 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | 333 | if (ctrlpriv->caam_emi_slow) |
| 334 | 334 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | |
| 335 | return 0; | 335 | return 0; |
| 336 | } | 336 | } |
| 337 | 337 | ||
| @@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) | |||
| 365 | */ | 365 | */ |
| 366 | val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) | 366 | val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) |
| 367 | >> RTSDCTL_ENT_DLY_SHIFT; | 367 | >> RTSDCTL_ENT_DLY_SHIFT; |
| 368 | if (ent_delay <= val) { | 368 | if (ent_delay <= val) |
| 369 | /* put RNG4 into run mode */ | 369 | goto start_rng; |
| 370 | clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0); | ||
| 371 | return; | ||
| 372 | } | ||
| 373 | 370 | ||
| 374 | val = rd_reg32(&r4tst->rtsdctl); | 371 | val = rd_reg32(&r4tst->rtsdctl); |
| 375 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | | 372 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | |
| @@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) | |||
| 381 | wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); | 378 | wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); |
| 382 | /* read the control register */ | 379 | /* read the control register */ |
| 383 | val = rd_reg32(&r4tst->rtmctl); | 380 | val = rd_reg32(&r4tst->rtmctl); |
| 381 | start_rng: | ||
| 384 | /* | 382 | /* |
| 385 | * select raw sampling in both entropy shifter | 383 | * select raw sampling in both entropy shifter |
| 386 | * and statistical checker | 384 | * and statistical checker; put RNG4 into run mode |
| 387 | */ | 385 | */ |
| 388 | clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC); | 386 | clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC); |
| 389 | /* put RNG4 into run mode */ | ||
| 390 | clrsetbits_32(&val, RTMCTL_PRGM, 0); | ||
| 391 | /* write back the control register */ | ||
| 392 | wr_reg32(&r4tst->rtmctl, val); | ||
| 393 | } | 387 | } |
| 394 | 388 | ||
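The rework collapses the read/modify/modify/write-back sequence into a single call. The helper's assumed semantics, a sketch consistent with how it is used here (the actual definition lives in regs.h):

	/* clear the 'clear' bits and set the 'set' bits in one RMW cycle */
	static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
	{
		wr_reg32(reg, (rd_reg32(reg) & ~clear) | set);
	}

so the new line leaves programming mode (clears RTMCTL_PRGM) and selects raw sampling (sets RTMCTL_SAMP_MODE_RAW_ES_SC) in one register access.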
| 395 | /** | 389 | /** |
| @@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev) | |||
| 482 | } | 476 | } |
| 483 | ctrlpriv->caam_aclk = clk; | 477 | ctrlpriv->caam_aclk = clk; |
| 484 | 478 | ||
| 485 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); | 479 | if (!of_machine_is_compatible("fsl,imx6ul")) { |
| 486 | if (IS_ERR(clk)) { | 480 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); |
| 487 | ret = PTR_ERR(clk); | 481 | if (IS_ERR(clk)) { |
| 488 | dev_err(&pdev->dev, | 482 | ret = PTR_ERR(clk); |
| 489 | "can't identify CAAM emi_slow clk: %d\n", ret); | 483 | dev_err(&pdev->dev, |
| 490 | return ret; | 484 | "can't identify CAAM emi_slow clk: %d\n", ret); |
| 485 | return ret; | ||
| 486 | } | ||
| 487 | ctrlpriv->caam_emi_slow = clk; | ||
| 491 | } | 488 | } |
| 492 | ctrlpriv->caam_emi_slow = clk; | ||
| 493 | 489 | ||
| 494 | ret = clk_prepare_enable(ctrlpriv->caam_ipg); | 490 | ret = clk_prepare_enable(ctrlpriv->caam_ipg); |
| 495 | if (ret < 0) { | 491 | if (ret < 0) { |
| @@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev) | |||
| 510 | goto disable_caam_mem; | 506 | goto disable_caam_mem; |
| 511 | } | 507 | } |
| 512 | 508 | ||
| 513 | ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); | 509 | if (ctrlpriv->caam_emi_slow) { |
| 514 | if (ret < 0) { | 510 | ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); |
| 515 | dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", | 511 | if (ret < 0) { |
| 516 | ret); | 512 | dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", |
| 517 | goto disable_caam_aclk; | 513 | ret); |
| 514 | goto disable_caam_aclk; | ||
| 515 | } | ||
| 518 | } | 516 | } |
| 519 | 517 | ||
| 520 | /* Get configuration properties from device tree */ | 518 | /* Get configuration properties from device tree */ |
| @@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev) | |||
| 541 | else | 539 | else |
| 542 | BLOCK_OFFSET = PG_SIZE_64K; | 540 | BLOCK_OFFSET = PG_SIZE_64K; |
| 543 | 541 | ||
| 544 | ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl; | 542 | ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl; |
| 545 | ctrlpriv->assure = (struct caam_assurance __force *) | 543 | ctrlpriv->assure = (struct caam_assurance __iomem __force *) |
| 546 | ((uint8_t *)ctrl + | 544 | ((__force uint8_t *)ctrl + |
| 547 | BLOCK_OFFSET * ASSURE_BLOCK_NUMBER | 545 | BLOCK_OFFSET * ASSURE_BLOCK_NUMBER |
| 548 | ); | 546 | ); |
| 549 | ctrlpriv->deco = (struct caam_deco __force *) | 547 | ctrlpriv->deco = (struct caam_deco __iomem __force *) |
| 550 | ((uint8_t *)ctrl + | 548 | ((__force uint8_t *)ctrl + |
| 551 | BLOCK_OFFSET * DECO_BLOCK_NUMBER | 549 | BLOCK_OFFSET * DECO_BLOCK_NUMBER |
| 552 | ); | 550 | ); |
| 553 | 551 | ||
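The added annotations serve sparse's address-space checking: pointers derived from the ioremap()'d ctrl base stay tagged __iomem, and the byte-offset arithmetic casts through __force to mark the conversion as intentional. The pattern, isolated (sketch):

	struct caam_deco __iomem *deco =
		(struct caam_deco __iomem __force *)
		((__force uint8_t *)ctrl + BLOCK_OFFSET * DECO_BLOCK_NUMBER);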
| @@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev) | |||
| 627 | ring); | 625 | ring); |
| 628 | continue; | 626 | continue; |
| 629 | } | 627 | } |
| 630 | ctrlpriv->jr[ring] = (struct caam_job_ring __force *) | 628 | ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) |
| 631 | ((uint8_t *)ctrl + | 629 | ((__force uint8_t *)ctrl + |
| 632 | (ring + JR_BLOCK_NUMBER) * | 630 | (ring + JR_BLOCK_NUMBER) * |
| 633 | BLOCK_OFFSET | 631 | BLOCK_OFFSET |
| 634 | ); | 632 | ); |
| @@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev) | |||
| 641 | !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) & | 639 | !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) & |
| 642 | CTPR_MS_QI_MASK); | 640 | CTPR_MS_QI_MASK); |
| 643 | if (ctrlpriv->qi_present) { | 641 | if (ctrlpriv->qi_present) { |
| 644 | ctrlpriv->qi = (struct caam_queue_if __force *) | 642 | ctrlpriv->qi = (struct caam_queue_if __iomem __force *) |
| 645 | ((uint8_t *)ctrl + | 643 | ((__force uint8_t *)ctrl + |
| 646 | BLOCK_OFFSET * QI_BLOCK_NUMBER | 644 | BLOCK_OFFSET * QI_BLOCK_NUMBER |
| 647 | ); | 645 | ); |
| 648 | /* This is all that's required to physically enable QI */ | 646 | /* This is all that's required to physically enable QI */ |
| @@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 800 | &caam_fops_u32_ro); | 798 | &caam_fops_u32_ro); |
| 801 | 799 | ||
| 802 | /* Internal covering keys (useful in non-secure mode only) */ | 800 | /* Internal covering keys (useful in non-secure mode only) */ |
| 803 | ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; | 801 | ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; |
| 804 | ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); | 802 | ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); |
| 805 | ctrlpriv->ctl_kek = debugfs_create_blob("kek", | 803 | ctrlpriv->ctl_kek = debugfs_create_blob("kek", |
| 806 | S_IRUSR | | 804 | S_IRUSR | |
| @@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 808 | ctrlpriv->ctl, | 806 | ctrlpriv->ctl, |
| 809 | &ctrlpriv->ctl_kek_wrap); | 807 | &ctrlpriv->ctl_kek_wrap); |
| 810 | 808 | ||
| 811 | ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0]; | 809 | ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0]; |
| 812 | ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); | 810 | ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); |
| 813 | ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", | 811 | ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", |
| 814 | S_IRUSR | | 812 | S_IRUSR | |
| @@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 816 | ctrlpriv->ctl, | 814 | ctrlpriv->ctl, |
| 817 | &ctrlpriv->ctl_tkek_wrap); | 815 | &ctrlpriv->ctl_tkek_wrap); |
| 818 | 816 | ||
| 819 | ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0]; | 817 | ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0]; |
| 820 | ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); | 818 | ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); |
| 821 | ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", | 819 | ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", |
| 822 | S_IRUSR | | 820 | S_IRUSR | |
| @@ -833,7 +831,8 @@ caam_remove: | |||
| 833 | iounmap_ctrl: | 831 | iounmap_ctrl: |
| 834 | iounmap(ctrl); | 832 | iounmap(ctrl); |
| 835 | disable_caam_emi_slow: | 833 | disable_caam_emi_slow: |
| 836 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | 834 | if (ctrlpriv->caam_emi_slow) |
| 835 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | ||
| 837 | disable_caam_aclk: | 836 | disable_caam_aclk: |
| 838 | clk_disable_unprepare(ctrlpriv->caam_aclk); | 837 | clk_disable_unprepare(ctrlpriv->caam_aclk); |
| 839 | disable_caam_mem: | 838 | disable_caam_mem: |
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 513b6646bb36..2e6766a1573f 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -22,12 +22,6 @@ | |||
| 22 | #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ | 22 | #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ |
| 23 | #define SEC4_SG_OFFSET_MASK 0x00001fff | 23 | #define SEC4_SG_OFFSET_MASK 0x00001fff |
| 24 | 24 | ||
| 25 | struct sec4_sg_entry { | ||
| 26 | u64 ptr; | ||
| 27 | u32 len; | ||
| 28 | u32 bpid_offset; | ||
| 29 | }; | ||
| 30 | |||
| 31 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ | 25 | /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ |
| 32 | #define MAX_CAAM_DESCSIZE 64 | 26 | #define MAX_CAAM_DESCSIZE 64 |
| 33 | 27 | ||
| @@ -90,8 +84,8 @@ struct sec4_sg_entry { | |||
| 90 | #define HDR_ZRO 0x00008000 | 84 | #define HDR_ZRO 0x00008000 |
| 91 | 85 | ||
| 92 | /* Start Index or SharedDesc Length */ | 86 | /* Start Index or SharedDesc Length */ |
| 93 | #define HDR_START_IDX_MASK 0x3f | ||
| 94 | #define HDR_START_IDX_SHIFT 16 | 87 | #define HDR_START_IDX_SHIFT 16 |
| 88 | #define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT) | ||
| 95 | 89 | ||
| 96 | /* If shared descriptor header, 6-bit length */ | 90 | /* If shared descriptor header, 6-bit length */ |
| 97 | #define HDR_DESCLEN_SHR_MASK 0x3f | 91 | #define HDR_DESCLEN_SHR_MASK 0x3f |
| @@ -121,10 +115,10 @@ struct sec4_sg_entry { | |||
| 121 | #define HDR_PROP_DNR 0x00000800 | 115 | #define HDR_PROP_DNR 0x00000800 |
| 122 | 116 | ||
| 123 | /* JobDesc/SharedDesc share property */ | 117 | /* JobDesc/SharedDesc share property */ |
| 124 | #define HDR_SD_SHARE_MASK 0x03 | ||
| 125 | #define HDR_SD_SHARE_SHIFT 8 | 118 | #define HDR_SD_SHARE_SHIFT 8 |
| 126 | #define HDR_JD_SHARE_MASK 0x07 | 119 | #define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT) |
| 127 | #define HDR_JD_SHARE_SHIFT 8 | 120 | #define HDR_JD_SHARE_SHIFT 8 |
| 121 | #define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT) | ||
| 128 | 122 | ||
| 129 | #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT) | 123 | #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT) |
| 130 | #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT) | 124 | #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT) |
| @@ -235,7 +229,7 @@ struct sec4_sg_entry { | |||
| 235 | #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT) | 229 | #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT) |
| 236 | #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT) | 230 | #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT) |
| 237 | #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT) | 231 | #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT) |
| 238 | #define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT) | 232 | #define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT) |
| 239 | #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT) | 233 | #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT) |
| 240 | #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT) | 234 | #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT) |
| 241 | #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT) | 235 | #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT) |
| @@ -400,7 +394,7 @@ struct sec4_sg_entry { | |||
| 400 | #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) | 394 | #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) |
| 401 | #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) | 395 | #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) |
| 402 | #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) | 396 | #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) |
| 403 | #define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT) | 397 | #define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT) |
| 404 | #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) | 398 | #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) |
| 405 | #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) | 399 | #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) |
| 406 | #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT) | 400 | #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT) |
| @@ -1107,8 +1101,8 @@ struct sec4_sg_entry { | |||
| 1107 | /* For non-protocol/alg-only op commands */ | 1101 | /* For non-protocol/alg-only op commands */ |
| 1108 | #define OP_ALG_TYPE_SHIFT 24 | 1102 | #define OP_ALG_TYPE_SHIFT 24 |
| 1109 | #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT) | 1103 | #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT) |
| 1110 | #define OP_ALG_TYPE_CLASS1 2 | 1104 | #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT) |
| 1111 | #define OP_ALG_TYPE_CLASS2 4 | 1105 | #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT) |
| 1112 | 1106 | ||
| 1113 | #define OP_ALG_ALGSEL_SHIFT 16 | 1107 | #define OP_ALG_ALGSEL_SHIFT 16 |
| 1114 | #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) | 1108 | #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) |
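With OP_ALG_TYPE_CLASS1/2 now pre-shifted like the other OP_ALG_* values, a masked operation word can be compared against them directly (illustrative):

	bool is_class2 = (adata->algtype & OP_ALG_TYPE_MASK) ==
			 OP_ALG_TYPE_CLASS2;	/* MDHA / class 2 */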
| @@ -1249,7 +1243,7 @@ struct sec4_sg_entry { | |||
| 1249 | #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f | 1243 | #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f |
| 1250 | 1244 | ||
| 1251 | /* PKHA mode copy-memory functions */ | 1245 | /* PKHA mode copy-memory functions */ |
| 1252 | #define OP_ALG_PKMODE_SRC_REG_SHIFT 13 | 1246 | #define OP_ALG_PKMODE_SRC_REG_SHIFT 17 |
| 1253 | #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) | 1247 | #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) |
| 1254 | #define OP_ALG_PKMODE_DST_REG_SHIFT 10 | 1248 | #define OP_ALG_PKMODE_DST_REG_SHIFT 10 |
| 1255 | #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) | 1249 | #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index a8cd8a78ec1f..b9c8d98ef826 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
| @@ -33,38 +33,39 @@ | |||
| 33 | 33 | ||
| 34 | extern bool caam_little_end; | 34 | extern bool caam_little_end; |
| 35 | 35 | ||
| 36 | static inline int desc_len(u32 *desc) | 36 | static inline int desc_len(u32 * const desc) |
| 37 | { | 37 | { |
| 38 | return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; | 38 | return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static inline int desc_bytes(void *desc) | 41 | static inline int desc_bytes(void * const desc) |
| 42 | { | 42 | { |
| 43 | return desc_len(desc) * CAAM_CMD_SZ; | 43 | return desc_len(desc) * CAAM_CMD_SZ; |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | static inline u32 *desc_end(u32 *desc) | 46 | static inline u32 *desc_end(u32 * const desc) |
| 47 | { | 47 | { |
| 48 | return desc + desc_len(desc); | 48 | return desc + desc_len(desc); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static inline void *sh_desc_pdb(u32 *desc) | 51 | static inline void *sh_desc_pdb(u32 * const desc) |
| 52 | { | 52 | { |
| 53 | return desc + 1; | 53 | return desc + 1; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | static inline void init_desc(u32 *desc, u32 options) | 56 | static inline void init_desc(u32 * const desc, u32 options) |
| 57 | { | 57 | { |
| 58 | *desc = cpu_to_caam32((options | HDR_ONE) + 1); | 58 | *desc = cpu_to_caam32((options | HDR_ONE) + 1); |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static inline void init_sh_desc(u32 *desc, u32 options) | 61 | static inline void init_sh_desc(u32 * const desc, u32 options) |
| 62 | { | 62 | { |
| 63 | PRINT_POS; | 63 | PRINT_POS; |
| 64 | init_desc(desc, CMD_SHARED_DESC_HDR | options); | 64 | init_desc(desc, CMD_SHARED_DESC_HDR | options); |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) | 67 | static inline void init_sh_desc_pdb(u32 * const desc, u32 options, |
| 68 | size_t pdb_bytes) | ||
| 68 | { | 69 | { |
| 69 | u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; | 70 | u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; |
| 70 | 71 | ||
| @@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) | |||
| 72 | options); | 73 | options); |
| 73 | } | 74 | } |
| 74 | 75 | ||
| 75 | static inline void init_job_desc(u32 *desc, u32 options) | 76 | static inline void init_job_desc(u32 * const desc, u32 options) |
| 76 | { | 77 | { |
| 77 | init_desc(desc, CMD_DESC_HDR | options); | 78 | init_desc(desc, CMD_DESC_HDR | options); |
| 78 | } | 79 | } |
| 79 | 80 | ||
| 80 | static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) | 81 | static inline void init_job_desc_pdb(u32 * const desc, u32 options, |
| 82 | size_t pdb_bytes) | ||
| 81 | { | 83 | { |
| 82 | u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; | 84 | u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; |
| 83 | 85 | ||
| 84 | init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options); | 86 | init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options); |
| 85 | } | 87 | } |
| 86 | 88 | ||
| 87 | static inline void append_ptr(u32 *desc, dma_addr_t ptr) | 89 | static inline void append_ptr(u32 * const desc, dma_addr_t ptr) |
| 88 | { | 90 | { |
| 89 | dma_addr_t *offset = (dma_addr_t *)desc_end(desc); | 91 | dma_addr_t *offset = (dma_addr_t *)desc_end(desc); |
| 90 | 92 | ||
| @@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr) | |||
| 94 | CAAM_PTR_SZ / CAAM_CMD_SZ); | 96 | CAAM_PTR_SZ / CAAM_CMD_SZ); |
| 95 | } | 97 | } |
| 96 | 98 | ||
| 97 | static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, | 99 | static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr, |
| 98 | u32 options) | 100 | int len, u32 options) |
| 99 | { | 101 | { |
| 100 | PRINT_POS; | 102 | PRINT_POS; |
| 101 | init_job_desc(desc, HDR_SHARED | options | | 103 | init_job_desc(desc, HDR_SHARED | options | |
| @@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, | |||
| 103 | append_ptr(desc, ptr); | 105 | append_ptr(desc, ptr); |
| 104 | } | 106 | } |
| 105 | 107 | ||
| 106 | static inline void append_data(u32 *desc, void *data, int len) | 108 | static inline void append_data(u32 * const desc, void *data, int len) |
| 107 | { | 109 | { |
| 108 | u32 *offset = desc_end(desc); | 110 | u32 *offset = desc_end(desc); |
| 109 | 111 | ||
| @@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len) | |||
| 114 | (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ); | 116 | (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ); |
| 115 | } | 117 | } |
| 116 | 118 | ||
| 117 | static inline void append_cmd(u32 *desc, u32 command) | 119 | static inline void append_cmd(u32 * const desc, u32 command) |
| 118 | { | 120 | { |
| 119 | u32 *cmd = desc_end(desc); | 121 | u32 *cmd = desc_end(desc); |
| 120 | 122 | ||
| @@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command) | |||
| 125 | 127 | ||
| 126 | #define append_u32 append_cmd | 128 | #define append_u32 append_cmd |
| 127 | 129 | ||
| 128 | static inline void append_u64(u32 *desc, u64 data) | 130 | static inline void append_u64(u32 * const desc, u64 data) |
| 129 | { | 131 | { |
| 130 | u32 *offset = desc_end(desc); | 132 | u32 *offset = desc_end(desc); |
| 131 | 133 | ||
| @@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data) | |||
| 142 | } | 144 | } |
| 143 | 145 | ||
| 144 | /* Write command without affecting header, and return pointer to next word */ | 146 | /* Write command without affecting header, and return pointer to next word */ |
| 145 | static inline u32 *write_cmd(u32 *desc, u32 command) | 147 | static inline u32 *write_cmd(u32 * const desc, u32 command) |
| 146 | { | 148 | { |
| 147 | *desc = cpu_to_caam32(command); | 149 | *desc = cpu_to_caam32(command); |
| 148 | 150 | ||
| 149 | return desc + 1; | 151 | return desc + 1; |
| 150 | } | 152 | } |
| 151 | 153 | ||
| 152 | static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, | 154 | static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len, |
| 153 | u32 command) | 155 | u32 command) |
| 154 | { | 156 | { |
| 155 | append_cmd(desc, command | len); | 157 | append_cmd(desc, command | len); |
| @@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, | |||
| 157 | } | 159 | } |
| 158 | 160 | ||
| 159 | /* Write length after pointer, rather than inside command */ | 161 | /* Write length after pointer, rather than inside command */ |
| 160 | static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, | 162 | static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr, |
| 161 | unsigned int len, u32 command) | 163 | unsigned int len, u32 command) |
| 162 | { | 164 | { |
| 163 | append_cmd(desc, command); | 165 | append_cmd(desc, command); |
| @@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, | |||
| 166 | append_cmd(desc, len); | 168 | append_cmd(desc, len); |
| 167 | } | 169 | } |
| 168 | 170 | ||
| 169 | static inline void append_cmd_data(u32 *desc, void *data, int len, | 171 | static inline void append_cmd_data(u32 * const desc, void *data, int len, |
| 170 | u32 command) | 172 | u32 command) |
| 171 | { | 173 | { |
| 172 | append_cmd(desc, command | IMMEDIATE | len); | 174 | append_cmd(desc, command | IMMEDIATE | len); |
| @@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len, | |||
| 174 | } | 176 | } |
| 175 | 177 | ||
| 176 | #define APPEND_CMD_RET(cmd, op) \ | 178 | #define APPEND_CMD_RET(cmd, op) \ |
| 177 | static inline u32 *append_##cmd(u32 *desc, u32 options) \ | 179 | static inline u32 *append_##cmd(u32 * const desc, u32 options) \ |
| 178 | { \ | 180 | { \ |
| 179 | u32 *cmd = desc_end(desc); \ | 181 | u32 *cmd = desc_end(desc); \ |
| 180 | PRINT_POS; \ | 182 | PRINT_POS; \ |
| @@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \ | |||
| 184 | APPEND_CMD_RET(jump, JUMP) | 186 | APPEND_CMD_RET(jump, JUMP) |
| 185 | APPEND_CMD_RET(move, MOVE) | 187 | APPEND_CMD_RET(move, MOVE) |
| 186 | 188 | ||
| 187 | static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) | 189 | static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) |
| 188 | { | 190 | { |
| 189 | *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) | | 191 | *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) | |
| 190 | (desc_len(desc) - (jump_cmd - desc))); | 192 | (desc_len(desc) - (jump_cmd - desc))); |
| 191 | } | 193 | } |
| 192 | 194 | ||
| 193 | static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) | 195 | static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd) |
| 194 | { | 196 | { |
| 195 | u32 val = caam32_to_cpu(*move_cmd); | 197 | u32 val = caam32_to_cpu(*move_cmd); |
| 196 | 198 | ||
| @@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) | |||
| 200 | } | 202 | } |
| 201 | 203 | ||
| 202 | #define APPEND_CMD(cmd, op) \ | 204 | #define APPEND_CMD(cmd, op) \ |
| 203 | static inline void append_##cmd(u32 *desc, u32 options) \ | 205 | static inline void append_##cmd(u32 * const desc, u32 options) \ |
| 204 | { \ | 206 | { \ |
| 205 | PRINT_POS; \ | 207 | PRINT_POS; \ |
| 206 | append_cmd(desc, CMD_##op | options); \ | 208 | append_cmd(desc, CMD_##op | options); \ |
| @@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \ | |||
| 208 | APPEND_CMD(operation, OPERATION) | 210 | APPEND_CMD(operation, OPERATION) |
| 209 | 211 | ||
| 210 | #define APPEND_CMD_LEN(cmd, op) \ | 212 | #define APPEND_CMD_LEN(cmd, op) \ |
| 211 | static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \ | 213 | static inline void append_##cmd(u32 * const desc, unsigned int len, \ |
| 214 | u32 options) \ | ||
| 212 | { \ | 215 | { \ |
| 213 | PRINT_POS; \ | 216 | PRINT_POS; \ |
| 214 | append_cmd(desc, CMD_##op | len | options); \ | 217 | append_cmd(desc, CMD_##op | len | options); \ |
| @@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD) | |||
| 220 | APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) | 223 | APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) |
| 221 | 224 | ||
| 222 | #define APPEND_CMD_PTR(cmd, op) \ | 225 | #define APPEND_CMD_PTR(cmd, op) \ |
| 223 | static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \ | 226 | static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ |
| 224 | u32 options) \ | 227 | unsigned int len, u32 options) \ |
| 225 | { \ | 228 | { \ |
| 226 | PRINT_POS; \ | 229 | PRINT_POS; \ |
| 227 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ | 230 | append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ |
| @@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD) | |||
| 231 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) | 234 | APPEND_CMD_PTR(fifo_load, FIFO_LOAD) |
| 232 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) | 235 | APPEND_CMD_PTR(fifo_store, FIFO_STORE) |
| 233 | 236 | ||
| 234 | static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len, | 237 | static inline void append_store(u32 * const desc, dma_addr_t ptr, |
| 235 | u32 options) | 238 | unsigned int len, u32 options) |
| 236 | { | 239 | { |
| 237 | u32 cmd_src; | 240 | u32 cmd_src; |
| 238 | 241 | ||
| @@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len, | |||
| 249 | } | 252 | } |
| 250 | 253 | ||
| 251 | #define APPEND_SEQ_PTR_INTLEN(cmd, op) \ | 254 | #define APPEND_SEQ_PTR_INTLEN(cmd, op) \ |
| 252 | static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \ | 255 | static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \ |
| 256 | dma_addr_t ptr, \ | ||
| 253 | unsigned int len, \ | 257 | unsigned int len, \ |
| 254 | u32 options) \ | 258 | u32 options) \ |
| 255 | { \ | 259 | { \ |
| @@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN) | |||
| 263 | APPEND_SEQ_PTR_INTLEN(out, OUT) | 267 | APPEND_SEQ_PTR_INTLEN(out, OUT) |
| 264 | 268 | ||
| 265 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ | 269 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ |
| 266 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | 270 | static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ |
| 267 | unsigned int len, u32 options) \ | 271 | unsigned int len, u32 options) \ |
| 268 | { \ | 272 | { \ |
| 269 | PRINT_POS; \ | 273 | PRINT_POS; \ |
| @@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD); | |||
| 273 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); | 277 | APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); |
| 274 | 278 | ||
| 275 | #define APPEND_CMD_PTR_EXTLEN(cmd, op) \ | 279 | #define APPEND_CMD_PTR_EXTLEN(cmd, op) \ |
| 276 | static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \ | 280 | static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \ |
| 277 | unsigned int len, u32 options) \ | 281 | unsigned int len, u32 options) \ |
| 278 | { \ | 282 | { \ |
| 279 | PRINT_POS; \ | 283 | PRINT_POS; \ |
| @@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR) | |||
| 287 | * the size of its type | 291 | * the size of its type |
| 288 | */ | 292 | */ |
| 289 | #define APPEND_CMD_PTR_LEN(cmd, op, type) \ | 293 | #define APPEND_CMD_PTR_LEN(cmd, op, type) \ |
| 290 | static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \ | 294 | static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ |
| 291 | type len, u32 options) \ | 295 | type len, u32 options) \ |
| 292 | { \ | 296 | { \ |
| 293 | PRINT_POS; \ | 297 | PRINT_POS; \ |
| @@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) | |||
| 304 | * from length of immediate data provided, e.g., split keys | 308 | * from length of immediate data provided, e.g., split keys |
| 305 | */ | 309 | */ |
| 306 | #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ | 310 | #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ |
| 307 | static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | 311 | static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ |
| 308 | unsigned int data_len, \ | 312 | unsigned int data_len, \ |
| 309 | unsigned int len, u32 options) \ | 313 | unsigned int len, u32 options) \ |
| 310 | { \ | 314 | { \ |
| @@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ | |||
| 315 | APPEND_CMD_PTR_TO_IMM2(key, KEY); | 319 | APPEND_CMD_PTR_TO_IMM2(key, KEY); |
| 316 | 320 | ||
| 317 | #define APPEND_CMD_RAW_IMM(cmd, op, type) \ | 321 | #define APPEND_CMD_RAW_IMM(cmd, op, type) \ |
| 318 | static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ | 322 | static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \ |
| 319 | u32 options) \ | 323 | u32 options) \ |
| 320 | { \ | 324 | { \ |
| 321 | PRINT_POS; \ | 325 | PRINT_POS; \ |
| @@ -426,3 +430,64 @@ do { \ | |||
| 426 | APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) | 430 | APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) |
| 427 | #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \ | 431 | #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \ |
| 428 | APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) | 432 | APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) |
| 433 | |||
| 434 | /** | ||
| 435 | * struct alginfo - Container for algorithm details | ||
| 436 | * @algtype: algorithm selector; for valid values, see documentation of the | ||
| 437 | * functions where it is used. | ||
| 438 | * @keylen: length of the provided algorithm key, in bytes | ||
| 439 | * @keylen_pad: padded length of the provided algorithm key, in bytes | ||
| 440 | * @key: address where algorithm key resides; virtual address if key_inline | ||
| 441 | * is true, dma (bus) address if key_inline is false. | ||
| 442 | * @key_inline: true - key can be inlined in the descriptor; false - key is | ||
| 443 | * referenced by the descriptor | ||
| 444 | */ | ||
| 445 | struct alginfo { | ||
| 446 | u32 algtype; | ||
| 447 | unsigned int keylen; | ||
| 448 | unsigned int keylen_pad; | ||
| 449 | union { | ||
| 450 | dma_addr_t key_dma; | ||
| 451 | void *key_virt; | ||
| 452 | }; | ||
| 453 | bool key_inline; | ||
| 454 | }; | ||
| 455 | |||
| 456 | /** | ||
| 457 | * desc_inline_query() - Indicate which data items can be inlined | ||
| 458 | * and which must be referenced in a shared descriptor. | ||
| 459 | * @sd_base_len: Shared descriptor base length - bytes consumed by the commands, | ||
| 460 | * excluding the data items to be inlined (or corresponding | ||
| 461 | * pointer if an item is not inlined). Each cnstr_* function that | ||
| 462 | * generates descriptors should have a define specifying the | ||
| 463 | * corresponding length. | ||
| 464 | * @jd_len: Maximum length of the job descriptor(s) that will be used | ||
| 465 | * together with the shared descriptor. | ||
| 466 | * @data_len: Array of lengths of the data items to be inlined | ||
| 467 | * @inl_mask: 32-bit mask with bit x = 1 if data item x can be inlined, 0 | ||
| 468 | * otherwise. | ||
| 469 | * @count: Number of data items (size of @data_len array); must be <= 32 | ||
| 470 | * | ||
| 471 | * Return: 0 if data can be inlined / referenced, negative value if not. If 0, | ||
| 472 | * check @inl_mask for details. | ||
| 473 | */ | ||
| 474 | static inline int desc_inline_query(unsigned int sd_base_len, | ||
| 475 | unsigned int jd_len, unsigned int *data_len, | ||
| 476 | u32 *inl_mask, unsigned int count) | ||
| 477 | { | ||
| 478 | int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len); | ||
| 479 | unsigned int i; | ||
| 480 | |||
| 481 | *inl_mask = 0; | ||
| 482 | for (i = 0; (i < count) && (rem_bytes > 0); i++) { | ||
| 483 | if (rem_bytes - (int)(data_len[i] + | ||
| 484 | (count - i - 1) * CAAM_PTR_SZ) >= 0) { | ||
| 485 | rem_bytes -= data_len[i]; | ||
| 486 | *inl_mask |= (1 << i); | ||
| 487 | } else { | ||
| 488 | rem_bytes -= CAAM_PTR_SZ; | ||
| 489 | } | ||
| 490 | } | ||
| 491 | |||
| 492 | return (rem_bytes >= 0) ? 0 : -1; | ||
| 493 | } | ||
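
The inlining query above is easiest to read from the caller's side. A minimal sketch, not part of this patch: DESC_BASE_LEN and the two struct alginfo instances are assumed to be provided by the calling driver (DESC_JOB_IO_LEN is the existing job-descriptor budget); each set bit in the returned mask authorizes inlining the corresponding data item.

	/*
	 * Usage sketch only: decide whether two keys fit inline in a
	 * shared descriptor. DESC_BASE_LEN and the alginfo instances
	 * are assumptions supplied by the caller.
	 */
	static int example_inline_decision(struct alginfo *adata,
					   struct alginfo *cdata)
	{
		unsigned int data_len[2] = { adata->keylen_pad, cdata->keylen };
		u32 inl_mask;

		if (desc_inline_query(DESC_BASE_LEN, DESC_JOB_IO_LEN, data_len,
				      &inl_mask, ARRAY_SIZE(data_len)) < 0)
			return -EINVAL;

		adata->key_inline = !!(inl_mask & BIT(0));
		cdata->key_inline = !!(inl_mask & BIT(1));

		return 0;
	}
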
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 33e41ea83fcc..79a0cc70717f 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
| @@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status, | |||
| 146 | strlen(rng_err_id_list[err_id])) { | 146 | strlen(rng_err_id_list[err_id])) { |
| 147 | /* RNG-only error */ | 147 | /* RNG-only error */ |
| 148 | err_str = rng_err_id_list[err_id]; | 148 | err_str = rng_err_id_list[err_id]; |
| 149 | } else if (err_id < ARRAY_SIZE(err_id_list)) | 149 | } else { |
| 150 | err_str = err_id_list[err_id]; | 150 | err_str = err_id_list[err_id]; |
| 151 | else | 151 | } |
| 152 | snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); | ||
| 153 | 152 | ||
| 154 | /* | 153 | /* |
| 155 | * CCB ICV check failures are part of normal operation life; | 154 | * CCB ICV check failures are part of normal operation life; |
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 5d4c05074a5c..e2bcacc1a921 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
| @@ -41,6 +41,7 @@ struct caam_drv_private_jr { | |||
| 41 | struct device *dev; | 41 | struct device *dev; |
| 42 | int ridx; | 42 | int ridx; |
| 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
| 44 | struct tasklet_struct irqtask; | ||
| 44 | int irq; /* One per queue */ | 45 | int irq; /* One per queue */ |
| 45 | 46 | ||
| 46 | /* Number of scatterlist crypt transforms active on the JobR */ | 47 | /* Number of scatterlist crypt transforms active on the JobR */ |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 757c27f9953d..c8604dfadbf5 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev) | |||
| 73 | 73 | ||
| 74 | ret = caam_reset_hw_jr(dev); | 74 | ret = caam_reset_hw_jr(dev); |
| 75 | 75 | ||
| 76 | tasklet_kill(&jrp->irqtask); | ||
| 77 | |||
| 76 | /* Release interrupt */ | 78 | /* Release interrupt */ |
| 77 | free_irq(jrp->irq, dev); | 79 | free_irq(jrp->irq, dev); |
| 78 | 80 | ||
| @@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | |||
| 128 | 130 | ||
| 129 | /* | 131 | /* |
| 130 | * Check the output ring for ready responses, kick | 132 | * Check the output ring for ready responses, kick |
| 131 | * the threaded irq if jobs done. | 133 | * tasklet if jobs done. |
| 132 | */ | 134 | */ |
| 133 | irqstate = rd_reg32(&jrp->rregs->jrintstatus); | 135 | irqstate = rd_reg32(&jrp->rregs->jrintstatus); |
| 134 | if (!irqstate) | 136 | if (!irqstate) |
| @@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | |||
| 150 | /* Have valid interrupt at this point, just ACK and trigger */ | 152 | /* Have valid interrupt at this point, just ACK and trigger */ |
| 151 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); | 153 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); |
| 152 | 154 | ||
| 153 | return IRQ_WAKE_THREAD; | 155 | preempt_disable(); |
| 156 | tasklet_schedule(&jrp->irqtask); | ||
| 157 | preempt_enable(); | ||
| 158 | |||
| 159 | return IRQ_HANDLED; | ||
| 154 | } | 160 | } |
| 155 | 161 | ||
| 156 | static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) | 162 | /* Deferred service handler, run as interrupt-fired tasklet */ |
| 163 | static void caam_jr_dequeue(unsigned long devarg) | ||
| 157 | { | 164 | { |
| 158 | int hw_idx, sw_idx, i, head, tail; | 165 | int hw_idx, sw_idx, i, head, tail; |
| 159 | struct device *dev = st_dev; | 166 | struct device *dev = (struct device *)devarg; |
| 160 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | 167 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
| 161 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); | 168 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); |
| 162 | u32 *userdesc, userstatus; | 169 | u32 *userdesc, userstatus; |
| @@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) | |||
| 230 | 237 | ||
| 231 | /* reenable / unmask IRQs */ | 238 | /* reenable / unmask IRQs */ |
| 232 | clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); | 239 | clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); |
| 233 | |||
| 234 | return IRQ_HANDLED; | ||
| 235 | } | 240 | } |
| 236 | 241 | ||
| 237 | /** | 242 | /** |
| @@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev) | |||
| 389 | 394 | ||
| 390 | jrp = dev_get_drvdata(dev); | 395 | jrp = dev_get_drvdata(dev); |
| 391 | 396 | ||
| 397 | tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); | ||
| 398 | |||
| 392 | /* Connect job ring interrupt handler. */ | 399 | /* Connect job ring interrupt handler. */ |
| 393 | error = request_threaded_irq(jrp->irq, caam_jr_interrupt, | 400 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, |
| 394 | caam_jr_threadirq, IRQF_SHARED, | 401 | dev_name(dev), dev); |
| 395 | dev_name(dev), dev); | ||
| 396 | if (error) { | 402 | if (error) { |
| 397 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | 403 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", |
| 398 | jrp->ridx, jrp->irq); | 404 | jrp->ridx, jrp->irq); |
| @@ -454,6 +460,7 @@ out_free_inpring: | |||
| 454 | out_free_irq: | 460 | out_free_irq: |
| 455 | free_irq(jrp->irq, dev); | 461 | free_irq(jrp->irq, dev); |
| 456 | out_kill_deq: | 462 | out_kill_deq: |
| 463 | tasklet_kill(&jrp->irqtask); | ||
| 457 | return error; | 464 | return error; |
| 458 | } | 465 | } |
| 459 | 466 | ||
| @@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
| 489 | return -ENOMEM; | 496 | return -ENOMEM; |
| 490 | } | 497 | } |
| 491 | 498 | ||
| 492 | jrpriv->rregs = (struct caam_job_ring __force *)ctrl; | 499 | jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; |
| 493 | 500 | ||
| 494 | if (sizeof(dma_addr_t) == sizeof(u64)) | 501 | if (sizeof(dma_addr_t) == sizeof(u64)) |
| 495 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) | 502 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) |
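
The jr.c conversion above follows the standard hardirq/tasklet split. A minimal sketch of the pattern with illustrative names (demo_* helpers are not the driver's): the hard handler quiesces and acknowledges the source before deferring; the tasklet drains completions and unmasks once the ring is empty.

	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		struct demo_ring *r = dev_id;

		demo_mask_irq(r);		/* silence until the ring is drained */
		demo_ack_irq(r);		/* ACK the pending status */
		tasklet_schedule(&r->irqtask);	/* defer the dequeue to softirq */

		return IRQ_HANDLED;
	}

	static void demo_dequeue(unsigned long devarg)
	{
		struct demo_ring *r = (struct demo_ring *)devarg;

		/* walk completed entries, run the user callbacks ... */

		demo_unmask_irq(r);		/* allow the next interrupt */
	}
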
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index e1eaf4ff9762..1bb2816a9b4d 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
| @@ -10,6 +10,36 @@ | |||
| 10 | #include "desc_constr.h" | 10 | #include "desc_constr.h" |
| 11 | #include "key_gen.h" | 11 | #include "key_gen.h" |
| 12 | 12 | ||
| 13 | /** | ||
| 14 | * split_key_len - Compute MDHA split key length for a given algorithm | ||
| 15 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, | ||
| 16 | * SHA224, SHA256, SHA384, SHA512. | ||
| 17 | * | ||
| 18 | * Return: MDHA split key length | ||
| 19 | */ | ||
| 20 | static inline u32 split_key_len(u32 hash) | ||
| 21 | { | ||
| 22 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
| 23 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
| 24 | u32 idx; | ||
| 25 | |||
| 26 | idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; | ||
| 27 | |||
| 28 | return (u32)(mdpadlen[idx] * 2); | ||
| 29 | } | ||
| 30 | |||
| 31 | /** | ||
| 32 | * split_key_pad_len - Compute MDHA split key pad length for a given algorithm | ||
| 33 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, | ||
| 34 | * SHA224, SHA256, SHA384, SHA512. | ||
| 35 | * | ||
| 36 | * Return: MDHA split key pad length | ||
| 37 | */ | ||
| 38 | static inline u32 split_key_pad_len(u32 hash) | ||
| 39 | { | ||
| 40 | return ALIGN(split_key_len(hash), 16); | ||
| 41 | } | ||
| 42 | |||
| 13 | void split_key_done(struct device *dev, u32 *desc, u32 err, | 43 | void split_key_done(struct device *dev, u32 *desc, u32 err, |
| 14 | void *context) | 44 | void *context) |
| 15 | { | 45 | { |
| @@ -41,15 +71,29 @@ Split key generation----------------------------------------------- | |||
| 41 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | 71 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 |
| 42 | @0xffe04000 | 72 | @0xffe04000 |
| 43 | */ | 73 | */ |
| 44 | int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | 74 | int gen_split_key(struct device *jrdev, u8 *key_out, |
| 45 | int split_key_pad_len, const u8 *key_in, u32 keylen, | 75 | struct alginfo * const adata, const u8 *key_in, u32 keylen, |
| 46 | u32 alg_op) | 76 | int max_keylen) |
| 47 | { | 77 | { |
| 48 | u32 *desc; | 78 | u32 *desc; |
| 49 | struct split_key_result result; | 79 | struct split_key_result result; |
| 50 | dma_addr_t dma_addr_in, dma_addr_out; | 80 | dma_addr_t dma_addr_in, dma_addr_out; |
| 51 | int ret = -ENOMEM; | 81 | int ret = -ENOMEM; |
| 52 | 82 | ||
| 83 | adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); | ||
| 84 | adata->keylen_pad = split_key_pad_len(adata->algtype & | ||
| 85 | OP_ALG_ALGSEL_MASK); | ||
| 86 | |||
| 87 | #ifdef DEBUG | ||
| 88 | dev_err(jrdev, "split keylen %d split keylen padded %d\n", | ||
| 89 | adata->keylen, adata->keylen_pad); | ||
| 90 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | ||
| 91 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | ||
| 92 | #endif | ||
| 93 | |||
| 94 | if (adata->keylen_pad > max_keylen) | ||
| 95 | return -EINVAL; | ||
| 96 | |||
| 53 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | 97 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); |
| 54 | if (!desc) { | 98 | if (!desc) { |
| 55 | dev_err(jrdev, "unable to allocate key input memory\n"); | 99 | dev_err(jrdev, "unable to allocate key input memory\n"); |
| @@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | |||
| 63 | goto out_free; | 107 | goto out_free; |
| 64 | } | 108 | } |
| 65 | 109 | ||
| 66 | dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, | 110 | dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad, |
| 67 | DMA_FROM_DEVICE); | 111 | DMA_FROM_DEVICE); |
| 68 | if (dma_mapping_error(jrdev, dma_addr_out)) { | 112 | if (dma_mapping_error(jrdev, dma_addr_out)) { |
| 69 | dev_err(jrdev, "unable to map key output memory\n"); | 113 | dev_err(jrdev, "unable to map key output memory\n"); |
| @@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | |||
| 74 | append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); | 118 | append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); |
| 75 | 119 | ||
| 76 | /* Sets MDHA up into an HMAC-INIT */ | 120 | /* Sets MDHA up into an HMAC-INIT */ |
| 77 | append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); | 121 | append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | |
| 122 | OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT | | ||
| 123 | OP_ALG_AS_INIT); | ||
| 78 | 124 | ||
| 79 | /* | 125 | /* |
| 80 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | 126 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion |
| @@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | |||
| 87 | * FIFO_STORE with the explicit split-key content store | 133 | * FIFO_STORE with the explicit split-key content store |
| 88 | * (0x26 output type) | 134 | * (0x26 output type) |
| 89 | */ | 135 | */ |
| 90 | append_fifo_store(desc, dma_addr_out, split_key_len, | 136 | append_fifo_store(desc, dma_addr_out, adata->keylen, |
| 91 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | 137 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); |
| 92 | 138 | ||
| 93 | #ifdef DEBUG | 139 | #ifdef DEBUG |
| @@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | |||
| 108 | #ifdef DEBUG | 154 | #ifdef DEBUG |
| 109 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 155 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
| 110 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | 156 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, |
| 111 | split_key_pad_len, 1); | 157 | adata->keylen_pad, 1); |
| 112 | #endif | 158 | #endif |
| 113 | } | 159 | } |
| 114 | 160 | ||
| 115 | dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, | 161 | dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad, |
| 116 | DMA_FROM_DEVICE); | 162 | DMA_FROM_DEVICE); |
| 117 | out_unmap_in: | 163 | out_unmap_in: |
| 118 | dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); | 164 | dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); |
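
For concreteness, the sizes that split_key_len() and split_key_pad_len() yield follow directly from the mdpadlen[] pads and the 16-byte ALIGN shown above; the table below is an illustration only, not code from this patch.

	/* Illustration: split key sizes implied by mdpadlen[] and ALIGN */
	static const struct {
		const char *alg;
		u32 keylen;		/* split_key_len()     */
		u32 keylen_pad;		/* split_key_pad_len() */
	} split_key_examples[] = {
		{ "md5",    32,  32 },	/* pad 16 * 2, already 16-aligned  */
		{ "sha1",   40,  48 },	/* pad 20 * 2 = 40, ALIGN(40, 16)  */
		{ "sha256", 64,  64 },	/* pad 32 * 2                      */
		{ "sha512", 128, 128 },	/* pad 64 * 2                      */
	};
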
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h index c5588f6d8109..4628f389eb64 100644 --- a/drivers/crypto/caam/key_gen.h +++ b/drivers/crypto/caam/key_gen.h | |||
| @@ -12,6 +12,6 @@ struct split_key_result { | |||
| 12 | 12 | ||
| 13 | void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); | 13 | void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); |
| 14 | 14 | ||
| 15 | int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, | 15 | int gen_split_key(struct device *jrdev, u8 *key_out, |
| 16 | int split_key_pad_len, const u8 *key_in, u32 keylen, | 16 | struct alginfo * const adata, const u8 *key_in, u32 keylen, |
| 17 | u32 alg_op); | 17 | int max_keylen); |
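
Under the new prototype the split-key geometry is computed inside gen_split_key() and returned through @adata. A hedged caller sketch (struct demo_ctx and the buffer sizing via CAAM_MAX_HASH_KEY_SIZE are assumptions for illustration): only the OP_ALG_ALGSEL_* bits of algtype are consulted, per the masking in key_gen.c above.

	/* Illustrative caller; demo_ctx stands in for the real context. */
	static int demo_setkey(struct device *jrdev, struct demo_ctx *ctx,
			       const u8 *key, unsigned int keylen)
	{
		ctx->adata.algtype = OP_ALG_ALGSEL_SHA1;	/* ALGSEL bits only */

		return gen_split_key(jrdev, ctx->key, &ctx->adata, key, keylen,
				     CAAM_MAX_HASH_KEY_SIZE);
	}
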
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index 41cd5a356d05..6afa20c4a013 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -7,7 +7,11 @@ | |||
| 7 | 7 | ||
| 8 | #include "regs.h" | 8 | #include "regs.h" |
| 9 | 9 | ||
| 10 | struct sec4_sg_entry; | 10 | struct sec4_sg_entry { |
| 11 | u64 ptr; | ||
| 12 | u32 len; | ||
| 13 | u32 bpid_offset; | ||
| 14 | }; | ||
| 11 | 15 | ||
| 12 | /* | 16 | /* |
| 13 | * convert single dma address to h/w link table format | 17 | * convert single dma address to h/w link table format |
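
With the entry layout now spelled out rather than opaque, one hardware link-table element can be filled from a DMA segment as sketched below. The helper name is a hypothetical, and the SEC4_SG_OFFSET_MASK usage is an assumption; the cpu_to_caam*() converters come from regs.h.

	/* Hypothetical helper: fill one h/w link table entry. */
	static inline void demo_fill_sec4_sg(struct sec4_sg_entry *e,
					     dma_addr_t dma, u32 len, u16 offset)
	{
		e->ptr = cpu_to_caam_dma64(dma);	/* 64-bit bus address */
		e->len = cpu_to_caam32(len);
		e->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
	}
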
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 8d2dbacc6161..7bc09989e18a 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c | |||
| @@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp) | |||
| 404 | goto e_pool; | 404 | goto e_pool; |
| 405 | } | 405 | } |
| 406 | 406 | ||
| 407 | /* Initialize the queues used to wait for KSB space and suspend */ | ||
| 408 | init_waitqueue_head(&ccp->sb_queue); | ||
| 409 | init_waitqueue_head(&ccp->suspend_queue); | ||
| 410 | |||
| 411 | dev_dbg(dev, "Starting threads...\n"); | 407 | dev_dbg(dev, "Starting threads...\n"); |
| 412 | /* Create a kthread for each queue */ | 408 | /* Create a kthread for each queue */ |
| 413 | for (i = 0; i < ccp->cmd_q_count; i++) { | 409 | for (i = 0; i < ccp->cmd_q_count; i++) { |
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index faf3cb3ddce2..e2ce8190ecc9 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
| @@ -21,6 +21,12 @@ | |||
| 21 | 21 | ||
| 22 | #include "ccp-dev.h" | 22 | #include "ccp-dev.h" |
| 23 | 23 | ||
| 24 | /* Allocate the requested number of contiguous LSB slots | ||
| 25 | * from the LSB bitmap. Look in the private range for this | ||
| 26 | * queue first; failing that, check the shared area. | ||
| 27 | * If no space is available, wait until some is freed. | ||
| 28 | * Return: first slot number | ||
| 29 | */ | ||
| 24 | static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) | 30 | static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) |
| 25 | { | 31 | { |
| 26 | struct ccp_device *ccp; | 32 | struct ccp_device *ccp; |
| @@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) | |||
| 50 | bitmap_set(ccp->lsbmap, start, count); | 56 | bitmap_set(ccp->lsbmap, start, count); |
| 51 | 57 | ||
| 52 | mutex_unlock(&ccp->sb_mutex); | 58 | mutex_unlock(&ccp->sb_mutex); |
| 53 | return start * LSB_ITEM_SIZE; | 59 | return start; |
| 54 | } | 60 | } |
| 55 | 61 | ||
| 56 | ccp->sb_avail = 0; | 62 | ccp->sb_avail = 0; |
| @@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) | |||
| 63 | } | 69 | } |
| 64 | } | 70 | } |
| 65 | 71 | ||
| 72 | /* Free a number of LSB slots from the bitmap, starting at | ||
| 73 | * the indicated starting slot number. | ||
| 74 | */ | ||
| 66 | static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, | 75 | static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, |
| 67 | unsigned int count) | 76 | unsigned int count) |
| 68 | { | 77 | { |
| 69 | int lsbno = start / LSB_SIZE; | ||
| 70 | |||
| 71 | if (!start) | 78 | if (!start) |
| 72 | return; | 79 | return; |
| 73 | 80 | ||
| 74 | if (cmd_q->lsb == lsbno) { | 81 | if (cmd_q->lsb == start) { |
| 75 | /* An entry from the private LSB */ | 82 | /* An entry from the private LSB */ |
| 76 | bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count); | 83 | bitmap_clear(cmd_q->lsbmap, start, count); |
| 77 | } else { | 84 | } else { |
| 78 | /* From the shared LSBs */ | 85 | /* From the shared LSBs */ |
| 79 | struct ccp_device *ccp = cmd_q->ccp; | 86 | struct ccp_device *ccp = cmd_q->ccp; |
| @@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op) | |||
| 396 | CCP5_CMD_PROT(&desc) = 0; | 403 | CCP5_CMD_PROT(&desc) = 0; |
| 397 | 404 | ||
| 398 | function.raw = 0; | 405 | function.raw = 0; |
| 399 | CCP_RSA_SIZE(&function) = op->u.rsa.mod_size; | 406 | CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3; |
| 400 | CCP5_CMD_FUNCTION(&desc) = function.raw; | 407 | CCP5_CMD_FUNCTION(&desc) = function.raw; |
| 401 | 408 | ||
| 402 | CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; | 409 | CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; |
| @@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op) | |||
| 411 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); | 418 | CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); |
| 412 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | 419 | CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; |
| 413 | 420 | ||
| 414 | /* Key (Exponent) is in external memory */ | 421 | /* Exponent is in LSB memory */ |
| 415 | CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); | 422 | CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE; |
| 416 | CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); | 423 | CCP5_CMD_KEY_HI(&desc) = 0; |
| 417 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; | 424 | CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; |
| 418 | 425 | ||
| 419 | return ccp5_do_cmd(&desc, op->cmd_q); | 426 | return ccp5_do_cmd(&desc, op->cmd_q); |
| 420 | } | 427 | } |
| @@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp) | |||
| 751 | goto e_pool; | 758 | goto e_pool; |
| 752 | } | 759 | } |
| 753 | 760 | ||
| 754 | /* Initialize the queue used to suspend */ | ||
| 755 | init_waitqueue_head(&ccp->suspend_queue); | ||
| 756 | |||
| 757 | dev_dbg(dev, "Loading LSB map...\n"); | 761 | dev_dbg(dev, "Loading LSB map...\n"); |
| 758 | /* Copy the private LSB mask to the public registers */ | 762 | /* Copy the private LSB mask to the public registers */ |
| 759 | status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); | 763 | status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); |
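
The allocator change above alters the unit of the returned value: ccp_lsb_alloc() now hands back a slot number rather than a byte offset, so the conversion happens only where a descriptor needs an address. A sketch of the revised contract, mirroring the RSA path in this patch (error handling elided, so illustrative only):

	static void demo_program_key(struct ccp_cmd_queue *cmd_q,
				     struct ccp_op *op, struct ccp5_desc *desc)
	{
		op->sb_key = ccp_lsb_alloc(cmd_q, 2);		/* slot number */

		CCP5_CMD_KEY_LO(desc) = op->sb_key * LSB_ITEM_SIZE; /* byte offset */
		CCP5_CMD_KEY_HI(desc) = 0;
		CCP5_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SB;

		/* ... later, the same slot number goes back: */
		ccp_lsb_free(cmd_q, op->sb_key, 2);
	}
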
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index cafa633aae10..511ab042b5e7 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
| @@ -41,7 +41,7 @@ struct ccp_tasklet_data { | |||
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | /* Human-readable error strings */ | 43 | /* Human-readable error strings */ |
| 44 | char *ccp_error_codes[] = { | 44 | static char *ccp_error_codes[] = { |
| 45 | "", | 45 | "", |
| 46 | "ERR 01: ILLEGAL_ENGINE", | 46 | "ERR 01: ILLEGAL_ENGINE", |
| 47 | "ERR 02: ILLEGAL_KEY_ID", | 47 | "ERR 02: ILLEGAL_KEY_ID", |
| @@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) | |||
| 478 | ccp->sb_count = KSB_COUNT; | 478 | ccp->sb_count = KSB_COUNT; |
| 479 | ccp->sb_start = 0; | 479 | ccp->sb_start = 0; |
| 480 | 480 | ||
| 481 | /* Initialize the wait queues */ | ||
| 482 | init_waitqueue_head(&ccp->sb_queue); | ||
| 483 | init_waitqueue_head(&ccp->suspend_queue); | ||
| 484 | |||
| 481 | ccp->ord = ccp_increment_unit_ordinal(); | 485 | ccp->ord = ccp_increment_unit_ordinal(); |
| 482 | snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); | 486 | snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); |
| 483 | snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); | 487 | snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); |
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index da5f4a678083..830f35e6005f 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
| @@ -278,7 +278,7 @@ struct ccp_cmd_queue { | |||
| 278 | /* Private LSB that is assigned to this queue, or -1 if none. | 278 | /* Private LSB that is assigned to this queue, or -1 if none. |
| 279 | * Bitmap for my private LSB, unused otherwise | 279 | * Bitmap for my private LSB, unused otherwise |
| 280 | */ | 280 | */ |
| 281 | unsigned int lsb; | 281 | int lsb; |
| 282 | DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE); | 282 | DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE); |
| 283 | 283 | ||
| 284 | /* Queue processing thread */ | 284 | /* Queue processing thread */ |
| @@ -515,7 +515,6 @@ struct ccp_op { | |||
| 515 | struct ccp_passthru_op passthru; | 515 | struct ccp_passthru_op passthru; |
| 516 | struct ccp_ecc_op ecc; | 516 | struct ccp_ecc_op ecc; |
| 517 | } u; | 517 | } u; |
| 518 | struct ccp_mem key; | ||
| 519 | }; | 518 | }; |
| 520 | 519 | ||
| 521 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) | 520 | static inline u32 ccp_addr_lo(struct ccp_dma_info *info) |
| @@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info) | |||
| 541 | * word 7: upper 16 bits of key pointer; key memory type | 540 | * word 7: upper 16 bits of key pointer; key memory type |
| 542 | */ | 541 | */ |
| 543 | struct dword0 { | 542 | struct dword0 { |
| 544 | __le32 soc:1; | 543 | unsigned int soc:1; |
| 545 | __le32 ioc:1; | 544 | unsigned int ioc:1; |
| 546 | __le32 rsvd1:1; | 545 | unsigned int rsvd1:1; |
| 547 | __le32 init:1; | 546 | unsigned int init:1; |
| 548 | __le32 eom:1; /* AES/SHA only */ | 547 | unsigned int eom:1; /* AES/SHA only */ |
| 549 | __le32 function:15; | 548 | unsigned int function:15; |
| 550 | __le32 engine:4; | 549 | unsigned int engine:4; |
| 551 | __le32 prot:1; | 550 | unsigned int prot:1; |
| 552 | __le32 rsvd2:7; | 551 | unsigned int rsvd2:7; |
| 553 | }; | 552 | }; |
| 554 | 553 | ||
| 555 | struct dword3 { | 554 | struct dword3 { |
| 556 | __le32 src_hi:16; | 555 | unsigned int src_hi:16; |
| 557 | __le32 src_mem:2; | 556 | unsigned int src_mem:2; |
| 558 | __le32 lsb_cxt_id:8; | 557 | unsigned int lsb_cxt_id:8; |
| 559 | __le32 rsvd1:5; | 558 | unsigned int rsvd1:5; |
| 560 | __le32 fixed:1; | 559 | unsigned int fixed:1; |
| 561 | }; | 560 | }; |
| 562 | 561 | ||
| 563 | union dword4 { | 562 | union dword4 { |
| @@ -567,18 +566,18 @@ union dword4 { | |||
| 567 | 566 | ||
| 568 | union dword5 { | 567 | union dword5 { |
| 569 | struct { | 568 | struct { |
| 570 | __le32 dst_hi:16; | 569 | unsigned int dst_hi:16; |
| 571 | __le32 dst_mem:2; | 570 | unsigned int dst_mem:2; |
| 572 | __le32 rsvd1:13; | 571 | unsigned int rsvd1:13; |
| 573 | __le32 fixed:1; | 572 | unsigned int fixed:1; |
| 574 | } fields; | 573 | } fields; |
| 575 | __le32 sha_len_hi; | 574 | __le32 sha_len_hi; |
| 576 | }; | 575 | }; |
| 577 | 576 | ||
| 578 | struct dword7 { | 577 | struct dword7 { |
| 579 | __le32 key_hi:16; | 578 | unsigned int key_hi:16; |
| 580 | __le32 key_mem:2; | 579 | unsigned int key_mem:2; |
| 581 | __le32 rsvd1:14; | 580 | unsigned int rsvd1:14; |
| 582 | }; | 581 | }; |
| 583 | 582 | ||
| 584 | struct ccp5_desc { | 583 | struct ccp5_desc { |
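
Since the descriptor bitfields above are now plain unsigned int, a cheap compile-time guard can confirm the rewrite leaves the footprint unchanged. A minimal sketch, assuming the 8-word (32-byte) layout described in the word 0..7 comment above:

	#include <linux/build_bug.h>

	/* Compile-time layout guard; 8 x 32-bit words per the comment. */
	static inline void ccp5_desc_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct dword0) != sizeof(u32));
		BUILD_BUG_ON(sizeof(struct ccp5_desc) != 8 * sizeof(u32));
	}
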
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 4ce67fb9a880..3e104f5aa0c2 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig | |||
| @@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO | |||
| 4 | select CRYPTO_SHA1 | 4 | select CRYPTO_SHA1 |
| 5 | select CRYPTO_SHA256 | 5 | select CRYPTO_SHA256 |
| 6 | select CRYPTO_SHA512 | 6 | select CRYPTO_SHA512 |
| 7 | select CRYPTO_AUTHENC | ||
| 7 | ---help--- | 8 | ---help--- |
| 8 | The Chelsio Crypto Co-processor driver for T6 adapters. | 9 | The Chelsio Crypto Co-processor driver for T6 adapters. |
| 9 | 10 | ||
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 56b153805462..2ed1e24b44a8 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c | |||
| @@ -54,6 +54,12 @@ | |||
| 54 | #include <crypto/algapi.h> | 54 | #include <crypto/algapi.h> |
| 55 | #include <crypto/hash.h> | 55 | #include <crypto/hash.h> |
| 56 | #include <crypto/sha.h> | 56 | #include <crypto/sha.h> |
| 57 | #include <crypto/authenc.h> | ||
| 58 | #include <crypto/internal/aead.h> | ||
| 59 | #include <crypto/null.h> | ||
| 60 | #include <crypto/internal/skcipher.h> | ||
| 61 | #include <crypto/aead.h> | ||
| 62 | #include <crypto/scatterwalk.h> | ||
| 57 | #include <crypto/internal/hash.h> | 63 | #include <crypto/internal/hash.h> |
| 58 | 64 | ||
| 59 | #include "t4fw_api.h" | 65 | #include "t4fw_api.h" |
| @@ -62,6 +68,11 @@ | |||
| 62 | #include "chcr_algo.h" | 68 | #include "chcr_algo.h" |
| 63 | #include "chcr_crypto.h" | 69 | #include "chcr_crypto.h" |
| 64 | 70 | ||
| 71 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) | ||
| 72 | { | ||
| 73 | return ctx->crypto_ctx->aeadctx; | ||
| 74 | } | ||
| 75 | |||
| 65 | static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) | 76 | static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) |
| 66 | { | 77 | { |
| 67 | return ctx->crypto_ctx->ablkctx; | 78 | return ctx->crypto_ctx->ablkctx; |
| @@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) | |||
| 72 | return ctx->crypto_ctx->hmacctx; | 83 | return ctx->crypto_ctx->hmacctx; |
| 73 | } | 84 | } |
| 74 | 85 | ||
| 86 | static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) | ||
| 87 | { | ||
| 88 | return gctx->ctx->gcm; | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) | ||
| 92 | { | ||
| 93 | return gctx->ctx->authenc; | ||
| 94 | } | ||
| 95 | |||
| 75 | static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) | 96 | static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) |
| 76 | { | 97 | { |
| 77 | return ctx->dev->u_ctx; | 98 | return ctx->dev->u_ctx; |
| @@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n) | |||
| 94 | return (3 * n) / 2 + (n & 1) + 2; | 115 | return (3 * n) / 2 + (n & 1) + 2; |
| 95 | } | 116 | } |
| 96 | 117 | ||
| 118 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) | ||
| 119 | { | ||
| 120 | u8 temp[SHA512_DIGEST_SIZE]; | ||
| 121 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 122 | int authsize = crypto_aead_authsize(tfm); | ||
| 123 | struct cpl_fw6_pld *fw6_pld; | ||
| 124 | int cmp = 0; | ||
| 125 | |||
| 126 | fw6_pld = (struct cpl_fw6_pld *)input; | ||
| 127 | if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || | ||
| 128 | (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { | ||
| 129 | cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize); | ||
| 130 | } else { | ||
| 131 | |||
| 132 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, | ||
| 133 | authsize, req->assoclen + | ||
| 134 | req->cryptlen - authsize); | ||
| 135 | cmp = memcmp(temp, (fw6_pld + 1), authsize); | ||
| 136 | } | ||
| 137 | if (cmp) | ||
| 138 | *err = -EBADMSG; | ||
| 139 | else | ||
| 140 | *err = 0; | ||
| 141 | } | ||
| 142 | |||
| 97 | /* | 143 | /* |
| 98 | * chcr_handle_resp - Unmap the DMA buffers associated with the request | 144 | * chcr_handle_resp - Unmap the DMA buffers associated with the request |
| 99 | * @req: crypto request | 145 | * @req: crypto request |
| 100 | */ | 146 | */ |
| 101 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | 147 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, |
| 102 | int error_status) | 148 | int err) |
| 103 | { | 149 | { |
| 104 | struct crypto_tfm *tfm = req->tfm; | 150 | struct crypto_tfm *tfm = req->tfm; |
| 105 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | 151 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
| @@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
| 109 | unsigned int digestsize, updated_digestsize; | 155 | unsigned int digestsize, updated_digestsize; |
| 110 | 156 | ||
| 111 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 157 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
| 158 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 159 | ctx_req.req.aead_req = (struct aead_request *)req; | ||
| 160 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); | ||
| 161 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst, | ||
| 162 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); | ||
| 163 | if (ctx_req.ctx.reqctx->skb) { | ||
| 164 | kfree_skb(ctx_req.ctx.reqctx->skb); | ||
| 165 | ctx_req.ctx.reqctx->skb = NULL; | ||
| 166 | } | ||
| 167 | if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { | ||
| 168 | chcr_verify_tag(ctx_req.req.aead_req, input, | ||
| 169 | &err); | ||
| 170 | ctx_req.ctx.reqctx->verify = VERIFY_HW; | ||
| 171 | } | ||
| 172 | break; | ||
| 173 | |||
| 112 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 174 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
| 113 | ctx_req.req.ablk_req = (struct ablkcipher_request *)req; | 175 | ctx_req.req.ablk_req = (struct ablkcipher_request *)req; |
| 114 | ctx_req.ctx.ablk_ctx = | 176 | ctx_req.ctx.ablk_ctx = |
| 115 | ablkcipher_request_ctx(ctx_req.req.ablk_req); | 177 | ablkcipher_request_ctx(ctx_req.req.ablk_req); |
| 116 | if (!error_status) { | 178 | if (!err) { |
| 117 | fw6_pld = (struct cpl_fw6_pld *)input; | 179 | fw6_pld = (struct cpl_fw6_pld *)input; |
| 118 | memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], | 180 | memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], |
| 119 | AES_BLOCK_SIZE); | 181 | AES_BLOCK_SIZE); |
| 120 | } | 182 | } |
| 121 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, | 183 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, |
| 122 | ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE); | 184 | ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE); |
| 123 | if (ctx_req.ctx.ablk_ctx->skb) { | 185 | if (ctx_req.ctx.ablk_ctx->skb) { |
| 124 | kfree_skb(ctx_req.ctx.ablk_ctx->skb); | 186 | kfree_skb(ctx_req.ctx.ablk_ctx->skb); |
| 125 | ctx_req.ctx.ablk_ctx->skb = NULL; | 187 | ctx_req.ctx.ablk_ctx->skb = NULL; |
| @@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
| 138 | updated_digestsize = SHA256_DIGEST_SIZE; | 200 | updated_digestsize = SHA256_DIGEST_SIZE; |
| 139 | else if (digestsize == SHA384_DIGEST_SIZE) | 201 | else if (digestsize == SHA384_DIGEST_SIZE) |
| 140 | updated_digestsize = SHA512_DIGEST_SIZE; | 202 | updated_digestsize = SHA512_DIGEST_SIZE; |
| 141 | if (ctx_req.ctx.ahash_ctx->skb) | 203 | if (ctx_req.ctx.ahash_ctx->skb) { |
| 204 | kfree_skb(ctx_req.ctx.ahash_ctx->skb); | ||
| 142 | ctx_req.ctx.ahash_ctx->skb = NULL; | 205 | ctx_req.ctx.ahash_ctx->skb = NULL; |
| 206 | } | ||
| 143 | if (ctx_req.ctx.ahash_ctx->result == 1) { | 207 | if (ctx_req.ctx.ahash_ctx->result == 1) { |
| 144 | ctx_req.ctx.ahash_ctx->result = 0; | 208 | ctx_req.ctx.ahash_ctx->result = 0; |
| 145 | memcpy(ctx_req.req.ahash_req->result, input + | 209 | memcpy(ctx_req.req.ahash_req->result, input + |
| @@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
| 150 | sizeof(struct cpl_fw6_pld), | 214 | sizeof(struct cpl_fw6_pld), |
| 151 | updated_digestsize); | 215 | updated_digestsize); |
| 152 | } | 216 | } |
| 153 | kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr); | ||
| 154 | ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL; | ||
| 155 | break; | 217 | break; |
| 156 | } | 218 | } |
| 157 | return 0; | 219 | return err; |
| 158 | } | 220 | } |
| 159 | 221 | ||
| 160 | /* | 222 | /* |
| @@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) | |||
| 178 | return flits + sgl_len(cnt); | 240 | return flits + sgl_len(cnt); |
| 179 | } | 241 | } |
| 180 | 242 | ||
| 181 | static struct shash_desc *chcr_alloc_shash(unsigned int ds) | 243 | static inline void get_aes_decrypt_key(unsigned char *dec_key, |
| 244 | const unsigned char *key, | ||
| 245 | unsigned int keylength) | ||
| 246 | { | ||
| 247 | u32 temp; | ||
| 248 | u32 w_ring[MAX_NK]; | ||
| 249 | int i, j, k; | ||
| 250 | u8 nr, nk; | ||
| 251 | |||
| 252 | switch (keylength) { | ||
| 253 | case AES_KEYLENGTH_128BIT: | ||
| 254 | nk = KEYLENGTH_4BYTES; | ||
| 255 | nr = NUMBER_OF_ROUNDS_10; | ||
| 256 | break; | ||
| 257 | case AES_KEYLENGTH_192BIT: | ||
| 258 | nk = KEYLENGTH_6BYTES; | ||
| 259 | nr = NUMBER_OF_ROUNDS_12; | ||
| 260 | break; | ||
| 261 | case AES_KEYLENGTH_256BIT: | ||
| 262 | nk = KEYLENGTH_8BYTES; | ||
| 263 | nr = NUMBER_OF_ROUNDS_14; | ||
| 264 | break; | ||
| 265 | default: | ||
| 266 | return; | ||
| 267 | } | ||
| 268 | for (i = 0; i < nk; i++) | ||
| 269 | w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); | ||
| 270 | |||
| 271 | i = 0; | ||
| 272 | temp = w_ring[nk - 1]; | ||
| 273 | while (i + nk < (nr + 1) * 4) { | ||
| 274 | if (!(i % nk)) { | ||
| 275 | /* RotWord(temp) */ | ||
| 276 | temp = (temp << 8) | (temp >> 24); | ||
| 277 | temp = aes_ks_subword(temp); | ||
| 278 | temp ^= round_constant[i / nk]; | ||
| 279 | } else if (nk == 8 && (i % 4 == 0)) { | ||
| 280 | temp = aes_ks_subword(temp); | ||
| 281 | } | ||
| 282 | w_ring[i % nk] ^= temp; | ||
| 283 | temp = w_ring[i % nk]; | ||
| 284 | i++; | ||
| 285 | } | ||
| 286 | i--; | ||
| 287 | for (k = 0, j = i % nk; k < nk; k++) { | ||
| 288 | *((u32 *)dec_key + k) = htonl(w_ring[j]); | ||
| 289 | j--; | ||
| 290 | if (j < 0) | ||
| 291 | j += nk; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | static struct crypto_shash *chcr_alloc_shash(unsigned int ds) | ||
| 182 | { | 296 | { |
| 183 | struct crypto_shash *base_hash = NULL; | 297 | struct crypto_shash *base_hash = NULL; |
| 184 | struct shash_desc *desc; | ||
| 185 | 298 | ||
| 186 | switch (ds) { | 299 | switch (ds) { |
| 187 | case SHA1_DIGEST_SIZE: | 300 | case SHA1_DIGEST_SIZE: |
| 188 | base_hash = crypto_alloc_shash("sha1-generic", 0, 0); | 301 | base_hash = crypto_alloc_shash("sha1", 0, 0); |
| 189 | break; | 302 | break; |
| 190 | case SHA224_DIGEST_SIZE: | 303 | case SHA224_DIGEST_SIZE: |
| 191 | base_hash = crypto_alloc_shash("sha224-generic", 0, 0); | 304 | base_hash = crypto_alloc_shash("sha224", 0, 0); |
| 192 | break; | 305 | break; |
| 193 | case SHA256_DIGEST_SIZE: | 306 | case SHA256_DIGEST_SIZE: |
| 194 | base_hash = crypto_alloc_shash("sha256-generic", 0, 0); | 307 | base_hash = crypto_alloc_shash("sha256", 0, 0); |
| 195 | break; | 308 | break; |
| 196 | case SHA384_DIGEST_SIZE: | 309 | case SHA384_DIGEST_SIZE: |
| 197 | base_hash = crypto_alloc_shash("sha384-generic", 0, 0); | 310 | base_hash = crypto_alloc_shash("sha384", 0, 0); |
| 198 | break; | 311 | break; |
| 199 | case SHA512_DIGEST_SIZE: | 312 | case SHA512_DIGEST_SIZE: |
| 200 | base_hash = crypto_alloc_shash("sha512-generic", 0, 0); | 313 | base_hash = crypto_alloc_shash("sha512", 0, 0); |
| 201 | break; | 314 | break; |
| 202 | } | 315 | } |
| 203 | if (IS_ERR(base_hash)) { | ||
| 204 | pr_err("Can not allocate sha-generic algo.\n"); | ||
| 205 | return (void *)base_hash; | ||
| 206 | } | ||
| 207 | 316 | ||
| 208 | desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash), | 317 | return base_hash; |
| 209 | GFP_KERNEL); | ||
| 210 | if (!desc) | ||
| 211 | return ERR_PTR(-ENOMEM); | ||
| 212 | desc->tfm = base_hash; | ||
| 213 | desc->flags = crypto_shash_get_flags(base_hash); | ||
| 214 | return desc; | ||
| 215 | } | 318 | } |
| 216 | 319 | ||
| 217 | static int chcr_compute_partial_hash(struct shash_desc *desc, | 320 | static int chcr_compute_partial_hash(struct shash_desc *desc, |
| @@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm) | |||
| 279 | struct chcr_alg_template *chcr_crypto_alg = | 382 | struct chcr_alg_template *chcr_crypto_alg = |
| 280 | container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, | 383 | container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, |
| 281 | alg.hash); | 384 | alg.hash); |
| 282 | if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) == | 385 | if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) |
| 283 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) | ||
| 284 | return 1; | 386 | return 1; |
| 285 | return 0; | 387 | return 0; |
| 286 | } | 388 | } |
| 287 | 389 | ||
| 288 | static inline unsigned int ch_nents(struct scatterlist *sg, | ||
| 289 | unsigned int *total_size) | ||
| 290 | { | ||
| 291 | unsigned int nents; | ||
| 292 | |||
| 293 | for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) { | ||
| 294 | nents++; | ||
| 295 | *total_size += sg->length; | ||
| 296 | } | ||
| 297 | return nents; | ||
| 298 | } | ||
| 299 | |||
| 300 | static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, | 390 | static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, |
| 301 | struct scatterlist *sg, | 391 | struct scatterlist *sg, |
| 302 | struct phys_sge_parm *sg_param) | 392 | struct phys_sge_parm *sg_param) |
| 303 | { | 393 | { |
| 304 | struct phys_sge_pairs *to; | 394 | struct phys_sge_pairs *to; |
| 305 | unsigned int out_buf_size = sg_param->obsize; | 395 | int out_buf_size = sg_param->obsize; |
| 306 | unsigned int nents = sg_param->nents, i, j, tot_len = 0; | 396 | unsigned int nents = sg_param->nents, i, j = 0; |
| 307 | 397 | ||
| 308 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | 398 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) |
| 309 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); | 399 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); |
| @@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, | |||
| 321 | sizeof(struct cpl_rx_phys_dsgl)); | 411 | sizeof(struct cpl_rx_phys_dsgl)); |
| 322 | 412 | ||
| 323 | for (i = 0; nents; to++) { | 413 | for (i = 0; nents; to++) { |
| 324 | for (j = i; (nents && (j < (8 + i))); j++, nents--) { | 414 | for (j = 0; j < 8 && nents; j++, nents--) { |
| 325 | to->len[j] = htons(sg->length); | 415 | out_buf_size -= sg_dma_len(sg); |
| 416 | to->len[j] = htons(sg_dma_len(sg)); | ||
| 326 | to->addr[j] = cpu_to_be64(sg_dma_address(sg)); | 417 | to->addr[j] = cpu_to_be64(sg_dma_address(sg)); |
| 327 | if (out_buf_size) { | ||
| 328 | if (tot_len + sg_dma_len(sg) >= out_buf_size) { | ||
| 329 | to->len[j] = htons(out_buf_size - | ||
| 330 | tot_len); | ||
| 331 | return; | ||
| 332 | } | ||
| 333 | tot_len += sg_dma_len(sg); | ||
| 334 | } | ||
| 335 | sg = sg_next(sg); | 418 | sg = sg_next(sg); |
| 336 | } | 419 | } |
| 337 | } | 420 | } |
| 421 | if (out_buf_size) { | ||
| 422 | j--; | ||
| 423 | to--; | ||
| 424 | to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size)); | ||
| 425 | } | ||
| 338 | } | 426 | } |
| 339 | 427 | ||
| 340 | static inline unsigned | 428 | static inline int map_writesg_phys_cpl(struct device *dev, |
| 341 | int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl, | 429 | struct cpl_rx_phys_dsgl *phys_cpl, |
| 342 | struct scatterlist *sg, struct phys_sge_parm *sg_param) | 430 | struct scatterlist *sg, |
| 431 | struct phys_sge_parm *sg_param) | ||
| 343 | { | 432 | { |
| 344 | if (!sg || !sg_param->nents) | 433 | if (!sg || !sg_param->nents) |
| 345 | return 0; | 434 | return 0; |
| @@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl, | |||
| 353 | return 0; | 442 | return 0; |
| 354 | } | 443 | } |
| 355 | 444 | ||
| 445 | static inline int get_aead_subtype(struct crypto_aead *aead) | ||
| 446 | { | ||
| 447 | struct aead_alg *alg = crypto_aead_alg(aead); | ||
| 448 | struct chcr_alg_template *chcr_crypto_alg = | ||
| 449 | container_of(alg, struct chcr_alg_template, alg.aead); | ||
| 450 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | ||
| 451 | } | ||
| 452 | |||
| 356 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) | 453 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) |
| 357 | { | 454 | { |
| 358 | struct crypto_alg *alg = tfm->__crt_alg; | 455 | struct crypto_alg *alg = tfm->__crt_alg; |
| @@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) | |||
| 362 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | 459 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; |
| 363 | } | 460 | } |
| 364 | 461 | ||
| 462 | static inline void write_buffer_to_skb(struct sk_buff *skb, | ||
| 463 | unsigned int *frags, | ||
| 464 | char *bfr, | ||
| 465 | u8 bfr_len) | ||
| 466 | { | ||
| 467 | skb->len += bfr_len; | ||
| 468 | skb->data_len += bfr_len; | ||
| 469 | skb->truesize += bfr_len; | ||
| 470 | get_page(virt_to_page(bfr)); | ||
| 471 | skb_fill_page_desc(skb, *frags, virt_to_page(bfr), | ||
| 472 | offset_in_page(bfr), bfr_len); | ||
| 473 | (*frags)++; | ||
| 474 | } | ||
| 475 | |||
| 476 | |||
| 365 | static inline void | 477 | static inline void |
| 366 | write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags, | 478 | write_sg_to_skb(struct sk_buff *skb, unsigned int *frags, |
| 367 | struct scatterlist *sg, unsigned int count) | 479 | struct scatterlist *sg, unsigned int count) |
| 368 | { | 480 | { |
| 369 | struct page *spage; | 481 | struct page *spage; |
| @@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags, | |||
| 372 | skb->len += count; | 484 | skb->len += count; |
| 373 | skb->data_len += count; | 485 | skb->data_len += count; |
| 374 | skb->truesize += count; | 486 | skb->truesize += count; |
| 487 | |||
| 375 | while (count > 0) { | 488 | while (count > 0) { |
| 376 | if (sg && (!(sg->length))) | 489 | if (!sg || (!(sg->length))) |
| 377 | break; | 490 | break; |
| 378 | spage = sg_page(sg); | 491 | spage = sg_page(sg); |
| 379 | get_page(spage); | 492 | get_page(spage); |
| @@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx, | |||
| 389 | struct _key_ctx *key_ctx) | 502 | struct _key_ctx *key_ctx) |
| 390 | { | 503 | { |
| 391 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | 504 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { |
| 392 | get_aes_decrypt_key(key_ctx->key, ablkctx->key, | 505 | memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); |
| 393 | ablkctx->enckey_len << 3); | ||
| 394 | memset(key_ctx->key + ablkctx->enckey_len, 0, | ||
| 395 | CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len); | ||
| 396 | } else { | 506 | } else { |
| 397 | memcpy(key_ctx->key, | 507 | memcpy(key_ctx->key, |
| 398 | ablkctx->key + (ablkctx->enckey_len >> 1), | 508 | ablkctx->key + (ablkctx->enckey_len >> 1), |
| 399 | ablkctx->enckey_len >> 1); | 509 | ablkctx->enckey_len >> 1); |
| 400 | get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1), | 510 | memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), |
| 401 | ablkctx->key, ablkctx->enckey_len << 2); | 511 | ablkctx->rrkey, ablkctx->enckey_len >> 1); |
| 402 | } | 512 | } |
| 403 | return 0; | 513 | return 0; |
| 404 | } | 514 | } |
| 405 | 515 | ||
| 406 | static inline void create_wreq(struct chcr_context *ctx, | 516 | static inline void create_wreq(struct chcr_context *ctx, |
| 407 | struct fw_crypto_lookaside_wr *wreq, | 517 | struct chcr_wr *chcr_req, |
| 408 | void *req, struct sk_buff *skb, | 518 | void *req, struct sk_buff *skb, |
| 409 | int kctx_len, int hash_sz, | 519 | int kctx_len, int hash_sz, |
| 410 | unsigned int phys_dsgl) | 520 | int is_iv, |
| 521 | unsigned int sc_len) | ||
| 411 | { | 522 | { |
| 412 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 523 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
| 413 | struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1); | ||
| 414 | struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1); | ||
| 415 | int iv_loc = IV_DSGL; | 524 | int iv_loc = IV_DSGL; |
| 416 | int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id]; | 525 | int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id]; |
| 417 | unsigned int immdatalen = 0, nr_frags = 0; | 526 | unsigned int immdatalen = 0, nr_frags = 0; |
| @@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx, | |||
| 423 | nr_frags = skb_shinfo(skb)->nr_frags; | 532 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 424 | } | 533 | } |
| 425 | 534 | ||
| 426 | wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, | 535 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, |
| 427 | (kctx_len >> 4)); | 536 | ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4)); |
| 428 | wreq->pld_size_hash_size = | 537 | chcr_req->wreq.pld_size_hash_size = |
| 429 | htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | | 538 | htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | |
| 430 | FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); | 539 | FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); |
| 431 | wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( | 540 | chcr_req->wreq.len16_pkd = |
| 541 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( | ||
| 432 | (calc_tx_flits_ofld(skb) * 8), 16))); | 542 | (calc_tx_flits_ofld(skb) * 8), 16))); |
| 433 | wreq->cookie = cpu_to_be64((uintptr_t)req); | 543 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
| 434 | wreq->rx_chid_to_rx_q_id = | 544 | chcr_req->wreq.rx_chid_to_rx_q_id = |
| 435 | FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid, | 545 | FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid, |
| 436 | (hash_sz) ? IV_NOP : iv_loc); | 546 | is_iv ? iv_loc : IV_NOP); |
| 437 | 547 | ||
| 438 | ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id); | 548 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id); |
| 439 | ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), | 549 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), |
| 440 | 16) - ((sizeof(*wreq)) >> 4))); | 550 | 16) - ((sizeof(chcr_req->wreq)) >> 4))); |
| 441 | 551 | ||
| 442 | sc_imm->cmd_more = FILL_CMD_MORE(immdatalen); | 552 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); |
| 443 | sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len + | 553 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + |
| 444 | ((hash_sz) ? DUMMY_BYTES : | 554 | sizeof(chcr_req->key_ctx) + |
| 445 | (sizeof(struct cpl_rx_phys_dsgl) + | 555 | kctx_len + sc_len + immdatalen); |
| 446 | phys_dsgl)) + immdatalen); | ||
| 447 | } | 556 | } |
| 448 | 557 | ||
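create_wreq() above sizes the request in the hardware's units: calc_tx_flits_ofld() counts 8-byte flits, and the firmware's LEN16 field wants 16-byte units, hence the DIV_ROUND_UP over flits * 8. A self-contained sketch of that conversion (the flit count is arbitrary):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int flits = 21;                        /* hypothetical calc_tx_flits_ofld() result */
            unsigned int bytes = flits * 8;                 /* one flit = 8 bytes */
            unsigned int len16 = DIV_ROUND_UP(bytes, 16);   /* 16-byte units for LEN16 */

            printf("%u flits = %u bytes -> LEN16 = %u\n", flits, bytes, len16);
            return 0;
    }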
| 449 | /** | 558 | /** |
| @@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx, | |||
| 454 | * @op_type: encryption or decryption | 563 | * @op_type: encryption or decryption |
| 455 | */ | 564 | */ |
| 456 | static struct sk_buff | 565 | static struct sk_buff |
| 457 | *create_cipher_wr(struct crypto_async_request *req_base, | 566 | *create_cipher_wr(struct ablkcipher_request *req, |
| 458 | struct chcr_context *ctx, unsigned short qid, | 567 | unsigned short qid, |
| 459 | unsigned short op_type) | 568 | unsigned short op_type) |
| 460 | { | 569 | { |
| 461 | struct ablkcipher_request *req = (struct ablkcipher_request *)req_base; | ||
| 462 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 570 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 571 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
| 463 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 572 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
| 464 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 573 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
| 465 | struct sk_buff *skb = NULL; | 574 | struct sk_buff *skb = NULL; |
| 466 | struct _key_ctx *key_ctx; | 575 | struct chcr_wr *chcr_req; |
| 467 | struct fw_crypto_lookaside_wr *wreq; | ||
| 468 | struct cpl_tx_sec_pdu *sec_cpl; | ||
| 469 | struct cpl_rx_phys_dsgl *phys_cpl; | 576 | struct cpl_rx_phys_dsgl *phys_cpl; |
| 470 | struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 577 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
| 471 | struct phys_sge_parm sg_param; | 578 | struct phys_sge_parm sg_param; |
| 472 | unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0; | 579 | unsigned int frags = 0, transhdr_len, phys_dsgl; |
| 473 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; | 580 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; |
| 581 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 582 | GFP_ATOMIC; | ||
| 474 | 583 | ||
| 475 | if (!req->info) | 584 | if (!req->info) |
| 476 | return ERR_PTR(-EINVAL); | 585 | return ERR_PTR(-EINVAL); |
| 477 | ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize); | 586 | reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); |
| 478 | ablkctx->enc = op_type; | 587 | if (reqctx->dst_nents <= 0) { |
| 479 | 588 | pr_err("AES:Invalid Destination sg lists\n"); | |
| 589 | return ERR_PTR(-EINVAL); | ||
| 590 | } | ||
| 480 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || | 591 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || |
| 481 | (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) | 592 | (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) { |
| 593 | pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", | ||
| 594 | ablkctx->enckey_len, req->nbytes, ivsize); | ||
| 482 | return ERR_PTR(-EINVAL); | 595 | return ERR_PTR(-EINVAL); |
| 596 | } | ||
| 483 | 597 | ||
| 484 | phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents); | 598 | phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); |
| 485 | 599 | ||
| 486 | kctx_len = sizeof(*key_ctx) + | 600 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); |
| 487 | (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); | ||
| 488 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); | 601 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); |
| 489 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), | 602 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
| 490 | GFP_ATOMIC); | ||
| 491 | if (!skb) | 603 | if (!skb) |
| 492 | return ERR_PTR(-ENOMEM); | 604 | return ERR_PTR(-ENOMEM); |
| 493 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 605 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); |
| 494 | wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len); | 606 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); |
| 495 | 607 | memset(chcr_req, 0, transhdr_len); | |
| 496 | sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET); | 608 | chcr_req->sec_cpl.op_ivinsrtofst = |
| 497 | sec_cpl->op_ivinsrtofst = | 609 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1); |
| 498 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1); | 610 | |
| 499 | 611 | chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes); | |
| 500 | sec_cpl->pldlen = htonl(ivsize + req->nbytes); | 612 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
| 501 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, | 613 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); |
| 502 | ivsize + 1, 0); | 614 | |
| 503 | 615 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | |
| 504 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0, | 616 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); |
| 505 | 0, 0); | 617 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, |
| 506 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, | ||
| 507 | ablkctx->ciph_mode, | 618 | ablkctx->ciph_mode, |
| 508 | 0, 0, ivsize >> 1, 1); | 619 | 0, 0, ivsize >> 1); |
| 509 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, | 620 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, |
| 510 | 0, 1, phys_dsgl); | 621 | 0, 1, phys_dsgl); |
| 511 | 622 | ||
| 512 | key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl)); | 623 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
| 513 | key_ctx->ctx_hdr = ablkctx->key_ctx_hdr; | ||
| 514 | if (op_type == CHCR_DECRYPT_OP) { | 624 | if (op_type == CHCR_DECRYPT_OP) { |
| 515 | if (generate_copy_rrkey(ablkctx, key_ctx)) | 625 | generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); |
| 516 | goto map_fail1; | ||
| 517 | } else { | 626 | } else { |
| 518 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | 627 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { |
| 519 | memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len); | 628 | memcpy(chcr_req->key_ctx.key, ablkctx->key, |
| 629 | ablkctx->enckey_len); | ||
| 520 | } else { | 630 | } else { |
| 521 | memcpy(key_ctx->key, ablkctx->key + | 631 | memcpy(chcr_req->key_ctx.key, ablkctx->key + |
| 522 | (ablkctx->enckey_len >> 1), | 632 | (ablkctx->enckey_len >> 1), |
| 523 | ablkctx->enckey_len >> 1); | 633 | ablkctx->enckey_len >> 1); |
| 524 | memcpy(key_ctx->key + | 634 | memcpy(chcr_req->key_ctx.key + |
| 525 | (ablkctx->enckey_len >> 1), | 635 | (ablkctx->enckey_len >> 1), |
| 526 | ablkctx->key, | 636 | ablkctx->key, |
| 527 | ablkctx->enckey_len >> 1); | 637 | ablkctx->enckey_len >> 1); |
| 528 | } | 638 | } |
| 529 | } | 639 | } |
| 530 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len); | 640 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
| 531 | 641 | sg_param.nents = reqctx->dst_nents; | |
| 532 | memcpy(ablkctx->iv, req->info, ivsize); | 642 | sg_param.obsize = req->nbytes; |
| 533 | sg_init_table(&ablkctx->iv_sg, 1); | ||
| 534 | sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize); | ||
| 535 | sg_param.nents = ablkctx->dst_nents; | ||
| 536 | sg_param.obsize = dst_bufsize; | ||
| 537 | sg_param.qid = qid; | 643 | sg_param.qid = qid; |
| 538 | sg_param.align = 1; | 644 | sg_param.align = 1; |
| 539 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, | 645 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, |
| @@ -541,10 +647,12 @@ static struct sk_buff | |||
| 541 | goto map_fail1; | 647 | goto map_fail1; |
| 542 | 648 | ||
| 543 | skb_set_transport_header(skb, transhdr_len); | 649 | skb_set_transport_header(skb, transhdr_len); |
| 544 | write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize); | 650 | memcpy(reqctx->iv, req->info, ivsize); |
| 545 | write_sg_data_page_desc(skb, &frags, req->src, req->nbytes); | 651 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); |
| 546 | create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl); | 652 | write_sg_to_skb(skb, &frags, req->src, req->nbytes); |
| 547 | req_ctx->skb = skb; | 653 | create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, |
| 654 | sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl); | ||
| 655 | reqctx->skb = skb; | ||
| 548 | skb_get(skb); | 656 | skb_get(skb); |
| 549 | return skb; | 657 | return skb; |
| 550 | map_fail1: | 658 | map_fail1: |
| @@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
| 557 | { | 665 | { |
| 558 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 666 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
| 559 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 667 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
| 560 | struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm); | ||
| 561 | unsigned int ck_size, context_size; | 668 | unsigned int ck_size, context_size; |
| 562 | u16 alignment = 0; | 669 | u16 alignment = 0; |
| 563 | 670 | ||
| 564 | if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize)) | ||
| 565 | goto badkey_err; | ||
| 566 | |||
| 567 | memcpy(ablkctx->key, key, keylen); | ||
| 568 | ablkctx->enckey_len = keylen; | ||
| 569 | if (keylen == AES_KEYSIZE_128) { | 671 | if (keylen == AES_KEYSIZE_128) { |
| 570 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | 672 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
| 571 | } else if (keylen == AES_KEYSIZE_192) { | 673 | } else if (keylen == AES_KEYSIZE_192) { |
| @@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
| 576 | } else { | 678 | } else { |
| 577 | goto badkey_err; | 679 | goto badkey_err; |
| 578 | } | 680 | } |
| 579 | 681 | memcpy(ablkctx->key, key, keylen); | |
| 682 | ablkctx->enckey_len = keylen; | ||
| 683 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); | ||
| 580 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | 684 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
| 581 | keylen + alignment) >> 4; | 685 | keylen + alignment) >> 4; |
| 582 | 686 | ||
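The shift by 4 above expresses the key context in 16-byte words: header-plus-salt, the key itself, and any alignment pad (192-bit keys are padded out to a 16-byte boundary). A runnable check of the arithmetic; the 16-byte header/salt area is an assumption for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hdr = 16;                          /* assumed KEY_CONTEXT_HDR_SALT_AND_PAD */
            unsigned int keylens[3] = { 16, 24, 32 };       /* AES-128/192/256 */
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    unsigned int alignment = (keylens[i] == 24) ? 8 : 0;
                    unsigned int context_size = (hdr + keylens[i] + alignment) >> 4;

                    printf("keylen %u -> context_size %u\n", keylens[i], context_size);
            }
            return 0;
    }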
| @@ -612,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) | |||
| 612 | { | 716 | { |
| 613 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 717 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 614 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 718 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
| 615 | struct crypto_async_request *req_base = &req->base; | ||
| 616 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 719 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
| 617 | struct sk_buff *skb; | 720 | struct sk_buff *skb; |
| 618 | 721 | ||
| @@ -622,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) | |||
| 622 | return -EBUSY; | 725 | return -EBUSY; |
| 623 | } | 726 | } |
| 624 | 727 | ||
| 625 | skb = create_cipher_wr(req_base, ctx, | 728 | skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], |
| 626 | u_ctx->lldi.rxq_ids[ctx->tx_channel_id], | ||
| 627 | CHCR_ENCRYPT_OP); | 729 | CHCR_ENCRYPT_OP); |
| 628 | if (IS_ERR(skb)) { | 730 | if (IS_ERR(skb)) { |
| 629 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | 731 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); |
| @@ -639,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) | |||
| 639 | { | 741 | { |
| 640 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 742 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 641 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 743 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
| 642 | struct crypto_async_request *req_base = &req->base; | ||
| 643 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 744 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
| 644 | struct sk_buff *skb; | 745 | struct sk_buff *skb; |
| 645 | 746 | ||
| @@ -649,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) | |||
| 649 | return -EBUSY; | 750 | return -EBUSY; |
| 650 | } | 751 | } |
| 651 | 752 | ||
| 652 | skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0], | 753 | skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0], |
| 653 | CHCR_DECRYPT_OP); | 754 | CHCR_DECRYPT_OP); |
| 654 | if (IS_ERR(skb)) { | 755 | if (IS_ERR(skb)) { |
| 655 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | 756 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); |
| @@ -729,50 +830,33 @@ static int get_alg_config(struct algo_param *params, | |||
| 729 | return 0; | 830 | return 0; |
| 730 | } | 831 | } |
| 731 | 832 | ||
| 732 | static inline int | 833 | static inline void chcr_free_shash(struct crypto_shash *base_hash) |
| 733 | write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx, | ||
| 734 | struct sk_buff *skb, unsigned int *frags, char *bfr, | ||
| 735 | u8 bfr_len) | ||
| 736 | { | 834 | { |
| 737 | void *page_ptr = NULL; | 835 | crypto_free_shash(base_hash); |
| 738 | |||
| 739 | skb->len += bfr_len; | ||
| 740 | skb->data_len += bfr_len; | ||
| 741 | skb->truesize += bfr_len; | ||
| 742 | page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA); | ||
| 743 | if (!page_ptr) | ||
| 744 | return -ENOMEM; | ||
| 745 | get_page(virt_to_page(page_ptr)); | ||
| 746 | req_ctx->dummy_payload_ptr = page_ptr; | ||
| 747 | memcpy(page_ptr, bfr, bfr_len); | ||
| 748 | skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr), | ||
| 749 | offset_in_page(page_ptr), bfr_len); | ||
| 750 | (*frags)++; | ||
| 751 | return 0; | ||
| 752 | } | 836 | } |
| 753 | 837 | ||
| 754 | /** | 838 | /** |
| 755 | * create_final_hash_wr - Create hash work request | 839 | * create_hash_wr - Create hash work request |
| 756 | * @req - Hash request base | 840 | * @req - Hash request base |
| 757 | */ | 841 | */ |
| 758 | static struct sk_buff *create_final_hash_wr(struct ahash_request *req, | 842 | static struct sk_buff *create_hash_wr(struct ahash_request *req, |
| 759 | struct hash_wr_param *param) | 843 | struct hash_wr_param *param) |
| 760 | { | 844 | { |
| 761 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 845 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
| 762 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 846 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
| 763 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 847 | struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); |
| 764 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | 848 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); |
| 765 | struct sk_buff *skb = NULL; | 849 | struct sk_buff *skb = NULL; |
| 766 | struct _key_ctx *key_ctx; | 850 | struct chcr_wr *chcr_req; |
| 767 | struct fw_crypto_lookaside_wr *wreq; | ||
| 768 | struct cpl_tx_sec_pdu *sec_cpl; | ||
| 769 | unsigned int frags = 0, transhdr_len, iopad_alignment = 0; | 851 | unsigned int frags = 0, transhdr_len, iopad_alignment = 0; |
| 770 | unsigned int digestsize = crypto_ahash_digestsize(tfm); | 852 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
| 771 | unsigned int kctx_len = sizeof(*key_ctx); | 853 | unsigned int kctx_len = 0; |
| 772 | u8 hash_size_in_response = 0; | 854 | u8 hash_size_in_response = 0; |
| 855 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 856 | GFP_ATOMIC; | ||
| 773 | 857 | ||
| 774 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); | 858 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); |
| 775 | kctx_len += param->alg_prm.result_size + iopad_alignment; | 859 | kctx_len = param->alg_prm.result_size + iopad_alignment; |
| 776 | if (param->opad_needed) | 860 | if (param->opad_needed) |
| 777 | kctx_len += param->alg_prm.result_size + iopad_alignment; | 861 | kctx_len += param->alg_prm.result_size + iopad_alignment; |
| 778 | 862 | ||
| @@ -781,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req, | |||
| 781 | else | 865 | else |
| 782 | hash_size_in_response = param->alg_prm.result_size; | 866 | hash_size_in_response = param->alg_prm.result_size; |
| 783 | transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); | 867 | transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); |
| 784 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), | 868 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
| 785 | GFP_ATOMIC); | ||
| 786 | if (!skb) | 869 | if (!skb) |
| 787 | return skb; | 870 | return skb; |
| 788 | 871 | ||
| 789 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 872 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); |
| 790 | wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len); | 873 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); |
| 791 | memset(wreq, 0, transhdr_len); | 874 | memset(chcr_req, 0, transhdr_len); |
| 792 | 875 | ||
| 793 | sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET); | 876 | chcr_req->sec_cpl.op_ivinsrtofst = |
| 794 | sec_cpl->op_ivinsrtofst = | 877 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0); |
| 795 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0); | 878 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); |
| 796 | sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len); | ||
| 797 | 879 | ||
| 798 | sec_cpl->aadstart_cipherstop_hi = | 880 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
| 799 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); | 881 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); |
| 800 | sec_cpl->cipherstop_lo_authinsert = | 882 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
| 801 | FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); | 883 | FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); |
| 802 | sec_cpl->seqno_numivs = | 884 | chcr_req->sec_cpl.seqno_numivs = |
| 803 | FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, | 885 | FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, |
| 804 | param->opad_needed, 0, 0); | 886 | param->opad_needed, 0); |
| 805 | 887 | ||
| 806 | sec_cpl->ivgen_hdrlen = | 888 | chcr_req->sec_cpl.ivgen_hdrlen = |
| 807 | FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); | 889 | FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); |
| 808 | 890 | ||
| 809 | key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl)); | 891 | memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, |
| 810 | memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size); | 892 | param->alg_prm.result_size); |
| 811 | 893 | ||
| 812 | if (param->opad_needed) | 894 | if (param->opad_needed) |
| 813 | memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 : | 895 | memcpy(chcr_req->key_ctx.key + |
| 814 | CHCR_HASH_MAX_DIGEST_SIZE), | 896 | ((param->alg_prm.result_size <= 32) ? 32 : |
| 897 | CHCR_HASH_MAX_DIGEST_SIZE), | ||
| 815 | hmacctx->opad, param->alg_prm.result_size); | 898 | hmacctx->opad, param->alg_prm.result_size); |
| 816 | 899 | ||
| 817 | key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, | 900 | chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, |
| 818 | param->alg_prm.mk_size, 0, | 901 | param->alg_prm.mk_size, 0, |
| 819 | param->opad_needed, | 902 | param->opad_needed, |
| 820 | (kctx_len >> 4)); | 903 | ((kctx_len + |
| 821 | sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1); | 904 | sizeof(chcr_req->key_ctx)) >> 4)); |
| 905 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); | ||
| 822 | 906 | ||
| 823 | skb_set_transport_header(skb, transhdr_len); | 907 | skb_set_transport_header(skb, transhdr_len); |
| 824 | if (param->bfr_len != 0) | 908 | if (param->bfr_len != 0) |
| 825 | write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr, | 909 | write_buffer_to_skb(skb, &frags, req_ctx->reqbfr, |
| 826 | param->bfr_len); | 910 | param->bfr_len); |
| 827 | if (param->sg_len != 0) | 911 | if (param->sg_len != 0) |
| 828 | write_sg_data_page_desc(skb, &frags, req->src, param->sg_len); | 912 | write_sg_to_skb(skb, &frags, req->src, param->sg_len); |
| 829 | 913 | ||
| 830 | create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response, | 914 | create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0, |
| 831 | 0); | 915 | DUMMY_BYTES); |
| 832 | req_ctx->skb = skb; | 916 | req_ctx->skb = skb; |
| 833 | skb_get(skb); | 917 | skb_get(skb); |
| 834 | return skb; | 918 | return skb; |
| @@ -854,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req) | |||
| 854 | return -EBUSY; | 938 | return -EBUSY; |
| 855 | } | 939 | } |
| 856 | 940 | ||
| 857 | if (nbytes + req_ctx->bfr_len >= bs) { | 941 | if (nbytes + req_ctx->reqlen >= bs) { |
| 858 | remainder = (nbytes + req_ctx->bfr_len) % bs; | 942 | remainder = (nbytes + req_ctx->reqlen) % bs; |
| 859 | nbytes = nbytes + req_ctx->bfr_len - remainder; | 943 | nbytes = nbytes + req_ctx->reqlen - remainder; |
| 860 | } else { | 944 | } else { |
| 861 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr + | 945 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr |
| 862 | req_ctx->bfr_len, nbytes, 0); | 946 | + req_ctx->reqlen, nbytes, 0); |
| 863 | req_ctx->bfr_len += nbytes; | 947 | req_ctx->reqlen += nbytes; |
| 864 | return 0; | 948 | return 0; |
| 865 | } | 949 | } |
| 866 | 950 | ||
| 867 | params.opad_needed = 0; | 951 | params.opad_needed = 0; |
| 868 | params.more = 1; | 952 | params.more = 1; |
| 869 | params.last = 0; | 953 | params.last = 0; |
| 870 | params.sg_len = nbytes - req_ctx->bfr_len; | 954 | params.sg_len = nbytes - req_ctx->reqlen; |
| 871 | params.bfr_len = req_ctx->bfr_len; | 955 | params.bfr_len = req_ctx->reqlen; |
| 872 | params.scmd1 = 0; | 956 | params.scmd1 = 0; |
| 873 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | 957 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
| 874 | req_ctx->result = 0; | 958 | req_ctx->result = 0; |
| 875 | req_ctx->data_len += params.sg_len + params.bfr_len; | 959 | req_ctx->data_len += params.sg_len + params.bfr_len; |
| 876 | skb = create_final_hash_wr(req, ¶ms); | 960 | skb = create_hash_wr(req, ¶ms); |
| 877 | if (!skb) | 961 | if (!skb) |
| 878 | return -ENOMEM; | 962 | return -ENOMEM; |
| 879 | 963 | ||
| 880 | req_ctx->bfr_len = remainder; | 964 | if (remainder) { |
| 881 | if (remainder) | 965 | u8 *temp; |
| 966 | /* Swap buffers */ | ||
| 967 | temp = req_ctx->reqbfr; | ||
| 968 | req_ctx->reqbfr = req_ctx->skbfr; | ||
| 969 | req_ctx->skbfr = temp; | ||
| 882 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | 970 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
| 883 | req_ctx->bfr, remainder, req->nbytes - | 971 | req_ctx->reqbfr, remainder, req->nbytes - |
| 884 | remainder); | 972 | remainder); |
| 973 | } | ||
| 974 | req_ctx->reqlen = remainder; | ||
| 885 | skb->dev = u_ctx->lldi.ports[0]; | 975 | skb->dev = u_ctx->lldi.ports[0]; |
| 886 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); | 976 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); |
| 887 | chcr_send_wr(skb); | 977 | chcr_send_wr(skb); |
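The buffer swap introduced above is why the request context now carries two buffers: reqbfr accumulates the partial block for the next request while skbfr stays pinned under the skb that was just queued, so copying the residue cannot overwrite bytes the hardware has yet to DMA. A minimal sketch of the rotation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char bfr1[16] = "in-flight block", bfr2[16] = "";
            char *reqbfr = bfr1, *skbfr = bfr2, *temp;

            /* bfr1 is still referenced by the skb just sent: rotate first */
            temp = reqbfr;
            reqbfr = skbfr;
            skbfr = temp;

            memcpy(reqbfr, "next residue", 13);     /* in-flight buffer untouched */
            printf("pinned under skb: %s | accumulating: %s\n", skbfr, reqbfr);
            return 0;
    }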
| @@ -917,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req) | |||
| 917 | params.sg_len = 0; | 1007 | params.sg_len = 0; |
| 918 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | 1008 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
| 919 | req_ctx->result = 1; | 1009 | req_ctx->result = 1; |
| 920 | params.bfr_len = req_ctx->bfr_len; | 1010 | params.bfr_len = req_ctx->reqlen; |
| 921 | req_ctx->data_len += params.bfr_len + params.sg_len; | 1011 | req_ctx->data_len += params.bfr_len + params.sg_len; |
| 922 | if (req_ctx->bfr && (req_ctx->bfr_len == 0)) { | 1012 | if (req_ctx->reqlen == 0) { |
| 923 | create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len); | 1013 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); |
| 924 | params.last = 0; | 1014 | params.last = 0; |
| 925 | params.more = 1; | 1015 | params.more = 1; |
| 926 | params.scmd1 = 0; | 1016 | params.scmd1 = 0; |
| @@ -931,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req) | |||
| 931 | params.last = 1; | 1021 | params.last = 1; |
| 932 | params.more = 0; | 1022 | params.more = 0; |
| 933 | } | 1023 | } |
| 934 | skb = create_final_hash_wr(req, ¶ms); | 1024 | skb = create_hash_wr(req, ¶ms); |
| 1025 | if (!skb) | ||
| 1026 | return -ENOMEM; | ||
| 1027 | |||
| 935 | skb->dev = u_ctx->lldi.ports[0]; | 1028 | skb->dev = u_ctx->lldi.ports[0]; |
| 936 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); | 1029 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); |
| 937 | chcr_send_wr(skb); | 1030 | chcr_send_wr(skb); |
| @@ -963,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req) | |||
| 963 | params.opad_needed = 0; | 1056 | params.opad_needed = 0; |
| 964 | 1057 | ||
| 965 | params.sg_len = req->nbytes; | 1058 | params.sg_len = req->nbytes; |
| 966 | params.bfr_len = req_ctx->bfr_len; | 1059 | params.bfr_len = req_ctx->reqlen; |
| 967 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); | 1060 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
| 968 | req_ctx->data_len += params.bfr_len + params.sg_len; | 1061 | req_ctx->data_len += params.bfr_len + params.sg_len; |
| 969 | req_ctx->result = 1; | 1062 | req_ctx->result = 1; |
| 970 | if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) { | 1063 | if ((req_ctx->reqlen + req->nbytes) == 0) { |
| 971 | create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len); | 1064 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); |
| 972 | params.last = 0; | 1065 | params.last = 0; |
| 973 | params.more = 1; | 1066 | params.more = 1; |
| 974 | params.scmd1 = 0; | 1067 | params.scmd1 = 0; |
| @@ -979,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req) | |||
| 979 | params.more = 0; | 1072 | params.more = 0; |
| 980 | } | 1073 | } |
| 981 | 1074 | ||
| 982 | skb = create_final_hash_wr(req, ¶ms); | 1075 | skb = create_hash_wr(req, ¶ms); |
| 983 | if (!skb) | 1076 | if (!skb) |
| 984 | return -ENOMEM; | 1077 | return -ENOMEM; |
| 1078 | |||
| 985 | skb->dev = u_ctx->lldi.ports[0]; | 1079 | skb->dev = u_ctx->lldi.ports[0]; |
| 986 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); | 1080 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); |
| 987 | chcr_send_wr(skb); | 1081 | chcr_send_wr(skb); |
| @@ -1023,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req) | |||
| 1023 | req_ctx->result = 1; | 1117 | req_ctx->result = 1; |
| 1024 | req_ctx->data_len += params.bfr_len + params.sg_len; | 1118 | req_ctx->data_len += params.bfr_len + params.sg_len; |
| 1025 | 1119 | ||
| 1026 | if (req_ctx->bfr && req->nbytes == 0) { | 1120 | if (req->nbytes == 0) { |
| 1027 | create_last_hash_block(req_ctx->bfr, bs, 0); | 1121 | create_last_hash_block(req_ctx->reqbfr, bs, 0); |
| 1028 | params.more = 1; | 1122 | params.more = 1; |
| 1029 | params.bfr_len = bs; | 1123 | params.bfr_len = bs; |
| 1030 | } | 1124 | } |
| 1031 | 1125 | ||
| 1032 | skb = create_final_hash_wr(req, ¶ms); | 1126 | skb = create_hash_wr(req, ¶ms); |
| 1033 | if (!skb) | 1127 | if (!skb) |
| 1034 | return -ENOMEM; | 1128 | return -ENOMEM; |
| 1035 | 1129 | ||
| @@ -1044,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out) | |||
| 1044 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1138 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1045 | struct chcr_ahash_req_ctx *state = out; | 1139 | struct chcr_ahash_req_ctx *state = out; |
| 1046 | 1140 | ||
| 1047 | state->bfr_len = req_ctx->bfr_len; | 1141 | state->reqlen = req_ctx->reqlen; |
| 1048 | state->data_len = req_ctx->data_len; | 1142 | state->data_len = req_ctx->data_len; |
| 1049 | memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128); | 1143 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); |
| 1050 | memcpy(state->partial_hash, req_ctx->partial_hash, | 1144 | memcpy(state->partial_hash, req_ctx->partial_hash, |
| 1051 | CHCR_HASH_MAX_DIGEST_SIZE); | 1145 | CHCR_HASH_MAX_DIGEST_SIZE); |
| 1052 | return 0; | 1146 | return 0; |
| 1053 | } | 1147 | } |
| 1054 | 1148 | ||
| 1055 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) | 1149 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) |
| @@ -1057,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in) | |||
| 1057 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1151 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1058 | struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; | 1152 | struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; |
| 1059 | 1153 | ||
| 1060 | req_ctx->bfr_len = state->bfr_len; | 1154 | req_ctx->reqlen = state->reqlen; |
| 1061 | req_ctx->data_len = state->data_len; | 1155 | req_ctx->data_len = state->data_len; |
| 1062 | req_ctx->dummy_payload_ptr = NULL; | 1156 | req_ctx->reqbfr = req_ctx->bfr1; |
| 1063 | memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128); | 1157 | req_ctx->skbfr = req_ctx->bfr2; |
| 1158 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); | ||
| 1064 | memcpy(req_ctx->partial_hash, state->partial_hash, | 1159 | memcpy(req_ctx->partial_hash, state->partial_hash, |
| 1065 | CHCR_HASH_MAX_DIGEST_SIZE); | 1160 | CHCR_HASH_MAX_DIGEST_SIZE); |
| 1066 | return 0; | 1161 | return 0; |
| @@ -1075,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
| 1075 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | 1170 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); |
| 1076 | unsigned int i, err = 0, updated_digestsize; | 1171 | unsigned int i, err = 0, updated_digestsize; |
| 1077 | 1172 | ||
| 1078 | /* | 1173 | SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); |
| 1079 | * use the key to calculate the ipad and opad. ipad will sent with the | 1174 | |
| 1175 | /* use the key to calculate the ipad and opad. ipad will sent with the | ||
| 1080 | * first request's data. opad will be sent with the final hash result | 1176 | * first request's data. opad will be sent with the final hash result |
| 1081 | * ipad in hmacctx->ipad and opad in hmacctx->opad location | 1177 | * ipad in hmacctx->ipad and opad in hmacctx->opad location |
| 1082 | */ | 1178 | */ |
| 1083 | if (!hmacctx->desc) | 1179 | shash->tfm = hmacctx->base_hash; |
| 1084 | return -EINVAL; | 1180 | shash->flags = crypto_shash_get_flags(hmacctx->base_hash); |
| 1085 | if (keylen > bs) { | 1181 | if (keylen > bs) { |
| 1086 | err = crypto_shash_digest(hmacctx->desc, key, keylen, | 1182 | err = crypto_shash_digest(shash, key, keylen, |
| 1087 | hmacctx->ipad); | 1183 | hmacctx->ipad); |
| 1088 | if (err) | 1184 | if (err) |
| 1089 | goto out; | 1185 | goto out; |
| @@ -1104,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
| 1104 | updated_digestsize = SHA256_DIGEST_SIZE; | 1200 | updated_digestsize = SHA256_DIGEST_SIZE; |
| 1105 | else if (digestsize == SHA384_DIGEST_SIZE) | 1201 | else if (digestsize == SHA384_DIGEST_SIZE) |
| 1106 | updated_digestsize = SHA512_DIGEST_SIZE; | 1202 | updated_digestsize = SHA512_DIGEST_SIZE; |
| 1107 | err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad, | 1203 | err = chcr_compute_partial_hash(shash, hmacctx->ipad, |
| 1108 | hmacctx->ipad, digestsize); | 1204 | hmacctx->ipad, digestsize); |
| 1109 | if (err) | 1205 | if (err) |
| 1110 | goto out; | 1206 | goto out; |
| 1111 | chcr_change_order(hmacctx->ipad, updated_digestsize); | 1207 | chcr_change_order(hmacctx->ipad, updated_digestsize); |
| 1112 | 1208 | ||
| 1113 | err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad, | 1209 | err = chcr_compute_partial_hash(shash, hmacctx->opad, |
| 1114 | hmacctx->opad, digestsize); | 1210 | hmacctx->opad, digestsize); |
| 1115 | if (err) | 1211 | if (err) |
| 1116 | goto out; | 1212 | goto out; |
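The setkey path above follows the standard HMAC construction: the key (pre-hashed if longer than the block size, zero-padded otherwise) is XORed with 0x36 repeated to form ipad and with 0x5c repeated to form opad, and one compression step over each gives the partial digests the hardware resumes from. A self-contained sketch of the pad derivation, using the SHA-256 block size and a made-up key:

    #include <stdio.h>

    int main(void)
    {
            unsigned char key[64] = "hypothetical-key";     /* zero-padded to the block size */
            unsigned char ipad[64], opad[64];
            unsigned int bs = 64, i;                        /* SHA-256 block size */

            for (i = 0; i < bs; i++) {
                    ipad[i] = key[i] ^ 0x36;
                    opad[i] = key[i] ^ 0x5c;
            }
            /* the driver then runs chcr_compute_partial_hash() over each
             * single block and stores the results in hmacctx->ipad/opad */
            printf("ipad[0] = 0x%02x, opad[0] = 0x%02x\n", ipad[0], opad[0]);
            return 0;
    }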
| @@ -1124,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
| 1124 | { | 1220 | { |
| 1125 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1221 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
| 1126 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 1222 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
| 1127 | int status = 0; | ||
| 1128 | unsigned short context_size = 0; | 1223 | unsigned short context_size = 0; |
| 1129 | 1224 | ||
| 1130 | if ((key_len == (AES_KEYSIZE_128 << 1)) || | 1225 | if ((key_len != (AES_KEYSIZE_128 << 1)) && |
| 1131 | (key_len == (AES_KEYSIZE_256 << 1))) { | 1226 | (key_len != (AES_KEYSIZE_256 << 1))) { |
| 1132 | memcpy(ablkctx->key, key, key_len); | ||
| 1133 | ablkctx->enckey_len = key_len; | ||
| 1134 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; | ||
| 1135 | ablkctx->key_ctx_hdr = | ||
| 1136 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? | ||
| 1137 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : | ||
| 1138 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, | ||
| 1139 | CHCR_KEYCTX_NO_KEY, 1, | ||
| 1140 | 0, context_size); | ||
| 1141 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; | ||
| 1142 | } else { | ||
| 1143 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | 1227 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, |
| 1144 | CRYPTO_TFM_RES_BAD_KEY_LEN); | 1228 | CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 1145 | ablkctx->enckey_len = 0; | 1229 | ablkctx->enckey_len = 0; |
| 1146 | status = -EINVAL; | 1230 | return -EINVAL; |
| 1231 | |||
| 1147 | } | 1232 | } |
| 1148 | return status; | 1233 | |
| 1234 | memcpy(ablkctx->key, key, key_len); | ||
| 1235 | ablkctx->enckey_len = key_len; | ||
| 1236 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); | ||
| 1237 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; | ||
| 1238 | ablkctx->key_ctx_hdr = | ||
| 1239 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? | ||
| 1240 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : | ||
| 1241 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, | ||
| 1242 | CHCR_KEYCTX_NO_KEY, 1, | ||
| 1243 | 0, context_size); | ||
| 1244 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; | ||
| 1245 | return 0; | ||
| 1149 | } | 1246 | } |
| 1150 | 1247 | ||
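XTS carries two AES keys back to back, so key_len above is twice the single-key size and the reverse-round key only has to cover one half, hence get_aes_decrypt_key(..., key_len << 2): shifting by 2 instead of the usual 3 yields the bit length of half the key. Worked out for both accepted sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int key_lens[2] = { 32, 64 };  /* AES-128-XTS and AES-256-XTS */
            unsigned int i;

            for (i = 0; i < 2; i++) {
                    unsigned int half_bytes = key_lens[i] >> 1;
                    unsigned int half_bits  = key_lens[i] << 2;     /* (key_len / 2) * 8 */

                    printf("key_len %u -> per-key %u bytes = %u bits\n",
                           key_lens[i], half_bytes, half_bits);
            }
            return 0;
    }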
| 1151 | static int chcr_sha_init(struct ahash_request *areq) | 1248 | static int chcr_sha_init(struct ahash_request *areq) |
| @@ -1155,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq) | |||
| 1155 | int digestsize = crypto_ahash_digestsize(tfm); | 1252 | int digestsize = crypto_ahash_digestsize(tfm); |
| 1156 | 1253 | ||
| 1157 | req_ctx->data_len = 0; | 1254 | req_ctx->data_len = 0; |
| 1158 | req_ctx->dummy_payload_ptr = NULL; | 1255 | req_ctx->reqlen = 0; |
| 1159 | req_ctx->bfr_len = 0; | 1256 | req_ctx->reqbfr = req_ctx->bfr1; |
| 1257 | req_ctx->skbfr = req_ctx->bfr2; | ||
| 1160 | req_ctx->skb = NULL; | 1258 | req_ctx->skb = NULL; |
| 1161 | req_ctx->result = 0; | 1259 | req_ctx->result = 0; |
| 1162 | copy_hash_init_values(req_ctx->partial_hash, digestsize); | 1260 | copy_hash_init_values(req_ctx->partial_hash, digestsize); |
| @@ -1204,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm) | |||
| 1204 | 1302 | ||
| 1205 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 1303 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
| 1206 | sizeof(struct chcr_ahash_req_ctx)); | 1304 | sizeof(struct chcr_ahash_req_ctx)); |
| 1207 | hmacctx->desc = chcr_alloc_shash(digestsize); | 1305 | hmacctx->base_hash = chcr_alloc_shash(digestsize); |
| 1208 | if (IS_ERR(hmacctx->desc)) | 1306 | if (IS_ERR(hmacctx->base_hash)) |
| 1209 | return PTR_ERR(hmacctx->desc); | 1307 | return PTR_ERR(hmacctx->base_hash); |
| 1210 | return chcr_device_init(crypto_tfm_ctx(tfm)); | 1308 | return chcr_device_init(crypto_tfm_ctx(tfm)); |
| 1211 | } | 1309 | } |
| 1212 | 1310 | ||
| 1213 | static void chcr_free_shash(struct shash_desc *desc) | ||
| 1214 | { | ||
| 1215 | crypto_free_shash(desc->tfm); | ||
| 1216 | kfree(desc); | ||
| 1217 | } | ||
| 1218 | |||
| 1219 | static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) | 1311 | static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) |
| 1220 | { | 1312 | { |
| 1221 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | 1313 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
| 1222 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | 1314 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); |
| 1223 | 1315 | ||
| 1224 | if (hmacctx->desc) { | 1316 | if (hmacctx->base_hash) { |
| 1225 | chcr_free_shash(hmacctx->desc); | 1317 | chcr_free_shash(hmacctx->base_hash); |
| 1226 | hmacctx->desc = NULL; | 1318 | hmacctx->base_hash = NULL; |
| 1319 | } | ||
| 1320 | } | ||
| 1321 | |||
| 1322 | static int chcr_copy_assoc(struct aead_request *req, | ||
| 1323 | struct chcr_aead_ctx *ctx) | ||
| 1324 | { | ||
| 1325 | SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); | ||
| 1326 | |||
| 1327 | skcipher_request_set_tfm(skreq, ctx->null); | ||
| 1328 | skcipher_request_set_callback(skreq, aead_request_flags(req), | ||
| 1329 | NULL, NULL); | ||
| 1330 | skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, | ||
| 1331 | NULL); | ||
| 1332 | |||
| 1333 | return crypto_skcipher_encrypt(skreq); | ||
| 1334 | } | ||
| 1335 | |||
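chcr_copy_assoc() above leans on the crypto API's null cipher: "encrypting" assoclen bytes with ecb(cipher_null) is simply a scatterlist-to-scatterlist copy, which spares the driver a hand-rolled sg walk. A hedged, kernel-style sketch of how such a tfm is typically obtained at init time (not a standalone program; ctx->null is the driver's own field):

    struct crypto_skcipher *null;

    /* ecb(cipher_null) is the identity transform: encrypt == copy */
    null = crypto_alloc_skcipher("ecb(cipher_null)", 0, 0);
    if (IS_ERR(null))
            return PTR_ERR(null);
    ctx->null = null;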
| 1336 | static unsigned char get_hmac(unsigned int authsize) | ||
| 1337 | { | ||
| 1338 | switch (authsize) { | ||
| 1339 | case ICV_8: | ||
| 1340 | return CHCR_SCMD_HMAC_CTRL_PL1; | ||
| 1341 | case ICV_10: | ||
| 1342 | return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | ||
| 1343 | case ICV_12: | ||
| 1344 | return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | ||
| 1345 | } | ||
| 1346 | return CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 1347 | } | ||
| 1348 | |||
| 1349 | |||
| 1350 | static struct sk_buff *create_authenc_wr(struct aead_request *req, | ||
| 1351 | unsigned short qid, | ||
| 1352 | int size, | ||
| 1353 | unsigned short op_type) | ||
| 1354 | { | ||
| 1355 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 1356 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | ||
| 1357 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
| 1358 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 1359 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | ||
| 1360 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 1361 | struct sk_buff *skb = NULL; | ||
| 1362 | struct chcr_wr *chcr_req; | ||
| 1363 | struct cpl_rx_phys_dsgl *phys_cpl; | ||
| 1364 | struct phys_sge_parm sg_param; | ||
| 1365 | struct scatterlist *src, *dst; | ||
| 1366 | struct scatterlist src_sg[2], dst_sg[2]; | ||
| 1367 | unsigned int frags = 0, transhdr_len; | ||
| 1368 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; | ||
| 1369 | unsigned int kctx_len = 0; | ||
| 1370 | unsigned short stop_offset = 0; | ||
| 1371 | unsigned int assoclen = req->assoclen; | ||
| 1372 | unsigned int authsize = crypto_aead_authsize(tfm); | ||
| 1373 | int err = 0; | ||
| 1374 | int null = 0; | ||
| 1375 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 1376 | GFP_ATOMIC; | ||
| 1377 | |||
| 1378 | if (aeadctx->enckey_len == 0 || (req->cryptlen == 0)) | ||
| 1379 | goto err; | ||
| 1380 | |||
| 1381 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | ||
| 1382 | goto err; | ||
| 1383 | |||
| 1384 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) | ||
| 1385 | goto err; | ||
| 1386 | src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); | ||
| 1387 | dst = src; | ||
| 1388 | if (req->src != req->dst) { | ||
| 1389 | err = chcr_copy_assoc(req, aeadctx); | ||
| 1390 | if (err) | ||
| 1391 | return ERR_PTR(err); | ||
| 1392 | dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); | ||
| 1393 | } | ||
| 1394 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { | ||
| 1395 | null = 1; | ||
| 1396 | assoclen = 0; | ||
| 1397 | } | ||
| 1398 | reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + | ||
| 1399 | (op_type ? -authsize : authsize)); | ||
| 1400 | if (reqctx->dst_nents <= 0) { | ||
| 1401 | pr_err("AUTHENC:Invalid Destination sg entries\n"); | ||
| 1402 | goto err; | ||
| 1403 | } | ||
| 1404 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | ||
| 1405 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) | ||
| 1406 | - sizeof(chcr_req->key_ctx); | ||
| 1407 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | ||
| 1408 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | ||
| 1409 | if (!skb) | ||
| 1410 | goto err; | ||
| 1411 | |||
| 1412 | /* LLD is going to write the sge hdr. */ | ||
| 1413 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
| 1414 | |||
| 1415 | /* Write WR */ | ||
| 1416 | chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len); | ||
| 1417 | memset(chcr_req, 0, transhdr_len); | ||
| 1418 | |||
| 1419 | stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | ||
| 1420 | |||
| 1421 | /* | ||
| 1422 | * Input order is AAD, IV and Payload, where the IV should be included | ||
| 1423 | * as part of the authdata. All other fields should be filled according | ||
| 1424 | * to the hardware spec. | ||
| 1425 | */ | ||
| 1426 | chcr_req->sec_cpl.op_ivinsrtofst = | ||
| 1427 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, | ||
| 1428 | (ivsize ? (assoclen + 1) : 0)); | ||
| 1429 | chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen); | ||
| 1430 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | ||
| 1431 | assoclen ? 1 : 0, assoclen, | ||
| 1432 | assoclen + ivsize + 1, | ||
| 1433 | (stop_offset & 0x1F0) >> 4); | ||
| 1434 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( | ||
| 1435 | stop_offset & 0xF, | ||
| 1436 | null ? 0 : assoclen + ivsize + 1, | ||
| 1437 | stop_offset, stop_offset); | ||
| 1438 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | ||
| 1439 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, | ||
| 1440 | CHCR_SCMD_CIPHER_MODE_AES_CBC, | ||
| 1441 | actx->auth_mode, aeadctx->hmac_ctrl, | ||
| 1442 | ivsize >> 1); | ||
| 1443 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | ||
| 1444 | 0, 1, dst_size); | ||
| 1445 | |||
| 1446 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | ||
| 1447 | if (op_type == CHCR_ENCRYPT_OP) | ||
| 1448 | memcpy(chcr_req->key_ctx.key, aeadctx->key, | ||
| 1449 | aeadctx->enckey_len); | ||
| 1450 | else | ||
| 1451 | memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, | ||
| 1452 | aeadctx->enckey_len); | ||
| 1453 | |||
| 1454 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << | ||
| 1455 | 4), actx->h_iopad, kctx_len - | ||
| 1456 | (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); | ||
| 1457 | |||
| 1458 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | ||
| 1459 | sg_param.nents = reqctx->dst_nents; | ||
| 1460 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | ||
| 1461 | sg_param.qid = qid; | ||
| 1462 | sg_param.align = 0; | ||
| 1463 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, | ||
| 1464 | &sg_param)) | ||
| 1465 | goto dstmap_fail; | ||
| 1466 | |||
| 1467 | skb_set_transport_header(skb, transhdr_len); | ||
| 1468 | |||
| 1469 | if (assoclen) { | ||
| 1470 | /* AAD buffer in */ | ||
| 1471 | write_sg_to_skb(skb, &frags, req->src, assoclen); | ||
| 1472 | |||
| 1473 | } | ||
| 1474 | write_buffer_to_skb(skb, &frags, req->iv, ivsize); | ||
| 1475 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | ||
| 1476 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, | ||
| 1477 | sizeof(struct cpl_rx_phys_dsgl) + dst_size); | ||
| 1478 | reqctx->skb = skb; | ||
| 1479 | skb_get(skb); | ||
| 1480 | |||
| 1481 | return skb; | ||
| 1482 | dstmap_fail: | ||
| 1483 | /* ivmap_fail: */ | ||
| 1484 | kfree_skb(skb); | ||
| 1485 | err: | ||
| 1486 | return ERR_PTR(-EINVAL); | ||
| 1487 | } | ||
| 1488 | |||
| 1489 | static void aes_gcm_empty_pld_pad(struct scatterlist *sg, | ||
| 1490 | unsigned short offset) | ||
| 1491 | { | ||
| 1492 | struct page *spage; | ||
| 1493 | unsigned char *addr; | ||
| 1494 | |||
| 1495 | spage = sg_page(sg); | ||
| 1496 | get_page(spage); /* so that it is not freed by the NIC */ | ||
| 1497 | #ifdef KMAP_ATOMIC_ARGS | ||
| 1498 | addr = kmap_atomic(spage, KM_SOFTIRQ0); | ||
| 1499 | #else | ||
| 1500 | addr = kmap_atomic(spage); | ||
| 1501 | #endif | ||
| 1502 | memset(addr + sg->offset, 0, offset + 1); | ||
| 1503 | |||
| 1504 | kunmap_atomic(addr); | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) | ||
| 1508 | { | ||
| 1509 | __be32 data; | ||
| 1510 | |||
| 1511 | memset(block, 0, csize); | ||
| 1512 | block += csize; | ||
| 1513 | |||
| 1514 | if (csize >= 4) | ||
| 1515 | csize = 4; | ||
| 1516 | else if (msglen > (unsigned int)(1 << (8 * csize))) | ||
| 1517 | return -EOVERFLOW; | ||
| 1518 | |||
| 1519 | data = cpu_to_be32(msglen); | ||
| 1520 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); | ||
| 1521 | |||
| 1522 | return 0; | ||
| 1523 | } | ||
| 1524 | |||
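set_msg_len() above writes the payload length into the trailing csize bytes of the CCM length field, big-endian, refusing lengths that overflow the field. The same encoding as a runnable sketch, for a hypothetical 4096-byte payload and a 3-byte length field:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    static int set_msg_len(uint8_t *block, unsigned int msglen, int csize)
    {
            uint32_t data;

            memset(block, 0, csize);
            block += csize;
            if (csize >= 4)
                    csize = 4;
            else if (msglen > (unsigned int)(1 << (8 * csize)))
                    return -1;              /* length does not fit */
            data = htonl(msglen);           /* big-endian, like cpu_to_be32() */
            memcpy(block - csize, (uint8_t *)&data + 4 - csize, csize);
            return 0;
    }

    int main(void)
    {
            uint8_t len_field[8] = { 0 };

            set_msg_len(len_field, 4096, 3);        /* 4096 = 0x001000 */
            printf("%02x %02x %02x\n", len_field[0], len_field[1], len_field[2]);
            return 0;
    }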
| 1525 | static void generate_b0(struct aead_request *req, | ||
| 1526 | struct chcr_aead_ctx *aeadctx, | ||
| 1527 | unsigned short op_type) | ||
| 1528 | { | ||
| 1529 | unsigned int l, lp, m; | ||
| 1530 | int rc; | ||
| 1531 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 1532 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 1533 | u8 *b0 = reqctx->scratch_pad; | ||
| 1534 | |||
| 1535 | m = crypto_aead_authsize(aead); | ||
| 1536 | |||
| 1537 | memcpy(b0, reqctx->iv, 16); | ||
| 1538 | |||
| 1539 | lp = b0[0]; | ||
| 1540 | l = lp + 1; | ||
| 1541 | |||
| 1542 | /* set m, bits 3-5 */ | ||
| 1543 | *b0 |= (8 * ((m - 2) / 2)); | ||
| 1544 | |||
| 1545 | /* set adata, bit 6, if associated data is used */ | ||
| 1546 | if (req->assoclen) | ||
| 1547 | *b0 |= 64; | ||
| 1548 | rc = set_msg_len(b0 + 16 - l, | ||
| 1549 | (op_type == CHCR_DECRYPT_OP) ? | ||
| 1550 | req->cryptlen - m : req->cryptlen, l); | ||
| 1551 | } | ||
| 1552 | |||
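generate_b0() above fills in the B0 flags byte as RFC 3610 lays it out: L' = L - 1 in bits 0-2 (already present in iv[0]), M' = (M - 2) / 2 in bits 3-5, and bit 6 set when associated data is present. A runnable check of the flags arithmetic for a 12-byte ICV with AAD and L = 4:

    #include <stdio.h>

    int main(void)
    {
            unsigned int m = 12;            /* ICV (tag) length in bytes */
            unsigned int l = 4;             /* length-field size */
            unsigned char flags = (unsigned char)(l - 1);   /* L', carried in iv[0] */

            flags |= 8 * ((m - 2) / 2);     /* M' in bits 3-5, as in generate_b0() */
            flags |= 64;                    /* Adata bit: AAD present */
            printf("B0 flags = 0x%02x\n", flags);   /* prints 0x6b */
            return 0;
    }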
| 1553 | static inline int crypto_ccm_check_iv(const u8 *iv) | ||
| 1554 | { | ||
| 1555 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ | ||
| 1556 | if (iv[0] < 1 || iv[0] > 7) | ||
| 1557 | return -EINVAL; | ||
| 1558 | |||
| 1559 | return 0; | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | static int ccm_format_packet(struct aead_request *req, | ||
| 1563 | struct chcr_aead_ctx *aeadctx, | ||
| 1564 | unsigned int sub_type, | ||
| 1565 | unsigned short op_type) | ||
| 1566 | { | ||
| 1567 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 1568 | int rc = 0; | ||
| 1569 | |||
| 1570 | if (req->assoclen > T5_MAX_AAD_SIZE) { | ||
| 1571 | pr_err("CCM: Unsupported AAD data. It should be < %d\n", | ||
| 1572 | T5_MAX_AAD_SIZE); | ||
| 1573 | return -EINVAL; | ||
| 1574 | } | ||
| 1575 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { | ||
| 1576 | reqctx->iv[0] = 3; | ||
| 1577 | memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); | ||
| 1578 | memcpy(reqctx->iv + 4, req->iv, 8); | ||
| 1579 | memset(reqctx->iv + 12, 0, 4); | ||
| 1580 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | ||
| 1581 | htons(req->assoclen - 8); | ||
| 1582 | } else { | ||
| 1583 | memcpy(reqctx->iv, req->iv, 16); | ||
| 1584 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | ||
| 1585 | htons(req->assoclen); | ||
| 1586 | } | ||
| 1587 | generate_b0(req, aeadctx, op_type); | ||
| 1588 | /* zero the ctr value */ | ||
| 1589 | memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); | ||
| 1590 | return rc; | ||
| 1591 | } | ||
| 1592 | |||
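In the RFC 4309 branch above the 16-byte counter block is assembled as: one flags byte with L' = 3 (a 4-byte counter), the 3-byte salt established at setkey time, the 8-byte per-packet IV, and four zero counter bytes. The same layout as a runnable sketch with made-up salt and IV:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t ctr_blk[16];
            uint8_t salt[3] = { 0xaa, 0xbb, 0xcc };         /* hypothetical setkey salt */
            uint8_t iv[8]   = { 1, 2, 3, 4, 5, 6, 7, 8 };   /* hypothetical per-packet IV */
            int i;

            ctr_blk[0] = 3;                 /* L' = 3 -> 4-byte counter field */
            memcpy(ctr_blk + 1, salt, 3);
            memcpy(ctr_blk + 4, iv, 8);
            memset(ctr_blk + 12, 0, 4);     /* counter starts at zero */
            for (i = 0; i < 16; i++)
                    printf("%02x ", ctr_blk[i]);
            printf("\n");
            return 0;
    }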
| 1593 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | ||
| 1594 | unsigned int dst_size, | ||
| 1595 | struct aead_request *req, | ||
| 1596 | unsigned short op_type, | ||
| 1597 | struct chcr_context *chcrctx) | ||
| 1598 | { | ||
| 1599 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 1600 | unsigned int ivsize = AES_BLOCK_SIZE; | ||
| 1601 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; | ||
| 1602 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; | ||
| 1603 | unsigned int c_id = chcrctx->dev->tx_channel_id; | ||
| 1604 | unsigned int ccm_xtra; | ||
| 1605 | unsigned char tag_offset = 0, auth_offset = 0; | ||
| 1606 | unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm)); | ||
| 1607 | unsigned int assoclen; | ||
| 1608 | |||
| 1609 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | ||
| 1610 | assoclen = req->assoclen - 8; | ||
| 1611 | else | ||
| 1612 | assoclen = req->assoclen; | ||
| 1613 | ccm_xtra = CCM_B0_SIZE + | ||
| 1614 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); | ||
| 1615 | |||
| 1616 | auth_offset = req->cryptlen ? | ||
| 1617 | (assoclen + ivsize + 1 + ccm_xtra) : 0; | ||
| 1618 | if (op_type == CHCR_DECRYPT_OP) { | ||
| 1619 | if (crypto_aead_authsize(tfm) != req->cryptlen) | ||
| 1620 | tag_offset = crypto_aead_authsize(tfm); | ||
| 1621 | else | ||
| 1622 | auth_offset = 0; | ||
| 1623 | } | ||
| 1624 | |||
| 1625 | |||
| 1626 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, | ||
| 1627 | 2, (ivsize ? (assoclen + 1) : 0) + | ||
| 1628 | ccm_xtra); | ||
| 1629 | sec_cpl->pldlen = | ||
| 1630 | htonl(assoclen + ivsize + req->cryptlen + ccm_xtra); | ||
| 1631 | /* For CCM there will be b0 always. So AAD start will be 1 always */ | ||
| 1632 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | ||
| 1633 | 1, assoclen + ccm_xtra, assoclen | ||
| 1634 | + ivsize + 1 + ccm_xtra, 0); | ||
| 1635 | |||
| 1636 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, | ||
| 1637 | auth_offset, tag_offset, | ||
| 1638 | (op_type == CHCR_ENCRYPT_OP) ? 0 : | ||
| 1639 | crypto_aead_authsize(tfm)); | ||
| 1640 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | ||
| 1641 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, | ||
| 1642 | cipher_mode, mac_mode, hmac_ctrl, | ||
| 1643 | ivsize >> 1); | ||
| 1644 | |||
| 1645 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, | ||
| 1646 | 1, dst_size); | ||
| 1647 | } | ||
| 1648 | |||
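Every offset fill_sec_cpl_for_aead() programs is shifted by ccm_xtra: the 16-byte B0 block always precedes the AAD, and a further 2-byte AAD-length prefix is added whenever AAD is present (the two constants are assumed here to be 16 and 2, matching the layout the code describes). A small runnable check of the offset bookkeeping:

    #include <stdio.h>

    #define CCM_B0_SIZE             16      /* assumed: one AES block for B0 */
    #define CCM_AAD_FIELD_SIZE      2       /* assumed: 16-bit AAD length prefix */

    int main(void)
    {
            unsigned int assoclen = 16, ivsize = 16;
            unsigned int ccm_xtra = CCM_B0_SIZE +
                                    (assoclen ? CCM_AAD_FIELD_SIZE : 0);
            unsigned int iv_insert = assoclen + 1 + ccm_xtra;
            unsigned int cipher_start = assoclen + ivsize + 1 + ccm_xtra;

            printf("ccm_xtra=%u iv_insert=%u cipher_start=%u\n",
                   ccm_xtra, iv_insert, cipher_start);
            return 0;
    }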
| 1649 | int aead_ccm_validate_input(unsigned short op_type, | ||
| 1650 | struct aead_request *req, | ||
| 1651 | struct chcr_aead_ctx *aeadctx, | ||
| 1652 | unsigned int sub_type) | ||
| 1653 | { | ||
| 1654 | if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { | ||
| 1655 | if (crypto_ccm_check_iv(req->iv)) { | ||
| 1656 | pr_err("CCM: IV check fails\n"); | ||
| 1657 | return -EINVAL; | ||
| 1658 | } | ||
| 1659 | } else { | ||
| 1660 | if (req->assoclen != 16 && req->assoclen != 20) { | ||
| 1661 | pr_err("RFC4309: Invalid AAD length %d\n", | ||
| 1662 | req->assoclen); | ||
| 1663 | return -EINVAL; | ||
| 1664 | } | ||
| 1665 | } | ||
| 1666 | if (aeadctx->enckey_len == 0) { | ||
| 1667 | pr_err("CCM: Encryption key not set\n"); | ||
| 1668 | return -EINVAL; | ||
| 1669 | } | ||
| 1670 | return 0; | ||
| 1671 | } | ||
| 1672 | |||
| 1673 | unsigned int fill_aead_req_fields(struct sk_buff *skb, | ||
| 1674 | struct aead_request *req, | ||
| 1675 | struct scatterlist *src, | ||
| 1676 | unsigned int ivsize, | ||
| 1677 | struct chcr_aead_ctx *aeadctx) | ||
| 1678 | { | ||
| 1679 | unsigned int frags = 0; | ||
| 1680 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 1681 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 1682 | /* b0 and aad length (if available) */ | ||
| 1683 | |||
| 1684 | write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE + | ||
| 1685 | (req->assoclen ? CCM_AAD_FIELD_SIZE : 0)); | ||
| 1686 | if (req->assoclen) { | ||
| 1687 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | ||
| 1688 | write_sg_to_skb(skb, &frags, req->src, | ||
| 1689 | req->assoclen - 8); | ||
| 1690 | else | ||
| 1691 | write_sg_to_skb(skb, &frags, req->src, req->assoclen); | ||
| 1692 | } | ||
| 1693 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | ||
| 1694 | if (req->cryptlen) | ||
| 1695 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | ||
| 1696 | |||
| 1697 | return frags; | ||
| 1698 | } | ||
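fill_aead_req_fields() streams the hardware input as skb fragments in a fixed order: B0 (plus the AAD length field), then AAD, then the IV, then the payload. A hypothetical flat-buffer equivalent makes that wire layout explicit (ccm_flatten_input and all its parameters are illustrative only):

	static u8 *ccm_flatten_input(u8 *p, const u8 *scratch,	/* B0 (+len field) */
				     const u8 *aad, unsigned int aadlen,
				     const u8 *iv, const u8 *payload,
				     unsigned int plen)
	{
		unsigned int b0len = CCM_B0_SIZE + (aadlen ? CCM_AAD_FIELD_SIZE : 0);

		memcpy(p, scratch, b0len);	p += b0len;	/* B0 (+ AAD length) */
		memcpy(p, aad, aadlen);		p += aadlen;	/* associated data */
		memcpy(p, iv, AES_BLOCK_SIZE);	p += AES_BLOCK_SIZE; /* 16-byte IV */
		memcpy(p, payload, plen);			/* plain/ciphertext */
		return p + plen;
	}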
| 1699 | |||
| 1700 | static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | ||
| 1701 | unsigned short qid, | ||
| 1702 | int size, | ||
| 1703 | unsigned short op_type) | ||
| 1704 | { | ||
| 1705 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 1706 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | ||
| 1707 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
| 1708 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 1709 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 1710 | struct sk_buff *skb = NULL; | ||
| 1711 | struct chcr_wr *chcr_req; | ||
| 1712 | struct cpl_rx_phys_dsgl *phys_cpl; | ||
| 1713 | struct phys_sge_parm sg_param; | ||
| 1714 | struct scatterlist *src, *dst; | ||
| 1715 | struct scatterlist src_sg[2], dst_sg[2]; | ||
| 1716 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; | ||
| 1717 | unsigned int dst_size = 0, kctx_len; | ||
| 1718 | unsigned int sub_type; | ||
| 1719 | unsigned int authsize = crypto_aead_authsize(tfm); | ||
| 1720 | int err = 0; | ||
| 1721 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 1722 | GFP_ATOMIC; | ||
| 1723 | |||
| 1724 | |||
| 1725 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | ||
| 1726 | goto err; | ||
| 1727 | |||
| 1728 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) | ||
| 1729 | goto err; | ||
| 1730 | sub_type = get_aead_subtype(tfm); | ||
| 1731 | src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); | ||
| 1732 | dst = src; | ||
| 1733 | if (req->src != req->dst) { | ||
| 1734 | err = chcr_copy_assoc(req, aeadctx); | ||
| 1735 | if (err) { | ||
| 1736 | pr_err("AAD copy to destination buffer failed\n"); | ||
| 1737 | return ERR_PTR(err); | ||
| 1738 | } | ||
| 1739 | dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); | ||
| 1740 | } | ||
| 1741 | reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + | ||
| 1742 | (op_type ? -authsize : authsize)); | ||
| 1743 | if (reqctx->dst_nents <= 0) { | ||
| 1744 | pr_err("CCM: Invalid destination sg entries\n"); | ||
| 1745 | goto err; | ||
| 1746 | } | ||
| 1747 | |||
| 1748 | |||
| 1749 | if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type)) | ||
| 1750 | goto err; | ||
| 1751 | |||
| 1752 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | ||
| 1753 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; | ||
| 1754 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | ||
| 1755 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | ||
| 1756 | |||
| 1757 | if (!skb) | ||
| 1758 | goto err; | ||
| 1759 | |||
| 1760 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
| 1761 | |||
| 1762 | chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len); | ||
| 1763 | memset(chcr_req, 0, transhdr_len); | ||
| 1764 | |||
| 1765 | fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx); | ||
| 1766 | |||
| 1767 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | ||
| 1768 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | ||
| 1769 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * | ||
| 1770 | 16), aeadctx->key, aeadctx->enckey_len); | ||
| 1771 | |||
| 1772 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | ||
| 1773 | if (ccm_format_packet(req, aeadctx, sub_type, op_type)) | ||
| 1774 | goto dstmap_fail; | ||
| 1775 | |||
| 1776 | sg_param.nents = reqctx->dst_nents; | ||
| 1777 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | ||
| 1778 | sg_param.qid = qid; | ||
| 1779 | sg_param.align = 0; | ||
| 1780 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, | ||
| 1781 | &sg_param)) | ||
| 1782 | goto dstmap_fail; | ||
| 1783 | |||
| 1784 | skb_set_transport_header(skb, transhdr_len); | ||
| 1785 | frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); | ||
| 1786 | create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, | ||
| 1787 | sizeof(struct cpl_rx_phys_dsgl) + dst_size); | ||
| 1788 | reqctx->skb = skb; | ||
| 1789 | skb_get(skb); | ||
| 1790 | return skb; | ||
| 1791 | dstmap_fail: | ||
| 1792 | kfree_skb(skb); | ||
| 1793 | skb = NULL; | ||
| 1794 | err: | ||
| 1795 | return ERR_PTR(-EINVAL); | ||
| 1796 | } | ||
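ccm_format_packet(), which is not part of this hunk, must leave an RFC 3610 B0 block in reqctx->scratch_pad before the request is mapped. A sketch of the standard B0 layout it has to produce (ccm_build_b0_sketch is illustrative; the driver's actual formatter may differ in detail):

	/* B0 = flags | nonce (15 - L bytes) | message length (L bytes, big endian)
	 * flags = 64 * Adata + 8 * ((taglen - 2) / 2) + (L - 1)
	 */
	static void ccm_build_b0_sketch(u8 *b0, const u8 *nonce, unsigned int l,
					unsigned int taglen, bool have_aad,
					u32 msglen)
	{
		unsigned int i;

		b0[0] = (have_aad ? 64 : 0) | (((taglen - 2) / 2) << 3) | (l - 1);
		memcpy(b0 + 1, nonce, 15 - l);
		for (i = 0; i < l; i++)
			b0[15 - i] = (u8)(msglen >> (8 * i));	/* big-endian length */
	}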
| 1797 | |||
| 1798 | static struct sk_buff *create_gcm_wr(struct aead_request *req, | ||
| 1799 | unsigned short qid, | ||
| 1800 | int size, | ||
| 1801 | unsigned short op_type) | ||
| 1802 | { | ||
| 1803 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 1804 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | ||
| 1805 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
| 1806 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 1807 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 1808 | struct sk_buff *skb = NULL; | ||
| 1809 | struct chcr_wr *chcr_req; | ||
| 1810 | struct cpl_rx_phys_dsgl *phys_cpl; | ||
| 1811 | struct phys_sge_parm sg_param; | ||
| 1812 | struct scatterlist *src, *dst; | ||
| 1813 | struct scatterlist src_sg[2], dst_sg[2]; | ||
| 1814 | unsigned int frags = 0, transhdr_len; | ||
| 1815 | unsigned int ivsize = AES_BLOCK_SIZE; | ||
| 1816 | unsigned int dst_size = 0, kctx_len; | ||
| 1817 | unsigned char tag_offset = 0; | ||
| 1818 | unsigned int crypt_len = 0; | ||
| 1819 | unsigned int authsize = crypto_aead_authsize(tfm); | ||
| 1820 | unsigned char hmac_ctrl = get_hmac(authsize); | ||
| 1821 | int err = 0; | ||
| 1822 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
| 1823 | GFP_ATOMIC; | ||
| 1824 | |||
| 1825 | /* validate key size */ | ||
| 1826 | if (aeadctx->enckey_len == 0) | ||
| 1827 | goto err; | ||
| 1828 | |||
| 1829 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | ||
| 1830 | goto err; | ||
| 1831 | |||
| 1832 | if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) | ||
| 1833 | goto err; | ||
| 1834 | |||
| 1835 | src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); | ||
| 1836 | dst = src; | ||
| 1837 | if (req->src != req->dst) { | ||
| 1838 | err = chcr_copy_assoc(req, aeadctx); | ||
| 1839 | if (err) | ||
| 1840 | return ERR_PTR(err); | ||
| 1841 | dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | if (!req->cryptlen) | ||
| 1845 | /* a null payload is not supported by the hardware; | ||
| 1846 | * software sends one AES block of padding instead | ||
| 1847 | */ | ||
| 1848 | crypt_len = AES_BLOCK_SIZE; | ||
| 1849 | else | ||
| 1850 | crypt_len = req->cryptlen; | ||
| 1851 | reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + | ||
| 1852 | (op_type ? -authsize : authsize)); | ||
| 1853 | if (reqctx->dst_nents <= 0) { | ||
| 1854 | pr_err("GCM: Invalid destination sg entries\n"); | ||
| 1855 | goto err; | ||
| 1856 | } | ||
| 1857 | |||
| 1858 | |||
| 1859 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | ||
| 1860 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + | ||
| 1861 | AEAD_H_SIZE; | ||
| 1862 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | ||
| 1863 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | ||
| 1864 | if (!skb) | ||
| 1865 | goto err; | ||
| 1866 | |||
| 1867 | /* NIC driver is going to write the sge hdr. */ | ||
| 1868 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | ||
| 1869 | |||
| 1870 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); | ||
| 1871 | memset(chcr_req, 0, transhdr_len); | ||
| 1872 | |||
| 1873 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) | ||
| 1874 | req->assoclen -= 8; | ||
| 1875 | |||
| 1876 | tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | ||
| 1877 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( | ||
| 1878 | ctx->dev->tx_channel_id, 2, (ivsize ? | ||
| 1879 | (req->assoclen + 1) : 0)); | ||
| 1880 | chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len); | ||
| 1881 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | ||
| 1882 | req->assoclen ? 1 : 0, req->assoclen, | ||
| 1883 | req->assoclen + ivsize + 1, 0); | ||
| 1884 | if (req->cryptlen) { | ||
| 1885 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | ||
| 1886 | FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1, | ||
| 1887 | tag_offset, tag_offset); | ||
| 1888 | chcr_req->sec_cpl.seqno_numivs = | ||
| 1889 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == | ||
| 1890 | CHCR_ENCRYPT_OP) ? 1 : 0, | ||
| 1891 | CHCR_SCMD_CIPHER_MODE_AES_GCM, | ||
| 1892 | CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl, | ||
| 1893 | ivsize >> 1); | ||
| 1894 | } else { | ||
| 1895 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | ||
| 1896 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); | ||
| 1897 | chcr_req->sec_cpl.seqno_numivs = | ||
| 1898 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, | ||
| 1899 | (op_type == CHCR_ENCRYPT_OP) ? | ||
| 1900 | 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC, | ||
| 1901 | 0, 0, ivsize >> 1); | ||
| 1902 | } | ||
| 1903 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | ||
| 1904 | 0, 1, dst_size); | ||
| 1905 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | ||
| 1906 | memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); | ||
| 1907 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * | ||
| 1908 | 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); | ||
| 1909 | |||
| 1910 | /* prepare the 16-byte IV: | ||
| 1911 | * SALT (4 bytes) | IV (8 bytes) | 0x00000001 */ | ||
| 1912 | if (get_aead_subtype(tfm) == | ||
| 1913 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { | ||
| 1914 | memcpy(reqctx->iv, aeadctx->salt, 4); | ||
| 1915 | memcpy(reqctx->iv + 4, req->iv, 8); | ||
| 1916 | } else { | ||
| 1917 | memcpy(reqctx->iv, req->iv, 12); | ||
| 1918 | } | ||
| 1919 | *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); | ||
| 1920 | |||
| 1921 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | ||
| 1922 | sg_param.nents = reqctx->dst_nents; | ||
| 1923 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | ||
| 1924 | sg_param.qid = qid; | ||
| 1925 | sg_param.align = 0; | ||
| 1926 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, | ||
| 1927 | &sg_param)) | ||
| 1928 | goto dstmap_fail; | ||
| 1929 | |||
| 1930 | skb_set_transport_header(skb, transhdr_len); | ||
| 1931 | |||
| 1932 | write_sg_to_skb(skb, &frags, req->src, req->assoclen); | ||
| 1933 | |||
| 1934 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | ||
| 1935 | |||
| 1936 | if (req->cryptlen) { | ||
| 1937 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | ||
| 1938 | } else { | ||
| 1939 | aes_gcm_empty_pld_pad(req->dst, authsize - 1); | ||
| 1940 | write_sg_to_skb(skb, &frags, dst, crypt_len); | ||
| 1941 | } | ||
| 1942 | |||
| 1943 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, | ||
| 1944 | sizeof(struct cpl_rx_phys_dsgl) + dst_size); | ||
| 1945 | reqctx->skb = skb; | ||
| 1946 | skb_get(skb); | ||
| 1947 | return skb; | ||
| 1948 | |||
| 1949 | dstmap_fail: | ||
| 1950 | /* ivmap_fail: */ | ||
| 1951 | kfree_skb(skb); | ||
| 1952 | skb = NULL; | ||
| 1953 | err: | ||
| 1954 | return skb; | ||
| 1955 | } | ||
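The 16-byte IV assembled in create_gcm_wr() is GCM's initial counter block J0 for a 96-bit IV: the 12 IV bytes (for rfc4106, a 4-byte salt from the key followed by the 8-byte explicit IV) with a big-endian counter of 1 appended. A compact sketch of the same construction (gcm_build_j0 is a hypothetical name):

	static void gcm_build_j0(u8 *j0, const u8 *salt, const u8 *explicit_iv)
	{
		memcpy(j0, salt, 4);		/* rfc4106: implicit nonce from key */
		memcpy(j0 + 4, explicit_iv, 8);	/* per-request IV from req->iv */
		j0[12] = 0;
		j0[13] = 0;
		j0[14] = 0;
		j0[15] = 1;			/* 32-bit big-endian counter = 1 */
	}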
| 1956 | |||
| 1957 | |||
| 1958 | |||
| 1959 | static int chcr_aead_cra_init(struct crypto_aead *tfm) | ||
| 1960 | { | ||
| 1961 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | ||
| 1962 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 1963 | |||
| 1964 | crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx)); | ||
| 1965 | aeadctx->null = crypto_get_default_null_skcipher(); | ||
| 1966 | if (IS_ERR(aeadctx->null)) | ||
| 1967 | return PTR_ERR(aeadctx->null); | ||
| 1968 | return chcr_device_init(ctx); | ||
| 1969 | } | ||
| 1970 | |||
| 1971 | static void chcr_aead_cra_exit(struct crypto_aead *tfm) | ||
| 1972 | { | ||
| 1973 | crypto_put_default_null_skcipher(); | ||
| 1974 | } | ||
| 1975 | |||
| 1976 | static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, | ||
| 1977 | unsigned int authsize) | ||
| 1978 | { | ||
| 1979 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | ||
| 1980 | |||
| 1981 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; | ||
| 1982 | aeadctx->mayverify = VERIFY_HW; | ||
| 1983 | return 0; | ||
| 1984 | } | ||
| 1985 | static int chcr_authenc_setauthsize(struct crypto_aead *tfm, | ||
| 1986 | unsigned int authsize) | ||
| 1987 | { | ||
| 1988 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | ||
| 1989 | u32 maxauth = crypto_aead_maxauthsize(tfm); | ||
| 1990 | |||
| 1991 | /* For SHA1 the IPsec authsize is 12, not 10, i.e. authsize == | ||
| 1992 | * maxauthsize / 2 does not hold for SHA1; the authsize == 12 check | ||
| 1993 | * must therefore come before the authsize == (maxauth >> 1) check. | ||
| 1994 | */ | ||
| 1995 | if (authsize == ICV_4) { | ||
| 1996 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | ||
| 1997 | aeadctx->mayverify = VERIFY_HW; | ||
| 1998 | } else if (authsize == ICV_6) { | ||
| 1999 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; | ||
| 2000 | aeadctx->mayverify = VERIFY_HW; | ||
| 2001 | } else if (authsize == ICV_10) { | ||
| 2002 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | ||
| 2003 | aeadctx->mayverify = VERIFY_HW; | ||
| 2004 | } else if (authsize == ICV_12) { | ||
| 2005 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | ||
| 2006 | aeadctx->mayverify = VERIFY_HW; | ||
| 2007 | } else if (authsize == ICV_14) { | ||
| 2008 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | ||
| 2009 | aeadctx->mayverify = VERIFY_HW; | ||
| 2010 | } else if (authsize == (maxauth >> 1)) { | ||
| 2011 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | ||
| 2012 | aeadctx->mayverify = VERIFY_HW; | ||
| 2013 | } else if (authsize == maxauth) { | ||
| 2014 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 2015 | aeadctx->mayverify = VERIFY_HW; | ||
| 2016 | } else { | ||
| 2017 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 2018 | aeadctx->mayverify = VERIFY_SW; | ||
| 2019 | } | ||
| 2020 | return 0; | ||
| 2021 | } | ||
| 2022 | |||
| 2023 | |||
| 2024 | static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | ||
| 2025 | { | ||
| 2026 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | ||
| 2027 | |||
| 2028 | switch (authsize) { | ||
| 2029 | case ICV_4: | ||
| 2030 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | ||
| 2031 | aeadctx->mayverify = VERIFY_HW; | ||
| 2032 | break; | ||
| 2033 | case ICV_8: | ||
| 2034 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | ||
| 2035 | aeadctx->mayverify = VERIFY_HW; | ||
| 2036 | break; | ||
| 2037 | case ICV_12: | ||
| 2038 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | ||
| 2039 | aeadctx->mayverify = VERIFY_HW; | ||
| 2040 | break; | ||
| 2041 | case ICV_14: | ||
| 2042 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | ||
| 2043 | aeadctx->mayverify = VERIFY_HW; | ||
| 2044 | break; | ||
| 2045 | case ICV_16: | ||
| 2046 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 2047 | aeadctx->mayverify = VERIFY_HW; | ||
| 2048 | break; | ||
| 2049 | case ICV_13: | ||
| 2050 | case ICV_15: | ||
| 2051 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 2052 | aeadctx->mayverify = VERIFY_SW; | ||
| 2053 | break; | ||
| 2054 | default: | ||
| 2055 | |||
| 2056 | crypto_tfm_set_flags((struct crypto_tfm *) tfm, | ||
| 2057 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2058 | return -EINVAL; | ||
| 2059 | } | ||
| 2060 | return 0; | ||
| 2061 | } | ||
| 2062 | |||
| 2063 | static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, | ||
| 2064 | unsigned int authsize) | ||
| 2065 | { | ||
| 2066 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | ||
| 2067 | |||
| 2068 | switch (authsize) { | ||
| 2069 | case ICV_8: | ||
| 2070 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | ||
| 2071 | aeadctx->mayverify = VERIFY_HW; | ||
| 2072 | break; | ||
| 2073 | case ICV_12: | ||
| 2074 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | ||
| 2075 | aeadctx->mayverify = VERIFY_HW; | ||
| 2076 | break; | ||
| 2077 | case ICV_16: | ||
| 2078 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 2079 | aeadctx->mayverify = VERIFY_HW; | ||
| 2080 | break; | ||
| 2081 | default: | ||
| 2082 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | ||
| 2083 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2084 | return -EINVAL; | ||
| 2085 | } | ||
| 2086 | return 0; | ||
| 2087 | } | ||
| 2088 | |||
| 2089 | static int chcr_ccm_setauthsize(struct crypto_aead *tfm, | ||
| 2090 | unsigned int authsize) | ||
| 2091 | { | ||
| 2092 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | ||
| 2093 | |||
| 2094 | switch (authsize) { | ||
| 2095 | case ICV_4: | ||
| 2096 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; | ||
| 2097 | aeadctx->mayverify = VERIFY_HW; | ||
| 2098 | break; | ||
| 2099 | case ICV_6: | ||
| 2100 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; | ||
| 2101 | aeadctx->mayverify = VERIFY_HW; | ||
| 2102 | break; | ||
| 2103 | case ICV_8: | ||
| 2104 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | ||
| 2105 | aeadctx->mayverify = VERIFY_HW; | ||
| 2106 | break; | ||
| 2107 | case ICV_10: | ||
| 2108 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; | ||
| 2109 | aeadctx->mayverify = VERIFY_HW; | ||
| 2110 | break; | ||
| 2111 | case ICV_12: | ||
| 2112 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | ||
| 2113 | aeadctx->mayverify = VERIFY_HW; | ||
| 2114 | break; | ||
| 2115 | case ICV_14: | ||
| 2116 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; | ||
| 2117 | aeadctx->mayverify = VERIFY_HW; | ||
| 2118 | break; | ||
| 2119 | case ICV_16: | ||
| 2120 | aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
| 2121 | aeadctx->mayverify = VERIFY_HW; | ||
| 2122 | break; | ||
| 2123 | default: | ||
| 2124 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | ||
| 2125 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2126 | return -EINVAL; | ||
| 2127 | } | ||
| 2128 | return 0; | ||
| 2129 | } | ||
| 2130 | |||
| 2131 | static int chcr_aead_ccm_setkey(struct crypto_aead *aead, | ||
| 2132 | const u8 *key, | ||
| 2133 | unsigned int keylen) | ||
| 2134 | { | ||
| 2135 | struct chcr_context *ctx = crypto_aead_ctx(aead); | ||
| 2136 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 2137 | unsigned char ck_size, mk_size; | ||
| 2138 | int key_ctx_size = 0; | ||
| 2139 | |||
| 2140 | memcpy(aeadctx->key, key, keylen); | ||
| 2141 | aeadctx->enckey_len = keylen; | ||
| 2142 | key_ctx_size = sizeof(struct _key_ctx) + | ||
| 2143 | ((DIV_ROUND_UP(keylen, 16)) << 4) * 2; | ||
| 2144 | if (keylen == AES_KEYSIZE_128) { | ||
| 2145 | mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
| 2146 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
| 2147 | } else if (keylen == AES_KEYSIZE_192) { | ||
| 2148 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
| 2149 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; | ||
| 2150 | } else if (keylen == AES_KEYSIZE_256) { | ||
| 2151 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
| 2152 | mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | ||
| 2153 | } else { | ||
| 2154 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | ||
| 2155 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2156 | aeadctx->enckey_len = 0; | ||
| 2157 | return -EINVAL; | ||
| 2158 | } | ||
| 2159 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, | ||
| 2160 | key_ctx_size >> 4); | ||
| 2161 | return 0; | ||
| 2162 | } | ||
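The key-context sizing above stores the AES key twice, once for the CTR cipher and once for the CBC-MAC, each copy rounded up to a 16-byte boundary behind the _key_ctx header. A sketch of the arithmetic (ccm_key_ctx_size is a hypothetical name; e.g. AES-128 gives 2 * 16 = 32 key bytes plus the header):

	static int ccm_key_ctx_size(unsigned int keylen)
	{
		/* two 16-byte-aligned key copies after the header */
		return sizeof(struct _key_ctx) + 2 * (DIV_ROUND_UP(keylen, 16) * 16);
	}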
| 2163 | |||
| 2164 | static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, | ||
| 2165 | unsigned int keylen) | ||
| 2166 | { | ||
| 2167 | struct chcr_context *ctx = crypto_aead_ctx(aead); | ||
| 2168 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 2169 | |||
| 2170 | if (keylen < 3) { | ||
| 2171 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | ||
| 2172 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2173 | aeadctx->enckey_len = 0; | ||
| 2174 | return -EINVAL; | ||
| 2175 | } | ||
| 2176 | keylen -= 3; | ||
| 2177 | memcpy(aeadctx->salt, key + keylen, 3); | ||
| 2178 | return chcr_aead_ccm_setkey(aead, key, keylen); | ||
| 2179 | } | ||
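Per RFC 4309, the key blob handed to setkey is the AES key immediately followed by a 3-byte salt; the function above peels the salt off and forwards the rest to the CCM setkey path. A standalone sketch of the split (rfc4309_split_key and its buffers are illustrative):

	static int rfc4309_split_key(const u8 *blob, unsigned int bloblen,
				     u8 *aes_key, u8 *salt)
	{
		if (bloblen < 3)
			return -EINVAL;
		bloblen -= 3;				/* trailing 3 bytes are salt */
		memcpy(aes_key, blob, bloblen);
		memcpy(salt, blob + bloblen, 3);
		return bloblen;				/* remaining AES key length */
	}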
| 2180 | |||
| 2181 | static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, | ||
| 2182 | unsigned int keylen) | ||
| 2183 | { | ||
| 2184 | struct chcr_context *ctx = crypto_aead_ctx(aead); | ||
| 2185 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 2186 | struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); | ||
| 2187 | struct blkcipher_desc h_desc; | ||
| 2188 | struct scatterlist src[1]; | ||
| 2189 | unsigned int ck_size; | ||
| 2190 | int ret = 0, key_ctx_size = 0; | ||
| 2191 | |||
| 2192 | if (get_aead_subtype(aead) == | ||
| 2193 | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { | ||
| 2194 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ | ||
| 2195 | memcpy(aeadctx->salt, key + keylen, 4); | ||
| 2196 | } | ||
| 2197 | if (keylen == AES_KEYSIZE_128) { | ||
| 2198 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
| 2199 | } else if (keylen == AES_KEYSIZE_192) { | ||
| 2200 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
| 2201 | } else if (keylen == AES_KEYSIZE_256) { | ||
| 2202 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
| 2203 | } else { | ||
| 2204 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | ||
| 2205 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2206 | aeadctx->enckey_len = 0; | ||
| 2207 | pr_err("GCM: Invalid key length %u\n", keylen); | ||
| 2208 | ret = -EINVAL; | ||
| 2209 | goto out; | ||
| 2210 | } | ||
| 2211 | |||
| 2212 | memcpy(aeadctx->key, key, keylen); | ||
| 2213 | aeadctx->enckey_len = keylen; | ||
| 2214 | key_ctx_size = sizeof(struct _key_ctx) + | ||
| 2215 | ((DIV_ROUND_UP(keylen, 16)) << 4) + | ||
| 2216 | AEAD_H_SIZE; | ||
| 2217 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, | ||
| 2218 | CHCR_KEYCTX_MAC_KEY_SIZE_128, | ||
| 2219 | 0, 0, | ||
| 2220 | key_ctx_size >> 4); | ||
| 2221 | /* Calculate H = CIPH(K, 0 repeated 16 times) with a synchronous | ||
| 2222 | * AES blkcipher; the result is placed in the key context. | ||
| 2223 | */ | ||
| 2224 | h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0); | ||
| 2225 | if (IS_ERR(h_desc.tfm)) { | ||
| 2226 | aeadctx->enckey_len = 0; | ||
| 2227 | ret = -ENOMEM; | ||
| 2228 | goto out; | ||
| 2229 | } | ||
| 2230 | h_desc.flags = 0; | ||
| 2231 | ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen); | ||
| 2232 | if (ret) { | ||
| 2233 | aeadctx->enckey_len = 0; | ||
| 2234 | goto out1; | ||
| 2235 | } | ||
| 2236 | memset(gctx->ghash_h, 0, AEAD_H_SIZE); | ||
| 2237 | sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE); | ||
| 2238 | ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE); | ||
| 2239 | |||
| 2240 | out1: | ||
| 2241 | crypto_free_blkcipher(h_desc.tfm); | ||
| 2242 | out: | ||
| 2243 | return ret; | ||
| 2244 | } | ||
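The subkey written into the key context is the GHASH key H = CIPH_K(0^128). The code above derives it by CBC-encrypting one zero block (with the transform's zero-initialized IV), which amounts to a single AES encryption of the all-zero block. A sketch of the same derivation using the single-block cipher helpers, assuming <linux/crypto.h> and the generic "aes" cipher (derive_ghash_h is a hypothetical name):

	static int derive_ghash_h(const u8 *key, unsigned int keylen, u8 *h)
	{
		struct crypto_cipher *aes;
		int ret;

		aes = crypto_alloc_cipher("aes", 0, 0);
		if (IS_ERR(aes))
			return PTR_ERR(aes);
		ret = crypto_cipher_setkey(aes, key, keylen);
		if (!ret) {
			memset(h, 0, AEAD_H_SIZE);		/* 16 zero bytes */
			crypto_cipher_encrypt_one(aes, h, h);	/* H = E_K(0^16) */
		}
		crypto_free_cipher(aes);
		return ret;
	}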
| 2245 | |||
| 2246 | static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | ||
| 2247 | unsigned int keylen) | ||
| 2248 | { | ||
| 2249 | struct chcr_context *ctx = crypto_aead_ctx(authenc); | ||
| 2250 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 2251 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | ||
| 2252 | /* holds both the auth key and the cipher key */ | ||
| 2253 | struct crypto_authenc_keys keys; | ||
| 2254 | unsigned int bs; | ||
| 2255 | unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; | ||
| 2256 | int err = 0, i, key_ctx_len = 0; | ||
| 2257 | unsigned char ck_size = 0; | ||
| 2258 | unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 }; | ||
| 2259 | struct crypto_shash *base_hash = NULL; | ||
| 2260 | struct algo_param param; | ||
| 2261 | int align; | ||
| 2262 | u8 *o_ptr = NULL; | ||
| 2263 | |||
| 2264 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { | ||
| 2265 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2266 | goto out; | ||
| 2267 | } | ||
| 2268 | |||
| 2269 | if (get_alg_config(¶m, max_authsize)) { | ||
| 2270 | pr_err("chcr : Unsupported digest size\n"); | ||
| 2271 | goto out; | ||
| 2272 | } | ||
| 2273 | if (keys.enckeylen == AES_KEYSIZE_128) { | ||
| 2274 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
| 2275 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | ||
| 2276 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
| 2277 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | ||
| 2278 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
| 2279 | } else { | ||
| 2280 | pr_err("chcr : Unsupported cipher key\n"); | ||
| 2281 | goto out; | ||
| 2282 | } | ||
| 2283 | |||
| 2284 | /* Copy only the encryption key. The auth key is consumed here to | ||
| 2285 | * generate h(ipad) and h(opad), so it is not needed again; | ||
| 2286 | * authkeylen is the size of the hash digest. | ||
| 2287 | */ | ||
| 2288 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | ||
| 2289 | aeadctx->enckey_len = keys.enckeylen; | ||
| 2290 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | ||
| 2291 | aeadctx->enckey_len << 3); | ||
| 2292 | |||
| 2293 | base_hash = chcr_alloc_shash(max_authsize); | ||
| 2294 | if (IS_ERR(base_hash)) { | ||
| 2295 | pr_err("chcr : Base driver cannot be loaded\n"); | ||
| 2296 | goto out; | ||
| 1227 | } | 2297 | } |
| 2298 | { | ||
| 2299 | SHASH_DESC_ON_STACK(shash, base_hash); | ||
| 2300 | shash->tfm = base_hash; | ||
| 2301 | shash->flags = crypto_shash_get_flags(base_hash); | ||
| 2302 | bs = crypto_shash_blocksize(base_hash); | ||
| 2303 | align = KEYCTX_ALIGN_PAD(max_authsize); | ||
| 2304 | o_ptr = actx->h_iopad + param.result_size + align; | ||
| 2305 | |||
| 2306 | if (keys.authkeylen > bs) { | ||
| 2307 | err = crypto_shash_digest(shash, keys.authkey, | ||
| 2308 | keys.authkeylen, | ||
| 2309 | o_ptr); | ||
| 2310 | if (err) { | ||
| 2311 | pr_err("chcr : Hashing of the auth key failed\n"); | ||
| 2312 | goto out; | ||
| 2313 | } | ||
| 2314 | keys.authkeylen = max_authsize; | ||
| 2315 | } else | ||
| 2316 | memcpy(o_ptr, keys.authkey, keys.authkeylen); | ||
| 2317 | |||
| 2318 | /* Compute the ipad-digest*/ | ||
| 2319 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); | ||
| 2320 | memcpy(pad, o_ptr, keys.authkeylen); | ||
| 2321 | for (i = 0; i < bs >> 2; i++) | ||
| 2322 | *((unsigned int *)pad + i) ^= IPAD_DATA; | ||
| 2323 | |||
| 2324 | if (chcr_compute_partial_hash(shash, pad, actx->h_iopad, | ||
| 2325 | max_authsize)) | ||
| 2326 | goto out; | ||
| 2327 | /* Compute the opad-digest */ | ||
| 2328 | memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); | ||
| 2329 | memcpy(pad, o_ptr, keys.authkeylen); | ||
| 2330 | for (i = 0; i < bs >> 2; i++) | ||
| 2331 | *((unsigned int *)pad + i) ^= OPAD_DATA; | ||
| 2332 | |||
| 2333 | if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize)) | ||
| 2334 | goto out; | ||
| 2335 | |||
| 2336 | /* convert the ipad and opad digest to network order */ | ||
| 2337 | chcr_change_order(actx->h_iopad, param.result_size); | ||
| 2338 | chcr_change_order(o_ptr, param.result_size); | ||
| 2339 | key_ctx_len = sizeof(struct _key_ctx) + | ||
| 2340 | ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) + | ||
| 2341 | (param.result_size + align) * 2; | ||
| 2342 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, | ||
| 2343 | 0, 1, key_ctx_len >> 4); | ||
| 2344 | actx->auth_mode = param.auth_mode; | ||
| 2345 | chcr_free_shash(base_hash); | ||
| 2346 | |||
| 2347 | return 0; | ||
| 2348 | } | ||
| 2349 | out: | ||
| 2350 | aeadctx->enckey_len = 0; | ||
| 2351 | if (base_hash) | ||
| 2352 | chcr_free_shash(base_hash); | ||
| 2353 | return -EINVAL; | ||
| 1228 | } | 2354 | } |
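The two XOR loops above are the standard RFC 2104 HMAC pad precompute: the digest-sized auth key is zero-padded to the hash block size, XORed with 0x36 and 0x5c, and a partial hash of each pad is cached in the key context. A generic sketch of the pad step (hmac_precompute_pads is a hypothetical name; keylen <= bs holds because longer keys were digested above):

	static void hmac_precompute_pads(const u8 *key, unsigned int keylen,
					 unsigned int bs, u8 *k_ipad, u8 *k_opad)
	{
		unsigned int i;

		memset(k_ipad, 0, bs);
		memcpy(k_ipad, key, keylen);
		memcpy(k_opad, k_ipad, bs);
		for (i = 0; i < bs; i++) {
			k_ipad[i] ^= 0x36;		/* IPAD_DATA byte */
			k_opad[i] ^= 0x5c;		/* OPAD_DATA byte */
		}
	}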
| 1229 | 2355 | ||
| 2356 | static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, | ||
| 2357 | const u8 *key, unsigned int keylen) | ||
| 2358 | { | ||
| 2359 | struct chcr_context *ctx = crypto_aead_ctx(authenc); | ||
| 2360 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | ||
| 2361 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | ||
| 2362 | struct crypto_authenc_keys keys; | ||
| 2363 | |||
| 2364 | /* holds both the auth key and the cipher key */ | ||
| 2365 | int key_ctx_len = 0; | ||
| 2366 | unsigned char ck_size = 0; | ||
| 2367 | |||
| 2368 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { | ||
| 2369 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 2370 | goto out; | ||
| 2371 | } | ||
| 2372 | if (keys.enckeylen == AES_KEYSIZE_128) { | ||
| 2373 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
| 2374 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | ||
| 2375 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
| 2376 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | ||
| 2377 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
| 2378 | } else { | ||
| 2379 | pr_err("chcr : Unsupported cipher key\n"); | ||
| 2380 | goto out; | ||
| 2381 | } | ||
| 2382 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | ||
| 2383 | aeadctx->enckey_len = keys.enckeylen; | ||
| 2384 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | ||
| 2385 | aeadctx->enckey_len << 3); | ||
| 2386 | key_ctx_len = sizeof(struct _key_ctx) | ||
| 2387 | + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4); | ||
| 2388 | |||
| 2389 | aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, | ||
| 2390 | 0, key_ctx_len >> 4); | ||
| 2391 | actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; | ||
| 2392 | return 0; | ||
| 2393 | out: | ||
| 2394 | aeadctx->enckey_len = 0; | ||
| 2395 | return -EINVAL; | ||
| 2396 | } | ||
| 2397 | static int chcr_aead_encrypt(struct aead_request *req) | ||
| 2398 | { | ||
| 2399 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 2400 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 2401 | |||
| 2402 | reqctx->verify = VERIFY_HW; | ||
| 2403 | |||
| 2404 | switch (get_aead_subtype(tfm)) { | ||
| 2405 | case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: | ||
| 2406 | case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: | ||
| 2407 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | ||
| 2408 | create_authenc_wr); | ||
| 2409 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | ||
| 2410 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: | ||
| 2411 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | ||
| 2412 | create_aead_ccm_wr); | ||
| 2413 | default: | ||
| 2414 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | ||
| 2415 | create_gcm_wr); | ||
| 2416 | } | ||
| 2417 | } | ||
| 2418 | |||
| 2419 | static int chcr_aead_decrypt(struct aead_request *req) | ||
| 2420 | { | ||
| 2421 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 2422 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); | ||
| 2423 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
| 2424 | int size; | ||
| 2425 | |||
| 2426 | if (aeadctx->mayverify == VERIFY_SW) { | ||
| 2427 | size = crypto_aead_maxauthsize(tfm); | ||
| 2428 | reqctx->verify = VERIFY_SW; | ||
| 2429 | } else { | ||
| 2430 | size = 0; | ||
| 2431 | reqctx->verify = VERIFY_HW; | ||
| 2432 | } | ||
| 2433 | |||
| 2434 | switch (get_aead_subtype(tfm)) { | ||
| 2435 | case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: | ||
| 2436 | case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: | ||
| 2437 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | ||
| 2438 | create_authenc_wr); | ||
| 2439 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | ||
| 2440 | case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: | ||
| 2441 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | ||
| 2442 | create_aead_ccm_wr); | ||
| 2443 | default: | ||
| 2444 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | ||
| 2445 | create_gcm_wr); | ||
| 2446 | } | ||
| 2447 | } | ||
| 2448 | |||
| 2449 | static int chcr_aead_op(struct aead_request *req, | ||
| 2450 | unsigned short op_type, | ||
| 2451 | int size, | ||
| 2452 | create_wr_t create_wr_fn) | ||
| 2453 | { | ||
| 2454 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
| 2455 | struct chcr_context *ctx = crypto_aead_ctx(tfm); | ||
| 2456 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
| 2457 | struct sk_buff *skb; | ||
| 2458 | |||
| 2459 | if (ctx && !ctx->dev) { | ||
| 2460 | pr_err("chcr : %s : No crypto device.\n", __func__); | ||
| 2461 | return -ENXIO; | ||
| 2462 | } | ||
| 2463 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | ||
| 2464 | ctx->tx_channel_id)) { | ||
| 2465 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
| 2466 | return -EBUSY; | ||
| 2467 | } | ||
| 2468 | |||
| 2469 | /* Form a WR from req */ | ||
| 2470 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size, | ||
| 2471 | op_type); | ||
| 2472 | |||
| 2473 | if (IS_ERR(skb) || skb == NULL) { | ||
| 2474 | pr_err("chcr : %s : failed to form WR. No memory\n", __func__); | ||
| 2475 | return skb ? PTR_ERR(skb) : -ENOMEM; | ||
| 2476 | } | ||
| 2477 | |||
| 2478 | skb->dev = u_ctx->lldi.ports[0]; | ||
| 2479 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); | ||
| 2480 | chcr_send_wr(skb); | ||
| 2481 | return -EINPROGRESS; | ||
| 2482 | } | ||
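For context, these work-request builders are reached through the generic kernel AEAD API once the algorithms below are registered. A hedged sketch of a caller driving "gcm(aes)" (error handling trimmed; a real caller also installs a completion callback with aead_request_set_callback(), since this driver returns -EINPROGRESS):

	static int gcm_encrypt_example(struct scatterlist *sg, u8 *iv,
				       unsigned int assoclen, unsigned int cryptlen,
				       const u8 *key, unsigned int keylen)
	{
		struct crypto_aead *tfm;
		struct aead_request *req;
		int ret;

		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		crypto_aead_setkey(tfm, key, keylen);
		crypto_aead_setauthsize(tfm, 16);	/* full GHASH tag */

		req = aead_request_alloc(tfm, GFP_KERNEL);
		aead_request_set_ad(req, assoclen);
		/* sg covers AAD | plaintext, with room for the tag on output */
		aead_request_set_crypt(req, sg, sg, cryptlen, iv);
		ret = crypto_aead_encrypt(req);		/* -EINPROGRESS when async */

		/* sketch only: an async caller must wait for completion first */
		aead_request_free(req);
		crypto_free_aead(tfm);
		return ret;
	}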
| 1230 | static struct chcr_alg_template driver_algs[] = { | 2483 | static struct chcr_alg_template driver_algs[] = { |
| 1231 | /* AES-CBC */ | 2484 | /* AES-CBC */ |
| 1232 | { | 2485 | { |
| @@ -1234,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1234 | .is_registered = 0, | 2487 | .is_registered = 0, |
| 1235 | .alg.crypto = { | 2488 | .alg.crypto = { |
| 1236 | .cra_name = "cbc(aes)", | 2489 | .cra_name = "cbc(aes)", |
| 1237 | .cra_driver_name = "cbc(aes-chcr)", | 2490 | .cra_driver_name = "cbc-aes-chcr", |
| 1238 | .cra_priority = CHCR_CRA_PRIORITY, | 2491 | .cra_priority = CHCR_CRA_PRIORITY, |
| 1239 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 2492 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | |
| 1240 | CRYPTO_ALG_ASYNC, | 2493 | CRYPTO_ALG_ASYNC, |
| @@ -1261,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1261 | .is_registered = 0, | 2514 | .is_registered = 0, |
| 1262 | .alg.crypto = { | 2515 | .alg.crypto = { |
| 1263 | .cra_name = "xts(aes)", | 2516 | .cra_name = "xts(aes)", |
| 1264 | .cra_driver_name = "xts(aes-chcr)", | 2517 | .cra_driver_name = "xts-aes-chcr", |
| 1265 | .cra_priority = CHCR_CRA_PRIORITY, | 2518 | .cra_priority = CHCR_CRA_PRIORITY, |
| 1266 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | | 2519 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | |
| 1267 | CRYPTO_ALG_ASYNC, | 2520 | CRYPTO_ALG_ASYNC, |
| @@ -1354,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1354 | .halg.digestsize = SHA1_DIGEST_SIZE, | 2607 | .halg.digestsize = SHA1_DIGEST_SIZE, |
| 1355 | .halg.base = { | 2608 | .halg.base = { |
| 1356 | .cra_name = "hmac(sha1)", | 2609 | .cra_name = "hmac(sha1)", |
| 1357 | .cra_driver_name = "hmac(sha1-chcr)", | 2610 | .cra_driver_name = "hmac-sha1-chcr", |
| 1358 | .cra_blocksize = SHA1_BLOCK_SIZE, | 2611 | .cra_blocksize = SHA1_BLOCK_SIZE, |
| 1359 | } | 2612 | } |
| 1360 | } | 2613 | } |
| @@ -1366,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1366 | .halg.digestsize = SHA224_DIGEST_SIZE, | 2619 | .halg.digestsize = SHA224_DIGEST_SIZE, |
| 1367 | .halg.base = { | 2620 | .halg.base = { |
| 1368 | .cra_name = "hmac(sha224)", | 2621 | .cra_name = "hmac(sha224)", |
| 1369 | .cra_driver_name = "hmac(sha224-chcr)", | 2622 | .cra_driver_name = "hmac-sha224-chcr", |
| 1370 | .cra_blocksize = SHA224_BLOCK_SIZE, | 2623 | .cra_blocksize = SHA224_BLOCK_SIZE, |
| 1371 | } | 2624 | } |
| 1372 | } | 2625 | } |
| @@ -1378,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1378 | .halg.digestsize = SHA256_DIGEST_SIZE, | 2631 | .halg.digestsize = SHA256_DIGEST_SIZE, |
| 1379 | .halg.base = { | 2632 | .halg.base = { |
| 1380 | .cra_name = "hmac(sha256)", | 2633 | .cra_name = "hmac(sha256)", |
| 1381 | .cra_driver_name = "hmac(sha256-chcr)", | 2634 | .cra_driver_name = "hmac-sha256-chcr", |
| 1382 | .cra_blocksize = SHA256_BLOCK_SIZE, | 2635 | .cra_blocksize = SHA256_BLOCK_SIZE, |
| 1383 | } | 2636 | } |
| 1384 | } | 2637 | } |
| @@ -1390,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1390 | .halg.digestsize = SHA384_DIGEST_SIZE, | 2643 | .halg.digestsize = SHA384_DIGEST_SIZE, |
| 1391 | .halg.base = { | 2644 | .halg.base = { |
| 1392 | .cra_name = "hmac(sha384)", | 2645 | .cra_name = "hmac(sha384)", |
| 1393 | .cra_driver_name = "hmac(sha384-chcr)", | 2646 | .cra_driver_name = "hmac-sha384-chcr", |
| 1394 | .cra_blocksize = SHA384_BLOCK_SIZE, | 2647 | .cra_blocksize = SHA384_BLOCK_SIZE, |
| 1395 | } | 2648 | } |
| 1396 | } | 2649 | } |
| @@ -1402,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = { | |||
| 1402 | .halg.digestsize = SHA512_DIGEST_SIZE, | 2655 | .halg.digestsize = SHA512_DIGEST_SIZE, |
| 1403 | .halg.base = { | 2656 | .halg.base = { |
| 1404 | .cra_name = "hmac(sha512)", | 2657 | .cra_name = "hmac(sha512)", |
| 1405 | .cra_driver_name = "hmac(sha512-chcr)", | 2658 | .cra_driver_name = "hmac-sha512-chcr", |
| 1406 | .cra_blocksize = SHA512_BLOCK_SIZE, | 2659 | .cra_blocksize = SHA512_BLOCK_SIZE, |
| 1407 | } | 2660 | } |
| 1408 | } | 2661 | } |
| 1409 | }, | 2662 | }, |
| 2663 | /* Add AEAD Algorithms */ | ||
| 2664 | { | ||
| 2665 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM, | ||
| 2666 | .is_registered = 0, | ||
| 2667 | .alg.aead = { | ||
| 2668 | .base = { | ||
| 2669 | .cra_name = "gcm(aes)", | ||
| 2670 | .cra_driver_name = "gcm-aes-chcr", | ||
| 2671 | .cra_blocksize = 1, | ||
| 2672 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2673 | sizeof(struct chcr_aead_ctx) + | ||
| 2674 | sizeof(struct chcr_gcm_ctx), | ||
| 2675 | }, | ||
| 2676 | .ivsize = 12, | ||
| 2677 | .maxauthsize = GHASH_DIGEST_SIZE, | ||
| 2678 | .setkey = chcr_gcm_setkey, | ||
| 2679 | .setauthsize = chcr_gcm_setauthsize, | ||
| 2680 | } | ||
| 2681 | }, | ||
| 2682 | { | ||
| 2683 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106, | ||
| 2684 | .is_registered = 0, | ||
| 2685 | .alg.aead = { | ||
| 2686 | .base = { | ||
| 2687 | .cra_name = "rfc4106(gcm(aes))", | ||
| 2688 | .cra_driver_name = "rfc4106-gcm-aes-chcr", | ||
| 2689 | .cra_blocksize = 1, | ||
| 2690 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2691 | sizeof(struct chcr_aead_ctx) + | ||
| 2692 | sizeof(struct chcr_gcm_ctx), | ||
| 2693 | |||
| 2694 | }, | ||
| 2695 | .ivsize = 8, | ||
| 2696 | .maxauthsize = GHASH_DIGEST_SIZE, | ||
| 2697 | .setkey = chcr_gcm_setkey, | ||
| 2698 | .setauthsize = chcr_4106_4309_setauthsize, | ||
| 2699 | } | ||
| 2700 | }, | ||
| 2701 | { | ||
| 2702 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM, | ||
| 2703 | .is_registered = 0, | ||
| 2704 | .alg.aead = { | ||
| 2705 | .base = { | ||
| 2706 | .cra_name = "ccm(aes)", | ||
| 2707 | .cra_driver_name = "ccm-aes-chcr", | ||
| 2708 | .cra_blocksize = 1, | ||
| 2709 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2710 | sizeof(struct chcr_aead_ctx), | ||
| 2711 | |||
| 2712 | }, | ||
| 2713 | .ivsize = AES_BLOCK_SIZE, | ||
| 2714 | .maxauthsize = GHASH_DIGEST_SIZE, | ||
| 2715 | .setkey = chcr_aead_ccm_setkey, | ||
| 2716 | .setauthsize = chcr_ccm_setauthsize, | ||
| 2717 | } | ||
| 2718 | }, | ||
| 2719 | { | ||
| 2720 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309, | ||
| 2721 | .is_registered = 0, | ||
| 2722 | .alg.aead = { | ||
| 2723 | .base = { | ||
| 2724 | .cra_name = "rfc4309(ccm(aes))", | ||
| 2725 | .cra_driver_name = "rfc4309-ccm-aes-chcr", | ||
| 2726 | .cra_blocksize = 1, | ||
| 2727 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2728 | sizeof(struct chcr_aead_ctx), | ||
| 2729 | |||
| 2730 | }, | ||
| 2731 | .ivsize = 8, | ||
| 2732 | .maxauthsize = GHASH_DIGEST_SIZE, | ||
| 2733 | .setkey = chcr_aead_rfc4309_setkey, | ||
| 2734 | .setauthsize = chcr_4106_4309_setauthsize, | ||
| 2735 | } | ||
| 2736 | }, | ||
| 2737 | { | ||
| 2738 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | ||
| 2739 | .is_registered = 0, | ||
| 2740 | .alg.aead = { | ||
| 2741 | .base = { | ||
| 2742 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | ||
| 2743 | .cra_driver_name = | ||
| 2744 | "authenc-hmac-sha1-cbc-aes-chcr", | ||
| 2745 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2746 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2747 | sizeof(struct chcr_aead_ctx) + | ||
| 2748 | sizeof(struct chcr_authenc_ctx), | ||
| 2749 | |||
| 2750 | }, | ||
| 2751 | .ivsize = AES_BLOCK_SIZE, | ||
| 2752 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 2753 | .setkey = chcr_authenc_setkey, | ||
| 2754 | .setauthsize = chcr_authenc_setauthsize, | ||
| 2755 | } | ||
| 2756 | }, | ||
| 2757 | { | ||
| 2758 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | ||
| 2759 | .is_registered = 0, | ||
| 2760 | .alg.aead = { | ||
| 2761 | .base = { | ||
| 2762 | |||
| 2763 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | ||
| 2764 | .cra_driver_name = | ||
| 2765 | "authenc-hmac-sha256-cbc-aes-chcr", | ||
| 2766 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2767 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2768 | sizeof(struct chcr_aead_ctx) + | ||
| 2769 | sizeof(struct chcr_authenc_ctx), | ||
| 2770 | |||
| 2771 | }, | ||
| 2772 | .ivsize = AES_BLOCK_SIZE, | ||
| 2773 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 2774 | .setkey = chcr_authenc_setkey, | ||
| 2775 | .setauthsize = chcr_authenc_setauthsize, | ||
| 2776 | } | ||
| 2777 | }, | ||
| 2778 | { | ||
| 2779 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | ||
| 2780 | .is_registered = 0, | ||
| 2781 | .alg.aead = { | ||
| 2782 | .base = { | ||
| 2783 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | ||
| 2784 | .cra_driver_name = | ||
| 2785 | "authenc-hmac-sha224-cbc-aes-chcr", | ||
| 2786 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2787 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2788 | sizeof(struct chcr_aead_ctx) + | ||
| 2789 | sizeof(struct chcr_authenc_ctx), | ||
| 2790 | }, | ||
| 2791 | .ivsize = AES_BLOCK_SIZE, | ||
| 2792 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 2793 | .setkey = chcr_authenc_setkey, | ||
| 2794 | .setauthsize = chcr_authenc_setauthsize, | ||
| 2795 | } | ||
| 2796 | }, | ||
| 2797 | { | ||
| 2798 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | ||
| 2799 | .is_registered = 0, | ||
| 2800 | .alg.aead = { | ||
| 2801 | .base = { | ||
| 2802 | .cra_name = "authenc(hmac(sha384),cbc(aes))", | ||
| 2803 | .cra_driver_name = | ||
| 2804 | "authenc-hmac-sha384-cbc-aes-chcr", | ||
| 2805 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2806 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2807 | sizeof(struct chcr_aead_ctx) + | ||
| 2808 | sizeof(struct chcr_authenc_ctx), | ||
| 2809 | |||
| 2810 | }, | ||
| 2811 | .ivsize = AES_BLOCK_SIZE, | ||
| 2812 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 2813 | .setkey = chcr_authenc_setkey, | ||
| 2814 | .setauthsize = chcr_authenc_setauthsize, | ||
| 2815 | } | ||
| 2816 | }, | ||
| 2817 | { | ||
| 2818 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | ||
| 2819 | .is_registered = 0, | ||
| 2820 | .alg.aead = { | ||
| 2821 | .base = { | ||
| 2822 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | ||
| 2823 | .cra_driver_name = | ||
| 2824 | "authenc-hmac-sha512-cbc-aes-chcr", | ||
| 2825 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2826 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2827 | sizeof(struct chcr_aead_ctx) + | ||
| 2828 | sizeof(struct chcr_authenc_ctx), | ||
| 2829 | |||
| 2830 | }, | ||
| 2831 | .ivsize = AES_BLOCK_SIZE, | ||
| 2832 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 2833 | .setkey = chcr_authenc_setkey, | ||
| 2834 | .setauthsize = chcr_authenc_setauthsize, | ||
| 2835 | } | ||
| 2836 | }, | ||
| 2837 | { | ||
| 2838 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL, | ||
| 2839 | .is_registered = 0, | ||
| 2840 | .alg.aead = { | ||
| 2841 | .base = { | ||
| 2842 | .cra_name = "authenc(digest_null,cbc(aes))", | ||
| 2843 | .cra_driver_name = | ||
| 2844 | "authenc-digest_null-cbc-aes-chcr", | ||
| 2845 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 2846 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
| 2847 | sizeof(struct chcr_aead_ctx) + | ||
| 2848 | sizeof(struct chcr_authenc_ctx), | ||
| 2849 | |||
| 2850 | }, | ||
| 2851 | .ivsize = AES_BLOCK_SIZE, | ||
| 2852 | .maxauthsize = 0, | ||
| 2853 | .setkey = chcr_aead_digest_null_setkey, | ||
| 2854 | .setauthsize = chcr_authenc_null_setauthsize, | ||
| 2855 | } | ||
| 2856 | }, | ||
| 1410 | }; | 2857 | }; |
| 1411 | 2858 | ||
| 1412 | /* | 2859 | /* |
| @@ -1424,6 +2871,11 @@ static int chcr_unregister_alg(void) | |||
| 1424 | crypto_unregister_alg( | 2871 | crypto_unregister_alg( |
| 1425 | &driver_algs[i].alg.crypto); | 2872 | &driver_algs[i].alg.crypto); |
| 1426 | break; | 2873 | break; |
| 2874 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 2875 | if (driver_algs[i].is_registered) | ||
| 2876 | crypto_unregister_aead( | ||
| 2877 | &driver_algs[i].alg.aead); | ||
| 2878 | break; | ||
| 1427 | case CRYPTO_ALG_TYPE_AHASH: | 2879 | case CRYPTO_ALG_TYPE_AHASH: |
| 1428 | if (driver_algs[i].is_registered) | 2880 | if (driver_algs[i].is_registered) |
| 1429 | crypto_unregister_ahash( | 2881 | crypto_unregister_ahash( |
| @@ -1458,6 +2910,19 @@ static int chcr_register_alg(void) | |||
| 1458 | err = crypto_register_alg(&driver_algs[i].alg.crypto); | 2910 | err = crypto_register_alg(&driver_algs[i].alg.crypto); |
| 1459 | name = driver_algs[i].alg.crypto.cra_driver_name; | 2911 | name = driver_algs[i].alg.crypto.cra_driver_name; |
| 1460 | break; | 2912 | break; |
| 2913 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 2914 | driver_algs[i].alg.aead.base.cra_priority = | ||
| 2915 | CHCR_CRA_PRIORITY; | ||
| 2916 | driver_algs[i].alg.aead.base.cra_flags = | ||
| 2917 | CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
| 2918 | driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; | ||
| 2919 | driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; | ||
| 2920 | driver_algs[i].alg.aead.init = chcr_aead_cra_init; | ||
| 2921 | driver_algs[i].alg.aead.exit = chcr_aead_cra_exit; | ||
| 2922 | driver_algs[i].alg.aead.base.cra_module = THIS_MODULE; | ||
| 2923 | err = crypto_register_aead(&driver_algs[i].alg.aead); | ||
| 2924 | name = driver_algs[i].alg.aead.base.cra_driver_name; | ||
| 2925 | break; | ||
| 1461 | case CRYPTO_ALG_TYPE_AHASH: | 2926 | case CRYPTO_ALG_TYPE_AHASH: |
| 1462 | a_hash = &driver_algs[i].alg.hash; | 2927 | a_hash = &driver_algs[i].alg.hash; |
| 1463 | a_hash->update = chcr_ahash_update; | 2928 | a_hash->update = chcr_ahash_update; |
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index 199b0bb69b89..3c7c51f7bedf 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h | |||
| @@ -108,30 +108,24 @@ | |||
| 108 | #define IPAD_DATA 0x36363636 | 108 | #define IPAD_DATA 0x36363636 |
| 109 | #define OPAD_DATA 0x5c5c5c5c | 109 | #define OPAD_DATA 0x5c5c5c5c |
| 110 | 110 | ||
| 111 | #define TRANSHDR_SIZE(alignedkctx_len)\ | 111 | #define TRANSHDR_SIZE(kctx_len)\ |
| 112 | (sizeof(struct ulptx_idata) +\ | 112 | (sizeof(struct chcr_wr) +\ |
| 113 | sizeof(struct ulp_txpkt) +\ | 113 | kctx_len) |
| 114 | sizeof(struct fw_crypto_lookaside_wr) +\ | 114 | #define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \ |
| 115 | sizeof(struct cpl_tx_sec_pdu) +\ | 115 | (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\ |
| 116 | (alignedkctx_len)) | ||
| 117 | #define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \ | ||
| 118 | (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\ | ||
| 119 | sizeof(struct cpl_rx_phys_dsgl)) | 116 | sizeof(struct cpl_rx_phys_dsgl)) |
| 120 | #define HASH_TRANSHDR_SIZE(alignedkctx_len)\ | 117 | #define HASH_TRANSHDR_SIZE(kctx_len)\ |
| 121 | (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES) | 118 | (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES) |
| 122 | 119 | ||
| 123 | #define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \ | ||
| 124 | sizeof(struct ulp_txpkt) + \ | ||
| 125 | sizeof(struct ulptx_idata)) | ||
| 126 | 120 | ||
| 127 | #define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \ | 121 | #define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \ |
| 128 | htonl( \ | 122 | htonl( \ |
| 129 | CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \ | 123 | CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \ |
| 130 | CPL_TX_SEC_PDU_RXCHID_V((id)) | \ | 124 | CPL_TX_SEC_PDU_RXCHID_V((id)) | \ |
| 131 | CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \ | 125 | CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \ |
| 132 | CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \ | 126 | CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \ |
| 133 | CPL_TX_SEC_PDU_CPLLEN_V((len)) | \ | 127 | CPL_TX_SEC_PDU_CPLLEN_V((len)) | \ |
| 134 | CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \ | 128 | CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \ |
| 135 | CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst))) | 129 | CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst))) |
| 136 | 130 | ||
| 137 | #define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \ | 131 | #define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \ |
| @@ -148,7 +142,7 @@ | |||
| 148 | CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \ | 142 | CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \ |
| 149 | CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst))) | 143 | CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst))) |
| 150 | 144 | ||
| 151 | #define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \ | 145 | #define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \ |
| 152 | htonl( \ | 146 | htonl( \ |
| 153 | SCMD_SEQ_NO_CTRL_V(0) | \ | 147 | SCMD_SEQ_NO_CTRL_V(0) | \ |
| 154 | SCMD_STATUS_PRESENT_V(0) | \ | 148 | SCMD_STATUS_PRESENT_V(0) | \ |
| @@ -159,7 +153,7 @@ | |||
| 159 | SCMD_AUTH_MODE_V((amode)) | \ | 153 | SCMD_AUTH_MODE_V((amode)) | \ |
| 160 | SCMD_HMAC_CTRL_V((opad)) | \ | 154 | SCMD_HMAC_CTRL_V((opad)) | \ |
| 161 | SCMD_IV_SIZE_V((size)) | \ | 155 | SCMD_IV_SIZE_V((size)) | \ |
| 162 | SCMD_NUM_IVS_V((nivs))) | 156 | SCMD_NUM_IVS_V(0)) |
| 163 | 157 | ||
| 164 | #define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \ | 158 | #define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \ |
| 165 | SCMD_ENB_DBGID_V(0) | \ | 159 | SCMD_ENB_DBGID_V(0) | \ |
| @@ -264,13 +258,15 @@ enum { | |||
| 264 | * where they indicate the size of the integrity check value (ICV) | 258 | * where they indicate the size of the integrity check value (ICV) |
| 265 | */ | 259 | */ |
| 266 | enum { | 260 | enum { |
| 267 | AES_CCM_ICV_4 = 4, | 261 | ICV_4 = 4, |
| 268 | AES_CCM_ICV_6 = 6, | 262 | ICV_6 = 6, |
| 269 | AES_CCM_ICV_8 = 8, | 263 | ICV_8 = 8, |
| 270 | AES_CCM_ICV_10 = 10, | 264 | ICV_10 = 10, |
| 271 | AES_CCM_ICV_12 = 12, | 265 | ICV_12 = 12, |
| 272 | AES_CCM_ICV_14 = 14, | 266 | ICV_13 = 13, |
| 273 | AES_CCM_ICV_16 = 16 | 267 | ICV_14 = 14, |
| 268 | ICV_15 = 15, | ||
| 269 | ICV_16 = 16 | ||
| 274 | }; | 270 | }; |
| 275 | 271 | ||
| 276 | struct hash_op_params { | 272 | struct hash_op_params { |
| @@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = { | |||
| 394 | 187, 22 | 390 | 187, 22 |
| 395 | }; | 391 | }; |
| 396 | 392 | ||
| 397 | static u32 aes_ks_subword(const u32 w) | 393 | static inline u32 aes_ks_subword(const u32 w) |
| 398 | { | 394 | { |
| 399 | u8 bytes[4]; | 395 | u8 bytes[4]; |
| 400 | 396 | ||
| @@ -412,61 +408,4 @@ static u32 round_constant[11] = { | |||
| 412 | 0x1B000000, 0x36000000, 0x6C000000 | 408 | 0x1B000000, 0x36000000, 0x6C000000 |
| 413 | }; | 409 | }; |
| 414 | 410 | ||
| 415 | /* dec_key - OUTPUT - Reverse round key | ||
| 416 | * key - INPUT - key | ||
| 417 | * keylength - INPUT - length of the key in number of bits | ||
| 418 | */ | ||
| 419 | static inline void get_aes_decrypt_key(unsigned char *dec_key, | ||
| 420 | const unsigned char *key, | ||
| 421 | unsigned int keylength) | ||
| 422 | { | ||
| 423 | u32 temp; | ||
| 424 | u32 w_ring[MAX_NK]; | ||
| 425 | int i, j, k; | ||
| 426 | u8 nr, nk; | ||
| 427 | |||
| 428 | switch (keylength) { | ||
| 429 | case AES_KEYLENGTH_128BIT: | ||
| 430 | nk = KEYLENGTH_4BYTES; | ||
| 431 | nr = NUMBER_OF_ROUNDS_10; | ||
| 432 | break; | ||
| 433 | |||
| 434 | case AES_KEYLENGTH_192BIT: | ||
| 435 | nk = KEYLENGTH_6BYTES; | ||
| 436 | nr = NUMBER_OF_ROUNDS_12; | ||
| 437 | break; | ||
| 438 | case AES_KEYLENGTH_256BIT: | ||
| 439 | nk = KEYLENGTH_8BYTES; | ||
| 440 | nr = NUMBER_OF_ROUNDS_14; | ||
| 441 | break; | ||
| 442 | default: | ||
| 443 | return; | ||
| 444 | } | ||
| 445 | for (i = 0; i < nk; i++ ) | ||
| 446 | w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); | ||
| 447 | |||
| 448 | i = 0; | ||
| 449 | temp = w_ring[nk - 1]; | ||
| 450 | while(i + nk < (nr + 1) * 4) { | ||
| 451 | if(!(i % nk)) { | ||
| 452 | /* RotWord(temp) */ | ||
| 453 | temp = (temp << 8) | (temp >> 24); | ||
| 454 | temp = aes_ks_subword(temp); | ||
| 455 | temp ^= round_constant[i / nk]; | ||
| 456 | } | ||
| 457 | else if (nk == 8 && (i % 4 == 0)) | ||
| 458 | temp = aes_ks_subword(temp); | ||
| 459 | w_ring[i % nk] ^= temp; | ||
| 460 | temp = w_ring[i % nk]; | ||
| 461 | i++; | ||
| 462 | } | ||
| 463 | i--; | ||
| 464 | for (k = 0, j = i % nk; k < nk; k++) { | ||
| 465 | *((u32 *)dec_key + k) = htonl(w_ring[j]); | ||
| 466 | j--; | ||
| 467 | if(j < 0) | ||
| 468 | j += nk; | ||
| 469 | } | ||
| 470 | } | ||
| 471 | |||
| 472 | #endif /* __CHCR_ALGO_H__ */ | 411 | #endif /* __CHCR_ALGO_H__ */ |
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 4d7f6700fd7e..918da8e6e2d8 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c | |||
| @@ -110,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev, | |||
| 110 | if (ack_err_status) { | 110 | if (ack_err_status) { |
| 111 | if (CHK_MAC_ERR_BIT(ack_err_status) || | 111 | if (CHK_MAC_ERR_BIT(ack_err_status) || |
| 112 | CHK_PAD_ERR_BIT(ack_err_status)) | 112 | CHK_PAD_ERR_BIT(ack_err_status)) |
| 113 | error_status = -EINVAL; | 113 | error_status = -EBADMSG; |
| 114 | } | 114 | } |
| 115 | /* call completion callback with failure status */ | 115 | /* call completion callback with failure status */ |
| 116 | if (req) { | 116 | if (req) { |
| 117 | if (!chcr_handle_resp(req, input, error_status)) | 117 | error_status = chcr_handle_resp(req, input, error_status); |
| 118 | req->complete(req, error_status); | 118 | req->complete(req, error_status); |
| 119 | else | ||
| 120 | return -EINVAL; | ||
| 121 | } else { | 119 | } else { |
| 122 | pr_err("Incorrect request address from the firmware\n"); | 120 | pr_err("Incorrect request address from the firmware\n"); |
| 123 | return -EFAULT; | 121 | return -EFAULT; |
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index 2a5c671a4232..c7088a4e0a49 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h | |||
| @@ -52,13 +52,27 @@ | |||
| 52 | 52 | ||
| 53 | #define MAC_ERROR_BIT 0 | 53 | #define MAC_ERROR_BIT 0 |
| 54 | #define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) | 54 | #define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) |
| 55 | #define MAX_SALT 4 | ||
| 55 | 56 | ||
| 56 | struct uld_ctx; | 57 | struct uld_ctx; |
| 57 | 58 | ||
| 59 | struct _key_ctx { | ||
| 60 | __be32 ctx_hdr; | ||
| 61 | u8 salt[MAX_SALT]; | ||
| 62 | __be64 reserverd; | ||
| 63 | unsigned char key[0]; | ||
| 64 | }; | ||
| 65 | |||
| 66 | struct chcr_wr { | ||
| 67 | struct fw_crypto_lookaside_wr wreq; | ||
| 68 | struct ulp_txpkt ulptx; | ||
| 69 | struct ulptx_idata sc_imm; | ||
| 70 | struct cpl_tx_sec_pdu sec_cpl; | ||
| 71 | struct _key_ctx key_ctx; | ||
| 72 | }; | ||
| 73 | |||
| 58 | struct chcr_dev { | 74 | struct chcr_dev { |
| 59 | /* Request submited to h/w and waiting for response. */ | ||
| 60 | spinlock_t lock_chcr_dev; | 75 | spinlock_t lock_chcr_dev; |
| 61 | struct crypto_queue pending_queue; | ||
| 62 | struct uld_ctx *u_ctx; | 76 | struct uld_ctx *u_ctx; |
| 63 | unsigned char tx_channel_id; | 77 | unsigned char tx_channel_id; |
| 64 | }; | 78 | }; |
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index d7d75605da8b..d5af7d64a763 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h | |||
| @@ -36,6 +36,14 @@ | |||
| 36 | #ifndef __CHCR_CRYPTO_H__ | 36 | #ifndef __CHCR_CRYPTO_H__ |
| 37 | #define __CHCR_CRYPTO_H__ | 37 | #define __CHCR_CRYPTO_H__ |
| 38 | 38 | ||
| 39 | #define GHASH_BLOCK_SIZE 16 | ||
| 40 | #define GHASH_DIGEST_SIZE 16 | ||
| 41 | |||
| 42 | #define CCM_B0_SIZE 16 | ||
| 43 | #define CCM_AAD_FIELD_SIZE 2 | ||
| 44 | #define T5_MAX_AAD_SIZE 512 | ||
| 45 | |||
| 46 | |||
| 39 | /* Define following if h/w is not dropping the AAD and IV data before | 47 | /* Define following if h/w is not dropping the AAD and IV data before |
| 40 | * giving the processed data | 48 | * giving the processed data |
| 41 | */ | 49 | */ |
| @@ -63,22 +71,36 @@ | |||
| 63 | #define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0 | 71 | #define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0 |
| 64 | #define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1 | 72 | #define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1 |
| 65 | 73 | ||
| 66 | #define CHCR_SCMD_CIPHER_MODE_NOP 0 | 74 | #define CHCR_SCMD_CIPHER_MODE_NOP 0 |
| 67 | #define CHCR_SCMD_CIPHER_MODE_AES_CBC 1 | 75 | #define CHCR_SCMD_CIPHER_MODE_AES_CBC 1 |
| 68 | #define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4 | 76 | #define CHCR_SCMD_CIPHER_MODE_AES_GCM 2 |
| 69 | #define CHCR_SCMD_CIPHER_MODE_AES_XTS 6 | 77 | #define CHCR_SCMD_CIPHER_MODE_AES_CTR 3 |
| 78 | #define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4 | ||
| 79 | #define CHCR_SCMD_CIPHER_MODE_AES_XTS 6 | ||
| 80 | #define CHCR_SCMD_CIPHER_MODE_AES_CCM 7 | ||
| 70 | 81 | ||
| 71 | #define CHCR_SCMD_AUTH_MODE_NOP 0 | 82 | #define CHCR_SCMD_AUTH_MODE_NOP 0 |
| 72 | #define CHCR_SCMD_AUTH_MODE_SHA1 1 | 83 | #define CHCR_SCMD_AUTH_MODE_SHA1 1 |
| 73 | #define CHCR_SCMD_AUTH_MODE_SHA224 2 | 84 | #define CHCR_SCMD_AUTH_MODE_SHA224 2 |
| 74 | #define CHCR_SCMD_AUTH_MODE_SHA256 3 | 85 | #define CHCR_SCMD_AUTH_MODE_SHA256 3 |
| 86 | #define CHCR_SCMD_AUTH_MODE_GHASH 4 | ||
| 75 | #define CHCR_SCMD_AUTH_MODE_SHA512_224 5 | 87 | #define CHCR_SCMD_AUTH_MODE_SHA512_224 5 |
| 76 | #define CHCR_SCMD_AUTH_MODE_SHA512_256 6 | 88 | #define CHCR_SCMD_AUTH_MODE_SHA512_256 6 |
| 77 | #define CHCR_SCMD_AUTH_MODE_SHA512_384 7 | 89 | #define CHCR_SCMD_AUTH_MODE_SHA512_384 7 |
| 78 | #define CHCR_SCMD_AUTH_MODE_SHA512_512 8 | 90 | #define CHCR_SCMD_AUTH_MODE_SHA512_512 8 |
| 91 | #define CHCR_SCMD_AUTH_MODE_CBCMAC 9 | ||
| 92 | #define CHCR_SCMD_AUTH_MODE_CMAC 10 | ||
| 79 | 93 | ||
| 80 | #define CHCR_SCMD_HMAC_CTRL_NOP 0 | 94 | #define CHCR_SCMD_HMAC_CTRL_NOP 0 |
| 81 | #define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1 | 95 | #define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1 |
| 96 | #define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2 | ||
| 97 | #define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3 | ||
| 98 | #define CHCR_SCMD_HMAC_CTRL_PL1 4 | ||
| 99 | #define CHCR_SCMD_HMAC_CTRL_PL2 5 | ||
| 100 | #define CHCR_SCMD_HMAC_CTRL_PL3 6 | ||
| 101 | #define CHCR_SCMD_HMAC_CTRL_DIV2 7 | ||
| 102 | #define VERIFY_HW 0 | ||
| 103 | #define VERIFY_SW 1 | ||
| 82 | 104 | ||
| 83 | #define CHCR_SCMD_IVGEN_CTRL_HW 0 | 105 | #define CHCR_SCMD_IVGEN_CTRL_HW 0 |
| 84 | #define CHCR_SCMD_IVGEN_CTRL_SW 1 | 106 | #define CHCR_SCMD_IVGEN_CTRL_SW 1 |
| @@ -106,39 +128,74 @@ | |||
| 106 | #define IV_IMMEDIATE 1 | 128 | #define IV_IMMEDIATE 1 |
| 107 | #define IV_DSGL 2 | 129 | #define IV_DSGL 2 |
| 108 | 130 | ||
| 131 | #define AEAD_H_SIZE 16 | ||
| 132 | |||
| 109 | #define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000 | 133 | #define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000 |
| 110 | #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000 | 134 | #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000 |
| 135 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000 | ||
| 136 | #define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000 | ||
| 137 | #define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000 | ||
| 138 | #define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000 | ||
| 139 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 | ||
| 140 | #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000 | ||
| 141 | #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 | ||
| 111 | #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ | 142 | #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ |
| 112 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) | 143 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) |
| 113 | 144 | ||
| 114 | #define MAX_SALT 4 | ||
| 115 | #define MAX_SCRATCH_PAD_SIZE 32 | 145 | #define MAX_SCRATCH_PAD_SIZE 32 |
| 116 | 146 | ||
| 117 | #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 | 147 | #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 |
| 118 | #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 | 148 | #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 |
| 119 | 149 | ||
| 120 | /* Aligned to 128 bit boundary */ | 150 | /* Aligned to 128 bit boundary */ |
| 121 | struct _key_ctx { | ||
| 122 | __be32 ctx_hdr; | ||
| 123 | u8 salt[MAX_SALT]; | ||
| 124 | __be64 reserverd; | ||
| 125 | unsigned char key[0]; | ||
| 126 | }; | ||
| 127 | 151 | ||
| 128 | struct ablk_ctx { | 152 | struct ablk_ctx { |
| 129 | u8 enc; | ||
| 130 | unsigned int processed_len; | ||
| 131 | __be32 key_ctx_hdr; | 153 | __be32 key_ctx_hdr; |
| 132 | unsigned int enckey_len; | 154 | unsigned int enckey_len; |
| 133 | unsigned int dst_nents; | ||
| 134 | struct scatterlist iv_sg; | ||
| 135 | u8 key[CHCR_AES_MAX_KEY_LEN]; | 155 | u8 key[CHCR_AES_MAX_KEY_LEN]; |
| 136 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | ||
| 137 | unsigned char ciph_mode; | 156 | unsigned char ciph_mode; |
| 157 | u8 rrkey[AES_MAX_KEY_SIZE]; | ||
| 158 | }; | ||
| 159 | struct chcr_aead_reqctx { | ||
| 160 | struct sk_buff *skb; | ||
| 161 | short int dst_nents; | ||
| 162 | u16 verify; | ||
| 163 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | ||
| 164 | unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; | ||
| 165 | }; | ||
| 166 | |||
| 167 | struct chcr_gcm_ctx { | ||
| 168 | u8 ghash_h[AEAD_H_SIZE]; | ||
| 138 | }; | 169 | }; |
| 139 | 170 | ||
| 171 | struct chcr_authenc_ctx { | ||
| 172 | u8 dec_rrkey[AES_MAX_KEY_SIZE]; | ||
| 173 | u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE]; | ||
| 174 | unsigned char auth_mode; | ||
| 175 | }; | ||
| 176 | |||
| 177 | struct __aead_ctx { | ||
| 178 | struct chcr_gcm_ctx gcm[0]; | ||
| 179 | struct chcr_authenc_ctx authenc[0]; | ||
| 180 | }; | ||
| 181 | |||
| 182 | |||
| 183 | |||
| 184 | struct chcr_aead_ctx { | ||
| 185 | __be32 key_ctx_hdr; | ||
| 186 | unsigned int enckey_len; | ||
| 187 | struct crypto_skcipher *null; | ||
| 188 | u8 salt[MAX_SALT]; | ||
| 189 | u8 key[CHCR_AES_MAX_KEY_LEN]; | ||
| 190 | u16 hmac_ctrl; | ||
| 191 | u16 mayverify; | ||
| 192 | struct __aead_ctx ctx[0]; | ||
| 193 | }; | ||
| 194 | |||
| 195 | |||
| 196 | |||
| 140 | struct hmac_ctx { | 197 | struct hmac_ctx { |
| 141 | struct shash_desc *desc; | 198 | struct crypto_shash *base_hash; |
| 142 | u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; | 199 | u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; |
| 143 | u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128]; | 200 | u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128]; |
| 144 | }; | 201 | }; |
| @@ -146,6 +203,7 @@ struct hmac_ctx { | |||
| 146 | struct __crypto_ctx { | 203 | struct __crypto_ctx { |
| 147 | struct hmac_ctx hmacctx[0]; | 204 | struct hmac_ctx hmacctx[0]; |
| 148 | struct ablk_ctx ablkctx[0]; | 205 | struct ablk_ctx ablkctx[0]; |
| 206 | struct chcr_aead_ctx aeadctx[0]; | ||
| 149 | }; | 207 | }; |
| 150 | 208 | ||
| 151 | struct chcr_context { | 209 | struct chcr_context { |
| @@ -156,18 +214,22 @@ struct chcr_context { | |||
| 156 | 214 | ||
| 157 | struct chcr_ahash_req_ctx { | 215 | struct chcr_ahash_req_ctx { |
| 158 | u32 result; | 216 | u32 result; |
| 159 | char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128]; | 217 | u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128]; |
| 160 | u8 bfr_len; | 218 | u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; |
| 219 | u8 *reqbfr; | ||
| 220 | u8 *skbfr; | ||
| 221 | u8 reqlen; | ||
| 161 | /* DMA the partial hash in it */ | 222 | /* DMA the partial hash in it */ |
| 162 | u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; | 223 | u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; |
| 163 | u64 data_len; /* Data len till time */ | 224 | u64 data_len; /* Data len till time */ |
| 164 | void *dummy_payload_ptr; | ||
| 165 | /* SKB which is being sent to the hardware for processing */ | 225 | /* SKB which is being sent to the hardware for processing */ |
| 166 | struct sk_buff *skb; | 226 | struct sk_buff *skb; |
| 167 | }; | 227 | }; |
| 168 | 228 | ||
| 169 | struct chcr_blkcipher_req_ctx { | 229 | struct chcr_blkcipher_req_ctx { |
| 170 | struct sk_buff *skb; | 230 | struct sk_buff *skb; |
| 231 | unsigned int dst_nents; | ||
| 232 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | ||
| 171 | }; | 233 | }; |
| 172 | 234 | ||
| 173 | struct chcr_alg_template { | 235 | struct chcr_alg_template { |
| @@ -176,16 +238,19 @@ struct chcr_alg_template { | |||
| 176 | union { | 238 | union { |
| 177 | struct crypto_alg crypto; | 239 | struct crypto_alg crypto; |
| 178 | struct ahash_alg hash; | 240 | struct ahash_alg hash; |
| 241 | struct aead_alg aead; | ||
| 179 | } alg; | 242 | } alg; |
| 180 | }; | 243 | }; |
| 181 | 244 | ||
| 182 | struct chcr_req_ctx { | 245 | struct chcr_req_ctx { |
| 183 | union { | 246 | union { |
| 184 | struct ahash_request *ahash_req; | 247 | struct ahash_request *ahash_req; |
| 248 | struct aead_request *aead_req; | ||
| 185 | struct ablkcipher_request *ablk_req; | 249 | struct ablkcipher_request *ablk_req; |
| 186 | } req; | 250 | } req; |
| 187 | union { | 251 | union { |
| 188 | struct chcr_ahash_req_ctx *ahash_ctx; | 252 | struct chcr_ahash_req_ctx *ahash_ctx; |
| 253 | struct chcr_aead_reqctx *reqctx; | ||
| 189 | struct chcr_blkcipher_req_ctx *ablk_ctx; | 254 | struct chcr_blkcipher_req_ctx *ablk_ctx; |
| 190 | } ctx; | 255 | } ctx; |
| 191 | }; | 256 | }; |
| @@ -195,9 +260,15 @@ struct sge_opaque_hdr { | |||
| 195 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | 260 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; |
| 196 | }; | 261 | }; |
| 197 | 262 | ||
| 198 | typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req, | 263 | typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, |
| 199 | struct chcr_context *ctx, | ||
| 200 | unsigned short qid, | 264 | unsigned short qid, |
| 265 | int size, | ||
| 201 | unsigned short op_type); | 266 | unsigned short op_type); |
| 202 | 267 | ||
| 268 | static int chcr_aead_op(struct aead_request *req_base, | ||
| 269 | unsigned short op_type, | ||
| 270 | int size, | ||
| 271 | create_wr_t create_wr_fn); | ||
| 272 | static inline int get_aead_subtype(struct crypto_aead *aead); | ||
| 273 | |||
| 203 | #endif /* __CHCR_CRYPTO_H__ */ | 274 | #endif /* __CHCR_CRYPTO_H__ */ |
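The CRYPTO_ALG_SUB_TYPE_* values above are packed into bits 24-27 of the algorithm flags (hence CRYPTO_ALG_SUB_TYPE_MASK = 0x0f000000), which lets one request path fan out to GCM, CCM, RFC4106 and the other AEAD variants. One plausible shape for the get_aead_subtype() helper that is only declared above; the in-tree definition may instead consult the driver's own template flags:

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);

	/* The sub-type lives in bits 24-27 of the flags word. */
	return alg->base.cra_flags & CRYPTO_ALG_SUB_TYPE_MASK;
}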
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 37dadb2a4feb..6e7a5c77a00a 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c | |||
| @@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) | |||
| 375 | if (!dma->padding_pool) | 375 | if (!dma->padding_pool) |
| 376 | return -ENOMEM; | 376 | return -ENOMEM; |
| 377 | 377 | ||
| 378 | dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0); | ||
| 379 | if (!dma->iv_pool) | ||
| 380 | return -ENOMEM; | ||
| 381 | |||
| 382 | cesa->dma = dma; | 378 | cesa->dma = dma; |
| 383 | 379 | ||
| 384 | return 0; | 380 | return 0; |
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index e423d33decd4..a768da7138a1 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h | |||
| @@ -277,7 +277,7 @@ struct mv_cesa_op_ctx { | |||
| 277 | #define CESA_TDMA_DUMMY 0 | 277 | #define CESA_TDMA_DUMMY 0 |
| 278 | #define CESA_TDMA_DATA 1 | 278 | #define CESA_TDMA_DATA 1 |
| 279 | #define CESA_TDMA_OP 2 | 279 | #define CESA_TDMA_OP 2 |
| 280 | #define CESA_TDMA_IV 3 | 280 | #define CESA_TDMA_RESULT 3 |
| 281 | 281 | ||
| 282 | /** | 282 | /** |
| 283 | * struct mv_cesa_tdma_desc - TDMA descriptor | 283 | * struct mv_cesa_tdma_desc - TDMA descriptor |
| @@ -393,7 +393,6 @@ struct mv_cesa_dev_dma { | |||
| 393 | struct dma_pool *op_pool; | 393 | struct dma_pool *op_pool; |
| 394 | struct dma_pool *cache_pool; | 394 | struct dma_pool *cache_pool; |
| 395 | struct dma_pool *padding_pool; | 395 | struct dma_pool *padding_pool; |
| 396 | struct dma_pool *iv_pool; | ||
| 397 | }; | 396 | }; |
| 398 | 397 | ||
| 399 | /** | 398 | /** |
| @@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) | |||
| 839 | memset(chain, 0, sizeof(*chain)); | 838 | memset(chain, 0, sizeof(*chain)); |
| 840 | } | 839 | } |
| 841 | 840 | ||
| 842 | int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, | 841 | int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, |
| 843 | u32 size, u32 flags, gfp_t gfp_flags); | 842 | u32 size, u32 flags, gfp_t gfp_flags); |
| 844 | 843 | ||
| 845 | struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, | 844 | struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, |
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index d19dc9614e6e..098871a22a54 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c | |||
| @@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req) | |||
| 212 | struct mv_cesa_req *basereq; | 212 | struct mv_cesa_req *basereq; |
| 213 | 213 | ||
| 214 | basereq = &creq->base; | 214 | basereq = &creq->base; |
| 215 | memcpy(ablkreq->info, basereq->chain.last->data, ivsize); | 215 | memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv, |
| 216 | ivsize); | ||
| 216 | } else { | 217 | } else { |
| 217 | memcpy_fromio(ablkreq->info, | 218 | memcpy_fromio(ablkreq->info, |
| 218 | engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, | 219 | engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, |
| @@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, | |||
| 373 | 374 | ||
| 374 | /* Add output data for IV */ | 375 | /* Add output data for IV */ |
| 375 | ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); | 376 | ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); |
| 376 | ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET, | 377 | ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET, |
| 377 | ivsize, CESA_TDMA_SRC_IN_SRAM, flags); | 378 | CESA_SA_DATA_SRAM_OFFSET, |
| 379 | CESA_TDMA_SRC_IN_SRAM, flags); | ||
| 378 | 380 | ||
| 379 | if (ret) | 381 | if (ret) |
| 380 | goto err_free_tdma; | 382 | goto err_free_tdma; |
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 77712b375b84..317cf029c0cf 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c | |||
| @@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req) | |||
| 311 | int i; | 311 | int i; |
| 312 | 312 | ||
| 313 | digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); | 313 | digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); |
| 314 | for (i = 0; i < digsize / 4; i++) | ||
| 315 | creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i)); | ||
| 316 | 314 | ||
| 317 | if (creq->last_req) { | 315 | if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ && |
| 316 | (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) { | ||
| 317 | __le32 *data = NULL; | ||
| 318 | |||
| 318 | /* | 319 | /* |
| 319 | * Hardware's MD5 digest is in little endian format, but | 320 | * Result is already in the correct endianness when the SA is |

| 320 | * SHA in big endian format | 321 | * used |
| 321 | */ | 322 | */ |
| 322 | if (creq->algo_le) { | 323 | data = creq->base.chain.last->op->ctx.hash.hash; |
| 323 | __le32 *result = (void *)ahashreq->result; | 324 | for (i = 0; i < digsize / 4; i++) |
| 325 | creq->state[i] = cpu_to_le32(data[i]); | ||
| 324 | 326 | ||
| 325 | for (i = 0; i < digsize / 4; i++) | 327 | memcpy(ahashreq->result, data, digsize); |
| 326 | result[i] = cpu_to_le32(creq->state[i]); | 328 | } else { |
| 327 | } else { | 329 | for (i = 0; i < digsize / 4; i++) |
| 328 | __be32 *result = (void *)ahashreq->result; | 330 | creq->state[i] = readl_relaxed(engine->regs + |
| 331 | CESA_IVDIG(i)); | ||
| 332 | if (creq->last_req) { | ||
| 333 | /* | ||
| 334 | * Hardware's MD5 digest is in little endian format, but | ||
| 335 | * SHA in big endian format | ||
| 336 | */ | ||
| 337 | if (creq->algo_le) { | ||
| 338 | __le32 *result = (void *)ahashreq->result; | ||
| 339 | |||
| 340 | for (i = 0; i < digsize / 4; i++) | ||
| 341 | result[i] = cpu_to_le32(creq->state[i]); | ||
| 342 | } else { | ||
| 343 | __be32 *result = (void *)ahashreq->result; | ||
| 329 | 344 | ||
| 330 | for (i = 0; i < digsize / 4; i++) | 345 | for (i = 0; i < digsize / 4; i++) |
| 331 | result[i] = cpu_to_be32(creq->state[i]); | 346 | result[i] = cpu_to_be32(creq->state[i]); |
| 347 | } | ||
| 332 | } | 348 | } |
| 333 | } | 349 | } |
| 334 | 350 | ||
| @@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, | |||
| 503 | CESA_SA_DESC_CFG_LAST_FRAG, | 519 | CESA_SA_DESC_CFG_LAST_FRAG, |
| 504 | CESA_SA_DESC_CFG_FRAG_MSK); | 520 | CESA_SA_DESC_CFG_FRAG_MSK); |
| 505 | 521 | ||
| 522 | ret = mv_cesa_dma_add_result_op(chain, | ||
| 523 | CESA_SA_CFG_SRAM_OFFSET, | ||
| 524 | CESA_SA_DATA_SRAM_OFFSET, | ||
| 525 | CESA_TDMA_SRC_IN_SRAM, flags); | ||
| 526 | if (ret) | ||
| 527 | return ERR_PTR(-ENOMEM); | ||
| 506 | return op; | 528 | return op; |
| 507 | } | 529 | } |
| 508 | 530 | ||
| @@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) | |||
| 563 | struct mv_cesa_op_ctx *op = NULL; | 585 | struct mv_cesa_op_ctx *op = NULL; |
| 564 | unsigned int frag_len; | 586 | unsigned int frag_len; |
| 565 | int ret; | 587 | int ret; |
| 588 | u32 type; | ||
| 566 | 589 | ||
| 567 | basereq->chain.first = NULL; | 590 | basereq->chain.first = NULL; |
| 568 | basereq->chain.last = NULL; | 591 | basereq->chain.last = NULL; |
| @@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) | |||
| 634 | goto err_free_tdma; | 657 | goto err_free_tdma; |
| 635 | } | 658 | } |
| 636 | 659 | ||
| 637 | if (op) { | 660 | /* |
| 661 | * If results are copied via DMA, this means that this | ||
| 662 | * request can be directly processed by the engine, | ||
| 663 | * without partial updates. So we can chain it at the | ||
| 664 | * DMA level with other requests. | ||
| 665 | */ | ||
| 666 | type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK; | ||
| 667 | |||
| 668 | if (op && type != CESA_TDMA_RESULT) { | ||
| 638 | /* Add dummy desc to wait for crypto operation end */ | 669 | /* Add dummy desc to wait for crypto operation end */ |
| 639 | ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags); | 670 | ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags); |
| 640 | if (ret) | 671 | if (ret) |
| @@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) | |||
| 647 | else | 678 | else |
| 648 | creq->cache_ptr = 0; | 679 | creq->cache_ptr = 0; |
| 649 | 680 | ||
| 650 | basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ | | 681 | basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ; |
| 651 | CESA_TDMA_BREAK_CHAIN); | 682 | |
| 683 | if (type != CESA_TDMA_RESULT) | ||
| 684 | basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN; | ||
| 652 | 685 | ||
| 653 | return 0; | 686 | return 0; |
| 654 | 687 | ||
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c index 9fd7a5fbaa1b..4416b88eca70 100644 --- a/drivers/crypto/marvell/tdma.c +++ b/drivers/crypto/marvell/tdma.c | |||
| @@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq) | |||
| 69 | if (type == CESA_TDMA_OP) | 69 | if (type == CESA_TDMA_OP) |
| 70 | dma_pool_free(cesa_dev->dma->op_pool, tdma->op, | 70 | dma_pool_free(cesa_dev->dma->op_pool, tdma->op, |
| 71 | le32_to_cpu(tdma->src)); | 71 | le32_to_cpu(tdma->src)); |
| 72 | else if (type == CESA_TDMA_IV) | ||
| 73 | dma_pool_free(cesa_dev->dma->iv_pool, tdma->data, | ||
| 74 | le32_to_cpu(tdma->dst)); | ||
| 75 | 72 | ||
| 76 | tdma = tdma->next; | 73 | tdma = tdma->next; |
| 77 | dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, | 74 | dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, |
| @@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) | |||
| 209 | return new_tdma; | 206 | return new_tdma; |
| 210 | } | 207 | } |
| 211 | 208 | ||
| 212 | int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, | 209 | int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, |
| 213 | u32 size, u32 flags, gfp_t gfp_flags) | 210 | u32 size, u32 flags, gfp_t gfp_flags) |
| 214 | { | 211 | { |
| 215 | 212 | struct mv_cesa_tdma_desc *tdma, *op_desc; | |
| 216 | struct mv_cesa_tdma_desc *tdma; | ||
| 217 | u8 *iv; | ||
| 218 | dma_addr_t dma_handle; | ||
| 219 | 213 | ||
| 220 | tdma = mv_cesa_dma_add_desc(chain, gfp_flags); | 214 | tdma = mv_cesa_dma_add_desc(chain, gfp_flags); |
| 221 | if (IS_ERR(tdma)) | 215 | if (IS_ERR(tdma)) |
| 222 | return PTR_ERR(tdma); | 216 | return PTR_ERR(tdma); |
| 223 | 217 | ||
| 224 | iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle); | 218 | /* We re-use an existing op_desc object to retrieve the context |
| 225 | if (!iv) | 219 | * and result instead of allocating a new one. |
| 226 | return -ENOMEM; | 220 | * There is at least one object of this type in a CESA crypto |
| 221 | * req, just pick the first one in the chain. | ||
| 222 | */ | ||
| 223 | for (op_desc = chain->first; op_desc; op_desc = op_desc->next) { | ||
| 224 | u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK; | ||
| 225 | |||
| 226 | if (type == CESA_TDMA_OP) | ||
| 227 | break; | ||
| 228 | } | ||
| 229 | |||
| 230 | if (!op_desc) | ||
| 231 | return -EIO; | ||
| 227 | 232 | ||
| 228 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); | 233 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); |
| 229 | tdma->src = src; | 234 | tdma->src = src; |
| 230 | tdma->dst = cpu_to_le32(dma_handle); | 235 | tdma->dst = op_desc->src; |
| 231 | tdma->data = iv; | 236 | tdma->op = op_desc->op; |
| 232 | 237 | ||
| 233 | flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); | 238 | flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); |
| 234 | tdma->flags = flags | CESA_TDMA_IV; | 239 | tdma->flags = flags | CESA_TDMA_RESULT; |
| 235 | return 0; | 240 | return 0; |
| 236 | } | 241 | } |
| 237 | 242 | ||
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 104e9ce9400a..451fa18c1c7b 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
| @@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev) | |||
| 1073 | if (!res) | 1073 | if (!res) |
| 1074 | return -ENXIO; | 1074 | return -ENXIO; |
| 1075 | 1075 | ||
| 1076 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | 1076 | cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL); |
| 1077 | if (!cp) | 1077 | if (!cp) |
| 1078 | return -ENOMEM; | 1078 | return -ENOMEM; |
| 1079 | 1079 | ||
| @@ -1163,7 +1163,6 @@ err_irq: | |||
| 1163 | err_thread: | 1163 | err_thread: |
| 1164 | kthread_stop(cp->queue_th); | 1164 | kthread_stop(cp->queue_th); |
| 1165 | err: | 1165 | err: |
| 1166 | kfree(cp); | ||
| 1167 | cpg = NULL; | 1166 | cpg = NULL; |
| 1168 | return ret; | 1167 | return ret; |
| 1169 | } | 1168 | } |
| @@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev) | |||
| 1187 | clk_put(cp->clk); | 1186 | clk_put(cp->clk); |
| 1188 | } | 1187 | } |
| 1189 | 1188 | ||
| 1190 | kfree(cp); | ||
| 1191 | cpg = NULL; | 1189 | cpg = NULL; |
| 1192 | return 0; | 1190 | return 0; |
| 1193 | } | 1191 | } |
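The mv_cesa change above swaps kzalloc() for devm_kzalloc(): memory obtained through the devm_* interface is tied to the device and released automatically on unbind, which is why the kfree() calls in the probe error path and in remove() disappear. A hedged sketch of the pattern, with struct example_priv hypothetical:

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;	/* no kfree() needed on any exit path, nor in remove() */
}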
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 42f0f229f7f7..036057abb257 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | #include <linux/scatterlist.h> | 32 | #include <linux/scatterlist.h> |
| 33 | #include <linux/device.h> | 33 | #include <linux/device.h> |
| 34 | #include <linux/of.h> | 34 | #include <linux/of.h> |
| 35 | #include <linux/types.h> | ||
| 36 | #include <asm/hvcall.h> | 35 | #include <asm/hvcall.h> |
| 37 | #include <asm/vio.h> | 36 | #include <asm/vio.h> |
| 38 | 37 | ||
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 0c49956ee0ce..1d9ecd368b5b 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
| @@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status) | |||
| 390 | if (status & SAHARA_STATUS_MODE_BATCH) | 390 | if (status & SAHARA_STATUS_MODE_BATCH) |
| 391 | dev_dbg(dev->device, " - Batch Mode.\n"); | 391 | dev_dbg(dev->device, " - Batch Mode.\n"); |
| 392 | else if (status & SAHARA_STATUS_MODE_DEDICATED) | 392 | else if (status & SAHARA_STATUS_MODE_DEDICATED) |
| 393 | dev_dbg(dev->device, " - Decidated Mode.\n"); | 393 | dev_dbg(dev->device, " - Dedicated Mode.\n"); |
| 394 | else if (status & SAHARA_STATUS_MODE_DEBUG) | 394 | else if (status & SAHARA_STATUS_MODE_DEBUG) |
| 395 | dev_dbg(dev->device, " - Debug Mode.\n"); | 395 | dev_dbg(dev->device, " - Debug Mode.\n"); |
| 396 | 396 | ||
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 0418a2f41dc0..0bba6a19d36a 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) | |||
| 590 | if (v_lo & TALITOS_CCPSR_LO_MDTE) | 590 | if (v_lo & TALITOS_CCPSR_LO_MDTE) |
| 591 | dev_err(dev, "master data transfer error\n"); | 591 | dev_err(dev, "master data transfer error\n"); |
| 592 | if (v_lo & TALITOS_CCPSR_LO_SGDLZ) | 592 | if (v_lo & TALITOS_CCPSR_LO_SGDLZ) |
| 593 | dev_err(dev, is_sec1 ? "pointeur not complete error\n" | 593 | dev_err(dev, is_sec1 ? "pointer not complete error\n" |
| 594 | : "s/g data length zero error\n"); | 594 | : "s/g data length zero error\n"); |
| 595 | if (v_lo & TALITOS_CCPSR_LO_FPZ) | 595 | if (v_lo & TALITOS_CCPSR_LO_FPZ) |
| 596 | dev_err(dev, is_sec1 ? "parity error\n" | 596 | dev_err(dev, is_sec1 ? "parity error\n" |
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index de6e241b0866..55f7c392582f 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile | |||
| @@ -10,10 +10,12 @@ endif | |||
| 10 | quiet_cmd_perl = PERL $@ | 10 | quiet_cmd_perl = PERL $@ |
| 11 | cmd_perl = $(PERL) $(<) $(TARGET) > $(@) | 11 | cmd_perl = $(PERL) $(<) $(TARGET) > $(@) |
| 12 | 12 | ||
| 13 | $(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl | 13 | targets += aesp8-ppc.S ghashp8-ppc.S |
| 14 | $(call cmd,perl) | 14 | |
| 15 | $(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE | ||
| 16 | $(call if_changed,perl) | ||
| 15 | 17 | ||
| 16 | $(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl | 18 | $(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE |
| 17 | $(call cmd,perl) | 19 | $(call if_changed,perl) |
| 18 | 20 | ||
| 19 | .PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S | 21 | clean-files := aesp8-ppc.S ghashp8-ppc.S |
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h new file mode 100644 index 000000000000..e328b52425a8 --- /dev/null +++ b/include/crypto/acompress.h | |||
| @@ -0,0 +1,269 @@ | |||
| 1 | /* | ||
| 2 | * Asynchronous Compression operations | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Weigang Li <weigang.li@intel.com> | ||
| 6 | * Giovanni Cabiddu <giovanni.cabiddu@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #ifndef _CRYPTO_ACOMP_H | ||
| 15 | #define _CRYPTO_ACOMP_H | ||
| 16 | #include <linux/crypto.h> | ||
| 17 | |||
| 18 | #define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 | ||
| 19 | |||
| 20 | /** | ||
| 21 | * struct acomp_req - asynchronous (de)compression request | ||
| 22 | * | ||
| 23 | * @base: Common attributes for asynchronous crypto requests | ||
| 24 | * @src: Source Data | ||
| 25 | * @dst: Destination data | ||
| 26 | * @slen: Size of the input buffer | ||
| 27 | * @dlen: Size of the output buffer and number of bytes produced | ||
| 28 | * @flags: Internal flags | ||
| 29 | * @__ctx: Start of private context data | ||
| 30 | */ | ||
| 31 | struct acomp_req { | ||
| 32 | struct crypto_async_request base; | ||
| 33 | struct scatterlist *src; | ||
| 34 | struct scatterlist *dst; | ||
| 35 | unsigned int slen; | ||
| 36 | unsigned int dlen; | ||
| 37 | u32 flags; | ||
| 38 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
| 39 | }; | ||
| 40 | |||
| 41 | /** | ||
| 42 | * struct crypto_acomp - user-instantiated objects which encapsulate | ||
| 43 | * algorithms and core processing logic | ||
| 44 | * | ||
| 45 | * @compress: Function performs a compress operation | ||
| 46 | * @decompress: Function performs a de-compress operation | ||
| 47 | * @dst_free: Frees destination buffer if allocated inside the | ||
| 48 | * algorithm | ||
| 49 | * @reqsize: Context size for (de)compression requests | ||
| 50 | * @base: Common crypto API algorithm data structure | ||
| 51 | */ | ||
| 52 | struct crypto_acomp { | ||
| 53 | int (*compress)(struct acomp_req *req); | ||
| 54 | int (*decompress)(struct acomp_req *req); | ||
| 55 | void (*dst_free)(struct scatterlist *dst); | ||
| 56 | unsigned int reqsize; | ||
| 57 | struct crypto_tfm base; | ||
| 58 | }; | ||
| 59 | |||
| 60 | /** | ||
| 61 | * struct acomp_alg - asynchronous compression algorithm | ||
| 62 | * | ||
| 63 | * @compress: Function performs a compress operation | ||
| 64 | * @decompress: Function performs a de-compress operation | ||
| 65 | * @dst_free: Frees destination buffer if allocated inside the algorithm | ||
| 66 | * @init: Initialize the cryptographic transformation object. | ||
| 67 | * This function is used to initialize the cryptographic | ||
| 68 | * transformation object. This function is called only once at | ||
| 69 | * the instantiation time, right after the transformation context | ||
| 70 | * was allocated. In case the cryptographic hardware has some | ||
| 71 | * special requirements which need to be handled by software, this | ||
| 72 | * function shall check for the precise requirement of the | ||
| 73 | * transformation and put any software fallbacks in place. | ||
| 74 | * @exit: Deinitialize the cryptographic transformation object. This is a | ||
| 75 | * counterpart to @init, used to remove various changes set in | ||
| 76 | * @init. | ||
| 77 | * | ||
| 78 | * @reqsize: Context size for (de)compression requests | ||
| 79 | * @base: Common crypto API algorithm data structure | ||
| 80 | */ | ||
| 81 | struct acomp_alg { | ||
| 82 | int (*compress)(struct acomp_req *req); | ||
| 83 | int (*decompress)(struct acomp_req *req); | ||
| 84 | void (*dst_free)(struct scatterlist *dst); | ||
| 85 | int (*init)(struct crypto_acomp *tfm); | ||
| 86 | void (*exit)(struct crypto_acomp *tfm); | ||
| 87 | unsigned int reqsize; | ||
| 88 | struct crypto_alg base; | ||
| 89 | }; | ||
| 90 | |||
| 91 | /** | ||
| 92 | * DOC: Asynchronous Compression API | ||
| 93 | * | ||
| 94 | * The Asynchronous Compression API is used with the algorithms of type | ||
| 95 | * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) | ||
| 96 | */ | ||
| 97 | |||
| 98 | /** | ||
| 99 | * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle | ||
| 100 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 101 | * compression algorithm e.g. "deflate" | ||
| 102 | * @type: specifies the type of the algorithm | ||
| 103 | * @mask: specifies the mask for the algorithm | ||
| 104 | * | ||
| 105 | * Allocate a handle for a compression algorithm. The returned struct | ||
| 106 | * crypto_acomp is the handle that is required for any subsequent | ||
| 107 | * API invocation for the compression operations. | ||
| 108 | * | ||
| 109 | * Return: allocated handle in case of success; IS_ERR() is true in case | ||
| 110 | * of an error, PTR_ERR() returns the error code. | ||
| 111 | */ | ||
| 112 | struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, | ||
| 113 | u32 mask); | ||
| 114 | |||
| 115 | static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) | ||
| 116 | { | ||
| 117 | return &tfm->base; | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) | ||
| 121 | { | ||
| 122 | return container_of(alg, struct acomp_alg, base); | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) | ||
| 126 | { | ||
| 127 | return container_of(tfm, struct crypto_acomp, base); | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) | ||
| 131 | { | ||
| 132 | return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) | ||
| 136 | { | ||
| 137 | return tfm->reqsize; | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline void acomp_request_set_tfm(struct acomp_req *req, | ||
| 141 | struct crypto_acomp *tfm) | ||
| 142 | { | ||
| 143 | req->base.tfm = crypto_acomp_tfm(tfm); | ||
| 144 | } | ||
| 145 | |||
| 146 | static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) | ||
| 147 | { | ||
| 148 | return __crypto_acomp_tfm(req->base.tfm); | ||
| 149 | } | ||
| 150 | |||
| 151 | /** | ||
| 152 | * crypto_free_acomp() -- free ACOMPRESS tfm handle | ||
| 153 | * | ||
| 154 | * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() | ||
| 155 | */ | ||
| 156 | static inline void crypto_free_acomp(struct crypto_acomp *tfm) | ||
| 157 | { | ||
| 158 | crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) | ||
| 162 | { | ||
| 163 | type &= ~CRYPTO_ALG_TYPE_MASK; | ||
| 164 | type |= CRYPTO_ALG_TYPE_ACOMPRESS; | ||
| 165 | mask |= CRYPTO_ALG_TYPE_MASK; | ||
| 166 | |||
| 167 | return crypto_has_alg(alg_name, type, mask); | ||
| 168 | } | ||
| 169 | |||
| 170 | /** | ||
| 171 | * acomp_request_alloc() -- allocates asynchronous (de)compression request | ||
| 172 | * | ||
| 173 | * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() | ||
| 174 | * | ||
| 175 | * Return: allocated handle in case of success or NULL in case of an error | ||
| 176 | */ | ||
| 177 | struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); | ||
| 178 | |||
| 179 | /** | ||
| 180 | * acomp_request_free() -- zeroize and free asynchronous (de)compression | ||
| 181 | * request as well as the output buffer if allocated | ||
| 182 | * inside the algorithm | ||
| 183 | * | ||
| 184 | * @req: request to free | ||
| 185 | */ | ||
| 186 | void acomp_request_free(struct acomp_req *req); | ||
| 187 | |||
| 188 | /** | ||
| 189 | * acomp_request_set_callback() -- Sets an asynchronous callback | ||
| 190 | * | ||
| 191 | * Callback will be called when an asynchronous operation on a given | ||
| 192 | * request is finished. | ||
| 193 | * | ||
| 194 | * @req: request that the callback will be set for | ||
| 195 | * @flgs: specify for instance if the operation may backlog | ||
| 196 | * @cmpl: callback which will be called | ||
| 197 | * @data: private data used by the caller | ||
| 198 | */ | ||
| 199 | static inline void acomp_request_set_callback(struct acomp_req *req, | ||
| 200 | u32 flgs, | ||
| 201 | crypto_completion_t cmpl, | ||
| 202 | void *data) | ||
| 203 | { | ||
| 204 | req->base.complete = cmpl; | ||
| 205 | req->base.data = data; | ||
| 206 | req->base.flags = flgs; | ||
| 207 | } | ||
| 208 | |||
| 209 | /** | ||
| 210 | * acomp_request_set_params() -- Sets request parameters | ||
| 211 | * | ||
| 212 | * Sets parameters required by an acomp operation | ||
| 213 | * | ||
| 214 | * @req: asynchronous compress request | ||
| 215 | * @src: pointer to input buffer scatterlist | ||
| 216 | * @dst: pointer to output buffer scatterlist. If this is NULL, the | ||
| 217 | * acomp layer will allocate the output memory | ||
| 218 | * @slen: size of the input buffer | ||
| 219 | * @dlen: size of the output buffer. If dst is NULL, this can be used by | ||
| 220 | * the user to specify the maximum amount of memory to allocate | ||
| 221 | */ | ||
| 222 | static inline void acomp_request_set_params(struct acomp_req *req, | ||
| 223 | struct scatterlist *src, | ||
| 224 | struct scatterlist *dst, | ||
| 225 | unsigned int slen, | ||
| 226 | unsigned int dlen) | ||
| 227 | { | ||
| 228 | req->src = src; | ||
| 229 | req->dst = dst; | ||
| 230 | req->slen = slen; | ||
| 231 | req->dlen = dlen; | ||
| 232 | |||
| 233 | if (!req->dst) | ||
| 234 | req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; | ||
| 235 | } | ||
| 236 | |||
| 237 | /** | ||
| 238 | * crypto_acomp_compress() -- Invoke asynchronous compress operation | ||
| 239 | * | ||
| 240 | * Function invokes the asynchronous compress operation | ||
| 241 | * | ||
| 242 | * @req: asynchronous compress request | ||
| 243 | * | ||
| 244 | * Return: zero on success; error code in case of error | ||
| 245 | */ | ||
| 246 | static inline int crypto_acomp_compress(struct acomp_req *req) | ||
| 247 | { | ||
| 248 | struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); | ||
| 249 | |||
| 250 | return tfm->compress(req); | ||
| 251 | } | ||
| 252 | |||
| 253 | /** | ||
| 254 | * crypto_acomp_decompress() -- Invoke asynchronous decompress operation | ||
| 255 | * | ||
| 256 | * Function invokes the asynchronous decompress operation | ||
| 257 | * | ||
| 258 | * @req: asynchronous compress request | ||
| 259 | * | ||
| 260 | * Return: zero on success; error code in case of error | ||
| 261 | */ | ||
| 262 | static inline int crypto_acomp_decompress(struct acomp_req *req) | ||
| 263 | { | ||
| 264 | struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); | ||
| 265 | |||
| 266 | return tfm->decompress(req); | ||
| 267 | } | ||
| 268 | |||
| 269 | #endif | ||
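A hedged end-to-end sketch of driving the acomp API declared above (the scatterlists are assumed to be pre-populated by the caller; for brevity the request is freed immediately, whereas a real caller must wait for the callback when crypto_acomp_compress() returns -EINPROGRESS or -EBUSY):

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void my_acomp_done(struct crypto_async_request *base, int err)
{
	pr_info("acomp request completed: %d\n", err);
}

static int compress_example(struct scatterlist *src, unsigned int slen,
			    struct scatterlist *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   my_acomp_done, NULL);

	ret = crypto_acomp_compress(req);

	acomp_request_free(req);	/* only safe here if completion was synchronous */
out_tfm:
	crypto_free_acomp(tfm);
	return ret;
}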
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h new file mode 100644 index 000000000000..f5b8bfc22e6d --- /dev/null +++ b/include/crypto/cbc.h | |||
| @@ -0,0 +1,146 @@ | |||
| 1 | /* | ||
| 2 | * CBC: Cipher Block Chaining mode | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the Free | ||
| 8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 9 | * any later version. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef _CRYPTO_CBC_H | ||
| 14 | #define _CRYPTO_CBC_H | ||
| 15 | |||
| 16 | #include <crypto/internal/skcipher.h> | ||
| 17 | #include <linux/string.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | |||
| 20 | static inline int crypto_cbc_encrypt_segment( | ||
| 21 | struct skcipher_walk *walk, struct crypto_skcipher *tfm, | ||
| 22 | void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) | ||
| 23 | { | ||
| 24 | unsigned int bsize = crypto_skcipher_blocksize(tfm); | ||
| 25 | unsigned int nbytes = walk->nbytes; | ||
| 26 | u8 *src = walk->src.virt.addr; | ||
| 27 | u8 *dst = walk->dst.virt.addr; | ||
| 28 | u8 *iv = walk->iv; | ||
| 29 | |||
| 30 | do { | ||
| 31 | crypto_xor(iv, src, bsize); | ||
| 32 | fn(tfm, iv, dst); | ||
| 33 | memcpy(iv, dst, bsize); | ||
| 34 | |||
| 35 | src += bsize; | ||
| 36 | dst += bsize; | ||
| 37 | } while ((nbytes -= bsize) >= bsize); | ||
| 38 | |||
| 39 | return nbytes; | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline int crypto_cbc_encrypt_inplace( | ||
| 43 | struct skcipher_walk *walk, struct crypto_skcipher *tfm, | ||
| 44 | void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) | ||
| 45 | { | ||
| 46 | unsigned int bsize = crypto_skcipher_blocksize(tfm); | ||
| 47 | unsigned int nbytes = walk->nbytes; | ||
| 48 | u8 *src = walk->src.virt.addr; | ||
| 49 | u8 *iv = walk->iv; | ||
| 50 | |||
| 51 | do { | ||
| 52 | crypto_xor(src, iv, bsize); | ||
| 53 | fn(tfm, src, src); | ||
| 54 | iv = src; | ||
| 55 | |||
| 56 | src += bsize; | ||
| 57 | } while ((nbytes -= bsize) >= bsize); | ||
| 58 | |||
| 59 | memcpy(walk->iv, iv, bsize); | ||
| 60 | |||
| 61 | return nbytes; | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req, | ||
| 65 | void (*fn)(struct crypto_skcipher *, | ||
| 66 | const u8 *, u8 *)) | ||
| 67 | { | ||
| 68 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 69 | struct skcipher_walk walk; | ||
| 70 | int err; | ||
| 71 | |||
| 72 | err = skcipher_walk_virt(&walk, req, false); | ||
| 73 | |||
| 74 | while (walk.nbytes) { | ||
| 75 | if (walk.src.virt.addr == walk.dst.virt.addr) | ||
| 76 | err = crypto_cbc_encrypt_inplace(&walk, tfm, fn); | ||
| 77 | else | ||
| 78 | err = crypto_cbc_encrypt_segment(&walk, tfm, fn); | ||
| 79 | err = skcipher_walk_done(&walk, err); | ||
| 80 | } | ||
| 81 | |||
| 82 | return err; | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline int crypto_cbc_decrypt_segment( | ||
| 86 | struct skcipher_walk *walk, struct crypto_skcipher *tfm, | ||
| 87 | void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) | ||
| 88 | { | ||
| 89 | unsigned int bsize = crypto_skcipher_blocksize(tfm); | ||
| 90 | unsigned int nbytes = walk->nbytes; | ||
| 91 | u8 *src = walk->src.virt.addr; | ||
| 92 | u8 *dst = walk->dst.virt.addr; | ||
| 93 | u8 *iv = walk->iv; | ||
| 94 | |||
| 95 | do { | ||
| 96 | fn(tfm, src, dst); | ||
| 97 | crypto_xor(dst, iv, bsize); | ||
| 98 | iv = src; | ||
| 99 | |||
| 100 | src += bsize; | ||
| 101 | dst += bsize; | ||
| 102 | } while ((nbytes -= bsize) >= bsize); | ||
| 103 | |||
| 104 | memcpy(walk->iv, iv, bsize); | ||
| 105 | |||
| 106 | return nbytes; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline int crypto_cbc_decrypt_inplace( | ||
| 110 | struct skcipher_walk *walk, struct crypto_skcipher *tfm, | ||
| 111 | void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) | ||
| 112 | { | ||
| 113 | unsigned int bsize = crypto_skcipher_blocksize(tfm); | ||
| 114 | unsigned int nbytes = walk->nbytes; | ||
| 115 | u8 *src = walk->src.virt.addr; | ||
| 116 | u8 last_iv[bsize]; | ||
| 117 | |||
| 118 | /* Start of the last block. */ | ||
| 119 | src += nbytes - (nbytes & (bsize - 1)) - bsize; | ||
| 120 | memcpy(last_iv, src, bsize); | ||
| 121 | |||
| 122 | for (;;) { | ||
| 123 | fn(tfm, src, src); | ||
| 124 | if ((nbytes -= bsize) < bsize) | ||
| 125 | break; | ||
| 126 | crypto_xor(src, src - bsize, bsize); | ||
| 127 | src -= bsize; | ||
| 128 | } | ||
| 129 | |||
| 130 | crypto_xor(src, walk->iv, bsize); | ||
| 131 | memcpy(walk->iv, last_iv, bsize); | ||
| 132 | |||
| 133 | return nbytes; | ||
| 134 | } | ||
| 135 | |||
| 136 | static inline int crypto_cbc_decrypt_blocks( | ||
| 137 | struct skcipher_walk *walk, struct crypto_skcipher *tfm, | ||
| 138 | void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) | ||
| 139 | { | ||
| 140 | if (walk->src.virt.addr == walk->dst.virt.addr) | ||
| 141 | return crypto_cbc_decrypt_inplace(walk, tfm, fn); | ||
| 142 | else | ||
| 143 | return crypto_cbc_decrypt_segment(walk, tfm, fn); | ||
| 144 | } | ||
| 145 | |||
| 146 | #endif /* _CRYPTO_CBC_H */ | ||
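The helpers above turn CBC into a header-only template over any single-block primitive: a driver supplies one function that processes exactly one block and lets the walk code handle chaining, segment crossing and in-place operation. A hedged consumer sketch, where aes_encrypt_one() is a hypothetical one-block encrypt using the tfm's already-expanded key:

#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical single-block primitive; a real driver would encrypt one
 * block here with the key expanded at setkey time.
 */
static void aes_encrypt_one(struct crypto_skcipher *tfm,
			    const u8 *src, u8 *dst)
{
	/* ... one-block AES encryption ... */
}

static int my_cbc_encrypt(struct skcipher_request *req)
{
	return crypto_cbc_encrypt_walk(req, aes_encrypt_one);
}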
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index bc792d5a9e88..94418cbf9013 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h | |||
| @@ -12,10 +12,10 @@ | |||
| 12 | #ifndef _CRYPTO_CRYPT_H | 12 | #ifndef _CRYPTO_CRYPT_H |
| 13 | #define _CRYPTO_CRYPT_H | 13 | #define _CRYPTO_CRYPT_H |
| 14 | 14 | ||
| 15 | #include <linux/crypto.h> | ||
| 16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 17 | #include <crypto/aead.h> | 16 | #include <crypto/aead.h> |
| 18 | #include <crypto/hash.h> | 17 | #include <crypto/hash.h> |
| 18 | #include <crypto/skcipher.h> | ||
| 19 | 19 | ||
| 20 | struct cryptd_ablkcipher { | 20 | struct cryptd_ablkcipher { |
| 21 | struct crypto_ablkcipher base; | 21 | struct crypto_ablkcipher base; |
| @@ -34,6 +34,17 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); | |||
| 34 | bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); | 34 | bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); |
| 35 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); | 35 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); |
| 36 | 36 | ||
| 37 | struct cryptd_skcipher { | ||
| 38 | struct crypto_skcipher base; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, | ||
| 42 | u32 type, u32 mask); | ||
| 43 | struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); | ||
| 44 | /* Must be called without moving CPUs. */ | ||
| 45 | bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); | ||
| 46 | void cryptd_free_skcipher(struct cryptd_skcipher *tfm); | ||
| 47 | |||
| 37 | struct cryptd_ahash { | 48 | struct cryptd_ahash { |
| 38 | struct crypto_ahash base; | 49 | struct crypto_ahash base; |
| 39 | }; | 50 | }; |
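A hedged sketch of the new skcipher wrappers, mirroring the existing ablkcipher flow above: allocate a cryptd-backed tfm, reach the synchronous child for fast paths, and free it on teardown.

#include <crypto/cryptd.h>
#include <linux/err.h>

static int cryptd_skcipher_example(void)
{
	struct cryptd_skcipher *ctfm;
	struct crypto_skcipher *child;

	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* The child is the underlying synchronous tfm; per the comment
	 * above, only use it directly while no cryptd work is queued.
	 */
	child = cryptd_skcipher_child(ctfm);
	(void)child;

	cryptd_free_skcipher(ctfm);
	return 0;
}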
diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 04eb5c77addd..1bf600fc99f7 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h | |||
| @@ -43,8 +43,7 @@ | |||
| 43 | * @prepare_hash_request: do some prepare if need before handle the current request | 43 | * @prepare_hash_request: do some prepare if need before handle the current request |
| 44 | * @unprepare_hash_request: undo any work done by prepare_hash_request() | 44 | * @unprepare_hash_request: undo any work done by prepare_hash_request() |
| 45 | * @hash_one_request: do hash for current request | 45 | * @hash_one_request: do hash for current request |
| 46 | * @kworker: thread struct for request pump | 46 | * @kworker: kthread worker struct for request pump |
| 47 | * @kworker_task: pointer to task for request pump kworker thread | ||
| 48 | * @pump_requests: work struct for scheduling work to the request pump | 47 | * @pump_requests: work struct for scheduling work to the request pump |
| 49 | * @priv_data: the engine private data | 48 | * @priv_data: the engine private data |
| 50 | * @cur_req: the current request which is on processing | 49 | * @cur_req: the current request which is on processing |
| @@ -78,8 +77,7 @@ struct crypto_engine { | |||
| 78 | int (*hash_one_request)(struct crypto_engine *engine, | 77 | int (*hash_one_request)(struct crypto_engine *engine, |
| 79 | struct ahash_request *req); | 78 | struct ahash_request *req); |
| 80 | 79 | ||
| 81 | struct kthread_worker kworker; | 80 | struct kthread_worker *kworker; |
| 82 | struct task_struct *kworker_task; | ||
| 83 | struct kthread_work pump_requests; | 81 | struct kthread_work pump_requests; |
| 84 | 82 | ||
| 85 | void *priv_data; | 83 | void *priv_data; |
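The struct change above follows a kthread API shift: instead of embedding a kthread_worker and spawning its task by hand, the engine keeps a pointer returned by kthread_create_worker(), which allocates the worker and its thread in one call. A sketch of the expected setup and teardown (error handling abbreviated; crypto_pump_work is assumed to be the engine's pump callback):

	engine->kworker = kthread_create_worker(0, "%s", "crypto-engine");
	if (IS_ERR(engine->kworker))
		return PTR_ERR(engine->kworker);

	kthread_init_work(&engine->pump_requests, crypto_pump_work);
	kthread_queue_work(engine->kworker, &engine->pump_requests);

	/* ... and on shutdown ... */
	kthread_destroy_worker(engine->kworker);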
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index da2530e34b26..592d47e565a8 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h | |||
| @@ -177,24 +177,23 @@ void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t); | |||
| 177 | 177 | ||
| 178 | static inline void gf128mul_free_4k(struct gf128mul_4k *t) | 178 | static inline void gf128mul_free_4k(struct gf128mul_4k *t) |
| 179 | { | 179 | { |
| 180 | kfree(t); | 180 | kzfree(t); |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | 183 | ||
| 184 | /* 64k table optimization, implemented for lle and bbe */ | 184 | /* 64k table optimization, implemented for bbe */ |
| 185 | 185 | ||
| 186 | struct gf128mul_64k { | 186 | struct gf128mul_64k { |
| 187 | struct gf128mul_4k *t[16]; | 187 | struct gf128mul_4k *t[16]; |
| 188 | }; | 188 | }; |
| 189 | 189 | ||
| 190 | /* first initialize with the constant factor with which you | 190 | /* First initialize with the constant factor with which you |
| 191 | * want to multiply and then call gf128_64k_lle with the other | 191 | * want to multiply and then call gf128mul_64k_bbe with the other |
| 192 | * factor in the first argument, the table in the second and a | 192 | * factor in the first argument, and the table in the second. |
| 193 | * scratch register in the third. Afterwards *a = *r. */ | 193 | * Afterwards, the result is stored in *a. |
| 194 | struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g); | 194 | */ |
| 195 | struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); | 195 | struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); |
| 196 | void gf128mul_free_64k(struct gf128mul_64k *t); | 196 | void gf128mul_free_64k(struct gf128mul_64k *t); |
| 197 | void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t); | ||
| 198 | void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t); | 197 | void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t); |
| 199 | 198 | ||
| 200 | #endif /* _CRYPTO_GF128MUL_H */ | 199 | #endif /* _CRYPTO_GF128MUL_H */ |
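With the unused lle variant removed, the surviving 64k path works exactly as the corrected comment describes: seed the tables once with the constant factor H, then multiply in place; the switch from kfree() to kzfree() also fits, since the tables are derived from key material. A short usage sketch:

static int gf128_mul_example(be128 *x, const be128 *h)
{
	struct gf128mul_64k *t;

	t = gf128mul_init_64k_bbe(h);	/* precompute 16 x 4k tables for H */
	if (!t)
		return -ENOMEM;

	gf128mul_64k_bbe(x, t);		/* x = x * H in GF(2^128), in place */

	gf128mul_free_64k(t);		/* zeroizes the key-derived tables */
	return 0;
}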
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h new file mode 100644 index 000000000000..1de2b5af12d7 --- /dev/null +++ b/include/crypto/internal/acompress.h | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | /* | ||
| 2 | * Asynchronous Compression operations | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation | ||
| 5 | * Authors: Weigang Li <weigang.li@intel.com> | ||
| 6 | * Giovanni Cabiddu <giovanni.cabiddu@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #ifndef _CRYPTO_ACOMP_INT_H | ||
| 15 | #define _CRYPTO_ACOMP_INT_H | ||
| 16 | #include <crypto/acompress.h> | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Transform internal helpers. | ||
| 20 | */ | ||
| 21 | static inline void *acomp_request_ctx(struct acomp_req *req) | ||
| 22 | { | ||
| 23 | return req->__ctx; | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm) | ||
| 27 | { | ||
| 28 | return tfm->base.__crt_ctx; | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void acomp_request_complete(struct acomp_req *req, | ||
| 32 | int err) | ||
| 33 | { | ||
| 34 | req->base.complete(&req->base, err); | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline const char *acomp_alg_name(struct crypto_acomp *tfm) | ||
| 38 | { | ||
| 39 | return crypto_acomp_tfm(tfm)->__crt_alg->cra_name; | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) | ||
| 43 | { | ||
| 44 | struct acomp_req *req; | ||
| 45 | |||
| 46 | req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); | ||
| 47 | if (likely(req)) | ||
| 48 | acomp_request_set_tfm(req, tfm); | ||
| 49 | return req; | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void __acomp_request_free(struct acomp_req *req) | ||
| 53 | { | ||
| 54 | kzfree(req); | ||
| 55 | } | ||
| 56 | |||
| 57 | /** | ||
| 58 | * crypto_register_acomp() -- Register asynchronous compression algorithm | ||
| 59 | * | ||
| 60 | * Function registers an implementation of an asynchronous | ||
| 61 | * compression algorithm | ||
| 62 | * | ||
| 63 | * @alg: algorithm definition | ||
| 64 | * | ||
| 65 | * Return: zero on success; error code in case of error | ||
| 66 | */ | ||
| 67 | int crypto_register_acomp(struct acomp_alg *alg); | ||
| 68 | |||
| 69 | /** | ||
| 70 | * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm | ||
| 71 | * | ||
| 72 | * Function unregisters an implementation of an asynchronous | ||
| 73 | * compression algorithm | ||
| 74 | * | ||
| 75 | * @alg: algorithm definition | ||
| 76 | * | ||
| 77 | * Return: zero on success; error code in case of error | ||
| 78 | */ | ||
| 79 | int crypto_unregister_acomp(struct acomp_alg *alg); | ||
| 80 | |||
| 81 | #endif | ||
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h new file mode 100644 index 000000000000..3fda3c5655a0 --- /dev/null +++ b/include/crypto/internal/scompress.h | |||
| @@ -0,0 +1,136 @@ | |||
| 1 | /* | ||
| 2 | * Synchronous Compression operations | ||
| 3 | * | ||
| 4 | * Copyright 2015 LG Electronics Inc. | ||
| 5 | * Copyright (c) 2016, Intel Corporation | ||
| 6 | * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #ifndef _CRYPTO_SCOMP_INT_H | ||
| 15 | #define _CRYPTO_SCOMP_INT_H | ||
| 16 | #include <linux/crypto.h> | ||
| 17 | |||
| 18 | #define SCOMP_SCRATCH_SIZE 131072 | ||
| 19 | |||
| 20 | struct crypto_scomp { | ||
| 21 | struct crypto_tfm base; | ||
| 22 | }; | ||
| 23 | |||
| 24 | /** | ||
| 25 | * struct scomp_alg - synchronous compression algorithm | ||
| 26 | * | ||
| 27 | * @alloc_ctx: Function allocates algorithm specific context | ||
| 28 | * @free_ctx: Function frees context allocated with alloc_ctx | ||
| 29 | * @compress: Function performs a compress operation | ||
| 30 | * @decompress: Function performs a de-compress operation | ||
| 31 | * @init: Initialize the cryptographic transformation object. | ||
| 32 | * This function is used to initialize the cryptographic | ||
| 33 | * transformation object. This function is called only once at | ||
| 34 | * the instantiation time, right after the transformation context | ||
| 35 | * was allocated. In case the cryptographic hardware has some | ||
| 36 | * special requirements which need to be handled by software, this | ||
| 37 | * function shall check for the precise requirement of the | ||
| 38 | * transformation and put any software fallbacks in place. | ||
| 39 | * @exit: Deinitialize the cryptographic transformation object. This is a | ||
| 40 | * counterpart to @init, used to remove various changes set in | ||
| 41 | * @init. | ||
| 42 | * @base: Common crypto API algorithm data structure | ||
| 43 | */ | ||
| 44 | struct scomp_alg { | ||
| 45 | void *(*alloc_ctx)(struct crypto_scomp *tfm); | ||
| 46 | void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); | ||
| 47 | int (*compress)(struct crypto_scomp *tfm, const u8 *src, | ||
| 48 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 49 | void *ctx); | ||
| 50 | int (*decompress)(struct crypto_scomp *tfm, const u8 *src, | ||
| 51 | unsigned int slen, u8 *dst, unsigned int *dlen, | ||
| 52 | void *ctx); | ||
| 53 | struct crypto_alg base; | ||
| 54 | }; | ||
| 55 | |||
| 56 | static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) | ||
| 57 | { | ||
| 58 | return container_of(alg, struct scomp_alg, base); | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) | ||
| 62 | { | ||
| 63 | return container_of(tfm, struct crypto_scomp, base); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) | ||
| 67 | { | ||
| 68 | return &tfm->base; | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline void crypto_free_scomp(struct crypto_scomp *tfm) | ||
| 72 | { | ||
| 73 | crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) | ||
| 77 | { | ||
| 78 | return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); | ||
| 79 | } | ||
| 80 | |||
| 81 | static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) | ||
| 82 | { | ||
| 83 | return crypto_scomp_alg(tfm)->alloc_ctx(tfm); | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, | ||
| 87 | void *ctx) | ||
| 88 | { | ||
| 89 | return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline int crypto_scomp_compress(struct crypto_scomp *tfm, | ||
| 93 | const u8 *src, unsigned int slen, | ||
| 94 | u8 *dst, unsigned int *dlen, void *ctx) | ||
| 95 | { | ||
| 96 | return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, | ||
| 100 | const u8 *src, unsigned int slen, | ||
| 101 | u8 *dst, unsigned int *dlen, | ||
| 102 | void *ctx) | ||
| 103 | { | ||
| 104 | return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, | ||
| 105 | ctx); | ||
| 106 | } | ||
| 107 | |||
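These one-shot wrappers pair naturally: a caller allocates a context once, runs
the operation, and frees the context. A minimal caller-side sketch (assuming,
as is hedged here rather than guaranteed by the header, that @alloc_ctx
implementations return an ERR_PTR on failure):

    static int scomp_one_shot(struct crypto_scomp *tfm,
                              const u8 *src, unsigned int slen,
                              u8 *dst, unsigned int *dlen)
    {
            void *ctx = crypto_scomp_alloc_ctx(tfm);
            int err;

            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            /* on success, *dlen is updated to the produced length */
            err = crypto_scomp_compress(tfm, src, slen, dst, dlen, ctx);
            crypto_scomp_free_ctx(tfm, ctx);
            return err;
    }
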
| 108 | int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); | ||
| 109 | struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); | ||
| 110 | void crypto_acomp_scomp_free_ctx(struct acomp_req *req); | ||
| 111 | |||
| 112 | /** | ||
| 113 | * crypto_register_scomp() -- Register synchronous compression algorithm | ||
| 114 | * | ||
| 115 | * Function registers an implementation of a synchronous | ||
| 116 | * compression algorithm. | ||
| 117 | * | ||
| 118 | * @alg: algorithm definition | ||
| 119 | * | ||
| 120 | * Return: zero on success; error code on failure | ||
| 121 | */ | ||
| 122 | int crypto_register_scomp(struct scomp_alg *alg); | ||
| 123 | |||
| 124 | /** | ||
| 125 | * crypto_unregister_scomp() -- Unregister synchronous compression algorithm | ||
| 126 | * | ||
| 127 | * Function unregisters an implementation of a synchronous | ||
| 128 | * compression algorithm. | ||
| 129 | * | ||
| 130 | * @alg: algorithm definition | ||
| 131 | * | ||
| 132 | * Return: zero on success; error code on failure | ||
| 133 | */ | ||
| 134 | int crypto_unregister_scomp(struct scomp_alg *alg); | ||
| 135 | |||
| 136 | #endif | ||
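For completeness, a hedged sketch of registering an implementation against
this interface; the trivial "copy" algorithm and every name in it are invented
for illustration, and the include path assumes this header lands as
<crypto/internal/scompress.h>:

    #include <crypto/internal/scompress.h>
    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/string.h>

    static void *copy_alloc_ctx(struct crypto_scomp *tfm)
    {
            return NULL;            /* stateless: no per-request context */
    }

    static void copy_free_ctx(struct crypto_scomp *tfm, void *ctx)
    {
    }

    static int copy_comp(struct crypto_scomp *tfm, const u8 *src,
                         unsigned int slen, u8 *dst, unsigned int *dlen,
                         void *ctx)
    {
            if (*dlen < slen)
                    return -ENOSPC;
            memcpy(dst, src, slen);
            *dlen = slen;
            return 0;
    }

    static struct scomp_alg copy_scomp = {
            .alloc_ctx  = copy_alloc_ctx,
            .free_ctx   = copy_free_ctx,
            .compress   = copy_comp,
            .decompress = copy_comp,        /* a copy is its own inverse */
            .base       = {
                    .cra_name        = "copy",
                    .cra_driver_name = "copy-generic",
                    .cra_module      = THIS_MODULE,
            },
    };

    static int __init copy_mod_init(void)
    {
            return crypto_register_scomp(&copy_scomp);
    }
    module_init(copy_mod_init);

    static void __exit copy_mod_exit(void)
    {
            crypto_unregister_scomp(&copy_scomp);
    }
    module_exit(copy_mod_exit);
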
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h new file mode 100644 index 000000000000..429509968f68 --- /dev/null +++ b/include/crypto/internal/simd.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | /* | ||
| 2 | * Shared crypto simd helpers | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _CRYPTO_INTERNAL_SIMD_H | ||
| 6 | #define _CRYPTO_INTERNAL_SIMD_H | ||
| 7 | |||
| 8 | struct simd_skcipher_alg; | ||
| 9 | |||
| 10 | struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, | ||
| 11 | const char *drvname, | ||
| 12 | const char *basename); | ||
| 13 | struct simd_skcipher_alg *simd_skcipher_create(const char *algname, | ||
| 14 | const char *basename); | ||
| 15 | void simd_skcipher_free(struct simd_skcipher_alg *alg); | ||
| 16 | |||
| 17 | #endif /* _CRYPTO_INTERNAL_SIMD_H */ | ||
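These helpers wrap an internal, SIMD-only skcipher in an async front end that
defers to cryptd when the SIMD unit cannot be used (e.g. from interrupt
context). A sketch of how arch glue code might use them; the "myarch" names
are illustrative, not taken from this series:

    static struct simd_skcipher_alg *my_simd_alg;

    static int __init my_glue_init(void)
    {
            /* expose "cbc(aes)" backed by the internal SIMD-only
             * "__cbc-aes-myarch" implementation */
            my_simd_alg = simd_skcipher_create_compat("cbc(aes)",
                                                      "cbc-aes-myarch",
                                                      "__cbc-aes-myarch");
            return PTR_ERR_OR_ZERO(my_simd_alg);
    }

    static void __exit my_glue_exit(void)
    {
            simd_skcipher_free(my_simd_alg);
    }
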
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index a21a95e1a375..8735979ed341 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h | |||
| @@ -15,8 +15,10 @@ | |||
| 15 | 15 | ||
| 16 | #include <crypto/algapi.h> | 16 | #include <crypto/algapi.h> |
| 17 | #include <crypto/skcipher.h> | 17 | #include <crypto/skcipher.h> |
| 18 | #include <linux/list.h> | ||
| 18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 19 | 20 | ||
| 21 | struct aead_request; | ||
| 20 | struct rtattr; | 22 | struct rtattr; |
| 21 | 23 | ||
| 22 | struct skcipher_instance { | 24 | struct skcipher_instance { |
| @@ -34,6 +36,40 @@ struct crypto_skcipher_spawn { | |||
| 34 | struct crypto_spawn base; | 36 | struct crypto_spawn base; |
| 35 | }; | 37 | }; |
| 36 | 38 | ||
| 39 | struct skcipher_walk { | ||
| 40 | union { | ||
| 41 | struct { | ||
| 42 | struct page *page; | ||
| 43 | unsigned long offset; | ||
| 44 | } phys; | ||
| 45 | |||
| 46 | struct { | ||
| 47 | u8 *page; | ||
| 48 | void *addr; | ||
| 49 | } virt; | ||
| 50 | } src, dst; | ||
| 51 | |||
| 52 | struct scatter_walk in; | ||
| 53 | unsigned int nbytes; | ||
| 54 | |||
| 55 | struct scatter_walk out; | ||
| 56 | unsigned int total; | ||
| 57 | |||
| 58 | struct list_head buffers; | ||
| 59 | |||
| 60 | u8 *page; | ||
| 61 | u8 *buffer; | ||
| 62 | u8 *oiv; | ||
| 63 | void *iv; | ||
| 64 | |||
| 65 | unsigned int ivsize; | ||
| 66 | |||
| 67 | int flags; | ||
| 68 | unsigned int blocksize; | ||
| 69 | unsigned int chunksize; | ||
| 70 | unsigned int alignmask; | ||
| 71 | }; | ||
| 72 | |||
| 37 | extern const struct crypto_type crypto_givcipher_type; | 73 | extern const struct crypto_type crypto_givcipher_type; |
| 38 | 74 | ||
| 39 | static inline struct crypto_instance *skcipher_crypto_instance( | 75 | static inline struct crypto_instance *skcipher_crypto_instance( |
| @@ -68,14 +104,6 @@ static inline void crypto_set_skcipher_spawn( | |||
| 68 | int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, | 104 | int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, |
| 69 | u32 type, u32 mask); | 105 | u32 type, u32 mask); |
| 70 | 106 | ||
| 71 | static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, | ||
| 72 | const char *name, u32 type, u32 mask) | ||
| 73 | { | ||
| 74 | return crypto_grab_skcipher(spawn, name, type, mask); | ||
| 75 | } | ||
| 76 | |||
| 77 | struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); | ||
| 78 | |||
| 79 | static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) | 107 | static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) |
| 80 | { | 108 | { |
| 81 | crypto_drop_spawn(&spawn->base); | 109 | crypto_drop_spawn(&spawn->base); |
| @@ -99,12 +127,6 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher( | |||
| 99 | return crypto_spawn_tfm2(&spawn->base); | 127 | return crypto_spawn_tfm2(&spawn->base); |
| 100 | } | 128 | } |
| 101 | 129 | ||
| 102 | static inline struct crypto_skcipher *crypto_spawn_skcipher2( | ||
| 103 | struct crypto_skcipher_spawn *spawn) | ||
| 104 | { | ||
| 105 | return crypto_spawn_skcipher(spawn); | ||
| 106 | } | ||
| 107 | |||
| 108 | static inline void crypto_skcipher_set_reqsize( | 130 | static inline void crypto_skcipher_set_reqsize( |
| 109 | struct crypto_skcipher *skcipher, unsigned int reqsize) | 131 | struct crypto_skcipher *skcipher, unsigned int reqsize) |
| 110 | { | 132 | { |
| @@ -118,6 +140,21 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); | |||
| 118 | int skcipher_register_instance(struct crypto_template *tmpl, | 140 | int skcipher_register_instance(struct crypto_template *tmpl, |
| 119 | struct skcipher_instance *inst); | 141 | struct skcipher_instance *inst); |
| 120 | 142 | ||
| 143 | int skcipher_walk_done(struct skcipher_walk *walk, int err); | ||
| 144 | int skcipher_walk_virt(struct skcipher_walk *walk, | ||
| 145 | struct skcipher_request *req, | ||
| 146 | bool atomic); | ||
| 147 | void skcipher_walk_atomise(struct skcipher_walk *walk); | ||
| 148 | int skcipher_walk_async(struct skcipher_walk *walk, | ||
| 149 | struct skcipher_request *req); | ||
| 150 | int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, | ||
| 151 | bool atomic); | ||
| 152 | int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, | ||
| 153 | struct aead_request *req, bool atomic); | ||
| 154 | int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, | ||
| 155 | struct aead_request *req, bool atomic); | ||
| 156 | void skcipher_walk_complete(struct skcipher_walk *walk, int err); | ||
| 157 | |||
| 121 | static inline void ablkcipher_request_complete(struct ablkcipher_request *req, | 158 | static inline void ablkcipher_request_complete(struct ablkcipher_request *req, |
| 122 | int err) | 159 | int err) |
| 123 | { | 160 | { |
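The new skcipher_walk is iterated with the done/advance pattern: each step maps
a contiguous chunk of the request, walk.nbytes says how much is mapped, and
skcipher_walk_done() hands back the unprocessed tail. A hedged sketch of a
driver's encrypt path; my_ctx, my_cipher_blocks() and MY_BLOCK_SIZE are
illustrative stand-ins, not from this patch:

    static int my_encrypt(struct skcipher_request *req)
    {
            struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
            struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
            struct skcipher_walk walk;
            unsigned int nbytes;
            int err;

            err = skcipher_walk_virt(&walk, req, false);

            while ((nbytes = walk.nbytes) != 0) {
                    /* process the full blocks mapped in this step */
                    my_cipher_blocks(ctx, walk.dst.virt.addr,
                                     walk.src.virt.addr,
                                     nbytes - nbytes % MY_BLOCK_SIZE,
                                     walk.iv);
                    /* hand back the tail; this advances the walk */
                    err = skcipher_walk_done(&walk,
                                             nbytes % MY_BLOCK_SIZE);
            }

            return err;
    }

skcipher_walk_virt() fills the virt side of the src/dst union above; the phys
variant and the *_aead entry points follow the same pattern.
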
diff --git a/include/crypto/xts.h b/include/crypto/xts.h index ede6b97b24cc..77b630672b2c 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h | |||
| @@ -2,8 +2,7 @@ | |||
| 2 | #define _CRYPTO_XTS_H | 2 | #define _CRYPTO_XTS_H |
| 3 | 3 | ||
| 4 | #include <crypto/b128ops.h> | 4 | #include <crypto/b128ops.h> |
| 5 | #include <linux/crypto.h> | 5 | #include <crypto/internal/skcipher.h> |
| 6 | #include <crypto/algapi.h> | ||
| 7 | #include <linux/fips.h> | 6 | #include <linux/fips.h> |
| 8 | 7 | ||
| 9 | struct scatterlist; | 8 | struct scatterlist; |
| @@ -51,4 +50,27 @@ static inline int xts_check_key(struct crypto_tfm *tfm, | |||
| 51 | return 0; | 50 | return 0; |
| 52 | } | 51 | } |
| 53 | 52 | ||
| 53 | static inline int xts_verify_key(struct crypto_skcipher *tfm, | ||
| 54 | const u8 *key, unsigned int keylen) | ||
| 55 | { | ||
| 56 | /* | ||
| 57 | * The key consists of two keys of equal size concatenated, | ||
| 58 | * so the total length must be even. | ||
| 59 | */ | ||
| 60 | if (keylen % 2) { | ||
| 61 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 62 | return -EINVAL; | ||
| 63 | } | ||
| 64 | |||
| 65 | /* ensure that the AES key and the tweak key are not identical */ | ||
| 66 | if ((fips_enabled || crypto_skcipher_get_flags(tfm) & | ||
| 67 | CRYPTO_TFM_REQ_WEAK_KEY) && | ||
| 68 | !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { | ||
| 69 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); | ||
| 70 | return -EINVAL; | ||
| 71 | } | ||
| 72 | |||
| 73 | return 0; | ||
| 74 | } | ||
| 75 | |||
| 54 | #endif /* _CRYPTO_XTS_H */ | 76 | #endif /* _CRYPTO_XTS_H */ |
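xts_verify_key() is the skcipher-based counterpart of the older xts_check_key()
above. A sketch of a .setkey that might use it; my_xts_ctx and my_expand_key()
are hypothetical:

    static int my_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
    {
            struct my_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
            int err;

            /* rejects odd lengths and, in FIPS mode or with
             * CRYPTO_TFM_REQ_WEAK_KEY set, identical halves */
            err = xts_verify_key(tfm, key, keylen);
            if (err)
                    return err;

            keylen /= 2;
            my_expand_key(&ctx->crypt_key, key, keylen);
            my_expand_key(&ctx->tweak_key, key + keylen, keylen);
            return 0;
    }
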
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index a7653339fedb..c71dd8fa5764 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
| @@ -11,8 +11,8 @@ | |||
| 11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifndef __CPP_H__ | 14 | #ifndef __CCP_H__ |
| 15 | #define __CPP_H__ | 15 | #define __CCP_H__ |
| 16 | 16 | ||
| 17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
| 18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
| @@ -553,7 +553,7 @@ enum ccp_engine { | |||
| 553 | #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 | 553 | #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 |
| 554 | 554 | ||
| 555 | /** | 555 | /** |
| 556 | * struct ccp_cmd - CPP operation request | 556 | * struct ccp_cmd - CCP operation request |
| 557 | * @entry: list element (ccp driver use only) | 557 | * @entry: list element (ccp driver use only) |
| 558 | * @work: work element used for callbacks (ccp driver use only) | 558 | * @work: work element used for callbacks (ccp driver use only) |
| 559 | * @ccp: CCP device to be run on (ccp driver use only) | 559 | * @ccp: CCP device to be run on (ccp driver use only) |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7cee5551625b..167aea29d41e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -50,6 +50,8 @@ | |||
| 50 | #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 | 50 | #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 |
| 51 | #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 | 51 | #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 |
| 52 | #define CRYPTO_ALG_TYPE_KPP 0x00000008 | 52 | #define CRYPTO_ALG_TYPE_KPP 0x00000008 |
| 53 | #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a | ||
| 54 | #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b | ||
| 53 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c | 55 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c |
| 54 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d | 56 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d |
| 55 | #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e | 57 | #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e |
| @@ -60,6 +62,7 @@ | |||
| 60 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e | 62 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e |
| 61 | #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e | 63 | #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e |
| 62 | #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c | 64 | #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c |
| 65 | #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e | ||
| 63 | 66 | ||
| 64 | #define CRYPTO_ALG_LARVAL 0x00000010 | 67 | #define CRYPTO_ALG_LARVAL 0x00000010 |
| 65 | #define CRYPTO_ALG_DEAD 0x00000020 | 68 | #define CRYPTO_ALG_DEAD 0x00000020 |
| @@ -87,7 +90,7 @@ | |||
| 87 | #define CRYPTO_ALG_TESTED 0x00000400 | 90 | #define CRYPTO_ALG_TESTED 0x00000400 |
| 88 | 91 | ||
| 89 | /* | 92 | /* |
| 90 | * Set if the algorithm is an instance that is build from templates. | 93 | * Set if the algorithm is an instance that is built from templates. |
| 91 | */ | 94 | */ |
| 92 | #define CRYPTO_ALG_INSTANCE 0x00000800 | 95 | #define CRYPTO_ALG_INSTANCE 0x00000800 |
| 93 | 96 | ||
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 34a0dc18f327..bee0827766a3 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h | |||
| @@ -30,8 +30,7 @@ | |||
| 30 | * Must not be NULL. *OBSOLETE* | 30 | * Must not be NULL. *OBSOLETE* |
| 31 | * @read: New API. Drivers can fill up to max bytes of data | 31 | * @read: New API. Drivers can fill up to max bytes of data |
| 32 | * into the buffer. The buffer is aligned for any type | 32 | * into the buffer. The buffer is aligned for any type |
| 33 | * and max is guaranteed to be >= to that alignment | 33 | * and max is a multiple of 4 and >= 32 bytes. |
| 34 | * (either 4 or 8 depending on architecture). | ||
| 35 | * @priv: Private data, for use by the RNG driver. | 34 | * @priv: Private data, for use by the RNG driver. |
| 36 | * @quality: Estimation of true entropy in RNG's bitstream | 35 | * @quality: Estimation of true entropy in RNG's bitstream |
| 37 | * (per mill). | 36 | * (per mill). |
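With the tightened contract, a driver's read() callback can rely on max being
a multiple of 4 and at least 32 bytes, so whole 32-bit register reads always
fit. A hedged sketch; my_rng_readl() and MY_RNG_DATA are illustrative:

    static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            u32 *out = buf;
            size_t i;

            /* max is a multiple of 4: fill it with 32-bit reads */
            for (i = 0; i < max / 4; i++)
                    out[i] = my_rng_readl(rng, MY_RNG_DATA);

            return max;     /* number of random bytes placed in buf */
    }
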
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 79b5ded2001a..11d21fce14d6 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h | |||
| @@ -46,6 +46,7 @@ enum crypto_attr_type_t { | |||
| 46 | CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ | 46 | CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ |
| 47 | CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ | 47 | CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ |
| 48 | CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ | 48 | CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ |
| 49 | CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */ | ||
| 49 | __CRYPTOCFGA_MAX | 50 | __CRYPTOCFGA_MAX |
| 50 | 51 | ||
| 51 | #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) | 52 | #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) |
| @@ -112,5 +113,9 @@ struct crypto_report_kpp { | |||
| 112 | char type[CRYPTO_MAX_NAME]; | 113 | char type[CRYPTO_MAX_NAME]; |
| 113 | }; | 114 | }; |
| 114 | 115 | ||
| 116 | struct crypto_report_acomp { | ||
| 117 | char type[CRYPTO_MAX_NAME]; | ||
| 118 | }; | ||
| 119 | |||
| 115 | #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ | 120 | #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ |
| 116 | sizeof(struct crypto_report_blkcipher)) | 121 | sizeof(struct crypto_report_blkcipher)) |
diff --git a/kernel/padata.c b/kernel/padata.c index 7848f0566403..05316c9f32da 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
| @@ -64,15 +64,11 @@ static int padata_cpu_hash(struct parallel_data *pd) | |||
| 64 | static void padata_parallel_worker(struct work_struct *parallel_work) | 64 | static void padata_parallel_worker(struct work_struct *parallel_work) |
| 65 | { | 65 | { |
| 66 | struct padata_parallel_queue *pqueue; | 66 | struct padata_parallel_queue *pqueue; |
| 67 | struct parallel_data *pd; | ||
| 68 | struct padata_instance *pinst; | ||
| 69 | LIST_HEAD(local_list); | 67 | LIST_HEAD(local_list); |
| 70 | 68 | ||
| 71 | local_bh_disable(); | 69 | local_bh_disable(); |
| 72 | pqueue = container_of(parallel_work, | 70 | pqueue = container_of(parallel_work, |
| 73 | struct padata_parallel_queue, work); | 71 | struct padata_parallel_queue, work); |
| 74 | pd = pqueue->pd; | ||
| 75 | pinst = pd->pinst; | ||
| 76 | 72 | ||
| 77 | spin_lock(&pqueue->parallel.lock); | 73 | spin_lock(&pqueue->parallel.lock); |
| 78 | list_replace_init(&pqueue->parallel.list, &local_list); | 74 | list_replace_init(&pqueue->parallel.list, &local_list); |
