author	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-31 17:22:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-31 17:22:45 -0500
commit	a103950e0dd2058df5e8a8d4a915707bdcf205f0 (patch)
tree	af5d091f768db4ed7a12fc3c5484d3e20ad9d514
parent	2cfa1cd3da14814a1e9ec6a4fce8612637d3ee3d (diff)
parent	2d55807b7f7bf62bb05a8b91247c5eb7cd19ac04 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Enforce the setting of keys for keyed aead/hash/skcipher
algorithms.
- Add multibuf speed tests in tcrypt.
Algorithms:
- Improve performance of sha3-generic.
- Add native sha512 support on arm64.
- Add v8.2 Crypto Extensions version of sha3/sm3 on arm64.
- Avoid hmac nesting by requiring underlying algorithm to be unkeyed.
- Add cryptd_max_cpu_qlen module parameter to cryptd.
Drivers:
- Add support for EIP97 engine in inside-secure.
- Add inline IPsec support to chelsio.
- Add RevB core support to crypto4xx.
- Fix AEAD ICV check in crypto4xx.
- Add stm32 crypto driver.
- Add support for BCM63xx platforms in bcm2835 and remove bcm63xx.
- Add Derived Key Protocol (DKP) support in caam.
- Add Samsung Exynos True RNG driver.
- Add support for Exynos5250+ SoCs in exynos PRNG driver"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (166 commits)
crypto: picoxcell - Fix error handling in spacc_probe()
crypto: arm64/sha512 - fix/improve new v8.2 Crypto Extensions code
crypto: arm64/sm3 - new v8.2 Crypto Extensions implementation
crypto: arm64/sha3 - new v8.2 Crypto Extensions implementation
crypto: testmgr - add new testcases for sha3
crypto: sha3-generic - export init/update/final routines
crypto: sha3-generic - simplify code
crypto: sha3-generic - rewrite KECCAK transform to help the compiler optimize
crypto: sha3-generic - fixes for alignment and big endian operation
crypto: aesni - handle zero length dst buffer
crypto: artpec6 - remove select on non-existing CRYPTO_SHA384
hwrng: bcm2835 - Remove redundant dev_err call in bcm2835_rng_probe()
crypto: stm32 - remove redundant dev_err call in stm32_cryp_probe()
crypto: axis - remove unnecessary platform_get_resource() error check
crypto: testmgr - test misuse of result in ahash
crypto: inside-secure - make function safexcel_try_push_requests static
crypto: aes-generic - fix aes-generic regression on powerpc
crypto: chelsio - Fix indentation warning
crypto: arm64/sha1-ce - get rid of literal pool
crypto: arm64/sha2-ce - move the round constant table to .rodata section
...
162 files changed, 7472 insertions(+), 2683 deletions(-)
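The headline API change (mandatory keys for keyed aead/hash/skcipher algorithms) is visible from the caller's side: a transform whose algorithm defines setkey() now fails with -ENOKEY until a key has been installed. A minimal sketch of the user-facing contract, assuming the standard shash API (error paths trimmed; the algorithm name is only an example):

	#include <crypto/hash.h>

	/* Sketch only: illustrates the setkey-before-digest rule. */
	static int hmac_sha256_digest(const u8 *key, unsigned int keylen,
				      const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Skipping this call now makes the digest below fail with -ENOKEY. */
		err = crypto_shash_setkey(tfm, key, keylen);
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			err = crypto_shash_digest(desc, data, len, out);
		}

		crypto_free_shash(tfm);
		return err;
	}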
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
new file mode 100644
index 000000000000..cec8d5d74e26
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -0,0 +1,22 @@
+Arm TrustZone CryptoCell cryptographic engine
+
+Required properties:
+- compatible: Should be "arm,cryptocell-712-ree".
+- reg: Base physical address of the engine and length of memory mapped region.
+- interrupts: Interrupt number for the device.
+
+Optional properties:
+- interrupt-parent: The phandle for the interrupt controller that services
+  interrupts for this device.
+- clocks: Reference to the crypto engine clock.
+- dma-coherent: Present if dma operations are coherent.
+
+Examples:
+
+	arm_cc712: crypto@80000000 {
+		compatible = "arm,cryptocell-712-ree";
+		interrupt-parent = <&intc>;
+		interrupts = < 0 30 4 >;
+		reg = < 0x80000000 0x10000 >;
+
+	};
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index fbc07d12322f..30c3ce6b502e 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,7 +1,8 @@
 Inside Secure SafeXcel cryptographic engine
 
 Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197".
+- compatible: Should be "inside-secure,safexcel-eip197" or
+  "inside-secure,safexcel-eip97".
 - reg: Base physical address of the engine and length of memory mapped region.
 - interrupts: Interrupt numbers for the rings and engine.
 - interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
index 4ca8dd4d7e66..a13fbdb4bd88 100644
--- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
@@ -2,7 +2,9 @@ Exynos Pseudo Random Number Generator
 
 Required properties:
 
-- compatible : Should be "samsung,exynos4-rng".
+- compatible : One of:
+  - "samsung,exynos4-rng" for Exynos4210 and Exynos4412
+  - "samsung,exynos5250-prng" for Exynos5250+
 - reg : Specifies base physical address and size of the registers map.
 - clocks : Phandle to clock-controller plus clock-specifier pair.
 - clock-names : "secss" as a clock name.
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
new file mode 100644
index 000000000000..970487fa40b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
@@ -0,0 +1,19 @@
+* STMicroelectronics STM32 CRYP
+
+Required properties:
+- compatible: Should be "st,stm32f756-cryp".
+- reg: The address and length of the peripheral registers space
+- clocks: The input clock of the CRYP instance
+- interrupts: The CRYP interrupt
+
+Optional properties:
+- resets: The input reset of the CRYP instance
+
+Example:
+crypto@50060000 {
+	compatible = "st,stm32f756-cryp";
+	reg = <0x50060000 0x400>;
+	interrupts = <79>;
+	clocks = <&rcc 0 STM32F7_AHB2_CLOCK(CRYP)>;
+	resets = <&rcc STM32F7_AHB2_RESET(CRYP)>;
+};
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
index 26542690b578..627b29531a32 100644
--- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
@@ -1,11 +1,19 @@
-BCM2835 Random number generator
+BCM2835/6368 Random number generator
 
 Required properties:
 
-- compatible : should be "brcm,bcm2835-rng" or "brcm,bcm-nsp-rng" or
-  "brcm,bcm5301x-rng"
+- compatible : should be one of
+  "brcm,bcm2835-rng"
+  "brcm,bcm-nsp-rng"
+  "brcm,bcm5301x-rng" or
+  "brcm,bcm6368-rng"
 - reg : Specifies base physical address and size of the registers.
 
+Optional properties:
+
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "ipsec" as a clock name
+
 Example:
 
 rng {
@@ -17,3 +25,11 @@ rng@18033000 {
 	compatible = "brcm,bcm-nsp-rng";
 	reg = <0x18033000 0x14>;
 };
+
+random: rng@10004180 {
+	compatible = "brcm,bcm6368-rng";
+	reg = <0x10004180 0x14>;
+
+	clocks = <&periph_clk 18>;
+	clock-names = "ipsec";
+};
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt b/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
deleted file mode 100644
index 4b5ac600bfbd..000000000000
--- a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-BCM6368 Random number generator
-
-Required properties:
-
-- compatible : should be "brcm,bcm6368-rng"
-- reg : Specifies base physical address and size of the registers
-- clocks : phandle to clock-controller plus clock-specifier pair
-- clock-names : "ipsec" as a clock name
-
-Example:
-random: rng@10004180 {
-	compatible = "brcm,bcm6368-rng";
-	reg = <0x10004180 0x14>;
-
-	clocks = <&periph_clk 18>;
-	clock-names = "ipsec";
-};
diff --git a/MAINTAINERS b/MAINTAINERS
index 7872d430e7b1..e6aa3922a32b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11964,6 +11964,13 @@ S:	Maintained
 F:	drivers/crypto/exynos-rng.c
 F:	Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
 
+SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
+M:	Łukasz Stelmach <l.stelmach@samsung.com>
+L:	linux-samsung-soc@vger.kernel.org
+S:	Maintained
+F:	drivers/char/hw_random/exynos-trng.c
+F:	Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt
+
 SAMSUNG FRAMEBUFFER DRIVER
 M:	Jingoo Han <jingoohan1@gmail.com>
 L:	linux-fbdev@vger.kernel.org
@@ -12026,6 +12033,7 @@ F:	drivers/media/i2c/s5k5baf.c
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
 M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Vladimir Zapolskiy <vz@mleia.com>
+M:	Kamil Konieczny <k.konieczny@partner.samsung.com>
 L:	linux-crypto@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
 S:	Maintained
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 18768f330449..07e31941dc67 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -181,9 +181,8 @@ static int cbc_init(struct crypto_tfm *tfm)
 	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(ctx->enc_tfm))
-		return PTR_ERR(ctx->enc_tfm);
-	return 0;
+
+	return PTR_ERR_OR_ZERO(ctx->enc_tfm);
 }
 
 static void cbc_exit(struct crypto_tfm *tfm)
@@ -258,9 +257,8 @@ static int xts_init(struct crypto_tfm *tfm)
 	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(ctx->tweak_tfm))
-		return PTR_ERR(ctx->tweak_tfm);
-	return 0;
+
+	return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
 }
 
 static void xts_exit(struct crypto_tfm *tfm)
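Both hunks above are behavior-preserving: PTR_ERR_OR_ZERO() from <linux/err.h> folds the IS_ERR()/PTR_ERR()/return 0 tail into a single expression. As a sketch, the helper is equivalent to:

	#include <linux/err.h>

	/* What PTR_ERR_OR_ZERO(ptr) does, conceptually: */
	static inline int ptr_err_or_zero_sketch(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);	/* the errno encoded in the pointer */
		return 0;			/* valid pointer: success */
	}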
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 1b0e0e86ee9c..96e62ec105d0 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
 	.base.cra_name		= "crc32",
 	.base.cra_driver_name	= "crc32-arm-ce",
 	.base.cra_priority	= 200,
+	.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
 	.base.cra_blocksize	= 1,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
 	.base.cra_name		= "crc32c",
 	.base.cra_driver_name	= "crc32c-arm-ce",
 	.base.cra_priority	= 200,
+	.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
 	.base.cra_blocksize	= 1,
 	.base.cra_module	= THIS_MODULE,
 } };
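CRYPTO_ALG_OPTIONAL_KEY is the escape hatch from the key-enforcement change above: crc32 and crc32c define a setkey() (it sets the CRC seed) but must remain usable without one. Conceptually the core now tracks keyed-ness per transform; a sketch of the check, assuming the CRYPTO_TFM_NEED_KEY flag this series introduces:

	#include <linux/crypto.h>

	/*
	 * Sketch: the API sets CRYPTO_TFM_NEED_KEY at allocation time for
	 * algorithms that have a setkey() -- unless CRYPTO_ALG_OPTIONAL_KEY
	 * is set, as in the hunks above -- and refuses to operate until
	 * setkey() clears it.
	 */
	static inline int needs_key_check_sketch(struct crypto_tfm *tfm)
	{
		if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
			return -ENOKEY;	/* keyed algorithm used before setkey() */
		return 0;
	}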
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 70c517aa4501..285c36c7b408 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -29,6 +29,24 @@ config CRYPTO_SHA2_ARM64_CE
 	select CRYPTO_HASH
 	select CRYPTO_SHA256_ARM64
 
+config CRYPTO_SHA512_ARM64_CE
+	tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)"
+	depends on KERNEL_MODE_NEON
+	select CRYPTO_HASH
+	select CRYPTO_SHA512_ARM64
+
+config CRYPTO_SHA3_ARM64
+	tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)"
+	depends on KERNEL_MODE_NEON
+	select CRYPTO_HASH
+	select CRYPTO_SHA3
+
+config CRYPTO_SM3_ARM64_CE
+	tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
+	depends on KERNEL_MODE_NEON
+	select CRYPTO_HASH
+	select CRYPTO_SM3
+
 config CRYPTO_GHASH_ARM64_CE
 	tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
 	depends on KERNEL_MODE_NEON
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index b5edc5918c28..cee9b8d9830b 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -14,6 +14,15 @@ sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
 obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
 sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
 
+obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o
+sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
+sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
+sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
+
 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
 
@@ -24,7 +33,7 @@ obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
 crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
 
 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
-CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
 
 obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
 aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S
new file mode 100644
index 000000000000..8efdfdade393
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-core.S
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.arch	armv8-a+crypto
+
+ENTRY(__aes_ce_encrypt)
+	sub	w3, w3, #2
+	ld1	{v0.16b}, [x2]
+	ld1	{v1.4s}, [x0], #16
+	cmp	w3, #10
+	bmi	0f
+	bne	3f
+	mov	v3.16b, v1.16b
+	b	2f
+0:	mov	v2.16b, v1.16b
+	ld1	{v3.4s}, [x0], #16
+1:	aese	v0.16b, v2.16b
+	aesmc	v0.16b, v0.16b
+2:	ld1	{v1.4s}, [x0], #16
+	aese	v0.16b, v3.16b
+	aesmc	v0.16b, v0.16b
+3:	ld1	{v2.4s}, [x0], #16
+	subs	w3, w3, #3
+	aese	v0.16b, v1.16b
+	aesmc	v0.16b, v0.16b
+	ld1	{v3.4s}, [x0], #16
+	bpl	1b
+	aese	v0.16b, v2.16b
+	eor	v0.16b, v0.16b, v3.16b
+	st1	{v0.16b}, [x1]
+	ret
+ENDPROC(__aes_ce_encrypt)
+
+ENTRY(__aes_ce_decrypt)
+	sub	w3, w3, #2
+	ld1	{v0.16b}, [x2]
+	ld1	{v1.4s}, [x0], #16
+	cmp	w3, #10
+	bmi	0f
+	bne	3f
+	mov	v3.16b, v1.16b
+	b	2f
+0:	mov	v2.16b, v1.16b
+	ld1	{v3.4s}, [x0], #16
+1:	aesd	v0.16b, v2.16b
+	aesimc	v0.16b, v0.16b
+2:	ld1	{v1.4s}, [x0], #16
+	aesd	v0.16b, v3.16b
+	aesimc	v0.16b, v0.16b
+3:	ld1	{v2.4s}, [x0], #16
+	subs	w3, w3, #3
+	aesd	v0.16b, v1.16b
+	aesimc	v0.16b, v0.16b
+	ld1	{v3.4s}, [x0], #16
+	bpl	1b
+	aesd	v0.16b, v2.16b
+	eor	v0.16b, v0.16b, v3.16b
+	st1	{v0.16b}, [x1]
+	ret
+ENDPROC(__aes_ce_decrypt)
+
+/*
+ * __aes_ce_sub() - use the aese instruction to perform the AES sbox
+ *                  substitution on each byte in 'input'
+ */
+ENTRY(__aes_ce_sub)
+	dup	v1.4s, w0
+	movi	v0.16b, #0
+	aese	v0.16b, v1.16b
+	umov	w0, v0.s[0]
+	ret
+ENDPROC(__aes_ce_sub)
+
+ENTRY(__aes_ce_invert)
+	ld1	{v0.4s}, [x1]
+	aesimc	v1.16b, v0.16b
+	st1	{v1.4s}, [x0]
+	ret
+ENDPROC(__aes_ce_invert)
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-glue.c
index 6a75cd75ed11..e6b3227bbf57 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -29,6 +29,13 @@ struct aes_block {
 	u8 b[AES_BLOCK_SIZE];
 };
 
+asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
+asmlinkage u32 __aes_ce_sub(u32 l);
+asmlinkage void __aes_ce_invert(struct aes_block *out,
+				const struct aes_block *in);
+
 static int num_rounds(struct crypto_aes_ctx *ctx)
 {
 	/*
@@ -44,10 +51,6 @@ static int num_rounds(struct crypto_aes_ctx *ctx)
 static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct aes_block *out = (struct aes_block *)dst;
-	struct aes_block const *in = (struct aes_block *)src;
-	void *dummy0;
-	int dummy1;
 
 	if (!may_use_simd()) {
 		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
@@ -55,49 +58,13 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	}
 
 	kernel_neon_begin();
-
-	__asm__("	ld1	{v0.16b}, %[in]			;"
-		"	ld1	{v1.4s}, [%[key]], #16		;"
-		"	cmp	%w[rounds], #10			;"
-		"	bmi	0f				;"
-		"	bne	3f				;"
-		"	mov	v3.16b, v1.16b			;"
-		"	b	2f				;"
-		"0:	mov	v2.16b, v1.16b			;"
-		"	ld1	{v3.4s}, [%[key]], #16		;"
-		"1:	aese	v0.16b, v2.16b			;"
-		"	aesmc	v0.16b, v0.16b			;"
-		"2:	ld1	{v1.4s}, [%[key]], #16		;"
-		"	aese	v0.16b, v3.16b			;"
-		"	aesmc	v0.16b, v0.16b			;"
-		"3:	ld1	{v2.4s}, [%[key]], #16		;"
-		"	subs	%w[rounds], %w[rounds], #3	;"
-		"	aese	v0.16b, v1.16b			;"
-		"	aesmc	v0.16b, v0.16b			;"
-		"	ld1	{v3.4s}, [%[key]], #16		;"
-		"	bpl	1b				;"
-		"	aese	v0.16b, v2.16b			;"
-		"	eor	v0.16b, v0.16b, v3.16b		;"
-		"	st1	{v0.16b}, %[out]		;"
-
-	:	[out]		"=Q"(*out),
-		[key]		"=r"(dummy0),
-		[rounds]	"=r"(dummy1)
-	:	[in]		"Q"(*in),
-			"1"(ctx->key_enc),
-			"2"(num_rounds(ctx) - 2)
-	:	"cc");
-
+	__aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
 	kernel_neon_end();
 }
 
 static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct aes_block *out = (struct aes_block *)dst;
-	struct aes_block const *in = (struct aes_block *)src;
-	void *dummy0;
-	int dummy1;
 
 	if (!may_use_simd()) {
 		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
@@ -105,62 +72,10 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	}
 
 	kernel_neon_begin();
-
-	__asm__("	ld1	{v0.16b}, %[in]			;"
-		"	ld1	{v1.4s}, [%[key]], #16		;"
-		"	cmp	%w[rounds], #10			;"
-		"	bmi	0f				;"
-		"	bne	3f				;"
-		"	mov	v3.16b, v1.16b			;"
-		"	b	2f				;"
-		"0:	mov	v2.16b, v1.16b			;"
-		"	ld1	{v3.4s}, [%[key]], #16		;"
-		"1:	aesd	v0.16b, v2.16b			;"
-		"	aesimc	v0.16b, v0.16b			;"
-		"2:	ld1	{v1.4s}, [%[key]], #16		;"
-		"	aesd	v0.16b, v3.16b			;"
-		"	aesimc	v0.16b, v0.16b			;"
-		"3:	ld1	{v2.4s}, [%[key]], #16		;"
-		"	subs	%w[rounds], %w[rounds], #3	;"
-		"	aesd	v0.16b, v1.16b			;"
-		"	aesimc	v0.16b, v0.16b			;"
-		"	ld1	{v3.4s}, [%[key]], #16		;"
-		"	bpl	1b				;"
-		"	aesd	v0.16b, v2.16b			;"
-		"	eor	v0.16b, v0.16b, v3.16b		;"
-		"	st1	{v0.16b}, %[out]		;"
-
-	:	[out]		"=Q"(*out),
-		[key]		"=r"(dummy0),
-		[rounds]	"=r"(dummy1)
-	:	[in]		"Q"(*in),
-			"1"(ctx->key_dec),
-			"2"(num_rounds(ctx) - 2)
-	:	"cc");
-
+	__aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
 	kernel_neon_end();
 }
 
-/*
- * aes_sub() - use the aese instruction to perform the AES sbox substitution
- * on each byte in 'input'
- */
-static u32 aes_sub(u32 input)
-{
-	u32 ret;
-
-	__asm__("dup	v1.4s, %w[in]		;"
-		"movi	v0.16b, #0		;"
-		"aese	v0.16b, v1.16b		;"
-		"umov	%w[out], v0.4s[0]	;"
-
-	: [out]	"=r"(ret)
-	: [in]	"r"(input)
-	:	"v0","v1");
-
-	return ret;
-}
-
 int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		     unsigned int key_len)
 {
@@ -189,7 +104,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;
 
-		rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+		rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
 		rko[1] = rko[0] ^ rki[1];
 		rko[2] = rko[1] ^ rki[2];
 		rko[3] = rko[2] ^ rki[3];
@@ -202,7 +117,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		} else if (key_len == AES_KEYSIZE_256) {
 			if (i >= 6)
 				break;
-			rko[4] = aes_sub(rko[3]) ^ rki[4];
+			rko[4] = __aes_ce_sub(rko[3]) ^ rki[4];
 			rko[5] = rko[4] ^ rki[5];
 			rko[6] = rko[5] ^ rki[6];
 			rko[7] = rko[6] ^ rki[7];
@@ -221,13 +136,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 
 	key_dec[0] = key_enc[j];
 	for (i = 1, j--; j > 0; i++, j--)
-		__asm__("ld1	{v0.4s}, %[in]		;"
-			"aesimc	v1.16b, v0.16b	;"
-			"st1	{v1.4s}, %[out]	;"
-
-		: [out]	"=Q"(key_dec[i])
-		: [in]	"Q"(key_enc[j])
-		:	"v0","v1");
+		__aes_ce_invert(key_dec + i, key_enc + j);
 	key_dec[i] = key_enc[0];
 
 	kernel_neon_end();
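Moving the inline asm into aes-ce-core.S leaves the glue file with the usual arm64 SIMD discipline: fall back to the generic scalar code whenever NEON may not be used, otherwise bracket the accelerated call with kernel_neon_begin()/kernel_neon_end(). A sketch of that shape, using only symbols that appear in the diff above:

	#include <asm/neon.h>
	#include <asm/simd.h>

	static void encrypt_one_block_sketch(struct crypto_aes_ctx *ctx,
					     u8 *dst, const u8 *src, int rounds)
	{
		if (!may_use_simd()) {
			/* e.g. hard interrupt context: NEON is off limits */
			__aes_arm64_encrypt(ctx->key_enc, dst, src, rounds);
			return;
		}

		kernel_neon_begin();	/* claim the NEON register file */
		__aes_ce_encrypt(ctx->key_enc, dst, src, rounds);
		kernel_neon_end();	/* release it again */
	}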
diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S
index 6d2445d603cc..3a44eada2347 100644
--- a/arch/arm64/crypto/aes-cipher-core.S
+++ b/arch/arm64/crypto/aes-cipher-core.S
@@ -125,6 +125,16 @@ CPU_BE(	rev	w7, w7	)
 	ret
 	.endm
 
+ENTRY(__aes_arm64_encrypt)
+	do_crypt	fround, crypto_ft_tab, crypto_ft_tab + 1, 2
+ENDPROC(__aes_arm64_encrypt)
+
+	.align	5
+ENTRY(__aes_arm64_decrypt)
+	do_crypt	iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
+ENDPROC(__aes_arm64_decrypt)
+
+	.section	".rodata", "a"
 	.align	L1_CACHE_SHIFT
 	.type	__aes_arm64_inverse_sbox, %object
 __aes_arm64_inverse_sbox:
@@ -161,12 +171,3 @@ __aes_arm64_inverse_sbox:
 	.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
 	.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
 	.size	__aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox
-
-ENTRY(__aes_arm64_encrypt)
-	do_crypt	fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm64_encrypt)
-
-	.align	5
-ENTRY(__aes_arm64_decrypt)
-	do_crypt	iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
-ENDPROC(__aes_arm64_decrypt)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 998ba519a026..2fa850e86aa8 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -665,6 +665,7 @@ static int __init aes_init(void)
 
 unregister_simds:
 	aes_exit();
+	return err;
 unregister_ciphers:
 	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
 	return err;
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index f1e3aa2732f9..1c7b45b7268e 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -32,10 +32,10 @@
 
 	/* preload the entire Sbox */
 	.macro	prepare, sbox, shiftrows, temp
-	adr	\temp, \sbox
 	movi	v12.16b, #0x1b
-	ldr	q13, \shiftrows
-	ldr	q14, .Lror32by8
+	ldr_l	q13, \shiftrows, \temp
+	ldr_l	q14, .Lror32by8, \temp
+	adr_l	\temp, \sbox
 	ld1	{v16.16b-v19.16b}, [\temp], #64
 	ld1	{v20.16b-v23.16b}, [\temp], #64
 	ld1	{v24.16b-v27.16b}, [\temp], #64
@@ -272,7 +272,7 @@
 
 #include "aes-modes.S"
 
-	.text
+	.section	".rodata", "a"
 	.align	6
 .LForward_Sbox:
 	.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
index 18f5a8442276..16ed3c7ebd37 100644
--- a/arch/arm64/crypto/crc32-ce-core.S
+++ b/arch/arm64/crypto/crc32-ce-core.S
@@ -50,7 +50,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-	.text
+	.section	".rodata", "a"
 	.align	6
 	.cpu	generic+crypto+crc
 
@@ -115,12 +115,13 @@
  * uint crc32_pmull_le(unsigned char const *buffer,
  *                     size_t len, uint crc32)
  */
+	.text
 ENTRY(crc32_pmull_le)
-	adr	x3, .Lcrc32_constants
+	adr_l	x3, .Lcrc32_constants
 	b	0f
 
 ENTRY(crc32c_pmull_le)
-	adr	x3, .Lcrc32c_constants
+	adr_l	x3, .Lcrc32c_constants
 
 0:	bic	LEN, LEN, #15
 	ld1	{v1.16b-v4.16b}, [BUF], #0x40
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
index 624f4137918c..34b4e3d46aab 100644
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ b/arch/arm64/crypto/crc32-ce-glue.c
@@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
 	.base.cra_name		= "crc32",
 	.base.cra_driver_name	= "crc32-arm64-ce",
 	.base.cra_priority	= 200,
+	.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
 	.base.cra_blocksize	= 1,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
 	.base.cra_name		= "crc32c",
 	.base.cra_driver_name	= "crc32c-arm64-ce",
 	.base.cra_priority	= 200,
+	.base.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
 	.base.cra_blocksize	= 1,
 	.base.cra_module	= THIS_MODULE,
 } };
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index d5b5a8c038c8..f179c01bd55c 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -128,7 +128,7 @@ CPU_LE(	ext	v7.16b, v7.16b, v7.16b, #8	)
 	// XOR the initial_crc value
 	eor	v0.16b, v0.16b, v10.16b
 
-	ldr	q10, rk3	// xmm10 has rk3 and rk4
+	ldr_l	q10, rk3, x8	// xmm10 has rk3 and rk4
 				// type of pmull instruction
 				// will determine which constant to use
 
@@ -184,13 +184,13 @@ CPU_LE(	ext	v12.16b, v12.16b, v12.16b, #8	)
 	// fold the 8 vector registers to 1 vector register with different
 	// constants
 
-	ldr	q10, rk9
+	ldr_l	q10, rk9, x8
 
 	.macro	fold16, reg, rk
 	pmull	v8.1q, \reg\().1d, v10.1d
 	pmull2	\reg\().1q, \reg\().2d, v10.2d
 	.ifnb	\rk
-	ldr	q10, \rk
+	ldr_l	q10, \rk, x8
 	.endif
 	eor	v7.16b, v7.16b, v8.16b
 	eor	v7.16b, v7.16b, \reg\().16b
@@ -251,7 +251,7 @@ CPU_LE(	ext	v1.16b, v1.16b, v1.16b, #8	)
 
 	// get rid of the extra data that was loaded before
 	// load the shift constant
-	adr	x4, tbl_shf_table + 16
+	adr_l	x4, tbl_shf_table + 16
 	sub	x4, x4, arg3
 	ld1	{v0.16b}, [x4]
 
@@ -275,7 +275,7 @@
 
 _128_done:
 	// compute crc of a 128-bit value
-	ldr	q10, rk5	// rk5 and rk6 in xmm10
+	ldr_l	q10, rk5, x8	// rk5 and rk6 in xmm10
 
 	// 64b fold
 	ext	v0.16b, vzr.16b, v7.16b, #8
@@ -291,7 +291,7 @@ _128_done:
 
 	// barrett reduction
 _barrett:
-	ldr	q10, rk7
+	ldr_l	q10, rk7, x8
 	mov	v0.d[0], v7.d[1]
 
 	pmull	v0.1q, v0.1d, v10.1d
@@ -321,7 +321,7 @@ CPU_LE(	ext	v7.16b, v7.16b, v7.16b, #8	)
 	b.eq	_128_done		// exactly 16 left
 	b.lt	_less_than_16_left
 
-	ldr	q10, rk1		// rk1 and rk2 in xmm10
+	ldr_l	q10, rk1, x8		// rk1 and rk2 in xmm10
 
 	// update the counter. subtract 32 instead of 16 to save one
 	// instruction from the loop
@@ -333,7 +333,7 @@ CPU_LE(	ext	v7.16b, v7.16b, v7.16b, #8	)
 
 _less_than_16_left:
 	// shl r9, 4
-	adr	x0, tbl_shf_table + 16
+	adr_l	x0, tbl_shf_table + 16
 	sub	x0, x0, arg3
 	ld1	{v0.16b}, [x0]
 	movi	v9.16b, #0x80
@@ -345,6 +345,7 @@ ENDPROC(crc_t10dif_pmull)
 // precomputed constants
 // these constants are precomputed from the poly:
 //	0x8bb70000 (0x8bb7 scaled to 32 bits)
+	.section	".rodata", "a"
 	.align	4
 // Q = 0x18BB70000
 // rk1 = 2^(32*3) mod Q << 32
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 8550408735a0..46049850727d 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -58,12 +58,11 @@
 	sha1su1	v\s0\().4s, v\s3\().4s
 	.endm
 
-	/*
-	 * The SHA1 round constants
-	 */
-	.align	4
-.Lsha1_rcon:
-	.word	0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+	.macro	loadrc, k, val, tmp
+	movz	\tmp, :abs_g0_nc:\val
+	movk	\tmp, :abs_g1:\val
+	dup	\k, \tmp
+	.endm
 
 /*
  * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
@@ -71,11 +70,10 @@
  */
 ENTRY(sha1_ce_transform)
 	/* load round constants */
-	adr	x6, .Lsha1_rcon
-	ld1r	{k0.4s}, [x6], #4
-	ld1r	{k1.4s}, [x6], #4
-	ld1r	{k2.4s}, [x6], #4
-	ld1r	{k3.4s}, [x6]
+	loadrc	k0.4s, 0x5a827999, w6
+	loadrc	k1.4s, 0x6ed9eba1, w6
+	loadrc	k2.4s, 0x8f1bbcdc, w6
+	loadrc	k3.4s, 0xca62c1d6, w6
 
 	/* load state */
 	ld1	{dgav.4s}, [x0]
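The new loadrc macro removes the last literal-pool load from this file: movz with :abs_g0_nc: materializes bits [15:0] of the constant, movk with :abs_g1: patches in bits [31:16], and dup broadcasts the result across the vector. In C terms (a sketch; the relocation specifiers are standard AArch64):

	#include <linux/types.h>

	/* What the movz/movk pair computes before dup splats it: */
	static inline u32 loadrc_sketch(u32 val)
	{
		u32 tmp;

		tmp = val & 0xffff;		/* movz: low 16 bits, rest zeroed */
		tmp |= val & 0xffff0000;	/* movk: insert the high 16 bits  */
		return tmp;			/* == val, with no literal load   */
	}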
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 679c6c002f4f..4c3c89b812ce 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -53,6 +53,7 @@
 	/*
 	 * The SHA-256 round constants
 	 */
+	.section	".rodata", "a"
 	.align	4
 .Lsha2_rcon:
 	.word	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
@@ -76,9 +77,10 @@
  * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
  *			  int blocks)
  */
+	.text
 ENTRY(sha2_ce_transform)
 	/* load round constants */
-	adr	x8, .Lsha2_rcon
+	adr_l	x8, .Lsha2_rcon
 	ld1	{ v0.4s- v3.4s}, [x8], #64
 	ld1	{ v4.4s- v7.4s}, [x8], #64
 	ld1	{ v8.4s-v11.4s}, [x8], #64
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
new file mode 100644
index 000000000000..332ad7530690
--- /dev/null
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.irp	b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+	.set	.Lv\b\().2d, \b
+	.set	.Lv\b\().16b, \b
+	.endr
+
+	/*
+	 * ARMv8.2 Crypto Extensions instructions
+	 */
+	.macro	eor3, rd, rn, rm, ra
+	.inst	0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+	.endm
+
+	.macro	rax1, rd, rn, rm
+	.inst	0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+	.endm
+
+	.macro	bcax, rd, rn, rm, ra
+	.inst	0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+	.endm
+
+	.macro	xar, rd, rn, rm, imm6
+	.inst	0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16)
+	.endm
+
+	/*
+	 * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
+	 */
+	.text
+ENTRY(sha3_ce_transform)
+	/* load state */
+	add	x8, x0, #32
+	ld1	{ v0.1d- v3.1d}, [x0]
+	ld1	{ v4.1d- v7.1d}, [x8], #32
+	ld1	{ v8.1d-v11.1d}, [x8], #32
+	ld1	{v12.1d-v15.1d}, [x8], #32
+	ld1	{v16.1d-v19.1d}, [x8], #32
+	ld1	{v20.1d-v23.1d}, [x8], #32
+	ld1	{v24.1d}, [x8]
+
+0:	sub	w2, w2, #1
+	mov	w8, #24
+	adr_l	x9, .Lsha3_rcon
+
+	/* load input */
+	ld1	{v25.8b-v28.8b}, [x1], #32
+	ld1	{v29.8b-v31.8b}, [x1], #24
+	eor	v0.8b, v0.8b, v25.8b
+	eor	v1.8b, v1.8b, v26.8b
+	eor	v2.8b, v2.8b, v27.8b
+	eor	v3.8b, v3.8b, v28.8b
+	eor	v4.8b, v4.8b, v29.8b
+	eor	v5.8b, v5.8b, v30.8b
+	eor	v6.8b, v6.8b, v31.8b
+
+	tbnz	x3, #6, 2f		// SHA3-512
+
+	ld1	{v25.8b-v28.8b}, [x1], #32
+	ld1	{v29.8b-v30.8b}, [x1], #16
+	eor	v7.8b, v7.8b, v25.8b
+	eor	v8.8b, v8.8b, v26.8b
+	eor	v9.8b, v9.8b, v27.8b
+	eor	v10.8b, v10.8b, v28.8b
+	eor	v11.8b, v11.8b, v29.8b
+	eor	v12.8b, v12.8b, v30.8b
+
+	tbnz	x3, #4, 1f		// SHA3-384 or SHA3-224
+
+	// SHA3-256
+	ld1	{v25.8b-v28.8b}, [x1], #32
+	eor	v13.8b, v13.8b, v25.8b
+	eor	v14.8b, v14.8b, v26.8b
+	eor	v15.8b, v15.8b, v27.8b
+	eor	v16.8b, v16.8b, v28.8b
+	b	3f
+
+1:	tbz	x3, #2, 3f		// bit 2 cleared? SHA-384
+
+	// SHA3-224
+	ld1	{v25.8b-v28.8b}, [x1], #32
+	ld1	{v29.8b}, [x1], #8
+	eor	v13.8b, v13.8b, v25.8b
+	eor	v14.8b, v14.8b, v26.8b
+	eor	v15.8b, v15.8b, v27.8b
+	eor	v16.8b, v16.8b, v28.8b
+	eor	v17.8b, v17.8b, v29.8b
+	b	3f
+
+	// SHA3-512
+2:	ld1	{v25.8b-v26.8b}, [x1], #16
+	eor	v7.8b, v7.8b, v25.8b
+	eor	v8.8b, v8.8b, v26.8b
+
+3:	sub	w8, w8, #1
+
+	eor3	v29.16b, v4.16b, v9.16b, v14.16b
+	eor3	v26.16b, v1.16b, v6.16b, v11.16b
+	eor3	v28.16b, v3.16b, v8.16b, v13.16b
+	eor3	v25.16b, v0.16b, v5.16b, v10.16b
+	eor3	v27.16b, v2.16b, v7.16b, v12.16b
+	eor3	v29.16b, v29.16b, v19.16b, v24.16b
+	eor3	v26.16b, v26.16b, v16.16b, v21.16b
+	eor3	v28.16b, v28.16b, v18.16b, v23.16b
+	eor3	v25.16b, v25.16b, v15.16b, v20.16b
+	eor3	v27.16b, v27.16b, v17.16b, v22.16b
+
+	rax1	v30.2d, v29.2d, v26.2d	// bc[0]
+	rax1	v26.2d, v26.2d, v28.2d	// bc[2]
+	rax1	v28.2d, v28.2d, v25.2d	// bc[4]
+	rax1	v25.2d, v25.2d, v27.2d	// bc[1]
+	rax1	v27.2d, v27.2d, v29.2d	// bc[3]
+
+	eor	v0.16b, v0.16b, v30.16b
+	xar	v29.2d, v1.2d, v25.2d, (64 - 1)
+	xar	v1.2d, v6.2d, v25.2d, (64 - 44)
+	xar	v6.2d, v9.2d, v28.2d, (64 - 20)
+	xar	v9.2d, v22.2d, v26.2d, (64 - 61)
+	xar	v22.2d, v14.2d, v28.2d, (64 - 39)
+	xar	v14.2d, v20.2d, v30.2d, (64 - 18)
+	xar	v31.2d, v2.2d, v26.2d, (64 - 62)
+	xar	v2.2d, v12.2d, v26.2d, (64 - 43)
+	xar	v12.2d, v13.2d, v27.2d, (64 - 25)
+	xar	v13.2d, v19.2d, v28.2d, (64 - 8)
+	xar	v19.2d, v23.2d, v27.2d, (64 - 56)
+	xar	v23.2d, v15.2d, v30.2d, (64 - 41)
+	xar	v15.2d, v4.2d, v28.2d, (64 - 27)
+	xar	v28.2d, v24.2d, v28.2d, (64 - 14)
+	xar	v24.2d, v21.2d, v25.2d, (64 - 2)
+	xar	v8.2d, v8.2d, v27.2d, (64 - 55)
+	xar	v4.2d, v16.2d, v25.2d, (64 - 45)
+	xar	v16.2d, v5.2d, v30.2d, (64 - 36)
+	xar	v5.2d, v3.2d, v27.2d, (64 - 28)
+	xar	v27.2d, v18.2d, v27.2d, (64 - 21)
+	xar	v3.2d, v17.2d, v26.2d, (64 - 15)
+	xar	v25.2d, v11.2d, v25.2d, (64 - 10)
+	xar	v26.2d, v7.2d, v26.2d, (64 - 6)
+	xar	v30.2d, v10.2d, v30.2d, (64 - 3)
+
+	bcax	v20.16b, v31.16b, v22.16b, v8.16b
+	bcax	v21.16b, v8.16b, v23.16b, v22.16b
+	bcax	v22.16b, v22.16b, v24.16b, v23.16b
+	bcax	v23.16b, v23.16b, v31.16b, v24.16b
+	bcax	v24.16b, v24.16b, v8.16b, v31.16b
+
+	ld1r	{v31.2d}, [x9], #8
+
+	bcax	v17.16b, v25.16b, v19.16b, v3.16b
+	bcax	v18.16b, v3.16b, v15.16b, v19.16b
+	bcax	v19.16b, v19.16b, v16.16b, v15.16b
+	bcax	v15.16b, v15.16b, v25.16b, v16.16b
+	bcax	v16.16b, v16.16b, v3.16b, v25.16b
+
+	bcax	v10.16b, v29.16b, v12.16b, v26.16b
+	bcax	v11.16b, v26.16b, v13.16b, v12.16b
+	bcax	v12.16b, v12.16b, v14.16b, v13.16b
+	bcax	v13.16b, v13.16b, v29.16b, v14.16b
+	bcax	v14.16b, v14.16b, v26.16b, v29.16b
+
+	bcax	v7.16b, v30.16b, v9.16b, v4.16b
+	bcax	v8.16b, v4.16b, v5.16b, v9.16b
+	bcax	v9.16b, v9.16b, v6.16b, v5.16b
+	bcax	v5.16b, v5.16b, v30.16b, v6.16b
+	bcax	v6.16b, v6.16b, v4.16b, v30.16b
+
+	bcax	v3.16b, v27.16b, v0.16b, v28.16b
+	bcax	v4.16b, v28.16b, v1.16b, v0.16b
+	bcax	v0.16b, v0.16b, v2.16b, v1.16b
+	bcax	v1.16b, v1.16b, v27.16b, v2.16b
+	bcax	v2.16b, v2.16b, v28.16b, v27.16b
+
+	eor	v0.16b, v0.16b, v31.16b
+
+	cbnz	w8, 3b
+	cbnz	w2, 0b
+
+	/* save state */
+	st1	{ v0.1d- v3.1d}, [x0], #32
+	st1	{ v4.1d- v7.1d}, [x0], #32
+	st1	{ v8.1d-v11.1d}, [x0], #32
+	st1	{v12.1d-v15.1d}, [x0], #32
+	st1	{v16.1d-v19.1d}, [x0], #32
+	st1	{v20.1d-v23.1d}, [x0], #32
+	st1	{v24.1d}, [x0]
+	ret
+ENDPROC(sha3_ce_transform)
+
+	.section	".rodata", "a"
+	.align	8
+.Lsha3_rcon:
+	.quad	0x0000000000000001, 0x0000000000008082, 0x800000000000808a
+	.quad	0x8000000080008000, 0x000000000000808b, 0x0000000080000001
+	.quad	0x8000000080008081, 0x8000000000008009, 0x000000000000008a
+	.quad	0x0000000000000088, 0x0000000080008009, 0x000000008000000a
+	.quad	0x000000008000808b, 0x800000000000008b, 0x8000000000008089
+	.quad	0x8000000000008003, 0x8000000000008002, 0x8000000000000080
+	.quad	0x000000000000800a, 0x800000008000000a, 0x8000000080008081
+	.quad	0x8000000000008080, 0x0000000080000001, 0x8000000080008008
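The .inst macros above hand-encode the four ARMv8.2-SHA3 instructions, which assemblers of the time did not yet know. Their per-64-bit-lane semantics can be modelled in C (a sketch; rol64/ror64 come from <linux/bitops.h>):

	#include <linux/bitops.h>

	/* EOR3: three-way XOR (Keccak theta column parity). */
	static inline u64 eor3_sketch(u64 n, u64 m, u64 a)
	{
		return n ^ m ^ a;
	}

	/* RAX1: XOR with a left-rotated operand (theta D values). */
	static inline u64 rax1_sketch(u64 n, u64 m)
	{
		return n ^ rol64(m, 1);
	}

	/* BCAX: bit clear and XOR (the chi step). */
	static inline u64 bcax_sketch(u64 n, u64 m, u64 a)
	{
		return n ^ (m & ~a);
	}

	/* XAR: XOR then rotate right (theta XOR fused with the rho rotation). */
	static inline u64 xar_sketch(u64 n, u64 m, unsigned int imm6)
	{
		return ror64(n ^ m, imm6);
	}

This is why the rho rotate-left amounts appear as (64 - r) in the xar operands above: a right rotation by 64 - r equals a left rotation by r.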
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
new file mode 100644
index 000000000000..da8222e528bd
--- /dev/null
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha3-ce-glue.c - core SHA-3 transform using v8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+				  int md_len);
+
+static int sha3_update(struct shash_desc *desc, const u8 *data,
+		       unsigned int len)
+{
+	struct sha3_state *sctx = shash_desc_ctx(desc);
+	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+
+	if (!may_use_simd())
+		return crypto_sha3_update(desc, data, len);
+
+	if ((sctx->partial + len) >= sctx->rsiz) {
+		int blocks;
+
+		if (sctx->partial) {
+			int p = sctx->rsiz - sctx->partial;
+
+			memcpy(sctx->buf + sctx->partial, data, p);
+			kernel_neon_begin();
+			sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+			kernel_neon_end();
+
+			data += p;
+			len -= p;
+			sctx->partial = 0;
+		}
+
+		blocks = len / sctx->rsiz;
+		len %= sctx->rsiz;
+
+		if (blocks) {
+			kernel_neon_begin();
+			sha3_ce_transform(sctx->st, data, blocks, digest_size);
+			kernel_neon_end();
+			data += blocks * sctx->rsiz;
+		}
+	}
+
+	if (len) {
+		memcpy(sctx->buf + sctx->partial, data, len);
+		sctx->partial += len;
+	}
+	return 0;
+}
+
+static int sha3_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha3_state *sctx = shash_desc_ctx(desc);
+	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+	__le64 *digest = (__le64 *)out;
+	int i;
+
+	if (!may_use_simd())
+		return crypto_sha3_final(desc, out);
+
+	sctx->buf[sctx->partial++] = 0x06;
+	memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
+	sctx->buf[sctx->rsiz - 1] |= 0x80;
+
+	kernel_neon_begin();
+	sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+	kernel_neon_end();
+
+	for (i = 0; i < digest_size / 8; i++)
+		put_unaligned_le64(sctx->st[i], digest++);
+
+	if (digest_size & 4)
+		put_unaligned_le32(sctx->st[i], (__le32 *)digest);
+
+	*sctx = (struct sha3_state){};
+	return 0;
+}
+
+static struct shash_alg algs[] = { {
+	.digestsize		= SHA3_224_DIGEST_SIZE,
+	.init			= crypto_sha3_init,
+	.update			= sha3_update,
+	.final			= sha3_final,
+	.descsize		= sizeof(struct sha3_state),
+	.base.cra_name		= "sha3-224",
+	.base.cra_driver_name	= "sha3-224-ce",
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA3_224_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+	.base.cra_priority	= 200,
+}, {
+	.digestsize		= SHA3_256_DIGEST_SIZE,
+	.init			= crypto_sha3_init,
+	.update			= sha3_update,
+	.final			= sha3_final,
+	.descsize		= sizeof(struct sha3_state),
+	.base.cra_name		= "sha3-256",
+	.base.cra_driver_name	= "sha3-256-ce",
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA3_256_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+	.base.cra_priority	= 200,
+}, {
+	.digestsize		= SHA3_384_DIGEST_SIZE,
+	.init			= crypto_sha3_init,
+	.update			= sha3_update,
+	.final			= sha3_final,
+	.descsize		= sizeof(struct sha3_state),
+	.base.cra_name		= "sha3-384",
+	.base.cra_driver_name	= "sha3-384-ce",
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA3_384_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+	.base.cra_priority	= 200,
+}, {
+	.digestsize		= SHA3_512_DIGEST_SIZE,
+	.init			= crypto_sha3_init,
+	.update			= sha3_update,
+	.final			= sha3_final,
+	.descsize		= sizeof(struct sha3_state),
+	.base.cra_name		= "sha3-512",
+	.base.cra_driver_name	= "sha3-512-ce",
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA3_512_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+	.base.cra_priority	= 200,
+} };
+
+static int __init sha3_neon_mod_init(void)
+{
+	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha3_neon_mod_fini(void)
+{
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA3, sha3_neon_mod_init);
+module_exit(sha3_neon_mod_fini);
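sha3_final() above open-codes the FIPS 202 padding: 0x06 is the two-bit SHA-3 domain suffix (01) together with the first bit of pad10*1, and 0x80 sets the final pad bit in the last byte of the rate block. The same step in isolation (a sketch; buf/rsiz/partial mirror the struct sha3_state fields):

	#include <linux/string.h>
	#include <linux/types.h>

	/* SHA-3 "pad10*1" with the 01 domain suffix, as done in sha3_final(). */
	static void sha3_pad_sketch(u8 *buf, unsigned int rsiz, unsigned int partial)
	{
		buf[partial++] = 0x06;				/* 01 || first 1 bit  */
		memset(buf + partial, 0, rsiz - partial);	/* the 0* filler      */
		buf[rsiz - 1] |= 0x80;				/* trailing 1 pad bit */
	}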
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S new file mode 100644 index 000000000000..7f3bca5c59a2 --- /dev/null +++ b/arch/arm64/crypto/sha512-ce-core.S | |||
@@ -0,0 +1,204 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions | ||
4 | * | ||
5 | * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/linkage.h> | ||
13 | #include <asm/assembler.h> | ||
14 | |||
15 | .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 | ||
16 | .set .Lq\b, \b | ||
17 | .set .Lv\b\().2d, \b | ||
18 | .endr | ||
19 | |||
20 | .macro sha512h, rd, rn, rm | ||
21 | .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) | ||
22 | .endm | ||
23 | |||
24 | .macro sha512h2, rd, rn, rm | ||
25 | .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) | ||
26 | .endm | ||
27 | |||
28 | .macro sha512su0, rd, rn | ||
29 | .inst 0xcec08000 | .L\rd | (.L\rn << 5) | ||
30 | .endm | ||
31 | |||
32 | .macro sha512su1, rd, rn, rm | ||
33 | .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16) | ||
34 | .endm | ||
35 | |||
36 | /* | ||
37 | * The SHA-512 round constants | ||
38 | */ | ||
39 | .section ".rodata", "a" | ||
40 | .align 4 | ||
41 | .Lsha512_rcon: | ||
42 | .quad 0x428a2f98d728ae22, 0x7137449123ef65cd | ||
43 | .quad 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc | ||
44 | .quad 0x3956c25bf348b538, 0x59f111f1b605d019 | ||
45 | .quad 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 | ||
46 | .quad 0xd807aa98a3030242, 0x12835b0145706fbe | ||
47 | .quad 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 | ||
48 | .quad 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 | ||
49 | .quad 0x9bdc06a725c71235, 0xc19bf174cf692694 | ||
50 | .quad 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 | ||
51 | .quad 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 | ||
52 | .quad 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 | ||
53 | .quad 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 | ||
54 | .quad 0x983e5152ee66dfab, 0xa831c66d2db43210 | ||
55 | .quad 0xb00327c898fb213f, 0xbf597fc7beef0ee4 | ||
56 | .quad 0xc6e00bf33da88fc2, 0xd5a79147930aa725 | ||
57 | .quad 0x06ca6351e003826f, 0x142929670a0e6e70 | ||
58 | .quad 0x27b70a8546d22ffc, 0x2e1b21385c26c926 | ||
59 | .quad 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df | ||
60 | .quad 0x650a73548baf63de, 0x766a0abb3c77b2a8 | ||
61 | .quad 0x81c2c92e47edaee6, 0x92722c851482353b | ||
62 | .quad 0xa2bfe8a14cf10364, 0xa81a664bbc423001 | ||
63 | .quad 0xc24b8b70d0f89791, 0xc76c51a30654be30 | ||
64 | .quad 0xd192e819d6ef5218, 0xd69906245565a910 | ||
65 | .quad 0xf40e35855771202a, 0x106aa07032bbd1b8 | ||
66 | .quad 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 | ||
67 | .quad 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 | ||
68 | .quad 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb | ||
69 | .quad 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 | ||
70 | .quad 0x748f82ee5defb2fc, 0x78a5636f43172f60 | ||
71 | .quad 0x84c87814a1f0ab72, 0x8cc702081a6439ec | ||
72 | .quad 0x90befffa23631e28, 0xa4506cebde82bde9 | ||
73 | .quad 0xbef9a3f7b2c67915, 0xc67178f2e372532b | ||
74 | .quad 0xca273eceea26619c, 0xd186b8c721c0c207 | ||
75 | .quad 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 | ||
76 | .quad 0x06f067aa72176fba, 0x0a637dc5a2c898a6 | ||
77 | .quad 0x113f9804bef90dae, 0x1b710b35131c471b | ||
78 | .quad 0x28db77f523047d84, 0x32caab7b40c72493 | ||
79 | .quad 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c | ||
80 | .quad 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a | ||
81 | .quad 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 | ||
82 | |||
83 | .macro dround, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4 | ||
84 | .ifnb \rc1 | ||
85 | ld1 {v\rc1\().2d}, [x4], #16 | ||
86 | .endif | ||
87 | add v5.2d, v\rc0\().2d, v\in0\().2d | ||
88 | ext v6.16b, v\i2\().16b, v\i3\().16b, #8 | ||
89 | ext v5.16b, v5.16b, v5.16b, #8 | ||
90 | ext v7.16b, v\i1\().16b, v\i2\().16b, #8 | ||
91 | add v\i3\().2d, v\i3\().2d, v5.2d | ||
92 | .ifnb \in1 | ||
93 | ext v5.16b, v\in3\().16b, v\in4\().16b, #8 | ||
94 | sha512su0 v\in0\().2d, v\in1\().2d | ||
95 | .endif | ||
96 | sha512h q\i3, q6, v7.2d | ||
97 | .ifnb \in1 | ||
98 | sha512su1 v\in0\().2d, v\in2\().2d, v5.2d | ||
99 | .endif | ||
100 | add v\i4\().2d, v\i1\().2d, v\i3\().2d | ||
101 | sha512h2 q\i3, q\i1, v\i0\().2d | ||
102 | .endm | ||
103 | |||
104 | /* | ||
105 | * void sha512_ce_transform(struct sha512_state *sst, u8 const *src, | ||
106 | * int blocks) | ||
107 | */ | ||
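	/* AAPCS64 argument registers: x0 = sst, x1 = src, w2 = blocks */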
108 | .text | ||
109 | ENTRY(sha512_ce_transform) | ||
110 | /* load state */ | ||
111 | ld1 {v8.2d-v11.2d}, [x0] | ||
112 | |||
113 | /* load first 4 round constants */ | ||
114 | adr_l x3, .Lsha512_rcon | ||
115 | ld1 {v20.2d-v23.2d}, [x3], #64 | ||
116 | |||
117 | /* load input */ | ||
118 | 0: ld1 {v12.2d-v15.2d}, [x1], #64 | ||
119 | ld1 {v16.2d-v19.2d}, [x1], #64 | ||
120 | sub w2, w2, #1 | ||
121 | |||
122 | CPU_LE( rev64 v12.16b, v12.16b ) | ||
123 | CPU_LE( rev64 v13.16b, v13.16b ) | ||
124 | CPU_LE( rev64 v14.16b, v14.16b ) | ||
125 | CPU_LE( rev64 v15.16b, v15.16b ) | ||
126 | CPU_LE( rev64 v16.16b, v16.16b ) | ||
127 | CPU_LE( rev64 v17.16b, v17.16b ) | ||
128 | CPU_LE( rev64 v18.16b, v18.16b ) | ||
129 | CPU_LE( rev64 v19.16b, v19.16b ) | ||
130 | |||
131 | mov x4, x3 // rc pointer | ||
132 | |||
133 | mov v0.16b, v8.16b | ||
134 | mov v1.16b, v9.16b | ||
135 | mov v2.16b, v10.16b | ||
136 | mov v3.16b, v11.16b | ||
137 | |||
138 | // v0 ab cd -- ef gh ab | ||
139 | // v1 cd -- ef gh ab cd | ||
140 | // v2 ef gh ab cd -- ef | ||
141 | // v3 gh ab cd -- ef gh | ||
142 | // v4 -- ef gh ab cd -- | ||
143 | |||
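	// Each dround performs two SHA-512 rounds (sha512h advances a pair
	// of rounds per 128-bit round-constant load) and the invocations
	// rotate which of v0-v4 holds each working variable instead of
	// moving data, so the forty calls below cover all eighty rounds.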
144 | dround 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17 | ||
145 | dround 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18 | ||
146 | dround 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19 | ||
147 | dround 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12 | ||
148 | dround 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13 | ||
149 | |||
150 | dround 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14 | ||
151 | dround 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15 | ||
152 | dround 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16 | ||
153 | dround 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17 | ||
154 | dround 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18 | ||
155 | |||
156 | dround 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19 | ||
157 | dround 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12 | ||
158 | dround 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13 | ||
159 | dround 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14 | ||
160 | dround 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15 | ||
161 | |||
162 | dround 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16 | ||
163 | dround 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17 | ||
164 | dround 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18 | ||
165 | dround 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19 | ||
166 | dround 1, 4, 3, 0, 2, 31, 27, 15, 16, 14, 19, 12 | ||
167 | |||
168 | dround 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13 | ||
169 | dround 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14 | ||
170 | dround 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15 | ||
171 | dround 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16 | ||
172 | dround 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17 | ||
173 | |||
174 | dround 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18 | ||
175 | dround 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19 | ||
176 | dround 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12 | ||
177 | dround 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13 | ||
178 | dround 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14 | ||
179 | |||
180 | dround 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15 | ||
181 | dround 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16 | ||
182 | dround 2, 3, 1, 4, 0, 28, 24, 12 | ||
183 | dround 4, 2, 0, 1, 3, 29, 25, 13 | ||
184 | dround 1, 4, 3, 0, 2, 30, 26, 14 | ||
185 | |||
186 | dround 0, 1, 2, 3, 4, 31, 27, 15 | ||
187 | dround 3, 0, 4, 2, 1, 24, , 16 | ||
188 | dround 2, 3, 1, 4, 0, 25, , 17 | ||
189 | dround 4, 2, 0, 1, 3, 26, , 18 | ||
190 | dround 1, 4, 3, 0, 2, 27, , 19 | ||
191 | |||
192 | /* update state */ | ||
193 | add v8.2d, v8.2d, v0.2d | ||
194 | add v9.2d, v9.2d, v1.2d | ||
195 | add v10.2d, v10.2d, v2.2d | ||
196 | add v11.2d, v11.2d, v3.2d | ||
197 | |||
198 | /* handled all input blocks? */ | ||
199 | cbnz w2, 0b | ||
200 | |||
201 | /* store new state */ | ||
202 | 3: st1 {v8.2d-v11.2d}, [x0] | ||
203 | ret | ||
204 | ENDPROC(sha512_ce_transform) | ||
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c new file mode 100644 index 000000000000..a77c8632a589 --- /dev/null +++ b/arch/arm64/crypto/sha512-ce-glue.c | |||
@@ -0,0 +1,119 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * sha512-ce-glue.c - SHA-384/SHA-512 using ARMv8 Crypto Extensions | ||
4 | * | ||
5 | * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <asm/neon.h> | ||
13 | #include <asm/simd.h> | ||
14 | #include <asm/unaligned.h> | ||
15 | #include <crypto/internal/hash.h> | ||
16 | #include <crypto/sha.h> | ||
17 | #include <crypto/sha512_base.h> | ||
18 | #include <linux/cpufeature.h> | ||
19 | #include <linux/crypto.h> | ||
20 | #include <linux/module.h> | ||
21 | |||
22 | MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions"); | ||
23 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
24 | MODULE_LICENSE("GPL v2"); | ||
25 | |||
26 | asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src, | ||
27 | int blocks); | ||
28 | |||
29 | asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks); | ||
30 | |||
31 | static int sha512_ce_update(struct shash_desc *desc, const u8 *data, | ||
32 | unsigned int len) | ||
33 | { | ||
34 | if (!may_use_simd()) | ||
35 | return sha512_base_do_update(desc, data, len, | ||
36 | (sha512_block_fn *)sha512_block_data_order); | ||
37 | |||
38 | kernel_neon_begin(); | ||
39 | sha512_base_do_update(desc, data, len, | ||
40 | (sha512_block_fn *)sha512_ce_transform); | ||
41 | kernel_neon_end(); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static int sha512_ce_finup(struct shash_desc *desc, const u8 *data, | ||
47 | unsigned int len, u8 *out) | ||
48 | { | ||
49 | if (!may_use_simd()) { | ||
50 | if (len) | ||
51 | sha512_base_do_update(desc, data, len, | ||
52 | (sha512_block_fn *)sha512_block_data_order); | ||
53 | sha512_base_do_finalize(desc, | ||
54 | (sha512_block_fn *)sha512_block_data_order); | ||
55 | return sha512_base_finish(desc, out); | ||
56 | } | ||
57 | |||
58 | kernel_neon_begin(); | ||
59 | sha512_base_do_update(desc, data, len, | ||
60 | (sha512_block_fn *)sha512_ce_transform); | ||
61 | sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform); | ||
62 | kernel_neon_end(); | ||
63 | return sha512_base_finish(desc, out); | ||
64 | } | ||
65 | |||
66 | static int sha512_ce_final(struct shash_desc *desc, u8 *out) | ||
67 | { | ||
68 | if (!may_use_simd()) { | ||
69 | sha512_base_do_finalize(desc, | ||
70 | (sha512_block_fn *)sha512_block_data_order); | ||
71 | return sha512_base_finish(desc, out); | ||
72 | } | ||
73 | |||
74 | kernel_neon_begin(); | ||
75 | sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform); | ||
76 | kernel_neon_end(); | ||
77 | return sha512_base_finish(desc, out); | ||
78 | } | ||
79 | |||
80 | static struct shash_alg algs[] = { { | ||
81 | .init = sha384_base_init, | ||
82 | .update = sha512_ce_update, | ||
83 | .final = sha512_ce_final, | ||
84 | .finup = sha512_ce_finup, | ||
85 | .descsize = sizeof(struct sha512_state), | ||
86 | .digestsize = SHA384_DIGEST_SIZE, | ||
87 | .base.cra_name = "sha384", | ||
88 | .base.cra_driver_name = "sha384-ce", | ||
89 | .base.cra_priority = 200, | ||
90 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
91 | .base.cra_blocksize = SHA512_BLOCK_SIZE, | ||
92 | .base.cra_module = THIS_MODULE, | ||
93 | }, { | ||
94 | .init = sha512_base_init, | ||
95 | .update = sha512_ce_update, | ||
96 | .final = sha512_ce_final, | ||
97 | .finup = sha512_ce_finup, | ||
98 | .descsize = sizeof(struct sha512_state), | ||
99 | .digestsize = SHA512_DIGEST_SIZE, | ||
100 | .base.cra_name = "sha512", | ||
101 | .base.cra_driver_name = "sha512-ce", | ||
102 | .base.cra_priority = 200, | ||
103 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
104 | .base.cra_blocksize = SHA512_BLOCK_SIZE, | ||
105 | .base.cra_module = THIS_MODULE, | ||
106 | } }; | ||
107 | |||
108 | static int __init sha512_ce_mod_init(void) | ||
109 | { | ||
110 | return crypto_register_shashes(algs, ARRAY_SIZE(algs)); | ||
111 | } | ||
112 | |||
113 | static void __exit sha512_ce_mod_fini(void) | ||
114 | { | ||
115 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | ||
116 | } | ||
117 | |||
118 | module_cpu_feature_match(SHA512, sha512_ce_mod_init); | ||
119 | module_exit(sha512_ce_mod_fini); | ||
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c index aff35c9992a4..27db4851e380 100644 --- a/arch/arm64/crypto/sha512-glue.c +++ b/arch/arm64/crypto/sha512-glue.c | |||
@@ -27,6 +27,7 @@ MODULE_ALIAS_CRYPTO("sha512"); | |||
27 | 27 | ||
28 | asmlinkage void sha512_block_data_order(u32 *digest, const void *data, | 28 | asmlinkage void sha512_block_data_order(u32 *digest, const void *data, |
29 | unsigned int num_blks); | 29 | unsigned int num_blks); |
30 | EXPORT_SYMBOL(sha512_block_data_order); | ||
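/* exported because sha512-ce-glue.c (above) calls this scalar routine as its fallback whenever SIMD is unusable */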
30 | 31 | ||
31 | static int sha512_update(struct shash_desc *desc, const u8 *data, | 32 | static int sha512_update(struct shash_desc *desc, const u8 *data, |
32 | unsigned int len) | 33 | unsigned int len) |
diff --git a/arch/arm64/crypto/sm3-ce-core.S b/arch/arm64/crypto/sm3-ce-core.S new file mode 100644 index 000000000000..27169fe07a68 --- /dev/null +++ b/arch/arm64/crypto/sm3-ce-core.S | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions | ||
3 | * | ||
4 | * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/linkage.h> | ||
12 | #include <asm/assembler.h> | ||
13 | |||
14 | .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 | ||
15 | .set .Lv\b\().4s, \b | ||
16 | .endr | ||
17 | |||
18 | .macro sm3partw1, rd, rn, rm | ||
19 | .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) | ||
20 | .endm | ||
21 | |||
22 | .macro sm3partw2, rd, rn, rm | ||
23 | .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) | ||
24 | .endm | ||
25 | |||
26 | .macro sm3ss1, rd, rn, rm, ra | ||
27 | .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) | ||
28 | .endm | ||
29 | |||
30 | .macro sm3tt1a, rd, rn, rm, imm2 | ||
31 | .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) | ||
32 | .endm | ||
33 | |||
34 | .macro sm3tt1b, rd, rn, rm, imm2 | ||
35 | .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) | ||
36 | .endm | ||
37 | |||
38 | .macro sm3tt2a, rd, rn, rm, imm2 | ||
39 | .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) | ||
40 | .endm | ||
41 | |||
42 | .macro sm3tt2b, rd, rn, rm, imm2 | ||
43 | .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) | ||
44 | .endm | ||
45 | |||
46 | .macro round, ab, s0, t0, t1, i | ||
47 | sm3ss1 v5.4s, v8.4s, \t0\().4s, v9.4s | ||
48 | shl \t1\().4s, \t0\().4s, #1 | ||
49 | sri \t1\().4s, \t0\().4s, #31 | ||
50 | sm3tt1\ab v8.4s, v5.4s, v10.4s, \i | ||
51 | sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i | ||
52 | .endm | ||
53 | |||
54 | .macro qround, ab, s0, s1, s2, s3, s4 | ||
55 | .ifnb \s4 | ||
56 | ext \s4\().16b, \s1\().16b, \s2\().16b, #12 | ||
57 | ext v6.16b, \s0\().16b, \s1\().16b, #12 | ||
58 | ext v7.16b, \s2\().16b, \s3\().16b, #8 | ||
59 | sm3partw1 \s4\().4s, \s0\().4s, \s3\().4s | ||
60 | .endif | ||
61 | |||
62 | eor v10.16b, \s0\().16b, \s1\().16b | ||
63 | |||
64 | round \ab, \s0, v11, v12, 0 | ||
65 | round \ab, \s0, v12, v11, 1 | ||
66 | round \ab, \s0, v11, v12, 2 | ||
67 | round \ab, \s0, v12, v11, 3 | ||
68 | |||
69 | .ifnb \s4 | ||
70 | sm3partw2 \s4\().4s, v7.4s, v6.4s | ||
71 | .endif | ||
72 | .endm | ||
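	/*
	 * Each qround below executes four SM3 rounds (one sm3ss1/sm3tt1/
	 * sm3tt2 triplet per round) and, when \s4 is supplied, expands the
	 * next four message-schedule words using sm3partw1/sm3partw2.
	 */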
73 | |||
74 | /* | ||
75 | * void sm3_ce_transform(struct sm3_state *sst, u8 const *src, | ||
76 | * int blocks) | ||
77 | */ | ||
78 | .text | ||
79 | ENTRY(sm3_ce_transform) | ||
80 | /* load state */ | ||
81 | ld1 {v8.4s-v9.4s}, [x0] | ||
82 | rev64 v8.4s, v8.4s | ||
83 | rev64 v9.4s, v9.4s | ||
84 | ext v8.16b, v8.16b, v8.16b, #8 | ||
85 | ext v9.16b, v9.16b, v9.16b, #8 | ||
86 | |||
87 | adr_l x8, .Lt | ||
88 | ldp s13, s14, [x8] | ||
89 | |||
90 | /* load input */ | ||
91 | 0: ld1 {v0.16b-v3.16b}, [x1], #64 | ||
92 | sub w2, w2, #1 | ||
93 | |||
94 | mov v15.16b, v8.16b | ||
95 | mov v16.16b, v9.16b | ||
96 | |||
97 | CPU_LE( rev32 v0.16b, v0.16b ) | ||
98 | CPU_LE( rev32 v1.16b, v1.16b ) | ||
99 | CPU_LE( rev32 v2.16b, v2.16b ) | ||
100 | CPU_LE( rev32 v3.16b, v3.16b ) | ||
101 | |||
102 | ext v11.16b, v13.16b, v13.16b, #4 | ||
103 | |||
104 | qround a, v0, v1, v2, v3, v4 | ||
105 | qround a, v1, v2, v3, v4, v0 | ||
106 | qround a, v2, v3, v4, v0, v1 | ||
107 | qround a, v3, v4, v0, v1, v2 | ||
108 | |||
109 | ext v11.16b, v14.16b, v14.16b, #4 | ||
110 | |||
111 | qround b, v4, v0, v1, v2, v3 | ||
112 | qround b, v0, v1, v2, v3, v4 | ||
113 | qround b, v1, v2, v3, v4, v0 | ||
114 | qround b, v2, v3, v4, v0, v1 | ||
115 | qround b, v3, v4, v0, v1, v2 | ||
116 | qround b, v4, v0, v1, v2, v3 | ||
117 | qround b, v0, v1, v2, v3, v4 | ||
118 | qround b, v1, v2, v3, v4, v0 | ||
119 | qround b, v2, v3, v4, v0, v1 | ||
120 | qround b, v3, v4 | ||
121 | qround b, v4, v0 | ||
122 | qround b, v0, v1 | ||
123 | |||
124 | eor v8.16b, v8.16b, v15.16b | ||
125 | eor v9.16b, v9.16b, v16.16b | ||
126 | |||
127 | /* handled all input blocks? */ | ||
128 | cbnz w2, 0b | ||
129 | |||
130 | /* save state */ | ||
131 | rev64 v8.4s, v8.4s | ||
132 | rev64 v9.4s, v9.4s | ||
133 | ext v8.16b, v8.16b, v8.16b, #8 | ||
134 | ext v9.16b, v9.16b, v9.16b, #8 | ||
135 | st1 {v8.4s-v9.4s}, [x0] | ||
136 | ret | ||
137 | ENDPROC(sm3_ce_transform) | ||
138 | |||
139 | .section ".rodata", "a" | ||
140 | .align 3 | ||
141 | .Lt: .word 0x79cc4519, 0x9d8a7a87 | ||
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c new file mode 100644 index 000000000000..3b4948f7e26f --- /dev/null +++ b/arch/arm64/crypto/sm3-ce-glue.c | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * sm3-ce-glue.c - SM3 secure hash using ARMv8.2 Crypto Extensions | ||
3 | * | ||
4 | * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <asm/neon.h> | ||
12 | #include <asm/simd.h> | ||
13 | #include <asm/unaligned.h> | ||
14 | #include <crypto/internal/hash.h> | ||
15 | #include <crypto/sm3.h> | ||
16 | #include <crypto/sm3_base.h> | ||
17 | #include <linux/cpufeature.h> | ||
18 | #include <linux/crypto.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | MODULE_DESCRIPTION("SM3 secure hash using ARMv8 Crypto Extensions"); | ||
22 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
23 | MODULE_LICENSE("GPL v2"); | ||
24 | |||
25 | asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src, | ||
26 | int blocks); | ||
27 | |||
28 | static int sm3_ce_update(struct shash_desc *desc, const u8 *data, | ||
29 | unsigned int len) | ||
30 | { | ||
31 | if (!may_use_simd()) | ||
32 | return crypto_sm3_update(desc, data, len); | ||
33 | |||
34 | kernel_neon_begin(); | ||
35 | sm3_base_do_update(desc, data, len, sm3_ce_transform); | ||
36 | kernel_neon_end(); | ||
37 | |||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static int sm3_ce_final(struct shash_desc *desc, u8 *out) | ||
42 | { | ||
43 | if (!may_use_simd()) | ||
44 | return crypto_sm3_finup(desc, NULL, 0, out); | ||
45 | |||
46 | kernel_neon_begin(); | ||
47 | sm3_base_do_finalize(desc, sm3_ce_transform); | ||
48 | kernel_neon_end(); | ||
49 | |||
50 | return sm3_base_finish(desc, out); | ||
51 | } | ||
52 | |||
53 | static int sm3_ce_finup(struct shash_desc *desc, const u8 *data, | ||
54 | unsigned int len, u8 *out) | ||
55 | { | ||
56 | if (!may_use_simd()) | ||
57 | return crypto_sm3_finup(desc, data, len, out); | ||
58 | |||
59 | kernel_neon_begin(); | ||
60 | sm3_base_do_update(desc, data, len, sm3_ce_transform); | ||
61 | kernel_neon_end(); | ||
62 | |||
63 | return sm3_ce_final(desc, out); | ||
64 | } | ||
65 | |||
66 | static struct shash_alg sm3_alg = { | ||
67 | .digestsize = SM3_DIGEST_SIZE, | ||
68 | .init = sm3_base_init, | ||
69 | .update = sm3_ce_update, | ||
70 | .final = sm3_ce_final, | ||
71 | .finup = sm3_ce_finup, | ||
72 | .descsize = sizeof(struct sm3_state), | ||
73 | .base.cra_name = "sm3", | ||
74 | .base.cra_driver_name = "sm3-ce", | ||
75 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
76 | .base.cra_blocksize = SM3_BLOCK_SIZE, | ||
77 | .base.cra_module = THIS_MODULE, | ||
78 | .base.cra_priority = 200, | ||
79 | }; | ||
80 | |||
81 | static int __init sm3_ce_mod_init(void) | ||
82 | { | ||
83 | return crypto_register_shash(&sm3_alg); | ||
84 | } | ||
85 | |||
86 | static void __exit sm3_ce_mod_fini(void) | ||
87 | { | ||
88 | crypto_unregister_shash(&sm3_alg); | ||
89 | } | ||
90 | |||
91 | module_cpu_feature_match(SM3, sm3_ce_mod_init); | ||
92 | module_exit(sm3_ce_mod_fini); | ||
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index f058e0c3e4d4..fd1d6c83f0c0 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c | |||
@@ -141,6 +141,7 @@ static struct shash_alg alg = { | |||
141 | .cra_name = "crc32c", | 141 | .cra_name = "crc32c", |
142 | .cra_driver_name = "crc32c-vpmsum", | 142 | .cra_driver_name = "crc32c-vpmsum", |
143 | .cra_priority = 200, | 143 | .cra_priority = 200, |
144 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
144 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 145 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
145 | .cra_ctxsize = sizeof(u32), | 146 | .cra_ctxsize = sizeof(u32), |
146 | .cra_module = THIS_MODULE, | 147 | .cra_module = THIS_MODULE, |
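A note on the new flag, which recurs in the s390, sparc and x86 CRC hunks below: this cycle's API change makes ->setkey() mandatory for keyed algorithms, and CRYPTO_ALG_OPTIONAL_KEY exempts the CRC drivers, whose 4-byte "key" is merely the initial CRC state and has a sane default. A hedged sketch of overriding that seed (helper name illustrative):

	#include <crypto/hash.h>

	/* crc32c's ->setkey() reads the 32-bit seed as little-endian */
	static int crc32c_with_seed(struct crypto_shash *tfm, u32 seed,
				    const u8 *data, unsigned int len, u8 *out)
	{
		__le32 key = cpu_to_le32(seed);
		int err;

		err = crypto_shash_setkey(tfm, (const u8 *)&key, sizeof(key));
		if (err)
			return err;

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_digest(desc, data, len, out);
		}
		return err;
	}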
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 436865926c26..423ee05887e6 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c | |||
@@ -239,6 +239,7 @@ static struct shash_alg crc32_vx_algs[] = { | |||
239 | .cra_name = "crc32", | 239 | .cra_name = "crc32", |
240 | .cra_driver_name = "crc32-vx", | 240 | .cra_driver_name = "crc32-vx", |
241 | .cra_priority = 200, | 241 | .cra_priority = 200, |
242 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
242 | .cra_blocksize = CRC32_BLOCK_SIZE, | 243 | .cra_blocksize = CRC32_BLOCK_SIZE, |
243 | .cra_ctxsize = sizeof(struct crc_ctx), | 244 | .cra_ctxsize = sizeof(struct crc_ctx), |
244 | .cra_module = THIS_MODULE, | 245 | .cra_module = THIS_MODULE, |
@@ -259,6 +260,7 @@ static struct shash_alg crc32_vx_algs[] = { | |||
259 | .cra_name = "crc32be", | 260 | .cra_name = "crc32be", |
260 | .cra_driver_name = "crc32be-vx", | 261 | .cra_driver_name = "crc32be-vx", |
261 | .cra_priority = 200, | 262 | .cra_priority = 200, |
263 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
262 | .cra_blocksize = CRC32_BLOCK_SIZE, | 264 | .cra_blocksize = CRC32_BLOCK_SIZE, |
263 | .cra_ctxsize = sizeof(struct crc_ctx), | 265 | .cra_ctxsize = sizeof(struct crc_ctx), |
264 | .cra_module = THIS_MODULE, | 266 | .cra_module = THIS_MODULE, |
@@ -279,6 +281,7 @@ static struct shash_alg crc32_vx_algs[] = { | |||
279 | .cra_name = "crc32c", | 281 | .cra_name = "crc32c", |
280 | .cra_driver_name = "crc32c-vx", | 282 | .cra_driver_name = "crc32c-vx", |
281 | .cra_priority = 200, | 283 | .cra_priority = 200, |
284 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
282 | .cra_blocksize = CRC32_BLOCK_SIZE, | 285 | .cra_blocksize = CRC32_BLOCK_SIZE, |
283 | .cra_ctxsize = sizeof(struct crc_ctx), | 286 | .cra_ctxsize = sizeof(struct crc_ctx), |
284 | .cra_module = THIS_MODULE, | 287 | .cra_module = THIS_MODULE, |
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index d1064e46efe8..8aa664638c3c 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c | |||
@@ -133,6 +133,7 @@ static struct shash_alg alg = { | |||
133 | .cra_name = "crc32c", | 133 | .cra_name = "crc32c", |
134 | .cra_driver_name = "crc32c-sparc64", | 134 | .cra_driver_name = "crc32c-sparc64", |
135 | .cra_priority = SPARC_CR_OPCODE_PRIORITY, | 135 | .cra_priority = SPARC_CR_OPCODE_PRIORITY, |
136 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
136 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 137 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
137 | .cra_ctxsize = sizeof(u32), | 138 | .cra_ctxsize = sizeof(u32), |
138 | .cra_alignmask = 7, | 139 | .cra_alignmask = 7, |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 3d09e3aca18d..12e8484a8ee7 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 | |||
90 | ALL_F: .octa 0xffffffffffffffffffffffffffffffff | 90 | ALL_F: .octa 0xffffffffffffffffffffffffffffffff |
91 | .octa 0x00000000000000000000000000000000 | 91 | .octa 0x00000000000000000000000000000000 |
92 | 92 | ||
93 | .section .rodata | ||
94 | .align 16 | ||
95 | .type aad_shift_arr, @object | ||
96 | .size aad_shift_arr, 272 | ||
97 | aad_shift_arr: | ||
98 | .octa 0xffffffffffffffffffffffffffffffff | ||
99 | .octa 0xffffffffffffffffffffffffffffff0C | ||
100 | .octa 0xffffffffffffffffffffffffffff0D0C | ||
101 | .octa 0xffffffffffffffffffffffffff0E0D0C | ||
102 | .octa 0xffffffffffffffffffffffff0F0E0D0C | ||
103 | .octa 0xffffffffffffffffffffff0C0B0A0908 | ||
104 | .octa 0xffffffffffffffffffff0D0C0B0A0908 | ||
105 | .octa 0xffffffffffffffffff0E0D0C0B0A0908 | ||
106 | .octa 0xffffffffffffffff0F0E0D0C0B0A0908 | ||
107 | .octa 0xffffffffffffff0C0B0A090807060504 | ||
108 | .octa 0xffffffffffff0D0C0B0A090807060504 | ||
109 | .octa 0xffffffffff0E0D0C0B0A090807060504 | ||
110 | .octa 0xffffffff0F0E0D0C0B0A090807060504 | ||
111 | .octa 0xffffff0C0B0A09080706050403020100 | ||
112 | .octa 0xffff0D0C0B0A09080706050403020100 | ||
113 | .octa 0xff0E0D0C0B0A09080706050403020100 | ||
114 | .octa 0x0F0E0D0C0B0A09080706050403020100 | ||
115 | |||
116 | |||
117 | .text | 93 | .text |
118 | 94 | ||
119 | 95 | ||
@@ -257,6 +233,37 @@ aad_shift_arr: | |||
257 | pxor \TMP1, \GH # result is in TMP1 | 233 | pxor \TMP1, \GH # result is in TMP1 |
258 | .endm | 234 | .endm |
259 | 235 | ||
236 | # Reads DLEN bytes starting at DPTR and stores in XMMDst | ||
237 | # where 0 < DLEN < 16 | ||
238 | # Clobbers %rax, DLEN and XMM1 | ||
239 | .macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst | ||
240 | cmp $8, \DLEN | ||
241 | jl _read_lt8_\@ | ||
242 | mov (\DPTR), %rax | ||
243 | MOVQ_R64_XMM %rax, \XMMDst | ||
244 | sub $8, \DLEN | ||
245 | jz _done_read_partial_block_\@ | ||
246 | xor %eax, %eax | ||
247 | _read_next_byte_\@: | ||
248 | shl $8, %rax | ||
249 | mov 7(\DPTR, \DLEN, 1), %al | ||
250 | dec \DLEN | ||
251 | jnz _read_next_byte_\@ | ||
252 | MOVQ_R64_XMM %rax, \XMM1 | ||
253 | pslldq $8, \XMM1 | ||
254 | por \XMM1, \XMMDst | ||
255 | jmp _done_read_partial_block_\@ | ||
256 | _read_lt8_\@: | ||
257 | xor %eax, %eax | ||
258 | _read_next_byte_lt8_\@: | ||
259 | shl $8, %rax | ||
260 | mov -1(\DPTR, \DLEN, 1), %al | ||
261 | dec \DLEN | ||
262 | jnz _read_next_byte_lt8_\@ | ||
263 | MOVQ_R64_XMM %rax, \XMMDst | ||
264 | _done_read_partial_block_\@: | ||
265 | .endm | ||
266 | |||
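The macro's byte-exact behaviour, rendered as C for clarity (function name illustrative). Unlike the open-coded tail reads it replaces in the hunks below, it never dereferences memory past src + len, which is what makes it safe for buffers with nothing mapped after them:

	#include <linux/string.h>
	#include <linux/types.h>

	/* C sketch of READ_PARTIAL_BLOCK: gather len (1..15) bytes from src
	 * into a zeroed 16-byte lane without over-reading. */
	static void read_partial_block(const u8 *src, unsigned int len,
				       u8 dst[16])
	{
		unsigned int i = 0;

		memset(dst, 0, 16);
		if (len >= 8) {		/* one safe 8-byte load */
			memcpy(dst, src, 8);
			i = 8;
		}
		for (; i < len; i++)	/* trailing bytes, one at a time */
			dst[i] = src[i];
	}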
260 | /* | 267 | /* |
261 | * if a = number of total plaintext bytes | 268 | * if a = number of total plaintext bytes |
262 | * b = floor(a/16) | 269 | * b = floor(a/16) |
@@ -273,62 +280,30 @@ aad_shift_arr: | |||
273 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation | 280 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation |
274 | MOVADQ SHUF_MASK(%rip), %xmm14 | 281 | MOVADQ SHUF_MASK(%rip), %xmm14 |
275 | mov arg7, %r10 # %r10 = AAD | 282 | mov arg7, %r10 # %r10 = AAD |
276 | mov arg8, %r12 # %r12 = aadLen | 283 | mov arg8, %r11 # %r11 = aadLen |
277 | mov %r12, %r11 | ||
278 | pxor %xmm\i, %xmm\i | 284 | pxor %xmm\i, %xmm\i |
279 | pxor \XMM2, \XMM2 | 285 | pxor \XMM2, \XMM2 |
280 | 286 | ||
281 | cmp $16, %r11 | 287 | cmp $16, %r11 |
282 | jl _get_AAD_rest8\num_initial_blocks\operation | 288 | jl _get_AAD_rest\num_initial_blocks\operation |
283 | _get_AAD_blocks\num_initial_blocks\operation: | 289 | _get_AAD_blocks\num_initial_blocks\operation: |
284 | movdqu (%r10), %xmm\i | 290 | movdqu (%r10), %xmm\i |
285 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 291 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
286 | pxor %xmm\i, \XMM2 | 292 | pxor %xmm\i, \XMM2 |
287 | GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 293 | GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
288 | add $16, %r10 | 294 | add $16, %r10 |
289 | sub $16, %r12 | ||
290 | sub $16, %r11 | 295 | sub $16, %r11 |
291 | cmp $16, %r11 | 296 | cmp $16, %r11 |
292 | jge _get_AAD_blocks\num_initial_blocks\operation | 297 | jge _get_AAD_blocks\num_initial_blocks\operation |
293 | 298 | ||
294 | movdqu \XMM2, %xmm\i | 299 | movdqu \XMM2, %xmm\i |
300 | |||
301 | /* read the last <16B of AAD */ | ||
302 | _get_AAD_rest\num_initial_blocks\operation: | ||
295 | cmp $0, %r11 | 303 | cmp $0, %r11 |
296 | je _get_AAD_done\num_initial_blocks\operation | 304 | je _get_AAD_done\num_initial_blocks\operation |
297 | 305 | ||
298 | pxor %xmm\i,%xmm\i | 306 | READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i |
299 | |||
300 | /* read the last <16B of AAD. since we have at least 4B of | ||
301 | data right after the AAD (the ICV, and maybe some CT), we can | ||
302 | read 4B/8B blocks safely, and then get rid of the extra stuff */ | ||
303 | _get_AAD_rest8\num_initial_blocks\operation: | ||
304 | cmp $4, %r11 | ||
305 | jle _get_AAD_rest4\num_initial_blocks\operation | ||
306 | movq (%r10), \TMP1 | ||
307 | add $8, %r10 | ||
308 | sub $8, %r11 | ||
309 | pslldq $8, \TMP1 | ||
310 | psrldq $8, %xmm\i | ||
311 | pxor \TMP1, %xmm\i | ||
312 | jmp _get_AAD_rest8\num_initial_blocks\operation | ||
313 | _get_AAD_rest4\num_initial_blocks\operation: | ||
314 | cmp $0, %r11 | ||
315 | jle _get_AAD_rest0\num_initial_blocks\operation | ||
316 | mov (%r10), %eax | ||
317 | movq %rax, \TMP1 | ||
318 | add $4, %r10 | ||
319 | sub $4, %r10 | ||
320 | pslldq $12, \TMP1 | ||
321 | psrldq $4, %xmm\i | ||
322 | pxor \TMP1, %xmm\i | ||
323 | _get_AAD_rest0\num_initial_blocks\operation: | ||
324 | /* finalize: shift out the extra bytes we read, and align | ||
325 | left. since pslldq can only shift by an immediate, we use | ||
326 | vpshufb and an array of shuffle masks */ | ||
327 | movq %r12, %r11 | ||
328 | salq $4, %r11 | ||
329 | movdqu aad_shift_arr(%r11), \TMP1 | ||
330 | PSHUFB_XMM \TMP1, %xmm\i | ||
331 | _get_AAD_rest_final\num_initial_blocks\operation: | ||
332 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 307 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
333 | pxor \XMM2, %xmm\i | 308 | pxor \XMM2, %xmm\i |
334 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 309 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
@@ -532,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation: | |||
532 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation | 507 | XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation |
533 | MOVADQ SHUF_MASK(%rip), %xmm14 | 508 | MOVADQ SHUF_MASK(%rip), %xmm14 |
534 | mov arg7, %r10 # %r10 = AAD | 509 | mov arg7, %r10 # %r10 = AAD |
535 | mov arg8, %r12 # %r12 = aadLen | 510 | mov arg8, %r11 # %r11 = aadLen |
536 | mov %r12, %r11 | ||
537 | pxor %xmm\i, %xmm\i | 511 | pxor %xmm\i, %xmm\i |
538 | pxor \XMM2, \XMM2 | 512 | pxor \XMM2, \XMM2 |
539 | 513 | ||
540 | cmp $16, %r11 | 514 | cmp $16, %r11 |
541 | jl _get_AAD_rest8\num_initial_blocks\operation | 515 | jl _get_AAD_rest\num_initial_blocks\operation |
542 | _get_AAD_blocks\num_initial_blocks\operation: | 516 | _get_AAD_blocks\num_initial_blocks\operation: |
543 | movdqu (%r10), %xmm\i | 517 | movdqu (%r10), %xmm\i |
544 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 518 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
545 | pxor %xmm\i, \XMM2 | 519 | pxor %xmm\i, \XMM2 |
546 | GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 520 | GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
547 | add $16, %r10 | 521 | add $16, %r10 |
548 | sub $16, %r12 | ||
549 | sub $16, %r11 | 522 | sub $16, %r11 |
550 | cmp $16, %r11 | 523 | cmp $16, %r11 |
551 | jge _get_AAD_blocks\num_initial_blocks\operation | 524 | jge _get_AAD_blocks\num_initial_blocks\operation |
552 | 525 | ||
553 | movdqu \XMM2, %xmm\i | 526 | movdqu \XMM2, %xmm\i |
527 | |||
528 | /* read the last <16B of AAD */ | ||
529 | _get_AAD_rest\num_initial_blocks\operation: | ||
554 | cmp $0, %r11 | 530 | cmp $0, %r11 |
555 | je _get_AAD_done\num_initial_blocks\operation | 531 | je _get_AAD_done\num_initial_blocks\operation |
556 | 532 | ||
557 | pxor %xmm\i,%xmm\i | 533 | READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i |
558 | |||
559 | /* read the last <16B of AAD. since we have at least 4B of | ||
560 | data right after the AAD (the ICV, and maybe some PT), we can | ||
561 | read 4B/8B blocks safely, and then get rid of the extra stuff */ | ||
562 | _get_AAD_rest8\num_initial_blocks\operation: | ||
563 | cmp $4, %r11 | ||
564 | jle _get_AAD_rest4\num_initial_blocks\operation | ||
565 | movq (%r10), \TMP1 | ||
566 | add $8, %r10 | ||
567 | sub $8, %r11 | ||
568 | pslldq $8, \TMP1 | ||
569 | psrldq $8, %xmm\i | ||
570 | pxor \TMP1, %xmm\i | ||
571 | jmp _get_AAD_rest8\num_initial_blocks\operation | ||
572 | _get_AAD_rest4\num_initial_blocks\operation: | ||
573 | cmp $0, %r11 | ||
574 | jle _get_AAD_rest0\num_initial_blocks\operation | ||
575 | mov (%r10), %eax | ||
576 | movq %rax, \TMP1 | ||
577 | add $4, %r10 | ||
578 | sub $4, %r10 | ||
579 | pslldq $12, \TMP1 | ||
580 | psrldq $4, %xmm\i | ||
581 | pxor \TMP1, %xmm\i | ||
582 | _get_AAD_rest0\num_initial_blocks\operation: | ||
583 | /* finalize: shift out the extra bytes we read, and align | ||
584 | left. since pslldq can only shift by an immediate, we use | ||
585 | vpshufb and an array of shuffle masks */ | ||
586 | movq %r12, %r11 | ||
587 | salq $4, %r11 | ||
588 | movdqu aad_shift_arr(%r11), \TMP1 | ||
589 | PSHUFB_XMM \TMP1, %xmm\i | ||
590 | _get_AAD_rest_final\num_initial_blocks\operation: | ||
591 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 534 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
592 | pxor \XMM2, %xmm\i | 535 | pxor \XMM2, %xmm\i |
593 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 536 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 |
@@ -1386,14 +1329,6 @@ _esb_loop_\@: | |||
1386 | * | 1329 | * |
1387 | * AAD Format with 64-bit Extended Sequence Number | 1330 | * AAD Format with 64-bit Extended Sequence Number |
1388 | * | 1331 | * |
1389 | * aadLen: | ||
1390 | * from the definition of the spec, aadLen can only be 8 or 12 bytes. | ||
1391 | * The code supports 16 too but for other sizes, the code will fail. | ||
1392 | * | ||
1393 | * TLen: | ||
1394 | * from the definition of the spec, TLen can only be 8, 12 or 16 bytes. | ||
1395 | * For other sizes, the code will fail. | ||
1396 | * | ||
1397 | * poly = x^128 + x^127 + x^126 + x^121 + 1 | 1332 | * poly = x^128 + x^127 + x^126 + x^121 + 1 |
1398 | * | 1333 | * |
1399 | *****************************************************************************/ | 1334 | *****************************************************************************/ |
@@ -1487,19 +1422,16 @@ _zero_cipher_left_decrypt: | |||
1487 | PSHUFB_XMM %xmm10, %xmm0 | 1422 | PSHUFB_XMM %xmm10, %xmm0 |
1488 | 1423 | ||
1489 | ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) | 1424 | ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) |
1490 | sub $16, %r11 | ||
1491 | add %r13, %r11 | ||
1492 | movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block | ||
1493 | lea SHIFT_MASK+16(%rip), %r12 | ||
1494 | sub %r13, %r12 | ||
1495 | # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes | ||
1496 | # (%r13 is the number of bytes in plaintext mod 16) | ||
1497 | movdqu (%r12), %xmm2 # get the appropriate shuffle mask | ||
1498 | PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes | ||
1499 | 1425 | ||
1426 | lea (%arg3,%r11,1), %r10 | ||
1427 | mov %r13, %r12 | ||
1428 | READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 | ||
1429 | |||
1430 | lea ALL_F+16(%rip), %r12 | ||
1431 | sub %r13, %r12 | ||
1500 | movdqa %xmm1, %xmm2 | 1432 | movdqa %xmm1, %xmm2 |
1501 | pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) | 1433 | pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) |
1502 | movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 | 1434 | movdqu (%r12), %xmm1 |
1503 | # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 | 1435 | # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 |
1504 | pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 | 1436 | pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 |
1505 | pand %xmm1, %xmm2 | 1437 | pand %xmm1, %xmm2 |
@@ -1508,9 +1440,6 @@ _zero_cipher_left_decrypt: | |||
1508 | 1440 | ||
1509 | pxor %xmm2, %xmm8 | 1441 | pxor %xmm2, %xmm8 |
1510 | GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 | 1442 | GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 |
1511 | # GHASH computation for the last <16 byte block | ||
1512 | sub %r13, %r11 | ||
1513 | add $16, %r11 | ||
1514 | 1443 | ||
1515 | # output %r13 bytes | 1444 | # output %r13 bytes |
1516 | MOVQ_R64_XMM %xmm0, %rax | 1445 | MOVQ_R64_XMM %xmm0, %rax |
@@ -1664,14 +1593,6 @@ ENDPROC(aesni_gcm_dec) | |||
1664 | * | 1593 | * |
1665 | * AAD Format with 64-bit Extended Sequence Number | 1594 | * AAD Format with 64-bit Extended Sequence Number |
1666 | * | 1595 | * |
1667 | * aadLen: | ||
1668 | * from the definition of the spec, aadLen can only be 8 or 12 bytes. | ||
1669 | * The code supports 16 too but for other sizes, the code will fail. | ||
1670 | * | ||
1671 | * TLen: | ||
1672 | * from the definition of the spec, TLen can only be 8, 12 or 16 bytes. | ||
1673 | * For other sizes, the code will fail. | ||
1674 | * | ||
1675 | * poly = x^128 + x^127 + x^126 + x^121 + 1 | 1596 | * poly = x^128 + x^127 + x^126 + x^121 + 1 |
1676 | ***************************************************************************/ | 1597 | ***************************************************************************/ |
1677 | ENTRY(aesni_gcm_enc) | 1598 | ENTRY(aesni_gcm_enc) |
@@ -1764,19 +1685,16 @@ _zero_cipher_left_encrypt: | |||
1764 | movdqa SHUF_MASK(%rip), %xmm10 | 1685 | movdqa SHUF_MASK(%rip), %xmm10 |
1765 | PSHUFB_XMM %xmm10, %xmm0 | 1686 | PSHUFB_XMM %xmm10, %xmm0 |
1766 | 1687 | ||
1767 | |||
1768 | ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) | 1688 | ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) |
1769 | sub $16, %r11 | 1689 | |
1770 | add %r13, %r11 | 1690 | lea (%arg3,%r11,1), %r10 |
1771 | movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks | 1691 | mov %r13, %r12 |
1772 | lea SHIFT_MASK+16(%rip), %r12 | 1692 | READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 |
1693 | |||
1694 | lea ALL_F+16(%rip), %r12 | ||
1773 | sub %r13, %r12 | 1695 | sub %r13, %r12 |
1774 | # adjust the shuffle mask pointer to be able to shift 16-r13 bytes | ||
1775 | # (%r13 is the number of bytes in plaintext mod 16) | ||
1776 | movdqu (%r12), %xmm2 # get the appropriate shuffle mask | ||
1777 | PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 bytes | ||
1778 | pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) | 1696 | pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) |
1779 | movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 | 1697 | movdqu (%r12), %xmm1 |
1780 | # get the appropriate mask to mask out top 16-r13 bytes of xmm0 | 1698 | # get the appropriate mask to mask out top 16-r13 bytes of xmm0 |
1781 | pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 | 1699 | pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 |
1782 | movdqa SHUF_MASK(%rip), %xmm10 | 1700 | movdqa SHUF_MASK(%rip), %xmm10 |
@@ -1785,9 +1703,6 @@ _zero_cipher_left_encrypt: | |||
1785 | pxor %xmm0, %xmm8 | 1703 | pxor %xmm0, %xmm8 |
1786 | GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 | 1704 | GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 |
1787 | # GHASH computation for the last <16 byte block | 1705 | # GHASH computation for the last <16 byte block |
1788 | sub %r13, %r11 | ||
1789 | add $16, %r11 | ||
1790 | |||
1791 | movdqa SHUF_MASK(%rip), %xmm10 | 1706 | movdqa SHUF_MASK(%rip), %xmm10 |
1792 | PSHUFB_XMM %xmm10, %xmm0 | 1707 | PSHUFB_XMM %xmm10, %xmm0 |
1793 | 1708 | ||
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 3bf3dcf29825..34cf1c1f8c98 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -690,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, | |||
690 | rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); | 690 | rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); |
691 | } | 691 | } |
692 | 692 | ||
693 | static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | 693 | static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, |
694 | unsigned int key_len) | 694 | unsigned int key_len) |
695 | { | 695 | { |
696 | struct cryptd_aead **ctx = crypto_aead_ctx(parent); | 696 | struct cryptd_aead **ctx = crypto_aead_ctx(parent); |
697 | struct cryptd_aead *cryptd_tfm = *ctx; | 697 | struct cryptd_aead *cryptd_tfm = *ctx; |
@@ -716,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, | |||
716 | 716 | ||
717 | /* This is the Integrity Check Value (aka the authentication tag) length and can | 717 | /* This is the Integrity Check Value (aka the authentication tag) length and can
718 | * be 8, 12 or 16 bytes long. */ | 718 | * be 8, 12 or 16 bytes long. */ |
719 | static int rfc4106_set_authsize(struct crypto_aead *parent, | 719 | static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent, |
720 | unsigned int authsize) | 720 | unsigned int authsize) |
721 | { | 721 | { |
722 | struct cryptd_aead **ctx = crypto_aead_ctx(parent); | 722 | struct cryptd_aead **ctx = crypto_aead_ctx(parent); |
723 | struct cryptd_aead *cryptd_tfm = *ctx; | 723 | struct cryptd_aead *cryptd_tfm = *ctx; |
@@ -824,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, | |||
824 | if (sg_is_last(req->src) && | 824 | if (sg_is_last(req->src) && |
825 | (!PageHighMem(sg_page(req->src)) || | 825 | (!PageHighMem(sg_page(req->src)) || |
826 | req->src->offset + req->src->length <= PAGE_SIZE) && | 826 | req->src->offset + req->src->length <= PAGE_SIZE) && |
827 | sg_is_last(req->dst) && | 827 | sg_is_last(req->dst) && req->dst->length && |
828 | (!PageHighMem(sg_page(req->dst)) || | 828 | (!PageHighMem(sg_page(req->dst)) || |
829 | req->dst->offset + req->dst->length <= PAGE_SIZE)) { | 829 | req->dst->offset + req->dst->length <= PAGE_SIZE)) { |
830 | one_entry_in_sg = 1; | 830 | one_entry_in_sg = 1; |
@@ -929,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
929 | aes_ctx); | 929 | aes_ctx); |
930 | } | 930 | } |
931 | 931 | ||
932 | static int rfc4106_encrypt(struct aead_request *req) | 932 | static int gcmaes_wrapper_encrypt(struct aead_request *req) |
933 | { | 933 | { |
934 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 934 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
935 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); | 935 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
@@ -945,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req) | |||
945 | return crypto_aead_encrypt(req); | 945 | return crypto_aead_encrypt(req); |
946 | } | 946 | } |
947 | 947 | ||
948 | static int rfc4106_decrypt(struct aead_request *req) | 948 | static int gcmaes_wrapper_decrypt(struct aead_request *req) |
949 | { | 949 | { |
950 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 950 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
951 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); | 951 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
@@ -1117,7 +1117,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req) | |||
1117 | { | 1117 | { |
1118 | __be32 counter = cpu_to_be32(1); | 1118 | __be32 counter = cpu_to_be32(1); |
1119 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1119 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1120 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1120 | struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); |
1121 | void *aes_ctx = &(ctx->aes_key_expanded); | 1121 | void *aes_ctx = &(ctx->aes_key_expanded); |
1122 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | 1122 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
1123 | 1123 | ||
@@ -1128,6 +1128,30 @@ static int generic_gcmaes_decrypt(struct aead_request *req) | |||
1128 | aes_ctx); | 1128 | aes_ctx); |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | static int generic_gcmaes_init(struct crypto_aead *aead) | ||
1132 | { | ||
1133 | struct cryptd_aead *cryptd_tfm; | ||
1134 | struct cryptd_aead **ctx = crypto_aead_ctx(aead); | ||
1135 | |||
1136 | cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni", | ||
1137 | CRYPTO_ALG_INTERNAL, | ||
1138 | CRYPTO_ALG_INTERNAL); | ||
1139 | if (IS_ERR(cryptd_tfm)) | ||
1140 | return PTR_ERR(cryptd_tfm); | ||
1141 | |||
1142 | *ctx = cryptd_tfm; | ||
1143 | crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
1147 | |||
1148 | static void generic_gcmaes_exit(struct crypto_aead *aead) | ||
1149 | { | ||
1150 | struct cryptd_aead **ctx = crypto_aead_ctx(aead); | ||
1151 | |||
1152 | cryptd_free_aead(*ctx); | ||
1153 | } | ||
1154 | |||
1131 | static struct aead_alg aesni_aead_algs[] = { { | 1155 | static struct aead_alg aesni_aead_algs[] = { { |
1132 | .setkey = common_rfc4106_set_key, | 1156 | .setkey = common_rfc4106_set_key, |
1133 | .setauthsize = common_rfc4106_set_authsize, | 1157 | .setauthsize = common_rfc4106_set_authsize, |
@@ -1147,10 +1171,10 @@ static struct aead_alg aesni_aead_algs[] = { { | |||
1147 | }, { | 1171 | }, { |
1148 | .init = rfc4106_init, | 1172 | .init = rfc4106_init, |
1149 | .exit = rfc4106_exit, | 1173 | .exit = rfc4106_exit, |
1150 | .setkey = rfc4106_set_key, | 1174 | .setkey = gcmaes_wrapper_set_key, |
1151 | .setauthsize = rfc4106_set_authsize, | 1175 | .setauthsize = gcmaes_wrapper_set_authsize, |
1152 | .encrypt = rfc4106_encrypt, | 1176 | .encrypt = gcmaes_wrapper_encrypt, |
1153 | .decrypt = rfc4106_decrypt, | 1177 | .decrypt = gcmaes_wrapper_decrypt, |
1154 | .ivsize = GCM_RFC4106_IV_SIZE, | 1178 | .ivsize = GCM_RFC4106_IV_SIZE, |
1155 | .maxauthsize = 16, | 1179 | .maxauthsize = 16, |
1156 | .base = { | 1180 | .base = { |
@@ -1170,13 +1194,31 @@ static struct aead_alg aesni_aead_algs[] = { { | |||
1170 | .ivsize = GCM_AES_IV_SIZE, | 1194 | .ivsize = GCM_AES_IV_SIZE, |
1171 | .maxauthsize = 16, | 1195 | .maxauthsize = 16, |
1172 | .base = { | 1196 | .base = { |
1197 | .cra_name = "__generic-gcm-aes-aesni", | ||
1198 | .cra_driver_name = "__driver-generic-gcm-aes-aesni", | ||
1199 | .cra_priority = 0, | ||
1200 | .cra_flags = CRYPTO_ALG_INTERNAL, | ||
1201 | .cra_blocksize = 1, | ||
1202 | .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), | ||
1203 | .cra_alignmask = AESNI_ALIGN - 1, | ||
1204 | .cra_module = THIS_MODULE, | ||
1205 | }, | ||
1206 | }, { | ||
1207 | .init = generic_gcmaes_init, | ||
1208 | .exit = generic_gcmaes_exit, | ||
1209 | .setkey = gcmaes_wrapper_set_key, | ||
1210 | .setauthsize = gcmaes_wrapper_set_authsize, | ||
1211 | .encrypt = gcmaes_wrapper_encrypt, | ||
1212 | .decrypt = gcmaes_wrapper_decrypt, | ||
1213 | .ivsize = GCM_AES_IV_SIZE, | ||
1214 | .maxauthsize = 16, | ||
1215 | .base = { | ||
1173 | .cra_name = "gcm(aes)", | 1216 | .cra_name = "gcm(aes)", |
1174 | .cra_driver_name = "generic-gcm-aesni", | 1217 | .cra_driver_name = "generic-gcm-aesni", |
1175 | .cra_priority = 400, | 1218 | .cra_priority = 400, |
1176 | .cra_flags = CRYPTO_ALG_ASYNC, | 1219 | .cra_flags = CRYPTO_ALG_ASYNC, |
1177 | .cra_blocksize = 1, | 1220 | .cra_blocksize = 1, |
1178 | .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), | 1221 | .cra_ctxsize = sizeof(struct cryptd_aead *), |
1179 | .cra_alignmask = AESNI_ALIGN - 1, | ||
1180 | .cra_module = THIS_MODULE, | 1222 | .cra_module = THIS_MODULE, |
1181 | }, | 1223 | }, |
1182 | } }; | 1224 | } }; |
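These gcmaes_wrapper_* renames reflect that the same cryptd-backed wrappers now front both rfc4106(gcm(aes)) and plain gcm(aes). The hunks show only the heads of the wrappers; the dispatch they perform, reconstructed here as an illustrative sketch (body inferred from the pattern used in this file, not quoted from the patch), calls the CRYPTO_ALG_INTERNAL child synchronously when the FPU is usable and queues through cryptd otherwise:

	static int gcmaes_wrapper_encrypt(struct aead_request *req)
	{
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
		struct cryptd_aead *cryptd_tfm = *ctx;

		tfm = &cryptd_tfm->base;	/* slow path: cryptd workqueue */
		if (irq_fpu_usable() && (!in_atomic() ||
					 !cryptd_aead_queued(cryptd_tfm)))
			tfm = cryptd_aead_child(cryptd_tfm);	/* direct call */

		aead_request_set_tfm(req, tfm);
		return crypto_aead_encrypt(req);
	}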
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c index 1e6af1b35f7b..dce7c5d39c2f 100644 --- a/arch/x86/crypto/chacha20_glue.c +++ b/arch/x86/crypto/chacha20_glue.c | |||
@@ -107,7 +107,6 @@ static struct skcipher_alg alg = { | |||
107 | .base.cra_priority = 300, | 107 | .base.cra_priority = 300, |
108 | .base.cra_blocksize = 1, | 108 | .base.cra_blocksize = 1, |
109 | .base.cra_ctxsize = sizeof(struct chacha20_ctx), | 109 | .base.cra_ctxsize = sizeof(struct chacha20_ctx), |
110 | .base.cra_alignmask = sizeof(u32) - 1, | ||
111 | .base.cra_module = THIS_MODULE, | 110 | .base.cra_module = THIS_MODULE, |
112 | 111 | ||
113 | .min_keysize = CHACHA20_KEY_SIZE, | 112 | .min_keysize = CHACHA20_KEY_SIZE, |
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 27226df3f7d8..c8d9cdacbf10 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c | |||
@@ -162,6 +162,7 @@ static struct shash_alg alg = { | |||
162 | .cra_name = "crc32", | 162 | .cra_name = "crc32", |
163 | .cra_driver_name = "crc32-pclmul", | 163 | .cra_driver_name = "crc32-pclmul", |
164 | .cra_priority = 200, | 164 | .cra_priority = 200, |
165 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
165 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 166 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
166 | .cra_ctxsize = sizeof(u32), | 167 | .cra_ctxsize = sizeof(u32), |
167 | .cra_module = THIS_MODULE, | 168 | .cra_module = THIS_MODULE, |
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index c194d5717ae5..5773e1161072 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c | |||
@@ -226,6 +226,7 @@ static struct shash_alg alg = { | |||
226 | .cra_name = "crc32c", | 226 | .cra_name = "crc32c", |
227 | .cra_driver_name = "crc32c-intel", | 227 | .cra_driver_name = "crc32c-intel", |
228 | .cra_priority = 200, | 228 | .cra_priority = 200, |
229 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
229 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 230 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
230 | .cra_ctxsize = sizeof(u32), | 231 | .cra_ctxsize = sizeof(u32), |
231 | .cra_module = THIS_MODULE, | 232 | .cra_module = THIS_MODULE, |
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index e32142bc071d..790377797544 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c | |||
@@ -164,14 +164,12 @@ static struct shash_alg alg = { | |||
164 | .init = poly1305_simd_init, | 164 | .init = poly1305_simd_init, |
165 | .update = poly1305_simd_update, | 165 | .update = poly1305_simd_update, |
166 | .final = crypto_poly1305_final, | 166 | .final = crypto_poly1305_final, |
167 | .setkey = crypto_poly1305_setkey, | ||
168 | .descsize = sizeof(struct poly1305_simd_desc_ctx), | 167 | .descsize = sizeof(struct poly1305_simd_desc_ctx), |
169 | .base = { | 168 | .base = { |
170 | .cra_name = "poly1305", | 169 | .cra_name = "poly1305", |
171 | .cra_driver_name = "poly1305-simd", | 170 | .cra_driver_name = "poly1305-simd", |
172 | .cra_priority = 300, | 171 | .cra_priority = 300, |
173 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 172 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
174 | .cra_alignmask = sizeof(u32) - 1, | ||
175 | .cra_blocksize = POLY1305_BLOCK_SIZE, | 173 | .cra_blocksize = POLY1305_BLOCK_SIZE, |
176 | .cra_module = THIS_MODULE, | 174 | .cra_module = THIS_MODULE, |
177 | }, | 175 | }, |
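Dropping ->setkey() here belongs to the same keying cleanup: poly1305 is a one-time-key MAC, so the kernel's poly1305 shash takes the 32-byte key in-band, as the first bytes passed to ->update(), rather than through a setkey hook. Roughly, assuming tfm, key and msg are already set up (a sketch, not a verbatim API quote):

	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	desc->flags = 0;
	crypto_shash_init(desc);
	/* the first POLY1305_KEY_SIZE (32) bytes are consumed as the key */
	crypto_shash_update(desc, key, POLY1305_KEY_SIZE);
	crypto_shash_update(desc, msg, msg_len);
	crypto_shash_final(desc, mac);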
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S index 329452b8f794..6014b7b9e52a 100644 --- a/arch/x86/crypto/salsa20-i586-asm_32.S +++ b/arch/x86/crypto/salsa20-i586-asm_32.S | |||
@@ -1,6 +1,7 @@ | |||
1 | # salsa20_pm.s version 20051229 | 1 | # Derived from: |
2 | # D. J. Bernstein | 2 | # salsa20_pm.s version 20051229 |
3 | # Public domain. | 3 | # D. J. Bernstein |
4 | # Public domain. | ||
4 | 5 | ||
5 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
6 | 7 | ||
@@ -935,180 +936,3 @@ ENTRY(salsa20_encrypt_bytes) | |||
935 | # goto bytesatleast1 | 936 | # goto bytesatleast1 |
936 | jmp ._bytesatleast1 | 937 | jmp ._bytesatleast1 |
937 | ENDPROC(salsa20_encrypt_bytes) | 938 | ENDPROC(salsa20_encrypt_bytes) |
938 | |||
939 | # enter salsa20_keysetup | ||
940 | ENTRY(salsa20_keysetup) | ||
941 | mov %esp,%eax | ||
942 | and $31,%eax | ||
943 | add $256,%eax | ||
944 | sub %eax,%esp | ||
945 | # eax_stack = eax | ||
946 | movl %eax,64(%esp) | ||
947 | # ebx_stack = ebx | ||
948 | movl %ebx,68(%esp) | ||
949 | # esi_stack = esi | ||
950 | movl %esi,72(%esp) | ||
951 | # edi_stack = edi | ||
952 | movl %edi,76(%esp) | ||
953 | # ebp_stack = ebp | ||
954 | movl %ebp,80(%esp) | ||
955 | # k = arg2 | ||
956 | movl 8(%esp,%eax),%ecx | ||
957 | # kbits = arg3 | ||
958 | movl 12(%esp,%eax),%edx | ||
959 | # x = arg1 | ||
960 | movl 4(%esp,%eax),%eax | ||
961 | # in1 = *(uint32 *) (k + 0) | ||
962 | movl 0(%ecx),%ebx | ||
963 | # in2 = *(uint32 *) (k + 4) | ||
964 | movl 4(%ecx),%esi | ||
965 | # in3 = *(uint32 *) (k + 8) | ||
966 | movl 8(%ecx),%edi | ||
967 | # in4 = *(uint32 *) (k + 12) | ||
968 | movl 12(%ecx),%ebp | ||
969 | # *(uint32 *) (x + 4) = in1 | ||
970 | movl %ebx,4(%eax) | ||
971 | # *(uint32 *) (x + 8) = in2 | ||
972 | movl %esi,8(%eax) | ||
973 | # *(uint32 *) (x + 12) = in3 | ||
974 | movl %edi,12(%eax) | ||
975 | # *(uint32 *) (x + 16) = in4 | ||
976 | movl %ebp,16(%eax) | ||
977 | # kbits - 256 | ||
978 | cmp $256,%edx | ||
979 | # goto kbits128 if unsigned< | ||
980 | jb ._kbits128 | ||
981 | ._kbits256: | ||
982 | # in11 = *(uint32 *) (k + 16) | ||
983 | movl 16(%ecx),%edx | ||
984 | # in12 = *(uint32 *) (k + 20) | ||
985 | movl 20(%ecx),%ebx | ||
986 | # in13 = *(uint32 *) (k + 24) | ||
987 | movl 24(%ecx),%esi | ||
988 | # in14 = *(uint32 *) (k + 28) | ||
989 | movl 28(%ecx),%ecx | ||
990 | # *(uint32 *) (x + 44) = in11 | ||
991 | movl %edx,44(%eax) | ||
992 | # *(uint32 *) (x + 48) = in12 | ||
993 | movl %ebx,48(%eax) | ||
994 | # *(uint32 *) (x + 52) = in13 | ||
995 | movl %esi,52(%eax) | ||
996 | # *(uint32 *) (x + 56) = in14 | ||
997 | movl %ecx,56(%eax) | ||
998 | # in0 = 1634760805 | ||
999 | mov $1634760805,%ecx | ||
1000 | # in5 = 857760878 | ||
1001 | mov $857760878,%edx | ||
1002 | # in10 = 2036477234 | ||
1003 | mov $2036477234,%ebx | ||
1004 | # in15 = 1797285236 | ||
1005 | mov $1797285236,%esi | ||
1006 | # *(uint32 *) (x + 0) = in0 | ||
1007 | movl %ecx,0(%eax) | ||
1008 | # *(uint32 *) (x + 20) = in5 | ||
1009 | movl %edx,20(%eax) | ||
1010 | # *(uint32 *) (x + 40) = in10 | ||
1011 | movl %ebx,40(%eax) | ||
1012 | # *(uint32 *) (x + 60) = in15 | ||
1013 | movl %esi,60(%eax) | ||
1014 | # goto keysetupdone | ||
1015 | jmp ._keysetupdone | ||
1016 | ._kbits128: | ||
1017 | # in11 = *(uint32 *) (k + 0) | ||
1018 | movl 0(%ecx),%edx | ||
1019 | # in12 = *(uint32 *) (k + 4) | ||
1020 | movl 4(%ecx),%ebx | ||
1021 | # in13 = *(uint32 *) (k + 8) | ||
1022 | movl 8(%ecx),%esi | ||
1023 | # in14 = *(uint32 *) (k + 12) | ||
1024 | movl 12(%ecx),%ecx | ||
1025 | # *(uint32 *) (x + 44) = in11 | ||
1026 | movl %edx,44(%eax) | ||
1027 | # *(uint32 *) (x + 48) = in12 | ||
1028 | movl %ebx,48(%eax) | ||
1029 | # *(uint32 *) (x + 52) = in13 | ||
1030 | movl %esi,52(%eax) | ||
1031 | # *(uint32 *) (x + 56) = in14 | ||
1032 | movl %ecx,56(%eax) | ||
1033 | # in0 = 1634760805 | ||
1034 | mov $1634760805,%ecx | ||
1035 | # in5 = 824206446 | ||
1036 | mov $824206446,%edx | ||
1037 | # in10 = 2036477238 | ||
1038 | mov $2036477238,%ebx | ||
1039 | # in15 = 1797285236 | ||
1040 | mov $1797285236,%esi | ||
1041 | # *(uint32 *) (x + 0) = in0 | ||
1042 | movl %ecx,0(%eax) | ||
1043 | # *(uint32 *) (x + 20) = in5 | ||
1044 | movl %edx,20(%eax) | ||
1045 | # *(uint32 *) (x + 40) = in10 | ||
1046 | movl %ebx,40(%eax) | ||
1047 | # *(uint32 *) (x + 60) = in15 | ||
1048 | movl %esi,60(%eax) | ||
1049 | ._keysetupdone: | ||
1050 | # eax = eax_stack | ||
1051 | movl 64(%esp),%eax | ||
1052 | # ebx = ebx_stack | ||
1053 | movl 68(%esp),%ebx | ||
1054 | # esi = esi_stack | ||
1055 | movl 72(%esp),%esi | ||
1056 | # edi = edi_stack | ||
1057 | movl 76(%esp),%edi | ||
1058 | # ebp = ebp_stack | ||
1059 | movl 80(%esp),%ebp | ||
1060 | # leave | ||
1061 | add %eax,%esp | ||
1062 | ret | ||
1063 | ENDPROC(salsa20_keysetup) | ||
1064 | |||
1065 | # enter salsa20_ivsetup | ||
1066 | ENTRY(salsa20_ivsetup) | ||
1067 | mov %esp,%eax | ||
1068 | and $31,%eax | ||
1069 | add $256,%eax | ||
1070 | sub %eax,%esp | ||
1071 | # eax_stack = eax | ||
1072 | movl %eax,64(%esp) | ||
1073 | # ebx_stack = ebx | ||
1074 | movl %ebx,68(%esp) | ||
1075 | # esi_stack = esi | ||
1076 | movl %esi,72(%esp) | ||
1077 | # edi_stack = edi | ||
1078 | movl %edi,76(%esp) | ||
1079 | # ebp_stack = ebp | ||
1080 | movl %ebp,80(%esp) | ||
1081 | # iv = arg2 | ||
1082 | movl 8(%esp,%eax),%ecx | ||
1083 | # x = arg1 | ||
1084 | movl 4(%esp,%eax),%eax | ||
1085 | # in6 = *(uint32 *) (iv + 0) | ||
1086 | movl 0(%ecx),%edx | ||
1087 | # in7 = *(uint32 *) (iv + 4) | ||
1088 | movl 4(%ecx),%ecx | ||
1089 | # in8 = 0 | ||
1090 | mov $0,%ebx | ||
1091 | # in9 = 0 | ||
1092 | mov $0,%esi | ||
1093 | # *(uint32 *) (x + 24) = in6 | ||
1094 | movl %edx,24(%eax) | ||
1095 | # *(uint32 *) (x + 28) = in7 | ||
1096 | movl %ecx,28(%eax) | ||
1097 | # *(uint32 *) (x + 32) = in8 | ||
1098 | movl %ebx,32(%eax) | ||
1099 | # *(uint32 *) (x + 36) = in9 | ||
1100 | movl %esi,36(%eax) | ||
1101 | # eax = eax_stack | ||
1102 | movl 64(%esp),%eax | ||
1103 | # ebx = ebx_stack | ||
1104 | movl 68(%esp),%ebx | ||
1105 | # esi = esi_stack | ||
1106 | movl 72(%esp),%esi | ||
1107 | # edi = edi_stack | ||
1108 | movl 76(%esp),%edi | ||
1109 | # ebp = ebp_stack | ||
1110 | movl 80(%esp),%ebp | ||
1111 | # leave | ||
1112 | add %eax,%esp | ||
1113 | ret | ||
1114 | ENDPROC(salsa20_ivsetup) | ||
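The immediates stored at x + 0, x + 20, x + 40 and x + 60 above are not magic numbers: they are the little-endian words of the Salsa20 input constants, "expand 32-byte k" (sigma, 256-bit keys) and "expand 16-byte k" (tau, 128-bit keys). A standalone C check (illustrative only, not kernel code; assumes a little-endian host, as the x86 assembly does):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* the immediates from the 256-bit and 128-bit key paths above */
	uint32_t sigma[4] = { 1634760805, 857760878, 2036477234, 1797285236 };
	uint32_t tau[4]   = { 1634760805, 824206446, 2036477238, 1797285236 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%.4s", (char *)&sigma[i]);  /* "expand 32-byte k" */
	putchar('\n');
	for (i = 0; i < 4; i++)
		printf("%.4s", (char *)&tau[i]);    /* "expand 16-byte k" */
	putchar('\n');
	return 0;
}
```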
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S index 10db30d58006..03a4918f41ee 100644 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S | |||
@@ -803,117 +803,3 @@ ENTRY(salsa20_encrypt_bytes) | |||
803 | # goto bytesatleast1 | 803 | # goto bytesatleast1 |
804 | jmp ._bytesatleast1 | 804 | jmp ._bytesatleast1 |
805 | ENDPROC(salsa20_encrypt_bytes) | 805 | ENDPROC(salsa20_encrypt_bytes) |
806 | |||
807 | # enter salsa20_keysetup | ||
808 | ENTRY(salsa20_keysetup) | ||
809 | mov %rsp,%r11 | ||
810 | and $31,%r11 | ||
811 | add $256,%r11 | ||
812 | sub %r11,%rsp | ||
813 | # k = arg2 | ||
814 | mov %rsi,%rsi | ||
815 | # kbits = arg3 | ||
816 | mov %rdx,%rdx | ||
817 | # x = arg1 | ||
818 | mov %rdi,%rdi | ||
819 | # in0 = *(uint64 *) (k + 0) | ||
820 | movq 0(%rsi),%r8 | ||
821 | # in2 = *(uint64 *) (k + 8) | ||
822 | movq 8(%rsi),%r9 | ||
823 | # *(uint64 *) (x + 4) = in0 | ||
824 | movq %r8,4(%rdi) | ||
825 | # *(uint64 *) (x + 12) = in2 | ||
826 | movq %r9,12(%rdi) | ||
827 | # unsigned<? kbits - 256 | ||
828 | cmp $256,%rdx | ||
829 | # comment:fp stack unchanged by jump | ||
830 | # goto kbits128 if unsigned< | ||
831 | jb ._kbits128 | ||
832 | # kbits256: | ||
833 | ._kbits256: | ||
834 | # in10 = *(uint64 *) (k + 16) | ||
835 | movq 16(%rsi),%rdx | ||
836 | # in12 = *(uint64 *) (k + 24) | ||
837 | movq 24(%rsi),%rsi | ||
838 | # *(uint64 *) (x + 44) = in10 | ||
839 | movq %rdx,44(%rdi) | ||
840 | # *(uint64 *) (x + 52) = in12 | ||
841 | movq %rsi,52(%rdi) | ||
842 | # in0 = 1634760805 | ||
843 | mov $1634760805,%rsi | ||
844 | # in4 = 857760878 | ||
845 | mov $857760878,%rdx | ||
846 | # in10 = 2036477234 | ||
847 | mov $2036477234,%rcx | ||
848 | # in14 = 1797285236 | ||
849 | mov $1797285236,%r8 | ||
850 | # *(uint32 *) (x + 0) = in0 | ||
851 | movl %esi,0(%rdi) | ||
852 | # *(uint32 *) (x + 20) = in4 | ||
853 | movl %edx,20(%rdi) | ||
854 | # *(uint32 *) (x + 40) = in10 | ||
855 | movl %ecx,40(%rdi) | ||
856 | # *(uint32 *) (x + 60) = in14 | ||
857 | movl %r8d,60(%rdi) | ||
858 | # comment:fp stack unchanged by jump | ||
859 | # goto keysetupdone | ||
860 | jmp ._keysetupdone | ||
861 | # kbits128: | ||
862 | ._kbits128: | ||
863 | # in10 = *(uint64 *) (k + 0) | ||
864 | movq 0(%rsi),%rdx | ||
865 | # in12 = *(uint64 *) (k + 8) | ||
866 | movq 8(%rsi),%rsi | ||
867 | # *(uint64 *) (x + 44) = in10 | ||
868 | movq %rdx,44(%rdi) | ||
869 | # *(uint64 *) (x + 52) = in12 | ||
870 | movq %rsi,52(%rdi) | ||
871 | # in0 = 1634760805 | ||
872 | mov $1634760805,%rsi | ||
873 | # in4 = 824206446 | ||
874 | mov $824206446,%rdx | ||
875 | # in10 = 2036477238 | ||
876 | mov $2036477238,%rcx | ||
877 | # in14 = 1797285236 | ||
878 | mov $1797285236,%r8 | ||
879 | # *(uint32 *) (x + 0) = in0 | ||
880 | movl %esi,0(%rdi) | ||
881 | # *(uint32 *) (x + 20) = in4 | ||
882 | movl %edx,20(%rdi) | ||
883 | # *(uint32 *) (x + 40) = in10 | ||
884 | movl %ecx,40(%rdi) | ||
885 | # *(uint32 *) (x + 60) = in14 | ||
886 | movl %r8d,60(%rdi) | ||
887 | # keysetupdone: | ||
888 | ._keysetupdone: | ||
889 | # leave | ||
890 | add %r11,%rsp | ||
891 | mov %rdi,%rax | ||
892 | mov %rsi,%rdx | ||
893 | ret | ||
894 | ENDPROC(salsa20_keysetup) | ||
895 | |||
896 | # enter salsa20_ivsetup | ||
897 | ENTRY(salsa20_ivsetup) | ||
898 | mov %rsp,%r11 | ||
899 | and $31,%r11 | ||
900 | add $256,%r11 | ||
901 | sub %r11,%rsp | ||
902 | # iv = arg2 | ||
903 | mov %rsi,%rsi | ||
904 | # x = arg1 | ||
905 | mov %rdi,%rdi | ||
906 | # in6 = *(uint64 *) (iv + 0) | ||
907 | movq 0(%rsi),%rsi | ||
908 | # in8 = 0 | ||
909 | mov $0,%r8 | ||
910 | # *(uint64 *) (x + 24) = in6 | ||
911 | movq %rsi,24(%rdi) | ||
912 | # *(uint64 *) (x + 32) = in8 | ||
913 | movq %r8,32(%rdi) | ||
914 | # leave | ||
915 | add %r11,%rsp | ||
916 | mov %rdi,%rax | ||
917 | mov %rsi,%rdx | ||
918 | ret | ||
919 | ENDPROC(salsa20_ivsetup) | ||
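The deleted salsa20_keysetup/salsa20_ivsetup routines did nothing but populate the 16-word Salsa20 state, which is why they can be replaced by generic C (the glue change below calls crypto_salsa20_init() instead). A sketch of the layout the assembly produced, with assumed names and the key words already loaded little-endian:

```c
#include <stdint.h>
#include <string.h>

static void salsa20_init_state(uint32_t state[16], const uint32_t key[8],
			       int kbits, const uint8_t iv[8])
{
	int k256 = (kbits == 256);

	state[0]  = 0x61707865;                      /* x + 0:  "expa"        */
	memcpy(&state[1], key, 16);                  /* x + 4:  key words 0-3 */
	state[5]  = k256 ? 0x3320646e : 0x3120646e;  /* x + 20: "nd 3"/"nd 1" */
	memcpy(&state[6], iv, 8);                    /* x + 24: 64-bit nonce  */
	state[8]  = 0;                               /* x + 32: 64-bit block  */
	state[9]  = 0;                               /*         counter       */
	state[10] = k256 ? 0x79622d32 : 0x79622d36;  /* x + 40: "2-by"/"6-by" */
	memcpy(&state[11], key + (k256 ? 4 : 0), 16);/* x + 44: key words 4-7,
							or 0-3 again for
							128-bit keys          */
	state[15] = 0x6b206574;                      /* x + 60: "te k"        */
}
```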
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c index cb91a64a99e7..b07d7d959806 100644 --- a/arch/x86/crypto/salsa20_glue.c +++ b/arch/x86/crypto/salsa20_glue.c | |||
@@ -11,6 +11,9 @@ | |||
11 | * - x86-64 version, renamed as salsa20-x86_64-asm_64.S | 11 | * - x86-64 version, renamed as salsa20-x86_64-asm_64.S |
12 | * available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s> | 12 | * available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s> |
13 | * | 13 | * |
14 | * Also modified to set up the initial state using the generic C code rather | ||
15 | * than in assembly. | ||
16 | * | ||
14 | * This program is free software; you can redistribute it and/or modify it | 17 | * This program is free software; you can redistribute it and/or modify it |
15 | * under the terms of the GNU General Public License as published by the Free | 18 | * under the terms of the GNU General Public License as published by the Free |
16 | * Software Foundation; either version 2 of the License, or (at your option) | 19 | * Software Foundation; either version 2 of the License, or (at your option) |
@@ -18,93 +21,65 @@ | |||
18 | * | 21 | * |
19 | */ | 22 | */ |
20 | 23 | ||
21 | #include <crypto/algapi.h> | 24 | #include <asm/unaligned.h> |
25 | #include <crypto/internal/skcipher.h> | ||
26 | #include <crypto/salsa20.h> | ||
22 | #include <linux/module.h> | 27 | #include <linux/module.h> |
23 | #include <linux/crypto.h> | ||
24 | |||
25 | #define SALSA20_IV_SIZE 8U | ||
26 | #define SALSA20_MIN_KEY_SIZE 16U | ||
27 | #define SALSA20_MAX_KEY_SIZE 32U | ||
28 | |||
29 | struct salsa20_ctx | ||
30 | { | ||
31 | u32 input[16]; | ||
32 | }; | ||
33 | 28 | ||
34 | asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, | 29 | asmlinkage void salsa20_encrypt_bytes(u32 state[16], const u8 *src, u8 *dst, |
35 | u32 keysize, u32 ivsize); | 30 | u32 bytes); |
36 | asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv); | ||
37 | asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, | ||
38 | const u8 *src, u8 *dst, u32 bytes); | ||
39 | 31 | ||
40 | static int setkey(struct crypto_tfm *tfm, const u8 *key, | 32 | static int salsa20_asm_crypt(struct skcipher_request *req) |
41 | unsigned int keysize) | ||
42 | { | 33 | { |
43 | struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); | 34 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
44 | salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8); | 35 | const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); |
45 | return 0; | 36 | struct skcipher_walk walk; |
46 | } | 37 | u32 state[16]; |
47 | |||
48 | static int encrypt(struct blkcipher_desc *desc, | ||
49 | struct scatterlist *dst, struct scatterlist *src, | ||
50 | unsigned int nbytes) | ||
51 | { | ||
52 | struct blkcipher_walk walk; | ||
53 | struct crypto_blkcipher *tfm = desc->tfm; | ||
54 | struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); | ||
55 | int err; | 38 | int err; |
56 | 39 | ||
57 | blkcipher_walk_init(&walk, dst, src, nbytes); | 40 | err = skcipher_walk_virt(&walk, req, true); |
58 | err = blkcipher_walk_virt_block(desc, &walk, 64); | ||
59 | 41 | ||
60 | salsa20_ivsetup(ctx, walk.iv); | 42 | crypto_salsa20_init(state, ctx, walk.iv); |
61 | 43 | ||
62 | while (walk.nbytes >= 64) { | 44 | while (walk.nbytes > 0) { |
63 | salsa20_encrypt_bytes(ctx, walk.src.virt.addr, | 45 | unsigned int nbytes = walk.nbytes; |
64 | walk.dst.virt.addr, | ||
65 | walk.nbytes - (walk.nbytes % 64)); | ||
66 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); | ||
67 | } | ||
68 | 46 | ||
69 | if (walk.nbytes) { | 47 | if (nbytes < walk.total) |
70 | salsa20_encrypt_bytes(ctx, walk.src.virt.addr, | 48 | nbytes = round_down(nbytes, walk.stride); |
71 | walk.dst.virt.addr, walk.nbytes); | 49 | |
72 | err = blkcipher_walk_done(desc, &walk, 0); | 50 | salsa20_encrypt_bytes(state, walk.src.virt.addr, |
51 | walk.dst.virt.addr, nbytes); | ||
52 | err = skcipher_walk_done(&walk, walk.nbytes - nbytes); | ||
73 | } | 53 | } |
74 | 54 | ||
75 | return err; | 55 | return err; |
76 | } | 56 | } |
77 | 57 | ||
78 | static struct crypto_alg alg = { | 58 | static struct skcipher_alg alg = { |
79 | .cra_name = "salsa20", | 59 | .base.cra_name = "salsa20", |
80 | .cra_driver_name = "salsa20-asm", | 60 | .base.cra_driver_name = "salsa20-asm", |
81 | .cra_priority = 200, | 61 | .base.cra_priority = 200, |
82 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | 62 | .base.cra_blocksize = 1, |
83 | .cra_type = &crypto_blkcipher_type, | 63 | .base.cra_ctxsize = sizeof(struct salsa20_ctx), |
84 | .cra_blocksize = 1, | 64 | .base.cra_module = THIS_MODULE, |
85 | .cra_ctxsize = sizeof(struct salsa20_ctx), | 65 | |
86 | .cra_alignmask = 3, | 66 | .min_keysize = SALSA20_MIN_KEY_SIZE, |
87 | .cra_module = THIS_MODULE, | 67 | .max_keysize = SALSA20_MAX_KEY_SIZE, |
88 | .cra_u = { | 68 | .ivsize = SALSA20_IV_SIZE, |
89 | .blkcipher = { | 69 | .chunksize = SALSA20_BLOCK_SIZE, |
90 | .setkey = setkey, | 70 | .setkey = crypto_salsa20_setkey, |
91 | .encrypt = encrypt, | 71 | .encrypt = salsa20_asm_crypt, |
92 | .decrypt = encrypt, | 72 | .decrypt = salsa20_asm_crypt, |
93 | .min_keysize = SALSA20_MIN_KEY_SIZE, | ||
94 | .max_keysize = SALSA20_MAX_KEY_SIZE, | ||
95 | .ivsize = SALSA20_IV_SIZE, | ||
96 | } | ||
97 | } | ||
98 | }; | 73 | }; |
99 | 74 | ||
100 | static int __init init(void) | 75 | static int __init init(void) |
101 | { | 76 | { |
102 | return crypto_register_alg(&alg); | 77 | return crypto_register_skcipher(&alg); |
103 | } | 78 | } |
104 | 79 | ||
105 | static void __exit fini(void) | 80 | static void __exit fini(void) |
106 | { | 81 | { |
107 | crypto_unregister_alg(&alg); | 82 | crypto_unregister_skcipher(&alg); |
108 | } | 83 | } |
109 | 84 | ||
110 | module_init(init); | 85 | module_init(init); |
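The new skcipher walk loop replaces the blkcipher one: every chunk except the last is rounded down to walk.stride (one 64-byte Salsa20 block), so the keystream position stays block-aligned and only the final chunk may be partial. A userspace model of that chunking rule (illustrative, not kernel code):

```c
#include <stdio.h>

#define STRIDE 64	/* SALSA20_BLOCK_SIZE */

/* mirrors: if (nbytes < walk.total) nbytes = round_down(nbytes, stride) */
static unsigned int chunk(unsigned int offered, unsigned int remaining)
{
	if (offered < remaining)
		offered -= offered % STRIDE;
	return offered;
}

int main(void)
{
	/* a 200-byte request presented as a 150-byte view, then the rest */
	printf("%u\n", chunk(150, 200));	/* 128: stay block-aligned */
	printf("%u\n", chunk(72, 72));		/* 72: last chunk may be partial */
	return 0;
}
```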
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index 1c3b7ceb36d2..e7273a606a07 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S | |||
@@ -55,29 +55,31 @@ | |||
55 | #define RAB1bl %bl | 55 | #define RAB1bl %bl |
56 | #define RAB2bl %cl | 56 | #define RAB2bl %cl |
57 | 57 | ||
58 | #define CD0 0x0(%rsp) | ||
59 | #define CD1 0x8(%rsp) | ||
60 | #define CD2 0x10(%rsp) | ||
61 | |||
62 | # used only before/after all rounds | ||
58 | #define RCD0 %r8 | 63 | #define RCD0 %r8 |
59 | #define RCD1 %r9 | 64 | #define RCD1 %r9 |
60 | #define RCD2 %r10 | 65 | #define RCD2 %r10 |
61 | 66 | ||
62 | #define RCD0d %r8d | 67 | # used only during rounds |
63 | #define RCD1d %r9d | 68 | #define RX0 %r8 |
64 | #define RCD2d %r10d | 69 | #define RX1 %r9 |
65 | 70 | #define RX2 %r10 | |
66 | #define RX0 %rbp | ||
67 | #define RX1 %r11 | ||
68 | #define RX2 %r12 | ||
69 | 71 | ||
70 | #define RX0d %ebp | 72 | #define RX0d %r8d |
71 | #define RX1d %r11d | 73 | #define RX1d %r9d |
72 | #define RX2d %r12d | 74 | #define RX2d %r10d |
73 | 75 | ||
74 | #define RY0 %r13 | 76 | #define RY0 %r11 |
75 | #define RY1 %r14 | 77 | #define RY1 %r12 |
76 | #define RY2 %r15 | 78 | #define RY2 %r13 |
77 | 79 | ||
78 | #define RY0d %r13d | 80 | #define RY0d %r11d |
79 | #define RY1d %r14d | 81 | #define RY1d %r12d |
80 | #define RY2d %r15d | 82 | #define RY2d %r13d |
81 | 83 | ||
82 | #define RT0 %rdx | 84 | #define RT0 %rdx |
83 | #define RT1 %rsi | 85 | #define RT1 %rsi |
@@ -85,6 +87,8 @@ | |||
85 | #define RT0d %edx | 87 | #define RT0d %edx |
86 | #define RT1d %esi | 88 | #define RT1d %esi |
87 | 89 | ||
90 | #define RT1bl %sil | ||
91 | |||
88 | #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ | 92 | #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ |
89 | movzbl ab ## bl, tmp2 ## d; \ | 93 | movzbl ab ## bl, tmp2 ## d; \ |
90 | movzbl ab ## bh, tmp1 ## d; \ | 94 | movzbl ab ## bh, tmp1 ## d; \ |
@@ -92,6 +96,11 @@ | |||
92 | op1##l T0(CTX, tmp2, 4), dst ## d; \ | 96 | op1##l T0(CTX, tmp2, 4), dst ## d; \ |
93 | op2##l T1(CTX, tmp1, 4), dst ## d; | 97 | op2##l T1(CTX, tmp1, 4), dst ## d; |
94 | 98 | ||
99 | #define swap_ab_with_cd(ab, cd, tmp) \ | ||
100 | movq cd, tmp; \ | ||
101 | movq ab, cd; \ | ||
102 | movq tmp, ab; | ||
103 | |||
95 | /* | 104 | /* |
96 | * Combined G1 & G2 function. Reordered with help of rotates to have moves | 105 | * Combined G1 & G2 function. Reordered with help of rotates to have moves |
97 | * at beginning. | 106 |
@@ -110,15 +119,15 @@ | |||
110 | /* G1,2 && G2,2 */ \ | 119 | /* G1,2 && G2,2 */ \ |
111 | do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ | 120 | do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ |
112 | do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ | 121 | do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ |
113 | xchgq cd ## 0, ab ## 0; \ | 122 | swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ |
114 | \ | 123 | \ |
115 | do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ | 124 | do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ |
116 | do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ | 125 | do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ |
117 | xchgq cd ## 1, ab ## 1; \ | 126 | swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ |
118 | \ | 127 | \ |
119 | do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ | 128 | do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ |
120 | do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ | 129 | do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ |
121 | xchgq cd ## 2, ab ## 2; | 130 | swap_ab_with_cd(ab ## 2, cd ## 2, RT0); |
122 | 131 | ||
123 | #define enc_round_end(ab, x, y, n) \ | 132 | #define enc_round_end(ab, x, y, n) \ |
124 | addl y ## d, x ## d; \ | 133 | addl y ## d, x ## d; \ |
@@ -168,6 +177,16 @@ | |||
168 | decrypt_round3(ba, dc, (n*2)+1); \ | 177 | decrypt_round3(ba, dc, (n*2)+1); \ |
169 | decrypt_round3(ba, dc, (n*2)); | 178 | decrypt_round3(ba, dc, (n*2)); |
170 | 179 | ||
180 | #define push_cd() \ | ||
181 | pushq RCD2; \ | ||
182 | pushq RCD1; \ | ||
183 | pushq RCD0; | ||
184 | |||
185 | #define pop_cd() \ | ||
186 | popq RCD0; \ | ||
187 | popq RCD1; \ | ||
188 | popq RCD2; | ||
189 | |||
171 | #define inpack3(in, n, xy, m) \ | 190 | #define inpack3(in, n, xy, m) \ |
172 | movq 4*(n)(in), xy ## 0; \ | 191 | movq 4*(n)(in), xy ## 0; \ |
173 | xorq w+4*m(CTX), xy ## 0; \ | 192 | xorq w+4*m(CTX), xy ## 0; \ |
@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way) | |||
223 | * %rdx: src, RIO | 242 | * %rdx: src, RIO |
224 | * %rcx: bool, if true: xor output | 243 | * %rcx: bool, if true: xor output |
225 | */ | 244 | */ |
226 | pushq %r15; | ||
227 | pushq %r14; | ||
228 | pushq %r13; | 245 | pushq %r13; |
229 | pushq %r12; | 246 | pushq %r12; |
230 | pushq %rbp; | ||
231 | pushq %rbx; | 247 | pushq %rbx; |
232 | 248 | ||
233 | pushq %rcx; /* bool xor */ | 249 | pushq %rcx; /* bool xor */ |
@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way) | |||
235 | 251 | ||
236 | inpack_enc3(); | 252 | inpack_enc3(); |
237 | 253 | ||
238 | encrypt_cycle3(RAB, RCD, 0); | 254 | push_cd(); |
239 | encrypt_cycle3(RAB, RCD, 1); | 255 | encrypt_cycle3(RAB, CD, 0); |
240 | encrypt_cycle3(RAB, RCD, 2); | 256 | encrypt_cycle3(RAB, CD, 1); |
241 | encrypt_cycle3(RAB, RCD, 3); | 257 | encrypt_cycle3(RAB, CD, 2); |
242 | encrypt_cycle3(RAB, RCD, 4); | 258 | encrypt_cycle3(RAB, CD, 3); |
243 | encrypt_cycle3(RAB, RCD, 5); | 259 | encrypt_cycle3(RAB, CD, 4); |
244 | encrypt_cycle3(RAB, RCD, 6); | 260 | encrypt_cycle3(RAB, CD, 5); |
245 | encrypt_cycle3(RAB, RCD, 7); | 261 | encrypt_cycle3(RAB, CD, 6); |
262 | encrypt_cycle3(RAB, CD, 7); | ||
263 | pop_cd(); | ||
246 | 264 | ||
247 | popq RIO; /* dst */ | 265 | popq RIO; /* dst */ |
248 | popq %rbp; /* bool xor */ | 266 | popq RT1; /* bool xor */ |
249 | 267 | ||
250 | testb %bpl, %bpl; | 268 | testb RT1bl, RT1bl; |
251 | jnz .L__enc_xor3; | 269 | jnz .L__enc_xor3; |
252 | 270 | ||
253 | outunpack_enc3(mov); | 271 | outunpack_enc3(mov); |
254 | 272 | ||
255 | popq %rbx; | 273 | popq %rbx; |
256 | popq %rbp; | ||
257 | popq %r12; | 274 | popq %r12; |
258 | popq %r13; | 275 | popq %r13; |
259 | popq %r14; | ||
260 | popq %r15; | ||
261 | ret; | 276 | ret; |
262 | 277 | ||
263 | .L__enc_xor3: | 278 | .L__enc_xor3: |
264 | outunpack_enc3(xor); | 279 | outunpack_enc3(xor); |
265 | 280 | ||
266 | popq %rbx; | 281 | popq %rbx; |
267 | popq %rbp; | ||
268 | popq %r12; | 282 | popq %r12; |
269 | popq %r13; | 283 | popq %r13; |
270 | popq %r14; | ||
271 | popq %r15; | ||
272 | ret; | 284 | ret; |
273 | ENDPROC(__twofish_enc_blk_3way) | 285 | ENDPROC(__twofish_enc_blk_3way) |
274 | 286 | ||
@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way) | |||
278 | * %rsi: dst | 290 | * %rsi: dst |
279 | * %rdx: src, RIO | 291 | * %rdx: src, RIO |
280 | */ | 292 | */ |
281 | pushq %r15; | ||
282 | pushq %r14; | ||
283 | pushq %r13; | 293 | pushq %r13; |
284 | pushq %r12; | 294 | pushq %r12; |
285 | pushq %rbp; | ||
286 | pushq %rbx; | 295 | pushq %rbx; |
287 | 296 | ||
288 | pushq %rsi; /* dst */ | 297 | pushq %rsi; /* dst */ |
289 | 298 | ||
290 | inpack_dec3(); | 299 | inpack_dec3(); |
291 | 300 | ||
292 | decrypt_cycle3(RAB, RCD, 7); | 301 | push_cd(); |
293 | decrypt_cycle3(RAB, RCD, 6); | 302 | decrypt_cycle3(RAB, CD, 7); |
294 | decrypt_cycle3(RAB, RCD, 5); | 303 | decrypt_cycle3(RAB, CD, 6); |
295 | decrypt_cycle3(RAB, RCD, 4); | 304 | decrypt_cycle3(RAB, CD, 5); |
296 | decrypt_cycle3(RAB, RCD, 3); | 305 | decrypt_cycle3(RAB, CD, 4); |
297 | decrypt_cycle3(RAB, RCD, 2); | 306 | decrypt_cycle3(RAB, CD, 3); |
298 | decrypt_cycle3(RAB, RCD, 1); | 307 | decrypt_cycle3(RAB, CD, 2); |
299 | decrypt_cycle3(RAB, RCD, 0); | 308 | decrypt_cycle3(RAB, CD, 1); |
309 | decrypt_cycle3(RAB, CD, 0); | ||
310 | pop_cd(); | ||
300 | 311 | ||
301 | popq RIO; /* dst */ | 312 | popq RIO; /* dst */ |
302 | 313 | ||
303 | outunpack_dec3(); | 314 | outunpack_dec3(); |
304 | 315 | ||
305 | popq %rbx; | 316 | popq %rbx; |
306 | popq %rbp; | ||
307 | popq %r12; | 317 | popq %r12; |
308 | popq %r13; | 318 | popq %r13; |
309 | popq %r14; | ||
310 | popq %r15; | ||
311 | ret; | 319 | ret; |
312 | ENDPROC(twofish_dec_blk_3way) | 320 | ENDPROC(twofish_dec_blk_3way) |
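Two things happen in this rewrite. First, the CD halves are spilled to stack slots around the round loops (push_cd/pop_cd) so that %rbp, %r14 and %r15 are no longer used as scratch registers; the likely motivation is that %rbp cannot double as a data register when the kernel is built with frame pointers. Second, once cd lives in memory, the old xchgq would take a memory operand, and x86 xchg with memory implies a LOCK prefix, so swap_ab_with_cd() does the exchange with three plain moves instead. The macro is equivalent to this C helper:

```c
#include <stdint.h>

/* same move sequence as swap_ab_with_cd(ab, cd, tmp) */
static inline void swap_ab_with_cd(uint64_t *ab, uint64_t *cd, uint64_t *tmp)
{
	*tmp = *cd;
	*cd  = *ab;
	*ab  = *tmp;
}
```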
diff --git a/crypto/Kconfig b/crypto/Kconfig index 20360e040425..b75264b09a46 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -131,7 +131,7 @@ config CRYPTO_DH | |||
131 | 131 | ||
132 | config CRYPTO_ECDH | 132 | config CRYPTO_ECDH |
133 | tristate "ECDH algorithm" | 133 | tristate "ECDH algorithm" |
134 | select CRYTPO_KPP | 134 | select CRYPTO_KPP |
135 | select CRYPTO_RNG_DEFAULT | 135 | select CRYPTO_RNG_DEFAULT |
136 | help | 136 | help |
137 | Generic implementation of the ECDH algorithm | 137 | Generic implementation of the ECDH algorithm |
@@ -1340,6 +1340,7 @@ config CRYPTO_SALSA20_586 | |||
1340 | tristate "Salsa20 stream cipher algorithm (i586)" | 1340 | tristate "Salsa20 stream cipher algorithm (i586)" |
1341 | depends on (X86 || UML_X86) && !64BIT | 1341 | depends on (X86 || UML_X86) && !64BIT |
1342 | select CRYPTO_BLKCIPHER | 1342 | select CRYPTO_BLKCIPHER |
1343 | select CRYPTO_SALSA20 | ||
1343 | help | 1344 | help |
1344 | Salsa20 stream cipher algorithm. | 1345 | Salsa20 stream cipher algorithm. |
1345 | 1346 | ||
@@ -1353,6 +1354,7 @@ config CRYPTO_SALSA20_X86_64 | |||
1353 | tristate "Salsa20 stream cipher algorithm (x86_64)" | 1354 | tristate "Salsa20 stream cipher algorithm (x86_64)" |
1354 | depends on (X86 || UML_X86) && 64BIT | 1355 | depends on (X86 || UML_X86) && 64BIT |
1355 | select CRYPTO_BLKCIPHER | 1356 | select CRYPTO_BLKCIPHER |
1357 | select CRYPTO_SALSA20 | ||
1356 | help | 1358 | help |
1357 | Salsa20 stream cipher algorithm. | 1359 | Salsa20 stream cipher algorithm. |
1358 | 1360 | ||
diff --git a/crypto/Makefile b/crypto/Makefile index d674884b2d51..cdbc03b35510 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o | |||
99 | obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o | 99 | obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o |
100 | CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 | 100 | CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 |
101 | obj-$(CONFIG_CRYPTO_AES) += aes_generic.o | 101 | obj-$(CONFIG_CRYPTO_AES) += aes_generic.o |
102 | CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 | ||
102 | obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o | 103 | obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o |
103 | obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o | 104 | obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o |
104 | obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o | 105 | obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o |
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c index 1441f07d0a19..09776bb1360e 100644 --- a/crypto/ablk_helper.c +++ b/crypto/ablk_helper.c | |||
@@ -18,9 +18,7 @@ | |||
18 | * GNU General Public License for more details. | 18 | * GNU General Public License for more details. |
19 | * | 19 | * |
20 | * You should have received a copy of the GNU General Public License | 20 | * You should have received a copy of the GNU General Public License |
21 | * along with this program; if not, write to the Free Software | 21 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
23 | * USA | ||
24 | * | 22 | * |
25 | */ | 23 | */ |
26 | 24 | ||
@@ -28,7 +26,6 @@ | |||
28 | #include <linux/crypto.h> | 26 | #include <linux/crypto.h> |
29 | #include <linux/init.h> | 27 | #include <linux/init.h> |
30 | #include <linux/module.h> | 28 | #include <linux/module.h> |
31 | #include <linux/hardirq.h> | ||
32 | #include <crypto/algapi.h> | 29 | #include <crypto/algapi.h> |
33 | #include <crypto/cryptd.h> | 30 | #include <crypto/cryptd.h> |
34 | #include <crypto/ablk_helper.h> | 31 | #include <crypto/ablk_helper.h> |
diff --git a/crypto/aead.c b/crypto/aead.c index f794b30a9407..60b3bbe973e7 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
@@ -54,11 +54,18 @@ int crypto_aead_setkey(struct crypto_aead *tfm, | |||
54 | const u8 *key, unsigned int keylen) | 54 | const u8 *key, unsigned int keylen) |
55 | { | 55 | { |
56 | unsigned long alignmask = crypto_aead_alignmask(tfm); | 56 | unsigned long alignmask = crypto_aead_alignmask(tfm); |
57 | int err; | ||
57 | 58 | ||
58 | if ((unsigned long)key & alignmask) | 59 | if ((unsigned long)key & alignmask) |
59 | return setkey_unaligned(tfm, key, keylen); | 60 | err = setkey_unaligned(tfm, key, keylen); |
61 | else | ||
62 | err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen); | ||
63 | |||
64 | if (err) | ||
65 | return err; | ||
60 | 66 | ||
61 | return crypto_aead_alg(tfm)->setkey(tfm, key, keylen); | 67 | crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
68 | return 0; | ||
62 | } | 69 | } |
63 | EXPORT_SYMBOL_GPL(crypto_aead_setkey); | 70 | EXPORT_SYMBOL_GPL(crypto_aead_setkey); |
64 | 71 | ||
@@ -93,6 +100,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm) | |||
93 | struct crypto_aead *aead = __crypto_aead_cast(tfm); | 100 | struct crypto_aead *aead = __crypto_aead_cast(tfm); |
94 | struct aead_alg *alg = crypto_aead_alg(aead); | 101 | struct aead_alg *alg = crypto_aead_alg(aead); |
95 | 102 | ||
103 | crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY); | ||
104 | |||
96 | aead->authsize = alg->maxauthsize; | 105 | aead->authsize = alg->maxauthsize; |
97 | 106 | ||
98 | if (alg->exit) | 107 | if (alg->exit) |
@@ -295,7 +304,7 @@ int aead_init_geniv(struct crypto_aead *aead) | |||
295 | if (err) | 304 | if (err) |
296 | goto out; | 305 | goto out; |
297 | 306 | ||
298 | ctx->sknull = crypto_get_default_null_skcipher2(); | 307 | ctx->sknull = crypto_get_default_null_skcipher(); |
299 | err = PTR_ERR(ctx->sknull); | 308 | err = PTR_ERR(ctx->sknull); |
300 | if (IS_ERR(ctx->sknull)) | 309 | if (IS_ERR(ctx->sknull)) |
301 | goto out; | 310 | goto out; |
@@ -315,7 +324,7 @@ out: | |||
315 | return err; | 324 | return err; |
316 | 325 | ||
317 | drop_null: | 326 | drop_null: |
318 | crypto_put_default_null_skcipher2(); | 327 | crypto_put_default_null_skcipher(); |
319 | goto out; | 328 | goto out; |
320 | } | 329 | } |
321 | EXPORT_SYMBOL_GPL(aead_init_geniv); | 330 | EXPORT_SYMBOL_GPL(aead_init_geniv); |
@@ -325,7 +334,7 @@ void aead_exit_geniv(struct crypto_aead *tfm) | |||
325 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); | 334 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); |
326 | 335 | ||
327 | crypto_free_aead(ctx->child); | 336 | crypto_free_aead(ctx->child); |
328 | crypto_put_default_null_skcipher2(); | 337 | crypto_put_default_null_skcipher(); |
329 | } | 338 | } |
330 | EXPORT_SYMBOL_GPL(aead_exit_geniv); | 339 | EXPORT_SYMBOL_GPL(aead_exit_geniv); |
331 | 340 | ||
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index f41047ab60f5..0f8d8d5523c3 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); | |||
150 | 150 | ||
151 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 151 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
152 | { | 152 | { |
153 | const u32 forbidden = CRYPTO_ALG_INTERNAL; | 153 | const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; |
154 | struct sock *sk = sock->sk; | 154 | struct sock *sk = sock->sk; |
155 | struct alg_sock *ask = alg_sk(sk); | 155 | struct alg_sock *ask = alg_sk(sk); |
156 | struct sockaddr_alg *sa = (void *)uaddr; | 156 | struct sockaddr_alg *sa = (void *)uaddr; |
@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
158 | void *private; | 158 | void *private; |
159 | int err; | 159 | int err; |
160 | 160 | ||
161 | /* If caller uses non-allowed flag, return error. */ | ||
162 | if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) | ||
163 | return -EINVAL; | ||
164 | |||
161 | if (sock->state == SS_CONNECTED) | 165 | if (sock->state == SS_CONNECTED) |
162 | return -EINVAL; | 166 | return -EINVAL; |
163 | 167 | ||
@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
176 | if (IS_ERR(type)) | 180 | if (IS_ERR(type)) |
177 | return PTR_ERR(type); | 181 | return PTR_ERR(type); |
178 | 182 | ||
179 | private = type->bind(sa->salg_name, | 183 | private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); |
180 | sa->salg_feat & ~forbidden, | ||
181 | sa->salg_mask & ~forbidden); | ||
182 | if (IS_ERR(private)) { | 184 | if (IS_ERR(private)) { |
183 | module_put(type->owner); | 185 | module_put(type->owner); |
184 | return PTR_ERR(private); | 186 | return PTR_ERR(private); |
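The bind() path used to strip CRYPTO_ALG_INTERNAL from salg_feat/salg_mask silently; it now rejects any bit other than CRYPTO_ALG_KERN_DRIVER_ONLY with -EINVAL, so userspace passing stray flags gets an explicit error. A minimal AF_ALG bind for reference (assumes CONFIG_CRYPTO_USER_API_HASH; the flag fields are left at zero, which remains valid):

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		/* salg_feat/salg_mask = 0; any bit other than
		 * CRYPTO_ALG_KERN_DRIVER_ONLY now fails with EINVAL */
	};
	int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	strcpy((char *)sa.salg_name, "sha256");
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("bind");
	return 0;
}
```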
diff --git a/crypto/ahash.c b/crypto/ahash.c index 3a35d67de7d9..266fc1d64f61 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
193 | unsigned int keylen) | 193 | unsigned int keylen) |
194 | { | 194 | { |
195 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | 195 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
196 | int err; | ||
196 | 197 | ||
197 | if ((unsigned long)key & alignmask) | 198 | if ((unsigned long)key & alignmask) |
198 | return ahash_setkey_unaligned(tfm, key, keylen); | 199 | err = ahash_setkey_unaligned(tfm, key, keylen); |
200 | else | ||
201 | err = tfm->setkey(tfm, key, keylen); | ||
202 | |||
203 | if (err) | ||
204 | return err; | ||
199 | 205 | ||
200 | return tfm->setkey(tfm, key, keylen); | 206 | crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
207 | return 0; | ||
201 | } | 208 | } |
202 | EXPORT_SYMBOL_GPL(crypto_ahash_setkey); | 209 | EXPORT_SYMBOL_GPL(crypto_ahash_setkey); |
203 | 210 | ||
@@ -368,7 +375,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); | |||
368 | 375 | ||
369 | int crypto_ahash_digest(struct ahash_request *req) | 376 | int crypto_ahash_digest(struct ahash_request *req) |
370 | { | 377 | { |
371 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); | 378 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
379 | |||
380 | if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
381 | return -ENOKEY; | ||
382 | |||
383 | return crypto_ahash_op(req, tfm->digest); | ||
372 | } | 384 | } |
373 | EXPORT_SYMBOL_GPL(crypto_ahash_digest); | 385 | EXPORT_SYMBOL_GPL(crypto_ahash_digest); |
374 | 386 | ||
@@ -450,7 +462,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) | |||
450 | struct ahash_alg *alg = crypto_ahash_alg(hash); | 462 | struct ahash_alg *alg = crypto_ahash_alg(hash); |
451 | 463 | ||
452 | hash->setkey = ahash_nosetkey; | 464 | hash->setkey = ahash_nosetkey; |
453 | hash->has_setkey = false; | ||
454 | hash->export = ahash_no_export; | 465 | hash->export = ahash_no_export; |
455 | hash->import = ahash_no_import; | 466 | hash->import = ahash_no_import; |
456 | 467 | ||
@@ -465,7 +476,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) | |||
465 | 476 | ||
466 | if (alg->setkey) { | 477 | if (alg->setkey) { |
467 | hash->setkey = alg->setkey; | 478 | hash->setkey = alg->setkey; |
468 | hash->has_setkey = true; | 479 | if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) |
480 | crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); | ||
469 | } | 481 | } |
470 | if (alg->export) | 482 | if (alg->export) |
471 | hash->export = alg->export; | 483 | hash->export = alg->export; |
@@ -649,5 +661,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | |||
649 | } | 661 | } |
650 | EXPORT_SYMBOL_GPL(ahash_attr_alg); | 662 | EXPORT_SYMBOL_GPL(ahash_attr_alg); |
651 | 663 | ||
664 | bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) | ||
665 | { | ||
666 | struct crypto_alg *alg = &halg->base; | ||
667 | |||
668 | if (alg->cra_type != &crypto_ahash_type) | ||
669 | return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); | ||
670 | |||
671 | return __crypto_ahash_alg(alg)->setkey != NULL; | ||
672 | } | ||
673 | EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); | ||
674 | |||
652 | MODULE_LICENSE("GPL"); | 675 | MODULE_LICENSE("GPL"); |
653 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); | 676 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |
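This is the core of the series' keyed-hash enforcement: CRYPTO_TFM_NEED_KEY is set at tfm creation for any algorithm whose setkey is mandatory, cleared by the first successful setkey, and checked on the digest fast path so unkeyed use fails with -ENOKEY. The algif_aead/algif_hash/algif_skcipher hunks below then drop their private has_key bookkeeping and read this flag instead. A userspace model of the lifecycle (hypothetical names; Linux errno values):

```c
#include <errno.h>
#include <stdio.h>

#define TFM_NEED_KEY 0x1

struct tfm { unsigned int flags; };

static void tfm_init(struct tfm *t)
{
	t->flags |= TFM_NEED_KEY;	/* as in crypto_ahash_init_tfm() */
}

static int tfm_setkey(struct tfm *t, int key_ok)
{
	if (!key_ok)
		return -EINVAL;		/* flag stays set on failure */
	t->flags &= ~TFM_NEED_KEY;	/* as in crypto_ahash_setkey() */
	return 0;
}

static int tfm_digest(struct tfm *t)
{
	if (t->flags & TFM_NEED_KEY)
		return -ENOKEY;		/* as in crypto_ahash_digest() */
	return 0;
}

int main(void)
{
	struct tfm t = { 0 };

	tfm_init(&t);
	printf("digest before setkey: %d\n", tfm_digest(&t)); /* -ENOKEY */
	tfm_setkey(&t, 1);
	printf("digest after setkey:  %d\n", tfm_digest(&t)); /* 0 */
	return 0;
}
```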
diff --git a/crypto/algapi.c b/crypto/algapi.c index 9a636f961572..395b082d03a9 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -62,7 +62,7 @@ static int crypto_check_alg(struct crypto_alg *alg) | |||
62 | if (alg->cra_priority < 0) | 62 | if (alg->cra_priority < 0) |
63 | return -EINVAL; | 63 | return -EINVAL; |
64 | 64 | ||
65 | atomic_set(&alg->cra_refcnt, 1); | 65 | refcount_set(&alg->cra_refcnt, 1); |
66 | 66 | ||
67 | return crypto_set_driver_name(alg); | 67 | return crypto_set_driver_name(alg); |
68 | } | 68 | } |
@@ -123,7 +123,6 @@ static void crypto_remove_instance(struct crypto_instance *inst, | |||
123 | if (!tmpl || !crypto_tmpl_get(tmpl)) | 123 | if (!tmpl || !crypto_tmpl_get(tmpl)) |
124 | return; | 124 | return; |
125 | 125 | ||
126 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); | ||
127 | list_move(&inst->alg.cra_list, list); | 126 | list_move(&inst->alg.cra_list, list); |
128 | hlist_del(&inst->list); | 127 | hlist_del(&inst->list); |
129 | inst->alg.cra_destroy = crypto_destroy_instance; | 128 | inst->alg.cra_destroy = crypto_destroy_instance; |
@@ -236,7 +235,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) | |||
236 | if (!larval->adult) | 235 | if (!larval->adult) |
237 | goto free_larval; | 236 | goto free_larval; |
238 | 237 | ||
239 | atomic_set(&larval->alg.cra_refcnt, 1); | 238 | refcount_set(&larval->alg.cra_refcnt, 1); |
240 | memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, | 239 | memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, |
241 | CRYPTO_MAX_ALG_NAME); | 240 | CRYPTO_MAX_ALG_NAME); |
242 | larval->alg.cra_priority = alg->cra_priority; | 241 | larval->alg.cra_priority = alg->cra_priority; |
@@ -392,7 +391,6 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) | |||
392 | 391 | ||
393 | alg->cra_flags |= CRYPTO_ALG_DEAD; | 392 | alg->cra_flags |= CRYPTO_ALG_DEAD; |
394 | 393 | ||
395 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); | ||
396 | list_del_init(&alg->cra_list); | 394 | list_del_init(&alg->cra_list); |
397 | crypto_remove_spawns(alg, list, NULL); | 395 | crypto_remove_spawns(alg, list, NULL); |
398 | 396 | ||
@@ -411,7 +409,7 @@ int crypto_unregister_alg(struct crypto_alg *alg) | |||
411 | if (ret) | 409 | if (ret) |
412 | return ret; | 410 | return ret; |
413 | 411 | ||
414 | BUG_ON(atomic_read(&alg->cra_refcnt) != 1); | 412 | BUG_ON(refcount_read(&alg->cra_refcnt) != 1); |
415 | if (alg->cra_destroy) | 413 | if (alg->cra_destroy) |
416 | alg->cra_destroy(alg); | 414 | alg->cra_destroy(alg); |
417 | 415 | ||
@@ -470,7 +468,6 @@ int crypto_register_template(struct crypto_template *tmpl) | |||
470 | } | 468 | } |
471 | 469 | ||
472 | list_add(&tmpl->list, &crypto_template_list); | 470 | list_add(&tmpl->list, &crypto_template_list); |
473 | crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl); | ||
474 | err = 0; | 471 | err = 0; |
475 | out: | 472 | out: |
476 | up_write(&crypto_alg_sem); | 473 | up_write(&crypto_alg_sem); |
@@ -497,12 +494,10 @@ void crypto_unregister_template(struct crypto_template *tmpl) | |||
497 | BUG_ON(err); | 494 | BUG_ON(err); |
498 | } | 495 | } |
499 | 496 | ||
500 | crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl); | ||
501 | |||
502 | up_write(&crypto_alg_sem); | 497 | up_write(&crypto_alg_sem); |
503 | 498 | ||
504 | hlist_for_each_entry_safe(inst, n, list, list) { | 499 | hlist_for_each_entry_safe(inst, n, list, list) { |
505 | BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); | 500 | BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1); |
506 | crypto_free_instance(inst); | 501 | crypto_free_instance(inst); |
507 | } | 502 | } |
508 | crypto_remove_final(&users); | 503 | crypto_remove_final(&users); |
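cra_refcnt moves from atomic_t to refcount_t, whose operations saturate and warn on suspicious transitions (increment-from-zero, overflow) instead of silently wrapping. Roughly, in userspace terms (a model only, not the kernel implementation):

```c
#include <stdio.h>
#include <limits.h>

static unsigned int refcount_inc_model(unsigned int r)
{
	if (r == 0)
		fprintf(stderr, "WARN: increment on zero (use-after-free?)\n");
	if (r >= UINT_MAX - 1)
		return UINT_MAX;	/* saturate, never wrap back to zero */
	return r + 1;
}

int main(void)
{
	printf("%u\n", refcount_inc_model(1));		/* 2 */
	printf("%u\n", refcount_inc_model(UINT_MAX));	/* stays saturated */
	return 0;
}
```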
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index e9885a35ef6e..4b07edd5a9ff 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
@@ -42,7 +42,6 @@ | |||
42 | 42 | ||
43 | struct aead_tfm { | 43 | struct aead_tfm { |
44 | struct crypto_aead *aead; | 44 | struct crypto_aead *aead; |
45 | bool has_key; | ||
46 | struct crypto_skcipher *null_tfm; | 45 | struct crypto_skcipher *null_tfm; |
47 | }; | 46 | }; |
48 | 47 | ||
@@ -398,7 +397,7 @@ static int aead_check_key(struct socket *sock) | |||
398 | 397 | ||
399 | err = -ENOKEY; | 398 | err = -ENOKEY; |
400 | lock_sock_nested(psk, SINGLE_DEPTH_NESTING); | 399 | lock_sock_nested(psk, SINGLE_DEPTH_NESTING); |
401 | if (!tfm->has_key) | 400 | if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) |
402 | goto unlock; | 401 | goto unlock; |
403 | 402 | ||
404 | if (!pask->refcnt++) | 403 | if (!pask->refcnt++) |
@@ -491,7 +490,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask) | |||
491 | return ERR_CAST(aead); | 490 | return ERR_CAST(aead); |
492 | } | 491 | } |
493 | 492 | ||
494 | null_tfm = crypto_get_default_null_skcipher2(); | 493 | null_tfm = crypto_get_default_null_skcipher(); |
495 | if (IS_ERR(null_tfm)) { | 494 | if (IS_ERR(null_tfm)) { |
496 | crypto_free_aead(aead); | 495 | crypto_free_aead(aead); |
497 | kfree(tfm); | 496 | kfree(tfm); |
@@ -509,7 +508,7 @@ static void aead_release(void *private) | |||
509 | struct aead_tfm *tfm = private; | 508 | struct aead_tfm *tfm = private; |
510 | 509 | ||
511 | crypto_free_aead(tfm->aead); | 510 | crypto_free_aead(tfm->aead); |
512 | crypto_put_default_null_skcipher2(); | 511 | crypto_put_default_null_skcipher(); |
513 | kfree(tfm); | 512 | kfree(tfm); |
514 | } | 513 | } |
515 | 514 | ||
@@ -523,12 +522,8 @@ static int aead_setauthsize(void *private, unsigned int authsize) | |||
523 | static int aead_setkey(void *private, const u8 *key, unsigned int keylen) | 522 | static int aead_setkey(void *private, const u8 *key, unsigned int keylen) |
524 | { | 523 | { |
525 | struct aead_tfm *tfm = private; | 524 | struct aead_tfm *tfm = private; |
526 | int err; | ||
527 | |||
528 | err = crypto_aead_setkey(tfm->aead, key, keylen); | ||
529 | tfm->has_key = !err; | ||
530 | 525 | ||
531 | return err; | 526 | return crypto_aead_setkey(tfm->aead, key, keylen); |
532 | } | 527 | } |
533 | 528 | ||
534 | static void aead_sock_destruct(struct sock *sk) | 529 | static void aead_sock_destruct(struct sock *sk) |
@@ -589,7 +584,7 @@ static int aead_accept_parent(void *private, struct sock *sk) | |||
589 | { | 584 | { |
590 | struct aead_tfm *tfm = private; | 585 | struct aead_tfm *tfm = private; |
591 | 586 | ||
592 | if (!tfm->has_key) | 587 | if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) |
593 | return -ENOKEY; | 588 | return -ENOKEY; |
594 | 589 | ||
595 | return aead_accept_parent_nokey(private, sk); | 590 | return aead_accept_parent_nokey(private, sk); |
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 76d2e716c792..6c9b1927a520 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c | |||
@@ -34,11 +34,6 @@ struct hash_ctx { | |||
34 | struct ahash_request req; | 34 | struct ahash_request req; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct algif_hash_tfm { | ||
38 | struct crypto_ahash *hash; | ||
39 | bool has_key; | ||
40 | }; | ||
41 | |||
42 | static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) | 37 | static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) |
43 | { | 38 | { |
44 | unsigned ds; | 39 | unsigned ds; |
@@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock) | |||
307 | int err = 0; | 302 | int err = 0; |
308 | struct sock *psk; | 303 | struct sock *psk; |
309 | struct alg_sock *pask; | 304 | struct alg_sock *pask; |
310 | struct algif_hash_tfm *tfm; | 305 | struct crypto_ahash *tfm; |
311 | struct sock *sk = sock->sk; | 306 | struct sock *sk = sock->sk; |
312 | struct alg_sock *ask = alg_sk(sk); | 307 | struct alg_sock *ask = alg_sk(sk); |
313 | 308 | ||
@@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock) | |||
321 | 316 | ||
322 | err = -ENOKEY; | 317 | err = -ENOKEY; |
323 | lock_sock_nested(psk, SINGLE_DEPTH_NESTING); | 318 | lock_sock_nested(psk, SINGLE_DEPTH_NESTING); |
324 | if (!tfm->has_key) | 319 | if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
325 | goto unlock; | 320 | goto unlock; |
326 | 321 | ||
327 | if (!pask->refcnt++) | 322 | if (!pask->refcnt++) |
@@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = { | |||
412 | 407 | ||
413 | static void *hash_bind(const char *name, u32 type, u32 mask) | 408 | static void *hash_bind(const char *name, u32 type, u32 mask) |
414 | { | 409 | { |
415 | struct algif_hash_tfm *tfm; | 410 | return crypto_alloc_ahash(name, type, mask); |
416 | struct crypto_ahash *hash; | ||
417 | |||
418 | tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); | ||
419 | if (!tfm) | ||
420 | return ERR_PTR(-ENOMEM); | ||
421 | |||
422 | hash = crypto_alloc_ahash(name, type, mask); | ||
423 | if (IS_ERR(hash)) { | ||
424 | kfree(tfm); | ||
425 | return ERR_CAST(hash); | ||
426 | } | ||
427 | |||
428 | tfm->hash = hash; | ||
429 | |||
430 | return tfm; | ||
431 | } | 411 | } |
432 | 412 | ||
433 | static void hash_release(void *private) | 413 | static void hash_release(void *private) |
434 | { | 414 | { |
435 | struct algif_hash_tfm *tfm = private; | 415 | crypto_free_ahash(private); |
436 | |||
437 | crypto_free_ahash(tfm->hash); | ||
438 | kfree(tfm); | ||
439 | } | 416 | } |
440 | 417 | ||
441 | static int hash_setkey(void *private, const u8 *key, unsigned int keylen) | 418 | static int hash_setkey(void *private, const u8 *key, unsigned int keylen) |
442 | { | 419 | { |
443 | struct algif_hash_tfm *tfm = private; | 420 | return crypto_ahash_setkey(private, key, keylen); |
444 | int err; | ||
445 | |||
446 | err = crypto_ahash_setkey(tfm->hash, key, keylen); | ||
447 | tfm->has_key = !err; | ||
448 | |||
449 | return err; | ||
450 | } | 421 | } |
451 | 422 | ||
452 | static void hash_sock_destruct(struct sock *sk) | 423 | static void hash_sock_destruct(struct sock *sk) |
@@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk) | |||
461 | 432 | ||
462 | static int hash_accept_parent_nokey(void *private, struct sock *sk) | 433 | static int hash_accept_parent_nokey(void *private, struct sock *sk) |
463 | { | 434 | { |
464 | struct hash_ctx *ctx; | 435 | struct crypto_ahash *tfm = private; |
465 | struct alg_sock *ask = alg_sk(sk); | 436 | struct alg_sock *ask = alg_sk(sk); |
466 | struct algif_hash_tfm *tfm = private; | 437 | struct hash_ctx *ctx; |
467 | struct crypto_ahash *hash = tfm->hash; | 438 | unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); |
468 | unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); | ||
469 | 439 | ||
470 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); | 440 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); |
471 | if (!ctx) | 441 | if (!ctx) |
@@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) | |||
478 | 448 | ||
479 | ask->private = ctx; | 449 | ask->private = ctx; |
480 | 450 | ||
481 | ahash_request_set_tfm(&ctx->req, hash); | 451 | ahash_request_set_tfm(&ctx->req, tfm); |
482 | ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 452 | ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
483 | crypto_req_done, &ctx->wait); | 453 | crypto_req_done, &ctx->wait); |
484 | 454 | ||
@@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) | |||
489 | 459 | ||
490 | static int hash_accept_parent(void *private, struct sock *sk) | 460 | static int hash_accept_parent(void *private, struct sock *sk) |
491 | { | 461 | { |
492 | struct algif_hash_tfm *tfm = private; | 462 | struct crypto_ahash *tfm = private; |
493 | 463 | ||
494 | if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) | 464 | if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
495 | return -ENOKEY; | 465 | return -ENOKEY; |
496 | 466 | ||
497 | return hash_accept_parent_nokey(private, sk); | 467 | return hash_accept_parent_nokey(private, sk); |
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index f50907430c92..c4e885df4564 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c | |||
@@ -38,11 +38,6 @@ | |||
38 | #include <linux/net.h> | 38 | #include <linux/net.h> |
39 | #include <net/sock.h> | 39 | #include <net/sock.h> |
40 | 40 | ||
41 | struct skcipher_tfm { | ||
42 | struct crypto_skcipher *skcipher; | ||
43 | bool has_key; | ||
44 | }; | ||
45 | |||
46 | static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, | 41 | static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, |
47 | size_t size) | 42 | size_t size) |
48 | { | 43 | { |
@@ -50,8 +45,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, | |||
50 | struct alg_sock *ask = alg_sk(sk); | 45 | struct alg_sock *ask = alg_sk(sk); |
51 | struct sock *psk = ask->parent; | 46 | struct sock *psk = ask->parent; |
52 | struct alg_sock *pask = alg_sk(psk); | 47 | struct alg_sock *pask = alg_sk(psk); |
53 | struct skcipher_tfm *skc = pask->private; | 48 | struct crypto_skcipher *tfm = pask->private; |
54 | struct crypto_skcipher *tfm = skc->skcipher; | ||
55 | unsigned ivsize = crypto_skcipher_ivsize(tfm); | 49 | unsigned ivsize = crypto_skcipher_ivsize(tfm); |
56 | 50 | ||
57 | return af_alg_sendmsg(sock, msg, size, ivsize); | 51 | return af_alg_sendmsg(sock, msg, size, ivsize); |
@@ -65,8 +59,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, | |||
65 | struct sock *psk = ask->parent; | 59 | struct sock *psk = ask->parent; |
66 | struct alg_sock *pask = alg_sk(psk); | 60 | struct alg_sock *pask = alg_sk(psk); |
67 | struct af_alg_ctx *ctx = ask->private; | 61 | struct af_alg_ctx *ctx = ask->private; |
68 | struct skcipher_tfm *skc = pask->private; | 62 | struct crypto_skcipher *tfm = pask->private; |
69 | struct crypto_skcipher *tfm = skc->skcipher; | ||
70 | unsigned int bs = crypto_skcipher_blocksize(tfm); | 63 | unsigned int bs = crypto_skcipher_blocksize(tfm); |
71 | struct af_alg_async_req *areq; | 64 | struct af_alg_async_req *areq; |
72 | int err = 0; | 65 | int err = 0; |
@@ -220,7 +213,7 @@ static int skcipher_check_key(struct socket *sock) | |||
220 | int err = 0; | 213 | int err = 0; |
221 | struct sock *psk; | 214 | struct sock *psk; |
222 | struct alg_sock *pask; | 215 | struct alg_sock *pask; |
223 | struct skcipher_tfm *tfm; | 216 | struct crypto_skcipher *tfm; |
224 | struct sock *sk = sock->sk; | 217 | struct sock *sk = sock->sk; |
225 | struct alg_sock *ask = alg_sk(sk); | 218 | struct alg_sock *ask = alg_sk(sk); |
226 | 219 | ||
@@ -234,7 +227,7 @@ static int skcipher_check_key(struct socket *sock) | |||
234 | 227 | ||
235 | err = -ENOKEY; | 228 | err = -ENOKEY; |
236 | lock_sock_nested(psk, SINGLE_DEPTH_NESTING); | 229 | lock_sock_nested(psk, SINGLE_DEPTH_NESTING); |
237 | if (!tfm->has_key) | 230 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
238 | goto unlock; | 231 | goto unlock; |
239 | 232 | ||
240 | if (!pask->refcnt++) | 233 | if (!pask->refcnt++) |
@@ -313,41 +306,17 @@ static struct proto_ops algif_skcipher_ops_nokey = { | |||
313 | 306 | ||
314 | static void *skcipher_bind(const char *name, u32 type, u32 mask) | 307 | static void *skcipher_bind(const char *name, u32 type, u32 mask) |
315 | { | 308 | { |
316 | struct skcipher_tfm *tfm; | 309 | return crypto_alloc_skcipher(name, type, mask); |
317 | struct crypto_skcipher *skcipher; | ||
318 | |||
319 | tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); | ||
320 | if (!tfm) | ||
321 | return ERR_PTR(-ENOMEM); | ||
322 | |||
323 | skcipher = crypto_alloc_skcipher(name, type, mask); | ||
324 | if (IS_ERR(skcipher)) { | ||
325 | kfree(tfm); | ||
326 | return ERR_CAST(skcipher); | ||
327 | } | ||
328 | |||
329 | tfm->skcipher = skcipher; | ||
330 | |||
331 | return tfm; | ||
332 | } | 310 | } |
333 | 311 | ||
334 | static void skcipher_release(void *private) | 312 | static void skcipher_release(void *private) |
335 | { | 313 | { |
336 | struct skcipher_tfm *tfm = private; | 314 | crypto_free_skcipher(private); |
337 | |||
338 | crypto_free_skcipher(tfm->skcipher); | ||
339 | kfree(tfm); | ||
340 | } | 315 | } |
341 | 316 | ||
342 | static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) | 317 | static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) |
343 | { | 318 | { |
344 | struct skcipher_tfm *tfm = private; | 319 | return crypto_skcipher_setkey(private, key, keylen); |
345 | int err; | ||
346 | |||
347 | err = crypto_skcipher_setkey(tfm->skcipher, key, keylen); | ||
348 | tfm->has_key = !err; | ||
349 | |||
350 | return err; | ||
351 | } | 320 | } |
352 | 321 | ||
353 | static void skcipher_sock_destruct(struct sock *sk) | 322 | static void skcipher_sock_destruct(struct sock *sk) |
@@ -356,8 +325,7 @@ static void skcipher_sock_destruct(struct sock *sk) | |||
356 | struct af_alg_ctx *ctx = ask->private; | 325 | struct af_alg_ctx *ctx = ask->private; |
357 | struct sock *psk = ask->parent; | 326 | struct sock *psk = ask->parent; |
358 | struct alg_sock *pask = alg_sk(psk); | 327 | struct alg_sock *pask = alg_sk(psk); |
359 | struct skcipher_tfm *skc = pask->private; | 328 | struct crypto_skcipher *tfm = pask->private; |
360 | struct crypto_skcipher *tfm = skc->skcipher; | ||
361 | 329 | ||
362 | af_alg_pull_tsgl(sk, ctx->used, NULL, 0); | 330 | af_alg_pull_tsgl(sk, ctx->used, NULL, 0); |
363 | sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); | 331 | sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); |
@@ -369,22 +337,21 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) | |||
369 | { | 337 | { |
370 | struct af_alg_ctx *ctx; | 338 | struct af_alg_ctx *ctx; |
371 | struct alg_sock *ask = alg_sk(sk); | 339 | struct alg_sock *ask = alg_sk(sk); |
372 | struct skcipher_tfm *tfm = private; | 340 | struct crypto_skcipher *tfm = private; |
373 | struct crypto_skcipher *skcipher = tfm->skcipher; | ||
374 | unsigned int len = sizeof(*ctx); | 341 | unsigned int len = sizeof(*ctx); |
375 | 342 | ||
376 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); | 343 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); |
377 | if (!ctx) | 344 | if (!ctx) |
378 | return -ENOMEM; | 345 | return -ENOMEM; |
379 | 346 | ||
380 | ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher), | 347 | ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), |
381 | GFP_KERNEL); | 348 | GFP_KERNEL); |
382 | if (!ctx->iv) { | 349 | if (!ctx->iv) { |
383 | sock_kfree_s(sk, ctx, len); | 350 | sock_kfree_s(sk, ctx, len); |
384 | return -ENOMEM; | 351 | return -ENOMEM; |
385 | } | 352 | } |
386 | 353 | ||
387 | memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); | 354 | memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); |
388 | 355 | ||
389 | INIT_LIST_HEAD(&ctx->tsgl_list); | 356 | INIT_LIST_HEAD(&ctx->tsgl_list); |
390 | ctx->len = len; | 357 | ctx->len = len; |
@@ -404,9 +371,9 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) | |||
404 | 371 | ||
405 | static int skcipher_accept_parent(void *private, struct sock *sk) | 372 | static int skcipher_accept_parent(void *private, struct sock *sk) |
406 | { | 373 | { |
407 | struct skcipher_tfm *tfm = private; | 374 | struct crypto_skcipher *tfm = private; |
408 | 375 | ||
409 | if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher)) | 376 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
410 | return -ENOKEY; | 377 | return -ENOKEY; |
411 | 378 | ||
412 | return skcipher_accept_parent_nokey(private, sk); | 379 | return skcipher_accept_parent_nokey(private, sk); |
diff --git a/crypto/api.c b/crypto/api.c index 2a2479d168aa..70a894e52ff3 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, | |||
137 | if (IS_ERR(larval)) | 137 | if (IS_ERR(larval)) |
138 | return ERR_CAST(larval); | 138 | return ERR_CAST(larval); |
139 | 139 | ||
140 | atomic_set(&larval->alg.cra_refcnt, 2); | 140 | refcount_set(&larval->alg.cra_refcnt, 2); |
141 | 141 | ||
142 | down_write(&crypto_alg_sem); | 142 | down_write(&crypto_alg_sem); |
143 | alg = __crypto_alg_lookup(name, type, mask); | 143 | alg = __crypto_alg_lookup(name, type, mask); |
@@ -205,7 +205,8 @@ struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) | |||
205 | } | 205 | } |
206 | EXPORT_SYMBOL_GPL(crypto_alg_lookup); | 206 | EXPORT_SYMBOL_GPL(crypto_alg_lookup); |
207 | 207 | ||
208 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) | 208 | static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, |
209 | u32 mask) | ||
209 | { | 210 | { |
210 | struct crypto_alg *alg; | 211 | struct crypto_alg *alg; |
211 | 212 | ||
@@ -231,7 +232,6 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) | |||
231 | 232 | ||
232 | return crypto_larval_add(name, type, mask); | 233 | return crypto_larval_add(name, type, mask); |
233 | } | 234 | } |
234 | EXPORT_SYMBOL_GPL(crypto_larval_lookup); | ||
235 | 235 | ||
236 | int crypto_probing_notify(unsigned long val, void *v) | 236 | int crypto_probing_notify(unsigned long val, void *v) |
237 | { | 237 | { |
diff --git a/crypto/authenc.c b/crypto/authenc.c index 875470b0e026..d3d6d72fe649 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -329,7 +329,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) | |||
329 | if (IS_ERR(enc)) | 329 | if (IS_ERR(enc)) |
330 | goto err_free_ahash; | 330 | goto err_free_ahash; |
331 | 331 | ||
332 | null = crypto_get_default_null_skcipher2(); | 332 | null = crypto_get_default_null_skcipher(); |
333 | err = PTR_ERR(null); | 333 | err = PTR_ERR(null); |
334 | if (IS_ERR(null)) | 334 | if (IS_ERR(null)) |
335 | goto err_free_skcipher; | 335 | goto err_free_skcipher; |
@@ -363,7 +363,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) | |||
363 | 363 | ||
364 | crypto_free_ahash(ctx->auth); | 364 | crypto_free_ahash(ctx->auth); |
365 | crypto_free_skcipher(ctx->enc); | 365 | crypto_free_skcipher(ctx->enc); |
366 | crypto_put_default_null_skcipher2(); | 366 | crypto_put_default_null_skcipher(); |
367 | } | 367 | } |
368 | 368 | ||
369 | static void crypto_authenc_free(struct aead_instance *inst) | 369 | static void crypto_authenc_free(struct aead_instance *inst) |
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0cf5fefdb859..15f91ddd7f0e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
@@ -352,7 +352,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) | |||
352 | if (IS_ERR(enc)) | 352 | if (IS_ERR(enc)) |
353 | goto err_free_ahash; | 353 | goto err_free_ahash; |
354 | 354 | ||
355 | null = crypto_get_default_null_skcipher2(); | 355 | null = crypto_get_default_null_skcipher(); |
356 | err = PTR_ERR(null); | 356 | err = PTR_ERR(null); |
357 | if (IS_ERR(null)) | 357 | if (IS_ERR(null)) |
358 | goto err_free_skcipher; | 358 | goto err_free_skcipher; |
@@ -389,7 +389,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) | |||
389 | 389 | ||
390 | crypto_free_ahash(ctx->auth); | 390 | crypto_free_ahash(ctx->auth); |
391 | crypto_free_skcipher(ctx->enc); | 391 | crypto_free_skcipher(ctx->enc); |
392 | crypto_put_default_null_skcipher2(); | 392 | crypto_put_default_null_skcipher(); |
393 | } | 393 | } |
394 | 394 | ||
395 | static void crypto_authenc_esn_free(struct aead_instance *inst) | 395 | static void crypto_authenc_esn_free(struct aead_instance *inst) |
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 6c43a0a17a55..01c0d4aa2563 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <crypto/internal/skcipher.h> | 18 | #include <crypto/internal/skcipher.h> |
19 | #include <crypto/scatterwalk.h> | 19 | #include <crypto/scatterwalk.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
23 | #include <linux/module.h> | 22 | #include <linux/module.h> |
24 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index a02286bf319e..32ddd4836ff5 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c | |||
@@ -13,8 +13,7 @@ | |||
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | 17 | */ |
19 | 18 | ||
20 | /* | 19 | /* |
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index df5c72629383..66169c178314 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c | |||
@@ -16,8 +16,7 @@ | |||
16 | * any later version. | 16 | * any later version. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
21 | */ | 20 | */ |
22 | 21 | ||
23 | 22 | ||
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 058c8d755d03..c8e5ec69790e 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c | |||
@@ -13,8 +13,7 @@ | |||
13 | * any later version. | 13 | * any later version. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
18 | */ | 17 | */ |
19 | 18 | ||
20 | 19 | ||
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index 4a45fa4890c0..e451c3cb6a56 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c | |||
@@ -9,44 +9,38 @@ | |||
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <asm/unaligned.h> | ||
12 | #include <crypto/algapi.h> | 13 | #include <crypto/algapi.h> |
13 | #include <crypto/chacha20.h> | 14 | #include <crypto/chacha20.h> |
14 | #include <crypto/internal/skcipher.h> | 15 | #include <crypto/internal/skcipher.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | 17 | ||
17 | static inline u32 le32_to_cpuvp(const void *p) | ||
18 | { | ||
19 | return le32_to_cpup(p); | ||
20 | } | ||
21 | |||
22 | static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, | 18 | static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, |
23 | unsigned int bytes) | 19 | unsigned int bytes) |
24 | { | 20 | { |
25 | u8 stream[CHACHA20_BLOCK_SIZE]; | 21 | u32 stream[CHACHA20_BLOCK_WORDS]; |
26 | 22 | ||
27 | if (dst != src) | 23 | if (dst != src) |
28 | memcpy(dst, src, bytes); | 24 | memcpy(dst, src, bytes); |
29 | 25 | ||
30 | while (bytes >= CHACHA20_BLOCK_SIZE) { | 26 | while (bytes >= CHACHA20_BLOCK_SIZE) { |
31 | chacha20_block(state, stream); | 27 | chacha20_block(state, stream); |
32 | crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE); | 28 | crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE); |
33 | bytes -= CHACHA20_BLOCK_SIZE; | 29 | bytes -= CHACHA20_BLOCK_SIZE; |
34 | dst += CHACHA20_BLOCK_SIZE; | 30 | dst += CHACHA20_BLOCK_SIZE; |
35 | } | 31 | } |
36 | if (bytes) { | 32 | if (bytes) { |
37 | chacha20_block(state, stream); | 33 | chacha20_block(state, stream); |
38 | crypto_xor(dst, stream, bytes); | 34 | crypto_xor(dst, (const u8 *)stream, bytes); |
39 | } | 35 | } |
40 | } | 36 | } |
41 | 37 | ||
42 | void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) | 38 | void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) |
43 | { | 39 | { |
44 | static const char constant[16] = "expand 32-byte k"; | 40 | state[0] = 0x61707865; /* "expa" */ |
45 | 41 | state[1] = 0x3320646e; /* "nd 3" */ | |
46 | state[0] = le32_to_cpuvp(constant + 0); | 42 | state[2] = 0x79622d32; /* "2-by" */ |
47 | state[1] = le32_to_cpuvp(constant + 4); | 43 | state[3] = 0x6b206574; /* "te k" */ |
48 | state[2] = le32_to_cpuvp(constant + 8); | ||
49 | state[3] = le32_to_cpuvp(constant + 12); | ||
50 | state[4] = ctx->key[0]; | 44 | state[4] = ctx->key[0]; |
51 | state[5] = ctx->key[1]; | 45 | state[5] = ctx->key[1]; |
52 | state[6] = ctx->key[2]; | 46 | state[6] = ctx->key[2]; |
@@ -55,10 +49,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) | |||
55 | state[9] = ctx->key[5]; | 49 | state[9] = ctx->key[5]; |
56 | state[10] = ctx->key[6]; | 50 | state[10] = ctx->key[6]; |
57 | state[11] = ctx->key[7]; | 51 | state[11] = ctx->key[7]; |
58 | state[12] = le32_to_cpuvp(iv + 0); | 52 | state[12] = get_unaligned_le32(iv + 0); |
59 | state[13] = le32_to_cpuvp(iv + 4); | 53 | state[13] = get_unaligned_le32(iv + 4); |
60 | state[14] = le32_to_cpuvp(iv + 8); | 54 | state[14] = get_unaligned_le32(iv + 8); |
61 | state[15] = le32_to_cpuvp(iv + 12); | 55 | state[15] = get_unaligned_le32(iv + 12); |
62 | } | 56 | } |
63 | EXPORT_SYMBOL_GPL(crypto_chacha20_init); | 57 | EXPORT_SYMBOL_GPL(crypto_chacha20_init); |
64 | 58 | ||
@@ -72,7 +66,7 @@ int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, | |||
72 | return -EINVAL; | 66 | return -EINVAL; |
73 | 67 | ||
74 | for (i = 0; i < ARRAY_SIZE(ctx->key); i++) | 68 | for (i = 0; i < ARRAY_SIZE(ctx->key); i++) |
75 | ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32)); | 69 | ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); |
76 | 70 | ||
77 | return 0; | 71 | return 0; |
78 | } | 72 | } |
@@ -111,7 +105,6 @@ static struct skcipher_alg alg = { | |||
111 | .base.cra_priority = 100, | 105 | .base.cra_priority = 100, |
112 | .base.cra_blocksize = 1, | 106 | .base.cra_blocksize = 1, |
113 | .base.cra_ctxsize = sizeof(struct chacha20_ctx), | 107 | .base.cra_ctxsize = sizeof(struct chacha20_ctx), |
114 | .base.cra_alignmask = sizeof(u32) - 1, | ||
115 | .base.cra_module = THIS_MODULE, | 108 | .base.cra_module = THIS_MODULE, |
116 | 109 | ||
117 | .min_keysize = CHACHA20_KEY_SIZE, | 110 | .min_keysize = CHACHA20_KEY_SIZE, |
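
The four magic words above are not new constants: they are the little-endian reading of the classic "expand 32-byte k" string, so dropping le32_to_cpuvp() changes nothing in the state layout. Switching the key and IV loads to get_unaligned_le32() is also what allows cra_alignmask to go away, since the generic code no longer depends on the API aligning its inputs. A hedged equivalence check:

    #include <asm/unaligned.h>

    static const char sigma[16] = "expand 32-byte k";

    /* sanity sketch: the hardcoded words equal the LE view of sigma */
    static void chacha20_constants(u32 state[4])
    {
            state[0] = get_unaligned_le32(sigma + 0);  /* 0x61707865 "expa" */
            state[1] = get_unaligned_le32(sigma + 4);  /* 0x3320646e "nd 3" */
            state[2] = get_unaligned_le32(sigma + 8);  /* 0x79622d32 "2-by" */
            state[3] = get_unaligned_le32(sigma + 12); /* 0x6b206574 "te k" */
    }
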
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index aa2a25fc7482..718cbce8d169 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c | |||
@@ -133,6 +133,7 @@ static struct shash_alg alg = { | |||
133 | .cra_name = "crc32", | 133 | .cra_name = "crc32", |
134 | .cra_driver_name = "crc32-generic", | 134 | .cra_driver_name = "crc32-generic", |
135 | .cra_priority = 100, | 135 | .cra_priority = 100, |
136 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
136 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 137 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
137 | .cra_ctxsize = sizeof(u32), | 138 | .cra_ctxsize = sizeof(u32), |
138 | .cra_module = THIS_MODULE, | 139 | .cra_module = THIS_MODULE, |
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 4c0a0e271876..372320399622 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c | |||
@@ -146,6 +146,7 @@ static struct shash_alg alg = { | |||
146 | .cra_name = "crc32c", | 146 | .cra_name = "crc32c", |
147 | .cra_driver_name = "crc32c-generic", | 147 | .cra_driver_name = "crc32c-generic", |
148 | .cra_priority = 100, | 148 | .cra_priority = 100, |
149 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
149 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 150 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
150 | .cra_alignmask = 3, | 151 | .cra_alignmask = 3, |
151 | .cra_ctxsize = sizeof(struct chksum_ctx), | 152 | .cra_ctxsize = sizeof(struct chksum_ctx), |
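
CRYPTO_ALG_OPTIONAL_KEY marks hashes whose setkey() is genuinely optional — for crc32 and crc32c the "key" is just a custom seed. Without the flag, the new CRYPTO_TFM_NEED_KEY enforcement (see the shash.c hunk below) would refuse to digest until a key is set, breaking every unkeyed CRC user. A hedged sketch of the resulting split, assuming <crypto/hash.h>:

    static bool needs_key(const char *alg_name)
    {
            struct crypto_shash *tfm = crypto_alloc_shash(alg_name, 0, 0);
            bool ret;

            if (IS_ERR(tfm))
                    return false;
            ret = crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY;
            crypto_free_shash(tfm);
            return ret;
    }

    /* needs_key("crc32c")       -> false: OPTIONAL_KEY suppresses the gate */
    /* needs_key("hmac(sha256)") -> true: digest is -ENOKEY until setkey() */
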
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index bd43cf5be14c..addca7bae33f 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -32,7 +32,9 @@ | |||
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | 34 | ||
35 | #define CRYPTD_MAX_CPU_QLEN 1000 | 35 | static unsigned int cryptd_max_cpu_qlen = 1000; |
36 | module_param(cryptd_max_cpu_qlen, uint, 0); | ||
37 | MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); | ||
36 | 38 | ||
37 | struct cryptd_cpu_queue { | 39 | struct cryptd_cpu_queue { |
38 | struct crypto_queue queue; | 40 | struct crypto_queue queue; |
@@ -116,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue, | |||
116 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | 118 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); |
117 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); | 119 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); |
118 | } | 120 | } |
121 | pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); | ||
119 | return 0; | 122 | return 0; |
120 | } | 123 | } |
121 | 124 | ||
@@ -893,10 +896,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | |||
893 | if (err) | 896 | if (err) |
894 | goto out_free_inst; | 897 | goto out_free_inst; |
895 | 898 | ||
896 | type = CRYPTO_ALG_ASYNC; | 899 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
897 | if (alg->cra_flags & CRYPTO_ALG_INTERNAL) | 900 | (alg->cra_flags & (CRYPTO_ALG_INTERNAL | |
898 | type |= CRYPTO_ALG_INTERNAL; | 901 | CRYPTO_ALG_OPTIONAL_KEY)); |
899 | inst->alg.halg.base.cra_flags = type; | ||
900 | 902 | ||
901 | inst->alg.halg.digestsize = salg->digestsize; | 903 | inst->alg.halg.digestsize = salg->digestsize; |
902 | inst->alg.halg.statesize = salg->statesize; | 904 | inst->alg.halg.statesize = salg->statesize; |
@@ -911,7 +913,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | |||
911 | inst->alg.finup = cryptd_hash_finup_enqueue; | 913 | inst->alg.finup = cryptd_hash_finup_enqueue; |
912 | inst->alg.export = cryptd_hash_export; | 914 | inst->alg.export = cryptd_hash_export; |
913 | inst->alg.import = cryptd_hash_import; | 915 | inst->alg.import = cryptd_hash_import; |
914 | inst->alg.setkey = cryptd_hash_setkey; | 916 | if (crypto_shash_alg_has_setkey(salg)) |
917 | inst->alg.setkey = cryptd_hash_setkey; | ||
915 | inst->alg.digest = cryptd_hash_digest_enqueue; | 918 | inst->alg.digest = cryptd_hash_digest_enqueue; |
916 | 919 | ||
917 | err = ahash_register_instance(tmpl, inst); | 920 | err = ahash_register_instance(tmpl, inst); |
@@ -1372,7 +1375,7 @@ static int __init cryptd_init(void) | |||
1372 | { | 1375 | { |
1373 | int err; | 1376 | int err; |
1374 | 1377 | ||
1375 | err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); | 1378 | err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); |
1376 | if (err) | 1379 | if (err) |
1377 | return err; | 1380 | return err; |
1378 | 1381 | ||
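
Two independent tweaks land in cryptd: the compile-time CRYPTD_MAX_CPU_QLEN becomes a load-time knob, and the hash instance now propagates CRYPTO_ALG_OPTIONAL_KEY and only wires up setkey() when the wrapped shash actually has one, so the key-enforcement state of the inner algorithm is preserved. Because the parameter is registered with permissions 0 it is not writable through sysfs; it can only be set when the module is loaded, e.g.:

    modprobe cryptd cryptd_max_cpu_qlen=2000

or, for a built-in cryptd, on the kernel command line as cryptd.cryptd_max_cpu_qlen=2000.
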
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 0dbe2be7f783..5c291eedaa70 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c | |||
@@ -169,7 +169,7 @@ static int crypto_report_one(struct crypto_alg *alg, | |||
169 | ualg->cru_type = 0; | 169 | ualg->cru_type = 0; |
170 | ualg->cru_mask = 0; | 170 | ualg->cru_mask = 0; |
171 | ualg->cru_flags = alg->cra_flags; | 171 | ualg->cru_flags = alg->cra_flags; |
172 | ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); | 172 | ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); |
173 | 173 | ||
174 | if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) | 174 | if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) |
175 | goto nla_put_failure; | 175 | goto nla_put_failure; |
@@ -387,7 +387,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
387 | goto drop_alg; | 387 | goto drop_alg; |
388 | 388 | ||
389 | err = -EBUSY; | 389 | err = -EBUSY; |
390 | if (atomic_read(&alg->cra_refcnt) > 2) | 390 | if (refcount_read(&alg->cra_refcnt) > 2) |
391 | goto drop_alg; | 391 | goto drop_alg; |
392 | 392 | ||
393 | err = crypto_unregister_instance((struct crypto_instance *)alg); | 393 | err = crypto_unregister_instance((struct crypto_instance *)alg); |
diff --git a/crypto/ecc.c b/crypto/ecc.c index 633a9bcdc574..18f32f2a5e1c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c | |||
@@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) | |||
964 | * DRBG with a security strength of 256. | 964 | * DRBG with a security strength of 256. |
965 | */ | 965 | */ |
966 | if (crypto_get_default_rng()) | 966 | if (crypto_get_default_rng()) |
967 | err = -EFAULT; | 967 | return -EFAULT; |
968 | 968 | ||
969 | err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); | 969 | err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); |
970 | crypto_put_default_rng(); | 970 | crypto_put_default_rng(); |
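
The one-liner in ecc.c is a real bug fix: the old code set err = -EFAULT when no default RNG could be obtained, then immediately clobbered err by calling crypto_rng_get_bytes() on crypto_default_rng, which at that point was not validly acquired. Returning early is the only safe option. The broken flow, spelled out:

    if (crypto_get_default_rng())
            err = -EFAULT;   /* old: noted the error ... */
    err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
                             /* ... then overwrote it and used a
                              * possibly-unset default RNG anyway */
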
diff --git a/crypto/echainiv.c b/crypto/echainiv.c index e3d889b122e0..45819e6015bf 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c | |||
@@ -118,8 +118,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, | |||
118 | struct rtattr **tb) | 118 | struct rtattr **tb) |
119 | { | 119 | { |
120 | struct aead_instance *inst; | 120 | struct aead_instance *inst; |
121 | struct crypto_aead_spawn *spawn; | ||
122 | struct aead_alg *alg; | ||
123 | int err; | 121 | int err; |
124 | 122 | ||
125 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); | 123 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); |
@@ -127,9 +125,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, | |||
127 | if (IS_ERR(inst)) | 125 | if (IS_ERR(inst)) |
128 | return PTR_ERR(inst); | 126 | return PTR_ERR(inst); |
129 | 127 | ||
130 | spawn = aead_instance_ctx(inst); | ||
131 | alg = crypto_spawn_aead_alg(spawn); | ||
132 | |||
133 | err = -EINVAL; | 128 | err = -EINVAL; |
134 | if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize) | 129 | if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize) |
135 | goto free_inst; | 130 | goto free_inst; |
diff --git a/crypto/gcm.c b/crypto/gcm.c index 8589681fb9f6..0ad879e1f9b2 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -1101,7 +1101,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) | |||
1101 | if (IS_ERR(aead)) | 1101 | if (IS_ERR(aead)) |
1102 | return PTR_ERR(aead); | 1102 | return PTR_ERR(aead); |
1103 | 1103 | ||
1104 | null = crypto_get_default_null_skcipher2(); | 1104 | null = crypto_get_default_null_skcipher(); |
1105 | err = PTR_ERR(null); | 1105 | err = PTR_ERR(null); |
1106 | if (IS_ERR(null)) | 1106 | if (IS_ERR(null)) |
1107 | goto err_free_aead; | 1107 | goto err_free_aead; |
@@ -1129,7 +1129,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) | |||
1129 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); | 1129 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); |
1130 | 1130 | ||
1131 | crypto_free_aead(ctx->child); | 1131 | crypto_free_aead(ctx->child); |
1132 | crypto_put_default_null_skcipher2(); | 1132 | crypto_put_default_null_skcipher(); |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | static void crypto_rfc4543_free(struct aead_instance *inst) | 1135 | static void crypto_rfc4543_free(struct aead_instance *inst) |
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 24e601954c7a..a4b1c026aaee 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c | |||
@@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x) | |||
160 | { | 160 | { |
161 | u64 a = le64_to_cpu(x->a); | 161 | u64 a = le64_to_cpu(x->a); |
162 | u64 b = le64_to_cpu(x->b); | 162 | u64 b = le64_to_cpu(x->b); |
163 | |||
164 | /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ | ||
165 | u64 _tt = gf128mul_table_be[a >> 56]; | 163 | u64 _tt = gf128mul_table_be[a >> 56]; |
166 | 164 | ||
167 | r->a = cpu_to_le64((a << 8) | (b >> 56)); | 165 | r->a = cpu_to_le64((a << 8) | (b >> 56)); |
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index 12ad3e3a84e3..1bffb3f712dd 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c | |||
@@ -56,9 +56,6 @@ static int ghash_update(struct shash_desc *desc, | |||
56 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | 56 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
57 | u8 *dst = dctx->buffer; | 57 | u8 *dst = dctx->buffer; |
58 | 58 | ||
59 | if (!ctx->gf128) | ||
60 | return -ENOKEY; | ||
61 | |||
62 | if (dctx->bytes) { | 59 | if (dctx->bytes) { |
63 | int n = min(srclen, dctx->bytes); | 60 | int n = min(srclen, dctx->bytes); |
64 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | 61 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); |
@@ -111,9 +108,6 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) | |||
111 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | 108 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
112 | u8 *buf = dctx->buffer; | 109 | u8 *buf = dctx->buffer; |
113 | 110 | ||
114 | if (!ctx->gf128) | ||
115 | return -ENOKEY; | ||
116 | |||
117 | ghash_flush(ctx, dctx); | 111 | ghash_flush(ctx, dctx); |
118 | memcpy(dst, buf, GHASH_BLOCK_SIZE); | 112 | memcpy(dst, buf, GHASH_BLOCK_SIZE); |
119 | 113 | ||
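
The removed -ENOKEY checks are not lost: ghash is a keyed hash without CRYPTO_ALG_OPTIONAL_KEY, so after this series the shash core itself refuses to run it before setkey(). The per-driver test collapses into a central gate, one instance of which appears verbatim in the shash.c hunk below:

    if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
            return -ENOKEY;
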
diff --git a/crypto/internal.h b/crypto/internal.h index f07320423191..5ac27fba10e8 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -30,9 +30,6 @@ | |||
30 | enum { | 30 | enum { |
31 | CRYPTO_MSG_ALG_REQUEST, | 31 | CRYPTO_MSG_ALG_REQUEST, |
32 | CRYPTO_MSG_ALG_REGISTER, | 32 | CRYPTO_MSG_ALG_REGISTER, |
33 | CRYPTO_MSG_ALG_UNREGISTER, | ||
34 | CRYPTO_MSG_TMPL_REGISTER, | ||
35 | CRYPTO_MSG_TMPL_UNREGISTER, | ||
36 | }; | 33 | }; |
37 | 34 | ||
38 | struct crypto_instance; | 35 | struct crypto_instance; |
@@ -78,7 +75,6 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm); | |||
78 | 75 | ||
79 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); | 76 | struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); |
80 | void crypto_larval_kill(struct crypto_alg *alg); | 77 | void crypto_larval_kill(struct crypto_alg *alg); |
81 | struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); | ||
82 | void crypto_alg_tested(const char *name, int err); | 78 | void crypto_alg_tested(const char *name, int err); |
83 | 79 | ||
84 | void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, | 80 | void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, |
@@ -106,13 +102,13 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, | |||
106 | 102 | ||
107 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) | 103 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) |
108 | { | 104 | { |
109 | atomic_inc(&alg->cra_refcnt); | 105 | refcount_inc(&alg->cra_refcnt); |
110 | return alg; | 106 | return alg; |
111 | } | 107 | } |
112 | 108 | ||
113 | static inline void crypto_alg_put(struct crypto_alg *alg) | 109 | static inline void crypto_alg_put(struct crypto_alg *alg) |
114 | { | 110 | { |
115 | if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) | 111 | if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) |
116 | alg->cra_destroy(alg); | 112 | alg->cra_destroy(alg); |
117 | } | 113 | } |
118 | 114 | ||
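
cra_refcnt moves from atomic_t to refcount_t across the API (crypto_user, internal.h, proc below). The accessors map one to one, but refcount_t saturates instead of wrapping and warns on misuse, turning refcount bugs from silent use-after-free candidates into loud diagnostics. A hedged sketch of the semantics, assuming <linux/refcount.h>:

    static void refcount_sketch(void)
    {
            refcount_t r = REFCOUNT_INIT(1);

            refcount_inc(&r);               /* 1 -> 2; WARNs if it was 0 */
            if (refcount_dec_and_test(&r))  /* 2 -> 1: false */
                    return;                 /* last ref would free here */
            WARN_ON(refcount_read(&r) != 1); /* plain read, unsigned int */
    }
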
diff --git a/crypto/keywrap.c b/crypto/keywrap.c index 744e35134c45..ec5c6a087c90 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c | |||
@@ -188,7 +188,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, | |||
188 | } | 188 | } |
189 | 189 | ||
190 | /* Perform authentication check */ | 190 | /* Perform authentication check */ |
191 | if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6)) | 191 | if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL)) |
192 | ret = -EBADMSG; | 192 | ret = -EBADMSG; |
193 | 193 | ||
194 | memzero_explicit(&block, sizeof(struct crypto_kw_block)); | 194 | memzero_explicit(&block, sizeof(struct crypto_kw_block)); |
@@ -221,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, | |||
221 | * Place the predefined IV into block A -- for encrypt, the caller | 221 | * Place the predefined IV into block A -- for encrypt, the caller |
222 | * does not need to provide an IV, but he needs to fetch the final IV. | 222 | * does not need to provide an IV, but he needs to fetch the final IV. |
223 | */ | 223 | */ |
224 | block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6); | 224 | block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL); |
225 | 225 | ||
226 | /* | 226 | /* |
227 | * src scatterlist is read-only. dst scatterlist is r/w. During the | 227 | * src scatterlist is read-only. dst scatterlist is r/w. During the |
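
Appending ULL is cosmetic on 64-bit builds but matters on 32-bit, where older compilers reportedly warn that the constant is too large for long; the value itself is the standard key-wrap initial value A6A6A6A6A6A6A6A6 from RFC 3394. Roughly:

    /* without the suffix, old 32-bit gcc warns:
     *   integer constant is too large for 'long' type */
    u64 kw_iv = 0xa6a6a6a6a6a6a6a6ULL;  /* RFC 3394 default IV */
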
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index eca04d3729b3..fe5129d6ff4e 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/sched/stat.h> | 27 | #include <linux/sched/stat.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/hardirq.h> | ||
30 | 29 | ||
31 | #define MCRYPTD_MAX_CPU_QLEN 100 | 30 | #define MCRYPTD_MAX_CPU_QLEN 100 |
32 | #define MCRYPTD_BATCH 9 | 31 | #define MCRYPTD_BATCH 9 |
@@ -517,10 +516,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | |||
517 | if (err) | 516 | if (err) |
518 | goto out_free_inst; | 517 | goto out_free_inst; |
519 | 518 | ||
520 | type = CRYPTO_ALG_ASYNC; | 519 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
521 | if (alg->cra_flags & CRYPTO_ALG_INTERNAL) | 520 | (alg->cra_flags & (CRYPTO_ALG_INTERNAL | |
522 | type |= CRYPTO_ALG_INTERNAL; | 521 | CRYPTO_ALG_OPTIONAL_KEY)); |
523 | inst->alg.halg.base.cra_flags = type; | ||
524 | 522 | ||
525 | inst->alg.halg.digestsize = halg->digestsize; | 523 | inst->alg.halg.digestsize = halg->digestsize; |
526 | inst->alg.halg.statesize = halg->statesize; | 524 | inst->alg.halg.statesize = halg->statesize; |
@@ -535,7 +533,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | |||
535 | inst->alg.finup = mcryptd_hash_finup_enqueue; | 533 | inst->alg.finup = mcryptd_hash_finup_enqueue; |
536 | inst->alg.export = mcryptd_hash_export; | 534 | inst->alg.export = mcryptd_hash_export; |
537 | inst->alg.import = mcryptd_hash_import; | 535 | inst->alg.import = mcryptd_hash_import; |
538 | inst->alg.setkey = mcryptd_hash_setkey; | 536 | if (crypto_hash_alg_has_setkey(halg)) |
537 | inst->alg.setkey = mcryptd_hash_setkey; | ||
539 | inst->alg.digest = mcryptd_hash_digest_enqueue; | 538 | inst->alg.digest = mcryptd_hash_digest_enqueue; |
540 | 539 | ||
541 | err = ahash_register_instance(tmpl, inst); | 540 | err = ahash_register_instance(tmpl, inst); |
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index b1c2d57dc734..b7a3a0613a30 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c | |||
@@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc) | |||
47 | } | 47 | } |
48 | EXPORT_SYMBOL_GPL(crypto_poly1305_init); | 48 | EXPORT_SYMBOL_GPL(crypto_poly1305_init); |
49 | 49 | ||
50 | int crypto_poly1305_setkey(struct crypto_shash *tfm, | ||
51 | const u8 *key, unsigned int keylen) | ||
52 | { | ||
53 | /* Poly1305 requires a unique key for each tag, which implies that | ||
54 | * we can't set it on the tfm that gets accessed by multiple users | ||
55 | * simultaneously. Instead we expect the key as the first 32 bytes in | ||
56 | * the update() call. */ | ||
57 | return -ENOTSUPP; | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); | ||
60 | |||
61 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) | 50 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) |
62 | { | 51 | { |
63 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ | 52 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ |
@@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) | |||
76 | dctx->s[3] = get_unaligned_le32(key + 12); | 65 | dctx->s[3] = get_unaligned_le32(key + 12); |
77 | } | 66 | } |
78 | 67 | ||
68 | /* | ||
69 | * Poly1305 requires a unique key for each tag, which implies that we can't set | ||
70 | * it on the tfm that gets accessed by multiple users simultaneously. Instead we | ||
71 | * expect the key as the first 32 bytes in the update() call. | ||
72 | */ | ||
79 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | 73 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
80 | const u8 *src, unsigned int srclen) | 74 | const u8 *src, unsigned int srclen) |
81 | { | 75 | { |
@@ -210,7 +204,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update); | |||
210 | int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) | 204 | int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) |
211 | { | 205 | { |
212 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | 206 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); |
213 | __le32 *mac = (__le32 *)dst; | ||
214 | u32 h0, h1, h2, h3, h4; | 207 | u32 h0, h1, h2, h3, h4; |
215 | u32 g0, g1, g2, g3, g4; | 208 | u32 g0, g1, g2, g3, g4; |
216 | u32 mask; | 209 | u32 mask; |
@@ -267,10 +260,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) | |||
267 | h3 = (h3 >> 18) | (h4 << 8); | 260 | h3 = (h3 >> 18) | (h4 << 8); |
268 | 261 | ||
269 | /* mac = (h + s) % (2^128) */ | 262 | /* mac = (h + s) % (2^128) */ |
270 | f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f); | 263 | f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0); |
271 | f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f); | 264 | f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4); |
272 | f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f); | 265 | f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8); |
273 | f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f); | 266 | f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12); |
274 | 267 | ||
275 | return 0; | 268 | return 0; |
276 | } | 269 | } |
@@ -281,14 +274,12 @@ static struct shash_alg poly1305_alg = { | |||
281 | .init = crypto_poly1305_init, | 274 | .init = crypto_poly1305_init, |
282 | .update = crypto_poly1305_update, | 275 | .update = crypto_poly1305_update, |
283 | .final = crypto_poly1305_final, | 276 | .final = crypto_poly1305_final, |
284 | .setkey = crypto_poly1305_setkey, | ||
285 | .descsize = sizeof(struct poly1305_desc_ctx), | 277 | .descsize = sizeof(struct poly1305_desc_ctx), |
286 | .base = { | 278 | .base = { |
287 | .cra_name = "poly1305", | 279 | .cra_name = "poly1305", |
288 | .cra_driver_name = "poly1305-generic", | 280 | .cra_driver_name = "poly1305-generic", |
289 | .cra_priority = 100, | 281 | .cra_priority = 100, |
290 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 282 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
291 | .cra_alignmask = sizeof(u32) - 1, | ||
292 | .cra_blocksize = POLY1305_BLOCK_SIZE, | 283 | .cra_blocksize = POLY1305_BLOCK_SIZE, |
293 | .cra_module = THIS_MODULE, | 284 | .cra_module = THIS_MODULE, |
294 | }, | 285 | }, |
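
crypto_poly1305_setkey() is gone because it never worked by design — it always returned -ENOTSUPP — and keeping a setkey stub would now wrongly trip the NEED_KEY machinery. The relocated comment states the actual contract: the one-time key arrives as the first 32 bytes of update() data, and crypto_poly1305_setdesckey() peels it off the stream. The final() change writes the tag with put_unaligned_le32(), which is what lets cra_alignmask disappear. A hedged caller-side sketch, assuming <crypto/poly1305.h>:

    static void feed_key(struct poly1305_desc_ctx *dctx,
                         const u8 **src, unsigned int *srclen)
    {
            unsigned int used;

            /* consumes r (clamped) then s from the head of the stream */
            used = crypto_poly1305_setdesckey(dctx, *src, *srclen);
            *src += used;
            *srclen -= used;
    }
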
diff --git a/crypto/proc.c b/crypto/proc.c index 2cc10c96d753..822fcef6d91c 100644 --- a/crypto/proc.c +++ b/crypto/proc.c | |||
@@ -46,7 +46,7 @@ static int c_show(struct seq_file *m, void *p) | |||
46 | seq_printf(m, "driver : %s\n", alg->cra_driver_name); | 46 | seq_printf(m, "driver : %s\n", alg->cra_driver_name); |
47 | seq_printf(m, "module : %s\n", module_name(alg->cra_module)); | 47 | seq_printf(m, "module : %s\n", module_name(alg->cra_module)); |
48 | seq_printf(m, "priority : %d\n", alg->cra_priority); | 48 | seq_printf(m, "priority : %d\n", alg->cra_priority); |
49 | seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt)); | 49 | seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt)); |
50 | seq_printf(m, "selftest : %s\n", | 50 | seq_printf(m, "selftest : %s\n", |
51 | (alg->cra_flags & CRYPTO_ALG_TESTED) ? | 51 | (alg->cra_flags & CRYPTO_ALG_TESTED) ? |
52 | "passed" : "unknown"); | 52 | "passed" : "unknown"); |
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index d7da0eea5622..5074006a56c3 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c | |||
@@ -19,49 +19,19 @@ | |||
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/init.h> | 22 | #include <asm/unaligned.h> |
23 | #include <crypto/internal/skcipher.h> | ||
24 | #include <crypto/salsa20.h> | ||
23 | #include <linux/module.h> | 25 | #include <linux/module.h> |
24 | #include <linux/errno.h> | ||
25 | #include <linux/crypto.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/bitops.h> | ||
28 | #include <crypto/algapi.h> | ||
29 | #include <asm/byteorder.h> | ||
30 | 26 | ||
31 | #define SALSA20_IV_SIZE 8U | 27 | static void salsa20_block(u32 *state, __le32 *stream) |
32 | #define SALSA20_MIN_KEY_SIZE 16U | ||
33 | #define SALSA20_MAX_KEY_SIZE 32U | ||
34 | |||
35 | /* | ||
36 | * Start of code taken from D. J. Bernstein's reference implementation. | ||
37 | * With some modifications and optimizations made to suit our needs. | ||
38 | */ | ||
39 | |||
40 | /* | ||
41 | salsa20-ref.c version 20051118 | ||
42 | D. J. Bernstein | ||
43 | Public domain. | ||
44 | */ | ||
45 | |||
46 | #define U32TO8_LITTLE(p, v) \ | ||
47 | { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \ | ||
48 | (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; } | ||
49 | #define U8TO32_LITTLE(p) \ | ||
50 | (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \ | ||
51 | ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) ) | ||
52 | |||
53 | struct salsa20_ctx | ||
54 | { | ||
55 | u32 input[16]; | ||
56 | }; | ||
57 | |||
58 | static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) | ||
59 | { | 28 | { |
60 | u32 x[16]; | 29 | u32 x[16]; |
61 | int i; | 30 | int i; |
62 | 31 | ||
63 | memcpy(x, input, sizeof(x)); | 32 | memcpy(x, state, sizeof(x)); |
64 | for (i = 20; i > 0; i -= 2) { | 33 | |
34 | for (i = 0; i < 20; i += 2) { | ||
65 | x[ 4] ^= rol32((x[ 0] + x[12]), 7); | 35 | x[ 4] ^= rol32((x[ 0] + x[12]), 7); |
66 | x[ 8] ^= rol32((x[ 4] + x[ 0]), 9); | 36 | x[ 8] ^= rol32((x[ 4] + x[ 0]), 9); |
67 | x[12] ^= rol32((x[ 8] + x[ 4]), 13); | 37 | x[12] ^= rol32((x[ 8] + x[ 4]), 13); |
@@ -95,145 +65,137 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) | |||
95 | x[14] ^= rol32((x[13] + x[12]), 13); | 65 | x[14] ^= rol32((x[13] + x[12]), 13); |
96 | x[15] ^= rol32((x[14] + x[13]), 18); | 66 | x[15] ^= rol32((x[14] + x[13]), 18); |
97 | } | 67 | } |
98 | for (i = 0; i < 16; ++i) | ||
99 | x[i] += input[i]; | ||
100 | for (i = 0; i < 16; ++i) | ||
101 | U32TO8_LITTLE(output + 4 * i,x[i]); | ||
102 | } | ||
103 | 68 | ||
104 | static const char sigma[16] = "expand 32-byte k"; | 69 | for (i = 0; i < 16; i++) |
105 | static const char tau[16] = "expand 16-byte k"; | 70 | stream[i] = cpu_to_le32(x[i] + state[i]); |
71 | |||
72 | if (++state[8] == 0) | ||
73 | state[9]++; | ||
74 | } | ||
106 | 75 | ||
107 | static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) | 76 | static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src, |
77 | unsigned int bytes) | ||
108 | { | 78 | { |
109 | const char *constants; | 79 | __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)]; |
110 | 80 | ||
111 | ctx->input[1] = U8TO32_LITTLE(k + 0); | 81 | if (dst != src) |
112 | ctx->input[2] = U8TO32_LITTLE(k + 4); | 82 | memcpy(dst, src, bytes); |
113 | ctx->input[3] = U8TO32_LITTLE(k + 8); | 83 | |
114 | ctx->input[4] = U8TO32_LITTLE(k + 12); | 84 | while (bytes >= SALSA20_BLOCK_SIZE) { |
115 | if (kbytes == 32) { /* recommended */ | 85 | salsa20_block(state, stream); |
116 | k += 16; | 86 | crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE); |
117 | constants = sigma; | 87 | bytes -= SALSA20_BLOCK_SIZE; |
118 | } else { /* kbytes == 16 */ | 88 | dst += SALSA20_BLOCK_SIZE; |
119 | constants = tau; | 89 | } |
90 | if (bytes) { | ||
91 | salsa20_block(state, stream); | ||
92 | crypto_xor(dst, (const u8 *)stream, bytes); | ||
120 | } | 93 | } |
121 | ctx->input[11] = U8TO32_LITTLE(k + 0); | ||
122 | ctx->input[12] = U8TO32_LITTLE(k + 4); | ||
123 | ctx->input[13] = U8TO32_LITTLE(k + 8); | ||
124 | ctx->input[14] = U8TO32_LITTLE(k + 12); | ||
125 | ctx->input[0] = U8TO32_LITTLE(constants + 0); | ||
126 | ctx->input[5] = U8TO32_LITTLE(constants + 4); | ||
127 | ctx->input[10] = U8TO32_LITTLE(constants + 8); | ||
128 | ctx->input[15] = U8TO32_LITTLE(constants + 12); | ||
129 | } | 94 | } |
130 | 95 | ||
131 | static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) | 96 | void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, |
97 | const u8 *iv) | ||
132 | { | 98 | { |
133 | ctx->input[6] = U8TO32_LITTLE(iv + 0); | 99 | memcpy(state, ctx->initial_state, sizeof(ctx->initial_state)); |
134 | ctx->input[7] = U8TO32_LITTLE(iv + 4); | 100 | state[6] = get_unaligned_le32(iv + 0); |
135 | ctx->input[8] = 0; | 101 | state[7] = get_unaligned_le32(iv + 4); |
136 | ctx->input[9] = 0; | ||
137 | } | 102 | } |
103 | EXPORT_SYMBOL_GPL(crypto_salsa20_init); | ||
138 | 104 | ||
139 | static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, | 105 | int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, |
140 | const u8 *src, unsigned int bytes) | 106 | unsigned int keysize) |
141 | { | 107 | { |
142 | u8 buf[64]; | 108 | static const char sigma[16] = "expand 32-byte k"; |
143 | 109 | static const char tau[16] = "expand 16-byte k"; | |
144 | if (dst != src) | 110 | struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); |
145 | memcpy(dst, src, bytes); | 111 | const char *constants; |
146 | |||
147 | while (bytes) { | ||
148 | salsa20_wordtobyte(buf, ctx->input); | ||
149 | |||
150 | ctx->input[8]++; | ||
151 | if (!ctx->input[8]) | ||
152 | ctx->input[9]++; | ||
153 | 112 | ||
154 | if (bytes <= 64) { | 113 | if (keysize != SALSA20_MIN_KEY_SIZE && |
155 | crypto_xor(dst, buf, bytes); | 114 | keysize != SALSA20_MAX_KEY_SIZE) |
156 | return; | 115 | return -EINVAL; |
157 | } | ||
158 | 116 | ||
159 | crypto_xor(dst, buf, 64); | 117 | ctx->initial_state[1] = get_unaligned_le32(key + 0); |
160 | bytes -= 64; | 118 | ctx->initial_state[2] = get_unaligned_le32(key + 4); |
161 | dst += 64; | 119 | ctx->initial_state[3] = get_unaligned_le32(key + 8); |
120 | ctx->initial_state[4] = get_unaligned_le32(key + 12); | ||
121 | if (keysize == 32) { /* recommended */ | ||
122 | key += 16; | ||
123 | constants = sigma; | ||
124 | } else { /* keysize == 16 */ | ||
125 | constants = tau; | ||
162 | } | 126 | } |
163 | } | 127 | ctx->initial_state[11] = get_unaligned_le32(key + 0); |
164 | 128 | ctx->initial_state[12] = get_unaligned_le32(key + 4); | |
165 | /* | 129 | ctx->initial_state[13] = get_unaligned_le32(key + 8); |
166 | * End of code taken from D. J. Bernstein's reference implementation. | 130 | ctx->initial_state[14] = get_unaligned_le32(key + 12); |
167 | */ | 131 | ctx->initial_state[0] = get_unaligned_le32(constants + 0); |
132 | ctx->initial_state[5] = get_unaligned_le32(constants + 4); | ||
133 | ctx->initial_state[10] = get_unaligned_le32(constants + 8); | ||
134 | ctx->initial_state[15] = get_unaligned_le32(constants + 12); | ||
135 | |||
136 | /* space for the nonce; it will be overridden for each request */ | ||
137 | ctx->initial_state[6] = 0; | ||
138 | ctx->initial_state[7] = 0; | ||
139 | |||
140 | /* initial block number */ | ||
141 | ctx->initial_state[8] = 0; | ||
142 | ctx->initial_state[9] = 0; | ||
168 | 143 | ||
169 | static int setkey(struct crypto_tfm *tfm, const u8 *key, | ||
170 | unsigned int keysize) | ||
171 | { | ||
172 | struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); | ||
173 | salsa20_keysetup(ctx, key, keysize); | ||
174 | return 0; | 144 | return 0; |
175 | } | 145 | } |
146 | EXPORT_SYMBOL_GPL(crypto_salsa20_setkey); | ||
176 | 147 | ||
177 | static int encrypt(struct blkcipher_desc *desc, | 148 | static int salsa20_crypt(struct skcipher_request *req) |
178 | struct scatterlist *dst, struct scatterlist *src, | ||
179 | unsigned int nbytes) | ||
180 | { | 149 | { |
181 | struct blkcipher_walk walk; | 150 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
182 | struct crypto_blkcipher *tfm = desc->tfm; | 151 | const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); |
183 | struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); | 152 | struct skcipher_walk walk; |
153 | u32 state[16]; | ||
184 | int err; | 154 | int err; |
185 | 155 | ||
186 | blkcipher_walk_init(&walk, dst, src, nbytes); | 156 | err = skcipher_walk_virt(&walk, req, true); |
187 | err = blkcipher_walk_virt_block(desc, &walk, 64); | ||
188 | 157 | ||
189 | salsa20_ivsetup(ctx, walk.iv); | 158 | crypto_salsa20_init(state, ctx, walk.iv); |
190 | 159 | ||
191 | while (walk.nbytes >= 64) { | 160 | while (walk.nbytes > 0) { |
192 | salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, | 161 | unsigned int nbytes = walk.nbytes; |
193 | walk.src.virt.addr, | ||
194 | walk.nbytes - (walk.nbytes % 64)); | ||
195 | err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); | ||
196 | } | ||
197 | 162 | ||
198 | if (walk.nbytes) { | 163 | if (nbytes < walk.total) |
199 | salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, | 164 | nbytes = round_down(nbytes, walk.stride); |
200 | walk.src.virt.addr, walk.nbytes); | 165 | |
201 | err = blkcipher_walk_done(desc, &walk, 0); | 166 | salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, |
167 | nbytes); | ||
168 | err = skcipher_walk_done(&walk, walk.nbytes - nbytes); | ||
202 | } | 169 | } |
203 | 170 | ||
204 | return err; | 171 | return err; |
205 | } | 172 | } |
206 | 173 | ||
207 | static struct crypto_alg alg = { | 174 | static struct skcipher_alg alg = { |
208 | .cra_name = "salsa20", | 175 | .base.cra_name = "salsa20", |
209 | .cra_driver_name = "salsa20-generic", | 176 | .base.cra_driver_name = "salsa20-generic", |
210 | .cra_priority = 100, | 177 | .base.cra_priority = 100, |
211 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | 178 | .base.cra_blocksize = 1, |
212 | .cra_type = &crypto_blkcipher_type, | 179 | .base.cra_ctxsize = sizeof(struct salsa20_ctx), |
213 | .cra_blocksize = 1, | 180 | .base.cra_module = THIS_MODULE, |
214 | .cra_ctxsize = sizeof(struct salsa20_ctx), | 181 | |
215 | .cra_alignmask = 3, | 182 | .min_keysize = SALSA20_MIN_KEY_SIZE, |
216 | .cra_module = THIS_MODULE, | 183 | .max_keysize = SALSA20_MAX_KEY_SIZE, |
217 | .cra_u = { | 184 | .ivsize = SALSA20_IV_SIZE, |
218 | .blkcipher = { | 185 | .chunksize = SALSA20_BLOCK_SIZE, |
219 | .setkey = setkey, | 186 | .setkey = crypto_salsa20_setkey, |
220 | .encrypt = encrypt, | 187 | .encrypt = salsa20_crypt, |
221 | .decrypt = encrypt, | 188 | .decrypt = salsa20_crypt, |
222 | .min_keysize = SALSA20_MIN_KEY_SIZE, | ||
223 | .max_keysize = SALSA20_MAX_KEY_SIZE, | ||
224 | .ivsize = SALSA20_IV_SIZE, | ||
225 | } | ||
226 | } | ||
227 | }; | 189 | }; |
228 | 190 | ||
229 | static int __init salsa20_generic_mod_init(void) | 191 | static int __init salsa20_generic_mod_init(void) |
230 | { | 192 | { |
231 | return crypto_register_alg(&alg); | 193 | return crypto_register_skcipher(&alg); |
232 | } | 194 | } |
233 | 195 | ||
234 | static void __exit salsa20_generic_mod_fini(void) | 196 | static void __exit salsa20_generic_mod_fini(void) |
235 | { | 197 | { |
236 | crypto_unregister_alg(&alg); | 198 | crypto_unregister_skcipher(&alg); |
237 | } | 199 | } |
238 | 200 | ||
239 | module_init(salsa20_generic_mod_init); | 201 | module_init(salsa20_generic_mod_init); |
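
The rewrite ports salsa20-generic from the legacy blkcipher interface to skcipher: the key schedule is done once in setkey() into ctx->initial_state, per-request state lives on the stack, and the walk loop follows the standard skcipher_walk pattern of rounding partial steps down to walk.stride. A hedged sketch of reaching the result through the new API, assuming <crypto/skcipher.h>:

    static int use_salsa20(const u8 *key, unsigned int keylen)
    {
            struct crypto_skcipher *tfm;
            int err;

            tfm = crypto_alloc_skcipher("salsa20", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            /* ... allocate a skcipher_request, set the 8-byte IV,
             * call crypto_skcipher_encrypt(), then clean up ... */
            crypto_free_skcipher(tfm);
            return err;
    }
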
diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 570b7d1aa0ca..39dbf2f7e5f5 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c | |||
@@ -144,8 +144,6 @@ static int seqiv_aead_decrypt(struct aead_request *req) | |||
144 | static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) | 144 | static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) |
145 | { | 145 | { |
146 | struct aead_instance *inst; | 146 | struct aead_instance *inst; |
147 | struct crypto_aead_spawn *spawn; | ||
148 | struct aead_alg *alg; | ||
149 | int err; | 147 | int err; |
150 | 148 | ||
151 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); | 149 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); |
@@ -153,9 +151,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
153 | if (IS_ERR(inst)) | 151 | if (IS_ERR(inst)) |
154 | return PTR_ERR(inst); | 152 | return PTR_ERR(inst); |
155 | 153 | ||
156 | spawn = aead_instance_ctx(inst); | ||
157 | alg = crypto_spawn_aead_alg(spawn); | ||
158 | |||
159 | err = -EINVAL; | 154 | err = -EINVAL; |
160 | if (inst->alg.ivsize != sizeof(u64)) | 155 | if (inst->alg.ivsize != sizeof(u64)) |
161 | goto free_inst; | 156 | goto free_inst; |
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 7e8ed96236ce..a965b9d80559 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf | 5 | * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf |
6 | * | 6 | * |
7 | * SHA-3 code by Jeff Garzik <jeff@garzik.org> | 7 | * SHA-3 code by Jeff Garzik <jeff@garzik.org> |
8 | * Ard Biesheuvel <ard.biesheuvel@linaro.org> | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -17,12 +18,10 @@ | |||
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | #include <crypto/sha3.h> | 20 | #include <crypto/sha3.h> |
20 | #include <asm/byteorder.h> | 21 | #include <asm/unaligned.h> |
21 | 22 | ||
22 | #define KECCAK_ROUNDS 24 | 23 | #define KECCAK_ROUNDS 24 |
23 | 24 | ||
24 | #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) | ||
25 | |||
26 | static const u64 keccakf_rndc[24] = { | 25 | static const u64 keccakf_rndc[24] = { |
27 | 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, | 26 | 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, |
28 | 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, | 27 | 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, |
@@ -34,100 +33,133 @@ static const u64 keccakf_rndc[24] = { | |||
34 | 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL | 33 | 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL |
35 | }; | 34 | }; |
36 | 35 | ||
37 | static const int keccakf_rotc[24] = { | ||
38 | 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, | ||
39 | 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 | ||
40 | }; | ||
41 | |||
42 | static const int keccakf_piln[24] = { | ||
43 | 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, | ||
44 | 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 | ||
45 | }; | ||
46 | |||
47 | /* update the state with given number of rounds */ | 36 | /* update the state with given number of rounds */ |
48 | 37 | ||
49 | static void keccakf(u64 st[25]) | 38 | static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) |
50 | { | 39 | { |
51 | int i, j, round; | 40 | u64 t[5], tt, bc[5]; |
52 | u64 t, bc[5]; | 41 | int round; |
53 | 42 | ||
54 | for (round = 0; round < KECCAK_ROUNDS; round++) { | 43 | for (round = 0; round < KECCAK_ROUNDS; round++) { |
55 | 44 | ||
56 | /* Theta */ | 45 | /* Theta */ |
57 | for (i = 0; i < 5; i++) | 46 | bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; |
58 | bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] | 47 | bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; |
59 | ^ st[i + 20]; | 48 | bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; |
60 | 49 | bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; | |
61 | for (i = 0; i < 5; i++) { | 50 | bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; |
62 | t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); | 51 | |
63 | for (j = 0; j < 25; j += 5) | 52 | t[0] = bc[4] ^ rol64(bc[1], 1); |
64 | st[j + i] ^= t; | 53 | t[1] = bc[0] ^ rol64(bc[2], 1); |
65 | } | 54 | t[2] = bc[1] ^ rol64(bc[3], 1); |
55 | t[3] = bc[2] ^ rol64(bc[4], 1); | ||
56 | t[4] = bc[3] ^ rol64(bc[0], 1); | ||
57 | |||
58 | st[0] ^= t[0]; | ||
66 | 59 | ||
67 | /* Rho Pi */ | 60 | /* Rho Pi */ |
68 | t = st[1]; | 61 | tt = st[1]; |
69 | for (i = 0; i < 24; i++) { | 62 | st[ 1] = rol64(st[ 6] ^ t[1], 44); |
70 | j = keccakf_piln[i]; | 63 | st[ 6] = rol64(st[ 9] ^ t[4], 20); |
71 | bc[0] = st[j]; | 64 | st[ 9] = rol64(st[22] ^ t[2], 61); |
72 | st[j] = ROTL64(t, keccakf_rotc[i]); | 65 | st[22] = rol64(st[14] ^ t[4], 39); |
73 | t = bc[0]; | 66 | st[14] = rol64(st[20] ^ t[0], 18); |
74 | } | 67 | st[20] = rol64(st[ 2] ^ t[2], 62); |
68 | st[ 2] = rol64(st[12] ^ t[2], 43); | ||
69 | st[12] = rol64(st[13] ^ t[3], 25); | ||
70 | st[13] = rol64(st[19] ^ t[4], 8); | ||
71 | st[19] = rol64(st[23] ^ t[3], 56); | ||
72 | st[23] = rol64(st[15] ^ t[0], 41); | ||
73 | st[15] = rol64(st[ 4] ^ t[4], 27); | ||
74 | st[ 4] = rol64(st[24] ^ t[4], 14); | ||
75 | st[24] = rol64(st[21] ^ t[1], 2); | ||
76 | st[21] = rol64(st[ 8] ^ t[3], 55); | ||
77 | st[ 8] = rol64(st[16] ^ t[1], 45); | ||
78 | st[16] = rol64(st[ 5] ^ t[0], 36); | ||
79 | st[ 5] = rol64(st[ 3] ^ t[3], 28); | ||
80 | st[ 3] = rol64(st[18] ^ t[3], 21); | ||
81 | st[18] = rol64(st[17] ^ t[2], 15); | ||
82 | st[17] = rol64(st[11] ^ t[1], 10); | ||
83 | st[11] = rol64(st[ 7] ^ t[2], 6); | ||
84 | st[ 7] = rol64(st[10] ^ t[0], 3); | ||
85 | st[10] = rol64( tt ^ t[1], 1); | ||
75 | 86 | ||
76 | /* Chi */ | 87 | /* Chi */ |
77 | for (j = 0; j < 25; j += 5) { | 88 | bc[ 0] = ~st[ 1] & st[ 2]; |
78 | for (i = 0; i < 5; i++) | 89 | bc[ 1] = ~st[ 2] & st[ 3]; |
79 | bc[i] = st[j + i]; | 90 | bc[ 2] = ~st[ 3] & st[ 4]; |
80 | for (i = 0; i < 5; i++) | 91 | bc[ 3] = ~st[ 4] & st[ 0]; |
81 | st[j + i] ^= (~bc[(i + 1) % 5]) & | 92 | bc[ 4] = ~st[ 0] & st[ 1]; |
82 | bc[(i + 2) % 5]; | 93 | st[ 0] ^= bc[ 0]; |
83 | } | 94 | st[ 1] ^= bc[ 1]; |
95 | st[ 2] ^= bc[ 2]; | ||
96 | st[ 3] ^= bc[ 3]; | ||
97 | st[ 4] ^= bc[ 4]; | ||
98 | |||
99 | bc[ 0] = ~st[ 6] & st[ 7]; | ||
100 | bc[ 1] = ~st[ 7] & st[ 8]; | ||
101 | bc[ 2] = ~st[ 8] & st[ 9]; | ||
102 | bc[ 3] = ~st[ 9] & st[ 5]; | ||
103 | bc[ 4] = ~st[ 5] & st[ 6]; | ||
104 | st[ 5] ^= bc[ 0]; | ||
105 | st[ 6] ^= bc[ 1]; | ||
106 | st[ 7] ^= bc[ 2]; | ||
107 | st[ 8] ^= bc[ 3]; | ||
108 | st[ 9] ^= bc[ 4]; | ||
109 | |||
110 | bc[ 0] = ~st[11] & st[12]; | ||
111 | bc[ 1] = ~st[12] & st[13]; | ||
112 | bc[ 2] = ~st[13] & st[14]; | ||
113 | bc[ 3] = ~st[14] & st[10]; | ||
114 | bc[ 4] = ~st[10] & st[11]; | ||
115 | st[10] ^= bc[ 0]; | ||
116 | st[11] ^= bc[ 1]; | ||
117 | st[12] ^= bc[ 2]; | ||
118 | st[13] ^= bc[ 3]; | ||
119 | st[14] ^= bc[ 4]; | ||
120 | |||
121 | bc[ 0] = ~st[16] & st[17]; | ||
122 | bc[ 1] = ~st[17] & st[18]; | ||
123 | bc[ 2] = ~st[18] & st[19]; | ||
124 | bc[ 3] = ~st[19] & st[15]; | ||
125 | bc[ 4] = ~st[15] & st[16]; | ||
126 | st[15] ^= bc[ 0]; | ||
127 | st[16] ^= bc[ 1]; | ||
128 | st[17] ^= bc[ 2]; | ||
129 | st[18] ^= bc[ 3]; | ||
130 | st[19] ^= bc[ 4]; | ||
131 | |||
132 | bc[ 0] = ~st[21] & st[22]; | ||
133 | bc[ 1] = ~st[22] & st[23]; | ||
134 | bc[ 2] = ~st[23] & st[24]; | ||
135 | bc[ 3] = ~st[24] & st[20]; | ||
136 | bc[ 4] = ~st[20] & st[21]; | ||
137 | st[20] ^= bc[ 0]; | ||
138 | st[21] ^= bc[ 1]; | ||
139 | st[22] ^= bc[ 2]; | ||
140 | st[23] ^= bc[ 3]; | ||
141 | st[24] ^= bc[ 4]; | ||
84 | 142 | ||
85 | /* Iota */ | 143 | /* Iota */ |
86 | st[0] ^= keccakf_rndc[round]; | 144 | st[0] ^= keccakf_rndc[round]; |
87 | } | 145 | } |
88 | } | 146 | } |
89 | 147 | ||
90 | static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz) | 148 | int crypto_sha3_init(struct shash_desc *desc) |
91 | { | ||
92 | memset(sctx, 0, sizeof(*sctx)); | ||
93 | sctx->md_len = digest_sz; | ||
94 | sctx->rsiz = 200 - 2 * digest_sz; | ||
95 | sctx->rsizw = sctx->rsiz / 8; | ||
96 | } | ||
97 | |||
98 | static int sha3_224_init(struct shash_desc *desc) | ||
99 | { | ||
100 | struct sha3_state *sctx = shash_desc_ctx(desc); | ||
101 | |||
102 | sha3_init(sctx, SHA3_224_DIGEST_SIZE); | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static int sha3_256_init(struct shash_desc *desc) | ||
107 | { | 149 | { |
108 | struct sha3_state *sctx = shash_desc_ctx(desc); | 150 | struct sha3_state *sctx = shash_desc_ctx(desc); |
151 | unsigned int digest_size = crypto_shash_digestsize(desc->tfm); | ||
109 | 152 | ||
110 | sha3_init(sctx, SHA3_256_DIGEST_SIZE); | 153 | sctx->rsiz = 200 - 2 * digest_size; |
111 | return 0; | 154 | sctx->rsizw = sctx->rsiz / 8; |
112 | } | 155 | sctx->partial = 0; |
113 | |||
114 | static int sha3_384_init(struct shash_desc *desc) | ||
115 | { | ||
116 | struct sha3_state *sctx = shash_desc_ctx(desc); | ||
117 | |||
118 | sha3_init(sctx, SHA3_384_DIGEST_SIZE); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static int sha3_512_init(struct shash_desc *desc) | ||
123 | { | ||
124 | struct sha3_state *sctx = shash_desc_ctx(desc); | ||
125 | 156 | ||
126 | sha3_init(sctx, SHA3_512_DIGEST_SIZE); | 157 | memset(sctx->st, 0, sizeof(sctx->st)); |
127 | return 0; | 158 | return 0; |
128 | } | 159 | } |
160 | EXPORT_SYMBOL(crypto_sha3_init); | ||
129 | 161 | ||
130 | static int sha3_update(struct shash_desc *desc, const u8 *data, | 162 | int crypto_sha3_update(struct shash_desc *desc, const u8 *data, |
131 | unsigned int len) | 163 | unsigned int len) |
132 | { | 164 | { |
133 | struct sha3_state *sctx = shash_desc_ctx(desc); | 165 | struct sha3_state *sctx = shash_desc_ctx(desc); |
@@ -149,7 +181,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, | |||
149 | unsigned int i; | 181 | unsigned int i; |
150 | 182 | ||
151 | for (i = 0; i < sctx->rsizw; i++) | 183 | for (i = 0; i < sctx->rsizw; i++) |
152 | sctx->st[i] ^= ((u64 *) src)[i]; | 184 | sctx->st[i] ^= get_unaligned_le64(src + 8 * i); |
153 | keccakf(sctx->st); | 185 | keccakf(sctx->st); |
154 | 186 | ||
155 | done += sctx->rsiz; | 187 | done += sctx->rsiz; |
@@ -163,125 +195,89 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, | |||
163 | 195 | ||
164 | return 0; | 196 | return 0; |
165 | } | 197 | } |
198 | EXPORT_SYMBOL(crypto_sha3_update); | ||
166 | 199 | ||
167 | static int sha3_final(struct shash_desc *desc, u8 *out) | 200 | int crypto_sha3_final(struct shash_desc *desc, u8 *out) |
168 | { | 201 | { |
169 | struct sha3_state *sctx = shash_desc_ctx(desc); | 202 | struct sha3_state *sctx = shash_desc_ctx(desc); |
170 | unsigned int i, inlen = sctx->partial; | 203 | unsigned int i, inlen = sctx->partial; |
204 | unsigned int digest_size = crypto_shash_digestsize(desc->tfm); | ||
205 | __le64 *digest = (__le64 *)out; | ||
171 | 206 | ||
172 | sctx->buf[inlen++] = 0x06; | 207 | sctx->buf[inlen++] = 0x06; |
173 | memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); | 208 | memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); |
174 | sctx->buf[sctx->rsiz - 1] |= 0x80; | 209 | sctx->buf[sctx->rsiz - 1] |= 0x80; |
175 | 210 | ||
176 | for (i = 0; i < sctx->rsizw; i++) | 211 | for (i = 0; i < sctx->rsizw; i++) |
177 | sctx->st[i] ^= ((u64 *) sctx->buf)[i]; | 212 | sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); |
178 | 213 | ||
179 | keccakf(sctx->st); | 214 | keccakf(sctx->st); |
180 | 215 | ||
181 | for (i = 0; i < sctx->rsizw; i++) | 216 | for (i = 0; i < digest_size / 8; i++) |
182 | sctx->st[i] = cpu_to_le64(sctx->st[i]); | 217 | put_unaligned_le64(sctx->st[i], digest++); |
183 | 218 | ||
184 | memcpy(out, sctx->st, sctx->md_len); | 219 | if (digest_size & 4) |
220 | put_unaligned_le32(sctx->st[i], (__le32 *)digest); | ||
185 | 221 | ||
186 | memset(sctx, 0, sizeof(*sctx)); | 222 | memset(sctx, 0, sizeof(*sctx)); |
187 | return 0; | 223 | return 0; |
188 | } | 224 | } |
189 | 225 | EXPORT_SYMBOL(crypto_sha3_final); | |
190 | static struct shash_alg sha3_224 = { | 226 | |
191 | .digestsize = SHA3_224_DIGEST_SIZE, | 227 | static struct shash_alg algs[] = { { |
192 | .init = sha3_224_init, | 228 | .digestsize = SHA3_224_DIGEST_SIZE, |
193 | .update = sha3_update, | 229 | .init = crypto_sha3_init, |
194 | .final = sha3_final, | 230 | .update = crypto_sha3_update, |
195 | .descsize = sizeof(struct sha3_state), | 231 | .final = crypto_sha3_final, |
196 | .base = { | 232 | .descsize = sizeof(struct sha3_state), |
197 | .cra_name = "sha3-224", | 233 | .base.cra_name = "sha3-224", |
198 | .cra_driver_name = "sha3-224-generic", | 234 | .base.cra_driver_name = "sha3-224-generic", |
199 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 235 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, |
200 | .cra_blocksize = SHA3_224_BLOCK_SIZE, | 236 | .base.cra_blocksize = SHA3_224_BLOCK_SIZE, |
201 | .cra_module = THIS_MODULE, | 237 | .base.cra_module = THIS_MODULE, |
202 | } | 238 | }, { |
203 | }; | 239 | .digestsize = SHA3_256_DIGEST_SIZE, |
204 | 240 | .init = crypto_sha3_init, | |
205 | static struct shash_alg sha3_256 = { | 241 | .update = crypto_sha3_update, |
206 | .digestsize = SHA3_256_DIGEST_SIZE, | 242 | .final = crypto_sha3_final, |
207 | .init = sha3_256_init, | 243 | .descsize = sizeof(struct sha3_state), |
208 | .update = sha3_update, | 244 | .base.cra_name = "sha3-256", |
209 | .final = sha3_final, | 245 | .base.cra_driver_name = "sha3-256-generic", |
210 | .descsize = sizeof(struct sha3_state), | 246 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, |
211 | .base = { | 247 | .base.cra_blocksize = SHA3_256_BLOCK_SIZE, |
212 | .cra_name = "sha3-256", | 248 | .base.cra_module = THIS_MODULE, |
213 | .cra_driver_name = "sha3-256-generic", | 249 | }, { |
214 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 250 | .digestsize = SHA3_384_DIGEST_SIZE, |
215 | .cra_blocksize = SHA3_256_BLOCK_SIZE, | 251 | .init = crypto_sha3_init, |
216 | .cra_module = THIS_MODULE, | 252 | .update = crypto_sha3_update, |
217 | } | 253 | .final = crypto_sha3_final, |
218 | }; | 254 | .descsize = sizeof(struct sha3_state), |
219 | 255 | .base.cra_name = "sha3-384", | |
220 | static struct shash_alg sha3_384 = { | 256 | .base.cra_driver_name = "sha3-384-generic", |
221 | .digestsize = SHA3_384_DIGEST_SIZE, | 257 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, |
222 | .init = sha3_384_init, | 258 | .base.cra_blocksize = SHA3_384_BLOCK_SIZE, |
223 | .update = sha3_update, | 259 | .base.cra_module = THIS_MODULE, |
224 | .final = sha3_final, | 260 | }, { |
225 | .descsize = sizeof(struct sha3_state), | 261 | .digestsize = SHA3_512_DIGEST_SIZE, |
226 | .base = { | 262 | .init = crypto_sha3_init, |
227 | .cra_name = "sha3-384", | 263 | .update = crypto_sha3_update, |
228 | .cra_driver_name = "sha3-384-generic", | 264 | .final = crypto_sha3_final, |
229 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 265 | .descsize = sizeof(struct sha3_state), |
230 | .cra_blocksize = SHA3_384_BLOCK_SIZE, | 266 | .base.cra_name = "sha3-512", |
231 | .cra_module = THIS_MODULE, | 267 | .base.cra_driver_name = "sha3-512-generic", |
232 | } | 268 | .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, |
233 | }; | 269 | .base.cra_blocksize = SHA3_512_BLOCK_SIZE, |
234 | 270 | .base.cra_module = THIS_MODULE, | |
235 | static struct shash_alg sha3_512 = { | 271 | } }; |
236 | .digestsize = SHA3_512_DIGEST_SIZE, | ||
237 | .init = sha3_512_init, | ||
238 | .update = sha3_update, | ||
239 | .final = sha3_final, | ||
240 | .descsize = sizeof(struct sha3_state), | ||
241 | .base = { | ||
242 | .cra_name = "sha3-512", | ||
243 | .cra_driver_name = "sha3-512-generic", | ||
244 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
245 | .cra_blocksize = SHA3_512_BLOCK_SIZE, | ||
246 | .cra_module = THIS_MODULE, | ||
247 | } | ||
248 | }; | ||
249 | 272 | ||
250 | static int __init sha3_generic_mod_init(void) | 273 | static int __init sha3_generic_mod_init(void) |
251 | { | 274 | { |
252 | int ret; | 275 | return crypto_register_shashes(algs, ARRAY_SIZE(algs)); |
253 | |||
254 | ret = crypto_register_shash(&sha3_224); | ||
255 | if (ret < 0) | ||
256 | goto err_out; | ||
257 | ret = crypto_register_shash(&sha3_256); | ||
258 | if (ret < 0) | ||
259 | goto err_out_224; | ||
260 | ret = crypto_register_shash(&sha3_384); | ||
261 | if (ret < 0) | ||
262 | goto err_out_256; | ||
263 | ret = crypto_register_shash(&sha3_512); | ||
264 | if (ret < 0) | ||
265 | goto err_out_384; | ||
266 | |||
267 | return 0; | ||
268 | |||
269 | err_out_384: | ||
270 | crypto_unregister_shash(&sha3_384); | ||
271 | err_out_256: | ||
272 | crypto_unregister_shash(&sha3_256); | ||
273 | err_out_224: | ||
274 | crypto_unregister_shash(&sha3_224); | ||
275 | err_out: | ||
276 | return ret; | ||
277 | } | 276 | } |
278 | 277 | ||
279 | static void __exit sha3_generic_mod_fini(void) | 278 | static void __exit sha3_generic_mod_fini(void) |
280 | { | 279 | { |
281 | crypto_unregister_shash(&sha3_224); | 280 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); |
282 | crypto_unregister_shash(&sha3_256); | ||
283 | crypto_unregister_shash(&sha3_384); | ||
284 | crypto_unregister_shash(&sha3_512); | ||
285 | } | 281 | } |
286 | 282 | ||
287 | module_init(sha3_generic_mod_init); | 283 | module_init(sha3_generic_mod_init); |
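
Besides the hand-unrolled keccakf() — which pins -O3 onto that one function to coax better code out of the compiler — the absorb and squeeze paths switch to explicit little-endian, alignment-safe accessors: the old ((u64 *)src)[i] loads were wrong on big-endian machines and could fault where unaligned access traps. The absorb step, isolated:

    #include <asm/unaligned.h>

    /* hedged illustration: Keccak absorbs the message as LE 64-bit
     * lanes regardless of host endianness or buffer alignment */
    static void sha3_absorb(u64 st[25], const u8 *src, unsigned int rsizw)
    {
            unsigned int i;

            for (i = 0; i < rsizw; i++)
                    st[i] ^= get_unaligned_le64(src + 8 * i);
    }

Registering the four variants as one array also replaces the err_out_* unwind ladder, since crypto_register_shashes() unregisters the entries it already registered when a later one fails.
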
diff --git a/crypto/shash.c b/crypto/shash.c index e849d3ee2e27..5d732c6bb4b2 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
58 | { | 58 | { |
59 | struct shash_alg *shash = crypto_shash_alg(tfm); | 59 | struct shash_alg *shash = crypto_shash_alg(tfm); |
60 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 60 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
61 | int err; | ||
61 | 62 | ||
62 | if ((unsigned long)key & alignmask) | 63 | if ((unsigned long)key & alignmask) |
63 | return shash_setkey_unaligned(tfm, key, keylen); | 64 | err = shash_setkey_unaligned(tfm, key, keylen); |
65 | else | ||
66 | err = shash->setkey(tfm, key, keylen); | ||
67 | |||
68 | if (err) | ||
69 | return err; | ||
64 | 70 | ||
65 | return shash->setkey(tfm, key, keylen); | 71 | crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
72 | return 0; | ||
66 | } | 73 | } |
67 | EXPORT_SYMBOL_GPL(crypto_shash_setkey); | 74 | EXPORT_SYMBOL_GPL(crypto_shash_setkey); |
68 | 75 | ||
@@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | |||
181 | struct shash_alg *shash = crypto_shash_alg(tfm); | 188 | struct shash_alg *shash = crypto_shash_alg(tfm); |
182 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 189 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
183 | 190 | ||
191 | if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
192 | return -ENOKEY; | ||
193 | |||
184 | if (((unsigned long)data | (unsigned long)out) & alignmask) | 194 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
185 | return shash_digest_unaligned(desc, data, len, out); | 195 | return shash_digest_unaligned(desc, data, len, out); |
186 | 196 | ||
@@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
360 | crt->digest = shash_async_digest; | 370 | crt->digest = shash_async_digest; |
361 | crt->setkey = shash_async_setkey; | 371 | crt->setkey = shash_async_setkey; |
362 | 372 | ||
363 | crt->has_setkey = alg->setkey != shash_no_setkey; | 373 | crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & |
374 | CRYPTO_TFM_NEED_KEY); | ||
364 | 375 | ||
365 | if (alg->export) | 376 | if (alg->export) |
366 | crt->export = shash_async_export; | 377 | crt->export = shash_async_export; |
@@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
375 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) | 386 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) |
376 | { | 387 | { |
377 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | 388 | struct crypto_shash *hash = __crypto_shash_cast(tfm); |
389 | struct shash_alg *alg = crypto_shash_alg(hash); | ||
390 | |||
391 | hash->descsize = alg->descsize; | ||
392 | |||
393 | if (crypto_shash_alg_has_setkey(alg) && | ||
394 | !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) | ||
395 | crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); | ||
378 | 396 | ||
379 | hash->descsize = crypto_shash_alg(hash)->descsize; | ||
380 | return 0; | 397 | return 0; |
381 | } | 398 | } |
382 | 399 | ||
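Taken together, the shash hunks give keyed hashes a small state machine: CRYPTO_TFM_NEED_KEY is set at tfm creation whenever the algorithm has a mandatory setkey, cleared only by a setkey that succeeds, and tested on entry to digest (the ahash wrappers gain matching checks elsewhere in this patch). A condensed sketch of that control flow — illustration only, not verbatim kernel code; do_setkey()/do_digest() are stand-ins:

	int setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
	{
		int err = do_setkey(tfm, key, keylen);	/* alignment handling elided */

		if (err)
			return err;	/* flag stays set: tfm remains unusable */

		crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
		return 0;
	}

	int digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
	{
		if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY)
			return -ENOKEY;	/* no key was ever set successfully */

		return do_digest(desc, data, len, out);
	}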
diff --git a/crypto/simd.c b/crypto/simd.c index 88203370a62f..208226d7f908 100644 --- a/crypto/simd.c +++ b/crypto/simd.c | |||
@@ -19,9 +19,7 @@ | |||
19 | * GNU General Public License for more details. | 19 | * GNU General Public License for more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU General Public License | 21 | * You should have received a copy of the GNU General Public License |
22 | * along with this program; if not, write to the Free Software | 22 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
24 | * USA | ||
25 | * | 23 | * |
26 | */ | 24 | */ |
27 | 25 | ||
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 11af5fd6a443..0fe2a2923ad0 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
@@ -598,8 +598,11 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, | |||
598 | err = crypto_blkcipher_setkey(blkcipher, key, keylen); | 598 | err = crypto_blkcipher_setkey(blkcipher, key, keylen); |
599 | crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & | 599 | crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & |
600 | CRYPTO_TFM_RES_MASK); | 600 | CRYPTO_TFM_RES_MASK); |
601 | if (err) | ||
602 | return err; | ||
601 | 603 | ||
602 | return err; | 604 | crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
605 | return 0; | ||
603 | } | 606 | } |
604 | 607 | ||
605 | static int skcipher_crypt_blkcipher(struct skcipher_request *req, | 608 | static int skcipher_crypt_blkcipher(struct skcipher_request *req, |
@@ -674,6 +677,9 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) | |||
674 | skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); | 677 | skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); |
675 | skcipher->keysize = calg->cra_blkcipher.max_keysize; | 678 | skcipher->keysize = calg->cra_blkcipher.max_keysize; |
676 | 679 | ||
680 | if (skcipher->keysize) | ||
681 | crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); | ||
682 | |||
677 | return 0; | 683 | return 0; |
678 | } | 684 | } |
679 | 685 | ||
@@ -692,8 +698,11 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, | |||
692 | crypto_skcipher_set_flags(tfm, | 698 | crypto_skcipher_set_flags(tfm, |
693 | crypto_ablkcipher_get_flags(ablkcipher) & | 699 | crypto_ablkcipher_get_flags(ablkcipher) & |
694 | CRYPTO_TFM_RES_MASK); | 700 | CRYPTO_TFM_RES_MASK); |
701 | if (err) | ||
702 | return err; | ||
695 | 703 | ||
696 | return err; | 704 | crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
705 | return 0; | ||
697 | } | 706 | } |
698 | 707 | ||
699 | static int skcipher_crypt_ablkcipher(struct skcipher_request *req, | 708 | static int skcipher_crypt_ablkcipher(struct skcipher_request *req, |
@@ -767,6 +776,9 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) | |||
767 | sizeof(struct ablkcipher_request); | 776 | sizeof(struct ablkcipher_request); |
768 | skcipher->keysize = calg->cra_ablkcipher.max_keysize; | 777 | skcipher->keysize = calg->cra_ablkcipher.max_keysize; |
769 | 778 | ||
779 | if (skcipher->keysize) | ||
780 | crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); | ||
781 | |||
770 | return 0; | 782 | return 0; |
771 | } | 783 | } |
772 | 784 | ||
@@ -796,6 +808,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | |||
796 | { | 808 | { |
797 | struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); | 809 | struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); |
798 | unsigned long alignmask = crypto_skcipher_alignmask(tfm); | 810 | unsigned long alignmask = crypto_skcipher_alignmask(tfm); |
811 | int err; | ||
799 | 812 | ||
800 | if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { | 813 | if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { |
801 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 814 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
@@ -803,9 +816,15 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | |||
803 | } | 816 | } |
804 | 817 | ||
805 | if ((unsigned long)key & alignmask) | 818 | if ((unsigned long)key & alignmask) |
806 | return skcipher_setkey_unaligned(tfm, key, keylen); | 819 | err = skcipher_setkey_unaligned(tfm, key, keylen); |
820 | else | ||
821 | err = cipher->setkey(tfm, key, keylen); | ||
822 | |||
823 | if (err) | ||
824 | return err; | ||
807 | 825 | ||
808 | return cipher->setkey(tfm, key, keylen); | 826 | crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
827 | return 0; | ||
809 | } | 828 | } |
810 | 829 | ||
811 | static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) | 830 | static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) |
@@ -834,6 +853,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) | |||
834 | skcipher->ivsize = alg->ivsize; | 853 | skcipher->ivsize = alg->ivsize; |
835 | skcipher->keysize = alg->max_keysize; | 854 | skcipher->keysize = alg->max_keysize; |
836 | 855 | ||
856 | if (skcipher->keysize) | ||
857 | crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); | ||
858 | |||
837 | if (alg->exit) | 859 | if (alg->exit) |
838 | skcipher->base.exit = crypto_skcipher_exit_tfm; | 860 | skcipher->base.exit = crypto_skcipher_exit_tfm; |
839 | 861 | ||
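The skcipher conversion mirrors the shash one: any transform with a nonzero keysize starts out flagged CRYPTO_TFM_NEED_KEY, and only a successful setkey clears it. From a caller's point of view (hedged sketch; the actual -ENOKEY rejection is added to the encrypt/decrypt entry points elsewhere in this series):

	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	/* ... allocate req, set up buffers ... */

	err = crypto_skcipher_encrypt(req);	/* before setkey: expect -ENOKEY */

	err = crypto_skcipher_setkey(tfm, key, 16);
	if (!err)
		err = crypto_skcipher_encrypt(req);	/* now allowed */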
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 9267cbdb14d2..14213a096fd2 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -67,6 +67,7 @@ static char *alg = NULL; | |||
67 | static u32 type; | 67 | static u32 type; |
68 | static u32 mask; | 68 | static u32 mask; |
69 | static int mode; | 69 | static int mode; |
70 | static u32 num_mb = 8; | ||
70 | static char *tvmem[TVMEMSIZE]; | 71 | static char *tvmem[TVMEMSIZE]; |
71 | 72 | ||
72 | static char *check[] = { | 73 | static char *check[] = { |
@@ -79,6 +80,66 @@ static char *check[] = { | |||
79 | NULL | 80 | NULL |
80 | }; | 81 | }; |
81 | 82 | ||
83 | static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; | ||
84 | static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; | ||
85 | |||
86 | #define XBUFSIZE 8 | ||
87 | #define MAX_IVLEN 32 | ||
88 | |||
89 | static int testmgr_alloc_buf(char *buf[XBUFSIZE]) | ||
90 | { | ||
91 | int i; | ||
92 | |||
93 | for (i = 0; i < XBUFSIZE; i++) { | ||
94 | buf[i] = (void *)__get_free_page(GFP_KERNEL); | ||
95 | if (!buf[i]) | ||
96 | goto err_free_buf; | ||
97 | } | ||
98 | |||
99 | return 0; | ||
100 | |||
101 | err_free_buf: | ||
102 | while (i-- > 0) | ||
103 | free_page((unsigned long)buf[i]); | ||
104 | |||
105 | return -ENOMEM; | ||
106 | } | ||
107 | |||
108 | static void testmgr_free_buf(char *buf[XBUFSIZE]) | ||
109 | { | ||
110 | int i; | ||
111 | |||
112 | for (i = 0; i < XBUFSIZE; i++) | ||
113 | free_page((unsigned long)buf[i]); | ||
114 | } | ||
115 | |||
116 | static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], | ||
117 | unsigned int buflen, const void *assoc, | ||
118 | unsigned int aad_size) | ||
119 | { | ||
120 | int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; | ||
121 | int k, rem; | ||
122 | |||
123 | if (np > XBUFSIZE) { | ||
124 | rem = PAGE_SIZE; | ||
125 | np = XBUFSIZE; | ||
126 | } else { | ||
127 | rem = buflen % PAGE_SIZE; | ||
128 | } | ||
129 | |||
130 | sg_init_table(sg, np + 1); | ||
131 | |||
132 | sg_set_buf(&sg[0], assoc, aad_size); | ||
133 | |||
134 | if (rem) | ||
135 | np--; | ||
136 | for (k = 0; k < np; k++) | ||
137 | sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); | ||
138 | |||
139 | if (rem) | ||
140 | sg_set_buf(&sg[k + 1], xbuf[k], rem); | ||
141 | } | ||
142 | |||
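The reworked sg_init_aead() now owns the associated-data slot: entry 0 always carries the AAD, followed by up to XBUFSIZE page-sized chunks and, when buflen is not page-aligned, a short tail entry. For example, with 4096-byte pages and buflen = 2 * 4096 + 100, np is 3 and rem is 100, so the table ends up holding the AAD entry, two full pages and a 100-byte tail. Previously each caller had to patch the AAD into sg[0] after the fact, which the test_aead_speed() hunk further down removes.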
82 | static inline int do_one_aead_op(struct aead_request *req, int ret) | 143 | static inline int do_one_aead_op(struct aead_request *req, int ret) |
83 | { | 144 | { |
84 | struct crypto_wait *wait = req->base.data; | 145 | struct crypto_wait *wait = req->base.data; |
@@ -86,6 +147,298 @@ static inline int do_one_aead_op(struct aead_request *req, int ret) | |||
86 | return crypto_wait_req(ret, wait); | 147 | return crypto_wait_req(ret, wait); |
87 | } | 148 | } |
88 | 149 | ||
150 | struct test_mb_aead_data { | ||
151 | struct scatterlist sg[XBUFSIZE]; | ||
152 | struct scatterlist sgout[XBUFSIZE]; | ||
153 | struct aead_request *req; | ||
154 | struct crypto_wait wait; | ||
155 | char *xbuf[XBUFSIZE]; | ||
156 | char *xoutbuf[XBUFSIZE]; | ||
157 | char *axbuf[XBUFSIZE]; | ||
158 | }; | ||
159 | |||
160 | static int do_mult_aead_op(struct test_mb_aead_data *data, int enc, | ||
161 | u32 num_mb) | ||
162 | { | ||
163 | int i, rc[num_mb], err = 0; | ||
164 | |||
165 | /* Fire up a bunch of concurrent requests */ | ||
166 | for (i = 0; i < num_mb; i++) { | ||
167 | if (enc == ENCRYPT) | ||
168 | rc[i] = crypto_aead_encrypt(data[i].req); | ||
169 | else | ||
170 | rc[i] = crypto_aead_decrypt(data[i].req); | ||
171 | } | ||
172 | |||
173 | /* Wait for all requests to finish */ | ||
174 | for (i = 0; i < num_mb; i++) { | ||
175 | rc[i] = crypto_wait_req(rc[i], &data[i].wait); | ||
176 | |||
177 | if (rc[i]) { | ||
178 | pr_info("concurrent request %d error %d\n", i, rc[i]); | ||
179 | err = rc[i]; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | return err; | ||
184 | } | ||
185 | |||
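do_mult_aead_op() is the heart of the multibuffer measurement: it submits all num_mb requests back to back so the driver can work on them concurrently, then reaps each one with crypto_wait_req(), which treats -EINPROGRESS/-EBUSY as "pending" and sleeps on the per-request completion (rc[] here is a variable-length array sized by the num_mb module parameter). The underlying idiom for a single async request, sketched with the generic wait helpers:

	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* A driver may complete synchronously or return -EINPROGRESS/-EBUSY. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);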
186 | static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc, | ||
187 | int blen, int secs, u32 num_mb) | ||
188 | { | ||
189 | unsigned long start, end; | ||
190 | int bcount; | ||
191 | int ret; | ||
192 | |||
193 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | ||
194 | time_before(jiffies, end); bcount++) { | ||
195 | ret = do_mult_aead_op(data, enc, num_mb); | ||
196 | if (ret) | ||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | pr_cont("%d operations in %d seconds (%ld bytes)\n", | ||
201 | bcount * num_mb, secs, (long)bcount * blen * num_mb); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc, | ||
206 | int blen, u32 num_mb) | ||
207 | { | ||
208 | unsigned long cycles = 0; | ||
209 | int ret = 0; | ||
210 | int i; | ||
211 | |||
212 | /* Warm-up run. */ | ||
213 | for (i = 0; i < 4; i++) { | ||
214 | ret = do_mult_aead_op(data, enc, num_mb); | ||
215 | if (ret) | ||
216 | goto out; | ||
217 | } | ||
218 | |||
219 | /* The real thing. */ | ||
220 | for (i = 0; i < 8; i++) { | ||
221 | cycles_t start, end; | ||
222 | |||
223 | start = get_cycles(); | ||
224 | ret = do_mult_aead_op(data, enc, num_mb); | ||
225 | end = get_cycles(); | ||
226 | |||
227 | if (ret) | ||
228 | goto out; | ||
229 | |||
230 | cycles += end - start; | ||
231 | } | ||
232 | |||
233 | out: | ||
234 | if (ret == 0) | ||
235 | pr_cont("1 operation in %lu cycles (%d bytes)\n", | ||
236 | (cycles + 4) / (8 * num_mb), blen); | ||
237 | |||
238 | return ret; | ||
239 | } | ||
240 | |||
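The reported figure is an average over eight timed batches of num_mb operations each, so the divisor is 8 * num_mb; the + 4 merely rounds to the nearest value instead of truncating. For instance, 100 cycles summed over eight single-request batches reports (100 + 4) / 8 = 13 rather than 12.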
241 | static void test_mb_aead_speed(const char *algo, int enc, int secs, | ||
242 | struct aead_speed_template *template, | ||
243 | unsigned int tcount, u8 authsize, | ||
244 | unsigned int aad_size, u8 *keysize, u32 num_mb) | ||
245 | { | ||
246 | struct test_mb_aead_data *data; | ||
247 | struct crypto_aead *tfm; | ||
248 | unsigned int i, j, iv_len; | ||
249 | const char *key; | ||
250 | const char *e; | ||
251 | void *assoc; | ||
252 | u32 *b_size; | ||
253 | char *iv; | ||
254 | int ret; | ||
255 | |||
257 | if (aad_size >= PAGE_SIZE) { | ||
258 | pr_err("associated data length (%u) too big\n", aad_size); | ||
259 | return; | ||
260 | } | ||
261 | |||
262 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); | ||
263 | if (!iv) | ||
264 | return; | ||
265 | |||
266 | if (enc == ENCRYPT) | ||
267 | e = "encryption"; | ||
268 | else | ||
269 | e = "decryption"; | ||
270 | |||
271 | data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); | ||
272 | if (!data) | ||
273 | goto out_free_iv; | ||
274 | |||
275 | tfm = crypto_alloc_aead(algo, 0, 0); | ||
276 | if (IS_ERR(tfm)) { | ||
277 | pr_err("failed to load transform for %s: %ld\n", | ||
278 | algo, PTR_ERR(tfm)); | ||
279 | goto out_free_data; | ||
280 | } | ||
281 | |||
282 | ret = crypto_aead_setauthsize(tfm, authsize); | ||
283 | |||
284 | for (i = 0; i < num_mb; ++i) | ||
285 | if (testmgr_alloc_buf(data[i].xbuf)) { | ||
286 | while (i--) | ||
287 | testmgr_free_buf(data[i].xbuf); | ||
288 | goto out_free_tfm; | ||
289 | } | ||
290 | |||
291 | for (i = 0; i < num_mb; ++i) | ||
292 | if (testmgr_alloc_buf(data[i].axbuf)) { | ||
293 | while (i--) | ||
294 | testmgr_free_buf(data[i].axbuf); | ||
295 | goto out_free_xbuf; | ||
296 | } | ||
297 | |||
298 | for (i = 0; i < num_mb; ++i) | ||
299 | if (testmgr_alloc_buf(data[i].xoutbuf)) { | ||
300 | while (i--) | ||
301 | testmgr_free_buf(data[i].xoutbuf); | ||
302 | goto out_free_axbuf; | ||
303 | } | ||
304 | |||
305 | for (i = 0; i < num_mb; ++i) { | ||
306 | data[i].req = aead_request_alloc(tfm, GFP_KERNEL); | ||
307 | if (!data[i].req) { | ||
308 | pr_err("alg: skcipher: Failed to allocate request for %s\n", | ||
309 | algo); | ||
310 | while (i--) | ||
311 | aead_request_free(data[i].req); | ||
312 | goto out_free_xoutbuf; | ||
313 | } | ||
314 | } | ||
315 | |||
316 | for (i = 0; i < num_mb; ++i) { | ||
317 | crypto_init_wait(&data[i].wait); | ||
318 | aead_request_set_callback(data[i].req, | ||
319 | CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
320 | crypto_req_done, &data[i].wait); | ||
321 | } | ||
322 | |||
323 | pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, | ||
324 | get_driver_name(crypto_aead, tfm), e); | ||
325 | |||
326 | i = 0; | ||
327 | do { | ||
328 | b_size = aead_sizes; | ||
329 | do { | ||
330 | if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) { | ||
331 | pr_err("template (%u) too big for buffer (%lu)\n", | ||
332 | authsize + *b_size, | ||
333 | XBUFSIZE * PAGE_SIZE); | ||
334 | goto out; | ||
335 | } | ||
336 | |||
337 | pr_info("test %u (%d bit key, %d byte blocks): ", i, | ||
338 | *keysize * 8, *b_size); | ||
339 | |||
340 | /* Set up tfm global state, i.e. the key */ | ||
341 | |||
342 | memset(tvmem[0], 0xff, PAGE_SIZE); | ||
343 | key = tvmem[0]; | ||
344 | for (j = 0; j < tcount; j++) { | ||
345 | if (template[j].klen == *keysize) { | ||
346 | key = template[j].key; | ||
347 | break; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | crypto_aead_clear_flags(tfm, ~0); | ||
352 | |||
353 | ret = crypto_aead_setkey(tfm, key, *keysize); | ||
354 | if (ret) { | ||
355 | pr_err("setkey() failed flags=%x\n", | ||
356 | crypto_aead_get_flags(tfm)); | ||
357 | goto out; | ||
358 | } | ||
359 | |||
360 | iv_len = crypto_aead_ivsize(tfm); | ||
361 | if (iv_len) | ||
362 | memset(iv, 0xff, iv_len); | ||
363 | |||
364 | /* Now setup per request stuff, i.e. buffers */ | ||
365 | |||
366 | for (j = 0; j < num_mb; ++j) { | ||
367 | struct test_mb_aead_data *cur = &data[j]; | ||
368 | |||
369 | assoc = cur->axbuf[0]; | ||
370 | memset(assoc, 0xff, aad_size); | ||
371 | |||
372 | sg_init_aead(cur->sg, cur->xbuf, | ||
373 | *b_size + (enc ? 0 : authsize), | ||
374 | assoc, aad_size); | ||
375 | |||
376 | sg_init_aead(cur->sgout, cur->xoutbuf, | ||
377 | *b_size + (enc ? authsize : 0), | ||
378 | assoc, aad_size); | ||
379 | |||
380 | aead_request_set_ad(cur->req, aad_size); | ||
381 | |||
382 | if (!enc) { | ||
383 | |||
384 | aead_request_set_crypt(cur->req, | ||
385 | cur->sgout, | ||
386 | cur->sg, | ||
387 | *b_size, iv); | ||
388 | ret = crypto_aead_encrypt(cur->req); | ||
389 | ret = do_one_aead_op(cur->req, ret); | ||
390 | |||
391 | if (ret) { | ||
392 | pr_err("calculating auth failed (%d)\n", | ||
393 | ret); | ||
394 | break; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | aead_request_set_crypt(cur->req, cur->sg, | ||
399 | cur->sgout, *b_size + | ||
400 | (enc ? 0 : authsize), | ||
401 | iv); | ||
402 | |||
403 | } | ||
404 | |||
405 | if (secs) | ||
406 | ret = test_mb_aead_jiffies(data, enc, *b_size, | ||
407 | secs, num_mb); | ||
408 | else | ||
409 | ret = test_mb_aead_cycles(data, enc, *b_size, | ||
410 | num_mb); | ||
411 | |||
412 | if (ret) { | ||
413 | pr_err("%s() failed return code=%d\n", e, ret); | ||
414 | break; | ||
415 | } | ||
416 | b_size++; | ||
417 | i++; | ||
418 | } while (*b_size); | ||
419 | keysize++; | ||
420 | } while (*keysize); | ||
421 | |||
422 | out: | ||
423 | for (i = 0; i < num_mb; ++i) | ||
424 | aead_request_free(data[i].req); | ||
425 | out_free_xoutbuf: | ||
426 | for (i = 0; i < num_mb; ++i) | ||
427 | testmgr_free_buf(data[i].xoutbuf); | ||
428 | out_free_axbuf: | ||
429 | for (i = 0; i < num_mb; ++i) | ||
430 | testmgr_free_buf(data[i].axbuf); | ||
431 | out_free_xbuf: | ||
432 | for (i = 0; i < num_mb; ++i) | ||
433 | testmgr_free_buf(data[i].xbuf); | ||
434 | out_free_tfm: | ||
435 | crypto_free_aead(tfm); | ||
436 | out_free_data: | ||
437 | kfree(data); | ||
438 | out_free_iv: | ||
439 | kfree(iv); | ||
440 | } | ||
441 | |||
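test_mb_aead_speed() unwinds with the usual kernel goto ladder: each allocation stage gets a label, a failure jumps to the label that frees every stage completed so far, and the labels run in reverse allocation order. Partially completed stages clean up after themselves first (the "while (i--)" loops above), so each label only ever has to undo whole stages.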
89 | static int test_aead_jiffies(struct aead_request *req, int enc, | 442 | static int test_aead_jiffies(struct aead_request *req, int enc, |
90 | int blen, int secs) | 443 | int blen, int secs) |
91 | { | 444 | { |
@@ -151,60 +504,6 @@ out: | |||
151 | return ret; | 504 | return ret; |
152 | } | 505 | } |
153 | 506 | ||
154 | static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; | ||
155 | static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; | ||
156 | |||
157 | #define XBUFSIZE 8 | ||
158 | #define MAX_IVLEN 32 | ||
159 | |||
160 | static int testmgr_alloc_buf(char *buf[XBUFSIZE]) | ||
161 | { | ||
162 | int i; | ||
163 | |||
164 | for (i = 0; i < XBUFSIZE; i++) { | ||
165 | buf[i] = (void *)__get_free_page(GFP_KERNEL); | ||
166 | if (!buf[i]) | ||
167 | goto err_free_buf; | ||
168 | } | ||
169 | |||
170 | return 0; | ||
171 | |||
172 | err_free_buf: | ||
173 | while (i-- > 0) | ||
174 | free_page((unsigned long)buf[i]); | ||
175 | |||
176 | return -ENOMEM; | ||
177 | } | ||
178 | |||
179 | static void testmgr_free_buf(char *buf[XBUFSIZE]) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < XBUFSIZE; i++) | ||
184 | free_page((unsigned long)buf[i]); | ||
185 | } | ||
186 | |||
187 | static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], | ||
188 | unsigned int buflen) | ||
189 | { | ||
190 | int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; | ||
191 | int k, rem; | ||
192 | |||
193 | if (np > XBUFSIZE) { | ||
194 | rem = PAGE_SIZE; | ||
195 | np = XBUFSIZE; | ||
196 | } else { | ||
197 | rem = buflen % PAGE_SIZE; | ||
198 | } | ||
199 | |||
200 | sg_init_table(sg, np + 1); | ||
201 | np--; | ||
202 | for (k = 0; k < np; k++) | ||
203 | sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); | ||
204 | |||
205 | sg_set_buf(&sg[k + 1], xbuf[k], rem); | ||
206 | } | ||
207 | |||
208 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, | 507 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, |
209 | struct aead_speed_template *template, | 508 | struct aead_speed_template *template, |
210 | unsigned int tcount, u8 authsize, | 509 | unsigned int tcount, u8 authsize, |
@@ -316,19 +615,37 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
316 | goto out; | 615 | goto out; |
317 | } | 616 | } |
318 | 617 | ||
319 | sg_init_aead(sg, xbuf, | 618 | sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize), |
320 | *b_size + (enc ? 0 : authsize)); | 619 | assoc, aad_size); |
321 | 620 | ||
322 | sg_init_aead(sgout, xoutbuf, | 621 | sg_init_aead(sgout, xoutbuf, |
323 | *b_size + (enc ? authsize : 0)); | 622 | *b_size + (enc ? authsize : 0), assoc, |
623 | aad_size); | ||
324 | 624 | ||
325 | sg_set_buf(&sg[0], assoc, aad_size); | 625 | aead_request_set_ad(req, aad_size); |
326 | sg_set_buf(&sgout[0], assoc, aad_size); | 626 | |
627 | if (!enc) { | ||
628 | |||
629 | /* | ||
630 | * For decryption we need a proper auth so | ||
631 | * we do the encryption path once with buffers | ||
632 | * reversed (input <-> output) to calculate it | ||
633 | */ | ||
634 | aead_request_set_crypt(req, sgout, sg, | ||
635 | *b_size, iv); | ||
636 | ret = do_one_aead_op(req, | ||
637 | crypto_aead_encrypt(req)); | ||
638 | |||
639 | if (ret) { | ||
640 | pr_err("calculating auth failed (%d)\n", | ||
641 | ret); | ||
642 | break; | ||
643 | } | ||
644 | } | ||
327 | 645 | ||
328 | aead_request_set_crypt(req, sg, sgout, | 646 | aead_request_set_crypt(req, sg, sgout, |
329 | *b_size + (enc ? 0 : authsize), | 647 | *b_size + (enc ? 0 : authsize), |
330 | iv); | 648 | iv); |
331 | aead_request_set_ad(req, aad_size); | ||
332 | 649 | ||
333 | if (secs) | 650 | if (secs) |
334 | ret = test_aead_jiffies(req, enc, *b_size, | 651 | ret = test_aead_jiffies(req, enc, *b_size, |
@@ -381,24 +698,98 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) | |||
381 | } | 698 | } |
382 | 699 | ||
383 | struct test_mb_ahash_data { | 700 | struct test_mb_ahash_data { |
384 | struct scatterlist sg[TVMEMSIZE]; | 701 | struct scatterlist sg[XBUFSIZE]; |
385 | char result[64]; | 702 | char result[64]; |
386 | struct ahash_request *req; | 703 | struct ahash_request *req; |
387 | struct crypto_wait wait; | 704 | struct crypto_wait wait; |
388 | char *xbuf[XBUFSIZE]; | 705 | char *xbuf[XBUFSIZE]; |
389 | }; | 706 | }; |
390 | 707 | ||
391 | static void test_mb_ahash_speed(const char *algo, unsigned int sec, | 708 | static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb) |
392 | struct hash_speed *speed) | 709 | { |
710 | int i, rc[num_mb], err = 0; | ||
711 | |||
712 | /* Fire up a bunch of concurrent requests */ | ||
713 | for (i = 0; i < num_mb; i++) | ||
714 | rc[i] = crypto_ahash_digest(data[i].req); | ||
715 | |||
716 | /* Wait for all requests to finish */ | ||
717 | for (i = 0; i < num_mb; i++) { | ||
718 | rc[i] = crypto_wait_req(rc[i], &data[i].wait); | ||
719 | |||
720 | if (rc[i]) { | ||
721 | pr_info("concurrent request %d error %d\n", i, rc[i]); | ||
722 | err = rc[i]; | ||
723 | } | ||
724 | } | ||
725 | |||
726 | return err; | ||
727 | } | ||
728 | |||
729 | static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, | ||
730 | int secs, u32 num_mb) | ||
731 | { | ||
732 | unsigned long start, end; | ||
733 | int bcount; | ||
734 | int ret; | ||
735 | |||
736 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | ||
737 | time_before(jiffies, end); bcount++) { | ||
738 | ret = do_mult_ahash_op(data, num_mb); | ||
739 | if (ret) | ||
740 | return ret; | ||
741 | } | ||
742 | |||
743 | pr_cont("%d operations in %d seconds (%ld bytes)\n", | ||
744 | bcount * num_mb, secs, (long)bcount * blen * num_mb); | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, | ||
749 | u32 num_mb) | ||
750 | { | ||
751 | unsigned long cycles = 0; | ||
752 | int ret = 0; | ||
753 | int i; | ||
754 | |||
755 | /* Warm-up run. */ | ||
756 | for (i = 0; i < 4; i++) { | ||
757 | ret = do_mult_ahash_op(data, num_mb); | ||
758 | if (ret) | ||
759 | goto out; | ||
760 | } | ||
761 | |||
762 | /* The real thing. */ | ||
763 | for (i = 0; i < 8; i++) { | ||
764 | cycles_t start, end; | ||
765 | |||
766 | start = get_cycles(); | ||
767 | ret = do_mult_ahash_op(data, num_mb); | ||
768 | end = get_cycles(); | ||
769 | |||
770 | if (ret) | ||
771 | goto out; | ||
772 | |||
773 | cycles += end - start; | ||
774 | } | ||
775 | |||
776 | out: | ||
777 | if (ret == 0) | ||
778 | pr_cont("1 operation in %lu cycles (%d bytes)\n", | ||
779 | (cycles + 4) / (8 * num_mb), blen); | ||
780 | |||
781 | return ret; | ||
782 | } | ||
783 | |||
784 | static void test_mb_ahash_speed(const char *algo, unsigned int secs, | ||
785 | struct hash_speed *speed, u32 num_mb) | ||
393 | { | 786 | { |
394 | struct test_mb_ahash_data *data; | 787 | struct test_mb_ahash_data *data; |
395 | struct crypto_ahash *tfm; | 788 | struct crypto_ahash *tfm; |
396 | unsigned long start, end; | ||
397 | unsigned long cycles; | ||
398 | unsigned int i, j, k; | 789 | unsigned int i, j, k; |
399 | int ret; | 790 | int ret; |
400 | 791 | ||
401 | data = kzalloc(sizeof(*data) * 8, GFP_KERNEL); | 792 | data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); |
402 | if (!data) | 793 | if (!data) |
403 | return; | 794 | return; |
404 | 795 | ||
@@ -409,7 +800,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
409 | goto free_data; | 800 | goto free_data; |
410 | } | 801 | } |
411 | 802 | ||
412 | for (i = 0; i < 8; ++i) { | 803 | for (i = 0; i < num_mb; ++i) { |
413 | if (testmgr_alloc_buf(data[i].xbuf)) | 804 | if (testmgr_alloc_buf(data[i].xbuf)) |
414 | goto out; | 805 | goto out; |
415 | 806 | ||
@@ -424,7 +815,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
424 | 815 | ||
425 | ahash_request_set_callback(data[i].req, 0, crypto_req_done, | 816 | ahash_request_set_callback(data[i].req, 0, crypto_req_done, |
426 | &data[i].wait); | 817 | &data[i].wait); |
427 | test_hash_sg_init(data[i].sg); | 818 | |
819 | sg_init_table(data[i].sg, XBUFSIZE); | ||
820 | for (j = 0; j < XBUFSIZE; j++) { | ||
821 | sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE); | ||
822 | memset(data[i].xbuf[j], 0xff, PAGE_SIZE); | ||
823 | } | ||
428 | } | 824 | } |
429 | 825 | ||
430 | pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, | 826 | pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, |
@@ -435,16 +831,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
435 | if (speed[i].blen != speed[i].plen) | 831 | if (speed[i].blen != speed[i].plen) |
436 | continue; | 832 | continue; |
437 | 833 | ||
438 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | 834 | if (speed[i].blen > XBUFSIZE * PAGE_SIZE) { |
439 | pr_err("template (%u) too big for tvmem (%lu)\n", | 835 | pr_err("template (%u) too big for tvmem (%lu)\n", |
440 | speed[i].blen, TVMEMSIZE * PAGE_SIZE); | 836 | speed[i].blen, XBUFSIZE * PAGE_SIZE); |
441 | goto out; | 837 | goto out; |
442 | } | 838 | } |
443 | 839 | ||
444 | if (speed[i].klen) | 840 | if (speed[i].klen) |
445 | crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); | 841 | crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); |
446 | 842 | ||
447 | for (k = 0; k < 8; k++) | 843 | for (k = 0; k < num_mb; k++) |
448 | ahash_request_set_crypt(data[k].req, data[k].sg, | 844 | ahash_request_set_crypt(data[k].req, data[k].sg, |
449 | data[k].result, speed[i].blen); | 845 | data[k].result, speed[i].blen); |
450 | 846 | ||
@@ -453,34 +849,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
453 | i, speed[i].blen, speed[i].plen, | 849 | i, speed[i].blen, speed[i].plen, |
454 | speed[i].blen / speed[i].plen); | 850 | speed[i].blen / speed[i].plen); |
455 | 851 | ||
456 | start = get_cycles(); | 852 | if (secs) |
457 | 853 | ret = test_mb_ahash_jiffies(data, speed[i].blen, secs, | |
458 | for (k = 0; k < 8; k++) { | 854 | num_mb); |
459 | ret = crypto_ahash_digest(data[k].req); | 855 | else |
460 | if (ret == -EINPROGRESS) { | 856 | ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb); |
461 | ret = 0; | ||
462 | continue; | ||
463 | } | ||
464 | |||
465 | if (ret) | ||
466 | break; | ||
467 | |||
468 | crypto_req_done(&data[k].req->base, 0); | ||
469 | } | ||
470 | |||
471 | for (j = 0; j < k; j++) { | ||
472 | struct crypto_wait *wait = &data[j].wait; | ||
473 | int wait_ret; | ||
474 | |||
475 | wait_ret = crypto_wait_req(-EINPROGRESS, wait); | ||
476 | if (wait_ret) | ||
477 | ret = wait_ret; | ||
478 | } | ||
479 | 857 | ||
480 | end = get_cycles(); | ||
481 | cycles = end - start; | ||
482 | pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", | ||
483 | cycles, cycles / (8 * speed[i].blen)); | ||
484 | 858 | ||
485 | if (ret) { | 859 | if (ret) { |
486 | pr_err("At least one hashing failed ret=%d\n", ret); | 860 | pr_err("At least one hashing failed ret=%d\n", ret); |
@@ -489,10 +863,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, | |||
489 | } | 863 | } |
490 | 864 | ||
491 | out: | 865 | out: |
492 | for (k = 0; k < 8; ++k) | 866 | for (k = 0; k < num_mb; ++k) |
493 | ahash_request_free(data[k].req); | 867 | ahash_request_free(data[k].req); |
494 | 868 | ||
495 | for (k = 0; k < 8; ++k) | 869 | for (k = 0; k < num_mb; ++k) |
496 | testmgr_free_buf(data[k].xbuf); | 870 | testmgr_free_buf(data[k].xbuf); |
497 | 871 | ||
498 | crypto_free_ahash(tfm); | 872 | crypto_free_ahash(tfm); |
@@ -736,6 +1110,254 @@ static void test_hash_speed(const char *algo, unsigned int secs, | |||
736 | return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); | 1110 | return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); |
737 | } | 1111 | } |
738 | 1112 | ||
1113 | struct test_mb_skcipher_data { | ||
1114 | struct scatterlist sg[XBUFSIZE]; | ||
1115 | struct skcipher_request *req; | ||
1116 | struct crypto_wait wait; | ||
1117 | char *xbuf[XBUFSIZE]; | ||
1118 | }; | ||
1119 | |||
1120 | static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc, | ||
1121 | u32 num_mb) | ||
1122 | { | ||
1123 | int i, rc[num_mb], err = 0; | ||
1124 | |||
1125 | /* Fire up a bunch of concurrent requests */ | ||
1126 | for (i = 0; i < num_mb; i++) { | ||
1127 | if (enc == ENCRYPT) | ||
1128 | rc[i] = crypto_skcipher_encrypt(data[i].req); | ||
1129 | else | ||
1130 | rc[i] = crypto_skcipher_decrypt(data[i].req); | ||
1131 | } | ||
1132 | |||
1133 | /* Wait for all requests to finish */ | ||
1134 | for (i = 0; i < num_mb; i++) { | ||
1135 | rc[i] = crypto_wait_req(rc[i], &data[i].wait); | ||
1136 | |||
1137 | if (rc[i]) { | ||
1138 | pr_info("concurrent request %d error %d\n", i, rc[i]); | ||
1139 | err = rc[i]; | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | return err; | ||
1144 | } | ||
1145 | |||
1146 | static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc, | ||
1147 | int blen, int secs, u32 num_mb) | ||
1148 | { | ||
1149 | unsigned long start, end; | ||
1150 | int bcount; | ||
1151 | int ret; | ||
1152 | |||
1153 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | ||
1154 | time_before(jiffies, end); bcount++) { | ||
1155 | ret = do_mult_acipher_op(data, enc, num_mb); | ||
1156 | if (ret) | ||
1157 | return ret; | ||
1158 | } | ||
1159 | |||
1160 | pr_cont("%d operations in %d seconds (%ld bytes)\n", | ||
1161 | bcount * num_mb, secs, (long)bcount * blen * num_mb); | ||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1165 | static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc, | ||
1166 | int blen, u32 num_mb) | ||
1167 | { | ||
1168 | unsigned long cycles = 0; | ||
1169 | int ret = 0; | ||
1170 | int i; | ||
1171 | |||
1172 | /* Warm-up run. */ | ||
1173 | for (i = 0; i < 4; i++) { | ||
1174 | ret = do_mult_acipher_op(data, enc, num_mb); | ||
1175 | if (ret) | ||
1176 | goto out; | ||
1177 | } | ||
1178 | |||
1179 | /* The real thing. */ | ||
1180 | for (i = 0; i < 8; i++) { | ||
1181 | cycles_t start, end; | ||
1182 | |||
1183 | start = get_cycles(); | ||
1184 | ret = do_mult_acipher_op(data, enc, num_mb); | ||
1185 | end = get_cycles(); | ||
1186 | |||
1187 | if (ret) | ||
1188 | goto out; | ||
1189 | |||
1190 | cycles += end - start; | ||
1191 | } | ||
1192 | |||
1193 | out: | ||
1194 | if (ret == 0) | ||
1195 | pr_cont("1 operation in %lu cycles (%d bytes)\n", | ||
1196 | (cycles + 4) / (8 * num_mb), blen); | ||
1197 | |||
1198 | return ret; | ||
1199 | } | ||
1200 | |||
1201 | static void test_mb_skcipher_speed(const char *algo, int enc, int secs, | ||
1202 | struct cipher_speed_template *template, | ||
1203 | unsigned int tcount, u8 *keysize, u32 num_mb) | ||
1204 | { | ||
1205 | struct test_mb_skcipher_data *data; | ||
1206 | struct crypto_skcipher *tfm; | ||
1207 | unsigned int i, j, iv_len; | ||
1208 | const char *key; | ||
1209 | const char *e; | ||
1210 | u32 *b_size; | ||
1211 | char iv[128]; | ||
1212 | int ret; | ||
1213 | |||
1214 | if (enc == ENCRYPT) | ||
1215 | e = "encryption"; | ||
1216 | else | ||
1217 | e = "decryption"; | ||
1218 | |||
1219 | data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); | ||
1220 | if (!data) | ||
1221 | return; | ||
1222 | |||
1223 | tfm = crypto_alloc_skcipher(algo, 0, 0); | ||
1224 | if (IS_ERR(tfm)) { | ||
1225 | pr_err("failed to load transform for %s: %ld\n", | ||
1226 | algo, PTR_ERR(tfm)); | ||
1227 | goto out_free_data; | ||
1228 | } | ||
1229 | |||
1230 | for (i = 0; i < num_mb; ++i) | ||
1231 | if (testmgr_alloc_buf(data[i].xbuf)) { | ||
1232 | while (i--) | ||
1233 | testmgr_free_buf(data[i].xbuf); | ||
1234 | goto out_free_tfm; | ||
1235 | } | ||
1236 | |||
1245 | |||
1246 | for (i = 0; i < num_mb; ++i) { | ||
1247 | data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); | ||
1248 | if (!data[i].req) { | ||
1249 | pr_err("alg: skcipher: Failed to allocate request for %s\n", | ||
1250 | algo); | ||
1251 | while (i--) | ||
1252 | skcipher_request_free(data[i].req); | ||
1253 | goto out_free_xbuf; | ||
1254 | } | ||
1255 | } | ||
1256 | |||
1257 | for (i = 0; i < num_mb; ++i) { | ||
1258 | skcipher_request_set_callback(data[i].req, | ||
1259 | CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
1260 | crypto_req_done, &data[i].wait); | ||
1261 | crypto_init_wait(&data[i].wait); | ||
1262 | } | ||
1263 | |||
1264 | pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, | ||
1265 | get_driver_name(crypto_skcipher, tfm), e); | ||
1266 | |||
1267 | i = 0; | ||
1268 | do { | ||
1269 | b_size = block_sizes; | ||
1270 | do { | ||
1271 | if (*b_size > XBUFSIZE * PAGE_SIZE) { | ||
1272 | pr_err("template (%u) too big for buffer (%lu)\n", | ||
1273 | *b_size, XBUFSIZE * PAGE_SIZE); | ||
1274 | goto out; | ||
1275 | } | ||
1276 | |||
1277 | pr_info("test %u (%d bit key, %d byte blocks): ", i, | ||
1278 | *keysize * 8, *b_size); | ||
1279 | |||
1280 | /* Set up tfm global state, i.e. the key */ | ||
1281 | |||
1282 | memset(tvmem[0], 0xff, PAGE_SIZE); | ||
1283 | key = tvmem[0]; | ||
1284 | for (j = 0; j < tcount; j++) { | ||
1285 | if (template[j].klen == *keysize) { | ||
1286 | key = template[j].key; | ||
1287 | break; | ||
1288 | } | ||
1289 | } | ||
1290 | |||
1291 | crypto_skcipher_clear_flags(tfm, ~0); | ||
1292 | |||
1293 | ret = crypto_skcipher_setkey(tfm, key, *keysize); | ||
1294 | if (ret) { | ||
1295 | pr_err("setkey() failed flags=%x\n", | ||
1296 | crypto_skcipher_get_flags(tfm)); | ||
1297 | goto out; | ||
1298 | } | ||
1299 | |||
1300 | iv_len = crypto_skcipher_ivsize(tfm); | ||
1301 | if (iv_len) | ||
1302 | memset(&iv, 0xff, iv_len); | ||
1303 | |||
1304 | /* Now setup per request stuff, i.e. buffers */ | ||
1305 | |||
1306 | for (j = 0; j < num_mb; ++j) { | ||
1307 | struct test_mb_skcipher_data *cur = &data[j]; | ||
1308 | unsigned int k = *b_size; | ||
1309 | unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE); | ||
1310 | unsigned int p = 0; | ||
1311 | |||
1312 | sg_init_table(cur->sg, pages); | ||
1313 | |||
1314 | while (k > PAGE_SIZE) { | ||
1315 | sg_set_buf(cur->sg + p, cur->xbuf[p], | ||
1316 | PAGE_SIZE); | ||
1317 | memset(cur->xbuf[p], 0xff, PAGE_SIZE); | ||
1318 | p++; | ||
1319 | k -= PAGE_SIZE; | ||
1320 | } | ||
1321 | |||
1322 | sg_set_buf(cur->sg + p, cur->xbuf[p], k); | ||
1323 | memset(cur->xbuf[p], 0xff, k); | ||
1324 | |||
1325 | skcipher_request_set_crypt(cur->req, cur->sg, | ||
1326 | cur->sg, *b_size, | ||
1327 | iv); | ||
1328 | } | ||
1329 | |||
1330 | if (secs) | ||
1331 | ret = test_mb_acipher_jiffies(data, enc, | ||
1332 | *b_size, secs, | ||
1333 | num_mb); | ||
1334 | else | ||
1335 | ret = test_mb_acipher_cycles(data, enc, | ||
1336 | *b_size, num_mb); | ||
1337 | |||
1338 | if (ret) { | ||
1339 | pr_err("%s() failed flags=%x\n", e, | ||
1340 | crypto_skcipher_get_flags(tfm)); | ||
1341 | break; | ||
1342 | } | ||
1343 | b_size++; | ||
1344 | i++; | ||
1345 | } while (*b_size); | ||
1346 | keysize++; | ||
1347 | } while (*keysize); | ||
1348 | |||
1349 | out: | ||
1350 | for (i = 0; i < num_mb; ++i) | ||
1351 | skcipher_request_free(data[i].req); | ||
1352 | out_free_xbuf: | ||
1353 | for (i = 0; i < num_mb; ++i) | ||
1354 | testmgr_free_buf(data[i].xbuf); | ||
1355 | out_free_tfm: | ||
1356 | crypto_free_skcipher(tfm); | ||
1357 | out_free_data: | ||
1358 | kfree(data); | ||
1359 | } | ||
1360 | |||
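The per-request setup in the skcipher variant splits *b_size across single-page scatterlist entries by hand: for *b_size = 2 * PAGE_SIZE + 100, DIV_ROUND_UP yields three entries, the while loop fills two full pages, and the final sg_set_buf() maps the 100-byte remainder. Unlike the AEAD path there is no AAD slot, and the request is in-place — cur->sg serves as both source and destination.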
739 | static inline int do_one_acipher_op(struct skcipher_request *req, int ret) | 1361 | static inline int do_one_acipher_op(struct skcipher_request *req, int ret) |
740 | { | 1362 | { |
741 | struct crypto_wait *wait = req->base.data; | 1363 | struct crypto_wait *wait = req->base.data; |
@@ -1557,16 +2179,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1557 | NULL, 0, 16, 16, aead_speed_template_20); | 2179 | NULL, 0, 16, 16, aead_speed_template_20); |
1558 | test_aead_speed("gcm(aes)", ENCRYPT, sec, | 2180 | test_aead_speed("gcm(aes)", ENCRYPT, sec, |
1559 | NULL, 0, 16, 8, speed_template_16_24_32); | 2181 | NULL, 0, 16, 8, speed_template_16_24_32); |
2182 | test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, | ||
2183 | NULL, 0, 16, 16, aead_speed_template_20); | ||
2184 | test_aead_speed("gcm(aes)", DECRYPT, sec, | ||
2185 | NULL, 0, 16, 8, speed_template_16_24_32); | ||
1560 | break; | 2186 | break; |
1561 | 2187 | ||
1562 | case 212: | 2188 | case 212: |
1563 | test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, | 2189 | test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, |
1564 | NULL, 0, 16, 16, aead_speed_template_19); | 2190 | NULL, 0, 16, 16, aead_speed_template_19); |
2191 | test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, | ||
2192 | NULL, 0, 16, 16, aead_speed_template_19); | ||
1565 | break; | 2193 | break; |
1566 | 2194 | ||
1567 | case 213: | 2195 | case 213: |
1568 | test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec, | 2196 | test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec, |
1569 | NULL, 0, 16, 8, aead_speed_template_36); | 2197 | NULL, 0, 16, 8, aead_speed_template_36); |
2198 | test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec, | ||
2199 | NULL, 0, 16, 8, aead_speed_template_36); | ||
1570 | break; | 2200 | break; |
1571 | 2201 | ||
1572 | case 214: | 2202 | case 214: |
@@ -1574,6 +2204,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1574 | speed_template_32); | 2204 | speed_template_32); |
1575 | break; | 2205 | break; |
1576 | 2206 | ||
2207 | case 215: | ||
2208 | test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, | ||
2209 | 0, 16, 16, aead_speed_template_20, num_mb); | ||
2210 | test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8, | ||
2211 | speed_template_16_24_32, num_mb); | ||
2212 | test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL, | ||
2213 | 0, 16, 16, aead_speed_template_20, num_mb); | ||
2214 | test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8, | ||
2215 | speed_template_16_24_32, num_mb); | ||
2216 | break; | ||
2217 | |||
2218 | case 216: | ||
2219 | test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0, | ||
2220 | 16, 16, aead_speed_template_19, num_mb); | ||
2221 | test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0, | ||
2222 | 16, 16, aead_speed_template_19, num_mb); | ||
2223 | break; | ||
2224 | |||
2225 | case 217: | ||
2226 | test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, | ||
2227 | sec, NULL, 0, 16, 8, aead_speed_template_36, | ||
2228 | num_mb); | ||
2229 | test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, | ||
2230 | sec, NULL, 0, 16, 8, aead_speed_template_36, | ||
2231 | num_mb); | ||
2232 | break; | ||
2233 | |||
1577 | case 300: | 2234 | case 300: |
1578 | if (alg) { | 2235 | if (alg) { |
1579 | test_hash_speed(alg, sec, generic_hash_speed_template); | 2236 | test_hash_speed(alg, sec, generic_hash_speed_template); |
@@ -1778,19 +2435,23 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1778 | if (mode > 400 && mode < 500) break; | 2435 | if (mode > 400 && mode < 500) break; |
1779 | /* fall through */ | 2436 | /* fall through */ |
1780 | case 422: | 2437 | case 422: |
1781 | test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); | 2438 | test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, |
2439 | num_mb); | ||
1782 | if (mode > 400 && mode < 500) break; | 2440 | if (mode > 400 && mode < 500) break; |
1783 | /* fall through */ | 2441 | /* fall through */ |
1784 | case 423: | 2442 | case 423: |
1785 | test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); | 2443 | test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, |
2444 | num_mb); | ||
1786 | if (mode > 400 && mode < 500) break; | 2445 | if (mode > 400 && mode < 500) break; |
1787 | /* fall through */ | 2446 | /* fall through */ |
1788 | case 424: | 2447 | case 424: |
1789 | test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); | 2448 | test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, |
2449 | num_mb); | ||
1790 | if (mode > 400 && mode < 500) break; | 2450 | if (mode > 400 && mode < 500) break; |
1791 | /* fall through */ | 2451 | /* fall through */ |
1792 | case 425: | 2452 | case 425: |
1793 | test_mb_ahash_speed("sm3", sec, generic_hash_speed_template); | 2453 | test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, |
2454 | num_mb); | ||
1794 | if (mode > 400 && mode < 500) break; | 2455 | if (mode > 400 && mode < 500) break; |
1795 | /* fall through */ | 2456 | /* fall through */ |
1796 | case 499: | 2457 | case 499: |
@@ -2008,6 +2669,218 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
2008 | speed_template_8_32); | 2669 | speed_template_8_32); |
2009 | break; | 2670 | break; |
2010 | 2671 | ||
2672 | case 600: | ||
2673 | test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, | ||
2674 | speed_template_16_24_32, num_mb); | ||
2675 | test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, | ||
2676 | speed_template_16_24_32, num_mb); | ||
2677 | test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0, | ||
2678 | speed_template_16_24_32, num_mb); | ||
2679 | test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0, | ||
2680 | speed_template_16_24_32, num_mb); | ||
2681 | test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0, | ||
2682 | speed_template_32_40_48, num_mb); | ||
2683 | test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, | ||
2684 | speed_template_32_40_48, num_mb); | ||
2685 | test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, | ||
2686 | speed_template_32_64, num_mb); | ||
2687 | test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, | ||
2688 | speed_template_32_64, num_mb); | ||
2689 | test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, | ||
2690 | speed_template_16_24_32, num_mb); | ||
2691 | test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, | ||
2692 | speed_template_16_24_32, num_mb); | ||
2693 | test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, | ||
2694 | speed_template_16_24_32, num_mb); | ||
2695 | test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, | ||
2696 | speed_template_16_24_32, num_mb); | ||
2697 | test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, | ||
2698 | speed_template_16_24_32, num_mb); | ||
2699 | test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, | ||
2700 | speed_template_16_24_32, num_mb); | ||
2701 | test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0, | ||
2702 | speed_template_16_24_32, num_mb); | ||
2703 | test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, | ||
2704 | speed_template_16_24_32, num_mb); | ||
2705 | test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, | ||
2706 | 0, speed_template_20_28_36, num_mb); | ||
2707 | test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, | ||
2708 | 0, speed_template_20_28_36, num_mb); | ||
2709 | break; | ||
2710 | |||
2711 | case 601: | ||
2712 | test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec, | ||
2713 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2714 | speed_template_24, num_mb); | ||
2715 | test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec, | ||
2716 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2717 | speed_template_24, num_mb); | ||
2718 | test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec, | ||
2719 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2720 | speed_template_24, num_mb); | ||
2721 | test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec, | ||
2722 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2723 | speed_template_24, num_mb); | ||
2724 | test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec, | ||
2725 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2726 | speed_template_24, num_mb); | ||
2727 | test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec, | ||
2728 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2729 | speed_template_24, num_mb); | ||
2730 | test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec, | ||
2731 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2732 | speed_template_24, num_mb); | ||
2733 | test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec, | ||
2734 | des3_speed_template, DES3_SPEED_VECTORS, | ||
2735 | speed_template_24, num_mb); | ||
2736 | break; | ||
2737 | |||
2738 | case 602: | ||
2739 | test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0, | ||
2740 | speed_template_8, num_mb); | ||
2741 | test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0, | ||
2742 | speed_template_8, num_mb); | ||
2743 | test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0, | ||
2744 | speed_template_8, num_mb); | ||
2745 | test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, | ||
2746 | speed_template_8, num_mb); | ||
2747 | test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0, | ||
2748 | speed_template_8, num_mb); | ||
2749 | test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0, | ||
2750 | speed_template_8, num_mb); | ||
2751 | test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0, | ||
2752 | speed_template_8, num_mb); | ||
2753 | test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0, | ||
2754 | speed_template_8, num_mb); | ||
2755 | break; | ||
2756 | |||
2757 | case 603: | ||
2758 | test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0, | ||
2759 | speed_template_16_32, num_mb); | ||
2760 | test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0, | ||
2761 | speed_template_16_32, num_mb); | ||
2762 | test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0, | ||
2763 | speed_template_16_32, num_mb); | ||
2764 | test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0, | ||
2765 | speed_template_16_32, num_mb); | ||
2766 | test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0, | ||
2767 | speed_template_16_32, num_mb); | ||
2768 | test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0, | ||
2769 | speed_template_16_32, num_mb); | ||
2770 | test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0, | ||
2771 | speed_template_32_48, num_mb); | ||
2772 | test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0, | ||
2773 | speed_template_32_48, num_mb); | ||
2774 | test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0, | ||
2775 | speed_template_32_64, num_mb); | ||
2776 | test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0, | ||
2777 | speed_template_32_64, num_mb); | ||
2778 | break; | ||
2779 | |||
2780 | case 604: | ||
2781 | test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0, | ||
2782 | speed_template_16_24_32, num_mb); | ||
2783 | test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0, | ||
2784 | speed_template_16_24_32, num_mb); | ||
2785 | test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0, | ||
2786 | speed_template_16_24_32, num_mb); | ||
2787 | test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, | ||
2788 | speed_template_16_24_32, num_mb); | ||
2789 | test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0, | ||
2790 | speed_template_16_24_32, num_mb); | ||
2791 | test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0, | ||
2792 | speed_template_16_24_32, num_mb); | ||
2793 | test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0, | ||
2794 | speed_template_32_40_48, num_mb); | ||
2795 | test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0, | ||
2796 | speed_template_32_40_48, num_mb); | ||
2797 | test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0, | ||
2798 | speed_template_32_48_64, num_mb); | ||
2799 | test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0, | ||
2800 | speed_template_32_48_64, num_mb); | ||
2801 | break; | ||
2802 | |||
2803 | case 605: | ||
2804 | test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, | ||
2805 | speed_template_8, num_mb); | ||
2806 | break; | ||
2807 | |||
2808 | case 606: | ||
2809 | test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0, | ||
2810 | speed_template_8_16, num_mb); | ||
2811 | test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0, | ||
2812 | speed_template_8_16, num_mb); | ||
2813 | test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0, | ||
2814 | speed_template_8_16, num_mb); | ||
2815 | test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0, | ||
2816 | speed_template_8_16, num_mb); | ||
2817 | test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0, | ||
2818 | speed_template_8_16, num_mb); | ||
2819 | test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0, | ||
2820 | speed_template_8_16, num_mb); | ||
2821 | break; | ||
2822 | |||
2823 | case 607: | ||
2824 | test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0, | ||
2825 | speed_template_16_32, num_mb); | ||
2826 | test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0, | ||
2827 | speed_template_16_32, num_mb); | ||
2828 | test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0, | ||
2829 | speed_template_16_32, num_mb); | ||
2830 | test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0, | ||
2831 | speed_template_16_32, num_mb); | ||
2832 | test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0, | ||
2833 | speed_template_16_32, num_mb); | ||
2834 | test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0, | ||
2835 | speed_template_16_32, num_mb); | ||
2836 | test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0, | ||
2837 | speed_template_32_48, num_mb); | ||
2838 | test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0, | ||
2839 | speed_template_32_48, num_mb); | ||
2840 | test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0, | ||
2841 | speed_template_32_64, num_mb); | ||
2842 | test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0, | ||
2843 | speed_template_32_64, num_mb); | ||
2844 | break; | ||
2845 | |||
2846 | case 608: | ||
2847 | test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0, | ||
2848 | speed_template_16_32, num_mb); | ||
2849 | test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0, | ||
2850 | speed_template_16_32, num_mb); | ||
2851 | test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0, | ||
2852 | speed_template_16_32, num_mb); | ||
2853 | test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0, | ||
2854 | speed_template_16_32, num_mb); | ||
2855 | test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0, | ||
2856 | speed_template_16_32, num_mb); | ||
2857 | test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0, | ||
2858 | speed_template_16_32, num_mb); | ||
2859 | test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0, | ||
2860 | speed_template_32_48, num_mb); | ||
2861 | test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0, | ||
2862 | speed_template_32_48, num_mb); | ||
2863 | test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0, | ||
2864 | speed_template_32_64, num_mb); | ||
2865 | test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0, | ||
2866 | speed_template_32_64, num_mb); | ||
2867 | break; | ||
2868 | |||
2869 | case 609: | ||
2870 | test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0, | ||
2871 | speed_template_8_32, num_mb); | ||
2872 | test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0, | ||
2873 | speed_template_8_32, num_mb); | ||
2874 | test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0, | ||
2875 | speed_template_8_32, num_mb); | ||
2876 | test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, | ||
2877 | speed_template_8_32, num_mb); | ||
2878 | test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0, | ||
2879 | speed_template_8_32, num_mb); | ||
2880 | test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0, | ||
2881 | speed_template_8_32, num_mb); | ||
2882 | break; | ||
2883 | |||
2011 | case 1000: | 2884 | case 1000: |
2012 | test_available(); | 2885 | test_available(); |
2013 | break; | 2886 | break; |
@@ -2069,6 +2942,8 @@ module_param(mode, int, 0); | |||
2069 | module_param(sec, uint, 0); | 2942 | module_param(sec, uint, 0); |
2070 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " | 2943 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " |
2071 | "(defaults to zero which uses CPU cycles instead)"); | 2944 | "(defaults to zero which uses CPU cycles instead)"); |
2945 | module_param(num_mb, uint, 0000); | ||
2946 | MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)"); | ||
2072 | 2947 | ||
2073 | MODULE_LICENSE("GPL"); | 2948 | MODULE_LICENSE("GPL"); |
2074 | MODULE_DESCRIPTION("Quick & dirty crypto testing module"); | 2949 | MODULE_DESCRIPTION("Quick & dirty crypto testing module"); |
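With num_mb wired up as a module parameter, the new tests are driven like the existing ones; an illustrative invocation (not from the patch) would be "modprobe tcrypt mode=215 sec=1 num_mb=32". Modes 215-217 cover the multibuffer AEAD cases added above and 600-609 the multibuffer skciphers, while leaving sec at its default of 0 selects cycle counting as before.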
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 29d7020b8826..d5e23a142a04 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -177,6 +177,18 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) | |||
177 | free_page((unsigned long)buf[i]); | 177 | free_page((unsigned long)buf[i]); |
178 | } | 178 | } |
179 | 179 | ||
180 | static int ahash_guard_result(char *result, char c, int size) | ||
181 | { | ||
182 | int i; | ||
183 | |||
184 | for (i = 0; i < size; i++) { | ||
185 | if (result[i] != c) | ||
186 | return -EINVAL; | ||
187 | } | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
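ahash_guard_result() is the probe half of a poison-and-check scheme: the caller first fills req->result with a known byte (the memset(result, 1, ...) calls below), runs an operation that must not touch the result buffer — export, import, init or update — and then verifies the poison is intact. A minimal self-contained sketch of the idiom, with a hypothetical op() standing in for the call under test:

	static int check_result_untouched(char *result, int size, int (*op)(void))
	{
		int err;

		memset(result, 1, size);	/* poison */
		err = op();			/* must not write to result */
		if (err)
			return err;

		return ahash_guard_result(result, 1, size); /* -EINVAL if op wrote */
	}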
180 | static int ahash_partial_update(struct ahash_request **preq, | 192 | static int ahash_partial_update(struct ahash_request **preq, |
181 | struct crypto_ahash *tfm, const struct hash_testvec *template, | 193 | struct crypto_ahash *tfm, const struct hash_testvec *template, |
182 | void *hash_buff, int k, int temp, struct scatterlist *sg, | 194 | void *hash_buff, int k, int temp, struct scatterlist *sg, |
@@ -185,7 +197,8 @@ static int ahash_partial_update(struct ahash_request **preq, | |||
185 | char *state; | 197 | char *state; |
186 | struct ahash_request *req; | 198 | struct ahash_request *req; |
187 | int statesize, ret = -EINVAL; | 199 | int statesize, ret = -EINVAL; |
188 | const char guard[] = { 0x00, 0xba, 0xad, 0x00 }; | 200 | static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 }; |
201 | int digestsize = crypto_ahash_digestsize(tfm); | ||
189 | 202 | ||
190 | req = *preq; | 203 | req = *preq; |
191 | statesize = crypto_ahash_statesize( | 204 | statesize = crypto_ahash_statesize( |
@@ -196,12 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq, | |||
196 | goto out_nostate; | 209 | goto out_nostate; |
197 | } | 210 | } |
198 | memcpy(state + statesize, guard, sizeof(guard)); | 211 | memcpy(state + statesize, guard, sizeof(guard)); |
212 | memset(result, 1, digestsize); | ||
199 | ret = crypto_ahash_export(req, state); | 213 | ret = crypto_ahash_export(req, state); |
200 | WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); | 214 | WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); |
201 | if (ret) { | 215 | if (ret) { |
202 | pr_err("alg: hash: Failed to export() for %s\n", algo); | 216 | pr_err("alg: hash: Failed to export() for %s\n", algo); |
203 | goto out; | 217 | goto out; |
204 | } | 218 | } |
219 | ret = ahash_guard_result(result, 1, digestsize); | ||
220 | if (ret) { | ||
221 | pr_err("alg: hash: Failed, export used req->result for %s\n", | ||
222 | algo); | ||
223 | goto out; | ||
224 | } | ||
205 | ahash_request_free(req); | 225 | ahash_request_free(req); |
206 | req = ahash_request_alloc(tfm, GFP_KERNEL); | 226 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
207 | if (!req) { | 227 | if (!req) { |
@@ -221,6 +241,12 @@ static int ahash_partial_update(struct ahash_request **preq, | |||
221 | pr_err("alg: hash: Failed to import() for %s\n", algo); | 241 | pr_err("alg: hash: Failed to import() for %s\n", algo); |
222 | goto out; | 242 | goto out; |
223 | } | 243 | } |
244 | ret = ahash_guard_result(result, 1, digestsize); | ||
245 | if (ret) { | ||
246 | pr_err("alg: hash: Failed, import used req->result for %s\n", | ||
247 | algo); | ||
248 | goto out; | ||
249 | } | ||
224 | ret = crypto_wait_req(crypto_ahash_update(req), wait); | 250 | ret = crypto_wait_req(crypto_ahash_update(req), wait); |
225 | if (ret) | 251 | if (ret) |
226 | goto out; | 252 | goto out; |
@@ -316,18 +342,31 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
316 | goto out; | 342 | goto out; |
317 | } | 343 | } |
318 | } else { | 344 | } else { |
345 | memset(result, 1, digest_size); | ||
319 | ret = crypto_wait_req(crypto_ahash_init(req), &wait); | 346 | ret = crypto_wait_req(crypto_ahash_init(req), &wait); |
320 | if (ret) { | 347 | if (ret) { |
321 | pr_err("alg: hash: init failed on test %d " | 348 | pr_err("alg: hash: init failed on test %d " |
322 | "for %s: ret=%d\n", j, algo, -ret); | 349 | "for %s: ret=%d\n", j, algo, -ret); |
323 | goto out; | 350 | goto out; |
324 | } | 351 | } |
352 | ret = ahash_guard_result(result, 1, digest_size); | ||
353 | if (ret) { | ||
354 | pr_err("alg: hash: init failed on test %d " | ||
355 | "for %s: used req->result\n", j, algo); | ||
356 | goto out; | ||
357 | } | ||
325 | ret = crypto_wait_req(crypto_ahash_update(req), &wait); | 358 | ret = crypto_wait_req(crypto_ahash_update(req), &wait); |
326 | if (ret) { | 359 | if (ret) { |
327 | pr_err("alg: hash: update failed on test %d " | 360 | pr_err("alg: hash: update failed on test %d " |
328 | "for %s: ret=%d\n", j, algo, -ret); | 361 | "for %s: ret=%d\n", j, algo, -ret); |
329 | goto out; | 362 | goto out; |
330 | } | 363 | } |
364 | ret = ahash_guard_result(result, 1, digest_size); | ||
365 | if (ret) { | ||
366 | pr_err("alg: hash: update failed on test %d " | ||
367 | "for %s: used req->result\n", j, algo); | ||
368 | goto out; | ||
369 | } | ||
331 | ret = crypto_wait_req(crypto_ahash_final(req), &wait); | 370 | ret = crypto_wait_req(crypto_ahash_final(req), &wait); |
332 | if (ret) { | 371 | if (ret) { |
333 | pr_err("alg: hash: final failed on test %d " | 372 | pr_err("alg: hash: final failed on test %d " |
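The testmgr.c hunks above all apply one pattern: poison req->result with a known byte before each operation that must not produce output (init, update, export and import), then call the new ahash_guard_result() to verify the poison is intact, which catches drivers that write to the result buffer before final(). A minimal userspace analogue of the guard technique, with assumed names and the crypto calls stubbed out as a comment:

    #include <stdio.h>
    #include <string.h>

    static int guard_result(const unsigned char *buf, unsigned char c,
                            size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (buf[i] != c)
                            return -1;      /* buffer was written */
            return 0;
    }

    int main(void)
    {
            unsigned char result[32];

            memset(result, 1, sizeof(result));      /* poison */
            /* ...init/update/export/import would run here; none of
             * them is allowed to touch the result buffer... */
            if (guard_result(result, 1, sizeof(result)))
                    puts("FAIL: result written before final()");
            else
                    puts("OK: result untouched");
            return 0;
    }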
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index a714b6293959..6044f6906bd6 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -1052,6 +1052,142 @@ static const struct hash_testvec sha3_224_tv_template[] = { | |||
1052 | "\xc9\xfd\x55\x74\x49\x44\x79\xba" | 1052 | "\xc9\xfd\x55\x74\x49\x44\x79\xba" |
1053 | "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" | 1053 | "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" |
1054 | "\xd0\xfc\xce\x33", | 1054 | "\xd0\xfc\xce\x33", |
1055 | .np = 2, | ||
1056 | .tap = { 28, 28 }, | ||
1057 | }, { | ||
1058 | .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" | ||
1059 | "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" | ||
1060 | "\xec\x60\xf7\x8e\x02\x99\x30\xc7" | ||
1061 | "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" | ||
1062 | "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" | ||
1063 | "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" | ||
1064 | "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" | ||
1065 | "\x03\x77\x0e\xa5\x19\xb0\x47\xde" | ||
1066 | "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" | ||
1067 | "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" | ||
1068 | "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" | ||
1069 | "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" | ||
1070 | "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" | ||
1071 | "\x69\x00\x97\x0b\xa2\x39\xd0\x44" | ||
1072 | "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" | ||
1073 | "\x4d\xe4\x58\xef\x86\x1d\x91\x28" | ||
1074 | "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" | ||
1075 | "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" | ||
1076 | "\x80\x17\xae\x22\xb9\x50\xe7\x5b" | ||
1077 | "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" | ||
1078 | "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" | ||
1079 | "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" | ||
1080 | "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" | ||
1081 | "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" | ||
1082 | "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" | ||
1083 | "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" | ||
1084 | "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" | ||
1085 | "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" | ||
1086 | "\xae\x45\xdc\x50\xe7\x7e\x15\x89" | ||
1087 | "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" | ||
1088 | "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" | ||
1089 | "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" | ||
1090 | "\x53\xea\x81\x18\x8c\x23\xba\x2e" | ||
1091 | "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" | ||
1092 | "\x37\xce\x42\xd9\x70\x07\x7b\x12" | ||
1093 | "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" | ||
1094 | "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" | ||
1095 | "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" | ||
1096 | "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" | ||
1097 | "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" | ||
1098 | "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" | ||
1099 | "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" | ||
1100 | "\x81\x18\xaf\x23\xba\x51\xe8\x5c" | ||
1101 | "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" | ||
1102 | "\x65\xfc\x70\x07\x9e\x12\xa9\x40" | ||
1103 | "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" | ||
1104 | "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" | ||
1105 | "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" | ||
1106 | "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" | ||
1107 | "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" | ||
1108 | "\xee\x62\xf9\x90\x04\x9b\x32\xc9" | ||
1109 | "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" | ||
1110 | "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" | ||
1111 | "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" | ||
1112 | "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" | ||
1113 | "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" | ||
1114 | "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" | ||
1115 | "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" | ||
1116 | "\x38\xcf\x43\xda\x71\x08\x7c\x13" | ||
1117 | "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" | ||
1118 | "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" | ||
1119 | "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" | ||
1120 | "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" | ||
1121 | "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" | ||
1122 | "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" | ||
1123 | "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" | ||
1124 | "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" | ||
1125 | "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" | ||
1126 | "\x66\xfd\x71\x08\x9f\x13\xaa\x41" | ||
1127 | "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" | ||
1128 | "\x27\xbe\x55\xec\x60\xf7\x8e\x02" | ||
1129 | "\x99\x30\xc7\x3b\xd2\x69\x00\x74" | ||
1130 | "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" | ||
1131 | "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" | ||
1132 | "\xef\x63\xfa\x91\x05\x9c\x33\xca" | ||
1133 | "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" | ||
1134 | "\xb0\x47\xde\x52\xe9\x80\x17\x8b" | ||
1135 | "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" | ||
1136 | "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" | ||
1137 | "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" | ||
1138 | "\x55\xec\x83\x1a\x8e\x25\xbc\x30" | ||
1139 | "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" | ||
1140 | "\x39\xd0\x44\xdb\x72\x09\x7d\x14" | ||
1141 | "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" | ||
1142 | "\x1d\x91\x28\xbf\x33\xca\x61\xf8" | ||
1143 | "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" | ||
1144 | "\xde\x75\x0c\x80\x17\xae\x22\xb9" | ||
1145 | "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" | ||
1146 | "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" | ||
1147 | "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" | ||
1148 | "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" | ||
1149 | "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" | ||
1150 | "\x67\xfe\x72\x09\xa0\x14\xab\x42" | ||
1151 | "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" | ||
1152 | "\x28\xbf\x56\xed\x61\xf8\x8f\x03" | ||
1153 | "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" | ||
1154 | "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" | ||
1155 | "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" | ||
1156 | "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" | ||
1157 | "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" | ||
1158 | "\xb1\x48\xdf\x53\xea\x81\x18\x8c" | ||
1159 | "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" | ||
1160 | "\x95\x09\xa0\x37\xce\x42\xd9\x70" | ||
1161 | "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" | ||
1162 | "\x56\xed\x84\x1b\x8f\x26\xbd\x31" | ||
1163 | "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" | ||
1164 | "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" | ||
1165 | "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" | ||
1166 | "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" | ||
1167 | "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" | ||
1168 | "\xdf\x76\x0d\x81\x18\xaf\x23\xba" | ||
1169 | "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" | ||
1170 | "\xc3\x37\xce\x65\xfc\x70\x07\x9e" | ||
1171 | "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" | ||
1172 | "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" | ||
1173 | "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" | ||
1174 | "\x68\xff\x73\x0a\xa1\x15\xac\x43" | ||
1175 | "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" | ||
1176 | "\x29\xc0\x57\xee\x62\xf9\x90\x04" | ||
1177 | "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" | ||
1178 | "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" | ||
1179 | "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" | ||
1180 | "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" | ||
1181 | "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" | ||
1182 | "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" | ||
1183 | "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" | ||
1184 | "\x96\x0a\xa1\x38\xcf\x43\xda\x71" | ||
1185 | "\x08\x7c\x13\xaa\x1e\xb5\x4c", | ||
1186 | .psize = 1023, | ||
1187 | .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26" | ||
1188 | "\xc3\x88\x20\x71\x15\x06\xe8\x2d" | ||
1189 | "\xa3\x92\x44\xab\x3e\xe7\xff\x86" | ||
1190 | "\xb6\x79\x10\x72", | ||
1055 | }, | 1191 | }, |
1056 | }; | 1192 | }; |
1057 | 1193 | ||
@@ -1077,6 +1213,142 @@ static const struct hash_testvec sha3_256_tv_template[] = { | |||
1077 | "\x49\x10\x03\x76\xa8\x23\x5e\x2c" | 1213 | "\x49\x10\x03\x76\xa8\x23\x5e\x2c" |
1078 | "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" | 1214 | "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" |
1079 | "\xdb\x32\xdd\x97\x49\x6d\x33\x76", | 1215 | "\xdb\x32\xdd\x97\x49\x6d\x33\x76", |
1216 | .np = 2, | ||
1217 | .tap = { 28, 28 }, | ||
1218 | }, { | ||
1219 | .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" | ||
1220 | "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" | ||
1221 | "\xec\x60\xf7\x8e\x02\x99\x30\xc7" | ||
1222 | "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" | ||
1223 | "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" | ||
1224 | "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" | ||
1225 | "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" | ||
1226 | "\x03\x77\x0e\xa5\x19\xb0\x47\xde" | ||
1227 | "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" | ||
1228 | "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" | ||
1229 | "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" | ||
1230 | "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" | ||
1231 | "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" | ||
1232 | "\x69\x00\x97\x0b\xa2\x39\xd0\x44" | ||
1233 | "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" | ||
1234 | "\x4d\xe4\x58\xef\x86\x1d\x91\x28" | ||
1235 | "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" | ||
1236 | "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" | ||
1237 | "\x80\x17\xae\x22\xb9\x50\xe7\x5b" | ||
1238 | "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" | ||
1239 | "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" | ||
1240 | "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" | ||
1241 | "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" | ||
1242 | "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" | ||
1243 | "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" | ||
1244 | "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" | ||
1245 | "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" | ||
1246 | "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" | ||
1247 | "\xae\x45\xdc\x50\xe7\x7e\x15\x89" | ||
1248 | "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" | ||
1249 | "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" | ||
1250 | "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" | ||
1251 | "\x53\xea\x81\x18\x8c\x23\xba\x2e" | ||
1252 | "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" | ||
1253 | "\x37\xce\x42\xd9\x70\x07\x7b\x12" | ||
1254 | "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" | ||
1255 | "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" | ||
1256 | "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" | ||
1257 | "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" | ||
1258 | "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" | ||
1259 | "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" | ||
1260 | "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" | ||
1261 | "\x81\x18\xaf\x23\xba\x51\xe8\x5c" | ||
1262 | "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" | ||
1263 | "\x65\xfc\x70\x07\x9e\x12\xa9\x40" | ||
1264 | "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" | ||
1265 | "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" | ||
1266 | "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" | ||
1267 | "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" | ||
1268 | "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" | ||
1269 | "\xee\x62\xf9\x90\x04\x9b\x32\xc9" | ||
1270 | "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" | ||
1271 | "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" | ||
1272 | "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" | ||
1273 | "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" | ||
1274 | "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" | ||
1275 | "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" | ||
1276 | "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" | ||
1277 | "\x38\xcf\x43\xda\x71\x08\x7c\x13" | ||
1278 | "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" | ||
1279 | "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" | ||
1280 | "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" | ||
1281 | "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" | ||
1282 | "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" | ||
1283 | "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" | ||
1284 | "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" | ||
1285 | "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" | ||
1286 | "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" | ||
1287 | "\x66\xfd\x71\x08\x9f\x13\xaa\x41" | ||
1288 | "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" | ||
1289 | "\x27\xbe\x55\xec\x60\xf7\x8e\x02" | ||
1290 | "\x99\x30\xc7\x3b\xd2\x69\x00\x74" | ||
1291 | "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" | ||
1292 | "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" | ||
1293 | "\xef\x63\xfa\x91\x05\x9c\x33\xca" | ||
1294 | "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" | ||
1295 | "\xb0\x47\xde\x52\xe9\x80\x17\x8b" | ||
1296 | "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" | ||
1297 | "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" | ||
1298 | "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" | ||
1299 | "\x55\xec\x83\x1a\x8e\x25\xbc\x30" | ||
1300 | "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" | ||
1301 | "\x39\xd0\x44\xdb\x72\x09\x7d\x14" | ||
1302 | "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" | ||
1303 | "\x1d\x91\x28\xbf\x33\xca\x61\xf8" | ||
1304 | "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" | ||
1305 | "\xde\x75\x0c\x80\x17\xae\x22\xb9" | ||
1306 | "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" | ||
1307 | "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" | ||
1308 | "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" | ||
1309 | "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" | ||
1310 | "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" | ||
1311 | "\x67\xfe\x72\x09\xa0\x14\xab\x42" | ||
1312 | "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" | ||
1313 | "\x28\xbf\x56\xed\x61\xf8\x8f\x03" | ||
1314 | "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" | ||
1315 | "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" | ||
1316 | "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" | ||
1317 | "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" | ||
1318 | "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" | ||
1319 | "\xb1\x48\xdf\x53\xea\x81\x18\x8c" | ||
1320 | "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" | ||
1321 | "\x95\x09\xa0\x37\xce\x42\xd9\x70" | ||
1322 | "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" | ||
1323 | "\x56\xed\x84\x1b\x8f\x26\xbd\x31" | ||
1324 | "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" | ||
1325 | "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" | ||
1326 | "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" | ||
1327 | "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" | ||
1328 | "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" | ||
1329 | "\xdf\x76\x0d\x81\x18\xaf\x23\xba" | ||
1330 | "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" | ||
1331 | "\xc3\x37\xce\x65\xfc\x70\x07\x9e" | ||
1332 | "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" | ||
1333 | "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" | ||
1334 | "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" | ||
1335 | "\x68\xff\x73\x0a\xa1\x15\xac\x43" | ||
1336 | "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" | ||
1337 | "\x29\xc0\x57\xee\x62\xf9\x90\x04" | ||
1338 | "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" | ||
1339 | "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" | ||
1340 | "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" | ||
1341 | "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" | ||
1342 | "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" | ||
1343 | "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" | ||
1344 | "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" | ||
1345 | "\x96\x0a\xa1\x38\xcf\x43\xda\x71" | ||
1346 | "\x08\x7c\x13\xaa\x1e\xb5\x4c", | ||
1347 | .psize = 1023, | ||
1348 | .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71" | ||
1349 | "\xf7\xfa\x80\xf5\xea\x11\x03\xb1" | ||
1350 | "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7" | ||
1351 | "\x8a\x97\xbb\xf2\x07\x08\x38\x30", | ||
1080 | }, | 1352 | }, |
1081 | }; | 1353 | }; |
1082 | 1354 | ||
@@ -1109,6 +1381,144 @@ static const struct hash_testvec sha3_384_tv_template[] = { | |||
1109 | "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" | 1381 | "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" |
1110 | "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" | 1382 | "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" |
1111 | "\x9e\xef\x51\xac\xd0\x65\x7c\x22", | 1383 | "\x9e\xef\x51\xac\xd0\x65\x7c\x22", |
1384 | .np = 2, | ||
1385 | .tap = { 28, 28 }, | ||
1386 | }, { | ||
1387 | .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" | ||
1388 | "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" | ||
1389 | "\xec\x60\xf7\x8e\x02\x99\x30\xc7" | ||
1390 | "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" | ||
1391 | "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" | ||
1392 | "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" | ||
1393 | "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" | ||
1394 | "\x03\x77\x0e\xa5\x19\xb0\x47\xde" | ||
1395 | "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" | ||
1396 | "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" | ||
1397 | "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" | ||
1398 | "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" | ||
1399 | "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" | ||
1400 | "\x69\x00\x97\x0b\xa2\x39\xd0\x44" | ||
1401 | "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" | ||
1402 | "\x4d\xe4\x58\xef\x86\x1d\x91\x28" | ||
1403 | "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" | ||
1404 | "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" | ||
1405 | "\x80\x17\xae\x22\xb9\x50\xe7\x5b" | ||
1406 | "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" | ||
1407 | "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" | ||
1408 | "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" | ||
1409 | "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" | ||
1410 | "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" | ||
1411 | "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" | ||
1412 | "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" | ||
1413 | "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" | ||
1414 | "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" | ||
1415 | "\xae\x45\xdc\x50\xe7\x7e\x15\x89" | ||
1416 | "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" | ||
1417 | "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" | ||
1418 | "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" | ||
1419 | "\x53\xea\x81\x18\x8c\x23\xba\x2e" | ||
1420 | "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" | ||
1421 | "\x37\xce\x42\xd9\x70\x07\x7b\x12" | ||
1422 | "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" | ||
1423 | "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" | ||
1424 | "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" | ||
1425 | "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" | ||
1426 | "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" | ||
1427 | "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" | ||
1428 | "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" | ||
1429 | "\x81\x18\xaf\x23\xba\x51\xe8\x5c" | ||
1430 | "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" | ||
1431 | "\x65\xfc\x70\x07\x9e\x12\xa9\x40" | ||
1432 | "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" | ||
1433 | "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" | ||
1434 | "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" | ||
1435 | "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" | ||
1436 | "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" | ||
1437 | "\xee\x62\xf9\x90\x04\x9b\x32\xc9" | ||
1438 | "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" | ||
1439 | "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" | ||
1440 | "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" | ||
1441 | "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" | ||
1442 | "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" | ||
1443 | "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" | ||
1444 | "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" | ||
1445 | "\x38\xcf\x43\xda\x71\x08\x7c\x13" | ||
1446 | "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" | ||
1447 | "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" | ||
1448 | "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" | ||
1449 | "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" | ||
1450 | "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" | ||
1451 | "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" | ||
1452 | "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" | ||
1453 | "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" | ||
1454 | "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" | ||
1455 | "\x66\xfd\x71\x08\x9f\x13\xaa\x41" | ||
1456 | "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" | ||
1457 | "\x27\xbe\x55\xec\x60\xf7\x8e\x02" | ||
1458 | "\x99\x30\xc7\x3b\xd2\x69\x00\x74" | ||
1459 | "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" | ||
1460 | "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" | ||
1461 | "\xef\x63\xfa\x91\x05\x9c\x33\xca" | ||
1462 | "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" | ||
1463 | "\xb0\x47\xde\x52\xe9\x80\x17\x8b" | ||
1464 | "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" | ||
1465 | "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" | ||
1466 | "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" | ||
1467 | "\x55\xec\x83\x1a\x8e\x25\xbc\x30" | ||
1468 | "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" | ||
1469 | "\x39\xd0\x44\xdb\x72\x09\x7d\x14" | ||
1470 | "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" | ||
1471 | "\x1d\x91\x28\xbf\x33\xca\x61\xf8" | ||
1472 | "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" | ||
1473 | "\xde\x75\x0c\x80\x17\xae\x22\xb9" | ||
1474 | "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" | ||
1475 | "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" | ||
1476 | "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" | ||
1477 | "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" | ||
1478 | "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" | ||
1479 | "\x67\xfe\x72\x09\xa0\x14\xab\x42" | ||
1480 | "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" | ||
1481 | "\x28\xbf\x56\xed\x61\xf8\x8f\x03" | ||
1482 | "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" | ||
1483 | "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" | ||
1484 | "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" | ||
1485 | "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" | ||
1486 | "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" | ||
1487 | "\xb1\x48\xdf\x53\xea\x81\x18\x8c" | ||
1488 | "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" | ||
1489 | "\x95\x09\xa0\x37\xce\x42\xd9\x70" | ||
1490 | "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" | ||
1491 | "\x56\xed\x84\x1b\x8f\x26\xbd\x31" | ||
1492 | "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" | ||
1493 | "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" | ||
1494 | "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" | ||
1495 | "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" | ||
1496 | "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" | ||
1497 | "\xdf\x76\x0d\x81\x18\xaf\x23\xba" | ||
1498 | "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" | ||
1499 | "\xc3\x37\xce\x65\xfc\x70\x07\x9e" | ||
1500 | "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" | ||
1501 | "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" | ||
1502 | "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" | ||
1503 | "\x68\xff\x73\x0a\xa1\x15\xac\x43" | ||
1504 | "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" | ||
1505 | "\x29\xc0\x57\xee\x62\xf9\x90\x04" | ||
1506 | "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" | ||
1507 | "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" | ||
1508 | "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" | ||
1509 | "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" | ||
1510 | "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" | ||
1511 | "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" | ||
1512 | "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" | ||
1513 | "\x96\x0a\xa1\x38\xcf\x43\xda\x71" | ||
1514 | "\x08\x7c\x13\xaa\x1e\xb5\x4c", | ||
1515 | .psize = 1023, | ||
1516 | .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71" | ||
1517 | "\xcf\xca\x30\x85\x9b\xc1\x25\xc7" | ||
1518 | "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b" | ||
1519 | "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51" | ||
1520 | "\xd2\x21\xae\x2d\xc7\xae\x8c\x40" | ||
1521 | "\xb9\xe6\x56\x48\x03\xcd\x88\x6b", | ||
1112 | }, | 1522 | }, |
1113 | }; | 1523 | }; |
1114 | 1524 | ||
@@ -1147,6 +1557,146 @@ static const struct hash_testvec sha3_512_tv_template[] = { | |||
1147 | "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" | 1557 | "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" |
1148 | "\x46\xb5\x33\xb4\x9c\x03\x0d\x99" | 1558 | "\x46\xb5\x33\xb4\x9c\x03\x0d\x99" |
1149 | "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", | 1559 | "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", |
1560 | .np = 2, | ||
1561 | .tap = { 28, 28 }, | ||
1562 | }, { | ||
1563 | .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" | ||
1564 | "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" | ||
1565 | "\xec\x60\xf7\x8e\x02\x99\x30\xc7" | ||
1566 | "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" | ||
1567 | "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" | ||
1568 | "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" | ||
1569 | "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" | ||
1570 | "\x03\x77\x0e\xa5\x19\xb0\x47\xde" | ||
1571 | "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" | ||
1572 | "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" | ||
1573 | "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" | ||
1574 | "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" | ||
1575 | "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" | ||
1576 | "\x69\x00\x97\x0b\xa2\x39\xd0\x44" | ||
1577 | "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" | ||
1578 | "\x4d\xe4\x58\xef\x86\x1d\x91\x28" | ||
1579 | "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" | ||
1580 | "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" | ||
1581 | "\x80\x17\xae\x22\xb9\x50\xe7\x5b" | ||
1582 | "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" | ||
1583 | "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" | ||
1584 | "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" | ||
1585 | "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" | ||
1586 | "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" | ||
1587 | "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" | ||
1588 | "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" | ||
1589 | "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" | ||
1590 | "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" | ||
1591 | "\xae\x45\xdc\x50\xe7\x7e\x15\x89" | ||
1592 | "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" | ||
1593 | "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" | ||
1594 | "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" | ||
1595 | "\x53\xea\x81\x18\x8c\x23\xba\x2e" | ||
1596 | "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" | ||
1597 | "\x37\xce\x42\xd9\x70\x07\x7b\x12" | ||
1598 | "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" | ||
1599 | "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" | ||
1600 | "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" | ||
1601 | "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" | ||
1602 | "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" | ||
1603 | "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" | ||
1604 | "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" | ||
1605 | "\x81\x18\xaf\x23\xba\x51\xe8\x5c" | ||
1606 | "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" | ||
1607 | "\x65\xfc\x70\x07\x9e\x12\xa9\x40" | ||
1608 | "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" | ||
1609 | "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" | ||
1610 | "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" | ||
1611 | "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" | ||
1612 | "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" | ||
1613 | "\xee\x62\xf9\x90\x04\x9b\x32\xc9" | ||
1614 | "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" | ||
1615 | "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" | ||
1616 | "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" | ||
1617 | "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" | ||
1618 | "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" | ||
1619 | "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" | ||
1620 | "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" | ||
1621 | "\x38\xcf\x43\xda\x71\x08\x7c\x13" | ||
1622 | "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" | ||
1623 | "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" | ||
1624 | "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" | ||
1625 | "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" | ||
1626 | "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" | ||
1627 | "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" | ||
1628 | "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" | ||
1629 | "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" | ||
1630 | "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" | ||
1631 | "\x66\xfd\x71\x08\x9f\x13\xaa\x41" | ||
1632 | "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" | ||
1633 | "\x27\xbe\x55\xec\x60\xf7\x8e\x02" | ||
1634 | "\x99\x30\xc7\x3b\xd2\x69\x00\x74" | ||
1635 | "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" | ||
1636 | "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" | ||
1637 | "\xef\x63\xfa\x91\x05\x9c\x33\xca" | ||
1638 | "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" | ||
1639 | "\xb0\x47\xde\x52\xe9\x80\x17\x8b" | ||
1640 | "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" | ||
1641 | "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" | ||
1642 | "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" | ||
1643 | "\x55\xec\x83\x1a\x8e\x25\xbc\x30" | ||
1644 | "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" | ||
1645 | "\x39\xd0\x44\xdb\x72\x09\x7d\x14" | ||
1646 | "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" | ||
1647 | "\x1d\x91\x28\xbf\x33\xca\x61\xf8" | ||
1648 | "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" | ||
1649 | "\xde\x75\x0c\x80\x17\xae\x22\xb9" | ||
1650 | "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" | ||
1651 | "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" | ||
1652 | "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" | ||
1653 | "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" | ||
1654 | "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" | ||
1655 | "\x67\xfe\x72\x09\xa0\x14\xab\x42" | ||
1656 | "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" | ||
1657 | "\x28\xbf\x56\xed\x61\xf8\x8f\x03" | ||
1658 | "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" | ||
1659 | "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" | ||
1660 | "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" | ||
1661 | "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" | ||
1662 | "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" | ||
1663 | "\xb1\x48\xdf\x53\xea\x81\x18\x8c" | ||
1664 | "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" | ||
1665 | "\x95\x09\xa0\x37\xce\x42\xd9\x70" | ||
1666 | "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" | ||
1667 | "\x56\xed\x84\x1b\x8f\x26\xbd\x31" | ||
1668 | "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" | ||
1669 | "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" | ||
1670 | "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" | ||
1671 | "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" | ||
1672 | "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" | ||
1673 | "\xdf\x76\x0d\x81\x18\xaf\x23\xba" | ||
1674 | "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" | ||
1675 | "\xc3\x37\xce\x65\xfc\x70\x07\x9e" | ||
1676 | "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" | ||
1677 | "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" | ||
1678 | "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" | ||
1679 | "\x68\xff\x73\x0a\xa1\x15\xac\x43" | ||
1680 | "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" | ||
1681 | "\x29\xc0\x57\xee\x62\xf9\x90\x04" | ||
1682 | "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" | ||
1683 | "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" | ||
1684 | "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" | ||
1685 | "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" | ||
1686 | "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" | ||
1687 | "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" | ||
1688 | "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" | ||
1689 | "\x96\x0a\xa1\x38\xcf\x43\xda\x71" | ||
1690 | "\x08\x7c\x13\xaa\x1e\xb5\x4c", | ||
1691 | .psize = 1023, | ||
1692 | .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde" | ||
1693 | "\xf0\xc6\x42\x17\xd7\xb2\x26\x47" | ||
1694 | "\x90\x28\xa6\x84\xe8\x49\x7a\x86" | ||
1695 | "\xd6\xb8\x9e\xf8\x07\x59\x21\x03" | ||
1696 | "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0" | ||
1697 | "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b" | ||
1698 | "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41" | ||
1699 | "\x32\xc2\x8e\x50\x91\x86\x90\xfb", | ||
1150 | }, | 1700 | }, |
1151 | }; | 1701 | }; |
1152 | 1702 | ||
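The .np/.tap fields added to the short vectors above tell testmgr to feed the plaintext in .np scatterlist segments of .tap[i] bytes each (two 28-byte pieces here), so segment boundaries exercise an implementation's partial-block handling; the new 1023-byte vectors additionally stress long, block-unaligned inputs. A simplified, self-contained sketch of the chunked-feed idea, using hypothetical names in place of the real scatterlist plumbing:

    #include <stdio.h>

    struct chunked_vec {
            const char *plaintext;
            unsigned int np;        /* number of segments */
            unsigned int tap[8];    /* bytes per segment */
    };

    static void feed(const struct chunked_vec *v,
                     void (*update)(const void *, unsigned int))
    {
            unsigned int i, off = 0;

            for (i = 0; i < v->np; i++) {
                    update(v->plaintext + off, v->tap[i]);
                    off += v->tap[i];
            }
    }

    static void demo_update(const void *data, unsigned int len)
    {
            (void)data;
            printf("update: %u bytes\n", len);
    }

    int main(void)
    {
            static const char buf[56];      /* 28 + 28 demo bytes */
            struct chunked_vec v = { buf, 2, { 28, 28 } };

            feed(&v, demo_update);
            return 0;
    }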
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index 5f62c4f9f6e0..f3a0dd25f871 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c | |||
@@ -24,9 +24,8 @@ | |||
24 | * GNU General Public License for more details. | 24 | * GNU General Public License for more details. |
25 | * | 25 | * |
26 | * You should have received a copy of the GNU General Public License | 26 | * You should have received a copy of the GNU General Public License |
27 | * along with this program; if not, write to the Free Software | 27 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
28 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | 28 | * |
29 | * USA | ||
30 | * | 29 | * |
31 | * This code is a "clean room" implementation, written from the paper | 30 | * This code is a "clean room" implementation, written from the paper |
32 | * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, | 31 | * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, |
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index ebf7a3efb572..07e62433fbfb 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c | |||
@@ -23,9 +23,8 @@ | |||
23 | * GNU General Public License for more details. | 23 | * GNU General Public License for more details. |
24 | * | 24 | * |
25 | * You should have received a copy of the GNU General Public License | 25 | * You should have received a copy of the GNU General Public License |
26 | * along with this program; if not, write to the Free Software | 26 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | 27 | * |
28 | * USA | ||
29 | * | 28 | * |
30 | * This code is a "clean room" implementation, written from the paper | 29 | * This code is a "clean room" implementation, written from the paper |
31 | * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, | 30 | * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, |
diff --git a/crypto/xcbc.c b/crypto/xcbc.c index df90b332554c..25c75af50d3f 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c | |||
@@ -12,8 +12,7 @@ | |||
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | * | 16 | * |
18 | * Author: | 17 | * Author: |
19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> | 18 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> |
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 88044eda0ac6..4d0f571c15f9 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -73,26 +73,14 @@ config HW_RANDOM_ATMEL | |||
73 | 73 | ||
74 | If unsure, say Y. | 74 | If unsure, say Y. |
75 | 75 | ||
76 | config HW_RANDOM_BCM63XX | ||
77 | tristate "Broadcom BCM63xx Random Number Generator support" | ||
78 | depends on BCM63XX || BMIPS_GENERIC | ||
79 | default HW_RANDOM | ||
80 | ---help--- | ||
81 | This driver provides kernel-side support for the Random Number | ||
82 | Generator hardware found on the Broadcom BCM63xx SoCs. | ||
83 | |||
84 | To compile this driver as a module, choose M here: the | ||
85 | module will be called bcm63xx-rng | ||
86 | |||
87 | If unsure, say Y. | ||
88 | |||
89 | config HW_RANDOM_BCM2835 | 76 | config HW_RANDOM_BCM2835 |
90 | tristate "Broadcom BCM2835 Random Number Generator support" | 77 | tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" |
91 | depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X | 78 | depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ |
79 | ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC | ||
92 | default HW_RANDOM | 80 | default HW_RANDOM |
93 | ---help--- | 81 | ---help--- |
94 | This driver provides kernel-side support for the Random Number | 82 | This driver provides kernel-side support for the Random Number |
95 | Generator hardware found on the Broadcom BCM2835 SoCs. | 83 | Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs. |
96 | 84 | ||
97 | To compile this driver as a module, choose M here: the | 85 | To compile this driver as a module, choose M here: the |
98 | module will be called bcm2835-rng | 86 | module will be called bcm2835-rng |
@@ -436,6 +424,18 @@ config HW_RANDOM_S390 | |||
436 | 424 | ||
437 | If unsure, say Y. | 425 | If unsure, say Y. |
438 | 426 | ||
427 | config HW_RANDOM_EXYNOS | ||
428 | tristate "Samsung Exynos True Random Number Generator support" | ||
429 | depends on ARCH_EXYNOS || COMPILE_TEST | ||
430 | default HW_RANDOM | ||
431 | ---help--- | ||
432 | This driver provides support for the True Random Number | ||
433 | Generator available in Exynos SoCs. | ||
434 | |||
435 | To compile this driver as a module, choose M here: the module | ||
436 | will be called exynos-trng. | ||
437 | |||
438 | If unsure, say Y. | ||
439 | endif # HW_RANDOM | 439 | endif # HW_RANDOM |
440 | 440 | ||
441 | config UML_RANDOM | 441 | config UML_RANDOM |
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 0ef05c61d9c8..b780370bd4eb 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile | |||
@@ -9,11 +9,11 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o | |||
9 | obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o | 9 | obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o |
10 | obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o | 10 | obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o |
11 | obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o | 11 | obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o |
12 | obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o | ||
13 | obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o | 12 | obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o |
14 | obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o | 13 | obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o |
15 | n2-rng-y := n2-drv.o n2-asm.o | 14 | n2-rng-y := n2-drv.o n2-asm.o |
16 | obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o | 15 | obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o |
16 | obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o | ||
17 | obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o | 17 | obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o |
18 | obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o | 18 | obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o |
19 | obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o | 19 | obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o |
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 574211a49549..7a84cec30c3a 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/of_platform.h> | 15 | #include <linux/of_platform.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/printk.h> | 17 | #include <linux/printk.h> |
18 | #include <linux/clk.h> | ||
18 | 19 | ||
19 | #define RNG_CTRL 0x0 | 20 | #define RNG_CTRL 0x0 |
20 | #define RNG_STATUS 0x4 | 21 | #define RNG_STATUS 0x4 |
@@ -29,116 +30,180 @@ | |||
29 | 30 | ||
30 | #define RNG_INT_OFF 0x1 | 31 | #define RNG_INT_OFF 0x1 |
31 | 32 | ||
32 | static void __init nsp_rng_init(void __iomem *base) | 33 | struct bcm2835_rng_priv { |
34 | struct hwrng rng; | ||
35 | void __iomem *base; | ||
36 | bool mask_interrupts; | ||
37 | struct clk *clk; | ||
38 | }; | ||
39 | |||
40 | static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) | ||
33 | { | 41 | { |
34 | u32 val; | 42 | return container_of(rng, struct bcm2835_rng_priv, rng); |
43 | } | ||
44 | |||
45 | static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset) | ||
46 | { | ||
47 | /* MIPS chips strapped for BE will automagically configure the | ||
48 | * peripheral registers for CPU-native byte order. | ||
49 | */ | ||
50 | if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) | ||
51 | return __raw_readl(priv->base + offset); | ||
52 | else | ||
53 | return readl(priv->base + offset); | ||
54 | } | ||
35 | 55 | ||
36 | /* mask the interrupt */ | 56 | static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val, |
37 | val = readl(base + RNG_INT_MASK); | 57 | u32 offset) |
38 | val |= RNG_INT_OFF; | 58 | { |
39 | writel(val, base + RNG_INT_MASK); | 59 | if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) |
60 | __raw_writel(val, priv->base + offset); | ||
61 | else | ||
62 | writel(val, priv->base + offset); | ||
40 | } | 63 | } |
41 | 64 | ||
42 | static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, | 65 | static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, |
43 | bool wait) | 66 | bool wait) |
44 | { | 67 | { |
45 | void __iomem *rng_base = (void __iomem *)rng->priv; | 68 | struct bcm2835_rng_priv *priv = to_rng_priv(rng); |
46 | u32 max_words = max / sizeof(u32); | 69 | u32 max_words = max / sizeof(u32); |
47 | u32 num_words, count; | 70 | u32 num_words, count; |
48 | 71 | ||
49 | while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) { | 72 | while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { |
50 | if (!wait) | 73 | if (!wait) |
51 | return 0; | 74 | return 0; |
52 | cpu_relax(); | 75 | cpu_relax(); |
53 | } | 76 | } |
54 | 77 | ||
55 | num_words = readl(rng_base + RNG_STATUS) >> 24; | 78 | num_words = rng_readl(priv, RNG_STATUS) >> 24; |
56 | if (num_words > max_words) | 79 | if (num_words > max_words) |
57 | num_words = max_words; | 80 | num_words = max_words; |
58 | 81 | ||
59 | for (count = 0; count < num_words; count++) | 82 | for (count = 0; count < num_words; count++) |
60 | ((u32 *)buf)[count] = readl(rng_base + RNG_DATA); | 83 | ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA); |
61 | 84 | ||
62 | return num_words * sizeof(u32); | 85 | return num_words * sizeof(u32); |
63 | } | 86 | } |
64 | 87 | ||
65 | static struct hwrng bcm2835_rng_ops = { | 88 | static int bcm2835_rng_init(struct hwrng *rng) |
66 | .name = "bcm2835", | 89 | { |
67 | .read = bcm2835_rng_read, | 90 | struct bcm2835_rng_priv *priv = to_rng_priv(rng); |
91 | int ret = 0; | ||
92 | u32 val; | ||
93 | |||
94 | if (!IS_ERR(priv->clk)) { | ||
95 | ret = clk_prepare_enable(priv->clk); | ||
96 | if (ret) | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | if (priv->mask_interrupts) { | ||
101 | /* mask the interrupt */ | ||
102 | val = rng_readl(priv, RNG_INT_MASK); | ||
103 | val |= RNG_INT_OFF; | ||
104 | rng_writel(priv, val, RNG_INT_MASK); | ||
105 | } | ||
106 | |||
107 | /* set warm-up count & enable */ | ||
108 | rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS); | ||
109 | rng_writel(priv, RNG_RBGEN, RNG_CTRL); | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | static void bcm2835_rng_cleanup(struct hwrng *rng) | ||
115 | { | ||
116 | struct bcm2835_rng_priv *priv = to_rng_priv(rng); | ||
117 | |||
118 | /* disable rng hardware */ | ||
119 | rng_writel(priv, 0, RNG_CTRL); | ||
120 | |||
121 | if (!IS_ERR(priv->clk)) | ||
122 | clk_disable_unprepare(priv->clk); | ||
123 | } | ||
124 | |||
125 | struct bcm2835_rng_of_data { | ||
126 | bool mask_interrupts; | ||
127 | }; | ||
128 | |||
129 | static const struct bcm2835_rng_of_data nsp_rng_of_data = { | ||
130 | .mask_interrupts = true, | ||
68 | }; | 131 | }; |
69 | 132 | ||
70 | static const struct of_device_id bcm2835_rng_of_match[] = { | 133 | static const struct of_device_id bcm2835_rng_of_match[] = { |
71 | { .compatible = "brcm,bcm2835-rng"}, | 134 | { .compatible = "brcm,bcm2835-rng"}, |
72 | { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init}, | 135 | { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data }, |
73 | { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init}, | 136 | { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data }, |
137 | { .compatible = "brcm,bcm6368-rng"}, | ||
74 | {}, | 138 | {}, |
75 | }; | 139 | }; |
76 | 140 | ||
77 | static int bcm2835_rng_probe(struct platform_device *pdev) | 141 | static int bcm2835_rng_probe(struct platform_device *pdev) |
78 | { | 142 | { |
143 | const struct bcm2835_rng_of_data *of_data; | ||
79 | struct device *dev = &pdev->dev; | 144 | struct device *dev = &pdev->dev; |
80 | struct device_node *np = dev->of_node; | 145 | struct device_node *np = dev->of_node; |
81 | void (*rng_setup)(void __iomem *base); | ||
82 | const struct of_device_id *rng_id; | 146 | const struct of_device_id *rng_id; |
83 | void __iomem *rng_base; | 147 | struct bcm2835_rng_priv *priv; |
148 | struct resource *r; | ||
84 | int err; | 149 | int err; |
85 | 150 | ||
151 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
152 | if (!priv) | ||
153 | return -ENOMEM; | ||
154 | |||
155 | platform_set_drvdata(pdev, priv); | ||
156 | |||
157 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
158 | |||
86 | /* map peripheral */ | 159 | /* map peripheral */ |
87 | rng_base = of_iomap(np, 0); | 160 | priv->base = devm_ioremap_resource(dev, r); |
88 | if (!rng_base) { | 161 | if (IS_ERR(priv->base)) |
89 | dev_err(dev, "failed to remap rng regs"); | 162 | return PTR_ERR(priv->base); |
90 | return -ENODEV; | 163 | |
91 | } | 164 | /* Clock is optional on most platforms */ |
92 | bcm2835_rng_ops.priv = (unsigned long)rng_base; | 165 | priv->clk = devm_clk_get(dev, NULL); |
166 | |||
167 | priv->rng.name = pdev->name; | ||
168 | priv->rng.init = bcm2835_rng_init; | ||
169 | priv->rng.read = bcm2835_rng_read; | ||
170 | priv->rng.cleanup = bcm2835_rng_cleanup; | ||
93 | 171 | ||
94 | rng_id = of_match_node(bcm2835_rng_of_match, np); | 172 | rng_id = of_match_node(bcm2835_rng_of_match, np); |
95 | if (!rng_id) { | 173 | if (!rng_id) |
96 | iounmap(rng_base); | ||
97 | return -EINVAL; | 174 | return -EINVAL; |
98 | } | ||
99 | /* Check for rng init function, execute it */ | ||
100 | rng_setup = rng_id->data; | ||
101 | if (rng_setup) | ||
102 | rng_setup(rng_base); | ||
103 | 175 | ||
104 | /* set warm-up count & enable */ | 176 | /* Check for rng init function, execute it */ |
105 | __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); | 177 | of_data = rng_id->data; |
106 | __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); | 178 | if (of_data) |
179 | priv->mask_interrupts = of_data->mask_interrupts; | ||
107 | 180 | ||
108 | /* register driver */ | 181 | /* register driver */ |
109 | err = hwrng_register(&bcm2835_rng_ops); | 182 | err = devm_hwrng_register(dev, &priv->rng); |
110 | if (err) { | 183 | if (err) |
111 | dev_err(dev, "hwrng registration failed\n"); | 184 | dev_err(dev, "hwrng registration failed\n"); |
112 | iounmap(rng_base); | 185 | else |
113 | } else | ||
114 | dev_info(dev, "hwrng registered\n"); | 186 | dev_info(dev, "hwrng registered\n"); |
115 | 187 | ||
116 | return err; | 188 | return err; |
117 | } | 189 | } |
118 | 190 | ||
119 | static int bcm2835_rng_remove(struct platform_device *pdev) | ||
120 | { | ||
121 | void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv; | ||
122 | |||
123 | /* disable rng hardware */ | ||
124 | __raw_writel(0, rng_base + RNG_CTRL); | ||
125 | |||
126 | /* unregister driver */ | ||
127 | hwrng_unregister(&bcm2835_rng_ops); | ||
128 | iounmap(rng_base); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); | 191 | MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); |
134 | 192 | ||
193 | static struct platform_device_id bcm2835_rng_devtype[] = { | ||
194 | { .name = "bcm2835-rng" }, | ||
195 | { .name = "bcm63xx-rng" }, | ||
196 | { /* sentinel */ } | ||
197 | }; | ||
198 | MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype); | ||
199 | |||
135 | static struct platform_driver bcm2835_rng_driver = { | 200 | static struct platform_driver bcm2835_rng_driver = { |
136 | .driver = { | 201 | .driver = { |
137 | .name = "bcm2835-rng", | 202 | .name = "bcm2835-rng", |
138 | .of_match_table = bcm2835_rng_of_match, | 203 | .of_match_table = bcm2835_rng_of_match, |
139 | }, | 204 | }, |
140 | .probe = bcm2835_rng_probe, | 205 | .probe = bcm2835_rng_probe, |
141 | .remove = bcm2835_rng_remove, | 206 | .id_table = bcm2835_rng_devtype, |
142 | }; | 207 | }; |
143 | module_platform_driver(bcm2835_rng_driver); | 208 | module_platform_driver(bcm2835_rng_driver); |
144 | 209 | ||
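The rewrite above retires the single static bcm2835_rng_ops, which smuggled the register base through the unsigned long priv field, in favour of a per-device bcm2835_rng_priv that embeds the struct hwrng and is recovered with container_of() in to_rng_priv(); that is what lets the clock handle and mask_interrupts flag be per-instance, and the switch to devm_ioremap_resource()/devm_hwrng_register() is why the explicit remove() callback could be deleted. A userspace model of the embed-and-recover idiom (container_of is redefined here because this runs outside the kernel):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hwrng {
            const char *name;
    };

    struct bcm2835_rng_priv {
            struct hwrng rng;       /* embedded, not a pointer */
            int mask_interrupts;
    };

    static struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
    {
            return container_of(rng, struct bcm2835_rng_priv, rng);
    }

    int main(void)
    {
            struct bcm2835_rng_priv priv = {
                    .rng = { .name = "demo" },
                    .mask_interrupts = 1,
            };
            struct hwrng *rng = &priv.rng;  /* what callbacks receive */

            printf("%s: mask_interrupts=%d\n", rng->name,
                   to_rng_priv(rng)->mask_interrupts);
            return 0;
    }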
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c deleted file mode 100644 index 5132c9cde50d..000000000000 --- a/drivers/char/hw_random/bcm63xx-rng.c +++ /dev/null | |||
@@ -1,154 +0,0 @@ | |||
1 | /* | ||
2 | * Broadcom BCM63xx Random Number Generator support | ||
3 | * | ||
4 | * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org> | ||
5 | * Copyright (C) 2009, Broadcom Corporation | ||
6 | * | ||
7 | */ | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/io.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/clk.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/hw_random.h> | ||
15 | #include <linux/of.h> | ||
16 | |||
17 | #define RNG_CTRL 0x00 | ||
18 | #define RNG_EN (1 << 0) | ||
19 | |||
20 | #define RNG_STAT 0x04 | ||
21 | #define RNG_AVAIL_MASK (0xff000000) | ||
22 | |||
23 | #define RNG_DATA 0x08 | ||
24 | #define RNG_THRES 0x0c | ||
25 | #define RNG_MASK 0x10 | ||
26 | |||
27 | struct bcm63xx_rng_priv { | ||
28 | struct hwrng rng; | ||
29 | struct clk *clk; | ||
30 | void __iomem *regs; | ||
31 | }; | ||
32 | |||
33 | #define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng) | ||
34 | |||
35 | static int bcm63xx_rng_init(struct hwrng *rng) | ||
36 | { | ||
37 | struct bcm63xx_rng_priv *priv = to_rng_priv(rng); | ||
38 | u32 val; | ||
39 | int error; | ||
40 | |||
41 | error = clk_prepare_enable(priv->clk); | ||
42 | if (error) | ||
43 | return error; | ||
44 | |||
45 | val = __raw_readl(priv->regs + RNG_CTRL); | ||
46 | val |= RNG_EN; | ||
47 | __raw_writel(val, priv->regs + RNG_CTRL); | ||
48 | |||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static void bcm63xx_rng_cleanup(struct hwrng *rng) | ||
53 | { | ||
54 | struct bcm63xx_rng_priv *priv = to_rng_priv(rng); | ||
55 | u32 val; | ||
56 | |||
57 | val = __raw_readl(priv->regs + RNG_CTRL); | ||
58 | val &= ~RNG_EN; | ||
59 | __raw_writel(val, priv->regs + RNG_CTRL); | ||
60 | |||
61 | clk_disable_unprepare(priv->clk); | ||
62 | } | ||
63 | |||
64 | static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) | ||
65 | { | ||
66 | struct bcm63xx_rng_priv *priv = to_rng_priv(rng); | ||
67 | |||
68 | return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK; | ||
69 | } | ||
70 | |||
71 | static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data) | ||
72 | { | ||
73 | struct bcm63xx_rng_priv *priv = to_rng_priv(rng); | ||
74 | |||
75 | *data = __raw_readl(priv->regs + RNG_DATA); | ||
76 | |||
77 | return 4; | ||
78 | } | ||
79 | |||
80 | static int bcm63xx_rng_probe(struct platform_device *pdev) | ||
81 | { | ||
82 | struct resource *r; | ||
83 | int ret; | ||
84 | struct bcm63xx_rng_priv *priv; | ||
85 | |||
86 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
87 | if (!r) { | ||
88 | dev_err(&pdev->dev, "no iomem resource\n"); | ||
89 | return -ENXIO; | ||
90 | } | ||
91 | |||
92 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | ||
93 | if (!priv) | ||
94 | return -ENOMEM; | ||
95 | |||
96 | priv->rng.name = pdev->name; | ||
97 | priv->rng.init = bcm63xx_rng_init; | ||
98 | priv->rng.cleanup = bcm63xx_rng_cleanup; | ||
99 | priv->rng.data_present = bcm63xx_rng_data_present; | ||
100 | priv->rng.data_read = bcm63xx_rng_data_read; | ||
101 | |||
102 | priv->clk = devm_clk_get(&pdev->dev, "ipsec"); | ||
103 | if (IS_ERR(priv->clk)) { | ||
104 | ret = PTR_ERR(priv->clk); | ||
105 | dev_err(&pdev->dev, "no clock for device: %d\n", ret); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | if (!devm_request_mem_region(&pdev->dev, r->start, | ||
110 | resource_size(r), pdev->name)) { | ||
111 | dev_err(&pdev->dev, "request mem failed"); | ||
112 | return -EBUSY; | ||
113 | } | ||
114 | |||
115 | priv->regs = devm_ioremap_nocache(&pdev->dev, r->start, | ||
116 | resource_size(r)); | ||
117 | if (!priv->regs) { | ||
118 | dev_err(&pdev->dev, "ioremap failed"); | ||
119 | return -ENOMEM; | ||
120 | } | ||
121 | |||
122 | ret = devm_hwrng_register(&pdev->dev, &priv->rng); | ||
123 | if (ret) { | ||
124 | dev_err(&pdev->dev, "failed to register rng device: %d\n", | ||
125 | ret); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | dev_info(&pdev->dev, "registered RNG driver\n"); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | #ifdef CONFIG_OF | ||
135 | static const struct of_device_id bcm63xx_rng_of_match[] = { | ||
136 | { .compatible = "brcm,bcm6368-rng", }, | ||
137 | {}, | ||
138 | }; | ||
139 | MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match); | ||
140 | #endif | ||
141 | |||
142 | static struct platform_driver bcm63xx_rng_driver = { | ||
143 | .probe = bcm63xx_rng_probe, | ||
144 | .driver = { | ||
145 | .name = "bcm63xx-rng", | ||
146 | .of_match_table = of_match_ptr(bcm63xx_rng_of_match), | ||
147 | }, | ||
148 | }; | ||
149 | |||
150 | module_platform_driver(bcm63xx_rng_driver); | ||
151 | |||
152 | MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); | ||
153 | MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver"); | ||
154 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 657b8770b6b9..91bb98c42a1c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -306,6 +306,10 @@ static int enable_best_rng(void) | |||
306 | ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); | 306 | ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); |
307 | if (!ret) | 307 | if (!ret) |
308 | cur_rng_set_by_user = 0; | 308 | cur_rng_set_by_user = 0; |
309 | } else { | ||
310 | drop_current_rng(); | ||
311 | cur_rng_set_by_user = 0; | ||
312 | ret = 0; | ||
309 | } | 313 | } |
310 | 314 | ||
311 | return ret; | 315 | return ret; |
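The enable_best_rng() hunk above adds an else branch for the case where no candidate rng remains: previously a stale current_rng could be left in place, now it is dropped and the user-selection flag cleared. A toy model of the resulting control flow (the enclosing condition is reconstructed from context and the names are stand-ins, not the kernel's types):

    #include <stdio.h>

    static const char *current_rng = "old-rng";
    static int cur_rng_set_by_user = 1;

    static void drop_current_rng(void)
    {
            current_rng = NULL;
    }

    static int enable_best_rng(const char *best)  /* NULL: list empty */
    {
            int ret = -1;

            if (best) {
                    current_rng = best;     /* set_current_rng() stand-in */
                    ret = 0;
                    cur_rng_set_by_user = 0;
            } else {
                    drop_current_rng();     /* the new else branch */
                    cur_rng_set_by_user = 0;
                    ret = 0;
            }
            return ret;
    }

    int main(void)
    {
            enable_best_rng(NULL);  /* simulate last rng unregistering */
            printf("current=%s by_user=%d\n",
                   current_rng ? current_rng : "(none)",
                   cur_rng_set_by_user);
            return 0;
    }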
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c new file mode 100644 index 000000000000..1947aed7c044 --- /dev/null +++ b/drivers/char/hw_random/exynos-trng.c | |||
@@ -0,0 +1,235 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * RNG driver for Exynos TRNGs | ||
4 | * | ||
5 | * Author: Łukasz Stelmach <l.stelmach@samsung.com> | ||
6 | * | ||
7 | * Copyright 2017 (c) Samsung Electronics Software, Inc. | ||
8 | * | ||
9 | * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by | ||
10 | * Krzysztof Kozłowski <krzk@kernel.org> | ||
11 | */ | ||
12 | |||
13 | #include <linux/clk.h> | ||
14 | #include <linux/crypto.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/hw_random.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/iopoll.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/pm_runtime.h> | ||
24 | |||
25 | #define EXYNOS_TRNG_CLKDIV (0x0) | ||
26 | |||
27 | #define EXYNOS_TRNG_CTRL (0x20) | ||
28 | #define EXYNOS_TRNG_CTRL_RNGEN BIT(31) | ||
29 | |||
30 | #define EXYNOS_TRNG_POST_CTRL (0x30) | ||
31 | #define EXYNOS_TRNG_ONLINE_CTRL (0x40) | ||
32 | #define EXYNOS_TRNG_ONLINE_STAT (0x44) | ||
33 | #define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48) | ||
34 | #define EXYNOS_TRNG_FIFO_CTRL (0x50) | ||
35 | #define EXYNOS_TRNG_FIFO_0 (0x80) | ||
36 | #define EXYNOS_TRNG_FIFO_1 (0x84) | ||
37 | #define EXYNOS_TRNG_FIFO_2 (0x88) | ||
38 | #define EXYNOS_TRNG_FIFO_3 (0x8c) | ||
39 | #define EXYNOS_TRNG_FIFO_4 (0x90) | ||
40 | #define EXYNOS_TRNG_FIFO_5 (0x94) | ||
41 | #define EXYNOS_TRNG_FIFO_6 (0x98) | ||
42 | #define EXYNOS_TRNG_FIFO_7 (0x9c) | ||
43 | #define EXYNOS_TRNG_FIFO_LEN (8) | ||
44 | #define EXYNOS_TRNG_CLOCK_RATE (500000) | ||
45 | |||
46 | |||
47 | struct exynos_trng_dev { | ||
48 | struct device *dev; | ||
49 | void __iomem *mem; | ||
50 | struct clk *clk; | ||
51 | struct hwrng rng; | ||
52 | }; | ||
53 | |||
54 | static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, | ||
55 | bool wait) | ||
56 | { | ||
57 | struct exynos_trng_dev *trng; | ||
58 | int val; | ||
59 | |||
60 | max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4)); | ||
61 | |||
62 | trng = (struct exynos_trng_dev *)rng->priv; | ||
63 | |||
64 | writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL); | ||
65 | val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val, | ||
66 | val == 0, 200, 1000000); | ||
67 | if (val < 0) | ||
68 | return val; | ||
69 | |||
70 | memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max); | ||
71 | |||
72 | return max; | ||
73 | } | ||
74 | |||
75 | static int exynos_trng_init(struct hwrng *rng) | ||
76 | { | ||
77 | struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; | ||
78 | unsigned long sss_rate; | ||
79 | u32 val; | ||
80 | |||
81 | sss_rate = clk_get_rate(trng->clk); | ||
82 | |||
83 | /* | ||
84 | * For most TRNG circuits the clock frequency of under 500 kHz | ||
85 | * is safe. | ||
86 | */ | ||
87 | val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2); | ||
88 | if (val > 0x7fff) { | ||
89 | dev_err(trng->dev, "clock divider too large: %d", val); | ||
90 | return -ERANGE; | ||
91 | } | ||
92 | val = val << 1; | ||
93 | writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV); | ||
94 | |||
95 | /* Enable the generator. */ | ||
96 | val = EXYNOS_TRNG_CTRL_RNGEN; | ||
97 | writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL); | ||
98 | |||
99 | /* | ||
100 | * Disable post-processing. /dev/hwrng is supposed to deliver | ||
101 | * unprocessed data. | ||
102 | */ | ||
103 | writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL); | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static int exynos_trng_probe(struct platform_device *pdev) | ||
109 | { | ||
110 | struct exynos_trng_dev *trng; | ||
111 | struct resource *res; | ||
112 | int ret = -ENOMEM; | ||
113 | |||
114 | trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); | ||
115 | if (!trng) | ||
116 | return ret; | ||
117 | |||
118 | trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev), | ||
119 | GFP_KERNEL); | ||
120 | if (!trng->rng.name) | ||
121 | return ret; | ||
122 | |||
123 | trng->rng.init = exynos_trng_init; | ||
124 | trng->rng.read = exynos_trng_do_read; | ||
125 | trng->rng.priv = (unsigned long) trng; | ||
126 | |||
127 | platform_set_drvdata(pdev, trng); | ||
128 | trng->dev = &pdev->dev; | ||
129 | |||
130 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
131 | trng->mem = devm_ioremap_resource(&pdev->dev, res); | ||
132 | if (IS_ERR(trng->mem)) | ||
133 | return PTR_ERR(trng->mem); | ||
134 | |||
135 | pm_runtime_enable(&pdev->dev); | ||
136 | ret = pm_runtime_get_sync(&pdev->dev); | ||
137 | if (ret < 0) { | ||
138 | dev_err(&pdev->dev, "Could not get runtime PM.\n"); | ||
139 | goto err_pm_get; | ||
140 | } | ||
141 | |||
142 | trng->clk = devm_clk_get(&pdev->dev, "secss"); | ||
143 | if (IS_ERR(trng->clk)) { | ||
144 | ret = PTR_ERR(trng->clk); | ||
145 | dev_err(&pdev->dev, "Could not get clock.\n"); | ||
146 | goto err_clock; | ||
147 | } | ||
148 | |||
149 | ret = clk_prepare_enable(trng->clk); | ||
150 | if (ret) { | ||
151 | dev_err(&pdev->dev, "Could not enable the clk.\n"); | ||
152 | goto err_clock; | ||
153 | } | ||
154 | |||
155 | ret = hwrng_register(&trng->rng); | ||
156 | if (ret) { | ||
157 | dev_err(&pdev->dev, "Could not register hwrng device.\n"); | ||
158 | goto err_register; | ||
159 | } | ||
160 | |||
161 | dev_info(&pdev->dev, "Exynos True Random Number Generator.\n"); | ||
162 | |||
163 | return 0; | ||
164 | |||
165 | err_register: | ||
166 | clk_disable_unprepare(trng->clk); | ||
167 | |||
168 | err_clock: | ||
169 | pm_runtime_put_sync(&pdev->dev); | ||
170 | |||
171 | err_pm_get: | ||
172 | pm_runtime_disable(&pdev->dev); | ||
173 | |||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static int exynos_trng_remove(struct platform_device *pdev) | ||
178 | { | ||
179 | struct exynos_trng_dev *trng = platform_get_drvdata(pdev); | ||
180 | |||
181 | hwrng_unregister(&trng->rng); | ||
182 | clk_disable_unprepare(trng->clk); | ||
183 | |||
184 | pm_runtime_put_sync(&pdev->dev); | ||
185 | pm_runtime_disable(&pdev->dev); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static int __maybe_unused exynos_trng_suspend(struct device *dev) | ||
191 | { | ||
192 | pm_runtime_put_sync(dev); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int __maybe_unused exynos_trng_resume(struct device *dev) | ||
198 | { | ||
199 | int ret; | ||
200 | |||
201 | ret = pm_runtime_get_sync(dev); | ||
202 | if (ret < 0) { | ||
203 | dev_err(dev, "Could not get runtime PM.\n"); | ||
204 | pm_runtime_put_noidle(dev); | ||
205 | return ret; | ||
206 | } | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, | ||
212 | exynos_trng_resume); | ||
213 | |||
214 | static const struct of_device_id exynos_trng_dt_match[] = { | ||
215 | { | ||
216 | .compatible = "samsung,exynos5250-trng", | ||
217 | }, | ||
218 | { }, | ||
219 | }; | ||
220 | MODULE_DEVICE_TABLE(of, exynos_trng_dt_match); | ||
221 | |||
222 | static struct platform_driver exynos_trng_driver = { | ||
223 | .driver = { | ||
224 | .name = "exynos-trng", | ||
225 | .pm = &exynos_trng_pm_ops, | ||
226 | .of_match_table = exynos_trng_dt_match, | ||
227 | }, | ||
228 | .probe = exynos_trng_probe, | ||
229 | .remove = exynos_trng_remove, | ||
230 | }; | ||
231 | |||
232 | module_platform_driver(exynos_trng_driver); | ||
233 | MODULE_AUTHOR("Łukasz Stelmach"); | ||
234 | MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips"); | ||
235 | MODULE_LICENSE("GPL v2"); | ||
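A note on the read path above: exynos_trng_do_read() never asks the engine for more than one FIFO fill. Requests are clamped to the eight 32-bit FIFO words (32 bytes), the FIFO control register is programmed with the request size in bits, and the register is then polled until it drains to zero before the words are copied out. A minimal, self-contained sketch of that sizing arithmetic (names are illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

#define FIFO_WORDS 8                     /* mirrors EXYNOS_TRNG_FIFO_LEN */
#define FIFO_BYTES (FIFO_WORDS * 4)      /* 32 bytes per FIFO fill */

static size_t trng_request_bits(size_t max)
{
	if (max > FIFO_BYTES)            /* clamp to a single FIFO fill */
		max = FIFO_BYTES;
	return max * 8;                  /* the control register counts bits */
}

int main(void)
{
	printf("%zu\n", trng_request_bits(64)); /* prints 256, i.e. 32 bytes */
	return 0;
}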
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 88db42d30760..eca87249bcff 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c | |||
@@ -282,8 +282,7 @@ static int __exit imx_rngc_remove(struct platform_device *pdev) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | #ifdef CONFIG_PM | 285 | static int __maybe_unused imx_rngc_suspend(struct device *dev) |
286 | static int imx_rngc_suspend(struct device *dev) | ||
287 | { | 286 | { |
288 | struct imx_rngc *rngc = dev_get_drvdata(dev); | 287 | struct imx_rngc *rngc = dev_get_drvdata(dev); |
289 | 288 | ||
@@ -292,7 +291,7 @@ static int imx_rngc_suspend(struct device *dev) | |||
292 | return 0; | 291 | return 0; |
293 | } | 292 | } |
294 | 293 | ||
295 | static int imx_rngc_resume(struct device *dev) | 294 | static int __maybe_unused imx_rngc_resume(struct device *dev) |
296 | { | 295 | { |
297 | struct imx_rngc *rngc = dev_get_drvdata(dev); | 296 | struct imx_rngc *rngc = dev_get_drvdata(dev); |
298 | 297 | ||
@@ -301,11 +300,7 @@ static int imx_rngc_resume(struct device *dev) | |||
301 | return 0; | 300 | return 0; |
302 | } | 301 | } |
303 | 302 | ||
304 | static const struct dev_pm_ops imx_rngc_pm_ops = { | 303 | SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); |
305 | .suspend = imx_rngc_suspend, | ||
306 | .resume = imx_rngc_resume, | ||
307 | }; | ||
308 | #endif | ||
309 | 304 | ||
310 | static const struct of_device_id imx_rngc_dt_ids[] = { | 305 | static const struct of_device_id imx_rngc_dt_ids[] = { |
311 | { .compatible = "fsl,imx25-rngb", .data = NULL, }, | 306 | { .compatible = "fsl,imx25-rngb", .data = NULL, }, |
@@ -316,9 +311,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); | |||
316 | static struct platform_driver imx_rngc_driver = { | 311 | static struct platform_driver imx_rngc_driver = { |
317 | .driver = { | 312 | .driver = { |
318 | .name = "imx_rngc", | 313 | .name = "imx_rngc", |
319 | #ifdef CONFIG_PM | ||
320 | .pm = &imx_rngc_pm_ops, | 314 | .pm = &imx_rngc_pm_ops, |
321 | #endif | ||
322 | .of_match_table = imx_rngc_dt_ids, | 315 | .of_match_table = imx_rngc_dt_ids, |
323 | }, | 316 | }, |
324 | .remove = __exit_p(imx_rngc_remove), | 317 | .remove = __exit_p(imx_rngc_remove), |
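The imx-rngc change above is the standard conversion away from open-coded #ifdef CONFIG_PM blocks: the callbacks are marked __maybe_unused so they compile silently even when power management is disabled, and SIMPLE_DEV_PM_OPS() expands to a populated (or empty) dev_pm_ops, so the .pm pointer can be set unconditionally. A hedged sketch of the pattern for a hypothetical driver (foo_* names are illustrative):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware here, e.g. clk_disable_unprepare() */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* re-enable the hardware here, e.g. clk_prepare_enable() */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,	/* no #ifdef needed around this */
	},
};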
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index 8da7bcf54105..7f99cd52b40e 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c | |||
@@ -135,6 +135,7 @@ static int mtk_rng_probe(struct platform_device *pdev) | |||
135 | #endif | 135 | #endif |
136 | priv->rng.read = mtk_rng_read; | 136 | priv->rng.read = mtk_rng_read; |
137 | priv->rng.priv = (unsigned long)&pdev->dev; | 137 | priv->rng.priv = (unsigned long)&pdev->dev; |
138 | priv->rng.quality = 900; | ||
138 | 139 | ||
139 | priv->clk = devm_clk_get(&pdev->dev, "rng"); | 140 | priv->clk = devm_clk_get(&pdev->dev, "rng"); |
140 | if (IS_ERR(priv->clk)) { | 141 | if (IS_ERR(priv->clk)) { |
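The new priv->rng.quality = 900 line feeds the hwrng core's entropy accounting: quality is an estimate of true entropy per 1024 bits of output, so 900 tells the core to credit 900 bits of entropy for every 1024 bits read, and to prefer this device over lower-quality sources when it picks the current rng (unless the user has pinned one, as in the cur_rng_set_by_user logic near the top of this section). A small sketch of that arithmetic, assuming this interpretation of the field:

#include <stdio.h>

/* entropy credited = input_bits * quality / 1024 */
static unsigned long credited_entropy_bits(unsigned long input_bits,
					   unsigned int quality)
{
	return input_bits * quality / 1024;
}

int main(void)
{
	/* a 32-byte (256-bit) read at quality 900 credits 225 bits */
	printf("%lu\n", credited_entropy_bits(256, 900));
	return 0;
}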
diff --git a/drivers/char/random.c b/drivers/char/random.c index 64b59562c872..80f2c326db47 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -431,9 +431,9 @@ static int crng_init = 0; | |||
431 | static int crng_init_cnt = 0; | 431 | static int crng_init_cnt = 0; |
432 | #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) | 432 | #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) |
433 | static void _extract_crng(struct crng_state *crng, | 433 | static void _extract_crng(struct crng_state *crng, |
434 | __u8 out[CHACHA20_BLOCK_SIZE]); | 434 | __u32 out[CHACHA20_BLOCK_WORDS]); |
435 | static void _crng_backtrack_protect(struct crng_state *crng, | 435 | static void _crng_backtrack_protect(struct crng_state *crng, |
436 | __u8 tmp[CHACHA20_BLOCK_SIZE], int used); | 436 | __u32 tmp[CHACHA20_BLOCK_WORDS], int used); |
437 | static void process_random_ready_list(void); | 437 | static void process_random_ready_list(void); |
438 | static void _get_random_bytes(void *buf, int nbytes); | 438 | static void _get_random_bytes(void *buf, int nbytes); |
439 | 439 | ||
@@ -817,7 +817,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
817 | unsigned long flags; | 817 | unsigned long flags; |
818 | int i, num; | 818 | int i, num; |
819 | union { | 819 | union { |
820 | __u8 block[CHACHA20_BLOCK_SIZE]; | 820 | __u32 block[CHACHA20_BLOCK_WORDS]; |
821 | __u32 key[8]; | 821 | __u32 key[8]; |
822 | } buf; | 822 | } buf; |
823 | 823 | ||
@@ -851,7 +851,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
851 | } | 851 | } |
852 | 852 | ||
853 | static void _extract_crng(struct crng_state *crng, | 853 | static void _extract_crng(struct crng_state *crng, |
854 | __u8 out[CHACHA20_BLOCK_SIZE]) | 854 | __u32 out[CHACHA20_BLOCK_WORDS]) |
855 | { | 855 | { |
856 | unsigned long v, flags; | 856 | unsigned long v, flags; |
857 | 857 | ||
@@ -867,7 +867,7 @@ static void _extract_crng(struct crng_state *crng, | |||
867 | spin_unlock_irqrestore(&crng->lock, flags); | 867 | spin_unlock_irqrestore(&crng->lock, flags); |
868 | } | 868 | } |
869 | 869 | ||
870 | static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE]) | 870 | static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS]) |
871 | { | 871 | { |
872 | struct crng_state *crng = NULL; | 872 | struct crng_state *crng = NULL; |
873 | 873 | ||
@@ -885,7 +885,7 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE]) | |||
885 | * enough) to mutate the CRNG key to provide backtracking protection. | 885 | * enough) to mutate the CRNG key to provide backtracking protection. |
886 | */ | 886 | */ |
887 | static void _crng_backtrack_protect(struct crng_state *crng, | 887 | static void _crng_backtrack_protect(struct crng_state *crng, |
888 | __u8 tmp[CHACHA20_BLOCK_SIZE], int used) | 888 | __u32 tmp[CHACHA20_BLOCK_WORDS], int used) |
889 | { | 889 | { |
890 | unsigned long flags; | 890 | unsigned long flags; |
891 | __u32 *s, *d; | 891 | __u32 *s, *d; |
@@ -897,14 +897,14 @@ static void _crng_backtrack_protect(struct crng_state *crng, | |||
897 | used = 0; | 897 | used = 0; |
898 | } | 898 | } |
899 | spin_lock_irqsave(&crng->lock, flags); | 899 | spin_lock_irqsave(&crng->lock, flags); |
900 | s = (__u32 *) &tmp[used]; | 900 | s = &tmp[used / sizeof(__u32)]; |
901 | d = &crng->state[4]; | 901 | d = &crng->state[4]; |
902 | for (i=0; i < 8; i++) | 902 | for (i=0; i < 8; i++) |
903 | *d++ ^= *s++; | 903 | *d++ ^= *s++; |
904 | spin_unlock_irqrestore(&crng->lock, flags); | 904 | spin_unlock_irqrestore(&crng->lock, flags); |
905 | } | 905 | } |
906 | 906 | ||
907 | static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used) | 907 | static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used) |
908 | { | 908 | { |
909 | struct crng_state *crng = NULL; | 909 | struct crng_state *crng = NULL; |
910 | 910 | ||
@@ -920,7 +920,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used) | |||
920 | static ssize_t extract_crng_user(void __user *buf, size_t nbytes) | 920 | static ssize_t extract_crng_user(void __user *buf, size_t nbytes) |
921 | { | 921 | { |
922 | ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE; | 922 | ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE; |
923 | __u8 tmp[CHACHA20_BLOCK_SIZE]; | 923 | __u32 tmp[CHACHA20_BLOCK_WORDS]; |
924 | int large_request = (nbytes > 256); | 924 | int large_request = (nbytes > 256); |
925 | 925 | ||
926 | while (nbytes) { | 926 | while (nbytes) { |
@@ -1507,7 +1507,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, | |||
1507 | */ | 1507 | */ |
1508 | static void _get_random_bytes(void *buf, int nbytes) | 1508 | static void _get_random_bytes(void *buf, int nbytes) |
1509 | { | 1509 | { |
1510 | __u8 tmp[CHACHA20_BLOCK_SIZE]; | 1510 | __u32 tmp[CHACHA20_BLOCK_WORDS]; |
1511 | 1511 | ||
1512 | trace_get_random_bytes(nbytes, _RET_IP_); | 1512 | trace_get_random_bytes(nbytes, _RET_IP_); |
1513 | 1513 | ||
@@ -2114,7 +2114,7 @@ u64 get_random_u64(void) | |||
2114 | if (use_lock) | 2114 | if (use_lock) |
2115 | read_lock_irqsave(&batched_entropy_reset_lock, flags); | 2115 | read_lock_irqsave(&batched_entropy_reset_lock, flags); |
2116 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { | 2116 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { |
2117 | extract_crng((u8 *)batch->entropy_u64); | 2117 | extract_crng((__u32 *)batch->entropy_u64); |
2118 | batch->position = 0; | 2118 | batch->position = 0; |
2119 | } | 2119 | } |
2120 | ret = batch->entropy_u64[batch->position++]; | 2120 | ret = batch->entropy_u64[batch->position++]; |
@@ -2144,7 +2144,7 @@ u32 get_random_u32(void) | |||
2144 | if (use_lock) | 2144 | if (use_lock) |
2145 | read_lock_irqsave(&batched_entropy_reset_lock, flags); | 2145 | read_lock_irqsave(&batched_entropy_reset_lock, flags); |
2146 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { | 2146 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { |
2147 | extract_crng((u8 *)batch->entropy_u32); | 2147 | extract_crng(batch->entropy_u32); |
2148 | batch->position = 0; | 2148 | batch->position = 0; |
2149 | } | 2149 | } |
2150 | ret = batch->entropy_u32[batch->position++]; | 2150 | ret = batch->entropy_u32[batch->position++]; |
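The random.c hunks retype every ChaCha20 output buffer from __u8[CHACHA20_BLOCK_SIZE] to __u32[CHACHA20_BLOCK_WORDS]. The block is consumed word-wise (the backtrack-protection loop XORs eight 32-bit words into the key), so typing it as words guarantees alignment and removes the old pointer cast; CHACHA20_BLOCK_WORDS is simply the same 64-byte block expressed in 32-bit words. A standalone sketch of the relationship:

#include <assert.h>
#include <stdint.h>

#define CHACHA20_BLOCK_SIZE  64
#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(uint32_t))

int main(void)
{
	uint32_t block[CHACHA20_BLOCK_WORDS]; /* naturally 4-byte aligned */
	int used = 32;                        /* bytes already consumed */

	/* byte offset -> word index, as in _crng_backtrack_protect() */
	uint32_t *s = &block[used / sizeof(uint32_t)];

	assert(sizeof(block) == CHACHA20_BLOCK_SIZE);
	assert(s == block + 8);
	return 0;
}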
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 47ec920d5b71..4b741b83e23f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -723,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6 | |||
723 | select CRYPTO_HASH | 723 | select CRYPTO_HASH |
724 | select CRYPTO_SHA1 | 724 | select CRYPTO_SHA1 |
725 | select CRYPTO_SHA256 | 725 | select CRYPTO_SHA256 |
726 | select CRYPTO_SHA384 | ||
727 | select CRYPTO_SHA512 | 726 | select CRYPTO_SHA512 |
728 | help | 727 | help |
729 | Enables the driver for the on-chip crypto accelerator | 728 | Enables the driver for the on-chip crypto accelerator |
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index eeaf27859d80..ea83d0bff0e9 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c | |||
@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, | |||
256 | if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3)) | 256 | if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3)) |
257 | return true; | 257 | return true; |
258 | 258 | ||
259 | /* CCM - fix CBC MAC mismatch in special case */ | ||
260 | if (is_ccm && decrypt && !req->assoclen) | ||
261 | return true; | ||
262 | |||
263 | return false; | 259 | return false; |
264 | } | 260 | } |
265 | 261 | ||
@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key, | |||
330 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 326 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; |
331 | sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2); | 327 | sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2); |
332 | 328 | ||
333 | set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, | 329 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, |
334 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, | 330 | SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, |
335 | SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, | 331 | SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, |
336 | SA_CIPHER_ALG_AES, | 332 | SA_CIPHER_ALG_AES, |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index c44954e274bc..76f459ad2821 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev) | |||
128 | writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); | 128 | writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); |
129 | writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); | 129 | writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); |
130 | writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG); | 130 | writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG); |
131 | writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN); | 131 | if (dev->is_revb) { |
132 | writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10, | ||
133 | dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT); | ||
134 | writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT, | ||
135 | dev->ce_base + CRYPTO4XX_INT_EN); | ||
136 | } else { | ||
137 | writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN); | ||
138 | } | ||
132 | } | 139 | } |
133 | 140 | ||
134 | int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) | 141 | int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) |
@@ -275,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) | |||
275 | */ | 282 | */ |
276 | static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) | 283 | static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) |
277 | { | 284 | { |
278 | dev->gdr = dma_alloc_coherent(dev->core_dev->device, | 285 | dev->gdr = dma_zalloc_coherent(dev->core_dev->device, |
279 | sizeof(struct ce_gd) * PPC4XX_NUM_GD, | 286 | sizeof(struct ce_gd) * PPC4XX_NUM_GD, |
280 | &dev->gdr_pa, GFP_ATOMIC); | 287 | &dev->gdr_pa, GFP_ATOMIC); |
281 | if (!dev->gdr) | 288 | if (!dev->gdr) |
282 | return -ENOMEM; | 289 | return -ENOMEM; |
283 | 290 | ||
284 | memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD); | ||
285 | |||
286 | return 0; | 291 | return 0; |
287 | } | 292 | } |
288 | 293 | ||
@@ -570,15 +575,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev, | |||
570 | struct pd_uinfo *pd_uinfo, | 575 | struct pd_uinfo *pd_uinfo, |
571 | struct ce_pd *pd) | 576 | struct ce_pd *pd) |
572 | { | 577 | { |
573 | struct aead_request *aead_req; | 578 | struct aead_request *aead_req = container_of(pd_uinfo->async_req, |
574 | struct crypto4xx_ctx *ctx; | 579 | struct aead_request, base); |
575 | struct scatterlist *dst = pd_uinfo->dest_va; | 580 | struct scatterlist *dst = pd_uinfo->dest_va; |
581 | size_t cp_len = crypto_aead_authsize( | ||
582 | crypto_aead_reqtfm(aead_req)); | ||
583 | u32 icv[cp_len]; | ||
576 | int err = 0; | 584 | int err = 0; |
577 | 585 | ||
578 | aead_req = container_of(pd_uinfo->async_req, struct aead_request, | ||
579 | base); | ||
580 | ctx = crypto_tfm_ctx(aead_req->base.tfm); | ||
581 | |||
582 | if (pd_uinfo->using_sd) { | 586 | if (pd_uinfo->using_sd) { |
583 | crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, | 587 | crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, |
584 | pd->pd_ctl_len.bf.pkt_len, | 588 | pd->pd_ctl_len.bf.pkt_len, |
@@ -590,38 +594,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev, | |||
590 | 594 | ||
591 | if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) { | 595 | if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) { |
592 | /* append icv at the end */ | 596 | /* append icv at the end */ |
593 | size_t cp_len = crypto_aead_authsize( | ||
594 | crypto_aead_reqtfm(aead_req)); | ||
595 | u32 icv[cp_len]; | ||
596 | |||
597 | crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest, | 597 | crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest, |
598 | cp_len); | 598 | cp_len); |
599 | 599 | ||
600 | scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen, | 600 | scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen, |
601 | cp_len, 1); | 601 | cp_len, 1); |
602 | } else { | ||
603 | /* check icv at the end */ | ||
604 | scatterwalk_map_and_copy(icv, aead_req->src, | ||
605 | aead_req->assoclen + aead_req->cryptlen - | ||
606 | cp_len, cp_len, 0); | ||
607 | |||
608 | crypto4xx_memcpy_from_le32(icv, icv, cp_len); | ||
609 | |||
610 | if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len)) | ||
611 | err = -EBADMSG; | ||
602 | } | 612 | } |
603 | 613 | ||
604 | crypto4xx_ret_sg_desc(dev, pd_uinfo); | 614 | crypto4xx_ret_sg_desc(dev, pd_uinfo); |
605 | 615 | ||
606 | if (pd->pd_ctl.bf.status & 0xff) { | 616 | if (pd->pd_ctl.bf.status & 0xff) { |
607 | if (pd->pd_ctl.bf.status & 0x1) { | 617 | if (!__ratelimit(&dev->aead_ratelimit)) { |
608 | /* authentication error */ | 618 | if (pd->pd_ctl.bf.status & 2) |
609 | err = -EBADMSG; | 619 | pr_err("pad fail error\n"); |
610 | } else { | 620 | if (pd->pd_ctl.bf.status & 4) |
611 | if (!__ratelimit(&dev->aead_ratelimit)) { | 621 | pr_err("seqnum fail\n"); |
612 | if (pd->pd_ctl.bf.status & 2) | 622 | if (pd->pd_ctl.bf.status & 8) |
613 | pr_err("pad fail error\n"); | 623 | pr_err("error _notify\n"); |
614 | if (pd->pd_ctl.bf.status & 4) | 624 | pr_err("aead return err status = 0x%02x\n", |
615 | pr_err("seqnum fail\n"); | 625 | pd->pd_ctl.bf.status & 0xff); |
616 | if (pd->pd_ctl.bf.status & 8) | 626 | pr_err("pd pad_ctl = 0x%08x\n", |
617 | pr_err("error _notify\n"); | 627 | pd->pd_ctl.bf.pd_pad_ctl); |
618 | pr_err("aead return err status = 0x%02x\n", | ||
619 | pd->pd_ctl.bf.status & 0xff); | ||
620 | pr_err("pd pad_ctl = 0x%08x\n", | ||
621 | pd->pd_ctl.bf.pd_pad_ctl); | ||
622 | } | ||
623 | err = -EINVAL; | ||
624 | } | 628 | } |
629 | err = -EINVAL; | ||
625 | } | 630 | } |
626 | 631 | ||
627 | if (pd_uinfo->state & PD_ENTRY_BUSY) | 632 | if (pd_uinfo->state & PD_ENTRY_BUSY) |
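The rewritten completion path now verifies the ICV on decryption in software: the tag is copied out of the end of the source scatterlist, converted from the engine's little-endian save area, and compared against the saved digest with crypto_memneq(), which runs in constant time so the comparison cannot leak where the first mismatching byte falls. A minimal sketch of the constant-time comparison idea (crypto_memneq itself is more elaborate):

#include <stddef.h>
#include <stdint.h>

/* Accumulate differences instead of returning early; run time is
 * independent of where (or whether) the tags diverge. */
static int tag_mismatch(const uint8_t *a, const uint8_t *b, size_t len)
{
	uint8_t acc = 0;

	while (len--)
		acc |= *a++ ^ *b++;

	return acc != 0;	/* nonzero maps to -EBADMSG in the driver */
}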
@@ -1070,21 +1075,29 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data) | |||
1070 | /** | 1075 | /** |
1071 | * Top Half of isr. | 1076 | * Top Half of isr. |
1072 | */ | 1077 | */ |
1073 | static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) | 1078 | static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data, |
1079 | u32 clr_val) | ||
1074 | { | 1080 | { |
1075 | struct device *dev = (struct device *)data; | 1081 | struct device *dev = (struct device *)data; |
1076 | struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); | 1082 | struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); |
1077 | 1083 | ||
1078 | if (!core_dev->dev->ce_base) | 1084 | writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR); |
1079 | return 0; | ||
1080 | |||
1081 | writel(PPC4XX_INTERRUPT_CLR, | ||
1082 | core_dev->dev->ce_base + CRYPTO4XX_INT_CLR); | ||
1083 | tasklet_schedule(&core_dev->tasklet); | 1085 | tasklet_schedule(&core_dev->tasklet); |
1084 | 1086 | ||
1085 | return IRQ_HANDLED; | 1087 | return IRQ_HANDLED; |
1086 | } | 1088 | } |
1087 | 1089 | ||
1090 | static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) | ||
1091 | { | ||
1092 | return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR); | ||
1093 | } | ||
1094 | |||
1095 | static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data) | ||
1096 | { | ||
1097 | return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR | | ||
1098 | PPC4XX_TMO_ERR_INT); | ||
1099 | } | ||
1100 | |||
1088 | /** | 1101 | /** |
1089 | * Supported Crypto Algorithms | 1102 | * Supported Crypto Algorithms |
1090 | */ | 1103 | */ |
@@ -1266,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1266 | struct resource res; | 1279 | struct resource res; |
1267 | struct device *dev = &ofdev->dev; | 1280 | struct device *dev = &ofdev->dev; |
1268 | struct crypto4xx_core_device *core_dev; | 1281 | struct crypto4xx_core_device *core_dev; |
1282 | u32 pvr; | ||
1283 | bool is_revb = true; | ||
1269 | 1284 | ||
1270 | rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); | 1285 | rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); |
1271 | if (rc) | 1286 | if (rc) |
@@ -1282,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1282 | mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); | 1297 | mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); |
1283 | mtdcri(SDR0, PPC405EX_SDR0_SRST, | 1298 | mtdcri(SDR0, PPC405EX_SDR0_SRST, |
1284 | mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); | 1299 | mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); |
1300 | is_revb = false; | ||
1285 | } else if (of_find_compatible_node(NULL, NULL, | 1301 | } else if (of_find_compatible_node(NULL, NULL, |
1286 | "amcc,ppc460sx-crypto")) { | 1302 | "amcc,ppc460sx-crypto")) { |
1287 | mtdcri(SDR0, PPC460SX_SDR0_SRST, | 1303 | mtdcri(SDR0, PPC460SX_SDR0_SRST, |
@@ -1304,7 +1320,22 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1304 | if (!core_dev->dev) | 1320 | if (!core_dev->dev) |
1305 | goto err_alloc_dev; | 1321 | goto err_alloc_dev; |
1306 | 1322 | ||
1323 | /* | ||
1324 | * Older versions of the 460EX/GT have a hardware bug. | ||
1325 | * Hence they do not support H/W-based security interrupt coalescing. | ||
1326 | */ | ||
1327 | pvr = mfspr(SPRN_PVR); | ||
1328 | if (is_revb && ((pvr >> 4) == 0x130218A)) { | ||
1329 | u32 min = PVR_MIN(pvr); | ||
1330 | |||
1331 | if (min < 4) { | ||
1332 | dev_info(dev, "RevA detected - disable interrupt coalescing\n"); | ||
1333 | is_revb = false; | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1307 | core_dev->dev->core_dev = core_dev; | 1337 | core_dev->dev->core_dev = core_dev; |
1338 | core_dev->dev->is_revb = is_revb; | ||
1308 | core_dev->device = dev; | 1339 | core_dev->device = dev; |
1309 | spin_lock_init(&core_dev->lock); | 1340 | spin_lock_init(&core_dev->lock); |
1310 | INIT_LIST_HEAD(&core_dev->dev->alg_list); | 1341 | INIT_LIST_HEAD(&core_dev->dev->alg_list); |
@@ -1325,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1325 | tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb, | 1356 | tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb, |
1326 | (unsigned long) dev); | 1357 | (unsigned long) dev); |
1327 | 1358 | ||
1328 | /* Register for Crypto isr, Crypto Engine IRQ */ | ||
1329 | core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); | ||
1330 | rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, | ||
1331 | core_dev->dev->name, dev); | ||
1332 | if (rc) | ||
1333 | goto err_request_irq; | ||
1334 | |||
1335 | core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); | 1359 | core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); |
1336 | if (!core_dev->dev->ce_base) { | 1360 | if (!core_dev->dev->ce_base) { |
1337 | dev_err(dev, "failed to of_iomap\n"); | 1361 | dev_err(dev, "failed to of_iomap\n"); |
@@ -1339,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1339 | goto err_iomap; | 1363 | goto err_iomap; |
1340 | } | 1364 | } |
1341 | 1365 | ||
1366 | /* Register for Crypto isr, Crypto Engine IRQ */ | ||
1367 | core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); | ||
1368 | rc = request_irq(core_dev->irq, is_revb ? | ||
1369 | crypto4xx_ce_interrupt_handler_revb : | ||
1370 | crypto4xx_ce_interrupt_handler, 0, | ||
1371 | KBUILD_MODNAME, dev); | ||
1372 | if (rc) | ||
1373 | goto err_request_irq; | ||
1374 | |||
1342 | /* need to setup pdr, rdr, gdr and sdr before this */ | 1375 | /* need to setup pdr, rdr, gdr and sdr before this */ |
1343 | crypto4xx_hw_init(core_dev->dev); | 1376 | crypto4xx_hw_init(core_dev->dev); |
1344 | 1377 | ||
@@ -1352,11 +1385,11 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1352 | return 0; | 1385 | return 0; |
1353 | 1386 | ||
1354 | err_start_dev: | 1387 | err_start_dev: |
1355 | iounmap(core_dev->dev->ce_base); | ||
1356 | err_iomap: | ||
1357 | free_irq(core_dev->irq, dev); | 1388 | free_irq(core_dev->irq, dev); |
1358 | err_request_irq: | 1389 | err_request_irq: |
1359 | irq_dispose_mapping(core_dev->irq); | 1390 | irq_dispose_mapping(core_dev->irq); |
1391 | iounmap(core_dev->dev->ce_base); | ||
1392 | err_iomap: | ||
1360 | tasklet_kill(&core_dev->tasklet); | 1393 | tasklet_kill(&core_dev->tasklet); |
1361 | err_build_sdr: | 1394 | err_build_sdr: |
1362 | crypto4xx_destroy_sdr(core_dev->dev); | 1395 | crypto4xx_destroy_sdr(core_dev->dev); |
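With request_irq() moved after of_iomap(), the probe error labels above are reordered so the unwind path releases resources in exactly the reverse order of acquisition; keeping that invariant is the whole point of the label chain. A self-contained sketch of the idiom (the get_/put_ resources are stand-ins for ioremap, IRQ, and tasklet setup):

#include <errno.h>
#include <stdbool.h>

static bool get_a(void) { return true; }
static bool get_b(void) { return true; }
static bool get_c(void) { return true; }
static void put_a(void) { }
static void put_b(void) { }

static int example_probe(void)
{
	if (!get_a())
		return -ENOMEM;
	if (!get_b())
		goto err_a;
	if (!get_c())
		goto err_b;
	return 0;

err_b:	/* undo everything acquired before c, newest first */
	put_b();
err_a:
	put_a();
	return -ENODEV;
}

int main(void) { return example_probe(); }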
@@ -1397,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match); | |||
1397 | 1430 | ||
1398 | static struct platform_driver crypto4xx_driver = { | 1431 | static struct platform_driver crypto4xx_driver = { |
1399 | .driver = { | 1432 | .driver = { |
1400 | .name = MODULE_NAME, | 1433 | .name = KBUILD_MODNAME, |
1401 | .of_match_table = crypto4xx_match, | 1434 | .of_match_table = crypto4xx_match, |
1402 | }, | 1435 | }, |
1403 | .probe = crypto4xx_probe, | 1436 | .probe = crypto4xx_probe, |
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 8ac3bd37203b..23b726da6534 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #include "crypto4xx_reg_def.h" | 28 | #include "crypto4xx_reg_def.h" |
29 | #include "crypto4xx_sa.h" | 29 | #include "crypto4xx_sa.h" |
30 | 30 | ||
31 | #define MODULE_NAME "crypto4xx" | ||
32 | |||
33 | #define PPC460SX_SDR0_SRST 0x201 | 31 | #define PPC460SX_SDR0_SRST 0x201 |
34 | #define PPC405EX_SDR0_SRST 0x200 | 32 | #define PPC405EX_SDR0_SRST 0x200 |
35 | #define PPC460EX_SDR0_SRST 0x201 | 33 | #define PPC460EX_SDR0_SRST 0x201 |
@@ -82,7 +80,6 @@ struct pd_uinfo { | |||
82 | 80 | ||
83 | struct crypto4xx_device { | 81 | struct crypto4xx_device { |
84 | struct crypto4xx_core_device *core_dev; | 82 | struct crypto4xx_core_device *core_dev; |
85 | char *name; | ||
86 | void __iomem *ce_base; | 83 | void __iomem *ce_base; |
87 | void __iomem *trng_base; | 84 | void __iomem *trng_base; |
88 | 85 | ||
@@ -109,6 +106,7 @@ struct crypto4xx_device { | |||
109 | struct list_head alg_list; /* List of algorithm supported | 106 | struct list_head alg_list; /* List of algorithm supported |
110 | by this device */ | 107 | by this device */ |
111 | struct ratelimit_state aead_ratelimit; | 108 | struct ratelimit_state aead_ratelimit; |
109 | bool is_revb; | ||
112 | }; | 110 | }; |
113 | 111 | ||
114 | struct crypto4xx_core_device { | 112 | struct crypto4xx_core_device { |
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h index 0a22ec5d1a96..472331787e04 100644 --- a/drivers/crypto/amcc/crypto4xx_reg_def.h +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h | |||
@@ -121,13 +121,15 @@ | |||
121 | #define PPC4XX_PD_SIZE 6 | 121 | #define PPC4XX_PD_SIZE 6 |
122 | #define PPC4XX_CTX_DONE_INT 0x2000 | 122 | #define PPC4XX_CTX_DONE_INT 0x2000 |
123 | #define PPC4XX_PD_DONE_INT 0x8000 | 123 | #define PPC4XX_PD_DONE_INT 0x8000 |
124 | #define PPC4XX_TMO_ERR_INT 0x40000 | ||
124 | #define PPC4XX_BYTE_ORDER 0x22222 | 125 | #define PPC4XX_BYTE_ORDER 0x22222 |
125 | #define PPC4XX_INTERRUPT_CLR 0x3ffff | 126 | #define PPC4XX_INTERRUPT_CLR 0x3ffff |
126 | #define PPC4XX_PRNG_CTRL_AUTO_EN 0x3 | 127 | #define PPC4XX_PRNG_CTRL_AUTO_EN 0x3 |
127 | #define PPC4XX_DC_3DES_EN 1 | 128 | #define PPC4XX_DC_3DES_EN 1 |
128 | #define PPC4XX_TRNG_EN 0x00020000 | 129 | #define PPC4XX_TRNG_EN 0x00020000 |
129 | #define PPC4XX_INT_DESCR_CNT 4 | 130 | #define PPC4XX_INT_DESCR_CNT 7 |
130 | #define PPC4XX_INT_TIMEOUT_CNT 0 | 131 | #define PPC4XX_INT_TIMEOUT_CNT 0 |
132 | #define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF | ||
131 | #define PPC4XX_INT_CFG 1 | 133 | #define PPC4XX_INT_CFG 1 |
132 | /** | 134 | /** |
133 | * all follow define are ad hoc | 135 | * all follow define are ad hoc |
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c index 677ca17fd223..5e63742b0d22 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.c +++ b/drivers/crypto/amcc/crypto4xx_trng.c | |||
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev) | |||
92 | if (!rng) | 92 | if (!rng) |
93 | goto err_out; | 93 | goto err_out; |
94 | 94 | ||
95 | rng->name = MODULE_NAME; | 95 | rng->name = KBUILD_MODNAME; |
96 | rng->data_present = ppc4xx_trng_data_present; | 96 | rng->data_present = ppc4xx_trng_data_present; |
97 | rng->data_read = ppc4xx_trng_data_read; | 97 | rng->data_read = ppc4xx_trng_data_read; |
98 | rng->priv = (unsigned long) dev; | 98 | rng->priv = (unsigned long) dev; |
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 456278440863..0fb8bbf41a8d 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | 23 | ||
24 | #include <crypto/aes.h> | 24 | #include <crypto/aes.h> |
25 | #include <crypto/gcm.h> | ||
25 | #include <crypto/internal/aead.h> | 26 | #include <crypto/internal/aead.h> |
26 | #include <crypto/internal/hash.h> | 27 | #include <crypto/internal/hash.h> |
27 | #include <crypto/internal/skcipher.h> | 28 | #include <crypto/internal/skcipher.h> |
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) | |||
1934 | 1935 | ||
1935 | memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); | 1936 | memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); |
1936 | // The HW omits the initial increment of the counter field. | 1937 | // The HW omits the initial increment of the counter field. |
1937 | crypto_inc(req_ctx->hw_ctx.J0+12, 4); | 1938 | memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4); |
1938 | 1939 | ||
1939 | ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, | 1940 | ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, |
1940 | sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); | 1941 | sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); |
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = { | |||
2956 | .setkey = artpec6_crypto_aead_set_key, | 2957 | .setkey = artpec6_crypto_aead_set_key, |
2957 | .encrypt = artpec6_crypto_aead_encrypt, | 2958 | .encrypt = artpec6_crypto_aead_encrypt, |
2958 | .decrypt = artpec6_crypto_aead_decrypt, | 2959 | .decrypt = artpec6_crypto_aead_decrypt, |
2959 | .ivsize = AES_BLOCK_SIZE, | 2960 | .ivsize = GCM_AES_IV_SIZE, |
2960 | .maxauthsize = AES_BLOCK_SIZE, | 2961 | .maxauthsize = AES_BLOCK_SIZE, |
2961 | 2962 | ||
2962 | .base = { | 2963 | .base = { |
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev) | |||
3041 | variant = (enum artpec6_crypto_variant)match->data; | 3042 | variant = (enum artpec6_crypto_variant)match->data; |
3042 | 3043 | ||
3043 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 3044 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
3044 | if (!res) | ||
3045 | return -ENODEV; | ||
3046 | |||
3047 | base = devm_ioremap_resource(&pdev->dev, res); | 3045 | base = devm_ioremap_resource(&pdev->dev, res); |
3048 | if (IS_ERR(base)) | 3046 | if (IS_ERR(base)) |
3049 | return PTR_ERR(base); | 3047 | return PTR_ERR(base); |
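The artpec6 fix replaces crypto_inc() on a zeroed counter field with an explicit constant: for the (now correctly sized) 96-bit GCM IV, NIST SP 800-38D defines J0 = IV || 0^31 || 1, so the final four bytes of J0 are always the big-endian constant 1. A small sketch of that construction:

#include <stdint.h>
#include <string.h>

#define GCM_AES_IV_SIZE 12	/* 96-bit IV */

/* J0 = IV || 0^31 || 1 for 96-bit IVs (NIST SP 800-38D) */
static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[GCM_AES_IV_SIZE])
{
	memcpy(j0, iv, GCM_AES_IV_SIZE);
	memcpy(j0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
}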
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index ce70b44d0fb6..2b75f95bbe1b 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <crypto/authenc.h> | 42 | #include <crypto/authenc.h> |
43 | #include <crypto/skcipher.h> | 43 | #include <crypto/skcipher.h> |
44 | #include <crypto/hash.h> | 44 | #include <crypto/hash.h> |
45 | #include <crypto/aes.h> | ||
46 | #include <crypto/sha3.h> | 45 | #include <crypto/sha3.h> |
47 | 46 | ||
48 | #include "util.h" | 47 | #include "util.h" |
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index a118b9bed669..bfbf8bf77f03 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c | |||
@@ -494,7 +494,8 @@ static struct ahash_alg algs = { | |||
494 | .cra_driver_name = DRIVER_NAME, | 494 | .cra_driver_name = DRIVER_NAME, |
495 | .cra_priority = 100, | 495 | .cra_priority = 100, |
496 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | 496 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
497 | CRYPTO_ALG_ASYNC, | 497 | CRYPTO_ALG_ASYNC | |
498 | CRYPTO_ALG_OPTIONAL_KEY, | ||
498 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 499 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
499 | .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), | 500 | .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), |
500 | .cra_alignmask = 3, | 501 | .cra_alignmask = 3, |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index baa8dd52472d..2188235be02d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -108,6 +108,7 @@ struct caam_ctx { | |||
108 | dma_addr_t sh_desc_dec_dma; | 108 | dma_addr_t sh_desc_dec_dma; |
109 | dma_addr_t sh_desc_givenc_dma; | 109 | dma_addr_t sh_desc_givenc_dma; |
110 | dma_addr_t key_dma; | 110 | dma_addr_t key_dma; |
111 | enum dma_data_direction dir; | ||
111 | struct device *jrdev; | 112 | struct device *jrdev; |
112 | struct alginfo adata; | 113 | struct alginfo adata; |
113 | struct alginfo cdata; | 114 | struct alginfo cdata; |
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
118 | { | 119 | { |
119 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 120 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
120 | struct device *jrdev = ctx->jrdev; | 121 | struct device *jrdev = ctx->jrdev; |
122 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); | ||
121 | u32 *desc; | 123 | u32 *desc; |
122 | int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - | 124 | int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - |
123 | ctx->adata.keylen_pad; | 125 | ctx->adata.keylen_pad; |
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
136 | 138 | ||
137 | /* aead_encrypt shared descriptor */ | 139 | /* aead_encrypt shared descriptor */ |
138 | desc = ctx->sh_desc_enc; | 140 | desc = ctx->sh_desc_enc; |
139 | cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); | 141 | cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, |
142 | ctrlpriv->era); | ||
140 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 143 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
141 | desc_bytes(desc), DMA_TO_DEVICE); | 144 | desc_bytes(desc), ctx->dir); |
142 | 145 | ||
143 | /* | 146 | /* |
144 | * Job Descriptor and Shared Descriptors | 147 | * Job Descriptor and Shared Descriptors |
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
154 | 157 | ||
155 | /* aead_decrypt shared descriptor */ | 158 | /* aead_decrypt shared descriptor */ |
156 | desc = ctx->sh_desc_dec; | 159 | desc = ctx->sh_desc_dec; |
157 | cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); | 160 | cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, |
161 | ctrlpriv->era); | ||
158 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 162 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
159 | desc_bytes(desc), DMA_TO_DEVICE); | 163 | desc_bytes(desc), ctx->dir); |
160 | 164 | ||
161 | return 0; | 165 | return 0; |
162 | } | 166 | } |
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
168 | unsigned int ivsize = crypto_aead_ivsize(aead); | 172 | unsigned int ivsize = crypto_aead_ivsize(aead); |
169 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 173 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
170 | struct device *jrdev = ctx->jrdev; | 174 | struct device *jrdev = ctx->jrdev; |
175 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); | ||
171 | u32 ctx1_iv_off = 0; | 176 | u32 ctx1_iv_off = 0; |
172 | u32 *desc, *nonce = NULL; | 177 | u32 *desc, *nonce = NULL; |
173 | u32 inl_mask; | 178 | u32 inl_mask; |
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
234 | desc = ctx->sh_desc_enc; | 239 | desc = ctx->sh_desc_enc; |
235 | cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, | 240 | cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, |
236 | ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, | 241 | ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, |
237 | false); | 242 | false, ctrlpriv->era); |
238 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 243 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
239 | desc_bytes(desc), DMA_TO_DEVICE); | 244 | desc_bytes(desc), ctx->dir); |
240 | 245 | ||
241 | skip_enc: | 246 | skip_enc: |
242 | /* | 247 | /* |
@@ -266,9 +271,9 @@ skip_enc: | |||
266 | desc = ctx->sh_desc_dec; | 271 | desc = ctx->sh_desc_dec; |
267 | cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, | 272 | cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, |
268 | ctx->authsize, alg->caam.geniv, is_rfc3686, | 273 | ctx->authsize, alg->caam.geniv, is_rfc3686, |
269 | nonce, ctx1_iv_off, false); | 274 | nonce, ctx1_iv_off, false, ctrlpriv->era); |
270 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 275 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
271 | desc_bytes(desc), DMA_TO_DEVICE); | 276 | desc_bytes(desc), ctx->dir); |
272 | 277 | ||
273 | if (!alg->caam.geniv) | 278 | if (!alg->caam.geniv) |
274 | goto skip_givenc; | 279 | goto skip_givenc; |
@@ -300,9 +305,9 @@ skip_enc: | |||
300 | desc = ctx->sh_desc_enc; | 305 | desc = ctx->sh_desc_enc; |
301 | cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, | 306 | cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, |
302 | ctx->authsize, is_rfc3686, nonce, | 307 | ctx->authsize, is_rfc3686, nonce, |
303 | ctx1_iv_off, false); | 308 | ctx1_iv_off, false, ctrlpriv->era); |
304 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 309 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
305 | desc_bytes(desc), DMA_TO_DEVICE); | 310 | desc_bytes(desc), ctx->dir); |
306 | 311 | ||
307 | skip_givenc: | 312 | skip_givenc: |
308 | return 0; | 313 | return 0; |
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
346 | desc = ctx->sh_desc_enc; | 351 | desc = ctx->sh_desc_enc; |
347 | cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); | 352 | cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); |
348 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 353 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
349 | desc_bytes(desc), DMA_TO_DEVICE); | 354 | desc_bytes(desc), ctx->dir); |
350 | 355 | ||
351 | /* | 356 | /* |
352 | * Job Descriptor and Shared Descriptors | 357 | * Job Descriptor and Shared Descriptors |
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
363 | desc = ctx->sh_desc_dec; | 368 | desc = ctx->sh_desc_dec; |
364 | cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); | 369 | cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); |
365 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 370 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
366 | desc_bytes(desc), DMA_TO_DEVICE); | 371 | desc_bytes(desc), ctx->dir); |
367 | 372 | ||
368 | return 0; | 373 | return 0; |
369 | } | 374 | } |
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
405 | desc = ctx->sh_desc_enc; | 410 | desc = ctx->sh_desc_enc; |
406 | cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); | 411 | cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); |
407 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 412 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
408 | desc_bytes(desc), DMA_TO_DEVICE); | 413 | desc_bytes(desc), ctx->dir); |
409 | 414 | ||
410 | /* | 415 | /* |
411 | * Job Descriptor and Shared Descriptors | 416 | * Job Descriptor and Shared Descriptors |
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
422 | desc = ctx->sh_desc_dec; | 427 | desc = ctx->sh_desc_dec; |
423 | cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); | 428 | cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); |
424 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 429 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
425 | desc_bytes(desc), DMA_TO_DEVICE); | 430 | desc_bytes(desc), ctx->dir); |
426 | 431 | ||
427 | return 0; | 432 | return 0; |
428 | } | 433 | } |
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
465 | desc = ctx->sh_desc_enc; | 470 | desc = ctx->sh_desc_enc; |
466 | cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); | 471 | cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); |
467 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 472 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
468 | desc_bytes(desc), DMA_TO_DEVICE); | 473 | desc_bytes(desc), ctx->dir); |
469 | 474 | ||
470 | /* | 475 | /* |
471 | * Job Descriptor and Shared Descriptors | 476 | * Job Descriptor and Shared Descriptors |
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
482 | desc = ctx->sh_desc_dec; | 487 | desc = ctx->sh_desc_dec; |
483 | cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); | 488 | cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); |
484 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 489 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
485 | desc_bytes(desc), DMA_TO_DEVICE); | 490 | desc_bytes(desc), ctx->dir); |
486 | 491 | ||
487 | return 0; | 492 | return 0; |
488 | } | 493 | } |
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead, | |||
503 | { | 508 | { |
504 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 509 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
505 | struct device *jrdev = ctx->jrdev; | 510 | struct device *jrdev = ctx->jrdev; |
511 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); | ||
506 | struct crypto_authenc_keys keys; | 512 | struct crypto_authenc_keys keys; |
507 | int ret = 0; | 513 | int ret = 0; |
508 | 514 | ||
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead, | |||
517 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 523 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
518 | #endif | 524 | #endif |
519 | 525 | ||
526 | /* | ||
527 | * If DKP is supported, use it in the shared descriptor to generate | ||
528 | * the split key. | ||
529 | */ | ||
530 | if (ctrlpriv->era >= 6) { | ||
531 | ctx->adata.keylen = keys.authkeylen; | ||
532 | ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & | ||
533 | OP_ALG_ALGSEL_MASK); | ||
534 | |||
535 | if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) | ||
536 | goto badkey; | ||
537 | |||
538 | memcpy(ctx->key, keys.authkey, keys.authkeylen); | ||
539 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, | ||
540 | keys.enckeylen); | ||
541 | dma_sync_single_for_device(jrdev, ctx->key_dma, | ||
542 | ctx->adata.keylen_pad + | ||
543 | keys.enckeylen, ctx->dir); | ||
544 | goto skip_split_key; | ||
545 | } | ||
546 | |||
520 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, | 547 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, |
521 | keys.authkeylen, CAAM_MAX_KEY_SIZE - | 548 | keys.authkeylen, CAAM_MAX_KEY_SIZE - |
522 | keys.enckeylen); | 549 | keys.enckeylen); |
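On era 6+ parts the driver skips software split-key generation entirely: the raw authentication key stays at the front of the context buffer and the Derived Key Protocol lets the shared descriptor expand it in hardware, while the encryption key is appended at the padded offset in either path. A sketch of the resulting key-buffer layout (names are illustrative, not the driver's):

#include <string.h>

/* ctx->key layout in both paths:
 *   [ auth key region, keylen_pad bytes | encryption key ]
 * only the contents of the first region differ (raw key vs. split key). */
static void pack_keys(unsigned char *buf,
		      const void *authkey, size_t authkeylen,
		      size_t keylen_pad,
		      const void *enckey, size_t enckeylen)
{
	memcpy(buf, authkey, authkeylen);             /* front: auth key */
	memcpy(buf + keylen_pad, enckey, enckeylen);  /* after padding: enc */
}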
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead, | |||
527 | /* postpend encryption key to auth split key */ | 554 | /* postpend encryption key to auth split key */ |
528 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); | 555 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
529 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + | 556 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + |
530 | keys.enckeylen, DMA_TO_DEVICE); | 557 | keys.enckeylen, ctx->dir); |
531 | #ifdef DEBUG | 558 | #ifdef DEBUG |
532 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 559 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
533 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 560 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
534 | ctx->adata.keylen_pad + keys.enckeylen, 1); | 561 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
535 | #endif | 562 | #endif |
563 | |||
564 | skip_split_key: | ||
536 | ctx->cdata.keylen = keys.enckeylen; | 565 | ctx->cdata.keylen = keys.enckeylen; |
537 | return aead_set_sh_desc(aead); | 566 | return aead_set_sh_desc(aead); |
538 | badkey: | 567 | badkey: |
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
552 | #endif | 581 | #endif |
553 | 582 | ||
554 | memcpy(ctx->key, key, keylen); | 583 | memcpy(ctx->key, key, keylen); |
555 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); | 584 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); |
556 | ctx->cdata.keylen = keylen; | 585 | ctx->cdata.keylen = keylen; |
557 | 586 | ||
558 | return gcm_set_sh_desc(aead); | 587 | return gcm_set_sh_desc(aead); |
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
580 | */ | 609 | */ |
581 | ctx->cdata.keylen = keylen - 4; | 610 | ctx->cdata.keylen = keylen - 4; |
582 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, | 611 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, |
583 | DMA_TO_DEVICE); | 612 | ctx->dir); |
584 | return rfc4106_set_sh_desc(aead); | 613 | return rfc4106_set_sh_desc(aead); |
585 | } | 614 | } |
586 | 615 | ||
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
606 | */ | 635 | */ |
607 | ctx->cdata.keylen = keylen - 4; | 636 | ctx->cdata.keylen = keylen - 4; |
608 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, | 637 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, |
609 | DMA_TO_DEVICE); | 638 | ctx->dir); |
610 | return rfc4543_set_sh_desc(aead); | 639 | return rfc4543_set_sh_desc(aead); |
611 | } | 640 | } |
612 | 641 | ||
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
625 | const bool is_rfc3686 = (ctr_mode && | 654 | const bool is_rfc3686 = (ctr_mode && |
626 | (strstr(alg_name, "rfc3686") != NULL)); | 655 | (strstr(alg_name, "rfc3686") != NULL)); |
627 | 656 | ||
628 | memcpy(ctx->key, key, keylen); | ||
629 | #ifdef DEBUG | 657 | #ifdef DEBUG |
630 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 658 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", |
631 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 659 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
648 | keylen -= CTR_RFC3686_NONCE_SIZE; | 676 | keylen -= CTR_RFC3686_NONCE_SIZE; |
649 | } | 677 | } |
650 | 678 | ||
651 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); | ||
652 | ctx->cdata.keylen = keylen; | 679 | ctx->cdata.keylen = keylen; |
653 | ctx->cdata.key_virt = ctx->key; | 680 | ctx->cdata.key_virt = key; |
654 | ctx->cdata.key_inline = true; | 681 | ctx->cdata.key_inline = true; |
655 | 682 | ||
656 | /* ablkcipher_encrypt shared descriptor */ | 683 | /* ablkcipher_encrypt shared descriptor */ |
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
658 | cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, | 685 | cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, |
659 | ctx1_iv_off); | 686 | ctx1_iv_off); |
660 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 687 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
661 | desc_bytes(desc), DMA_TO_DEVICE); | 688 | desc_bytes(desc), ctx->dir); |
662 | 689 | ||
663 | /* ablkcipher_decrypt shared descriptor */ | 690 | /* ablkcipher_decrypt shared descriptor */ |
664 | desc = ctx->sh_desc_dec; | 691 | desc = ctx->sh_desc_dec; |
665 | cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, | 692 | cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, |
666 | ctx1_iv_off); | 693 | ctx1_iv_off); |
667 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 694 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
668 | desc_bytes(desc), DMA_TO_DEVICE); | 695 | desc_bytes(desc), ctx->dir); |
669 | 696 | ||
670 | /* ablkcipher_givencrypt shared descriptor */ | 697 | /* ablkcipher_givencrypt shared descriptor */ |
671 | desc = ctx->sh_desc_givenc; | 698 | desc = ctx->sh_desc_givenc; |
672 | cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, | 699 | cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, |
673 | ctx1_iv_off); | 700 | ctx1_iv_off); |
674 | dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, | 701 | dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, |
675 | desc_bytes(desc), DMA_TO_DEVICE); | 702 | desc_bytes(desc), ctx->dir); |
676 | 703 | ||
677 | return 0; | 704 | return 0; |
678 | } | 705 | } |
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
691 | return -EINVAL; | 718 | return -EINVAL; |
692 | } | 719 | } |
693 | 720 | ||
694 | memcpy(ctx->key, key, keylen); | ||
695 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); | ||
696 | ctx->cdata.keylen = keylen; | 721 | ctx->cdata.keylen = keylen; |
697 | ctx->cdata.key_virt = ctx->key; | 722 | ctx->cdata.key_virt = key; |
698 | ctx->cdata.key_inline = true; | 723 | ctx->cdata.key_inline = true; |
699 | 724 | ||
700 | /* xts_ablkcipher_encrypt shared descriptor */ | 725 | /* xts_ablkcipher_encrypt shared descriptor */ |
701 | desc = ctx->sh_desc_enc; | 726 | desc = ctx->sh_desc_enc; |
702 | cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); | 727 | cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); |
703 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, | 728 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
704 | desc_bytes(desc), DMA_TO_DEVICE); | 729 | desc_bytes(desc), ctx->dir); |
705 | 730 | ||
706 | /* xts_ablkcipher_decrypt shared descriptor */ | 731 | /* xts_ablkcipher_decrypt shared descriptor */ |
707 | desc = ctx->sh_desc_dec; | 732 | desc = ctx->sh_desc_dec; |
708 | cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); | 733 | cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); |
709 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, | 734 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
710 | desc_bytes(desc), DMA_TO_DEVICE); | 735 | desc_bytes(desc), ctx->dir); |
711 | 736 | ||
712 | return 0; | 737 | return 0; |
713 | } | 738 | } |
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req, | |||
979 | append_seq_out_ptr(desc, dst_dma, | 1004 | append_seq_out_ptr(desc, dst_dma, |
980 | req->assoclen + req->cryptlen - authsize, | 1005 | req->assoclen + req->cryptlen - authsize, |
981 | out_options); | 1006 | out_options); |
982 | |||
983 | /* REG3 = assoclen */ | ||
984 | append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); | ||
985 | } | 1007 | } |
986 | 1008 | ||
987 | static void init_gcm_job(struct aead_request *req, | 1009 | static void init_gcm_job(struct aead_request *req, |
@@ -996,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req, | |||
996 | unsigned int last; | 1018 | unsigned int last; |
997 | 1019 | ||
998 | init_aead_job(req, edesc, all_contig, encrypt); | 1020 | init_aead_job(req, edesc, all_contig, encrypt); |
1021 | append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); | ||
999 | 1022 | ||
1000 | /* BUG This should not be specific to generic GCM. */ | 1023 | /* BUG This should not be specific to generic GCM. */ |
1001 | last = 0; | 1024 | last = 0; |
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req, | |||
1022 | struct caam_aead_alg, aead); | 1045 | struct caam_aead_alg, aead); |
1023 | unsigned int ivsize = crypto_aead_ivsize(aead); | 1046 | unsigned int ivsize = crypto_aead_ivsize(aead); |
1024 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1047 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
1048 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); | ||
1025 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == | 1049 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
1026 | OP_ALG_AAI_CTR_MOD128); | 1050 | OP_ALG_AAI_CTR_MOD128); |
1027 | const bool is_rfc3686 = alg->caam.rfc3686; | 1051 | const bool is_rfc3686 = alg->caam.rfc3686; |
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req, | |||
1045 | 1069 | ||
1046 | init_aead_job(req, edesc, all_contig, encrypt); | 1070 | init_aead_job(req, edesc, all_contig, encrypt); |
1047 | 1071 | ||
1072 | /* | ||
1073 | * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports | ||
1074 | * having DPOVRD as destination. | ||
1075 | */ | ||
1076 | if (ctrlpriv->era < 3) | ||
1077 | append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); | ||
1078 | else | ||
1079 | append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); | ||
1080 | |||
1048 | if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) | 1081 | if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) |
1049 | append_load_as_imm(desc, req->iv, ivsize, | 1082 | append_load_as_imm(desc, req->iv, ivsize, |
1050 | LDST_CLASS_1_CCB | | 1083 | LDST_CLASS_1_CCB | |
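The assoclen math moves out of init_aead_job() because GCM and authenc now disagree on where assoclen lives: init_gcm_job() keeps REG3 unconditionally, while init_authenc_job() on SEC Era >= 3 can target the DPOVRD register directly (MATH commands gained DPOVRD as a destination; see the MATH_DEST_DPOVRD define later in this section), freeing REG3. Condensed:

    /* Sketch: each job initializer now picks the assoclen destination. */
    if (ctrlpriv->era < 3)  /* older SECs: MATH cannot write DPOVRD */
            append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
    else
            append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

The shared descriptors must read assoclen back from the same register, which is why the era value is threaded through the cnstr_shdsc_aead_* constructors below.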
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg { | |||
3228 | struct caam_alg_entry caam; | 3261 | struct caam_alg_entry caam; |
3229 | }; | 3262 | }; |
3230 | 3263 | ||
3231 | static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) | 3264 | static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
3265 | bool uses_dkp) | ||
3232 | { | 3266 | { |
3233 | dma_addr_t dma_addr; | 3267 | dma_addr_t dma_addr; |
3268 | struct caam_drv_private *priv; | ||
3234 | 3269 | ||
3235 | ctx->jrdev = caam_jr_alloc(); | 3270 | ctx->jrdev = caam_jr_alloc(); |
3236 | if (IS_ERR(ctx->jrdev)) { | 3271 | if (IS_ERR(ctx->jrdev)) { |
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) | |||
3238 | return PTR_ERR(ctx->jrdev); | 3273 | return PTR_ERR(ctx->jrdev); |
3239 | } | 3274 | } |
3240 | 3275 | ||
3276 | priv = dev_get_drvdata(ctx->jrdev->parent); | ||
3277 | if (priv->era >= 6 && uses_dkp) | ||
3278 | ctx->dir = DMA_BIDIRECTIONAL; | ||
3279 | else | ||
3280 | ctx->dir = DMA_TO_DEVICE; | ||
3281 | |||
3241 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, | 3282 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, |
3242 | offsetof(struct caam_ctx, | 3283 | offsetof(struct caam_ctx, |
3243 | sh_desc_enc_dma), | 3284 | sh_desc_enc_dma), |
3244 | DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); | 3285 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
3245 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { | 3286 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { |
3246 | dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); | 3287 | dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); |
3247 | caam_jr_free(ctx->jrdev); | 3288 | caam_jr_free(ctx->jrdev); |
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
3269 | container_of(alg, struct caam_crypto_alg, crypto_alg); | 3310 | container_of(alg, struct caam_crypto_alg, crypto_alg); |
3270 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 3311 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
3271 | 3312 | ||
3272 | return caam_init_common(ctx, &caam_alg->caam); | 3313 | return caam_init_common(ctx, &caam_alg->caam, false); |
3273 | } | 3314 | } |
3274 | 3315 | ||
3275 | static int caam_aead_init(struct crypto_aead *tfm) | 3316 | static int caam_aead_init(struct crypto_aead *tfm) |
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm) | |||
3279 | container_of(alg, struct caam_aead_alg, aead); | 3320 | container_of(alg, struct caam_aead_alg, aead); |
3280 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); | 3321 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); |
3281 | 3322 | ||
3282 | return caam_init_common(ctx, &caam_alg->caam); | 3323 | return caam_init_common(ctx, &caam_alg->caam, |
3324 | alg->setkey == aead_setkey); | ||
3283 | } | 3325 | } |
3284 | 3326 | ||
3285 | static void caam_exit_common(struct caam_ctx *ctx) | 3327 | static void caam_exit_common(struct caam_ctx *ctx) |
3286 | { | 3328 | { |
3287 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, | 3329 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, |
3288 | offsetof(struct caam_ctx, sh_desc_enc_dma), | 3330 | offsetof(struct caam_ctx, sh_desc_enc_dma), |
3289 | DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); | 3331 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
3290 | caam_jr_free(ctx->jrdev); | 3332 | caam_jr_free(ctx->jrdev); |
3291 | } | 3333 | } |
3292 | 3334 | ||
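Why DMA_BIDIRECTIONAL: with DKP the SEC derives the split key inside the shared descriptor and writes the result back over the raw key staged there, so the device now writes into a buffer the driver previously only read from. The direction is chosen once per tfm:

    /* Sketch: direction selection at tfm init, as in the hunk above. */
    priv = dev_get_drvdata(ctx->jrdev->parent);
    ctx->dir = (priv->era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL
                                            : DMA_TO_DEVICE;

    dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
                                    offsetof(struct caam_ctx, sh_desc_enc_dma),
                                    ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);

uses_dkp is true only for algorithms whose setkey actually emits a DKP operation, hence caam_aead_init() passing alg->setkey == aead_setkey.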
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 530c14ee32de..ceb93fbb76e6 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c | |||
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type) | |||
45 | * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor | 45 | * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor |
46 | * (non-protocol) with no (null) encryption. | 46 | * (non-protocol) with no (null) encryption. |
47 | * @desc: pointer to buffer used for descriptor construction | 47 | * @desc: pointer to buffer used for descriptor construction |
48 | * @adata: pointer to authentication transform definitions. Note that since a | 48 | * @adata: pointer to authentication transform definitions. |
49 | * split key is to be used, the size of the split key itself is | 49 | * A split key is required for SEC Era < 6; the size of the split key |
50 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | 50 | * is specified in this case. Valid algorithm values - one of |
51 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | 51 | * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
52 | * with OP_ALG_AAI_HMAC_PRECOMP. | ||
52 | * @icvsize: integrity check value (ICV) size (truncated or full) | 53 | * @icvsize: integrity check value (ICV) size (truncated or full) |
53 | * | 54 | * @era: SEC Era |
54 | * Note: Requires an MDHA split key. | ||
55 | */ | 55 | */ |
56 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | 56 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, |
57 | unsigned int icvsize) | 57 | unsigned int icvsize, int era) |
58 | { | 58 | { |
59 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; | 59 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; |
60 | 60 | ||
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | |||
63 | /* Skip if already shared */ | 63 | /* Skip if already shared */ |
64 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 64 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
65 | JUMP_COND_SHRD); | 65 | JUMP_COND_SHRD); |
66 | if (adata->key_inline) | 66 | if (era < 6) { |
67 | append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, | 67 | if (adata->key_inline) |
68 | adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | | 68 | append_key_as_imm(desc, adata->key_virt, |
69 | KEY_ENC); | 69 | adata->keylen_pad, adata->keylen, |
70 | else | 70 | CLASS_2 | KEY_DEST_MDHA_SPLIT | |
71 | append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | | 71 | KEY_ENC); |
72 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 72 | else |
73 | append_key(desc, adata->key_dma, adata->keylen, | ||
74 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
75 | } else { | ||
76 | append_proto_dkp(desc, adata); | ||
77 | } | ||
73 | set_jump_tgt_here(desc, key_jump_cmd); | 78 | set_jump_tgt_here(desc, key_jump_cmd); |
74 | 79 | ||
75 | /* assoclen + cryptlen = seqinlen */ | 80 | /* assoclen + cryptlen = seqinlen */ |
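All four AEAD descriptor builders now share the same key-loading prologue: on SEC Era < 6 the precomputed MDHA split key is loaded with a KEY command (inline or by reference), while Era >= 6 parts emit a single DKP protocol operation and let the hardware derive the split key from the raw HMAC key. A sketch of that prologue, factored into a hypothetical helper for clarity:

    /* Sketch: era-dependent authentication key load (hypothetical helper). */
    static void load_auth_key(u32 * const desc, struct alginfo *adata, int era)
    {
            if (era < 6) {
                    if (adata->key_inline)
                            append_key_as_imm(desc, adata->key_virt,
                                              adata->keylen_pad, adata->keylen,
                                              CLASS_2 | KEY_DEST_MDHA_SPLIT |
                                              KEY_ENC);
                    else
                            append_key(desc, adata->key_dma, adata->keylen,
                                       CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
            } else {
                    append_proto_dkp(desc, adata);  /* SEC derives the key */
            }
    }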
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); | |||
121 | * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor | 126 | * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor |
122 | * (non-protocol) with no (null) decryption. | 127 | * (non-protocol) with no (null) decryption. |
123 | * @desc: pointer to buffer used for descriptor construction | 128 | * @desc: pointer to buffer used for descriptor construction |
124 | * @adata: pointer to authentication transform definitions. Note that since a | 129 | * @adata: pointer to authentication transform definitions. |
125 | * split key is to be used, the size of the split key itself is | 130 | * A split key is required for SEC Era < 6; the size of the split key |
126 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | 131 | * is specified in this case. Valid algorithm values - one of |
127 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | 132 | * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
133 | * with OP_ALG_AAI_HMAC_PRECOMP. | ||
128 | * @icvsize: integrity check value (ICV) size (truncated or full) | 134 | * @icvsize: integrity check value (ICV) size (truncated or full) |
129 | * | 135 | * @era: SEC Era |
130 | * Note: Requires an MDHA split key. | ||
131 | */ | 136 | */ |
132 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | 137 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, |
133 | unsigned int icvsize) | 138 | unsigned int icvsize, int era) |
134 | { | 139 | { |
135 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; | 140 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; |
136 | 141 | ||
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | |||
139 | /* Skip if already shared */ | 144 | /* Skip if already shared */ |
140 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 145 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
141 | JUMP_COND_SHRD); | 146 | JUMP_COND_SHRD); |
142 | if (adata->key_inline) | 147 | if (era < 6) { |
143 | append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, | 148 | if (adata->key_inline) |
144 | adata->keylen, CLASS_2 | | 149 | append_key_as_imm(desc, adata->key_virt, |
145 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 150 | adata->keylen_pad, adata->keylen, |
146 | else | 151 | CLASS_2 | KEY_DEST_MDHA_SPLIT | |
147 | append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | | 152 | KEY_ENC); |
148 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 153 | else |
154 | append_key(desc, adata->key_dma, adata->keylen, | ||
155 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
156 | } else { | ||
157 | append_proto_dkp(desc, adata); | ||
158 | } | ||
149 | set_jump_tgt_here(desc, key_jump_cmd); | 159 | set_jump_tgt_here(desc, key_jump_cmd); |
150 | 160 | ||
151 | /* Class 2 operation */ | 161 | /* Class 2 operation */ |
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); | |||
204 | static void init_sh_desc_key_aead(u32 * const desc, | 214 | static void init_sh_desc_key_aead(u32 * const desc, |
205 | struct alginfo * const cdata, | 215 | struct alginfo * const cdata, |
206 | struct alginfo * const adata, | 216 | struct alginfo * const adata, |
207 | const bool is_rfc3686, u32 *nonce) | 217 | const bool is_rfc3686, u32 *nonce, int era) |
208 | { | 218 | { |
209 | u32 *key_jump_cmd; | 219 | u32 *key_jump_cmd; |
210 | unsigned int enckeylen = cdata->keylen; | 220 | unsigned int enckeylen = cdata->keylen; |
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc, | |||
224 | if (is_rfc3686) | 234 | if (is_rfc3686) |
225 | enckeylen -= CTR_RFC3686_NONCE_SIZE; | 235 | enckeylen -= CTR_RFC3686_NONCE_SIZE; |
226 | 236 | ||
227 | if (adata->key_inline) | 237 | if (era < 6) { |
228 | append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, | 238 | if (adata->key_inline) |
229 | adata->keylen, CLASS_2 | | 239 | append_key_as_imm(desc, adata->key_virt, |
230 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 240 | adata->keylen_pad, adata->keylen, |
231 | else | 241 | CLASS_2 | KEY_DEST_MDHA_SPLIT | |
232 | append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | | 242 | KEY_ENC); |
233 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 243 | else |
244 | append_key(desc, adata->key_dma, adata->keylen, | ||
245 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
246 | } else { | ||
247 | append_proto_dkp(desc, adata); | ||
248 | } | ||
234 | 249 | ||
235 | if (cdata->key_inline) | 250 | if (cdata->key_inline) |
236 | append_key_as_imm(desc, cdata->key_virt, enckeylen, | 251 | append_key_as_imm(desc, cdata->key_virt, enckeylen, |
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc, | |||
261 | * @cdata: pointer to block cipher transform definitions | 276 | * @cdata: pointer to block cipher transform definitions |
262 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | 277 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed |
263 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | 278 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. |
264 | * @adata: pointer to authentication transform definitions. Note that since a | 279 | * @adata: pointer to authentication transform definitions. |
265 | * split key is to be used, the size of the split key itself is | 280 | * A split key is required for SEC Era < 6; the size of the split key |
266 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | 281 | * is specified in this case. Valid algorithm values - one of |
267 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | 282 | * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
283 | * with OP_ALG_AAI_HMAC_PRECOMP. | ||
268 | * @ivsize: initialization vector size | 284 | * @ivsize: initialization vector size |
269 | * @icvsize: integrity check value (ICV) size (truncated or full) | 285 | * @icvsize: integrity check value (ICV) size (truncated or full) |
270 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | 286 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template |
271 | * @nonce: pointer to rfc3686 nonce | 287 | * @nonce: pointer to rfc3686 nonce |
272 | * @ctx1_iv_off: IV offset in CONTEXT1 register | 288 | * @ctx1_iv_off: IV offset in CONTEXT1 register |
273 | * @is_qi: true when called from caam/qi | 289 | * @is_qi: true when called from caam/qi |
274 | * | 290 | * @era: SEC Era |
275 | * Note: Requires an MDHA split key. | ||
276 | */ | 291 | */ |
277 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | 292 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, |
278 | struct alginfo *adata, unsigned int ivsize, | 293 | struct alginfo *adata, unsigned int ivsize, |
279 | unsigned int icvsize, const bool is_rfc3686, | 294 | unsigned int icvsize, const bool is_rfc3686, |
280 | u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) | 295 | u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, |
296 | int era) | ||
281 | { | 297 | { |
282 | /* Note: Context registers are saved. */ | 298 | /* Note: Context registers are saved. */ |
283 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); | 299 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
284 | 300 | ||
285 | /* Class 2 operation */ | 301 | /* Class 2 operation */ |
286 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | 302 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | |||
306 | } | 322 | } |
307 | 323 | ||
308 | /* Read and write assoclen bytes */ | 324 | /* Read and write assoclen bytes */ |
309 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 325 | if (is_qi || era < 3) { |
310 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 326 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
327 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
328 | } else { | ||
329 | append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); | ||
330 | append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); | ||
331 | } | ||
311 | 332 | ||
312 | /* Skip assoc data */ | 333 | /* Skip assoc data */ |
313 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 334 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
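Reading assoclen back mirrors the write side in the job descriptors: REG3 when called from caam/qi or on Era < 3, DPOVRD otherwise. Because the append_math_* helpers are token-pasting macros in desc_constr.h, the register name cannot be hoisted into a variable, so both branches stay spelled out. The decap variant (next hunks) follows the same pattern, except that with geniv the output length also covers the IV:

    /* Sketch: recover assoclen into the variable sequence lengths. */
    if (is_qi || era < 3) {
            append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
            if (geniv)      /* decap w/ generated IV: account for ivsize */
                    append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
                                            ivsize);
            else
                    append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
                                    CAAM_CMD_SZ);
    } else {
            append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
            if (geniv)
                    append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
                                            ivsize);
            else
                    append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
                                    CAAM_CMD_SZ);
    }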
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap); | |||
350 | * @cdata: pointer to block cipher transform definitions | 371 | * @cdata: pointer to block cipher transform definitions |
351 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | 372 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed |
352 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | 373 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. |
353 | * @adata: pointer to authentication transform definitions. Note that since a | 374 | * @adata: pointer to authentication transform definitions. |
354 | * split key is to be used, the size of the split key itself is | 375 | * A split key is required for SEC Era < 6; the size of the split key |
355 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | 376 | * is specified in this case. Valid algorithm values - one of |
356 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | 377 | * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
378 | * with OP_ALG_AAI_HMAC_PRECOMP. | ||
357 | * @ivsize: initialization vector size | 379 | * @ivsize: initialization vector size |
358 | * @icvsize: integrity check value (ICV) size (truncated or full) | 380 | * @icvsize: integrity check value (ICV) size (truncated or full) |
359 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | 381 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template |
360 | * @nonce: pointer to rfc3686 nonce | 382 | * @nonce: pointer to rfc3686 nonce |
361 | * @ctx1_iv_off: IV offset in CONTEXT1 register | 383 | * @ctx1_iv_off: IV offset in CONTEXT1 register |
362 | * @is_qi: true when called from caam/qi | 384 | * @is_qi: true when called from caam/qi |
363 | * | 385 | * @era: SEC Era |
364 | * Note: Requires an MDHA split key. | ||
365 | */ | 386 | */ |
366 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | 387 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, |
367 | struct alginfo *adata, unsigned int ivsize, | 388 | struct alginfo *adata, unsigned int ivsize, |
368 | unsigned int icvsize, const bool geniv, | 389 | unsigned int icvsize, const bool geniv, |
369 | const bool is_rfc3686, u32 *nonce, | 390 | const bool is_rfc3686, u32 *nonce, |
370 | const u32 ctx1_iv_off, const bool is_qi) | 391 | const u32 ctx1_iv_off, const bool is_qi, int era) |
371 | { | 392 | { |
372 | /* Note: Context registers are saved. */ | 393 | /* Note: Context registers are saved. */ |
373 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); | 394 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
374 | 395 | ||
375 | /* Class 2 operation */ | 396 | /* Class 2 operation */ |
376 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | | 397 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | |||
397 | } | 418 | } |
398 | 419 | ||
399 | /* Read and write assoclen bytes */ | 420 | /* Read and write assoclen bytes */ |
400 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 421 | if (is_qi || era < 3) { |
401 | if (geniv) | 422 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
402 | append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); | 423 | if (geniv) |
403 | else | 424 | append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, |
404 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 425 | ivsize); |
426 | else | ||
427 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, | ||
428 | CAAM_CMD_SZ); | ||
429 | } else { | ||
430 | append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); | ||
431 | if (geniv) | ||
432 | append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, | ||
433 | ivsize); | ||
434 | else | ||
435 | append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, | ||
436 | CAAM_CMD_SZ); | ||
437 | } | ||
405 | 438 | ||
406 | /* Skip assoc data */ | 439 | /* Skip assoc data */ |
407 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 440 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap); | |||
456 | * @cdata: pointer to block cipher transform definitions | 489 | * @cdata: pointer to block cipher transform definitions |
457 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed | 490 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed |
458 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. | 491 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. |
459 | * @adata: pointer to authentication transform definitions. Note that since a | 492 | * @adata: pointer to authentication transform definitions. |
460 | * split key is to be used, the size of the split key itself is | 493 | * A split key is required for SEC Era < 6; the size of the split key |
461 | * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, | 494 | * is specified in this case. Valid algorithm values - one of |
462 | * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. | 495 | * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
496 | * with OP_ALG_AAI_HMAC_PRECOMP. | ||
463 | * @ivsize: initialization vector size | 497 | * @ivsize: initialization vector size |
464 | * @icvsize: integrity check value (ICV) size (truncated or full) | 498 | * @icvsize: integrity check value (ICV) size (truncated or full) |
465 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template | 499 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template |
466 | * @nonce: pointer to rfc3686 nonce | 500 | * @nonce: pointer to rfc3686 nonce |
467 | * @ctx1_iv_off: IV offset in CONTEXT1 register | 501 | * @ctx1_iv_off: IV offset in CONTEXT1 register |
468 | * @is_qi: true when called from caam/qi | 502 | * @is_qi: true when called from caam/qi |
469 | * | 503 | * @era: SEC Era |
470 | * Note: Requires an MDHA split key. | ||
471 | */ | 504 | */ |
472 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, | 505 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, |
473 | struct alginfo *adata, unsigned int ivsize, | 506 | struct alginfo *adata, unsigned int ivsize, |
474 | unsigned int icvsize, const bool is_rfc3686, | 507 | unsigned int icvsize, const bool is_rfc3686, |
475 | u32 *nonce, const u32 ctx1_iv_off, | 508 | u32 *nonce, const u32 ctx1_iv_off, |
476 | const bool is_qi) | 509 | const bool is_qi, int era) |
477 | { | 510 | { |
478 | u32 geniv, moveiv; | 511 | u32 geniv, moveiv; |
479 | 512 | ||
480 | /* Note: Context registers are saved. */ | 513 | /* Note: Context registers are saved. */ |
481 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); | 514 | init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
482 | 515 | ||
483 | if (is_qi) { | 516 | if (is_qi) { |
484 | u32 *wait_load_cmd; | 517 | u32 *wait_load_cmd; |
@@ -528,8 +561,13 @@ copy_iv: | |||
528 | OP_ALG_ENCRYPT); | 561 | OP_ALG_ENCRYPT); |
529 | 562 | ||
530 | /* Read and write assoclen bytes */ | 563 | /* Read and write assoclen bytes */ |
531 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 564 | if (is_qi || era < 3) { |
532 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 565 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
566 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
567 | } else { | ||
568 | append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); | ||
569 | append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); | ||
570 | } | ||
533 | 571 | ||
534 | /* Skip assoc data */ | 572 | /* Skip assoc data */ |
535 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 573 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, | |||
1075 | 1113 | ||
1076 | /* Load nonce into CONTEXT1 reg */ | 1114 | /* Load nonce into CONTEXT1 reg */ |
1077 | if (is_rfc3686) { | 1115 | if (is_rfc3686) { |
1078 | u8 *nonce = cdata->key_virt + cdata->keylen; | 1116 | const u8 *nonce = cdata->key_virt + cdata->keylen; |
1079 | 1117 | ||
1080 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | 1118 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
1081 | LDST_CLASS_IND_CCB | | 1119 | LDST_CLASS_IND_CCB | |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, | |||
1140 | 1178 | ||
1141 | /* Load nonce into CONTEXT1 reg */ | 1179 | /* Load nonce into CONTEXT1 reg */ |
1142 | if (is_rfc3686) { | 1180 | if (is_rfc3686) { |
1143 | u8 *nonce = cdata->key_virt + cdata->keylen; | 1181 | const u8 *nonce = cdata->key_virt + cdata->keylen; |
1144 | 1182 | ||
1145 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | 1183 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
1146 | LDST_CLASS_IND_CCB | | 1184 | LDST_CLASS_IND_CCB | |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata, | |||
1209 | 1247 | ||
1210 | /* Load Nonce into CONTEXT1 reg */ | 1248 | /* Load Nonce into CONTEXT1 reg */ |
1211 | if (is_rfc3686) { | 1249 | if (is_rfc3686) { |
1212 | u8 *nonce = cdata->key_virt + cdata->keylen; | 1250 | const u8 *nonce = cdata->key_virt + cdata->keylen; |
1213 | 1251 | ||
1214 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, | 1252 | append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, |
1215 | LDST_CLASS_IND_CCB | | 1253 | LDST_CLASS_IND_CCB | |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index e412ec8f7005..5f9445ae2114 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h | |||
@@ -43,28 +43,28 @@ | |||
43 | 15 * CAAM_CMD_SZ) | 43 | 15 * CAAM_CMD_SZ) |
44 | 44 | ||
45 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | 45 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, |
46 | unsigned int icvsize); | 46 | unsigned int icvsize, int era); |
47 | 47 | ||
48 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | 48 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, |
49 | unsigned int icvsize); | 49 | unsigned int icvsize, int era); |
50 | 50 | ||
51 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | 51 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, |
52 | struct alginfo *adata, unsigned int ivsize, | 52 | struct alginfo *adata, unsigned int ivsize, |
53 | unsigned int icvsize, const bool is_rfc3686, | 53 | unsigned int icvsize, const bool is_rfc3686, |
54 | u32 *nonce, const u32 ctx1_iv_off, | 54 | u32 *nonce, const u32 ctx1_iv_off, |
55 | const bool is_qi); | 55 | const bool is_qi, int era); |
56 | 56 | ||
57 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | 57 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, |
58 | struct alginfo *adata, unsigned int ivsize, | 58 | struct alginfo *adata, unsigned int ivsize, |
59 | unsigned int icvsize, const bool geniv, | 59 | unsigned int icvsize, const bool geniv, |
60 | const bool is_rfc3686, u32 *nonce, | 60 | const bool is_rfc3686, u32 *nonce, |
61 | const u32 ctx1_iv_off, const bool is_qi); | 61 | const u32 ctx1_iv_off, const bool is_qi, int era); |
62 | 62 | ||
63 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, | 63 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, |
64 | struct alginfo *adata, unsigned int ivsize, | 64 | struct alginfo *adata, unsigned int ivsize, |
65 | unsigned int icvsize, const bool is_rfc3686, | 65 | unsigned int icvsize, const bool is_rfc3686, |
66 | u32 *nonce, const u32 ctx1_iv_off, | 66 | u32 *nonce, const u32 ctx1_iv_off, |
67 | const bool is_qi); | 67 | const bool is_qi, int era); |
68 | 68 | ||
69 | void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, | 69 | void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, |
70 | unsigned int icvsize); | 70 | unsigned int icvsize); |
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index f9f08fce4356..4aecc9435f69 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -53,6 +53,7 @@ struct caam_ctx { | |||
53 | u32 sh_desc_givenc[DESC_MAX_USED_LEN]; | 53 | u32 sh_desc_givenc[DESC_MAX_USED_LEN]; |
54 | u8 key[CAAM_MAX_KEY_SIZE]; | 54 | u8 key[CAAM_MAX_KEY_SIZE]; |
55 | dma_addr_t key_dma; | 55 | dma_addr_t key_dma; |
56 | enum dma_data_direction dir; | ||
56 | struct alginfo adata; | 57 | struct alginfo adata; |
57 | struct alginfo cdata; | 58 | struct alginfo cdata; |
58 | unsigned int authsize; | 59 | unsigned int authsize; |
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
74 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == | 75 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
75 | OP_ALG_AAI_CTR_MOD128); | 76 | OP_ALG_AAI_CTR_MOD128); |
76 | const bool is_rfc3686 = alg->caam.rfc3686; | 77 | const bool is_rfc3686 = alg->caam.rfc3686; |
78 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); | ||
77 | 79 | ||
78 | if (!ctx->cdata.keylen || !ctx->authsize) | 80 | if (!ctx->cdata.keylen || !ctx->authsize) |
79 | return 0; | 81 | return 0; |
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
124 | 126 | ||
125 | cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, | 127 | cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, |
126 | ivsize, ctx->authsize, is_rfc3686, nonce, | 128 | ivsize, ctx->authsize, is_rfc3686, nonce, |
127 | ctx1_iv_off, true); | 129 | ctx1_iv_off, true, ctrlpriv->era); |
128 | 130 | ||
129 | skip_enc: | 131 | skip_enc: |
130 | /* aead_decrypt shared descriptor */ | 132 | /* aead_decrypt shared descriptor */ |
@@ -149,7 +151,8 @@ skip_enc: | |||
149 | 151 | ||
150 | cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, | 152 | cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, |
151 | ivsize, ctx->authsize, alg->caam.geniv, | 153 | ivsize, ctx->authsize, alg->caam.geniv, |
152 | is_rfc3686, nonce, ctx1_iv_off, true); | 154 | is_rfc3686, nonce, ctx1_iv_off, true, |
155 | ctrlpriv->era); | ||
153 | 156 | ||
154 | if (!alg->caam.geniv) | 157 | if (!alg->caam.geniv) |
155 | goto skip_givenc; | 158 | goto skip_givenc; |
@@ -176,7 +179,7 @@ skip_enc: | |||
176 | 179 | ||
177 | cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, | 180 | cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, |
178 | ivsize, ctx->authsize, is_rfc3686, nonce, | 181 | ivsize, ctx->authsize, is_rfc3686, nonce, |
179 | ctx1_iv_off, true); | 182 | ctx1_iv_off, true, ctrlpriv->era); |
180 | 183 | ||
181 | skip_givenc: | 184 | skip_givenc: |
182 | return 0; | 185 | return 0; |
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
197 | { | 200 | { |
198 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 201 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
199 | struct device *jrdev = ctx->jrdev; | 202 | struct device *jrdev = ctx->jrdev; |
203 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); | ||
200 | struct crypto_authenc_keys keys; | 204 | struct crypto_authenc_keys keys; |
201 | int ret = 0; | 205 | int ret = 0; |
202 | 206 | ||
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
211 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 215 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
212 | #endif | 216 | #endif |
213 | 217 | ||
218 | /* | ||
219 | * If DKP is supported, use it in the shared descriptor to generate | ||
220 | * the split key. | ||
221 | */ | ||
222 | if (ctrlpriv->era >= 6) { | ||
223 | ctx->adata.keylen = keys.authkeylen; | ||
224 | ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & | ||
225 | OP_ALG_ALGSEL_MASK); | ||
226 | |||
227 | if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) | ||
228 | goto badkey; | ||
229 | |||
230 | memcpy(ctx->key, keys.authkey, keys.authkeylen); | ||
231 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, | ||
232 | keys.enckeylen); | ||
233 | dma_sync_single_for_device(jrdev, ctx->key_dma, | ||
234 | ctx->adata.keylen_pad + | ||
235 | keys.enckeylen, ctx->dir); | ||
236 | goto skip_split_key; | ||
237 | } | ||
238 | |||
214 | ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, | 239 | ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, |
215 | keys.authkeylen, CAAM_MAX_KEY_SIZE - | 240 | keys.authkeylen, CAAM_MAX_KEY_SIZE - |
216 | keys.enckeylen); | 241 | keys.enckeylen); |
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
220 | /* append encryption key to auth split key */ | 245 | /* append encryption key to auth split key */ |
221 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); | 246 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
222 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + | 247 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + |
223 | keys.enckeylen, DMA_TO_DEVICE); | 248 | keys.enckeylen, ctx->dir); |
224 | #ifdef DEBUG | 249 | #ifdef DEBUG |
225 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | 250 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", |
226 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 251 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
227 | ctx->adata.keylen_pad + keys.enckeylen, 1); | 252 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
228 | #endif | 253 | #endif |
229 | 254 | ||
255 | skip_split_key: | ||
230 | ctx->cdata.keylen = keys.enckeylen; | 256 | ctx->cdata.keylen = keys.enckeylen; |
231 | 257 | ||
232 | ret = aead_set_sh_desc(aead); | 258 | ret = aead_set_sh_desc(aead); |
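On Era >= 6 no split-key generation job is run at setkey time; instead the raw authentication key is staged at the front of ctx->key, sized as if it were already a split key, and the DKP operation in the shared descriptor later replaces it in place with the derived key. That device write-back is exactly what the DMA_BIDIRECTIONAL mapping exists for. The resulting layout, sketched:

    /*
     * Sketch: ctx->key layout on the DKP path (Era >= 6)
     *
     *   0                keylen_pad                     +enckeylen
     *   +----------------+------------------------------+
     *   | raw auth key   |       encryption key         |
     *   +----------------+------------------------------+
     *     ^-- DKP overwrites this slot with the split key
     */
    ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                          OP_ALG_ALGSEL_MASK);
    memcpy(ctx->key, keys.authkey, keys.authkeylen);
    memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);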
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
272 | const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); | 298 | const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); |
273 | int ret = 0; | 299 | int ret = 0; |
274 | 300 | ||
275 | memcpy(ctx->key, key, keylen); | ||
276 | #ifdef DEBUG | 301 | #ifdef DEBUG |
277 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 302 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
278 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 303 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
295 | keylen -= CTR_RFC3686_NONCE_SIZE; | 320 | keylen -= CTR_RFC3686_NONCE_SIZE; |
296 | } | 321 | } |
297 | 322 | ||
298 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); | ||
299 | ctx->cdata.keylen = keylen; | 323 | ctx->cdata.keylen = keylen; |
300 | ctx->cdata.key_virt = ctx->key; | 324 | ctx->cdata.key_virt = key; |
301 | ctx->cdata.key_inline = true; | 325 | ctx->cdata.key_inline = true; |
302 | 326 | ||
303 | /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ | 327 | /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ |
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
356 | return -EINVAL; | 380 | return -EINVAL; |
357 | } | 381 | } |
358 | 382 | ||
359 | memcpy(ctx->key, key, keylen); | ||
360 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); | ||
361 | ctx->cdata.keylen = keylen; | 383 | ctx->cdata.keylen = keylen; |
362 | ctx->cdata.key_virt = ctx->key; | 384 | ctx->cdata.key_virt = key; |
363 | ctx->cdata.key_inline = true; | 385 | ctx->cdata.key_inline = true; |
364 | 386 | ||
365 | /* xts ablkcipher encrypt, decrypt shared descriptors */ | 387 | /* xts ablkcipher encrypt, decrypt shared descriptors */ |
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
668 | qm_sg_ents = 1 + !!ivsize + mapped_src_nents + | 690 | qm_sg_ents = 1 + !!ivsize + mapped_src_nents + |
669 | (mapped_dst_nents > 1 ? mapped_dst_nents : 0); | 691 | (mapped_dst_nents > 1 ? mapped_dst_nents : 0); |
670 | if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { | 692 | if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { |
671 | dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", | 693 | dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", |
672 | qm_sg_ents, CAAM_QI_MAX_AEAD_SG); | 694 | qm_sg_ents, CAAM_QI_MAX_AEAD_SG); |
673 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 695 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
674 | iv_dma, ivsize, op_type, 0, 0); | 696 | iv_dma, ivsize, op_type, 0, 0); |
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
905 | 927 | ||
906 | qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 928 | qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; |
907 | if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { | 929 | if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { |
908 | dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", | 930 | dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", |
909 | qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); | 931 | qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); |
910 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 932 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
911 | iv_dma, ivsize, op_type, 0, 0); | 933 | iv_dma, ivsize, op_type, 0, 0); |
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
1058 | } | 1080 | } |
1059 | 1081 | ||
1060 | if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { | 1082 | if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { |
1061 | dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", | 1083 | dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", |
1062 | qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); | 1084 | qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); |
1063 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1085 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
1064 | iv_dma, ivsize, GIVENCRYPT, 0, 0); | 1086 | iv_dma, ivsize, GIVENCRYPT, 0, 0); |
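The format-string fixes are size_t hygiene: the CAAM_QI_MAX_*_SG limits expand to size_t expressions, and the only portable printk specifier for size_t is %zu; %lu merely happens to match on 64-bit builds and warns on 32-bit ones. In general:

    int buf[16];
    size_t n = sizeof(buf) / sizeof(buf[0]);

    pr_info("have %zu entries\n", n);   /* %zu, never %lu, for size_t */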
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg { | |||
2123 | struct caam_alg_entry caam; | 2145 | struct caam_alg_entry caam; |
2124 | }; | 2146 | }; |
2125 | 2147 | ||
2126 | static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) | 2148 | static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
2149 | bool uses_dkp) | ||
2127 | { | 2150 | { |
2128 | struct caam_drv_private *priv; | 2151 | struct caam_drv_private *priv; |
2129 | 2152 | ||
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) | |||
2137 | return PTR_ERR(ctx->jrdev); | 2160 | return PTR_ERR(ctx->jrdev); |
2138 | } | 2161 | } |
2139 | 2162 | ||
2163 | priv = dev_get_drvdata(ctx->jrdev->parent); | ||
2164 | if (priv->era >= 6 && uses_dkp) | ||
2165 | ctx->dir = DMA_BIDIRECTIONAL; | ||
2166 | else | ||
2167 | ctx->dir = DMA_TO_DEVICE; | ||
2168 | |||
2140 | ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), | 2169 | ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), |
2141 | DMA_TO_DEVICE); | 2170 | ctx->dir); |
2142 | if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { | 2171 | if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { |
2143 | dev_err(ctx->jrdev, "unable to map key\n"); | 2172 | dev_err(ctx->jrdev, "unable to map key\n"); |
2144 | caam_jr_free(ctx->jrdev); | 2173 | caam_jr_free(ctx->jrdev); |
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) | |||
2149 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; | 2178 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
2150 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; | 2179 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
2151 | 2180 | ||
2152 | priv = dev_get_drvdata(ctx->jrdev->parent); | ||
2153 | ctx->qidev = priv->qidev; | 2181 | ctx->qidev = priv->qidev; |
2154 | 2182 | ||
2155 | spin_lock_init(&ctx->lock); | 2183 | spin_lock_init(&ctx->lock); |
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
2167 | crypto_alg); | 2195 | crypto_alg); |
2168 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 2196 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
2169 | 2197 | ||
2170 | return caam_init_common(ctx, &caam_alg->caam); | 2198 | return caam_init_common(ctx, &caam_alg->caam, false); |
2171 | } | 2199 | } |
2172 | 2200 | ||
2173 | static int caam_aead_init(struct crypto_aead *tfm) | 2201 | static int caam_aead_init(struct crypto_aead *tfm) |
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm) | |||
2177 | aead); | 2205 | aead); |
2178 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); | 2206 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); |
2179 | 2207 | ||
2180 | return caam_init_common(ctx, &caam_alg->caam); | 2208 | return caam_init_common(ctx, &caam_alg->caam, |
2209 | alg->setkey == aead_setkey); | ||
2181 | } | 2210 | } |
2182 | 2211 | ||
2183 | static void caam_exit_common(struct caam_ctx *ctx) | 2212 | static void caam_exit_common(struct caam_ctx *ctx) |
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx) | |||
2186 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); | 2215 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); |
2187 | caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); | 2216 | caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); |
2188 | 2217 | ||
2189 | dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), | 2218 | dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); |
2190 | DMA_TO_DEVICE); | ||
2191 | 2219 | ||
2192 | caam_jr_free(ctx->jrdev); | 2220 | caam_jr_free(ctx->jrdev); |
2193 | } | 2221 | } |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 616720a04e7a..0beb28196e20 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -107,6 +107,7 @@ struct caam_hash_ctx { | |||
107 | dma_addr_t sh_desc_update_first_dma; | 107 | dma_addr_t sh_desc_update_first_dma; |
108 | dma_addr_t sh_desc_fin_dma; | 108 | dma_addr_t sh_desc_fin_dma; |
109 | dma_addr_t sh_desc_digest_dma; | 109 | dma_addr_t sh_desc_digest_dma; |
110 | enum dma_data_direction dir; | ||
110 | struct device *jrdev; | 111 | struct device *jrdev; |
111 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; | 112 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; |
112 | int ctx_len; | 113 | int ctx_len; |
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, | |||
241 | * read and write to seqout | 242 | * read and write to seqout |
242 | */ | 243 | */ |
243 | static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, | 244 | static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, |
244 | struct caam_hash_ctx *ctx, bool import_ctx) | 245 | struct caam_hash_ctx *ctx, bool import_ctx, |
246 | int era) | ||
245 | { | 247 | { |
246 | u32 op = ctx->adata.algtype; | 248 | u32 op = ctx->adata.algtype; |
247 | u32 *skip_key_load; | 249 | u32 *skip_key_load; |
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, | |||
254 | skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | 256 | skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
255 | JUMP_COND_SHRD); | 257 | JUMP_COND_SHRD); |
256 | 258 | ||
257 | append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, | 259 | if (era < 6) |
258 | ctx->adata.keylen, CLASS_2 | | 260 | append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, |
259 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 261 | ctx->adata.keylen, CLASS_2 | |
262 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
263 | else | ||
264 | append_proto_dkp(desc, &ctx->adata); | ||
260 | 265 | ||
261 | set_jump_tgt_here(desc, skip_key_load); | 266 | set_jump_tgt_here(desc, skip_key_load); |
262 | 267 | ||
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
289 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 294 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
290 | int digestsize = crypto_ahash_digestsize(ahash); | 295 | int digestsize = crypto_ahash_digestsize(ahash); |
291 | struct device *jrdev = ctx->jrdev; | 296 | struct device *jrdev = ctx->jrdev; |
297 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); | ||
292 | u32 *desc; | 298 | u32 *desc; |
293 | 299 | ||
300 | ctx->adata.key_virt = ctx->key; | ||
301 | |||
294 | /* ahash_update shared descriptor */ | 302 | /* ahash_update shared descriptor */ |
295 | desc = ctx->sh_desc_update; | 303 | desc = ctx->sh_desc_update; |
296 | ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); | 304 | ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true, |
305 | ctrlpriv->era); | ||
297 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, | 306 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
298 | desc_bytes(desc), DMA_TO_DEVICE); | 307 | desc_bytes(desc), ctx->dir); |
299 | #ifdef DEBUG | 308 | #ifdef DEBUG |
300 | print_hex_dump(KERN_ERR, | 309 | print_hex_dump(KERN_ERR, |
301 | "ahash update shdesc@"__stringify(__LINE__)": ", | 310 | "ahash update shdesc@"__stringify(__LINE__)": ", |
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
304 | 313 | ||
305 | /* ahash_update_first shared descriptor */ | 314 | /* ahash_update_first shared descriptor */ |
306 | desc = ctx->sh_desc_update_first; | 315 | desc = ctx->sh_desc_update_first; |
307 | ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); | 316 | ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false, |
317 | ctrlpriv->era); | ||
308 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 318 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
309 | desc_bytes(desc), DMA_TO_DEVICE); | 319 | desc_bytes(desc), ctx->dir); |
310 | #ifdef DEBUG | 320 | #ifdef DEBUG |
311 | print_hex_dump(KERN_ERR, | 321 | print_hex_dump(KERN_ERR, |
312 | "ahash update first shdesc@"__stringify(__LINE__)": ", | 322 | "ahash update first shdesc@"__stringify(__LINE__)": ", |
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
315 | 325 | ||
316 | /* ahash_final shared descriptor */ | 326 | /* ahash_final shared descriptor */ |
317 | desc = ctx->sh_desc_fin; | 327 | desc = ctx->sh_desc_fin; |
318 | ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); | 328 | ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true, |
329 | ctrlpriv->era); | ||
319 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, | 330 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
320 | desc_bytes(desc), DMA_TO_DEVICE); | 331 | desc_bytes(desc), ctx->dir); |
321 | #ifdef DEBUG | 332 | #ifdef DEBUG |
322 | print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", | 333 | print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", |
323 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 334 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
326 | 337 | ||
327 | /* ahash_digest shared descriptor */ | 338 | /* ahash_digest shared descriptor */ |
328 | desc = ctx->sh_desc_digest; | 339 | desc = ctx->sh_desc_digest; |
329 | ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); | 340 | ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false, |
341 | ctrlpriv->era); | ||
330 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, | 342 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
331 | desc_bytes(desc), DMA_TO_DEVICE); | 343 | desc_bytes(desc), ctx->dir); |
332 | #ifdef DEBUG | 344 | #ifdef DEBUG |
333 | print_hex_dump(KERN_ERR, | 345 | print_hex_dump(KERN_ERR, |
334 | "ahash digest shdesc@"__stringify(__LINE__)": ", | 346 | "ahash digest shdesc@"__stringify(__LINE__)": ", |
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
421 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 433 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
422 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | 434 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
423 | int digestsize = crypto_ahash_digestsize(ahash); | 435 | int digestsize = crypto_ahash_digestsize(ahash); |
436 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); | ||
424 | int ret; | 437 | int ret; |
425 | u8 *hashed_key = NULL; | 438 | u8 *hashed_key = NULL; |
426 | 439 | ||
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
441 | key = hashed_key; | 454 | key = hashed_key; |
442 | } | 455 | } |
443 | 456 | ||
444 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, | 457 | /* |
445 | CAAM_MAX_HASH_KEY_SIZE); | 458 | * If DKP is supported, use it in the shared descriptor to generate |
446 | if (ret) | 459 | * the split key. |
447 | goto bad_free_key; | 460 | */ |
461 | if (ctrlpriv->era >= 6) { | ||
462 | ctx->adata.key_inline = true; | ||
463 | ctx->adata.keylen = keylen; | ||
464 | ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & | ||
465 | OP_ALG_ALGSEL_MASK); | ||
448 | 466 | ||
449 | #ifdef DEBUG | 467 | if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) |
450 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 468 | goto bad_free_key; |
451 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 469 | |
452 | ctx->adata.keylen_pad, 1); | 470 | memcpy(ctx->key, key, keylen); |
453 | #endif | 471 | } else { |
472 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, | ||
473 | keylen, CAAM_MAX_HASH_KEY_SIZE); | ||
474 | if (ret) | ||
475 | goto bad_free_key; | ||
476 | } | ||
454 | 477 | ||
455 | kfree(hashed_key); | 478 | kfree(hashed_key); |
456 | return ahash_set_sh_desc(ahash); | 479 | return ahash_set_sh_desc(ahash); |
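Keyed hashing gets the same treatment as authenc above: on Era >= 6 the gen_split_key() job disappears from setkey, the raw key is kept and later inlined into the shared descriptors (adata.key_virt = ctx->key in ahash_set_sh_desc()), and keylen_pad is set to the size the derived split key will occupy so the descriptor-length checks still hold. Condensed, with the goto-based error path simplified to returns:

    /* Sketch: DKP path sizes the context for the key DKP will derive. */
    if (ctrlpriv->era >= 6) {
            ctx->adata.key_inline = true;
            ctx->adata.keylen = keylen;
            ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                                  OP_ALG_ALGSEL_MASK);
            if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                    return -EINVAL;
            memcpy(ctx->key, key, keylen);
    } else {
            ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
                                keylen, CAAM_MAX_HASH_KEY_SIZE);
            if (ret)
                    return ret;
    }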
@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
1715 | HASH_MSG_LEN + 64, | 1738 | HASH_MSG_LEN + 64, |
1716 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | 1739 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; |
1717 | dma_addr_t dma_addr; | 1740 | dma_addr_t dma_addr; |
1741 | struct caam_drv_private *priv; | ||
1718 | 1742 | ||
1719 | /* | 1743 | /* |
1720 | * Get a Job ring from Job Ring driver to ensure in-order | 1744 | * Get a Job ring from Job Ring driver to ensure in-order |
@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
1726 | return PTR_ERR(ctx->jrdev); | 1750 | return PTR_ERR(ctx->jrdev); |
1727 | } | 1751 | } |
1728 | 1752 | ||
1753 | priv = dev_get_drvdata(ctx->jrdev->parent); | ||
1754 | ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
1755 | |||
1729 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, | 1756 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, |
1730 | offsetof(struct caam_hash_ctx, | 1757 | offsetof(struct caam_hash_ctx, |
1731 | sh_desc_update_dma), | 1758 | sh_desc_update_dma), |
1732 | DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); | 1759 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
1733 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { | 1760 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { |
1734 | dev_err(ctx->jrdev, "unable to map shared descriptors\n"); | 1761 | dev_err(ctx->jrdev, "unable to map shared descriptors\n"); |
1735 | caam_jr_free(ctx->jrdev); | 1762 | caam_jr_free(ctx->jrdev); |
@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
1764 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, | 1791 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, |
1765 | offsetof(struct caam_hash_ctx, | 1792 | offsetof(struct caam_hash_ctx, |
1766 | sh_desc_update_dma), | 1793 | sh_desc_update_dma), |
1767 | DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); | 1794 | ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
1768 | caam_jr_free(ctx->jrdev); | 1795 | caam_jr_free(ctx->jrdev); |
1769 | } | 1796 | } |
1770 | 1797 | ||
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 027e121c6f70..75d280cb2dc0 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev) | |||
611 | goto iounmap_ctrl; | 611 | goto iounmap_ctrl; |
612 | } | 612 | } |
613 | 613 | ||
614 | ctrlpriv->era = caam_get_era(); | ||
615 | |||
614 | ret = of_platform_populate(nprop, caam_match, NULL, dev); | 616 | ret = of_platform_populate(nprop, caam_match, NULL, dev); |
615 | if (ret) { | 617 | if (ret) { |
616 | dev_err(dev, "JR platform devices creation error\n"); | 618 | dev_err(dev, "JR platform devices creation error\n"); |
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev) | |||
742 | 744 | ||
743 | /* Report "alive" for developer to see */ | 745 | /* Report "alive" for developer to see */ |
744 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, | 746 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, |
745 | caam_get_era()); | 747 | ctrlpriv->era); |
746 | dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", | 748 | dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", |
747 | ctrlpriv->total_jobrs, ctrlpriv->qi_present, | 749 | ctrlpriv->total_jobrs, ctrlpriv->qi_present, |
748 | caam_dpaa2 ? "yes" : "no"); | 750 | caam_dpaa2 ? "yes" : "no"); |
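Caching the era once at probe pays off because every layer touched by this series (caamalg, caamalg_qi, caamhash) consults it in setkey and descriptor-construction paths, and each reaches it through the job ring's parent rather than re-reading it via caam_get_era():

    /* Sketch: the access pattern used by the algorithm layers above. */
    struct caam_drv_private *priv = dev_get_drvdata(ctx->jrdev->parent);
    bool have_dkp = priv->era >= 6;     /* DKP exists from SEC Era 6 on */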
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 8142de7ba050..f76ff160a02c 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
@@ -444,6 +444,18 @@ | |||
444 | #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) | 444 | #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) |
445 | #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) | 445 | #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) |
446 | #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) | 446 | #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) |
447 | #define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT) | ||
448 | #define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT) | ||
449 | #define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT) | ||
450 | #define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT) | ||
451 | #define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT) | ||
452 | #define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT) | ||
453 | #define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT) | ||
454 | #define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT) | ||
455 | #define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT) | ||
456 | #define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT) | ||
457 | #define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT) | ||
458 | #define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT) | ||
447 | 459 | ||
448 | /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ | 460 | /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ |
449 | #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) | 461 | #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) |
@@ -1093,6 +1105,22 @@ | |||
1093 | /* MacSec protinfos */ | 1105 | /* MacSec protinfos */ |
1094 | #define OP_PCL_MACSEC 0x0001 | 1106 | #define OP_PCL_MACSEC 0x0001 |
1095 | 1107 | ||
1108 | /* Derived Key Protocol (DKP) Protinfo */ | ||
1109 | #define OP_PCL_DKP_SRC_SHIFT 14 | ||
1110 | #define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT) | ||
1111 | #define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT) | ||
1112 | #define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT) | ||
1113 | #define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT) | ||
1114 | #define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT) | ||
1115 | #define OP_PCL_DKP_DST_SHIFT 12 | ||
1116 | #define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT) | ||
1117 | #define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT) | ||
1118 | #define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT) | ||
1119 | #define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT) | ||
1120 | #define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT) | ||
1121 | #define OP_PCL_DKP_KEY_SHIFT 0 | ||
1122 | #define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT) | ||
1123 | |||
1096 | /* PKI unidirectional protocol protinfo bits */ | 1124 | /* PKI unidirectional protocol protinfo bits */ |
1097 | #define OP_PCL_PKPROT_TEST 0x0008 | 1125 | #define OP_PCL_PKPROT_TEST 0x0008 |
1098 | #define OP_PCL_PKPROT_DECRYPT 0x0004 | 1126 | #define OP_PCL_PKPROT_DECRYPT 0x0004 |
@@ -1452,6 +1480,7 @@ | |||
1452 | #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) | 1480 | #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) |
1453 | #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) | 1481 | #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) |
1454 | #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) | 1482 | #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) |
1483 | #define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT) | ||
1455 | #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) | 1484 | #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) |
1456 | #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) | 1485 | #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) |
1457 | #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) | 1486 | #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) |
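The new DKP protinfo word packs three fields: bits 15:14 select where the input key comes from, bits 13:12 where the derived key is written (immediate, SEQ pointer, plain pointer, or scatter/gather), and bits 11:0 carry the key length. A minimal sketch of how the macros above combine (a hypothetical helper, not part of this patch):

	static inline u32 dkp_protinfo(u32 src, u32 dst, u32 keylen)
	{
		return (src & OP_PCL_DKP_SRC_MASK) |
		       (dst & OP_PCL_DKP_DST_MASK) |
		       ((keylen << OP_PCL_DKP_KEY_SHIFT) & OP_PCL_DKP_KEY_MASK);
	}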
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index ba1ca0806f0a..d4256fa4a1d6 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr, | |||
109 | append_ptr(desc, ptr); | 109 | append_ptr(desc, ptr); |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline void append_data(u32 * const desc, void *data, int len) | 112 | static inline void append_data(u32 * const desc, const void *data, int len) |
113 | { | 113 | { |
114 | u32 *offset = desc_end(desc); | 114 | u32 *offset = desc_end(desc); |
115 | 115 | ||
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr, | |||
172 | append_cmd(desc, len); | 172 | append_cmd(desc, len); |
173 | } | 173 | } |
174 | 174 | ||
175 | static inline void append_cmd_data(u32 * const desc, void *data, int len, | 175 | static inline void append_cmd_data(u32 * const desc, const void *data, int len, |
176 | u32 command) | 176 | u32 command) |
177 | { | 177 | { |
178 | append_cmd(desc, command | IMMEDIATE | len); | 178 | append_cmd(desc, command | IMMEDIATE | len); |
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN) | |||
271 | APPEND_SEQ_PTR_INTLEN(out, OUT) | 271 | APPEND_SEQ_PTR_INTLEN(out, OUT) |
272 | 272 | ||
273 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ | 273 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ |
274 | static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ | 274 | static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ |
275 | unsigned int len, u32 options) \ | 275 | unsigned int len, u32 options) \ |
276 | { \ | 276 | { \ |
277 | PRINT_POS; \ | 277 | PRINT_POS; \ |
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) | |||
312 | * from length of immediate data provided, e.g., split keys | 312 | * from length of immediate data provided, e.g., split keys |
313 | */ | 313 | */ |
314 | #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ | 314 | #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ |
315 | static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ | 315 | static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ |
316 | unsigned int data_len, \ | 316 | unsigned int data_len, \ |
317 | unsigned int len, u32 options) \ | 317 | unsigned int len, u32 options) \ |
318 | { \ | 318 | { \ |
@@ -452,7 +452,7 @@ struct alginfo { | |||
452 | unsigned int keylen_pad; | 452 | unsigned int keylen_pad; |
453 | union { | 453 | union { |
454 | dma_addr_t key_dma; | 454 | dma_addr_t key_dma; |
455 | void *key_virt; | 455 | const void *key_virt; |
456 | }; | 456 | }; |
457 | bool key_inline; | 457 | bool key_inline; |
458 | }; | 458 | }; |
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len, | |||
496 | return (rem_bytes >= 0) ? 0 : -1; | 496 | return (rem_bytes >= 0) ? 0 : -1; |
497 | } | 497 | } |
498 | 498 | ||
499 | /** | ||
500 | * append_proto_dkp - Derived Key Protocol (DKP): key -> split key | ||
501 | * @desc: pointer to buffer used for descriptor construction | ||
502 | * @adata: pointer to authentication transform definitions. | ||
503 | * keylen should be the length of the initial key, while keylen_pad | ||

504 | * the length of the derived (split) key. | ||
505 | * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, | ||
506 | * SHA256, SHA384, SHA512}. | ||
507 | */ | ||
508 | static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) | ||
509 | { | ||
510 | u32 protid; | ||
511 | |||
512 | /* | ||
513 | * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} | ||
514 | * to OP_PCLID_DKP_{MD5, SHA*} | ||
515 | */ | ||
516 | protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) | | ||
517 | (0x20 << OP_ALG_ALGSEL_SHIFT); | ||
518 | |||
519 | if (adata->key_inline) { | ||
520 | int words; | ||
521 | |||
522 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | | ||
523 | OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | | ||
524 | adata->keylen); | ||
525 | append_data(desc, adata->key_virt, adata->keylen); | ||
526 | |||
527 | /* Reserve space in descriptor buffer for the derived key */ | ||
528 | words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - | ||
529 | ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; | ||
530 | if (words) | ||
531 | (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); | ||
532 | } else { | ||
533 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | | ||
534 | OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR | | ||
535 | adata->keylen); | ||
536 | append_ptr(desc, adata->key_dma); | ||
537 | } | ||
538 | } | ||
539 | |||
499 | #endif /* DESC_CONSTR_H */ | 540 | #endif /* DESC_CONSTR_H */ |
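append_proto_dkp() above supports two paths: with key_inline the raw key travels immediately in the descriptor and CAAM overwrites it in place with the derived split key, which is why the descriptor header length is bumped by the difference between keylen_pad and keylen (in command words); otherwise only a pointer is appended and the derivation happens at key_dma. A usage sketch under assumed values ("desc" and "key" are presumed to exist; this is not code from the patch):

	/* Derive a SHA-256 HMAC split key from an inline 32-byte raw key. */
	struct alginfo adata = {
		.algtype    = OP_ALG_ALGSEL_SHA256,
		.keylen     = 32,
		.keylen_pad = split_key_pad_len(OP_ALG_ALGSEL_SHA256),
		.key_virt   = key,
		.key_inline = true,
	};

	append_proto_dkp(desc, &adata);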
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 91f1107276e5..7696a774a362 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -84,6 +84,7 @@ struct caam_drv_private { | |||
84 | u8 qi_present; /* Nonzero if QI present in device */ | 84 | u8 qi_present; /* Nonzero if QI present in device */ |
85 | int secvio_irq; /* Security violation interrupt number */ | 85 | int secvio_irq; /* Security violation interrupt number */ |
86 | int virt_en; /* Virtualization enabled in CAAM */ | 86 | int virt_en; /* Virtualization enabled in CAAM */ |
87 | int era; /* CAAM Era (internal HW revision) */ | ||
87 | 88 | ||
88 | #define RNG4_MAX_HANDLES 2 | 89 | #define RNG4_MAX_HANDLES 2 |
89 | /* RNG4 block */ | 90 | /* RNG4 block */ |
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 8c79c3a153dc..312b5f042f31 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
@@ -11,36 +11,6 @@ | |||
11 | #include "desc_constr.h" | 11 | #include "desc_constr.h" |
12 | #include "key_gen.h" | 12 | #include "key_gen.h" |
13 | 13 | ||
14 | /** | ||
15 | * split_key_len - Compute MDHA split key length for a given algorithm | ||
16 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, | ||
17 | * SHA224, SHA384, SHA512. | ||
18 | * | ||
19 | * Return: MDHA split key length | ||
20 | */ | ||
21 | static inline u32 split_key_len(u32 hash) | ||
22 | { | ||
23 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
24 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
25 | u32 idx; | ||
26 | |||
27 | idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; | ||
28 | |||
29 | return (u32)(mdpadlen[idx] * 2); | ||
30 | } | ||
31 | |||
32 | /** | ||
33 | * split_key_pad_len - Compute MDHA split key pad length for a given algorithm | ||
34 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, | ||
35 | * SHA224, SHA384, SHA512. | ||
36 | * | ||
37 | * Return: MDHA split key pad length | ||
38 | */ | ||
39 | static inline u32 split_key_pad_len(u32 hash) | ||
40 | { | ||
41 | return ALIGN(split_key_len(hash), 16); | ||
42 | } | ||
43 | |||
44 | void split_key_done(struct device *dev, u32 *desc, u32 err, | 14 | void split_key_done(struct device *dev, u32 *desc, u32 err, |
45 | void *context) | 15 | void *context) |
46 | { | 16 | { |
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h index 5db055c25bd2..818f78f6fc1a 100644 --- a/drivers/crypto/caam/key_gen.h +++ b/drivers/crypto/caam/key_gen.h | |||
@@ -6,6 +6,36 @@ | |||
6 | * | 6 | * |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /** | ||
10 | * split_key_len - Compute MDHA split key length for a given algorithm | ||
11 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, | ||
12 | * SHA224, SHA384, SHA512. | ||
13 | * | ||
14 | * Return: MDHA split key length | ||
15 | */ | ||
16 | static inline u32 split_key_len(u32 hash) | ||
17 | { | ||
18 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | ||
19 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | ||
20 | u32 idx; | ||
21 | |||
22 | idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; | ||
23 | |||
24 | return (u32)(mdpadlen[idx] * 2); | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * split_key_pad_len - Compute MDHA split key pad length for a given algorithm | ||
29 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, | ||
30 | * SHA224, SHA384, SHA512. | ||
31 | * | ||
32 | * Return: MDHA split key pad length | ||
33 | */ | ||
34 | static inline u32 split_key_pad_len(u32 hash) | ||
35 | { | ||
36 | return ALIGN(split_key_len(hash), 16); | ||
37 | } | ||
38 | |||
9 | struct split_key_result { | 39 | struct split_key_result { |
10 | struct completion completion; | 40 | struct completion completion; |
11 | int err; | 41 | int err; |
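Moving these helpers into key_gen.h lets callers outside key_gen.c (such as the new DKP descriptor code) size split-key buffers. To make the arithmetic concrete:

	/* Worked examples of the helpers above (values follow from mdpadlen):
	 *   MD5:     split_key_len = 16 * 2 = 32,  split_key_pad_len = 32
	 *   SHA-1:   split_key_len = 20 * 2 = 40,  split_key_pad_len = 48
	 *   SHA-256: split_key_len = 32 * 2 = 64,  split_key_pad_len = 64
	 *   SHA-512: split_key_len = 64 * 2 = 128, split_key_pad_len = 128
	 */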
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 169e66231bcf..b0ba4331944b 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c | |||
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) | |||
459 | info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); | 459 | info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); |
460 | if (unlikely(!info->completion_addr)) { | 460 | if (unlikely(!info->completion_addr)) { |
461 | dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); | 461 | dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); |
462 | return -ENOMEM; | 462 | ret = -ENOMEM; |
463 | goto request_cleanup; | ||
463 | } | 464 | } |
464 | 465 | ||
465 | result = (union cpt_res_s *)info->completion_addr; | 466 | result = (union cpt_res_s *)info->completion_addr; |
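The cptvf fix replaces a bare return with a jump to the function's unwind label so that everything process_request() allocated before this point is released. The general shape of the pattern, as a stand-alone illustrative example (not driver code):

	static int example_alloc_two(void **a, void **b)
	{
		int ret;

		*a = kzalloc(64, GFP_KERNEL);
		if (!*a)
			return -ENOMEM;		/* nothing to unwind yet */

		*b = kzalloc(64, GFP_KERNEL);
		if (!*b) {
			ret = -ENOMEM;
			goto free_a;		/* a bare return would leak *a */
		}
		return 0;

	free_a:
		kfree(*a);
		*a = NULL;
		return ret;
	}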
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 4addc238a6ef..deaefd532aaa 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
@@ -6,7 +6,6 @@ | |||
6 | #include "nitrox_dev.h" | 6 | #include "nitrox_dev.h" |
7 | #include "nitrox_req.h" | 7 | #include "nitrox_req.h" |
8 | #include "nitrox_csr.h" | 8 | #include "nitrox_csr.h" |
9 | #include "nitrox_req.h" | ||
10 | 9 | ||
11 | /* SLC_STORE_INFO */ | 10 | /* SLC_STORE_INFO */ |
12 | #define MIN_UDD_LEN 16 | 11 | #define MIN_UDD_LEN 16 |
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index ff02b713c6f6..ca1f0d780b61 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <crypto/ctr.h> | 21 | #include <crypto/ctr.h> |
22 | #include <crypto/gcm.h> | 22 | #include <crypto/gcm.h> |
23 | #include <crypto/scatterwalk.h> | 23 | #include <crypto/scatterwalk.h> |
24 | #include <linux/delay.h> | ||
25 | 24 | ||
26 | #include "ccp-crypto.h" | 25 | #include "ccp-crypto.h" |
27 | 26 | ||
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index b56b3f711d94..5ae9f8706f17 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig | |||
@@ -19,3 +19,13 @@ config CRYPTO_DEV_CHELSIO | |||
19 | 19 | ||
20 | To compile this driver as a module, choose M here: the module | 20 | To compile this driver as a module, choose M here: the module |
21 | will be called chcr. | 21 | will be called chcr. |
22 | |||
23 | config CHELSIO_IPSEC_INLINE | ||
24 | bool "Chelsio IPSec XFRM Tx crypto offload" | ||
25 | depends on CHELSIO_T4 | ||
26 | depends on CRYPTO_DEV_CHELSIO | ||
27 | depends on XFRM_OFFLOAD | ||
28 | depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD | ||
29 | default n | ||
30 | ---help--- | ||
31 | Enable support for IPSec Tx Inline. | ||
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile index bebdf06687ad..eaecaf1ebcf3 100644 --- a/drivers/crypto/chelsio/Makefile +++ b/drivers/crypto/chelsio/Makefile | |||
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 | |||
2 | 2 | ||
3 | obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o | 3 | obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o |
4 | chcr-objs := chcr_core.o chcr_algo.o | 4 | chcr-objs := chcr_core.o chcr_algo.o |
5 | chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o | ||
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 4eed7171e2ae..34a02d690548 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c | |||
@@ -73,6 +73,29 @@ | |||
73 | 73 | ||
74 | #define IV AES_BLOCK_SIZE | 74 | #define IV AES_BLOCK_SIZE |
75 | 75 | ||
76 | static unsigned int sgl_ent_len[] = { | ||
77 | 0, 0, 16, 24, 40, 48, 64, 72, 88, | ||
78 | 96, 112, 120, 136, 144, 160, 168, 184, | ||
79 | 192, 208, 216, 232, 240, 256, 264, 280, | ||
80 | 288, 304, 312, 328, 336, 352, 360, 376 | ||
81 | }; | ||
82 | |||
83 | static unsigned int dsgl_ent_len[] = { | ||
84 | 0, 32, 32, 48, 48, 64, 64, 80, 80, | ||
85 | 112, 112, 128, 128, 144, 144, 160, 160, | ||
86 | 192, 192, 208, 208, 224, 224, 240, 240, | ||
87 | 272, 272, 288, 288, 304, 304, 320, 320 | ||
88 | }; | ||
89 | |||
90 | static u32 round_constant[11] = { | ||
91 | 0x01000000, 0x02000000, 0x04000000, 0x08000000, | ||
92 | 0x10000000, 0x20000000, 0x40000000, 0x80000000, | ||
93 | 0x1B000000, 0x36000000, 0x6C000000 | ||
94 | }; | ||
95 | |||
96 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | ||
97 | unsigned char *input, int err); | ||
98 | |||
76 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) | 99 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) |
77 | { | 100 | { |
78 | return ctx->crypto_ctx->aeadctx; | 101 | return ctx->crypto_ctx->aeadctx; |
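The two lookup tables replace the removed sgl_len() flit arithmetic (deleted in the next hunk); they appear to give the work-request space in bytes consumed by a source ULPTX SGL and a destination PHYS_DSGL holding a given number of gather entries. round_constant[] holds the AES key-schedule round constants (rcon) in the high byte, as used by the driver's key-expansion helper. A clamped-lookup sketch, assuming the index is an entry count (hypothetical helper, not from the patch):

	static inline unsigned int sgl_space(unsigned int nents)
	{
		/* clamp to the last entry to avoid reading past the array */
		return sgl_ent_len[min_t(unsigned int, nents,
					 ARRAY_SIZE(sgl_ent_len) - 1)];
	}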
@@ -108,18 +131,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb) | |||
108 | return (skb->len <= SGE_MAX_WR_LEN); | 131 | return (skb->len <= SGE_MAX_WR_LEN); |
109 | } | 132 | } |
110 | 133 | ||
111 | /* | ||
112 | * sgl_len - calculates the size of an SGL of the given capacity | ||
113 | * @n: the number of SGL entries | ||
114 | * Calculates the number of flits needed for a scatter/gather list that | ||
115 | * can hold the given number of entries. | ||
116 | */ | ||
117 | static inline unsigned int sgl_len(unsigned int n) | ||
118 | { | ||
119 | n--; | ||
120 | return (3 * n) / 2 + (n & 1) + 2; | ||
121 | } | ||
122 | |||
123 | static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, | 134 | static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, |
124 | unsigned int entlen, | 135 | unsigned int entlen, |
125 | unsigned int skip) | 136 | unsigned int skip) |
@@ -160,7 +171,6 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req, | |||
160 | 171 | ||
161 | if (input == NULL) | 172 | if (input == NULL) |
162 | goto out; | 173 | goto out; |
163 | reqctx = ahash_request_ctx(req); | ||
164 | digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); | 174 | digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); |
165 | if (reqctx->is_sg_map) | 175 | if (reqctx->is_sg_map) |
166 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | 176 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); |
@@ -183,30 +193,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req, | |||
183 | } | 193 | } |
184 | out: | 194 | out: |
185 | req->base.complete(&req->base, err); | 195 | req->base.complete(&req->base, err); |
196 | } | ||
186 | 197 | ||
187 | } | 198 | static inline int get_aead_subtype(struct crypto_aead *aead) |
188 | |||
189 | static inline void chcr_handle_aead_resp(struct aead_request *req, | ||
190 | unsigned char *input, | ||
191 | int err) | ||
192 | { | 199 | { |
193 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 200 | struct aead_alg *alg = crypto_aead_alg(aead); |
194 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 201 | struct chcr_alg_template *chcr_crypto_alg = |
195 | struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); | 202 | container_of(alg, struct chcr_alg_template, alg.aead); |
196 | 203 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
197 | |||
198 | chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); | ||
199 | if (reqctx->b0_dma) | ||
200 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, | ||
201 | reqctx->b0_len, DMA_BIDIRECTIONAL); | ||
202 | if (reqctx->verify == VERIFY_SW) { | ||
203 | chcr_verify_tag(req, input, &err); | ||
204 | reqctx->verify = VERIFY_HW; | ||
205 | } | 204 | } |
206 | req->base.complete(&req->base, err); | ||
207 | 205 | ||
208 | } | 206 | void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) |
209 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) | ||
210 | { | 207 | { |
211 | u8 temp[SHA512_DIGEST_SIZE]; | 208 | u8 temp[SHA512_DIGEST_SIZE]; |
212 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 209 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
@@ -231,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) | |||
231 | *err = 0; | 228 | *err = 0; |
232 | } | 229 | } |
233 | 230 | ||
231 | static inline void chcr_handle_aead_resp(struct aead_request *req, | ||
232 | unsigned char *input, | ||
233 | int err) | ||
234 | { | ||
235 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | ||
236 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
237 | struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); | ||
238 | |||
239 | chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); | ||
240 | if (reqctx->b0_dma) | ||
241 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, | ||
242 | reqctx->b0_len, DMA_BIDIRECTIONAL); | ||
243 | if (reqctx->verify == VERIFY_SW) { | ||
244 | chcr_verify_tag(req, input, &err); | ||
245 | reqctx->verify = VERIFY_HW; | ||
246 | } | ||
247 | req->base.complete(&req->base, err); | ||
248 | } | ||
249 | |||
234 | /* | 250 | /* |
235 | * chcr_handle_resp - Unmap the DMA buffers associated with the request | 251 | * chcr_handle_resp - Unmap the DMA buffers associated with the request |
236 | * @req: crypto request | 252 | * @req: crypto request |
@@ -558,7 +574,8 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk, | |||
558 | skip = 0; | 574 | skip = 0; |
559 | } | 575 | } |
560 | } | 576 | } |
561 | if (walk->nents == 0) { | 577 | WARN(!sg, "SG should not be null here\n"); |
578 | if (sg && (walk->nents == 0)) { | ||
562 | small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); | 579 | small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); |
563 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | 580 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); |
564 | walk->sgl->len0 = cpu_to_be32(sgmin); | 581 | walk->sgl->len0 = cpu_to_be32(sgmin); |
@@ -595,14 +612,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk, | |||
595 | } | 612 | } |
596 | } | 613 | } |
597 | 614 | ||
598 | static inline int get_aead_subtype(struct crypto_aead *aead) | ||
599 | { | ||
600 | struct aead_alg *alg = crypto_aead_alg(aead); | ||
601 | struct chcr_alg_template *chcr_crypto_alg = | ||
602 | container_of(alg, struct chcr_alg_template, alg.aead); | ||
603 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | ||
604 | } | ||
605 | |||
606 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) | 615 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) |
607 | { | 616 | { |
608 | struct crypto_alg *alg = tfm->__crt_alg; | 617 | struct crypto_alg *alg = tfm->__crt_alg; |
@@ -675,7 +684,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, | |||
675 | if (srclen <= dstlen) | 684 | if (srclen <= dstlen) |
676 | break; | 685 | break; |
677 | less = min_t(unsigned int, sg_dma_len(dst) - offset - | 686 | less = min_t(unsigned int, sg_dma_len(dst) - offset - |
678 | dstskip, CHCR_DST_SG_SIZE); | 687 | dstskip, CHCR_DST_SG_SIZE); |
679 | dstlen += less; | 688 | dstlen += less; |
680 | offset += less; | 689 | offset += less; |
681 | if (offset == sg_dma_len(dst)) { | 690 | if (offset == sg_dma_len(dst)) { |
@@ -686,7 +695,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, | |||
686 | dstskip = 0; | 695 | dstskip = 0; |
687 | } | 696 | } |
688 | src = sg_next(src); | 697 | src = sg_next(src); |
689 | srcskip = 0; | 698 | srcskip = 0; |
690 | } | 699 | } |
691 | return min(srclen, dstlen); | 700 | return min(srclen, dstlen); |
692 | } | 701 | } |
@@ -1008,7 +1017,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) | |||
1008 | return bytes; | 1017 | return bytes; |
1009 | } | 1018 | } |
1010 | 1019 | ||
1011 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) | 1020 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, |
1021 | u32 isfinal) | ||
1012 | { | 1022 | { |
1013 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1023 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
1014 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); | 1024 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
@@ -1035,7 +1045,8 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) | |||
1035 | for (i = 0; i < (round % 8); i++) | 1045 | for (i = 0; i < (round % 8); i++) |
1036 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); | 1046 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); |
1037 | 1047 | ||
1038 | crypto_cipher_decrypt_one(cipher, iv, iv); | 1048 | if (!isfinal) |
1049 | crypto_cipher_decrypt_one(cipher, iv, iv); | ||
1039 | out: | 1050 | out: |
1040 | return ret; | 1051 | return ret; |
1041 | } | 1052 | } |
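chcr_update_tweak() advances the XTS tweak in software to resume a partially processed request: the IV is encrypted into the tweak domain, multiplied by x in GF(2^128) once per AES block already processed, then decrypted back so hardware can re-derive it on the next pass. The new isfinal flag skips that final decryption when computing the IV reported at request completion, where no further pass follows. A condensed sketch of the flow (the hunk shows only the tail of the function):

	crypto_cipher_encrypt_one(cipher, iv, iv);          /* IV -> tweak domain */
	for (i = 0; i < round / 8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);  /* x^8 per 8 blocks */
	for (i = 0; i < round % 8; i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);   /* x per block */
	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);  /* back to IV domain */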
@@ -1056,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req, | |||
1056 | CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / | 1067 | CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / |
1057 | AES_BLOCK_SIZE) + 1); | 1068 | AES_BLOCK_SIZE) + 1); |
1058 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | 1069 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) |
1059 | ret = chcr_update_tweak(req, iv); | 1070 | ret = chcr_update_tweak(req, iv, 0); |
1060 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { | 1071 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
1061 | if (reqctx->op) | 1072 | if (reqctx->op) |
1062 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, | 1073 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, |
@@ -1087,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req, | |||
1087 | ctr_add_iv(iv, req->info, (reqctx->processed / | 1098 | ctr_add_iv(iv, req->info, (reqctx->processed / |
1088 | AES_BLOCK_SIZE)); | 1099 | AES_BLOCK_SIZE)); |
1089 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | 1100 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) |
1090 | ret = chcr_update_tweak(req, iv); | 1101 | ret = chcr_update_tweak(req, iv, 1); |
1091 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { | 1102 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
1092 | if (reqctx->op) | 1103 | if (reqctx->op) |
1093 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, | 1104 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, |
@@ -1101,7 +1112,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req, | |||
1101 | 1112 | ||
1102 | } | 1113 | } |
1103 | 1114 | ||
1104 | |||
1105 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | 1115 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, |
1106 | unsigned char *input, int err) | 1116 | unsigned char *input, int err) |
1107 | { | 1117 | { |
@@ -1135,10 +1145,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | |||
1135 | bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, | 1145 | bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, |
1136 | SPACE_LEFT(ablkctx->enckey_len), | 1146 | SPACE_LEFT(ablkctx->enckey_len), |
1137 | reqctx->src_ofst, reqctx->dst_ofst); | 1147 | reqctx->src_ofst, reqctx->dst_ofst); |
1138 | if ((bytes + reqctx->processed) >= req->nbytes) | 1148 | if ((bytes + reqctx->processed) >= req->nbytes) |
1139 | bytes = req->nbytes - reqctx->processed; | 1149 | bytes = req->nbytes - reqctx->processed; |
1140 | else | 1150 | else |
1141 | bytes = ROUND_16(bytes); | 1151 | bytes = ROUND_16(bytes); |
1142 | } else { | 1152 | } else { |
1143 | /*CTR mode counter overflow*/ | 1153 |
1144 | bytes = req->nbytes - reqctx->processed; | 1154 | bytes = req->nbytes - reqctx->processed; |
@@ -1239,15 +1249,15 @@ static int process_cipher(struct ablkcipher_request *req, | |||
1239 | MIN_CIPHER_SG, | 1249 | MIN_CIPHER_SG, |
1240 | SPACE_LEFT(ablkctx->enckey_len), | 1250 | SPACE_LEFT(ablkctx->enckey_len), |
1241 | 0, 0); | 1251 | 0, 0); |
1242 | if ((bytes + reqctx->processed) >= req->nbytes) | 1252 | if ((bytes + reqctx->processed) >= req->nbytes) |
1243 | bytes = req->nbytes - reqctx->processed; | 1253 | bytes = req->nbytes - reqctx->processed; |
1244 | else | 1254 | else |
1245 | bytes = ROUND_16(bytes); | 1255 | bytes = ROUND_16(bytes); |
1246 | } else { | 1256 | } else { |
1247 | bytes = req->nbytes; | 1257 | bytes = req->nbytes; |
1248 | } | 1258 | } |
1249 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | 1259 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == |
1250 | CRYPTO_ALG_SUB_TYPE_CTR) { | 1260 | CRYPTO_ALG_SUB_TYPE_CTR) { |
1251 | bytes = adjust_ctr_overflow(req->info, bytes); | 1261 | bytes = adjust_ctr_overflow(req->info, bytes); |
1252 | } | 1262 | } |
1253 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | 1263 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == |
@@ -2014,11 +2024,8 @@ static int chcr_aead_common_init(struct aead_request *req, | |||
2014 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); | 2024 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2015 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2025 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2016 | int error = -EINVAL; | 2026 | int error = -EINVAL; |
2017 | unsigned int dst_size; | ||
2018 | unsigned int authsize = crypto_aead_authsize(tfm); | 2027 | unsigned int authsize = crypto_aead_authsize(tfm); |
2019 | 2028 | ||
2020 | dst_size = req->assoclen + req->cryptlen + (op_type ? | ||
2021 | -authsize : authsize); | ||
2022 | /* validate key size */ | 2029 | /* validate key size */ |
2023 | if (aeadctx->enckey_len == 0) | 2030 | if (aeadctx->enckey_len == 0) |
2024 | goto err; | 2031 | goto err; |
@@ -2083,7 +2090,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
2083 | struct cpl_rx_phys_dsgl *phys_cpl; | 2090 | struct cpl_rx_phys_dsgl *phys_cpl; |
2084 | struct ulptx_sgl *ulptx; | 2091 | struct ulptx_sgl *ulptx; |
2085 | unsigned int transhdr_len; | 2092 | unsigned int transhdr_len; |
2086 | unsigned int dst_size = 0, temp; | 2093 | unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); |
2087 | unsigned int kctx_len = 0, dnents; | 2094 | unsigned int kctx_len = 0, dnents; |
2088 | unsigned int assoclen = req->assoclen; | 2095 | unsigned int assoclen = req->assoclen; |
2089 | unsigned int authsize = crypto_aead_authsize(tfm); | 2096 | unsigned int authsize = crypto_aead_authsize(tfm); |
@@ -2097,24 +2104,19 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
2097 | return NULL; | 2104 | return NULL; |
2098 | 2105 | ||
2099 | reqctx->b0_dma = 0; | 2106 | reqctx->b0_dma = 0; |
2100 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { | 2107 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || |
2108 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | ||
2101 | null = 1; | 2109 | null = 1; |
2102 | assoclen = 0; | 2110 | assoclen = 0; |
2103 | } | 2111 | } |
2104 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : | ||
2105 | authsize); | ||
2106 | error = chcr_aead_common_init(req, op_type); | 2112 | error = chcr_aead_common_init(req, op_type); |
2107 | if (error) | 2113 | if (error) |
2108 | return ERR_PTR(error); | 2114 | return ERR_PTR(error); |
2109 | if (dst_size) { | 2115 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2110 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); | 2116 | dnents += sg_nents_xlen(req->dst, req->cryptlen + |
2111 | dnents += sg_nents_xlen(req->dst, req->cryptlen + | 2117 | (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, |
2112 | (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, | 2118 | req->assoclen); |
2113 | req->assoclen); | 2119 | dnents += MIN_AUTH_SG; // For IV |
2114 | dnents += MIN_AUTH_SG; // For IV | ||
2115 | } else { | ||
2116 | dnents = 0; | ||
2117 | } | ||
2118 | 2120 | ||
2119 | dst_size = get_space_for_phys_dsgl(dnents); | 2121 | dst_size = get_space_for_phys_dsgl(dnents); |
2120 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) | 2122 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) |
@@ -2162,16 +2164,23 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
2162 | temp & 0xF, | 2164 | temp & 0xF, |
2163 | null ? 0 : assoclen + IV + 1, | 2165 | null ? 0 : assoclen + IV + 1, |
2164 | temp, temp); | 2166 | temp, temp); |
2167 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || | ||
2168 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) | ||
2169 | temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; | ||
2170 | else | ||
2171 | temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; | ||
2165 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | 2172 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, |
2166 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, | 2173 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, |
2167 | CHCR_SCMD_CIPHER_MODE_AES_CBC, | 2174 | temp, |
2168 | actx->auth_mode, aeadctx->hmac_ctrl, | 2175 | actx->auth_mode, aeadctx->hmac_ctrl, |
2169 | IV >> 1); | 2176 | IV >> 1); |
2170 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | 2177 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2171 | 0, 0, dst_size); | 2178 | 0, 0, dst_size); |
2172 | 2179 | ||
2173 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | 2180 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; |
2174 | if (op_type == CHCR_ENCRYPT_OP) | 2181 | if (op_type == CHCR_ENCRYPT_OP || |
2182 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | ||
2183 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) | ||
2175 | memcpy(chcr_req->key_ctx.key, aeadctx->key, | 2184 | memcpy(chcr_req->key_ctx.key, aeadctx->key, |
2176 | aeadctx->enckey_len); | 2185 | aeadctx->enckey_len); |
2177 | else | 2186 | else |
@@ -2181,7 +2190,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
2181 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << | 2190 | memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << |
2182 | 4), actx->h_iopad, kctx_len - | 2191 | 4), actx->h_iopad, kctx_len - |
2183 | (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); | 2192 | (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); |
2184 | memcpy(reqctx->iv, req->iv, IV); | 2193 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
2194 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | ||
2195 | memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); | ||
2196 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, | ||
2197 | CTR_RFC3686_IV_SIZE); | ||
2198 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | ||
2199 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | ||
2200 | } else { | ||
2201 | memcpy(reqctx->iv, req->iv, IV); | ||
2202 | } | ||
2185 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 2203 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2186 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); | 2204 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
2187 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); | 2205 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); |
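For the new CTR-based authenc subtypes the driver builds the 16-byte AES-CTR counter block itself rather than passing req->iv through, following RFC 3686. The CTR paths also always load the forward AES key (copied unconditionally in the key-context memcpy above), since CTR decryption encrypts the counter stream just like encryption does.

	/* Counter-block layout assembled above:
	 *
	 *   bytes  0..3    nonce (saved from the tail of the setkey material)
	 *   bytes  4..11   per-request IV (req->iv)
	 *   bytes 12..15   block counter, big-endian, initialised to 1
	 */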
@@ -2202,9 +2220,9 @@ err: | |||
2202 | return ERR_PTR(error); | 2220 | return ERR_PTR(error); |
2203 | } | 2221 | } |
2204 | 2222 | ||
2205 | static int chcr_aead_dma_map(struct device *dev, | 2223 | int chcr_aead_dma_map(struct device *dev, |
2206 | struct aead_request *req, | 2224 | struct aead_request *req, |
2207 | unsigned short op_type) | 2225 | unsigned short op_type) |
2208 | { | 2226 | { |
2209 | int error; | 2227 | int error; |
2210 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2228 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
@@ -2246,9 +2264,9 @@ err: | |||
2246 | return -ENOMEM; | 2264 | return -ENOMEM; |
2247 | } | 2265 | } |
2248 | 2266 | ||
2249 | static void chcr_aead_dma_unmap(struct device *dev, | 2267 | void chcr_aead_dma_unmap(struct device *dev, |
2250 | struct aead_request *req, | 2268 | struct aead_request *req, |
2251 | unsigned short op_type) | 2269 | unsigned short op_type) |
2252 | { | 2270 | { |
2253 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2271 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2254 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2272 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
@@ -2273,10 +2291,10 @@ static void chcr_aead_dma_unmap(struct device *dev, | |||
2273 | } | 2291 | } |
2274 | } | 2292 | } |
2275 | 2293 | ||
2276 | static inline void chcr_add_aead_src_ent(struct aead_request *req, | 2294 | void chcr_add_aead_src_ent(struct aead_request *req, |
2277 | struct ulptx_sgl *ulptx, | 2295 | struct ulptx_sgl *ulptx, |
2278 | unsigned int assoclen, | 2296 | unsigned int assoclen, |
2279 | unsigned short op_type) | 2297 | unsigned short op_type) |
2280 | { | 2298 | { |
2281 | struct ulptx_walk ulp_walk; | 2299 | struct ulptx_walk ulp_walk; |
2282 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2300 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
@@ -2308,11 +2326,11 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req, | |||
2308 | } | 2326 | } |
2309 | } | 2327 | } |
2310 | 2328 | ||
2311 | static inline void chcr_add_aead_dst_ent(struct aead_request *req, | 2329 | void chcr_add_aead_dst_ent(struct aead_request *req, |
2312 | struct cpl_rx_phys_dsgl *phys_cpl, | 2330 | struct cpl_rx_phys_dsgl *phys_cpl, |
2313 | unsigned int assoclen, | 2331 | unsigned int assoclen, |
2314 | unsigned short op_type, | 2332 | unsigned short op_type, |
2315 | unsigned short qid) | 2333 | unsigned short qid) |
2316 | { | 2334 | { |
2317 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | 2335 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); |
2318 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 2336 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
@@ -2330,9 +2348,9 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req, | |||
2330 | dsgl_walk_end(&dsgl_walk, qid); | 2348 | dsgl_walk_end(&dsgl_walk, qid); |
2331 | } | 2349 | } |
2332 | 2350 | ||
2333 | static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, | 2351 | void chcr_add_cipher_src_ent(struct ablkcipher_request *req, |
2334 | struct ulptx_sgl *ulptx, | 2352 | struct ulptx_sgl *ulptx, |
2335 | struct cipher_wr_param *wrparam) | 2353 | struct cipher_wr_param *wrparam) |
2336 | { | 2354 | { |
2337 | struct ulptx_walk ulp_walk; | 2355 | struct ulptx_walk ulp_walk; |
2338 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 2356 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
@@ -2355,10 +2373,10 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, | |||
2355 | } | 2373 | } |
2356 | } | 2374 | } |
2357 | 2375 | ||
2358 | static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, | 2376 | void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, |
2359 | struct cpl_rx_phys_dsgl *phys_cpl, | 2377 | struct cpl_rx_phys_dsgl *phys_cpl, |
2360 | struct cipher_wr_param *wrparam, | 2378 | struct cipher_wr_param *wrparam, |
2361 | unsigned short qid) | 2379 | unsigned short qid) |
2362 | { | 2380 | { |
2363 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 2381 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
2364 | struct dsgl_walk dsgl_walk; | 2382 | struct dsgl_walk dsgl_walk; |
@@ -2373,9 +2391,9 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, | |||
2373 | dsgl_walk_end(&dsgl_walk, qid); | 2391 | dsgl_walk_end(&dsgl_walk, qid); |
2374 | } | 2392 | } |
2375 | 2393 | ||
2376 | static inline void chcr_add_hash_src_ent(struct ahash_request *req, | 2394 | void chcr_add_hash_src_ent(struct ahash_request *req, |
2377 | struct ulptx_sgl *ulptx, | 2395 | struct ulptx_sgl *ulptx, |
2378 | struct hash_wr_param *param) | 2396 | struct hash_wr_param *param) |
2379 | { | 2397 | { |
2380 | struct ulptx_walk ulp_walk; | 2398 | struct ulptx_walk ulp_walk; |
2381 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | 2399 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); |
@@ -2395,16 +2413,13 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req, | |||
2395 | ulptx_walk_add_page(&ulp_walk, param->bfr_len, | 2413 | ulptx_walk_add_page(&ulp_walk, param->bfr_len, |
2396 | &reqctx->dma_addr); | 2414 | &reqctx->dma_addr); |
2397 | ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, | 2415 | ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, |
2398 | 0); | 2416 | 0); |
2399 | // reqctx->srcsg = ulp_walk.last_sg; | 2417 | ulptx_walk_end(&ulp_walk); |
2400 | // reqctx->src_ofst = ulp_walk.last_sg_len; | ||
2401 | ulptx_walk_end(&ulp_walk); | ||
2402 | } | 2418 | } |
2403 | } | 2419 | } |
2404 | 2420 | ||
2405 | 2421 | int chcr_hash_dma_map(struct device *dev, | |
2406 | static inline int chcr_hash_dma_map(struct device *dev, | 2422 | struct ahash_request *req) |
2407 | struct ahash_request *req) | ||
2408 | { | 2423 | { |
2409 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 2424 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
2410 | int error = 0; | 2425 | int error = 0; |
@@ -2414,13 +2429,13 @@ static inline int chcr_hash_dma_map(struct device *dev, | |||
2414 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | 2429 | error = dma_map_sg(dev, req->src, sg_nents(req->src), |
2415 | DMA_TO_DEVICE); | 2430 | DMA_TO_DEVICE); |
2416 | if (!error) | 2431 | if (!error) |
2417 | return error; | 2432 | return -ENOMEM; |
2418 | req_ctx->is_sg_map = 1; | 2433 | req_ctx->is_sg_map = 1; |
2419 | return 0; | 2434 | return 0; |
2420 | } | 2435 | } |
2421 | 2436 | ||
2422 | static inline void chcr_hash_dma_unmap(struct device *dev, | 2437 | void chcr_hash_dma_unmap(struct device *dev, |
2423 | struct ahash_request *req) | 2438 | struct ahash_request *req) |
2424 | { | 2439 | { |
2425 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | 2440 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); |
2426 | 2441 | ||
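The chcr_hash_dma_map() change also fixes an inverted error code: dma_map_sg() returns the number of mapped segments, with 0 signalling failure, so the old code returned 0 (success) exactly when mapping failed. The correct pattern:

	int nents;

	nents = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;		/* dma_map_sg() reports failure as 0 */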
@@ -2433,9 +2448,8 @@ static inline void chcr_hash_dma_unmap(struct device *dev, | |||
2433 | 2448 | ||
2434 | } | 2449 | } |
2435 | 2450 | ||
2436 | 2451 | int chcr_cipher_dma_map(struct device *dev, | |
2437 | static int chcr_cipher_dma_map(struct device *dev, | 2452 | struct ablkcipher_request *req) |
2438 | struct ablkcipher_request *req) | ||
2439 | { | 2453 | { |
2440 | int error; | 2454 | int error; |
2441 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 2455 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
@@ -2469,8 +2483,9 @@ err: | |||
2469 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | 2483 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); |
2470 | return -ENOMEM; | 2484 | return -ENOMEM; |
2471 | } | 2485 | } |
2472 | static void chcr_cipher_dma_unmap(struct device *dev, | 2486 | |
2473 | struct ablkcipher_request *req) | 2487 | void chcr_cipher_dma_unmap(struct device *dev, |
2488 | struct ablkcipher_request *req) | ||
2474 | { | 2489 | { |
2475 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 2490 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
2476 | 2491 | ||
@@ -2666,8 +2681,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
2666 | sub_type = get_aead_subtype(tfm); | 2681 | sub_type = get_aead_subtype(tfm); |
2667 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | 2682 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) |
2668 | assoclen -= 8; | 2683 | assoclen -= 8; |
2669 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : | ||
2670 | authsize); | ||
2671 | error = chcr_aead_common_init(req, op_type); | 2684 | error = chcr_aead_common_init(req, op_type); |
2672 | if (error) | 2685 | if (error) |
2673 | return ERR_PTR(error); | 2686 | return ERR_PTR(error); |
@@ -2677,15 +2690,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
2677 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); | 2690 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); |
2678 | if (error) | 2691 | if (error) |
2679 | goto err; | 2692 | goto err; |
2680 | if (dst_size) { | 2693 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2681 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); | 2694 | dnents += sg_nents_xlen(req->dst, req->cryptlen |
2682 | dnents += sg_nents_xlen(req->dst, req->cryptlen | 2695 | + (op_type ? -authsize : authsize), |
2683 | + (op_type ? -authsize : authsize), | 2696 | CHCR_DST_SG_SIZE, req->assoclen); |
2684 | CHCR_DST_SG_SIZE, req->assoclen); | 2697 | dnents += MIN_CCM_SG; // For IV and B0 |
2685 | dnents += MIN_CCM_SG; // For IV and B0 | ||
2686 | } else { | ||
2687 | dnents = 0; | ||
2688 | } | ||
2689 | dst_size = get_space_for_phys_dsgl(dnents); | 2698 | dst_size = get_space_for_phys_dsgl(dnents); |
2690 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; | 2699 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; |
2691 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | 2700 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
@@ -2780,19 +2789,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
2780 | assoclen = req->assoclen - 8; | 2789 | assoclen = req->assoclen - 8; |
2781 | 2790 | ||
2782 | reqctx->b0_dma = 0; | 2791 | reqctx->b0_dma = 0; |
2783 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize); | ||
2784 | error = chcr_aead_common_init(req, op_type); | 2792 | error = chcr_aead_common_init(req, op_type); |
2785 | if (error) | 2793 | if (error) |
2786 | return ERR_PTR(error); | 2794 | return ERR_PTR(error); |
2787 | if (dst_size) { | 2795 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2788 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); | 2796 | dnents += sg_nents_xlen(req->dst, req->cryptlen + |
2789 | dnents += sg_nents_xlen(req->dst, | 2797 | (op_type ? -authsize : authsize), |
2790 | req->cryptlen + (op_type ? -authsize : authsize), | ||
2791 | CHCR_DST_SG_SIZE, req->assoclen); | 2798 | CHCR_DST_SG_SIZE, req->assoclen); |
2792 | dnents += MIN_GCM_SG; // For IV | 2799 | dnents += MIN_GCM_SG; // For IV |
2793 | } else { | ||
2794 | dnents = 0; | ||
2795 | } | ||
2796 | dst_size = get_space_for_phys_dsgl(dnents); | 2800 | dst_size = get_space_for_phys_dsgl(dnents); |
2797 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + | 2801 | kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + |
2798 | AEAD_H_SIZE; | 2802 | AEAD_H_SIZE; |
@@ -2829,10 +2833,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
2829 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | 2833 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2830 | assoclen ? 1 : 0, assoclen, | 2834 | assoclen ? 1 : 0, assoclen, |
2831 | assoclen + IV + 1, 0); | 2835 | assoclen + IV + 1, 0); |
2832 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | 2836 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
2833 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, | 2837 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, |
2834 | temp, temp); | 2838 | temp, temp); |
2835 | chcr_req->sec_cpl.seqno_numivs = | 2839 | chcr_req->sec_cpl.seqno_numivs = |
2836 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == | 2840 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == |
2837 | CHCR_ENCRYPT_OP) ? 1 : 0, | 2841 | CHCR_ENCRYPT_OP) ? 1 : 0, |
2838 | CHCR_SCMD_CIPHER_MODE_AES_GCM, | 2842 | CHCR_SCMD_CIPHER_MODE_AES_GCM, |
@@ -3212,7 +3216,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
3212 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); | 3216 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
3213 | /* it contains auth and cipher key both*/ | 3217 | /* it contains auth and cipher key both*/ |
3214 | struct crypto_authenc_keys keys; | 3218 | struct crypto_authenc_keys keys; |
3215 | unsigned int bs; | 3219 | unsigned int bs, subtype; |
3216 | unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; | 3220 | unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; |
3217 | int err = 0, i, key_ctx_len = 0; | 3221 | int err = 0, i, key_ctx_len = 0; |
3218 | unsigned char ck_size = 0; | 3222 | unsigned char ck_size = 0; |
@@ -3241,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
3241 | pr_err("chcr : Unsupported digest size\n"); | 3245 | pr_err("chcr : Unsupported digest size\n"); |
3242 | goto out; | 3246 | goto out; |
3243 | } | 3247 | } |
3248 | subtype = get_aead_subtype(authenc); | ||
3249 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | ||
3250 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | ||
3251 | if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) | ||
3252 | goto out; | ||
3253 | memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen | ||
3254 | - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); | ||
3255 | keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; | ||
3256 | } | ||
3244 | if (keys.enckeylen == AES_KEYSIZE_128) { | 3257 | if (keys.enckeylen == AES_KEYSIZE_128) { |
3245 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | 3258 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
3246 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | 3259 | } else if (keys.enckeylen == AES_KEYSIZE_192) { |
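Both setkey paths now peel a 4-byte nonce off the end of the encryption key for the rfc3686 subtypes, matching how RFC 3686 transports keying material:

	/* rfc3686(ctr(aes)) encryption-key layout handed to setkey:
	 *
	 *   [ AES key: 16/24/32 bytes ][ nonce: 4 bytes ]
	 *
	 * The nonce is copied into aeadctx->nonce for later counter-block
	 * construction, and enckeylen is reduced by CTR_RFC3686_NONCE_SIZE
	 * before the AES key-size checks.
	 */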
@@ -3258,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
3258 | */ | 3271 | */ |
3259 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | 3272 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); |
3260 | aeadctx->enckey_len = keys.enckeylen; | 3273 | aeadctx->enckey_len = keys.enckeylen; |
3261 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | 3274 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || |
3262 | aeadctx->enckey_len << 3); | 3275 | subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { |
3263 | 3276 | ||
3277 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | ||
3278 | aeadctx->enckey_len << 3); | ||
3279 | } | ||
3264 | base_hash = chcr_alloc_shash(max_authsize); | 3280 | base_hash = chcr_alloc_shash(max_authsize); |
3265 | if (IS_ERR(base_hash)) { | 3281 | if (IS_ERR(base_hash)) { |
3266 | pr_err("chcr : Base driver cannot be loaded\n"); | 3282 | pr_err("chcr : Base driver cannot be loaded\n"); |
@@ -3333,6 +3349,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, | |||
3333 | struct crypto_authenc_keys keys; | 3349 | struct crypto_authenc_keys keys; |
3334 | int err; | 3350 | int err; |
3335 | /* it contains auth and cipher key both*/ | 3351 | /* it contains auth and cipher key both*/ |
3352 | unsigned int subtype; | ||
3336 | int key_ctx_len = 0; | 3353 | int key_ctx_len = 0; |
3337 | unsigned char ck_size = 0; | 3354 | unsigned char ck_size = 0; |
3338 | 3355 | ||
@@ -3350,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, | |||
3350 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); | 3367 | crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); |
3351 | goto out; | 3368 | goto out; |
3352 | } | 3369 | } |
3370 | subtype = get_aead_subtype(authenc); | ||
3371 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | ||
3372 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | ||
3373 | if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) | ||
3374 | goto out; | ||
3375 | memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen | ||
3376 | - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); | ||
3377 | keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; | ||
3378 | } | ||
3353 | if (keys.enckeylen == AES_KEYSIZE_128) { | 3379 | if (keys.enckeylen == AES_KEYSIZE_128) { |
3354 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | 3380 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; |
3355 | } else if (keys.enckeylen == AES_KEYSIZE_192) { | 3381 | } else if (keys.enckeylen == AES_KEYSIZE_192) { |
@@ -3357,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, | |||
3357 | } else if (keys.enckeylen == AES_KEYSIZE_256) { | 3383 | } else if (keys.enckeylen == AES_KEYSIZE_256) { |
3358 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | 3384 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; |
3359 | } else { | 3385 | } else { |
3360 | pr_err("chcr : Unsupported cipher key\n"); | 3386 | pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen); |
3361 | goto out; | 3387 | goto out; |
3362 | } | 3388 | } |
3363 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); | 3389 | memcpy(aeadctx->key, keys.enckey, keys.enckeylen); |
3364 | aeadctx->enckey_len = keys.enckeylen; | 3390 | aeadctx->enckey_len = keys.enckeylen; |
3365 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | 3391 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || |
3366 | aeadctx->enckey_len << 3); | 3392 | subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { |
3393 | get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, | ||
3394 | aeadctx->enckey_len << 3); | ||
3395 | } | ||
3367 | key_ctx_len = sizeof(struct _key_ctx) | 3396 | key_ctx_len = sizeof(struct _key_ctx) |
3368 | + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4); | 3397 | + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4); |
3369 | 3398 | ||
@@ -3375,6 +3404,40 @@ out: | |||
3375 | aeadctx->enckey_len = 0; | 3404 | aeadctx->enckey_len = 0; |
3376 | return -EINVAL; | 3405 | return -EINVAL; |
3377 | } | 3406 | } |
3407 | |||
3408 | static int chcr_aead_op(struct aead_request *req, | ||
3409 | unsigned short op_type, | ||
3410 | int size, | ||
3411 | create_wr_t create_wr_fn) | ||
3412 | { | ||
3413 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
3414 | struct uld_ctx *u_ctx; | ||
3415 | struct sk_buff *skb; | ||
3416 | |||
3417 | if (!a_ctx(tfm)->dev) { | ||
3418 | pr_err("chcr : %s : No crypto device.\n", __func__); | ||
3419 | return -ENXIO; | ||
3420 | } | ||
3421 | u_ctx = ULD_CTX(a_ctx(tfm)); | ||
3422 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | ||
3423 | a_ctx(tfm)->tx_qidx)) { | ||
3424 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
3425 | return -EBUSY; | ||
3426 | } | ||
3427 | |||
3428 | /* Form a WR from req */ | ||
3429 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, | ||
3430 | op_type); | ||
3431 | |||
3432 | if (IS_ERR(skb) || !skb) | ||
3433 | return PTR_ERR(skb); | ||
3434 | |||
3435 | skb->dev = u_ctx->lldi.ports[0]; | ||
3436 | set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); | ||
3437 | chcr_send_wr(skb); | ||
3438 | return -EINPROGRESS; | ||
3439 | } | ||
3440 | |||
3378 | static int chcr_aead_encrypt(struct aead_request *req) | 3441 | static int chcr_aead_encrypt(struct aead_request *req) |
3379 | { | 3442 | { |
3380 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 3443 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
@@ -3383,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req) | |||
3383 | reqctx->verify = VERIFY_HW; | 3446 | reqctx->verify = VERIFY_HW; |
3384 | 3447 | ||
3385 | switch (get_aead_subtype(tfm)) { | 3448 | switch (get_aead_subtype(tfm)) { |
3386 | case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: | 3449 | case CRYPTO_ALG_SUB_TYPE_CTR_SHA: |
3387 | case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: | 3450 | case CRYPTO_ALG_SUB_TYPE_CBC_SHA: |
3451 | case CRYPTO_ALG_SUB_TYPE_CBC_NULL: | ||
3452 | case CRYPTO_ALG_SUB_TYPE_CTR_NULL: | ||
3388 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, | 3453 | return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, |
3389 | create_authenc_wr); | 3454 | create_authenc_wr); |
3390 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | 3455 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: |
@@ -3413,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req) | |||
3413 | } | 3478 | } |
3414 | 3479 | ||
3415 | switch (get_aead_subtype(tfm)) { | 3480 | switch (get_aead_subtype(tfm)) { |
3416 | case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: | 3481 | case CRYPTO_ALG_SUB_TYPE_CBC_SHA: |
3417 | case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: | 3482 | case CRYPTO_ALG_SUB_TYPE_CTR_SHA: |
3483 | case CRYPTO_ALG_SUB_TYPE_CBC_NULL: | ||
3484 | case CRYPTO_ALG_SUB_TYPE_CTR_NULL: | ||
3418 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, | 3485 | return chcr_aead_op(req, CHCR_DECRYPT_OP, size, |
3419 | create_authenc_wr); | 3486 | create_authenc_wr); |
3420 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: | 3487 | case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: |
@@ -3427,38 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req) | |||
3427 | } | 3494 | } |
3428 | } | 3495 | } |
3429 | 3496 | ||
3430 | static int chcr_aead_op(struct aead_request *req, | ||
3431 | unsigned short op_type, | ||
3432 | int size, | ||
3433 | create_wr_t create_wr_fn) | ||
3434 | { | ||
3435 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
3436 | struct uld_ctx *u_ctx; | ||
3437 | struct sk_buff *skb; | ||
3438 | |||
3439 | if (!a_ctx(tfm)->dev) { | ||
3440 | pr_err("chcr : %s : No crypto device.\n", __func__); | ||
3441 | return -ENXIO; | ||
3442 | } | ||
3443 | u_ctx = ULD_CTX(a_ctx(tfm)); | ||
3444 | if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | ||
3445 | a_ctx(tfm)->tx_qidx)) { | ||
3446 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
3447 | return -EBUSY; | ||
3448 | } | ||
3449 | |||
3450 | /* Form a WR from req */ | ||
3451 | skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, | ||
3452 | op_type); | ||
3453 | |||
3454 | if (IS_ERR(skb) || !skb) | ||
3455 | return PTR_ERR(skb); | ||
3456 | |||
3457 | skb->dev = u_ctx->lldi.ports[0]; | ||
3458 | set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); | ||
3459 | chcr_send_wr(skb); | ||
3460 | return -EINPROGRESS; | ||
3461 | } | ||
3462 | static struct chcr_alg_template driver_algs[] = { | 3497 | static struct chcr_alg_template driver_algs[] = { |
3463 | /* AES-CBC */ | 3498 | /* AES-CBC */ |
3464 | { | 3499 | { |
@@ -3742,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3742 | } | 3777 | } |
3743 | }, | 3778 | }, |
3744 | { | 3779 | { |
3745 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | 3780 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
3746 | .is_registered = 0, | 3781 | .is_registered = 0, |
3747 | .alg.aead = { | 3782 | .alg.aead = { |
3748 | .base = { | 3783 | .base = { |
@@ -3763,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3763 | } | 3798 | } |
3764 | }, | 3799 | }, |
3765 | { | 3800 | { |
3766 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | 3801 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
3767 | .is_registered = 0, | 3802 | .is_registered = 0, |
3768 | .alg.aead = { | 3803 | .alg.aead = { |
3769 | .base = { | 3804 | .base = { |
@@ -3785,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3785 | } | 3820 | } |
3786 | }, | 3821 | }, |
3787 | { | 3822 | { |
3788 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | 3823 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
3789 | .is_registered = 0, | 3824 | .is_registered = 0, |
3790 | .alg.aead = { | 3825 | .alg.aead = { |
3791 | .base = { | 3826 | .base = { |
@@ -3805,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3805 | } | 3840 | } |
3806 | }, | 3841 | }, |
3807 | { | 3842 | { |
3808 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | 3843 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
3809 | .is_registered = 0, | 3844 | .is_registered = 0, |
3810 | .alg.aead = { | 3845 | .alg.aead = { |
3811 | .base = { | 3846 | .base = { |
@@ -3826,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3826 | } | 3861 | } |
3827 | }, | 3862 | }, |
3828 | { | 3863 | { |
3829 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, | 3864 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, |
3830 | .is_registered = 0, | 3865 | .is_registered = 0, |
3831 | .alg.aead = { | 3866 | .alg.aead = { |
3832 | .base = { | 3867 | .base = { |
@@ -3847,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = { | |||
3847 | } | 3882 | } |
3848 | }, | 3883 | }, |
3849 | { | 3884 | { |
3850 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL, | 3885 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL, |
3851 | .is_registered = 0, | 3886 | .is_registered = 0, |
3852 | .alg.aead = { | 3887 | .alg.aead = { |
3853 | .base = { | 3888 | .base = { |
@@ -3867,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = { | |||
3867 | .setauthsize = chcr_authenc_null_setauthsize, | 3902 | .setauthsize = chcr_authenc_null_setauthsize, |
3868 | } | 3903 | } |
3869 | }, | 3904 | }, |
3905 | { | ||
3906 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | ||
3907 | .is_registered = 0, | ||
3908 | .alg.aead = { | ||
3909 | .base = { | ||
3910 | .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", | ||
3911 | .cra_driver_name = | ||
3912 | "authenc-hmac-sha1-rfc3686-ctr-aes-chcr", | ||
3913 | .cra_blocksize = 1, | ||
3914 | .cra_priority = CHCR_AEAD_PRIORITY, | ||
3915 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
3916 | sizeof(struct chcr_aead_ctx) + | ||
3917 | sizeof(struct chcr_authenc_ctx), | ||
3918 | |||
3919 | }, | ||
3920 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
3921 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
3922 | .setkey = chcr_authenc_setkey, | ||
3923 | .setauthsize = chcr_authenc_setauthsize, | ||
3924 | } | ||
3925 | }, | ||
3926 | { | ||
3927 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | ||
3928 | .is_registered = 0, | ||
3929 | .alg.aead = { | ||
3930 | .base = { | ||
3931 | |||
3932 | .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", | ||
3933 | .cra_driver_name = | ||
3934 | "authenc-hmac-sha256-rfc3686-ctr-aes-chcr", | ||
3935 | .cra_blocksize = 1, | ||
3936 | .cra_priority = CHCR_AEAD_PRIORITY, | ||
3937 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
3938 | sizeof(struct chcr_aead_ctx) + | ||
3939 | sizeof(struct chcr_authenc_ctx), | ||
3940 | |||
3941 | }, | ||
3942 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
3943 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
3944 | .setkey = chcr_authenc_setkey, | ||
3945 | .setauthsize = chcr_authenc_setauthsize, | ||
3946 | } | ||
3947 | }, | ||
3948 | { | ||
3949 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | ||
3950 | .is_registered = 0, | ||
3951 | .alg.aead = { | ||
3952 | .base = { | ||
3953 | .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", | ||
3954 | .cra_driver_name = | ||
3955 | "authenc-hmac-sha224-rfc3686-ctr-aes-chcr", | ||
3956 | .cra_blocksize = 1, | ||
3957 | .cra_priority = CHCR_AEAD_PRIORITY, | ||
3958 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
3959 | sizeof(struct chcr_aead_ctx) + | ||
3960 | sizeof(struct chcr_authenc_ctx), | ||
3961 | }, | ||
3962 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
3963 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
3964 | .setkey = chcr_authenc_setkey, | ||
3965 | .setauthsize = chcr_authenc_setauthsize, | ||
3966 | } | ||
3967 | }, | ||
3968 | { | ||
3969 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | ||
3970 | .is_registered = 0, | ||
3971 | .alg.aead = { | ||
3972 | .base = { | ||
3973 | .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", | ||
3974 | .cra_driver_name = | ||
3975 | "authenc-hmac-sha384-rfc3686-ctr-aes-chcr", | ||
3976 | .cra_blocksize = 1, | ||
3977 | .cra_priority = CHCR_AEAD_PRIORITY, | ||
3978 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
3979 | sizeof(struct chcr_aead_ctx) + | ||
3980 | sizeof(struct chcr_authenc_ctx), | ||
3981 | |||
3982 | }, | ||
3983 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
3984 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
3985 | .setkey = chcr_authenc_setkey, | ||
3986 | .setauthsize = chcr_authenc_setauthsize, | ||
3987 | } | ||
3988 | }, | ||
3989 | { | ||
3990 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, | ||
3991 | .is_registered = 0, | ||
3992 | .alg.aead = { | ||
3993 | .base = { | ||
3994 | .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", | ||
3995 | .cra_driver_name = | ||
3996 | "authenc-hmac-sha512-rfc3686-ctr-aes-chcr", | ||
3997 | .cra_blocksize = 1, | ||
3998 | .cra_priority = CHCR_AEAD_PRIORITY, | ||
3999 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
4000 | sizeof(struct chcr_aead_ctx) + | ||
4001 | sizeof(struct chcr_authenc_ctx), | ||
4002 | |||
4003 | }, | ||
4004 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
4005 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
4006 | .setkey = chcr_authenc_setkey, | ||
4007 | .setauthsize = chcr_authenc_setauthsize, | ||
4008 | } | ||
4009 | }, | ||
4010 | { | ||
4011 | .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL, | ||
4012 | .is_registered = 0, | ||
4013 | .alg.aead = { | ||
4014 | .base = { | ||
4015 | .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))", | ||
4016 | .cra_driver_name = | ||
4017 | "authenc-digest_null-rfc3686-ctr-aes-chcr", | ||
4018 | .cra_blocksize = 1, | ||
4019 | .cra_priority = CHCR_AEAD_PRIORITY, | ||
4020 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
4021 | sizeof(struct chcr_aead_ctx) + | ||
4022 | sizeof(struct chcr_authenc_ctx), | ||
4023 | |||
4024 | }, | ||
4025 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
4026 | .maxauthsize = 0, | ||
4027 | .setkey = chcr_aead_digest_null_setkey, | ||
4028 | .setauthsize = chcr_authenc_null_setauthsize, | ||
4029 | } | ||
4030 | }, | ||
4031 | |||
3870 | }; | 4032 | }; |
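The six templates added above all wrap authenc() around rfc3686(ctr(aes)). With RFC 3686 the transform keeps a 4-byte nonce (peeled off the tail of the key, which is why the setkey path stores it in the new aead_ctx nonce[] field), takes an 8-byte per-request IV (ivsize = CTR_RFC3686_IV_SIZE), and counts blocks with a 4-byte big-endian counter starting at 1; cra_blocksize is 1 because CTR turns AES into a stream cipher. A minimal sketch of how the 16-byte counter block is assembled — the function name here is illustrative, not the driver's:

#include <stdint.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE    8

/* Initial AES-CTR counter block per RFC 3686: nonce || IV || be32(1). */
static void rfc3686_counter_block(uint8_t block[16],
				  const uint8_t nonce[CTR_RFC3686_NONCE_SIZE],
				  const uint8_t iv[CTR_RFC3686_IV_SIZE])
{
	memcpy(block, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(block + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
	block[12] = 0;		/* 32-bit block counter, big endian, */
	block[13] = 0;		/* initialized to 1 */
	block[14] = 0;
	block[15] = 1;
}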
3871 | 4033 | ||
3872 | /* | 4034 | /* |
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index 96c9335ee728..d1673a5d4bf5 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h | |||
@@ -226,15 +226,6 @@ | |||
226 | #define SPACE_LEFT(len) \ | 226 | #define SPACE_LEFT(len) \ |
227 | ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) | 227 | ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) |
228 | 228 | ||
229 | unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88, | ||
230 | 96, 112, 120, 136, 144, 160, 168, 184, | ||
231 | 192, 208, 216, 232, 240, 256, 264, 280, | ||
232 | 288, 304, 312, 328, 336, 352, 360, 376}; | ||
233 | unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, | ||
234 | 112, 112, 128, 128, 144, 144, 160, 160, | ||
235 | 192, 192, 208, 208, 224, 224, 240, 240, | ||
236 | 272, 272, 288, 288, 304, 304, 320, 320}; | ||
237 | |||
238 | struct algo_param { | 229 | struct algo_param { |
239 | unsigned int auth_mode; | 230 | unsigned int auth_mode; |
240 | unsigned int mk_size; | 231 | unsigned int mk_size; |
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w) | |||
404 | return *(u32 *)(&bytes[0]); | 395 | return *(u32 *)(&bytes[0]); |
405 | } | 396 | } |
406 | 397 | ||
407 | static u32 round_constant[11] = { | ||
408 | 0x01000000, 0x02000000, 0x04000000, 0x08000000, | ||
409 | 0x10000000, 0x20000000, 0x40000000, 0x80000000, | ||
410 | 0x1B000000, 0x36000000, 0x6C000000 | ||
411 | }; | ||
412 | |||
413 | #endif /* __CHCR_ALGO_H__ */ | 398 | #endif /* __CHCR_ALGO_H__ */ |
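Dropping sgl_ent_len[], dsgl_ent_len[] and round_constant[] from this header is not just tidying: a non-static, initialized definition in a header is instantiated by every translation unit that includes it, so the moment a second .c file (here the new chcr_ipsec.c) pulls in chcr_algo.h, the link fails with multiple-definition errors. The same motivation drives the chcr_crypto.h hunk further down, where static inline stubs become ordinary extern prototypes shared between chcr_algo.c and chcr_ipsec.c. A reduced illustration, with hypothetical names:

/* bad.h — this is a definition, not a declaration */
unsigned int table[4] = { 0, 16, 24, 40 };

/*
 * a.c and b.c both #include "bad.h"; linking a.o with b.o then fails
 * with "multiple definition of `table'".  The fix is the usual split:
 * put "extern unsigned int table[4];" in the header and keep the
 * initialized definition in exactly one .c file (or, if per-unit
 * copies are acceptable, mark the array static const).
 */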
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index f5a2624081dc..04f277cade7c 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c | |||
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = { | |||
48 | .add = chcr_uld_add, | 48 | .add = chcr_uld_add, |
49 | .state_change = chcr_uld_state_change, | 49 | .state_change = chcr_uld_state_change, |
50 | .rx_handler = chcr_uld_rx_handler, | 50 | .rx_handler = chcr_uld_rx_handler, |
51 | #ifdef CONFIG_CHELSIO_IPSEC_INLINE | ||
52 | .tx_handler = chcr_uld_tx_handler, | ||
53 | #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ | ||
51 | }; | 54 | }; |
52 | 55 | ||
53 | struct uld_ctx *assign_chcr_device(void) | 56 | struct uld_ctx *assign_chcr_device(void) |
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld) | |||
164 | goto out; | 167 | goto out; |
165 | } | 168 | } |
166 | u_ctx->lldi = *lld; | 169 | u_ctx->lldi = *lld; |
170 | #ifdef CONFIG_CHELSIO_IPSEC_INLINE | ||
171 | if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE) | ||
172 | chcr_add_xfrmops(lld); | ||
173 | #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ | ||
167 | out: | 174 | out: |
168 | return u_ctx; | 175 | return u_ctx; |
169 | } | 176 | } |
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp, | |||
187 | return 0; | 194 | return 0; |
188 | } | 195 | } |
189 | 196 | ||
197 | #ifdef CONFIG_CHELSIO_IPSEC_INLINE | ||
198 | int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev) | ||
199 | { | ||
200 | return chcr_ipsec_xmit(skb, dev); | ||
201 | } | ||
202 | #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ | ||
203 | |||
190 | static int chcr_uld_state_change(void *handle, enum cxgb4_state state) | 204 | static int chcr_uld_state_change(void *handle, enum cxgb4_state state) |
191 | { | 205 | { |
192 | struct uld_ctx *u_ctx = handle; | 206 | struct uld_ctx *u_ctx = handle; |
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index 94e7412f6164..3c29ee09b8b5 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <crypto/algapi.h> | 39 | #include <crypto/algapi.h> |
40 | #include "t4_hw.h" | 40 | #include "t4_hw.h" |
41 | #include "cxgb4.h" | 41 | #include "cxgb4.h" |
42 | #include "t4_msg.h" | ||
42 | #include "cxgb4_uld.h" | 43 | #include "cxgb4_uld.h" |
43 | 44 | ||
44 | #define DRV_MODULE_NAME "chcr" | 45 | #define DRV_MODULE_NAME "chcr" |
@@ -89,12 +90,49 @@ struct uld_ctx { | |||
89 | struct chcr_dev *dev; | 90 | struct chcr_dev *dev; |
90 | }; | 91 | }; |
91 | 92 | ||
93 | struct chcr_ipsec_req { | ||
94 | struct ulp_txpkt ulptx; | ||
95 | struct ulptx_idata sc_imm; | ||
96 | struct cpl_tx_sec_pdu sec_cpl; | ||
97 | struct _key_ctx key_ctx; | ||
98 | }; | ||
99 | |||
100 | struct chcr_ipsec_wr { | ||
101 | struct fw_ulptx_wr wreq; | ||
102 | struct chcr_ipsec_req req; | ||
103 | }; | ||
104 | |||
105 | struct ipsec_sa_entry { | ||
106 | int hmac_ctrl; | ||
107 | unsigned int enckey_len; | ||
108 | unsigned int kctx_len; | ||
109 | unsigned int authsize; | ||
110 | __be32 key_ctx_hdr; | ||
111 | char salt[MAX_SALT]; | ||
112 | char key[2 * AES_MAX_KEY_SIZE]; | ||
113 | }; | ||
114 | |||
115 | /* | ||
116 | * sgl_len - calculates the size of an SGL of the given capacity | ||
117 | * @n: the number of SGL entries | ||
118 | * Calculates the number of flits needed for a scatter/gather list that | ||
119 | * can hold the given number of entries. | ||
120 | */ | ||
121 | static inline unsigned int sgl_len(unsigned int n) | ||
122 | { | ||
123 | n--; | ||
124 | return (3 * n) / 2 + (n & 1) + 2; | ||
125 | } | ||
126 | |||
92 | struct uld_ctx *assign_chcr_device(void); | 127 | struct uld_ctx *assign_chcr_device(void); |
93 | int chcr_send_wr(struct sk_buff *skb); | 128 | int chcr_send_wr(struct sk_buff *skb); |
94 | int start_crypto(void); | 129 | int start_crypto(void); |
95 | int stop_crypto(void); | 130 | int stop_crypto(void); |
96 | int chcr_uld_rx_handler(void *handle, const __be64 *rsp, | 131 | int chcr_uld_rx_handler(void *handle, const __be64 *rsp, |
97 | const struct pkt_gl *pgl); | 132 | const struct pkt_gl *pgl); |
133 | int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev); | ||
98 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | 134 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, |
99 | int err); | 135 | int err); |
136 | int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev); | ||
137 | void chcr_add_xfrmops(const struct cxgb4_lld_info *lld); | ||
100 | #endif /* __CHCR_CORE_H__ */ | 138 | #endif /* __CHCR_CORE_H__ */ |
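The sgl_len() helper added above encodes the cxgb4 ulptx SGL layout in flit (8-byte) units: the ulptx_sgl header carries the command word plus the first address/length pair in two flits, every further pair of entries packs into a 24-byte ulptx_sge_pair (three flits), and a lone trailing entry pads out to two flits. A self-checking sketch of the arithmetic, assuming exactly that layout:

#include <assert.h>

static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	assert(sgl_len(1) == 2);	/* header only */
	assert(sgl_len(2) == 4);	/* header + padded lone entry */
	assert(sgl_len(3) == 5);	/* header + one full sge pair */
	assert(sgl_len(4) == 7);	/* header + pair + lone entry */
	return 0;
}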
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 94a87e3ad9bc..7daf0a17a7d2 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h | |||
@@ -134,14 +134,16 @@ | |||
134 | #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000 | 134 | #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000 |
135 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000 | 135 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000 |
136 | #define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000 | 136 | #define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000 |
137 | #define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000 | 137 | #define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000 |
138 | #define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000 | 138 | #define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000 |
139 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 | 139 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 |
140 | #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000 | 140 | #define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000 |
141 | #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 | 141 | #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 |
142 | #define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000 | 142 | #define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000 |
143 | #define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000 | 143 | #define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000 |
144 | #define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000 | 144 | #define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000 |
145 | #define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000 | ||
146 | #define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000 | ||
145 | #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ | 147 | #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ |
146 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) | 148 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) |
147 | 149 | ||
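All the subtype constants occupy bits 24-27, deliberately disjoint from the CRYPTO_ALG_TYPE_* bits they are OR'd with in driver_algs[], so the encrypt/decrypt switches earlier in this patch can recover the subtype with a single mask. A sketch of the extraction, assuming the 0x0f000000 subtype mask the driver's headers use (the names here are illustrative):

#define CHCR_SUB_TYPE_MASK	0x0f000000	/* bits 24..27 */

/* Recover the chcr-private subtype from the combined type word. */
static inline unsigned int chcr_subtype(unsigned int type)
{
	return type & CHCR_SUB_TYPE_MASK;
}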
@@ -210,8 +212,6 @@ struct dsgl_walk { | |||
210 | struct phys_sge_pairs *to; | 212 | struct phys_sge_pairs *to; |
211 | }; | 213 | }; |
212 | 214 | ||
213 | |||
214 | |||
215 | struct chcr_gcm_ctx { | 215 | struct chcr_gcm_ctx { |
216 | u8 ghash_h[AEAD_H_SIZE]; | 216 | u8 ghash_h[AEAD_H_SIZE]; |
217 | }; | 217 | }; |
@@ -227,21 +227,18 @@ struct __aead_ctx { | |||
227 | struct chcr_authenc_ctx authenc[0]; | 227 | struct chcr_authenc_ctx authenc[0]; |
228 | }; | 228 | }; |
229 | 229 | ||
230 | |||
231 | |||
232 | struct chcr_aead_ctx { | 230 | struct chcr_aead_ctx { |
233 | __be32 key_ctx_hdr; | 231 | __be32 key_ctx_hdr; |
234 | unsigned int enckey_len; | 232 | unsigned int enckey_len; |
235 | struct crypto_aead *sw_cipher; | 233 | struct crypto_aead *sw_cipher; |
236 | u8 salt[MAX_SALT]; | 234 | u8 salt[MAX_SALT]; |
237 | u8 key[CHCR_AES_MAX_KEY_LEN]; | 235 | u8 key[CHCR_AES_MAX_KEY_LEN]; |
236 | u8 nonce[4]; | ||
238 | u16 hmac_ctrl; | 237 | u16 hmac_ctrl; |
239 | u16 mayverify; | 238 | u16 mayverify; |
240 | struct __aead_ctx ctx[0]; | 239 | struct __aead_ctx ctx[0]; |
241 | }; | 240 | }; |
242 | 241 | ||
243 | |||
244 | |||
245 | struct hmac_ctx { | 242 | struct hmac_ctx { |
246 | struct crypto_shash *base_hash; | 243 | struct crypto_shash *base_hash; |
247 | u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; | 244 | u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; |
@@ -307,44 +304,29 @@ typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, | |||
307 | int size, | 304 | int size, |
308 | unsigned short op_type); | 305 | unsigned short op_type); |
309 | 306 | ||
310 | static int chcr_aead_op(struct aead_request *req_base, | 307 | void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); |
311 | unsigned short op_type, | 308 | int chcr_aead_dma_map(struct device *dev, struct aead_request *req, |
312 | int size, | 309 | unsigned short op_type); |
313 | create_wr_t create_wr_fn); | 310 | void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req, |
314 | static inline int get_aead_subtype(struct crypto_aead *aead); | 311 | unsigned short op_type); |
315 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | 312 | void chcr_add_aead_dst_ent(struct aead_request *req, |
316 | unsigned char *input, int err); | 313 | struct cpl_rx_phys_dsgl *phys_cpl, |
317 | static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); | 314 | unsigned int assoclen, unsigned short op_type, |
318 | static int chcr_aead_dma_map(struct device *dev, struct aead_request *req, | 315 | unsigned short qid); |
319 | unsigned short op_type); | 316 | void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, |
320 | static void chcr_aead_dma_unmap(struct device *dev, struct aead_request | 317 | unsigned int assoclen, unsigned short op_type); |
321 | *req, unsigned short op_type); | 318 | void chcr_add_cipher_src_ent(struct ablkcipher_request *req, |
322 | static inline void chcr_add_aead_dst_ent(struct aead_request *req, | 319 | struct ulptx_sgl *ulptx, |
323 | struct cpl_rx_phys_dsgl *phys_cpl, | 320 | struct cipher_wr_param *wrparam); |
324 | unsigned int assoclen, | 321 | int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req); |
325 | unsigned short op_type, | 322 | void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req); |
326 | unsigned short qid); | 323 | void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, |
327 | static inline void chcr_add_aead_src_ent(struct aead_request *req, | 324 | struct cpl_rx_phys_dsgl *phys_cpl, |
328 | struct ulptx_sgl *ulptx, | 325 | struct cipher_wr_param *wrparam, |
329 | unsigned int assoclen, | 326 | unsigned short qid); |
330 | unsigned short op_type); | ||
331 | static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, | ||
332 | struct ulptx_sgl *ulptx, | ||
333 | struct cipher_wr_param *wrparam); | ||
334 | static int chcr_cipher_dma_map(struct device *dev, | ||
335 | struct ablkcipher_request *req); | ||
336 | static void chcr_cipher_dma_unmap(struct device *dev, | ||
337 | struct ablkcipher_request *req); | ||
338 | static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, | ||
339 | struct cpl_rx_phys_dsgl *phys_cpl, | ||
340 | struct cipher_wr_param *wrparam, | ||
341 | unsigned short qid); | ||
342 | int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip); | 327 | int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip); |
343 | static inline void chcr_add_hash_src_ent(struct ahash_request *req, | 328 | void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx, |
344 | struct ulptx_sgl *ulptx, | 329 | struct hash_wr_param *param); |
345 | struct hash_wr_param *param); | 330 | int chcr_hash_dma_map(struct device *dev, struct ahash_request *req); |
346 | static inline int chcr_hash_dma_map(struct device *dev, | 331 | void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req); |
347 | struct ahash_request *req); | ||
348 | static inline void chcr_hash_dma_unmap(struct device *dev, | ||
349 | struct ahash_request *req); | ||
350 | #endif /* __CHCR_CRYPTO_H__ */ | 332 | #endif /* __CHCR_CRYPTO_H__ */ |
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c new file mode 100644 index 000000000000..db1e241104ed --- /dev/null +++ b/drivers/crypto/chelsio/chcr_ipsec.c | |||
@@ -0,0 +1,654 @@ | |||
1 | /* | ||
2 | * This file is part of the Chelsio T6 Crypto driver for Linux. | ||
3 | * | ||
4 | * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | * | ||
34 | * Written and Maintained by: | ||
35 | * Atul Gupta (atul.gupta@chelsio.com) | ||
36 | */ | ||
37 | |||
38 | #define pr_fmt(fmt) "chcr:" fmt | ||
39 | |||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/module.h> | ||
42 | #include <linux/crypto.h> | ||
43 | #include <linux/cryptohash.h> | ||
44 | #include <linux/skbuff.h> | ||
45 | #include <linux/rtnetlink.h> | ||
46 | #include <linux/highmem.h> | ||
47 | #include <linux/if_vlan.h> | ||
48 | #include <linux/ip.h> | ||
49 | #include <linux/netdevice.h> | ||
50 | #include <net/esp.h> | ||
51 | #include <net/xfrm.h> | ||
52 | #include <crypto/aes.h> | ||
53 | #include <crypto/algapi.h> | ||
54 | #include <crypto/hash.h> | ||
55 | #include <crypto/sha.h> | ||
56 | #include <crypto/authenc.h> | ||
57 | #include <crypto/internal/aead.h> | ||
58 | #include <crypto/null.h> | ||
59 | #include <crypto/internal/skcipher.h> | ||
60 | #include <crypto/aead.h> | ||
61 | #include <crypto/scatterwalk.h> | ||
62 | #include <crypto/internal/hash.h> | ||
63 | |||
64 | #include "chcr_core.h" | ||
65 | #include "chcr_algo.h" | ||
66 | #include "chcr_crypto.h" | ||
67 | |||
68 | /* | ||
69 | * Max Tx descriptor space we allow for an Ethernet packet to be inlined | ||
70 | * into a WR. | ||
71 | */ | ||
72 | #define MAX_IMM_TX_PKT_LEN 256 | ||
73 | #define GCM_ESP_IV_SIZE 8 | ||
74 | |||
75 | static int chcr_xfrm_add_state(struct xfrm_state *x); | ||
76 | static void chcr_xfrm_del_state(struct xfrm_state *x); | ||
77 | static void chcr_xfrm_free_state(struct xfrm_state *x); | ||
78 | static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x); | ||
79 | |||
80 | static const struct xfrmdev_ops chcr_xfrmdev_ops = { | ||
81 | .xdo_dev_state_add = chcr_xfrm_add_state, | ||
82 | .xdo_dev_state_delete = chcr_xfrm_del_state, | ||
83 | .xdo_dev_state_free = chcr_xfrm_free_state, | ||
84 | .xdo_dev_offload_ok = chcr_ipsec_offload_ok, | ||
85 | }; | ||
86 | |||
87 | /* Register xfrm offload ops on the Chelsio interfaces */ | ||
88 | void chcr_add_xfrmops(const struct cxgb4_lld_info *lld) | ||
89 | { | ||
90 | struct net_device *netdev = NULL; | ||
91 | int i; | ||
92 | |||
93 | for (i = 0; i < lld->nports; i++) { | ||
94 | netdev = lld->ports[i]; | ||
95 | if (!netdev) | ||
96 | continue; | ||
97 | netdev->xfrmdev_ops = &chcr_xfrmdev_ops; | ||
98 | netdev->hw_enc_features |= NETIF_F_HW_ESP; | ||
99 | netdev->features |= NETIF_F_HW_ESP; | ||
100 | rtnl_lock(); | ||
101 | netdev_change_features(netdev); | ||
102 | rtnl_unlock(); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | static inline int chcr_ipsec_setauthsize(struct xfrm_state *x, | ||
107 | struct ipsec_sa_entry *sa_entry) | ||
108 | { | ||
109 | int hmac_ctrl; | ||
110 | int authsize = x->aead->alg_icv_len / 8; | ||
111 | |||
112 | sa_entry->authsize = authsize; | ||
113 | |||
114 | switch (authsize) { | ||
115 | case ICV_8: | ||
116 | hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; | ||
117 | break; | ||
118 | case ICV_12: | ||
119 | hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; | ||
120 | break; | ||
121 | case ICV_16: | ||
122 | hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; | ||
123 | break; | ||
124 | default: | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | return hmac_ctrl; | ||
128 | } | ||
129 | |||
130 | static inline int chcr_ipsec_setkey(struct xfrm_state *x, | ||
131 | struct ipsec_sa_entry *sa_entry) | ||
132 | { | ||
133 | struct crypto_cipher *cipher; | ||
134 | int keylen = (x->aead->alg_key_len + 7) / 8; | ||
135 | unsigned char *key = x->aead->alg_key; | ||
136 | int ck_size, key_ctx_size = 0; | ||
137 | unsigned char ghash_h[AEAD_H_SIZE]; | ||
138 | int ret = 0; | ||
139 | |||
140 | if (keylen > 3) { | ||
141 | keylen -= 4; /* nonce/salt is present in the last 4 bytes */ | ||
142 | memcpy(sa_entry->salt, key + keylen, 4); | ||
143 | } | ||
144 | |||
145 | if (keylen == AES_KEYSIZE_128) { | ||
146 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
147 | } else if (keylen == AES_KEYSIZE_192) { | ||
148 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
149 | } else if (keylen == AES_KEYSIZE_256) { | ||
150 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
151 | } else { | ||
152 | pr_err("GCM: Invalid key length %d\n", keylen); | ||
153 | ret = -EINVAL; | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | memcpy(sa_entry->key, key, keylen); | ||
158 | sa_entry->enckey_len = keylen; | ||
159 | key_ctx_size = sizeof(struct _key_ctx) + | ||
160 | ((DIV_ROUND_UP(keylen, 16)) << 4) + | ||
161 | AEAD_H_SIZE; | ||
162 | |||
163 | sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, | ||
164 | CHCR_KEYCTX_MAC_KEY_SIZE_128, | ||
165 | 0, 0, | ||
166 | key_ctx_size >> 4); | ||
167 | |||
168 | /* Calculate H = CIPH(K, 0 repeated 16 times). | ||
169 | * It will go into the key context. | ||
170 | */ | ||
171 | cipher = crypto_alloc_cipher("aes-generic", 0, 0); | ||
172 | if (IS_ERR(cipher)) { | ||
173 | sa_entry->enckey_len = 0; | ||
174 | ret = -ENOMEM; | ||
175 | goto out; | ||
176 | } | ||
177 | |||
178 | ret = crypto_cipher_setkey(cipher, key, keylen); | ||
179 | if (ret) { | ||
180 | sa_entry->enckey_len = 0; | ||
181 | goto out1; | ||
182 | } | ||
183 | memset(ghash_h, 0, AEAD_H_SIZE); | ||
184 | crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); | ||
185 | memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * | ||
186 | 16), ghash_h, AEAD_H_SIZE); | ||
187 | sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) + | ||
188 | AEAD_H_SIZE; | ||
189 | out1: | ||
190 | crypto_free_cipher(cipher); | ||
191 | out: | ||
192 | return ret; | ||
193 | } | ||
194 | |||
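chcr_ipsec_setkey() works on the rfc4106-style key blob, where the last four bytes are the salt rather than AES key material, and appends the GHASH subkey H = AES_K(0^128) after the cipher key, with each component padded to a 16-byte boundary. Those are the only inputs to kctx_len, so the sizes can be checked by hand; a small worked sketch (helper names are illustrative):

#include <stdio.h>

#define AEAD_H_SIZE 16			/* GHASH subkey H */

/* Round up to a 16-byte boundary, like DIV_ROUND_UP(len, 16) << 4. */
static unsigned int round16(unsigned int len)
{
	return ((len + 15) / 16) * 16;
}

int main(void)
{
	/* alg_key_len is in bits: AES key plus 32-bit salt */
	unsigned int key128 = (128 + 32) / 8 - 4;	/* 16 bytes */
	unsigned int key256 = (256 + 32) / 8 - 4;	/* 32 bytes */

	printf("kctx_len(AES-128) = %u\n", round16(key128) + AEAD_H_SIZE); /* 32 */
	printf("kctx_len(AES-256) = %u\n", round16(key256) + AEAD_H_SIZE); /* 48 */
	return 0;
}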
195 | /* | ||
196 | * chcr_xfrm_add_state | ||
197 | * Returns 0 on success, a negative error if sending the message to the | ||
198 | * FPGA failed, or a positive error if the FPGA returned a bad response. | ||
199 | */ | ||
200 | static int chcr_xfrm_add_state(struct xfrm_state *x) | ||
201 | { | ||
202 | struct ipsec_sa_entry *sa_entry; | ||
203 | int res = 0; | ||
204 | |||
205 | if (x->props.aalgo != SADB_AALG_NONE) { | ||
206 | pr_debug("CHCR: Cannot offload authenticated xfrm states\n"); | ||
207 | return -EINVAL; | ||
208 | } | ||
209 | if (x->props.calgo != SADB_X_CALG_NONE) { | ||
210 | pr_debug("CHCR: Cannot offload compressed xfrm states\n"); | ||
211 | return -EINVAL; | ||
212 | } | ||
213 | if (x->props.flags & XFRM_STATE_ESN) { | ||
214 | pr_debug("CHCR: Cannot offload ESN xfrm states\n"); | ||
215 | return -EINVAL; | ||
216 | } | ||
217 | if (x->props.family != AF_INET && | ||
218 | x->props.family != AF_INET6) { | ||
219 | pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n"); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | if (x->props.mode != XFRM_MODE_TRANSPORT && | ||
223 | x->props.mode != XFRM_MODE_TUNNEL) { | ||
224 | pr_debug("CHCR: Only transport and tunnel xfrm offload\n"); | ||
225 | return -EINVAL; | ||
226 | } | ||
227 | if (x->id.proto != IPPROTO_ESP) { | ||
228 | pr_debug("CHCR: Only ESP xfrm state offloaded\n"); | ||
229 | return -EINVAL; | ||
230 | } | ||
231 | if (x->encap) { | ||
232 | pr_debug("CHCR: Encapsulated xfrm state not offloaded\n"); | ||
233 | return -EINVAL; | ||
234 | } | ||
235 | if (!x->aead) { | ||
236 | pr_debug("CHCR: Cannot offload xfrm states without aead\n"); | ||
237 | return -EINVAL; | ||
238 | } | ||
239 | if (x->aead->alg_icv_len != 128 && | ||
240 | x->aead->alg_icv_len != 96) { | ||
241 | pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n"); | ||
242 | return -EINVAL; | ||
243 | } | ||
244 | if ((x->aead->alg_key_len != 128 + 32) && | ||
245 | (x->aead->alg_key_len != 256 + 32)) { | ||
246 | pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); | ||
247 | return -EINVAL; | ||
248 | } | ||
249 | if (x->tfcpad) { | ||
250 | pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n"); | ||
251 | return -EINVAL; | ||
252 | } | ||
253 | if (!x->geniv) { | ||
254 | pr_debug("CHCR: Cannot offload xfrm states without geniv\n"); | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | if (strcmp(x->geniv, "seqiv")) { | ||
258 | pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n"); | ||
259 | return -EINVAL; | ||
260 | } | ||
261 | |||
262 | sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); | ||
263 | if (!sa_entry) { | ||
264 | res = -ENOMEM; | ||
265 | goto out; | ||
266 | } | ||
267 | |||
268 | sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry); | ||
269 | chcr_ipsec_setkey(x, sa_entry); | ||
270 | x->xso.offload_handle = (unsigned long)sa_entry; | ||
271 | try_module_get(THIS_MODULE); | ||
272 | out: | ||
273 | return res; | ||
274 | } | ||
275 | |||
276 | static void chcr_xfrm_del_state(struct xfrm_state *x) | ||
277 | { | ||
278 | /* do nothing */ | ||
279 | if (!x->xso.offload_handle) | ||
280 | return; | ||
281 | } | ||
282 | |||
283 | static void chcr_xfrm_free_state(struct xfrm_state *x) | ||
284 | { | ||
285 | struct ipsec_sa_entry *sa_entry; | ||
286 | |||
287 | if (!x->xso.offload_handle) | ||
288 | return; | ||
289 | |||
290 | sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle; | ||
291 | kfree(sa_entry); | ||
292 | module_put(THIS_MODULE); | ||
293 | } | ||
294 | |||
295 | static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) | ||
296 | { | ||
297 | /* Offload with IP options is not supported yet */ | ||
298 | if (ip_hdr(skb)->ihl > 5) | ||
299 | return false; | ||
300 | |||
301 | return true; | ||
302 | } | ||
303 | |||
304 | static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len) | ||
305 | { | ||
306 | int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len; | ||
307 | |||
308 | hdrlen += sizeof(struct cpl_tx_pkt); | ||
309 | if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) | ||
310 | return hdrlen; | ||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb, | ||
315 | unsigned int kctx_len) | ||
316 | { | ||
317 | unsigned int flits; | ||
318 | int hdrlen = is_eth_imm(skb, kctx_len); | ||
319 | |||
320 | /* If the skb is small enough, we can pump it out as a work request | ||
321 | * with only immediate data. In that case the Work Request needs | ||
322 | * just the TX Packet header plus the skb data. | ||
323 | */ | ||
324 | |||
325 | if (hdrlen) | ||
326 | return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); | ||
327 | |||
328 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); | ||
329 | |||
330 | /* Otherwise, we're going to have to construct a scatter/gather list | ||
331 | * of the skb body and fragments. We also include the flits necessary | ||
332 | * for the TX Packet Work Request and CPL. We always have a firmware | ||
333 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and | ||
334 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL | ||
335 | * message or, if we're doing a Large Send Offload, an LSO CPL message | ||
336 | * with an embedded TX Packet Write CPL message. | ||
337 | */ | ||
338 | flits += (sizeof(struct fw_ulptx_wr) + | ||
339 | sizeof(struct chcr_ipsec_req) + | ||
340 | kctx_len + | ||
341 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | ||
342 | return flits; | ||
343 | } | ||
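calc_tx_sec_flits() therefore has two regimes: a packet small enough for is_eth_imm() is charged ceil((skb->len + hdrlen) / 8) flits of pure immediate data, while anything larger pays sgl_len() for the payload plus a fixed header tax. A worked instance with a hypothetical header total (the real one is the sum of the struct sizes above):

#include <assert.h>

#define MAX_IMM_TX_PKT_LEN 256
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hdrlen = 120;	/* hypothetical WR + CPL + key bytes */
	unsigned int skb_len = 100;

	assert(skb_len <= MAX_IMM_TX_PKT_LEN - hdrlen);	/* immediate path */
	unsigned int flits = DIV_ROUND_UP(skb_len + hdrlen, 8);	/* 28 */
	unsigned int ndesc = DIV_ROUND_UP(flits, 8);	/* flits_to_desc(): 4 */
	assert(flits == 28 && ndesc == 4);
	return 0;
}

The flits_to_desc() helper and the credit check in chcr_ipsec_xmit() further down consume exactly this number.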
344 | |||
345 | inline void *copy_cpltx_pktxt(struct sk_buff *skb, | ||
346 | struct net_device *dev, | ||
347 | void *pos) | ||
348 | { | ||
349 | struct adapter *adap; | ||
350 | struct port_info *pi; | ||
351 | struct sge_eth_txq *q; | ||
352 | struct cpl_tx_pkt_core *cpl; | ||
353 | u64 cntrl = 0; | ||
354 | u32 ctrl0, qidx; | ||
355 | |||
356 | pi = netdev_priv(dev); | ||
357 | adap = pi->adapter; | ||
358 | qidx = skb->queue_mapping; | ||
359 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; | ||
360 | |||
361 | cpl = (struct cpl_tx_pkt_core *)pos; | ||
362 | |||
363 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
364 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; | ||
365 | ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | | ||
366 | TXPKT_PF_V(adap->pf); | ||
367 | if (skb_vlan_tag_present(skb)) { | ||
368 | q->vlan_ins++; | ||
369 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); | ||
370 | } | ||
371 | |||
372 | cpl->ctrl0 = htonl(ctrl0); | ||
373 | cpl->pack = htons(0); | ||
374 | cpl->len = htons(skb->len); | ||
375 | cpl->ctrl1 = cpu_to_be64(cntrl); | ||
376 | |||
377 | pos += sizeof(struct cpl_tx_pkt_core); | ||
378 | return pos; | ||
379 | } | ||
380 | |||
381 | inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, | ||
382 | struct net_device *dev, | ||
383 | void *pos, | ||
384 | struct ipsec_sa_entry *sa_entry) | ||
385 | { | ||
386 | struct adapter *adap; | ||
387 | struct port_info *pi; | ||
388 | struct sge_eth_txq *q; | ||
389 | unsigned int len, qidx; | ||
390 | struct _key_ctx *key_ctx; | ||
391 | int left, eoq, key_len; | ||
392 | |||
393 | pi = netdev_priv(dev); | ||
394 | adap = pi->adapter; | ||
395 | qidx = skb->queue_mapping; | ||
396 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; | ||
397 | len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core); | ||
398 | key_len = sa_entry->kctx_len; | ||
399 | |||
400 | /* at the end of the queue, wrap pos back to the start */ | ||
401 | eoq = (void *)q->q.stat - pos; | ||
402 | left = eoq; | ||
403 | if (!eoq) { | ||
404 | pos = q->q.desc; | ||
405 | left = 64 * q->q.size; | ||
406 | } | ||
407 | |||
408 | /* Copy the Key context header */ | ||
409 | key_ctx = (struct _key_ctx *)pos; | ||
410 | key_ctx->ctx_hdr = sa_entry->key_ctx_hdr; | ||
411 | memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT); | ||
412 | pos += sizeof(struct _key_ctx); | ||
413 | left -= sizeof(struct _key_ctx); | ||
414 | |||
415 | if (likely(len <= left)) { | ||
416 | memcpy(key_ctx->key, sa_entry->key, key_len); | ||
417 | pos += key_len; | ||
418 | } else { | ||
419 | if (key_len <= left) { | ||
420 | memcpy(pos, sa_entry->key, key_len); | ||
421 | pos += key_len; | ||
422 | } else { | ||
423 | memcpy(pos, sa_entry->key, left); | ||
424 | memcpy(q->q.desc, sa_entry->key + left, | ||
425 | key_len - left); | ||
426 | pos = (u8 *)q->q.desc + (key_len - left); | ||
427 | } | ||
428 | } | ||
429 | /* Copy CPL TX PKT XT */ | ||
430 | pos = copy_cpltx_pktxt(skb, dev, pos); | ||
431 | |||
432 | return pos; | ||
433 | } | ||
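The interesting part of copy_key_cpltx_pktxt() is the wrap handling: pos may sit anywhere between q->q.desc and the status page at q->q.stat, and a key context that does not fit in the remaining bytes is split across the ring boundary. The same pattern in isolation (names are illustrative, not the driver's):

#include <stdint.h>
#include <string.h>

/*
 * Copy len bytes into a circular buffer of ring_len bytes starting at
 * offset off, wrapping to the start when the end is reached.  Returns
 * the offset just past the last byte written.
 */
static size_t ring_copy(uint8_t *ring, size_t ring_len, size_t off,
			const uint8_t *src, size_t len)
{
	size_t left = ring_len - off;

	if (len <= left) {
		memcpy(ring + off, src, len);
		return off + len;
	}
	memcpy(ring + off, src, left);		/* tail of the ring */
	memcpy(ring, src + left, len - left);	/* rest from the start */
	return len - left;
}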
434 | |||
435 | inline void *chcr_crypto_wreq(struct sk_buff *skb, | ||
436 | struct net_device *dev, | ||
437 | void *pos, | ||
438 | int credits, | ||
439 | struct ipsec_sa_entry *sa_entry) | ||
440 | { | ||
441 | struct port_info *pi = netdev_priv(dev); | ||
442 | struct adapter *adap = pi->adapter; | ||
443 | unsigned int immdatalen = 0; | ||
444 | unsigned int ivsize = GCM_ESP_IV_SIZE; | ||
445 | struct chcr_ipsec_wr *wr; | ||
446 | unsigned int flits; | ||
447 | u32 wr_mid; | ||
448 | int qidx = skb_get_queue_mapping(skb); | ||
449 | struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset]; | ||
450 | unsigned int kctx_len = sa_entry->kctx_len; | ||
451 | int qid = q->q.cntxt_id; | ||
452 | |||
453 | atomic_inc(&adap->chcr_stats.ipsec_cnt); | ||
454 | |||
455 | flits = calc_tx_sec_flits(skb, kctx_len); | ||
456 | |||
457 | if (is_eth_imm(skb, kctx_len)) | ||
458 | immdatalen = skb->len; | ||
459 | |||
460 | /* WR Header */ | ||
461 | wr = (struct chcr_ipsec_wr *)pos; | ||
462 | wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); | ||
463 | wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); | ||
464 | |||
465 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | ||
466 | netif_tx_stop_queue(q->txq); | ||
467 | q->q.stops++; | ||
468 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; | ||
469 | } | ||
470 | wr_mid |= FW_ULPTX_WR_DATA_F; | ||
471 | wr->wreq.flowid_len16 = htonl(wr_mid); | ||
472 | |||
473 | /* ULPTX */ | ||
474 | wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid); | ||
475 | wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1); | ||
476 | |||
477 | /* Sub-command */ | ||
478 | wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); | ||
479 | wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + | ||
480 | sizeof(wr->req.key_ctx) + | ||
481 | kctx_len + | ||
482 | sizeof(struct cpl_tx_pkt_core) + | ||
483 | immdatalen); | ||
484 | |||
485 | /* CPL_SEC_PDU */ | ||
486 | wr->req.sec_cpl.op_ivinsrtofst = htonl( | ||
487 | CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | | ||
488 | CPL_TX_SEC_PDU_CPLLEN_V(2) | | ||
489 | CPL_TX_SEC_PDU_PLACEHOLDER_V(1) | | ||
490 | CPL_TX_SEC_PDU_IVINSRTOFST_V( | ||
491 | (skb_transport_offset(skb) + | ||
492 | sizeof(struct ip_esp_hdr) + 1))); | ||
493 | |||
494 | wr->req.sec_cpl.pldlen = htonl(skb->len); | ||
495 | |||
496 | wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | ||
497 | (skb_transport_offset(skb) + 1), | ||
498 | (skb_transport_offset(skb) + | ||
499 | sizeof(struct ip_esp_hdr)), | ||
500 | (skb_transport_offset(skb) + | ||
501 | sizeof(struct ip_esp_hdr) + | ||
502 | GCM_ESP_IV_SIZE + 1), 0); | ||
503 | |||
504 | wr->req.sec_cpl.cipherstop_lo_authinsert = | ||
505 | FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) + | ||
506 | sizeof(struct ip_esp_hdr) + | ||
507 | GCM_ESP_IV_SIZE + 1, | ||
508 | sa_entry->authsize, | ||
509 | sa_entry->authsize); | ||
510 | wr->req.sec_cpl.seqno_numivs = | ||
511 | FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1, | ||
512 | CHCR_SCMD_CIPHER_MODE_AES_GCM, | ||
513 | CHCR_SCMD_AUTH_MODE_GHASH, | ||
514 | sa_entry->hmac_ctrl, | ||
515 | ivsize >> 1); | ||
516 | wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, | ||
517 | 0, 0, 0); | ||
518 | |||
519 | pos += sizeof(struct fw_ulptx_wr) + | ||
520 | sizeof(struct ulp_txpkt) + | ||
521 | sizeof(struct ulptx_idata) + | ||
522 | sizeof(struct cpl_tx_sec_pdu); | ||
523 | |||
524 | pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry); | ||
525 | |||
526 | return pos; | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * flits_to_desc - returns the num of Tx descriptors for the given flits | ||
531 | * @n: the number of flits | ||
532 | * | ||
533 | * Returns the number of Tx descriptors needed for the supplied number | ||
534 | * of flits. | ||
535 | */ | ||
536 | static inline unsigned int flits_to_desc(unsigned int n) | ||
537 | { | ||
538 | WARN_ON(n > SGE_MAX_WR_LEN / 8); | ||
539 | return DIV_ROUND_UP(n, 8); | ||
540 | } | ||
541 | |||
542 | static inline unsigned int txq_avail(const struct sge_txq *q) | ||
543 | { | ||
544 | return q->size - 1 - q->in_use; | ||
545 | } | ||
546 | |||
547 | static void eth_txq_stop(struct sge_eth_txq *q) | ||
548 | { | ||
549 | netif_tx_stop_queue(q->txq); | ||
550 | q->q.stops++; | ||
551 | } | ||
552 | |||
553 | static inline void txq_advance(struct sge_txq *q, unsigned int n) | ||
554 | { | ||
555 | q->in_use += n; | ||
556 | q->pidx += n; | ||
557 | if (q->pidx >= q->size) | ||
558 | q->pidx -= q->size; | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * chcr_ipsec_xmit - called from the ULD Tx handler | ||
563 | */ | ||
564 | int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev) | ||
565 | { | ||
566 | struct xfrm_state *x = xfrm_input_state(skb); | ||
567 | struct ipsec_sa_entry *sa_entry; | ||
568 | u64 *pos, *end, *before, *sgl; | ||
569 | int qidx, left, credits; | ||
570 | unsigned int flits = 0, ndesc, kctx_len; | ||
571 | struct adapter *adap; | ||
572 | struct sge_eth_txq *q; | ||
573 | struct port_info *pi; | ||
574 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
575 | bool immediate = false; | ||
576 | |||
577 | if (!x->xso.offload_handle) | ||
578 | return NETDEV_TX_BUSY; | ||
579 | |||
580 | sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle; | ||
581 | kctx_len = sa_entry->kctx_len; | ||
582 | |||
583 | if (skb->sp->len != 1) { | ||
584 | out_free: dev_kfree_skb_any(skb); | ||
585 | return NETDEV_TX_OK; | ||
586 | } | ||
587 | |||
588 | pi = netdev_priv(dev); | ||
589 | adap = pi->adapter; | ||
590 | qidx = skb->queue_mapping; | ||
591 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; | ||
592 | |||
593 | cxgb4_reclaim_completed_tx(adap, &q->q, true); | ||
594 | |||
595 | flits = calc_tx_sec_flits(skb, sa_entry->kctx_len); | ||
596 | ndesc = flits_to_desc(flits); | ||
597 | credits = txq_avail(&q->q) - ndesc; | ||
598 | |||
599 | if (unlikely(credits < 0)) { | ||
600 | eth_txq_stop(q); | ||
601 | dev_err(adap->pdev_dev, | ||
602 | "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n", | ||
603 | dev->name, qidx, credits, ndesc, txq_avail(&q->q), | ||
604 | flits); | ||
605 | return NETDEV_TX_BUSY; | ||
606 | } | ||
607 | |||
608 | if (is_eth_imm(skb, kctx_len)) | ||
609 | immediate = true; | ||
610 | |||
611 | if (!immediate && | ||
612 | unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { | ||
613 | q->mapping_err++; | ||
614 | goto out_free; | ||
615 | } | ||
616 | |||
617 | pos = (u64 *)&q->q.desc[q->q.pidx]; | ||
618 | before = (u64 *)pos; | ||
619 | end = (u64 *)pos + flits; | ||
620 | /* Setup IPSec CPL */ | ||
621 | pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos, | ||
622 | credits, sa_entry); | ||
623 | if (before > (u64 *)pos) { | ||
624 | left = (u8 *)end - (u8 *)q->q.stat; | ||
625 | end = (void *)q->q.desc + left; | ||
626 | } | ||
627 | if (pos == (u64 *)q->q.stat) { | ||
628 | left = (u8 *)end - (u8 *)q->q.stat; | ||
629 | end = (void *)q->q.desc + left; | ||
630 | pos = (void *)q->q.desc; | ||
631 | } | ||
632 | |||
633 | sgl = (void *)pos; | ||
634 | if (immediate) { | ||
635 | cxgb4_inline_tx_skb(skb, &q->q, sgl); | ||
636 | dev_consume_skb_any(skb); | ||
637 | } else { | ||
638 | int last_desc; | ||
639 | |||
640 | cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, | ||
641 | 0, addr); | ||
642 | skb_orphan(skb); | ||
643 | |||
644 | last_desc = q->q.pidx + ndesc - 1; | ||
645 | if (last_desc >= q->q.size) | ||
646 | last_desc -= q->q.size; | ||
647 | q->q.sdesc[last_desc].skb = skb; | ||
648 | q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; | ||
649 | } | ||
650 | txq_advance(&q->q, ndesc); | ||
651 | |||
652 | cxgb4_ring_tx_db(adap, &q->q, ndesc); | ||
653 | return NETDEV_TX_OK; | ||
654 | } | ||
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c index 451620b475a0..86f5f459762e 100644 --- a/drivers/crypto/exynos-rng.c +++ b/drivers/crypto/exynos-rng.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * exynos-rng.c - Random Number Generator driver for the Exynos | 3 | * exynos-rng.c - Random Number Generator driver for the Exynos |
3 | * | 4 | * |
@@ -6,15 +7,6 @@ | |||
6 | * Loosely based on old driver from drivers/char/hw_random/exynos-rng.c: | 7 | * Loosely based on old driver from drivers/char/hw_random/exynos-rng.c: |
7 | * Copyright (C) 2012 Samsung Electronics | 8 | * Copyright (C) 2012 Samsung Electronics |
8 | * Jonghwa Lee <jonghwa3.lee@samsung.com> | 9 | * Jonghwa Lee <jonghwa3.lee@samsung.com> |
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | */ | 10 | */ |
19 | 11 | ||
20 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
@@ -22,12 +14,18 @@ | |||
22 | #include <linux/err.h> | 14 | #include <linux/err.h> |
23 | #include <linux/io.h> | 15 | #include <linux/io.h> |
24 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/mutex.h> | ||
18 | #include <linux/of_device.h> | ||
25 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
26 | 20 | ||
27 | #include <crypto/internal/rng.h> | 21 | #include <crypto/internal/rng.h> |
28 | 22 | ||
29 | #define EXYNOS_RNG_CONTROL 0x0 | 23 | #define EXYNOS_RNG_CONTROL 0x0 |
30 | #define EXYNOS_RNG_STATUS 0x10 | 24 | #define EXYNOS_RNG_STATUS 0x10 |
25 | |||
26 | #define EXYNOS_RNG_SEED_CONF 0x14 | ||
27 | #define EXYNOS_RNG_GEN_PRNG BIT(1) | ||
28 | |||
31 | #define EXYNOS_RNG_SEED_BASE 0x140 | 29 | #define EXYNOS_RNG_SEED_BASE 0x140 |
32 | #define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4)) | 30 | #define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4)) |
33 | #define EXYNOS_RNG_OUT_BASE 0x160 | 31 | #define EXYNOS_RNG_OUT_BASE 0x160 |
@@ -43,13 +41,21 @@ | |||
43 | #define EXYNOS_RNG_SEED_REGS 5 | 41 | #define EXYNOS_RNG_SEED_REGS 5 |
44 | #define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4) | 42 | #define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4) |
45 | 43 | ||
44 | enum exynos_prng_type { | ||
45 | EXYNOS_PRNG_UNKNOWN = 0, | ||
46 | EXYNOS_PRNG_EXYNOS4, | ||
47 | EXYNOS_PRNG_EXYNOS5, | ||
48 | }; | ||
49 | |||
46 | /* | 50 | /* |
47 | * Driver re-seeds itself with generated random numbers to increase | 51 | * Driver re-seeds itself with generated random numbers to hinder |
48 | * the randomness. | 52 | * backtracking of the original seed. |
49 | * | 53 | * |
50 | * Time for next re-seed in ms. | 54 | * Time for next re-seed in ms. |
51 | */ | 55 | */ |
52 | #define EXYNOS_RNG_RESEED_TIME 100 | 56 | #define EXYNOS_RNG_RESEED_TIME 1000 |
57 | #define EXYNOS_RNG_RESEED_BYTES 65536 | ||
58 | |||
53 | /* | 59 | /* |
54 | * In polling mode, do not wait infinitely for the engine to finish the work. | 60 | * In polling mode, do not wait infinitely for the engine to finish the work. |
55 | */ | 61 | */ |
@@ -63,13 +69,17 @@ struct exynos_rng_ctx { | |||
63 | /* Device associated memory */ | 69 | /* Device associated memory */ |
64 | struct exynos_rng_dev { | 70 | struct exynos_rng_dev { |
65 | struct device *dev; | 71 | struct device *dev; |
72 | enum exynos_prng_type type; | ||
66 | void __iomem *mem; | 73 | void __iomem *mem; |
67 | struct clk *clk; | 74 | struct clk *clk; |
75 | struct mutex lock; | ||
68 | /* Generated numbers stored for seeding during resume */ | 76 | /* Generated numbers stored for seeding during resume */ |
69 | u8 seed_save[EXYNOS_RNG_SEED_SIZE]; | 77 | u8 seed_save[EXYNOS_RNG_SEED_SIZE]; |
70 | unsigned int seed_save_len; | 78 | unsigned int seed_save_len; |
71 | /* Time of last seeding in jiffies */ | 79 | /* Time of last seeding in jiffies */ |
72 | unsigned long last_seeding; | 80 | unsigned long last_seeding; |
81 | /* Bytes generated since last seeding */ | ||
82 | unsigned long bytes_seeding; | ||
73 | }; | 83 | }; |
74 | 84 | ||
75 | static struct exynos_rng_dev *exynos_rng_dev; | 85 | static struct exynos_rng_dev *exynos_rng_dev; |
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng, | |||
114 | } | 124 | } |
115 | 125 | ||
116 | rng->last_seeding = jiffies; | 126 | rng->last_seeding = jiffies; |
127 | rng->bytes_seeding = 0; | ||
117 | 128 | ||
118 | return 0; | 129 | return 0; |
119 | } | 130 | } |
120 | 131 | ||
121 | /* | 132 | /* |
122 | * Read from output registers and put the data under 'dst' array, | ||
123 | * up to dlen bytes. | ||
124 | * | ||
125 | * Returns number of bytes actually stored in 'dst' (dlen | ||
126 | * or EXYNOS_RNG_SEED_SIZE). | ||
127 | */ | ||
128 | static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng, | ||
129 | u8 *dst, unsigned int dlen) | ||
130 | { | ||
131 | unsigned int cnt = 0; | ||
132 | int i, j; | ||
133 | u32 val; | ||
134 | |||
135 | for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) { | ||
136 | val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j)); | ||
137 | |||
138 | for (i = 0; i < 4; i++) { | ||
139 | dst[cnt] = val & 0xff; | ||
140 | val >>= 8; | ||
141 | if (++cnt >= dlen) | ||
142 | return cnt; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | return cnt; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Start the engine and poll for finish. Then read from output registers | 133 | * Start the engine and poll for finish. Then read from output registers |
151 | * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated | 134 | * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated |
152 | * random data (EXYNOS_RNG_SEED_SIZE). | 135 | * random data (EXYNOS_RNG_SEED_SIZE). |
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng, | |||
160 | { | 143 | { |
161 | int retry = EXYNOS_RNG_WAIT_RETRIES; | 144 | int retry = EXYNOS_RNG_WAIT_RETRIES; |
162 | 145 | ||
163 | exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START, | 146 | if (rng->type == EXYNOS_PRNG_EXYNOS4) { |
164 | EXYNOS_RNG_CONTROL); | 147 | exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START, |
148 | EXYNOS_RNG_CONTROL); | ||
149 | } else if (rng->type == EXYNOS_PRNG_EXYNOS5) { | ||
150 | exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG, | ||
151 | EXYNOS_RNG_SEED_CONF); | ||
152 | } | ||
165 | 153 | ||
166 | while (!(exynos_rng_readl(rng, | 154 | while (!(exynos_rng_readl(rng, |
167 | EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry) | 155 | EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry) |
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng, | |||
173 | /* Clear status bit */ | 161 | /* Clear status bit */ |
174 | exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE, | 162 | exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE, |
175 | EXYNOS_RNG_STATUS); | 163 | EXYNOS_RNG_STATUS); |
176 | *read = exynos_rng_copy_random(rng, dst, dlen); | 164 | *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE); |
165 | memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read); | ||
166 | rng->bytes_seeding += *read; | ||
177 | 167 | ||
178 | return 0; | 168 | return 0; |
179 | } | 169 | } |
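The open-coded register-unpacking loop is gone: memcpy_fromio() is the kernel's accessor for bulk reads out of __iomem regions, and the min_t() clamp guarantees a single engine pass never claims more than the five 32-bit output registers hold. In effect (a standalone model of the clamp, nothing driver-specific):

#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)	/* 20 bytes */

/* What the min_t() clamp computes for *read. */
static unsigned int bytes_per_pass(unsigned int dlen)
{
	return dlen < EXYNOS_RNG_SEED_SIZE ? dlen : EXYNOS_RNG_SEED_SIZE;
}
/* bytes_per_pass(8) == 8, bytes_per_pass(64) == 20 */

Callers such as exynos_rng_generate() loop until dlen is exhausted, so the clamp bounds a single pass, not the total output.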
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng) | |||
187 | unsigned int read = 0; | 177 | unsigned int read = 0; |
188 | u8 seed[EXYNOS_RNG_SEED_SIZE]; | 178 | u8 seed[EXYNOS_RNG_SEED_SIZE]; |
189 | 179 | ||
190 | if (time_before(now, next_seeding)) | 180 | if (time_before(now, next_seeding) && |
181 | rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES) | ||
191 | return; | 182 | return; |
192 | 183 | ||
193 | if (exynos_rng_get_random(rng, seed, sizeof(seed), &read)) | 184 | if (exynos_rng_get_random(rng, seed, sizeof(seed), &read)) |
194 | return; | 185 | return; |
195 | 186 | ||
196 | exynos_rng_set_seed(rng, seed, read); | 187 | exynos_rng_set_seed(rng, seed, read); |
188 | |||
189 | /* Briefly drop the lock so other users can make progress. */ | ||
190 | mutex_unlock(&rng->lock); | ||
191 | mutex_lock(&rng->lock); | ||
197 | } | 192 | } |
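Reseeding is now governed by two budgets, whichever runs out first: one second of wall time or 64 KiB of generated output since the last seed, with bytes_seeding maintained by exynos_rng_get_random() above. The unlock/lock pair at the end is a fairness yield: the mutex is dropped for an instant so other generators queued on rng->lock can run between chunks. The predicate, reduced to its essentials:

#include <stdbool.h>

#define RESEED_TIME_MS	1000
#define RESEED_BYTES	65536

/* Reseed when either the time or the output budget is exhausted. */
static bool need_reseed(unsigned long now_ms, unsigned long last_seed_ms,
			unsigned long bytes_since_seed)
{
	return (now_ms - last_seed_ms >= RESEED_TIME_MS) ||
	       (bytes_since_seed >= RESEED_BYTES);
}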
198 | 193 | ||
199 | static int exynos_rng_generate(struct crypto_rng *tfm, | 194 | static int exynos_rng_generate(struct crypto_rng *tfm, |
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm, | |||
209 | if (ret) | 204 | if (ret) |
210 | return ret; | 205 | return ret; |
211 | 206 | ||
207 | mutex_lock(&rng->lock); | ||
212 | do { | 208 | do { |
213 | ret = exynos_rng_get_random(rng, dst, dlen, &read); | 209 | ret = exynos_rng_get_random(rng, dst, dlen, &read); |
214 | if (ret) | 210 | if (ret) |
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm, | |||
219 | 215 | ||
220 | exynos_rng_reseed(rng); | 216 | exynos_rng_reseed(rng); |
221 | } while (dlen > 0); | 217 | } while (dlen > 0); |
218 | mutex_unlock(&rng->lock); | ||
222 | 219 | ||
223 | clk_disable_unprepare(rng->clk); | 220 | clk_disable_unprepare(rng->clk); |
224 | 221 | ||
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed, | |||
236 | if (ret) | 233 | if (ret) |
237 | return ret; | 234 | return ret; |
238 | 235 | ||
236 | mutex_lock(&rng->lock); | ||
239 | ret = exynos_rng_set_seed(ctx->rng, seed, slen); | 237 | ret = exynos_rng_set_seed(ctx->rng, seed, slen); |
238 | mutex_unlock(&rng->lock); | ||
240 | 239 | ||
241 | clk_disable_unprepare(rng->clk); | 240 | clk_disable_unprepare(rng->clk); |
242 | 241 | ||
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = { | |||
259 | .base = { | 258 | .base = { |
260 | .cra_name = "stdrng", | 259 | .cra_name = "stdrng", |
261 | .cra_driver_name = "exynos_rng", | 260 | .cra_driver_name = "exynos_rng", |
262 | .cra_priority = 100, | 261 | .cra_priority = 300, |
263 | .cra_ctxsize = sizeof(struct exynos_rng_ctx), | 262 | .cra_ctxsize = sizeof(struct exynos_rng_ctx), |
264 | .cra_module = THIS_MODULE, | 263 | .cra_module = THIS_MODULE, |
265 | .cra_init = exynos_rng_kcapi_init, | 264 | .cra_init = exynos_rng_kcapi_init, |
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev) | |||
279 | if (!rng) | 278 | if (!rng) |
280 | return -ENOMEM; | 279 | return -ENOMEM; |
281 | 280 | ||
281 | rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev); | ||
282 | |||
283 | mutex_init(&rng->lock); | ||
284 | |||
282 | rng->dev = &pdev->dev; | 285 | rng->dev = &pdev->dev; |
283 | rng->clk = devm_clk_get(&pdev->dev, "secss"); | 286 | rng->clk = devm_clk_get(&pdev->dev, "secss"); |
284 | if (IS_ERR(rng->clk)) { | 287 | if (IS_ERR(rng->clk)) { |
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev) | |||
329 | if (ret) | 332 | if (ret) |
330 | return ret; | 333 | return ret; |
331 | 334 | ||
335 | mutex_lock(&rng->lock); | ||
336 | |||
332 | /* Get new random numbers and store them for seeding on resume. */ | 337 | /* Get new random numbers and store them for seeding on resume. */ |
333 | exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save), | 338 | exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save), |
334 | &(rng->seed_save_len)); | 339 | &(rng->seed_save_len)); |
340 | |||
341 | mutex_unlock(&rng->lock); | ||
342 | |||
335 | dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n", | 343 | dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n", |
336 | rng->seed_save_len); | 344 | rng->seed_save_len); |
337 | 345 | ||
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev) | |||
354 | if (ret) | 362 | if (ret) |
355 | return ret; | 363 | return ret; |
356 | 364 | ||
365 | mutex_lock(&rng->lock); | ||
366 | |||
357 | ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len); | 367 | ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len); |
358 | 368 | ||
369 | mutex_unlock(&rng->lock); | ||
370 | |||
359 | clk_disable_unprepare(rng->clk); | 371 | clk_disable_unprepare(rng->clk); |
360 | 372 | ||
361 | return ret; | 373 | return ret; |
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend, | |||
367 | static const struct of_device_id exynos_rng_dt_match[] = { | 379 | static const struct of_device_id exynos_rng_dt_match[] = { |
368 | { | 380 | { |
369 | .compatible = "samsung,exynos4-rng", | 381 | .compatible = "samsung,exynos4-rng", |
382 | .data = (const void *)EXYNOS_PRNG_EXYNOS4, | ||
383 | }, { | ||
384 | .compatible = "samsung,exynos5250-prng", | ||
385 | .data = (const void *)EXYNOS_PRNG_EXYNOS5, | ||
370 | }, | 386 | }, |
371 | { }, | 387 | { }, |
372 | }; | 388 | }; |
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver); | |||
386 | 402 | ||
387 | MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver"); | 403 | MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver"); |
388 | MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); | 404 | MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); |
389 | MODULE_LICENSE("GPL"); | 405 | MODULE_LICENSE("GPL v2"); |
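The two exynos-rng additions above lean on a standard kernel idiom: the OF match table carries a per-compatible constant in its .data field, and probe() casts it back to select the hardware variant, while the new mutex serializes the seed/generate paths that the suspend/resume hooks now also take. A minimal sketch of the match-data half, using the names from the diff (the rest of the driver is elided):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    enum exynos_prng_type {
            EXYNOS_PRNG_EXYNOS4,
            EXYNOS_PRNG_EXYNOS5,
    };

    static const struct of_device_id example_dt_match[] = {
            { .compatible = "samsung,exynos4-rng",
              .data = (const void *)EXYNOS_PRNG_EXYNOS4 },
            { .compatible = "samsung,exynos5250-prng",
              .data = (const void *)EXYNOS_PRNG_EXYNOS5 },
            { },
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* Recover the enum smuggled through the match table's .data */
            enum exynos_prng_type type =
                    (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);

            dev_info(&pdev->dev, "PRNG variant %d\n", type);
            return 0;
    }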
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index e09d4055b19e..a5a36fe7bf2c 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c | |||
@@ -2579,6 +2579,7 @@ err_out_unmap_bars: | |||
2579 | for (i = 0; i < 3; ++i) | 2579 | for (i = 0; i < 3; ++i) |
2580 | if (dev->bar[i]) | 2580 | if (dev->bar[i]) |
2581 | iounmap(dev->bar[i]); | 2581 | iounmap(dev->bar[i]); |
2582 | kfree(dev); | ||
2582 | 2583 | ||
2583 | err_out_free_regions: | 2584 | err_out_free_regions: |
2584 | pci_release_regions(pdev); | 2585 | pci_release_regions(pdev); |
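The one-line hifn change above closes a probe-time memory leak: the error path that unmaps the BARs previously fell through to releasing the PCI regions without freeing the device structure it had allocated. A self-contained sketch (not the driver itself) of the goto-unwind ordering the fix restores:

    #include <linux/io.h>
    #include <linux/slab.h>

    struct example_dev { void __iomem *bar[3]; };

    static int example_probe(bool fail_late)
    {
            struct example_dev *dev;
            int i;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return -ENOMEM;

            if (fail_late)
                    goto err_out_unmap_bars;
            return 0;

    err_out_unmap_bars:
            for (i = 0; i < 3; i++)
                    if (dev->bar[i])
                            iounmap(dev->bar[i]);
            kfree(dev);     /* the line the fix adds: dev leaked before */
            return -ENODEV;
    }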
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 4bcef78a08aa..225e74a7f724 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c | |||
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv, | |||
108 | writel(EIP197_PE_ICE_x_CTRL_SW_RESET | | 108 | writel(EIP197_PE_ICE_x_CTRL_SW_RESET | |
109 | EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | | 109 | EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | |
110 | EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, | 110 | EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, |
111 | priv->base + ctrl); | 111 | EIP197_PE(priv) + ctrl); |
112 | 112 | ||
113 | /* Enable access to the program memory */ | 113 | /* Enable access to the program memory */ |
114 | writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL); | 114 | writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL); |
115 | 115 | ||
116 | /* Write the firmware */ | 116 | /* Write the firmware */ |
117 | for (i = 0; i < fw->size / sizeof(u32); i++) | 117 | for (i = 0; i < fw->size / sizeof(u32); i++) |
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv, | |||
119 | priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); | 119 | priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); |
120 | 120 | ||
121 | /* Disable access to the program memory */ | 121 | /* Disable access to the program memory */ |
122 | writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL); | 122 | writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL); |
123 | 123 | ||
124 | /* Release engine from reset */ | 124 | /* Release engine from reset */ |
125 | val = readl(priv->base + ctrl); | 125 | val = readl(EIP197_PE(priv) + ctrl); |
126 | val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; | 126 | val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; |
127 | writel(val, priv->base + ctrl); | 127 | writel(val, EIP197_PE(priv) + ctrl); |
128 | } | 128 | } |
129 | 129 | ||
130 | static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) | 130 | static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) |
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) | |||
145 | } | 145 | } |
146 | 146 | ||
147 | /* Clear the scratchpad memory */ | 147 | /* Clear the scratchpad memory */ |
148 | val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL); | 148 | val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); |
149 | val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | | 149 | val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | |
150 | EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | | 150 | EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | |
151 | EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | | 151 | EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | |
152 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; | 152 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; |
153 | writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL); | 153 | writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); |
154 | 154 | ||
155 | memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0, | 155 | memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, |
156 | EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); | 156 | EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); |
157 | 157 | ||
158 | eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, | 158 | eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, |
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) | |||
173 | u32 hdw, cd_size_rnd, val; | 173 | u32 hdw, cd_size_rnd, val; |
174 | int i; | 174 | int i; |
175 | 175 | ||
176 | hdw = readl(priv->base + EIP197_HIA_OPTIONS); | 176 | hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); |
177 | hdw &= GENMASK(27, 25); | 177 | hdw &= GENMASK(27, 25); |
178 | hdw >>= 25; | 178 | hdw >>= 25; |
179 | 179 | ||
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) | |||
182 | for (i = 0; i < priv->config.rings; i++) { | 182 | for (i = 0; i < priv->config.rings; i++) { |
183 | /* ring base address */ | 183 | /* ring base address */ |
184 | writel(lower_32_bits(priv->ring[i].cdr.base_dma), | 184 | writel(lower_32_bits(priv->ring[i].cdr.base_dma), |
185 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); | 185 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); |
186 | writel(upper_32_bits(priv->ring[i].cdr.base_dma), | 186 | writel(upper_32_bits(priv->ring[i].cdr.base_dma), |
187 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); | 187 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); |
188 | 188 | ||
189 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | | 189 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | |
190 | priv->config.cd_size, | 190 | priv->config.cd_size, |
191 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE); | 191 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); |
192 | writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | | 192 | writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | |
193 | (EIP197_FETCH_COUNT * priv->config.cd_offset), | 193 | (EIP197_FETCH_COUNT * priv->config.cd_offset), |
194 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG); | 194 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); |
195 | 195 | ||
196 | /* Configure DMA tx control */ | 196 | /* Configure DMA tx control */ |
197 | val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); | 197 | val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); |
198 | val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); | 198 | val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); |
199 | writel(val, | 199 | writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG); |
200 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG); | ||
201 | 200 | ||
202 | /* clear any pending interrupt */ | 201 | /* clear any pending interrupt */ |
203 | writel(GENMASK(5, 0), | 202 | writel(GENMASK(5, 0), |
204 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT); | 203 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT); |
205 | } | 204 | } |
206 | 205 | ||
207 | return 0; | 206 | return 0; |
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) | |||
212 | u32 hdw, rd_size_rnd, val; | 211 | u32 hdw, rd_size_rnd, val; |
213 | int i; | 212 | int i; |
214 | 213 | ||
215 | hdw = readl(priv->base + EIP197_HIA_OPTIONS); | 214 | hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); |
216 | hdw &= GENMASK(27, 25); | 215 | hdw &= GENMASK(27, 25); |
217 | hdw >>= 25; | 216 | hdw >>= 25; |
218 | 217 | ||
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) | |||
221 | for (i = 0; i < priv->config.rings; i++) { | 220 | for (i = 0; i < priv->config.rings; i++) { |
222 | /* ring base address */ | 221 | /* ring base address */ |
223 | writel(lower_32_bits(priv->ring[i].rdr.base_dma), | 222 | writel(lower_32_bits(priv->ring[i].rdr.base_dma), |
224 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); | 223 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); |
225 | writel(upper_32_bits(priv->ring[i].rdr.base_dma), | 224 | writel(upper_32_bits(priv->ring[i].rdr.base_dma), |
226 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); | 225 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); |
227 | 226 | ||
228 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) | | 227 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) | |
229 | priv->config.rd_size, | 228 | priv->config.rd_size, |
230 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE); | 229 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); |
231 | 230 | ||
232 | writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | | 231 | writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | |
233 | (EIP197_FETCH_COUNT * priv->config.rd_offset), | 232 | (EIP197_FETCH_COUNT * priv->config.rd_offset), |
234 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG); | 233 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); |
235 | 234 | ||
236 | /* Configure DMA tx control */ | 235 | /* Configure DMA tx control */ |
237 | val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); | 236 | val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); |
238 | val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); | 237 | val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); |
239 | val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG; | 238 | val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG; |
240 | writel(val, | 239 | writel(val, |
241 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG); | 240 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG); |
242 | 241 | ||
243 | /* clear any pending interrupt */ | 242 | /* clear any pending interrupt */ |
244 | writel(GENMASK(7, 0), | 243 | writel(GENMASK(7, 0), |
245 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT); | 244 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT); |
246 | 245 | ||
247 | /* enable ring interrupt */ | 246 | /* enable ring interrupt */ |
248 | val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); | 247 | val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); |
249 | val |= EIP197_RDR_IRQ(i); | 248 | val |= EIP197_RDR_IRQ(i); |
250 | writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); | 249 | writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); |
251 | } | 250 | } |
252 | 251 | ||
253 | return 0; | 252 | return 0; |
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
259 | int i, ret; | 258 | int i, ret; |
260 | 259 | ||
261 | /* Determine endianness and configure byte swap */ | 260 | /* Determine endianness and configure byte swap */ |
262 | version = readl(priv->base + EIP197_HIA_VERSION); | 261 | version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); |
263 | val = readl(priv->base + EIP197_HIA_MST_CTRL); | 262 | val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); |
264 | 263 | ||
265 | if ((version & 0xffff) == EIP197_HIA_VERSION_BE) | 264 | if ((version & 0xffff) == EIP197_HIA_VERSION_BE) |
266 | val |= EIP197_MST_CTRL_BYTE_SWAP; | 265 | val |= EIP197_MST_CTRL_BYTE_SWAP; |
267 | else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) | 266 | else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) |
268 | val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); | 267 | val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); |
269 | 268 | ||
270 | writel(val, priv->base + EIP197_HIA_MST_CTRL); | 269 | writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); |
271 | |||
272 | 270 | ||
273 | /* Configure wr/rd cache values */ | 271 | /* Configure wr/rd cache values */ |
274 | writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | | 272 | writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | |
275 | EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS), | 273 | EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS), |
276 | priv->base + EIP197_MST_CTRL); | 274 | EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL); |
277 | 275 | ||
278 | /* Interrupts reset */ | 276 | /* Interrupts reset */ |
279 | 277 | ||
280 | /* Disable all global interrupts */ | 278 | /* Disable all global interrupts */ |
281 | writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL); | 279 | writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL); |
282 | 280 | ||
283 | /* Clear any pending interrupt */ | 281 | /* Clear any pending interrupt */ |
284 | writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK); | 282 | writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); |
285 | 283 | ||
286 | /* Data Fetch Engine configuration */ | 284 | /* Data Fetch Engine configuration */ |
287 | 285 | ||
288 | /* Reset all DFE threads */ | 286 | /* Reset all DFE threads */ |
289 | writel(EIP197_DxE_THR_CTRL_RESET_PE, | 287 | writel(EIP197_DxE_THR_CTRL_RESET_PE, |
290 | priv->base + EIP197_HIA_DFE_THR_CTRL); | 288 | EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); |
291 | 289 | ||
292 | /* Reset HIA input interface arbiter */ | 290 | if (priv->version == EIP197) { |
293 | writel(EIP197_HIA_RA_PE_CTRL_RESET, | 291 | /* Reset HIA input interface arbiter */ |
294 | priv->base + EIP197_HIA_RA_PE_CTRL); | 292 | writel(EIP197_HIA_RA_PE_CTRL_RESET, |
293 | EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL); | ||
294 | } | ||
295 | 295 | ||
296 | /* DMA transfer size to use */ | 296 | /* DMA transfer size to use */ |
297 | val = EIP197_HIA_DFE_CFG_DIS_DEBUG; | 297 | val = EIP197_HIA_DFE_CFG_DIS_DEBUG; |
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
299 | val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7); | 299 | val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7); |
300 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS); | 300 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS); |
301 | val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS); | 301 | val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS); |
302 | writel(val, priv->base + EIP197_HIA_DFE_CFG); | 302 | writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG); |
303 | 303 | ||
304 | /* Leave the DFE threads reset state */ | 304 | /* Leave the DFE threads reset state */ |
305 | writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL); | 305 | writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); |
306 | 306 | ||
307 | /* Configure the processing engine thresholds */ | 307 | /* Configure the processing engine thresholds */ |
308 | writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9), | 308 | writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9), |
309 | priv->base + EIP197_PE_IN_DBUF_THRES); | 309 | EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES); |
310 | writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7), | 310 | writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7), |
311 | priv->base + EIP197_PE_IN_TBUF_THRES); | 311 | EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES); |
312 | 312 | ||
313 | /* enable HIA input interface arbiter and rings */ | 313 | if (priv->version == EIP197) { |
314 | writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0), | 314 | /* enable HIA input interface arbiter and rings */ |
315 | priv->base + EIP197_HIA_RA_PE_CTRL); | 315 | writel(EIP197_HIA_RA_PE_CTRL_EN | |
316 | GENMASK(priv->config.rings - 1, 0), | ||
317 | EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL); | ||
318 | } | ||
316 | 319 | ||
317 | /* Data Store Engine configuration */ | 320 | /* Data Store Engine configuration */ |
318 | 321 | ||
319 | /* Reset all DSE threads */ | 322 | /* Reset all DSE threads */ |
320 | writel(EIP197_DxE_THR_CTRL_RESET_PE, | 323 | writel(EIP197_DxE_THR_CTRL_RESET_PE, |
321 | priv->base + EIP197_HIA_DSE_THR_CTRL); | 324 | EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); |
322 | 325 | ||
323 | /* Wait for all DSE threads to complete */ | 326 | /* Wait for all DSE threads to complete */ |
324 | while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) & | 327 | while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) & |
325 | GENMASK(15, 12)) != GENMASK(15, 12)) | 328 | GENMASK(15, 12)) != GENMASK(15, 12)) |
326 | ; | 329 | ; |
327 | 330 | ||
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
330 | val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); | 333 | val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); |
331 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); | 334 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); |
332 | val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE; | 335 | val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE; |
333 | val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; | 336 | /* FIXME: instability issues can occur for EIP97 but disabling it impacts
334 | writel(val, priv->base + EIP197_HIA_DSE_CFG); | 337 | * performance.
338 | */ | ||
339 | if (priv->version == EIP197) | ||
340 | val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; | ||
341 | writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG); | ||
335 | 342 | ||
336 | /* Leave the DSE threads reset state */ | 343 | /* Leave the DSE threads reset state */ |
337 | writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL); | 344 | writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); |
338 | 345 | ||
339 | /* Configure the processing engine thresholds */ | 346 | /* Configure the processing engine thresholds */ |
340 | writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8), | 347 | writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8), |
341 | priv->base + EIP197_PE_OUT_DBUF_THRES); | 348 | EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES); |
342 | 349 | ||
343 | /* Processing Engine configuration */ | 350 | /* Processing Engine configuration */ |
344 | 351 | ||
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
348 | val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; | 355 | val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; |
349 | val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; | 356 | val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; |
350 | val |= EIP197_ALG_SHA2; | 357 | val |= EIP197_ALG_SHA2; |
351 | writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN); | 358 | writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN); |
352 | 359 | ||
353 | /* Command Descriptor Rings prepare */ | 360 | /* Command Descriptor Rings prepare */ |
354 | for (i = 0; i < priv->config.rings; i++) { | 361 | for (i = 0; i < priv->config.rings; i++) { |
355 | /* Clear interrupts for this ring */ | 362 | /* Clear interrupts for this ring */ |
356 | writel(GENMASK(31, 0), | 363 | writel(GENMASK(31, 0), |
357 | priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i)); | 364 | EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i)); |
358 | 365 | ||
359 | /* Disable external triggering */ | 366 | /* Disable external triggering */ |
360 | writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG); | 367 | writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); |
361 | 368 | ||
362 | /* Clear the pending prepared counter */ | 369 | /* Clear the pending prepared counter */ |
363 | writel(EIP197_xDR_PREP_CLR_COUNT, | 370 | writel(EIP197_xDR_PREP_CLR_COUNT, |
364 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT); | 371 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT); |
365 | 372 | ||
366 | /* Clear the pending processed counter */ | 373 | /* Clear the pending processed counter */ |
367 | writel(EIP197_xDR_PROC_CLR_COUNT, | 374 | writel(EIP197_xDR_PROC_CLR_COUNT, |
368 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT); | 375 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT); |
369 | 376 | ||
370 | writel(0, | 377 | writel(0, |
371 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR); | 378 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR); |
372 | writel(0, | 379 | writel(0, |
373 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR); | 380 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR); |
374 | 381 | ||
375 | writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2, | 382 | writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2, |
376 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE); | 383 | EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); |
377 | } | 384 | } |
378 | 385 | ||
379 | /* Result Descriptor Ring prepare */ | 386 | /* Result Descriptor Ring prepare */ |
380 | for (i = 0; i < priv->config.rings; i++) { | 387 | for (i = 0; i < priv->config.rings; i++) { |
381 | /* Disable external triggering */ | 388 | /* Disable external triggering */ |
382 | writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG); | 389 | writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); |
383 | 390 | ||
384 | /* Clear the pending prepared counter */ | 391 | /* Clear the pending prepared counter */ |
385 | writel(EIP197_xDR_PREP_CLR_COUNT, | 392 | writel(EIP197_xDR_PREP_CLR_COUNT, |
386 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT); | 393 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT); |
387 | 394 | ||
388 | /* Clear the pending processed counter */ | 395 | /* Clear the pending processed counter */ |
389 | writel(EIP197_xDR_PROC_CLR_COUNT, | 396 | writel(EIP197_xDR_PROC_CLR_COUNT, |
390 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT); | 397 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT); |
391 | 398 | ||
392 | writel(0, | 399 | writel(0, |
393 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR); | 400 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR); |
394 | writel(0, | 401 | writel(0, |
395 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR); | 402 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR); |
396 | 403 | ||
397 | /* Ring size */ | 404 | /* Ring size */ |
398 | writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2, | 405 | writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2, |
399 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE); | 406 | EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); |
400 | } | 407 | } |
401 | 408 | ||
402 | /* Enable command descriptor rings */ | 409 | /* Enable command descriptor rings */ |
403 | writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), | 410 | writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), |
404 | priv->base + EIP197_HIA_DFE_THR_CTRL); | 411 | EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); |
405 | 412 | ||
406 | /* Enable result descriptor rings */ | 413 | /* Enable result descriptor rings */ |
407 | writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), | 414 | writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), |
408 | priv->base + EIP197_HIA_DSE_THR_CTRL); | 415 | EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); |
409 | 416 | ||
410 | /* Clear any HIA interrupt */ | 417 | /* Clear any HIA interrupt */ |
411 | writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK); | 418 | writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); |
412 | 419 | ||
413 | eip197_trc_cache_init(priv); | 420 | if (priv->version == EIP197) { |
421 | eip197_trc_cache_init(priv); | ||
414 | 422 | ||
415 | ret = eip197_load_firmwares(priv); | 423 | ret = eip197_load_firmwares(priv); |
416 | if (ret) | 424 | if (ret) |
417 | return ret; | 425 | return ret; |
426 | } | ||
418 | 427 | ||
419 | safexcel_hw_setup_cdesc_rings(priv); | 428 | safexcel_hw_setup_cdesc_rings(priv); |
420 | safexcel_hw_setup_rdesc_rings(priv); | 429 | safexcel_hw_setup_rdesc_rings(priv); |
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
422 | return 0; | 431 | return 0; |
423 | } | 432 | } |
424 | 433 | ||
434 | /* Called with ring's lock taken */ | ||
435 | static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv, | ||
436 | int ring, int reqs) | ||
437 | { | ||
438 | int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ); | ||
439 | |||
440 | if (!coal) | ||
441 | return 0; | ||
442 | |||
443 | /* Configure when we want an interrupt */ | ||
444 | writel(EIP197_HIA_RDR_THRESH_PKT_MODE | | ||
445 | EIP197_HIA_RDR_THRESH_PROC_PKT(coal), | ||
446 | EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH); | ||
447 | |||
448 | return coal; | ||
449 | } | ||
450 | |||
425 | void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) | 451 | void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) |
426 | { | 452 | { |
427 | struct crypto_async_request *req, *backlog; | 453 | struct crypto_async_request *req, *backlog; |
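The new safexcel_try_push_requests() helper above is the heart of the interrupt-coalescing rework: it clamps the number of queued requests to EIP197_MAX_BATCH_SZ (reduced to 64 in the header changes below) and programs the result ring's threshold register so the engine raises a single interrupt for the whole batch. A standalone sketch of that arithmetic, with the MMIO write abstracted into a callback:

    #define MAX_BATCH_SZ    64      /* mirrors EIP197_MAX_BATCH_SZ below */

    /* Returns how many of 'reqs' the programmed threshold covers; the
     * caller keeps the remainder in its per-ring requests_left counter.
     */
    static int try_push_requests(int reqs, void (*set_thresh)(int pkts))
    {
            int coal = reqs < MAX_BATCH_SZ ? reqs : MAX_BATCH_SZ;

            if (!coal)
                    return 0;

            /* THRESH_PKT_MODE | PROC_PKT(coal) in the driver */
            set_thresh(coal);
            return coal;
    }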
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) | |||
429 | struct safexcel_request *request; | 455 | struct safexcel_request *request; |
430 | int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; | 456 | int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; |
431 | 457 | ||
432 | priv->ring[ring].need_dequeue = false; | 458 | /* If a request wasn't properly dequeued because of a lack of resources, |
459 | * process it first. | ||
460 | */ | ||
461 | req = priv->ring[ring].req; | ||
462 | backlog = priv->ring[ring].backlog; | ||
463 | if (req) | ||
464 | goto handle_req; | ||
433 | 465 | ||
434 | do { | 466 | while (true) { |
435 | spin_lock_bh(&priv->ring[ring].queue_lock); | 467 | spin_lock_bh(&priv->ring[ring].queue_lock); |
436 | backlog = crypto_get_backlog(&priv->ring[ring].queue); | 468 | backlog = crypto_get_backlog(&priv->ring[ring].queue); |
437 | req = crypto_dequeue_request(&priv->ring[ring].queue); | 469 | req = crypto_dequeue_request(&priv->ring[ring].queue); |
438 | spin_unlock_bh(&priv->ring[ring].queue_lock); | 470 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
439 | 471 | ||
440 | if (!req) | 472 | if (!req) { |
473 | priv->ring[ring].req = NULL; | ||
474 | priv->ring[ring].backlog = NULL; | ||
441 | goto finalize; | 475 | goto finalize; |
476 | } | ||
442 | 477 | ||
478 | handle_req: | ||
443 | request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); | 479 | request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); |
444 | if (!request) { | 480 | if (!request) |
445 | spin_lock_bh(&priv->ring[ring].queue_lock); | 481 | goto request_failed; |
446 | crypto_enqueue_request(&priv->ring[ring].queue, req); | ||
447 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
448 | |||
449 | priv->ring[ring].need_dequeue = true; | ||
450 | goto finalize; | ||
451 | } | ||
452 | 482 | ||
453 | ctx = crypto_tfm_ctx(req->tfm); | 483 | ctx = crypto_tfm_ctx(req->tfm); |
454 | ret = ctx->send(req, ring, request, &commands, &results); | 484 | ret = ctx->send(req, ring, request, &commands, &results); |
455 | if (ret) { | 485 | if (ret) { |
456 | kfree(request); | 486 | kfree(request); |
457 | req->complete(req, ret); | 487 | goto request_failed; |
458 | priv->ring[ring].need_dequeue = true; | ||
459 | goto finalize; | ||
460 | } | 488 | } |
461 | 489 | ||
462 | if (backlog) | 490 | if (backlog) |
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) | |||
468 | 496 | ||
469 | cdesc += commands; | 497 | cdesc += commands; |
470 | rdesc += results; | 498 | rdesc += results; |
471 | } while (nreq++ < EIP197_MAX_BATCH_SZ); | 499 | nreq++; |
500 | } | ||
501 | |||
502 | request_failed: | ||
503 | /* Not enough resources to handle all the requests. Bail out and save | ||
504 | * the request and the backlog for the next dequeue call (per-ring). | ||
505 | */ | ||
506 | priv->ring[ring].req = req; | ||
507 | priv->ring[ring].backlog = backlog; | ||
472 | 508 | ||
473 | finalize: | 509 | finalize: |
474 | if (nreq == EIP197_MAX_BATCH_SZ) | 510 | if (!nreq) |
475 | priv->ring[ring].need_dequeue = true; | ||
476 | else if (!nreq) | ||
477 | return; | 511 | return; |
478 | 512 | ||
479 | spin_lock_bh(&priv->ring[ring].lock); | 513 | spin_lock_bh(&priv->ring[ring].egress_lock); |
480 | 514 | ||
481 | /* Configure when we want an interrupt */ | 515 | if (!priv->ring[ring].busy) { |
482 | writel(EIP197_HIA_RDR_THRESH_PKT_MODE | | 516 | nreq -= safexcel_try_push_requests(priv, ring, nreq); |
483 | EIP197_HIA_RDR_THRESH_PROC_PKT(nreq), | 517 | if (nreq) |
484 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH); | 518 | priv->ring[ring].busy = true; |
519 | } | ||
520 | |||
521 | priv->ring[ring].requests_left += nreq; | ||
522 | |||
523 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
485 | 524 | ||
486 | /* let the RDR know we have pending descriptors */ | 525 | /* let the RDR know we have pending descriptors */ |
487 | writel((rdesc * priv->config.rd_offset) << 2, | 526 | writel((rdesc * priv->config.rd_offset) << 2, |
488 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT); | 527 | EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); |
489 | 528 | ||
490 | /* let the CDR know we have pending descriptors */ | 529 | /* let the CDR know we have pending descriptors */ |
491 | writel((cdesc * priv->config.cd_offset) << 2, | 530 | writel((cdesc * priv->config.cd_offset) << 2, |
492 | priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT); | 531 | EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); |
493 | |||
494 | spin_unlock_bh(&priv->ring[ring].lock); | ||
495 | } | 532 | } |
496 | 533 | ||
497 | void safexcel_free_context(struct safexcel_crypto_priv *priv, | 534 | void safexcel_free_context(struct safexcel_crypto_priv *priv, |
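The reworked dequeue loop above replaces the old need_dequeue flag with explicit per-ring parking: when a descriptor allocation or ->send() call fails, the in-flight request and its backlog are stashed on the ring, and the next dequeue call resumes with them instead of re-enqueueing. A compilable sketch of that control flow, with the queue pop and send steps abstracted as callbacks (all names illustrative):

    struct crypto_async_request;

    struct ring_state {
            struct crypto_async_request *req;       /* parked on failure */
            struct crypto_async_request *backlog;
    };

    static void dequeue(struct ring_state *r,
                        struct crypto_async_request *(*pop)(void),
                        int (*send)(struct crypto_async_request *))
    {
            struct crypto_async_request *req = r->req;

            if (req)
                    goto handle_req;        /* resume last call's failure */

            for (;;) {
                    req = pop();
                    if (!req) {
                            r->req = NULL;  /* queue drained */
                            return;
                    }
    handle_req:
                    if (send(req)) {
                            r->req = req;   /* out of resources: park it */
                            return;
                    }
            }
    }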
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error) | |||
540 | } | 577 | } |
541 | 578 | ||
542 | int safexcel_invalidate_cache(struct crypto_async_request *async, | 579 | int safexcel_invalidate_cache(struct crypto_async_request *async, |
543 | struct safexcel_context *ctx, | ||
544 | struct safexcel_crypto_priv *priv, | 580 | struct safexcel_crypto_priv *priv, |
545 | dma_addr_t ctxr_dma, int ring, | 581 | dma_addr_t ctxr_dma, int ring, |
546 | struct safexcel_request *request) | 582 | struct safexcel_request *request) |
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv | |||
587 | { | 623 | { |
588 | struct safexcel_request *sreq; | 624 | struct safexcel_request *sreq; |
589 | struct safexcel_context *ctx; | 625 | struct safexcel_context *ctx; |
590 | int ret, i, nreq, ndesc = 0; | 626 | int ret, i, nreq, ndesc, tot_descs, done; |
591 | bool should_complete; | 627 | bool should_complete; |
592 | 628 | ||
593 | nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT); | 629 | handle_results: |
594 | nreq >>= 24; | 630 | tot_descs = 0; |
595 | nreq &= GENMASK(6, 0); | 631 | |
632 | nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); | ||
633 | nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET; | ||
634 | nreq &= EIP197_xDR_PROC_xD_PKT_MASK; | ||
596 | if (!nreq) | 635 | if (!nreq) |
597 | return; | 636 | goto requests_left; |
598 | 637 | ||
599 | for (i = 0; i < nreq; i++) { | 638 | for (i = 0; i < nreq; i++) { |
600 | spin_lock_bh(&priv->ring[ring].egress_lock); | 639 | spin_lock_bh(&priv->ring[ring].egress_lock); |
@@ -609,13 +648,9 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv | |||
609 | if (ndesc < 0) { | 648 | if (ndesc < 0) { |
610 | kfree(sreq); | 649 | kfree(sreq); |
611 | dev_err(priv->dev, "failed to handle result (%d)", ndesc); | 650 | dev_err(priv->dev, "failed to handle result (%d)", ndesc); |
612 | return; | 651 | goto acknowledge; |
613 | } | 652 | } |
614 | 653 | ||
615 | writel(EIP197_xDR_PROC_xD_PKT(1) | | ||
616 | EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset), | ||
617 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT); | ||
618 | |||
619 | if (should_complete) { | 654 | if (should_complete) { |
620 | local_bh_disable(); | 655 | local_bh_disable(); |
621 | sreq->req->complete(sreq->req, ret); | 656 | sreq->req->complete(sreq->req, ret); |
@@ -623,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv | |||
623 | } | 658 | } |
624 | 659 | ||
625 | kfree(sreq); | 660 | kfree(sreq); |
661 | tot_descs += ndesc; | ||
626 | } | 662 | } |
663 | |||
664 | acknowledge: | ||
665 | if (i) { | ||
666 | writel(EIP197_xDR_PROC_xD_PKT(i) | | ||
667 | EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), | ||
668 | EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); | ||
669 | } | ||
670 | |||
671 | /* If the number of requests overflowed the counter, try to process more | ||
672 | * requests. | ||
673 | */ | ||
674 | if (nreq == EIP197_xDR_PROC_xD_PKT_MASK) | ||
675 | goto handle_results; | ||
676 | |||
677 | requests_left: | ||
678 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
679 | |||
680 | done = safexcel_try_push_requests(priv, ring, | ||
681 | priv->ring[ring].requests_left); | ||
682 | |||
683 | priv->ring[ring].requests_left -= done; | ||
684 | if (!done && !priv->ring[ring].requests_left) | ||
685 | priv->ring[ring].busy = false; | ||
686 | |||
687 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
627 | } | 688 | } |
628 | 689 | ||
629 | static void safexcel_handle_result_work(struct work_struct *work) | 690 | static void safexcel_dequeue_work(struct work_struct *work) |
630 | { | 691 | { |
631 | struct safexcel_work_data *data = | 692 | struct safexcel_work_data *data = |
632 | container_of(work, struct safexcel_work_data, work); | 693 | container_of(work, struct safexcel_work_data, work); |
633 | struct safexcel_crypto_priv *priv = data->priv; | ||
634 | |||
635 | safexcel_handle_result_descriptor(priv, data->ring); | ||
636 | 694 | ||
637 | if (priv->ring[data->ring].need_dequeue) | 695 | safexcel_dequeue(data->priv, data->ring); |
638 | safexcel_dequeue(data->priv, data->ring); | ||
639 | } | 696 | } |
640 | 697 | ||
641 | struct safexcel_ring_irq_data { | 698 | struct safexcel_ring_irq_data { |
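One subtlety in the rewritten result handler above: the packet count in EIP197_HIA_xDR_PROC_COUNT is only seven bits wide (EIP197_xDR_PROC_xD_PKT_MASK is GENMASK(6, 0) per the header below), so a saturated reading means more results may already be pending, and the handler jumps back to handle_results to drain them. Schematically:

    #define PKT_OFFSET      24      /* EIP197_xDR_PROC_xD_PKT_OFFSET */
    #define PKT_MASK        0x7f    /* GENMASK(6, 0) */

    static void handle_results(unsigned int (*read_proc_count)(void))
    {
            unsigned int nreq;

            do {
                    nreq = (read_proc_count() >> PKT_OFFSET) & PKT_MASK;
                    /* ... complete and acknowledge nreq results ... */
            } while (nreq == PKT_MASK);     /* saturated: drain again */
    }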
@@ -647,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data) | |||
647 | { | 704 | { |
648 | struct safexcel_ring_irq_data *irq_data = data; | 705 | struct safexcel_ring_irq_data *irq_data = data; |
649 | struct safexcel_crypto_priv *priv = irq_data->priv; | 706 | struct safexcel_crypto_priv *priv = irq_data->priv; |
650 | int ring = irq_data->ring; | 707 | int ring = irq_data->ring, rc = IRQ_NONE; |
651 | u32 status, stat; | 708 | u32 status, stat; |
652 | 709 | ||
653 | status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring)); | 710 | status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring)); |
654 | if (!status) | 711 | if (!status) |
655 | return IRQ_NONE; | 712 | return rc; |
656 | 713 | ||
657 | /* RDR interrupts */ | 714 | /* RDR interrupts */ |
658 | if (status & EIP197_RDR_IRQ(ring)) { | 715 | if (status & EIP197_RDR_IRQ(ring)) { |
659 | stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT); | 716 | stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT); |
660 | 717 | ||
661 | if (unlikely(stat & EIP197_xDR_ERR)) { | 718 | if (unlikely(stat & EIP197_xDR_ERR)) { |
662 | /* | 719 | /* |
@@ -666,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data) | |||
666 | */ | 723 | */ |
667 | dev_err(priv->dev, "RDR: fatal error."); | 724 | dev_err(priv->dev, "RDR: fatal error."); |
668 | } else if (likely(stat & EIP197_xDR_THRESH)) { | 725 | } else if (likely(stat & EIP197_xDR_THRESH)) { |
669 | queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); | 726 | rc = IRQ_WAKE_THREAD; |
670 | } | 727 | } |
671 | 728 | ||
672 | /* ACK the interrupts */ | 729 | /* ACK the interrupts */ |
673 | writel(stat & 0xff, | 730 | writel(stat & 0xff, |
674 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT); | 731 | EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT); |
675 | } | 732 | } |
676 | 733 | ||
677 | /* ACK the interrupts */ | 734 | /* ACK the interrupts */ |
678 | writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring)); | 735 | writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring)); |
736 | |||
737 | return rc; | ||
738 | } | ||
739 | |||
740 | static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) | ||
741 | { | ||
742 | struct safexcel_ring_irq_data *irq_data = data; | ||
743 | struct safexcel_crypto_priv *priv = irq_data->priv; | ||
744 | int ring = irq_data->ring; | ||
745 | |||
746 | safexcel_handle_result_descriptor(priv, ring); | ||
747 | |||
748 | queue_work(priv->ring[ring].workqueue, | ||
749 | &priv->ring[ring].work_data.work); | ||
679 | 750 | ||
680 | return IRQ_HANDLED; | 751 | return IRQ_HANDLED; |
681 | } | 752 | } |
682 | 753 | ||
683 | static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, | 754 | static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, |
684 | irq_handler_t handler, | 755 | irq_handler_t handler, |
756 | irq_handler_t threaded_handler, | ||
685 | struct safexcel_ring_irq_data *ring_irq_priv) | 757 | struct safexcel_ring_irq_data *ring_irq_priv) |
686 | { | 758 | { |
687 | int ret, irq = platform_get_irq_byname(pdev, name); | 759 | int ret, irq = platform_get_irq_byname(pdev, name); |
@@ -691,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n | |||
691 | return irq; | 763 | return irq; |
692 | } | 764 | } |
693 | 765 | ||
694 | ret = devm_request_irq(&pdev->dev, irq, handler, 0, | 766 | ret = devm_request_threaded_irq(&pdev->dev, irq, handler, |
695 | dev_name(&pdev->dev), ring_irq_priv); | 767 | threaded_handler, IRQF_ONESHOT, |
768 | dev_name(&pdev->dev), ring_irq_priv); | ||
696 | if (ret) { | 769 | if (ret) { |
697 | dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); | 770 | dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); |
698 | return ret; | 771 | return ret; |
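The conversion above moves result processing out of hard-IRQ context: the primary handler only inspects and acks the status registers, returning IRQ_WAKE_THREAD when there is work, and the thread function drains the ring before kicking the dequeue workqueue. A minimal sketch of the devm_request_threaded_irq() pattern, with the handler bodies stubbed:

    #include <linux/device.h>
    #include <linux/interrupt.h>

    static irqreturn_t example_hard_handler(int irq, void *data)
    {
            /* Check status; return IRQ_NONE if this device did not fire.
             * Otherwise ack the source and defer the heavy lifting.
             */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t example_thread_handler(int irq, void *data)
    {
            /* Runs in process context: may sleep, take mutexes, etc. */
            return IRQ_HANDLED;
    }

    static int example_request(struct device *dev, int irq, void *priv)
    {
            /* IRQF_ONESHOT keeps the line masked until the thread is done */
            return devm_request_threaded_irq(dev, irq, example_hard_handler,
                                             example_thread_handler,
                                             IRQF_ONESHOT, dev_name(dev),
                                             priv);
    }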
@@ -755,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) | |||
755 | { | 828 | { |
756 | u32 val, mask; | 829 | u32 val, mask; |
757 | 830 | ||
758 | val = readl(priv->base + EIP197_HIA_OPTIONS); | 831 | val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); |
759 | val = (val & GENMASK(27, 25)) >> 25; | 832 | val = (val & GENMASK(27, 25)) >> 25; |
760 | mask = BIT(val) - 1; | 833 | mask = BIT(val) - 1; |
761 | 834 | ||
762 | val = readl(priv->base + EIP197_HIA_OPTIONS); | 835 | val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); |
763 | priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); | 836 | priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); |
764 | 837 | ||
765 | priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); | 838 | priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); |
@@ -769,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) | |||
769 | priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask; | 842 | priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask; |
770 | } | 843 | } |
771 | 844 | ||
845 | static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) | ||
846 | { | ||
847 | struct safexcel_register_offsets *offsets = &priv->offsets; | ||
848 | |||
849 | if (priv->version == EIP197) { | ||
850 | offsets->hia_aic = EIP197_HIA_AIC_BASE; | ||
851 | offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; | ||
852 | offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; | ||
853 | offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE; | ||
854 | offsets->hia_dfe = EIP197_HIA_DFE_BASE; | ||
855 | offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE; | ||
856 | offsets->hia_dse = EIP197_HIA_DSE_BASE; | ||
857 | offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; | ||
858 | offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; | ||
859 | offsets->pe = EIP197_PE_BASE; | ||
860 | } else { | ||
861 | offsets->hia_aic = EIP97_HIA_AIC_BASE; | ||
862 | offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; | ||
863 | offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; | ||
864 | offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE; | ||
865 | offsets->hia_dfe = EIP97_HIA_DFE_BASE; | ||
866 | offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE; | ||
867 | offsets->hia_dse = EIP97_HIA_DSE_BASE; | ||
868 | offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; | ||
869 | offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; | ||
870 | offsets->pe = EIP97_PE_BASE; | ||
871 | } | ||
872 | } | ||
873 | |||
772 | static int safexcel_probe(struct platform_device *pdev) | 874 | static int safexcel_probe(struct platform_device *pdev) |
773 | { | 875 | { |
774 | struct device *dev = &pdev->dev; | 876 | struct device *dev = &pdev->dev; |
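safexcel_init_register_offsets() above fills a per-variant table that the EIP197_*(priv) accessor macros in the header below add to the MMIO base, so a single set of register definitions serves both the EIP97 and EIP197 layouts. A reduced sketch of the indirection:

    struct register_offsets {
            unsigned long pe;       /* one field per register block */
    };

    struct example_priv {
            char *base;             /* ioremapped MMIO base */
            struct register_offsets offsets;
    };

    /* What EIP197_PE(priv) + reg boils down to in the driver */
    static char *pe_reg(struct example_priv *p, unsigned long reg)
    {
            /* offsets.pe is 0xa0000 on EIP197 but 0x10000 on EIP97 */
            return p->base + p->offsets.pe + reg;
    }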
@@ -781,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev) | |||
781 | return -ENOMEM; | 883 | return -ENOMEM; |
782 | 884 | ||
783 | priv->dev = dev; | 885 | priv->dev = dev; |
886 | priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); | ||
887 | |||
888 | safexcel_init_register_offsets(priv); | ||
784 | 889 | ||
785 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 890 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
786 | priv->base = devm_ioremap_resource(dev, res); | 891 | priv->base = devm_ioremap_resource(dev, res); |
@@ -839,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev) | |||
839 | 944 | ||
840 | snprintf(irq_name, 6, "ring%d", i); | 945 | snprintf(irq_name, 6, "ring%d", i); |
841 | irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, | 946 | irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, |
947 | safexcel_irq_ring_thread, | ||
842 | ring_irq); | 948 | ring_irq); |
843 | if (irq < 0) { | 949 | if (irq < 0) { |
844 | ret = irq; | 950 | ret = irq; |
@@ -847,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev) | |||
847 | 953 | ||
848 | priv->ring[i].work_data.priv = priv; | 954 | priv->ring[i].work_data.priv = priv; |
849 | priv->ring[i].work_data.ring = i; | 955 | priv->ring[i].work_data.ring = i; |
850 | INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work); | 956 | INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work); |
851 | 957 | ||
852 | snprintf(wq_name, 9, "wq_ring%d", i); | 958 | snprintf(wq_name, 9, "wq_ring%d", i); |
853 | priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); | 959 | priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); |
@@ -856,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev) | |||
856 | goto err_clk; | 962 | goto err_clk; |
857 | } | 963 | } |
858 | 964 | ||
965 | priv->ring[i].requests_left = 0; | ||
966 | priv->ring[i].busy = false; | ||
967 | |||
859 | crypto_init_queue(&priv->ring[i].queue, | 968 | crypto_init_queue(&priv->ring[i].queue, |
860 | EIP197_DEFAULT_RING_SIZE); | 969 | EIP197_DEFAULT_RING_SIZE); |
861 | 970 | ||
@@ -903,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev) | |||
903 | } | 1012 | } |
904 | 1013 | ||
905 | static const struct of_device_id safexcel_of_match_table[] = { | 1014 | static const struct of_device_id safexcel_of_match_table[] = { |
906 | { .compatible = "inside-secure,safexcel-eip197" }, | 1015 | { |
1016 | .compatible = "inside-secure,safexcel-eip97", | ||
1017 | .data = (void *)EIP97, | ||
1018 | }, | ||
1019 | { | ||
1020 | .compatible = "inside-secure,safexcel-eip197", | ||
1021 | .data = (void *)EIP197, | ||
1022 | }, | ||
907 | {}, | 1023 | {}, |
908 | }; | 1024 | }; |
909 | 1025 | ||
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 304c5838c11a..4e219c21608b 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h | |||
@@ -19,64 +19,103 @@ | |||
19 | #define EIP197_HIA_VERSION_BE 0x35ca | 19 | #define EIP197_HIA_VERSION_BE 0x35ca |
20 | 20 | ||
21 | /* Static configuration */ | 21 | /* Static configuration */ |
22 | #define EIP197_DEFAULT_RING_SIZE 64 | 22 | #define EIP197_DEFAULT_RING_SIZE 400 |
23 | #define EIP197_MAX_TOKENS 5 | 23 | #define EIP197_MAX_TOKENS 5 |
24 | #define EIP197_MAX_RINGS 4 | 24 | #define EIP197_MAX_RINGS 4 |
25 | #define EIP197_FETCH_COUNT 1 | 25 | #define EIP197_FETCH_COUNT 1 |
26 | #define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE | 26 | #define EIP197_MAX_BATCH_SZ 64 |
27 | 27 | ||
28 | #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ | 28 | #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ |
29 | GFP_KERNEL : GFP_ATOMIC) | 29 | GFP_KERNEL : GFP_ATOMIC) |
30 | 30 | ||
31 | /* Register base offsets */ | ||
32 | #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) | ||
33 | #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) | ||
34 | #define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r) | ||
35 | #define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr) | ||
36 | #define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe) | ||
37 | #define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr) | ||
38 | #define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse) | ||
39 | #define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) | ||
40 | #define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) | ||
41 | #define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) | ||
42 | |||
43 | /* EIP197 base offsets */ | ||
44 | #define EIP197_HIA_AIC_BASE 0x90000 | ||
45 | #define EIP197_HIA_AIC_G_BASE 0x90000 | ||
46 | #define EIP197_HIA_AIC_R_BASE 0x90800 | ||
47 | #define EIP197_HIA_AIC_xDR_BASE 0x80000 | ||
48 | #define EIP197_HIA_DFE_BASE 0x8c000 | ||
49 | #define EIP197_HIA_DFE_THR_BASE 0x8c040 | ||
50 | #define EIP197_HIA_DSE_BASE 0x8d000 | ||
51 | #define EIP197_HIA_DSE_THR_BASE 0x8d040 | ||
52 | #define EIP197_HIA_GEN_CFG_BASE 0xf0000 | ||
53 | #define EIP197_PE_BASE 0xa0000 | ||
54 | |||
55 | /* EIP97 base offsets */ | ||
56 | #define EIP97_HIA_AIC_BASE 0x0 | ||
57 | #define EIP97_HIA_AIC_G_BASE 0x0 | ||
58 | #define EIP97_HIA_AIC_R_BASE 0x0 | ||
59 | #define EIP97_HIA_AIC_xDR_BASE 0x0 | ||
60 | #define EIP97_HIA_DFE_BASE 0xf000 | ||
61 | #define EIP97_HIA_DFE_THR_BASE 0xf200 | ||
62 | #define EIP97_HIA_DSE_BASE 0xf400 | ||
63 | #define EIP97_HIA_DSE_THR_BASE 0xf600 | ||
64 | #define EIP97_HIA_GEN_CFG_BASE 0x10000 | ||
65 | #define EIP97_PE_BASE 0x10000 | ||
66 | |||
31 | /* CDR/RDR register offsets */ | 67 | /* CDR/RDR register offsets */ |
32 | #define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000) | 68 | #define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) |
33 | #define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r)) | 69 | #define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r)) |
34 | #define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800) | 70 | #define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800) |
35 | #define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0 | 71 | #define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000 |
36 | #define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4 | 72 | #define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004 |
37 | #define EIP197_HIA_xDR_RING_SIZE 0x18 | 73 | #define EIP197_HIA_xDR_RING_SIZE 0x0018 |
38 | #define EIP197_HIA_xDR_DESC_SIZE 0x1c | 74 | #define EIP197_HIA_xDR_DESC_SIZE 0x001c |
39 | #define EIP197_HIA_xDR_CFG 0x20 | 75 | #define EIP197_HIA_xDR_CFG 0x0020 |
40 | #define EIP197_HIA_xDR_DMA_CFG 0x24 | 76 | #define EIP197_HIA_xDR_DMA_CFG 0x0024 |
41 | #define EIP197_HIA_xDR_THRESH 0x28 | 77 | #define EIP197_HIA_xDR_THRESH 0x0028 |
42 | #define EIP197_HIA_xDR_PREP_COUNT 0x2c | 78 | #define EIP197_HIA_xDR_PREP_COUNT 0x002c |
43 | #define EIP197_HIA_xDR_PROC_COUNT 0x30 | 79 | #define EIP197_HIA_xDR_PROC_COUNT 0x0030 |
44 | #define EIP197_HIA_xDR_PREP_PNTR 0x34 | 80 | #define EIP197_HIA_xDR_PREP_PNTR 0x0034 |
45 | #define EIP197_HIA_xDR_PROC_PNTR 0x38 | 81 | #define EIP197_HIA_xDR_PROC_PNTR 0x0038 |
46 | #define EIP197_HIA_xDR_STAT 0x3c | 82 | #define EIP197_HIA_xDR_STAT 0x003c |
47 | 83 | ||
48 | /* register offsets */ | 84 | /* register offsets */ |
49 | #define EIP197_HIA_DFE_CFG 0x8c000 | 85 | #define EIP197_HIA_DFE_CFG 0x0000 |
50 | #define EIP197_HIA_DFE_THR_CTRL 0x8c040 | 86 | #define EIP197_HIA_DFE_THR_CTRL 0x0000 |
51 | #define EIP197_HIA_DFE_THR_STAT 0x8c044 | 87 | #define EIP197_HIA_DFE_THR_STAT 0x0004 |
52 | #define EIP197_HIA_DSE_CFG 0x8d000 | 88 | #define EIP197_HIA_DSE_CFG 0x0000 |
53 | #define EIP197_HIA_DSE_THR_CTRL 0x8d040 | 89 | #define EIP197_HIA_DSE_THR_CTRL 0x0000 |
54 | #define EIP197_HIA_DSE_THR_STAT 0x8d044 | 90 | #define EIP197_HIA_DSE_THR_STAT 0x0004 |
55 | #define EIP197_HIA_RA_PE_CTRL 0x90010 | 91 | #define EIP197_HIA_RA_PE_CTRL 0x0010 |
56 | #define EIP197_HIA_RA_PE_STAT 0x90014 | 92 | #define EIP197_HIA_RA_PE_STAT 0x0014 |
57 | #define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000) | 93 | #define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000) |
58 | #define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r)) | 94 | #define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r)) |
59 | #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r)) | 95 | #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r)) |
60 | #define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r)) | 96 | #define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r)) |
61 | #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r)) | 97 | #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r)) |
62 | #define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808 | 98 | #define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808 |
63 | #define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810 | 99 | #define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810 |
64 | #define EIP197_HIA_AIC_G_ACK 0x9f810 | 100 | #define EIP197_HIA_AIC_G_ACK 0xf810 |
65 | #define EIP197_HIA_MST_CTRL 0x9fff4 | 101 | #define EIP197_HIA_MST_CTRL 0xfff4 |
66 | #define EIP197_HIA_OPTIONS 0x9fff8 | 102 | #define EIP197_HIA_OPTIONS 0xfff8 |
67 | #define EIP197_HIA_VERSION 0x9fffc | 103 | #define EIP197_HIA_VERSION 0xfffc |
68 | #define EIP197_PE_IN_DBUF_THRES 0xa0000 | 104 | #define EIP197_PE_IN_DBUF_THRES 0x0000 |
69 | #define EIP197_PE_IN_TBUF_THRES 0xa0100 | 105 | #define EIP197_PE_IN_TBUF_THRES 0x0100 |
70 | #define EIP197_PE_ICE_SCRATCH_RAM 0xa0800 | 106 | #define EIP197_PE_ICE_SCRATCH_RAM 0x0800 |
71 | #define EIP197_PE_ICE_PUE_CTRL 0xa0c80 | 107 | #define EIP197_PE_ICE_PUE_CTRL 0x0c80 |
72 | #define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04 | 108 | #define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04 |
73 | #define EIP197_PE_ICE_FPP_CTRL 0xa0d80 | 109 | #define EIP197_PE_ICE_FPP_CTRL 0x0d80 |
74 | #define EIP197_PE_ICE_RAM_CTRL 0xa0ff0 | 110 | #define EIP197_PE_ICE_RAM_CTRL 0x0ff0 |
75 | #define EIP197_PE_EIP96_FUNCTION_EN 0xa1004 | 111 | #define EIP197_PE_EIP96_FUNCTION_EN 0x1004 |
76 | #define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008 | 112 | #define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008 |
77 | #define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c | 113 | #define EIP197_PE_EIP96_CONTEXT_STAT 0x100c |
78 | #define EIP197_PE_OUT_DBUF_THRES 0xa1c00 | 114 | #define EIP197_PE_OUT_DBUF_THRES 0x1c00 |
79 | #define EIP197_PE_OUT_TBUF_THRES 0xa1d00 | 115 | #define EIP197_PE_OUT_TBUF_THRES 0x1d00 |
116 | #define EIP197_MST_CTRL 0xfff4 | ||
117 | |||
118 | /* EIP197-specific registers, no indirection */ | ||
80 | #define EIP197_CLASSIFICATION_RAMS 0xe0000 | 119 | #define EIP197_CLASSIFICATION_RAMS 0xe0000 |
81 | #define EIP197_TRC_CTRL 0xf0800 | 120 | #define EIP197_TRC_CTRL 0xf0800 |
82 | #define EIP197_TRC_LASTRES 0xf0804 | 121 | #define EIP197_TRC_LASTRES 0xf0804 |
@@ -90,7 +129,6 @@ | |||
90 | #define EIP197_TRC_ECCDATASTAT 0xf083c | 129 | #define EIP197_TRC_ECCDATASTAT 0xf083c |
91 | #define EIP197_TRC_ECCDATA 0xf0840 | 130 | #define EIP197_TRC_ECCDATA 0xf0840 |
92 | #define EIP197_CS_RAM_CTRL 0xf7ff0 | 131 | #define EIP197_CS_RAM_CTRL 0xf7ff0 |
93 | #define EIP197_MST_CTRL 0xffff4 | ||
94 | 132 | ||
95 | /* EIP197_HIA_xDR_DESC_SIZE */ | 133 | /* EIP197_HIA_xDR_DESC_SIZE */ |
96 | #define EIP197_xDR_DESC_MODE_64BIT BIT(31) | 134 | #define EIP197_xDR_DESC_MODE_64BIT BIT(31) |
@@ -117,6 +155,8 @@ | |||
117 | #define EIP197_xDR_PREP_CLR_COUNT BIT(31) | 155 | #define EIP197_xDR_PREP_CLR_COUNT BIT(31) |
118 | 156 | ||
119 | /* EIP197_HIA_xDR_PROC_COUNT */ | 157 | /* EIP197_HIA_xDR_PROC_COUNT */ |
158 | #define EIP197_xDR_PROC_xD_PKT_OFFSET 24 | ||
159 | #define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0) | ||
120 | #define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2) | 160 | #define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2) |
121 | #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24) | 161 | #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24) |
122 | #define EIP197_xDR_PROC_CLR_COUNT BIT(31) | 162 | #define EIP197_xDR_PROC_CLR_COUNT BIT(31) |
@@ -463,12 +503,33 @@ struct safexcel_work_data { | |||
463 | int ring; | 503 | int ring; |
464 | }; | 504 | }; |
465 | 505 | ||
506 | enum safexcel_eip_version { | ||
507 | EIP97, | ||
508 | EIP197, | ||
509 | }; | ||
510 | |||
511 | struct safexcel_register_offsets { | ||
512 | u32 hia_aic; | ||
513 | u32 hia_aic_g; | ||
514 | u32 hia_aic_r; | ||
515 | u32 hia_aic_xdr; | ||
516 | u32 hia_dfe; | ||
517 | u32 hia_dfe_thr; | ||
518 | u32 hia_dse; | ||
519 | u32 hia_dse_thr; | ||
520 | u32 hia_gen_cfg; | ||
521 | u32 pe; | ||
522 | }; | ||
523 | |||
466 | struct safexcel_crypto_priv { | 524 | struct safexcel_crypto_priv { |
467 | void __iomem *base; | 525 | void __iomem *base; |
468 | struct device *dev; | 526 | struct device *dev; |
469 | struct clk *clk; | 527 | struct clk *clk; |
470 | struct safexcel_config config; | 528 | struct safexcel_config config; |
471 | 529 | ||
530 | enum safexcel_eip_version version; | ||
531 | struct safexcel_register_offsets offsets; | ||
532 | |||
472 | /* context DMA pool */ | 533 | /* context DMA pool */ |
473 | struct dma_pool *context_pool; | 534 | struct dma_pool *context_pool; |
474 | 535 | ||
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv { | |||
489 | /* queue */ | 550 | /* queue */ |
490 | struct crypto_queue queue; | 551 | struct crypto_queue queue; |
491 | spinlock_t queue_lock; | 552 | spinlock_t queue_lock; |
492 | bool need_dequeue; | 553 | |
554 | /* Number of requests in the engine that need the threshold | ||
555 | * interrupt to be set up. | ||
556 | */ | ||
557 | int requests_left; | ||
558 | |||
559 | /* The ring is currently handling at least one request */ | ||
560 | bool busy; | ||
561 | |||
562 | /* Store for current requests when bailing out of the dequeueing | ||
563 | * function when not enough resources are available. | ||
564 | */ | ||
565 | struct crypto_async_request *req; | ||
566 | struct crypto_async_request *backlog; | ||
493 | } ring[EIP197_MAX_RINGS]; | 567 | } ring[EIP197_MAX_RINGS]; |
494 | }; | 568 | }; |
495 | 569 | ||
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv, | |||
539 | struct crypto_async_request *req, | 613 | struct crypto_async_request *req, |
540 | int result_sz); | 614 | int result_sz); |
541 | int safexcel_invalidate_cache(struct crypto_async_request *async, | 615 | int safexcel_invalidate_cache(struct crypto_async_request *async, |
542 | struct safexcel_context *ctx, | ||
543 | struct safexcel_crypto_priv *priv, | 616 | struct safexcel_crypto_priv *priv, |
544 | dma_addr_t ctxr_dma, int ring, | 617 | dma_addr_t ctxr_dma, int ring, |
545 | struct safexcel_request *request); | 618 | struct safexcel_request *request); |
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index fcc0a606d748..63a8768ed2ae 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c | |||
@@ -27,7 +27,6 @@ struct safexcel_cipher_ctx { | |||
27 | struct safexcel_context base; | 27 | struct safexcel_context base; |
28 | struct safexcel_crypto_priv *priv; | 28 | struct safexcel_crypto_priv *priv; |
29 | 29 | ||
30 | enum safexcel_cipher_direction direction; | ||
31 | u32 mode; | 30 | u32 mode; |
32 | 31 | ||
33 | __le32 key[8]; | 32 | __le32 key[8]; |
@@ -35,6 +34,7 @@ struct safexcel_cipher_ctx { | |||
35 | }; | 34 | }; |
36 | 35 | ||
37 | struct safexcel_cipher_req { | 36 | struct safexcel_cipher_req { |
37 | enum safexcel_cipher_direction direction; | ||
38 | bool needs_inv; | 38 | bool needs_inv; |
39 | }; | 39 | }; |
40 | 40 | ||
@@ -69,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, | |||
69 | { | 69 | { |
70 | struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); | 70 | struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); |
71 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | 71 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
72 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
72 | struct crypto_aes_ctx aes; | 73 | struct crypto_aes_ctx aes; |
73 | int ret, i; | 74 | int ret, i; |
74 | 75 | ||
@@ -78,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, | |||
78 | return ret; | 79 | return ret; |
79 | } | 80 | } |
80 | 81 | ||
81 | for (i = 0; i < len / sizeof(u32); i++) { | 82 | if (priv->version == EIP197 && ctx->base.ctxr_dma) { |
82 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { | 83 | for (i = 0; i < len / sizeof(u32); i++) { |
83 | ctx->base.needs_inv = true; | 84 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { |
84 | break; | 85 | ctx->base.needs_inv = true; |
86 | break; | ||
87 | } | ||
85 | } | 88 | } |
86 | } | 89 | } |
87 | 90 | ||
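setkey now scans for a key change only on an EIP197 that already owns a context record (ctx->base.ctxr_dma set); the EIP97 keeps no record cache, so needs_inv must never be raised there, which is also why the send paths below gain BUG_ON(priv->version == EIP97 && ...). The comparison itself is a plain word-by-word scan; as a standalone sketch:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true when any 32-bit word of the new key differs from the
 * cached copy, i.e. the engine-side context record has gone stale. */
static bool key_changed(const uint32_t *cached, const uint32_t *fresh,
			size_t len_bytes)
{
	for (size_t i = 0; i < len_bytes / sizeof(uint32_t); i++) {
		if (cached[i] != fresh[i])
			return true;
	}
	return false;
}
```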
@@ -95,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, | |||
95 | } | 98 | } |
96 | 99 | ||
97 | static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, | 100 | static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, |
101 | struct crypto_async_request *async, | ||
98 | struct safexcel_command_desc *cdesc) | 102 | struct safexcel_command_desc *cdesc) |
99 | { | 103 | { |
100 | struct safexcel_crypto_priv *priv = ctx->priv; | 104 | struct safexcel_crypto_priv *priv = ctx->priv; |
105 | struct skcipher_request *req = skcipher_request_cast(async); | ||
106 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); | ||
101 | int ctrl_size; | 107 | int ctrl_size; |
102 | 108 | ||
103 | if (ctx->direction == SAFEXCEL_ENCRYPT) | 109 | if (sreq->direction == SAFEXCEL_ENCRYPT) |
104 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; | 110 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; |
105 | else | 111 | else |
106 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN; | 112 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN; |
@@ -243,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async, | |||
243 | n_cdesc++; | 249 | n_cdesc++; |
244 | 250 | ||
245 | if (n_cdesc == 1) { | 251 | if (n_cdesc == 1) { |
246 | safexcel_context_control(ctx, cdesc); | 252 | safexcel_context_control(ctx, async, cdesc); |
247 | safexcel_cipher_token(ctx, async, cdesc, req->cryptlen); | 253 | safexcel_cipher_token(ctx, async, cdesc, req->cryptlen); |
248 | } | 254 | } |
249 | 255 | ||
@@ -353,8 +359,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | |||
353 | if (enq_ret != -EINPROGRESS) | 359 | if (enq_ret != -EINPROGRESS) |
354 | *ret = enq_ret; | 360 | *ret = enq_ret; |
355 | 361 | ||
356 | if (!priv->ring[ring].need_dequeue) | 362 | queue_work(priv->ring[ring].workqueue, |
357 | safexcel_dequeue(priv, ring); | 363 | &priv->ring[ring].work_data.work); |
358 | 364 | ||
359 | *should_complete = false; | 365 | *should_complete = false; |
360 | 366 | ||
@@ -390,7 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, | |||
390 | struct safexcel_crypto_priv *priv = ctx->priv; | 396 | struct safexcel_crypto_priv *priv = ctx->priv; |
391 | int ret; | 397 | int ret; |
392 | 398 | ||
393 | ret = safexcel_invalidate_cache(async, &ctx->base, priv, | 399 | ret = safexcel_invalidate_cache(async, priv, |
394 | ctx->base.ctxr_dma, ring, request); | 400 | ctx->base.ctxr_dma, ring, request); |
395 | if (unlikely(ret)) | 401 | if (unlikely(ret)) |
396 | return ret; | 402 | return ret; |
@@ -406,9 +412,13 @@ static int safexcel_send(struct crypto_async_request *async, | |||
406 | int *commands, int *results) | 412 | int *commands, int *results) |
407 | { | 413 | { |
408 | struct skcipher_request *req = skcipher_request_cast(async); | 414 | struct skcipher_request *req = skcipher_request_cast(async); |
415 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
409 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); | 416 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
417 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
410 | int ret; | 418 | int ret; |
411 | 419 | ||
420 | BUG_ON(priv->version == EIP97 && sreq->needs_inv); | ||
421 | |||
412 | if (sreq->needs_inv) | 422 | if (sreq->needs_inv) |
413 | ret = safexcel_cipher_send_inv(async, ring, request, | 423 | ret = safexcel_cipher_send_inv(async, ring, request, |
414 | commands, results); | 424 | commands, results); |
@@ -443,8 +453,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) | |||
443 | crypto_enqueue_request(&priv->ring[ring].queue, &req->base); | 453 | crypto_enqueue_request(&priv->ring[ring].queue, &req->base); |
444 | spin_unlock_bh(&priv->ring[ring].queue_lock); | 454 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
445 | 455 | ||
446 | if (!priv->ring[ring].need_dequeue) | 456 | queue_work(priv->ring[ring].workqueue, |
447 | safexcel_dequeue(priv, ring); | 457 | &priv->ring[ring].work_data.work); |
448 | 458 | ||
449 | wait_for_completion_interruptible(&result.completion); | 459 | wait_for_completion_interruptible(&result.completion); |
450 | 460 | ||
@@ -467,11 +477,11 @@ static int safexcel_aes(struct skcipher_request *req, | |||
467 | int ret, ring; | 477 | int ret, ring; |
468 | 478 | ||
469 | sreq->needs_inv = false; | 479 | sreq->needs_inv = false; |
470 | ctx->direction = dir; | 480 | sreq->direction = dir; |
471 | ctx->mode = mode; | 481 | ctx->mode = mode; |
472 | 482 | ||
473 | if (ctx->base.ctxr) { | 483 | if (ctx->base.ctxr) { |
474 | if (ctx->base.needs_inv) { | 484 | if (priv->version == EIP197 && ctx->base.needs_inv) { |
475 | sreq->needs_inv = true; | 485 | sreq->needs_inv = true; |
476 | ctx->base.needs_inv = false; | 486 | ctx->base.needs_inv = false; |
477 | } | 487 | } |
@@ -490,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req, | |||
490 | ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); | 500 | ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); |
491 | spin_unlock_bh(&priv->ring[ring].queue_lock); | 501 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
492 | 502 | ||
493 | if (!priv->ring[ring].need_dequeue) | 503 | queue_work(priv->ring[ring].workqueue, |
494 | safexcel_dequeue(priv, ring); | 504 | &priv->ring[ring].work_data.work); |
495 | 505 | ||
496 | return ret; | 506 | return ret; |
497 | } | 507 | } |
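Every `if (!priv->ring[ring].need_dequeue) safexcel_dequeue(...)` site becomes an unconditional queue_work() on a per-ring workqueue. queue_work() is a no-op while the work item is still pending, so duplicate submissions collapse naturally and the flag's race window disappears. A sketch of the work item this implies; the work_data fields are assumed from the call sites, not shown in this patch:

```c
/* Sketch: per-ring dequeue work item (kernel context assumed). */
static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
		container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring); /* assumed fields */
}
```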
@@ -539,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) | |||
539 | 549 | ||
540 | memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); | 550 | memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); |
541 | 551 | ||
542 | ret = safexcel_cipher_exit_inv(tfm); | 552 | if (priv->version == EIP197) { |
543 | if (ret) | 553 | ret = safexcel_cipher_exit_inv(tfm); |
544 | dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); | 554 | if (ret) |
555 | dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); | ||
556 | } else { | ||
557 | dma_pool_free(priv->context_pool, ctx->base.ctxr, | ||
558 | ctx->base.ctxr_dma); | ||
559 | } | ||
545 | } | 560 | } |
546 | 561 | ||
547 | struct safexcel_alg_template safexcel_alg_ecb_aes = { | 562 | struct safexcel_alg_template safexcel_alg_ecb_aes = { |
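The exit path now forks on the engine version: an EIP197 may hold a cached copy of the context record, so it is asked to invalidate it (a full round trip through a ring) before the record is recycled, while an EIP97 has no record cache and can return the record straight to the DMA pool. The same fork is applied to the hash path further down. In outline, using the names from the hunk:

```c
/* Outline of the version fork on teardown (names from the hunk). */
if (priv->version == EIP197) {
	/* The engine may cache context records: queue an invalidation
	 * request and wait for it to complete. */
	ret = safexcel_cipher_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
} else {
	/* EIP97: no record cache, free the DMA memory directly. */
	dma_pool_free(priv->context_pool, ctx->base.ctxr, ctx->base.ctxr_dma);
}
```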
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 0c5a5820b06e..122a2a58e98f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/dmapool.h> | 15 | #include <linux/dmapool.h> |
16 | 16 | ||
17 | |||
18 | #include "safexcel.h" | 17 | #include "safexcel.h" |
19 | 18 | ||
20 | struct safexcel_ahash_ctx { | 19 | struct safexcel_ahash_ctx { |
@@ -34,6 +33,8 @@ struct safexcel_ahash_req { | |||
34 | bool hmac; | 33 | bool hmac; |
35 | bool needs_inv; | 34 | bool needs_inv; |
36 | 35 | ||
36 | int nents; | ||
37 | |||
37 | u8 state_sz; /* expected state size, only set once */ | 38 | u8 state_sz; /* expected state size, only set once */ |
38 | u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); | 39 | u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); |
39 | 40 | ||
@@ -152,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
152 | memcpy(areq->result, sreq->state, | 153 | memcpy(areq->result, sreq->state, |
153 | crypto_ahash_digestsize(ahash)); | 154 | crypto_ahash_digestsize(ahash)); |
154 | 155 | ||
155 | dma_unmap_sg(priv->dev, areq->src, | 156 | if (sreq->nents) { |
156 | sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); | 157 | dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); |
158 | sreq->nents = 0; | ||
159 | } | ||
157 | 160 | ||
158 | safexcel_free_context(priv, async, sreq->state_sz); | 161 | safexcel_free_context(priv, async, sreq->state_sz); |
159 | 162 | ||
@@ -178,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
178 | struct safexcel_command_desc *cdesc, *first_cdesc = NULL; | 181 | struct safexcel_command_desc *cdesc, *first_cdesc = NULL; |
179 | struct safexcel_result_desc *rdesc; | 182 | struct safexcel_result_desc *rdesc; |
180 | struct scatterlist *sg; | 183 | struct scatterlist *sg; |
181 | int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; | 184 | int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; |
182 | 185 | ||
183 | queued = len = req->len - req->processed; | 186 | queued = len = req->len - req->processed; |
184 | if (queued < crypto_ahash_blocksize(ahash)) | 187 | if (queued < crypto_ahash_blocksize(ahash)) |
@@ -186,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
186 | else | 189 | else |
187 | cache_len = queued - areq->nbytes; | 190 | cache_len = queued - areq->nbytes; |
188 | 191 | ||
189 | /* | 192 | if (!req->last_req) { |
190 | * If this is not the last request and the queued data does not fit | 193 | /* If this is not the last request and the queued data does not |
191 | * into full blocks, cache it for the next send() call. | 194 | * fit into full blocks, cache it for the next send() call. |
192 | */ | 195 | */ |
193 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); | 196 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); |
194 | if (!req->last_req && extra) { | 197 | if (!extra) |
195 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 198 | /* If this is not the last request and the queued data |
196 | req->cache_next, extra, areq->nbytes - extra); | 199 | * is a multiple of a block, cache the last one for now. |
197 | 200 | */ | |
198 | queued -= extra; | 201 | extra = queued - crypto_ahash_blocksize(ahash); |
199 | len -= extra; | 202 | |
203 | if (extra) { | ||
204 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | ||
205 | req->cache_next, extra, | ||
206 | areq->nbytes - extra); | ||
207 | |||
208 | queued -= extra; | ||
209 | len -= extra; | ||
210 | |||
211 | if (!queued) { | ||
212 | *commands = 0; | ||
213 | *results = 0; | ||
214 | return 0; | ||
215 | } | ||
216 | } | ||
200 | } | 217 | } |
201 | 218 | ||
202 | spin_lock_bh(&priv->ring[ring].egress_lock); | 219 | spin_lock_bh(&priv->ring[ring].egress_lock); |
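The rewritten branch fixes the exact-multiple case: when a non-final request's queued data is a whole number of blocks, the old code cached nothing and sent everything, leaving no data behind for the final padding pass. Extracted as a standalone helper, the hold-back rule from the hunk is:

```c
#include <stddef.h>

/* queued is the pending byte count, blocksize a power of two. Returns
 * how many trailing bytes a non-final request should cache. */
static size_t bytes_to_cache(size_t queued, size_t blocksize)
{
	size_t extra = queued & (blocksize - 1);

	if (!extra)
		/* Exact multiple of a block: keep everything past the
		 * first block so a later final request has data to pad.
		 * (queued == blocksize yields 0: send the lone block.) */
		extra = queued - blocksize;
	return extra;
}
```

When the hold-back consumes everything queued, send() now reports zero commands and results and simply returns, as the new early-exit above shows.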
@@ -234,15 +251,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
234 | } | 251 | } |
235 | 252 | ||
236 | /* Now handle the current ahash request buffer(s) */ | 253 | /* Now handle the current ahash request buffer(s) */ |
237 | nents = dma_map_sg(priv->dev, areq->src, | 254 | req->nents = dma_map_sg(priv->dev, areq->src, |
238 | sg_nents_for_len(areq->src, areq->nbytes), | 255 | sg_nents_for_len(areq->src, areq->nbytes), |
239 | DMA_TO_DEVICE); | 256 | DMA_TO_DEVICE); |
240 | if (!nents) { | 257 | if (!req->nents) { |
241 | ret = -ENOMEM; | 258 | ret = -ENOMEM; |
242 | goto cdesc_rollback; | 259 | goto cdesc_rollback; |
243 | } | 260 | } |
244 | 261 | ||
245 | for_each_sg(areq->src, sg, nents, i) { | 262 | for_each_sg(areq->src, sg, req->nents, i) { |
246 | int sglen = sg_dma_len(sg); | 263 | int sglen = sg_dma_len(sg); |
247 | 264 | ||
248 | /* Do not overflow the request */ | 265 | /* Do not overflow the request */ |
@@ -382,8 +399,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | |||
382 | if (enq_ret != -EINPROGRESS) | 399 | if (enq_ret != -EINPROGRESS) |
383 | *ret = enq_ret; | 400 | *ret = enq_ret; |
384 | 401 | ||
385 | if (!priv->ring[ring].need_dequeue) | 402 | queue_work(priv->ring[ring].workqueue, |
386 | safexcel_dequeue(priv, ring); | 403 | &priv->ring[ring].work_data.work); |
387 | 404 | ||
388 | *should_complete = false; | 405 | *should_complete = false; |
389 | 406 | ||
@@ -398,6 +415,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, | |||
398 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 415 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
399 | int err; | 416 | int err; |
400 | 417 | ||
418 | BUG_ON(priv->version == EIP97 && req->needs_inv); | ||
419 | |||
401 | if (req->needs_inv) { | 420 | if (req->needs_inv) { |
402 | req->needs_inv = false; | 421 | req->needs_inv = false; |
403 | err = safexcel_handle_inv_result(priv, ring, async, | 422 | err = safexcel_handle_inv_result(priv, ring, async, |
@@ -418,7 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, | |||
418 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | 437 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); |
419 | int ret; | 438 | int ret; |
420 | 439 | ||
421 | ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, | 440 | ret = safexcel_invalidate_cache(async, ctx->priv, |
422 | ctx->base.ctxr_dma, ring, request); | 441 | ctx->base.ctxr_dma, ring, request); |
423 | if (unlikely(ret)) | 442 | if (unlikely(ret)) |
424 | return ret; | 443 | return ret; |
@@ -471,8 +490,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | |||
471 | crypto_enqueue_request(&priv->ring[ring].queue, &req->base); | 490 | crypto_enqueue_request(&priv->ring[ring].queue, &req->base); |
472 | spin_unlock_bh(&priv->ring[ring].queue_lock); | 491 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
473 | 492 | ||
474 | if (!priv->ring[ring].need_dequeue) | 493 | queue_work(priv->ring[ring].workqueue, |
475 | safexcel_dequeue(priv, ring); | 494 | &priv->ring[ring].work_data.work); |
476 | 495 | ||
477 | wait_for_completion_interruptible(&result.completion); | 496 | wait_for_completion_interruptible(&result.completion); |
478 | 497 | ||
@@ -485,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | |||
485 | return 0; | 504 | return 0; |
486 | } | 505 | } |
487 | 506 | ||
507 | /* safexcel_ahash_cache: cache data until at least one request can be sent to | ||
508 | * the engine, i.e. when there is at least one block of data in the pipe. | ||
509 | */ | ||
488 | static int safexcel_ahash_cache(struct ahash_request *areq) | 510 | static int safexcel_ahash_cache(struct ahash_request *areq) |
489 | { | 511 | { |
490 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 512 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
491 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | 513 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
492 | int queued, cache_len; | 514 | int queued, cache_len; |
493 | 515 | ||
516 | /* cache_len: everything accepted by the driver but not sent yet, | ||
517 | * tot sz handled by update() - last req sz - tot sz handled by send() | ||
518 | */ | ||
494 | cache_len = req->len - areq->nbytes - req->processed; | 519 | cache_len = req->len - areq->nbytes - req->processed; |
520 | /* queued: everything accepted by the driver which will be handled by | ||
521 | * the next send() calls. | ||
522 | * tot sz handled by update() - tot sz handled by send() | ||
523 | */ | ||
495 | queued = req->len - req->processed; | 524 | queued = req->len - req->processed; |
496 | 525 | ||
497 | /* | 526 | /* |
@@ -505,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq) | |||
505 | return areq->nbytes; | 534 | return areq->nbytes; |
506 | } | 535 | } |
507 | 536 | ||
508 | /* We could'nt cache all the data */ | 537 | /* We couldn't cache all the data */ |
509 | return -E2BIG; | 538 | return -E2BIG; |
510 | } | 539 | } |
511 | 540 | ||
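The two new comments define the driver's bookkeeping: req->len counts everything ever accepted by update(), req->processed everything already consumed by send(). A worked example with illustrative numbers:

```c
/*
 * Worked example of the bookkeeping above (illustrative numbers):
 * update() has accepted req->len = 200 bytes in total, send() has
 * already consumed req->processed = 128, and the request now being
 * enqueued carries areq->nbytes = 8. Then:
 *
 *   queued    = req->len - req->processed                = 72
 *   cache_len = req->len - areq->nbytes - req->processed = 64
 *
 * i.e. 64 bytes sit in the cache from earlier update() calls and the
 * next send() will see those plus the new 8 bytes.
 */
```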
@@ -518,10 +547,17 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) | |||
518 | 547 | ||
519 | req->needs_inv = false; | 548 | req->needs_inv = false; |
520 | 549 | ||
521 | if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) | ||
522 | ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); | ||
523 | |||
524 | if (ctx->base.ctxr) { | 550 | if (ctx->base.ctxr) { |
551 | if (priv->version == EIP197 && | ||
552 | !ctx->base.needs_inv && req->processed && | ||
553 | ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) | ||
554 | /* We're still setting needs_inv here, even though it is | ||
555 | * cleared right away, because the needs_inv flag can be | ||
556 | * set in other functions and we want to keep the same | ||
557 | * logic. | ||
558 | */ | ||
559 | ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); | ||
560 | |||
525 | if (ctx->base.needs_inv) { | 561 | if (ctx->base.needs_inv) { |
526 | ctx->base.needs_inv = false; | 562 | ctx->base.needs_inv = false; |
527 | req->needs_inv = true; | 563 | req->needs_inv = true; |
@@ -541,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) | |||
541 | ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base); | 577 | ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base); |
542 | spin_unlock_bh(&priv->ring[ring].queue_lock); | 578 | spin_unlock_bh(&priv->ring[ring].queue_lock); |
543 | 579 | ||
544 | if (!priv->ring[ring].need_dequeue) | 580 | queue_work(priv->ring[ring].workqueue, |
545 | safexcel_dequeue(priv, ring); | 581 | &priv->ring[ring].work_data.work); |
546 | 582 | ||
547 | return ret; | 583 | return ret; |
548 | } | 584 | } |
@@ -625,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) | |||
625 | export->processed = req->processed; | 661 | export->processed = req->processed; |
626 | 662 | ||
627 | memcpy(export->state, req->state, req->state_sz); | 663 | memcpy(export->state, req->state, req->state_sz); |
628 | memset(export->cache, 0, crypto_ahash_blocksize(ahash)); | ||
629 | memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); | 664 | memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); |
630 | 665 | ||
631 | return 0; | 666 | return 0; |
@@ -707,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) | |||
707 | if (!ctx->base.ctxr) | 742 | if (!ctx->base.ctxr) |
708 | return; | 743 | return; |
709 | 744 | ||
710 | ret = safexcel_ahash_exit_inv(tfm); | 745 | if (priv->version == EIP197) { |
711 | if (ret) | 746 | ret = safexcel_ahash_exit_inv(tfm); |
712 | dev_warn(priv->dev, "hash: invalidation error %d\n", ret); | 747 | if (ret) |
748 | dev_warn(priv->dev, "hash: invalidation error %d\n", ret); | ||
749 | } else { | ||
750 | dma_pool_free(priv->context_pool, ctx->base.ctxr, | ||
751 | ctx->base.ctxr_dma); | ||
752 | } | ||
713 | } | 753 | } |
714 | 754 | ||
715 | struct safexcel_alg_template safexcel_alg_sha1 = { | 755 | struct safexcel_alg_template safexcel_alg_sha1 = { |
@@ -848,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq, | |||
848 | req->last_req = true; | 888 | req->last_req = true; |
849 | 889 | ||
850 | ret = crypto_ahash_update(areq); | 890 | ret = crypto_ahash_update(areq); |
851 | if (ret && ret != -EINPROGRESS) | 891 | if (ret && ret != -EINPROGRESS && ret != -EBUSY) |
852 | return ret; | 892 | return ret; |
853 | 893 | ||
854 | wait_for_completion_interruptible(&result.completion); | 894 | wait_for_completion_interruptible(&result.completion); |
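The -EBUSY addition matters because asynchronous crypto requests submitted with CRYPTO_TFM_REQ_MAY_BACKLOG may be accepted into a backlog: the completion callback still runs, so -EBUSY, like -EINPROGRESS, means wait rather than fail. A sketch of the convention:

```c
/* -EINPROGRESS: request accepted, completion will be invoked.
 * -EBUSY: request backlogged (MAY_BACKLOG set), completion will still
 *         be invoked. Only other non-zero codes are real errors. */
ret = crypto_ahash_update(areq);
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
	return ret;
wait_for_completion_interruptible(&result.completion);
```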
@@ -913,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
913 | unsigned int keylen) | 953 | unsigned int keylen) |
914 | { | 954 | { |
915 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | 955 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); |
956 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
916 | struct safexcel_ahash_export_state istate, ostate; | 957 | struct safexcel_ahash_export_state istate, ostate; |
917 | int ret, i; | 958 | int ret, i; |
918 | 959 | ||
@@ -920,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
920 | if (ret) | 961 | if (ret) |
921 | return ret; | 962 | return ret; |
922 | 963 | ||
923 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { | 964 | if (priv->version == EIP197 && ctx->base.ctxr) { |
924 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || | 965 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { |
925 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { | 966 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || |
926 | ctx->base.needs_inv = true; | 967 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { |
927 | break; | 968 | ctx->base.needs_inv = true; |
969 | break; | ||
970 | } | ||
928 | } | 971 | } |
929 | } | 972 | } |
930 | 973 | ||
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8705b28eb02c..717a26607bdb 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void) | |||
260 | { | 260 | { |
261 | struct device *dev = &pdev->dev; | 261 | struct device *dev = &pdev->dev; |
262 | BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); | 262 | BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); |
263 | crypt_virt = dma_alloc_coherent(dev, | 263 | crypt_virt = dma_zalloc_coherent(dev, |
264 | NPE_QLEN * sizeof(struct crypt_ctl), | 264 | NPE_QLEN * sizeof(struct crypt_ctl), |
265 | &crypt_phys, GFP_ATOMIC); | 265 | &crypt_phys, GFP_ATOMIC); |
266 | if (!crypt_virt) | 266 | if (!crypt_virt) |
267 | return -ENOMEM; | 267 | return -ENOMEM; |
268 | memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl)); | ||
269 | return 0; | 268 | return 0; |
270 | } | 269 | } |
271 | 270 | ||
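A small cleanup in ixp4xx: dma_zalloc_coherent() has the same signature as dma_alloc_coherent() but returns zeroed memory, so the follow-up memset() goes away. Sketch:

```c
/* Sketch: the returned buffer is already zero-filled, so the caller
 * needs no memset() after a successful allocation. */
static void *alloc_desc_ring(struct device *dev, size_t size,
			     dma_addr_t *phys)
{
	return dma_zalloc_coherent(dev, size, phys, GFP_ATOMIC);
}
```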
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 3a0c40081ffb..aca2373fa1de 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/genalloc.h> | 19 | #include <linux/genalloc.h> |
19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> | 21 | #include <linux/io.h> |
@@ -410,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx) | |||
410 | if (IS_ERR(engine->sram)) | 411 | if (IS_ERR(engine->sram)) |
411 | return PTR_ERR(engine->sram); | 412 | return PTR_ERR(engine->sram); |
412 | 413 | ||
413 | engine->sram_dma = phys_to_dma(cesa->dev, | 414 | engine->sram_dma = dma_map_resource(cesa->dev, res->start, |
414 | (phys_addr_t)res->start); | 415 | cesa->sram_size, |
416 | DMA_BIDIRECTIONAL, 0); | ||
417 | if (dma_mapping_error(cesa->dev, engine->sram_dma)) | ||
418 | return -ENOMEM; | ||
415 | 419 | ||
416 | return 0; | 420 | return 0; |
417 | } | 421 | } |
@@ -421,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx) | |||
421 | struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); | 425 | struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); |
422 | struct mv_cesa_engine *engine = &cesa->engines[idx]; | 426 | struct mv_cesa_engine *engine = &cesa->engines[idx]; |
423 | 427 | ||
424 | if (!engine->pool) | 428 | if (engine->pool) |
425 | return; | 429 | gen_pool_free(engine->pool, (unsigned long)engine->sram, |
426 | 430 | cesa->sram_size); | |
427 | gen_pool_free(engine->pool, (unsigned long)engine->sram, | 431 | else |
428 | cesa->sram_size); | 432 | dma_unmap_resource(cesa->dev, engine->sram_dma, |
433 | cesa->sram_size, DMA_BIDIRECTIONAL, 0); | ||
429 | } | 434 | } |
430 | 435 | ||
431 | static int mv_cesa_probe(struct platform_device *pdev) | 436 | static int mv_cesa_probe(struct platform_device *pdev) |
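phys_to_dma() assumes a fixed CPU-physical to bus-address relationship and bypasses the DMA API entirely, which breaks behind an IOMMU. dma_map_resource() maps the SRAM window through the DMA API and gives the driver an error to check; the matching unmap is added to the teardown path above. Usage fragment, as a sketch:

```c
/* Sketch: mapping a physical SRAM/MMIO range for device DMA. */
dma_addr_t sram_dma;

sram_dma = dma_map_resource(dev, res->start, sram_size,
			    DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, sram_dma))
	return -ENOMEM;

/* ... program sram_dma into the engine's descriptors ... */

dma_unmap_resource(dev, sram_dma, sram_size, DMA_BIDIRECTIONAL, 0);
```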
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index f2246a5abcf6..1e87637c412d 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -743,8 +743,8 @@ static int nx842_open_percpu_txwins(void) | |||
743 | } | 743 | } |
744 | 744 | ||
745 | if (!per_cpu(cpu_txwin, i)) { | 745 | if (!per_cpu(cpu_txwin, i)) { |
746 | /* shoudn't happen, Each chip will have NX engine */ | 746 | /* shouldn't happen, Each chip will have NX engine */ |
747 | pr_err("NX engine is not availavle for CPU %d\n", i); | 747 | pr_err("NX engine is not available for CPU %d\n", i); |
748 | return -EINVAL; | 748 | return -EINVAL; |
749 | } | 749 | } |
750 | } | 750 | } |
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 5a6dc53b2b9d..4ef52c9d72fc 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table); | |||
1618 | 1618 | ||
1619 | static int spacc_probe(struct platform_device *pdev) | 1619 | static int spacc_probe(struct platform_device *pdev) |
1620 | { | 1620 | { |
1621 | int i, err, ret = -EINVAL; | 1621 | int i, err, ret; |
1622 | struct resource *mem, *irq; | 1622 | struct resource *mem, *irq; |
1623 | struct device_node *np = pdev->dev.of_node; | 1623 | struct device_node *np = pdev->dev.of_node; |
1624 | struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), | 1624 | struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), |
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev) | |||
1679 | engine->clk = clk_get(&pdev->dev, "ref"); | 1679 | engine->clk = clk_get(&pdev->dev, "ref"); |
1680 | if (IS_ERR(engine->clk)) { | 1680 | if (IS_ERR(engine->clk)) { |
1681 | dev_info(&pdev->dev, "clk unavailable\n"); | 1681 | dev_info(&pdev->dev, "clk unavailable\n"); |
1682 | device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); | ||
1683 | return PTR_ERR(engine->clk); | 1682 | return PTR_ERR(engine->clk); |
1684 | } | 1683 | } |
1685 | 1684 | ||
1686 | if (clk_prepare_enable(engine->clk)) { | 1685 | if (clk_prepare_enable(engine->clk)) { |
1687 | dev_info(&pdev->dev, "unable to prepare/enable clk\n"); | 1686 | dev_info(&pdev->dev, "unable to prepare/enable clk\n"); |
1688 | clk_put(engine->clk); | 1687 | ret = -EIO; |
1689 | return -EIO; | 1688 | goto err_clk_put; |
1690 | } | 1689 | } |
1691 | 1690 | ||
1692 | err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); | 1691 | ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); |
1693 | if (err) { | 1692 | if (ret) |
1694 | clk_disable_unprepare(engine->clk); | 1693 | goto err_clk_disable; |
1695 | clk_put(engine->clk); | ||
1696 | return err; | ||
1697 | } | ||
1698 | 1694 | ||
1699 | 1695 | ||
1700 | /* | 1696 | /* |
@@ -1725,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev) | |||
1725 | 1721 | ||
1726 | platform_set_drvdata(pdev, engine); | 1722 | platform_set_drvdata(pdev, engine); |
1727 | 1723 | ||
1724 | ret = -EINVAL; | ||
1728 | INIT_LIST_HEAD(&engine->registered_algs); | 1725 | INIT_LIST_HEAD(&engine->registered_algs); |
1729 | for (i = 0; i < engine->num_algs; ++i) { | 1726 | for (i = 0; i < engine->num_algs; ++i) { |
1730 | engine->algs[i].engine = engine; | 1727 | engine->algs[i].engine = engine; |
@@ -1759,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev) | |||
1759 | engine->aeads[i].alg.base.cra_name); | 1756 | engine->aeads[i].alg.base.cra_name); |
1760 | } | 1757 | } |
1761 | 1758 | ||
1759 | if (!ret) | ||
1760 | return 0; | ||
1761 | |||
1762 | del_timer_sync(&engine->packet_timeout); | ||
1763 | device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); | ||
1764 | err_clk_disable: | ||
1765 | clk_disable_unprepare(engine->clk); | ||
1766 | err_clk_put: | ||
1767 | clk_put(engine->clk); | ||
1768 | |||
1762 | return ret; | 1769 | return ret; |
1763 | } | 1770 | } |
1764 | 1771 | ||
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 8c4fd255a601..ff149e176f64 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c | |||
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, | |||
117 | 117 | ||
118 | #define CSR_RETRY_TIMES 500 | 118 | #define CSR_RETRY_TIMES 500 |
119 | static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, | 119 | static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, |
120 | unsigned char ae, unsigned int csr, | 120 | unsigned char ae, unsigned int csr) |
121 | unsigned int *value) | ||
122 | { | 121 | { |
123 | unsigned int iterations = CSR_RETRY_TIMES; | 122 | unsigned int iterations = CSR_RETRY_TIMES; |
123 | int value; | ||
124 | 124 | ||
125 | do { | 125 | do { |
126 | *value = GET_AE_CSR(handle, ae, csr); | 126 | value = GET_AE_CSR(handle, ae, csr); |
127 | if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) | 127 | if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) |
128 | return 0; | 128 | return value; |
129 | } while (iterations--); | 129 | } while (iterations--); |
130 | 130 | ||
131 | pr_err("QAT: Read CSR timeout\n"); | 131 | pr_err("QAT: Read CSR timeout\n"); |
132 | return -EFAULT; | 132 | return 0; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, | 135 | static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, |
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle, | |||
154 | { | 154 | { |
155 | unsigned int cur_ctx; | 155 | unsigned int cur_ctx; |
156 | 156 | ||
157 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | 157 | cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); |
158 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | 158 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); |
159 | qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); | 159 | *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT); |
160 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | 160 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); |
161 | } | 161 | } |
162 | 162 | ||
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, | |||
169 | int times = MAX_RETRY_TIMES; | 169 | int times = MAX_RETRY_TIMES; |
170 | int elapsed_cycles = 0; | 170 | int elapsed_cycles = 0; |
171 | 171 | ||
172 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); | 172 | base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); |
173 | base_cnt &= 0xffff; | 173 | base_cnt &= 0xffff; |
174 | while ((int)cycles > elapsed_cycles && times--) { | 174 | while ((int)cycles > elapsed_cycles && times--) { |
175 | if (chk_inactive) | 175 | if (chk_inactive) |
176 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); | 176 | csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); |
177 | 177 | ||
178 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); | 178 | cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); |
179 | cur_cnt &= 0xffff; | 179 | cur_cnt &= 0xffff; |
180 | elapsed_cycles = cur_cnt - base_cnt; | 180 | elapsed_cycles = cur_cnt - base_cnt; |
181 | 181 | ||
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle, | |||
207 | } | 207 | } |
208 | 208 | ||
209 | /* Sets the acceleration engine context mode to either four or eight */ | 209 | /* Sets the acceleration engine context mode to either four or eight */ |
210 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | 210 | csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
211 | csr = IGNORE_W1C_MASK & csr; | 211 | csr = IGNORE_W1C_MASK & csr; |
212 | new_csr = (mode == 4) ? | 212 | new_csr = (mode == 4) ? |
213 | SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : | 213 | SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : |
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle, | |||
221 | { | 221 | { |
222 | unsigned int csr, new_csr; | 222 | unsigned int csr, new_csr; |
223 | 223 | ||
224 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | 224 | csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
225 | csr &= IGNORE_W1C_MASK; | 225 | csr &= IGNORE_W1C_MASK; |
226 | 226 | ||
227 | new_csr = (mode) ? | 227 | new_csr = (mode) ? |
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, | |||
240 | { | 240 | { |
241 | unsigned int csr, new_csr; | 241 | unsigned int csr, new_csr; |
242 | 242 | ||
243 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); | 243 | csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
244 | csr &= IGNORE_W1C_MASK; | 244 | csr &= IGNORE_W1C_MASK; |
245 | switch (lm_type) { | 245 | switch (lm_type) { |
246 | case ICP_LMEM0: | 246 | case ICP_LMEM0: |
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, | |||
328 | { | 328 | { |
329 | unsigned int ctx, cur_ctx; | 329 | unsigned int ctx, cur_ctx; |
330 | 330 | ||
331 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | 331 | cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); |
332 | 332 | ||
333 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | 333 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { |
334 | if (!(ctx_mask & (1 << ctx))) | 334 | if (!(ctx_mask & (1 << ctx))) |
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle, | |||
340 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | 340 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); |
341 | } | 341 | } |
342 | 342 | ||
343 | static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, | 343 | static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, |
344 | unsigned char ae, unsigned char ctx, | 344 | unsigned char ae, unsigned char ctx, |
345 | unsigned int ae_csr, unsigned int *csr_val) | 345 | unsigned int ae_csr) |
346 | { | 346 | { |
347 | unsigned int cur_ctx; | 347 | unsigned int cur_ctx, csr_val; |
348 | 348 | ||
349 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | 349 | cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); |
350 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); | 350 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); |
351 | qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); | 351 | csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr); |
352 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); | 352 | qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); |
353 | |||
354 | return csr_val; | ||
353 | } | 355 | } |
354 | 356 | ||
355 | static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, | 357 | static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, |
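qat_hal_rd_ae_csr() and qat_hal_rd_indr_csr() switch from an out-parameter plus status code to returning the CSR value directly, which shortens every call site. The cost is visible in the timeout branch above: it now logs and returns 0, so a timeout is indistinguishable from a register that genuinely reads zero. Side by side, as illustrative fragments:

```c
/* before: status via the return code, value via an out-parameter */
unsigned int val;
int err = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &val);

/* after: the value is the return; a timeout logs an error and
 * yields 0, which callers must tolerate */
unsigned int ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
```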
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, | |||
358 | { | 360 | { |
359 | unsigned int ctx, cur_ctx; | 361 | unsigned int ctx, cur_ctx; |
360 | 362 | ||
361 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | 363 | cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); |
362 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | 364 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { |
363 | if (!(ctx_mask & (1 << ctx))) | 365 | if (!(ctx_mask & (1 << ctx))) |
364 | continue; | 366 | continue; |
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle, | |||
374 | { | 376 | { |
375 | unsigned int ctx, cur_ctx; | 377 | unsigned int ctx, cur_ctx; |
376 | 378 | ||
377 | qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); | 379 | cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); |
378 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { | 380 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { |
379 | if (!(ctx_mask & (1 << ctx))) | 381 | if (!(ctx_mask & (1 << ctx))) |
380 | continue; | 382 | continue; |
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) | |||
392 | int times = MAX_RETRY_TIMES; | 394 | int times = MAX_RETRY_TIMES; |
393 | 395 | ||
394 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 396 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
395 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | 397 | base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); |
396 | (unsigned int *)&base_cnt); | ||
397 | base_cnt &= 0xffff; | 398 | base_cnt &= 0xffff; |
398 | 399 | ||
399 | do { | 400 | do { |
400 | qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, | 401 | cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); |
401 | (unsigned int *)&cur_cnt); | ||
402 | cur_cnt &= 0xffff; | 402 | cur_cnt &= 0xffff; |
403 | } while (times-- && (cur_cnt == base_cnt)); | 403 | } while (times-- && (cur_cnt == base_cnt)); |
404 | 404 | ||
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, | |||
416 | { | 416 | { |
417 | unsigned int enable = 0, active = 0; | 417 | unsigned int enable = 0, active = 0; |
418 | 418 | ||
419 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); | 419 | enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
420 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); | 420 | active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); |
421 | if ((enable & (0xff << CE_ENABLE_BITPOS)) || | 421 | if ((enable & (0xff << CE_ENABLE_BITPOS)) || |
422 | (active & (1 << ACS_ABO_BITPOS))) | 422 | (active & (1 << ACS_ABO_BITPOS))) |
423 | return 1; | 423 | return 1; |
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle, | |||
540 | { | 540 | { |
541 | unsigned int ctx; | 541 | unsigned int ctx; |
542 | 542 | ||
543 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); | 543 | ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
544 | ctx &= IGNORE_W1C_MASK & | 544 | ctx &= IGNORE_W1C_MASK & |
545 | (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); | 545 | (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); |
546 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); | 546 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); |
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle, | |||
583 | unsigned int ustore_addr; | 583 | unsigned int ustore_addr; |
584 | unsigned int i; | 584 | unsigned int i; |
585 | 585 | ||
586 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | 586 | ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); |
587 | uaddr |= UA_ECS; | 587 | uaddr |= UA_ECS; |
588 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | 588 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); |
589 | for (i = 0; i < words_num; i++) { | 589 | for (i = 0; i < words_num; i++) { |
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, | |||
604 | { | 604 | { |
605 | unsigned int ctx; | 605 | unsigned int ctx; |
606 | 606 | ||
607 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); | 607 | ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
608 | ctx &= IGNORE_W1C_MASK; | 608 | ctx &= IGNORE_W1C_MASK; |
609 | ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; | 609 | ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; |
610 | ctx |= (ctx_mask << CE_ENABLE_BITPOS); | 610 | ctx |= (ctx_mask << CE_ENABLE_BITPOS); |
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | |||
636 | int ret = 0; | 636 | int ret = 0; |
637 | 637 | ||
638 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 638 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
639 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | 639 | csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL); |
640 | csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); | 640 | csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); |
641 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); | 641 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); |
642 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); | 642 | csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
643 | csr_val &= IGNORE_W1C_MASK; | 643 | csr_val &= IGNORE_W1C_MASK; |
644 | csr_val |= CE_NN_MODE; | 644 | csr_val |= CE_NN_MODE; |
645 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); | 645 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); |
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | |||
648 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, | 648 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, |
649 | handle->hal_handle->upc_mask & | 649 | handle->hal_handle->upc_mask & |
650 | INIT_PC_VALUE); | 650 | INIT_PC_VALUE); |
651 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | 651 | savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); |
652 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); | 652 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); |
653 | qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); | 653 | qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); |
654 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, | 654 | qat_hal_wr_indr_csr(handle, ae, ctx_mask, |
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) | |||
760 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | 760 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { |
761 | unsigned int csr_val = 0; | 761 | unsigned int csr_val = 0; |
762 | 762 | ||
763 | qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); | 763 | csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE); |
764 | csr_val |= 0x1; | 764 | csr_val |= 0x1; |
765 | qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); | 765 | qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); |
766 | } | 766 | } |
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle, | |||
826 | unsigned int i, uwrd_lo, uwrd_hi; | 826 | unsigned int i, uwrd_lo, uwrd_hi; |
827 | unsigned int ustore_addr, misc_control; | 827 | unsigned int ustore_addr, misc_control; |
828 | 828 | ||
829 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); | 829 | misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL); |
830 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, | 830 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, |
831 | misc_control & 0xfffffffb); | 831 | misc_control & 0xfffffffb); |
832 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | 832 | ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); |
833 | uaddr |= UA_ECS; | 833 | uaddr |= UA_ECS; |
834 | for (i = 0; i < words_num; i++) { | 834 | for (i = 0; i < words_num; i++) { |
835 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | 835 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); |
836 | uaddr++; | 836 | uaddr++; |
837 | qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); | 837 | uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER); |
838 | qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); | 838 | uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER); |
839 | uword[i] = uwrd_hi; | 839 | uword[i] = uwrd_hi; |
840 | uword[i] = (uword[i] << 0x20) | uwrd_lo; | 840 | uword[i] = (uword[i] << 0x20) | uwrd_lo; |
841 | } | 841 | } |
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, | |||
849 | { | 849 | { |
850 | unsigned int i, ustore_addr; | 850 | unsigned int i, ustore_addr; |
851 | 851 | ||
852 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | 852 | ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); |
853 | uaddr |= UA_ECS; | 853 | uaddr |= UA_ECS; |
854 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | 854 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); |
855 | for (i = 0; i < words_num; i++) { | 855 | for (i = 0; i < words_num; i++) { |
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, | |||
890 | return -EINVAL; | 890 | return -EINVAL; |
891 | } | 891 | } |
892 | /* save current context */ | 892 | /* save current context */ |
893 | qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); | 893 | ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT); |
894 | qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); | 894 | ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT); |
895 | qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, | 895 | ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx, |
896 | &ind_lm_addr_byte0); | 896 | INDIRECT_LM_ADDR_0_BYTE_INDEX); |
897 | qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, | 897 | ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx, |
898 | &ind_lm_addr_byte1); | 898 | INDIRECT_LM_ADDR_1_BYTE_INDEX); |
899 | if (inst_num <= MAX_EXEC_INST) | 899 | if (inst_num <= MAX_EXEC_INST) |
900 | qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); | 900 | qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); |
901 | qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); | 901 | qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); |
902 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); | 902 | savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT); |
903 | savpc = (savpc & handle->hal_handle->upc_mask) >> 0; | 903 | savpc = (savpc & handle->hal_handle->upc_mask) >> 0; |
904 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | 904 | ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
905 | ctx_enables &= IGNORE_W1C_MASK; | 905 | ctx_enables &= IGNORE_W1C_MASK; |
906 | qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); | 906 | savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE); |
907 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | 907 | savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); |
908 | qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); | 908 | ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL); |
909 | qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, | 909 | ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx, |
910 | &ind_cnt_sig); | 910 | FUTURE_COUNT_SIGNAL_INDIRECT); |
911 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); | 911 | ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx, |
912 | qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); | 912 | CTX_SIG_EVENTS_INDIRECT); |
913 | act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE); | ||
913 | /* execute micro codes */ | 914 | /* execute micro codes */ |
914 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | 915 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); |
915 | qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); | 916 | qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); |
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, | |||
927 | if (endpc) { | 928 | if (endpc) { |
928 | unsigned int ctx_status; | 929 | unsigned int ctx_status; |
929 | 930 | ||
930 | qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, | 931 | ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx, |
931 | &ctx_status); | 932 | CTX_STS_INDIRECT); |
932 | *endpc = ctx_status & handle->hal_handle->upc_mask; | 933 | *endpc = ctx_status & handle->hal_handle->upc_mask; |
933 | } | 934 | } |
934 | /* restore the saved context */ | 935 | /* restore the saved context */ |
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle, | |||
938 | qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); | 939 | qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); |
939 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, | 940 | qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, |
940 | handle->hal_handle->upc_mask & savpc); | 941 | handle->hal_handle->upc_mask & savpc); |
941 | qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); | 942 | csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL); |
942 | newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); | 943 | newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); |
943 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); | 944 | qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); |
944 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); | 945 | qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); |
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, | |||
986 | insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); | 987 | insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); |
987 | break; | 988 | break; |
988 | } | 989 | } |
989 | qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); | 990 | savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); |
990 | qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); | 991 | ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL); |
991 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | 992 | ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
992 | ctx_enables &= IGNORE_W1C_MASK; | 993 | ctx_enables &= IGNORE_W1C_MASK; |
993 | if (ctx != (savctx & ACS_ACNO)) | 994 | if (ctx != (savctx & ACS_ACNO)) |
994 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, | 995 | qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, |
995 | ctx & ACS_ACNO); | 996 | ctx & ACS_ACNO); |
996 | qat_hal_get_uwords(handle, ae, 0, 1, &savuword); | 997 | qat_hal_get_uwords(handle, ae, 0, 1, &savuword); |
997 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); | 998 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); |
998 | qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); | 999 | ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); |
999 | uaddr = UA_ECS; | 1000 | uaddr = UA_ECS; |
1000 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); | 1001 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); |
1001 | insts = qat_hal_set_uword_ecc(insts); | 1002 | insts = qat_hal_set_uword_ecc(insts); |
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle, | |||
1011 | * the instruction should have been executed | 1012 | * the instruction should have been executed |
1012 | * prior to clearing the ECS in putUwords | 1013 | * prior to clearing the ECS in putUwords |
1013 | */ | 1014 | */ |
1014 | qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); | 1015 | *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT); |
1015 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); | 1016 | qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); |
1016 | qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); | 1017 | qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); |
1017 | if (ctx != (savctx & ACS_ACNO)) | 1018 | if (ctx != (savctx & ACS_ACNO)) |
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle, | |||
1188 | unsigned short mask; | 1189 | unsigned short mask; |
1189 | unsigned short dr_offset = 0x10; | 1190 | unsigned short dr_offset = 0x10; |
1190 | 1191 | ||
1191 | status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | 1192 | status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
1192 | if (CE_INUSE_CONTEXTS & ctx_enables) { | 1193 | if (CE_INUSE_CONTEXTS & ctx_enables) { |
1193 | if (ctx & 0x1) { | 1194 | if (ctx & 0x1) { |
1194 | pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); | 1195 | pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); |
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, | |||
1238 | const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; | 1239 | const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; |
1239 | const unsigned short gprnum = 0, dly = num_inst * 0x5; | 1240 | const unsigned short gprnum = 0, dly = num_inst * 0x5; |
1240 | 1241 | ||
1241 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | 1242 | ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
1242 | if (CE_INUSE_CONTEXTS & ctx_enables) { | 1243 | if (CE_INUSE_CONTEXTS & ctx_enables) { |
1243 | if (ctx & 0x1) { | 1244 | if (ctx & 0x1) { |
1244 | pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); | 1245 | pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); |
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle, | |||
1282 | unsigned int ctx_enables; | 1283 | unsigned int ctx_enables; |
1283 | int stat = 0; | 1284 | int stat = 0; |
1284 | 1285 | ||
1285 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | 1286 | ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
1286 | ctx_enables &= IGNORE_W1C_MASK; | 1287 | ctx_enables &= IGNORE_W1C_MASK; |
1287 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); | 1288 | qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); |
1288 | 1289 | ||
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle | |||
1299 | { | 1300 | { |
1300 | unsigned int ctx_enables; | 1301 | unsigned int ctx_enables; |
1301 | 1302 | ||
1302 | qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); | 1303 | ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); |
1303 | if (ctx_enables & CE_INUSE_CONTEXTS) { | 1304 | if (ctx_enables & CE_INUSE_CONTEXTS) { |
1304 | /* 4-ctx mode */ | 1305 | /* 4-ctx mode */ |
1305 | *relreg = absreg_num & 0x1F; | 1306 | *relreg = absreg_num & 0x1F; |
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 142c6020cec7..188f44b7eb27 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -1,17 +1,13 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | * Cryptographic API. | 2 | // |
3 | * | 3 | // Cryptographic API. |
4 | * Support for Samsung S5PV210 and Exynos HW acceleration. | 4 | // |
5 | * | 5 | // Support for Samsung S5PV210 and Exynos HW acceleration. |
6 | * Copyright (C) 2011 NetUP Inc. All rights reserved. | 6 | // |
7 | * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved. | 7 | // Copyright (C) 2011 NetUP Inc. All rights reserved. |
8 | * | 8 | // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved. |
9 | * This program is free software; you can redistribute it and/or modify | 9 | // |
10 | * it under the terms of the GNU General Public License version 2 as published | 10 | // Hash part based on omap-sham.c driver. |
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Hash part based on omap-sham.c driver. | ||
14 | */ | ||
15 | 11 | ||
16 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
17 | #include <linux/crypto.h> | 13 | #include <linux/crypto.h> |
@@ -1461,7 +1457,7 @@ static void s5p_hash_tasklet_cb(unsigned long data) | |||
1461 | &dd->hash_flags)) { | 1457 | &dd->hash_flags)) { |
1462 | /* hash or semi-hash ready */ | 1458 | /* hash or semi-hash ready */ |
1463 | clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); | 1459 | clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); |
1464 | goto finish; | 1460 | goto finish; |
1465 | } | 1461 | } |
1466 | } | 1462 | } |
1467 | 1463 | ||
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 602332e02729..63aa78c0b12b 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig | |||
@@ -1,4 +1,4 @@ | |||
1 | config CRC_DEV_STM32 | 1 | config CRYPTO_DEV_STM32_CRC |
2 | tristate "Support for STM32 crc accelerators" | 2 | tristate "Support for STM32 crc accelerators" |
3 | depends on ARCH_STM32 | 3 | depends on ARCH_STM32 |
4 | select CRYPTO_HASH | 4 | select CRYPTO_HASH |
@@ -6,7 +6,7 @@ config CRC_DEV_STM32 | |||
6 | This enables support for the CRC32 hw accelerator which can be found | 6 | This enables support for the CRC32 hw accelerator which can be found |
7 | on STMicroelectronics STM32 SOC. | 7 | on STMicroelectronics STM32 SOC. |
8 | 8 | ||
9 | config HASH_DEV_STM32 | 9 | config CRYPTO_DEV_STM32_HASH |
10 | tristate "Support for STM32 hash accelerators" | 10 | tristate "Support for STM32 hash accelerators" |
11 | depends on ARCH_STM32 | 11 | depends on ARCH_STM32 |
12 | depends on HAS_DMA | 12 | depends on HAS_DMA |
@@ -18,3 +18,12 @@ config HASH_DEV_STM32 | |||
18 | help | 18 | help |
19 | This enables support for the HASH hw accelerator which can be found | 19 | This enables support for the HASH hw accelerator which can be found |
20 | on STMicroelectronics STM32 SOC. | 20 | on STMicroelectronics STM32 SOC. |
21 | |||
22 | config CRYPTO_DEV_STM32_CRYP | ||
23 | tristate "Support for STM32 cryp accelerators" | ||
24 | depends on ARCH_STM32 | ||
25 | select CRYPTO_HASH | ||
26 | select CRYPTO_ENGINE | ||
27 | help | ||
28 | This enables support for the CRYP (AES/DES/TDES) hw accelerator which | ||
29 | can be found on STMicroelectronics STM32 SOC. | ||
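[Editor's note] For reference, a hypothetical .config fragment selecting the three options as renamed/added above (module builds with =m would work equally well; names taken directly from this Kconfig):

    CONFIG_CRYPTO_DEV_STM32_CRC=y
    CONFIG_CRYPTO_DEV_STM32_HASH=y
    CONFIG_CRYPTO_DEV_STM32_CRYP=y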
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile index 73cd56cad0cc..53d1bb94b221 100644 --- a/drivers/crypto/stm32/Makefile +++ b/drivers/crypto/stm32/Makefile | |||
@@ -1,2 +1,3 @@ | |||
1 | obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o | 1 | obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o |
2 | obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o \ No newline at end of file | 2 | obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o |
3 | obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o | ||
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c new file mode 100644 index 000000000000..4a06a7a665ee --- /dev/null +++ b/drivers/crypto/stm32/stm32-cryp.c | |||
@@ -0,0 +1,1170 @@ | |||
1 | /* | ||
2 | * Copyright (C) STMicroelectronics SA 2017 | ||
3 | * Author: Fabien Dessenne <fabien.dessenne@st.com> | ||
4 | * License terms: GNU General Public License (GPL), version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/clk.h> | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/iopoll.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/of_device.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/reset.h> | ||
15 | |||
16 | #include <crypto/aes.h> | ||
17 | #include <crypto/des.h> | ||
18 | #include <crypto/engine.h> | ||
19 | #include <crypto/scatterwalk.h> | ||
20 | |||
21 | #define DRIVER_NAME "stm32-cryp" | ||
22 | |||
23 | /* Bit [0] encrypt / decrypt */ | ||
24 | #define FLG_ENCRYPT BIT(0) | ||
25 | /* Bit [8..1] algo & operation mode */ | ||
26 | #define FLG_AES BIT(1) | ||
27 | #define FLG_DES BIT(2) | ||
28 | #define FLG_TDES BIT(3) | ||
29 | #define FLG_ECB BIT(4) | ||
30 | #define FLG_CBC BIT(5) | ||
31 | #define FLG_CTR BIT(6) | ||
32 | /* Mode mask = bits [15..0] */ | ||
33 | #define FLG_MODE_MASK GENMASK(15, 0) | ||
34 | |||
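[Editor's note] As an illustration (not part of the driver), a request mode is built by OR-ing one algorithm bit, one chaining-mode bit, and optionally FLG_ENCRYPT, then kept within FLG_MODE_MASK — a minimal sketch using only the flag names defined above:

    /* Hypothetical illustration of how the mode flags compose. */
    unsigned long mode = FLG_AES | FLG_CBC | FLG_ENCRYPT;  /* AES-CBC encrypt */
    mode &= FLG_MODE_MASK;  /* discard anything outside bits [15..0] */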
35 | /* Registers */ | ||
36 | #define CRYP_CR 0x00000000 | ||
37 | #define CRYP_SR 0x00000004 | ||
38 | #define CRYP_DIN 0x00000008 | ||
39 | #define CRYP_DOUT 0x0000000C | ||
40 | #define CRYP_DMACR 0x00000010 | ||
41 | #define CRYP_IMSCR 0x00000014 | ||
42 | #define CRYP_RISR 0x00000018 | ||
43 | #define CRYP_MISR 0x0000001C | ||
44 | #define CRYP_K0LR 0x00000020 | ||
45 | #define CRYP_K0RR 0x00000024 | ||
46 | #define CRYP_K1LR 0x00000028 | ||
47 | #define CRYP_K1RR 0x0000002C | ||
48 | #define CRYP_K2LR 0x00000030 | ||
49 | #define CRYP_K2RR 0x00000034 | ||
50 | #define CRYP_K3LR 0x00000038 | ||
51 | #define CRYP_K3RR 0x0000003C | ||
52 | #define CRYP_IV0LR 0x00000040 | ||
53 | #define CRYP_IV0RR 0x00000044 | ||
54 | #define CRYP_IV1LR 0x00000048 | ||
55 | #define CRYP_IV1RR 0x0000004C | ||
56 | |||
57 | /* Registers values */ | ||
58 | #define CR_DEC_NOT_ENC 0x00000004 | ||
59 | #define CR_TDES_ECB 0x00000000 | ||
60 | #define CR_TDES_CBC 0x00000008 | ||
61 | #define CR_DES_ECB 0x00000010 | ||
62 | #define CR_DES_CBC 0x00000018 | ||
63 | #define CR_AES_ECB 0x00000020 | ||
64 | #define CR_AES_CBC 0x00000028 | ||
65 | #define CR_AES_CTR 0x00000030 | ||
66 | #define CR_AES_KP 0x00000038 | ||
67 | #define CR_AES_UNKNOWN 0xFFFFFFFF | ||
68 | #define CR_ALGO_MASK 0x00080038 | ||
69 | #define CR_DATA32 0x00000000 | ||
70 | #define CR_DATA16 0x00000040 | ||
71 | #define CR_DATA8 0x00000080 | ||
72 | #define CR_DATA1 0x000000C0 | ||
73 | #define CR_KEY128 0x00000000 | ||
74 | #define CR_KEY192 0x00000100 | ||
75 | #define CR_KEY256 0x00000200 | ||
76 | #define CR_FFLUSH 0x00004000 | ||
77 | #define CR_CRYPEN 0x00008000 | ||
78 | |||
79 | #define SR_BUSY 0x00000010 | ||
80 | #define SR_OFNE 0x00000004 | ||
81 | |||
82 | #define IMSCR_IN BIT(0) | ||
83 | #define IMSCR_OUT BIT(1) | ||
84 | |||
85 | #define MISR_IN BIT(0) | ||
86 | #define MISR_OUT BIT(1) | ||
87 | |||
88 | /* Misc */ | ||
89 | #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) | ||
90 | #define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) | ||
91 | #define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) | ||
92 | |||
93 | struct stm32_cryp_ctx { | ||
94 | struct stm32_cryp *cryp; | ||
95 | int keylen; | ||
96 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
97 | unsigned long flags; | ||
98 | }; | ||
99 | |||
100 | struct stm32_cryp_reqctx { | ||
101 | unsigned long mode; | ||
102 | }; | ||
103 | |||
104 | struct stm32_cryp { | ||
105 | struct list_head list; | ||
106 | struct device *dev; | ||
107 | void __iomem *regs; | ||
108 | struct clk *clk; | ||
109 | unsigned long flags; | ||
110 | u32 irq_status; | ||
111 | struct stm32_cryp_ctx *ctx; | ||
112 | |||
113 | struct crypto_engine *engine; | ||
114 | |||
115 | struct mutex lock; /* protects req */ | ||
116 | struct ablkcipher_request *req; | ||
117 | |||
118 | size_t hw_blocksize; | ||
119 | |||
120 | size_t total_in; | ||
121 | size_t total_in_save; | ||
122 | size_t total_out; | ||
123 | size_t total_out_save; | ||
124 | |||
125 | struct scatterlist *in_sg; | ||
126 | struct scatterlist *out_sg; | ||
127 | struct scatterlist *out_sg_save; | ||
128 | |||
129 | struct scatterlist in_sgl; | ||
130 | struct scatterlist out_sgl; | ||
131 | bool sgs_copied; | ||
132 | |||
133 | int in_sg_len; | ||
134 | int out_sg_len; | ||
135 | |||
136 | struct scatter_walk in_walk; | ||
137 | struct scatter_walk out_walk; | ||
138 | |||
139 | u32 last_ctr[4]; | ||
140 | }; | ||
141 | |||
142 | struct stm32_cryp_list { | ||
143 | struct list_head dev_list; | ||
144 | spinlock_t lock; /* protect dev_list */ | ||
145 | }; | ||
146 | |||
147 | static struct stm32_cryp_list cryp_list = { | ||
148 | .dev_list = LIST_HEAD_INIT(cryp_list.dev_list), | ||
149 | .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock), | ||
150 | }; | ||
151 | |||
152 | static inline bool is_aes(struct stm32_cryp *cryp) | ||
153 | { | ||
154 | return cryp->flags & FLG_AES; | ||
155 | } | ||
156 | |||
157 | static inline bool is_des(struct stm32_cryp *cryp) | ||
158 | { | ||
159 | return cryp->flags & FLG_DES; | ||
160 | } | ||
161 | |||
162 | static inline bool is_tdes(struct stm32_cryp *cryp) | ||
163 | { | ||
164 | return cryp->flags & FLG_TDES; | ||
165 | } | ||
166 | |||
167 | static inline bool is_ecb(struct stm32_cryp *cryp) | ||
168 | { | ||
169 | return cryp->flags & FLG_ECB; | ||
170 | } | ||
171 | |||
172 | static inline bool is_cbc(struct stm32_cryp *cryp) | ||
173 | { | ||
174 | return cryp->flags & FLG_CBC; | ||
175 | } | ||
176 | |||
177 | static inline bool is_ctr(struct stm32_cryp *cryp) | ||
178 | { | ||
179 | return cryp->flags & FLG_CTR; | ||
180 | } | ||
181 | |||
182 | static inline bool is_encrypt(struct stm32_cryp *cryp) | ||
183 | { | ||
184 | return cryp->flags & FLG_ENCRYPT; | ||
185 | } | ||
186 | |||
187 | static inline bool is_decrypt(struct stm32_cryp *cryp) | ||
188 | { | ||
189 | return !is_encrypt(cryp); | ||
190 | } | ||
191 | |||
192 | static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst) | ||
193 | { | ||
194 | return readl_relaxed(cryp->regs + ofst); | ||
195 | } | ||
196 | |||
197 | static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val) | ||
198 | { | ||
199 | writel_relaxed(val, cryp->regs + ofst); | ||
200 | } | ||
201 | |||
202 | static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp) | ||
203 | { | ||
204 | u32 status; | ||
205 | |||
206 | return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status, | ||
207 | !(status & SR_BUSY), 10, 100000); | ||
208 | } | ||
209 | |||
210 | static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) | ||
211 | { | ||
212 | struct stm32_cryp *tmp, *cryp = NULL; | ||
213 | |||
214 | spin_lock_bh(&cryp_list.lock); | ||
215 | if (!ctx->cryp) { | ||
216 | list_for_each_entry(tmp, &cryp_list.dev_list, list) { | ||
217 | cryp = tmp; | ||
218 | break; | ||
219 | } | ||
220 | ctx->cryp = cryp; | ||
221 | } else { | ||
222 | cryp = ctx->cryp; | ||
223 | } | ||
224 | |||
225 | spin_unlock_bh(&cryp_list.lock); | ||
226 | |||
227 | return cryp; | ||
228 | } | ||
229 | |||
230 | static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total, | ||
231 | size_t align) | ||
232 | { | ||
233 | int len = 0; | ||
234 | |||
235 | if (!total) | ||
236 | return 0; | ||
237 | |||
238 | if (!IS_ALIGNED(total, align)) | ||
239 | return -EINVAL; | ||
240 | |||
241 | while (sg) { | ||
242 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) | ||
243 | return -EINVAL; | ||
244 | |||
245 | if (!IS_ALIGNED(sg->length, align)) | ||
246 | return -EINVAL; | ||
247 | |||
248 | len += sg->length; | ||
249 | sg = sg_next(sg); | ||
250 | } | ||
251 | |||
252 | if (len != total) | ||
253 | return -EINVAL; | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp) | ||
259 | { | ||
260 | int ret; | ||
261 | |||
262 | ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in, | ||
263 | cryp->hw_blocksize); | ||
264 | if (ret) | ||
265 | return ret; | ||
266 | |||
267 | ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out, | ||
268 | cryp->hw_blocksize); | ||
269 | |||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | static void sg_copy_buf(void *buf, struct scatterlist *sg, | ||
274 | unsigned int start, unsigned int nbytes, int out) | ||
275 | { | ||
276 | struct scatter_walk walk; | ||
277 | |||
278 | if (!nbytes) | ||
279 | return; | ||
280 | |||
281 | scatterwalk_start(&walk, sg); | ||
282 | scatterwalk_advance(&walk, start); | ||
283 | scatterwalk_copychunks(buf, &walk, nbytes, out); | ||
284 | scatterwalk_done(&walk, out, 0); | ||
285 | } | ||
286 | |||
287 | static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp) | ||
288 | { | ||
289 | void *buf_in, *buf_out; | ||
290 | int pages, total_in, total_out; | ||
291 | |||
292 | if (!stm32_cryp_check_io_aligned(cryp)) { | ||
293 | cryp->sgs_copied = 0; | ||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | total_in = ALIGN(cryp->total_in, cryp->hw_blocksize); | ||
298 | pages = total_in ? get_order(total_in) : 1; | ||
299 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
300 | |||
301 | total_out = ALIGN(cryp->total_out, cryp->hw_blocksize); | ||
302 | pages = total_out ? get_order(total_out) : 1; | ||
303 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
304 | |||
305 | if (!buf_in || !buf_out) { | ||
306 | dev_err(cryp->dev, "Can't allocate pages when unaligned\n"); | ||
307 | cryp->sgs_copied = 0; | ||
308 | return -EFAULT; | ||
309 | } | ||
310 | |||
311 | sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0); | ||
312 | |||
313 | sg_init_one(&cryp->in_sgl, buf_in, total_in); | ||
314 | cryp->in_sg = &cryp->in_sgl; | ||
315 | cryp->in_sg_len = 1; | ||
316 | |||
317 | sg_init_one(&cryp->out_sgl, buf_out, total_out); | ||
318 | cryp->out_sg_save = cryp->out_sg; | ||
319 | cryp->out_sg = &cryp->out_sgl; | ||
320 | cryp->out_sg_len = 1; | ||
321 | |||
322 | cryp->sgs_copied = 1; | ||
323 | |||
324 | return 0; | ||
325 | } | ||
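[Editor's note] To make the bounce-buffer sizing concrete: for a hypothetical 100-byte AES request, total_in = ALIGN(100, 16) = 112 bytes, and with 4 KiB pages get_order(112) = 0, so a single page is allocated; the output side is sized the same way from total_out.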
326 | |||
327 | static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv) | ||
328 | { | ||
329 | if (!iv) | ||
330 | return; | ||
331 | |||
332 | stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++)); | ||
333 | stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++)); | ||
334 | |||
335 | if (is_aes(cryp)) { | ||
336 | stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++)); | ||
337 | stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++)); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static void stm32_cryp_hw_write_key(struct stm32_cryp *c) | ||
342 | { | ||
343 | unsigned int i; | ||
344 | int r_id; | ||
345 | |||
346 | if (is_des(c)) { | ||
347 | stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0])); | ||
348 | stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1])); | ||
349 | } else { | ||
350 | r_id = CRYP_K3RR; | ||
351 | for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) | ||
352 | stm32_cryp_write(c, r_id, | ||
353 | cpu_to_be32(c->ctx->key[i - 1])); | ||
354 | } | ||
355 | } | ||
356 | |||
357 | static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp) | ||
358 | { | ||
359 | if (is_aes(cryp) && is_ecb(cryp)) | ||
360 | return CR_AES_ECB; | ||
361 | |||
362 | if (is_aes(cryp) && is_cbc(cryp)) | ||
363 | return CR_AES_CBC; | ||
364 | |||
365 | if (is_aes(cryp) && is_ctr(cryp)) | ||
366 | return CR_AES_CTR; | ||
367 | |||
368 | if (is_des(cryp) && is_ecb(cryp)) | ||
369 | return CR_DES_ECB; | ||
370 | |||
371 | if (is_des(cryp) && is_cbc(cryp)) | ||
372 | return CR_DES_CBC; | ||
373 | |||
374 | if (is_tdes(cryp) && is_ecb(cryp)) | ||
375 | return CR_TDES_ECB; | ||
376 | |||
377 | if (is_tdes(cryp) && is_cbc(cryp)) | ||
378 | return CR_TDES_CBC; | ||
379 | |||
380 | dev_err(cryp->dev, "Unknown mode\n"); | ||
381 | return CR_AES_UNKNOWN; | ||
382 | } | ||
383 | |||
384 | static int stm32_cryp_hw_init(struct stm32_cryp *cryp) | ||
385 | { | ||
386 | int ret; | ||
387 | u32 cfg, hw_mode; | ||
388 | |||
389 | /* Disable interrupt */ | ||
390 | stm32_cryp_write(cryp, CRYP_IMSCR, 0); | ||
391 | |||
392 | /* Set key */ | ||
393 | stm32_cryp_hw_write_key(cryp); | ||
394 | |||
395 | /* Set configuration */ | ||
396 | cfg = CR_DATA8 | CR_FFLUSH; | ||
397 | |||
398 | switch (cryp->ctx->keylen) { | ||
399 | case AES_KEYSIZE_128: | ||
400 | cfg |= CR_KEY128; | ||
401 | break; | ||
402 | |||
403 | case AES_KEYSIZE_192: | ||
404 | cfg |= CR_KEY192; | ||
405 | break; | ||
406 | |||
407 | default: | ||
408 | case AES_KEYSIZE_256: | ||
409 | cfg |= CR_KEY256; | ||
410 | break; | ||
411 | } | ||
412 | |||
413 | hw_mode = stm32_cryp_get_hw_mode(cryp); | ||
414 | if (hw_mode == CR_AES_UNKNOWN) | ||
415 | return -EINVAL; | ||
416 | |||
417 | /* AES ECB/CBC decrypt: run key preparation first */ | ||
418 | if (is_decrypt(cryp) && | ||
419 | ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) { | ||
420 | stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN); | ||
421 | |||
422 | /* Wait for end of processing */ | ||
423 | ret = stm32_cryp_wait_busy(cryp); | ||
424 | if (ret) { | ||
425 | dev_err(cryp->dev, "Timeout (key preparation)\n"); | ||
426 | return ret; | ||
427 | } | ||
428 | } | ||
429 | |||
430 | cfg |= hw_mode; | ||
431 | |||
432 | if (is_decrypt(cryp)) | ||
433 | cfg |= CR_DEC_NOT_ENC; | ||
434 | |||
435 | /* Apply config and flush (valid when CRYPEN = 0) */ | ||
436 | stm32_cryp_write(cryp, CRYP_CR, cfg); | ||
437 | |||
438 | switch (hw_mode) { | ||
439 | case CR_DES_CBC: | ||
440 | case CR_TDES_CBC: | ||
441 | case CR_AES_CBC: | ||
442 | case CR_AES_CTR: | ||
443 | stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info); | ||
444 | break; | ||
445 | |||
446 | default: | ||
447 | break; | ||
448 | } | ||
449 | |||
450 | /* Enable now */ | ||
451 | cfg |= CR_CRYPEN; | ||
452 | |||
453 | stm32_cryp_write(cryp, CRYP_CR, cfg); | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static void stm32_cryp_finish_req(struct stm32_cryp *cryp) | ||
459 | { | ||
460 | int err = 0; | ||
461 | |||
462 | if (cryp->sgs_copied) { | ||
463 | void *buf_in, *buf_out; | ||
464 | int pages, len; | ||
465 | |||
466 | buf_in = sg_virt(&cryp->in_sgl); | ||
467 | buf_out = sg_virt(&cryp->out_sgl); | ||
468 | |||
469 | sg_copy_buf(buf_out, cryp->out_sg_save, 0, | ||
470 | cryp->total_out_save, 1); | ||
471 | |||
472 | len = ALIGN(cryp->total_in_save, cryp->hw_blocksize); | ||
473 | pages = len ? get_order(len) : 1; | ||
474 | free_pages((unsigned long)buf_in, pages); | ||
475 | |||
476 | len = ALIGN(cryp->total_out_save, cryp->hw_blocksize); | ||
477 | pages = len ? get_order(len) : 1; | ||
478 | free_pages((unsigned long)buf_out, pages); | ||
479 | } | ||
480 | |||
481 | crypto_finalize_cipher_request(cryp->engine, cryp->req, err); | ||
482 | cryp->req = NULL; | ||
483 | |||
484 | memset(cryp->ctx->key, 0, cryp->ctx->keylen); | ||
485 | |||
486 | mutex_unlock(&cryp->lock); | ||
487 | } | ||
488 | |||
489 | static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) | ||
490 | { | ||
491 | /* Enable interrupt and let the IRQ handler do everything */ | ||
492 | stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT); | ||
493 | |||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | static int stm32_cryp_cra_init(struct crypto_tfm *tfm) | ||
498 | { | ||
499 | tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx); | ||
500 | |||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode) | ||
505 | { | ||
506 | struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( | ||
507 | crypto_ablkcipher_reqtfm(req)); | ||
508 | struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req); | ||
509 | struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx); | ||
510 | |||
511 | if (!cryp) | ||
512 | return -ENODEV; | ||
513 | |||
514 | rctx->mode = mode; | ||
515 | |||
516 | return crypto_transfer_cipher_request_to_engine(cryp->engine, req); | ||
517 | } | ||
518 | |||
519 | static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
520 | unsigned int keylen) | ||
521 | { | ||
522 | struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
523 | |||
524 | memcpy(ctx->key, key, keylen); | ||
525 | ctx->keylen = keylen; | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
531 | unsigned int keylen) | ||
532 | { | ||
533 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
534 | keylen != AES_KEYSIZE_256) | ||
535 | return -EINVAL; | ||
536 | else | ||
537 | return stm32_cryp_setkey(tfm, key, keylen); | ||
538 | } | ||
539 | |||
540 | static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
541 | unsigned int keylen) | ||
542 | { | ||
543 | if (keylen != DES_KEY_SIZE) | ||
544 | return -EINVAL; | ||
545 | else | ||
546 | return stm32_cryp_setkey(tfm, key, keylen); | ||
547 | } | ||
548 | |||
549 | static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
550 | unsigned int keylen) | ||
551 | { | ||
552 | if (keylen != (3 * DES_KEY_SIZE)) | ||
553 | return -EINVAL; | ||
554 | else | ||
555 | return stm32_cryp_setkey(tfm, key, keylen); | ||
556 | } | ||
557 | |||
558 | static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req) | ||
559 | { | ||
560 | return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); | ||
561 | } | ||
562 | |||
563 | static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req) | ||
564 | { | ||
565 | return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); | ||
566 | } | ||
567 | |||
568 | static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req) | ||
569 | { | ||
570 | return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); | ||
571 | } | ||
572 | |||
573 | static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req) | ||
574 | { | ||
575 | return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); | ||
576 | } | ||
577 | |||
578 | static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req) | ||
579 | { | ||
580 | return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); | ||
581 | } | ||
582 | |||
583 | static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req) | ||
584 | { | ||
585 | return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); | ||
586 | } | ||
587 | |||
588 | static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req) | ||
589 | { | ||
590 | return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); | ||
591 | } | ||
592 | |||
593 | static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req) | ||
594 | { | ||
595 | return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); | ||
596 | } | ||
597 | |||
598 | static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req) | ||
599 | { | ||
600 | return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); | ||
601 | } | ||
602 | |||
603 | static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req) | ||
604 | { | ||
605 | return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); | ||
606 | } | ||
607 | |||
608 | static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req) | ||
609 | { | ||
610 | return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); | ||
611 | } | ||
612 | |||
613 | static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req) | ||
614 | { | ||
615 | return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); | ||
616 | } | ||
617 | |||
618 | static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req) | ||
619 | { | ||
620 | return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); | ||
621 | } | ||
622 | |||
623 | static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req) | ||
624 | { | ||
625 | return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); | ||
626 | } | ||
627 | |||
628 | static int stm32_cryp_prepare_req(struct crypto_engine *engine, | ||
629 | struct ablkcipher_request *req) | ||
630 | { | ||
631 | struct stm32_cryp_ctx *ctx; | ||
632 | struct stm32_cryp *cryp; | ||
633 | struct stm32_cryp_reqctx *rctx; | ||
634 | int ret; | ||
635 | |||
636 | if (!req) | ||
637 | return -EINVAL; | ||
638 | |||
639 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
640 | |||
641 | cryp = ctx->cryp; | ||
642 | |||
643 | if (!cryp) | ||
644 | return -ENODEV; | ||
645 | |||
646 | mutex_lock(&cryp->lock); | ||
647 | |||
648 | rctx = ablkcipher_request_ctx(req); | ||
649 | rctx->mode &= FLG_MODE_MASK; | ||
650 | |||
651 | ctx->cryp = cryp; | ||
652 | |||
653 | cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode; | ||
654 | cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE; | ||
655 | cryp->ctx = ctx; | ||
656 | |||
657 | cryp->req = req; | ||
658 | cryp->total_in = req->nbytes; | ||
659 | cryp->total_out = cryp->total_in; | ||
660 | |||
661 | cryp->total_in_save = cryp->total_in; | ||
662 | cryp->total_out_save = cryp->total_out; | ||
663 | |||
664 | cryp->in_sg = req->src; | ||
665 | cryp->out_sg = req->dst; | ||
666 | cryp->out_sg_save = cryp->out_sg; | ||
667 | |||
668 | cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); | ||
669 | if (cryp->in_sg_len < 0) { | ||
670 | dev_err(cryp->dev, "Cannot get in_sg_len\n"); | ||
671 | ret = cryp->in_sg_len; | ||
672 | goto out; | ||
673 | } | ||
674 | |||
675 | cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); | ||
676 | if (cryp->out_sg_len < 0) { | ||
677 | dev_err(cryp->dev, "Cannot get out_sg_len\n"); | ||
678 | ret = cryp->out_sg_len; | ||
679 | goto out; | ||
680 | } | ||
681 | |||
682 | ret = stm32_cryp_copy_sgs(cryp); | ||
683 | if (ret) | ||
684 | goto out; | ||
685 | |||
686 | scatterwalk_start(&cryp->in_walk, cryp->in_sg); | ||
687 | scatterwalk_start(&cryp->out_walk, cryp->out_sg); | ||
688 | |||
689 | ret = stm32_cryp_hw_init(cryp); | ||
690 | out: | ||
691 | if (ret) | ||
692 | mutex_unlock(&cryp->lock); | ||
693 | |||
694 | return ret; | ||
695 | } | ||
696 | |||
697 | static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, | ||
698 | struct ablkcipher_request *req) | ||
699 | { | ||
700 | return stm32_cryp_prepare_req(engine, req); | ||
701 | } | ||
702 | |||
703 | static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, | ||
704 | struct ablkcipher_request *req) | ||
705 | { | ||
706 | struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( | ||
707 | crypto_ablkcipher_reqtfm(req)); | ||
708 | struct stm32_cryp *cryp = ctx->cryp; | ||
709 | |||
710 | if (!cryp) | ||
711 | return -ENODEV; | ||
712 | |||
713 | return stm32_cryp_cpu_start(cryp); | ||
714 | } | ||
715 | |||
716 | static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, | ||
717 | unsigned int n) | ||
718 | { | ||
719 | scatterwalk_advance(&cryp->out_walk, n); | ||
720 | |||
721 | if (unlikely(cryp->out_sg->length == _walked_out)) { | ||
722 | cryp->out_sg = sg_next(cryp->out_sg); | ||
723 | if (cryp->out_sg) { | ||
724 | scatterwalk_start(&cryp->out_walk, cryp->out_sg); | ||
725 | return (sg_virt(cryp->out_sg) + _walked_out); | ||
726 | } | ||
727 | } | ||
728 | |||
729 | return (u32 *)((u8 *)dst + n); | ||
730 | } | ||
731 | |||
732 | static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src, | ||
733 | unsigned int n) | ||
734 | { | ||
735 | scatterwalk_advance(&cryp->in_walk, n); | ||
736 | |||
737 | if (unlikely(cryp->in_sg->length == _walked_in)) { | ||
738 | cryp->in_sg = sg_next(cryp->in_sg); | ||
739 | if (cryp->in_sg) { | ||
740 | scatterwalk_start(&cryp->in_walk, cryp->in_sg); | ||
741 | return (sg_virt(cryp->in_sg) + _walked_in); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | return (u32 *)((u8 *)src + n); | ||
746 | } | ||
747 | |||
748 | static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) | ||
749 | { | ||
750 | u32 cr; | ||
751 | |||
752 | if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) { | ||
753 | cryp->last_ctr[3] = 0; | ||
754 | cryp->last_ctr[2]++; | ||
755 | if (!cryp->last_ctr[2]) { | ||
756 | cryp->last_ctr[1]++; | ||
757 | if (!cryp->last_ctr[1]) | ||
758 | cryp->last_ctr[0]++; | ||
759 | } | ||
760 | |||
761 | cr = stm32_cryp_read(cryp, CRYP_CR); | ||
762 | stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); | ||
763 | |||
764 | stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr); | ||
765 | |||
766 | stm32_cryp_write(cryp, CRYP_CR, cr); | ||
767 | } | ||
768 | |||
769 | cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR); | ||
770 | cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR); | ||
771 | cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR); | ||
772 | cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR); | ||
773 | } | ||
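[Editor's note] The carry handling above is the tail of a big-endian 128-bit counter increment; as a standalone sketch (illustration only, not driver code):

    /* Increment a big-endian 128-bit counter held as four u32 words,
     * most significant word first; stop at the first word that does
     * not wrap back to zero.
     */
    static void ctr128_inc(u32 ctr[4])
    {
    	int i;

    	for (i = 3; i >= 0; i--)
    		if (++ctr[i] != 0)
    			break;
    }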
774 | |||
775 | static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp) | ||
776 | { | ||
777 | unsigned int i, j; | ||
778 | u32 d32, *dst; | ||
779 | u8 *d8; | ||
780 | |||
781 | dst = sg_virt(cryp->out_sg) + _walked_out; | ||
782 | |||
783 | for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { | ||
784 | if (likely(cryp->total_out >= sizeof(u32))) { | ||
785 | /* Read a full u32 */ | ||
786 | *dst = stm32_cryp_read(cryp, CRYP_DOUT); | ||
787 | |||
788 | dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); | ||
789 | cryp->total_out -= sizeof(u32); | ||
790 | } else if (!cryp->total_out) { | ||
791 | /* Empty fifo out (data from input padding) */ | ||
792 | d32 = stm32_cryp_read(cryp, CRYP_DOUT); | ||
793 | } else { | ||
794 | /* Read less than a u32 */ | ||
795 | d32 = stm32_cryp_read(cryp, CRYP_DOUT); | ||
796 | d8 = (u8 *)&d32; | ||
797 | |||
798 | for (j = 0; j < cryp->total_out; j++) { | ||
799 | *((u8 *)dst) = *(d8++); | ||
800 | dst = stm32_cryp_next_out(cryp, dst, 1); | ||
801 | } | ||
802 | cryp->total_out = 0; | ||
803 | } | ||
804 | } | ||
805 | |||
806 | return !cryp->total_out || !cryp->total_in; | ||
807 | } | ||
808 | |||
809 | static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) | ||
810 | { | ||
811 | unsigned int i, j; | ||
812 | u32 *src; | ||
813 | u8 d8[4]; | ||
814 | |||
815 | src = sg_virt(cryp->in_sg) + _walked_in; | ||
816 | |||
817 | for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { | ||
818 | if (likely(cryp->total_in >= sizeof(u32))) { | ||
819 | /* Write a full u32 */ | ||
820 | stm32_cryp_write(cryp, CRYP_DIN, *src); | ||
821 | |||
822 | src = stm32_cryp_next_in(cryp, src, sizeof(u32)); | ||
823 | cryp->total_in -= sizeof(u32); | ||
824 | } else if (!cryp->total_in) { | ||
825 | /* Write padding data */ | ||
826 | stm32_cryp_write(cryp, CRYP_DIN, 0); | ||
827 | } else { | ||
828 | /* Write less than a u32 */ | ||
829 | memset(d8, 0, sizeof(u32)); | ||
830 | for (j = 0; j < cryp->total_in; j++) { | ||
831 | d8[j] = *((u8 *)src); | ||
832 | src = stm32_cryp_next_in(cryp, src, 1); | ||
833 | } | ||
834 | |||
835 | stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); | ||
836 | cryp->total_in = 0; | ||
837 | } | ||
838 | } | ||
839 | } | ||
840 | |||
841 | static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) | ||
842 | { | ||
843 | if (unlikely(!cryp->total_in)) { | ||
844 | dev_warn(cryp->dev, "No more data to process\n"); | ||
845 | return; | ||
846 | } | ||
847 | |||
848 | if (is_aes(cryp) && is_ctr(cryp)) | ||
849 | stm32_cryp_check_ctr_counter(cryp); | ||
850 | |||
851 | stm32_cryp_irq_write_block(cryp); | ||
852 | } | ||
853 | |||
854 | static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) | ||
855 | { | ||
856 | struct stm32_cryp *cryp = arg; | ||
857 | |||
858 | if (cryp->irq_status & MISR_OUT) | ||
859 | /* Output FIFO IRQ: read data */ | ||
860 | if (unlikely(stm32_cryp_irq_read_data(cryp))) { | ||
861 | /* All bytes processed, finish */ | ||
862 | stm32_cryp_write(cryp, CRYP_IMSCR, 0); | ||
863 | stm32_cryp_finish_req(cryp); | ||
864 | return IRQ_HANDLED; | ||
865 | } | ||
866 | |||
867 | if (cryp->irq_status & MISR_IN) { | ||
868 | /* Input FIFO IRQ: write data */ | ||
869 | stm32_cryp_irq_write_data(cryp); | ||
870 | } | ||
871 | |||
872 | return IRQ_HANDLED; | ||
873 | } | ||
874 | |||
875 | static irqreturn_t stm32_cryp_irq(int irq, void *arg) | ||
876 | { | ||
877 | struct stm32_cryp *cryp = arg; | ||
878 | |||
879 | cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR); | ||
880 | |||
881 | return IRQ_WAKE_THREAD; | ||
882 | } | ||
883 | |||
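[Editor's note] Note the threaded-IRQ split here: the hard handler only latches CRYP_MISR into cryp->irq_status and returns IRQ_WAKE_THREAD, deferring all FIFO reads and writes to stm32_cryp_irq_thread(), which runs in process context; both halves are registered together via devm_request_threaded_irq() in the probe routine below.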
884 | static struct crypto_alg crypto_algs[] = { | ||
885 | { | ||
886 | .cra_name = "ecb(aes)", | ||
887 | .cra_driver_name = "stm32-ecb-aes", | ||
888 | .cra_priority = 200, | ||
889 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
890 | CRYPTO_ALG_ASYNC, | ||
891 | .cra_blocksize = AES_BLOCK_SIZE, | ||
892 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
893 | .cra_alignmask = 0xf, | ||
894 | .cra_type = &crypto_ablkcipher_type, | ||
895 | .cra_module = THIS_MODULE, | ||
896 | .cra_init = stm32_cryp_cra_init, | ||
897 | .cra_ablkcipher = { | ||
898 | .min_keysize = AES_MIN_KEY_SIZE, | ||
899 | .max_keysize = AES_MAX_KEY_SIZE, | ||
900 | .setkey = stm32_cryp_aes_setkey, | ||
901 | .encrypt = stm32_cryp_aes_ecb_encrypt, | ||
902 | .decrypt = stm32_cryp_aes_ecb_decrypt, | ||
903 | } | ||
904 | }, | ||
905 | { | ||
906 | .cra_name = "cbc(aes)", | ||
907 | .cra_driver_name = "stm32-cbc-aes", | ||
908 | .cra_priority = 200, | ||
909 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
910 | CRYPTO_ALG_ASYNC, | ||
911 | .cra_blocksize = AES_BLOCK_SIZE, | ||
912 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
913 | .cra_alignmask = 0xf, | ||
914 | .cra_type = &crypto_ablkcipher_type, | ||
915 | .cra_module = THIS_MODULE, | ||
916 | .cra_init = stm32_cryp_cra_init, | ||
917 | .cra_ablkcipher = { | ||
918 | .min_keysize = AES_MIN_KEY_SIZE, | ||
919 | .max_keysize = AES_MAX_KEY_SIZE, | ||
920 | .ivsize = AES_BLOCK_SIZE, | ||
921 | .setkey = stm32_cryp_aes_setkey, | ||
922 | .encrypt = stm32_cryp_aes_cbc_encrypt, | ||
923 | .decrypt = stm32_cryp_aes_cbc_decrypt, | ||
924 | } | ||
925 | }, | ||
926 | { | ||
927 | .cra_name = "ctr(aes)", | ||
928 | .cra_driver_name = "stm32-ctr-aes", | ||
929 | .cra_priority = 200, | ||
930 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
931 | CRYPTO_ALG_ASYNC, | ||
932 | .cra_blocksize = 1, | ||
933 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
934 | .cra_alignmask = 0xf, | ||
935 | .cra_type = &crypto_ablkcipher_type, | ||
936 | .cra_module = THIS_MODULE, | ||
937 | .cra_init = stm32_cryp_cra_init, | ||
938 | .cra_ablkcipher = { | ||
939 | .min_keysize = AES_MIN_KEY_SIZE, | ||
940 | .max_keysize = AES_MAX_KEY_SIZE, | ||
941 | .ivsize = AES_BLOCK_SIZE, | ||
942 | .setkey = stm32_cryp_aes_setkey, | ||
943 | .encrypt = stm32_cryp_aes_ctr_encrypt, | ||
944 | .decrypt = stm32_cryp_aes_ctr_decrypt, | ||
945 | } | ||
946 | }, | ||
947 | { | ||
948 | .cra_name = "ecb(des)", | ||
949 | .cra_driver_name = "stm32-ecb-des", | ||
950 | .cra_priority = 200, | ||
951 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
952 | CRYPTO_ALG_ASYNC, | ||
953 | .cra_blocksize = DES_BLOCK_SIZE, | ||
954 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
955 | .cra_alignmask = 0xf, | ||
956 | .cra_type = &crypto_ablkcipher_type, | ||
957 | .cra_module = THIS_MODULE, | ||
958 | .cra_init = stm32_cryp_cra_init, | ||
959 | .cra_ablkcipher = { | ||
960 | .min_keysize = DES_BLOCK_SIZE, | ||
961 | .max_keysize = DES_BLOCK_SIZE, | ||
962 | .setkey = stm32_cryp_des_setkey, | ||
963 | .encrypt = stm32_cryp_des_ecb_encrypt, | ||
964 | .decrypt = stm32_cryp_des_ecb_decrypt, | ||
965 | } | ||
966 | }, | ||
967 | { | ||
968 | .cra_name = "cbc(des)", | ||
969 | .cra_driver_name = "stm32-cbc-des", | ||
970 | .cra_priority = 200, | ||
971 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
972 | CRYPTO_ALG_ASYNC, | ||
973 | .cra_blocksize = DES_BLOCK_SIZE, | ||
974 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
975 | .cra_alignmask = 0xf, | ||
976 | .cra_type = &crypto_ablkcipher_type, | ||
977 | .cra_module = THIS_MODULE, | ||
978 | .cra_init = stm32_cryp_cra_init, | ||
979 | .cra_ablkcipher = { | ||
980 | .min_keysize = DES_BLOCK_SIZE, | ||
981 | .max_keysize = DES_BLOCK_SIZE, | ||
982 | .ivsize = DES_BLOCK_SIZE, | ||
983 | .setkey = stm32_cryp_des_setkey, | ||
984 | .encrypt = stm32_cryp_des_cbc_encrypt, | ||
985 | .decrypt = stm32_cryp_des_cbc_decrypt, | ||
986 | } | ||
987 | }, | ||
988 | { | ||
989 | .cra_name = "ecb(des3_ede)", | ||
990 | .cra_driver_name = "stm32-ecb-des3", | ||
991 | .cra_priority = 200, | ||
992 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
993 | CRYPTO_ALG_ASYNC, | ||
994 | .cra_blocksize = DES_BLOCK_SIZE, | ||
995 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
996 | .cra_alignmask = 0xf, | ||
997 | .cra_type = &crypto_ablkcipher_type, | ||
998 | .cra_module = THIS_MODULE, | ||
999 | .cra_init = stm32_cryp_cra_init, | ||
1000 | .cra_ablkcipher = { | ||
1001 | .min_keysize = 3 * DES_BLOCK_SIZE, | ||
1002 | .max_keysize = 3 * DES_BLOCK_SIZE, | ||
1003 | .setkey = stm32_cryp_tdes_setkey, | ||
1004 | .encrypt = stm32_cryp_tdes_ecb_encrypt, | ||
1005 | .decrypt = stm32_cryp_tdes_ecb_decrypt, | ||
1006 | } | ||
1007 | }, | ||
1008 | { | ||
1009 | .cra_name = "cbc(des3_ede)", | ||
1010 | .cra_driver_name = "stm32-cbc-des3", | ||
1011 | .cra_priority = 200, | ||
1012 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
1013 | CRYPTO_ALG_ASYNC, | ||
1014 | .cra_blocksize = DES_BLOCK_SIZE, | ||
1015 | .cra_ctxsize = sizeof(struct stm32_cryp_ctx), | ||
1016 | .cra_alignmask = 0xf, | ||
1017 | .cra_type = &crypto_ablkcipher_type, | ||
1018 | .cra_module = THIS_MODULE, | ||
1019 | .cra_init = stm32_cryp_cra_init, | ||
1020 | .cra_ablkcipher = { | ||
1021 | .min_keysize = 3 * DES_BLOCK_SIZE, | ||
1022 | .max_keysize = 3 * DES_BLOCK_SIZE, | ||
1023 | .ivsize = DES_BLOCK_SIZE, | ||
1024 | .setkey = stm32_cryp_tdes_setkey, | ||
1025 | .encrypt = stm32_cryp_tdes_cbc_encrypt, | ||
1026 | .decrypt = stm32_cryp_tdes_cbc_decrypt, | ||
1027 | } | ||
1028 | }, | ||
1029 | }; | ||
1030 | |||
1031 | static const struct of_device_id stm32_dt_ids[] = { | ||
1032 | { .compatible = "st,stm32f756-cryp", }, | ||
1033 | {}, | ||
1034 | }; | ||
1035 | MODULE_DEVICE_TABLE(of, stm32_dt_ids); | ||
1036 | |||
1037 | static int stm32_cryp_probe(struct platform_device *pdev) | ||
1038 | { | ||
1039 | struct device *dev = &pdev->dev; | ||
1040 | struct stm32_cryp *cryp; | ||
1041 | struct resource *res; | ||
1042 | struct reset_control *rst; | ||
1043 | int irq, ret; | ||
1044 | |||
1045 | cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL); | ||
1046 | if (!cryp) | ||
1047 | return -ENOMEM; | ||
1048 | |||
1049 | cryp->dev = dev; | ||
1050 | |||
1051 | mutex_init(&cryp->lock); | ||
1052 | |||
1053 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1054 | cryp->regs = devm_ioremap_resource(dev, res); | ||
1055 | if (IS_ERR(cryp->regs)) | ||
1056 | return PTR_ERR(cryp->regs); | ||
1057 | |||
1058 | irq = platform_get_irq(pdev, 0); | ||
1059 | if (irq < 0) { | ||
1060 | dev_err(dev, "Cannot get IRQ resource\n"); | ||
1061 | return irq; | ||
1062 | } | ||
1063 | |||
1064 | ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, | ||
1065 | stm32_cryp_irq_thread, IRQF_ONESHOT, | ||
1066 | dev_name(dev), cryp); | ||
1067 | if (ret) { | ||
1068 | dev_err(dev, "Cannot grab IRQ\n"); | ||
1069 | return ret; | ||
1070 | } | ||
1071 | |||
1072 | cryp->clk = devm_clk_get(dev, NULL); | ||
1073 | if (IS_ERR(cryp->clk)) { | ||
1074 | dev_err(dev, "Could not get clock\n"); | ||
1075 | return PTR_ERR(cryp->clk); | ||
1076 | } | ||
1077 | |||
1078 | ret = clk_prepare_enable(cryp->clk); | ||
1079 | if (ret) { | ||
1080 | dev_err(cryp->dev, "Failed to enable clock\n"); | ||
1081 | return ret; | ||
1082 | } | ||
1083 | |||
1084 | rst = devm_reset_control_get(dev, NULL); | ||
1085 | if (!IS_ERR(rst)) { | ||
1086 | reset_control_assert(rst); | ||
1087 | udelay(2); | ||
1088 | reset_control_deassert(rst); | ||
1089 | } | ||
1090 | |||
1091 | platform_set_drvdata(pdev, cryp); | ||
1092 | |||
1093 | spin_lock(&cryp_list.lock); | ||
1094 | list_add(&cryp->list, &cryp_list.dev_list); | ||
1095 | spin_unlock(&cryp_list.lock); | ||
1096 | |||
1097 | /* Initialize crypto engine */ | ||
1098 | cryp->engine = crypto_engine_alloc_init(dev, 1); | ||
1099 | if (!cryp->engine) { | ||
1100 | dev_err(dev, "Could not init crypto engine\n"); | ||
1101 | ret = -ENOMEM; | ||
1102 | goto err_engine1; | ||
1103 | } | ||
1104 | |||
1105 | cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req; | ||
1106 | cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req; | ||
1107 | |||
1108 | ret = crypto_engine_start(cryp->engine); | ||
1109 | if (ret) { | ||
1110 | dev_err(dev, "Could not start crypto engine\n"); | ||
1111 | goto err_engine2; | ||
1112 | } | ||
1113 | |||
1114 | ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs)); | ||
1115 | if (ret) { | ||
1116 | dev_err(dev, "Could not register algs\n"); | ||
1117 | goto err_algs; | ||
1118 | } | ||
1119 | |||
1120 | dev_info(dev, "Initialized\n"); | ||
1121 | |||
1122 | return 0; | ||
1123 | |||
1124 | err_algs: | ||
1125 | err_engine2: | ||
1126 | crypto_engine_exit(cryp->engine); | ||
1127 | err_engine1: | ||
1128 | spin_lock(&cryp_list.lock); | ||
1129 | list_del(&cryp->list); | ||
1130 | spin_unlock(&cryp_list.lock); | ||
1131 | |||
1132 | clk_disable_unprepare(cryp->clk); | ||
1133 | |||
1134 | return ret; | ||
1135 | } | ||
1136 | |||
1137 | static int stm32_cryp_remove(struct platform_device *pdev) | ||
1138 | { | ||
1139 | struct stm32_cryp *cryp = platform_get_drvdata(pdev); | ||
1140 | |||
1141 | if (!cryp) | ||
1142 | return -ENODEV; | ||
1143 | |||
1144 | crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs)); | ||
1145 | |||
1146 | crypto_engine_exit(cryp->engine); | ||
1147 | |||
1148 | spin_lock(&cryp_list.lock); | ||
1149 | list_del(&cryp->list); | ||
1150 | spin_unlock(&cryp_list.lock); | ||
1151 | |||
1152 | clk_disable_unprepare(cryp->clk); | ||
1153 | |||
1154 | return 0; | ||
1155 | } | ||
1156 | |||
1157 | static struct platform_driver stm32_cryp_driver = { | ||
1158 | .probe = stm32_cryp_probe, | ||
1159 | .remove = stm32_cryp_remove, | ||
1160 | .driver = { | ||
1161 | .name = DRIVER_NAME, | ||
1162 | .of_match_table = stm32_dt_ids, | ||
1163 | }, | ||
1164 | }; | ||
1165 | |||
1166 | module_platform_driver(stm32_cryp_driver); | ||
1167 | |||
1168 | MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>"); | ||
1169 | MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver"); | ||
1170 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index 090582baecfe..8f09b8430893 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c | |||
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = { | |||
208 | .cra_name = "crc32", | 208 | .cra_name = "crc32", |
209 | .cra_driver_name = DRIVER_NAME, | 209 | .cra_driver_name = DRIVER_NAME, |
210 | .cra_priority = 200, | 210 | .cra_priority = 200, |
211 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
211 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 212 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
212 | .cra_alignmask = 3, | 213 | .cra_alignmask = 3, |
213 | .cra_ctxsize = sizeof(struct stm32_crc_ctx), | 214 | .cra_ctxsize = sizeof(struct stm32_crc_ctx), |
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = { | |||
229 | .cra_name = "crc32c", | 230 | .cra_name = "crc32c", |
230 | .cra_driver_name = DRIVER_NAME, | 231 | .cra_driver_name = DRIVER_NAME, |
231 | .cra_priority = 200, | 232 | .cra_priority = 200, |
233 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
232 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 234 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
233 | .cra_alignmask = 3, | 235 | .cra_alignmask = 3, |
234 | .cra_ctxsize = sizeof(struct stm32_crc_ctx), | 236 | .cra_ctxsize = sizeof(struct stm32_crc_ctx), |
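[Editor's note] CRYPTO_ALG_OPTIONAL_KEY marks a hash whose ->setkey() need not be called before use. crc32/crc32c only take an optional seed key, so without this flag the crypto core would treat them as keyed hashes and reject requests until a key is set.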
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d8424ed16c33..351f4bf37ca9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -58,6 +58,13 @@ | |||
58 | extern struct list_head adapter_list; | 58 | extern struct list_head adapter_list; |
59 | extern struct mutex uld_mutex; | 59 | extern struct mutex uld_mutex; |
60 | 60 | ||
61 | /* Suspend an Ethernet Tx queue with fewer available descriptors than this. | ||
62 | * This is the same as calc_tx_descs() for a TSO packet with | ||
63 | * nr_frags == MAX_SKB_FRAGS. | ||
64 | */ | ||
65 | #define ETHTXQ_STOP_THRES \ | ||
66 | (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) | ||
67 | |||
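[Editor's note] A worked value for the threshold above, assuming MAX_SKB_FRAGS == 17 (typical with 4 KiB pages): (3 * 17) / 2 + (17 & 1) = 25 + 1 = 26, DIV_ROUND_UP(26, 8) = 4, so ETHTXQ_STOP_THRES = 1 + 4 = 5 descriptors.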
61 | enum { | 68 | enum { |
62 | MAX_NPORTS = 4, /* max # of ports */ | 69 | MAX_NPORTS = 4, /* max # of ports */ |
63 | SERNUM_LEN = 24, /* Serial # length */ | 70 | SERNUM_LEN = 24, /* Serial # length */ |
@@ -563,6 +570,7 @@ enum { /* adapter flags */ | |||
563 | 570 | ||
564 | enum { | 571 | enum { |
565 | ULP_CRYPTO_LOOKASIDE = 1 << 0, | 572 | ULP_CRYPTO_LOOKASIDE = 1 << 0, |
573 | ULP_CRYPTO_IPSEC_INLINE = 1 << 1, | ||
566 | }; | 574 | }; |
567 | 575 | ||
568 | struct rx_sw_desc; | 576 | struct rx_sw_desc; |
@@ -967,6 +975,11 @@ enum { | |||
967 | SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ | 975 | SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ |
968 | }; | 976 | }; |
969 | 977 | ||
978 | struct tx_sw_desc { /* SW state per Tx descriptor */ | ||
979 | struct sk_buff *skb; | ||
980 | struct ulptx_sgl *sgl; | ||
981 | }; | ||
982 | |||
970 | /* Support for "sched_queue" command to allow one or more NIC TX Queues | 983 | /* Support for "sched_queue" command to allow one or more NIC TX Queues |
971 | * to be bound to a TX Scheduling Class. | 984 | * to be bound to a TX Scheduling Class. |
972 | */ | 985 | */ |
@@ -1699,4 +1712,14 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); | |||
1699 | void free_tx_desc(struct adapter *adap, struct sge_txq *q, | 1712 | void free_tx_desc(struct adapter *adap, struct sge_txq *q, |
1700 | unsigned int n, bool unmap); | 1713 | unsigned int n, bool unmap); |
1701 | void free_txq(struct adapter *adap, struct sge_txq *q); | 1714 | void free_txq(struct adapter *adap, struct sge_txq *q); |
1715 | void cxgb4_reclaim_completed_tx(struct adapter *adap, | ||
1716 | struct sge_txq *q, bool unmap); | ||
1717 | int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb, | ||
1718 | dma_addr_t *addr); | ||
1719 | void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, | ||
1720 | void *pos); | ||
1721 | void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, | ||
1722 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, | ||
1723 | const dma_addr_t *addr); | ||
1724 | void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); | ||
1702 | #endif /* __CXGB4_H__ */ | 1725 | #endif /* __CXGB4_H__ */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 917663b35603..cf471831ee71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | |||
@@ -3096,6 +3096,8 @@ static int chcr_show(struct seq_file *seq, void *v) | |||
3096 | atomic_read(&adap->chcr_stats.error)); | 3096 | atomic_read(&adap->chcr_stats.error)); |
3097 | seq_printf(seq, "Fallback: %10u \n", | 3097 | seq_printf(seq, "Fallback: %10u \n", |
3098 | atomic_read(&adap->chcr_stats.fallback)); | 3098 | atomic_read(&adap->chcr_stats.fallback)); |
3099 | seq_printf(seq, "IPSec PDU: %10u\n", | ||
3100 | atomic_read(&adap->chcr_stats.ipsec_cnt)); | ||
3099 | return 0; | 3101 | return 0; |
3100 | } | 3102 | } |
3101 | 3103 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6f900ffe25cc..05a4abfd5ec1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -4096,7 +4096,7 @@ static int adap_init0(struct adapter *adap) | |||
4096 | } else { | 4096 | } else { |
4097 | adap->vres.ncrypto_fc = val[0]; | 4097 | adap->vres.ncrypto_fc = val[0]; |
4098 | } | 4098 | } |
4099 | adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; | 4099 | adap->params.crypto = ntohs(caps_cmd.cryptocaps); |
4100 | adap->num_uld += 1; | 4100 | adap->num_uld += 1; |
4101 | } | 4101 | } |
4102 | #undef FW_PARAM_PFVF | 4102 | #undef FW_PARAM_PFVF |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 71a315bc1409..6b5fea4532f3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | |||
@@ -637,6 +637,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) | |||
637 | lld->nchan = adap->params.nports; | 637 | lld->nchan = adap->params.nports; |
638 | lld->nports = adap->params.nports; | 638 | lld->nports = adap->params.nports; |
639 | lld->wr_cred = adap->params.ofldq_wr_cred; | 639 | lld->wr_cred = adap->params.ofldq_wr_cred; |
640 | lld->crypto = adap->params.crypto; | ||
640 | lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); | 641 | lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); |
641 | lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A); | 642 | lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A); |
642 | lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A); | 643 | lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 08e709ab6dd4..1d37672902da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
@@ -297,6 +297,7 @@ struct chcr_stats_debug { | |||
297 | atomic_t complete; | 297 | atomic_t complete; |
298 | atomic_t error; | 298 | atomic_t error; |
299 | atomic_t fallback; | 299 | atomic_t fallback; |
300 | atomic_t ipsec_cnt; | ||
300 | }; | 301 | }; |
301 | 302 | ||
302 | #define OCQ_WIN_OFFSET(pdev, vres) \ | 303 | #define OCQ_WIN_OFFSET(pdev, vres) \ |
@@ -322,6 +323,7 @@ struct cxgb4_lld_info { | |||
322 | unsigned char wr_cred; /* WR 16-byte credits */ | 323 | unsigned char wr_cred; /* WR 16-byte credits */ |
323 | unsigned char adapter_type; /* type of adapter */ | 324 | unsigned char adapter_type; /* type of adapter */ |
324 | unsigned char fw_api_ver; /* FW API version */ | 325 | unsigned char fw_api_ver; /* FW API version */ |
326 | unsigned char crypto; /* crypto support */ | ||
325 | unsigned int fw_vers; /* FW version */ | 327 | unsigned int fw_vers; /* FW version */ |
326 | unsigned int iscsi_iolen; /* iSCSI max I/O length */ | 328 | unsigned int iscsi_iolen; /* iSCSI max I/O length */ |
327 | unsigned int cclk_ps; /* Core clock period in psec */ | 329 | unsigned int cclk_ps; /* Core clock period in psec */ |
@@ -370,6 +372,7 @@ struct cxgb4_uld_info { | |||
370 | struct t4_lro_mgr *lro_mgr, | 372 | struct t4_lro_mgr *lro_mgr, |
371 | struct napi_struct *napi); | 373 | struct napi_struct *napi); |
372 | void (*lro_flush)(struct t4_lro_mgr *); | 374 | void (*lro_flush)(struct t4_lro_mgr *); |
375 | int (*tx_handler)(struct sk_buff *skb, struct net_device *dev); | ||
373 | }; | 376 | }; |
374 | 377 | ||
375 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); | 378 | int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 922f2f937789..6c7b0ac0b48b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/jiffies.h> | 41 | #include <linux/jiffies.h> |
42 | #include <linux/prefetch.h> | 42 | #include <linux/prefetch.h> |
43 | #include <linux/export.h> | 43 | #include <linux/export.h> |
44 | #include <net/xfrm.h> | ||
44 | #include <net/ipv6.h> | 45 | #include <net/ipv6.h> |
45 | #include <net/tcp.h> | 46 | #include <net/tcp.h> |
46 | #include <net/busy_poll.h> | 47 | #include <net/busy_poll.h> |
@@ -53,6 +54,7 @@ | |||
53 | #include "t4_msg.h" | 54 | #include "t4_msg.h" |
54 | #include "t4fw_api.h" | 55 | #include "t4fw_api.h" |
55 | #include "cxgb4_ptp.h" | 56 | #include "cxgb4_ptp.h" |
57 | #include "cxgb4_uld.h" | ||
56 | 58 | ||
57 | /* | 59 | /* |
58 | * Rx buffer size. We use largish buffers if possible but settle for single | 60 | * Rx buffer size. We use largish buffers if possible but settle for single |
@@ -110,14 +112,6 @@ | |||
110 | #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) | 112 | #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) |
111 | 113 | ||
112 | /* | 114 | /* |
113 | * Suspend an Ethernet Tx queue with fewer available descriptors than this. | ||
114 | * This is the same as calc_tx_descs() for a TSO packet with | ||
115 | * nr_frags == MAX_SKB_FRAGS. | ||
116 | */ | ||
117 | #define ETHTXQ_STOP_THRES \ | ||
118 | (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) | ||
119 | |||
120 | /* | ||
121 | * Suspension threshold for non-Ethernet Tx queues. We require enough room | 115 | * Suspension threshold for non-Ethernet Tx queues. We require enough room |
122 | * for a full sized WR. | 116 | * for a full sized WR. |
123 | */ | 117 | */ |
@@ -134,11 +128,6 @@ | |||
134 | */ | 128 | */ |
135 | #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN | 129 | #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN |
136 | 130 | ||
137 | struct tx_sw_desc { /* SW state per Tx descriptor */ | ||
138 | struct sk_buff *skb; | ||
139 | struct ulptx_sgl *sgl; | ||
140 | }; | ||
141 | |||
142 | struct rx_sw_desc { /* SW state per Rx descriptor */ | 131 | struct rx_sw_desc { /* SW state per Rx descriptor */ |
143 | struct page *page; | 132 | struct page *page; |
144 | dma_addr_t dma_addr; | 133 | dma_addr_t dma_addr; |
@@ -248,8 +237,8 @@ static inline bool fl_starving(const struct adapter *adapter, | |||
248 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; | 237 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; |
249 | } | 238 | } |
250 | 239 | ||
251 | static int map_skb(struct device *dev, const struct sk_buff *skb, | 240 | int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb, |
252 | dma_addr_t *addr) | 241 | dma_addr_t *addr) |
253 | { | 242 | { |
254 | const skb_frag_t *fp, *end; | 243 | const skb_frag_t *fp, *end; |
255 | const struct skb_shared_info *si; | 244 | const struct skb_shared_info *si; |
@@ -277,6 +266,7 @@ unwind: | |||
277 | out_err: | 266 | out_err: |
278 | return -ENOMEM; | 267 | return -ENOMEM; |
279 | } | 268 | } |
269 | EXPORT_SYMBOL(cxgb4_map_skb); | ||
280 | 270 | ||
281 | #ifdef CONFIG_NEED_DMA_MAP_STATE | 271 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
282 | static void unmap_skb(struct device *dev, const struct sk_buff *skb, | 272 | static void unmap_skb(struct device *dev, const struct sk_buff *skb, |
@@ -411,7 +401,7 @@ static inline int reclaimable(const struct sge_txq *q) | |||
411 | } | 401 | } |
412 | 402 | ||
413 | /** | 403 | /** |
414 | * reclaim_completed_tx - reclaims completed Tx descriptors | 404 | * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors |
415 | * @adap: the adapter | 405 | * @adap: the adapter |
416 | * @q: the Tx queue to reclaim completed descriptors from | 406 | * @q: the Tx queue to reclaim completed descriptors from |
417 | * @unmap: whether the buffers should be unmapped for DMA | 407 | * @unmap: whether the buffers should be unmapped for DMA |
@@ -420,7 +410,7 @@ static inline int reclaimable(const struct sge_txq *q) | |||
420 | * and frees the associated buffers if possible. Called with the Tx | 410 | * and frees the associated buffers if possible. Called with the Tx |
421 | * queue locked. | 411 | * queue locked. |
422 | */ | 412 | */ |
423 | static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, | 413 | inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, |
424 | bool unmap) | 414 | bool unmap) |
425 | { | 415 | { |
426 | int avail = reclaimable(q); | 416 | int avail = reclaimable(q); |
@@ -437,6 +427,7 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, | |||
437 | q->in_use -= avail; | 427 | q->in_use -= avail; |
438 | } | 428 | } |
439 | } | 429 | } |
430 | EXPORT_SYMBOL(cxgb4_reclaim_completed_tx); | ||
440 | 431 | ||
441 | static inline int get_buf_size(struct adapter *adapter, | 432 | static inline int get_buf_size(struct adapter *adapter, |
442 | const struct rx_sw_desc *d) | 433 | const struct rx_sw_desc *d) |
@@ -833,7 +824,7 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
833 | } | 824 | } |
834 | 825 | ||
835 | /** | 826 | /** |
836 | * write_sgl - populate a scatter/gather list for a packet | 827 | * cxgb4_write_sgl - populate a scatter/gather list for a packet |
837 | * @skb: the packet | 828 | * @skb: the packet |
838 | * @q: the Tx queue we are writing into | 829 | * @q: the Tx queue we are writing into |
839 | * @sgl: starting location for writing the SGL | 830 | * @sgl: starting location for writing the SGL |
@@ -849,9 +840,9 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
849 | * right after the end of the SGL but does not account for any potential | 840 | * right after the end of the SGL but does not account for any potential |
850 | * wrap around, i.e., @end > @sgl. | 841 | * wrap around, i.e., @end > @sgl. |
851 | */ | 842 | */ |
852 | static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | 843 | void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, |
853 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, | 844 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, |
854 | const dma_addr_t *addr) | 845 | const dma_addr_t *addr) |
855 | { | 846 | { |
856 | unsigned int i, len; | 847 | unsigned int i, len; |
857 | struct ulptx_sge_pair *to; | 848 | struct ulptx_sge_pair *to; |
@@ -903,6 +894,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, | |||
903 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ | 894 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ |
904 | *end = 0; | 895 | *end = 0; |
905 | } | 896 | } |
897 | EXPORT_SYMBOL(cxgb4_write_sgl); | ||
906 | 898 | ||
907 | /* This function copies 64 byte coalesced work request to | 899 | /* This function copies 64 byte coalesced work request to |
908 | * memory mapped BAR2 space. For coalesced WR SGE fetches | 900 | * memory mapped BAR2 space. For coalesced WR SGE fetches |
@@ -921,14 +913,14 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) | |||
921 | } | 913 | } |
922 | 914 | ||
923 | /** | 915 | /** |
924 | * ring_tx_db - check and potentially ring a Tx queue's doorbell | 916 | * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell |
925 | * @adap: the adapter | 917 | * @adap: the adapter |
926 | * @q: the Tx queue | 918 | * @q: the Tx queue |
927 | * @n: number of new descriptors to give to HW | 919 | * @n: number of new descriptors to give to HW |
928 | * | 920 | * |
929 | * Ring the doorbell for a Tx queue. | 921 | * Ring the doorbell for a Tx queue. |
930 | */ | 922 | */ |
931 | static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | 923 | inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) |
932 | { | 924 | { |
933 | /* Make sure that all writes to the TX Descriptors are committed | 925 | /* Make sure that all writes to the TX Descriptors are committed |
934 | * before we tell the hardware about them. | 926 | * before we tell the hardware about them. |
@@ -995,9 +987,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | |||
995 | wmb(); | 987 | wmb(); |
996 | } | 988 | } |
997 | } | 989 | } |
990 | EXPORT_SYMBOL(cxgb4_ring_tx_db); | ||
998 | 991 | ||
999 | /** | 992 | /** |
1000 | * inline_tx_skb - inline a packet's data into Tx descriptors | 993 | * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors |
1001 | * @skb: the packet | 994 | * @skb: the packet |
1002 | * @q: the Tx queue where the packet will be inlined | 995 | * @q: the Tx queue where the packet will be inlined |
1003 | * @pos: starting position in the Tx queue where to inline the packet | 996 | * @pos: starting position in the Tx queue where to inline the packet |
@@ -1007,8 +1000,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) | |||
1007 | * Most of the complexity of this operation is dealing with wrap arounds | 1000 | * Most of the complexity of this operation is dealing with wrap arounds |
1008 | * in the middle of the packet we want to inline. | 1001 | * in the middle of the packet we want to inline. |
1009 | */ | 1002 | */ |
1010 | static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, | 1003 | void cxgb4_inline_tx_skb(const struct sk_buff *skb, |
1011 | void *pos) | 1004 | const struct sge_txq *q, void *pos) |
1012 | { | 1005 | { |
1013 | u64 *p; | 1006 | u64 *p; |
1014 | int left = (void *)q->stat - pos; | 1007 | int left = (void *)q->stat - pos; |
@@ -1030,6 +1023,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, | |||
1030 | if ((uintptr_t)p & 8) | 1023 | if ((uintptr_t)p & 8) |
1031 | *p = 0; | 1024 | *p = 0; |
1032 | } | 1025 | } |
1026 | EXPORT_SYMBOL(cxgb4_inline_tx_skb); | ||
1033 | 1027 | ||
1034 | static void *inline_tx_skb_header(const struct sk_buff *skb, | 1028 | static void *inline_tx_skb_header(const struct sk_buff *skb, |
1035 | const struct sge_txq *q, void *pos, | 1029 | const struct sge_txq *q, void *pos, |
@@ -1199,6 +1193,12 @@ out_free: dev_kfree_skb_any(skb); | |||
1199 | 1193 | ||
1200 | pi = netdev_priv(dev); | 1194 | pi = netdev_priv(dev); |
1201 | adap = pi->adapter; | 1195 | adap = pi->adapter; |
1196 | ssi = skb_shinfo(skb); | ||
1197 | #ifdef CONFIG_CHELSIO_IPSEC_INLINE | ||
1198 | if (xfrm_offload(skb) && !ssi->gso_size) | ||
1199 | return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev); | ||
1200 | #endif /* CHELSIO_IPSEC_INLINE */ | ||
1201 | |||
1202 | qidx = skb_get_queue_mapping(skb); | 1202 | qidx = skb_get_queue_mapping(skb); |
1203 | if (ptp_enabled) { | 1203 | if (ptp_enabled) { |
1204 | spin_lock(&adap->ptp_lock); | 1204 | spin_lock(&adap->ptp_lock); |
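The hunk above and the one at -1264 below work together: the skb_shinfo() lookup is hoisted ahead of queue selection so the new inline-IPsec check can run first. A hedged sketch of the resulting entry sequence in the xmit handler (declarations and error paths elided; all names are taken from the diff):

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);			/* hoisted: needed just below */
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	/* Packets carrying an xfrm offload state and no GSO work are
	 * diverted to the crypto ULD, which builds the IPsec work request.
	 */
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif
	qidx = skb_get_queue_mapping(skb);	/* normal TX path continues */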
@@ -1215,7 +1215,7 @@ out_free: dev_kfree_skb_any(skb); | |||
1215 | } | 1215 | } |
1216 | skb_tx_timestamp(skb); | 1216 | skb_tx_timestamp(skb); |
1217 | 1217 | ||
1218 | reclaim_completed_tx(adap, &q->q, true); | 1218 | cxgb4_reclaim_completed_tx(adap, &q->q, true); |
1219 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; | 1219 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; |
1220 | 1220 | ||
1221 | #ifdef CONFIG_CHELSIO_T4_FCOE | 1221 | #ifdef CONFIG_CHELSIO_T4_FCOE |
@@ -1245,7 +1245,7 @@ out_free: dev_kfree_skb_any(skb); | |||
1245 | immediate = true; | 1245 | immediate = true; |
1246 | 1246 | ||
1247 | if (!immediate && | 1247 | if (!immediate && |
1248 | unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { | 1248 | unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { |
1249 | q->mapping_err++; | 1249 | q->mapping_err++; |
1250 | if (ptp_enabled) | 1250 | if (ptp_enabled) |
1251 | spin_unlock(&adap->ptp_lock); | 1251 | spin_unlock(&adap->ptp_lock); |
@@ -1264,7 +1264,6 @@ out_free: dev_kfree_skb_any(skb); | |||
1264 | end = (u64 *)wr + flits; | 1264 | end = (u64 *)wr + flits; |
1265 | 1265 | ||
1266 | len = immediate ? skb->len : 0; | 1266 | len = immediate ? skb->len : 0; |
1267 | ssi = skb_shinfo(skb); | ||
1268 | if (ssi->gso_size) { | 1267 | if (ssi->gso_size) { |
1269 | struct cpl_tx_pkt_lso *lso = (void *)wr; | 1268 | struct cpl_tx_pkt_lso *lso = (void *)wr; |
1270 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; | 1269 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; |
@@ -1341,13 +1340,13 @@ out_free: dev_kfree_skb_any(skb); | |||
1341 | cpl->ctrl1 = cpu_to_be64(cntrl); | 1340 | cpl->ctrl1 = cpu_to_be64(cntrl); |
1342 | 1341 | ||
1343 | if (immediate) { | 1342 | if (immediate) { |
1344 | inline_tx_skb(skb, &q->q, cpl + 1); | 1343 | cxgb4_inline_tx_skb(skb, &q->q, cpl + 1); |
1345 | dev_consume_skb_any(skb); | 1344 | dev_consume_skb_any(skb); |
1346 | } else { | 1345 | } else { |
1347 | int last_desc; | 1346 | int last_desc; |
1348 | 1347 | ||
1349 | write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, | 1348 | cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), |
1350 | addr); | 1349 | end, 0, addr); |
1351 | skb_orphan(skb); | 1350 | skb_orphan(skb); |
1352 | 1351 | ||
1353 | last_desc = q->q.pidx + ndesc - 1; | 1352 | last_desc = q->q.pidx + ndesc - 1; |
@@ -1359,7 +1358,7 @@ out_free: dev_kfree_skb_any(skb); | |||
1359 | 1358 | ||
1360 | txq_advance(&q->q, ndesc); | 1359 | txq_advance(&q->q, ndesc); |
1361 | 1360 | ||
1362 | ring_tx_db(adap, &q->q, ndesc); | 1361 | cxgb4_ring_tx_db(adap, &q->q, ndesc); |
1363 | if (ptp_enabled) | 1362 | if (ptp_enabled) |
1364 | spin_unlock(&adap->ptp_lock); | 1363 | spin_unlock(&adap->ptp_lock); |
1365 | return NETDEV_TX_OK; | 1364 | return NETDEV_TX_OK; |
@@ -1369,9 +1368,9 @@ out_free: dev_kfree_skb_any(skb); | |||
1369 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs | 1368 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs |
1370 | * @q: the SGE control Tx queue | 1369 | * @q: the SGE control Tx queue |
1371 | * | 1370 | * |
1372 | * This is a variant of reclaim_completed_tx() that is used for Tx queues | 1371 | * This is a variant of cxgb4_reclaim_completed_tx() that is used |
1373 | * that send only immediate data (presently just the control queues) and | 1372 | * for Tx queues that send only immediate data (presently just |
1374 | * thus do not have any sk_buffs to release. | 1373 | * the control queues) and thus do not have any sk_buffs to release. |
1375 | */ | 1374 | */ |
1376 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | 1375 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) |
1377 | { | 1376 | { |
@@ -1446,13 +1445,13 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) | |||
1446 | } | 1445 | } |
1447 | 1446 | ||
1448 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; | 1447 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; |
1449 | inline_tx_skb(skb, &q->q, wr); | 1448 | cxgb4_inline_tx_skb(skb, &q->q, wr); |
1450 | 1449 | ||
1451 | txq_advance(&q->q, ndesc); | 1450 | txq_advance(&q->q, ndesc); |
1452 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) | 1451 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) |
1453 | ctrlq_check_stop(q, wr); | 1452 | ctrlq_check_stop(q, wr); |
1454 | 1453 | ||
1455 | ring_tx_db(q->adap, &q->q, ndesc); | 1454 | cxgb4_ring_tx_db(q->adap, &q->q, ndesc); |
1456 | spin_unlock(&q->sendq.lock); | 1455 | spin_unlock(&q->sendq.lock); |
1457 | 1456 | ||
1458 | kfree_skb(skb); | 1457 | kfree_skb(skb); |
@@ -1487,7 +1486,7 @@ static void restart_ctrlq(unsigned long data) | |||
1487 | txq_advance(&q->q, ndesc); | 1486 | txq_advance(&q->q, ndesc); |
1488 | spin_unlock(&q->sendq.lock); | 1487 | spin_unlock(&q->sendq.lock); |
1489 | 1488 | ||
1490 | inline_tx_skb(skb, &q->q, wr); | 1489 | cxgb4_inline_tx_skb(skb, &q->q, wr); |
1491 | kfree_skb(skb); | 1490 | kfree_skb(skb); |
1492 | 1491 | ||
1493 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { | 1492 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { |
@@ -1500,14 +1499,15 @@ static void restart_ctrlq(unsigned long data) | |||
1500 | } | 1499 | } |
1501 | } | 1500 | } |
1502 | if (written > 16) { | 1501 | if (written > 16) { |
1503 | ring_tx_db(q->adap, &q->q, written); | 1502 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
1504 | written = 0; | 1503 | written = 0; |
1505 | } | 1504 | } |
1506 | spin_lock(&q->sendq.lock); | 1505 | spin_lock(&q->sendq.lock); |
1507 | } | 1506 | } |
1508 | q->full = 0; | 1507 | q->full = 0; |
1509 | ringdb: if (written) | 1508 | ringdb: |
1510 | ring_tx_db(q->adap, &q->q, written); | 1509 | if (written) |
1510 | cxgb4_ring_tx_db(q->adap, &q->q, written); | ||
1511 | spin_unlock(&q->sendq.lock); | 1511 | spin_unlock(&q->sendq.lock); |
1512 | } | 1512 | } |
1513 | 1513 | ||
@@ -1650,7 +1650,7 @@ static void service_ofldq(struct sge_uld_txq *q) | |||
1650 | */ | 1650 | */ |
1651 | spin_unlock(&q->sendq.lock); | 1651 | spin_unlock(&q->sendq.lock); |
1652 | 1652 | ||
1653 | reclaim_completed_tx(q->adap, &q->q, false); | 1653 | cxgb4_reclaim_completed_tx(q->adap, &q->q, false); |
1654 | 1654 | ||
1655 | flits = skb->priority; /* previously saved */ | 1655 | flits = skb->priority; /* previously saved */ |
1656 | ndesc = flits_to_desc(flits); | 1656 | ndesc = flits_to_desc(flits); |
@@ -1661,9 +1661,9 @@ static void service_ofldq(struct sge_uld_txq *q) | |||
1661 | 1661 | ||
1662 | pos = (u64 *)&q->q.desc[q->q.pidx]; | 1662 | pos = (u64 *)&q->q.desc[q->q.pidx]; |
1663 | if (is_ofld_imm(skb)) | 1663 | if (is_ofld_imm(skb)) |
1664 | inline_tx_skb(skb, &q->q, pos); | 1664 | cxgb4_inline_tx_skb(skb, &q->q, pos); |
1665 | else if (map_skb(q->adap->pdev_dev, skb, | 1665 | else if (cxgb4_map_skb(q->adap->pdev_dev, skb, |
1666 | (dma_addr_t *)skb->head)) { | 1666 | (dma_addr_t *)skb->head)) { |
1667 | txq_stop_maperr(q); | 1667 | txq_stop_maperr(q); |
1668 | spin_lock(&q->sendq.lock); | 1668 | spin_lock(&q->sendq.lock); |
1669 | break; | 1669 | break; |
@@ -1694,9 +1694,9 @@ static void service_ofldq(struct sge_uld_txq *q) | |||
1694 | pos = (void *)txq->desc; | 1694 | pos = (void *)txq->desc; |
1695 | } | 1695 | } |
1696 | 1696 | ||
1697 | write_sgl(skb, &q->q, (void *)pos, | 1697 | cxgb4_write_sgl(skb, &q->q, (void *)pos, |
1698 | end, hdr_len, | 1698 | end, hdr_len, |
1699 | (dma_addr_t *)skb->head); | 1699 | (dma_addr_t *)skb->head); |
1700 | #ifdef CONFIG_NEED_DMA_MAP_STATE | 1700 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
1701 | skb->dev = q->adap->port[0]; | 1701 | skb->dev = q->adap->port[0]; |
1702 | skb->destructor = deferred_unmap_destructor; | 1702 | skb->destructor = deferred_unmap_destructor; |
@@ -1710,7 +1710,7 @@ static void service_ofldq(struct sge_uld_txq *q) | |||
1710 | txq_advance(&q->q, ndesc); | 1710 | txq_advance(&q->q, ndesc); |
1711 | written += ndesc; | 1711 | written += ndesc; |
1712 | if (unlikely(written > 32)) { | 1712 | if (unlikely(written > 32)) { |
1713 | ring_tx_db(q->adap, &q->q, written); | 1713 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
1714 | written = 0; | 1714 | written = 0; |
1715 | } | 1715 | } |
1716 | 1716 | ||
@@ -1725,7 +1725,7 @@ static void service_ofldq(struct sge_uld_txq *q) | |||
1725 | kfree_skb(skb); | 1725 | kfree_skb(skb); |
1726 | } | 1726 | } |
1727 | if (likely(written)) | 1727 | if (likely(written)) |
1728 | ring_tx_db(q->adap, &q->q, written); | 1728 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
1729 | 1729 | ||
1730 | /* Indicate that no thread is processing the Pending Send Queue | 1730 | /* Indicate that no thread is processing the Pending Send Queue |
1731 | * currently. | 1731 | * currently. |
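Taken together, this file's changes de-static and export five SGE helpers so upper-layer drivers can build their own work requests. A hedged sketch of how a ULD transmit path (e.g. the chcr inline-IPsec handler) might chain them; is_imm(), flits, and ndesc are hypothetical placeholders, while every cxgb4_-prefixed call is exported by this patch:

static int uld_tx_sketch(struct adapter *adap, struct sge_txq *q,
			 struct sk_buff *skb, unsigned int ndesc,
			 unsigned int flits)
{
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	void *pos = &q->desc[q->pidx];		/* next free descriptor */
	u64 *end = (u64 *)pos + flits;		/* end of the SGL area */

	cxgb4_reclaim_completed_tx(adap, q, true);	/* recycle old descs */

	if (is_imm(skb)) {			/* hypothetical: fits inline */
		cxgb4_inline_tx_skb(skb, q, pos);
	} else {
		if (cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)
			return -ENOMEM;		/* DMA mapping failed */
		cxgb4_write_sgl(skb, q, (struct ulptx_sgl *)pos, end,
				0, addr);
	}

	cxgb4_ring_tx_db(adap, q, ndesc);	/* tell HW about new descs */
	return 0;
}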
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 57eb4ad3485d..be3658301832 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -513,6 +513,13 @@ struct fw_ulptx_wr { | |||
513 | u64 cookie; | 513 | u64 cookie; |
514 | }; | 514 | }; |
515 | 515 | ||
516 | #define FW_ULPTX_WR_DATA_S 28 | ||
517 | #define FW_ULPTX_WR_DATA_M 0x1 | ||
518 | #define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S) | ||
519 | #define FW_ULPTX_WR_DATA_G(x) \ | ||
520 | (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M) | ||
521 | #define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U) | ||
522 | |||
516 | struct fw_tp_wr { | 523 | struct fw_tp_wr { |
517 | __be32 op_to_immdlen; | 524 | __be32 op_to_immdlen; |
518 | __be32 flowid_len16; | 525 | __be32 flowid_len16; |
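The new macros follow the established t4fw_api.h bitfield convention: _S shift, _M mask, _V value-builder, _G getter, _F single-bit flag. A hedged fragment showing the flag set and read back in a ULP_TX work-request header (FW_WR_OP_V and the FW_ULPTX_WR opcode are existing t4fw_api.h names):

	struct fw_ulptx_wr *wr = pos;		/* pos: start of the WR */

	/* Build the header word: opcode plus the new DATA bit (bit 28). */
	wr->op_to_compl = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
				      FW_ULPTX_WR_DATA_F);

	/* ... later, on the read side ... */
	if (FW_ULPTX_WR_DATA_G(be32_to_cpu(wr->op_to_compl)))
		;	/* request carries inline ULP_TX data */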
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c index 2e5d311d2438..db81ed527452 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c | |||
@@ -120,6 +120,7 @@ static struct shash_alg alg = { | |||
120 | .cra_name = "adler32", | 120 | .cra_name = "adler32", |
121 | .cra_driver_name = "adler32-zlib", | 121 | .cra_driver_name = "adler32-zlib", |
122 | .cra_priority = 100, | 122 | .cra_priority = 100, |
123 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
123 | .cra_blocksize = CHKSUM_BLOCK_SIZE, | 124 | .cra_blocksize = CHKSUM_BLOCK_SIZE, |
124 | .cra_ctxsize = sizeof(u32), | 125 | .cra_ctxsize = sizeof(u32), |
125 | .cra_module = THIS_MODULE, | 126 | .cra_module = THIS_MODULE, |
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 03b97629442c..1e26f790b03f 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h | |||
@@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | |||
327 | */ | 327 | */ |
328 | static inline int crypto_aead_encrypt(struct aead_request *req) | 328 | static inline int crypto_aead_encrypt(struct aead_request *req) |
329 | { | 329 | { |
330 | return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); | 330 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
331 | |||
332 | if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) | ||
333 | return -ENOKEY; | ||
334 | |||
335 | return crypto_aead_alg(aead)->encrypt(req); | ||
331 | } | 336 | } |
332 | 337 | ||
333 | /** | 338 | /** |
@@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req) | |||
356 | { | 361 | { |
357 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 362 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
358 | 363 | ||
364 | if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) | ||
365 | return -ENOKEY; | ||
366 | |||
359 | if (req->cryptlen < crypto_aead_authsize(aead)) | 367 | if (req->cryptlen < crypto_aead_authsize(aead)) |
360 | return -EINVAL; | 368 | return -EINVAL; |
361 | 369 | ||
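The pattern added here is the core of the key-enforcement work in this merge: crypto_aead_setkey() clears CRYPTO_TFM_NEED_KEY on success, so until then both entry points fail fast instead of operating unkeyed. A hedged fragment illustrating the observable behavior ("gcm(aes)" and the 16-byte key are examples only):

	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
	int err;

	err = crypto_aead_encrypt(req);		/* -ENOKEY: NEED_KEY still set */

	err = crypto_aead_setkey(tfm, key, 16);	/* success clears NEED_KEY */
	err = crypto_aead_encrypt(req);		/* now reaches ->encrypt() */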
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h index caaa470389e0..b83d66073db0 100644 --- a/include/crypto/chacha20.h +++ b/include/crypto/chacha20.h | |||
@@ -13,12 +13,13 @@ | |||
13 | #define CHACHA20_IV_SIZE 16 | 13 | #define CHACHA20_IV_SIZE 16 |
14 | #define CHACHA20_KEY_SIZE 32 | 14 | #define CHACHA20_KEY_SIZE 32 |
15 | #define CHACHA20_BLOCK_SIZE 64 | 15 | #define CHACHA20_BLOCK_SIZE 64 |
16 | #define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32)) | ||
16 | 17 | ||
17 | struct chacha20_ctx { | 18 | struct chacha20_ctx { |
18 | u32 key[8]; | 19 | u32 key[8]; |
19 | }; | 20 | }; |
20 | 21 | ||
21 | void chacha20_block(u32 *state, void *stream); | 22 | void chacha20_block(u32 *state, u32 *stream); |
22 | void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); | 23 | void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); |
23 | int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, | 24 | int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, |
24 | unsigned int keysize); | 25 | unsigned int keysize); |
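With the u32 * prototype, callers keep the keystream buffer in words and size it with the new CHACHA20_BLOCK_WORDS constant, removing the old void * ambiguity about alignment. A minimal sketch (state setup elided):

	u32 state[16];				/* initialized elsewhere */
	u32 stream[CHACHA20_BLOCK_WORDS];	/* 64 bytes as 16 u32 words */

	chacha20_block(state, stream);		/* emit one keystream block */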
diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 0ed31fd80242..2d1849dffb80 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h | |||
@@ -71,12 +71,11 @@ struct ahash_request { | |||
71 | 71 | ||
72 | /** | 72 | /** |
73 | * struct ahash_alg - asynchronous message digest definition | 73 | * struct ahash_alg - asynchronous message digest definition |
74 | * @init: Initialize the transformation context. Intended only to initialize the | 74 | * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the |
75 | * state of the HASH transformation at the beginning. This shall fill in | 75 | * state of the HASH transformation at the beginning. This shall fill in |
76 | * the internal structures used during the entire duration of the whole | 76 | * the internal structures used during the entire duration of the whole |
77 | * transformation. No data processing happens at this point. | 77 | * transformation. No data processing happens at this point. |
78 | * Note: mandatory. | 78 | * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This |
79 | * @update: Push a chunk of data into the driver for transformation. This | ||
80 | * function actually pushes blocks of data from upper layers into the | 79 | * function actually pushes blocks of data from upper layers into the |
81 | * driver, which then passes those to the hardware as seen fit. This | 80 | * driver, which then passes those to the hardware as seen fit. This |
82 | * function must not finalize the HASH transformation by calculating the | 81 | * function must not finalize the HASH transformation by calculating the |
@@ -85,20 +84,17 @@ struct ahash_request { | |||
85 | * context, as this function may be called in parallel with the same | 84 | * context, as this function may be called in parallel with the same |
86 | * transformation object. Data processing can happen synchronously | 85 | * transformation object. Data processing can happen synchronously |
87 | * [SHASH] or asynchronously [AHASH] at this point. | 86 | * [SHASH] or asynchronously [AHASH] at this point. |
88 | * Note: mandatory. | 87 | * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the |
89 | * @final: Retrieve result from the driver. This function finalizes the | ||
90 | * transformation and retrieves the resulting hash from the driver and | 88 | * transformation and retrieves the resulting hash from the driver and |
91 | * pushes it back to upper layers. No data processing happens at this | 89 | * pushes it back to upper layers. No data processing happens at this |
92 | * point unless hardware requires it to finish the transformation | 90 | * point unless hardware requires it to finish the transformation |
93 | * (then the data buffered by the device driver is processed). | 91 | * (then the data buffered by the device driver is processed). |
94 | * Note: mandatory. | 92 | * @finup: **[optional]** Combination of @update and @final. This function is effectively a |
95 | * @finup: Combination of @update and @final. This function is effectively a | ||
96 | * combination of @update and @final calls issued in sequence. As some | 93 | * combination of @update and @final calls issued in sequence. As some |
97 | * hardware cannot do @update and @final separately, this callback was | 94 | * hardware cannot do @update and @final separately, this callback was |
98 | * added to allow such hardware to be used at least by IPsec. Data | 95 | * added to allow such hardware to be used at least by IPsec. Data |
99 | * processing can happen synchronously [SHASH] or asynchronously [AHASH] | 96 | * processing can happen synchronously [SHASH] or asynchronously [AHASH] |
100 | * at this point. | 97 | * at this point. |
101 | * Note: optional. | ||
102 | * @digest: Combination of @init and @update and @final. This function | 98 | * @digest: Combination of @init and @update and @final. This function |
103 | * effectively behaves as the entire chain of operations, @init, | 99 | * effectively behaves as the entire chain of operations, @init, |
104 | * @update and @final issued in sequence. Just like @finup, this was | 100 | * @update and @final issued in sequence. Just like @finup, this was |
@@ -210,7 +206,6 @@ struct crypto_ahash { | |||
210 | unsigned int keylen); | 206 | unsigned int keylen); |
211 | 207 | ||
212 | unsigned int reqsize; | 208 | unsigned int reqsize; |
213 | bool has_setkey; | ||
214 | struct crypto_tfm base; | 209 | struct crypto_tfm base; |
215 | }; | 210 | }; |
216 | 211 | ||
@@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) | |||
410 | int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | 405 | int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
411 | unsigned int keylen); | 406 | unsigned int keylen); |
412 | 407 | ||
413 | static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) | ||
414 | { | ||
415 | return tfm->has_setkey; | ||
416 | } | ||
417 | |||
418 | /** | 408 | /** |
419 | * crypto_ahash_finup() - update and finalize message digest | 409 | * crypto_ahash_finup() - update and finalize message digest |
420 | * @req: reference to the ahash_request handle that holds all information | 410 | * @req: reference to the ahash_request handle that holds all information |
@@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) | |||
487 | */ | 477 | */ |
488 | static inline int crypto_ahash_import(struct ahash_request *req, const void *in) | 478 | static inline int crypto_ahash_import(struct ahash_request *req, const void *in) |
489 | { | 479 | { |
490 | return crypto_ahash_reqtfm(req)->import(req, in); | 480 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
481 | |||
482 | if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
483 | return -ENOKEY; | ||
484 | |||
485 | return tfm->import(req, in); | ||
491 | } | 486 | } |
492 | 487 | ||
493 | /** | 488 | /** |
@@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) | |||
503 | */ | 498 | */ |
504 | static inline int crypto_ahash_init(struct ahash_request *req) | 499 | static inline int crypto_ahash_init(struct ahash_request *req) |
505 | { | 500 | { |
506 | return crypto_ahash_reqtfm(req)->init(req); | 501 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
502 | |||
503 | if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
504 | return -ENOKEY; | ||
505 | |||
506 | return tfm->init(req); | ||
507 | } | 507 | } |
508 | 508 | ||
509 | /** | 509 | /** |
@@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) | |||
855 | */ | 855 | */ |
856 | static inline int crypto_shash_import(struct shash_desc *desc, const void *in) | 856 | static inline int crypto_shash_import(struct shash_desc *desc, const void *in) |
857 | { | 857 | { |
858 | return crypto_shash_alg(desc->tfm)->import(desc, in); | 858 | struct crypto_shash *tfm = desc->tfm; |
859 | |||
860 | if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
861 | return -ENOKEY; | ||
862 | |||
863 | return crypto_shash_alg(tfm)->import(desc, in); | ||
859 | } | 864 | } |
860 | 865 | ||
861 | /** | 866 | /** |
@@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) | |||
871 | */ | 876 | */ |
872 | static inline int crypto_shash_init(struct shash_desc *desc) | 877 | static inline int crypto_shash_init(struct shash_desc *desc) |
873 | { | 878 | { |
874 | return crypto_shash_alg(desc->tfm)->init(desc); | 879 | struct crypto_shash *tfm = desc->tfm; |
880 | |||
881 | if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
882 | return -ENOKEY; | ||
883 | |||
884 | return crypto_shash_alg(tfm)->init(desc); | ||
875 | } | 885 | } |
876 | 886 | ||
877 | /** | 887 | /** |
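The same -ENOKEY gate now guards the hash entry points, so an hmac transform that was never keyed fails at init/import instead of silently digesting with an implicit zero key. A hedged fragment ("hmac(sha256)" is an example algorithm):

	struct crypto_shash *hmac = crypto_alloc_shash("hmac(sha256)", 0, 0);
	SHASH_DESC_ON_STACK(desc, hmac);
	int err;

	desc->tfm = hmac;
	err = crypto_shash_init(desc);		/* -ENOKEY before setkey */

	err = crypto_shash_setkey(hmac, key, keylen);
	err = crypto_shash_init(desc);		/* NEED_KEY cleared, proceeds */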
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index c2bae8da642c..27040a46d50a 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h | |||
@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) | |||
90 | return alg->setkey != shash_no_setkey; | 90 | return alg->setkey != shash_no_setkey; |
91 | } | 91 | } |
92 | 92 | ||
93 | bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); | ||
94 | |||
93 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | 95 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, |
94 | struct hash_alg_common *alg, | 96 | struct hash_alg_common *alg, |
95 | struct crypto_instance *inst); | 97 | struct crypto_instance *inst); |
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index ccad9b2c9bd6..0f6ddac1acfc 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h | |||
@@ -28,17 +28,6 @@ struct crypto_scomp { | |||
28 | * @free_ctx: Function frees context allocated with alloc_ctx | 28 | * @free_ctx: Function frees context allocated with alloc_ctx |
29 | * @compress: Function performs a compress operation | 29 | * @compress: Function performs a compress operation |
30 | * @decompress: Function performs a de-compress operation | 30 | * @decompress: Function performs a de-compress operation |
31 | * @init: Initialize the cryptographic transformation object. | ||
32 | * This function is used to initialize the cryptographic | ||
33 | * transformation object. This function is called only once at | ||
34 | * the instantiation time, right after the transformation context | ||
35 | * was allocated. In case the cryptographic hardware has some | ||
36 | * special requirements which need to be handled by software, this | ||
37 | * function shall check for the precise requirement of the | ||
38 | * transformation and put any software fallbacks in place. | ||
39 | * @exit: Deinitialize the cryptographic transformation object. This is a | ||
40 | * counterpart to @init, used to remove various changes set in | ||
41 | * @init. | ||
42 | * @base: Common crypto API algorithm data structure | 31 | * @base: Common crypto API algorithm data structure |
43 | */ | 32 | */ |
44 | struct scomp_alg { | 33 | struct scomp_alg { |
diff --git a/include/crypto/null.h b/include/crypto/null.h index 5757c0a4b321..15aeef6e30ef 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h | |||
@@ -12,14 +12,4 @@ | |||
12 | struct crypto_skcipher *crypto_get_default_null_skcipher(void); | 12 | struct crypto_skcipher *crypto_get_default_null_skcipher(void); |
13 | void crypto_put_default_null_skcipher(void); | 13 | void crypto_put_default_null_skcipher(void); |
14 | 14 | ||
15 | static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) | ||
16 | { | ||
17 | return crypto_get_default_null_skcipher(); | ||
18 | } | ||
19 | |||
20 | static inline void crypto_put_default_null_skcipher2(void) | ||
21 | { | ||
22 | crypto_put_default_null_skcipher(); | ||
23 | } | ||
24 | |||
25 | #endif | 15 | #endif |
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index c65567d01e8e..f718a19da82f 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h | |||
@@ -31,8 +31,6 @@ struct poly1305_desc_ctx { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | int crypto_poly1305_init(struct shash_desc *desc); | 33 | int crypto_poly1305_init(struct shash_desc *desc); |
34 | int crypto_poly1305_setkey(struct crypto_shash *tfm, | ||
35 | const u8 *key, unsigned int keylen); | ||
36 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | 34 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
37 | const u8 *src, unsigned int srclen); | 35 | const u8 *src, unsigned int srclen); |
38 | int crypto_poly1305_update(struct shash_desc *desc, | 36 | int crypto_poly1305_update(struct shash_desc *desc, |
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h new file mode 100644 index 000000000000..19ed48aefc86 --- /dev/null +++ b/include/crypto/salsa20.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Common values for the Salsa20 algorithm | ||
4 | */ | ||
5 | |||
6 | #ifndef _CRYPTO_SALSA20_H | ||
7 | #define _CRYPTO_SALSA20_H | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | |||
11 | #define SALSA20_IV_SIZE 8 | ||
12 | #define SALSA20_MIN_KEY_SIZE 16 | ||
13 | #define SALSA20_MAX_KEY_SIZE 32 | ||
14 | #define SALSA20_BLOCK_SIZE 64 | ||
15 | |||
16 | struct crypto_skcipher; | ||
17 | |||
18 | struct salsa20_ctx { | ||
19 | u32 initial_state[16]; | ||
20 | }; | ||
21 | |||
22 | void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, | ||
23 | const u8 *iv); | ||
24 | int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, | ||
25 | unsigned int keysize); | ||
26 | |||
27 | #endif /* _CRYPTO_SALSA20_H */ | ||
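This new header lets the generic and x86 Salsa20 implementations share key expansion and state setup. A hedged sketch of the intended call sequence inside an skcipher encrypt path (walk handling elided; crypto_skcipher_ctx() is the usual tfm-context accessor):

	const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 state[16];

	/* Combine the expanded key with the 8-byte per-request IV. */
	crypto_salsa20_init(state, ctx, req->iv);

	/* ... emit SALSA20_BLOCK_SIZE-byte keystream blocks from state ... */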
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index b9d9bd553b48..080f60c2e6b1 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | struct sha3_state { | 20 | struct sha3_state { |
21 | u64 st[25]; | 21 | u64 st[25]; |
22 | unsigned int md_len; | ||
23 | unsigned int rsiz; | 22 | unsigned int rsiz; |
24 | unsigned int rsizw; | 23 | unsigned int rsizw; |
25 | 24 | ||
@@ -27,4 +26,9 @@ struct sha3_state { | |||
27 | u8 buf[SHA3_224_BLOCK_SIZE]; | 26 | u8 buf[SHA3_224_BLOCK_SIZE]; |
28 | }; | 27 | }; |
29 | 28 | ||
29 | int crypto_sha3_init(struct shash_desc *desc); | ||
30 | int crypto_sha3_update(struct shash_desc *desc, const u8 *data, | ||
31 | unsigned int len); | ||
32 | int crypto_sha3_final(struct shash_desc *desc, u8 *out); | ||
33 | |||
30 | #endif | 34 | #endif |
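Exporting init/update/final lets accelerated drivers fall back to the generic KECCAK transform when the fast path is unavailable, which is how the new arm64 v8.2 implementation in this merge uses them. A hedged sketch of that pattern (may_use_simd() gates NEON use on arm64; the surrounding driver is abbreviated):

static int sha3_arch_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	if (!may_use_simd())		/* e.g. called from interrupt context */
		return crypto_sha3_update(desc, data, len);

	/* ... SIMD-accelerated absorb path ... */
	return 0;
}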
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 562001cb412b..2f327f090c3e 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h | |||
@@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, | |||
401 | return tfm->setkey(tfm, key, keylen); | 401 | return tfm->setkey(tfm, key, keylen); |
402 | } | 402 | } |
403 | 403 | ||
404 | static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) | ||
405 | { | ||
406 | return tfm->keysize; | ||
407 | } | ||
408 | |||
409 | static inline unsigned int crypto_skcipher_default_keysize( | 404 | static inline unsigned int crypto_skcipher_default_keysize( |
410 | struct crypto_skcipher *tfm) | 405 | struct crypto_skcipher *tfm) |
411 | { | 406 | { |
@@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) | |||
442 | { | 437 | { |
443 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | 438 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
444 | 439 | ||
440 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
441 | return -ENOKEY; | ||
442 | |||
445 | return tfm->encrypt(req); | 443 | return tfm->encrypt(req); |
446 | } | 444 | } |
447 | 445 | ||
@@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req) | |||
460 | { | 458 | { |
461 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | 459 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
462 | 460 | ||
461 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
462 | return -ENOKEY; | ||
463 | |||
463 | return tfm->decrypt(req); | 464 | return tfm->decrypt(req); |
464 | } | 465 | } |
465 | 466 | ||
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 78508ca4b108..7e6e84cf6383 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -107,8 +107,16 @@ | |||
107 | #define CRYPTO_ALG_INTERNAL 0x00002000 | 107 | #define CRYPTO_ALG_INTERNAL 0x00002000 |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Set if the algorithm has a ->setkey() method but can be used without | ||
111 | * calling it first, i.e. there is a default key. | ||
112 | */ | ||
113 | #define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 | ||
114 | |||
115 | /* | ||
110 | * Transform masks and values (for crt_flags). | 116 | * Transform masks and values (for crt_flags). |
111 | */ | 117 | */ |
118 | #define CRYPTO_TFM_NEED_KEY 0x00000001 | ||
119 | |||
112 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 | 120 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 |
113 | #define CRYPTO_TFM_RES_MASK 0xfff00000 | 121 | #define CRYPTO_TFM_RES_MASK 0xfff00000 |
114 | 122 | ||
@@ -447,7 +455,7 @@ struct crypto_alg { | |||
447 | unsigned int cra_alignmask; | 455 | unsigned int cra_alignmask; |
448 | 456 | ||
449 | int cra_priority; | 457 | int cra_priority; |
450 | atomic_t cra_refcnt; | 458 | refcount_t cra_refcnt; |
451 | 459 | ||
452 | char cra_name[CRYPTO_MAX_ALG_NAME]; | 460 | char cra_name[CRYPTO_MAX_ALG_NAME]; |
453 | char cra_driver_name[CRYPTO_MAX_ALG_NAME]; | 461 | char cra_driver_name[CRYPTO_MAX_ALG_NAME]; |
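These two flags carry the enforcement contract: the API marks a keyed algorithm's tfm with CRYPTO_TFM_NEED_KEY until ->setkey() succeeds, and an algorithm whose key is genuinely optional (it has a usable default) opts out with CRYPTO_ALG_OPTIONAL_KEY, as the adler32 change earlier in this diff does. A hedged fragment of such a declaration:

static struct shash_alg alg = {
	/* ... init/update/final/setkey callbacks ... */
	.base = {
		.cra_name	= "adler32",
		/* has ->setkey(), but the default seed is also valid */
		.cra_flags	= CRYPTO_ALG_OPTIONAL_KEY,
		/* ... */
	},
};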
diff --git a/kernel/padata.c b/kernel/padata.c index 57c0074d50cc..d568cc56405f 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * padata.c - generic interface to process data streams in parallel | 3 | * padata.c - generic interface to process data streams in parallel |
3 | * | 4 | * |
diff --git a/lib/chacha20.c b/lib/chacha20.c index 250ceed9ec9a..c1cc50fb68c9 100644 --- a/lib/chacha20.c +++ b/lib/chacha20.c | |||
@@ -16,12 +16,7 @@ | |||
16 | #include <asm/unaligned.h> | 16 | #include <asm/unaligned.h> |
17 | #include <crypto/chacha20.h> | 17 | #include <crypto/chacha20.h> |
18 | 18 | ||
19 | static inline u32 rotl32(u32 v, u8 n) | 19 | void chacha20_block(u32 *state, u32 *stream) |
20 | { | ||
21 | return (v << n) | (v >> (sizeof(v) * 8 - n)); | ||
22 | } | ||
23 | |||
24 | extern void chacha20_block(u32 *state, void *stream) | ||
25 | { | 20 | { |
26 | u32 x[16], *out = stream; | 21 | u32 x[16], *out = stream; |
27 | int i; | 22 | int i; |
@@ -30,45 +25,45 @@ extern void chacha20_block(u32 *state, void *stream) | |||
30 | x[i] = state[i]; | 25 | x[i] = state[i]; |
31 | 26 | ||
32 | for (i = 0; i < 20; i += 2) { | 27 | for (i = 0; i < 20; i += 2) { |
33 | x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16); | 28 | x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); |
34 | x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16); | 29 | x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); |
35 | x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16); | 30 | x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); |
36 | x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16); | 31 | x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16); |
37 | 32 | ||
38 | x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12); | 33 | x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12); |
39 | x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12); | 34 | x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12); |
40 | x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12); | 35 | x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12); |
41 | x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12); | 36 | x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12); |
42 | 37 | ||
43 | x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8); | 38 | x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8); |
44 | x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8); | 39 | x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8); |
45 | x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8); | 40 | x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8); |
46 | x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8); | 41 | x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8); |
47 | 42 | ||
48 | x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7); | 43 | x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7); |
49 | x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7); | 44 | x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7); |
50 | x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7); | 45 | x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7); |
51 | x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7); | 46 | x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7); |
52 | 47 | ||
53 | x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16); | 48 | x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16); |
54 | x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16); | 49 | x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16); |
55 | x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16); | 50 | x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16); |
56 | x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16); | 51 | x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16); |
57 | 52 | ||
58 | x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12); | 53 | x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12); |
59 | x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12); | 54 | x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12); |
60 | x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12); | 55 | x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12); |
61 | x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12); | 56 | x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12); |
62 | 57 | ||
63 | x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8); | 58 | x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8); |
64 | x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8); | 59 | x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8); |
65 | x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8); | 60 | x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8); |
66 | x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8); | 61 | x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8); |
67 | 62 | ||
68 | x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7); | 63 | x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7); |
69 | x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7); | 64 | x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7); |
70 | x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7); | 65 | x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); |
71 | x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7); | 66 | x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); |
72 | } | 67 | } |
73 | 68 | ||
74 | for (i = 0; i < ARRAY_SIZE(x); i++) | 69 | for (i = 0; i < ARRAY_SIZE(x); i++) |