author:    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-23 00:04:48 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2015-06-23 00:04:48 -0400
commit:    44d21c3f3a2ef2f58b18bda64c52c99e723f3f4a
tree:      5146cf96cb0dbd7121176d484417ab942c92dcd4
parent:    efdfce2b7ff3205ba0fba10270b92b80bbc6187d
parent:    fe55dfdcdfabf160ab0c14617725c57c7a1facfc
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 4.2:
API:
- Convert RNG interface to new style.
- New AEAD interface with one SG list for AD and plain/cipher text.
All external AEAD users have been converted.
- New asymmetric key interface (akcipher).
Algorithms:
- Chacha20, Poly1305 and RFC7539 support.
- New RSA implementation.
- Jitter RNG.
- DRBG is now seeded with both /dev/random and Jitter RNG. If kernel
pool isn't ready then DRBG will be reseeded when it is.
- DRBG is now the default crypto API RNG, replacing krng.
- 842 compression (previously part of powerpc nx driver).
Drivers:
- Accelerated SHA-512 for arm64.
- New Marvell CESA driver that supports DMA and more algorithms.
- Updated powerpc nx 842 support.
- Added support for SEC1 hardware to talitos"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (292 commits)
crypto: marvell/cesa - remove COMPILE_TEST dependency
crypto: algif_aead - Temporarily disable all AEAD algorithms
crypto: af_alg - Forbid the use internal algorithms
crypto: echainiv - Only hold RNG during initialisation
crypto: seqiv - Add compatibility support without RNG
crypto: eseqiv - Offer normal cipher functionality without RNG
crypto: chainiv - Offer normal cipher functionality without RNG
crypto: user - Add CRYPTO_MSG_DELRNG
crypto: user - Move cryptouser.h to uapi
crypto: rng - Do not free default RNG when it becomes unused
crypto: skcipher - Allow givencrypt to be NULL
crypto: sahara - propagate the error on clk_disable_unprepare() failure
crypto: rsa - fix invalid select for AKCIPHER
crypto: picoxcell - Update to the current clk API
crypto: nx - Check for bogus firmware properties
crypto: marvell/cesa - add DT bindings documentation
crypto: marvell/cesa - add support for Kirkwood and Dove SoCs
crypto: marvell/cesa - add support for Orion SoCs
crypto: marvell/cesa - add allhwsupport module parameter
crypto: marvell/cesa - add support for all armada SoCs
...
174 files changed, 23569 insertions, 7249 deletions
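The headline API change above — one scatterlist covering both the associated data and the plain/cipher text — is easiest to see from the caller's side. What follows is a minimal sketch, not taken from this series: demo_gcm_encrypt() is a hypothetical helper, a synchronous gcm(aes) implementation is assumed to be available, and buf is assumed to hold the AD, the text, and room for the tag back to back.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_gcm_encrypt(u8 *buf, unsigned int assoclen,
			    unsigned int cryptlen, const u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	/* Mask out async implementations to keep the sketch synchronous. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 16);
	if (err)
		goto out_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}
	aead_request_set_callback(req, 0, NULL, NULL);

	/* One SG list: AD, then text, then space for the auth tag. */
	sg_init_one(&sg, buf, assoclen + cryptlen + crypto_aead_authsize(tfm));
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
	/* The new call that tells the API how much of the list is AD. */
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}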
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index efc8d90a9a3f..0992531ffefb 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -119,7 +119,7 @@
 
 <para>
  Note: The terms "transformation" and cipher algorithm are used
- interchangably.
+ interchangeably.
 </para>
 </sect1>
 
@@ -536,8 +536,8 @@
 
 <para>
  For other use cases of AEAD ciphers, the ASCII art applies as
- well, but the caller may not use the GIVCIPHER interface. In
- this case, the caller must generate the IV.
+ well, but the caller may not use the AEAD cipher with a separate
+ IV generator. In this case, the caller must generate the IV.
 </para>
 
 <para>
@@ -584,8 +584,8 @@ kernel crypto API | IPSEC Layer
 |
 +-----------+ |
 | | (1)
-| givcipher | <----------------------------------- esp_output
-| (seqiv) | ---+
+| aead | <----------------------------------- esp_output
+| (seqniv) | ---+
 +-----------+ |
 | (2)
 +-----------+ |
@@ -620,8 +620,8 @@ kernel crypto API | IPSEC Layer
 <orderedlist>
 <listitem>
 <para>
- esp_output() invokes crypto_aead_givencrypt() to trigger an encryption
- operation of the GIVCIPHER implementation.
+ esp_output() invokes crypto_aead_encrypt() to trigger an encryption
+ operation of the AEAD cipher with IV generator.
 </para>
 
 <para>
@@ -1563,7 +1563,7 @@ struct sockaddr_alg sa = {
 
 <sect1><title>Zero-Copy Interface</title>
 <para>
- In addition to the send/write/read/recv system call familty, the AF_ALG
+ In addition to the send/write/read/recv system call family, the AF_ALG
 interface can be accessed with the zero-copy interface of splice/vmsplice.
 As the name indicates, the kernel tries to avoid a copy operation into
 kernel space.
@@ -1669,9 +1669,19 @@ read(opfd, out, outlen);
 </chapter>
 
 <chapter id="API"><title>Programming Interface</title>
+ <para>
+  Please note that the kernel crypto API contains the AEAD givcrypt
+  API (crypto_aead_giv* and aead_givcrypt_* function calls in
+  include/crypto/aead.h). This API is obsolete and will be removed
+  in the future. To obtain the functionality of an AEAD cipher with
+  internal IV generation, use the IV generator as a regular cipher.
+  For example, rfc4106(gcm(aes)) is the AEAD cipher with external
+  IV generation and seqniv(rfc4106(gcm(aes))) implies that the kernel
+  crypto API generates the IV. Different IV generators are available.
+ </para>
 <sect1><title>Block Cipher Context Data Structures</title>
 !Pinclude/linux/crypto.h Block Cipher Context Data Structures
-!Finclude/linux/crypto.h aead_request
+!Finclude/crypto/aead.h aead_request
 </sect1>
 <sect1><title>Block Cipher Algorithm Definitions</title>
 !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
@@ -1680,7 +1690,7 @@ read(opfd, out, outlen);
 !Finclude/linux/crypto.h aead_alg
 !Finclude/linux/crypto.h blkcipher_alg
 !Finclude/linux/crypto.h cipher_alg
-!Finclude/linux/crypto.h rng_alg
+!Finclude/crypto/rng.h rng_alg
 </sect1>
 <sect1><title>Asynchronous Block Cipher API</title>
 !Pinclude/linux/crypto.h Asynchronous Block Cipher API
@@ -1704,26 +1714,27 @@ read(opfd, out, outlen);
 !Finclude/linux/crypto.h ablkcipher_request_set_crypt
 </sect1>
 <sect1><title>Authenticated Encryption With Associated Data (AEAD) Cipher API</title>
-!Pinclude/linux/crypto.h Authenticated Encryption With Associated Data (AEAD) Cipher API
-!Finclude/linux/crypto.h crypto_alloc_aead
-!Finclude/linux/crypto.h crypto_free_aead
-!Finclude/linux/crypto.h crypto_aead_ivsize
-!Finclude/linux/crypto.h crypto_aead_authsize
-!Finclude/linux/crypto.h crypto_aead_blocksize
-!Finclude/linux/crypto.h crypto_aead_setkey
-!Finclude/linux/crypto.h crypto_aead_setauthsize
-!Finclude/linux/crypto.h crypto_aead_encrypt
-!Finclude/linux/crypto.h crypto_aead_decrypt
+!Pinclude/crypto/aead.h Authenticated Encryption With Associated Data (AEAD) Cipher API
+!Finclude/crypto/aead.h crypto_alloc_aead
+!Finclude/crypto/aead.h crypto_free_aead
+!Finclude/crypto/aead.h crypto_aead_ivsize
+!Finclude/crypto/aead.h crypto_aead_authsize
+!Finclude/crypto/aead.h crypto_aead_blocksize
+!Finclude/crypto/aead.h crypto_aead_setkey
+!Finclude/crypto/aead.h crypto_aead_setauthsize
+!Finclude/crypto/aead.h crypto_aead_encrypt
+!Finclude/crypto/aead.h crypto_aead_decrypt
 </sect1>
 <sect1><title>Asynchronous AEAD Request Handle</title>
-!Pinclude/linux/crypto.h Asynchronous AEAD Request Handle
-!Finclude/linux/crypto.h crypto_aead_reqsize
-!Finclude/linux/crypto.h aead_request_set_tfm
-!Finclude/linux/crypto.h aead_request_alloc
-!Finclude/linux/crypto.h aead_request_free
-!Finclude/linux/crypto.h aead_request_set_callback
-!Finclude/linux/crypto.h aead_request_set_crypt
-!Finclude/linux/crypto.h aead_request_set_assoc
+!Pinclude/crypto/aead.h Asynchronous AEAD Request Handle
+!Finclude/crypto/aead.h crypto_aead_reqsize
+!Finclude/crypto/aead.h aead_request_set_tfm
+!Finclude/crypto/aead.h aead_request_alloc
+!Finclude/crypto/aead.h aead_request_free
+!Finclude/crypto/aead.h aead_request_set_callback
+!Finclude/crypto/aead.h aead_request_set_crypt
+!Finclude/crypto/aead.h aead_request_set_assoc
+!Finclude/crypto/aead.h aead_request_set_ad
 </sect1>
 <sect1><title>Synchronous Block Cipher API</title>
 !Pinclude/linux/crypto.h Synchronous Block Cipher API
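The rng_alg relocation above goes with the RNG interface conversion called out in the pull message. Below is a minimal sketch of a new-style caller, again not from this series: demo_get_random() is a hypothetical helper, and it assumes the converted crypto_rng_get_bytes() returns 0 on success.

#include <crypto/rng.h>
#include <linux/err.h>

static int demo_get_random(u8 *out, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	/* "stdrng" now resolves to the DRBG by default in this release. */
	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* crypto_rng_reset() could reseed here if the caller has entropy. */
	err = crypto_rng_get_bytes(rng, out, len);

	crypto_free_rng(rng);
	return err;
}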
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
index 38988ef1336b..f0d926bf9f36 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
@@ -1,9 +1,11 @@
-Freescale SoC SEC Security Engines versions 2.x-3.x
+Freescale SoC SEC Security Engines versions 1.x-2.x-3.x
 
 Required properties:
 
 - compatible : Should contain entries for this and backward compatible
-  SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0"
+  SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0" (SEC2/3)
+  e.g., "fsl,sec1.2", "fsl,sec1.0" (SEC1)
+  warning: SEC1 and SEC2 are mutually exclusive
 - reg : Offset and length of the register set for the device
 - interrupts : the SEC's interrupt number
 - fsl,num-channels : An integer representing the number of channels
diff --git a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt
new file mode 100644
index 000000000000..c6c6a4a045bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt
@@ -0,0 +1,45 @@
+Marvell Cryptographic Engines And Security Accelerator
+
+Required properties:
+- compatible: should be one of the following string
+	      "marvell,orion-crypto"
+	      "marvell,kirkwood-crypto"
+	      "marvell,dove-crypto"
+	      "marvell,armada-370-crypto"
+	      "marvell,armada-xp-crypto"
+	      "marvell,armada-375-crypto"
+	      "marvell,armada-38x-crypto"
+- reg: base physical address of the engine and length of memory mapped
+       region. Can also contain an entry for the SRAM attached to the CESA,
+       but this representation is deprecated and marvell,crypto-srams should
+       be used instead
+- reg-names: "regs". Can contain an "sram" entry, but this representation
+	     is deprecated and marvell,crypto-srams should be used instead
+- interrupts: interrupt number
+- clocks: reference to the crypto engines clocks. This property is not
+	  required for orion and kirkwood platforms
+- clock-names: "cesaX" and "cesazX", X should be replaced by the crypto engine
+	       id.
+	       This property is not required for the orion and kirkwoord
+	       platforms.
+	       "cesazX" clocks are not required on armada-370 platforms
+- marvell,crypto-srams: phandle to crypto SRAM definitions
+
+Optional properties:
+- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not
+			    specified the whole SRAM is used (2KB)
+
+
+Examples:
+
+	crypto@90000 {
+		compatible = "marvell,armada-xp-crypto";
+		reg = <0x90000 0x10000>;
+		reg-names = "regs";
+		interrupts = <48>, <49>;
+		clocks = <&gateclk 23>, <&gateclk 23>;
+		clock-names = "cesa0", "cesa1";
+		marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>;
+		marvell,crypto-sram-size = <0x600>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt
index 47229b1a594b..c0c35f00335b 100644
--- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt
+++ b/Documentation/devicetree/bindings/crypto/mv_cesa.txt
@@ -1,20 +1,33 @@
 Marvell Cryptographic Engines And Security Accelerator
 
 Required properties:
-- compatible : should be "marvell,orion-crypto"
-- reg : base physical address of the engine and length of memory mapped
-        region, followed by base physical address of sram and its memory
-        length
-- reg-names : "regs" , "sram";
-- interrupts : interrupt number
+- compatible: should be one of the following string
+	      "marvell,orion-crypto"
+	      "marvell,kirkwood-crypto"
+	      "marvell,dove-crypto"
+- reg: base physical address of the engine and length of memory mapped
+       region. Can also contain an entry for the SRAM attached to the CESA,
+       but this representation is deprecated and marvell,crypto-srams should
+       be used instead
+- reg-names: "regs". Can contain an "sram" entry, but this representation
+	     is deprecated and marvell,crypto-srams should be used instead
+- interrupts: interrupt number
+- clocks: reference to the crypto engines clocks. This property is only
+	  required for Dove platforms
+- marvell,crypto-srams: phandle to crypto SRAM definitions
+
+Optional properties:
+- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not
+			    specified the whole SRAM is used (2KB)
 
 Examples:
 
 	crypto@30000 {
 		compatible = "marvell,orion-crypto";
-		reg = <0x30000 0x10000>,
-		      <0x4000000 0x800>;
-		reg-names = "regs" , "sram";
+		reg = <0x30000 0x10000>;
+		reg-names = "regs";
 		interrupts = <22>;
+		marvell,crypto-srams = <&crypto_sram>;
+		marvell,crypto-sram-size = <0x600>;
 		status = "okay";
 	};
diff --git a/MAINTAINERS b/MAINTAINERS
index a655435705aa..4f1e79b52cc5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4879,13 +4879,23 @@ M:	Marcelo Henrique Cerri <mhcerri@linux.vnet.ibm.com>
 M:	Fionnuala Gunter <fin@linux.vnet.ibm.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
-F:	drivers/crypto/nx/
+F:	drivers/crypto/nx/Makefile
+F:	drivers/crypto/nx/Kconfig
+F:	drivers/crypto/nx/nx-aes*
+F:	drivers/crypto/nx/nx-sha*
+F:	drivers/crypto/nx/nx.*
+F:	drivers/crypto/nx/nx_csbcpb.h
+F:	drivers/crypto/nx/nx_debugfs.h
 
 IBM Power 842 compression accelerator
 M:	Dan Streetman <ddstreet@us.ibm.com>
 S:	Supported
-F:	drivers/crypto/nx/nx-842.c
-F:	include/linux/nx842.h
+F:	drivers/crypto/nx/Makefile
+F:	drivers/crypto/nx/Kconfig
+F:	drivers/crypto/nx/nx-842*
+F:	include/linux/sw842.h
+F:	crypto/842.c
+F:	lib/842/
 
 IBM Power Linux RAID adapter
 M:	Brian King <brking@us.ibm.com>
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 8da2207b0072..27ed1b1cd1d7 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -53,20 +53,13 @@ config CRYPTO_SHA256_ARM
 	  SHA-256 secure hash standard (DFIPS 180-2) implemented
 	  using optimized ARM assembler and NEON, when available.
 
-config CRYPTO_SHA512_ARM_NEON
-	tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
-	depends on KERNEL_MODE_NEON
-	select CRYPTO_SHA512
+config CRYPTO_SHA512_ARM
+	tristate "SHA-384/512 digest algorithm (ARM-asm and NEON)"
 	select CRYPTO_HASH
+	depends on !CPU_V7M
 	help
-	  SHA-512 secure hash standard (DFIPS 180-2) implemented
-	  using ARM NEON instructions, when available.
-
-	  This version of SHA implements a 512 bit hash with 256 bits of
-	  security against collision attacks.
-
-	  This code also includes SHA-384, a 384 bit hash with 192 bits
-	  of security against collision attacks.
+	  SHA-512 secure hash standard (DFIPS 180-2) implemented
+	  using optimized ARM assembler and NEON, when available.
 
 config CRYPTO_AES_ARM
 	tristate "AES cipher algorithms (ARM-asm)"
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 6ea828241fcb..fc5150702b64 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
 obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
-obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
 
 ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -30,7 +30,8 @@ sha1-arm-y := sha1-armv4-large.o sha1_glue.o
 sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
 sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
 sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
-sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
+sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
+sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
 sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
 sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
 aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
@@ -45,4 +46,7 @@ $(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
 $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
 	$(call cmd,perl)
 
-.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S
+$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
+	$(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
index 8cfa468ee570..987aa632c9f0 100644
--- a/arch/arm/crypto/aes-ce-core.S
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -101,15 +101,14 @@
 	\dround q10, q11
 	blo 0f @ AES-128: 10 rounds
 	vld1.8 {q10-q11}, [ip]!
-	beq 1f @ AES-192: 12 rounds
 	\dround q12, q13
+	beq 1f @ AES-192: 12 rounds
 	vld1.8 {q12-q13}, [ip]
 	\dround q10, q11
 0:	\fround q12, q13, q14
 	bx lr
 
-1:	\dround q12, q13
-	\fround q10, q11, q14
+1:	\fround q10, q11, q14
 	bx lr
 	.endm
 
@@ -122,8 +121,8 @@
 	 * q2 : third in/output block (_3x version only)
 	 * q8 : first round key
 	 * q9 : secound round key
-	 * ip : address of 3rd round key
 	 * q14 : final round key
+	 * r2 : address of round key array
 	 * r3 : number of rounds
 	 */
 	.align 6
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
new file mode 100644
index 000000000000..a2b11a844357
--- /dev/null
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -0,0 +1,649 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPL terms is granted.
+# ====================================================================
+
+# SHA512 block procedure for ARMv4. September 2007.
+
+# This code is ~4.5 (four and a half) times faster than code generated
+# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+# Xscale PXA250 core].
+#
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 6% improvement on
+# Cortex A8 core and ~40 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 7%
+# improvement on Coxtex A8 core and ~38 cycles per byte.
+
+# March 2011.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is disappointing result.
+# Technical writers asserted that 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
+# for further details. On side note Cortex-A15 processes one byte in
+# 16 cycles.
+
+# Byte order [in]dependence. =========================================
+#
+# Originally caller was expected to maintain specific *dword* order in
+# h[0-7], namely with most significant dword at *lower* address, which
+# was reflected in below two parameters as 0 and 4. Now caller is
+# expected to maintain native byte order for whole 64-bit values.
+$hi="HI";
+$lo="LO";
+# ====================================================================
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$ctx="r0";	# parameter block
+$inp="r1";
+$len="r2";
+
+$Tlo="r3";
+$Thi="r4";
+$Alo="r5";
+$Ahi="r6";
+$Elo="r7";
+$Ehi="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+############	r13 is stack pointer
+$Ktbl="r14";
+############	r15 is program counter
+
+$Aoff=8*0;
+$Boff=8*1;
+$Coff=8*2;
+$Doff=8*3;
+$Eoff=8*4;
+$Foff=8*5;
+$Goff=8*6;
+$Hoff=8*7;
+$Xoff=8*8;
+
+sub BODY_00_15() {
+my $magic = shift;
+$code.=<<___;
+	@ Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+	@ LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+	@ HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+	mov	$t0,$Elo,lsr#14
+	str	$Tlo,[sp,#$Xoff+0]
+	mov	$t1,$Ehi,lsr#14
+	str	$Thi,[sp,#$Xoff+4]
+	eor	$t0,$t0,$Ehi,lsl#18
+	ldr	$t2,[sp,#$Hoff+0]	@ h.lo
+	eor	$t1,$t1,$Elo,lsl#18
+	ldr	$t3,[sp,#$Hoff+4]	@ h.hi
+	eor	$t0,$t0,$Elo,lsr#18
+	eor	$t1,$t1,$Ehi,lsr#18
+	eor	$t0,$t0,$Ehi,lsl#14
+	eor	$t1,$t1,$Elo,lsl#14
+	eor	$t0,$t0,$Ehi,lsr#9
+	eor	$t1,$t1,$Elo,lsr#9
+	eor	$t0,$t0,$Elo,lsl#23
+	eor	$t1,$t1,$Ehi,lsl#23	@ Sigma1(e)
+	adds	$Tlo,$Tlo,$t0
+	ldr	$t0,[sp,#$Foff+0]	@ f.lo
+	adc	$Thi,$Thi,$t1		@ T += Sigma1(e)
+	ldr	$t1,[sp,#$Foff+4]	@ f.hi
+	adds	$Tlo,$Tlo,$t2
+	ldr	$t2,[sp,#$Goff+0]	@ g.lo
+	adc	$Thi,$Thi,$t3		@ T += h
+	ldr	$t3,[sp,#$Goff+4]	@ g.hi
+
+	eor	$t0,$t0,$t2
+	str	$Elo,[sp,#$Eoff+0]
+	eor	$t1,$t1,$t3
+	str	$Ehi,[sp,#$Eoff+4]
+	and	$t0,$t0,$Elo
+	str	$Alo,[sp,#$Aoff+0]
+	and	$t1,$t1,$Ehi
+	str	$Ahi,[sp,#$Aoff+4]
+	eor	$t0,$t0,$t2
+	ldr	$t2,[$Ktbl,#$lo]	@ K[i].lo
+	eor	$t1,$t1,$t3		@ Ch(e,f,g)
+	ldr	$t3,[$Ktbl,#$hi]	@ K[i].hi
+
+	adds	$Tlo,$Tlo,$t0
+	ldr	$Elo,[sp,#$Doff+0]	@ d.lo
+	adc	$Thi,$Thi,$t1		@ T += Ch(e,f,g)
+	ldr	$Ehi,[sp,#$Doff+4]	@ d.hi
+	adds	$Tlo,$Tlo,$t2
+	and	$t0,$t2,#0xff
+	adc	$Thi,$Thi,$t3		@ T += K[i]
+	adds	$Elo,$Elo,$Tlo
+	ldr	$t2,[sp,#$Boff+0]	@ b.lo
+	adc	$Ehi,$Ehi,$Thi		@ d += T
+	teq	$t0,#$magic
+
+	ldr	$t3,[sp,#$Coff+0]	@ c.lo
+#if __ARM_ARCH__>=7
+	it	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	orreq	$Ktbl,$Ktbl,#1
+	@ Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+	@ LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+	@ HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+	mov	$t0,$Alo,lsr#28
+	mov	$t1,$Ahi,lsr#28
+	eor	$t0,$t0,$Ahi,lsl#4
+	eor	$t1,$t1,$Alo,lsl#4
+	eor	$t0,$t0,$Ahi,lsr#2
+	eor	$t1,$t1,$Alo,lsr#2
+	eor	$t0,$t0,$Alo,lsl#30
+	eor	$t1,$t1,$Ahi,lsl#30
+	eor	$t0,$t0,$Ahi,lsr#7
+	eor	$t1,$t1,$Alo,lsr#7
+	eor	$t0,$t0,$Alo,lsl#25
+	eor	$t1,$t1,$Ahi,lsl#25	@ Sigma0(a)
+	adds	$Tlo,$Tlo,$t0
+	and	$t0,$Alo,$t2
+	adc	$Thi,$Thi,$t1		@ T += Sigma0(a)
+
+	ldr	$t1,[sp,#$Boff+4]	@ b.hi
+	orr	$Alo,$Alo,$t2
+	ldr	$t2,[sp,#$Coff+4]	@ c.hi
+	and	$Alo,$Alo,$t3
+	and	$t3,$Ahi,$t1
+	orr	$Ahi,$Ahi,$t1
+	orr	$Alo,$Alo,$t0		@ Maj(a,b,c).lo
+	and	$Ahi,$Ahi,$t2
+	adds	$Alo,$Alo,$Tlo
+	orr	$Ahi,$Ahi,$t3		@ Maj(a,b,c).hi
+	sub	sp,sp,#8
+	adc	$Ahi,$Ahi,$Thi		@ h += T
+	tst	$Ktbl,#1
+	add	$Ktbl,$Ktbl,#8
+___
+}
+$code=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+#endif
+
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1)	.word	lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1)	.word	hi0,lo0, hi1,lo1
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code	32
+#else
+.syntax unified
+# ifdef __thumb2__
+#  define adrl adr
+.thumb
+# else
+.code   32
+# endif
+#endif
+
+.type	K512,%object
+.align	5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size	K512,.-K512
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-sha512_block_data_order
+.skip	32-4
+#else
+.skip	32
+#endif
+
+.global	sha512_block_data_order
+.type	sha512_block_data_order,%function
+sha512_block_data_order:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ sha512_block_data_order
+#else
+	adr	r3,sha512_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+	tst	r12,#1
+	bne	.LNEON
+#endif
+	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
+	stmdb	sp!,{r4-r12,lr}
+	sub	$Ktbl,r3,#672		@ K512
+	sub	sp,sp,#9*8
+
+	ldr	$Elo,[$ctx,#$Eoff+$lo]
+	ldr	$Ehi,[$ctx,#$Eoff+$hi]
+	ldr	$t0, [$ctx,#$Goff+$lo]
+	ldr	$t1, [$ctx,#$Goff+$hi]
+	ldr	$t2, [$ctx,#$Hoff+$lo]
+	ldr	$t3, [$ctx,#$Hoff+$hi]
+.Loop:
+	str	$t0, [sp,#$Goff+0]
+	str	$t1, [sp,#$Goff+4]
+	str	$t2, [sp,#$Hoff+0]
+	str	$t3, [sp,#$Hoff+4]
+	ldr	$Alo,[$ctx,#$Aoff+$lo]
+	ldr	$Ahi,[$ctx,#$Aoff+$hi]
+	ldr	$Tlo,[$ctx,#$Boff+$lo]
+	ldr	$Thi,[$ctx,#$Boff+$hi]
+	ldr	$t0, [$ctx,#$Coff+$lo]
+	ldr	$t1, [$ctx,#$Coff+$hi]
+	ldr	$t2, [$ctx,#$Doff+$lo]
+	ldr	$t3, [$ctx,#$Doff+$hi]
+	str	$Tlo,[sp,#$Boff+0]
+	str	$Thi,[sp,#$Boff+4]
+	str	$t0, [sp,#$Coff+0]
+	str	$t1, [sp,#$Coff+4]
+	str	$t2, [sp,#$Doff+0]
+	str	$t3, [sp,#$Doff+4]
+	ldr	$Tlo,[$ctx,#$Foff+$lo]
+	ldr	$Thi,[$ctx,#$Foff+$hi]
+	str	$Tlo,[sp,#$Foff+0]
+	str	$Thi,[sp,#$Foff+4]
+
+.L00_15:
+#if __ARM_ARCH__<7
+	ldrb	$Tlo,[$inp,#7]
+	ldrb	$t0, [$inp,#6]
+	ldrb	$t1, [$inp,#5]
+	ldrb	$t2, [$inp,#4]
+	ldrb	$Thi,[$inp,#3]
+	ldrb	$t3, [$inp,#2]
+	orr	$Tlo,$Tlo,$t0,lsl#8
+	ldrb	$t0, [$inp,#1]
+	orr	$Tlo,$Tlo,$t1,lsl#16
+	ldrb	$t1, [$inp],#8
+	orr	$Tlo,$Tlo,$t2,lsl#24
+	orr	$Thi,$Thi,$t3,lsl#8
+	orr	$Thi,$Thi,$t0,lsl#16
+	orr	$Thi,$Thi,$t1,lsl#24
+#else
+	ldr	$Tlo,[$inp,#4]
+	ldr	$Thi,[$inp],#8
+#ifdef __ARMEL__
+	rev	$Tlo,$Tlo
+	rev	$Thi,$Thi
+#endif
+#endif
+___
+	&BODY_00_15(0x94);
+$code.=<<___;
+	tst	$Ktbl,#1
+	beq	.L00_15
+	ldr	$t0,[sp,#`$Xoff+8*(16-1)`+0]
+	ldr	$t1,[sp,#`$Xoff+8*(16-1)`+4]
+	bic	$Ktbl,$Ktbl,#1
+.L16_79:
+	@ sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
+	@ LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+	@ HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
+	mov	$Tlo,$t0,lsr#1
+	ldr	$t2,[sp,#`$Xoff+8*(16-14)`+0]
+	mov	$Thi,$t1,lsr#1
+	ldr	$t3,[sp,#`$Xoff+8*(16-14)`+4]
+	eor	$Tlo,$Tlo,$t1,lsl#31
+	eor	$Thi,$Thi,$t0,lsl#31
+	eor	$Tlo,$Tlo,$t0,lsr#8
+	eor	$Thi,$Thi,$t1,lsr#8
+	eor	$Tlo,$Tlo,$t1,lsl#24
+	eor	$Thi,$Thi,$t0,lsl#24
+	eor	$Tlo,$Tlo,$t0,lsr#7
+	eor	$Thi,$Thi,$t1,lsr#7
+	eor	$Tlo,$Tlo,$t1,lsl#25
+
+	@ sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+	@ LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+	@ HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+	mov	$t0,$t2,lsr#19
+	mov	$t1,$t3,lsr#19
+	eor	$t0,$t0,$t3,lsl#13
+	eor	$t1,$t1,$t2,lsl#13
+	eor	$t0,$t0,$t3,lsr#29
+	eor	$t1,$t1,$t2,lsr#29
+	eor	$t0,$t0,$t2,lsl#3
+	eor	$t1,$t1,$t3,lsl#3
+	eor	$t0,$t0,$t2,lsr#6
+	eor	$t1,$t1,$t3,lsr#6
+	ldr	$t2,[sp,#`$Xoff+8*(16-9)`+0]
+	eor	$t0,$t0,$t3,lsl#26
+
+	ldr	$t3,[sp,#`$Xoff+8*(16-9)`+4]
+	adds	$Tlo,$Tlo,$t0
+	ldr	$t0,[sp,#`$Xoff+8*16`+0]
+	adc	$Thi,$Thi,$t1
+
+	ldr	$t1,[sp,#`$Xoff+8*16`+4]
+	adds	$Tlo,$Tlo,$t2
+	adc	$Thi,$Thi,$t3
+	adds	$Tlo,$Tlo,$t0
+	adc	$Thi,$Thi,$t1
+___
+	&BODY_00_15(0x17);
+$code.=<<___;
+#if __ARM_ARCH__>=7
+	ittt	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	ldreq	$t0,[sp,#`$Xoff+8*(16-1)`+0]
+	ldreq	$t1,[sp,#`$Xoff+8*(16-1)`+4]
+	beq	.L16_79
+	bic	$Ktbl,$Ktbl,#1
+
+	ldr	$Tlo,[sp,#$Boff+0]
+	ldr	$Thi,[sp,#$Boff+4]
+	ldr	$t0, [$ctx,#$Aoff+$lo]
+	ldr	$t1, [$ctx,#$Aoff+$hi]
+	ldr	$t2, [$ctx,#$Boff+$lo]
+	ldr	$t3, [$ctx,#$Boff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Aoff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Aoff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Boff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Boff+$hi]
+
+	ldr	$Alo,[sp,#$Coff+0]
+	ldr	$Ahi,[sp,#$Coff+4]
+	ldr	$Tlo,[sp,#$Doff+0]
+	ldr	$Thi,[sp,#$Doff+4]
+	ldr	$t0, [$ctx,#$Coff+$lo]
+	ldr	$t1, [$ctx,#$Coff+$hi]
+	ldr	$t2, [$ctx,#$Doff+$lo]
+	ldr	$t3, [$ctx,#$Doff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Coff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Coff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Doff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Doff+$hi]
+
+	ldr	$Tlo,[sp,#$Foff+0]
+	ldr	$Thi,[sp,#$Foff+4]
+	ldr	$t0, [$ctx,#$Eoff+$lo]
+	ldr	$t1, [$ctx,#$Eoff+$hi]
+	ldr	$t2, [$ctx,#$Foff+$lo]
+	ldr	$t3, [$ctx,#$Foff+$hi]
+	adds	$Elo,$Elo,$t0
+	str	$Elo,[$ctx,#$Eoff+$lo]
+	adc	$Ehi,$Ehi,$t1
+	str	$Ehi,[$ctx,#$Eoff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Foff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Foff+$hi]
+
+	ldr	$Alo,[sp,#$Goff+0]
+	ldr	$Ahi,[sp,#$Goff+4]
+	ldr	$Tlo,[sp,#$Hoff+0]
+	ldr	$Thi,[sp,#$Hoff+4]
+	ldr	$t0, [$ctx,#$Goff+$lo]
+	ldr	$t1, [$ctx,#$Goff+$hi]
+	ldr	$t2, [$ctx,#$Hoff+$lo]
+	ldr	$t3, [$ctx,#$Hoff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Goff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Goff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Hoff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Hoff+$hi]
+
+	add	sp,sp,#640
+	sub	$Ktbl,$Ktbl,#640
+
+	teq	$inp,$len
+	bne	.Loop
+
+	add	sp,sp,#8*9		@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia	sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	sha512_block_data_order,.-sha512_block_data_order
+___
+
+{
+my @Sigma0=(28,34,39);
+my @Sigma1=(14,18,41);
+my @sigma0=(1, 8, 7);
+my @sigma1=(19,61,6);
+
+my $Ktbl="r3";
+my $cnt="r12";	# volatile register known as ip, intra-procedure-call scratch
+
+my @X=map("d$_",(0..15));
+my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));
+
+sub NEON_00_15() {
+my $i=shift;
+my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31));	# temps
+
+$code.=<<___ if ($i<16 || $i&1);
+	vshr.u64	$t0,$e,#@Sigma1[0]	@ $i
+#if $i<16
+	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
+#endif
+	vshr.u64	$t1,$e,#@Sigma1[1]
+#if $i>0
+	 vadd.i64	$a,$Maj			@ h+=Maj from the past
+#endif
+	vshr.u64	$t2,$e,#@Sigma1[2]
+___
+$code.=<<___;
+	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
+	vsli.64		$t0,$e,#`64-@Sigma1[0]`
+	vsli.64		$t1,$e,#`64-@Sigma1[1]`
+	vmov		$Ch,$e
+	vsli.64		$t2,$e,#`64-@Sigma1[2]`
+#if $i<16 && defined(__ARMEL__)
+	vrev64.8	@X[$i],@X[$i]
+#endif
+	veor		$t1,$t0
+	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
+	vshr.u64	$t0,$a,#@Sigma0[0]
+	veor		$t2,$t1			@ Sigma1(e)
+	vadd.i64	$T1,$Ch,$h
+	vshr.u64	$t1,$a,#@Sigma0[1]
+	vsli.64		$t0,$a,#`64-@Sigma0[0]`
+	vadd.i64	$T1,$t2
+	vshr.u64	$t2,$a,#@Sigma0[2]
+	vadd.i64	$K,@X[$i%16]
+	vsli.64		$t1,$a,#`64-@Sigma0[1]`
+	veor		$Maj,$a,$b
+	vsli.64		$t2,$a,#`64-@Sigma0[2]`
+	veor		$h,$t0,$t1
+	vadd.i64	$T1,$K
+	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
+	veor		$h,$t2			@ Sigma0(a)
+	vadd.i64	$d,$T1
+	vadd.i64	$Maj,$T1
+	@ vadd.i64	$h,$Maj
+___
+}
+
+sub NEON_16_79() {
+my $i=shift;
+
+if ($i&1)	{ &NEON_00_15($i,@_); return; }
+
+# 2x-vectorized, therefore runs every 2nd round
+my @X=map("q$_",(0..7));			# view @X as 128-bit vector
+my ($t0,$t1,$s0,$s1) = map("q$_",(12..15));	# temps
+my ($d0,$d1,$d2) = map("d$_",(24..26));		# temps from NEON_00_15
+my $e=@_[4];					# $e from NEON_00_15
+$i /= 2;
+$code.=<<___;
+	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
+	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
+	 vadd.i64	@_[0],d30		@ h+=Maj from the past
+	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
+	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
+	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
+	vsli.64		$t1,@X[($i+7)%8],#`64-@sigma1[1]`
+	veor		$s1,$t0
+	vshr.u64	$t0,$s0,#@sigma0[0]
+	veor		$s1,$t1				@ sigma1(X[i+14])
+	vshr.u64	$t1,$s0,#@sigma0[1]
+	vadd.i64	@X[$i%8],$s1
+	vshr.u64	$s1,$s0,#@sigma0[2]
+	vsli.64		$t0,$s0,#`64-@sigma0[0]`
+	vsli.64		$t1,$s0,#`64-@sigma0[1]`
+	vext.8		$s0,@X[($i+4)%8],@X[($i+5)%8],#8	@ X[i+9]
+	veor		$s1,$t0
+	vshr.u64	$d0,$e,#@Sigma1[0]		@ from NEON_00_15
+	vadd.i64	@X[$i%8],$s0
+	vshr.u64	$d1,$e,#@Sigma1[1]		@ from NEON_00_15
+	veor		$s1,$t1				@ sigma0(X[i+1])
+	vshr.u64	$d2,$e,#@Sigma1[2]		@ from NEON_00_15
+	vadd.i64	@X[$i%8],$s1
+___
+	&NEON_00_15(2*$i,@_);
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.global	sha512_block_data_order_neon
+.type	sha512_block_data_order_neon,%function
+.align	4
+sha512_block_data_order_neon:
+.LNEON:
+	dmb				@ errata #451034 on early Cortex A8
+	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
+	VFP_ABI_PUSH
+	adrl	$Ktbl,K512
+	vldmia	$ctx,{$A-$H}		@ load context
+.Loop_neon:
+___
+for($i=0;$i<16;$i++)	{ &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	mov		$cnt,#4
+.L16_79_neon:
+	subs		$cnt,#1
+___
+for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	bne		.L16_79_neon
+
+	 vadd.i64	$A,d30		@ h+=Maj from the past
+	vldmia		$ctx,{d24-d31}	@ load context to temp
+	vadd.i64	q8,q12		@ vectorized accumulate
+	vadd.i64	q9,q13
+	vadd.i64	q10,q14
+	vadd.i64	q11,q15
+	vstmia		$ctx,{$A-$H}	@ save context
+	teq		$inp,$len
+	sub		$Ktbl,#640	@ rewind K512
+	bne		.Loop_neon
+
+	VFP_ABI_POP
+	ret				@ bx lr
+.size	sha512_block_data_order_neon,.-sha512_block_data_order_neon
+#endif
+___
+}
+$code.=<<___;
+.asciz	"SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm	OPENSSL_armcap_P,4,4
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx	lr/gm;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+print $code;
+close STDOUT; # enforce flush
diff --git a/arch/arm/crypto/sha512-armv7-neon.S b/arch/arm/crypto/sha512-armv7-neon.S deleted file mode 100644 index fe99472e507c..000000000000 --- a/arch/arm/crypto/sha512-armv7-neon.S +++ /dev/null | |||
@@ -1,455 +0,0 @@ | |||
1 | /* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform | ||
2 | * | ||
3 | * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the Free | ||
7 | * Software Foundation; either version 2 of the License, or (at your option) | ||
8 | * any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/linkage.h> | ||
12 | |||
13 | |||
14 | .syntax unified | ||
15 | .code 32 | ||
16 | .fpu neon | ||
17 | |||
18 | .text | ||
19 | |||
20 | /* structure of SHA512_CONTEXT */ | ||
21 | #define hd_a 0 | ||
22 | #define hd_b ((hd_a) + 8) | ||
23 | #define hd_c ((hd_b) + 8) | ||
24 | #define hd_d ((hd_c) + 8) | ||
25 | #define hd_e ((hd_d) + 8) | ||
26 | #define hd_f ((hd_e) + 8) | ||
27 | #define hd_g ((hd_f) + 8) | ||
28 | |||
29 | /* register macros */ | ||
30 | #define RK %r2 | ||
31 | |||
32 | #define RA d0 | ||
33 | #define RB d1 | ||
34 | #define RC d2 | ||
35 | #define RD d3 | ||
36 | #define RE d4 | ||
37 | #define RF d5 | ||
38 | #define RG d6 | ||
39 | #define RH d7 | ||
40 | |||
41 | #define RT0 d8 | ||
42 | #define RT1 d9 | ||
43 | #define RT2 d10 | ||
44 | #define RT3 d11 | ||
45 | #define RT4 d12 | ||
46 | #define RT5 d13 | ||
47 | #define RT6 d14 | ||
48 | #define RT7 d15 | ||
49 | |||
50 | #define RT01q q4 | ||
51 | #define RT23q q5 | ||
52 | #define RT45q q6 | ||
53 | #define RT67q q7 | ||
54 | |||
55 | #define RW0 d16 | ||
56 | #define RW1 d17 | ||
57 | #define RW2 d18 | ||
58 | #define RW3 d19 | ||
59 | #define RW4 d20 | ||
60 | #define RW5 d21 | ||
61 | #define RW6 d22 | ||
62 | #define RW7 d23 | ||
63 | #define RW8 d24 | ||
64 | #define RW9 d25 | ||
65 | #define RW10 d26 | ||
66 | #define RW11 d27 | ||
67 | #define RW12 d28 | ||
68 | #define RW13 d29 | ||
69 | #define RW14 d30 | ||
70 | #define RW15 d31 | ||
71 | |||
72 | #define RW01q q8 | ||
73 | #define RW23q q9 | ||
74 | #define RW45q q10 | ||
75 | #define RW67q q11 | ||
76 | #define RW89q q12 | ||
77 | #define RW1011q q13 | ||
78 | #define RW1213q q14 | ||
79 | #define RW1415q q15 | ||
80 | |||
81 | /*********************************************************************** | ||
82 | * ARM assembly implementation of sha512 transform | ||
83 | ***********************************************************************/ | ||
84 | #define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \ | ||
85 | rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \ | ||
86 | /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \ | ||
87 | vshr.u64 RT2, re, #14; \ | ||
88 | vshl.u64 RT3, re, #64 - 14; \ | ||
89 | interleave_op(arg1); \ | ||
90 | vshr.u64 RT4, re, #18; \ | ||
91 | vshl.u64 RT5, re, #64 - 18; \ | ||
92 | vld1.64 {RT0}, [RK]!; \ | ||
93 | veor.64 RT23q, RT23q, RT45q; \ | ||
94 | vshr.u64 RT4, re, #41; \ | ||
95 | vshl.u64 RT5, re, #64 - 41; \ | ||
96 | vadd.u64 RT0, RT0, rw0; \ | ||
97 | veor.64 RT23q, RT23q, RT45q; \ | ||
98 | vmov.64 RT7, re; \ | ||
99 | veor.64 RT1, RT2, RT3; \ | ||
100 | vbsl.64 RT7, rf, rg; \ | ||
101 | \ | ||
102 | vadd.u64 RT1, RT1, rh; \ | ||
103 | vshr.u64 RT2, ra, #28; \ | ||
104 | vshl.u64 RT3, ra, #64 - 28; \ | ||
105 | vadd.u64 RT1, RT1, RT0; \ | ||
106 | vshr.u64 RT4, ra, #34; \ | ||
107 | vshl.u64 RT5, ra, #64 - 34; \ | ||
108 | vadd.u64 RT1, RT1, RT7; \ | ||
109 | \ | ||
110 | /* h = Sum0 (a) + Maj (a, b, c); */ \ | ||
111 | veor.64 RT23q, RT23q, RT45q; \ | ||
112 | vshr.u64 RT4, ra, #39; \ | ||
113 | vshl.u64 RT5, ra, #64 - 39; \ | ||
114 | veor.64 RT0, ra, rb; \ | ||
115 | veor.64 RT23q, RT23q, RT45q; \ | ||
116 | vbsl.64 RT0, rc, rb; \ | ||
117 | vadd.u64 rd, rd, RT1; /* d+=t1; */ \ | ||
118 | veor.64 rh, RT2, RT3; \ | ||
119 | \ | ||
120 | /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \ | ||
121 | vshr.u64 RT2, rd, #14; \ | ||
122 | vshl.u64 RT3, rd, #64 - 14; \ | ||
123 | vadd.u64 rh, rh, RT0; \ | ||
124 | vshr.u64 RT4, rd, #18; \ | ||
125 | vshl.u64 RT5, rd, #64 - 18; \ | ||
126 | vadd.u64 rh, rh, RT1; /* h+=t1; */ \ | ||
127 | vld1.64 {RT0}, [RK]!; \ | ||
128 | veor.64 RT23q, RT23q, RT45q; \ | ||
129 | vshr.u64 RT4, rd, #41; \ | ||
130 | vshl.u64 RT5, rd, #64 - 41; \ | ||
131 | vadd.u64 RT0, RT0, rw1; \ | ||
132 | veor.64 RT23q, RT23q, RT45q; \ | ||
133 | vmov.64 RT7, rd; \ | ||
134 | veor.64 RT1, RT2, RT3; \ | ||
135 | vbsl.64 RT7, re, rf; \ | ||
136 | \ | ||
137 | vadd.u64 RT1, RT1, rg; \ | ||
138 | vshr.u64 RT2, rh, #28; \ | ||
139 | vshl.u64 RT3, rh, #64 - 28; \ | ||
140 | vadd.u64 RT1, RT1, RT0; \ | ||
141 | vshr.u64 RT4, rh, #34; \ | ||
142 | vshl.u64 RT5, rh, #64 - 34; \ | ||
143 | vadd.u64 RT1, RT1, RT7; \ | ||
144 | \ | ||
145 | /* g = Sum0 (h) + Maj (h, a, b); */ \ | ||
146 | veor.64 RT23q, RT23q, RT45q; \ | ||
147 | vshr.u64 RT4, rh, #39; \ | ||
148 | vshl.u64 RT5, rh, #64 - 39; \ | ||
149 | veor.64 RT0, rh, ra; \ | ||
150 | veor.64 RT23q, RT23q, RT45q; \ | ||
151 | vbsl.64 RT0, rb, ra; \ | ||
152 | vadd.u64 rc, rc, RT1; /* c+=t1; */ \ | ||
153 | veor.64 rg, RT2, RT3; \ | ||
154 | \ | ||
155 | /* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \ | ||
156 | /* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \ | ||
157 | \ | ||
158 | /**** S0(w[1:2]) */ \ | ||
159 | \ | ||
160 | /* w[0:1] += w[9:10] */ \ | ||
161 | /* RT23q = rw1:rw2 */ \ | ||
162 | vext.u64 RT23q, rw01q, rw23q, #1; \ | ||
163 | vadd.u64 rw0, rw9; \ | ||
164 | vadd.u64 rg, rg, RT0; \ | ||
165 | vadd.u64 rw1, rw10;\ | ||
166 | vadd.u64 rg, rg, RT1; /* g+=t1; */ \ | ||
167 | \ | ||
168 | vshr.u64 RT45q, RT23q, #1; \ | ||
169 | vshl.u64 RT67q, RT23q, #64 - 1; \ | ||
170 | vshr.u64 RT01q, RT23q, #8; \ | ||
171 | veor.u64 RT45q, RT45q, RT67q; \ | ||
172 | vshl.u64 RT67q, RT23q, #64 - 8; \ | ||
173 | veor.u64 RT45q, RT45q, RT01q; \ | ||
174 | vshr.u64 RT01q, RT23q, #7; \ | ||
175 | veor.u64 RT45q, RT45q, RT67q; \ | ||
176 | \ | ||
177 | /**** S1(w[14:15]) */ \ | ||
178 | vshr.u64 RT23q, rw1415q, #6; \ | ||
179 | veor.u64 RT01q, RT01q, RT45q; \ | ||
180 | vshr.u64 RT45q, rw1415q, #19; \ | ||
181 | vshl.u64 RT67q, rw1415q, #64 - 19; \ | ||
182 | veor.u64 RT23q, RT23q, RT45q; \ | ||
183 | vshr.u64 RT45q, rw1415q, #61; \ | ||
184 | veor.u64 RT23q, RT23q, RT67q; \ | ||
185 | vshl.u64 RT67q, rw1415q, #64 - 61; \ | ||
186 | veor.u64 RT23q, RT23q, RT45q; \ | ||
187 | vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \ | ||
188 | veor.u64 RT01q, RT23q, RT67q; | ||
189 | #define vadd_RT01q(rw01q) \ | ||
190 | /* w[0:1] += S(w[14:15]) */ \ | ||
191 | vadd.u64 rw01q, RT01q; | ||
192 | |||
193 | #define dummy(_) /*_*/ | ||
194 | |||
195 | #define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \ | ||
196 | interleave_op1, arg1, interleave_op2, arg2) \ | ||
197 | /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \ | ||
198 | vshr.u64 RT2, re, #14; \ | ||
199 | vshl.u64 RT3, re, #64 - 14; \ | ||
200 | interleave_op1(arg1); \ | ||
201 | vshr.u64 RT4, re, #18; \ | ||
202 | vshl.u64 RT5, re, #64 - 18; \ | ||
203 | interleave_op2(arg2); \ | ||
204 | vld1.64 {RT0}, [RK]!; \ | ||
205 | veor.64 RT23q, RT23q, RT45q; \ | ||
206 | vshr.u64 RT4, re, #41; \ | ||
207 | vshl.u64 RT5, re, #64 - 41; \ | ||
208 | vadd.u64 RT0, RT0, rw0; \ | ||
209 | veor.64 RT23q, RT23q, RT45q; \ | ||
210 | vmov.64 RT7, re; \ | ||
211 | veor.64 RT1, RT2, RT3; \ | ||
212 | vbsl.64 RT7, rf, rg; \ | ||
213 | \ | ||
214 | vadd.u64 RT1, RT1, rh; \ | ||
215 | vshr.u64 RT2, ra, #28; \ | ||
216 | vshl.u64 RT3, ra, #64 - 28; \ | ||
217 | vadd.u64 RT1, RT1, RT0; \ | ||
218 | vshr.u64 RT4, ra, #34; \ | ||
219 | vshl.u64 RT5, ra, #64 - 34; \ | ||
220 | vadd.u64 RT1, RT1, RT7; \ | ||
221 | \ | ||
222 | /* h = Sum0 (a) + Maj (a, b, c); */ \ | ||
223 | veor.64 RT23q, RT23q, RT45q; \ | ||
224 | vshr.u64 RT4, ra, #39; \ | ||
225 | vshl.u64 RT5, ra, #64 - 39; \ | ||
226 | veor.64 RT0, ra, rb; \ | ||
227 | veor.64 RT23q, RT23q, RT45q; \ | ||
228 | vbsl.64 RT0, rc, rb; \ | ||
229 | vadd.u64 rd, rd, RT1; /* d+=t1; */ \ | ||
230 | veor.64 rh, RT2, RT3; \ | ||
231 | \ | ||
232 | /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \ | ||
233 | vshr.u64 RT2, rd, #14; \ | ||
234 | vshl.u64 RT3, rd, #64 - 14; \ | ||
235 | vadd.u64 rh, rh, RT0; \ | ||
236 | vshr.u64 RT4, rd, #18; \ | ||
237 | vshl.u64 RT5, rd, #64 - 18; \ | ||
238 | vadd.u64 rh, rh, RT1; /* h+=t1; */ \ | ||
239 | vld1.64 {RT0}, [RK]!; \ | ||
240 | veor.64 RT23q, RT23q, RT45q; \ | ||
241 | vshr.u64 RT4, rd, #41; \ | ||
242 | vshl.u64 RT5, rd, #64 - 41; \ | ||
243 | vadd.u64 RT0, RT0, rw1; \ | ||
244 | veor.64 RT23q, RT23q, RT45q; \ | ||
245 | vmov.64 RT7, rd; \ | ||
246 | veor.64 RT1, RT2, RT3; \ | ||
247 | vbsl.64 RT7, re, rf; \ | ||
248 | \ | ||
249 | vadd.u64 RT1, RT1, rg; \ | ||
250 | vshr.u64 RT2, rh, #28; \ | ||
251 | vshl.u64 RT3, rh, #64 - 28; \ | ||
252 | vadd.u64 RT1, RT1, RT0; \ | ||
253 | vshr.u64 RT4, rh, #34; \ | ||
254 | vshl.u64 RT5, rh, #64 - 34; \ | ||
255 | vadd.u64 RT1, RT1, RT7; \ | ||
256 | \ | ||
257 | /* g = Sum0 (h) + Maj (h, a, b); */ \ | ||
258 | veor.64 RT23q, RT23q, RT45q; \ | ||
259 | vshr.u64 RT4, rh, #39; \ | ||
260 | vshl.u64 RT5, rh, #64 - 39; \ | ||
261 | veor.64 RT0, rh, ra; \ | ||
262 | veor.64 RT23q, RT23q, RT45q; \ | ||
263 | vbsl.64 RT0, rb, ra; \ | ||
264 | vadd.u64 rc, rc, RT1; /* c+=t1; */ \ | ||
265 | veor.64 rg, RT2, RT3; | ||
266 | #define vadd_rg_RT0(rg) \ | ||
267 | vadd.u64 rg, rg, RT0; | ||
268 | #define vadd_rg_RT1(rg) \ | ||
269 | vadd.u64 rg, rg, RT1; /* g+=t1; */ | ||
270 | |||
271 | .align 3 | ||
272 | ENTRY(sha512_transform_neon) | ||
273 | /* Input: | ||
274 | * %r0: SHA512_CONTEXT | ||
275 | * %r1: data | ||
276 | * %r2: u64 k[] constants | ||
277 | * %r3: nblks | ||
278 | */ | ||
279 | push {%lr}; | ||
280 | |||
281 | mov %lr, #0; | ||
282 | |||
283 | /* Load context to d0-d7 */ | ||
284 | vld1.64 {RA-RD}, [%r0]!; | ||
285 | vld1.64 {RE-RH}, [%r0]; | ||
286 | sub %r0, #(4*8); | ||
287 | |||
288 | /* Load input to w[16], d16-d31 */ | ||
289 | /* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */ | ||
290 | vld1.64 {RW0-RW3}, [%r1]!; | ||
291 | vld1.64 {RW4-RW7}, [%r1]!; | ||
292 | vld1.64 {RW8-RW11}, [%r1]!; | ||
293 | vld1.64 {RW12-RW15}, [%r1]!; | ||
294 | #ifdef __ARMEL__ | ||
295 | /* byteswap */ | ||
296 | vrev64.8 RW01q, RW01q; | ||
297 | vrev64.8 RW23q, RW23q; | ||
298 | vrev64.8 RW45q, RW45q; | ||
299 | vrev64.8 RW67q, RW67q; | ||
300 | vrev64.8 RW89q, RW89q; | ||
301 | vrev64.8 RW1011q, RW1011q; | ||
302 | vrev64.8 RW1213q, RW1213q; | ||
303 | vrev64.8 RW1415q, RW1415q; | ||
304 | #endif | ||
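/* SHA-512 message words are big-endian, so on a little-endian build the
 * freshly loaded block is byte-swapped above; a big-endian build can use
 * the words exactly as loaded. */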
305 | |||
306 | /* EABI says that d8-d15 must be preserved by callee. */ | ||
307 | /*vpush {RT0-RT7};*/ | ||
308 | |||
309 | .Loop: | ||
310 | rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2, | ||
311 | RW23q, RW1415q, RW9, RW10, dummy, _); | ||
312 | b .Lenter_rounds; | ||
313 | |||
314 | .Loop_rounds: | ||
315 | rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2, | ||
316 | RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q); | ||
317 | .Lenter_rounds: | ||
318 | rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4, | ||
319 | RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q); | ||
320 | rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6, | ||
321 | RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q); | ||
322 | rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8, | ||
323 | RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q); | ||
324 | rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10, | ||
325 | RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q); | ||
326 | rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12, | ||
327 | RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q); | ||
328 | add %lr, #16; | ||
329 | rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14, | ||
330 | RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q); | ||
331 | cmp %lr, #64; | ||
332 | rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0, | ||
333 | RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q); | ||
334 | bne .Loop_rounds; | ||
335 | |||
336 | subs %r3, #1; | ||
337 | |||
338 | rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, | ||
339 | vadd_RT01q, RW1415q, dummy, _); | ||
340 | rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, | ||
341 | vadd_rg_RT0, RG, vadd_rg_RT1, RG); | ||
342 | beq .Lhandle_tail; | ||
343 | vld1.64 {RW0-RW3}, [%r1]!; | ||
344 | rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, | ||
345 | vadd_rg_RT0, RE, vadd_rg_RT1, RE); | ||
346 | rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, | ||
347 | vadd_rg_RT0, RC, vadd_rg_RT1, RC); | ||
348 | #ifdef __ARMEL__ | ||
349 | vrev64.8 RW01q, RW01q; | ||
350 | vrev64.8 RW23q, RW23q; | ||
351 | #endif | ||
352 | vld1.64 {RW4-RW7}, [%r1]!; | ||
353 | rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, | ||
354 | vadd_rg_RT0, RA, vadd_rg_RT1, RA); | ||
355 | rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, | ||
356 | vadd_rg_RT0, RG, vadd_rg_RT1, RG); | ||
357 | #ifdef __ARMEL__ | ||
358 | vrev64.8 RW45q, RW45q; | ||
359 | vrev64.8 RW67q, RW67q; | ||
360 | #endif | ||
361 | vld1.64 {RW8-RW11}, [%r1]!; | ||
362 | rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, | ||
363 | vadd_rg_RT0, RE, vadd_rg_RT1, RE); | ||
364 | rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, | ||
365 | vadd_rg_RT0, RC, vadd_rg_RT1, RC); | ||
366 | #ifdef __ARMEL__ | ||
367 | vrev64.8 RW89q, RW89q; | ||
368 | vrev64.8 RW1011q, RW1011q; | ||
369 | #endif | ||
370 | vld1.64 {RW12-RW15}, [%r1]!; | ||
371 | vadd_rg_RT0(RA); | ||
372 | vadd_rg_RT1(RA); | ||
373 | |||
374 | /* Load context */ | ||
375 | vld1.64 {RT0-RT3}, [%r0]!; | ||
376 | vld1.64 {RT4-RT7}, [%r0]; | ||
377 | sub %r0, #(4*8); | ||
378 | |||
379 | #ifdef __ARMEL__ | ||
380 | vrev64.8 RW1213q, RW1213q; | ||
381 | vrev64.8 RW1415q, RW1415q; | ||
382 | #endif | ||
383 | |||
384 | vadd.u64 RA, RT0; | ||
385 | vadd.u64 RB, RT1; | ||
386 | vadd.u64 RC, RT2; | ||
387 | vadd.u64 RD, RT3; | ||
388 | vadd.u64 RE, RT4; | ||
389 | vadd.u64 RF, RT5; | ||
390 | vadd.u64 RG, RT6; | ||
391 | vadd.u64 RH, RT7; | ||
392 | |||
393 | /* Store the first half of context */ | ||
394 | vst1.64 {RA-RD}, [%r0]!; | ||
395 | sub RK, $(8*80); | ||
396 | vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */ | ||
397 | mov %lr, #0; | ||
398 | sub %r0, #(4*8); | ||
399 | |||
400 | b .Loop; | ||
401 | |||
402 | .Lhandle_tail: | ||
403 | rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, | ||
404 | vadd_rg_RT0, RE, vadd_rg_RT1, RE); | ||
405 | rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, | ||
406 | vadd_rg_RT0, RC, vadd_rg_RT1, RC); | ||
407 | rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, | ||
408 | vadd_rg_RT0, RA, vadd_rg_RT1, RA); | ||
409 | rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, | ||
410 | vadd_rg_RT0, RG, vadd_rg_RT1, RG); | ||
411 | rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, | ||
412 | vadd_rg_RT0, RE, vadd_rg_RT1, RE); | ||
413 | rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, | ||
414 | vadd_rg_RT0, RC, vadd_rg_RT1, RC); | ||
415 | |||
416 | /* Load context to d16-d23 */ | ||
417 | vld1.64 {RW0-RW3}, [%r0]!; | ||
418 | vadd_rg_RT0(RA); | ||
419 | vld1.64 {RW4-RW7}, [%r0]; | ||
420 | vadd_rg_RT1(RA); | ||
421 | sub %r0, #(4*8); | ||
422 | |||
423 | vadd.u64 RA, RW0; | ||
424 | vadd.u64 RB, RW1; | ||
425 | vadd.u64 RC, RW2; | ||
426 | vadd.u64 RD, RW3; | ||
427 | vadd.u64 RE, RW4; | ||
428 | vadd.u64 RF, RW5; | ||
429 | vadd.u64 RG, RW6; | ||
430 | vadd.u64 RH, RW7; | ||
431 | |||
432 | /* Store the first half of context */ | ||
433 | vst1.64 {RA-RD}, [%r0]!; | ||
434 | |||
435 | /* Clear used registers */ | ||
436 | /* d16-d31 */ | ||
437 | veor.u64 RW01q, RW01q; | ||
438 | veor.u64 RW23q, RW23q; | ||
439 | veor.u64 RW45q, RW45q; | ||
440 | veor.u64 RW67q, RW67q; | ||
441 | vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */ | ||
442 | veor.u64 RW89q, RW89q; | ||
443 | veor.u64 RW1011q, RW1011q; | ||
444 | veor.u64 RW1213q, RW1213q; | ||
445 | veor.u64 RW1415q, RW1415q; | ||
446 | /* d8-d15 */ | ||
447 | /*vpop {RT0-RT7};*/ | ||
448 | /* d0-d7 (q0-q3) */ | ||
449 | veor.u64 %q0, %q0; | ||
450 | veor.u64 %q1, %q1; | ||
451 | veor.u64 %q2, %q2; | ||
452 | veor.u64 %q3, %q3; | ||
453 | |||
454 | pop {%pc}; | ||
455 | ENDPROC(sha512_transform_neon) | ||
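As a reading aid for the listing above, here is a minimal C-side sketch of how
a glue layer might drive the NEON transform. The prototype is inferred from
the "Input:" register comments (%r0 context, %r1 data, %r2 k[] constants,
%r3 nblks); the names sha512_k and sha512_update_blocks are hypothetical and
not part of this patch.

    #include <stdint.h>
    #include <stddef.h>

    /* Eight chained 64-bit hash words, as loaded and stored by the assembly. */
    struct sha512_state {
        uint64_t h[8];
    };

    /* Prototype inferred from the "Input:" comment block (an assumption). */
    void sha512_transform_neon(uint64_t *state, const uint8_t *data,
                               const uint64_t *k, unsigned int nblks);

    extern const uint64_t sha512_k[80];   /* the 80 round constants */

    /* Feed every complete 128-byte block of a buffer to the transform. */
    static void sha512_update_blocks(struct sha512_state *st,
                                     const uint8_t *data, size_t len)
    {
        unsigned int nblks = len / 128;   /* SHA-512 block size is 128 bytes */

        if (nblks)
            sha512_transform_neon(st->h, data, sha512_k, nblks);
    }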
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped new file mode 100644 index 000000000000..3694c4d4ca2b --- /dev/null +++ b/arch/arm/crypto/sha512-core.S_shipped | |||
@@ -0,0 +1,1861 @@ | |||
1 | |||
2 | @ ==================================================================== | ||
3 | @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | ||
4 | @ project. The module is, however, dual licensed under OpenSSL and | ||
5 | @ CRYPTOGAMS licenses depending on where you obtain it. For further | ||
6 | @ details see http://www.openssl.org/~appro/cryptogams/. | ||
7 | @ | ||
8 | @ Permission to use under GPL terms is granted. | ||
9 | @ ==================================================================== | ||
10 | |||
11 | @ SHA512 block procedure for ARMv4. September 2007. | ||
12 | |||
13 | @ This code is ~4.5 (four and a half) times faster than code generated | ||
14 | @ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue | ||
15 | @ XScale PXA250 core]. | ||
16 | @ | ||
17 | @ July 2010. | ||
18 | @ | ||
19 | @ Rescheduling for the dual-issue pipeline resulted in a 6% improvement on | ||
20 | @ Cortex A8 core and ~40 cycles per processed byte. | ||
21 | |||
22 | @ February 2011. | ||
23 | @ | ||
24 | @ Profiler-assisted and platform-specific optimization resulted in a 7% | ||
25 | @ improvement on Cortex A8 core and ~38 cycles per byte. | ||
26 | |||
27 | @ March 2011. | ||
28 | @ | ||
29 | @ Add NEON implementation. On Cortex A8 it was measured to process | ||
30 | @ one byte in 23.3 cycles or ~60% faster than integer-only code. | ||
31 | |||
32 | @ August 2012. | ||
33 | @ | ||
34 | @ Improve NEON performance by 12% on Snapdragon S4. In absolute | ||
35 | @ terms it's 22.6 cycles per byte, which is a disappointing result. | ||
36 | @ Technical writers asserted that the 3-way S4 pipeline can sustain | ||
37 | @ multiple NEON instructions per cycle, but dual NEON issue could | ||
38 | @ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html | ||
39 | @ for further details. As a side note, Cortex-A15 processes one byte in | ||
40 | @ 16 cycles. | ||
41 | |||
42 | @ Byte order [in]dependence. ========================================= | ||
43 | @ | ||
44 | @ Originally the caller was expected to maintain a specific *dword* order | ||
45 | @ in h[0-7], namely with the most significant dword at the *lower* address, | ||
46 | @ which was reflected in the two parameters below as 0 and 4. Now the caller | ||
47 | @ is expected to maintain native byte order for whole 64-bit values. | ||
48 | #ifndef __KERNEL__ | ||
49 | # include "arm_arch.h" | ||
50 | # define VFP_ABI_PUSH vstmdb sp!,{d8-d15} | ||
51 | # define VFP_ABI_POP vldmia sp!,{d8-d15} | ||
52 | #else | ||
53 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ | ||
54 | # define __ARM_MAX_ARCH__ 7 | ||
55 | # define VFP_ABI_PUSH | ||
56 | # define VFP_ABI_POP | ||
57 | #endif | ||
58 | |||
59 | #ifdef __ARMEL__ | ||
60 | # define LO 0 | ||
61 | # define HI 4 | ||
62 | # define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 | ||
63 | #else | ||
64 | # define HI 0 | ||
65 | # define LO 4 | ||
66 | # define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 | ||
67 | #endif | ||
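@ For example, on a little-endian build the first round constant below,
@ K[0] = 0x428a2f98d728ae22, is emitted by WORD64 as
@ ".word 0xd728ae22,0x428a2f98" (low word first), so a 64-bit load sees
@ the native value; a big-endian build emits the two words in the
@ opposite order.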
68 | |||
69 | .text | ||
70 | #if __ARM_ARCH__<7 | ||
71 | .code 32 | ||
72 | #else | ||
73 | .syntax unified | ||
74 | # ifdef __thumb2__ | ||
75 | # define adrl adr | ||
76 | .thumb | ||
77 | # else | ||
78 | .code 32 | ||
79 | # endif | ||
80 | #endif | ||
81 | |||
82 | .type K512,%object | ||
83 | .align 5 | ||
84 | K512: | ||
85 | WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) | ||
86 | WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) | ||
87 | WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) | ||
88 | WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) | ||
89 | WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) | ||
90 | WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) | ||
91 | WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) | ||
92 | WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) | ||
93 | WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) | ||
94 | WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) | ||
95 | WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) | ||
96 | WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) | ||
97 | WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) | ||
98 | WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) | ||
99 | WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) | ||
100 | WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) | ||
101 | WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) | ||
102 | WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) | ||
103 | WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) | ||
104 | WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) | ||
105 | WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) | ||
106 | WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) | ||
107 | WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) | ||
108 | WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) | ||
109 | WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) | ||
110 | WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) | ||
111 | WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) | ||
112 | WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) | ||
113 | WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) | ||
114 | WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) | ||
115 | WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) | ||
116 | WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) | ||
117 | WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) | ||
118 | WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) | ||
119 | WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) | ||
120 | WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) | ||
121 | WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) | ||
122 | WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) | ||
123 | WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) | ||
124 | WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) | ||
125 | .size K512,.-K512 | ||
126 | #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) | ||
127 | .LOPENSSL_armcap: | ||
128 | .word OPENSSL_armcap_P-sha512_block_data_order | ||
129 | .skip 32-4 | ||
130 | #else | ||
131 | .skip 32 | ||
132 | #endif | ||
133 | |||
134 | .global sha512_block_data_order | ||
135 | .type sha512_block_data_order,%function | ||
136 | sha512_block_data_order: | ||
137 | #if __ARM_ARCH__<7 | ||
138 | sub r3,pc,#8 @ sha512_block_data_order | ||
139 | #else | ||
140 | adr r3,sha512_block_data_order | ||
141 | #endif | ||
142 | #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) | ||
143 | ldr r12,.LOPENSSL_armcap | ||
144 | ldr r12,[r3,r12] @ OPENSSL_armcap_P | ||
145 | tst r12,#1 | ||
146 | bne .LNEON | ||
147 | #endif | ||
148 | add r2,r1,r2,lsl#7 @ len to point at the end of inp | ||
149 | stmdb sp!,{r4-r12,lr} | ||
150 | sub r14,r3,#672 @ K512 | ||
151 | sub sp,sp,#9*8 | ||
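@ The frame holds the spilled working variables; each round below also
@ pushes one extra 8-byte slot (sub sp,sp,#8), keeping older message
@ words addressable at fixed sp offsets, and the 640 bytes accumulated
@ over 80 rounds are released once the block is done.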
152 | |||
153 | ldr r7,[r0,#32+LO] | ||
154 | ldr r8,[r0,#32+HI] | ||
155 | ldr r9, [r0,#48+LO] | ||
156 | ldr r10, [r0,#48+HI] | ||
157 | ldr r11, [r0,#56+LO] | ||
158 | ldr r12, [r0,#56+HI] | ||
159 | .Loop: | ||
160 | str r9, [sp,#48+0] | ||
161 | str r10, [sp,#48+4] | ||
162 | str r11, [sp,#56+0] | ||
163 | str r12, [sp,#56+4] | ||
164 | ldr r5,[r0,#0+LO] | ||
165 | ldr r6,[r0,#0+HI] | ||
166 | ldr r3,[r0,#8+LO] | ||
167 | ldr r4,[r0,#8+HI] | ||
168 | ldr r9, [r0,#16+LO] | ||
169 | ldr r10, [r0,#16+HI] | ||
170 | ldr r11, [r0,#24+LO] | ||
171 | ldr r12, [r0,#24+HI] | ||
172 | str r3,[sp,#8+0] | ||
173 | str r4,[sp,#8+4] | ||
174 | str r9, [sp,#16+0] | ||
175 | str r10, [sp,#16+4] | ||
176 | str r11, [sp,#24+0] | ||
177 | str r12, [sp,#24+4] | ||
178 | ldr r3,[r0,#40+LO] | ||
179 | ldr r4,[r0,#40+HI] | ||
180 | str r3,[sp,#40+0] | ||
181 | str r4,[sp,#40+4] | ||
182 | |||
183 | .L00_15: | ||
184 | #if __ARM_ARCH__<7 | ||
185 | ldrb r3,[r1,#7] | ||
186 | ldrb r9, [r1,#6] | ||
187 | ldrb r10, [r1,#5] | ||
188 | ldrb r11, [r1,#4] | ||
189 | ldrb r4,[r1,#3] | ||
190 | ldrb r12, [r1,#2] | ||
191 | orr r3,r3,r9,lsl#8 | ||
192 | ldrb r9, [r1,#1] | ||
193 | orr r3,r3,r10,lsl#16 | ||
194 | ldrb r10, [r1],#8 | ||
195 | orr r3,r3,r11,lsl#24 | ||
196 | orr r4,r4,r12,lsl#8 | ||
197 | orr r4,r4,r9,lsl#16 | ||
198 | orr r4,r4,r10,lsl#24 | ||
199 | #else | ||
200 | ldr r3,[r1,#4] | ||
201 | ldr r4,[r1],#8 | ||
202 | #ifdef __ARMEL__ | ||
203 | rev r3,r3 | ||
204 | rev r4,r4 | ||
205 | #endif | ||
206 | #endif | ||
207 | @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) | ||
208 | @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 | ||
209 | @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 | ||
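@ As a C-style sketch of the split (an illustration, not from the source):
@ with e in 32-bit halves (hi,lo), a 64-bit ROTR by 14 is
@   lo' = (lo >> 14) | (hi << 18);  hi' = (hi >> 14) | (lo << 18);
@ and ROTR by 41 (= swap halves, then ROTR by 9) is
@   lo' = (hi >> 9) | (lo << 23);   hi' = (lo >> 9) | (hi << 23);
@ which is exactly what the eor/lsr/lsl pairs below implement.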
210 | mov r9,r7,lsr#14 | ||
211 | str r3,[sp,#64+0] | ||
212 | mov r10,r8,lsr#14 | ||
213 | str r4,[sp,#64+4] | ||
214 | eor r9,r9,r8,lsl#18 | ||
215 | ldr r11,[sp,#56+0] @ h.lo | ||
216 | eor r10,r10,r7,lsl#18 | ||
217 | ldr r12,[sp,#56+4] @ h.hi | ||
218 | eor r9,r9,r7,lsr#18 | ||
219 | eor r10,r10,r8,lsr#18 | ||
220 | eor r9,r9,r8,lsl#14 | ||
221 | eor r10,r10,r7,lsl#14 | ||
222 | eor r9,r9,r8,lsr#9 | ||
223 | eor r10,r10,r7,lsr#9 | ||
224 | eor r9,r9,r7,lsl#23 | ||
225 | eor r10,r10,r8,lsl#23 @ Sigma1(e) | ||
226 | adds r3,r3,r9 | ||
227 | ldr r9,[sp,#40+0] @ f.lo | ||
228 | adc r4,r4,r10 @ T += Sigma1(e) | ||
229 | ldr r10,[sp,#40+4] @ f.hi | ||
230 | adds r3,r3,r11 | ||
231 | ldr r11,[sp,#48+0] @ g.lo | ||
232 | adc r4,r4,r12 @ T += h | ||
233 | ldr r12,[sp,#48+4] @ g.hi | ||
234 | |||
235 | eor r9,r9,r11 | ||
236 | str r7,[sp,#32+0] | ||
237 | eor r10,r10,r12 | ||
238 | str r8,[sp,#32+4] | ||
239 | and r9,r9,r7 | ||
240 | str r5,[sp,#0+0] | ||
241 | and r10,r10,r8 | ||
242 | str r6,[sp,#0+4] | ||
243 | eor r9,r9,r11 | ||
244 | ldr r11,[r14,#LO] @ K[i].lo | ||
245 | eor r10,r10,r12 @ Ch(e,f,g) | ||
246 | ldr r12,[r14,#HI] @ K[i].hi | ||
247 | |||
248 | adds r3,r3,r9 | ||
249 | ldr r7,[sp,#24+0] @ d.lo | ||
250 | adc r4,r4,r10 @ T += Ch(e,f,g) | ||
251 | ldr r8,[sp,#24+4] @ d.hi | ||
252 | adds r3,r3,r11 | ||
253 | and r9,r11,#0xff | ||
254 | adc r4,r4,r12 @ T += K[i] | ||
255 | adds r7,r7,r3 | ||
256 | ldr r11,[sp,#8+0] @ b.lo | ||
257 | adc r8,r8,r4 @ d += T | ||
258 | teq r9,#148 | ||
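@ 148 is the low byte of K[15].lo (0xcf692694 & 0xff): matching it marks
@ the end of the sixteen .L00_15 rounds; .L16_79 later keys off 23, the
@ low byte of K[79].lo (0x4a475817 & 0xff).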
259 | |||
260 | ldr r12,[sp,#16+0] @ c.lo | ||
261 | #if __ARM_ARCH__>=7 | ||
262 | it eq @ Thumb2 thing, sanity check in ARM | ||
263 | #endif | ||
264 | orreq r14,r14,#1 | ||
265 | @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) | ||
266 | @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 | ||
267 | @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 | ||
268 | mov r9,r5,lsr#28 | ||
269 | mov r10,r6,lsr#28 | ||
270 | eor r9,r9,r6,lsl#4 | ||
271 | eor r10,r10,r5,lsl#4 | ||
272 | eor r9,r9,r6,lsr#2 | ||
273 | eor r10,r10,r5,lsr#2 | ||
274 | eor r9,r9,r5,lsl#30 | ||
275 | eor r10,r10,r6,lsl#30 | ||
276 | eor r9,r9,r6,lsr#7 | ||
277 | eor r10,r10,r5,lsr#7 | ||
278 | eor r9,r9,r5,lsl#25 | ||
279 | eor r10,r10,r6,lsl#25 @ Sigma0(a) | ||
280 | adds r3,r3,r9 | ||
281 | and r9,r5,r11 | ||
282 | adc r4,r4,r10 @ T += Sigma0(a) | ||
283 | |||
284 | ldr r10,[sp,#8+4] @ b.hi | ||
285 | orr r5,r5,r11 | ||
286 | ldr r11,[sp,#16+4] @ c.hi | ||
287 | and r5,r5,r12 | ||
288 | and r12,r6,r10 | ||
289 | orr r6,r6,r10 | ||
290 | orr r5,r5,r9 @ Maj(a,b,c).lo | ||
291 | and r6,r6,r11 | ||
292 | adds r5,r5,r3 | ||
293 | orr r6,r6,r12 @ Maj(a,b,c).hi | ||
294 | sub sp,sp,#8 | ||
295 | adc r6,r6,r4 @ h += T | ||
296 | tst r14,#1 | ||
297 | add r14,r14,#8 | ||
298 | tst r14,#1 | ||
299 | beq .L00_15 | ||
300 | ldr r9,[sp,#184+0] | ||
301 | ldr r10,[sp,#184+4] | ||
302 | bic r14,r14,#1 | ||
303 | .L16_79: | ||
304 | @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) | ||
305 | @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 | ||
306 | @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 | ||
307 | mov r3,r9,lsr#1 | ||
308 | ldr r11,[sp,#80+0] | ||
309 | mov r4,r10,lsr#1 | ||
310 | ldr r12,[sp,#80+4] | ||
311 | eor r3,r3,r10,lsl#31 | ||
312 | eor r4,r4,r9,lsl#31 | ||
313 | eor r3,r3,r9,lsr#8 | ||
314 | eor r4,r4,r10,lsr#8 | ||
315 | eor r3,r3,r10,lsl#24 | ||
316 | eor r4,r4,r9,lsl#24 | ||
317 | eor r3,r3,r9,lsr#7 | ||
318 | eor r4,r4,r10,lsr#7 | ||
319 | eor r3,r3,r10,lsl#25 | ||
320 | |||
321 | @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) | ||
322 | @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 | ||
323 | @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 | ||
324 | mov r9,r11,lsr#19 | ||
325 | mov r10,r12,lsr#19 | ||
326 | eor r9,r9,r12,lsl#13 | ||
327 | eor r10,r10,r11,lsl#13 | ||
328 | eor r9,r9,r12,lsr#29 | ||
329 | eor r10,r10,r11,lsr#29 | ||
330 | eor r9,r9,r11,lsl#3 | ||
331 | eor r10,r10,r12,lsl#3 | ||
332 | eor r9,r9,r11,lsr#6 | ||
333 | eor r10,r10,r12,lsr#6 | ||
334 | ldr r11,[sp,#120+0] | ||
335 | eor r9,r9,r12,lsl#26 | ||
336 | |||
337 | ldr r12,[sp,#120+4] | ||
338 | adds r3,r3,r9 | ||
339 | ldr r9,[sp,#192+0] | ||
340 | adc r4,r4,r10 | ||
341 | |||
342 | ldr r10,[sp,#192+4] | ||
343 | adds r3,r3,r11 | ||
344 | adc r4,r4,r12 | ||
345 | adds r3,r3,r9 | ||
346 | adc r4,r4,r10 | ||
347 | @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) | ||
348 | @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 | ||
349 | @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 | ||
350 | mov r9,r7,lsr#14 | ||
351 | str r3,[sp,#64+0] | ||
352 | mov r10,r8,lsr#14 | ||
353 | str r4,[sp,#64+4] | ||
354 | eor r9,r9,r8,lsl#18 | ||
355 | ldr r11,[sp,#56+0] @ h.lo | ||
356 | eor r10,r10,r7,lsl#18 | ||
357 | ldr r12,[sp,#56+4] @ h.hi | ||
358 | eor r9,r9,r7,lsr#18 | ||
359 | eor r10,r10,r8,lsr#18 | ||
360 | eor r9,r9,r8,lsl#14 | ||
361 | eor r10,r10,r7,lsl#14 | ||
362 | eor r9,r9,r8,lsr#9 | ||
363 | eor r10,r10,r7,lsr#9 | ||
364 | eor r9,r9,r7,lsl#23 | ||
365 | eor r10,r10,r8,lsl#23 @ Sigma1(e) | ||
366 | adds r3,r3,r9 | ||
367 | ldr r9,[sp,#40+0] @ f.lo | ||
368 | adc r4,r4,r10 @ T += Sigma1(e) | ||
369 | ldr r10,[sp,#40+4] @ f.hi | ||
370 | adds r3,r3,r11 | ||
371 | ldr r11,[sp,#48+0] @ g.lo | ||
372 | adc r4,r4,r12 @ T += h | ||
373 | ldr r12,[sp,#48+4] @ g.hi | ||
374 | |||
375 | eor r9,r9,r11 | ||
376 | str r7,[sp,#32+0] | ||
377 | eor r10,r10,r12 | ||
378 | str r8,[sp,#32+4] | ||
379 | and r9,r9,r7 | ||
380 | str r5,[sp,#0+0] | ||
381 | and r10,r10,r8 | ||
382 | str r6,[sp,#0+4] | ||
383 | eor r9,r9,r11 | ||
384 | ldr r11,[r14,#LO] @ K[i].lo | ||
385 | eor r10,r10,r12 @ Ch(e,f,g) | ||
386 | ldr r12,[r14,#HI] @ K[i].hi | ||
387 | |||
388 | adds r3,r3,r9 | ||
389 | ldr r7,[sp,#24+0] @ d.lo | ||
390 | adc r4,r4,r10 @ T += Ch(e,f,g) | ||
391 | ldr r8,[sp,#24+4] @ d.hi | ||
392 | adds r3,r3,r11 | ||
393 | and r9,r11,#0xff | ||
394 | adc r4,r4,r12 @ T += K[i] | ||
395 | adds r7,r7,r3 | ||
396 | ldr r11,[sp,#8+0] @ b.lo | ||
397 | adc r8,r8,r4 @ d += T | ||
398 | teq r9,#23 | ||
399 | |||
400 | ldr r12,[sp,#16+0] @ c.lo | ||
401 | #if __ARM_ARCH__>=7 | ||
402 | it eq @ Thumb2 thing, sanity check in ARM | ||
403 | #endif | ||
404 | orreq r14,r14,#1 | ||
405 | @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) | ||
406 | @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 | ||
407 | @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 | ||
408 | mov r9,r5,lsr#28 | ||
409 | mov r10,r6,lsr#28 | ||
410 | eor r9,r9,r6,lsl#4 | ||
411 | eor r10,r10,r5,lsl#4 | ||
412 | eor r9,r9,r6,lsr#2 | ||
413 | eor r10,r10,r5,lsr#2 | ||
414 | eor r9,r9,r5,lsl#30 | ||
415 | eor r10,r10,r6,lsl#30 | ||
416 | eor r9,r9,r6,lsr#7 | ||
417 | eor r10,r10,r5,lsr#7 | ||
418 | eor r9,r9,r5,lsl#25 | ||
419 | eor r10,r10,r6,lsl#25 @ Sigma0(a) | ||
420 | adds r3,r3,r9 | ||
421 | and r9,r5,r11 | ||
422 | adc r4,r4,r10 @ T += Sigma0(a) | ||
423 | |||
424 | ldr r10,[sp,#8+4] @ b.hi | ||
425 | orr r5,r5,r11 | ||
426 | ldr r11,[sp,#16+4] @ c.hi | ||
427 | and r5,r5,r12 | ||
428 | and r12,r6,r10 | ||
429 | orr r6,r6,r10 | ||
430 | orr r5,r5,r9 @ Maj(a,b,c).lo | ||
431 | and r6,r6,r11 | ||
432 | adds r5,r5,r3 | ||
433 | orr r6,r6,r12 @ Maj(a,b,c).hi | ||
434 | sub sp,sp,#8 | ||
435 | adc r6,r6,r4 @ h += T | ||
436 | tst r14,#1 | ||
437 | add r14,r14,#8 | ||
438 | #if __ARM_ARCH__>=7 | ||
439 | ittt eq @ Thumb2 thing, sanity check in ARM | ||
440 | #endif | ||
441 | ldreq r9,[sp,#184+0] | ||
442 | ldreq r10,[sp,#184+4] | ||
443 | beq .L16_79 | ||
444 | bic r14,r14,#1 | ||
445 | |||
446 | ldr r3,[sp,#8+0] | ||
447 | ldr r4,[sp,#8+4] | ||
448 | ldr r9, [r0,#0+LO] | ||
449 | ldr r10, [r0,#0+HI] | ||
450 | ldr r11, [r0,#8+LO] | ||
451 | ldr r12, [r0,#8+HI] | ||
452 | adds r9,r5,r9 | ||
453 | str r9, [r0,#0+LO] | ||
454 | adc r10,r6,r10 | ||
455 | str r10, [r0,#0+HI] | ||
456 | adds r11,r3,r11 | ||
457 | str r11, [r0,#8+LO] | ||
458 | adc r12,r4,r12 | ||
459 | str r12, [r0,#8+HI] | ||
460 | |||
461 | ldr r5,[sp,#16+0] | ||
462 | ldr r6,[sp,#16+4] | ||
463 | ldr r3,[sp,#24+0] | ||
464 | ldr r4,[sp,#24+4] | ||
465 | ldr r9, [r0,#16+LO] | ||
466 | ldr r10, [r0,#16+HI] | ||
467 | ldr r11, [r0,#24+LO] | ||
468 | ldr r12, [r0,#24+HI] | ||
469 | adds r9,r5,r9 | ||
470 | str r9, [r0,#16+LO] | ||
471 | adc r10,r6,r10 | ||
472 | str r10, [r0,#16+HI] | ||
473 | adds r11,r3,r11 | ||
474 | str r11, [r0,#24+LO] | ||
475 | adc r12,r4,r12 | ||
476 | str r12, [r0,#24+HI] | ||
477 | |||
478 | ldr r3,[sp,#40+0] | ||
479 | ldr r4,[sp,#40+4] | ||
480 | ldr r9, [r0,#32+LO] | ||
481 | ldr r10, [r0,#32+HI] | ||
482 | ldr r11, [r0,#40+LO] | ||
483 | ldr r12, [r0,#40+HI] | ||
484 | adds r7,r7,r9 | ||
485 | str r7,[r0,#32+LO] | ||
486 | adc r8,r8,r10 | ||
487 | str r8,[r0,#32+HI] | ||
488 | adds r11,r3,r11 | ||
489 | str r11, [r0,#40+LO] | ||
490 | adc r12,r4,r12 | ||
491 | str r12, [r0,#40+HI] | ||
492 | |||
493 | ldr r5,[sp,#48+0] | ||
494 | ldr r6,[sp,#48+4] | ||
495 | ldr r3,[sp,#56+0] | ||
496 | ldr r4,[sp,#56+4] | ||
497 | ldr r9, [r0,#48+LO] | ||
498 | ldr r10, [r0,#48+HI] | ||
499 | ldr r11, [r0,#56+LO] | ||
500 | ldr r12, [r0,#56+HI] | ||
501 | adds r9,r5,r9 | ||
502 | str r9, [r0,#48+LO] | ||
503 | adc r10,r6,r10 | ||
504 | str r10, [r0,#48+HI] | ||
505 | adds r11,r3,r11 | ||
506 | str r11, [r0,#56+LO] | ||
507 | adc r12,r4,r12 | ||
508 | str r12, [r0,#56+HI] | ||
509 | |||
510 | add sp,sp,#640 | ||
511 | sub r14,r14,#640 | ||
512 | |||
513 | teq r1,r2 | ||
514 | bne .Loop | ||
515 | |||
516 | add sp,sp,#8*9 @ destroy frame | ||
517 | #if __ARM_ARCH__>=5 | ||
518 | ldmia sp!,{r4-r12,pc} | ||
519 | #else | ||
520 | ldmia sp!,{r4-r12,lr} | ||
521 | tst lr,#1 | ||
522 | moveq pc,lr @ be binary compatible with V4, yet | ||
523 | .word 0xe12fff1e @ interoperable with Thumb ISA:-) | ||
524 | #endif | ||
525 | .size sha512_block_data_order,.-sha512_block_data_order | ||
526 | #if __ARM_MAX_ARCH__>=7 | ||
527 | .arch armv7-a | ||
528 | .fpu neon | ||
529 | |||
530 | .global sha512_block_data_order_neon | ||
531 | .type sha512_block_data_order_neon,%function | ||
532 | .align 4 | ||
533 | sha512_block_data_order_neon: | ||
534 | .LNEON: | ||
535 | dmb @ errata #451034 on early Cortex A8 | ||
536 | add r2,r1,r2,lsl#7 @ len to point at the end of inp | ||
537 | VFP_ABI_PUSH | ||
538 | adrl r3,K512 | ||
539 | vldmia r0,{d16-d23} @ load context | ||
540 | .Loop_neon: | ||
541 | vshr.u64 d24,d20,#14 @ 0 | ||
542 | #if 0<16 | ||
543 | vld1.64 {d0},[r1]! @ handles unaligned | ||
544 | #endif | ||
545 | vshr.u64 d25,d20,#18 | ||
546 | #if 0>0 | ||
547 | vadd.i64 d16,d30 @ h+=Maj from the past | ||
548 | #endif | ||
549 | vshr.u64 d26,d20,#41 | ||
550 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
551 | vsli.64 d24,d20,#50 | ||
552 | vsli.64 d25,d20,#46 | ||
553 | vmov d29,d20 | ||
554 | vsli.64 d26,d20,#23 | ||
555 | #if 0<16 && defined(__ARMEL__) | ||
556 | vrev64.8 d0,d0 | ||
557 | #endif | ||
558 | veor d25,d24 | ||
559 | vbsl d29,d21,d22 @ Ch(e,f,g) | ||
560 | vshr.u64 d24,d16,#28 | ||
561 | veor d26,d25 @ Sigma1(e) | ||
562 | vadd.i64 d27,d29,d23 | ||
563 | vshr.u64 d25,d16,#34 | ||
564 | vsli.64 d24,d16,#36 | ||
565 | vadd.i64 d27,d26 | ||
566 | vshr.u64 d26,d16,#39 | ||
567 | vadd.i64 d28,d0 | ||
568 | vsli.64 d25,d16,#30 | ||
569 | veor d30,d16,d17 | ||
570 | vsli.64 d26,d16,#25 | ||
571 | veor d23,d24,d25 | ||
572 | vadd.i64 d27,d28 | ||
573 | vbsl d30,d18,d17 @ Maj(a,b,c) | ||
574 | veor d23,d26 @ Sigma0(a) | ||
575 | vadd.i64 d19,d27 | ||
576 | vadd.i64 d30,d27 | ||
577 | @ vadd.i64 d23,d30 | ||
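@ Note the vbsl (bitwise select) trick above: with d29 preloaded with e,
@ "vbsl d29,d21,d22" yields (e & f) | (~e & g) = Ch(e,f,g), and with d30
@ preloaded with a^b, "vbsl d30,d18,d17" yields
@ ((a^b) & c) | (~(a^b) & b) = Maj(a,b,c), one instruction each.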
578 | vshr.u64 d24,d19,#14 @ 1 | ||
579 | #if 1<16 | ||
580 | vld1.64 {d1},[r1]! @ handles unaligned | ||
581 | #endif | ||
582 | vshr.u64 d25,d19,#18 | ||
583 | #if 1>0 | ||
584 | vadd.i64 d23,d30 @ h+=Maj from the past | ||
585 | #endif | ||
586 | vshr.u64 d26,d19,#41 | ||
587 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
588 | vsli.64 d24,d19,#50 | ||
589 | vsli.64 d25,d19,#46 | ||
590 | vmov d29,d19 | ||
591 | vsli.64 d26,d19,#23 | ||
592 | #if 1<16 && defined(__ARMEL__) | ||
593 | vrev64.8 d1,d1 | ||
594 | #endif | ||
595 | veor d25,d24 | ||
596 | vbsl d29,d20,d21 @ Ch(e,f,g) | ||
597 | vshr.u64 d24,d23,#28 | ||
598 | veor d26,d25 @ Sigma1(e) | ||
599 | vadd.i64 d27,d29,d22 | ||
600 | vshr.u64 d25,d23,#34 | ||
601 | vsli.64 d24,d23,#36 | ||
602 | vadd.i64 d27,d26 | ||
603 | vshr.u64 d26,d23,#39 | ||
604 | vadd.i64 d28,d1 | ||
605 | vsli.64 d25,d23,#30 | ||
606 | veor d30,d23,d16 | ||
607 | vsli.64 d26,d23,#25 | ||
608 | veor d22,d24,d25 | ||
609 | vadd.i64 d27,d28 | ||
610 | vbsl d30,d17,d16 @ Maj(a,b,c) | ||
611 | veor d22,d26 @ Sigma0(a) | ||
612 | vadd.i64 d18,d27 | ||
613 | vadd.i64 d30,d27 | ||
614 | @ vadd.i64 d22,d30 | ||
615 | vshr.u64 d24,d18,#14 @ 2 | ||
616 | #if 2<16 | ||
617 | vld1.64 {d2},[r1]! @ handles unaligned | ||
618 | #endif | ||
619 | vshr.u64 d25,d18,#18 | ||
620 | #if 2>0 | ||
621 | vadd.i64 d22,d30 @ h+=Maj from the past | ||
622 | #endif | ||
623 | vshr.u64 d26,d18,#41 | ||
624 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
625 | vsli.64 d24,d18,#50 | ||
626 | vsli.64 d25,d18,#46 | ||
627 | vmov d29,d18 | ||
628 | vsli.64 d26,d18,#23 | ||
629 | #if 2<16 && defined(__ARMEL__) | ||
630 | vrev64.8 d2,d2 | ||
631 | #endif | ||
632 | veor d25,d24 | ||
633 | vbsl d29,d19,d20 @ Ch(e,f,g) | ||
634 | vshr.u64 d24,d22,#28 | ||
635 | veor d26,d25 @ Sigma1(e) | ||
636 | vadd.i64 d27,d29,d21 | ||
637 | vshr.u64 d25,d22,#34 | ||
638 | vsli.64 d24,d22,#36 | ||
639 | vadd.i64 d27,d26 | ||
640 | vshr.u64 d26,d22,#39 | ||
641 | vadd.i64 d28,d2 | ||
642 | vsli.64 d25,d22,#30 | ||
643 | veor d30,d22,d23 | ||
644 | vsli.64 d26,d22,#25 | ||
645 | veor d21,d24,d25 | ||
646 | vadd.i64 d27,d28 | ||
647 | vbsl d30,d16,d23 @ Maj(a,b,c) | ||
648 | veor d21,d26 @ Sigma0(a) | ||
649 | vadd.i64 d17,d27 | ||
650 | vadd.i64 d30,d27 | ||
651 | @ vadd.i64 d21,d30 | ||
652 | vshr.u64 d24,d17,#14 @ 3 | ||
653 | #if 3<16 | ||
654 | vld1.64 {d3},[r1]! @ handles unaligned | ||
655 | #endif | ||
656 | vshr.u64 d25,d17,#18 | ||
657 | #if 3>0 | ||
658 | vadd.i64 d21,d30 @ h+=Maj from the past | ||
659 | #endif | ||
660 | vshr.u64 d26,d17,#41 | ||
661 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
662 | vsli.64 d24,d17,#50 | ||
663 | vsli.64 d25,d17,#46 | ||
664 | vmov d29,d17 | ||
665 | vsli.64 d26,d17,#23 | ||
666 | #if 3<16 && defined(__ARMEL__) | ||
667 | vrev64.8 d3,d3 | ||
668 | #endif | ||
669 | veor d25,d24 | ||
670 | vbsl d29,d18,d19 @ Ch(e,f,g) | ||
671 | vshr.u64 d24,d21,#28 | ||
672 | veor d26,d25 @ Sigma1(e) | ||
673 | vadd.i64 d27,d29,d20 | ||
674 | vshr.u64 d25,d21,#34 | ||
675 | vsli.64 d24,d21,#36 | ||
676 | vadd.i64 d27,d26 | ||
677 | vshr.u64 d26,d21,#39 | ||
678 | vadd.i64 d28,d3 | ||
679 | vsli.64 d25,d21,#30 | ||
680 | veor d30,d21,d22 | ||
681 | vsli.64 d26,d21,#25 | ||
682 | veor d20,d24,d25 | ||
683 | vadd.i64 d27,d28 | ||
684 | vbsl d30,d23,d22 @ Maj(a,b,c) | ||
685 | veor d20,d26 @ Sigma0(a) | ||
686 | vadd.i64 d16,d27 | ||
687 | vadd.i64 d30,d27 | ||
688 | @ vadd.i64 d20,d30 | ||
689 | vshr.u64 d24,d16,#14 @ 4 | ||
690 | #if 4<16 | ||
691 | vld1.64 {d4},[r1]! @ handles unaligned | ||
692 | #endif | ||
693 | vshr.u64 d25,d16,#18 | ||
694 | #if 4>0 | ||
695 | vadd.i64 d20,d30 @ h+=Maj from the past | ||
696 | #endif | ||
697 | vshr.u64 d26,d16,#41 | ||
698 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
699 | vsli.64 d24,d16,#50 | ||
700 | vsli.64 d25,d16,#46 | ||
701 | vmov d29,d16 | ||
702 | vsli.64 d26,d16,#23 | ||
703 | #if 4<16 && defined(__ARMEL__) | ||
704 | vrev64.8 d4,d4 | ||
705 | #endif | ||
706 | veor d25,d24 | ||
707 | vbsl d29,d17,d18 @ Ch(e,f,g) | ||
708 | vshr.u64 d24,d20,#28 | ||
709 | veor d26,d25 @ Sigma1(e) | ||
710 | vadd.i64 d27,d29,d19 | ||
711 | vshr.u64 d25,d20,#34 | ||
712 | vsli.64 d24,d20,#36 | ||
713 | vadd.i64 d27,d26 | ||
714 | vshr.u64 d26,d20,#39 | ||
715 | vadd.i64 d28,d4 | ||
716 | vsli.64 d25,d20,#30 | ||
717 | veor d30,d20,d21 | ||
718 | vsli.64 d26,d20,#25 | ||
719 | veor d19,d24,d25 | ||
720 | vadd.i64 d27,d28 | ||
721 | vbsl d30,d22,d21 @ Maj(a,b,c) | ||
722 | veor d19,d26 @ Sigma0(a) | ||
723 | vadd.i64 d23,d27 | ||
724 | vadd.i64 d30,d27 | ||
725 | @ vadd.i64 d19,d30 | ||
726 | vshr.u64 d24,d23,#14 @ 5 | ||
727 | #if 5<16 | ||
728 | vld1.64 {d5},[r1]! @ handles unaligned | ||
729 | #endif | ||
730 | vshr.u64 d25,d23,#18 | ||
731 | #if 5>0 | ||
732 | vadd.i64 d19,d30 @ h+=Maj from the past | ||
733 | #endif | ||
734 | vshr.u64 d26,d23,#41 | ||
735 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
736 | vsli.64 d24,d23,#50 | ||
737 | vsli.64 d25,d23,#46 | ||
738 | vmov d29,d23 | ||
739 | vsli.64 d26,d23,#23 | ||
740 | #if 5<16 && defined(__ARMEL__) | ||
741 | vrev64.8 d5,d5 | ||
742 | #endif | ||
743 | veor d25,d24 | ||
744 | vbsl d29,d16,d17 @ Ch(e,f,g) | ||
745 | vshr.u64 d24,d19,#28 | ||
746 | veor d26,d25 @ Sigma1(e) | ||
747 | vadd.i64 d27,d29,d18 | ||
748 | vshr.u64 d25,d19,#34 | ||
749 | vsli.64 d24,d19,#36 | ||
750 | vadd.i64 d27,d26 | ||
751 | vshr.u64 d26,d19,#39 | ||
752 | vadd.i64 d28,d5 | ||
753 | vsli.64 d25,d19,#30 | ||
754 | veor d30,d19,d20 | ||
755 | vsli.64 d26,d19,#25 | ||
756 | veor d18,d24,d25 | ||
757 | vadd.i64 d27,d28 | ||
758 | vbsl d30,d21,d20 @ Maj(a,b,c) | ||
759 | veor d18,d26 @ Sigma0(a) | ||
760 | vadd.i64 d22,d27 | ||
761 | vadd.i64 d30,d27 | ||
762 | @ vadd.i64 d18,d30 | ||
763 | vshr.u64 d24,d22,#14 @ 6 | ||
764 | #if 6<16 | ||
765 | vld1.64 {d6},[r1]! @ handles unaligned | ||
766 | #endif | ||
767 | vshr.u64 d25,d22,#18 | ||
768 | #if 6>0 | ||
769 | vadd.i64 d18,d30 @ h+=Maj from the past | ||
770 | #endif | ||
771 | vshr.u64 d26,d22,#41 | ||
772 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
773 | vsli.64 d24,d22,#50 | ||
774 | vsli.64 d25,d22,#46 | ||
775 | vmov d29,d22 | ||
776 | vsli.64 d26,d22,#23 | ||
777 | #if 6<16 && defined(__ARMEL__) | ||
778 | vrev64.8 d6,d6 | ||
779 | #endif | ||
780 | veor d25,d24 | ||
781 | vbsl d29,d23,d16 @ Ch(e,f,g) | ||
782 | vshr.u64 d24,d18,#28 | ||
783 | veor d26,d25 @ Sigma1(e) | ||
784 | vadd.i64 d27,d29,d17 | ||
785 | vshr.u64 d25,d18,#34 | ||
786 | vsli.64 d24,d18,#36 | ||
787 | vadd.i64 d27,d26 | ||
788 | vshr.u64 d26,d18,#39 | ||
789 | vadd.i64 d28,d6 | ||
790 | vsli.64 d25,d18,#30 | ||
791 | veor d30,d18,d19 | ||
792 | vsli.64 d26,d18,#25 | ||
793 | veor d17,d24,d25 | ||
794 | vadd.i64 d27,d28 | ||
795 | vbsl d30,d20,d19 @ Maj(a,b,c) | ||
796 | veor d17,d26 @ Sigma0(a) | ||
797 | vadd.i64 d21,d27 | ||
798 | vadd.i64 d30,d27 | ||
799 | @ vadd.i64 d17,d30 | ||
800 | vshr.u64 d24,d21,#14 @ 7 | ||
801 | #if 7<16 | ||
802 | vld1.64 {d7},[r1]! @ handles unaligned | ||
803 | #endif | ||
804 | vshr.u64 d25,d21,#18 | ||
805 | #if 7>0 | ||
806 | vadd.i64 d17,d30 @ h+=Maj from the past | ||
807 | #endif | ||
808 | vshr.u64 d26,d21,#41 | ||
809 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
810 | vsli.64 d24,d21,#50 | ||
811 | vsli.64 d25,d21,#46 | ||
812 | vmov d29,d21 | ||
813 | vsli.64 d26,d21,#23 | ||
814 | #if 7<16 && defined(__ARMEL__) | ||
815 | vrev64.8 d7,d7 | ||
816 | #endif | ||
817 | veor d25,d24 | ||
818 | vbsl d29,d22,d23 @ Ch(e,f,g) | ||
819 | vshr.u64 d24,d17,#28 | ||
820 | veor d26,d25 @ Sigma1(e) | ||
821 | vadd.i64 d27,d29,d16 | ||
822 | vshr.u64 d25,d17,#34 | ||
823 | vsli.64 d24,d17,#36 | ||
824 | vadd.i64 d27,d26 | ||
825 | vshr.u64 d26,d17,#39 | ||
826 | vadd.i64 d28,d7 | ||
827 | vsli.64 d25,d17,#30 | ||
828 | veor d30,d17,d18 | ||
829 | vsli.64 d26,d17,#25 | ||
830 | veor d16,d24,d25 | ||
831 | vadd.i64 d27,d28 | ||
832 | vbsl d30,d19,d18 @ Maj(a,b,c) | ||
833 | veor d16,d26 @ Sigma0(a) | ||
834 | vadd.i64 d20,d27 | ||
835 | vadd.i64 d30,d27 | ||
836 | @ vadd.i64 d16,d30 | ||
837 | vshr.u64 d24,d20,#14 @ 8 | ||
838 | #if 8<16 | ||
839 | vld1.64 {d8},[r1]! @ handles unaligned | ||
840 | #endif | ||
841 | vshr.u64 d25,d20,#18 | ||
842 | #if 8>0 | ||
843 | vadd.i64 d16,d30 @ h+=Maj from the past | ||
844 | #endif | ||
845 | vshr.u64 d26,d20,#41 | ||
846 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
847 | vsli.64 d24,d20,#50 | ||
848 | vsli.64 d25,d20,#46 | ||
849 | vmov d29,d20 | ||
850 | vsli.64 d26,d20,#23 | ||
851 | #if 8<16 && defined(__ARMEL__) | ||
852 | vrev64.8 d8,d8 | ||
853 | #endif | ||
854 | veor d25,d24 | ||
855 | vbsl d29,d21,d22 @ Ch(e,f,g) | ||
856 | vshr.u64 d24,d16,#28 | ||
857 | veor d26,d25 @ Sigma1(e) | ||
858 | vadd.i64 d27,d29,d23 | ||
859 | vshr.u64 d25,d16,#34 | ||
860 | vsli.64 d24,d16,#36 | ||
861 | vadd.i64 d27,d26 | ||
862 | vshr.u64 d26,d16,#39 | ||
863 | vadd.i64 d28,d8 | ||
864 | vsli.64 d25,d16,#30 | ||
865 | veor d30,d16,d17 | ||
866 | vsli.64 d26,d16,#25 | ||
867 | veor d23,d24,d25 | ||
868 | vadd.i64 d27,d28 | ||
869 | vbsl d30,d18,d17 @ Maj(a,b,c) | ||
870 | veor d23,d26 @ Sigma0(a) | ||
871 | vadd.i64 d19,d27 | ||
872 | vadd.i64 d30,d27 | ||
873 | @ vadd.i64 d23,d30 | ||
874 | vshr.u64 d24,d19,#14 @ 9 | ||
875 | #if 9<16 | ||
876 | vld1.64 {d9},[r1]! @ handles unaligned | ||
877 | #endif | ||
878 | vshr.u64 d25,d19,#18 | ||
879 | #if 9>0 | ||
880 | vadd.i64 d23,d30 @ h+=Maj from the past | ||
881 | #endif | ||
882 | vshr.u64 d26,d19,#41 | ||
883 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
884 | vsli.64 d24,d19,#50 | ||
885 | vsli.64 d25,d19,#46 | ||
886 | vmov d29,d19 | ||
887 | vsli.64 d26,d19,#23 | ||
888 | #if 9<16 && defined(__ARMEL__) | ||
889 | vrev64.8 d9,d9 | ||
890 | #endif | ||
891 | veor d25,d24 | ||
892 | vbsl d29,d20,d21 @ Ch(e,f,g) | ||
893 | vshr.u64 d24,d23,#28 | ||
894 | veor d26,d25 @ Sigma1(e) | ||
895 | vadd.i64 d27,d29,d22 | ||
896 | vshr.u64 d25,d23,#34 | ||
897 | vsli.64 d24,d23,#36 | ||
898 | vadd.i64 d27,d26 | ||
899 | vshr.u64 d26,d23,#39 | ||
900 | vadd.i64 d28,d9 | ||
901 | vsli.64 d25,d23,#30 | ||
902 | veor d30,d23,d16 | ||
903 | vsli.64 d26,d23,#25 | ||
904 | veor d22,d24,d25 | ||
905 | vadd.i64 d27,d28 | ||
906 | vbsl d30,d17,d16 @ Maj(a,b,c) | ||
907 | veor d22,d26 @ Sigma0(a) | ||
908 | vadd.i64 d18,d27 | ||
909 | vadd.i64 d30,d27 | ||
910 | @ vadd.i64 d22,d30 | ||
911 | vshr.u64 d24,d18,#14 @ 10 | ||
912 | #if 10<16 | ||
913 | vld1.64 {d10},[r1]! @ handles unaligned | ||
914 | #endif | ||
915 | vshr.u64 d25,d18,#18 | ||
916 | #if 10>0 | ||
917 | vadd.i64 d22,d30 @ h+=Maj from the past | ||
918 | #endif | ||
919 | vshr.u64 d26,d18,#41 | ||
920 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
921 | vsli.64 d24,d18,#50 | ||
922 | vsli.64 d25,d18,#46 | ||
923 | vmov d29,d18 | ||
924 | vsli.64 d26,d18,#23 | ||
925 | #if 10<16 && defined(__ARMEL__) | ||
926 | vrev64.8 d10,d10 | ||
927 | #endif | ||
928 | veor d25,d24 | ||
929 | vbsl d29,d19,d20 @ Ch(e,f,g) | ||
930 | vshr.u64 d24,d22,#28 | ||
931 | veor d26,d25 @ Sigma1(e) | ||
932 | vadd.i64 d27,d29,d21 | ||
933 | vshr.u64 d25,d22,#34 | ||
934 | vsli.64 d24,d22,#36 | ||
935 | vadd.i64 d27,d26 | ||
936 | vshr.u64 d26,d22,#39 | ||
937 | vadd.i64 d28,d10 | ||
938 | vsli.64 d25,d22,#30 | ||
939 | veor d30,d22,d23 | ||
940 | vsli.64 d26,d22,#25 | ||
941 | veor d21,d24,d25 | ||
942 | vadd.i64 d27,d28 | ||
943 | vbsl d30,d16,d23 @ Maj(a,b,c) | ||
944 | veor d21,d26 @ Sigma0(a) | ||
945 | vadd.i64 d17,d27 | ||
946 | vadd.i64 d30,d27 | ||
947 | @ vadd.i64 d21,d30 | ||
948 | vshr.u64 d24,d17,#14 @ 11 | ||
949 | #if 11<16 | ||
950 | vld1.64 {d11},[r1]! @ handles unaligned | ||
951 | #endif | ||
952 | vshr.u64 d25,d17,#18 | ||
953 | #if 11>0 | ||
954 | vadd.i64 d21,d30 @ h+=Maj from the past | ||
955 | #endif | ||
956 | vshr.u64 d26,d17,#41 | ||
957 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
958 | vsli.64 d24,d17,#50 | ||
959 | vsli.64 d25,d17,#46 | ||
960 | vmov d29,d17 | ||
961 | vsli.64 d26,d17,#23 | ||
962 | #if 11<16 && defined(__ARMEL__) | ||
963 | vrev64.8 d11,d11 | ||
964 | #endif | ||
965 | veor d25,d24 | ||
966 | vbsl d29,d18,d19 @ Ch(e,f,g) | ||
967 | vshr.u64 d24,d21,#28 | ||
968 | veor d26,d25 @ Sigma1(e) | ||
969 | vadd.i64 d27,d29,d20 | ||
970 | vshr.u64 d25,d21,#34 | ||
971 | vsli.64 d24,d21,#36 | ||
972 | vadd.i64 d27,d26 | ||
973 | vshr.u64 d26,d21,#39 | ||
974 | vadd.i64 d28,d11 | ||
975 | vsli.64 d25,d21,#30 | ||
976 | veor d30,d21,d22 | ||
977 | vsli.64 d26,d21,#25 | ||
978 | veor d20,d24,d25 | ||
979 | vadd.i64 d27,d28 | ||
980 | vbsl d30,d23,d22 @ Maj(a,b,c) | ||
981 | veor d20,d26 @ Sigma0(a) | ||
982 | vadd.i64 d16,d27 | ||
983 | vadd.i64 d30,d27 | ||
984 | @ vadd.i64 d20,d30 | ||
985 | vshr.u64 d24,d16,#14 @ 12 | ||
986 | #if 12<16 | ||
987 | vld1.64 {d12},[r1]! @ handles unaligned | ||
988 | #endif | ||
989 | vshr.u64 d25,d16,#18 | ||
990 | #if 12>0 | ||
991 | vadd.i64 d20,d30 @ h+=Maj from the past | ||
992 | #endif | ||
993 | vshr.u64 d26,d16,#41 | ||
994 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
995 | vsli.64 d24,d16,#50 | ||
996 | vsli.64 d25,d16,#46 | ||
997 | vmov d29,d16 | ||
998 | vsli.64 d26,d16,#23 | ||
999 | #if 12<16 && defined(__ARMEL__) | ||
1000 | vrev64.8 d12,d12 | ||
1001 | #endif | ||
1002 | veor d25,d24 | ||
1003 | vbsl d29,d17,d18 @ Ch(e,f,g) | ||
1004 | vshr.u64 d24,d20,#28 | ||
1005 | veor d26,d25 @ Sigma1(e) | ||
1006 | vadd.i64 d27,d29,d19 | ||
1007 | vshr.u64 d25,d20,#34 | ||
1008 | vsli.64 d24,d20,#36 | ||
1009 | vadd.i64 d27,d26 | ||
1010 | vshr.u64 d26,d20,#39 | ||
1011 | vadd.i64 d28,d12 | ||
1012 | vsli.64 d25,d20,#30 | ||
1013 | veor d30,d20,d21 | ||
1014 | vsli.64 d26,d20,#25 | ||
1015 | veor d19,d24,d25 | ||
1016 | vadd.i64 d27,d28 | ||
1017 | vbsl d30,d22,d21 @ Maj(a,b,c) | ||
1018 | veor d19,d26 @ Sigma0(a) | ||
1019 | vadd.i64 d23,d27 | ||
1020 | vadd.i64 d30,d27 | ||
1021 | @ vadd.i64 d19,d30 | ||
1022 | vshr.u64 d24,d23,#14 @ 13 | ||
1023 | #if 13<16 | ||
1024 | vld1.64 {d13},[r1]! @ handles unaligned | ||
1025 | #endif | ||
1026 | vshr.u64 d25,d23,#18 | ||
1027 | #if 13>0 | ||
1028 | vadd.i64 d19,d30 @ h+=Maj from the past | ||
1029 | #endif | ||
1030 | vshr.u64 d26,d23,#41 | ||
1031 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1032 | vsli.64 d24,d23,#50 | ||
1033 | vsli.64 d25,d23,#46 | ||
1034 | vmov d29,d23 | ||
1035 | vsli.64 d26,d23,#23 | ||
1036 | #if 13<16 && defined(__ARMEL__) | ||
1037 | vrev64.8 d13,d13 | ||
1038 | #endif | ||
1039 | veor d25,d24 | ||
1040 | vbsl d29,d16,d17 @ Ch(e,f,g) | ||
1041 | vshr.u64 d24,d19,#28 | ||
1042 | veor d26,d25 @ Sigma1(e) | ||
1043 | vadd.i64 d27,d29,d18 | ||
1044 | vshr.u64 d25,d19,#34 | ||
1045 | vsli.64 d24,d19,#36 | ||
1046 | vadd.i64 d27,d26 | ||
1047 | vshr.u64 d26,d19,#39 | ||
1048 | vadd.i64 d28,d13 | ||
1049 | vsli.64 d25,d19,#30 | ||
1050 | veor d30,d19,d20 | ||
1051 | vsli.64 d26,d19,#25 | ||
1052 | veor d18,d24,d25 | ||
1053 | vadd.i64 d27,d28 | ||
1054 | vbsl d30,d21,d20 @ Maj(a,b,c) | ||
1055 | veor d18,d26 @ Sigma0(a) | ||
1056 | vadd.i64 d22,d27 | ||
1057 | vadd.i64 d30,d27 | ||
1058 | @ vadd.i64 d18,d30 | ||
1059 | vshr.u64 d24,d22,#14 @ 14 | ||
1060 | #if 14<16 | ||
1061 | vld1.64 {d14},[r1]! @ handles unaligned | ||
1062 | #endif | ||
1063 | vshr.u64 d25,d22,#18 | ||
1064 | #if 14>0 | ||
1065 | vadd.i64 d18,d30 @ h+=Maj from the past | ||
1066 | #endif | ||
1067 | vshr.u64 d26,d22,#41 | ||
1068 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1069 | vsli.64 d24,d22,#50 | ||
1070 | vsli.64 d25,d22,#46 | ||
1071 | vmov d29,d22 | ||
1072 | vsli.64 d26,d22,#23 | ||
1073 | #if 14<16 && defined(__ARMEL__) | ||
1074 | vrev64.8 d14,d14 | ||
1075 | #endif | ||
1076 | veor d25,d24 | ||
1077 | vbsl d29,d23,d16 @ Ch(e,f,g) | ||
1078 | vshr.u64 d24,d18,#28 | ||
1079 | veor d26,d25 @ Sigma1(e) | ||
1080 | vadd.i64 d27,d29,d17 | ||
1081 | vshr.u64 d25,d18,#34 | ||
1082 | vsli.64 d24,d18,#36 | ||
1083 | vadd.i64 d27,d26 | ||
1084 | vshr.u64 d26,d18,#39 | ||
1085 | vadd.i64 d28,d14 | ||
1086 | vsli.64 d25,d18,#30 | ||
1087 | veor d30,d18,d19 | ||
1088 | vsli.64 d26,d18,#25 | ||
1089 | veor d17,d24,d25 | ||
1090 | vadd.i64 d27,d28 | ||
1091 | vbsl d30,d20,d19 @ Maj(a,b,c) | ||
1092 | veor d17,d26 @ Sigma0(a) | ||
1093 | vadd.i64 d21,d27 | ||
1094 | vadd.i64 d30,d27 | ||
1095 | @ vadd.i64 d17,d30 | ||
1096 | vshr.u64 d24,d21,#14 @ 15 | ||
1097 | #if 15<16 | ||
1098 | vld1.64 {d15},[r1]! @ handles unaligned | ||
1099 | #endif | ||
1100 | vshr.u64 d25,d21,#18 | ||
1101 | #if 15>0 | ||
1102 | vadd.i64 d17,d30 @ h+=Maj from the past | ||
1103 | #endif | ||
1104 | vshr.u64 d26,d21,#41 | ||
1105 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1106 | vsli.64 d24,d21,#50 | ||
1107 | vsli.64 d25,d21,#46 | ||
1108 | vmov d29,d21 | ||
1109 | vsli.64 d26,d21,#23 | ||
1110 | #if 15<16 && defined(__ARMEL__) | ||
1111 | vrev64.8 d15,d15 | ||
1112 | #endif | ||
1113 | veor d25,d24 | ||
1114 | vbsl d29,d22,d23 @ Ch(e,f,g) | ||
1115 | vshr.u64 d24,d17,#28 | ||
1116 | veor d26,d25 @ Sigma1(e) | ||
1117 | vadd.i64 d27,d29,d16 | ||
1118 | vshr.u64 d25,d17,#34 | ||
1119 | vsli.64 d24,d17,#36 | ||
1120 | vadd.i64 d27,d26 | ||
1121 | vshr.u64 d26,d17,#39 | ||
1122 | vadd.i64 d28,d15 | ||
1123 | vsli.64 d25,d17,#30 | ||
1124 | veor d30,d17,d18 | ||
1125 | vsli.64 d26,d17,#25 | ||
1126 | veor d16,d24,d25 | ||
1127 | vadd.i64 d27,d28 | ||
1128 | vbsl d30,d19,d18 @ Maj(a,b,c) | ||
1129 | veor d16,d26 @ Sigma0(a) | ||
1130 | vadd.i64 d20,d27 | ||
1131 | vadd.i64 d30,d27 | ||
1132 | @ vadd.i64 d16,d30 | ||
1133 | mov r12,#4 | ||
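@ r12 counts four passes of the sixteen-round body below, covering
@ rounds 16..79, with the message schedule in q0-q7 rewritten in place
@ on each pass.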
1134 | .L16_79_neon: | ||
1135 | subs r12,#1 | ||
1136 | vshr.u64 q12,q7,#19 | ||
1137 | vshr.u64 q13,q7,#61 | ||
1138 | vadd.i64 d16,d30 @ h+=Maj from the past | ||
1139 | vshr.u64 q15,q7,#6 | ||
1140 | vsli.64 q12,q7,#45 | ||
1141 | vext.8 q14,q0,q1,#8 @ X[i+1] | ||
1142 | vsli.64 q13,q7,#3 | ||
1143 | veor q15,q12 | ||
1144 | vshr.u64 q12,q14,#1 | ||
1145 | veor q15,q13 @ sigma1(X[i+14]) | ||
1146 | vshr.u64 q13,q14,#8 | ||
1147 | vadd.i64 q0,q15 | ||
1148 | vshr.u64 q15,q14,#7 | ||
1149 | vsli.64 q12,q14,#63 | ||
1150 | vsli.64 q13,q14,#56 | ||
1151 | vext.8 q14,q4,q5,#8 @ X[i+9] | ||
1152 | veor q15,q12 | ||
1153 | vshr.u64 d24,d20,#14 @ from NEON_00_15 | ||
1154 | vadd.i64 q0,q14 | ||
1155 | vshr.u64 d25,d20,#18 @ from NEON_00_15 | ||
1156 | veor q15,q13 @ sigma0(X[i+1]) | ||
1157 | vshr.u64 d26,d20,#41 @ from NEON_00_15 | ||
1158 | vadd.i64 q0,q15 | ||
1159 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1160 | vsli.64 d24,d20,#50 | ||
1161 | vsli.64 d25,d20,#46 | ||
1162 | vmov d29,d20 | ||
1163 | vsli.64 d26,d20,#23 | ||
1164 | #if 16<16 && defined(__ARMEL__) | ||
1165 | vrev64.8 , | ||
1166 | #endif | ||
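@ (The empty "vrev64.8 ," above is dead code: for rounds 16 and up the
@ generator presumably emits the guard "#if N<16", which is never true
@ here, so the line is discarded by the preprocessor.)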
1167 | veor d25,d24 | ||
1168 | vbsl d29,d21,d22 @ Ch(e,f,g) | ||
1169 | vshr.u64 d24,d16,#28 | ||
1170 | veor d26,d25 @ Sigma1(e) | ||
1171 | vadd.i64 d27,d29,d23 | ||
1172 | vshr.u64 d25,d16,#34 | ||
1173 | vsli.64 d24,d16,#36 | ||
1174 | vadd.i64 d27,d26 | ||
1175 | vshr.u64 d26,d16,#39 | ||
1176 | vadd.i64 d28,d0 | ||
1177 | vsli.64 d25,d16,#30 | ||
1178 | veor d30,d16,d17 | ||
1179 | vsli.64 d26,d16,#25 | ||
1180 | veor d23,d24,d25 | ||
1181 | vadd.i64 d27,d28 | ||
1182 | vbsl d30,d18,d17 @ Maj(a,b,c) | ||
1183 | veor d23,d26 @ Sigma0(a) | ||
1184 | vadd.i64 d19,d27 | ||
1185 | vadd.i64 d30,d27 | ||
1186 | @ vadd.i64 d23,d30 | ||
1187 | vshr.u64 d24,d19,#14 @ 17 | ||
1188 | #if 17<16 | ||
1189 | vld1.64 {d1},[r1]! @ handles unaligned | ||
1190 | #endif | ||
1191 | vshr.u64 d25,d19,#18 | ||
1192 | #if 17>0 | ||
1193 | vadd.i64 d23,d30 @ h+=Maj from the past | ||
1194 | #endif | ||
1195 | vshr.u64 d26,d19,#41 | ||
1196 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1197 | vsli.64 d24,d19,#50 | ||
1198 | vsli.64 d25,d19,#46 | ||
1199 | vmov d29,d19 | ||
1200 | vsli.64 d26,d19,#23 | ||
1201 | #if 17<16 && defined(__ARMEL__) | ||
1202 | vrev64.8 , | ||
1203 | #endif | ||
1204 | veor d25,d24 | ||
1205 | vbsl d29,d20,d21 @ Ch(e,f,g) | ||
1206 | vshr.u64 d24,d23,#28 | ||
1207 | veor d26,d25 @ Sigma1(e) | ||
1208 | vadd.i64 d27,d29,d22 | ||
1209 | vshr.u64 d25,d23,#34 | ||
1210 | vsli.64 d24,d23,#36 | ||
1211 | vadd.i64 d27,d26 | ||
1212 | vshr.u64 d26,d23,#39 | ||
1213 | vadd.i64 d28,d1 | ||
1214 | vsli.64 d25,d23,#30 | ||
1215 | veor d30,d23,d16 | ||
1216 | vsli.64 d26,d23,#25 | ||
1217 | veor d22,d24,d25 | ||
1218 | vadd.i64 d27,d28 | ||
1219 | vbsl d30,d17,d16 @ Maj(a,b,c) | ||
1220 | veor d22,d26 @ Sigma0(a) | ||
1221 | vadd.i64 d18,d27 | ||
1222 | vadd.i64 d30,d27 | ||
1223 | @ vadd.i64 d22,d30 | ||
1224 | vshr.u64 q12,q0,#19 | ||
1225 | vshr.u64 q13,q0,#61 | ||
1226 | vadd.i64 d22,d30 @ h+=Maj from the past | ||
1227 | vshr.u64 q15,q0,#6 | ||
1228 | vsli.64 q12,q0,#45 | ||
1229 | vext.8 q14,q1,q2,#8 @ X[i+1] | ||
1230 | vsli.64 q13,q0,#3 | ||
1231 | veor q15,q12 | ||
1232 | vshr.u64 q12,q14,#1 | ||
1233 | veor q15,q13 @ sigma1(X[i+14]) | ||
1234 | vshr.u64 q13,q14,#8 | ||
1235 | vadd.i64 q1,q15 | ||
1236 | vshr.u64 q15,q14,#7 | ||
1237 | vsli.64 q12,q14,#63 | ||
1238 | vsli.64 q13,q14,#56 | ||
1239 | vext.8 q14,q5,q6,#8 @ X[i+9] | ||
1240 | veor q15,q12 | ||
1241 | vshr.u64 d24,d18,#14 @ from NEON_00_15 | ||
1242 | vadd.i64 q1,q14 | ||
1243 | vshr.u64 d25,d18,#18 @ from NEON_00_15 | ||
1244 | veor q15,q13 @ sigma0(X[i+1]) | ||
1245 | vshr.u64 d26,d18,#41 @ from NEON_00_15 | ||
1246 | vadd.i64 q1,q15 | ||
1247 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1248 | vsli.64 d24,d18,#50 | ||
1249 | vsli.64 d25,d18,#46 | ||
1250 | vmov d29,d18 | ||
1251 | vsli.64 d26,d18,#23 | ||
1252 | #if 18<16 && defined(__ARMEL__) | ||
1253 | vrev64.8 , | ||
1254 | #endif | ||
1255 | veor d25,d24 | ||
1256 | vbsl d29,d19,d20 @ Ch(e,f,g) | ||
1257 | vshr.u64 d24,d22,#28 | ||
1258 | veor d26,d25 @ Sigma1(e) | ||
1259 | vadd.i64 d27,d29,d21 | ||
1260 | vshr.u64 d25,d22,#34 | ||
1261 | vsli.64 d24,d22,#36 | ||
1262 | vadd.i64 d27,d26 | ||
1263 | vshr.u64 d26,d22,#39 | ||
1264 | vadd.i64 d28,d2 | ||
1265 | vsli.64 d25,d22,#30 | ||
1266 | veor d30,d22,d23 | ||
1267 | vsli.64 d26,d22,#25 | ||
1268 | veor d21,d24,d25 | ||
1269 | vadd.i64 d27,d28 | ||
1270 | vbsl d30,d16,d23 @ Maj(a,b,c) | ||
1271 | veor d21,d26 @ Sigma0(a) | ||
1272 | vadd.i64 d17,d27 | ||
1273 | vadd.i64 d30,d27 | ||
1274 | @ vadd.i64 d21,d30 | ||
1275 | vshr.u64 d24,d17,#14 @ 19 | ||
1276 | #if 19<16 | ||
1277 | vld1.64 {d3},[r1]! @ handles unaligned | ||
1278 | #endif | ||
1279 | vshr.u64 d25,d17,#18 | ||
1280 | #if 19>0 | ||
1281 | vadd.i64 d21,d30 @ h+=Maj from the past | ||
1282 | #endif | ||
1283 | vshr.u64 d26,d17,#41 | ||
1284 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1285 | vsli.64 d24,d17,#50 | ||
1286 | vsli.64 d25,d17,#46 | ||
1287 | vmov d29,d17 | ||
1288 | vsli.64 d26,d17,#23 | ||
1289 | #if 19<16 && defined(__ARMEL__) | ||
1290 | vrev64.8 , | ||
1291 | #endif | ||
1292 | veor d25,d24 | ||
1293 | vbsl d29,d18,d19 @ Ch(e,f,g) | ||
1294 | vshr.u64 d24,d21,#28 | ||
1295 | veor d26,d25 @ Sigma1(e) | ||
1296 | vadd.i64 d27,d29,d20 | ||
1297 | vshr.u64 d25,d21,#34 | ||
1298 | vsli.64 d24,d21,#36 | ||
1299 | vadd.i64 d27,d26 | ||
1300 | vshr.u64 d26,d21,#39 | ||
1301 | vadd.i64 d28,d3 | ||
1302 | vsli.64 d25,d21,#30 | ||
1303 | veor d30,d21,d22 | ||
1304 | vsli.64 d26,d21,#25 | ||
1305 | veor d20,d24,d25 | ||
1306 | vadd.i64 d27,d28 | ||
1307 | vbsl d30,d23,d22 @ Maj(a,b,c) | ||
1308 | veor d20,d26 @ Sigma0(a) | ||
1309 | vadd.i64 d16,d27 | ||
1310 | vadd.i64 d30,d27 | ||
1311 | @ vadd.i64 d20,d30 | ||
1312 | vshr.u64 q12,q1,#19 | ||
1313 | vshr.u64 q13,q1,#61 | ||
1314 | vadd.i64 d20,d30 @ h+=Maj from the past | ||
1315 | vshr.u64 q15,q1,#6 | ||
1316 | vsli.64 q12,q1,#45 | ||
1317 | vext.8 q14,q2,q3,#8 @ X[i+1] | ||
1318 | vsli.64 q13,q1,#3 | ||
1319 | veor q15,q12 | ||
1320 | vshr.u64 q12,q14,#1 | ||
1321 | veor q15,q13 @ sigma1(X[i+14]) | ||
1322 | vshr.u64 q13,q14,#8 | ||
1323 | vadd.i64 q2,q15 | ||
1324 | vshr.u64 q15,q14,#7 | ||
1325 | vsli.64 q12,q14,#63 | ||
1326 | vsli.64 q13,q14,#56 | ||
1327 | vext.8 q14,q6,q7,#8 @ X[i+9] | ||
1328 | veor q15,q12 | ||
1329 | vshr.u64 d24,d16,#14 @ from NEON_00_15 | ||
1330 | vadd.i64 q2,q14 | ||
1331 | vshr.u64 d25,d16,#18 @ from NEON_00_15 | ||
1332 | veor q15,q13 @ sigma0(X[i+1]) | ||
1333 | vshr.u64 d26,d16,#41 @ from NEON_00_15 | ||
1334 | vadd.i64 q2,q15 | ||
1335 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1336 | vsli.64 d24,d16,#50 | ||
1337 | vsli.64 d25,d16,#46 | ||
1338 | vmov d29,d16 | ||
1339 | vsli.64 d26,d16,#23 | ||
1340 | #if 20<16 && defined(__ARMEL__) | ||
1341 | vrev64.8 , | ||
1342 | #endif | ||
1343 | veor d25,d24 | ||
1344 | vbsl d29,d17,d18 @ Ch(e,f,g) | ||
1345 | vshr.u64 d24,d20,#28 | ||
1346 | veor d26,d25 @ Sigma1(e) | ||
1347 | vadd.i64 d27,d29,d19 | ||
1348 | vshr.u64 d25,d20,#34 | ||
1349 | vsli.64 d24,d20,#36 | ||
1350 | vadd.i64 d27,d26 | ||
1351 | vshr.u64 d26,d20,#39 | ||
1352 | vadd.i64 d28,d4 | ||
1353 | vsli.64 d25,d20,#30 | ||
1354 | veor d30,d20,d21 | ||
1355 | vsli.64 d26,d20,#25 | ||
1356 | veor d19,d24,d25 | ||
1357 | vadd.i64 d27,d28 | ||
1358 | vbsl d30,d22,d21 @ Maj(a,b,c) | ||
1359 | veor d19,d26 @ Sigma0(a) | ||
1360 | vadd.i64 d23,d27 | ||
1361 | vadd.i64 d30,d27 | ||
1362 | @ vadd.i64 d19,d30 | ||
1363 | vshr.u64 d24,d23,#14 @ 21 | ||
1364 | #if 21<16 | ||
1365 | vld1.64 {d5},[r1]! @ handles unaligned | ||
1366 | #endif | ||
1367 | vshr.u64 d25,d23,#18 | ||
1368 | #if 21>0 | ||
1369 | vadd.i64 d19,d30 @ h+=Maj from the past | ||
1370 | #endif | ||
1371 | vshr.u64 d26,d23,#41 | ||
1372 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1373 | vsli.64 d24,d23,#50 | ||
1374 | vsli.64 d25,d23,#46 | ||
1375 | vmov d29,d23 | ||
1376 | vsli.64 d26,d23,#23 | ||
1377 | #if 21<16 && defined(__ARMEL__) | ||
1378 | vrev64.8 , | ||
1379 | #endif | ||
1380 | veor d25,d24 | ||
1381 | vbsl d29,d16,d17 @ Ch(e,f,g) | ||
1382 | vshr.u64 d24,d19,#28 | ||
1383 | veor d26,d25 @ Sigma1(e) | ||
1384 | vadd.i64 d27,d29,d18 | ||
1385 | vshr.u64 d25,d19,#34 | ||
1386 | vsli.64 d24,d19,#36 | ||
1387 | vadd.i64 d27,d26 | ||
1388 | vshr.u64 d26,d19,#39 | ||
1389 | vadd.i64 d28,d5 | ||
1390 | vsli.64 d25,d19,#30 | ||
1391 | veor d30,d19,d20 | ||
1392 | vsli.64 d26,d19,#25 | ||
1393 | veor d18,d24,d25 | ||
1394 | vadd.i64 d27,d28 | ||
1395 | vbsl d30,d21,d20 @ Maj(a,b,c) | ||
1396 | veor d18,d26 @ Sigma0(a) | ||
1397 | vadd.i64 d22,d27 | ||
1398 | vadd.i64 d30,d27 | ||
1399 | @ vadd.i64 d18,d30 | ||
1400 | vshr.u64 q12,q2,#19 | ||
1401 | vshr.u64 q13,q2,#61 | ||
1402 | vadd.i64 d18,d30 @ h+=Maj from the past | ||
1403 | vshr.u64 q15,q2,#6 | ||
1404 | vsli.64 q12,q2,#45 | ||
1405 | vext.8 q14,q3,q4,#8 @ X[i+1] | ||
1406 | vsli.64 q13,q2,#3 | ||
1407 | veor q15,q12 | ||
1408 | vshr.u64 q12,q14,#1 | ||
1409 | veor q15,q13 @ sigma1(X[i+14]) | ||
1410 | vshr.u64 q13,q14,#8 | ||
1411 | vadd.i64 q3,q15 | ||
1412 | vshr.u64 q15,q14,#7 | ||
1413 | vsli.64 q12,q14,#63 | ||
1414 | vsli.64 q13,q14,#56 | ||
1415 | vext.8 q14,q7,q0,#8 @ X[i+9] | ||
1416 | veor q15,q12 | ||
1417 | vshr.u64 d24,d22,#14 @ from NEON_00_15 | ||
1418 | vadd.i64 q3,q14 | ||
1419 | vshr.u64 d25,d22,#18 @ from NEON_00_15 | ||
1420 | veor q15,q13 @ sigma0(X[i+1]) | ||
1421 | vshr.u64 d26,d22,#41 @ from NEON_00_15 | ||
1422 | vadd.i64 q3,q15 | ||
1423 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1424 | vsli.64 d24,d22,#50 | ||
1425 | vsli.64 d25,d22,#46 | ||
1426 | vmov d29,d22 | ||
1427 | vsli.64 d26,d22,#23 | ||
1428 | #if 22<16 && defined(__ARMEL__) | ||
1429 | vrev64.8 , | ||
1430 | #endif | ||
1431 | veor d25,d24 | ||
1432 | vbsl d29,d23,d16 @ Ch(e,f,g) | ||
1433 | vshr.u64 d24,d18,#28 | ||
1434 | veor d26,d25 @ Sigma1(e) | ||
1435 | vadd.i64 d27,d29,d17 | ||
1436 | vshr.u64 d25,d18,#34 | ||
1437 | vsli.64 d24,d18,#36 | ||
1438 | vadd.i64 d27,d26 | ||
1439 | vshr.u64 d26,d18,#39 | ||
1440 | vadd.i64 d28,d6 | ||
1441 | vsli.64 d25,d18,#30 | ||
1442 | veor d30,d18,d19 | ||
1443 | vsli.64 d26,d18,#25 | ||
1444 | veor d17,d24,d25 | ||
1445 | vadd.i64 d27,d28 | ||
1446 | vbsl d30,d20,d19 @ Maj(a,b,c) | ||
1447 | veor d17,d26 @ Sigma0(a) | ||
1448 | vadd.i64 d21,d27 | ||
1449 | vadd.i64 d30,d27 | ||
1450 | @ vadd.i64 d17,d30 | ||
1451 | vshr.u64 d24,d21,#14 @ 23 | ||
1452 | #if 23<16 | ||
1453 | vld1.64 {d7},[r1]! @ handles unaligned | ||
1454 | #endif | ||
1455 | vshr.u64 d25,d21,#18 | ||
1456 | #if 23>0 | ||
1457 | vadd.i64 d17,d30 @ h+=Maj from the past | ||
1458 | #endif | ||
1459 | vshr.u64 d26,d21,#41 | ||
1460 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1461 | vsli.64 d24,d21,#50 | ||
1462 | vsli.64 d25,d21,#46 | ||
1463 | vmov d29,d21 | ||
1464 | vsli.64 d26,d21,#23 | ||
1465 | #if 23<16 && defined(__ARMEL__) | ||
1466 | vrev64.8 , | ||
1467 | #endif | ||
1468 | veor d25,d24 | ||
1469 | vbsl d29,d22,d23 @ Ch(e,f,g) | ||
1470 | vshr.u64 d24,d17,#28 | ||
1471 | veor d26,d25 @ Sigma1(e) | ||
1472 | vadd.i64 d27,d29,d16 | ||
1473 | vshr.u64 d25,d17,#34 | ||
1474 | vsli.64 d24,d17,#36 | ||
1475 | vadd.i64 d27,d26 | ||
1476 | vshr.u64 d26,d17,#39 | ||
1477 | vadd.i64 d28,d7 | ||
1478 | vsli.64 d25,d17,#30 | ||
1479 | veor d30,d17,d18 | ||
1480 | vsli.64 d26,d17,#25 | ||
1481 | veor d16,d24,d25 | ||
1482 | vadd.i64 d27,d28 | ||
1483 | vbsl d30,d19,d18 @ Maj(a,b,c) | ||
1484 | veor d16,d26 @ Sigma0(a) | ||
1485 | vadd.i64 d20,d27 | ||
1486 | vadd.i64 d30,d27 | ||
1487 | @ vadd.i64 d16,d30 | ||
1488 | vshr.u64 q12,q3,#19 | ||
1489 | vshr.u64 q13,q3,#61 | ||
1490 | vadd.i64 d16,d30 @ h+=Maj from the past | ||
1491 | vshr.u64 q15,q3,#6 | ||
1492 | vsli.64 q12,q3,#45 | ||
1493 | vext.8 q14,q4,q5,#8 @ X[i+1] | ||
1494 | vsli.64 q13,q3,#3 | ||
1495 | veor q15,q12 | ||
1496 | vshr.u64 q12,q14,#1 | ||
1497 | veor q15,q13 @ sigma1(X[i+14]) | ||
1498 | vshr.u64 q13,q14,#8 | ||
1499 | vadd.i64 q4,q15 | ||
1500 | vshr.u64 q15,q14,#7 | ||
1501 | vsli.64 q12,q14,#63 | ||
1502 | vsli.64 q13,q14,#56 | ||
1503 | vext.8 q14,q0,q1,#8 @ X[i+9] | ||
1504 | veor q15,q12 | ||
1505 | vshr.u64 d24,d20,#14 @ from NEON_00_15 | ||
1506 | vadd.i64 q4,q14 | ||
1507 | vshr.u64 d25,d20,#18 @ from NEON_00_15 | ||
1508 | veor q15,q13 @ sigma0(X[i+1]) | ||
1509 | vshr.u64 d26,d20,#41 @ from NEON_00_15 | ||
1510 | vadd.i64 q4,q15 | ||
1511 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1512 | vsli.64 d24,d20,#50 | ||
1513 | vsli.64 d25,d20,#46 | ||
1514 | vmov d29,d20 | ||
1515 | vsli.64 d26,d20,#23 | ||
1516 | #if 24<16 && defined(__ARMEL__) | ||
1517 | vrev64.8 , | ||
1518 | #endif | ||
1519 | veor d25,d24 | ||
1520 | vbsl d29,d21,d22 @ Ch(e,f,g) | ||
1521 | vshr.u64 d24,d16,#28 | ||
1522 | veor d26,d25 @ Sigma1(e) | ||
1523 | vadd.i64 d27,d29,d23 | ||
1524 | vshr.u64 d25,d16,#34 | ||
1525 | vsli.64 d24,d16,#36 | ||
1526 | vadd.i64 d27,d26 | ||
1527 | vshr.u64 d26,d16,#39 | ||
1528 | vadd.i64 d28,d8 | ||
1529 | vsli.64 d25,d16,#30 | ||
1530 | veor d30,d16,d17 | ||
1531 | vsli.64 d26,d16,#25 | ||
1532 | veor d23,d24,d25 | ||
1533 | vadd.i64 d27,d28 | ||
1534 | vbsl d30,d18,d17 @ Maj(a,b,c) | ||
1535 | veor d23,d26 @ Sigma0(a) | ||
1536 | vadd.i64 d19,d27 | ||
1537 | vadd.i64 d30,d27 | ||
1538 | @ vadd.i64 d23,d30 | ||
1539 | vshr.u64 d24,d19,#14 @ 25 | ||
1540 | #if 25<16 | ||
1541 | vld1.64 {d9},[r1]! @ handles unaligned | ||
1542 | #endif | ||
1543 | vshr.u64 d25,d19,#18 | ||
1544 | #if 25>0 | ||
1545 | vadd.i64 d23,d30 @ h+=Maj from the past | ||
1546 | #endif | ||
1547 | vshr.u64 d26,d19,#41 | ||
1548 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1549 | vsli.64 d24,d19,#50 | ||
1550 | vsli.64 d25,d19,#46 | ||
1551 | vmov d29,d19 | ||
1552 | vsli.64 d26,d19,#23 | ||
1553 | #if 25<16 && defined(__ARMEL__) | ||
1554 | vrev64.8 , | ||
1555 | #endif | ||
1556 | veor d25,d24 | ||
1557 | vbsl d29,d20,d21 @ Ch(e,f,g) | ||
1558 | vshr.u64 d24,d23,#28 | ||
1559 | veor d26,d25 @ Sigma1(e) | ||
1560 | vadd.i64 d27,d29,d22 | ||
1561 | vshr.u64 d25,d23,#34 | ||
1562 | vsli.64 d24,d23,#36 | ||
1563 | vadd.i64 d27,d26 | ||
1564 | vshr.u64 d26,d23,#39 | ||
1565 | vadd.i64 d28,d9 | ||
1566 | vsli.64 d25,d23,#30 | ||
1567 | veor d30,d23,d16 | ||
1568 | vsli.64 d26,d23,#25 | ||
1569 | veor d22,d24,d25 | ||
1570 | vadd.i64 d27,d28 | ||
1571 | vbsl d30,d17,d16 @ Maj(a,b,c) | ||
1572 | veor d22,d26 @ Sigma0(a) | ||
1573 | vadd.i64 d18,d27 | ||
1574 | vadd.i64 d30,d27 | ||
1575 | @ vadd.i64 d22,d30 | ||
1576 | vshr.u64 q12,q4,#19 | ||
1577 | vshr.u64 q13,q4,#61 | ||
1578 | vadd.i64 d22,d30 @ h+=Maj from the past | ||
1579 | vshr.u64 q15,q4,#6 | ||
1580 | vsli.64 q12,q4,#45 | ||
1581 | vext.8 q14,q5,q6,#8 @ X[i+1] | ||
1582 | vsli.64 q13,q4,#3 | ||
1583 | veor q15,q12 | ||
1584 | vshr.u64 q12,q14,#1 | ||
1585 | veor q15,q13 @ sigma1(X[i+14]) | ||
1586 | vshr.u64 q13,q14,#8 | ||
1587 | vadd.i64 q5,q15 | ||
1588 | vshr.u64 q15,q14,#7 | ||
1589 | vsli.64 q12,q14,#63 | ||
1590 | vsli.64 q13,q14,#56 | ||
1591 | vext.8 q14,q1,q2,#8 @ X[i+9] | ||
1592 | veor q15,q12 | ||
1593 | vshr.u64 d24,d18,#14 @ from NEON_00_15 | ||
1594 | vadd.i64 q5,q14 | ||
1595 | vshr.u64 d25,d18,#18 @ from NEON_00_15 | ||
1596 | veor q15,q13 @ sigma0(X[i+1]) | ||
1597 | vshr.u64 d26,d18,#41 @ from NEON_00_15 | ||
1598 | vadd.i64 q5,q15 | ||
1599 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1600 | vsli.64 d24,d18,#50 | ||
1601 | vsli.64 d25,d18,#46 | ||
1602 | vmov d29,d18 | ||
1603 | vsli.64 d26,d18,#23 | ||
1604 | #if 26<16 && defined(__ARMEL__) | ||
1605 | vrev64.8 , | ||
1606 | #endif | ||
1607 | veor d25,d24 | ||
1608 | vbsl d29,d19,d20 @ Ch(e,f,g) | ||
1609 | vshr.u64 d24,d22,#28 | ||
1610 | veor d26,d25 @ Sigma1(e) | ||
1611 | vadd.i64 d27,d29,d21 | ||
1612 | vshr.u64 d25,d22,#34 | ||
1613 | vsli.64 d24,d22,#36 | ||
1614 | vadd.i64 d27,d26 | ||
1615 | vshr.u64 d26,d22,#39 | ||
1616 | vadd.i64 d28,d10 | ||
1617 | vsli.64 d25,d22,#30 | ||
1618 | veor d30,d22,d23 | ||
1619 | vsli.64 d26,d22,#25 | ||
1620 | veor d21,d24,d25 | ||
1621 | vadd.i64 d27,d28 | ||
1622 | vbsl d30,d16,d23 @ Maj(a,b,c) | ||
1623 | veor d21,d26 @ Sigma0(a) | ||
1624 | vadd.i64 d17,d27 | ||
1625 | vadd.i64 d30,d27 | ||
1626 | @ vadd.i64 d21,d30 | ||
1627 | vshr.u64 d24,d17,#14 @ 27 | ||
1628 | #if 27<16 | ||
1629 | vld1.64 {d11},[r1]! @ handles unaligned | ||
1630 | #endif | ||
1631 | vshr.u64 d25,d17,#18 | ||
1632 | #if 27>0 | ||
1633 | vadd.i64 d21,d30 @ h+=Maj from the past | ||
1634 | #endif | ||
1635 | vshr.u64 d26,d17,#41 | ||
1636 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1637 | vsli.64 d24,d17,#50 | ||
1638 | vsli.64 d25,d17,#46 | ||
1639 | vmov d29,d17 | ||
1640 | vsli.64 d26,d17,#23 | ||
1641 | #if 27<16 && defined(__ARMEL__) | ||
1642 | vrev64.8 , | ||
1643 | #endif | ||
1644 | veor d25,d24 | ||
1645 | vbsl d29,d18,d19 @ Ch(e,f,g) | ||
1646 | vshr.u64 d24,d21,#28 | ||
1647 | veor d26,d25 @ Sigma1(e) | ||
1648 | vadd.i64 d27,d29,d20 | ||
1649 | vshr.u64 d25,d21,#34 | ||
1650 | vsli.64 d24,d21,#36 | ||
1651 | vadd.i64 d27,d26 | ||
1652 | vshr.u64 d26,d21,#39 | ||
1653 | vadd.i64 d28,d11 | ||
1654 | vsli.64 d25,d21,#30 | ||
1655 | veor d30,d21,d22 | ||
1656 | vsli.64 d26,d21,#25 | ||
1657 | veor d20,d24,d25 | ||
1658 | vadd.i64 d27,d28 | ||
1659 | vbsl d30,d23,d22 @ Maj(a,b,c) | ||
1660 | veor d20,d26 @ Sigma0(a) | ||
1661 | vadd.i64 d16,d27 | ||
1662 | vadd.i64 d30,d27 | ||
1663 | @ vadd.i64 d20,d30 | ||
1664 | vshr.u64 q12,q5,#19 | ||
1665 | vshr.u64 q13,q5,#61 | ||
1666 | vadd.i64 d20,d30 @ h+=Maj from the past | ||
1667 | vshr.u64 q15,q5,#6 | ||
1668 | vsli.64 q12,q5,#45 | ||
1669 | vext.8 q14,q6,q7,#8 @ X[i+1] | ||
1670 | vsli.64 q13,q5,#3 | ||
1671 | veor q15,q12 | ||
1672 | vshr.u64 q12,q14,#1 | ||
1673 | veor q15,q13 @ sigma1(X[i+14]) | ||
1674 | vshr.u64 q13,q14,#8 | ||
1675 | vadd.i64 q6,q15 | ||
1676 | vshr.u64 q15,q14,#7 | ||
1677 | vsli.64 q12,q14,#63 | ||
1678 | vsli.64 q13,q14,#56 | ||
1679 | vext.8 q14,q2,q3,#8 @ X[i+9] | ||
1680 | veor q15,q12 | ||
1681 | vshr.u64 d24,d16,#14 @ from NEON_00_15 | ||
1682 | vadd.i64 q6,q14 | ||
1683 | vshr.u64 d25,d16,#18 @ from NEON_00_15 | ||
1684 | veor q15,q13 @ sigma0(X[i+1]) | ||
1685 | vshr.u64 d26,d16,#41 @ from NEON_00_15 | ||
1686 | vadd.i64 q6,q15 | ||
1687 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1688 | vsli.64 d24,d16,#50 | ||
1689 | vsli.64 d25,d16,#46 | ||
1690 | vmov d29,d16 | ||
1691 | vsli.64 d26,d16,#23 | ||
1692 | #if 28<16 && defined(__ARMEL__) | ||
1693 | vrev64.8 , | ||
1694 | #endif | ||
1695 | veor d25,d24 | ||
1696 | vbsl d29,d17,d18 @ Ch(e,f,g) | ||
1697 | vshr.u64 d24,d20,#28 | ||
1698 | veor d26,d25 @ Sigma1(e) | ||
1699 | vadd.i64 d27,d29,d19 | ||
1700 | vshr.u64 d25,d20,#34 | ||
1701 | vsli.64 d24,d20,#36 | ||
1702 | vadd.i64 d27,d26 | ||
1703 | vshr.u64 d26,d20,#39 | ||
1704 | vadd.i64 d28,d12 | ||
1705 | vsli.64 d25,d20,#30 | ||
1706 | veor d30,d20,d21 | ||
1707 | vsli.64 d26,d20,#25 | ||
1708 | veor d19,d24,d25 | ||
1709 | vadd.i64 d27,d28 | ||
1710 | vbsl d30,d22,d21 @ Maj(a,b,c) | ||
1711 | veor d19,d26 @ Sigma0(a) | ||
1712 | vadd.i64 d23,d27 | ||
1713 | vadd.i64 d30,d27 | ||
1714 | @ vadd.i64 d19,d30 | ||
1715 | vshr.u64 d24,d23,#14 @ 29 | ||
1716 | #if 29<16 | ||
1717 | vld1.64 {d13},[r1]! @ handles unaligned | ||
1718 | #endif | ||
1719 | vshr.u64 d25,d23,#18 | ||
1720 | #if 29>0 | ||
1721 | vadd.i64 d19,d30 @ h+=Maj from the past | ||
1722 | #endif | ||
1723 | vshr.u64 d26,d23,#41 | ||
1724 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1725 | vsli.64 d24,d23,#50 | ||
1726 | vsli.64 d25,d23,#46 | ||
1727 | vmov d29,d23 | ||
1728 | vsli.64 d26,d23,#23 | ||
1729 | #if 29<16 && defined(__ARMEL__) | ||
1730 | vrev64.8 , | ||
1731 | #endif | ||
1732 | veor d25,d24 | ||
1733 | vbsl d29,d16,d17 @ Ch(e,f,g) | ||
1734 | vshr.u64 d24,d19,#28 | ||
1735 | veor d26,d25 @ Sigma1(e) | ||
1736 | vadd.i64 d27,d29,d18 | ||
1737 | vshr.u64 d25,d19,#34 | ||
1738 | vsli.64 d24,d19,#36 | ||
1739 | vadd.i64 d27,d26 | ||
1740 | vshr.u64 d26,d19,#39 | ||
1741 | vadd.i64 d28,d13 | ||
1742 | vsli.64 d25,d19,#30 | ||
1743 | veor d30,d19,d20 | ||
1744 | vsli.64 d26,d19,#25 | ||
1745 | veor d18,d24,d25 | ||
1746 | vadd.i64 d27,d28 | ||
1747 | vbsl d30,d21,d20 @ Maj(a,b,c) | ||
1748 | veor d18,d26 @ Sigma0(a) | ||
1749 | vadd.i64 d22,d27 | ||
1750 | vadd.i64 d30,d27 | ||
1751 | @ vadd.i64 d18,d30 | ||
1752 | vshr.u64 q12,q6,#19 | ||
1753 | vshr.u64 q13,q6,#61 | ||
1754 | vadd.i64 d18,d30 @ h+=Maj from the past | ||
1755 | vshr.u64 q15,q6,#6 | ||
1756 | vsli.64 q12,q6,#45 | ||
1757 | vext.8 q14,q7,q0,#8 @ X[i+1] | ||
1758 | vsli.64 q13,q6,#3 | ||
1759 | veor q15,q12 | ||
1760 | vshr.u64 q12,q14,#1 | ||
1761 | veor q15,q13 @ sigma1(X[i+14]) | ||
1762 | vshr.u64 q13,q14,#8 | ||
1763 | vadd.i64 q7,q15 | ||
1764 | vshr.u64 q15,q14,#7 | ||
1765 | vsli.64 q12,q14,#63 | ||
1766 | vsli.64 q13,q14,#56 | ||
1767 | vext.8 q14,q3,q4,#8 @ X[i+9] | ||
1768 | veor q15,q12 | ||
1769 | vshr.u64 d24,d22,#14 @ from NEON_00_15 | ||
1770 | vadd.i64 q7,q14 | ||
1771 | vshr.u64 d25,d22,#18 @ from NEON_00_15 | ||
1772 | veor q15,q13 @ sigma0(X[i+1]) | ||
1773 | vshr.u64 d26,d22,#41 @ from NEON_00_15 | ||
1774 | vadd.i64 q7,q15 | ||
1775 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1776 | vsli.64 d24,d22,#50 | ||
1777 | vsli.64 d25,d22,#46 | ||
1778 | vmov d29,d22 | ||
1779 | vsli.64 d26,d22,#23 | ||
1780 | #if 30<16 && defined(__ARMEL__) | ||
1781 | vrev64.8 , | ||
1782 | #endif | ||
1783 | veor d25,d24 | ||
1784 | vbsl d29,d23,d16 @ Ch(e,f,g) | ||
1785 | vshr.u64 d24,d18,#28 | ||
1786 | veor d26,d25 @ Sigma1(e) | ||
1787 | vadd.i64 d27,d29,d17 | ||
1788 | vshr.u64 d25,d18,#34 | ||
1789 | vsli.64 d24,d18,#36 | ||
1790 | vadd.i64 d27,d26 | ||
1791 | vshr.u64 d26,d18,#39 | ||
1792 | vadd.i64 d28,d14 | ||
1793 | vsli.64 d25,d18,#30 | ||
1794 | veor d30,d18,d19 | ||
1795 | vsli.64 d26,d18,#25 | ||
1796 | veor d17,d24,d25 | ||
1797 | vadd.i64 d27,d28 | ||
1798 | vbsl d30,d20,d19 @ Maj(a,b,c) | ||
1799 | veor d17,d26 @ Sigma0(a) | ||
1800 | vadd.i64 d21,d27 | ||
1801 | vadd.i64 d30,d27 | ||
1802 | @ vadd.i64 d17,d30 | ||
1803 | vshr.u64 d24,d21,#14 @ 31 | ||
1804 | #if 31<16 | ||
1805 | vld1.64 {d15},[r1]! @ handles unaligned | ||
1806 | #endif | ||
1807 | vshr.u64 d25,d21,#18 | ||
1808 | #if 31>0 | ||
1809 | vadd.i64 d17,d30 @ h+=Maj from the past | ||
1810 | #endif | ||
1811 | vshr.u64 d26,d21,#41 | ||
1812 | vld1.64 {d28},[r3,:64]! @ K[i++] | ||
1813 | vsli.64 d24,d21,#50 | ||
1814 | vsli.64 d25,d21,#46 | ||
1815 | vmov d29,d21 | ||
1816 | vsli.64 d26,d21,#23 | ||
1817 | #if 31<16 && defined(__ARMEL__) | ||
1818 | vrev64.8 , | ||
1819 | #endif | ||
1820 | veor d25,d24 | ||
1821 | vbsl d29,d22,d23 @ Ch(e,f,g) | ||
1822 | vshr.u64 d24,d17,#28 | ||
1823 | veor d26,d25 @ Sigma1(e) | ||
1824 | vadd.i64 d27,d29,d16 | ||
1825 | vshr.u64 d25,d17,#34 | ||
1826 | vsli.64 d24,d17,#36 | ||
1827 | vadd.i64 d27,d26 | ||
1828 | vshr.u64 d26,d17,#39 | ||
1829 | vadd.i64 d28,d15 | ||
1830 | vsli.64 d25,d17,#30 | ||
1831 | veor d30,d17,d18 | ||
1832 | vsli.64 d26,d17,#25 | ||
1833 | veor d16,d24,d25 | ||
1834 | vadd.i64 d27,d28 | ||
1835 | vbsl d30,d19,d18 @ Maj(a,b,c) | ||
1836 | veor d16,d26 @ Sigma0(a) | ||
1837 | vadd.i64 d20,d27 | ||
1838 | vadd.i64 d30,d27 | ||
1839 | @ vadd.i64 d16,d30 | ||
1840 | bne .L16_79_neon | ||
1841 | |||
1842 | vadd.i64 d16,d30 @ h+=Maj from the past | ||
1843 | vldmia r0,{d24-d31} @ load context to temp | ||
1844 | vadd.i64 q8,q12 @ vectorized accumulate | ||
1845 | vadd.i64 q9,q13 | ||
1846 | vadd.i64 q10,q14 | ||
1847 | vadd.i64 q11,q15 | ||
1848 | vstmia r0,{d16-d23} @ save context | ||
1849 | teq r1,r2 | ||
1850 | sub r3,#640 @ rewind K512 | ||
1851 | bne .Loop_neon | ||
1852 | |||
1853 | VFP_ABI_POP | ||
1854 | bx lr @ .word 0xe12fff1e | ||
1855 | .size sha512_block_data_order_neon,.-sha512_block_data_order_neon | ||
1856 | #endif | ||
1857 | .asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" | ||
1858 | .align 2 | ||
1859 | #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) | ||
1860 | .comm OPENSSL_armcap_P,4,4 | ||
1861 | #endif | ||
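The interleaved NEON rounds above are machine-generated (CRYPTOGAMS), but the @ comments name the standard FIPS 180-4 SHA-512 functions, and each vshr/vsli pair synthesizes one 64-bit rotate (ror #n = vshr #n combined with vsli #(64-n)). For reference, the functions being computed are:

$$\begin{aligned}
\mathrm{Ch}(e,f,g)  &= (e \wedge f) \oplus (\lnot e \wedge g)\\
\mathrm{Maj}(a,b,c) &= (a \wedge b) \oplus (a \wedge c) \oplus (b \wedge c)\\
\Sigma_0(a) &= (a \ggg 28) \oplus (a \ggg 34) \oplus (a \ggg 39)\\
\Sigma_1(e) &= (e \ggg 14) \oplus (e \ggg 18) \oplus (e \ggg 41)\\
\sigma_0(x) &= (x \ggg 1) \oplus (x \ggg 8) \oplus (x \gg 7)\\
\sigma_1(x) &= (x \ggg 19) \oplus (x \ggg 61) \oplus (x \gg 6)
\end{aligned}$$

The shift immediates visible in the code (#14/#18/#41, #28/#34/#39, #19/#61/#6, #1/#8/#7) are exactly these rotation and shift amounts.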
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
new file mode 100644
index 000000000000..269a394e4a53
--- /dev/null
+++ b/arch/arm/crypto/sha512-glue.c
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * sha512-glue.c - accelerated SHA-384/512 for ARM | ||
3 | * | ||
4 | * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <crypto/internal/hash.h> | ||
12 | #include <crypto/sha.h> | ||
13 | #include <crypto/sha512_base.h> | ||
14 | #include <linux/crypto.h> | ||
15 | #include <linux/module.h> | ||
16 | |||
17 | #include <asm/hwcap.h> | ||
18 | #include <asm/neon.h> | ||
19 | |||
20 | #include "sha512.h" | ||
21 | |||
22 | MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM"); | ||
23 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
24 | MODULE_LICENSE("GPL v2"); | ||
25 | |||
26 | MODULE_ALIAS_CRYPTO("sha384"); | ||
27 | MODULE_ALIAS_CRYPTO("sha512"); | ||
28 | MODULE_ALIAS_CRYPTO("sha384-arm"); | ||
29 | MODULE_ALIAS_CRYPTO("sha512-arm"); | ||
30 | |||
31 | asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks); | ||
32 | |||
33 | int sha512_arm_update(struct shash_desc *desc, const u8 *data, | ||
34 | unsigned int len) | ||
35 | { | ||
36 | return sha512_base_do_update(desc, data, len, | ||
37 | (sha512_block_fn *)sha512_block_data_order); | ||
38 | } | ||
39 | |||
40 | int sha512_arm_final(struct shash_desc *desc, u8 *out) | ||
41 | { | ||
42 | sha512_base_do_finalize(desc, | ||
43 | (sha512_block_fn *)sha512_block_data_order); | ||
44 | return sha512_base_finish(desc, out); | ||
45 | } | ||
46 | |||
47 | int sha512_arm_finup(struct shash_desc *desc, const u8 *data, | ||
48 | unsigned int len, u8 *out) | ||
49 | { | ||
50 | sha512_base_do_update(desc, data, len, | ||
51 | (sha512_block_fn *)sha512_block_data_order); | ||
52 | return sha512_arm_final(desc, out); | ||
53 | } | ||
54 | |||
55 | static struct shash_alg sha512_arm_algs[] = { { | ||
56 | .init = sha384_base_init, | ||
57 | .update = sha512_arm_update, | ||
58 | .final = sha512_arm_final, | ||
59 | .finup = sha512_arm_finup, | ||
60 | .descsize = sizeof(struct sha512_state), | ||
61 | .digestsize = SHA384_DIGEST_SIZE, | ||
62 | .base = { | ||
63 | .cra_name = "sha384", | ||
64 | .cra_driver_name = "sha384-arm", | ||
65 | .cra_priority = 250, | ||
66 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
67 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
68 | .cra_module = THIS_MODULE, | ||
69 | } | ||
70 | }, { | ||
71 | .init = sha512_base_init, | ||
72 | .update = sha512_arm_update, | ||
73 | .final = sha512_arm_final, | ||
74 | .finup = sha512_arm_finup, | ||
75 | .descsize = sizeof(struct sha512_state), | ||
76 | .digestsize = SHA512_DIGEST_SIZE, | ||
77 | .base = { | ||
78 | .cra_name = "sha512", | ||
79 | .cra_driver_name = "sha512-arm", | ||
80 | .cra_priority = 250, | ||
81 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
82 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
83 | .cra_module = THIS_MODULE, | ||
84 | } | ||
85 | } }; | ||
86 | |||
87 | static int __init sha512_arm_mod_init(void) | ||
88 | { | ||
89 | int err; | ||
90 | |||
91 | err = crypto_register_shashes(sha512_arm_algs, | ||
92 | ARRAY_SIZE(sha512_arm_algs)); | ||
93 | if (err) | ||
94 | return err; | ||
95 | |||
96 | if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) { | ||
97 | err = crypto_register_shashes(sha512_neon_algs, | ||
98 | ARRAY_SIZE(sha512_neon_algs)); | ||
99 | if (err) | ||
100 | goto err_unregister; | ||
101 | } | ||
102 | return 0; | ||
103 | |||
104 | err_unregister: | ||
105 | crypto_unregister_shashes(sha512_arm_algs, | ||
106 | ARRAY_SIZE(sha512_arm_algs)); | ||
107 | |||
108 | return err; | ||
109 | } | ||
110 | |||
111 | static void __exit sha512_arm_mod_fini(void) | ||
112 | { | ||
113 | crypto_unregister_shashes(sha512_arm_algs, | ||
114 | ARRAY_SIZE(sha512_arm_algs)); | ||
115 | if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) | ||
116 | crypto_unregister_shashes(sha512_neon_algs, | ||
117 | ARRAY_SIZE(sha512_neon_algs)); | ||
118 | } | ||
119 | |||
120 | module_init(sha512_arm_mod_init); | ||
121 | module_exit(sha512_arm_mod_fini); | ||
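The new glue always registers the scalar sha384-arm/sha512-arm shashes at priority 250 and, when the kernel supports kernel-mode NEON and the CPU has it, additionally registers the NEON variants from sha512-neon-glue.c at priority 300, so callers of the generic API get the fastest available code path without asking for it. A minimal caller sketch (the function name is hypothetical, and it assumes a sleepable kernel context):

	#include <crypto/hash.h>
	#include <crypto/sha.h>
	#include <linux/err.h>

	/* Digest a buffer via whichever "sha512" provider won on priority:
	 * sha512-neon when registered, otherwise sha512-arm. */
	static int sha512_digest_buf(const u8 *buf, unsigned int len,
				     u8 out[SHA512_DIGEST_SIZE])
	{
		struct crypto_shash *tfm = crypto_alloc_shash("sha512", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			/* block scope: the descriptor size depends on tfm */
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
			err = crypto_shash_digest(desc, buf, len, out);
		}

		crypto_free_shash(tfm);
		return err;
	}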
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
new file mode 100644
index 000000000000..32693684a3ab
--- /dev/null
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * sha512-neon-glue.c - accelerated SHA-384/512 for ARM NEON | ||
3 | * | ||
4 | * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <crypto/internal/hash.h> | ||
12 | #include <crypto/sha.h> | ||
13 | #include <crypto/sha512_base.h> | ||
14 | #include <linux/crypto.h> | ||
15 | #include <linux/module.h> | ||
16 | |||
17 | #include <asm/simd.h> | ||
18 | #include <asm/neon.h> | ||
19 | |||
20 | #include "sha512.h" | ||
21 | |||
22 | MODULE_ALIAS_CRYPTO("sha384-neon"); | ||
23 | MODULE_ALIAS_CRYPTO("sha512-neon"); | ||
24 | |||
25 | asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src, | ||
26 | int blocks); | ||
27 | |||
28 | static int sha512_neon_update(struct shash_desc *desc, const u8 *data, | ||
29 | unsigned int len) | ||
30 | { | ||
31 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
32 | |||
33 | if (!may_use_simd() || | ||
34 | (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) | ||
35 | return sha512_arm_update(desc, data, len); | ||
36 | |||
37 | kernel_neon_begin(); | ||
38 | sha512_base_do_update(desc, data, len, | ||
39 | (sha512_block_fn *)sha512_block_data_order_neon); | ||
40 | kernel_neon_end(); | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static int sha512_neon_finup(struct shash_desc *desc, const u8 *data, | ||
46 | unsigned int len, u8 *out) | ||
47 | { | ||
48 | if (!may_use_simd()) | ||
49 | return sha512_arm_finup(desc, data, len, out); | ||
50 | |||
51 | kernel_neon_begin(); | ||
52 | if (len) | ||
53 | sha512_base_do_update(desc, data, len, | ||
54 | (sha512_block_fn *)sha512_block_data_order_neon); | ||
55 | sha512_base_do_finalize(desc, | ||
56 | (sha512_block_fn *)sha512_block_data_order_neon); | ||
57 | kernel_neon_end(); | ||
58 | |||
59 | return sha512_base_finish(desc, out); | ||
60 | } | ||
61 | |||
62 | static int sha512_neon_final(struct shash_desc *desc, u8 *out) | ||
63 | { | ||
64 | return sha512_neon_finup(desc, NULL, 0, out); | ||
65 | } | ||
66 | |||
67 | struct shash_alg sha512_neon_algs[] = { { | ||
68 | .init = sha384_base_init, | ||
69 | .update = sha512_neon_update, | ||
70 | .final = sha512_neon_final, | ||
71 | .finup = sha512_neon_finup, | ||
72 | .descsize = sizeof(struct sha512_state), | ||
73 | .digestsize = SHA384_DIGEST_SIZE, | ||
74 | .base = { | ||
75 | .cra_name = "sha384", | ||
76 | .cra_driver_name = "sha384-neon", | ||
77 | .cra_priority = 300, | ||
78 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
79 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
80 | .cra_module = THIS_MODULE, | ||
81 | |||
82 | } | ||
83 | }, { | ||
84 | .init = sha512_base_init, | ||
85 | .update = sha512_neon_update, | ||
86 | .final = sha512_neon_final, | ||
87 | .finup = sha512_neon_finup, | ||
88 | .descsize = sizeof(struct sha512_state), | ||
89 | .digestsize = SHA512_DIGEST_SIZE, | ||
90 | .base = { | ||
91 | .cra_name = "sha512", | ||
92 | .cra_driver_name = "sha512-neon", | ||
93 | .cra_priority = 300, | ||
94 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
95 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
96 | .cra_module = THIS_MODULE, | ||
97 | } | ||
98 | } }; | ||
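The guard in sha512_neon_update() keeps NEON out of two cases: contexts where SIMD is unusable (may_use_simd() false, e.g. hard IRQ) and updates too small to complete a 128-byte block, which would only be memcpy'd into the partial buffer anyway. A worked check of that second condition (illustrative userspace arithmetic, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int block = 128;	/* SHA512_BLOCK_SIZE */
		unsigned int buffered = 100;	/* sctx->count[0] % block */
		unsigned int lens[] = { 20, 40 };

		for (int i = 0; i < 2; i++)
			printf("len=%u -> %s path\n", lens[i],
			       buffered + lens[i] < block ?
			       "scalar/buffer" : "NEON");
		return 0;
	}

With 100 bytes already buffered, a 20-byte update stays below a block boundary and takes the scalar path, while a 40-byte update crosses it and justifies the kernel_neon_begin()/kernel_neon_end() round-trip.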
diff --git a/arch/arm/crypto/sha512.h b/arch/arm/crypto/sha512.h
new file mode 100644
index 000000000000..a75d9a82988a
--- /dev/null
+++ b/arch/arm/crypto/sha512.h
@@ -0,0 +1,8 @@ | |||
1 | |||
2 | int sha512_arm_update(struct shash_desc *desc, const u8 *data, | ||
3 | unsigned int len); | ||
4 | |||
5 | int sha512_arm_finup(struct shash_desc *desc, const u8 *data, | ||
6 | unsigned int len, u8 *out); | ||
7 | |||
8 | extern struct shash_alg sha512_neon_algs[2]; | ||
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
deleted file mode 100644
index b124dce838d6..000000000000
--- a/arch/arm/crypto/sha512_neon_glue.c
+++ /dev/null
@@ -1,305 +0,0 @@ | |||
1 | /* | ||
2 | * Glue code for the SHA512 Secure Hash Algorithm assembly implementation | ||
3 | * using NEON instructions. | ||
4 | * | ||
5 | * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> | ||
6 | * | ||
7 | * This file is based on sha512_ssse3_glue.c: | ||
8 | * Copyright (C) 2013 Intel Corporation | ||
9 | * Author: Tim Chen <tim.c.chen@linux.intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the Free | ||
13 | * Software Foundation; either version 2 of the License, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <crypto/internal/hash.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/cryptohash.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <crypto/sha.h> | ||
26 | #include <asm/byteorder.h> | ||
27 | #include <asm/simd.h> | ||
28 | #include <asm/neon.h> | ||
29 | |||
30 | |||
31 | static const u64 sha512_k[] = { | ||
32 | 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, | ||
33 | 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, | ||
34 | 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, | ||
35 | 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, | ||
36 | 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, | ||
37 | 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, | ||
38 | 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, | ||
39 | 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, | ||
40 | 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, | ||
41 | 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, | ||
42 | 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, | ||
43 | 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, | ||
44 | 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, | ||
45 | 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, | ||
46 | 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, | ||
47 | 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, | ||
48 | 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, | ||
49 | 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, | ||
50 | 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, | ||
51 | 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, | ||
52 | 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, | ||
53 | 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, | ||
54 | 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, | ||
55 | 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, | ||
56 | 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, | ||
57 | 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, | ||
58 | 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, | ||
59 | 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, | ||
60 | 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, | ||
61 | 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, | ||
62 | 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, | ||
63 | 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, | ||
64 | 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, | ||
65 | 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, | ||
66 | 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, | ||
67 | 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, | ||
68 | 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, | ||
69 | 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, | ||
70 | 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, | ||
71 | 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL | ||
72 | }; | ||
73 | |||
74 | |||
75 | asmlinkage void sha512_transform_neon(u64 *digest, const void *data, | ||
76 | const u64 k[], unsigned int num_blks); | ||
77 | |||
78 | |||
79 | static int sha512_neon_init(struct shash_desc *desc) | ||
80 | { | ||
81 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
82 | |||
83 | sctx->state[0] = SHA512_H0; | ||
84 | sctx->state[1] = SHA512_H1; | ||
85 | sctx->state[2] = SHA512_H2; | ||
86 | sctx->state[3] = SHA512_H3; | ||
87 | sctx->state[4] = SHA512_H4; | ||
88 | sctx->state[5] = SHA512_H5; | ||
89 | sctx->state[6] = SHA512_H6; | ||
90 | sctx->state[7] = SHA512_H7; | ||
91 | sctx->count[0] = sctx->count[1] = 0; | ||
92 | |||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int __sha512_neon_update(struct shash_desc *desc, const u8 *data, | ||
97 | unsigned int len, unsigned int partial) | ||
98 | { | ||
99 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
100 | unsigned int done = 0; | ||
101 | |||
102 | sctx->count[0] += len; | ||
103 | if (sctx->count[0] < len) | ||
104 | sctx->count[1]++; | ||
105 | |||
106 | if (partial) { | ||
107 | done = SHA512_BLOCK_SIZE - partial; | ||
108 | memcpy(sctx->buf + partial, data, done); | ||
109 | sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1); | ||
110 | } | ||
111 | |||
112 | if (len - done >= SHA512_BLOCK_SIZE) { | ||
113 | const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; | ||
114 | |||
115 | sha512_transform_neon(sctx->state, data + done, sha512_k, | ||
116 | rounds); | ||
117 | |||
118 | done += rounds * SHA512_BLOCK_SIZE; | ||
119 | } | ||
120 | |||
121 | memcpy(sctx->buf, data + done, len - done); | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | static int sha512_neon_update(struct shash_desc *desc, const u8 *data, | ||
127 | unsigned int len) | ||
128 | { | ||
129 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
130 | unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; | ||
131 | int res; | ||
132 | |||
133 | /* Handle the fast case right here */ | ||
134 | if (partial + len < SHA512_BLOCK_SIZE) { | ||
135 | sctx->count[0] += len; | ||
136 | if (sctx->count[0] < len) | ||
137 | sctx->count[1]++; | ||
138 | memcpy(sctx->buf + partial, data, len); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | if (!may_use_simd()) { | ||
144 | res = crypto_sha512_update(desc, data, len); | ||
145 | } else { | ||
146 | kernel_neon_begin(); | ||
147 | res = __sha512_neon_update(desc, data, len, partial); | ||
148 | kernel_neon_end(); | ||
149 | } | ||
150 | |||
151 | return res; | ||
152 | } | ||
153 | |||
154 | |||
155 | /* Add padding and return the message digest. */ | ||
156 | static int sha512_neon_final(struct shash_desc *desc, u8 *out) | ||
157 | { | ||
158 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
159 | unsigned int i, index, padlen; | ||
160 | __be64 *dst = (__be64 *)out; | ||
161 | __be64 bits[2]; | ||
162 | static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, }; | ||
163 | |||
164 | /* save number of bits */ | ||
165 | bits[1] = cpu_to_be64(sctx->count[0] << 3); | ||
166 | bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); | ||
167 | |||
168 | /* Pad out to 112 mod 128 and append length */ | ||
169 | index = sctx->count[0] & 0x7f; | ||
170 | padlen = (index < 112) ? (112 - index) : ((128+112) - index); | ||
171 | |||
172 | if (!may_use_simd()) { | ||
173 | crypto_sha512_update(desc, padding, padlen); | ||
174 | crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits)); | ||
175 | } else { | ||
176 | kernel_neon_begin(); | ||
177 | /* We need to fill a whole block for __sha512_neon_update() */ | ||
178 | if (padlen <= 112) { | ||
179 | sctx->count[0] += padlen; | ||
180 | if (sctx->count[0] < padlen) | ||
181 | sctx->count[1]++; | ||
182 | memcpy(sctx->buf + index, padding, padlen); | ||
183 | } else { | ||
184 | __sha512_neon_update(desc, padding, padlen, index); | ||
185 | } | ||
186 | __sha512_neon_update(desc, (const u8 *)&bits, | ||
187 | sizeof(bits), 112); | ||
188 | kernel_neon_end(); | ||
189 | } | ||
190 | |||
191 | /* Store state in digest */ | ||
192 | for (i = 0; i < 8; i++) | ||
193 | dst[i] = cpu_to_be64(sctx->state[i]); | ||
194 | |||
195 | /* Wipe context */ | ||
196 | memset(sctx, 0, sizeof(*sctx)); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static int sha512_neon_export(struct shash_desc *desc, void *out) | ||
202 | { | ||
203 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
204 | |||
205 | memcpy(out, sctx, sizeof(*sctx)); | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static int sha512_neon_import(struct shash_desc *desc, const void *in) | ||
211 | { | ||
212 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
213 | |||
214 | memcpy(sctx, in, sizeof(*sctx)); | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static int sha384_neon_init(struct shash_desc *desc) | ||
220 | { | ||
221 | struct sha512_state *sctx = shash_desc_ctx(desc); | ||
222 | |||
223 | sctx->state[0] = SHA384_H0; | ||
224 | sctx->state[1] = SHA384_H1; | ||
225 | sctx->state[2] = SHA384_H2; | ||
226 | sctx->state[3] = SHA384_H3; | ||
227 | sctx->state[4] = SHA384_H4; | ||
228 | sctx->state[5] = SHA384_H5; | ||
229 | sctx->state[6] = SHA384_H6; | ||
230 | sctx->state[7] = SHA384_H7; | ||
231 | |||
232 | sctx->count[0] = sctx->count[1] = 0; | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static int sha384_neon_final(struct shash_desc *desc, u8 *hash) | ||
238 | { | ||
239 | u8 D[SHA512_DIGEST_SIZE]; | ||
240 | |||
241 | sha512_neon_final(desc, D); | ||
242 | |||
243 | memcpy(hash, D, SHA384_DIGEST_SIZE); | ||
244 | memzero_explicit(D, SHA512_DIGEST_SIZE); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static struct shash_alg algs[] = { { | ||
250 | .digestsize = SHA512_DIGEST_SIZE, | ||
251 | .init = sha512_neon_init, | ||
252 | .update = sha512_neon_update, | ||
253 | .final = sha512_neon_final, | ||
254 | .export = sha512_neon_export, | ||
255 | .import = sha512_neon_import, | ||
256 | .descsize = sizeof(struct sha512_state), | ||
257 | .statesize = sizeof(struct sha512_state), | ||
258 | .base = { | ||
259 | .cra_name = "sha512", | ||
260 | .cra_driver_name = "sha512-neon", | ||
261 | .cra_priority = 250, | ||
262 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
263 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
264 | .cra_module = THIS_MODULE, | ||
265 | } | ||
266 | }, { | ||
267 | .digestsize = SHA384_DIGEST_SIZE, | ||
268 | .init = sha384_neon_init, | ||
269 | .update = sha512_neon_update, | ||
270 | .final = sha384_neon_final, | ||
271 | .export = sha512_neon_export, | ||
272 | .import = sha512_neon_import, | ||
273 | .descsize = sizeof(struct sha512_state), | ||
274 | .statesize = sizeof(struct sha512_state), | ||
275 | .base = { | ||
276 | .cra_name = "sha384", | ||
277 | .cra_driver_name = "sha384-neon", | ||
278 | .cra_priority = 250, | ||
279 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
280 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
281 | .cra_module = THIS_MODULE, | ||
282 | } | ||
283 | } }; | ||
284 | |||
285 | static int __init sha512_neon_mod_init(void) | ||
286 | { | ||
287 | if (!cpu_has_neon()) | ||
288 | return -ENODEV; | ||
289 | |||
290 | return crypto_register_shashes(algs, ARRAY_SIZE(algs)); | ||
291 | } | ||
292 | |||
293 | static void __exit sha512_neon_mod_fini(void) | ||
294 | { | ||
295 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | ||
296 | } | ||
297 | |||
298 | module_init(sha512_neon_mod_init); | ||
299 | module_exit(sha512_neon_mod_fini); | ||
300 | |||
301 | MODULE_LICENSE("GPL"); | ||
302 | MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated"); | ||
303 | |||
304 | MODULE_ALIAS_CRYPTO("sha512"); | ||
305 | MODULE_ALIAS_CRYPTO("sha384"); | ||
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 6c348df5bf36..3303e8a7b837 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -13,7 +13,7 @@ | |||
13 | #include <crypto/aes.h> | 13 | #include <crypto/aes.h> |
14 | #include <crypto/algapi.h> | 14 | #include <crypto/algapi.h> |
15 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
16 | #include <linux/crypto.h> | 16 | #include <crypto/internal/aead.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | 18 | ||
19 | #include "aes-ce-setkey.h" | 19 | #include "aes-ce-setkey.h" |
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index 12dccdb38286..af4c712f7afc 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -69,10 +69,10 @@ static int octeon_md5_init(struct shash_desc *desc) | |||
69 | { | 69 | { |
70 | struct md5_state *mctx = shash_desc_ctx(desc); | 70 | struct md5_state *mctx = shash_desc_ctx(desc); |
71 | 71 | ||
72 | mctx->hash[0] = cpu_to_le32(0x67452301); | 72 | mctx->hash[0] = cpu_to_le32(MD5_H0); |
73 | mctx->hash[1] = cpu_to_le32(0xefcdab89); | 73 | mctx->hash[1] = cpu_to_le32(MD5_H1); |
74 | mctx->hash[2] = cpu_to_le32(0x98badcfe); | 74 | mctx->hash[2] = cpu_to_le32(MD5_H2); |
75 | mctx->hash[3] = cpu_to_le32(0x10325476); | 75 | mctx->hash[3] = cpu_to_le32(MD5_H3); |
76 | mctx->byte_count = 0; | 76 | mctx->byte_count = 0; |
77 | 77 | ||
78 | return 0; | 78 | return 0; |
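This and the matching powerpc and sparc64 hunks below replace the raw RFC 1321 initial values with named constants. As assumed here, the macros come from include/crypto/md5.h and expand to the very words they replace:

	/* Presumed definitions behind MD5_H0..MD5_H3 (include/crypto/md5.h):
	 * the named constants equal the RFC 1321 initial chaining values
	 * that the literals encoded, so the change is purely cosmetic. */
	#define MD5_H0	0x67452301UL
	#define MD5_H1	0xefcdab89UL
	#define MD5_H2	0x98badcfeUL
	#define MD5_H3	0x10325476UL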
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index 7f4547418ee1..be186a75f622 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -8,6 +8,7 @@ | |||
8 | * for more details. | 8 | * for more details. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/export.h> | ||
11 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
12 | #include <linux/clockchips.h> | 13 | #include <linux/clockchips.h> |
13 | #include <linux/clocksource.h> | 14 | #include <linux/clocksource.h> |
@@ -106,6 +107,7 @@ cycles_t get_cycles(void) | |||
106 | { | 107 | { |
107 | return nios2_timer_read(&nios2_cs.cs); | 108 | return nios2_timer_read(&nios2_cs.cs); |
108 | } | 109 | } |
110 | EXPORT_SYMBOL(get_cycles); | ||
109 | 111 | ||
110 | static void nios2_timer_start(struct nios2_timer *timer) | 112 | static void nios2_timer_start(struct nios2_timer *timer) |
111 | { | 113 | { |
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 452fb4dc575f..92289679b4c4 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -37,10 +37,10 @@ static int ppc_md5_init(struct shash_desc *desc) | |||
37 | { | 37 | { |
38 | struct md5_state *sctx = shash_desc_ctx(desc); | 38 | struct md5_state *sctx = shash_desc_ctx(desc); |
39 | 39 | ||
40 | sctx->hash[0] = 0x67452301; | 40 | sctx->hash[0] = MD5_H0; |
41 | sctx->hash[1] = 0xefcdab89; | 41 | sctx->hash[1] = MD5_H1; |
42 | sctx->hash[2] = 0x98badcfe; | 42 | sctx->hash[2] = MD5_H2; |
43 | sctx->hash[3] = 0x10325476; | 43 | sctx->hash[3] = MD5_H3; |
44 | sctx->byte_count = 0; | 44 | sctx->byte_count = 0; |
45 | 45 | ||
46 | return 0; | 46 | return 0; |
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
new file mode 100644
index 000000000000..9f8402b35115
--- /dev/null
+++ b/arch/powerpc/include/asm/icswx.h
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * ICSWX api | ||
3 | * | ||
4 | * Copyright (C) 2015 IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This provides the Initiate Coprocessor Store Word Indexed (ICSWX) | ||
12 | * instruction. This instruction is used to communicate with PowerPC | ||
13 | * coprocessors. This also provides definitions of the structures used | ||
14 | * to communicate with the coprocessor. | ||
15 | * | ||
16 | * The RFC02130: Coprocessor Architecture document is the reference for | ||
17 | * everything in this file unless otherwise noted. | ||
18 | */ | ||
19 | #ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ | ||
20 | #define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ | ||
21 | |||
22 | #include <asm/ppc-opcode.h> /* for PPC_ICSWX */ | ||
23 | |||
24 | /* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */ | ||
25 | |||
26 | #define CCB_VALUE (0x3fffffffffffffff) | ||
27 | #define CCB_ADDRESS (0xfffffffffffffff8) | ||
28 | #define CCB_CM (0x0000000000000007) | ||
29 | #define CCB_CM0 (0x0000000000000004) | ||
30 | #define CCB_CM12 (0x0000000000000003) | ||
31 | |||
32 | #define CCB_CM0_ALL_COMPLETIONS (0x0) | ||
33 | #define CCB_CM0_LAST_IN_CHAIN (0x4) | ||
34 | #define CCB_CM12_STORE (0x0) | ||
35 | #define CCB_CM12_INTERRUPT (0x1) | ||
36 | |||
37 | #define CCB_SIZE (0x10) | ||
38 | #define CCB_ALIGN CCB_SIZE | ||
39 | |||
40 | struct coprocessor_completion_block { | ||
41 | __be64 value; | ||
42 | __be64 address; | ||
43 | } __packed __aligned(CCB_ALIGN); | ||
44 | |||
45 | |||
46 | /* Chapter 6.5.7 Coprocessor-Status Block (CSB) */ | ||
47 | |||
48 | #define CSB_V (0x80) | ||
49 | #define CSB_F (0x04) | ||
50 | #define CSB_CH (0x03) | ||
51 | #define CSB_CE_INCOMPLETE (0x80) | ||
52 | #define CSB_CE_TERMINATION (0x40) | ||
53 | #define CSB_CE_TPBC (0x20) | ||
54 | |||
55 | #define CSB_CC_SUCCESS (0) | ||
56 | #define CSB_CC_INVALID_ALIGN (1) | ||
57 | #define CSB_CC_OPERAND_OVERLAP (2) | ||
58 | #define CSB_CC_DATA_LENGTH (3) | ||
59 | #define CSB_CC_TRANSLATION (5) | ||
60 | #define CSB_CC_PROTECTION (6) | ||
61 | #define CSB_CC_RD_EXTERNAL (7) | ||
62 | #define CSB_CC_INVALID_OPERAND (8) | ||
63 | #define CSB_CC_PRIVILEGE (9) | ||
64 | #define CSB_CC_INTERNAL (10) | ||
65 | #define CSB_CC_WR_EXTERNAL (12) | ||
66 | #define CSB_CC_NOSPC (13) | ||
67 | #define CSB_CC_EXCESSIVE_DDE (14) | ||
68 | #define CSB_CC_WR_TRANSLATION (15) | ||
69 | #define CSB_CC_WR_PROTECTION (16) | ||
70 | #define CSB_CC_UNKNOWN_CODE (17) | ||
71 | #define CSB_CC_ABORT (18) | ||
72 | #define CSB_CC_TRANSPORT (20) | ||
73 | #define CSB_CC_SEGMENTED_DDL (31) | ||
74 | #define CSB_CC_PROGRESS_POINT (32) | ||
75 | #define CSB_CC_DDE_OVERFLOW (33) | ||
76 | #define CSB_CC_SESSION (34) | ||
77 | #define CSB_CC_PROVISION (36) | ||
78 | #define CSB_CC_CHAIN (37) | ||
79 | #define CSB_CC_SEQUENCE (38) | ||
80 | #define CSB_CC_HW (39) | ||
81 | |||
82 | #define CSB_SIZE (0x10) | ||
83 | #define CSB_ALIGN CSB_SIZE | ||
84 | |||
85 | struct coprocessor_status_block { | ||
86 | u8 flags; | ||
87 | u8 cs; | ||
88 | u8 cc; | ||
89 | u8 ce; | ||
90 | __be32 count; | ||
91 | __be64 address; | ||
92 | } __packed __aligned(CSB_ALIGN); | ||
93 | |||
94 | |||
95 | /* Chapter 6.5.10 Data-Descriptor List (DDL) | ||
96 | * each list contains one or more Data-Descriptor Entries (DDE) | ||
97 | */ | ||
98 | |||
99 | #define DDE_P (0x8000) | ||
100 | |||
101 | #define DDE_SIZE (0x10) | ||
102 | #define DDE_ALIGN DDE_SIZE | ||
103 | |||
104 | struct data_descriptor_entry { | ||
105 | __be16 flags; | ||
106 | u8 count; | ||
107 | u8 index; | ||
108 | __be32 length; | ||
109 | __be64 address; | ||
110 | } __packed __aligned(DDE_ALIGN); | ||
111 | |||
112 | |||
113 | /* Chapter 6.5.2 Coprocessor-Request Block (CRB) */ | ||
114 | |||
115 | #define CRB_SIZE (0x80) | ||
116 | #define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */ | ||
117 | |||
118 | /* Coprocessor Status Block field | ||
119 | * ADDRESS address of CSB | ||
120 | * C CCB is valid | ||
121 | * AT 0 = addrs are virtual, 1 = addrs are phys | ||
122 | * M enable perf monitor | ||
123 | */ | ||
124 | #define CRB_CSB_ADDRESS (0xfffffffffffffff0) | ||
125 | #define CRB_CSB_C (0x0000000000000008) | ||
126 | #define CRB_CSB_AT (0x0000000000000002) | ||
127 | #define CRB_CSB_M (0x0000000000000001) | ||
128 | |||
129 | struct coprocessor_request_block { | ||
130 | __be32 ccw; | ||
131 | __be32 flags; | ||
132 | __be64 csb_addr; | ||
133 | |||
134 | struct data_descriptor_entry source; | ||
135 | struct data_descriptor_entry target; | ||
136 | |||
137 | struct coprocessor_completion_block ccb; | ||
138 | |||
139 | u8 reserved[48]; | ||
140 | |||
141 | struct coprocessor_status_block csb; | ||
142 | } __packed __aligned(CRB_ALIGN); | ||
143 | |||
144 | |||
145 | /* RFC02167 Initiate Coprocessor Instructions document | ||
146 | * Chapter 8.2.1.1.1 RS | ||
147 | * Chapter 8.2.3 Coprocessor Directive | ||
148 | * Chapter 8.2.4 Execution | ||
149 | * | ||
150 | * The CCW must be converted to BE before passing to icswx() | ||
151 | */ | ||
152 | |||
153 | #define CCW_PS (0xff000000) | ||
154 | #define CCW_CT (0x00ff0000) | ||
155 | #define CCW_CD (0x0000ffff) | ||
156 | #define CCW_CL (0x0000c000) | ||
157 | |||
158 | |||
159 | /* RFC02167 Initiate Coprocessor Instructions document | ||
160 | * Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX) | ||
161 | * Chapter 8.2.4.1 Condition Register 0 | ||
162 | */ | ||
163 | |||
164 | #define ICSWX_INITIATED (0x8) | ||
165 | #define ICSWX_BUSY (0x4) | ||
166 | #define ICSWX_REJECTED (0x2) | ||
167 | |||
168 | static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb) | ||
169 | { | ||
170 | __be64 ccw_reg = ccw; | ||
171 | u32 cr; | ||
172 | |||
173 | __asm__ __volatile__( | ||
174 | PPC_ICSWX(%1,0,%2) "\n" | ||
175 | "mfcr %0\n" | ||
176 | : "=r" (cr) | ||
177 | : "r" (ccw_reg), "r" (crb) | ||
178 | : "cr0", "memory"); | ||
179 | |||
180 | return (int)((cr >> 28) & 0xf); | ||
181 | } | ||
182 | |||
183 | |||
184 | #endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */ | ||
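icswx() returns CR0 of the instruction shifted into the low nibble, so a caller compares the result against the ICSWX_* codes above. A hypothetical submission helper, not part of this patch, following the header's own rules (CRB aligned to CRB_ALIGN, CCW converted to big-endian first):

	#include <linux/errno.h>
	#include <asm/byteorder.h>
	#include <asm/icswx.h>

	/* Sketch: issue one coprocessor request, retrying while busy.
	 * The -EIO mapping for rejected/unknown results is illustrative. */
	static int submit_crb(u32 ccw, struct coprocessor_request_block *crb)
	{
		int cr0;

		do {
			/* per the comment above: the CCW must be BE */
			cr0 = icswx(cpu_to_be32(ccw), crb);
		} while (cr0 == ICSWX_BUSY);

		return cr0 == ICSWX_INITIATED ? 0 : -EIO;
	}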
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 5c93f691b495..8452335661a5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -136,6 +136,8 @@ | |||
136 | #define PPC_INST_DCBAL 0x7c2005ec | 136 | #define PPC_INST_DCBAL 0x7c2005ec |
137 | #define PPC_INST_DCBZL 0x7c2007ec | 137 | #define PPC_INST_DCBZL 0x7c2007ec |
138 | #define PPC_INST_ICBT 0x7c00002c | 138 | #define PPC_INST_ICBT 0x7c00002c |
139 | #define PPC_INST_ICSWX 0x7c00032d | ||
140 | #define PPC_INST_ICSWEPX 0x7c00076d | ||
139 | #define PPC_INST_ISEL 0x7c00001e | 141 | #define PPC_INST_ISEL 0x7c00001e |
140 | #define PPC_INST_ISEL_MASK 0xfc00003e | 142 | #define PPC_INST_ISEL_MASK 0xfc00003e |
141 | #define PPC_INST_LDARX 0x7c0000a8 | 143 | #define PPC_INST_LDARX 0x7c0000a8 |
@@ -403,4 +405,15 @@ | |||
403 | #define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \ | 405 | #define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \ |
404 | TMRN(tmr) | ___PPC_RT(r)) | 406 | TMRN(tmr) | ___PPC_RT(r)) |
405 | 407 | ||
408 | /* Coprocessor instructions */ | ||
409 | #define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \ | ||
410 | ___PPC_RS(s) | \ | ||
411 | ___PPC_RA(a) | \ | ||
412 | ___PPC_RB(b)) | ||
413 | #define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \ | ||
414 | ___PPC_RS(s) | \ | ||
415 | ___PPC_RA(a) | \ | ||
416 | ___PPC_RB(b)) | ||
417 | |||
418 | |||
406 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ | 419 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ |
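PPC_ICSWX() builds the instruction word by OR-ing the register fields into PPC_INST_ICSWX. Assuming the standard ___PPC_RS/___PPC_RA/___PPC_RB positions defined earlier in this header (rS << 21, rA << 16, rB << 11), the expansion can be checked in isolation:

	/* Illustrative userspace check of the PPC_ICSWX(1, 0, 2) encoding;
	 * the field positions are the assumed rS<<21 / rA<<16 / rB<<11. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t insn = 0x7c00032d		/* PPC_INST_ICSWX */
			      | ((1u & 0x1f) << 21)	/* rS = r1 (CCW) */
			      | ((0u & 0x1f) << 16)	/* rA = 0 */
			      | ((2u & 0x1f) << 11);	/* rB = r2 (CRB) */

		printf(".long 0x%08x\t/* icswx r1,0,r2 */\n", insn);
		return 0;	/* prints .long 0x7c20132d */
	}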
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 308c5e15676b..ea2cea7eaef1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -800,6 +800,7 @@ int of_get_ibm_chip_id(struct device_node *np) | |||
800 | } | 800 | } |
801 | return -1; | 801 | return -1; |
802 | } | 802 | } |
803 | EXPORT_SYMBOL(of_get_ibm_chip_id); | ||
803 | 804 | ||
804 | /** | 805 | /** |
805 | * cpu_to_chip_id - Return the cpus chip-id | 806 | * cpu_to_chip_id - Return the cpus chip-id |
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index b688731d7ede..c9d2b922734b 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -33,10 +33,10 @@ static int md5_sparc64_init(struct shash_desc *desc) | |||
33 | { | 33 | { |
34 | struct md5_state *mctx = shash_desc_ctx(desc); | 34 | struct md5_state *mctx = shash_desc_ctx(desc); |
35 | 35 | ||
36 | mctx->hash[0] = cpu_to_le32(0x67452301); | 36 | mctx->hash[0] = cpu_to_le32(MD5_H0); |
37 | mctx->hash[1] = cpu_to_le32(0xefcdab89); | 37 | mctx->hash[1] = cpu_to_le32(MD5_H1); |
38 | mctx->hash[2] = cpu_to_le32(0x98badcfe); | 38 | mctx->hash[2] = cpu_to_le32(MD5_H2); |
39 | mctx->hash[3] = cpu_to_le32(0x10325476); | 39 | mctx->hash[3] = cpu_to_le32(MD5_H3); |
40 | mctx->byte_count = 0; | 40 | mctx->byte_count = 0; |
41 | 41 | ||
42 | return 0; | 42 | return 0; |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b419f43ce0c5..2bfc8a7c88c1 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -44,15 +44,19 @@ | |||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | 46 | ||
47 | #define AESNI_ALIGN 16 | ||
48 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1)) | ||
49 | #define RFC4106_HASH_SUBKEY_SIZE 16 | ||
50 | |||
47 | /* This data is stored at the end of the crypto_tfm struct. | 51 | /* This data is stored at the end of the crypto_tfm struct. |
48 | * It's a type of per "session" data storage location. | 52 | * It's a type of per "session" data storage location. |
49 | * This needs to be 16 byte aligned. | 53 | * This needs to be 16 byte aligned. |
50 | */ | 54 | */ |
51 | struct aesni_rfc4106_gcm_ctx { | 55 | struct aesni_rfc4106_gcm_ctx { |
52 | u8 hash_subkey[16]; | 56 | u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
53 | struct crypto_aes_ctx aes_key_expanded; | 57 | struct crypto_aes_ctx aes_key_expanded |
58 | __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
54 | u8 nonce[4]; | 59 | u8 nonce[4]; |
55 | struct cryptd_aead *cryptd_tfm; | ||
56 | }; | 60 | }; |
57 | 61 | ||
58 | struct aesni_gcm_set_hash_subkey_result { | 62 | struct aesni_gcm_set_hash_subkey_result { |
@@ -66,10 +70,6 @@ struct aesni_hash_subkey_req_data { | |||
66 | struct scatterlist sg; | 70 | struct scatterlist sg; |
67 | }; | 71 | }; |
68 | 72 | ||
69 | #define AESNI_ALIGN (16) | ||
70 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) | ||
71 | #define RFC4106_HASH_SUBKEY_SIZE 16 | ||
72 | |||
73 | struct aesni_lrw_ctx { | 73 | struct aesni_lrw_ctx { |
74 | struct lrw_table_ctx lrw_table; | 74 | struct lrw_table_ctx lrw_table; |
75 | u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; | 75 | u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; |
@@ -283,10 +283,11 @@ static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out, | |||
283 | static inline struct | 283 | static inline struct |
284 | aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) | 284 | aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) |
285 | { | 285 | { |
286 | return | 286 | unsigned long align = AESNI_ALIGN; |
287 | (struct aesni_rfc4106_gcm_ctx *) | 287 | |
288 | PTR_ALIGN((u8 *) | 288 | if (align <= crypto_tfm_ctx_alignment()) |
289 | crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN); | 289 | align = 1; |
290 | return PTR_ALIGN(crypto_aead_ctx(tfm), align); | ||
290 | } | 291 | } |
291 | #endif | 292 | #endif |
292 | 293 | ||
@@ -790,36 +791,30 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
790 | #endif | 791 | #endif |
791 | 792 | ||
792 | #ifdef CONFIG_X86_64 | 793 | #ifdef CONFIG_X86_64 |
793 | static int rfc4106_init(struct crypto_tfm *tfm) | 794 | static int rfc4106_init(struct crypto_aead *aead) |
794 | { | 795 | { |
795 | struct cryptd_aead *cryptd_tfm; | 796 | struct cryptd_aead *cryptd_tfm; |
796 | struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) | 797 | struct cryptd_aead **ctx = crypto_aead_ctx(aead); |
797 | PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); | 798 | |
798 | struct crypto_aead *cryptd_child; | ||
799 | struct aesni_rfc4106_gcm_ctx *child_ctx; | ||
800 | cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", | 799 | cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", |
801 | CRYPTO_ALG_INTERNAL, | 800 | CRYPTO_ALG_INTERNAL, |
802 | CRYPTO_ALG_INTERNAL); | 801 | CRYPTO_ALG_INTERNAL); |
803 | if (IS_ERR(cryptd_tfm)) | 802 | if (IS_ERR(cryptd_tfm)) |
804 | return PTR_ERR(cryptd_tfm); | 803 | return PTR_ERR(cryptd_tfm); |
805 | 804 | ||
806 | cryptd_child = cryptd_aead_child(cryptd_tfm); | 805 | *ctx = cryptd_tfm; |
807 | child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); | 806 | crypto_aead_set_reqsize( |
808 | memcpy(child_ctx, ctx, sizeof(*ctx)); | 807 | aead, |
809 | ctx->cryptd_tfm = cryptd_tfm; | 808 | sizeof(struct aead_request) + |
810 | tfm->crt_aead.reqsize = sizeof(struct aead_request) | 809 | crypto_aead_reqsize(&cryptd_tfm->base)); |
811 | + crypto_aead_reqsize(&cryptd_tfm->base); | ||
812 | return 0; | 810 | return 0; |
813 | } | 811 | } |
814 | 812 | ||
815 | static void rfc4106_exit(struct crypto_tfm *tfm) | 813 | static void rfc4106_exit(struct crypto_aead *aead) |
816 | { | 814 | { |
817 | struct aesni_rfc4106_gcm_ctx *ctx = | 815 | struct cryptd_aead **ctx = crypto_aead_ctx(aead); |
818 | (struct aesni_rfc4106_gcm_ctx *) | 816 | |
819 | PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); | 817 | cryptd_free_aead(*ctx); |
820 | if (!IS_ERR(ctx->cryptd_tfm)) | ||
821 | cryptd_free_aead(ctx->cryptd_tfm); | ||
822 | return; | ||
823 | } | 818 | } |
824 | 819 | ||
825 | static void | 820 | static void |
@@ -845,8 +840,6 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) | |||
845 | if (IS_ERR(ctr_tfm)) | 840 | if (IS_ERR(ctr_tfm)) |
846 | return PTR_ERR(ctr_tfm); | 841 | return PTR_ERR(ctr_tfm); |
847 | 842 | ||
848 | crypto_ablkcipher_clear_flags(ctr_tfm, ~0); | ||
849 | |||
850 | ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); | 843 | ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); |
851 | if (ret) | 844 | if (ret) |
852 | goto out_free_ablkcipher; | 845 | goto out_free_ablkcipher; |
@@ -895,73 +888,29 @@ out_free_ablkcipher: | |||
895 | static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, | 888 | static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, |
896 | unsigned int key_len) | 889 | unsigned int key_len) |
897 | { | 890 | { |
898 | int ret = 0; | ||
899 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
900 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); | 891 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); |
901 | u8 *new_key_align, *new_key_mem = NULL; | ||
902 | 892 | ||
903 | if (key_len < 4) { | 893 | if (key_len < 4) { |
904 | crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 894 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); |
905 | return -EINVAL; | 895 | return -EINVAL; |
906 | } | 896 | } |
907 | /*Account for 4 byte nonce at the end.*/ | 897 | /*Account for 4 byte nonce at the end.*/ |
908 | key_len -= 4; | 898 | key_len -= 4; |
909 | if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && | ||
910 | key_len != AES_KEYSIZE_256) { | ||
911 | crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
912 | return -EINVAL; | ||
913 | } | ||
914 | 899 | ||
915 | memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); | 900 | memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); |
916 | /*This must be on a 16 byte boundary!*/ | ||
917 | if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN) | ||
918 | return -EINVAL; | ||
919 | |||
920 | if ((unsigned long)key % AESNI_ALIGN) { | ||
921 | /* key is not aligned: use an auxiliary aligned pointer */ | ||
922 | new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL); | ||
923 | if (!new_key_mem) | ||
924 | return -ENOMEM; | ||
925 | |||
926 | new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN); | ||
927 | memcpy(new_key_align, key, key_len); | ||
928 | key = new_key_align; | ||
929 | } | ||
930 | 901 | ||
931 | if (!irq_fpu_usable()) | 902 | return aes_set_key_common(crypto_aead_tfm(aead), |
932 | ret = crypto_aes_expand_key(&(ctx->aes_key_expanded), | 903 | &ctx->aes_key_expanded, key, key_len) ?: |
933 | key, key_len); | 904 | rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); |
934 | else { | ||
935 | kernel_fpu_begin(); | ||
936 | ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len); | ||
937 | kernel_fpu_end(); | ||
938 | } | ||
939 | /*This must be on a 16 byte boundary!*/ | ||
940 | if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) { | ||
941 | ret = -EINVAL; | ||
942 | goto exit; | ||
943 | } | ||
944 | ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); | ||
945 | exit: | ||
946 | kfree(new_key_mem); | ||
947 | return ret; | ||
948 | } | 905 | } |
949 | 906 | ||
950 | static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, | 907 | static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, |
951 | unsigned int key_len) | 908 | unsigned int key_len) |
952 | { | 909 | { |
953 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); | 910 | struct cryptd_aead **ctx = crypto_aead_ctx(parent); |
954 | struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm); | 911 | struct cryptd_aead *cryptd_tfm = *ctx; |
955 | struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child); | ||
956 | struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm; | ||
957 | int ret; | ||
958 | 912 | ||
959 | ret = crypto_aead_setkey(child, key, key_len); | 913 | return crypto_aead_setkey(&cryptd_tfm->base, key, key_len); |
960 | if (!ret) { | ||
961 | memcpy(ctx, c_ctx, sizeof(*ctx)); | ||
962 | ctx->cryptd_tfm = cryptd_tfm; | ||
963 | } | ||
964 | return ret; | ||
965 | } | 914 | } |
966 | 915 | ||
967 | static int common_rfc4106_set_authsize(struct crypto_aead *aead, | 916 | static int common_rfc4106_set_authsize(struct crypto_aead *aead, |
@@ -975,7 +924,7 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, | |||
975 | default: | 924 | default: |
976 | return -EINVAL; | 925 | return -EINVAL; |
977 | } | 926 | } |
978 | crypto_aead_crt(aead)->authsize = authsize; | 927 | |
979 | return 0; | 928 | return 0; |
980 | } | 929 | } |
981 | 930 | ||
@@ -984,30 +933,23 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, | |||
984 | static int rfc4106_set_authsize(struct crypto_aead *parent, | 933 | static int rfc4106_set_authsize(struct crypto_aead *parent, |
985 | unsigned int authsize) | 934 | unsigned int authsize) |
986 | { | 935 | { |
987 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); | 936 | struct cryptd_aead **ctx = crypto_aead_ctx(parent); |
988 | struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm); | 937 | struct cryptd_aead *cryptd_tfm = *ctx; |
989 | int ret; | ||
990 | 938 | ||
991 | ret = crypto_aead_setauthsize(child, authsize); | 939 | return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); |
992 | if (!ret) | ||
993 | crypto_aead_crt(parent)->authsize = authsize; | ||
994 | return ret; | ||
995 | } | 940 | } |
996 | 941 | ||
997 | static int __driver_rfc4106_encrypt(struct aead_request *req) | 942 | static int helper_rfc4106_encrypt(struct aead_request *req) |
998 | { | 943 | { |
999 | u8 one_entry_in_sg = 0; | 944 | u8 one_entry_in_sg = 0; |
1000 | u8 *src, *dst, *assoc; | 945 | u8 *src, *dst, *assoc; |
1001 | __be32 counter = cpu_to_be32(1); | 946 | __be32 counter = cpu_to_be32(1); |
1002 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 947 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1003 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 948 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
1004 | u32 key_len = ctx->aes_key_expanded.key_length; | ||
1005 | void *aes_ctx = &(ctx->aes_key_expanded); | 949 | void *aes_ctx = &(ctx->aes_key_expanded); |
1006 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 950 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
1007 | u8 iv_tab[16+AESNI_ALIGN]; | 951 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
1008 | u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN); | ||
1009 | struct scatter_walk src_sg_walk; | 952 | struct scatter_walk src_sg_walk; |
1010 | struct scatter_walk assoc_sg_walk; | ||
1011 | struct scatter_walk dst_sg_walk; | 953 | struct scatter_walk dst_sg_walk; |
1012 | unsigned int i; | 954 | unsigned int i; |
1013 | 955 | ||
@@ -1016,12 +958,6 @@ static int __driver_rfc4106_encrypt(struct aead_request *req) | |||
1016 | /* to 8 or 12 bytes */ | 958 | /* to 8 or 12 bytes */ |
1017 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) | 959 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) |
1018 | return -EINVAL; | 960 | return -EINVAL; |
1019 | if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) | ||
1020 | return -EINVAL; | ||
1021 | if (unlikely(key_len != AES_KEYSIZE_128 && | ||
1022 | key_len != AES_KEYSIZE_192 && | ||
1023 | key_len != AES_KEYSIZE_256)) | ||
1024 | return -EINVAL; | ||
1025 | 961 | ||
1026 | /* IV below built */ | 962 | /* IV below built */ |
1027 | for (i = 0; i < 4; i++) | 963 | for (i = 0; i < 4; i++) |
@@ -1030,55 +966,57 @@ static int __driver_rfc4106_encrypt(struct aead_request *req) | |||
1030 | *(iv+4+i) = req->iv[i]; | 966 | *(iv+4+i) = req->iv[i]; |
1031 | *((__be32 *)(iv+12)) = counter; | 967 | *((__be32 *)(iv+12)) = counter; |
1032 | 968 | ||
1033 | if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { | 969 | if (sg_is_last(req->src) && |
970 | req->src->offset + req->src->length <= PAGE_SIZE && | ||
971 | sg_is_last(req->dst) && | ||
972 | req->dst->offset + req->dst->length <= PAGE_SIZE) { | ||
1034 | one_entry_in_sg = 1; | 973 | one_entry_in_sg = 1; |
1035 | scatterwalk_start(&src_sg_walk, req->src); | 974 | scatterwalk_start(&src_sg_walk, req->src); |
1036 | scatterwalk_start(&assoc_sg_walk, req->assoc); | 975 | assoc = scatterwalk_map(&src_sg_walk); |
1037 | src = scatterwalk_map(&src_sg_walk); | 976 | src = assoc + req->assoclen; |
1038 | assoc = scatterwalk_map(&assoc_sg_walk); | ||
1039 | dst = src; | 977 | dst = src; |
1040 | if (unlikely(req->src != req->dst)) { | 978 | if (unlikely(req->src != req->dst)) { |
1041 | scatterwalk_start(&dst_sg_walk, req->dst); | 979 | scatterwalk_start(&dst_sg_walk, req->dst); |
1042 | dst = scatterwalk_map(&dst_sg_walk); | 980 | dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; |
1043 | } | 981 | } |
1044 | |||
1045 | } else { | 982 | } else { |
1046 | /* Allocate memory for src, dst, assoc */ | 983 | /* Allocate memory for src, dst, assoc */ |
1047 | src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, | 984 | assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, |
1048 | GFP_ATOMIC); | 985 | GFP_ATOMIC); |
1049 | if (unlikely(!src)) | 986 | if (unlikely(!assoc)) |
1050 | return -ENOMEM; | 987 | return -ENOMEM; |
1051 | assoc = (src + req->cryptlen + auth_tag_len); | 988 | scatterwalk_map_and_copy(assoc, req->src, 0, |
1052 | scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); | 989 | req->assoclen + req->cryptlen, 0); |
1053 | scatterwalk_map_and_copy(assoc, req->assoc, 0, | 990 | src = assoc + req->assoclen; |
1054 | req->assoclen, 0); | ||
1055 | dst = src; | 991 | dst = src; |
1056 | } | 992 | } |
1057 | 993 | ||
994 | kernel_fpu_begin(); | ||
1058 | aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, | 995 | aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, |
1059 | ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst | 996 | ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst |
1060 | + ((unsigned long)req->cryptlen), auth_tag_len); | 997 | + ((unsigned long)req->cryptlen), auth_tag_len); |
998 | kernel_fpu_end(); | ||
1061 | 999 | ||
1062 | /* The authTag (aka the Integrity Check Value) needs to be written | 1000 | /* The authTag (aka the Integrity Check Value) needs to be written |
1063 | * back to the packet. */ | 1001 | * back to the packet. */ |
1064 | if (one_entry_in_sg) { | 1002 | if (one_entry_in_sg) { |
1065 | if (unlikely(req->src != req->dst)) { | 1003 | if (unlikely(req->src != req->dst)) { |
1066 | scatterwalk_unmap(dst); | 1004 | scatterwalk_unmap(dst - req->assoclen); |
1067 | scatterwalk_done(&dst_sg_walk, 0, 0); | 1005 | scatterwalk_advance(&dst_sg_walk, req->dst->length); |
1006 | scatterwalk_done(&dst_sg_walk, 1, 0); | ||
1068 | } | 1007 | } |
1069 | scatterwalk_unmap(src); | ||
1070 | scatterwalk_unmap(assoc); | 1008 | scatterwalk_unmap(assoc); |
1071 | scatterwalk_done(&src_sg_walk, 0, 0); | 1009 | scatterwalk_advance(&src_sg_walk, req->src->length); |
1072 | scatterwalk_done(&assoc_sg_walk, 0, 0); | 1010 | scatterwalk_done(&src_sg_walk, req->src == req->dst, 0); |
1073 | } else { | 1011 | } else { |
1074 | scatterwalk_map_and_copy(dst, req->dst, 0, | 1012 | scatterwalk_map_and_copy(dst, req->dst, req->assoclen, |
1075 | req->cryptlen + auth_tag_len, 1); | 1013 | req->cryptlen + auth_tag_len, 1); |
1076 | kfree(src); | 1014 | kfree(assoc); |
1077 | } | 1015 | } |
1078 | return 0; | 1016 | return 0; |
1079 | } | 1017 | } |
1080 | 1018 | ||
1081 | static int __driver_rfc4106_decrypt(struct aead_request *req) | 1019 | static int helper_rfc4106_decrypt(struct aead_request *req) |
1082 | { | 1020 | { |
1083 | u8 one_entry_in_sg = 0; | 1021 | u8 one_entry_in_sg = 0; |
1084 | u8 *src, *dst, *assoc; | 1022 | u8 *src, *dst, *assoc; |
@@ -1087,26 +1025,16 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1087 | int retval = 0; | 1025 | int retval = 0; |
1088 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1026 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1089 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1027 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); |
1090 | u32 key_len = ctx->aes_key_expanded.key_length; | ||
1091 | void *aes_ctx = &(ctx->aes_key_expanded); | 1028 | void *aes_ctx = &(ctx->aes_key_expanded); |
1092 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 1029 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
1093 | u8 iv_and_authTag[32+AESNI_ALIGN]; | 1030 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); |
1094 | u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN); | 1031 | u8 authTag[16]; |
1095 | u8 *authTag = iv + 16; | ||
1096 | struct scatter_walk src_sg_walk; | 1032 | struct scatter_walk src_sg_walk; |
1097 | struct scatter_walk assoc_sg_walk; | ||
1098 | struct scatter_walk dst_sg_walk; | 1033 | struct scatter_walk dst_sg_walk; |
1099 | unsigned int i; | 1034 | unsigned int i; |
1100 | 1035 | ||
1101 | if (unlikely((req->cryptlen < auth_tag_len) || | 1036 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) |
1102 | (req->assoclen != 8 && req->assoclen != 12))) | ||
1103 | return -EINVAL; | 1037 | return -EINVAL; |
1104 | if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16)) | ||
1105 | return -EINVAL; | ||
1106 | if (unlikely(key_len != AES_KEYSIZE_128 && | ||
1107 | key_len != AES_KEYSIZE_192 && | ||
1108 | key_len != AES_KEYSIZE_256)) | ||
1109 | return -EINVAL; | ||
1110 | 1038 | ||
1111 | /* Assuming we are supporting rfc4106 64-bit extended */ | 1039 | /* Assuming we are supporting rfc4106 64-bit extended */ |
1112 | /* sequence numbers. We need to have the AAD length */ | 1040 | /* sequence numbers. We need to have the AAD length */ |
@@ -1120,33 +1048,36 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1120 | *(iv+4+i) = req->iv[i]; | 1048 | *(iv+4+i) = req->iv[i]; |
1121 | *((__be32 *)(iv+12)) = counter; | 1049 | *((__be32 *)(iv+12)) = counter; |
1122 | 1050 | ||
1123 | if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) { | 1051 | if (sg_is_last(req->src) && |
1052 | req->src->offset + req->src->length <= PAGE_SIZE && | ||
1053 | sg_is_last(req->dst) && | ||
1054 | req->dst->offset + req->dst->length <= PAGE_SIZE) { | ||
1124 | one_entry_in_sg = 1; | 1055 | one_entry_in_sg = 1; |
1125 | scatterwalk_start(&src_sg_walk, req->src); | 1056 | scatterwalk_start(&src_sg_walk, req->src); |
1126 | scatterwalk_start(&assoc_sg_walk, req->assoc); | 1057 | assoc = scatterwalk_map(&src_sg_walk); |
1127 | src = scatterwalk_map(&src_sg_walk); | 1058 | src = assoc + req->assoclen; |
1128 | assoc = scatterwalk_map(&assoc_sg_walk); | ||
1129 | dst = src; | 1059 | dst = src; |
1130 | if (unlikely(req->src != req->dst)) { | 1060 | if (unlikely(req->src != req->dst)) { |
1131 | scatterwalk_start(&dst_sg_walk, req->dst); | 1061 | scatterwalk_start(&dst_sg_walk, req->dst); |
1132 | dst = scatterwalk_map(&dst_sg_walk); | 1062 | dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; |
1133 | } | 1063 | } |
1134 | 1064 | ||
1135 | } else { | 1065 | } else { |
1136 | /* Allocate memory for src, dst, assoc */ | 1066 | /* Allocate memory for src, dst, assoc */ |
1137 | src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); | 1067 | assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); |
1138 | if (!src) | 1068 | if (!assoc) |
1139 | return -ENOMEM; | 1069 | return -ENOMEM; |
1140 | assoc = (src + req->cryptlen); | 1070 | scatterwalk_map_and_copy(assoc, req->src, 0, |
1141 | scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); | 1071 | req->assoclen + req->cryptlen, 0); |
1142 | scatterwalk_map_and_copy(assoc, req->assoc, 0, | 1072 | src = assoc + req->assoclen; |
1143 | req->assoclen, 0); | ||
1144 | dst = src; | 1073 | dst = src; |
1145 | } | 1074 | } |
1146 | 1075 | ||
1076 | kernel_fpu_begin(); | ||
1147 | aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, | 1077 | aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, |
1148 | ctx->hash_subkey, assoc, (unsigned long)req->assoclen, | 1078 | ctx->hash_subkey, assoc, (unsigned long)req->assoclen, |
1149 | authTag, auth_tag_len); | 1079 | authTag, auth_tag_len); |
1080 | kernel_fpu_end(); | ||
1150 | 1081 | ||
1151 | /* Compare generated tag with passed in tag. */ | 1082 | /* Compare generated tag with passed in tag. */ |
1152 | retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ? | 1083 | retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ? |
@@ -1154,90 +1085,59 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1154 | 1085 | ||
1155 | if (one_entry_in_sg) { | 1086 | if (one_entry_in_sg) { |
1156 | if (unlikely(req->src != req->dst)) { | 1087 | if (unlikely(req->src != req->dst)) { |
1157 | scatterwalk_unmap(dst); | 1088 | scatterwalk_unmap(dst - req->assoclen); |
1158 | scatterwalk_done(&dst_sg_walk, 0, 0); | 1089 | scatterwalk_advance(&dst_sg_walk, req->dst->length); |
1090 | scatterwalk_done(&dst_sg_walk, 1, 0); | ||
1159 | } | 1091 | } |
1160 | scatterwalk_unmap(src); | ||
1161 | scatterwalk_unmap(assoc); | 1092 | scatterwalk_unmap(assoc); |
1162 | scatterwalk_done(&src_sg_walk, 0, 0); | 1093 | scatterwalk_advance(&src_sg_walk, req->src->length); |
1163 | scatterwalk_done(&assoc_sg_walk, 0, 0); | 1094 | scatterwalk_done(&src_sg_walk, req->src == req->dst, 0); |
1164 | } else { | 1095 | } else { |
1165 | scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); | 1096 | scatterwalk_map_and_copy(dst, req->dst, req->assoclen, |
1166 | kfree(src); | 1097 | tempCipherLen, 1); |
1098 | kfree(assoc); | ||
1167 | } | 1099 | } |
1168 | return retval; | 1100 | return retval; |
1169 | } | 1101 | } |
1170 | 1102 | ||
1171 | static int rfc4106_encrypt(struct aead_request *req) | 1103 | static int rfc4106_encrypt(struct aead_request *req) |
1172 | { | 1104 | { |
1173 | int ret; | ||
1174 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1105 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1175 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1106 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
1107 | struct cryptd_aead *cryptd_tfm = *ctx; | ||
1108 | struct aead_request *subreq = aead_request_ctx(req); | ||
1176 | 1109 | ||
1177 | if (!irq_fpu_usable()) { | 1110 | aead_request_set_tfm(subreq, irq_fpu_usable() ? |
1178 | struct aead_request *cryptd_req = | 1111 | cryptd_aead_child(cryptd_tfm) : |
1179 | (struct aead_request *) aead_request_ctx(req); | 1112 | &cryptd_tfm->base); |
1180 | 1113 | ||
1181 | memcpy(cryptd_req, req, sizeof(*req)); | 1114 | aead_request_set_callback(subreq, req->base.flags, |
1182 | aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | 1115 | req->base.complete, req->base.data); |
1183 | ret = crypto_aead_encrypt(cryptd_req); | 1116 | aead_request_set_crypt(subreq, req->src, req->dst, |
1184 | } else { | 1117 | req->cryptlen, req->iv); |
1185 | kernel_fpu_begin(); | 1118 | aead_request_set_ad(subreq, req->assoclen); |
1186 | ret = __driver_rfc4106_encrypt(req); | 1119 | |
1187 | kernel_fpu_end(); | 1120 | return crypto_aead_encrypt(subreq); |
1188 | } | ||
1189 | return ret; | ||
1190 | } | 1121 | } |
1191 | 1122 | ||
1192 | static int rfc4106_decrypt(struct aead_request *req) | 1123 | static int rfc4106_decrypt(struct aead_request *req) |
1193 | { | 1124 | { |
1194 | int ret; | ||
1195 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1125 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
1196 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | 1126 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
1127 | struct cryptd_aead *cryptd_tfm = *ctx; | ||
1128 | struct aead_request *subreq = aead_request_ctx(req); | ||
1197 | 1129 | ||
1198 | if (!irq_fpu_usable()) { | 1130 | aead_request_set_tfm(subreq, irq_fpu_usable() ? |
1199 | struct aead_request *cryptd_req = | 1131 | cryptd_aead_child(cryptd_tfm) : |
1200 | (struct aead_request *) aead_request_ctx(req); | 1132 | &cryptd_tfm->base); |
1201 | 1133 | ||
1202 | memcpy(cryptd_req, req, sizeof(*req)); | 1134 | aead_request_set_callback(subreq, req->base.flags, |
1203 | aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | 1135 | req->base.complete, req->base.data); |
1204 | ret = crypto_aead_decrypt(cryptd_req); | 1136 | aead_request_set_crypt(subreq, req->src, req->dst, |
1205 | } else { | 1137 | req->cryptlen, req->iv); |
1206 | kernel_fpu_begin(); | 1138 | aead_request_set_ad(subreq, req->assoclen); |
1207 | ret = __driver_rfc4106_decrypt(req); | ||
1208 | kernel_fpu_end(); | ||
1209 | } | ||
1210 | return ret; | ||
1211 | } | ||
1212 | |||
1213 | static int helper_rfc4106_encrypt(struct aead_request *req) | ||
1214 | { | ||
1215 | int ret; | ||
1216 | |||
1217 | if (unlikely(!irq_fpu_usable())) { | ||
1218 | WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context"); | ||
1219 | ret = -EINVAL; | ||
1220 | } else { | ||
1221 | kernel_fpu_begin(); | ||
1222 | ret = __driver_rfc4106_encrypt(req); | ||
1223 | kernel_fpu_end(); | ||
1224 | } | ||
1225 | return ret; | ||
1226 | } | ||
1227 | |||
1228 | static int helper_rfc4106_decrypt(struct aead_request *req) | ||
1229 | { | ||
1230 | int ret; | ||
1231 | 1139 | ||
1232 | if (unlikely(!irq_fpu_usable())) { | 1140 | return crypto_aead_decrypt(subreq); |
1233 | WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context"); | ||
1234 | ret = -EINVAL; | ||
1235 | } else { | ||
1236 | kernel_fpu_begin(); | ||
1237 | ret = __driver_rfc4106_decrypt(req); | ||
1238 | kernel_fpu_end(); | ||
1239 | } | ||
1240 | return ret; | ||
1241 | } | 1141 | } |
1242 | #endif | 1142 | #endif |
1243 | 1143 | ||
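Editor's aside: the conversion above moves rfc4106 onto the new AEAD calling convention, where the associated data and the plain/cipher text share one scatterlist and the AD length travels via aead_request_set_ad(). A minimal caller-side sketch of that convention follows; the buffer layout, the AES-128 key size, and the synchronous-style error handling are illustrative assumptions, not part of this patch.

#include <crypto/aead.h>
#include <linux/scatterlist.h>

/* Sketch only: a real caller must also handle -EINPROGRESS/-EBUSY
 * completions from async implementations. */
static int rfc4106_seal_sketch(u8 *buf, unsigned int ptlen,
			       const u8 *key_and_salt, u8 *iv8)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* rfc4106 keys are the AES key plus a 4-byte salt (AES-128 here) */
	err = crypto_aead_setkey(tfm, key_and_salt, 16 + 4);
	if (err)
		goto out;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	err = -ENOMEM;
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out;

	/* buf holds 8 bytes of AD, ptlen bytes of plaintext, and 16
	 * spare bytes where the tag will be written */
	sg_init_one(&sg, buf, 8 + ptlen + 16);
	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv8);
	aead_request_set_ad(req, 8);

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}

Note the single scatterlist covering AD, payload, and tag room; under the old interface the AD needed its own req->assoc list, which is exactly what the hunks above delete.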
@@ -1410,51 +1310,6 @@ static struct crypto_alg aesni_algs[] = { { | |||
1410 | .geniv = "chainiv", | 1310 | .geniv = "chainiv", |
1411 | }, | 1311 | }, |
1412 | }, | 1312 | }, |
1413 | }, { | ||
1414 | .cra_name = "__gcm-aes-aesni", | ||
1415 | .cra_driver_name = "__driver-gcm-aes-aesni", | ||
1416 | .cra_priority = 0, | ||
1417 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL, | ||
1418 | .cra_blocksize = 1, | ||
1419 | .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + | ||
1420 | AESNI_ALIGN, | ||
1421 | .cra_alignmask = 0, | ||
1422 | .cra_type = &crypto_aead_type, | ||
1423 | .cra_module = THIS_MODULE, | ||
1424 | .cra_u = { | ||
1425 | .aead = { | ||
1426 | .setkey = common_rfc4106_set_key, | ||
1427 | .setauthsize = common_rfc4106_set_authsize, | ||
1428 | .encrypt = helper_rfc4106_encrypt, | ||
1429 | .decrypt = helper_rfc4106_decrypt, | ||
1430 | .ivsize = 8, | ||
1431 | .maxauthsize = 16, | ||
1432 | }, | ||
1433 | }, | ||
1434 | }, { | ||
1435 | .cra_name = "rfc4106(gcm(aes))", | ||
1436 | .cra_driver_name = "rfc4106-gcm-aesni", | ||
1437 | .cra_priority = 400, | ||
1438 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | ||
1439 | .cra_blocksize = 1, | ||
1440 | .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + | ||
1441 | AESNI_ALIGN, | ||
1442 | .cra_alignmask = 0, | ||
1443 | .cra_type = &crypto_nivaead_type, | ||
1444 | .cra_module = THIS_MODULE, | ||
1445 | .cra_init = rfc4106_init, | ||
1446 | .cra_exit = rfc4106_exit, | ||
1447 | .cra_u = { | ||
1448 | .aead = { | ||
1449 | .setkey = rfc4106_set_key, | ||
1450 | .setauthsize = rfc4106_set_authsize, | ||
1451 | .encrypt = rfc4106_encrypt, | ||
1452 | .decrypt = rfc4106_decrypt, | ||
1453 | .geniv = "seqiv", | ||
1454 | .ivsize = 8, | ||
1455 | .maxauthsize = 16, | ||
1456 | }, | ||
1457 | }, | ||
1458 | #endif | 1313 | #endif |
1459 | #if IS_ENABLED(CONFIG_CRYPTO_PCBC) | 1314 | #if IS_ENABLED(CONFIG_CRYPTO_PCBC) |
1460 | }, { | 1315 | }, { |
@@ -1569,6 +1424,46 @@ static struct crypto_alg aesni_algs[] = { { | |||
1569 | }, | 1424 | }, |
1570 | } }; | 1425 | } }; |
1571 | 1426 | ||
1427 | #ifdef CONFIG_X86_64 | ||
1428 | static struct aead_alg aesni_aead_algs[] = { { | ||
1429 | .setkey = common_rfc4106_set_key, | ||
1430 | .setauthsize = common_rfc4106_set_authsize, | ||
1431 | .encrypt = helper_rfc4106_encrypt, | ||
1432 | .decrypt = helper_rfc4106_decrypt, | ||
1433 | .ivsize = 8, | ||
1434 | .maxauthsize = 16, | ||
1435 | .base = { | ||
1436 | .cra_name = "__gcm-aes-aesni", | ||
1437 | .cra_driver_name = "__driver-gcm-aes-aesni", | ||
1438 | .cra_flags = CRYPTO_ALG_INTERNAL, | ||
1439 | .cra_blocksize = 1, | ||
1440 | .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx), | ||
1441 | .cra_alignmask = AESNI_ALIGN - 1, | ||
1442 | .cra_module = THIS_MODULE, | ||
1443 | }, | ||
1444 | }, { | ||
1445 | .init = rfc4106_init, | ||
1446 | .exit = rfc4106_exit, | ||
1447 | .setkey = rfc4106_set_key, | ||
1448 | .setauthsize = rfc4106_set_authsize, | ||
1449 | .encrypt = rfc4106_encrypt, | ||
1450 | .decrypt = rfc4106_decrypt, | ||
1451 | .ivsize = 8, | ||
1452 | .maxauthsize = 16, | ||
1453 | .base = { | ||
1454 | .cra_name = "rfc4106(gcm(aes))", | ||
1455 | .cra_driver_name = "rfc4106-gcm-aesni", | ||
1456 | .cra_priority = 400, | ||
1457 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
1458 | .cra_blocksize = 1, | ||
1459 | .cra_ctxsize = sizeof(struct cryptd_aead *), | ||
1460 | .cra_module = THIS_MODULE, | ||
1461 | }, | ||
1462 | } }; | ||
1463 | #else | ||
1464 | static struct aead_alg aesni_aead_algs[0]; | ||
1465 | #endif | ||
1466 | |||
1572 | 1467 | ||
1573 | static const struct x86_cpu_id aesni_cpu_id[] = { | 1468 | static const struct x86_cpu_id aesni_cpu_id[] = { |
1574 | X86_FEATURE_MATCH(X86_FEATURE_AES), | 1469 | X86_FEATURE_MATCH(X86_FEATURE_AES), |
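The cryptd-backed wrapper above stores only a struct cryptd_aead pointer in its context (cra_ctxsize = sizeof(struct cryptd_aead *)). Its init routine is not shown in this hunk; the following is a plausible sketch, hedged as an assumption inferred from that context layout and from the subrequest built in rfc4106_encrypt(), not a quote of the real function.

/* Sketch of an init routine inside this glue file (assumption). */
static int rfc4106_init_sketch(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm;

	/* bind to the internal helper; CRYPTO_ALG_INTERNAL keeps it
	 * from being allocated directly from user space */
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	/* leave room for the on-stack subrequest that the
	 * encrypt/decrypt wrappers above place in the request ctx */
	crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
				crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}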
@@ -1616,11 +1511,27 @@ static int __init aesni_init(void) | |||
1616 | if (err) | 1511 | if (err) |
1617 | return err; | 1512 | return err; |
1618 | 1513 | ||
1619 | return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1514 | err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); |
1515 | if (err) | ||
1516 | goto fpu_exit; | ||
1517 | |||
1518 | err = crypto_register_aeads(aesni_aead_algs, | ||
1519 | ARRAY_SIZE(aesni_aead_algs)); | ||
1520 | if (err) | ||
1521 | goto unregister_algs; | ||
1522 | |||
1523 | return err; | ||
1524 | |||
1525 | unregister_algs: | ||
1526 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | ||
1527 | fpu_exit: | ||
1528 | crypto_fpu_exit(); | ||
1529 | return err; | ||
1620 | } | 1530 | } |
1621 | 1531 | ||
1622 | static void __exit aesni_exit(void) | 1532 | static void __exit aesni_exit(void) |
1623 | { | 1533 | { |
1534 | crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs)); | ||
1624 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1535 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); |
1625 | 1536 | ||
1626 | crypto_fpu_exit(); | 1537 | crypto_fpu_exit(); |
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c index 5a2f30f9f52d..e7d679e2a018 100644 --- a/arch/x86/crypto/fpu.c +++ b/arch/x86/crypto/fpu.c | |||
@@ -156,7 +156,7 @@ int __init crypto_fpu_init(void) | |||
156 | return crypto_register_template(&crypto_fpu_tmpl); | 156 | return crypto_register_template(&crypto_fpu_tmpl); |
157 | } | 157 | } |
158 | 158 | ||
159 | void __exit crypto_fpu_exit(void) | 159 | void crypto_fpu_exit(void) |
160 | { | 160 | { |
161 | crypto_unregister_template(&crypto_fpu_tmpl); | 161 | crypto_unregister_template(&crypto_fpu_tmpl); |
162 | } | 162 | } |
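The one-line fpu.c change is subtle: crypto_fpu_exit() loses its __exit annotation because the reworked aesni_init() above now calls it on a registration failure path. Code placed in .exit.text is discarded when the object is built in, so an init-time call into it would jump into freed memory. A compressed sketch of the rule, with generic placeholder names:

#include <linux/init.h>
#include <linux/errno.h>

static int second_stage(void) { return -ENOMEM; }	/* placeholder */

void shared_teardown(void)		/* deliberately NOT __exit */
{
	/* undo whatever first-stage setup did */
}

static int __init example_init(void)
{
	int err = second_stage();

	if (err)
		shared_teardown();	/* must be callable at init time */
	return err;
}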
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c index f53ed1dc88ea..a841e9765bd6 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb.c +++ b/arch/x86/crypto/sha-mb/sha1_mb.c | |||
@@ -882,7 +882,8 @@ static int __init sha1_mb_mod_init(void) | |||
882 | INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); | 882 | INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); |
883 | cpu_state->cpu = cpu; | 883 | cpu_state->cpu = cpu; |
884 | cpu_state->alg_state = &sha1_mb_alg_state; | 884 | cpu_state->alg_state = &sha1_mb_alg_state; |
885 | cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL); | 885 | cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr), |
886 | GFP_KERNEL); | ||
886 | if (!cpu_state->mgr) | 887 | if (!cpu_state->mgr) |
887 | goto err2; | 888 | goto err2; |
888 | sha1_ctx_mgr_init(cpu_state->mgr); | 889 | sha1_ctx_mgr_init(cpu_state->mgr); |
diff --git a/crypto/842.c b/crypto/842.c index b48f4f108c47..98e387efb8c8 100644 --- a/crypto/842.c +++ b/crypto/842.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API for the 842 compression algorithm. | 2 | * Cryptographic API for the 842 software compression algorithm. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -11,173 +11,73 @@ | |||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * Copyright (C) IBM Corporation, 2011-2015 |
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | * | 15 | * |
18 | * Copyright (C) IBM Corporation, 2011 | 16 | * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com> |
17 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
19 | * | 18 | * |
20 | * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | 19 | * Rewrite: Dan Streetman <ddstreet@ieee.org> |
21 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | 20 | * |
21 | * This is the software implementation of compression and decompression using | ||
22 | * the 842 format. This uses the software 842 library at lib/842/ which is | ||
23 | * only a reference implementation, and is very, very slow as compared to other | ||
24 | * software compressors. You probably do not want to use this software | ||
25 | * compression. If you have access to the PowerPC 842 compression hardware, you | ||
26 | * want to use the 842 hardware compression interface, which is at: | ||
27 | * drivers/crypto/nx/nx-842-crypto.c | ||
22 | */ | 28 | */ |
23 | 29 | ||
24 | #include <linux/init.h> | 30 | #include <linux/init.h> |
25 | #include <linux/module.h> | 31 | #include <linux/module.h> |
26 | #include <linux/crypto.h> | 32 | #include <linux/crypto.h> |
27 | #include <linux/vmalloc.h> | 33 | #include <linux/sw842.h> |
28 | #include <linux/nx842.h> | ||
29 | #include <linux/lzo.h> | ||
30 | #include <linux/timer.h> | ||
31 | |||
32 | static int nx842_uselzo; | ||
33 | |||
34 | struct nx842_ctx { | ||
35 | void *nx842_wmem; /* working memory for 842/lzo */ | ||
36 | }; | ||
37 | 34 | ||
38 | enum nx842_crypto_type { | 35 | struct crypto842_ctx { |
39 | NX842_CRYPTO_TYPE_842, | 36 | char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */ |
40 | NX842_CRYPTO_TYPE_LZO | ||
41 | }; | 37 | }; |
42 | 38 | ||
43 | #define NX842_SENTINEL 0xdeadbeef | 39 | static int crypto842_compress(struct crypto_tfm *tfm, |
44 | 40 | const u8 *src, unsigned int slen, | |
45 | struct nx842_crypto_header { | 41 | u8 *dst, unsigned int *dlen) |
46 | unsigned int sentinel; /* debug */ | ||
47 | enum nx842_crypto_type type; | ||
48 | }; | ||
49 | |||
50 | static int nx842_init(struct crypto_tfm *tfm) | ||
51 | { | ||
52 | struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); | ||
53 | int wmemsize; | ||
54 | |||
55 | wmemsize = max_t(int, nx842_get_workmem_size(), LZO1X_MEM_COMPRESS); | ||
56 | ctx->nx842_wmem = kmalloc(wmemsize, GFP_NOFS); | ||
57 | if (!ctx->nx842_wmem) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static void nx842_exit(struct crypto_tfm *tfm) | ||
64 | { | ||
65 | struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); | ||
66 | |||
67 | kfree(ctx->nx842_wmem); | ||
68 | } | ||
69 | |||
70 | static void nx842_reset_uselzo(unsigned long data) | ||
71 | { | 42 | { |
72 | nx842_uselzo = 0; | 43 | struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); |
73 | } | ||
74 | |||
75 | static DEFINE_TIMER(failover_timer, nx842_reset_uselzo, 0, 0); | ||
76 | |||
77 | static int nx842_crypto_compress(struct crypto_tfm *tfm, const u8 *src, | ||
78 | unsigned int slen, u8 *dst, unsigned int *dlen) | ||
79 | { | ||
80 | struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); | ||
81 | struct nx842_crypto_header *hdr; | ||
82 | unsigned int tmp_len = *dlen; | ||
83 | size_t lzodlen; /* needed for lzo */ | ||
84 | int err; | ||
85 | |||
86 | *dlen = 0; | ||
87 | hdr = (struct nx842_crypto_header *)dst; | ||
88 | hdr->sentinel = NX842_SENTINEL; /* debug */ | ||
89 | dst += sizeof(struct nx842_crypto_header); | ||
90 | tmp_len -= sizeof(struct nx842_crypto_header); | ||
91 | lzodlen = tmp_len; | ||
92 | |||
93 | if (likely(!nx842_uselzo)) { | ||
94 | err = nx842_compress(src, slen, dst, &tmp_len, ctx->nx842_wmem); | ||
95 | |||
96 | if (likely(!err)) { | ||
97 | hdr->type = NX842_CRYPTO_TYPE_842; | ||
98 | *dlen = tmp_len + sizeof(struct nx842_crypto_header); | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | /* hardware failed */ | ||
103 | nx842_uselzo = 1; | ||
104 | 44 | ||
105 | /* set timer to check for hardware again in 1 second */ | 45 | return sw842_compress(src, slen, dst, dlen, ctx->wmem); |
106 | mod_timer(&failover_timer, jiffies + msecs_to_jiffies(1000)); | ||
107 | } | ||
108 | |||
109 | /* no hardware, use lzo */ | ||
110 | err = lzo1x_1_compress(src, slen, dst, &lzodlen, ctx->nx842_wmem); | ||
111 | if (err != LZO_E_OK) | ||
112 | return -EINVAL; | ||
113 | |||
114 | hdr->type = NX842_CRYPTO_TYPE_LZO; | ||
115 | *dlen = lzodlen + sizeof(struct nx842_crypto_header); | ||
116 | return 0; | ||
117 | } | 46 | } |
118 | 47 | ||
119 | static int nx842_crypto_decompress(struct crypto_tfm *tfm, const u8 *src, | 48 | static int crypto842_decompress(struct crypto_tfm *tfm, |
120 | unsigned int slen, u8 *dst, unsigned int *dlen) | 49 | const u8 *src, unsigned int slen, |
50 | u8 *dst, unsigned int *dlen) | ||
121 | { | 51 | { |
122 | struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); | 52 | return sw842_decompress(src, slen, dst, dlen); |
123 | struct nx842_crypto_header *hdr; | ||
124 | unsigned int tmp_len = *dlen; | ||
125 | size_t lzodlen; /* needed for lzo */ | ||
126 | int err; | ||
127 | |||
128 | *dlen = 0; | ||
129 | hdr = (struct nx842_crypto_header *)src; | ||
130 | |||
131 | if (unlikely(hdr->sentinel != NX842_SENTINEL)) | ||
132 | return -EINVAL; | ||
133 | |||
134 | src += sizeof(struct nx842_crypto_header); | ||
135 | slen -= sizeof(struct nx842_crypto_header); | ||
136 | |||
137 | if (likely(hdr->type == NX842_CRYPTO_TYPE_842)) { | ||
138 | err = nx842_decompress(src, slen, dst, &tmp_len, | ||
139 | ctx->nx842_wmem); | ||
140 | if (err) | ||
141 | return -EINVAL; | ||
142 | *dlen = tmp_len; | ||
143 | } else if (hdr->type == NX842_CRYPTO_TYPE_LZO) { | ||
144 | lzodlen = tmp_len; | ||
145 | err = lzo1x_decompress_safe(src, slen, dst, &lzodlen); | ||
146 | if (err != LZO_E_OK) | ||
147 | return -EINVAL; | ||
148 | *dlen = lzodlen; | ||
149 | } else | ||
150 | return -EINVAL; | ||
151 | |||
152 | return 0; | ||
153 | } | 53 | } |
154 | 54 | ||
155 | static struct crypto_alg alg = { | 55 | static struct crypto_alg alg = { |
156 | .cra_name = "842", | 56 | .cra_name = "842", |
57 | .cra_driver_name = "842-generic", | ||
58 | .cra_priority = 100, | ||
157 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 59 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
158 | .cra_ctxsize = sizeof(struct nx842_ctx), | 60 | .cra_ctxsize = sizeof(struct crypto842_ctx), |
159 | .cra_module = THIS_MODULE, | 61 | .cra_module = THIS_MODULE, |
160 | .cra_init = nx842_init, | ||
161 | .cra_exit = nx842_exit, | ||
162 | .cra_u = { .compress = { | 62 | .cra_u = { .compress = { |
163 | .coa_compress = nx842_crypto_compress, | 63 | .coa_compress = crypto842_compress, |
164 | .coa_decompress = nx842_crypto_decompress } } | 64 | .coa_decompress = crypto842_decompress } } |
165 | }; | 65 | }; |
166 | 66 | ||
167 | static int __init nx842_mod_init(void) | 67 | static int __init crypto842_mod_init(void) |
168 | { | 68 | { |
169 | del_timer(&failover_timer); | ||
170 | return crypto_register_alg(&alg); | 69 | return crypto_register_alg(&alg); |
171 | } | 70 | } |
71 | module_init(crypto842_mod_init); | ||
172 | 72 | ||
173 | static void __exit nx842_mod_exit(void) | 73 | static void __exit crypto842_mod_exit(void) |
174 | { | 74 | { |
175 | crypto_unregister_alg(&alg); | 75 | crypto_unregister_alg(&alg); |
176 | } | 76 | } |
177 | 77 | module_exit(crypto842_mod_exit); | |
178 | module_init(nx842_mod_init); | ||
179 | module_exit(nx842_mod_exit); | ||
180 | 78 | ||
181 | MODULE_LICENSE("GPL"); | 79 | MODULE_LICENSE("GPL"); |
182 | MODULE_DESCRIPTION("842 Compression Algorithm"); | 80 | MODULE_DESCRIPTION("842 Software Compression Algorithm"); |
183 | MODULE_ALIAS_CRYPTO("842"); | 81 | MODULE_ALIAS_CRYPTO("842"); |
82 | MODULE_ALIAS_CRYPTO("842-generic"); | ||
83 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
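With the hardware fallback logic gone, the rewritten file is a thin shim over lib/842. A hedged sketch of driving it through the generic crypto compression interface; the function name is illustrative and the output buffer is assumed large enough by the caller:

#include <linux/crypto.h>
#include <linux/err.h>

/* Sketch: one-shot compression via the "842" transform. */
static int compress842_sketch(const u8 *in, unsigned int inlen,
			      u8 *out, unsigned int *outlen)
{
	struct crypto_comp *tfm = crypto_alloc_comp("842", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_compress(tfm, in, inlen, out, outlen);
	crypto_free_comp(tfm);
	return err;
}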
diff --git a/crypto/Kconfig b/crypto/Kconfig index 362905e7c841..b4cfc5754033 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -78,6 +78,10 @@ config CRYPTO_RNG2 | |||
78 | tristate | 78 | tristate |
79 | select CRYPTO_ALGAPI2 | 79 | select CRYPTO_ALGAPI2 |
80 | 80 | ||
81 | config CRYPTO_RNG_DEFAULT | ||
82 | tristate | ||
83 | select CRYPTO_DRBG_MENU | ||
84 | |||
81 | config CRYPTO_PCOMP | 85 | config CRYPTO_PCOMP |
82 | tristate | 86 | tristate |
83 | select CRYPTO_PCOMP2 | 87 | select CRYPTO_PCOMP2 |
@@ -87,6 +91,23 @@ config CRYPTO_PCOMP2 | |||
87 | tristate | 91 | tristate |
88 | select CRYPTO_ALGAPI2 | 92 | select CRYPTO_ALGAPI2 |
89 | 93 | ||
94 | config CRYPTO_AKCIPHER2 | ||
95 | tristate | ||
96 | select CRYPTO_ALGAPI2 | ||
97 | |||
98 | config CRYPTO_AKCIPHER | ||
99 | tristate | ||
100 | select CRYPTO_AKCIPHER2 | ||
101 | select CRYPTO_ALGAPI | ||
102 | |||
103 | config CRYPTO_RSA | ||
104 | tristate "RSA algorithm" | ||
105 | select CRYPTO_AKCIPHER | ||
106 | select MPILIB | ||
107 | select ASN1 | ||
108 | help | ||
109 | Generic implementation of the RSA public key algorithm. | ||
110 | |||
90 | config CRYPTO_MANAGER | 111 | config CRYPTO_MANAGER |
91 | tristate "Cryptographic algorithm manager" | 112 | tristate "Cryptographic algorithm manager" |
92 | select CRYPTO_MANAGER2 | 113 | select CRYPTO_MANAGER2 |
@@ -100,6 +121,7 @@ config CRYPTO_MANAGER2 | |||
100 | select CRYPTO_HASH2 | 121 | select CRYPTO_HASH2 |
101 | select CRYPTO_BLKCIPHER2 | 122 | select CRYPTO_BLKCIPHER2 |
102 | select CRYPTO_PCOMP2 | 123 | select CRYPTO_PCOMP2 |
124 | select CRYPTO_AKCIPHER2 | ||
103 | 125 | ||
104 | config CRYPTO_USER | 126 | config CRYPTO_USER |
105 | tristate "Userspace cryptographic algorithm configuration" | 127 | tristate "Userspace cryptographic algorithm configuration" |
@@ -217,15 +239,39 @@ config CRYPTO_GCM | |||
217 | Support for Galois/Counter Mode (GCM) and Galois Message | 239 | Support for Galois/Counter Mode (GCM) and Galois Message |
218 | Authentication Code (GMAC). Required for IPSec. | 240 | Authentication Code (GMAC). Required for IPSec. |
219 | 241 | ||
242 | config CRYPTO_CHACHA20POLY1305 | ||
243 | tristate "ChaCha20-Poly1305 AEAD support" | ||
244 | select CRYPTO_CHACHA20 | ||
245 | select CRYPTO_POLY1305 | ||
246 | select CRYPTO_AEAD | ||
247 | help | ||
248 | ChaCha20-Poly1305 AEAD support, RFC7539. | ||
249 | |||
250 | Support for the AEAD wrapper using the ChaCha20 stream cipher combined | ||
251 | with the Poly1305 authenticator. It is defined in RFC7539 for use in | ||
252 | IETF protocols. | ||
253 | |||
220 | config CRYPTO_SEQIV | 254 | config CRYPTO_SEQIV |
221 | tristate "Sequence Number IV Generator" | 255 | tristate "Sequence Number IV Generator" |
222 | select CRYPTO_AEAD | 256 | select CRYPTO_AEAD |
223 | select CRYPTO_BLKCIPHER | 257 | select CRYPTO_BLKCIPHER |
224 | select CRYPTO_RNG | 258 | select CRYPTO_NULL |
259 | select CRYPTO_RNG_DEFAULT | ||
225 | help | 260 | help |
226 | This IV generator generates an IV based on a sequence number by | 261 | This IV generator generates an IV based on a sequence number by |
227 | xoring it with a salt. This algorithm is mainly useful for CTR mode. | 262 | xoring it with a salt. This algorithm is mainly useful for CTR mode. |
228 | 263 | ||
264 | config CRYPTO_ECHAINIV | ||
265 | tristate "Encrypted Chain IV Generator" | ||
266 | select CRYPTO_AEAD | ||
267 | select CRYPTO_NULL | ||
268 | select CRYPTO_RNG_DEFAULT | ||
269 | default m | ||
270 | help | ||
271 | This IV generator generates an IV based on the encryption of | ||
272 | a sequence number xored with a salt. This is the default | ||
273 | algorithm for CBC. | ||
274 | |||
229 | comment "Block modes" | 275 | comment "Block modes" |
230 | 276 | ||
231 | config CRYPTO_CBC | 277 | config CRYPTO_CBC |
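The seqiv and echainiv help texts each compress their scheme into a single sentence. As a concrete reading of the seqiv description (salt XORed with the sequence number), here is a sketch for an 8-byte IV; the real generator in crypto/seqiv.c handles arbitrary IV sizes and all of the request plumbing, so treat this as illustration only:

#include <crypto/algapi.h>	/* crypto_xor() */
#include <linux/string.h>

/* Sketch of the seqiv idea: IV = salt ^ big-endian sequence number. */
static void seqiv_style_geniv(u8 iv[8], const u8 salt[8], u64 seq)
{
	__be64 nseq = cpu_to_be64(seq);

	memcpy(iv, salt, 8);
	crypto_xor(iv, (const u8 *)&nseq, 8);
}

echainiv differs in that the salted sequence number is then encrypted, so the IV stream is not predictable from the sequence alone.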
@@ -415,6 +461,15 @@ config CRYPTO_GHASH | |||
415 | help | 461 | help |
416 | GHASH is message digest algorithm for GCM (Galois/Counter Mode). | 462 | GHASH is message digest algorithm for GCM (Galois/Counter Mode). |
417 | 463 | ||
464 | config CRYPTO_POLY1305 | ||
465 | tristate "Poly1305 authenticator algorithm" | ||
466 | help | ||
467 | Poly1305 authenticator algorithm, RFC7539. | ||
468 | |||
469 | Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. | ||
470 | It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use | ||
471 | in IETF protocols. This is the portable C implementation of Poly1305. | ||
472 | |||
418 | config CRYPTO_MD4 | 473 | config CRYPTO_MD4 |
419 | tristate "MD4 digest algorithm" | 474 | tristate "MD4 digest algorithm" |
420 | select CRYPTO_HASH | 475 | select CRYPTO_HASH |
@@ -1145,6 +1200,19 @@ config CRYPTO_SALSA20_X86_64 | |||
1145 | The Salsa20 stream cipher algorithm is designed by Daniel J. | 1200 | The Salsa20 stream cipher algorithm is designed by Daniel J. |
1146 | Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html> | 1201 | Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html> |
1147 | 1202 | ||
1203 | config CRYPTO_CHACHA20 | ||
1204 | tristate "ChaCha20 cipher algorithm" | ||
1205 | select CRYPTO_BLKCIPHER | ||
1206 | help | ||
1207 | ChaCha20 cipher algorithm, RFC7539. | ||
1208 | |||
1209 | ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. | ||
1210 | Bernstein and further specified in RFC7539 for use in IETF protocols. | ||
1211 | This is the portable C implementation of ChaCha20. | ||
1212 | |||
1213 | See also: | ||
1214 | <http://cr.yp.to/chacha/chacha-20080128.pdf> | ||
1215 | |||
1148 | config CRYPTO_SEED | 1216 | config CRYPTO_SEED |
1149 | tristate "SEED cipher algorithm" | 1217 | tristate "SEED cipher algorithm" |
1150 | select CRYPTO_ALGAPI | 1218 | select CRYPTO_ALGAPI |
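For orientation, the whole of ChaCha20 is built from one operation, the quarter-round, applied to a 4x4 matrix of 32-bit words; the rotation counts 16, 12, 8 and 7 are fixed by RFC 7539, section 2.1. A faithful sketch in kernel C:

#include <linux/bitops.h>	/* rol32() */

/* One ChaCha quarter-round on four 32-bit state words. */
static inline void chacha_quarterround(u32 *a, u32 *b, u32 *c, u32 *d)
{
	*a += *b; *d ^= *a; *d = rol32(*d, 16);
	*c += *d; *b ^= *c; *b = rol32(*b, 12);
	*a += *b; *d ^= *a; *d = rol32(*d, 8);
	*c += *d; *b ^= *c; *b = rol32(*b, 7);
}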
@@ -1412,10 +1480,9 @@ config CRYPTO_LZO | |||
1412 | 1480 | ||
1413 | config CRYPTO_842 | 1481 | config CRYPTO_842 |
1414 | tristate "842 compression algorithm" | 1482 | tristate "842 compression algorithm" |
1415 | depends on CRYPTO_DEV_NX_COMPRESS | 1483 | select CRYPTO_ALGAPI |
1416 | # 842 uses lzo if the hardware becomes unavailable | 1484 | select 842_COMPRESS |
1417 | select LZO_COMPRESS | 1485 | select 842_DECOMPRESS |
1418 | select LZO_DECOMPRESS | ||
1419 | help | 1486 | help |
1420 | This is the 842 algorithm. | 1487 | This is the 842 algorithm. |
1421 | 1488 | ||
@@ -1439,7 +1506,6 @@ comment "Random Number Generation" | |||
1439 | 1506 | ||
1440 | config CRYPTO_ANSI_CPRNG | 1507 | config CRYPTO_ANSI_CPRNG |
1441 | tristate "Pseudo Random Number Generation for Cryptographic modules" | 1508 | tristate "Pseudo Random Number Generation for Cryptographic modules" |
1442 | default m | ||
1443 | select CRYPTO_AES | 1509 | select CRYPTO_AES |
1444 | select CRYPTO_RNG | 1510 | select CRYPTO_RNG |
1445 | help | 1511 | help |
@@ -1457,15 +1523,14 @@ menuconfig CRYPTO_DRBG_MENU | |||
1457 | if CRYPTO_DRBG_MENU | 1523 | if CRYPTO_DRBG_MENU |
1458 | 1524 | ||
1459 | config CRYPTO_DRBG_HMAC | 1525 | config CRYPTO_DRBG_HMAC |
1460 | bool "Enable HMAC DRBG" | 1526 | bool |
1461 | default y | 1527 | default y |
1462 | select CRYPTO_HMAC | 1528 | select CRYPTO_HMAC |
1463 | help | 1529 | select CRYPTO_SHA256 |
1464 | Enable the HMAC DRBG variant as defined in NIST SP800-90A. | ||
1465 | 1530 | ||
1466 | config CRYPTO_DRBG_HASH | 1531 | config CRYPTO_DRBG_HASH |
1467 | bool "Enable Hash DRBG" | 1532 | bool "Enable Hash DRBG" |
1468 | select CRYPTO_HASH | 1533 | select CRYPTO_SHA256 |
1469 | help | 1534 | help |
1470 | Enable the Hash DRBG variant as defined in NIST SP800-90A. | 1535 | Enable the Hash DRBG variant as defined in NIST SP800-90A. |
1471 | 1536 | ||
@@ -1477,11 +1542,21 @@ config CRYPTO_DRBG_CTR | |||
1477 | 1542 | ||
1478 | config CRYPTO_DRBG | 1543 | config CRYPTO_DRBG |
1479 | tristate | 1544 | tristate |
1480 | default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR) | 1545 | default CRYPTO_DRBG_MENU |
1481 | select CRYPTO_RNG | 1546 | select CRYPTO_RNG |
1547 | select CRYPTO_JITTERENTROPY | ||
1482 | 1548 | ||
1483 | endif # if CRYPTO_DRBG_MENU | 1549 | endif # if CRYPTO_DRBG_MENU |
1484 | 1550 | ||
1551 | config CRYPTO_JITTERENTROPY | ||
1552 | tristate "Jitterentropy Non-Deterministic Random Number Generator" | ||
1553 | help | ||
1554 | The Jitterentropy RNG is a noise source that is intended ||
1555 | to provide a seed to another RNG. The RNG does not ||
1556 | perform any cryptographic whitening of the generated | ||
1557 | random numbers. This Jitterentropy RNG registers with | ||
1558 | the kernel crypto API and can be used by any caller. | ||
1559 | |||
1485 | config CRYPTO_USER_API | 1560 | config CRYPTO_USER_API |
1486 | tristate | 1561 | tristate |
1487 | 1562 | ||
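With CRYPTO_DRBG now defaulting on whenever its menu is enabled, and reseeding from the jitter entropy source above, the DRBG becomes what the generic "stdrng" name resolves to. A hedged sketch of a kernel caller pulling bytes from it:

#include <crypto/rng.h>
#include <linux/err.h>

/* Sketch: fill buf from the default crypto RNG. */
static int stdrng_fill_sketch(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
	int err;

	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return err;
}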
@@ -1512,6 +1587,15 @@ config CRYPTO_USER_API_RNG | |||
1512 | This option enables the user-space interface for random | 1587 | This option enables the user-space interface for random |
1513 | number generator algorithms. | 1588 | number generator algorithms. |
1514 | 1589 | ||
1590 | config CRYPTO_USER_API_AEAD | ||
1591 | tristate "User-space interface for AEAD cipher algorithms" | ||
1592 | depends on NET | ||
1593 | select CRYPTO_AEAD | ||
1594 | select CRYPTO_USER_API | ||
1595 | help | ||
1596 | This option enables the user-space interface for AEAD ||
1597 | cipher algorithms. | ||
1598 | |||
1515 | config CRYPTO_HASH_INFO | 1599 | config CRYPTO_HASH_INFO |
1516 | bool | 1600 | bool |
1517 | 1601 | ||
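CRYPTO_USER_API_AEAD exports AEAD transforms to user space over AF_ALG. A user-space setup sketch follows; the algorithm name, key length, and tag size are illustrative, and the send/recv data path (including the AD-length control message) is omitted:

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

/* Returns an operation fd for AEAD requests, or -1 on error. */
static int alg_aead_open(const unsigned char *key, int keylen)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0)
		return -1;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) ||
	    setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen) ||
	    setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16)) {
		close(tfmfd);
		return -1;
	}

	return accept(tfmfd, NULL, 0);
}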
diff --git a/crypto/Makefile b/crypto/Makefile index 97b7d3ac87e7..0077476f5024 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -21,12 +21,22 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o | |||
21 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o | 21 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o |
22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o | 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o |
23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o | 23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o |
24 | obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o | ||
24 | 25 | ||
25 | crypto_hash-y += ahash.o | 26 | crypto_hash-y += ahash.o |
26 | crypto_hash-y += shash.o | 27 | crypto_hash-y += shash.o |
27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o | 28 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o |
28 | 29 | ||
29 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o | 30 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o |
31 | obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o | ||
32 | |||
33 | $(obj)/rsakey-asn1.o: $(obj)/rsakey-asn1.c $(obj)/rsakey-asn1.h | ||
34 | clean-files += rsakey-asn1.c rsakey-asn1.h | ||
35 | |||
36 | rsa_generic-y := rsakey-asn1.o | ||
37 | rsa_generic-y += rsa.o | ||
38 | rsa_generic-y += rsa_helper.o | ||
39 | obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o | ||
30 | 40 | ||
31 | cryptomgr-y := algboss.o testmgr.o | 41 | cryptomgr-y := algboss.o testmgr.o |
32 | 42 | ||
@@ -58,6 +68,7 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o | |||
58 | obj-$(CONFIG_CRYPTO_CTR) += ctr.o | 68 | obj-$(CONFIG_CRYPTO_CTR) += ctr.o |
59 | obj-$(CONFIG_CRYPTO_GCM) += gcm.o | 69 | obj-$(CONFIG_CRYPTO_GCM) += gcm.o |
60 | obj-$(CONFIG_CRYPTO_CCM) += ccm.o | 70 | obj-$(CONFIG_CRYPTO_CCM) += ccm.o |
71 | obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o | ||
61 | obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o | 72 | obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o |
62 | obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o | 73 | obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o |
63 | obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o | 74 | obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o |
@@ -79,6 +90,8 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o | |||
79 | obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o | 90 | obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o |
80 | obj-$(CONFIG_CRYPTO_SEED) += seed.o | 91 | obj-$(CONFIG_CRYPTO_SEED) += seed.o |
81 | obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o | 92 | obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o |
93 | obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o | ||
94 | obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o | ||
82 | obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o | 95 | obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o |
83 | obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o | 96 | obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o |
84 | obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o | 97 | obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o |
@@ -91,9 +104,9 @@ obj-$(CONFIG_CRYPTO_LZ4) += lz4.o | |||
91 | obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o | 104 | obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o |
92 | obj-$(CONFIG_CRYPTO_842) += 842.o | 105 | obj-$(CONFIG_CRYPTO_842) += 842.o |
93 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o | 106 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o |
94 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o | ||
95 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o | 107 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o |
96 | obj-$(CONFIG_CRYPTO_DRBG) += drbg.o | 108 | obj-$(CONFIG_CRYPTO_DRBG) += drbg.o |
109 | obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy.o | ||
97 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o | 110 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o |
98 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o | 111 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o |
99 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o | 112 | obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index db201bca1581..b788f169cc98 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
@@ -454,7 +454,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, | |||
454 | alg->setkey : setkey; | 454 | alg->setkey : setkey; |
455 | crt->encrypt = alg->encrypt; | 455 | crt->encrypt = alg->encrypt; |
456 | crt->decrypt = alg->decrypt; | 456 | crt->decrypt = alg->decrypt; |
457 | crt->givencrypt = alg->givencrypt; | 457 | crt->givencrypt = alg->givencrypt ?: no_givdecrypt; |
458 | crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; | 458 | crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; |
459 | crt->base = __crypto_ablkcipher_cast(tfm); | 459 | crt->base = __crypto_ablkcipher_cast(tfm); |
460 | crt->ivsize = alg->ivsize; | 460 | crt->ivsize = alg->ivsize; |
@@ -586,6 +586,13 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
586 | if (!tmpl) | 586 | if (!tmpl) |
587 | goto kill_larval; | 587 | goto kill_larval; |
588 | 588 | ||
589 | if (tmpl->create) { | ||
590 | err = tmpl->create(tmpl, tb); | ||
591 | if (err) | ||
592 | goto put_tmpl; | ||
593 | goto ok; | ||
594 | } | ||
595 | |||
589 | inst = tmpl->alloc(tb); | 596 | inst = tmpl->alloc(tb); |
590 | err = PTR_ERR(inst); | 597 | err = PTR_ERR(inst); |
591 | if (IS_ERR(inst)) | 598 | if (IS_ERR(inst)) |
@@ -597,6 +604,7 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
597 | goto put_tmpl; | 604 | goto put_tmpl; |
598 | } | 605 | } |
599 | 606 | ||
607 | ok: | ||
600 | /* Redo the lookup to use the instance we just registered. */ | 608 | /* Redo the lookup to use the instance we just registered. */ |
601 | err = -EAGAIN; | 609 | err = -EAGAIN; |
602 | 610 | ||
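The new branch above lets the legacy geniv lookup instantiate modern templates: a template that provides ->create() builds and registers its instance itself, while the older ->alloc() hands an instance back for the caller to register. The same dispatch, condensed into a sketch (a fragment, not the exact control flow of this function):

if (tmpl->create) {
	/* new-style template: registers the instance itself */
	err = tmpl->create(tmpl, tb);
} else {
	/* old-style template: caller registers the returned instance */
	struct crypto_instance *inst = tmpl->alloc(tb);

	err = IS_ERR(inst) ? PTR_ERR(inst) :
	      crypto_register_instance(tmpl, inst);
}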
@@ -636,7 +644,7 @@ struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask) | |||
636 | 644 | ||
637 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == | 645 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == |
638 | CRYPTO_ALG_TYPE_GIVCIPHER) { | 646 | CRYPTO_ALG_TYPE_GIVCIPHER) { |
639 | if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) { | 647 | if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { |
640 | crypto_mod_put(alg); | 648 | crypto_mod_put(alg); |
641 | alg = ERR_PTR(-ENOENT); | 649 | alg = ERR_PTR(-ENOENT); |
642 | } | 650 | } |
diff --git a/crypto/aead.c b/crypto/aead.c index 222271070b49..07bf99773548 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
@@ -12,7 +12,8 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/internal/aead.h> | 15 | #include <crypto/internal/geniv.h> |
16 | #include <crypto/scatterwalk.h> | ||
16 | #include <linux/err.h> | 17 | #include <linux/err.h> |
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
@@ -26,10 +27,20 @@ | |||
26 | 27 | ||
27 | #include "internal.h" | 28 | #include "internal.h" |
28 | 29 | ||
30 | struct compat_request_ctx { | ||
31 | struct scatterlist src[2]; | ||
32 | struct scatterlist dst[2]; | ||
33 | struct scatterlist ivbuf[2]; | ||
34 | struct scatterlist *ivsg; | ||
35 | struct aead_givcrypt_request subreq; | ||
36 | }; | ||
37 | |||
38 | static int aead_null_givencrypt(struct aead_givcrypt_request *req); | ||
39 | static int aead_null_givdecrypt(struct aead_givcrypt_request *req); | ||
40 | |||
29 | static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, | 41 | static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, |
30 | unsigned int keylen) | 42 | unsigned int keylen) |
31 | { | 43 | { |
32 | struct aead_alg *aead = crypto_aead_alg(tfm); | ||
33 | unsigned long alignmask = crypto_aead_alignmask(tfm); | 44 | unsigned long alignmask = crypto_aead_alignmask(tfm); |
34 | int ret; | 45 | int ret; |
35 | u8 *buffer, *alignbuffer; | 46 | u8 *buffer, *alignbuffer; |
@@ -42,47 +53,95 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, | |||
42 | 53 | ||
43 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 54 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
44 | memcpy(alignbuffer, key, keylen); | 55 | memcpy(alignbuffer, key, keylen); |
45 | ret = aead->setkey(tfm, alignbuffer, keylen); | 56 | ret = tfm->setkey(tfm, alignbuffer, keylen); |
46 | memset(alignbuffer, 0, keylen); | 57 | memset(alignbuffer, 0, keylen); |
47 | kfree(buffer); | 58 | kfree(buffer); |
48 | return ret; | 59 | return ret; |
49 | } | 60 | } |
50 | 61 | ||
51 | static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) | 62 | int crypto_aead_setkey(struct crypto_aead *tfm, |
63 | const u8 *key, unsigned int keylen) | ||
52 | { | 64 | { |
53 | struct aead_alg *aead = crypto_aead_alg(tfm); | ||
54 | unsigned long alignmask = crypto_aead_alignmask(tfm); | 65 | unsigned long alignmask = crypto_aead_alignmask(tfm); |
55 | 66 | ||
67 | tfm = tfm->child; | ||
68 | |||
56 | if ((unsigned long)key & alignmask) | 69 | if ((unsigned long)key & alignmask) |
57 | return setkey_unaligned(tfm, key, keylen); | 70 | return setkey_unaligned(tfm, key, keylen); |
58 | 71 | ||
59 | return aead->setkey(tfm, key, keylen); | 72 | return tfm->setkey(tfm, key, keylen); |
60 | } | 73 | } |
74 | EXPORT_SYMBOL_GPL(crypto_aead_setkey); | ||
61 | 75 | ||
62 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | 76 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) |
63 | { | 77 | { |
64 | struct aead_tfm *crt = crypto_aead_crt(tfm); | ||
65 | int err; | 78 | int err; |
66 | 79 | ||
67 | if (authsize > crypto_aead_alg(tfm)->maxauthsize) | 80 | if (authsize > crypto_aead_maxauthsize(tfm)) |
68 | return -EINVAL; | 81 | return -EINVAL; |
69 | 82 | ||
70 | if (crypto_aead_alg(tfm)->setauthsize) { | 83 | if (tfm->setauthsize) { |
71 | err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize); | 84 | err = tfm->setauthsize(tfm->child, authsize); |
72 | if (err) | 85 | if (err) |
73 | return err; | 86 | return err; |
74 | } | 87 | } |
75 | 88 | ||
76 | crypto_aead_crt(crt->base)->authsize = authsize; | 89 | tfm->child->authsize = authsize; |
77 | crt->authsize = authsize; | 90 | tfm->authsize = authsize; |
78 | return 0; | 91 | return 0; |
79 | } | 92 | } |
80 | EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); | 93 | EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); |
81 | 94 | ||
82 | static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type, | 95 | struct aead_old_request { |
83 | u32 mask) | 96 | struct scatterlist srcbuf[2]; |
97 | struct scatterlist dstbuf[2]; | ||
98 | struct aead_request subreq; | ||
99 | }; | ||
100 | |||
101 | unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) | ||
84 | { | 102 | { |
85 | return alg->cra_ctxsize; | 103 | return tfm->reqsize + sizeof(struct aead_old_request); |
104 | } | ||
105 | EXPORT_SYMBOL_GPL(crypto_aead_reqsize); | ||
106 | |||
107 | static int old_crypt(struct aead_request *req, | ||
108 | int (*crypt)(struct aead_request *req)) | ||
109 | { | ||
110 | struct aead_old_request *nreq = aead_request_ctx(req); | ||
111 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
112 | struct scatterlist *src, *dst; | ||
113 | |||
114 | if (req->old) | ||
115 | return crypt(req); | ||
116 | |||
117 | src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen); | ||
118 | dst = req->src == req->dst ? | ||
119 | src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen); | ||
120 | |||
121 | aead_request_set_tfm(&nreq->subreq, aead); | ||
122 | aead_request_set_callback(&nreq->subreq, aead_request_flags(req), | ||
123 | req->base.complete, req->base.data); | ||
124 | aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen, | ||
125 | req->iv); | ||
126 | aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen); | ||
127 | |||
128 | return crypt(&nreq->subreq); | ||
129 | } | ||
130 | |||
131 | static int old_encrypt(struct aead_request *req) | ||
132 | { | ||
133 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
134 | struct old_aead_alg *alg = crypto_old_aead_alg(aead); | ||
135 | |||
136 | return old_crypt(req, alg->encrypt); | ||
137 | } | ||
138 | |||
139 | static int old_decrypt(struct aead_request *req) | ||
140 | { | ||
141 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
142 | struct old_aead_alg *alg = crypto_old_aead_alg(aead); | ||
143 | |||
144 | return old_crypt(req, alg->decrypt); | ||
86 | } | 145 | } |
87 | 146 | ||
88 | static int no_givcrypt(struct aead_givcrypt_request *req) | 147 | static int no_givcrypt(struct aead_givcrypt_request *req) |
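old_crypt() above relies on scatterwalk_ffwd(), new in this cycle, which produces a scatterlist view beginning a given number of bytes into an existing list without copying any data; the caller donates a two-entry array as scratch space for the synthesized head entry. A usage sketch:

#include <crypto/scatterwalk.h>

/* Sketch: view req->src starting assoclen bytes in, copy-free. */
static struct scatterlist *payload_of(struct aead_request *req,
				      struct scatterlist buf[2])
{
	return scatterwalk_ffwd(buf, req->src, req->assoclen);
}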
@@ -90,32 +149,68 @@ static int no_givcrypt(struct aead_givcrypt_request *req) | |||
90 | return -ENOSYS; | 149 | return -ENOSYS; |
91 | } | 150 | } |
92 | 151 | ||
93 | static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | 152 | static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm) |
94 | { | 153 | { |
95 | struct aead_alg *alg = &tfm->__crt_alg->cra_aead; | 154 | struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead; |
96 | struct aead_tfm *crt = &tfm->crt_aead; | 155 | struct crypto_aead *crt = __crypto_aead_cast(tfm); |
97 | 156 | ||
98 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) | 157 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) |
99 | return -EINVAL; | 158 | return -EINVAL; |
100 | 159 | ||
101 | crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ? | 160 | crt->setkey = alg->setkey; |
102 | alg->setkey : setkey; | 161 | crt->setauthsize = alg->setauthsize; |
103 | crt->encrypt = alg->encrypt; | 162 | crt->encrypt = old_encrypt; |
104 | crt->decrypt = alg->decrypt; | 163 | crt->decrypt = old_decrypt; |
105 | crt->givencrypt = alg->givencrypt ?: no_givcrypt; | 164 | if (alg->ivsize) { |
106 | crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; | 165 | crt->givencrypt = alg->givencrypt ?: no_givcrypt; |
107 | crt->base = __crypto_aead_cast(tfm); | 166 | crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; |
108 | crt->ivsize = alg->ivsize; | 167 | } else { |
168 | crt->givencrypt = aead_null_givencrypt; | ||
169 | crt->givdecrypt = aead_null_givdecrypt; | ||
170 | } | ||
171 | crt->child = __crypto_aead_cast(tfm); | ||
109 | crt->authsize = alg->maxauthsize; | 172 | crt->authsize = alg->maxauthsize; |
110 | 173 | ||
111 | return 0; | 174 | return 0; |
112 | } | 175 | } |
113 | 176 | ||
177 | static void crypto_aead_exit_tfm(struct crypto_tfm *tfm) | ||
178 | { | ||
179 | struct crypto_aead *aead = __crypto_aead_cast(tfm); | ||
180 | struct aead_alg *alg = crypto_aead_alg(aead); | ||
181 | |||
182 | alg->exit(aead); | ||
183 | } | ||
184 | |||
185 | static int crypto_aead_init_tfm(struct crypto_tfm *tfm) | ||
186 | { | ||
187 | struct crypto_aead *aead = __crypto_aead_cast(tfm); | ||
188 | struct aead_alg *alg = crypto_aead_alg(aead); | ||
189 | |||
190 | if (crypto_old_aead_alg(aead)->encrypt) | ||
191 | return crypto_old_aead_init_tfm(tfm); | ||
192 | |||
193 | aead->setkey = alg->setkey; | ||
194 | aead->setauthsize = alg->setauthsize; | ||
195 | aead->encrypt = alg->encrypt; | ||
196 | aead->decrypt = alg->decrypt; | ||
197 | aead->child = __crypto_aead_cast(tfm); | ||
198 | aead->authsize = alg->maxauthsize; | ||
199 | |||
200 | if (alg->exit) | ||
201 | aead->base.exit = crypto_aead_exit_tfm; | ||
202 | |||
203 | if (alg->init) | ||
204 | return alg->init(aead); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
114 | #ifdef CONFIG_NET | 209 | #ifdef CONFIG_NET |
115 | static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | 210 | static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg) |
116 | { | 211 | { |
117 | struct crypto_report_aead raead; | 212 | struct crypto_report_aead raead; |
118 | struct aead_alg *aead = &alg->cra_aead; | 213 | struct old_aead_alg *aead = &alg->cra_aead; |
119 | 214 | ||
120 | strncpy(raead.type, "aead", sizeof(raead.type)); | 215 | strncpy(raead.type, "aead", sizeof(raead.type)); |
121 | strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv)); | 216 | strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv)); |
@@ -133,6 +228,64 @@ nla_put_failure: | |||
133 | return -EMSGSIZE; | 228 | return -EMSGSIZE; |
134 | } | 229 | } |
135 | #else | 230 | #else |
231 | static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
232 | { | ||
233 | return -ENOSYS; | ||
234 | } | ||
235 | #endif | ||
236 | |||
237 | static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg) | ||
238 | __attribute__ ((unused)); | ||
239 | static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg) | ||
240 | { | ||
241 | struct old_aead_alg *aead = &alg->cra_aead; | ||
242 | |||
243 | seq_printf(m, "type : aead\n"); | ||
244 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | ||
245 | "yes" : "no"); | ||
246 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
247 | seq_printf(m, "ivsize : %u\n", aead->ivsize); | ||
248 | seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); | ||
249 | seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>"); | ||
250 | } | ||
251 | |||
252 | const struct crypto_type crypto_aead_type = { | ||
253 | .extsize = crypto_alg_extsize, | ||
254 | .init_tfm = crypto_aead_init_tfm, | ||
255 | #ifdef CONFIG_PROC_FS | ||
256 | .show = crypto_old_aead_show, | ||
257 | #endif | ||
258 | .report = crypto_old_aead_report, | ||
259 | .lookup = crypto_lookup_aead, | ||
260 | .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV), | ||
261 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
262 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
263 | .tfmsize = offsetof(struct crypto_aead, base), | ||
264 | }; | ||
265 | EXPORT_SYMBOL_GPL(crypto_aead_type); | ||
266 | |||
267 | #ifdef CONFIG_NET | ||
268 | static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
269 | { | ||
270 | struct crypto_report_aead raead; | ||
271 | struct aead_alg *aead = container_of(alg, struct aead_alg, base); | ||
272 | |||
273 | strncpy(raead.type, "aead", sizeof(raead.type)); | ||
274 | strncpy(raead.geniv, "<none>", sizeof(raead.geniv)); | ||
275 | |||
276 | raead.blocksize = alg->cra_blocksize; | ||
277 | raead.maxauthsize = aead->maxauthsize; | ||
278 | raead.ivsize = aead->ivsize; | ||
279 | |||
280 | if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, | ||
281 | sizeof(struct crypto_report_aead), &raead)) | ||
282 | goto nla_put_failure; | ||
283 | return 0; | ||
284 | |||
285 | nla_put_failure: | ||
286 | return -EMSGSIZE; | ||
287 | } | ||
288 | #else | ||
136 | static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | 289 | static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) |
137 | { | 290 | { |
138 | return -ENOSYS; | 291 | return -ENOSYS; |
@@ -143,7 +296,7 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) | |||
143 | __attribute__ ((unused)); | 296 | __attribute__ ((unused)); |
144 | static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) | 297 | static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) |
145 | { | 298 | { |
146 | struct aead_alg *aead = &alg->cra_aead; | 299 | struct aead_alg *aead = container_of(alg, struct aead_alg, base); |
147 | 300 | ||
148 | seq_printf(m, "type : aead\n"); | 301 | seq_printf(m, "type : aead\n"); |
149 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | 302 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? |
@@ -151,18 +304,21 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) | |||
151 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | 304 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); |
152 | seq_printf(m, "ivsize : %u\n", aead->ivsize); | 305 | seq_printf(m, "ivsize : %u\n", aead->ivsize); |
153 | seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); | 306 | seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); |
154 | seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>"); | 307 | seq_printf(m, "geniv : <none>\n"); |
155 | } | 308 | } |
156 | 309 | ||
157 | const struct crypto_type crypto_aead_type = { | 310 | static const struct crypto_type crypto_new_aead_type = { |
158 | .ctxsize = crypto_aead_ctxsize, | 311 | .extsize = crypto_alg_extsize, |
159 | .init = crypto_init_aead_ops, | 312 | .init_tfm = crypto_aead_init_tfm, |
160 | #ifdef CONFIG_PROC_FS | 313 | #ifdef CONFIG_PROC_FS |
161 | .show = crypto_aead_show, | 314 | .show = crypto_aead_show, |
162 | #endif | 315 | #endif |
163 | .report = crypto_aead_report, | 316 | .report = crypto_aead_report, |
317 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
318 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
319 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
320 | .tfmsize = offsetof(struct crypto_aead, base), | ||
164 | }; | 321 | }; |
165 | EXPORT_SYMBOL_GPL(crypto_aead_type); | ||
166 | 322 | ||
167 | static int aead_null_givencrypt(struct aead_givcrypt_request *req) | 323 | static int aead_null_givencrypt(struct aead_givcrypt_request *req) |
168 | { | 324 | { |
@@ -174,33 +330,11 @@ static int aead_null_givdecrypt(struct aead_givcrypt_request *req) | |||
174 | return crypto_aead_decrypt(&req->areq); | 330 | return crypto_aead_decrypt(&req->areq); |
175 | } | 331 | } |
176 | 332 | ||
177 | static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | ||
178 | { | ||
179 | struct aead_alg *alg = &tfm->__crt_alg->cra_aead; | ||
180 | struct aead_tfm *crt = &tfm->crt_aead; | ||
181 | |||
182 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) | ||
183 | return -EINVAL; | ||
184 | |||
185 | crt->setkey = setkey; | ||
186 | crt->encrypt = alg->encrypt; | ||
187 | crt->decrypt = alg->decrypt; | ||
188 | if (!alg->ivsize) { | ||
189 | crt->givencrypt = aead_null_givencrypt; | ||
190 | crt->givdecrypt = aead_null_givdecrypt; | ||
191 | } | ||
192 | crt->base = __crypto_aead_cast(tfm); | ||
193 | crt->ivsize = alg->ivsize; | ||
194 | crt->authsize = alg->maxauthsize; | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | #ifdef CONFIG_NET | 333 | #ifdef CONFIG_NET |
200 | static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) | 334 | static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) |
201 | { | 335 | { |
202 | struct crypto_report_aead raead; | 336 | struct crypto_report_aead raead; |
203 | struct aead_alg *aead = &alg->cra_aead; | 337 | struct old_aead_alg *aead = &alg->cra_aead; |
204 | 338 | ||
205 | strncpy(raead.type, "nivaead", sizeof(raead.type)); | 339 | strncpy(raead.type, "nivaead", sizeof(raead.type)); |
206 | strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv)); | 340 | strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv)); |
@@ -229,7 +363,7 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) | |||
229 | __attribute__ ((unused)); | 363 | __attribute__ ((unused)); |
230 | static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) | 364 | static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) |
231 | { | 365 | { |
232 | struct aead_alg *aead = &alg->cra_aead; | 366 | struct old_aead_alg *aead = &alg->cra_aead; |
233 | 367 | ||
234 | seq_printf(m, "type : nivaead\n"); | 368 | seq_printf(m, "type : nivaead\n"); |
235 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | 369 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? |
@@ -241,43 +375,215 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) | |||
241 | } | 375 | } |
242 | 376 | ||
243 | const struct crypto_type crypto_nivaead_type = { | 377 | const struct crypto_type crypto_nivaead_type = { |
244 | .ctxsize = crypto_aead_ctxsize, | 378 | .extsize = crypto_alg_extsize, |
245 | .init = crypto_init_nivaead_ops, | 379 | .init_tfm = crypto_aead_init_tfm, |
246 | #ifdef CONFIG_PROC_FS | 380 | #ifdef CONFIG_PROC_FS |
247 | .show = crypto_nivaead_show, | 381 | .show = crypto_nivaead_show, |
248 | #endif | 382 | #endif |
249 | .report = crypto_nivaead_report, | 383 | .report = crypto_nivaead_report, |
384 | .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV), | ||
385 | .maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV, | ||
386 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
387 | .tfmsize = offsetof(struct crypto_aead, base), | ||
250 | }; | 388 | }; |
251 | EXPORT_SYMBOL_GPL(crypto_nivaead_type); | 389 | EXPORT_SYMBOL_GPL(crypto_nivaead_type); |
252 | 390 | ||
253 | static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn, | 391 | static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn, |
254 | const char *name, u32 type, u32 mask) | 392 | const char *name, u32 type, u32 mask) |
255 | { | 393 | { |
256 | struct crypto_alg *alg; | 394 | spawn->base.frontend = &crypto_nivaead_type; |
395 | return crypto_grab_spawn(&spawn->base, name, type, mask); | ||
396 | } | ||
397 | |||
398 | static int aead_geniv_setkey(struct crypto_aead *tfm, | ||
399 | const u8 *key, unsigned int keylen) | ||
400 | { | ||
401 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); | ||
402 | |||
403 | return crypto_aead_setkey(ctx->child, key, keylen); | ||
404 | } | ||
405 | |||
406 | static int aead_geniv_setauthsize(struct crypto_aead *tfm, | ||
407 | unsigned int authsize) | ||
408 | { | ||
409 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); | ||
410 | |||
411 | return crypto_aead_setauthsize(ctx->child, authsize); | ||
412 | } | ||
413 | |||
414 | static void compat_encrypt_complete2(struct aead_request *req, int err) | ||
415 | { | ||
416 | struct compat_request_ctx *rctx = aead_request_ctx(req); | ||
417 | struct aead_givcrypt_request *subreq = &rctx->subreq; | ||
418 | struct crypto_aead *geniv; | ||
419 | |||
420 | if (err == -EINPROGRESS) | ||
421 | return; | ||
422 | |||
423 | if (err) | ||
424 | goto out; | ||
425 | |||
426 | geniv = crypto_aead_reqtfm(req); | ||
427 | scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0, | ||
428 | crypto_aead_ivsize(geniv), 1); | ||
429 | |||
430 | out: | ||
431 | kzfree(subreq->giv); | ||
432 | } | ||
433 | |||
434 | static void compat_encrypt_complete(struct crypto_async_request *base, int err) | ||
435 | { | ||
436 | struct aead_request *req = base->data; | ||
437 | |||
438 | compat_encrypt_complete2(req, err); | ||
439 | aead_request_complete(req, err); | ||
440 | } | ||
441 | |||
442 | static int compat_encrypt(struct aead_request *req) | ||
443 | { | ||
444 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
445 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
446 | struct compat_request_ctx *rctx = aead_request_ctx(req); | ||
447 | struct aead_givcrypt_request *subreq = &rctx->subreq; | ||
448 | unsigned int ivsize = crypto_aead_ivsize(geniv); | ||
449 | struct scatterlist *src, *dst; | ||
450 | crypto_completion_t compl; | ||
451 | void *data; | ||
452 | u8 *info; | ||
453 | __be64 seq; | ||
257 | int err; | 454 | int err; |
258 | 455 | ||
259 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | 456 | if (req->cryptlen < ivsize) |
260 | type |= CRYPTO_ALG_TYPE_AEAD; | 457 | return -EINVAL; |
261 | mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV; | ||
262 | 458 | ||
263 | alg = crypto_alg_mod_lookup(name, type, mask); | 459 | compl = req->base.complete; |
264 | if (IS_ERR(alg)) | 460 | data = req->base.data; |
265 | return PTR_ERR(alg); | 461 | |
462 | rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen); | ||
463 | info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg); | ||
464 | |||
465 | if (!info) { | ||
466 | info = kmalloc(ivsize, req->base.flags & | ||
467 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | ||
468 | GFP_ATOMIC); | ||
469 | if (!info) | ||
470 | return -ENOMEM; | ||
471 | |||
472 | compl = compat_encrypt_complete; | ||
473 | data = req; | ||
474 | } | ||
475 | |||
476 | memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq)); | ||
477 | |||
478 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize); | ||
479 | dst = req->src == req->dst ? | ||
480 | src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize); | ||
481 | |||
482 | aead_givcrypt_set_tfm(subreq, ctx->child); | ||
483 | aead_givcrypt_set_callback(subreq, req->base.flags, | ||
484 | req->base.complete, req->base.data); | ||
485 | aead_givcrypt_set_crypt(subreq, src, dst, | ||
486 | req->cryptlen - ivsize, req->iv); | ||
487 | aead_givcrypt_set_assoc(subreq, req->src, req->assoclen); | ||
488 | aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq)); | ||
489 | |||
490 | err = crypto_aead_givencrypt(subreq); | ||
491 | if (unlikely(PageHighMem(sg_page(rctx->ivsg)))) | ||
492 | compat_encrypt_complete2(req, err); | ||
493 | return err; | ||
494 | } | ||
495 | |||
496 | static int compat_decrypt(struct aead_request *req) | ||
497 | { | ||
498 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
499 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
500 | struct compat_request_ctx *rctx = aead_request_ctx(req); | ||
501 | struct aead_request *subreq = &rctx->subreq.areq; | ||
502 | unsigned int ivsize = crypto_aead_ivsize(geniv); | ||
503 | struct scatterlist *src, *dst; | ||
504 | crypto_completion_t compl; | ||
505 | void *data; | ||
506 | |||
507 | if (req->cryptlen < ivsize) | ||
508 | return -EINVAL; | ||
509 | |||
510 | aead_request_set_tfm(subreq, ctx->child); | ||
511 | |||
512 | compl = req->base.complete; | ||
513 | data = req->base.data; | ||
514 | |||
515 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize); | ||
516 | dst = req->src == req->dst ? | ||
517 | src : scatterwalk_ffwd(rctx->dst, req->dst, | ||
518 | req->assoclen + ivsize); | ||
519 | |||
520 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
521 | aead_request_set_crypt(subreq, src, dst, | ||
522 | req->cryptlen - ivsize, req->iv); | ||
523 | aead_request_set_assoc(subreq, req->src, req->assoclen); | ||
524 | |||
525 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); | ||
526 | |||
527 | return crypto_aead_decrypt(subreq); | ||
528 | } | ||
529 | |||
530 | static int compat_encrypt_first(struct aead_request *req) | ||
531 | { | ||
532 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
533 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
534 | int err = 0; | ||
535 | |||
536 | spin_lock_bh(&ctx->lock); | ||
537 | if (geniv->encrypt != compat_encrypt_first) | ||
538 | goto unlock; | ||
539 | |||
540 | geniv->encrypt = compat_encrypt; | ||
541 | |||
542 | unlock: | ||
543 | spin_unlock_bh(&ctx->lock); | ||
544 | |||
545 | if (err) | ||
546 | return err; | ||
547 | |||
548 | return compat_encrypt(req); | ||
549 | } | ||
550 | |||
551 | static int aead_geniv_init_compat(struct crypto_tfm *tfm) | ||
552 | { | ||
553 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
554 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
555 | int err; | ||
556 | |||
557 | spin_lock_init(&ctx->lock); | ||
558 | |||
559 | crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx)); | ||
560 | |||
561 | err = aead_geniv_init(tfm); | ||
562 | |||
563 | ctx->child = geniv->child; | ||
564 | geniv->child = geniv; | ||
266 | 565 | ||
267 | err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); | ||
268 | crypto_mod_put(alg); | ||
269 | return err; | 566 | return err; |
270 | } | 567 | } |
271 | 568 | ||
272 | struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, | 569 | static void aead_geniv_exit_compat(struct crypto_tfm *tfm) |
273 | struct rtattr **tb, u32 type, | 570 | { |
274 | u32 mask) | 571 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); |
572 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
573 | |||
574 | crypto_free_aead(ctx->child); | ||
575 | } | ||
576 | |||
577 | struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | ||
578 | struct rtattr **tb, u32 type, u32 mask) | ||
275 | { | 579 | { |
276 | const char *name; | 580 | const char *name; |
277 | struct crypto_aead_spawn *spawn; | 581 | struct crypto_aead_spawn *spawn; |
278 | struct crypto_attr_type *algt; | 582 | struct crypto_attr_type *algt; |
279 | struct crypto_instance *inst; | 583 | struct aead_instance *inst; |
280 | struct crypto_alg *alg; | 584 | struct aead_alg *alg; |
585 | unsigned int ivsize; | ||
586 | unsigned int maxauthsize; | ||
281 | int err; | 587 | int err; |
282 | 588 | ||
283 | algt = crypto_get_attr_type(tb); | 589 | algt = crypto_get_attr_type(tb); |
@@ -296,20 +602,25 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, | |||
296 | if (!inst) | 602 | if (!inst) |
297 | return ERR_PTR(-ENOMEM); | 603 | return ERR_PTR(-ENOMEM); |
298 | 604 | ||
299 | spawn = crypto_instance_ctx(inst); | 605 | spawn = aead_instance_ctx(inst); |
300 | 606 | ||
301 | /* Ignore async algorithms if necessary. */ | 607 | /* Ignore async algorithms if necessary. */ |
302 | mask |= crypto_requires_sync(algt->type, algt->mask); | 608 | mask |= crypto_requires_sync(algt->type, algt->mask); |
303 | 609 | ||
304 | crypto_set_aead_spawn(spawn, inst); | 610 | crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); |
305 | err = crypto_grab_nivaead(spawn, name, type, mask); | 611 | err = (algt->mask & CRYPTO_ALG_GENIV) ? |
612 | crypto_grab_nivaead(spawn, name, type, mask) : | ||
613 | crypto_grab_aead(spawn, name, type, mask); | ||
306 | if (err) | 614 | if (err) |
307 | goto err_free_inst; | 615 | goto err_free_inst; |
308 | 616 | ||
309 | alg = crypto_aead_spawn_alg(spawn); | 617 | alg = crypto_spawn_aead_alg(spawn); |
618 | |||
619 | ivsize = crypto_aead_alg_ivsize(alg); | ||
620 | maxauthsize = crypto_aead_alg_maxauthsize(alg); | ||
310 | 621 | ||
311 | err = -EINVAL; | 622 | err = -EINVAL; |
312 | if (!alg->cra_aead.ivsize) | 623 | if (ivsize < sizeof(u64)) |
313 | goto err_drop_alg; | 624 | goto err_drop_alg; |
314 | 625 | ||
315 | /* | 626 | /* |
@@ -318,39 +629,64 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, | |||
318 | * template name and double-check the IV generator. | 629 | * template name and double-check the IV generator. |
319 | */ | 630 | */ |
320 | if (algt->mask & CRYPTO_ALG_GENIV) { | 631 | if (algt->mask & CRYPTO_ALG_GENIV) { |
321 | if (strcmp(tmpl->name, alg->cra_aead.geniv)) | 632 | if (!alg->base.cra_aead.encrypt) |
633 | goto err_drop_alg; | ||
634 | if (strcmp(tmpl->name, alg->base.cra_aead.geniv)) | ||
322 | goto err_drop_alg; | 635 | goto err_drop_alg; |
323 | 636 | ||
324 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 637 | memcpy(inst->alg.base.cra_name, alg->base.cra_name, |
325 | memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, | ||
326 | CRYPTO_MAX_ALG_NAME); | 638 | CRYPTO_MAX_ALG_NAME); |
327 | } else { | 639 | memcpy(inst->alg.base.cra_driver_name, |
328 | err = -ENAMETOOLONG; | 640 | alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME); |
329 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 641 | |
330 | "%s(%s)", tmpl->name, alg->cra_name) >= | 642 | inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD | |
331 | CRYPTO_MAX_ALG_NAME) | 643 | CRYPTO_ALG_GENIV; |
332 | goto err_drop_alg; | 644 | inst->alg.base.cra_flags |= alg->base.cra_flags & |
333 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 645 | CRYPTO_ALG_ASYNC; |
334 | "%s(%s)", tmpl->name, alg->cra_driver_name) >= | 646 | inst->alg.base.cra_priority = alg->base.cra_priority; |
335 | CRYPTO_MAX_ALG_NAME) | 647 | inst->alg.base.cra_blocksize = alg->base.cra_blocksize; |
336 | goto err_drop_alg; | 648 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; |
649 | inst->alg.base.cra_type = &crypto_aead_type; | ||
650 | |||
651 | inst->alg.base.cra_aead.ivsize = ivsize; | ||
652 | inst->alg.base.cra_aead.maxauthsize = maxauthsize; | ||
653 | |||
654 | inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey; | ||
655 | inst->alg.base.cra_aead.setauthsize = | ||
656 | alg->base.cra_aead.setauthsize; | ||
657 | inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt; | ||
658 | inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt; | ||
659 | |||
660 | goto out; | ||
337 | } | 661 | } |
338 | 662 | ||
339 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV; | 663 | err = -ENAMETOOLONG; |
340 | inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; | 664 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
341 | inst->alg.cra_priority = alg->cra_priority; | 665 | "%s(%s)", tmpl->name, alg->base.cra_name) >= |
342 | inst->alg.cra_blocksize = alg->cra_blocksize; | 666 | CRYPTO_MAX_ALG_NAME) |
343 | inst->alg.cra_alignmask = alg->cra_alignmask; | 667 | goto err_drop_alg; |
344 | inst->alg.cra_type = &crypto_aead_type; | 668 | if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
669 | "%s(%s)", tmpl->name, alg->base.cra_driver_name) >= | ||
670 | CRYPTO_MAX_ALG_NAME) | ||
671 | goto err_drop_alg; | ||
672 | |||
673 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; | ||
674 | inst->alg.base.cra_priority = alg->base.cra_priority; | ||
675 | inst->alg.base.cra_blocksize = alg->base.cra_blocksize; | ||
676 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; | ||
677 | inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); | ||
678 | |||
679 | inst->alg.setkey = aead_geniv_setkey; | ||
680 | inst->alg.setauthsize = aead_geniv_setauthsize; | ||
345 | 681 | ||
346 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | 682 | inst->alg.ivsize = ivsize; |
347 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | 683 | inst->alg.maxauthsize = maxauthsize; |
348 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | ||
349 | 684 | ||
350 | inst->alg.cra_aead.setkey = alg->cra_aead.setkey; | 685 | inst->alg.encrypt = compat_encrypt_first; |
351 | inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; | 686 | inst->alg.decrypt = compat_decrypt; |
352 | inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt; | 687 | |
353 | inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt; | 688 | inst->alg.base.cra_init = aead_geniv_init_compat; |
689 | inst->alg.base.cra_exit = aead_geniv_exit_compat; | ||
354 | 690 | ||
355 | out: | 691 | out: |
356 | return inst; | 692 | return inst; |
@@ -364,9 +700,9 @@ err_free_inst: | |||
364 | } | 700 | } |
365 | EXPORT_SYMBOL_GPL(aead_geniv_alloc); | 701 | EXPORT_SYMBOL_GPL(aead_geniv_alloc); |
366 | 702 | ||
367 | void aead_geniv_free(struct crypto_instance *inst) | 703 | void aead_geniv_free(struct aead_instance *inst) |
368 | { | 704 | { |
369 | crypto_drop_aead(crypto_instance_ctx(inst)); | 705 | crypto_drop_aead(aead_instance_ctx(inst)); |
370 | kfree(inst); | 706 | kfree(inst); |
371 | } | 707 | } |
372 | EXPORT_SYMBOL_GPL(aead_geniv_free); | 708 | EXPORT_SYMBOL_GPL(aead_geniv_free); |
@@ -374,14 +710,17 @@ EXPORT_SYMBOL_GPL(aead_geniv_free); | |||
374 | int aead_geniv_init(struct crypto_tfm *tfm) | 710 | int aead_geniv_init(struct crypto_tfm *tfm) |
375 | { | 711 | { |
376 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 712 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
713 | struct crypto_aead *child; | ||
377 | struct crypto_aead *aead; | 714 | struct crypto_aead *aead; |
378 | 715 | ||
379 | aead = crypto_spawn_aead(crypto_instance_ctx(inst)); | 716 | aead = __crypto_aead_cast(tfm); |
380 | if (IS_ERR(aead)) | 717 | |
381 | return PTR_ERR(aead); | 718 | child = crypto_spawn_aead(crypto_instance_ctx(inst)); |
719 | if (IS_ERR(child)) | ||
720 | return PTR_ERR(child); | ||
382 | 721 | ||
383 | tfm->crt_aead.base = aead; | 722 | aead->child = child; |
384 | tfm->crt_aead.reqsize += crypto_aead_reqsize(aead); | 723 | aead->reqsize += crypto_aead_reqsize(child); |
385 | 724 | ||
386 | return 0; | 725 | return 0; |
387 | } | 726 | } |
@@ -389,7 +728,7 @@ EXPORT_SYMBOL_GPL(aead_geniv_init); | |||
389 | 728 | ||
390 | void aead_geniv_exit(struct crypto_tfm *tfm) | 729 | void aead_geniv_exit(struct crypto_tfm *tfm) |
391 | { | 730 | { |
392 | crypto_free_aead(tfm->crt_aead.base); | 731 | crypto_free_aead(__crypto_aead_cast(tfm)->child); |
393 | } | 732 | } |
394 | EXPORT_SYMBOL_GPL(aead_geniv_exit); | 733 | EXPORT_SYMBOL_GPL(aead_geniv_exit); |
395 | 734 | ||
@@ -443,6 +782,13 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
443 | if (!tmpl) | 782 | if (!tmpl) |
444 | goto kill_larval; | 783 | goto kill_larval; |
445 | 784 | ||
785 | if (tmpl->create) { | ||
786 | err = tmpl->create(tmpl, tb); | ||
787 | if (err) | ||
788 | goto put_tmpl; | ||
789 | goto ok; | ||
790 | } | ||
791 | |||
446 | inst = tmpl->alloc(tb); | 792 | inst = tmpl->alloc(tb); |
447 | err = PTR_ERR(inst); | 793 | err = PTR_ERR(inst); |
448 | if (IS_ERR(inst)) | 794 | if (IS_ERR(inst)) |
@@ -454,6 +800,7 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
454 | goto put_tmpl; | 800 | goto put_tmpl; |
455 | } | 801 | } |
456 | 802 | ||
803 | ok: | ||
457 | /* Redo the lookup to use the instance we just registered. */ | 804 | /* Redo the lookup to use the instance we just registered. */ |
458 | err = -EAGAIN; | 805 | err = -EAGAIN; |
459 | 806 | ||
@@ -489,7 +836,7 @@ struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask) | |||
489 | return alg; | 836 | return alg; |
490 | 837 | ||
491 | if (alg->cra_type == &crypto_aead_type) { | 838 | if (alg->cra_type == &crypto_aead_type) { |
492 | if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) { | 839 | if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { |
493 | crypto_mod_put(alg); | 840 | crypto_mod_put(alg); |
494 | alg = ERR_PTR(-ENOENT); | 841 | alg = ERR_PTR(-ENOENT); |
495 | } | 842 | } |
@@ -505,62 +852,91 @@ EXPORT_SYMBOL_GPL(crypto_lookup_aead); | |||
505 | int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, | 852 | int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, |
506 | u32 type, u32 mask) | 853 | u32 type, u32 mask) |
507 | { | 854 | { |
508 | struct crypto_alg *alg; | 855 | spawn->base.frontend = &crypto_aead_type; |
509 | int err; | 856 | return crypto_grab_spawn(&spawn->base, name, type, mask); |
857 | } | ||
858 | EXPORT_SYMBOL_GPL(crypto_grab_aead); | ||
510 | 859 | ||
511 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | 860 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) |
512 | type |= CRYPTO_ALG_TYPE_AEAD; | 861 | { |
513 | mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | 862 | return crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask); |
514 | mask |= CRYPTO_ALG_TYPE_MASK; | 863 | } |
864 | EXPORT_SYMBOL_GPL(crypto_alloc_aead); | ||
515 | 865 | ||
516 | alg = crypto_lookup_aead(name, type, mask); | 866 | static int aead_prepare_alg(struct aead_alg *alg) |
517 | if (IS_ERR(alg)) | 867 | { |
518 | return PTR_ERR(alg); | 868 | struct crypto_alg *base = &alg->base; |
519 | 869 | ||
520 | err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); | 870 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) |
521 | crypto_mod_put(alg); | 871 | return -EINVAL; |
522 | return err; | 872 | |
873 | base->cra_type = &crypto_new_aead_type; | ||
874 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
875 | base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; | ||
876 | |||
877 | return 0; | ||
523 | } | 878 | } |
524 | EXPORT_SYMBOL_GPL(crypto_grab_aead); | ||
525 | 879 | ||
526 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) | 880 | int crypto_register_aead(struct aead_alg *alg) |
527 | { | 881 | { |
528 | struct crypto_tfm *tfm; | 882 | struct crypto_alg *base = &alg->base; |
529 | int err; | 883 | int err; |
530 | 884 | ||
531 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | 885 | err = aead_prepare_alg(alg); |
532 | type |= CRYPTO_ALG_TYPE_AEAD; | 886 | if (err) |
533 | mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | 887 | return err; |
534 | mask |= CRYPTO_ALG_TYPE_MASK; | ||
535 | 888 | ||
536 | for (;;) { | 889 | return crypto_register_alg(base); |
537 | struct crypto_alg *alg; | 890 | } |
891 | EXPORT_SYMBOL_GPL(crypto_register_aead); | ||
538 | 892 | ||
539 | alg = crypto_lookup_aead(alg_name, type, mask); | 893 | void crypto_unregister_aead(struct aead_alg *alg) |
540 | if (IS_ERR(alg)) { | 894 | { |
541 | err = PTR_ERR(alg); | 895 | crypto_unregister_alg(&alg->base); |
542 | goto err; | 896 | } |
543 | } | 897 | EXPORT_SYMBOL_GPL(crypto_unregister_aead); |
544 | 898 | ||
545 | tfm = __crypto_alloc_tfm(alg, type, mask); | 899 | int crypto_register_aeads(struct aead_alg *algs, int count) |
546 | if (!IS_ERR(tfm)) | 900 | { |
547 | return __crypto_aead_cast(tfm); | 901 | int i, ret; |
548 | 902 | ||
549 | crypto_mod_put(alg); | 903 | for (i = 0; i < count; i++) { |
550 | err = PTR_ERR(tfm); | 904 | ret = crypto_register_aead(&algs[i]); |
905 | if (ret) | ||
906 | goto err; | ||
907 | } | ||
908 | |||
909 | return 0; | ||
551 | 910 | ||
552 | err: | 911 | err: |
553 | if (err != -EAGAIN) | 912 | for (--i; i >= 0; --i) |
554 | break; | 913 | crypto_unregister_aead(&algs[i]); |
555 | if (signal_pending(current)) { | ||
556 | err = -EINTR; | ||
557 | break; | ||
558 | } | ||
559 | } | ||
560 | 914 | ||
561 | return ERR_PTR(err); | 915 | return ret; |
562 | } | 916 | } |
563 | EXPORT_SYMBOL_GPL(crypto_alloc_aead); | 917 | EXPORT_SYMBOL_GPL(crypto_register_aeads); |
918 | |||
919 | void crypto_unregister_aeads(struct aead_alg *algs, int count) | ||
920 | { | ||
921 | int i; | ||
922 | |||
923 | for (i = count - 1; i >= 0; --i) | ||
924 | crypto_unregister_aead(&algs[i]); | ||
925 | } | ||
926 | EXPORT_SYMBOL_GPL(crypto_unregister_aeads); | ||
927 | |||
928 | int aead_register_instance(struct crypto_template *tmpl, | ||
929 | struct aead_instance *inst) | ||
930 | { | ||
931 | int err; | ||
932 | |||
933 | err = aead_prepare_alg(&inst->alg); | ||
934 | if (err) | ||
935 | return err; | ||
936 | |||
937 | return crypto_register_instance(tmpl, aead_crypto_instance(inst)); | ||
938 | } | ||
939 | EXPORT_SYMBOL_GPL(aead_register_instance); | ||
564 | 940 | ||
565 | MODULE_LICENSE("GPL"); | 941 | MODULE_LICENSE("GPL"); |
566 | MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)"); | 942 | MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)"); |
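The aead.c changes above also show the whole lifecycle of the new-style interface: aead_prepare_alg() stamps the type and flags, crypto_register_aead() registers a single algorithm, and the plural/instance variants unwind cleanly on partial failure. As a rough sketch of what a driver conversion targets (my_setkey, my_encrypt, my_decrypt and struct my_ctx are hypothetical placeholders, not anything from this patch):

	/* Hypothetical callbacks; signatures follow the new struct aead_alg. */
	static int my_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen);
	static int my_encrypt(struct aead_request *req);
	static int my_decrypt(struct aead_request *req);

	static struct aead_alg my_alg = {
		.setkey      = my_setkey,
		.encrypt     = my_encrypt,
		.decrypt     = my_decrypt,
		.ivsize      = 12,
		.maxauthsize = 16,	/* max(ivsize, maxauthsize) <= PAGE_SIZE / 8 */
		.base = {
			.cra_name        = "gcm(aes)",
			.cra_driver_name = "gcm-aes-mydriver",
			.cra_priority    = 100,
			.cra_blocksize   = 1,
			.cra_ctxsize     = sizeof(struct my_ctx),
			.cra_module      = THIS_MODULE,
		},
	};

	/* aead_prepare_alg() fills in cra_type and CRYPTO_ALG_TYPE_AEAD. */
	int err = crypto_register_aead(&my_alg);

Arrays of algorithms go through crypto_register_aeads()/crypto_unregister_aeads(), which, as the loop above shows, unregister everything already registered if one entry fails.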
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index f22cc56fd1b3..2bc180e02115 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -127,6 +127,7 @@ EXPORT_SYMBOL_GPL(af_alg_release); | |||
127 | 127 | ||
128 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 128 | static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
129 | { | 129 | { |
130 | const u32 forbidden = CRYPTO_ALG_INTERNAL; | ||
130 | struct sock *sk = sock->sk; | 131 | struct sock *sk = sock->sk; |
131 | struct alg_sock *ask = alg_sk(sk); | 132 | struct alg_sock *ask = alg_sk(sk); |
132 | struct sockaddr_alg *sa = (void *)uaddr; | 133 | struct sockaddr_alg *sa = (void *)uaddr; |
@@ -151,7 +152,9 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
151 | if (IS_ERR(type)) | 152 | if (IS_ERR(type)) |
152 | return PTR_ERR(type); | 153 | return PTR_ERR(type); |
153 | 154 | ||
154 | private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); | 155 | private = type->bind(sa->salg_name, |
156 | sa->salg_feat & ~forbidden, | ||
157 | sa->salg_mask & ~forbidden); | ||
155 | if (IS_ERR(private)) { | 158 | if (IS_ERR(private)) { |
156 | module_put(type->owner); | 159 | module_put(type->owner); |
157 | return PTR_ERR(private); | 160 | return PTR_ERR(private); |
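The forbidden mask amounts to clearing one bit in both words user space hands in, so CRYPTO_ALG_INTERNAL can never be selected over AF_ALG regardless of what the caller requests. A worked example with hypothetical input values:

	const u32 forbidden = CRYPTO_ALG_INTERNAL;
	u32 feat = CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC;	/* from userspace */
	u32 mask = CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC;

	feat &= ~forbidden;	/* CRYPTO_ALG_ASYNC survives, INTERNAL is stripped */
	mask &= ~forbidden;	/* internal-only helper algorithms stay unreachable */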
diff --git a/crypto/akcipher.c b/crypto/akcipher.c new file mode 100644 index 000000000000..d7986414814e --- /dev/null +++ b/crypto/akcipher.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Public Key Encryption | ||
3 | * | ||
4 | * Copyright (c) 2015, Intel Corporation | ||
5 | * Authors: Tadeusz Struk <tadeusz.struk@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/crypto.h> | ||
20 | #include <crypto/algapi.h> | ||
21 | #include <linux/cryptouser.h> | ||
22 | #include <net/netlink.h> | ||
23 | #include <crypto/akcipher.h> | ||
24 | #include <crypto/public_key.h> | ||
25 | #include "internal.h" | ||
26 | |||
27 | #ifdef CONFIG_NET | ||
28 | static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
29 | { | ||
30 | struct crypto_report_akcipher rakcipher; | ||
31 | |||
32 | strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); | ||
33 | |||
34 | if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, | ||
35 | sizeof(struct crypto_report_akcipher), &rakcipher)) | ||
36 | goto nla_put_failure; | ||
37 | return 0; | ||
38 | |||
39 | nla_put_failure: | ||
40 | return -EMSGSIZE; | ||
41 | } | ||
42 | #else | ||
43 | static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
44 | { | ||
45 | return -ENOSYS; | ||
46 | } | ||
47 | #endif | ||
48 | |||
49 | static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) | ||
50 | __attribute__ ((unused)); | ||
51 | |||
52 | static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) | ||
53 | { | ||
54 | seq_puts(m, "type : akcipher\n"); | ||
55 | } | ||
56 | |||
57 | static void crypto_akcipher_exit_tfm(struct crypto_tfm *tfm) | ||
58 | { | ||
59 | struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm); | ||
60 | struct akcipher_alg *alg = crypto_akcipher_alg(akcipher); | ||
61 | |||
62 | alg->exit(akcipher); | ||
63 | } | ||
64 | |||
65 | static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm) | ||
66 | { | ||
67 | struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm); | ||
68 | struct akcipher_alg *alg = crypto_akcipher_alg(akcipher); | ||
69 | |||
70 | if (alg->exit) | ||
71 | akcipher->base.exit = crypto_akcipher_exit_tfm; | ||
72 | |||
73 | if (alg->init) | ||
74 | return alg->init(akcipher); | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static const struct crypto_type crypto_akcipher_type = { | ||
80 | .extsize = crypto_alg_extsize, | ||
81 | .init_tfm = crypto_akcipher_init_tfm, | ||
82 | #ifdef CONFIG_PROC_FS | ||
83 | .show = crypto_akcipher_show, | ||
84 | #endif | ||
85 | .report = crypto_akcipher_report, | ||
86 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
87 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
88 | .type = CRYPTO_ALG_TYPE_AKCIPHER, | ||
89 | .tfmsize = offsetof(struct crypto_akcipher, base), | ||
90 | }; | ||
91 | |||
92 | struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, | ||
93 | u32 mask) | ||
94 | { | ||
95 | return crypto_alloc_tfm(alg_name, &crypto_akcipher_type, type, mask); | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); | ||
98 | |||
99 | int crypto_register_akcipher(struct akcipher_alg *alg) | ||
100 | { | ||
101 | struct crypto_alg *base = &alg->base; | ||
102 | |||
103 | base->cra_type = &crypto_akcipher_type; | ||
104 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
105 | base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; | ||
106 | return crypto_register_alg(base); | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(crypto_register_akcipher); | ||
109 | |||
110 | void crypto_unregister_akcipher(struct akcipher_alg *alg) | ||
111 | { | ||
112 | crypto_unregister_alg(&alg->base); | ||
113 | } | ||
114 | EXPORT_SYMBOL_GPL(crypto_unregister_akcipher); | ||
115 | |||
116 | MODULE_LICENSE("GPL"); | ||
117 | MODULE_DESCRIPTION("Generic public key cipher type"); | ||
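The akcipher type is deliberately minimal: only init and exit are consulted by crypto_akcipher_init_tfm() above, and registration stamps cra_type and the type flag. A hedged sketch of a registration against it, with hypothetical callback names and a placeholder context structure:

	struct my_akcipher_ctx { u8 placeholder; };	/* hypothetical */

	static int my_init(struct crypto_akcipher *tfm)
	{
		return 0;	/* e.g. allocate per-tfm state */
	}

	static void my_exit(struct crypto_akcipher *tfm)
	{
		/* wired up as base.exit by crypto_akcipher_init_tfm() */
	}

	static struct akcipher_alg my_akcipher = {
		.init = my_init,	/* both optional */
		.exit = my_exit,
		.base = {
			.cra_name        = "rsa",
			.cra_driver_name = "rsa-mydriver",
			.cra_priority    = 100,
			.cra_ctxsize     = sizeof(struct my_akcipher_ctx),
			.cra_module      = THIS_MODULE,
		},
	};

	int err = crypto_register_akcipher(&my_akcipher);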
diff --git a/crypto/algapi.c b/crypto/algapi.c index d2627a3d4ed8..3c079b7f23f6 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/fips.h> | ||
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/list.h> | 18 | #include <linux/list.h> |
@@ -43,12 +44,9 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg) | |||
43 | 44 | ||
44 | static inline void crypto_check_module_sig(struct module *mod) | 45 | static inline void crypto_check_module_sig(struct module *mod) |
45 | { | 46 | { |
46 | #ifdef CONFIG_CRYPTO_FIPS | 47 | if (fips_enabled && mod && !module_sig_ok(mod)) |
47 | if (fips_enabled && mod && !mod->sig_ok) | ||
48 | panic("Module %s signature verification failed in FIPS mode\n", | 48 | panic("Module %s signature verification failed in FIPS mode\n", |
49 | mod->name); | 49 | module_name(mod)); |
50 | #endif | ||
51 | return; | ||
52 | } | 50 | } |
53 | 51 | ||
54 | static int crypto_check_alg(struct crypto_alg *alg) | 52 | static int crypto_check_alg(struct crypto_alg *alg) |
@@ -614,6 +612,22 @@ out: | |||
614 | } | 612 | } |
615 | EXPORT_SYMBOL_GPL(crypto_init_spawn2); | 613 | EXPORT_SYMBOL_GPL(crypto_init_spawn2); |
616 | 614 | ||
615 | int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, | ||
616 | u32 type, u32 mask) | ||
617 | { | ||
618 | struct crypto_alg *alg; | ||
619 | int err; | ||
620 | |||
621 | alg = crypto_find_alg(name, spawn->frontend, type, mask); | ||
622 | if (IS_ERR(alg)) | ||
623 | return PTR_ERR(alg); | ||
624 | |||
625 | err = crypto_init_spawn(spawn, alg, spawn->inst, mask); | ||
626 | crypto_mod_put(alg); | ||
627 | return err; | ||
628 | } | ||
629 | EXPORT_SYMBOL_GPL(crypto_grab_spawn); | ||
630 | |||
617 | void crypto_drop_spawn(struct crypto_spawn *spawn) | 631 | void crypto_drop_spawn(struct crypto_spawn *spawn) |
618 | { | 632 | { |
619 | if (!spawn->alg) | 633 | if (!spawn->alg) |
@@ -964,6 +978,13 @@ void crypto_xor(u8 *dst, const u8 *src, unsigned int size) | |||
964 | } | 978 | } |
965 | EXPORT_SYMBOL_GPL(crypto_xor); | 979 | EXPORT_SYMBOL_GPL(crypto_xor); |
966 | 980 | ||
981 | unsigned int crypto_alg_extsize(struct crypto_alg *alg) | ||
982 | { | ||
983 | return alg->cra_ctxsize + | ||
984 | (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1)); | ||
985 | } | ||
986 | EXPORT_SYMBOL_GPL(crypto_alg_extsize); | ||
987 | |||
967 | static int __init crypto_algapi_init(void) | 988 | static int __init crypto_algapi_init(void) |
968 | { | 989 | { |
969 | crypto_init_proc(); | 990 | crypto_init_proc(); |
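The new crypto_alg_extsize() helper sizes the transform context as the declared context size plus whatever slack is needed to realign it from the allocator's natural context alignment up to the algorithm's alignmask. A worked example with hypothetical numbers:

	/*
	 * With cra_ctxsize = 64, cra_alignmask = 15 (16-byte alignment)
	 * and crypto_tfm_ctx_alignment() == 8:
	 *
	 *	extsize = 64 + (15 & ~(8 - 1)) = 64 + 8 = 72
	 *
	 * The extra 8 bytes let the context pointer be bumped up to the
	 * stricter 16-byte boundary at runtime.
	 */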
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 69abada22373..e0408a480d2f 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * any later version. | 13 | * any later version. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <crypto/aead.h> | ||
16 | #include <crypto/scatterwalk.h> | 17 | #include <crypto/scatterwalk.h> |
17 | #include <crypto/if_alg.h> | 18 | #include <crypto/if_alg.h> |
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -71,7 +72,7 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx) | |||
71 | { | 72 | { |
72 | unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); | 73 | unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); |
73 | 74 | ||
74 | return (ctx->used >= (ctx->aead_assoclen + (ctx->enc ? 0 : as))); | 75 | return ctx->used >= ctx->aead_assoclen + as; |
75 | } | 76 | } |
76 | 77 | ||
77 | static void aead_put_sgl(struct sock *sk) | 78 | static void aead_put_sgl(struct sock *sk) |
@@ -352,12 +353,8 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | |||
352 | struct sock *sk = sock->sk; | 353 | struct sock *sk = sock->sk; |
353 | struct alg_sock *ask = alg_sk(sk); | 354 | struct alg_sock *ask = alg_sk(sk); |
354 | struct aead_ctx *ctx = ask->private; | 355 | struct aead_ctx *ctx = ask->private; |
355 | unsigned bs = crypto_aead_blocksize(crypto_aead_reqtfm(&ctx->aead_req)); | ||
356 | unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); | 356 | unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); |
357 | struct aead_sg_list *sgl = &ctx->tsgl; | 357 | struct aead_sg_list *sgl = &ctx->tsgl; |
358 | struct scatterlist *sg = NULL; | ||
359 | struct scatterlist assoc[ALG_MAX_PAGES]; | ||
360 | size_t assoclen = 0; | ||
361 | unsigned int i = 0; | 358 | unsigned int i = 0; |
362 | int err = -EINVAL; | 359 | int err = -EINVAL; |
363 | unsigned long used = 0; | 360 | unsigned long used = 0; |
@@ -406,23 +403,13 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | |||
406 | if (!aead_sufficient_data(ctx)) | 403 | if (!aead_sufficient_data(ctx)) |
407 | goto unlock; | 404 | goto unlock; |
408 | 405 | ||
406 | outlen = used; | ||
407 | |||
409 | /* | 408 | /* |
410 | * The cipher operation input data is reduced by the associated data | 409 | * The cipher operation input data is reduced by the associated data |
411 | * length as this data is processed separately later on. | 410 | * length as this data is processed separately later on. |
412 | */ | 411 | */ |
413 | used -= ctx->aead_assoclen; | 412 | used -= ctx->aead_assoclen + (ctx->enc ? as : 0); |
414 | |||
415 | if (ctx->enc) { | ||
416 | /* round up output buffer to multiple of block size */ | ||
417 | outlen = ((used + bs - 1) / bs * bs); | ||
418 | /* add the size needed for the auth tag to be created */ | ||
419 | outlen += as; | ||
420 | } else { | ||
421 | /* output data size is input without the authentication tag */ | ||
422 | outlen = used - as; | ||
423 | /* round up output buffer to multiple of block size */ | ||
424 | outlen = ((outlen + bs - 1) / bs * bs); | ||
425 | } | ||
426 | 413 | ||
427 | /* convert iovecs of output buffers into scatterlists */ | 414 | /* convert iovecs of output buffers into scatterlists */ |
428 | while (iov_iter_count(&msg->msg_iter)) { | 415 | while (iov_iter_count(&msg->msg_iter)) { |
@@ -451,47 +438,11 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | |||
451 | if (usedpages < outlen) | 438 | if (usedpages < outlen) |
452 | goto unlock; | 439 | goto unlock; |
453 | 440 | ||
454 | sg_init_table(assoc, ALG_MAX_PAGES); | 441 | sg_mark_end(sgl->sg + sgl->cur - 1); |
455 | assoclen = ctx->aead_assoclen; | ||
456 | /* | ||
457 | * Split scatterlist into two: first part becomes AD, second part | ||
458 | * is plaintext / ciphertext. The first part is assigned to assoc | ||
459 | * scatterlist. When this loop finishes, sg points to the start of the | ||
460 | * plaintext / ciphertext. | ||
461 | */ | ||
462 | for (i = 0; i < ctx->tsgl.cur; i++) { | ||
463 | sg = sgl->sg + i; | ||
464 | if (sg->length <= assoclen) { | ||
465 | /* AD is larger than one page */ | ||
466 | sg_set_page(assoc + i, sg_page(sg), | ||
467 | sg->length, sg->offset); | ||
468 | assoclen -= sg->length; | ||
469 | if (i >= ctx->tsgl.cur) | ||
470 | goto unlock; | ||
471 | } else if (!assoclen) { | ||
472 | /* current page is to start of plaintext / ciphertext */ | ||
473 | if (i) | ||
474 | /* AD terminates at page boundary */ | ||
475 | sg_mark_end(assoc + i - 1); | ||
476 | else | ||
477 | /* AD size is zero */ | ||
478 | sg_mark_end(assoc); | ||
479 | break; | ||
480 | } else { | ||
481 | /* AD does not terminate at page boundary */ | ||
482 | sg_set_page(assoc + i, sg_page(sg), | ||
483 | assoclen, sg->offset); | ||
484 | sg_mark_end(assoc + i); | ||
485 | /* plaintext / ciphertext starts after AD */ | ||
486 | sg->length -= assoclen; | ||
487 | sg->offset += assoclen; | ||
488 | break; | ||
489 | } | ||
490 | } | ||
491 | 442 | ||
492 | aead_request_set_assoc(&ctx->aead_req, assoc, ctx->aead_assoclen); | 443 | aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg, |
493 | aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used, | 444 | used, ctx->iv); |
494 | ctx->iv); | 445 | aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen); |
495 | 446 | ||
496 | err = af_alg_wait_for_completion(ctx->enc ? | 447 | err = af_alg_wait_for_completion(ctx->enc ? |
497 | crypto_aead_encrypt(&ctx->aead_req) : | 448 | crypto_aead_encrypt(&ctx->aead_req) : |
@@ -563,7 +514,8 @@ static struct proto_ops algif_aead_ops = { | |||
563 | 514 | ||
564 | static void *aead_bind(const char *name, u32 type, u32 mask) | 515 | static void *aead_bind(const char *name, u32 type, u32 mask) |
565 | { | 516 | { |
566 | return crypto_alloc_aead(name, type, mask); | 517 | return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW, |
518 | mask | CRYPTO_ALG_AEAD_NEW); | ||
567 | } | 519 | } |
568 | 520 | ||
569 | static void aead_release(void *private) | 521 | static void aead_release(void *private) |
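The rewritten recvmsg path relies on the new AEAD convention introduced by this series: a single scatterlist carries the associated data immediately followed by the plain/cipher text, and the AD length is declared separately with aead_request_set_ad() instead of being split out into its own list. The convention in isolation, as a hedged sketch (req, sg, buf, iv and the length variables are assumed to be set up by the caller):

	/* buf holds AD || plaintext, with authsize bytes of slack for the tag */
	sg_init_one(sg, buf, assoclen + ptlen + authsize);

	aead_request_set_crypt(req, sg, sg, ptlen, iv);	/* cryptlen excludes AD */
	aead_request_set_ad(req, assoclen);		/* AD sits at the front */
	int err = crypto_aead_encrypt(req);		/* tag lands after the text */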
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 8109aaad2726..150c2b6480ed 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c | |||
@@ -164,7 +164,7 @@ static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) | |||
164 | * Check whether seedlen is of sufficient size is done in RNG | 164 | * Check whether seedlen is of sufficient size is done in RNG |
165 | * implementations. | 165 | * implementations. |
166 | */ | 166 | */ |
167 | return crypto_rng_reset(private, (u8 *)seed, seedlen); | 167 | return crypto_rng_reset(private, seed, seedlen); |
168 | } | 168 | } |
169 | 169 | ||
170 | static const struct af_alg_type algif_type_rng = { | 170 | static const struct af_alg_type algif_type_rng = { |
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 765fe7609348..eff337ce9003 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c | |||
@@ -20,8 +20,6 @@ | |||
20 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | 22 | ||
23 | #include "internal.h" | ||
24 | |||
25 | #define DEFAULT_PRNG_KEY "0123456789abcdef" | 23 | #define DEFAULT_PRNG_KEY "0123456789abcdef" |
26 | #define DEFAULT_PRNG_KSZ 16 | 24 | #define DEFAULT_PRNG_KSZ 16 |
27 | #define DEFAULT_BLK_SZ 16 | 25 | #define DEFAULT_BLK_SZ 16 |
@@ -281,11 +279,11 @@ static void free_prng_context(struct prng_context *ctx) | |||
281 | } | 279 | } |
282 | 280 | ||
283 | static int reset_prng_context(struct prng_context *ctx, | 281 | static int reset_prng_context(struct prng_context *ctx, |
284 | unsigned char *key, size_t klen, | 282 | const unsigned char *key, size_t klen, |
285 | unsigned char *V, unsigned char *DT) | 283 | const unsigned char *V, const unsigned char *DT) |
286 | { | 284 | { |
287 | int ret; | 285 | int ret; |
288 | unsigned char *prng_key; | 286 | const unsigned char *prng_key; |
289 | 287 | ||
290 | spin_lock_bh(&ctx->prng_lock); | 288 | spin_lock_bh(&ctx->prng_lock); |
291 | ctx->flags |= PRNG_NEED_RESET; | 289 | ctx->flags |= PRNG_NEED_RESET; |
@@ -353,8 +351,9 @@ static void cprng_exit(struct crypto_tfm *tfm) | |||
353 | free_prng_context(crypto_tfm_ctx(tfm)); | 351 | free_prng_context(crypto_tfm_ctx(tfm)); |
354 | } | 352 | } |
355 | 353 | ||
356 | static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, | 354 | static int cprng_get_random(struct crypto_rng *tfm, |
357 | unsigned int dlen) | 355 | const u8 *src, unsigned int slen, |
356 | u8 *rdata, unsigned int dlen) | ||
358 | { | 357 | { |
359 | struct prng_context *prng = crypto_rng_ctx(tfm); | 358 | struct prng_context *prng = crypto_rng_ctx(tfm); |
360 | 359 | ||
@@ -367,11 +366,12 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, | |||
367 | * V and KEY are required during reset, and DT is optional, detected | 366 | * V and KEY are required during reset, and DT is optional, detected |
368 | * as being present by testing the length of the seed | 367 | * as being present by testing the length of the seed |
369 | */ | 368 | */ |
370 | static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | 369 | static int cprng_reset(struct crypto_rng *tfm, |
370 | const u8 *seed, unsigned int slen) | ||
371 | { | 371 | { |
372 | struct prng_context *prng = crypto_rng_ctx(tfm); | 372 | struct prng_context *prng = crypto_rng_ctx(tfm); |
373 | u8 *key = seed + DEFAULT_BLK_SZ; | 373 | const u8 *key = seed + DEFAULT_BLK_SZ; |
374 | u8 *dt = NULL; | 374 | const u8 *dt = NULL; |
375 | 375 | ||
376 | if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) | 376 | if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) |
377 | return -EINVAL; | 377 | return -EINVAL; |
@@ -387,18 +387,20 @@ static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | |||
387 | } | 387 | } |
388 | 388 | ||
389 | #ifdef CONFIG_CRYPTO_FIPS | 389 | #ifdef CONFIG_CRYPTO_FIPS |
390 | static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata, | 390 | static int fips_cprng_get_random(struct crypto_rng *tfm, |
391 | unsigned int dlen) | 391 | const u8 *src, unsigned int slen, |
392 | u8 *rdata, unsigned int dlen) | ||
392 | { | 393 | { |
393 | struct prng_context *prng = crypto_rng_ctx(tfm); | 394 | struct prng_context *prng = crypto_rng_ctx(tfm); |
394 | 395 | ||
395 | return get_prng_bytes(rdata, dlen, prng, 1); | 396 | return get_prng_bytes(rdata, dlen, prng, 1); |
396 | } | 397 | } |
397 | 398 | ||
398 | static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | 399 | static int fips_cprng_reset(struct crypto_rng *tfm, |
400 | const u8 *seed, unsigned int slen) | ||
399 | { | 401 | { |
400 | u8 rdata[DEFAULT_BLK_SZ]; | 402 | u8 rdata[DEFAULT_BLK_SZ]; |
401 | u8 *key = seed + DEFAULT_BLK_SZ; | 403 | const u8 *key = seed + DEFAULT_BLK_SZ; |
402 | int rc; | 404 | int rc; |
403 | 405 | ||
404 | struct prng_context *prng = crypto_rng_ctx(tfm); | 406 | struct prng_context *prng = crypto_rng_ctx(tfm); |
@@ -424,40 +426,32 @@ out: | |||
424 | } | 426 | } |
425 | #endif | 427 | #endif |
426 | 428 | ||
427 | static struct crypto_alg rng_algs[] = { { | 429 | static struct rng_alg rng_algs[] = { { |
428 | .cra_name = "stdrng", | 430 | .generate = cprng_get_random, |
429 | .cra_driver_name = "ansi_cprng", | 431 | .seed = cprng_reset, |
430 | .cra_priority = 100, | 432 | .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, |
431 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | 433 | .base = { |
432 | .cra_ctxsize = sizeof(struct prng_context), | 434 | .cra_name = "stdrng", |
433 | .cra_type = &crypto_rng_type, | 435 | .cra_driver_name = "ansi_cprng", |
434 | .cra_module = THIS_MODULE, | 436 | .cra_priority = 100, |
435 | .cra_init = cprng_init, | 437 | .cra_ctxsize = sizeof(struct prng_context), |
436 | .cra_exit = cprng_exit, | 438 | .cra_module = THIS_MODULE, |
437 | .cra_u = { | 439 | .cra_init = cprng_init, |
438 | .rng = { | 440 | .cra_exit = cprng_exit, |
439 | .rng_make_random = cprng_get_random, | ||
440 | .rng_reset = cprng_reset, | ||
441 | .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, | ||
442 | } | ||
443 | } | 441 | } |
444 | #ifdef CONFIG_CRYPTO_FIPS | 442 | #ifdef CONFIG_CRYPTO_FIPS |
445 | }, { | 443 | }, { |
446 | .cra_name = "fips(ansi_cprng)", | 444 | .generate = fips_cprng_get_random, |
447 | .cra_driver_name = "fips_ansi_cprng", | 445 | .seed = fips_cprng_reset, |
448 | .cra_priority = 300, | 446 | .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, |
449 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | 447 | .base = { |
450 | .cra_ctxsize = sizeof(struct prng_context), | 448 | .cra_name = "fips(ansi_cprng)", |
451 | .cra_type = &crypto_rng_type, | 449 | .cra_driver_name = "fips_ansi_cprng", |
452 | .cra_module = THIS_MODULE, | 450 | .cra_priority = 300, |
453 | .cra_init = cprng_init, | 451 | .cra_ctxsize = sizeof(struct prng_context), |
454 | .cra_exit = cprng_exit, | 452 | .cra_module = THIS_MODULE, |
455 | .cra_u = { | 453 | .cra_init = cprng_init, |
456 | .rng = { | 454 | .cra_exit = cprng_exit, |
457 | .rng_make_random = fips_cprng_get_random, | ||
458 | .rng_reset = fips_cprng_reset, | ||
459 | .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, | ||
460 | } | ||
461 | } | 455 | } |
462 | #endif | 456 | #endif |
463 | } }; | 457 | } }; |
@@ -465,12 +459,12 @@ static struct crypto_alg rng_algs[] = { { | |||
465 | /* Module initialization */ | 459 |
466 | static int __init prng_mod_init(void) | 460 | static int __init prng_mod_init(void) |
467 | { | 461 | { |
468 | return crypto_register_algs(rng_algs, ARRAY_SIZE(rng_algs)); | 462 | return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs)); |
469 | } | 463 | } |
470 | 464 | ||
471 | static void __exit prng_mod_fini(void) | 465 | static void __exit prng_mod_fini(void) |
472 | { | 466 | { |
473 | crypto_unregister_algs(rng_algs, ARRAY_SIZE(rng_algs)); | 467 | crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs)); |
474 | } | 468 | } |
475 | 469 | ||
476 | MODULE_LICENSE("GPL"); | 470 | MODULE_LICENSE("GPL"); |
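The ansi_cprng conversion is representative of what every RNG goes through in this series: rng_make_random/rng_reset in cra_u.rng become generate/seed on a struct rng_alg, the seed pointer turns const, and generate gains a src/slen pair for callers that supply additional input. A hedged, do-nothing sketch of the new shape (the my_* names are hypothetical and the generate body is a placeholder, not a real RNG):

	static int my_generate(struct crypto_rng *tfm,
			       const u8 *src, unsigned int slen,
			       u8 *dst, unsigned int dlen)
	{
		memset(dst, 0, dlen);	/* placeholder output, NOT random */
		return 0;
	}

	static int my_seed(struct crypto_rng *tfm,
			   const u8 *seed, unsigned int slen)
	{
		return slen < 48 ? -EINVAL : 0;
	}

	static struct rng_alg my_rng = {
		.generate = my_generate,
		.seed     = my_seed,
		.seedsize = 48,
		.base = {
			.cra_name        = "stdrng",
			.cra_driver_name = "my_rng",
			.cra_priority    = 100,
			.cra_module      = THIS_MODULE,
		},
	};

Registration then goes through crypto_register_rngs(&my_rng, 1), exactly as prng_mod_init() does above.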
diff --git a/crypto/authenc.c b/crypto/authenc.c index 78fb16cab13f..3e852299afb4 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <crypto/aead.h> | 13 | #include <crypto/internal/aead.h> |
14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
15 | #include <crypto/internal/skcipher.h> | 15 | #include <crypto/internal/skcipher.h> |
16 | #include <crypto/authenc.h> | 16 | #include <crypto/authenc.h> |
@@ -570,13 +570,14 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | |||
570 | crypto_ahash_alignmask(auth) + 1) + | 570 | crypto_ahash_alignmask(auth) + 1) + |
571 | crypto_ablkcipher_ivsize(enc); | 571 | crypto_ablkcipher_ivsize(enc); |
572 | 572 | ||
573 | tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) + | 573 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
574 | ctx->reqoff + | 574 | sizeof(struct authenc_request_ctx) + |
575 | max_t(unsigned int, | 575 | ctx->reqoff + |
576 | crypto_ahash_reqsize(auth) + | 576 | max_t(unsigned int, |
577 | sizeof(struct ahash_request), | 577 | crypto_ahash_reqsize(auth) + |
578 | sizeof(struct skcipher_givcrypt_request) + | 578 | sizeof(struct ahash_request), |
579 | crypto_ablkcipher_reqsize(enc)); | 579 | sizeof(struct skcipher_givcrypt_request) + |
580 | crypto_ablkcipher_reqsize(enc))); | ||
580 | 581 | ||
581 | return 0; | 582 | return 0; |
582 | 583 | ||
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 024bff2344fc..a3da6770bc9e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
@@ -12,7 +12,7 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/aead.h> | 15 | #include <crypto/internal/aead.h> |
16 | #include <crypto/internal/hash.h> | 16 | #include <crypto/internal/hash.h> |
17 | #include <crypto/internal/skcipher.h> | 17 | #include <crypto/internal/skcipher.h> |
18 | #include <crypto/authenc.h> | 18 | #include <crypto/authenc.h> |
@@ -662,13 +662,14 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) | |||
662 | crypto_ahash_alignmask(auth) + 1) + | 662 | crypto_ahash_alignmask(auth) + 1) + |
663 | crypto_ablkcipher_ivsize(enc); | 663 | crypto_ablkcipher_ivsize(enc); |
664 | 664 | ||
665 | tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) + | 665 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
666 | ctx->reqoff + | 666 | sizeof(struct authenc_esn_request_ctx) + |
667 | max_t(unsigned int, | 667 | ctx->reqoff + |
668 | crypto_ahash_reqsize(auth) + | 668 | max_t(unsigned int, |
669 | sizeof(struct ahash_request), | 669 | crypto_ahash_reqsize(auth) + |
670 | sizeof(struct skcipher_givcrypt_request) + | 670 | sizeof(struct ahash_request), |
671 | crypto_ablkcipher_reqsize(enc)); | 671 | sizeof(struct skcipher_givcrypt_request) + |
672 | crypto_ablkcipher_reqsize(enc))); | ||
672 | 673 | ||
673 | return 0; | 674 | return 0; |
674 | 675 | ||
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 0122bec38564..11b981492031 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <crypto/aead.h> | ||
17 | #include <crypto/internal/skcipher.h> | 18 | #include <crypto/internal/skcipher.h> |
18 | #include <crypto/scatterwalk.h> | 19 | #include <crypto/scatterwalk.h> |
19 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
diff --git a/crypto/ccm.c b/crypto/ccm.c index 003bbbd21a2b..a4d1a5eda18b 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c | |||
@@ -453,9 +453,9 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) | |||
453 | 453 | ||
454 | align = crypto_tfm_alg_alignmask(tfm); | 454 | align = crypto_tfm_alg_alignmask(tfm); |
455 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 455 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
456 | tfm->crt_aead.reqsize = align + | 456 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
457 | sizeof(struct crypto_ccm_req_priv_ctx) + | 457 | align + sizeof(struct crypto_ccm_req_priv_ctx) + |
458 | crypto_ablkcipher_reqsize(ctr); | 458 | crypto_ablkcipher_reqsize(ctr)); |
459 | 459 | ||
460 | return 0; | 460 | return 0; |
461 | 461 | ||
@@ -729,10 +729,10 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm) | |||
729 | 729 | ||
730 | align = crypto_aead_alignmask(aead); | 730 | align = crypto_aead_alignmask(aead); |
731 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 731 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
732 | tfm->crt_aead.reqsize = sizeof(struct aead_request) + | 732 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
733 | ALIGN(crypto_aead_reqsize(aead), | 733 | sizeof(struct aead_request) + |
734 | crypto_tfm_ctx_alignment()) + | 734 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + |
735 | align + 16; | 735 | align + 16); |
736 | 736 | ||
737 | return 0; | 737 | return 0; |
738 | } | 738 | } |
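authenc, authencesn, ccm and rfc4309 all receive the same mechanical change here: the per-request context size is published through crypto_aead_set_reqsize() instead of writing tfm->crt_aead.reqsize directly, which keeps the crt_aead layout private to the AEAD core while it is reworked. The idiom in isolation, with a hypothetical request context:

	struct my_req_ctx { u8 iv[16]; };	/* hypothetical per-request state */

	static int my_init_tfm(struct crypto_tfm *tfm)
	{
		crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
					sizeof(struct my_req_ctx));
		return 0;
	}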
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c new file mode 100644 index 000000000000..fa42e708aa96 --- /dev/null +++ b/crypto/chacha20_generic.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | * ChaCha20 256-bit cipher algorithm, RFC7539 | ||
3 | * | ||
4 | * Copyright (C) 2015 Martin Willi | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <crypto/algapi.h> | ||
13 | #include <linux/crypto.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | |||
17 | #define CHACHA20_NONCE_SIZE 16 | ||
18 | #define CHACHA20_KEY_SIZE 32 | ||
19 | #define CHACHA20_BLOCK_SIZE 64 | ||
20 | |||
21 | struct chacha20_ctx { | ||
22 | u32 key[8]; | ||
23 | }; | ||
24 | |||
25 | static inline u32 rotl32(u32 v, u8 n) | ||
26 | { | ||
27 | return (v << n) | (v >> (sizeof(v) * 8 - n)); | ||
28 | } | ||
29 | |||
30 | static inline u32 le32_to_cpuvp(const void *p) | ||
31 | { | ||
32 | return le32_to_cpup(p); | ||
33 | } | ||
34 | |||
35 | static void chacha20_block(u32 *state, void *stream) | ||
36 | { | ||
37 | u32 x[16], *out = stream; | ||
38 | int i; | ||
39 | |||
40 | for (i = 0; i < ARRAY_SIZE(x); i++) | ||
41 | x[i] = state[i]; | ||
42 | |||
43 | for (i = 0; i < 20; i += 2) { | ||
44 | x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16); | ||
45 | x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16); | ||
46 | x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16); | ||
47 | x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16); | ||
48 | |||
49 | x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12); | ||
50 | x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12); | ||
51 | x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12); | ||
52 | x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12); | ||
53 | |||
54 | x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8); | ||
55 | x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8); | ||
56 | x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8); | ||
57 | x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8); | ||
58 | |||
59 | x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7); | ||
60 | x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7); | ||
61 | x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7); | ||
62 | x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7); | ||
63 | |||
64 | x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16); | ||
65 | x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16); | ||
66 | x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16); | ||
67 | x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16); | ||
68 | |||
69 | x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12); | ||
70 | x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12); | ||
71 | x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12); | ||
72 | x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12); | ||
73 | |||
74 | x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8); | ||
75 | x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8); | ||
76 | x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8); | ||
77 | x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8); | ||
78 | |||
79 | x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7); | ||
80 | x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7); | ||
81 | x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7); | ||
82 | x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7); | ||
83 | } | ||
84 | |||
85 | for (i = 0; i < ARRAY_SIZE(x); i++) | ||
86 | out[i] = cpu_to_le32(x[i] + state[i]); | ||
87 | |||
88 | state[12]++; | ||
89 | } | ||
90 | |||
91 | static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, | ||
92 | unsigned int bytes) | ||
93 | { | ||
94 | u8 stream[CHACHA20_BLOCK_SIZE]; | ||
95 | |||
96 | if (dst != src) | ||
97 | memcpy(dst, src, bytes); | ||
98 | |||
99 | while (bytes >= CHACHA20_BLOCK_SIZE) { | ||
100 | chacha20_block(state, stream); | ||
101 | crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE); | ||
102 | bytes -= CHACHA20_BLOCK_SIZE; | ||
103 | dst += CHACHA20_BLOCK_SIZE; | ||
104 | } | ||
105 | if (bytes) { | ||
106 | chacha20_block(state, stream); | ||
107 | crypto_xor(dst, stream, bytes); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) | ||
112 | { | ||
113 | static const char constant[16] = "expand 32-byte k"; | ||
114 | |||
115 | state[0] = le32_to_cpuvp(constant + 0); | ||
116 | state[1] = le32_to_cpuvp(constant + 4); | ||
117 | state[2] = le32_to_cpuvp(constant + 8); | ||
118 | state[3] = le32_to_cpuvp(constant + 12); | ||
119 | state[4] = ctx->key[0]; | ||
120 | state[5] = ctx->key[1]; | ||
121 | state[6] = ctx->key[2]; | ||
122 | state[7] = ctx->key[3]; | ||
123 | state[8] = ctx->key[4]; | ||
124 | state[9] = ctx->key[5]; | ||
125 | state[10] = ctx->key[6]; | ||
126 | state[11] = ctx->key[7]; | ||
127 | state[12] = le32_to_cpuvp(iv + 0); | ||
128 | state[13] = le32_to_cpuvp(iv + 4); | ||
129 | state[14] = le32_to_cpuvp(iv + 8); | ||
130 | state[15] = le32_to_cpuvp(iv + 12); | ||
131 | } | ||
132 | |||
133 | static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
134 | unsigned int keysize) | ||
135 | { | ||
136 | struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm); | ||
137 | int i; | ||
138 | |||
139 | if (keysize != CHACHA20_KEY_SIZE) | ||
140 | return -EINVAL; | ||
141 | |||
142 | for (i = 0; i < ARRAY_SIZE(ctx->key); i++) | ||
143 | ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32)); | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
149 | struct scatterlist *src, unsigned int nbytes) | ||
150 | { | ||
151 | struct blkcipher_walk walk; | ||
152 | u32 state[16]; | ||
153 | int err; | ||
154 | |||
155 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
156 | err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); | ||
157 | |||
158 | chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); | ||
159 | |||
160 | while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { | ||
161 | chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, | ||
162 | rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); | ||
163 | err = blkcipher_walk_done(desc, &walk, | ||
164 | walk.nbytes % CHACHA20_BLOCK_SIZE); | ||
165 | } | ||
166 | |||
167 | if (walk.nbytes) { | ||
168 | chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, | ||
169 | walk.nbytes); | ||
170 | err = blkcipher_walk_done(desc, &walk, 0); | ||
171 | } | ||
172 | |||
173 | return err; | ||
174 | } | ||
175 | |||
176 | static struct crypto_alg alg = { | ||
177 | .cra_name = "chacha20", | ||
178 | .cra_driver_name = "chacha20-generic", | ||
179 | .cra_priority = 100, | ||
180 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
181 | .cra_blocksize = 1, | ||
182 | .cra_type = &crypto_blkcipher_type, | ||
183 | .cra_ctxsize = sizeof(struct chacha20_ctx), | ||
184 | .cra_alignmask = sizeof(u32) - 1, | ||
185 | .cra_module = THIS_MODULE, | ||
186 | .cra_u = { | ||
187 | .blkcipher = { | ||
188 | .min_keysize = CHACHA20_KEY_SIZE, | ||
189 | .max_keysize = CHACHA20_KEY_SIZE, | ||
190 | .ivsize = CHACHA20_NONCE_SIZE, | ||
191 | .geniv = "seqiv", | ||
192 | .setkey = chacha20_setkey, | ||
193 | .encrypt = chacha20_crypt, | ||
194 | .decrypt = chacha20_crypt, | ||
195 | }, | ||
196 | }, | ||
197 | }; | ||
198 | |||
199 | static int __init chacha20_generic_mod_init(void) | ||
200 | { | ||
201 | return crypto_register_alg(&alg); | ||
202 | } | ||
203 | |||
204 | static void __exit chacha20_generic_mod_fini(void) | ||
205 | { | ||
206 | crypto_unregister_alg(&alg); | ||
207 | } | ||
208 | |||
209 | module_init(chacha20_generic_mod_init); | ||
210 | module_exit(chacha20_generic_mod_fini); | ||
211 | |||
212 | MODULE_LICENSE("GPL"); | ||
213 | MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); | ||
214 | MODULE_DESCRIPTION("chacha20 cipher algorithm"); | ||
215 | MODULE_ALIAS_CRYPTO("chacha20"); | ||
216 | MODULE_ALIAS_CRYPTO("chacha20-generic"); | ||
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c new file mode 100644 index 000000000000..7b46ed799a64 --- /dev/null +++ b/crypto/chacha20poly1305.c | |||
@@ -0,0 +1,695 @@ | |||
1 | /* | ||
2 | * ChaCha20-Poly1305 AEAD, RFC7539 | ||
3 | * | ||
4 | * Copyright (C) 2015 Martin Willi | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <crypto/internal/aead.h> | ||
13 | #include <crypto/internal/hash.h> | ||
14 | #include <crypto/internal/skcipher.h> | ||
15 | #include <crypto/scatterwalk.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | #include "internal.h" | ||
22 | |||
23 | #define POLY1305_BLOCK_SIZE 16 | ||
24 | #define POLY1305_DIGEST_SIZE 16 | ||
25 | #define POLY1305_KEY_SIZE 32 | ||
26 | #define CHACHA20_KEY_SIZE 32 | ||
27 | #define CHACHA20_IV_SIZE 16 | ||
28 | #define CHACHAPOLY_IV_SIZE 12 | ||
29 | |||
30 | struct chachapoly_instance_ctx { | ||
31 | struct crypto_skcipher_spawn chacha; | ||
32 | struct crypto_ahash_spawn poly; | ||
33 | unsigned int saltlen; | ||
34 | }; | ||
35 | |||
36 | struct chachapoly_ctx { | ||
37 | struct crypto_ablkcipher *chacha; | ||
38 | struct crypto_ahash *poly; | ||
39 | /* key bytes we use for the ChaCha20 IV */ | ||
40 | unsigned int saltlen; | ||
41 | u8 salt[]; | ||
42 | }; | ||
43 | |||
44 | struct poly_req { | ||
45 | /* zero byte padding for AD/ciphertext, as needed */ | ||
46 | u8 pad[POLY1305_BLOCK_SIZE]; | ||
47 | /* tail data with AD/ciphertext lengths */ | ||
48 | struct { | ||
49 | __le64 assoclen; | ||
50 | __le64 cryptlen; | ||
51 | } tail; | ||
52 | struct scatterlist src[1]; | ||
53 | struct ahash_request req; /* must be last member */ | ||
54 | }; | ||
55 | |||
56 | struct chacha_req { | ||
57 | u8 iv[CHACHA20_IV_SIZE]; | ||
58 | struct scatterlist src[1]; | ||
59 | struct ablkcipher_request req; /* must be last member */ | ||
60 | }; | ||
61 | |||
62 | struct chachapoly_req_ctx { | ||
63 | /* the key we generate for Poly1305 using ChaCha20 */ | ||
64 | u8 key[POLY1305_KEY_SIZE]; | ||
65 | /* calculated Poly1305 tag */ | ||
66 | u8 tag[POLY1305_DIGEST_SIZE]; | ||
67 | /* length of data to en/decrypt, without ICV */ | ||
68 | unsigned int cryptlen; | ||
69 | union { | ||
70 | struct poly_req poly; | ||
71 | struct chacha_req chacha; | ||
72 | } u; | ||
73 | }; | ||
74 | |||
75 | static inline void async_done_continue(struct aead_request *req, int err, | ||
76 | int (*cont)(struct aead_request *)) | ||
77 | { | ||
78 | if (!err) | ||
79 | err = cont(req); | ||
80 | |||
81 | if (err != -EINPROGRESS && err != -EBUSY) | ||
82 | aead_request_complete(req, err); | ||
83 | } | ||
84 | |||
85 | static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb) | ||
86 | { | ||
87 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
88 | __le32 leicb = cpu_to_le32(icb); | ||
89 | |||
90 | memcpy(iv, &leicb, sizeof(leicb)); | ||
91 | memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen); | ||
92 | memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv, | ||
93 | CHACHA20_IV_SIZE - sizeof(leicb) - ctx->saltlen); | ||
94 | } | ||
95 | |||
96 | static int poly_verify_tag(struct aead_request *req) | ||
97 | { | ||
98 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
99 | u8 tag[sizeof(rctx->tag)]; | ||
100 | |||
101 | scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0); | ||
102 | if (crypto_memneq(tag, rctx->tag, sizeof(tag))) | ||
103 | return -EBADMSG; | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int poly_copy_tag(struct aead_request *req) | ||
108 | { | ||
109 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
110 | |||
111 | scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen, | ||
112 | sizeof(rctx->tag), 1); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static void chacha_decrypt_done(struct crypto_async_request *areq, int err) | ||
117 | { | ||
118 | async_done_continue(areq->data, err, poly_verify_tag); | ||
119 | } | ||
120 | |||
121 | static int chacha_decrypt(struct aead_request *req) | ||
122 | { | ||
123 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
124 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
125 | struct chacha_req *creq = &rctx->u.chacha; | ||
126 | int err; | ||
127 | |||
128 | chacha_iv(creq->iv, req, 1); | ||
129 | |||
130 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), | ||
131 | chacha_decrypt_done, req); | ||
132 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); | ||
133 | ablkcipher_request_set_crypt(&creq->req, req->src, req->dst, | ||
134 | rctx->cryptlen, creq->iv); | ||
135 | err = crypto_ablkcipher_decrypt(&creq->req); | ||
136 | if (err) | ||
137 | return err; | ||
138 | |||
139 | return poly_verify_tag(req); | ||
140 | } | ||
141 | |||
142 | static int poly_tail_continue(struct aead_request *req) | ||
143 | { | ||
144 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
145 | |||
146 | if (rctx->cryptlen == req->cryptlen) /* encrypting */ | ||
147 | return poly_copy_tag(req); | ||
148 | |||
149 | return chacha_decrypt(req); | ||
150 | } | ||
151 | |||
152 | static void poly_tail_done(struct crypto_async_request *areq, int err) | ||
153 | { | ||
154 | async_done_continue(areq->data, err, poly_tail_continue); | ||
155 | } | ||
156 | |||
157 | static int poly_tail(struct aead_request *req) | ||
158 | { | ||
159 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
160 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
161 | struct poly_req *preq = &rctx->u.poly; | ||
162 | __le64 len; | ||
163 | int err; | ||
164 | |||
165 | sg_init_table(preq->src, 1); | ||
166 | len = cpu_to_le64(req->assoclen); | ||
167 | memcpy(&preq->tail.assoclen, &len, sizeof(len)); | ||
168 | len = cpu_to_le64(rctx->cryptlen); | ||
169 | memcpy(&preq->tail.cryptlen, &len, sizeof(len)); | ||
170 | sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail)); | ||
171 | |||
172 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
173 | poly_tail_done, req); | ||
174 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
175 | ahash_request_set_crypt(&preq->req, preq->src, | ||
176 | rctx->tag, sizeof(preq->tail)); | ||
177 | |||
178 | err = crypto_ahash_finup(&preq->req); | ||
179 | if (err) | ||
180 | return err; | ||
181 | |||
182 | return poly_tail_continue(req); | ||
183 | } | ||
184 | |||
185 | static void poly_cipherpad_done(struct crypto_async_request *areq, int err) | ||
186 | { | ||
187 | async_done_continue(areq->data, err, poly_tail); | ||
188 | } | ||
189 | |||
190 | static int poly_cipherpad(struct aead_request *req) | ||
191 | { | ||
192 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
193 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
194 | struct poly_req *preq = &rctx->u.poly; | ||
195 | unsigned int padlen, bs = POLY1305_BLOCK_SIZE; | ||
196 | int err; | ||
197 | |||
198 | padlen = (bs - (rctx->cryptlen % bs)) % bs; | ||
199 | memset(preq->pad, 0, sizeof(preq->pad)); | ||
200 | sg_init_table(preq->src, 1); | ||
201 | sg_set_buf(preq->src, &preq->pad, padlen); | ||
202 | |||
203 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
204 | poly_cipherpad_done, req); | ||
205 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
206 | ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); | ||
207 | |||
208 | err = crypto_ahash_update(&preq->req); | ||
209 | if (err) | ||
210 | return err; | ||
211 | |||
212 | return poly_tail(req); | ||
213 | } | ||
214 | |||
215 | static void poly_cipher_done(struct crypto_async_request *areq, int err) | ||
216 | { | ||
217 | async_done_continue(areq->data, err, poly_cipherpad); | ||
218 | } | ||
219 | |||
220 | static int poly_cipher(struct aead_request *req) | ||
221 | { | ||
222 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
223 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
224 | struct poly_req *preq = &rctx->u.poly; | ||
225 | struct scatterlist *crypt = req->src; | ||
226 | int err; | ||
227 | |||
228 | if (rctx->cryptlen == req->cryptlen) /* encrypting */ | ||
229 | crypt = req->dst; | ||
230 | |||
231 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
232 | poly_cipher_done, req); | ||
233 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
234 | ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); | ||
235 | |||
236 | err = crypto_ahash_update(&preq->req); | ||
237 | if (err) | ||
238 | return err; | ||
239 | |||
240 | return poly_cipherpad(req); | ||
241 | } | ||
242 | |||
243 | static void poly_adpad_done(struct crypto_async_request *areq, int err) | ||
244 | { | ||
245 | async_done_continue(areq->data, err, poly_cipher); | ||
246 | } | ||
247 | |||
248 | static int poly_adpad(struct aead_request *req) | ||
249 | { | ||
250 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
251 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
252 | struct poly_req *preq = &rctx->u.poly; | ||
253 | unsigned int padlen, bs = POLY1305_BLOCK_SIZE; | ||
254 | int err; | ||
255 | |||
256 | padlen = (bs - (req->assoclen % bs)) % bs; | ||
257 | memset(preq->pad, 0, sizeof(preq->pad)); | ||
258 | sg_init_table(preq->src, 1); | ||
259 | sg_set_buf(preq->src, preq->pad, padlen); | ||
260 | |||
261 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
262 | poly_adpad_done, req); | ||
263 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
264 | ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); | ||
265 | |||
266 | err = crypto_ahash_update(&preq->req); | ||
267 | if (err) | ||
268 | return err; | ||
269 | |||
270 | return poly_cipher(req); | ||
271 | } | ||
272 | |||
273 | static void poly_ad_done(struct crypto_async_request *areq, int err) | ||
274 | { | ||
275 | async_done_continue(areq->data, err, poly_adpad); | ||
276 | } | ||
277 | |||
278 | static int poly_ad(struct aead_request *req) | ||
279 | { | ||
280 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
281 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
282 | struct poly_req *preq = &rctx->u.poly; | ||
283 | int err; | ||
284 | |||
285 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
286 | poly_ad_done, req); | ||
287 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
288 | ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen); | ||
289 | |||
290 | err = crypto_ahash_update(&preq->req); | ||
291 | if (err) | ||
292 | return err; | ||
293 | |||
294 | return poly_adpad(req); | ||
295 | } | ||
296 | |||
297 | static void poly_setkey_done(struct crypto_async_request *areq, int err) | ||
298 | { | ||
299 | async_done_continue(areq->data, err, poly_ad); | ||
300 | } | ||
301 | |||
302 | static int poly_setkey(struct aead_request *req) | ||
303 | { | ||
304 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
305 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
306 | struct poly_req *preq = &rctx->u.poly; | ||
307 | int err; | ||
308 | |||
309 | sg_init_table(preq->src, 1); | ||
310 | sg_set_buf(preq->src, rctx->key, sizeof(rctx->key)); | ||
311 | |||
312 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
313 | poly_setkey_done, req); | ||
314 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
315 | ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); | ||
316 | |||
317 | err = crypto_ahash_update(&preq->req); | ||
318 | if (err) | ||
319 | return err; | ||
320 | |||
321 | return poly_ad(req); | ||
322 | } | ||
323 | |||
324 | static void poly_init_done(struct crypto_async_request *areq, int err) | ||
325 | { | ||
326 | async_done_continue(areq->data, err, poly_setkey); | ||
327 | } | ||
328 | |||
329 | static int poly_init(struct aead_request *req) | ||
330 | { | ||
331 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
332 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
333 | struct poly_req *preq = &rctx->u.poly; | ||
334 | int err; | ||
335 | |||
336 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | ||
337 | poly_init_done, req); | ||
338 | ahash_request_set_tfm(&preq->req, ctx->poly); | ||
339 | |||
340 | err = crypto_ahash_init(&preq->req); | ||
341 | if (err) | ||
342 | return err; | ||
343 | |||
344 | return poly_setkey(req); | ||
345 | } | ||
346 | |||
347 | static void poly_genkey_done(struct crypto_async_request *areq, int err) | ||
348 | { | ||
349 | async_done_continue(areq->data, err, poly_init); | ||
350 | } | ||
351 | |||
352 | static int poly_genkey(struct aead_request *req) | ||
353 | { | ||
354 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
355 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
356 | struct chacha_req *creq = &rctx->u.chacha; | ||
357 | int err; | ||
358 | |||
359 | sg_init_table(creq->src, 1); | ||
360 | memset(rctx->key, 0, sizeof(rctx->key)); | ||
361 | sg_set_buf(creq->src, rctx->key, sizeof(rctx->key)); | ||
362 | |||
363 | chacha_iv(creq->iv, req, 0); | ||
364 | |||
365 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), | ||
366 | poly_genkey_done, req); | ||
367 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); | ||
368 | ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src, | ||
369 | POLY1305_KEY_SIZE, creq->iv); | ||
370 | |||
371 | err = crypto_ablkcipher_decrypt(&creq->req); | ||
372 | if (err) | ||
373 | return err; | ||
374 | |||
375 | return poly_init(req); | ||
376 | } | ||
377 | |||
378 | static void chacha_encrypt_done(struct crypto_async_request *areq, int err) | ||
379 | { | ||
380 | async_done_continue(areq->data, err, poly_genkey); | ||
381 | } | ||
382 | |||
383 | static int chacha_encrypt(struct aead_request *req) | ||
384 | { | ||
385 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
386 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
387 | struct chacha_req *creq = &rctx->u.chacha; | ||
388 | int err; | ||
389 | |||
390 | chacha_iv(creq->iv, req, 1); | ||
391 | |||
392 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), | ||
393 | chacha_encrypt_done, req); | ||
394 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); | ||
395 | ablkcipher_request_set_crypt(&creq->req, req->src, req->dst, | ||
396 | req->cryptlen, creq->iv); | ||
397 | err = crypto_ablkcipher_encrypt(&creq->req); | ||
398 | if (err) | ||
399 | return err; | ||
400 | |||
401 | return poly_genkey(req); | ||
402 | } | ||
403 | |||
404 | static int chachapoly_encrypt(struct aead_request *req) | ||
405 | { | ||
406 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
407 | |||
408 | rctx->cryptlen = req->cryptlen; | ||
409 | |||
410 | /* encrypt call chain: | ||
411 | * - chacha_encrypt/done() | ||
412 | * - poly_genkey/done() | ||
413 | * - poly_init/done() | ||
414 | * - poly_setkey/done() | ||
415 | * - poly_ad/done() | ||
416 | * - poly_adpad/done() | ||
417 | * - poly_cipher/done() | ||
418 | * - poly_cipherpad/done() | ||
419 | * - poly_tail/done/continue() | ||
420 | * - poly_copy_tag() | ||
421 | */ | ||
422 | return chacha_encrypt(req); | ||
423 | } | ||
424 | |||
425 | static int chachapoly_decrypt(struct aead_request *req) | ||
426 | { | ||
427 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
428 | |||
429 | if (req->cryptlen < POLY1305_DIGEST_SIZE) | ||
430 | return -EINVAL; | ||
431 | rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; | ||
432 | |||
433 | /* decrypt call chain: | ||
434 | * - poly_genkey/done() | ||
435 | * - poly_init/done() | ||
436 | * - poly_setkey/done() | ||
437 | * - poly_ad/done() | ||
438 | * - poly_adpad/done() | ||
439 | * - poly_cipher/done() | ||
440 | * - poly_cipherpad/done() | ||
441 | * - poly_tail/done/continue() | ||
442 | * - chacha_decrypt/done() | ||
443 | * - poly_verify_tag() | ||
444 | */ | ||
445 | return poly_genkey(req); | ||
446 | } | ||
447 | |||
448 | static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, | ||
449 | unsigned int keylen) | ||
450 | { | ||
451 | struct chachapoly_ctx *ctx = crypto_aead_ctx(aead); | ||
452 | int err; | ||
453 | |||
454 | if (keylen != ctx->saltlen + CHACHA20_KEY_SIZE) | ||
455 | return -EINVAL; | ||
456 | |||
457 | keylen -= ctx->saltlen; | ||
458 | memcpy(ctx->salt, key + keylen, ctx->saltlen); | ||
459 | |||
460 | crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); | ||
461 | crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & | ||
462 | CRYPTO_TFM_REQ_MASK); | ||
463 | |||
464 | err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen); | ||
465 | crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) & | ||
466 | CRYPTO_TFM_RES_MASK); | ||
467 | return err; | ||
468 | } | ||
469 | |||
470 | static int chachapoly_setauthsize(struct crypto_aead *tfm, | ||
471 | unsigned int authsize) | ||
472 | { | ||
473 | if (authsize != POLY1305_DIGEST_SIZE) | ||
474 | return -EINVAL; | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | static int chachapoly_init(struct crypto_tfm *tfm) | ||
480 | { | ||
481 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | ||
482 | struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
483 | struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm); | ||
484 | struct crypto_ablkcipher *chacha; | ||
485 | struct crypto_ahash *poly; | ||
486 | unsigned long align; | ||
487 | |||
488 | poly = crypto_spawn_ahash(&ictx->poly); | ||
489 | if (IS_ERR(poly)) | ||
490 | return PTR_ERR(poly); | ||
491 | |||
492 | chacha = crypto_spawn_skcipher(&ictx->chacha); | ||
493 | if (IS_ERR(chacha)) { | ||
494 | crypto_free_ahash(poly); | ||
495 | return PTR_ERR(chacha); | ||
496 | } | ||
497 | |||
498 | ctx->chacha = chacha; | ||
499 | ctx->poly = poly; | ||
500 | ctx->saltlen = ictx->saltlen; | ||
501 | |||
502 | align = crypto_tfm_alg_alignmask(tfm); | ||
503 | align &= ~(crypto_tfm_ctx_alignment() - 1); | ||
504 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | ||
505 | align + offsetof(struct chachapoly_req_ctx, u) + | ||
506 | max(offsetof(struct chacha_req, req) + | ||
507 | sizeof(struct ablkcipher_request) + | ||
508 | crypto_ablkcipher_reqsize(chacha), | ||
509 | offsetof(struct poly_req, req) + | ||
510 | sizeof(struct ahash_request) + | ||
511 | crypto_ahash_reqsize(poly))); | ||
512 | |||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | static void chachapoly_exit(struct crypto_tfm *tfm) | ||
517 | { | ||
518 | struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm); | ||
519 | |||
520 | crypto_free_ahash(ctx->poly); | ||
521 | crypto_free_ablkcipher(ctx->chacha); | ||
522 | } | ||
523 | |||
524 | static struct crypto_instance *chachapoly_alloc(struct rtattr **tb, | ||
525 | const char *name, | ||
526 | unsigned int ivsize) | ||
527 | { | ||
528 | struct crypto_attr_type *algt; | ||
529 | struct crypto_instance *inst; | ||
530 | struct crypto_alg *chacha; | ||
531 | struct crypto_alg *poly; | ||
532 | struct ahash_alg *poly_ahash; | ||
533 | struct chachapoly_instance_ctx *ctx; | ||
534 | const char *chacha_name, *poly_name; | ||
535 | int err; | ||
536 | |||
537 | if (ivsize > CHACHAPOLY_IV_SIZE) | ||
538 | return ERR_PTR(-EINVAL); | ||
539 | |||
540 | algt = crypto_get_attr_type(tb); | ||
541 | if (IS_ERR(algt)) | ||
542 | return ERR_CAST(algt); | ||
543 | |||
544 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | ||
545 | return ERR_PTR(-EINVAL); | ||
546 | |||
547 | chacha_name = crypto_attr_alg_name(tb[1]); | ||
548 | if (IS_ERR(chacha_name)) | ||
549 | return ERR_CAST(chacha_name); | ||
550 | poly_name = crypto_attr_alg_name(tb[2]); | ||
551 | if (IS_ERR(poly_name)) | ||
552 | return ERR_CAST(poly_name); | ||
553 | |||
554 | poly = crypto_find_alg(poly_name, &crypto_ahash_type, | ||
555 | CRYPTO_ALG_TYPE_HASH, | ||
556 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
557 | if (IS_ERR(poly)) | ||
558 | return ERR_CAST(poly); | ||
559 | |||
560 | err = -ENOMEM; | ||
561 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
562 | if (!inst) | ||
563 | goto out_put_poly; | ||
564 | |||
565 | ctx = crypto_instance_ctx(inst); | ||
566 | ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; | ||
567 | poly_ahash = container_of(poly, struct ahash_alg, halg.base); | ||
568 | err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst); | ||
569 | if (err) | ||
570 | goto err_free_inst; | ||
571 | |||
572 | crypto_set_skcipher_spawn(&ctx->chacha, inst); | ||
573 | err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, | ||
574 | crypto_requires_sync(algt->type, | ||
575 | algt->mask)); | ||
576 | if (err) | ||
577 | goto err_drop_poly; | ||
578 | |||
579 | chacha = crypto_skcipher_spawn_alg(&ctx->chacha); | ||
580 | |||
581 | err = -EINVAL; | ||
582 | /* Need 16-byte IV size, including Initial Block Counter value */ | ||
583 | if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE) | ||
584 | goto out_drop_chacha; | ||
585 | /* Not a stream cipher? */ | ||
586 | if (chacha->cra_blocksize != 1) | ||
587 | goto out_drop_chacha; | ||
588 | |||
589 | err = -ENAMETOOLONG; | ||
590 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | ||
591 | "%s(%s,%s)", name, chacha_name, | ||
592 | poly_name) >= CRYPTO_MAX_ALG_NAME) | ||
593 | goto out_drop_chacha; | ||
594 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
595 | "%s(%s,%s)", name, chacha->cra_driver_name, | ||
596 | poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
597 | goto out_drop_chacha; | ||
598 | |||
599 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | ||
600 | inst->alg.cra_flags |= (chacha->cra_flags | | ||
601 | poly->cra_flags) & CRYPTO_ALG_ASYNC; | ||
602 | inst->alg.cra_priority = (chacha->cra_priority + | ||
603 | poly->cra_priority) / 2; | ||
604 | inst->alg.cra_blocksize = 1; | ||
605 | inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask; | ||
606 | inst->alg.cra_type = &crypto_nivaead_type; | ||
607 | inst->alg.cra_aead.ivsize = ivsize; | ||
608 | inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE; | ||
609 | inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; | ||
610 | inst->alg.cra_init = chachapoly_init; | ||
611 | inst->alg.cra_exit = chachapoly_exit; | ||
612 | inst->alg.cra_aead.encrypt = chachapoly_encrypt; | ||
613 | inst->alg.cra_aead.decrypt = chachapoly_decrypt; | ||
614 | inst->alg.cra_aead.setkey = chachapoly_setkey; | ||
615 | inst->alg.cra_aead.setauthsize = chachapoly_setauthsize; | ||
616 | inst->alg.cra_aead.geniv = "seqiv"; | ||
617 | |||
618 | out: | ||
619 | crypto_mod_put(poly); | ||
620 | return inst; | ||
621 | |||
622 | out_drop_chacha: | ||
623 | crypto_drop_skcipher(&ctx->chacha); | ||
624 | err_drop_poly: | ||
625 | crypto_drop_ahash(&ctx->poly); | ||
626 | err_free_inst: | ||
627 | kfree(inst); | ||
628 | out_put_poly: | ||
629 | inst = ERR_PTR(err); | ||
630 | goto out; | ||
631 | } | ||
632 | |||
633 | static struct crypto_instance *rfc7539_alloc(struct rtattr **tb) | ||
634 | { | ||
635 | return chachapoly_alloc(tb, "rfc7539", 12); | ||
636 | } | ||
637 | |||
638 | static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb) | ||
639 | { | ||
640 | return chachapoly_alloc(tb, "rfc7539esp", 8); | ||
641 | } | ||
642 | |||
643 | static void chachapoly_free(struct crypto_instance *inst) | ||
644 | { | ||
645 | struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
646 | |||
647 | crypto_drop_skcipher(&ctx->chacha); | ||
648 | crypto_drop_ahash(&ctx->poly); | ||
649 | kfree(inst); | ||
650 | } | ||
651 | |||
652 | static struct crypto_template rfc7539_tmpl = { | ||
653 | .name = "rfc7539", | ||
654 | .alloc = rfc7539_alloc, | ||
655 | .free = chachapoly_free, | ||
656 | .module = THIS_MODULE, | ||
657 | }; | ||
658 | |||
659 | static struct crypto_template rfc7539esp_tmpl = { | ||
660 | .name = "rfc7539esp", | ||
661 | .alloc = rfc7539esp_alloc, | ||
662 | .free = chachapoly_free, | ||
663 | .module = THIS_MODULE, | ||
664 | }; | ||
665 | |||
666 | static int __init chacha20poly1305_module_init(void) | ||
667 | { | ||
668 | int err; | ||
669 | |||
670 | err = crypto_register_template(&rfc7539_tmpl); | ||
671 | if (err) | ||
672 | return err; | ||
673 | |||
674 | err = crypto_register_template(&rfc7539esp_tmpl); | ||
675 | if (err) | ||
676 | crypto_unregister_template(&rfc7539_tmpl); | ||
677 | |||
678 | return err; | ||
679 | } | ||
680 | |||
681 | static void __exit chacha20poly1305_module_exit(void) | ||
682 | { | ||
683 | crypto_unregister_template(&rfc7539esp_tmpl); | ||
684 | crypto_unregister_template(&rfc7539_tmpl); | ||
685 | } | ||
686 | |||
687 | module_init(chacha20poly1305_module_init); | ||
688 | module_exit(chacha20poly1305_module_exit); | ||
689 | |||
690 | MODULE_LICENSE("GPL"); | ||
691 | MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); | ||
692 | MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD"); | ||
693 | MODULE_ALIAS_CRYPTO("chacha20poly1305"); | ||
694 | MODULE_ALIAS_CRYPTO("rfc7539"); | ||
695 | MODULE_ALIAS_CRYPTO("rfc7539esp"); | ||
diff --git a/crypto/chainiv.c b/crypto/chainiv.c index 63c17d5992f7..b4340018c8d4 100644 --- a/crypto/chainiv.c +++ b/crypto/chainiv.c | |||
@@ -80,44 +80,37 @@ unlock: | |||
80 | return err; | 80 | return err; |
81 | } | 81 | } |
82 | 82 | ||
83 | static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) | 83 | static int chainiv_init_common(struct crypto_tfm *tfm, char iv[]) |
84 | { | 84 | { |
85 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 85 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); |
86 | struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | ||
87 | int err = 0; | 86 | int err = 0; |
88 | 87 | ||
89 | spin_lock_bh(&ctx->lock); | ||
90 | if (crypto_ablkcipher_crt(geniv)->givencrypt != | ||
91 | chainiv_givencrypt_first) | ||
92 | goto unlock; | ||
93 | |||
94 | crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; | ||
95 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, | ||
96 | crypto_ablkcipher_ivsize(geniv)); | ||
97 | |||
98 | unlock: | ||
99 | spin_unlock_bh(&ctx->lock); | ||
100 | |||
101 | if (err) | ||
102 | return err; | ||
103 | |||
104 | return chainiv_givencrypt(req); | ||
105 | } | ||
106 | |||
107 | static int chainiv_init_common(struct crypto_tfm *tfm) | ||
108 | { | ||
109 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); | 88 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); |
110 | 89 | ||
111 | return skcipher_geniv_init(tfm); | 90 | if (iv) { |
91 | err = crypto_rng_get_bytes(crypto_default_rng, iv, | ||
92 | crypto_ablkcipher_ivsize(geniv)); | ||
93 | crypto_put_default_rng(); | ||
94 | } | ||
95 | |||
96 | return err ?: skcipher_geniv_init(tfm); | ||
112 | } | 97 | } |
113 | 98 | ||
114 | static int chainiv_init(struct crypto_tfm *tfm) | 99 | static int chainiv_init(struct crypto_tfm *tfm) |
115 | { | 100 | { |
101 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); | ||
116 | struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); | 102 | struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); |
103 | char *iv; | ||
117 | 104 | ||
118 | spin_lock_init(&ctx->lock); | 105 | spin_lock_init(&ctx->lock); |
119 | 106 | ||
120 | return chainiv_init_common(tfm); | 107 | iv = NULL; |
108 | if (!crypto_get_default_rng()) { | ||
109 | crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; | ||
110 | iv = ctx->iv; | ||
111 | } | ||
112 | |||
113 | return chainiv_init_common(tfm, iv); | ||
121 | } | 114 | } |
122 | 115 | ||
123 | static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) | 116 | static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) |
@@ -205,33 +198,6 @@ postpone: | |||
205 | return async_chainiv_postpone_request(req); | 198 | return async_chainiv_postpone_request(req); |
206 | } | 199 | } |
207 | 200 | ||
208 | static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) | ||
209 | { | ||
210 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | ||
211 | struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | ||
212 | int err = 0; | ||
213 | |||
214 | if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) | ||
215 | goto out; | ||
216 | |||
217 | if (crypto_ablkcipher_crt(geniv)->givencrypt != | ||
218 | async_chainiv_givencrypt_first) | ||
219 | goto unlock; | ||
220 | |||
221 | crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; | ||
222 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, | ||
223 | crypto_ablkcipher_ivsize(geniv)); | ||
224 | |||
225 | unlock: | ||
226 | clear_bit(CHAINIV_STATE_INUSE, &ctx->state); | ||
227 | |||
228 | if (err) | ||
229 | return err; | ||
230 | |||
231 | out: | ||
232 | return async_chainiv_givencrypt(req); | ||
233 | } | ||
234 | |||
235 | static void async_chainiv_do_postponed(struct work_struct *work) | 201 | static void async_chainiv_do_postponed(struct work_struct *work) |
236 | { | 202 | { |
237 | struct async_chainiv_ctx *ctx = container_of(work, | 203 | struct async_chainiv_ctx *ctx = container_of(work, |
@@ -263,14 +229,23 @@ static void async_chainiv_do_postponed(struct work_struct *work) | |||
263 | 229 | ||
264 | static int async_chainiv_init(struct crypto_tfm *tfm) | 230 | static int async_chainiv_init(struct crypto_tfm *tfm) |
265 | { | 231 | { |
232 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); | ||
266 | struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); | 233 | struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); |
234 | char *iv; | ||
267 | 235 | ||
268 | spin_lock_init(&ctx->lock); | 236 | spin_lock_init(&ctx->lock); |
269 | 237 | ||
270 | crypto_init_queue(&ctx->queue, 100); | 238 | crypto_init_queue(&ctx->queue, 100); |
271 | INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); | 239 | INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); |
272 | 240 | ||
273 | return chainiv_init_common(tfm); | 241 | iv = NULL; |
242 | if (!crypto_get_default_rng()) { | ||
243 | crypto_ablkcipher_crt(geniv)->givencrypt = | ||
244 | async_chainiv_givencrypt; | ||
245 | iv = ctx->iv; | ||
246 | } | ||
247 | |||
248 | return chainiv_init_common(tfm, iv); | ||
274 | } | 249 | } |
275 | 250 | ||
276 | static void async_chainiv_exit(struct crypto_tfm *tfm) | 251 | static void async_chainiv_exit(struct crypto_tfm *tfm) |
@@ -288,21 +263,14 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) | |||
288 | { | 263 | { |
289 | struct crypto_attr_type *algt; | 264 | struct crypto_attr_type *algt; |
290 | struct crypto_instance *inst; | 265 | struct crypto_instance *inst; |
291 | int err; | ||
292 | 266 | ||
293 | algt = crypto_get_attr_type(tb); | 267 | algt = crypto_get_attr_type(tb); |
294 | if (IS_ERR(algt)) | 268 | if (IS_ERR(algt)) |
295 | return ERR_CAST(algt); | 269 | return ERR_CAST(algt); |
296 | 270 | ||
297 | err = crypto_get_default_rng(); | ||
298 | if (err) | ||
299 | return ERR_PTR(err); | ||
300 | |||
301 | inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); | 271 | inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); |
302 | if (IS_ERR(inst)) | 272 | if (IS_ERR(inst)) |
303 | goto put_rng; | 273 | goto out; |
304 | |||
305 | inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first; | ||
306 | 274 | ||
307 | inst->alg.cra_init = chainiv_init; | 275 | inst->alg.cra_init = chainiv_init; |
308 | inst->alg.cra_exit = skcipher_geniv_exit; | 276 | inst->alg.cra_exit = skcipher_geniv_exit; |
@@ -312,9 +280,6 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) | |||
312 | if (!crypto_requires_sync(algt->type, algt->mask)) { | 280 | if (!crypto_requires_sync(algt->type, algt->mask)) { |
313 | inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; | 281 | inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; |
314 | 282 | ||
315 | inst->alg.cra_ablkcipher.givencrypt = | ||
316 | async_chainiv_givencrypt_first; | ||
317 | |||
318 | inst->alg.cra_init = async_chainiv_init; | 283 | inst->alg.cra_init = async_chainiv_init; |
319 | inst->alg.cra_exit = async_chainiv_exit; | 284 | inst->alg.cra_exit = async_chainiv_exit; |
320 | 285 | ||
@@ -325,22 +290,12 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) | |||
325 | 290 | ||
326 | out: | 291 | out: |
327 | return inst; | 292 | return inst; |
328 | |||
329 | put_rng: | ||
330 | crypto_put_default_rng(); | ||
331 | goto out; | ||
332 | } | ||
333 | |||
334 | static void chainiv_free(struct crypto_instance *inst) | ||
335 | { | ||
336 | skcipher_geniv_free(inst); | ||
337 | crypto_put_default_rng(); | ||
338 | } | 293 | } |
339 | 294 | ||
340 | static struct crypto_template chainiv_tmpl = { | 295 | static struct crypto_template chainiv_tmpl = { |
341 | .name = "chainiv", | 296 | .name = "chainiv", |
342 | .alloc = chainiv_alloc, | 297 | .alloc = chainiv_alloc, |
343 | .free = chainiv_free, | 298 | .free = skcipher_geniv_free, |
344 | .module = THIS_MODULE, | 299 | .module = THIS_MODULE, |
345 | }; | 300 | }; |
346 | 301 | ||
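One idiom worth flagging in the new chainiv_init_common() (new line 96): "err ?: expr" is the GCC conditional-operator extension, equivalent to the expansion below, so a failed RNG read is reported while an RNG-less tfm still completes the generic geniv setup:

        if (err)
                return err;
        return skcipher_geniv_init(tfm);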
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index b0602ba03111..22ba81f76764 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -295,6 +295,23 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | |||
295 | crypto_free_blkcipher(ctx->child); | 295 | crypto_free_blkcipher(ctx->child); |
296 | } | 296 | } |
297 | 297 | ||
298 | static int cryptd_init_instance(struct crypto_instance *inst, | ||
299 | struct crypto_alg *alg) | ||
300 | { | ||
301 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
302 | "cryptd(%s)", | ||
303 | alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
304 | return -ENAMETOOLONG; | ||
305 | |||
306 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | ||
307 | |||
308 | inst->alg.cra_priority = alg->cra_priority + 50; | ||
309 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
310 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
298 | static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, | 315 | static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
299 | unsigned int tail) | 316 | unsigned int tail) |
300 | { | 317 | { |
@@ -308,17 +325,10 @@ static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, | |||
308 | 325 | ||
309 | inst = (void *)(p + head); | 326 | inst = (void *)(p + head); |
310 | 327 | ||
311 | err = -ENAMETOOLONG; | 328 | err = cryptd_init_instance(inst, alg); |
312 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 329 | if (err) |
313 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
314 | goto out_free_inst; | 330 | goto out_free_inst; |
315 | 331 | ||
316 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | ||
317 | |||
318 | inst->alg.cra_priority = alg->cra_priority + 50; | ||
319 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
320 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
321 | |||
322 | out: | 332 | out: |
323 | return p; | 333 | return p; |
324 | 334 | ||
@@ -654,6 +664,24 @@ out_put_alg: | |||
654 | return err; | 664 | return err; |
655 | } | 665 | } |
656 | 666 | ||
667 | static int cryptd_aead_setkey(struct crypto_aead *parent, | ||
668 | const u8 *key, unsigned int keylen) | ||
669 | { | ||
670 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent); | ||
671 | struct crypto_aead *child = ctx->child; | ||
672 | |||
673 | return crypto_aead_setkey(child, key, keylen); | ||
674 | } | ||
675 | |||
676 | static int cryptd_aead_setauthsize(struct crypto_aead *parent, | ||
677 | unsigned int authsize) | ||
678 | { | ||
679 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent); | ||
680 | struct crypto_aead *child = ctx->child; | ||
681 | |||
682 | return crypto_aead_setauthsize(child, authsize); | ||
683 | } | ||
684 | |||
657 | static void cryptd_aead_crypt(struct aead_request *req, | 685 | static void cryptd_aead_crypt(struct aead_request *req, |
658 | struct crypto_aead *child, | 686 | struct crypto_aead *child, |
659 | int err, | 687 | int err, |
@@ -715,27 +743,26 @@ static int cryptd_aead_decrypt_enqueue(struct aead_request *req) | |||
715 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt); | 743 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt); |
716 | } | 744 | } |
717 | 745 | ||
718 | static int cryptd_aead_init_tfm(struct crypto_tfm *tfm) | 746 | static int cryptd_aead_init_tfm(struct crypto_aead *tfm) |
719 | { | 747 | { |
720 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 748 | struct aead_instance *inst = aead_alg_instance(tfm); |
721 | struct aead_instance_ctx *ictx = crypto_instance_ctx(inst); | 749 | struct aead_instance_ctx *ictx = aead_instance_ctx(inst); |
722 | struct crypto_aead_spawn *spawn = &ictx->aead_spawn; | 750 | struct crypto_aead_spawn *spawn = &ictx->aead_spawn; |
723 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 751 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm); |
724 | struct crypto_aead *cipher; | 752 | struct crypto_aead *cipher; |
725 | 753 | ||
726 | cipher = crypto_spawn_aead(spawn); | 754 | cipher = crypto_spawn_aead(spawn); |
727 | if (IS_ERR(cipher)) | 755 | if (IS_ERR(cipher)) |
728 | return PTR_ERR(cipher); | 756 | return PTR_ERR(cipher); |
729 | 757 | ||
730 | crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
731 | ctx->child = cipher; | 758 | ctx->child = cipher; |
732 | tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx); | 759 | crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx)); |
733 | return 0; | 760 | return 0; |
734 | } | 761 | } |
735 | 762 | ||
736 | static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm) | 763 | static void cryptd_aead_exit_tfm(struct crypto_aead *tfm) |
737 | { | 764 | { |
738 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 765 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm); |
739 | crypto_free_aead(ctx->child); | 766 | crypto_free_aead(ctx->child); |
740 | } | 767 | } |
741 | 768 | ||
@@ -744,57 +771,57 @@ static int cryptd_create_aead(struct crypto_template *tmpl, | |||
744 | struct cryptd_queue *queue) | 771 | struct cryptd_queue *queue) |
745 | { | 772 | { |
746 | struct aead_instance_ctx *ctx; | 773 | struct aead_instance_ctx *ctx; |
747 | struct crypto_instance *inst; | 774 | struct aead_instance *inst; |
748 | struct crypto_alg *alg; | 775 | struct aead_alg *alg; |
749 | u32 type = CRYPTO_ALG_TYPE_AEAD; | 776 | const char *name; |
750 | u32 mask = CRYPTO_ALG_TYPE_MASK; | 777 | u32 type = 0; |
778 | u32 mask = 0; | ||
751 | int err; | 779 | int err; |
752 | 780 | ||
753 | cryptd_check_internal(tb, &type, &mask); | 781 | cryptd_check_internal(tb, &type, &mask); |
754 | 782 | ||
755 | alg = crypto_get_attr_alg(tb, type, mask); | 783 | name = crypto_attr_alg_name(tb[1]); |
756 | if (IS_ERR(alg)) | 784 | if (IS_ERR(name)) |
757 | return PTR_ERR(alg); | 785 | return PTR_ERR(name); |
758 | 786 | ||
759 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); | 787 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
760 | err = PTR_ERR(inst); | 788 | if (!inst) |
761 | if (IS_ERR(inst)) | 789 | return -ENOMEM; |
762 | goto out_put_alg; | ||
763 | 790 | ||
764 | ctx = crypto_instance_ctx(inst); | 791 | ctx = aead_instance_ctx(inst); |
765 | ctx->queue = queue; | 792 | ctx->queue = queue; |
766 | 793 | ||
767 | err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst, | 794 | crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst)); |
768 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | 795 | err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask); |
769 | if (err) | 796 | if (err) |
770 | goto out_free_inst; | 797 | goto out_free_inst; |
771 | 798 | ||
772 | type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | 799 | alg = crypto_spawn_aead_alg(&ctx->aead_spawn); |
773 | if (alg->cra_flags & CRYPTO_ALG_INTERNAL) | 800 | err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base); |
774 | type |= CRYPTO_ALG_INTERNAL; | 801 | if (err) |
775 | inst->alg.cra_flags = type; | 802 | goto out_drop_aead; |
776 | inst->alg.cra_type = alg->cra_type; | ||
777 | inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx); | ||
778 | inst->alg.cra_init = cryptd_aead_init_tfm; | ||
779 | inst->alg.cra_exit = cryptd_aead_exit_tfm; | ||
780 | inst->alg.cra_aead.setkey = alg->cra_aead.setkey; | ||
781 | inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; | ||
782 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | ||
783 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | ||
784 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | ||
785 | inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue; | ||
786 | inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue; | ||
787 | inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt; | ||
788 | inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt; | ||
789 | 803 | ||
790 | err = crypto_register_instance(tmpl, inst); | 804 | inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | |
805 | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); | ||
806 | inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); | ||
807 | |||
808 | inst->alg.ivsize = crypto_aead_alg_ivsize(alg); | ||
809 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); | ||
810 | |||
811 | inst->alg.init = cryptd_aead_init_tfm; | ||
812 | inst->alg.exit = cryptd_aead_exit_tfm; | ||
813 | inst->alg.setkey = cryptd_aead_setkey; | ||
814 | inst->alg.setauthsize = cryptd_aead_setauthsize; | ||
815 | inst->alg.encrypt = cryptd_aead_encrypt_enqueue; | ||
816 | inst->alg.decrypt = cryptd_aead_decrypt_enqueue; | ||
817 | |||
818 | err = aead_register_instance(tmpl, inst); | ||
791 | if (err) { | 819 | if (err) { |
792 | crypto_drop_spawn(&ctx->aead_spawn.base); | 820 | out_drop_aead: |
821 | crypto_drop_aead(&ctx->aead_spawn); | ||
793 | out_free_inst: | 822 | out_free_inst: |
794 | kfree(inst); | 823 | kfree(inst); |
795 | } | 824 | } |
796 | out_put_alg: | ||
797 | crypto_mod_put(alg); | ||
798 | return err; | 825 | return err; |
799 | } | 826 | } |
800 | 827 | ||
@@ -832,8 +859,8 @@ static void cryptd_free(struct crypto_instance *inst) | |||
832 | kfree(ahash_instance(inst)); | 859 | kfree(ahash_instance(inst)); |
833 | return; | 860 | return; |
834 | case CRYPTO_ALG_TYPE_AEAD: | 861 | case CRYPTO_ALG_TYPE_AEAD: |
835 | crypto_drop_spawn(&aead_ctx->aead_spawn.base); | 862 | crypto_drop_aead(&aead_ctx->aead_spawn); |
836 | kfree(inst); | 863 | kfree(aead_instance(inst)); |
837 | return; | 864 | return; |
838 | default: | 865 | default: |
839 | crypto_drop_spawn(&ctx->spawn); | 866 | crypto_drop_spawn(&ctx->spawn); |
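With cryptd_create_aead() now grabbing its child by name and registering a proper aead_instance, consumers keep using the cryptd.h helpers unchanged. A hedged sketch, where "gcm(aes)" is only an example algorithm name:

#include <crypto/cryptd.h>
#include <linux/err.h>

static int cryptd_aead_demo(void)
{
        struct cryptd_aead *tfm;

        tfm = cryptd_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* requests against the wrapper run from cryptd's workqueue;
         * cryptd_aead_child(tfm) exposes the synchronous child */
        cryptd_free_aead(tfm);
        return 0;
}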
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index a20319132e33..941c9a434d50 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c | |||
@@ -25,6 +25,10 @@ | |||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | 27 | ||
28 | static DEFINE_MUTEX(crypto_default_null_skcipher_lock); | ||
29 | static struct crypto_blkcipher *crypto_default_null_skcipher; | ||
30 | static int crypto_default_null_skcipher_refcnt; | ||
31 | |||
28 | static int null_compress(struct crypto_tfm *tfm, const u8 *src, | 32 | static int null_compress(struct crypto_tfm *tfm, const u8 *src, |
29 | unsigned int slen, u8 *dst, unsigned int *dlen) | 33 | unsigned int slen, u8 *dst, unsigned int *dlen) |
30 | { | 34 | { |
@@ -149,6 +153,41 @@ MODULE_ALIAS_CRYPTO("compress_null"); | |||
149 | MODULE_ALIAS_CRYPTO("digest_null"); | 153 | MODULE_ALIAS_CRYPTO("digest_null"); |
150 | MODULE_ALIAS_CRYPTO("cipher_null"); | 154 | MODULE_ALIAS_CRYPTO("cipher_null"); |
151 | 155 | ||
156 | struct crypto_blkcipher *crypto_get_default_null_skcipher(void) | ||
157 | { | ||
158 | struct crypto_blkcipher *tfm; | ||
159 | |||
160 | mutex_lock(&crypto_default_null_skcipher_lock); | ||
161 | tfm = crypto_default_null_skcipher; | ||
162 | |||
163 | if (!tfm) { | ||
164 | tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0); | ||
165 | if (IS_ERR(tfm)) | ||
166 | goto unlock; | ||
167 | |||
168 | crypto_default_null_skcipher = tfm; | ||
169 | } | ||
170 | |||
171 | crypto_default_null_skcipher_refcnt++; | ||
172 | |||
173 | unlock: | ||
174 | mutex_unlock(&crypto_default_null_skcipher_lock); | ||
175 | |||
176 | return tfm; | ||
177 | } | ||
178 | EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); | ||
179 | |||
180 | void crypto_put_default_null_skcipher(void) | ||
181 | { | ||
182 | mutex_lock(&crypto_default_null_skcipher_lock); | ||
183 | if (!--crypto_default_null_skcipher_refcnt) { | ||
184 | crypto_free_blkcipher(crypto_default_null_skcipher); | ||
185 | crypto_default_null_skcipher = NULL; | ||
186 | } | ||
187 | mutex_unlock(&crypto_default_null_skcipher_lock); | ||
188 | } | ||
189 | EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); | ||
190 | |||
152 | static int __init crypto_null_mod_init(void) | 191 | static int __init crypto_null_mod_init(void) |
153 | { | 192 | { |
154 | int ret = 0; | 193 | int ret = 0; |
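The new get/put pair is meant for AEAD code elsewhere in this series that needs a cheap copy-through transform; every successful get must be balanced by a put so the shared tfm is freed when the last user drops it. A sketch of the intended usage, assuming the <crypto/null.h> declarations added alongside this patch:

#include <crypto/null.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int null_copy_demo(struct scatterlist *dst, struct scatterlist *src,
                          unsigned int nbytes)
{
        struct crypto_blkcipher *null = crypto_get_default_null_skcipher();
        struct blkcipher_desc desc;
        int err;

        if (IS_ERR(null))
                return PTR_ERR(null);

        /* ecb(cipher_null) just moves src to dst */
        desc.tfm = null;
        desc.flags = 0;
        err = crypto_blkcipher_encrypt(&desc, dst, src, nbytes);

        crypto_put_default_null_skcipher();
        return err;
}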
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 41dfe762b7fb..08ea2867fc8a 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <net/net_namespace.h> | 27 | #include <net/net_namespace.h> |
28 | #include <crypto/internal/aead.h> | 28 | #include <crypto/internal/aead.h> |
29 | #include <crypto/internal/skcipher.h> | 29 | #include <crypto/internal/skcipher.h> |
30 | #include <crypto/internal/rng.h> | ||
31 | #include <crypto/akcipher.h> | ||
30 | 32 | ||
31 | #include "internal.h" | 33 | #include "internal.h" |
32 | 34 | ||
@@ -110,6 +112,21 @@ nla_put_failure: | |||
110 | return -EMSGSIZE; | 112 | return -EMSGSIZE; |
111 | } | 113 | } |
112 | 114 | ||
115 | static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) | ||
116 | { | ||
117 | struct crypto_report_akcipher rakcipher; | ||
118 | |||
119 | strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); | ||
120 | |||
121 | if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, | ||
122 | sizeof(struct crypto_report_akcipher), &rakcipher)) | ||
123 | goto nla_put_failure; | ||
124 | return 0; | ||
125 | |||
126 | nla_put_failure: | ||
127 | return -EMSGSIZE; | ||
128 | } | ||
129 | |||
113 | static int crypto_report_one(struct crypto_alg *alg, | 130 | static int crypto_report_one(struct crypto_alg *alg, |
114 | struct crypto_user_alg *ualg, struct sk_buff *skb) | 131 | struct crypto_user_alg *ualg, struct sk_buff *skb) |
115 | { | 132 | { |
@@ -154,6 +171,12 @@ static int crypto_report_one(struct crypto_alg *alg, | |||
154 | goto nla_put_failure; | 171 | goto nla_put_failure; |
155 | 172 | ||
156 | break; | 173 | break; |
174 | |||
175 | case CRYPTO_ALG_TYPE_AKCIPHER: | ||
176 | if (crypto_report_akcipher(skb, alg)) | ||
177 | goto nla_put_failure; | ||
178 | |||
179 | break; | ||
157 | } | 180 | } |
158 | 181 | ||
159 | out: | 182 | out: |
@@ -450,13 +473,21 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
450 | return 0; | 473 | return 0; |
451 | } | 474 | } |
452 | 475 | ||
476 | static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
477 | struct nlattr **attrs) | ||
478 | { | ||
479 | if (!netlink_capable(skb, CAP_NET_ADMIN)) | ||
480 | return -EPERM; | ||
481 | return crypto_del_default_rng(); | ||
482 | } | ||
483 | |||
453 | #define MSGSIZE(type) sizeof(struct type) | 484 | #define MSGSIZE(type) sizeof(struct type) |
454 | 485 | ||
455 | static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { | 486 | static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = { |
456 | [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), | 487 | [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), |
457 | [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), | 488 | [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), |
458 | [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), | 489 | [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), |
459 | [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), | 490 | [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0, |
460 | }; | 491 | }; |
461 | 492 | ||
462 | static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = { | 493 | static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = { |
@@ -476,6 +507,7 @@ static const struct crypto_link { | |||
476 | [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report, | 507 | [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report, |
477 | .dump = crypto_dump_report, | 508 | .dump = crypto_dump_report, |
478 | .done = crypto_dump_report_done}, | 509 | .done = crypto_dump_report_done}, |
510 | [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng }, | ||
479 | }; | 511 | }; |
480 | 512 | ||
481 | static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 513 | static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
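Deleting the default RNG instance is a bare netlink request with no payload (hence the 0 entry in crypto_msg_min) and requires CAP_NET_ADMIN. A userspace sketch, assuming the uapi <linux/cryptouser.h> introduced by this series:

#include <linux/cryptouser.h>
#include <linux/netlink.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct nlmsghdr nlh = {
                .nlmsg_len   = NLMSG_HDRLEN,    /* header only, no payload */
                .nlmsg_type  = CRYPTO_MSG_DELRNG,
                .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
        };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

        if (fd < 0)
                return 1;
        if (send(fd, &nlh, nlh.nlmsg_len, 0) < 0)
                return 1;
        /* a real tool would read back the netlink ACK here */
        close(fd);
        return 0;
}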
diff --git a/crypto/drbg.c b/crypto/drbg.c index b69409cb7e6a..a7c23146b87f 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
@@ -98,6 +98,7 @@ | |||
98 | */ | 98 | */ |
99 | 99 | ||
100 | #include <crypto/drbg.h> | 100 | #include <crypto/drbg.h> |
101 | #include <linux/kernel.h> | ||
101 | 102 | ||
102 | /*************************************************************** | 103 | /*************************************************************** |
103 | * Backend cipher definitions available to DRBG | 104 | * Backend cipher definitions available to DRBG |
@@ -190,6 +191,8 @@ static const struct drbg_core drbg_cores[] = { | |||
190 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ | 191 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ |
191 | }; | 192 | }; |
192 | 193 | ||
194 | static int drbg_uninstantiate(struct drbg_state *drbg); | ||
195 | |||
193 | /****************************************************************** | 196 | /****************************************************************** |
194 | * Generic helper functions | 197 | * Generic helper functions |
195 | ******************************************************************/ | 198 | ******************************************************************/ |
@@ -235,7 +238,7 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg, | |||
235 | #ifdef CONFIG_CRYPTO_FIPS | 238 | #ifdef CONFIG_CRYPTO_FIPS |
236 | int ret = 0; | 239 | int ret = 0; |
237 | /* skip test if we test the overall system */ | 240 | /* skip test if we test the overall system */ |
238 | if (drbg->test_data) | 241 | if (list_empty(&drbg->test_data.list)) |
239 | return true; | 242 | return true; |
240 | /* only perform test in FIPS mode */ | 243 | /* only perform test in FIPS mode */ |
241 | if (0 == fips_enabled) | 244 | if (0 == fips_enabled) |
@@ -487,7 +490,7 @@ static int drbg_ctr_df(struct drbg_state *drbg, | |||
487 | 490 | ||
488 | out: | 491 | out: |
489 | memset(iv, 0, drbg_blocklen(drbg)); | 492 | memset(iv, 0, drbg_blocklen(drbg)); |
490 | memset(temp, 0, drbg_statelen(drbg)); | 493 | memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); |
491 | memset(pad, 0, drbg_blocklen(drbg)); | 494 | memset(pad, 0, drbg_blocklen(drbg)); |
492 | return ret; | 495 | return ret; |
493 | } | 496 | } |
@@ -1041,6 +1044,58 @@ static struct drbg_state_ops drbg_hash_ops = { | |||
1041 | * Functions common for DRBG implementations | 1044 | * Functions common for DRBG implementations |
1042 | ******************************************************************/ | 1045 | ******************************************************************/ |
1043 | 1046 | ||
1047 | static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, | ||
1048 | int reseed) | ||
1049 | { | ||
1050 | int ret = drbg->d_ops->update(drbg, seed, reseed); | ||
1051 | |||
1052 | if (ret) | ||
1053 | return ret; | ||
1054 | |||
1055 | drbg->seeded = true; | ||
1056 | /* 10.1.1.2 / 10.1.1.3 step 5 */ | ||
1057 | drbg->reseed_ctr = 1; | ||
1058 | |||
1059 | return ret; | ||
1060 | } | ||
1061 | |||
1062 | static void drbg_async_seed(struct work_struct *work) | ||
1063 | { | ||
1064 | struct drbg_string data; | ||
1065 | LIST_HEAD(seedlist); | ||
1066 | struct drbg_state *drbg = container_of(work, struct drbg_state, | ||
1067 | seed_work); | ||
1068 | unsigned int entropylen = drbg_sec_strength(drbg->core->flags); | ||
1069 | unsigned char entropy[32]; | ||
1070 | |||
1071 | BUG_ON(!entropylen); | ||
1072 | BUG_ON(entropylen > sizeof(entropy)); | ||
1073 | get_random_bytes(entropy, entropylen); | ||
1074 | |||
1075 | drbg_string_fill(&data, entropy, entropylen); | ||
1076 | list_add_tail(&data.list, &seedlist); | ||
1077 | |||
1078 | mutex_lock(&drbg->drbg_mutex); | ||
1079 | |||
1080 | /* If nonblocking pool is initialized, deactivate Jitter RNG */ | ||
1081 | crypto_free_rng(drbg->jent); | ||
1082 | drbg->jent = NULL; | ||
1083 | |||
1084 | /* Set seeded to false so that if __drbg_seed fails the | ||
1085 | * next generate call will trigger a reseed. | ||
1086 | */ | ||
1087 | drbg->seeded = false; | ||
1088 | |||
1089 | __drbg_seed(drbg, &seedlist, true); | ||
1090 | |||
1091 | if (drbg->seeded) | ||
1092 | drbg->reseed_threshold = drbg_max_requests(drbg); | ||
1093 | |||
1094 | mutex_unlock(&drbg->drbg_mutex); | ||
1095 | |||
1096 | memzero_explicit(entropy, entropylen); | ||
1097 | } | ||
1098 | |||
1044 | /* | 1099 | /* |
1045 | * Seeding or reseeding of the DRBG | 1100 | * Seeding or reseeding of the DRBG |
1046 | * | 1101 | * |
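Note: drbg_async_seed() runs from a workqueue, so the reseed from get_random_bytes() happens outside the caller's context once the input pool is ready. A minimal sketch of the underlying deferred-work pattern, with illustrative names (my_state, my_seed_fn) that are not part of the DRBG:

#include <linux/workqueue.h>

struct my_state {
        struct work_struct seed_work;
        /* ... RNG state protected by its own lock ... */
};

static void my_seed_fn(struct work_struct *work)
{
        /* recover the parent object from the embedded work item */
        struct my_state *st = container_of(work, struct my_state,
                                           seed_work);
        /* pull fresh entropy and mix it into st under the state lock */
        (void)st;
}

/*
 * Setup: INIT_WORK(&st->seed_work, my_seed_fn);
 * When entropy becomes available (the random_ready callback later in
 * this patch): schedule_work(&st->seed_work);
 */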
@@ -1055,9 +1110,9 @@ static struct drbg_state_ops drbg_hash_ops = { | |||
1055 | static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | 1110 | static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, |
1056 | bool reseed) | 1111 | bool reseed) |
1057 | { | 1112 | { |
1058 | int ret = 0; | 1113 | int ret; |
1059 | unsigned char *entropy = NULL; | 1114 | unsigned char entropy[((32 + 16) * 2)]; |
1060 | size_t entropylen = 0; | 1115 | unsigned int entropylen = drbg_sec_strength(drbg->core->flags); |
1061 | struct drbg_string data1; | 1116 | struct drbg_string data1; |
1062 | LIST_HEAD(seedlist); | 1117 | LIST_HEAD(seedlist); |
1063 | 1118 | ||
@@ -1068,31 +1123,45 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1068 | return -EINVAL; | 1123 | return -EINVAL; |
1069 | } | 1124 | } |
1070 | 1125 | ||
1071 | if (drbg->test_data && drbg->test_data->testentropy) { | 1126 | if (list_empty(&drbg->test_data.list)) { |
1072 | drbg_string_fill(&data1, drbg->test_data->testentropy->buf, | 1127 | drbg_string_fill(&data1, drbg->test_data.buf, |
1073 | drbg->test_data->testentropy->len); | 1128 | drbg->test_data.len); |
1074 | pr_devel("DRBG: using test entropy\n"); | 1129 | pr_devel("DRBG: using test entropy\n"); |
1075 | } else { | 1130 | } else { |
1076 | /* | 1131 | /* |
1077 | * Gather entropy equal to the security strength of the DRBG. | 1132 | * Gather entropy equal to the security strength of the DRBG. |
1078 | * With a derivation function, a nonce is required in addition | 1133 | * With a derivation function, a nonce is required in addition |
1079 | * to the entropy. A nonce must be at least 1/2 of the security | 1134 | * to the entropy. A nonce must be at least 1/2 of the security |
1080 | * strength of the DRBG in size. Thus, entropy * nonce is 3/2 | 1135 | * strength of the DRBG in size. Thus, entropy + nonce is 3/2 |
1081 | * of the strength. The consideration of a nonce is only | 1136 | * of the strength. The consideration of a nonce is only |
1082 | * applicable during initial seeding. | 1137 | * applicable during initial seeding. |
1083 | */ | 1138 | */ |
1084 | entropylen = drbg_sec_strength(drbg->core->flags); | 1139 | BUG_ON(!entropylen); |
1085 | if (!entropylen) | ||
1086 | return -EFAULT; | ||
1087 | if (!reseed) | 1140 | if (!reseed) |
1088 | entropylen = ((entropylen + 1) / 2) * 3; | 1141 | entropylen = ((entropylen + 1) / 2) * 3; |
1089 | pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n", | 1142 | BUG_ON((entropylen * 2) > sizeof(entropy)); |
1090 | entropylen); | 1143 | |
1091 | entropy = kzalloc(entropylen, GFP_KERNEL); | 1144 | /* Get seed from in-kernel /dev/urandom */ |
1092 | if (!entropy) | ||
1093 | return -ENOMEM; | ||
1094 | get_random_bytes(entropy, entropylen); | 1145 | get_random_bytes(entropy, entropylen); |
1095 | drbg_string_fill(&data1, entropy, entropylen); | 1146 | |
1147 | if (!drbg->jent) { | ||
1148 | drbg_string_fill(&data1, entropy, entropylen); | ||
1149 | pr_devel("DRBG: (re)seeding with %u bytes of entropy\n", | ||
1150 | entropylen); | ||
1151 | } else { | ||
1152 | /* Get seed from Jitter RNG */ | ||
1153 | ret = crypto_rng_get_bytes(drbg->jent, | ||
1154 | entropy + entropylen, | ||
1155 | entropylen); | ||
1156 | if (ret) { | ||
1157 | pr_devel("DRBG: jent failed with %d\n", ret); | ||
1158 | return ret; | ||
1159 | } | ||
1160 | |||
1161 | drbg_string_fill(&data1, entropy, entropylen * 2); | ||
1162 | pr_devel("DRBG: (re)seeding with %u bytes of entropy\n", | ||
1163 | entropylen * 2); | ||
1164 | } | ||
1096 | } | 1165 | } |
1097 | list_add_tail(&data1.list, &seedlist); | 1166 | list_add_tail(&data1.list, &seedlist); |
1098 | 1167 | ||
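Note: with the Jitter RNG active, drbg_seed() now concatenates two independent sources into one fixed stack buffer. On initial seeding, entropylen is scaled by 3/2 for the nonce: for a 256-bit strength, ((32 + 1) / 2) * 3 = 48 bytes per source, so the worst case is 96 bytes, which matches the ((32 + 16) * 2) declaration. The layout for that case, as a sketch:

/*
 * Seed buffer layout when drbg->jent is set (initial seeding of a
 * 256-bit strength DRBG; entropylen == 48 after the 3/2 adjustment):
 *
 *   entropy[ 0 .. 47]   get_random_bytes()           (urandom pool)
 *   entropy[48 .. 95]   crypto_rng_get_bytes(jent)   (Jitter RNG)
 *
 * drbg_string_fill(&data1, entropy, entropylen * 2) then hands the
 * concatenation to the DRBG update function as a single seed string.
 */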
@@ -1111,16 +1180,10 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1111 | memset(drbg->C, 0, drbg_statelen(drbg)); | 1180 | memset(drbg->C, 0, drbg_statelen(drbg)); |
1112 | } | 1181 | } |
1113 | 1182 | ||
1114 | ret = drbg->d_ops->update(drbg, &seedlist, reseed); | 1183 | ret = __drbg_seed(drbg, &seedlist, reseed); |
1115 | if (ret) | ||
1116 | goto out; | ||
1117 | 1184 | ||
1118 | drbg->seeded = true; | 1185 | memzero_explicit(entropy, entropylen * 2); |
1119 | /* 10.1.1.2 / 10.1.1.3 step 5 */ | ||
1120 | drbg->reseed_ctr = 1; | ||
1121 | 1186 | ||
1122 | out: | ||
1123 | kzfree(entropy); | ||
1124 | return ret; | 1187 | return ret; |
1125 | } | 1188 | } |
1126 | 1189 | ||
@@ -1136,6 +1199,8 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) | |||
1136 | kzfree(drbg->scratchpad); | 1199 | kzfree(drbg->scratchpad); |
1137 | drbg->scratchpad = NULL; | 1200 | drbg->scratchpad = NULL; |
1138 | drbg->reseed_ctr = 0; | 1201 | drbg->reseed_ctr = 0; |
1202 | drbg->d_ops = NULL; | ||
1203 | drbg->core = NULL; | ||
1139 | #ifdef CONFIG_CRYPTO_FIPS | 1204 | #ifdef CONFIG_CRYPTO_FIPS |
1140 | kzfree(drbg->prev); | 1205 | kzfree(drbg->prev); |
1141 | drbg->prev = NULL; | 1206 | drbg->prev = NULL; |
@@ -1152,6 +1217,27 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) | |||
1152 | int ret = -ENOMEM; | 1217 | int ret = -ENOMEM; |
1153 | unsigned int sb_size = 0; | 1218 | unsigned int sb_size = 0; |
1154 | 1219 | ||
1220 | switch (drbg->core->flags & DRBG_TYPE_MASK) { | ||
1221 | #ifdef CONFIG_CRYPTO_DRBG_HMAC | ||
1222 | case DRBG_HMAC: | ||
1223 | drbg->d_ops = &drbg_hmac_ops; | ||
1224 | break; | ||
1225 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ | ||
1226 | #ifdef CONFIG_CRYPTO_DRBG_HASH | ||
1227 | case DRBG_HASH: | ||
1228 | drbg->d_ops = &drbg_hash_ops; | ||
1229 | break; | ||
1230 | #endif /* CONFIG_CRYPTO_DRBG_HASH */ | ||
1231 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
1232 | case DRBG_CTR: | ||
1233 | drbg->d_ops = &drbg_ctr_ops; | ||
1234 | break; | ||
1235 | #endif /* CONFIG_CRYPTO_DRBG_CTR */ | ||
1236 | default: | ||
1237 | ret = -EOPNOTSUPP; | ||
1238 | goto err; | ||
1239 | } | ||
1240 | |||
1155 | drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); | 1241 | drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); |
1156 | if (!drbg->V) | 1242 | if (!drbg->V) |
1157 | goto err; | 1243 | goto err; |
@@ -1181,87 +1267,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) | |||
1181 | if (!drbg->scratchpad) | 1267 | if (!drbg->scratchpad) |
1182 | goto err; | 1268 | goto err; |
1183 | } | 1269 | } |
1184 | spin_lock_init(&drbg->drbg_lock); | ||
1185 | return 0; | ||
1186 | |||
1187 | err: | ||
1188 | drbg_dealloc_state(drbg); | ||
1189 | return ret; | ||
1190 | } | ||
1191 | |||
1192 | /* | ||
1193 | * Strategy to avoid holding long term locks: generate a shadow copy of DRBG | ||
1194 | * and perform all operations on this shadow copy. After finishing, restore | ||
1195 | * the updated state of the shadow copy into original drbg state. This way, | ||
1196 | * only the read and write operations of the original drbg state must be | ||
1197 | * locked | ||
1198 | */ | ||
1199 | static inline void drbg_copy_drbg(struct drbg_state *src, | ||
1200 | struct drbg_state *dst) | ||
1201 | { | ||
1202 | if (!src || !dst) | ||
1203 | return; | ||
1204 | memcpy(dst->V, src->V, drbg_statelen(src)); | ||
1205 | memcpy(dst->C, src->C, drbg_statelen(src)); | ||
1206 | dst->reseed_ctr = src->reseed_ctr; | ||
1207 | dst->seeded = src->seeded; | ||
1208 | dst->pr = src->pr; | ||
1209 | #ifdef CONFIG_CRYPTO_FIPS | ||
1210 | dst->fips_primed = src->fips_primed; | ||
1211 | memcpy(dst->prev, src->prev, drbg_blocklen(src)); | ||
1212 | #endif | ||
1213 | /* | ||
1214 | * Not copied: | ||
1215 | * scratchpad is initialized drbg_alloc_state; | ||
1216 | * priv_data is initialized with call to crypto_init; | ||
1217 | * d_ops and core are set outside, as these parameters are const; | ||
1218 | * test_data is set outside to prevent it being copied back. | ||
1219 | */ | ||
1220 | } | ||
1221 | |||
1222 | static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow) | ||
1223 | { | ||
1224 | int ret = -ENOMEM; | ||
1225 | struct drbg_state *tmp = NULL; | ||
1226 | |||
1227 | tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); | ||
1228 | if (!tmp) | ||
1229 | return -ENOMEM; | ||
1230 | |||
1231 | /* read-only data as they are defined as const, no lock needed */ | ||
1232 | tmp->core = drbg->core; | ||
1233 | tmp->d_ops = drbg->d_ops; | ||
1234 | 1270 | ||
1235 | ret = drbg_alloc_state(tmp); | ||
1236 | if (ret) | ||
1237 | goto err; | ||
1238 | |||
1239 | spin_lock_bh(&drbg->drbg_lock); | ||
1240 | drbg_copy_drbg(drbg, tmp); | ||
1241 | /* only make a link to the test buffer, as we only read that data */ | ||
1242 | tmp->test_data = drbg->test_data; | ||
1243 | spin_unlock_bh(&drbg->drbg_lock); | ||
1244 | *shadow = tmp; | ||
1245 | return 0; | 1271 | return 0; |
1246 | 1272 | ||
1247 | err: | 1273 | err: |
1248 | kzfree(tmp); | 1274 | drbg_dealloc_state(drbg); |
1249 | return ret; | 1275 | return ret; |
1250 | } | 1276 | } |
1251 | 1277 | ||
1252 | static void drbg_restore_shadow(struct drbg_state *drbg, | ||
1253 | struct drbg_state **shadow) | ||
1254 | { | ||
1255 | struct drbg_state *tmp = *shadow; | ||
1256 | |||
1257 | spin_lock_bh(&drbg->drbg_lock); | ||
1258 | drbg_copy_drbg(tmp, drbg); | ||
1259 | spin_unlock_bh(&drbg->drbg_lock); | ||
1260 | drbg_dealloc_state(tmp); | ||
1261 | kzfree(tmp); | ||
1262 | *shadow = NULL; | ||
1263 | } | ||
1264 | |||
1265 | /************************************************************************* | 1278 | /************************************************************************* |
1266 | * DRBG interface functions | 1279 | * DRBG interface functions |
1267 | *************************************************************************/ | 1280 | *************************************************************************/ |
@@ -1287,14 +1300,12 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1287 | struct drbg_string *addtl) | 1300 | struct drbg_string *addtl) |
1288 | { | 1301 | { |
1289 | int len = 0; | 1302 | int len = 0; |
1290 | struct drbg_state *shadow = NULL; | ||
1291 | LIST_HEAD(addtllist); | 1303 | LIST_HEAD(addtllist); |
1292 | struct drbg_string timestamp; | ||
1293 | union { | ||
1294 | cycles_t cycles; | ||
1295 | unsigned char char_cycles[sizeof(cycles_t)]; | ||
1296 | } now; | ||
1297 | 1304 | ||
1305 | if (!drbg->core) { | ||
1306 | pr_devel("DRBG: not yet seeded\n"); | ||
1307 | return -EINVAL; | ||
1308 | } | ||
1298 | if (0 == buflen || !buf) { | 1309 | if (0 == buflen || !buf) { |
1299 | pr_devel("DRBG: no output buffer provided\n"); | 1310 | pr_devel("DRBG: no output buffer provided\n"); |
1300 | return -EINVAL; | 1311 | return -EINVAL; |
@@ -1304,15 +1315,9 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1304 | return -EINVAL; | 1315 | return -EINVAL; |
1305 | } | 1316 | } |
1306 | 1317 | ||
1307 | len = drbg_make_shadow(drbg, &shadow); | ||
1308 | if (len) { | ||
1309 | pr_devel("DRBG: shadow copy cannot be generated\n"); | ||
1310 | return len; | ||
1311 | } | ||
1312 | |||
1313 | /* 9.3.1 step 2 */ | 1318 | /* 9.3.1 step 2 */ |
1314 | len = -EINVAL; | 1319 | len = -EINVAL; |
1315 | if (buflen > (drbg_max_request_bytes(shadow))) { | 1320 | if (buflen > (drbg_max_request_bytes(drbg))) { |
1316 | pr_devel("DRBG: requested random numbers too large %u\n", | 1321 | pr_devel("DRBG: requested random numbers too large %u\n", |
1317 | buflen); | 1322 | buflen); |
1318 | goto err; | 1323 | goto err; |
@@ -1321,7 +1326,7 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1321 | /* 9.3.1 step 3 is implicit with the chosen DRBG */ | 1326 | /* 9.3.1 step 3 is implicit with the chosen DRBG */ |
1322 | 1327 | ||
1323 | /* 9.3.1 step 4 */ | 1328 | /* 9.3.1 step 4 */ |
1324 | if (addtl && addtl->len > (drbg_max_addtl(shadow))) { | 1329 | if (addtl && addtl->len > (drbg_max_addtl(drbg))) { |
1325 | pr_devel("DRBG: additional information string too long %zu\n", | 1330 | pr_devel("DRBG: additional information string too long %zu\n", |
1326 | addtl->len); | 1331 | addtl->len); |
1327 | goto err; | 1332 | goto err; |
@@ -1332,46 +1337,29 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1332 | * 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented | 1337 | * 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented |
1333 | * here. The spec is a bit convoluted here, we make it simpler. | 1338 | * here. The spec is a bit convoluted here, we make it simpler. |
1334 | */ | 1339 | */ |
1335 | if ((drbg_max_requests(shadow)) < shadow->reseed_ctr) | 1340 | if (drbg->reseed_threshold < drbg->reseed_ctr) |
1336 | shadow->seeded = false; | 1341 | drbg->seeded = false; |
1337 | 1342 | ||
1338 | /* allocate cipher handle */ | 1343 | if (drbg->pr || !drbg->seeded) { |
1339 | len = shadow->d_ops->crypto_init(shadow); | ||
1340 | if (len) | ||
1341 | goto err; | ||
1342 | |||
1343 | if (shadow->pr || !shadow->seeded) { | ||
1344 | pr_devel("DRBG: reseeding before generation (prediction " | 1344 | pr_devel("DRBG: reseeding before generation (prediction " |
1345 | "resistance: %s, state %s)\n", | 1345 | "resistance: %s, state %s)\n", |
1346 | drbg->pr ? "true" : "false", | 1346 | drbg->pr ? "true" : "false", |
1347 | drbg->seeded ? "seeded" : "unseeded"); | 1347 | drbg->seeded ? "seeded" : "unseeded"); |
1348 | /* 9.3.1 steps 7.1 through 7.3 */ | 1348 | /* 9.3.1 steps 7.1 through 7.3 */ |
1349 | len = drbg_seed(shadow, addtl, true); | 1349 | len = drbg_seed(drbg, addtl, true); |
1350 | if (len) | 1350 | if (len) |
1351 | goto err; | 1351 | goto err; |
1352 | /* 9.3.1 step 7.4 */ | 1352 | /* 9.3.1 step 7.4 */ |
1353 | addtl = NULL; | 1353 | addtl = NULL; |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | /* | ||
1357 | * Mix the time stamp into the DRBG state if the DRBG is not in | ||
1358 | * test mode. If there are two callers invoking the DRBG at the same | ||
1359 | * time, i.e. before the first caller merges its shadow state back, | ||
1360 | * both callers would obtain the same random number stream without | ||
1361 | * changing the state here. | ||
1362 | */ | ||
1363 | if (!drbg->test_data) { | ||
1364 | now.cycles = random_get_entropy(); | ||
1365 | drbg_string_fill(×tamp, now.char_cycles, sizeof(cycles_t)); | ||
1366 | list_add_tail(×tamp.list, &addtllist); | ||
1367 | } | ||
1368 | if (addtl && 0 < addtl->len) | 1356 | if (addtl && 0 < addtl->len) |
1369 | list_add_tail(&addtl->list, &addtllist); | 1357 | list_add_tail(&addtl->list, &addtllist); |
1370 | /* 9.3.1 step 8 and 10 */ | 1358 | /* 9.3.1 step 8 and 10 */ |
1371 | len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist); | 1359 | len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist); |
1372 | 1360 | ||
1373 | /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */ | 1361 | /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */ |
1374 | shadow->reseed_ctr++; | 1362 | drbg->reseed_ctr++; |
1375 | if (0 >= len) | 1363 | if (0 >= len) |
1376 | goto err; | 1364 | goto err; |
1377 | 1365 | ||
@@ -1391,7 +1379,7 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1391 | * case somebody has a need to implement the test of 11.3.3. | 1379 | * case somebody has a need to implement the test of 11.3.3. |
1392 | */ | 1380 | */ |
1393 | #if 0 | 1381 | #if 0 |
1394 | if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) { | 1382 | if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) { |
1395 | int err = 0; | 1383 | int err = 0; |
1396 | pr_devel("DRBG: start to perform self test\n"); | 1384 | pr_devel("DRBG: start to perform self test\n"); |
1397 | if (drbg->core->flags & DRBG_HMAC) | 1385 | if (drbg->core->flags & DRBG_HMAC) |
@@ -1410,8 +1398,6 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1410 | * are returned when reusing this DRBG cipher handle | 1398 | * are returned when reusing this DRBG cipher handle |
1411 | */ | 1399 | */ |
1412 | drbg_uninstantiate(drbg); | 1400 | drbg_uninstantiate(drbg); |
1413 | drbg_dealloc_state(shadow); | ||
1414 | kzfree(shadow); | ||
1415 | return 0; | 1401 | return 0; |
1416 | } else { | 1402 | } else { |
1417 | pr_devel("DRBG: self test successful\n"); | 1403 | pr_devel("DRBG: self test successful\n"); |
@@ -1425,8 +1411,6 @@ static int drbg_generate(struct drbg_state *drbg, | |||
1425 | */ | 1411 | */ |
1426 | len = 0; | 1412 | len = 0; |
1427 | err: | 1413 | err: |
1428 | shadow->d_ops->crypto_fini(shadow); | ||
1429 | drbg_restore_shadow(drbg, &shadow); | ||
1430 | return len; | 1414 | return len; |
1431 | } | 1415 | } |
1432 | 1416 | ||
@@ -1442,19 +1426,68 @@ static int drbg_generate_long(struct drbg_state *drbg, | |||
1442 | unsigned char *buf, unsigned int buflen, | 1426 | unsigned char *buf, unsigned int buflen, |
1443 | struct drbg_string *addtl) | 1427 | struct drbg_string *addtl) |
1444 | { | 1428 | { |
1445 | int len = 0; | 1429 | unsigned int len = 0; |
1446 | unsigned int slice = 0; | 1430 | unsigned int slice = 0; |
1447 | do { | 1431 | do { |
1448 | int tmplen = 0; | 1432 | int err = 0; |
1449 | unsigned int chunk = 0; | 1433 | unsigned int chunk = 0; |
1450 | slice = ((buflen - len) / drbg_max_request_bytes(drbg)); | 1434 | slice = ((buflen - len) / drbg_max_request_bytes(drbg)); |
1451 | chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); | 1435 | chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); |
1452 | tmplen = drbg_generate(drbg, buf + len, chunk, addtl); | 1436 | mutex_lock(&drbg->drbg_mutex); |
1453 | if (0 >= tmplen) | 1437 | err = drbg_generate(drbg, buf + len, chunk, addtl); |
1454 | return tmplen; | 1438 | mutex_unlock(&drbg->drbg_mutex); |
1455 | len += tmplen; | 1439 | if (0 > err) |
1440 | return err; | ||
1441 | len += chunk; | ||
1456 | } while (slice > 0 && (len < buflen)); | 1442 | } while (slice > 0 && (len < buflen)); |
1457 | return len; | 1443 | return 0; |
1444 | } | ||
1445 | |||
1446 | static void drbg_schedule_async_seed(struct random_ready_callback *rdy) | ||
1447 | { | ||
1448 | struct drbg_state *drbg = container_of(rdy, struct drbg_state, | ||
1449 | random_ready); | ||
1450 | |||
1451 | schedule_work(&drbg->seed_work); | ||
1452 | } | ||
1453 | |||
1454 | static int drbg_prepare_hrng(struct drbg_state *drbg) | ||
1455 | { | ||
1456 | int err; | ||
1457 | |||
1458 | /* We do not need an HRNG in test mode. */ | ||
1459 | if (list_empty(&drbg->test_data.list)) | ||
1460 | return 0; | ||
1461 | |||
1462 | INIT_WORK(&drbg->seed_work, drbg_async_seed); | ||
1463 | |||
1464 | drbg->random_ready.owner = THIS_MODULE; | ||
1465 | drbg->random_ready.func = drbg_schedule_async_seed; | ||
1466 | |||
1467 | err = add_random_ready_callback(&drbg->random_ready); | ||
1468 | |||
1469 | switch (err) { | ||
1470 | case 0: | ||
1471 | break; | ||
1472 | |||
1473 | case -EALREADY: | ||
1474 | err = 0; | ||
1475 | /* fall through */ | ||
1476 | |||
1477 | default: | ||
1478 | drbg->random_ready.func = NULL; | ||
1479 | return err; | ||
1480 | } | ||
1481 | |||
1482 | drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); | ||
1483 | |||
1484 | /* | ||
1485 | * Require frequent reseeds until the seed source is fully | ||
1486 | * initialized. | ||
1487 | */ | ||
1488 | drbg->reseed_threshold = 50; | ||
1489 | |||
1490 | return err; | ||
1458 | } | 1491 | } |
1459 | 1492 | ||
1460 | /* | 1493 | /* |
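Note: the Jitter RNG is allocated by driver name like any crypto API RNG; drbg_instantiate() below treats -ENOENT as non-fatal outside FIPS mode, and the reseed_threshold of 50 forces frequent reseeds until the async seed lands. A hedged sketch of that allocation policy (get_jent_or_null() is illustrative, not a kernel function):

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/fips.h>

static struct crypto_rng *get_jent_or_null(void)
{
        struct crypto_rng *jent = crypto_alloc_rng("jitterentropy_rng",
                                                   0, 0);

        if (IS_ERR(jent) && PTR_ERR(jent) == -ENOENT && !fips_enabled)
                return NULL;    /* continue without the Jitter RNG */

        return jent;            /* valid handle, or IS_ERR() for caller */
}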
@@ -1477,32 +1510,12 @@ static int drbg_generate_long(struct drbg_state *drbg, | |||
1477 | static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, | 1510 | static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, |
1478 | int coreref, bool pr) | 1511 | int coreref, bool pr) |
1479 | { | 1512 | { |
1480 | int ret = -ENOMEM; | 1513 | int ret; |
1514 | bool reseed = true; | ||
1481 | 1515 | ||
1482 | pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " | 1516 | pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " |
1483 | "%s\n", coreref, pr ? "enabled" : "disabled"); | 1517 | "%s\n", coreref, pr ? "enabled" : "disabled"); |
1484 | drbg->core = &drbg_cores[coreref]; | 1518 | mutex_lock(&drbg->drbg_mutex); |
1485 | drbg->pr = pr; | ||
1486 | drbg->seeded = false; | ||
1487 | switch (drbg->core->flags & DRBG_TYPE_MASK) { | ||
1488 | #ifdef CONFIG_CRYPTO_DRBG_HMAC | ||
1489 | case DRBG_HMAC: | ||
1490 | drbg->d_ops = &drbg_hmac_ops; | ||
1491 | break; | ||
1492 | #endif /* CONFIG_CRYPTO_DRBG_HMAC */ | ||
1493 | #ifdef CONFIG_CRYPTO_DRBG_HASH | ||
1494 | case DRBG_HASH: | ||
1495 | drbg->d_ops = &drbg_hash_ops; | ||
1496 | break; | ||
1497 | #endif /* CONFIG_CRYPTO_DRBG_HASH */ | ||
1498 | #ifdef CONFIG_CRYPTO_DRBG_CTR | ||
1499 | case DRBG_CTR: | ||
1500 | drbg->d_ops = &drbg_ctr_ops; | ||
1501 | break; | ||
1502 | #endif /* CONFIG_CRYPTO_DRBG_CTR */ | ||
1503 | default: | ||
1504 | return -EOPNOTSUPP; | ||
1505 | } | ||
1506 | 1519 | ||
1507 | /* 9.1 step 1 is implicit with the selected DRBG type */ | 1520 | /* 9.1 step 1 is implicit with the selected DRBG type */ |
1508 | 1521 | ||
@@ -1514,22 +1527,52 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, | |||
1514 | 1527 | ||
1515 | /* 9.1 step 4 is implicit in drbg_sec_strength */ | 1528 | /* 9.1 step 4 is implicit in drbg_sec_strength */ |
1516 | 1529 | ||
1517 | ret = drbg_alloc_state(drbg); | 1530 | if (!drbg->core) { |
1518 | if (ret) | 1531 | drbg->core = &drbg_cores[coreref]; |
1519 | return ret; | 1532 | drbg->pr = pr; |
1533 | drbg->seeded = false; | ||
1534 | drbg->reseed_threshold = drbg_max_requests(drbg); | ||
1520 | 1535 | ||
1521 | ret = -EFAULT; | 1536 | ret = drbg_alloc_state(drbg); |
1522 | if (drbg->d_ops->crypto_init(drbg)) | 1537 | if (ret) |
1523 | goto err; | 1538 | goto unlock; |
1524 | ret = drbg_seed(drbg, pers, false); | ||
1525 | drbg->d_ops->crypto_fini(drbg); | ||
1526 | if (ret) | ||
1527 | goto err; | ||
1528 | 1539 | ||
1529 | return 0; | 1540 | ret = -EFAULT; |
1541 | if (drbg->d_ops->crypto_init(drbg)) | ||
1542 | goto err; | ||
1543 | |||
1544 | ret = drbg_prepare_hrng(drbg); | ||
1545 | if (ret) | ||
1546 | goto free_everything; | ||
1547 | |||
1548 | if (IS_ERR(drbg->jent)) { | ||
1549 | ret = PTR_ERR(drbg->jent); | ||
1550 | drbg->jent = NULL; | ||
1551 | if (fips_enabled || ret != -ENOENT) | ||
1552 | goto free_everything; | ||
1553 | pr_info("DRBG: Continuing without Jitter RNG\n"); | ||
1554 | } | ||
1555 | |||
1556 | reseed = false; | ||
1557 | } | ||
1558 | |||
1559 | ret = drbg_seed(drbg, pers, reseed); | ||
1560 | |||
1561 | if (ret && !reseed) | ||
1562 | goto free_everything; | ||
1563 | |||
1564 | mutex_unlock(&drbg->drbg_mutex); | ||
1565 | return ret; | ||
1530 | 1566 | ||
1531 | err: | 1567 | err: |
1532 | drbg_dealloc_state(drbg); | 1568 | drbg_dealloc_state(drbg); |
1569 | unlock: | ||
1570 | mutex_unlock(&drbg->drbg_mutex); | ||
1571 | return ret; | ||
1572 | |||
1573 | free_everything: | ||
1574 | mutex_unlock(&drbg->drbg_mutex); | ||
1575 | drbg_uninstantiate(drbg); | ||
1533 | return ret; | 1576 | return ret; |
1534 | } | 1577 | } |
1535 | 1578 | ||
@@ -1544,10 +1587,17 @@ err: | |||
1544 | */ | 1587 | */ |
1545 | static int drbg_uninstantiate(struct drbg_state *drbg) | 1588 | static int drbg_uninstantiate(struct drbg_state *drbg) |
1546 | { | 1589 | { |
1547 | spin_lock_bh(&drbg->drbg_lock); | 1590 | if (drbg->random_ready.func) { |
1591 | del_random_ready_callback(&drbg->random_ready); | ||
1592 | cancel_work_sync(&drbg->seed_work); | ||
1593 | crypto_free_rng(drbg->jent); | ||
1594 | drbg->jent = NULL; | ||
1595 | } | ||
1596 | |||
1597 | if (drbg->d_ops) | ||
1598 | drbg->d_ops->crypto_fini(drbg); | ||
1548 | drbg_dealloc_state(drbg); | 1599 | drbg_dealloc_state(drbg); |
1549 | /* no scrubbing of test_data -- this shall survive an uninstantiate */ | 1600 | /* no scrubbing of test_data -- this shall survive an uninstantiate */ |
1550 | spin_unlock_bh(&drbg->drbg_lock); | ||
1551 | return 0; | 1601 | return 0; |
1552 | } | 1602 | } |
1553 | 1603 | ||
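Note: the teardown order in drbg_uninstantiate() is load-bearing: unregister the readiness callback before flushing the work, and flush the work before freeing the Jitter RNG handle. The same sequence restated in a hypothetical helper, assuming the drbg_state fields added by this patch:

#include <crypto/drbg.h>
#include <crypto/rng.h>
#include <linux/random.h>
#include <linux/workqueue.h>

static void my_rng_teardown(struct drbg_state *drbg)
{
        /* 1. Unregister first: no new seed work can be scheduled. */
        del_random_ready_callback(&drbg->random_ready);
        /* 2. Then flush: any work already in flight completes here. */
        cancel_work_sync(&drbg->seed_work);
        /* 3. Only now is drbg->jent guaranteed unused and freeable. */
        crypto_free_rng(drbg->jent);
        drbg->jent = NULL;
}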
@@ -1555,16 +1605,17 @@ static int drbg_uninstantiate(struct drbg_state *drbg) | |||
1555 | * Helper function for setting the test data in the DRBG | 1605 | * Helper function for setting the test data in the DRBG |
1556 | * | 1606 | * |
1557 | * @drbg DRBG state handle | 1607 | * @drbg DRBG state handle |
1558 | * @test_data test data to sets | 1608 | * @data test data |
1609 | * @len test data length | ||
1559 | */ | 1610 | */ |
1560 | static inline void drbg_set_testdata(struct drbg_state *drbg, | 1611 | static void drbg_kcapi_set_entropy(struct crypto_rng *tfm, |
1561 | struct drbg_test_data *test_data) | 1612 | const u8 *data, unsigned int len) |
1562 | { | 1613 | { |
1563 | if (!test_data || !test_data->testentropy) | 1614 | struct drbg_state *drbg = crypto_rng_ctx(tfm); |
1564 | return; | 1615 | |
1565 | spin_lock_bh(&drbg->drbg_lock); | 1616 | mutex_lock(&drbg->drbg_mutex); |
1566 | drbg->test_data = test_data; | 1617 | drbg_string_fill(&drbg->test_data, data, len); |
1567 | spin_unlock_bh(&drbg->drbg_lock); | 1618 | mutex_unlock(&drbg->drbg_mutex); |
1568 | } | 1619 | } |
1569 | 1620 | ||
1570 | /*************************************************************** | 1621 | /*************************************************************** |
@@ -1584,7 +1635,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) | |||
1584 | 1635 | ||
1585 | tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0); | 1636 | tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0); |
1586 | if (IS_ERR(tfm)) { | 1637 | if (IS_ERR(tfm)) { |
1587 | pr_info("DRBG: could not allocate digest TFM handle\n"); | 1638 | pr_info("DRBG: could not allocate digest TFM handle: %s\n", |
1639 | drbg->core->backend_cra_name); | ||
1588 | return PTR_ERR(tfm); | 1640 | return PTR_ERR(tfm); |
1589 | } | 1641 | } |
1590 | BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm)); | 1642 | BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm)); |
@@ -1635,7 +1687,8 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) | |||
1635 | 1687 | ||
1636 | tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); | 1688 | tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); |
1637 | if (IS_ERR(tfm)) { | 1689 | if (IS_ERR(tfm)) { |
1638 | pr_info("DRBG: could not allocate cipher TFM handle\n"); | 1690 | pr_info("DRBG: could not allocate cipher TFM handle: %s\n", |
1691 | drbg->core->backend_cra_name); | ||
1639 | return PTR_ERR(tfm); | 1692 | return PTR_ERR(tfm); |
1640 | } | 1693 | } |
1641 | BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); | 1694 | BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); |
@@ -1714,15 +1767,10 @@ static inline void drbg_convert_tfm_core(const char *cra_driver_name, | |||
1714 | static int drbg_kcapi_init(struct crypto_tfm *tfm) | 1767 | static int drbg_kcapi_init(struct crypto_tfm *tfm) |
1715 | { | 1768 | { |
1716 | struct drbg_state *drbg = crypto_tfm_ctx(tfm); | 1769 | struct drbg_state *drbg = crypto_tfm_ctx(tfm); |
1717 | bool pr = false; | ||
1718 | int coreref = 0; | ||
1719 | 1770 | ||
1720 | drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr); | 1771 | mutex_init(&drbg->drbg_mutex); |
1721 | /* | 1772 | |
1722 | * when personalization string is needed, the caller must call reset | 1773 | return 0; |
1723 | * and provide the personalization string as seed information | ||
1724 | */ | ||
1725 | return drbg_instantiate(drbg, NULL, coreref, pr); | ||
1726 | } | 1774 | } |
1727 | 1775 | ||
1728 | static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) | 1776 | static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) |
@@ -1734,65 +1782,49 @@ static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) | |||
1734 | * Generate random numbers invoked by the kernel crypto API: | 1782 | * Generate random numbers invoked by the kernel crypto API: |
1735 | * The API of the kernel crypto API is extended as follows: | 1783 | * The API of the kernel crypto API is extended as follows: |
1736 | * | 1784 | * |
1737 | * If dlen is larger than zero, rdata is interpreted as the output buffer | 1785 | * src is additional input supplied to the RNG. |
1738 | * where random data is to be stored. | 1786 | * slen is the length of src. |
1739 | * | 1787 | * dst is the output buffer where random data is to be stored. |
1740 | * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen | 1788 | * dlen is the length of dst. |
1741 | * which holds the additional information string that is used for the | ||
1742 | * DRBG generation process. The output buffer that is to be used to store | ||
1743 | * data is also pointed to by struct drbg_gen. | ||
1744 | */ | 1789 | */ |
1745 | static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata, | 1790 | static int drbg_kcapi_random(struct crypto_rng *tfm, |
1746 | unsigned int dlen) | 1791 | const u8 *src, unsigned int slen, |
1792 | u8 *dst, unsigned int dlen) | ||
1747 | { | 1793 | { |
1748 | struct drbg_state *drbg = crypto_rng_ctx(tfm); | 1794 | struct drbg_state *drbg = crypto_rng_ctx(tfm); |
1749 | if (0 < dlen) { | 1795 | struct drbg_string *addtl = NULL; |
1750 | return drbg_generate_long(drbg, rdata, dlen, NULL); | 1796 | struct drbg_string string; |
1751 | } else { | 1797 | |
1752 | struct drbg_gen *data = (struct drbg_gen *)rdata; | 1798 | if (slen) { |
1753 | struct drbg_string addtl; | ||
1754 | /* catch NULL pointer */ | ||
1755 | if (!data) | ||
1756 | return 0; | ||
1757 | drbg_set_testdata(drbg, data->test_data); | ||
1758 | /* linked list variable is now local to allow modification */ | 1799 | /* linked list variable is now local to allow modification */ |
1759 | drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len); | 1800 | drbg_string_fill(&string, src, slen); |
1760 | return drbg_generate_long(drbg, data->outbuf, data->outlen, | 1801 | addtl = &string; |
1761 | &addtl); | ||
1762 | } | 1802 | } |
1803 | |||
1804 | return drbg_generate_long(drbg, dst, dlen, addtl); | ||
1763 | } | 1805 | } |
1764 | 1806 | ||
1765 | /* | 1807 | /* |
1766 | * Reset the DRBG invoked by the kernel crypto API | 1808 | * Seed the DRBG invoked by the kernel crypto API |
1767 | * The reset implies a full re-initialization of the DRBG. Similar to the | ||
1768 | * generate function of drbg_kcapi_random, this function extends the | ||
1769 | * kernel crypto API interface with struct drbg_gen | ||
1770 | */ | 1809 | */ |
1771 | static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | 1810 | static int drbg_kcapi_seed(struct crypto_rng *tfm, |
1811 | const u8 *seed, unsigned int slen) | ||
1772 | { | 1812 | { |
1773 | struct drbg_state *drbg = crypto_rng_ctx(tfm); | 1813 | struct drbg_state *drbg = crypto_rng_ctx(tfm); |
1774 | struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm); | 1814 | struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm); |
1775 | bool pr = false; | 1815 | bool pr = false; |
1776 | struct drbg_string seed_string; | 1816 | struct drbg_string string; |
1817 | struct drbg_string *seed_string = NULL; | ||
1777 | int coreref = 0; | 1818 | int coreref = 0; |
1778 | 1819 | ||
1779 | drbg_uninstantiate(drbg); | ||
1780 | drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, | 1820 | drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, |
1781 | &pr); | 1821 | &pr); |
1782 | if (0 < slen) { | 1822 | if (0 < slen) { |
1783 | drbg_string_fill(&seed_string, seed, slen); | 1823 | drbg_string_fill(&string, seed, slen); |
1784 | return drbg_instantiate(drbg, &seed_string, coreref, pr); | 1824 | seed_string = &string; |
1785 | } else { | ||
1786 | struct drbg_gen *data = (struct drbg_gen *)seed; | ||
1787 | /* allow invocation of API call with NULL, 0 */ | ||
1788 | if (!data) | ||
1789 | return drbg_instantiate(drbg, NULL, coreref, pr); | ||
1790 | drbg_set_testdata(drbg, data->test_data); | ||
1791 | /* linked list variable is now local to allow modification */ | ||
1792 | drbg_string_fill(&seed_string, data->addtl->buf, | ||
1793 | data->addtl->len); | ||
1794 | return drbg_instantiate(drbg, &seed_string, coreref, pr); | ||
1795 | } | 1825 | } |
1826 | |||
1827 | return drbg_instantiate(drbg, seed_string, coreref, pr); | ||
1796 | } | 1828 | } |
1797 | 1829 | ||
1798 | /*************************************************************** | 1830 | /*************************************************************** |
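Note: from a consumer's perspective, the converted interface is the plain crypto_rng API: crypto_rng_reset() reaches drbg_kcapi_seed(), and crypto_rng_get_bytes() reaches drbg_kcapi_random() via drbg_generate_long(). A minimal sketch, assuming the drbg_nopr_hmac_sha256 backend is available; error handling is trimmed:

#include <crypto/rng.h>
#include <linux/err.h>

int demo_drbg_use(u8 *out, unsigned int outlen)
{
        struct crypto_rng *rng;
        u8 pers[] = "demo personalization"; /* optional seed input */
        int err;

        rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* maps to drbg_kcapi_seed(); slen == 0 is also allowed */
        err = crypto_rng_reset(rng, pers, sizeof(pers) - 1);
        if (!err)
                /* maps to drbg_kcapi_random() */
                err = crypto_rng_get_bytes(rng, out, outlen);

        crypto_free_rng(rng);
        return err;
}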
@@ -1811,7 +1843,6 @@ static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | |||
1811 | */ | 1843 | */ |
1812 | static inline int __init drbg_healthcheck_sanity(void) | 1844 | static inline int __init drbg_healthcheck_sanity(void) |
1813 | { | 1845 | { |
1814 | #ifdef CONFIG_CRYPTO_FIPS | ||
1815 | int len = 0; | 1846 | int len = 0; |
1816 | #define OUTBUFLEN 16 | 1847 | #define OUTBUFLEN 16 |
1817 | unsigned char buf[OUTBUFLEN]; | 1848 | unsigned char buf[OUTBUFLEN]; |
@@ -1839,6 +1870,8 @@ static inline int __init drbg_healthcheck_sanity(void) | |||
1839 | if (!drbg) | 1870 | if (!drbg) |
1840 | return -ENOMEM; | 1871 | return -ENOMEM; |
1841 | 1872 | ||
1873 | mutex_init(&drbg->drbg_mutex); | ||
1874 | |||
1842 | /* | 1875 | /* |
1843 | * if the following tests fail, it is likely that there is a buffer | 1876 | * if the following tests fail, it is likely that there is a buffer |
1844 | * overflow as buf is much smaller than the requested or provided | 1877 | * overflow as buf is much smaller than the requested or provided |
@@ -1877,37 +1910,33 @@ static inline int __init drbg_healthcheck_sanity(void) | |||
1877 | outbuf: | 1910 | outbuf: |
1878 | kzfree(drbg); | 1911 | kzfree(drbg); |
1879 | return rc; | 1912 | return rc; |
1880 | #else /* CONFIG_CRYPTO_FIPS */ | ||
1881 | return 0; | ||
1882 | #endif /* CONFIG_CRYPTO_FIPS */ | ||
1883 | } | 1913 | } |
1884 | 1914 | ||
1885 | static struct crypto_alg drbg_algs[22]; | 1915 | static struct rng_alg drbg_algs[22]; |
1886 | 1916 | ||
1887 | /* | 1917 | /* |
1888 | * Fill the array drbg_algs used to register the different DRBGs | 1918 | * Fill the array drbg_algs used to register the different DRBGs |
1889 | * with the kernel crypto API. To fill the array, the information | 1919 | * with the kernel crypto API. To fill the array, the information |
1890 | * from drbg_cores[] is used. | 1920 | * from drbg_cores[] is used. |
1891 | */ | 1921 | */ |
1892 | static inline void __init drbg_fill_array(struct crypto_alg *alg, | 1922 | static inline void __init drbg_fill_array(struct rng_alg *alg, |
1893 | const struct drbg_core *core, int pr) | 1923 | const struct drbg_core *core, int pr) |
1894 | { | 1924 | { |
1895 | int pos = 0; | 1925 | int pos = 0; |
1896 | static int priority = 100; | 1926 | static int priority = 200; |
1897 | 1927 | ||
1898 | memset(alg, 0, sizeof(struct crypto_alg)); | 1928 | memcpy(alg->base.cra_name, "stdrng", 6); |
1899 | memcpy(alg->cra_name, "stdrng", 6); | ||
1900 | if (pr) { | 1929 | if (pr) { |
1901 | memcpy(alg->cra_driver_name, "drbg_pr_", 8); | 1930 | memcpy(alg->base.cra_driver_name, "drbg_pr_", 8); |
1902 | pos = 8; | 1931 | pos = 8; |
1903 | } else { | 1932 | } else { |
1904 | memcpy(alg->cra_driver_name, "drbg_nopr_", 10); | 1933 | memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10); |
1905 | pos = 10; | 1934 | pos = 10; |
1906 | } | 1935 | } |
1907 | memcpy(alg->cra_driver_name + pos, core->cra_name, | 1936 | memcpy(alg->base.cra_driver_name + pos, core->cra_name, |
1908 | strlen(core->cra_name)); | 1937 | strlen(core->cra_name)); |
1909 | 1938 | ||
1910 | alg->cra_priority = priority; | 1939 | alg->base.cra_priority = priority; |
1911 | priority++; | 1940 | priority++; |
1912 | /* | 1941 | /* |
1913 | * If FIPS mode enabled, the selected DRBG shall have the | 1942 | * If FIPS mode enabled, the selected DRBG shall have the |
@@ -1915,17 +1944,16 @@ static inline void __init drbg_fill_array(struct crypto_alg *alg, | |||
1915 | * it is selected. | 1944 | * it is selected. |
1916 | */ | 1945 | */ |
1917 | if (fips_enabled) | 1946 | if (fips_enabled) |
1918 | alg->cra_priority += 200; | 1947 | alg->base.cra_priority += 200; |
1919 | 1948 | ||
1920 | alg->cra_flags = CRYPTO_ALG_TYPE_RNG; | 1949 | alg->base.cra_ctxsize = sizeof(struct drbg_state); |
1921 | alg->cra_ctxsize = sizeof(struct drbg_state); | 1950 | alg->base.cra_module = THIS_MODULE; |
1922 | alg->cra_type = &crypto_rng_type; | 1951 | alg->base.cra_init = drbg_kcapi_init; |
1923 | alg->cra_module = THIS_MODULE; | 1952 | alg->base.cra_exit = drbg_kcapi_cleanup; |
1924 | alg->cra_init = drbg_kcapi_init; | 1953 | alg->generate = drbg_kcapi_random; |
1925 | alg->cra_exit = drbg_kcapi_cleanup; | 1954 | alg->seed = drbg_kcapi_seed; |
1926 | alg->cra_u.rng.rng_make_random = drbg_kcapi_random; | 1955 | alg->set_ent = drbg_kcapi_set_entropy; |
1927 | alg->cra_u.rng.rng_reset = drbg_kcapi_reset; | 1956 | alg->seedsize = 0; |
1928 | alg->cra_u.rng.seedsize = 0; | ||
1929 | } | 1957 | } |
1930 | 1958 | ||
1931 | static int __init drbg_init(void) | 1959 | static int __init drbg_init(void) |
@@ -1958,12 +1986,12 @@ static int __init drbg_init(void) | |||
1958 | drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1); | 1986 | drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1); |
1959 | for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++) | 1987 | for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++) |
1960 | drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0); | 1988 | drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0); |
1961 | return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); | 1989 | return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); |
1962 | } | 1990 | } |
1963 | 1991 | ||
1964 | static void __exit drbg_exit(void) | 1992 | static void __exit drbg_exit(void) |
1965 | { | 1993 | { |
1966 | crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); | 1994 | crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); |
1967 | } | 1995 | } |
1968 | 1996 | ||
1969 | module_init(drbg_init); | 1997 | module_init(drbg_init); |
@@ -1984,3 +2012,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) " | |||
1984 | CRYPTO_DRBG_HASH_STRING | 2012 | CRYPTO_DRBG_HASH_STRING |
1985 | CRYPTO_DRBG_HMAC_STRING | 2013 | CRYPTO_DRBG_HMAC_STRING |
1986 | CRYPTO_DRBG_CTR_STRING); | 2014 | CRYPTO_DRBG_CTR_STRING); |
2015 | MODULE_ALIAS_CRYPTO("stdrng"); | ||
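Note: drbg_fill_array() now populates struct rng_alg rather than struct crypto_alg, with generate/seed/set_ent as first-class ops. A sketch of the same registration shape for a hypothetical single-algorithm driver; the my_rng_* names and the constant output are placeholders, not a usable RNG:

#include <crypto/internal/rng.h>
#include <linux/module.h>
#include <linux/string.h>

struct my_rng_ctx { u32 state; };

static int my_rng_generate(struct crypto_rng *tfm,
                           const u8 *src, unsigned int slen,
                           u8 *dst, unsigned int dlen)
{
        memset(dst, 0, dlen);   /* placeholder output only */
        return 0;
}

static int my_rng_seed(struct crypto_rng *tfm,
                       const u8 *seed, unsigned int slen)
{
        return 0;
}

static struct rng_alg my_rng = {
        .generate               = my_rng_generate,
        .seed                   = my_rng_seed,
        .seedsize               = 0,
        .base                   = {
                .cra_name        = "stdrng",
                .cra_driver_name = "my_rng",
                .cra_priority    = 100,
                .cra_ctxsize     = sizeof(struct my_rng_ctx),
                .cra_module      = THIS_MODULE,
        },
};

/* crypto_register_rng(&my_rng) in module init; the DRBG registers a
 * whole array at once via crypto_register_rngs() instead. */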
diff --git a/crypto/echainiv.c b/crypto/echainiv.c new file mode 100644 index 000000000000..b6e43dc61356 --- /dev/null +++ b/crypto/echainiv.c | |||
@@ -0,0 +1,312 @@ | |||
1 | /* | ||
2 | * echainiv: Encrypted Chain IV Generator | ||
3 | * | ||
4 | * This generator generates an IV based on a sequence number by xoring it | ||
5 | * with a salt and then encrypting it with the same key as used to encrypt | ||
6 | * the plain text. This algorithm requires that the block size be equal | ||
7 | * to the IV size. It is mainly useful for CBC. | ||
8 | * | ||
9 | * This generator can only be used by algorithms where authentication | ||
10 | * is performed after encryption (i.e., authenc). | ||
11 | * | ||
12 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify it | ||
15 | * under the terms of the GNU General Public License as published by the Free | ||
16 | * Software Foundation; either version 2 of the License, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <crypto/internal/geniv.h> | ||
22 | #include <crypto/null.h> | ||
23 | #include <crypto/rng.h> | ||
24 | #include <crypto/scatterwalk.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/percpu.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/string.h> | ||
33 | |||
34 | #define MAX_IV_SIZE 16 | ||
35 | |||
36 | struct echainiv_ctx { | ||
37 | /* aead_geniv_ctx must be the first element */ | ||
38 | struct aead_geniv_ctx geniv; | ||
39 | struct crypto_blkcipher *null; | ||
40 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | ||
41 | }; | ||
42 | |||
43 | static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); | ||
44 | |||
45 | /* We don't care if we get preempted and read/write IVs from the next CPU. */ | ||
46 | static void echainiv_read_iv(u8 *dst, unsigned size) | ||
47 | { | ||
48 | u32 *a = (u32 *)dst; | ||
49 | u32 __percpu *b = echainiv_iv; | ||
50 | |||
51 | for (; size >= 4; size -= 4) { | ||
52 | *a++ = this_cpu_read(*b); | ||
53 | b++; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | static void echainiv_write_iv(const u8 *src, unsigned size) | ||
58 | { | ||
59 | const u32 *a = (const u32 *)src; | ||
60 | u32 __percpu *b = echainiv_iv; | ||
61 | |||
62 | for (; size >= 4; size -= 4) { | ||
63 | this_cpu_write(*b, *a); | ||
64 | a++; | ||
65 | b++; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static void echainiv_encrypt_complete2(struct aead_request *req, int err) | ||
70 | { | ||
71 | struct aead_request *subreq = aead_request_ctx(req); | ||
72 | struct crypto_aead *geniv; | ||
73 | unsigned int ivsize; | ||
74 | |||
75 | if (err == -EINPROGRESS) | ||
76 | return; | ||
77 | |||
78 | if (err) | ||
79 | goto out; | ||
80 | |||
81 | geniv = crypto_aead_reqtfm(req); | ||
82 | ivsize = crypto_aead_ivsize(geniv); | ||
83 | |||
84 | echainiv_write_iv(subreq->iv, ivsize); | ||
85 | |||
86 | if (req->iv != subreq->iv) | ||
87 | memcpy(req->iv, subreq->iv, ivsize); | ||
88 | |||
89 | out: | ||
90 | if (req->iv != subreq->iv) | ||
91 | kzfree(subreq->iv); | ||
92 | } | ||
93 | |||
94 | static void echainiv_encrypt_complete(struct crypto_async_request *base, | ||
95 | int err) | ||
96 | { | ||
97 | struct aead_request *req = base->data; | ||
98 | |||
99 | echainiv_encrypt_complete2(req, err); | ||
100 | aead_request_complete(req, err); | ||
101 | } | ||
102 | |||
103 | static int echainiv_encrypt(struct aead_request *req) | ||
104 | { | ||
105 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
106 | struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); | ||
107 | struct aead_request *subreq = aead_request_ctx(req); | ||
108 | crypto_completion_t compl; | ||
109 | void *data; | ||
110 | u8 *info; | ||
111 | unsigned int ivsize = crypto_aead_ivsize(geniv); | ||
112 | int err; | ||
113 | |||
114 | if (req->cryptlen < ivsize) | ||
115 | return -EINVAL; | ||
116 | |||
117 | aead_request_set_tfm(subreq, ctx->geniv.child); | ||
118 | |||
119 | compl = echainiv_encrypt_complete; | ||
120 | data = req; | ||
121 | info = req->iv; | ||
122 | |||
123 | if (req->src != req->dst) { | ||
124 | struct blkcipher_desc desc = { | ||
125 | .tfm = ctx->null, | ||
126 | }; | ||
127 | |||
128 | err = crypto_blkcipher_encrypt( | ||
129 | &desc, req->dst, req->src, | ||
130 | req->assoclen + req->cryptlen); | ||
131 | if (err) | ||
132 | return err; | ||
133 | } | ||
134 | |||
135 | if (unlikely(!IS_ALIGNED((unsigned long)info, | ||
136 | crypto_aead_alignmask(geniv) + 1))) { | ||
137 | info = kmalloc(ivsize, req->base.flags & | ||
138 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | ||
139 | GFP_ATOMIC); | ||
140 | if (!info) | ||
141 | return -ENOMEM; | ||
142 | |||
143 | memcpy(info, req->iv, ivsize); | ||
144 | } | ||
145 | |||
146 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
147 | aead_request_set_crypt(subreq, req->dst, req->dst, | ||
148 | req->cryptlen - ivsize, info); | ||
149 | aead_request_set_ad(subreq, req->assoclen + ivsize); | ||
150 | |||
151 | crypto_xor(info, ctx->salt, ivsize); | ||
152 | scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); | ||
153 | echainiv_read_iv(info, ivsize); | ||
154 | |||
155 | err = crypto_aead_encrypt(subreq); | ||
156 | echainiv_encrypt_complete2(req, err); | ||
157 | return err; | ||
158 | } | ||
159 | |||
160 | static int echainiv_decrypt(struct aead_request *req) | ||
161 | { | ||
162 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
163 | struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); | ||
164 | struct aead_request *subreq = aead_request_ctx(req); | ||
165 | crypto_completion_t compl; | ||
166 | void *data; | ||
167 | unsigned int ivsize = crypto_aead_ivsize(geniv); | ||
168 | |||
169 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) | ||
170 | return -EINVAL; | ||
171 | |||
172 | aead_request_set_tfm(subreq, ctx->geniv.child); | ||
173 | |||
174 | compl = req->base.complete; | ||
175 | data = req->base.data; | ||
176 | |||
177 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
178 | aead_request_set_crypt(subreq, req->src, req->dst, | ||
179 | req->cryptlen - ivsize, req->iv); | ||
180 | aead_request_set_ad(subreq, req->assoclen + ivsize); | ||
181 | |||
182 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); | ||
183 | if (req->src != req->dst) | ||
184 | scatterwalk_map_and_copy(req->iv, req->dst, | ||
185 | req->assoclen, ivsize, 1); | ||
186 | |||
187 | return crypto_aead_decrypt(subreq); | ||
188 | } | ||
189 | |||
190 | static int echainiv_init(struct crypto_tfm *tfm) | ||
191 | { | ||
192 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
193 | struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); | ||
194 | int err; | ||
195 | |||
196 | spin_lock_init(&ctx->geniv.lock); | ||
197 | |||
198 | crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); | ||
199 | |||
200 | err = crypto_get_default_rng(); | ||
201 | if (err) | ||
202 | goto out; | ||
203 | |||
204 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
205 | crypto_aead_ivsize(geniv)); | ||
206 | crypto_put_default_rng(); | ||
207 | if (err) | ||
208 | goto out; | ||
209 | |||
210 | ctx->null = crypto_get_default_null_skcipher(); | ||
211 | err = PTR_ERR(ctx->null); | ||
212 | if (IS_ERR(ctx->null)) | ||
213 | goto out; | ||
214 | |||
215 | err = aead_geniv_init(tfm); | ||
216 | if (err) | ||
217 | goto drop_null; | ||
218 | |||
219 | ctx->geniv.child = geniv->child; | ||
220 | geniv->child = geniv; | ||
221 | |||
222 | out: | ||
223 | return err; | ||
224 | |||
225 | drop_null: | ||
226 | crypto_put_default_null_skcipher(); | ||
227 | goto out; | ||
228 | } | ||
229 | |||
230 | static void echainiv_exit(struct crypto_tfm *tfm) | ||
231 | { | ||
232 | struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm); | ||
233 | |||
234 | crypto_free_aead(ctx->geniv.child); | ||
235 | crypto_put_default_null_skcipher(); | ||
236 | } | ||
237 | |||
238 | static int echainiv_aead_create(struct crypto_template *tmpl, | ||
239 | struct rtattr **tb) | ||
240 | { | ||
241 | struct aead_instance *inst; | ||
242 | struct crypto_aead_spawn *spawn; | ||
243 | struct aead_alg *alg; | ||
244 | int err; | ||
245 | |||
246 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); | ||
247 | |||
248 | if (IS_ERR(inst)) | ||
249 | return PTR_ERR(inst); | ||
250 | |||
251 | spawn = aead_instance_ctx(inst); | ||
252 | alg = crypto_spawn_aead_alg(spawn); | ||
253 | |||
254 | if (alg->base.cra_aead.encrypt) | ||
255 | goto done; | ||
256 | |||
257 | err = -EINVAL; | ||
258 | if (inst->alg.ivsize & (sizeof(u32) - 1) || | ||
259 | inst->alg.ivsize > MAX_IV_SIZE) | ||
260 | goto free_inst; | ||
261 | |||
262 | inst->alg.encrypt = echainiv_encrypt; | ||
263 | inst->alg.decrypt = echainiv_decrypt; | ||
264 | |||
265 | inst->alg.base.cra_init = echainiv_init; | ||
266 | inst->alg.base.cra_exit = echainiv_exit; | ||
267 | |||
268 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | ||
269 | inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx); | ||
270 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; | ||
271 | |||
272 | done: | ||
273 | err = aead_register_instance(tmpl, inst); | ||
274 | if (err) | ||
275 | goto free_inst; | ||
276 | |||
277 | out: | ||
278 | return err; | ||
279 | |||
280 | free_inst: | ||
281 | aead_geniv_free(inst); | ||
282 | goto out; | ||
283 | } | ||
284 | |||
285 | static void echainiv_free(struct crypto_instance *inst) | ||
286 | { | ||
287 | aead_geniv_free(aead_instance(inst)); | ||
288 | } | ||
289 | |||
290 | static struct crypto_template echainiv_tmpl = { | ||
291 | .name = "echainiv", | ||
292 | .create = echainiv_aead_create, | ||
293 | .free = echainiv_free, | ||
294 | .module = THIS_MODULE, | ||
295 | }; | ||
296 | |||
297 | static int __init echainiv_module_init(void) | ||
298 | { | ||
299 | return crypto_register_template(&echainiv_tmpl); | ||
300 | } | ||
301 | |||
302 | static void __exit echainiv_module_exit(void) | ||
303 | { | ||
304 | crypto_unregister_template(&echainiv_tmpl); | ||
305 | } | ||
306 | |||
307 | module_init(echainiv_module_init); | ||
308 | module_exit(echainiv_module_exit); | ||
309 | |||
310 | MODULE_LICENSE("GPL"); | ||
311 | MODULE_DESCRIPTION("Encrypted Chain IV Generator"); | ||
312 | MODULE_ALIAS_CRYPTO("echainiv"); | ||
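Note: echainiv is a template, so it is normally reached by composing its name around an authenc() AEAD whose IV size equals the cipher block size. One plausible composition as a sketch (error handling trimmed):

#include <crypto/aead.h>
#include <linux/err.h>

static struct crypto_aead *alloc_echainiv_aead(void)
{
        /* 16-byte AES-CBC IV: a multiple of u32, within MAX_IV_SIZE */
        return crypto_alloc_aead(
                "echainiv(authenc(hmac(sha256),cbc(aes)))", 0, 0);
}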
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c index f116fae766f8..16dda72fc4f8 100644 --- a/crypto/eseqiv.c +++ b/crypto/eseqiv.c | |||
@@ -146,35 +146,13 @@ out: | |||
146 | return err; | 146 | return err; |
147 | } | 147 | } |
148 | 148 | ||
149 | static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req) | ||
150 | { | ||
151 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | ||
152 | struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | ||
153 | int err = 0; | ||
154 | |||
155 | spin_lock_bh(&ctx->lock); | ||
156 | if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first) | ||
157 | goto unlock; | ||
158 | |||
159 | crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt; | ||
160 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
161 | crypto_ablkcipher_ivsize(geniv)); | ||
162 | |||
163 | unlock: | ||
164 | spin_unlock_bh(&ctx->lock); | ||
165 | |||
166 | if (err) | ||
167 | return err; | ||
168 | |||
169 | return eseqiv_givencrypt(req); | ||
170 | } | ||
171 | |||
172 | static int eseqiv_init(struct crypto_tfm *tfm) | 149 | static int eseqiv_init(struct crypto_tfm *tfm) |
173 | { | 150 | { |
174 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); | 151 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); |
175 | struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 152 | struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
176 | unsigned long alignmask; | 153 | unsigned long alignmask; |
177 | unsigned int reqsize; | 154 | unsigned int reqsize; |
155 | int err; | ||
178 | 156 | ||
179 | spin_lock_init(&ctx->lock); | 157 | spin_lock_init(&ctx->lock); |
180 | 158 | ||
@@ -198,7 +176,15 @@ static int eseqiv_init(struct crypto_tfm *tfm) | |||
198 | tfm->crt_ablkcipher.reqsize = reqsize + | 176 | tfm->crt_ablkcipher.reqsize = reqsize + |
199 | sizeof(struct ablkcipher_request); | 177 | sizeof(struct ablkcipher_request); |
200 | 178 | ||
201 | return skcipher_geniv_init(tfm); | 179 | err = 0; |
180 | if (!crypto_get_default_rng()) { | ||
181 | crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt; | ||
182 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
183 | crypto_ablkcipher_ivsize(geniv)); | ||
184 | crypto_put_default_rng(); | ||
185 | } | ||
186 | |||
187 | return err ?: skcipher_geniv_init(tfm); | ||
202 | } | 188 | } |
203 | 189 | ||
204 | static struct crypto_template eseqiv_tmpl; | 190 | static struct crypto_template eseqiv_tmpl; |
@@ -208,20 +194,14 @@ static struct crypto_instance *eseqiv_alloc(struct rtattr **tb) | |||
208 | struct crypto_instance *inst; | 194 | struct crypto_instance *inst; |
209 | int err; | 195 | int err; |
210 | 196 | ||
211 | err = crypto_get_default_rng(); | ||
212 | if (err) | ||
213 | return ERR_PTR(err); | ||
214 | |||
215 | inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0); | 197 | inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0); |
216 | if (IS_ERR(inst)) | 198 | if (IS_ERR(inst)) |
217 | goto put_rng; | 199 | goto out; |
218 | 200 | ||
219 | err = -EINVAL; | 201 | err = -EINVAL; |
220 | if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize) | 202 | if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize) |
221 | goto free_inst; | 203 | goto free_inst; |
222 | 204 | ||
223 | inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first; | ||
224 | |||
225 | inst->alg.cra_init = eseqiv_init; | 205 | inst->alg.cra_init = eseqiv_init; |
226 | inst->alg.cra_exit = skcipher_geniv_exit; | 206 | inst->alg.cra_exit = skcipher_geniv_exit; |
227 | 207 | ||
@@ -234,21 +214,13 @@ out: | |||
234 | free_inst: | 214 | free_inst: |
235 | skcipher_geniv_free(inst); | 215 | skcipher_geniv_free(inst); |
236 | inst = ERR_PTR(err); | 216 | inst = ERR_PTR(err); |
237 | put_rng: | ||
238 | crypto_put_default_rng(); | ||
239 | goto out; | 217 | goto out; |
240 | } | 218 | } |
241 | 219 | ||
242 | static void eseqiv_free(struct crypto_instance *inst) | ||
243 | { | ||
244 | skcipher_geniv_free(inst); | ||
245 | crypto_put_default_rng(); | ||
246 | } | ||
247 | |||
248 | static struct crypto_template eseqiv_tmpl = { | 220 | static struct crypto_template eseqiv_tmpl = { |
249 | .name = "eseqiv", | 221 | .name = "eseqiv", |
250 | .alloc = eseqiv_alloc, | 222 | .alloc = eseqiv_alloc, |
251 | .free = eseqiv_free, | 223 | .free = skcipher_geniv_free, |
252 | .module = THIS_MODULE, | 224 | .module = THIS_MODULE, |
253 | }; | 225 | }; |
254 | 226 | ||
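The eseqiv hunks above trade lazy, first-use seeding (the removed eseqiv_givencrypt_first trampoline, which installed the real givencrypt handler under ctx->lock) for eager seeding in eseqiv_init: the default RNG is held only long enough to fill ctx->salt and is dropped before init returns, so neither eseqiv_alloc nor a dedicated eseqiv_free has to manage a long-lived RNG reference any more. The "err ?: f()" return is GNU C's conditional with omitted middle operand. A minimal userspace sketch of that idiom, with made-up seed_salt()/finish_init() helpers standing in for the crypto calls:

/*
 * Illustrative sketch only (GNU C): "a ?: b" evaluates a once and yields
 * it if non-zero, else b -- equivalent to a ? a : b.
 * seed_salt() and finish_init() are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>

static int seed_salt(void)   { return 0; }  /* 0 on success, -errno on failure */
static int finish_init(void) { return 0; }

static int init_tfm(void)
{
	int err = seed_salt();

	return err ?: finish_init();  /* stop at the first failure */
}

int main(void)
{
	printf("init_tfm() = %d\n", init_tfm());
	return 0;
}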
diff --git a/crypto/fips.c b/crypto/fips.c index 553970081c62..9d627c1cf8bc 100644 --- a/crypto/fips.c +++ b/crypto/fips.c | |||
@@ -10,7 +10,12 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include "internal.h" | 13 | #include <linux/export.h> |
14 | #include <linux/fips.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/sysctl.h> | ||
14 | 19 | ||
15 | int fips_enabled; | 20 | int fips_enabled; |
16 | EXPORT_SYMBOL_GPL(fips_enabled); | 21 | EXPORT_SYMBOL_GPL(fips_enabled); |
@@ -25,3 +30,49 @@ static int fips_enable(char *str) | |||
25 | } | 30 | } |
26 | 31 | ||
27 | __setup("fips=", fips_enable); | 32 | __setup("fips=", fips_enable); |
33 | |||
34 | static struct ctl_table crypto_sysctl_table[] = { | ||
35 | { | ||
36 | .procname = "fips_enabled", | ||
37 | .data = &fips_enabled, | ||
38 | .maxlen = sizeof(int), | ||
39 | .mode = 0444, | ||
40 | .proc_handler = proc_dointvec | ||
41 | }, | ||
42 | {} | ||
43 | }; | ||
44 | |||
45 | static struct ctl_table crypto_dir_table[] = { | ||
46 | { | ||
47 | .procname = "crypto", | ||
48 | .mode = 0555, | ||
49 | .child = crypto_sysctl_table | ||
50 | }, | ||
51 | {} | ||
52 | }; | ||
53 | |||
54 | static struct ctl_table_header *crypto_sysctls; | ||
55 | |||
56 | static void crypto_proc_fips_init(void) | ||
57 | { | ||
58 | crypto_sysctls = register_sysctl_table(crypto_dir_table); | ||
59 | } | ||
60 | |||
61 | static void crypto_proc_fips_exit(void) | ||
62 | { | ||
63 | unregister_sysctl_table(crypto_sysctls); | ||
64 | } | ||
65 | |||
66 | static int __init fips_init(void) | ||
67 | { | ||
68 | crypto_proc_fips_init(); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static void __exit fips_exit(void) | ||
73 | { | ||
74 | crypto_proc_fips_exit(); | ||
75 | } | ||
76 | |||
77 | module_init(fips_init); | ||
78 | module_exit(fips_exit); | ||
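The fips.c hunk above stops relying on the private "internal.h" header and publishes fips_enabled as a read-only sysctl: crypto_dir_table creates the /proc/sys/crypto directory (mode 0555) and crypto_sysctl_table hangs the fips_enabled integer under it (mode 0444, rendered by proc_dointvec). A hedged userspace sketch that reads the resulting file; the path follows directly from the two tables:

/*
 * Illustrative sketch: reading the sysctl registered above from userspace.
 * Assumes a kernel carrying this patch; otherwise fopen() simply fails.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/crypto/fips_enabled", "r");
	int fips = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &fips) != 1)
		fips = 0;
	fclose(f);

	printf("FIPS mode: %s\n", fips ? "enabled" : "disabled");
	return 0;
}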
diff --git a/crypto/gcm.c b/crypto/gcm.c index 2e403f6138c1..7d32d4720564 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <crypto/internal/aead.h> | 12 | #include <crypto/internal/aead.h> |
13 | #include <crypto/internal/skcipher.h> | 13 | #include <crypto/internal/skcipher.h> |
14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
15 | #include <crypto/null.h> | ||
15 | #include <crypto/scatterwalk.h> | 16 | #include <crypto/scatterwalk.h> |
16 | #include <crypto/hash.h> | 17 | #include <crypto/hash.h> |
17 | #include "internal.h" | 18 | #include "internal.h" |
@@ -39,7 +40,6 @@ struct crypto_rfc4106_ctx { | |||
39 | 40 | ||
40 | struct crypto_rfc4543_instance_ctx { | 41 | struct crypto_rfc4543_instance_ctx { |
41 | struct crypto_aead_spawn aead; | 42 | struct crypto_aead_spawn aead; |
42 | struct crypto_skcipher_spawn null; | ||
43 | }; | 43 | }; |
44 | 44 | ||
45 | struct crypto_rfc4543_ctx { | 45 | struct crypto_rfc4543_ctx { |
@@ -49,25 +49,22 @@ struct crypto_rfc4543_ctx { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | struct crypto_rfc4543_req_ctx { | 51 | struct crypto_rfc4543_req_ctx { |
52 | u8 auth_tag[16]; | ||
53 | u8 assocbuf[32]; | ||
54 | struct scatterlist cipher[1]; | ||
55 | struct scatterlist payload[2]; | ||
56 | struct scatterlist assoc[2]; | ||
57 | struct aead_request subreq; | 52 | struct aead_request subreq; |
58 | }; | 53 | }; |
59 | 54 | ||
60 | struct crypto_gcm_ghash_ctx { | 55 | struct crypto_gcm_ghash_ctx { |
61 | unsigned int cryptlen; | 56 | unsigned int cryptlen; |
62 | struct scatterlist *src; | 57 | struct scatterlist *src; |
63 | void (*complete)(struct aead_request *req, int err); | 58 | int (*complete)(struct aead_request *req, u32 flags); |
64 | }; | 59 | }; |
65 | 60 | ||
66 | struct crypto_gcm_req_priv_ctx { | 61 | struct crypto_gcm_req_priv_ctx { |
62 | u8 iv[16]; | ||
67 | u8 auth_tag[16]; | 63 | u8 auth_tag[16]; |
68 | u8 iauth_tag[16]; | 64 | u8 iauth_tag[16]; |
69 | struct scatterlist src[2]; | 65 | struct scatterlist src[3]; |
70 | struct scatterlist dst[2]; | 66 | struct scatterlist dst[3]; |
67 | struct scatterlist sg; | ||
71 | struct crypto_gcm_ghash_ctx ghash_ctx; | 68 | struct crypto_gcm_ghash_ctx ghash_ctx; |
72 | union { | 69 | union { |
73 | struct ahash_request ahreq; | 70 | struct ahash_request ahreq; |
@@ -80,7 +77,12 @@ struct crypto_gcm_setkey_result { | |||
80 | struct completion completion; | 77 | struct completion completion; |
81 | }; | 78 | }; |
82 | 79 | ||
83 | static void *gcm_zeroes; | 80 | static struct { |
81 | u8 buf[16]; | ||
82 | struct scatterlist sg; | ||
83 | } *gcm_zeroes; | ||
84 | |||
85 | static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc); | ||
84 | 86 | ||
85 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | 87 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( |
86 | struct aead_request *req) | 88 | struct aead_request *req) |
@@ -120,15 +122,13 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
120 | 122 | ||
121 | crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); | 123 | crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); |
122 | crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & | 124 | crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & |
123 | CRYPTO_TFM_REQ_MASK); | 125 | CRYPTO_TFM_REQ_MASK); |
124 | |||
125 | err = crypto_ablkcipher_setkey(ctr, key, keylen); | 126 | err = crypto_ablkcipher_setkey(ctr, key, keylen); |
127 | crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & | ||
128 | CRYPTO_TFM_RES_MASK); | ||
126 | if (err) | 129 | if (err) |
127 | return err; | 130 | return err; |
128 | 131 | ||
129 | crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & | ||
130 | CRYPTO_TFM_RES_MASK); | ||
131 | |||
132 | data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr), | 132 | data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr), |
133 | GFP_KERNEL); | 133 | GFP_KERNEL); |
134 | if (!data) | 134 | if (!data) |
@@ -163,7 +163,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
163 | CRYPTO_TFM_RES_MASK); | 163 | CRYPTO_TFM_RES_MASK); |
164 | 164 | ||
165 | out: | 165 | out: |
166 | kfree(data); | 166 | kzfree(data); |
167 | return err; | 167 | return err; |
168 | } | 168 | } |
169 | 169 | ||
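Note the kfree() to kzfree() switch just above: the temporary buffer held key-derived material for the GHASH setup, so it is now zeroed before being handed back to the allocator. A minimal userspace analogue of that zero-before-free hygiene (illustrative only; in userspace a plain memset before free() may be elided by the optimizer, so a hardened version would use explicit_bzero() where available):

/*
 * Illustrative sketch of zero-before-free for secret material,
 * mirroring what kzfree() does in the hunk above.
 */
#include <stdlib.h>
#include <string.h>

static void zfree(void *p, size_t len)
{
	if (!p)
		return;
	memset(p, 0, len);	/* scrub key bytes before the allocator reuses them */
	free(p);
}

int main(void)
{
	char *key = malloc(32);

	if (!key)
		return 1;
	memset(key, 0x42, 32);	/* pretend key material */
	zfree(key, 32);
	return 0;
}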
@@ -186,35 +186,46 @@ static int crypto_gcm_setauthsize(struct crypto_aead *tfm, | |||
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | 189 | static void crypto_gcm_init_common(struct aead_request *req) |
190 | struct aead_request *req, | ||
191 | unsigned int cryptlen) | ||
192 | { | 190 | { |
193 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
194 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | ||
195 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 191 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
196 | struct scatterlist *dst; | ||
197 | __be32 counter = cpu_to_be32(1); | 192 | __be32 counter = cpu_to_be32(1); |
193 | struct scatterlist *sg; | ||
198 | 194 | ||
199 | memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag)); | 195 | memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag)); |
200 | memcpy(req->iv + 12, &counter, 4); | 196 | memcpy(pctx->iv, req->iv, 12); |
197 | memcpy(pctx->iv + 12, &counter, 4); | ||
201 | 198 | ||
202 | sg_init_table(pctx->src, 2); | 199 | sg_init_table(pctx->src, 3); |
203 | sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag)); | 200 | sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag)); |
204 | scatterwalk_sg_chain(pctx->src, 2, req->src); | 201 | sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen); |
202 | if (sg != pctx->src + 1) | ||
203 | scatterwalk_sg_chain(pctx->src, 2, sg); | ||
205 | 204 | ||
206 | dst = pctx->src; | ||
207 | if (req->src != req->dst) { | 205 | if (req->src != req->dst) { |
208 | sg_init_table(pctx->dst, 2); | 206 | sg_init_table(pctx->dst, 3); |
209 | sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag)); | 207 | sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag)); |
210 | scatterwalk_sg_chain(pctx->dst, 2, req->dst); | 208 | sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen); |
211 | dst = pctx->dst; | 209 | if (sg != pctx->dst + 1) |
210 | scatterwalk_sg_chain(pctx->dst, 2, sg); | ||
212 | } | 211 | } |
212 | } | ||
213 | |||
214 | static void crypto_gcm_init_crypt(struct aead_request *req, | ||
215 | unsigned int cryptlen) | ||
216 | { | ||
217 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
218 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | ||
219 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
220 | struct ablkcipher_request *ablk_req = &pctx->u.abreq; | ||
221 | struct scatterlist *dst; | ||
222 | |||
223 | dst = req->src == req->dst ? pctx->src : pctx->dst; | ||
213 | 224 | ||
214 | ablkcipher_request_set_tfm(ablk_req, ctx->ctr); | 225 | ablkcipher_request_set_tfm(ablk_req, ctx->ctr); |
215 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, | 226 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, |
216 | cryptlen + sizeof(pctx->auth_tag), | 227 | cryptlen + sizeof(pctx->auth_tag), |
217 | req->iv); | 228 | pctx->iv); |
218 | } | 229 | } |
219 | 230 | ||
220 | static inline unsigned int gcm_remain(unsigned int len) | 231 | static inline unsigned int gcm_remain(unsigned int len) |
@@ -224,41 +235,31 @@ static inline unsigned int gcm_remain(unsigned int len) | |||
224 | } | 235 | } |
225 | 236 | ||
226 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); | 237 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); |
227 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err); | ||
228 | 238 | ||
229 | static int gcm_hash_update(struct aead_request *req, | 239 | static int gcm_hash_update(struct aead_request *req, |
230 | struct crypto_gcm_req_priv_ctx *pctx, | ||
231 | crypto_completion_t compl, | 240 | crypto_completion_t compl, |
232 | struct scatterlist *src, | 241 | struct scatterlist *src, |
233 | unsigned int len) | 242 | unsigned int len, u32 flags) |
234 | { | 243 | { |
244 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
235 | struct ahash_request *ahreq = &pctx->u.ahreq; | 245 | struct ahash_request *ahreq = &pctx->u.ahreq; |
236 | 246 | ||
237 | ahash_request_set_callback(ahreq, aead_request_flags(req), | 247 | ahash_request_set_callback(ahreq, flags, compl, req); |
238 | compl, req); | ||
239 | ahash_request_set_crypt(ahreq, src, NULL, len); | 248 | ahash_request_set_crypt(ahreq, src, NULL, len); |
240 | 249 | ||
241 | return crypto_ahash_update(ahreq); | 250 | return crypto_ahash_update(ahreq); |
242 | } | 251 | } |
243 | 252 | ||
244 | static int gcm_hash_remain(struct aead_request *req, | 253 | static int gcm_hash_remain(struct aead_request *req, |
245 | struct crypto_gcm_req_priv_ctx *pctx, | ||
246 | unsigned int remain, | 254 | unsigned int remain, |
247 | crypto_completion_t compl) | 255 | crypto_completion_t compl, u32 flags) |
248 | { | 256 | { |
249 | struct ahash_request *ahreq = &pctx->u.ahreq; | 257 | return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags); |
250 | |||
251 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
252 | compl, req); | ||
253 | sg_init_one(pctx->src, gcm_zeroes, remain); | ||
254 | ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); | ||
255 | |||
256 | return crypto_ahash_update(ahreq); | ||
257 | } | 258 | } |
258 | 259 | ||
259 | static int gcm_hash_len(struct aead_request *req, | 260 | static int gcm_hash_len(struct aead_request *req, u32 flags) |
260 | struct crypto_gcm_req_priv_ctx *pctx) | ||
261 | { | 261 | { |
262 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
262 | struct ahash_request *ahreq = &pctx->u.ahreq; | 263 | struct ahash_request *ahreq = &pctx->u.ahreq; |
263 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 264 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
264 | u128 lengths; | 265 | u128 lengths; |
@@ -266,76 +267,41 @@ static int gcm_hash_len(struct aead_request *req, | |||
266 | lengths.a = cpu_to_be64(req->assoclen * 8); | 267 | lengths.a = cpu_to_be64(req->assoclen * 8); |
267 | lengths.b = cpu_to_be64(gctx->cryptlen * 8); | 268 | lengths.b = cpu_to_be64(gctx->cryptlen * 8); |
268 | memcpy(pctx->iauth_tag, &lengths, 16); | 269 | memcpy(pctx->iauth_tag, &lengths, 16); |
269 | sg_init_one(pctx->src, pctx->iauth_tag, 16); | 270 | sg_init_one(&pctx->sg, pctx->iauth_tag, 16); |
270 | ahash_request_set_callback(ahreq, aead_request_flags(req), | 271 | ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req); |
271 | gcm_hash_len_done, req); | 272 | ahash_request_set_crypt(ahreq, &pctx->sg, |
272 | ahash_request_set_crypt(ahreq, pctx->src, | 273 | pctx->iauth_tag, sizeof(lengths)); |
273 | NULL, sizeof(lengths)); | ||
274 | 274 | ||
275 | return crypto_ahash_update(ahreq); | 275 | return crypto_ahash_finup(ahreq); |
276 | } | ||
277 | |||
278 | static int gcm_hash_final(struct aead_request *req, | ||
279 | struct crypto_gcm_req_priv_ctx *pctx) | ||
280 | { | ||
281 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
282 | |||
283 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
284 | gcm_hash_final_done, req); | ||
285 | ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0); | ||
286 | |||
287 | return crypto_ahash_final(ahreq); | ||
288 | } | 276 | } |
289 | 277 | ||
290 | static void __gcm_hash_final_done(struct aead_request *req, int err) | 278 | static int gcm_hash_len_continue(struct aead_request *req, u32 flags) |
291 | { | 279 | { |
292 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 280 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
293 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 281 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
294 | 282 | ||
295 | if (!err) | 283 | return gctx->complete(req, flags); |
296 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
297 | |||
298 | gctx->complete(req, err); | ||
299 | } | 284 | } |
300 | 285 | ||
301 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err) | 286 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err) |
302 | { | 287 | { |
303 | struct aead_request *req = areq->data; | 288 | struct aead_request *req = areq->data; |
304 | 289 | ||
305 | __gcm_hash_final_done(req, err); | 290 | if (err) |
306 | } | 291 | goto out; |
307 | |||
308 | static void __gcm_hash_len_done(struct aead_request *req, int err) | ||
309 | { | ||
310 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
311 | |||
312 | if (!err) { | ||
313 | err = gcm_hash_final(req, pctx); | ||
314 | if (err == -EINPROGRESS || err == -EBUSY) | ||
315 | return; | ||
316 | } | ||
317 | |||
318 | __gcm_hash_final_done(req, err); | ||
319 | } | ||
320 | 292 | ||
321 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err) | 293 | err = gcm_hash_len_continue(req, 0); |
322 | { | 294 | if (err == -EINPROGRESS) |
323 | struct aead_request *req = areq->data; | 295 | return; |
324 | 296 | ||
325 | __gcm_hash_len_done(req, err); | 297 | out: |
298 | aead_request_complete(req, err); | ||
326 | } | 299 | } |
327 | 300 | ||
328 | static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err) | 301 | static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags) |
329 | { | 302 | { |
330 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 303 | return gcm_hash_len(req, flags) ?: |
331 | 304 | gcm_hash_len_continue(req, flags); | |
332 | if (!err) { | ||
333 | err = gcm_hash_len(req, pctx); | ||
334 | if (err == -EINPROGRESS || err == -EBUSY) | ||
335 | return; | ||
336 | } | ||
337 | |||
338 | __gcm_hash_len_done(req, err); | ||
339 | } | 305 | } |
340 | 306 | ||
341 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, | 307 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, |
@@ -343,55 +309,58 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, | |||
343 | { | 309 | { |
344 | struct aead_request *req = areq->data; | 310 | struct aead_request *req = areq->data; |
345 | 311 | ||
346 | __gcm_hash_crypt_remain_done(req, err); | 312 | if (err) |
313 | goto out; | ||
314 | |||
315 | err = gcm_hash_crypt_remain_continue(req, 0); | ||
316 | if (err == -EINPROGRESS) | ||
317 | return; | ||
318 | |||
319 | out: | ||
320 | aead_request_complete(req, err); | ||
347 | } | 321 | } |
348 | 322 | ||
349 | static void __gcm_hash_crypt_done(struct aead_request *req, int err) | 323 | static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags) |
350 | { | 324 | { |
351 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 325 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
352 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 326 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
353 | unsigned int remain; | 327 | unsigned int remain; |
354 | 328 | ||
355 | if (!err) { | 329 | remain = gcm_remain(gctx->cryptlen); |
356 | remain = gcm_remain(gctx->cryptlen); | 330 | if (remain) |
357 | BUG_ON(!remain); | 331 | return gcm_hash_remain(req, remain, |
358 | err = gcm_hash_remain(req, pctx, remain, | 332 | gcm_hash_crypt_remain_done, flags) ?: |
359 | gcm_hash_crypt_remain_done); | 333 | gcm_hash_crypt_remain_continue(req, flags); |
360 | if (err == -EINPROGRESS || err == -EBUSY) | ||
361 | return; | ||
362 | } | ||
363 | 334 | ||
364 | __gcm_hash_crypt_remain_done(req, err); | 335 | return gcm_hash_crypt_remain_continue(req, flags); |
365 | } | 336 | } |
366 | 337 | ||
367 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) | 338 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) |
368 | { | 339 | { |
369 | struct aead_request *req = areq->data; | 340 | struct aead_request *req = areq->data; |
370 | 341 | ||
371 | __gcm_hash_crypt_done(req, err); | 342 | if (err) |
343 | goto out; | ||
344 | |||
345 | err = gcm_hash_crypt_continue(req, 0); | ||
346 | if (err == -EINPROGRESS) | ||
347 | return; | ||
348 | |||
349 | out: | ||
350 | aead_request_complete(req, err); | ||
372 | } | 351 | } |
373 | 352 | ||
374 | static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err) | 353 | static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags) |
375 | { | 354 | { |
376 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 355 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
377 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 356 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
378 | crypto_completion_t compl; | ||
379 | unsigned int remain = 0; | ||
380 | |||
381 | if (!err && gctx->cryptlen) { | ||
382 | remain = gcm_remain(gctx->cryptlen); | ||
383 | compl = remain ? gcm_hash_crypt_done : | ||
384 | gcm_hash_crypt_remain_done; | ||
385 | err = gcm_hash_update(req, pctx, compl, | ||
386 | gctx->src, gctx->cryptlen); | ||
387 | if (err == -EINPROGRESS || err == -EBUSY) | ||
388 | return; | ||
389 | } | ||
390 | 357 | ||
391 | if (remain) | 358 | if (gctx->cryptlen) |
392 | __gcm_hash_crypt_done(req, err); | 359 | return gcm_hash_update(req, gcm_hash_crypt_done, |
393 | else | 360 | gctx->src, gctx->cryptlen, flags) ?: |
394 | __gcm_hash_crypt_remain_done(req, err); | 361 | gcm_hash_crypt_continue(req, flags); |
362 | |||
363 | return gcm_hash_crypt_remain_continue(req, flags); | ||
395 | } | 364 | } |
396 | 365 | ||
397 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, | 366 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, |
@@ -399,146 +368,120 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, | |||
399 | { | 368 | { |
400 | struct aead_request *req = areq->data; | 369 | struct aead_request *req = areq->data; |
401 | 370 | ||
402 | __gcm_hash_assoc_remain_done(req, err); | 371 | if (err) |
372 | goto out; | ||
373 | |||
374 | err = gcm_hash_assoc_remain_continue(req, 0); | ||
375 | if (err == -EINPROGRESS) | ||
376 | return; | ||
377 | |||
378 | out: | ||
379 | aead_request_complete(req, err); | ||
403 | } | 380 | } |
404 | 381 | ||
405 | static void __gcm_hash_assoc_done(struct aead_request *req, int err) | 382 | static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags) |
406 | { | 383 | { |
407 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
408 | unsigned int remain; | 384 | unsigned int remain; |
409 | 385 | ||
410 | if (!err) { | 386 | remain = gcm_remain(req->assoclen); |
411 | remain = gcm_remain(req->assoclen); | 387 | if (remain) |
412 | BUG_ON(!remain); | 388 | return gcm_hash_remain(req, remain, |
413 | err = gcm_hash_remain(req, pctx, remain, | 389 | gcm_hash_assoc_remain_done, flags) ?: |
414 | gcm_hash_assoc_remain_done); | 390 | gcm_hash_assoc_remain_continue(req, flags); |
415 | if (err == -EINPROGRESS || err == -EBUSY) | ||
416 | return; | ||
417 | } | ||
418 | 391 | ||
419 | __gcm_hash_assoc_remain_done(req, err); | 392 | return gcm_hash_assoc_remain_continue(req, flags); |
420 | } | 393 | } |
421 | 394 | ||
422 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) | 395 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) |
423 | { | 396 | { |
424 | struct aead_request *req = areq->data; | 397 | struct aead_request *req = areq->data; |
425 | 398 | ||
426 | __gcm_hash_assoc_done(req, err); | 399 | if (err) |
400 | goto out; | ||
401 | |||
402 | err = gcm_hash_assoc_continue(req, 0); | ||
403 | if (err == -EINPROGRESS) | ||
404 | return; | ||
405 | |||
406 | out: | ||
407 | aead_request_complete(req, err); | ||
427 | } | 408 | } |
428 | 409 | ||
429 | static void __gcm_hash_init_done(struct aead_request *req, int err) | 410 | static int gcm_hash_init_continue(struct aead_request *req, u32 flags) |
430 | { | 411 | { |
431 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 412 | if (req->assoclen) |
432 | crypto_completion_t compl; | 413 | return gcm_hash_update(req, gcm_hash_assoc_done, |
433 | unsigned int remain = 0; | 414 | req->src, req->assoclen, flags) ?: |
434 | 415 | gcm_hash_assoc_continue(req, flags); | |
435 | if (!err && req->assoclen) { | ||
436 | remain = gcm_remain(req->assoclen); | ||
437 | compl = remain ? gcm_hash_assoc_done : | ||
438 | gcm_hash_assoc_remain_done; | ||
439 | err = gcm_hash_update(req, pctx, compl, | ||
440 | req->assoc, req->assoclen); | ||
441 | if (err == -EINPROGRESS || err == -EBUSY) | ||
442 | return; | ||
443 | } | ||
444 | 416 | ||
445 | if (remain) | 417 | return gcm_hash_assoc_remain_continue(req, flags); |
446 | __gcm_hash_assoc_done(req, err); | ||
447 | else | ||
448 | __gcm_hash_assoc_remain_done(req, err); | ||
449 | } | 418 | } |
450 | 419 | ||
451 | static void gcm_hash_init_done(struct crypto_async_request *areq, int err) | 420 | static void gcm_hash_init_done(struct crypto_async_request *areq, int err) |
452 | { | 421 | { |
453 | struct aead_request *req = areq->data; | 422 | struct aead_request *req = areq->data; |
454 | 423 | ||
455 | __gcm_hash_init_done(req, err); | 424 | if (err) |
425 | goto out; | ||
426 | |||
427 | err = gcm_hash_init_continue(req, 0); | ||
428 | if (err == -EINPROGRESS) | ||
429 | return; | ||
430 | |||
431 | out: | ||
432 | aead_request_complete(req, err); | ||
456 | } | 433 | } |
457 | 434 | ||
458 | static int gcm_hash(struct aead_request *req, | 435 | static int gcm_hash(struct aead_request *req, u32 flags) |
459 | struct crypto_gcm_req_priv_ctx *pctx) | ||
460 | { | 436 | { |
437 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
461 | struct ahash_request *ahreq = &pctx->u.ahreq; | 438 | struct ahash_request *ahreq = &pctx->u.ahreq; |
462 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 439 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
463 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
464 | unsigned int remain; | ||
465 | crypto_completion_t compl; | ||
466 | int err; | ||
467 | 440 | ||
468 | ahash_request_set_tfm(ahreq, ctx->ghash); | 441 | ahash_request_set_tfm(ahreq, ctx->ghash); |
469 | 442 | ||
470 | ahash_request_set_callback(ahreq, aead_request_flags(req), | 443 | ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req); |
471 | gcm_hash_init_done, req); | 444 | return crypto_ahash_init(ahreq) ?: |
472 | err = crypto_ahash_init(ahreq); | 445 | gcm_hash_init_continue(req, flags); |
473 | if (err) | ||
474 | return err; | ||
475 | remain = gcm_remain(req->assoclen); | ||
476 | compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; | ||
477 | err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen); | ||
478 | if (err) | ||
479 | return err; | ||
480 | if (remain) { | ||
481 | err = gcm_hash_remain(req, pctx, remain, | ||
482 | gcm_hash_assoc_remain_done); | ||
483 | if (err) | ||
484 | return err; | ||
485 | } | ||
486 | remain = gcm_remain(gctx->cryptlen); | ||
487 | compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done; | ||
488 | err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen); | ||
489 | if (err) | ||
490 | return err; | ||
491 | if (remain) { | ||
492 | err = gcm_hash_remain(req, pctx, remain, | ||
493 | gcm_hash_crypt_remain_done); | ||
494 | if (err) | ||
495 | return err; | ||
496 | } | ||
497 | err = gcm_hash_len(req, pctx); | ||
498 | if (err) | ||
499 | return err; | ||
500 | err = gcm_hash_final(req, pctx); | ||
501 | if (err) | ||
502 | return err; | ||
503 | |||
504 | return 0; | ||
505 | } | 446 | } |
506 | 447 | ||
507 | static void gcm_enc_copy_hash(struct aead_request *req, | 448 | static int gcm_enc_copy_hash(struct aead_request *req, u32 flags) |
508 | struct crypto_gcm_req_priv_ctx *pctx) | ||
509 | { | 449 | { |
450 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
510 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 451 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
511 | u8 *auth_tag = pctx->auth_tag; | 452 | u8 *auth_tag = pctx->auth_tag; |
512 | 453 | ||
513 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, | 454 | crypto_xor(auth_tag, pctx->iauth_tag, 16); |
455 | scatterwalk_map_and_copy(auth_tag, req->dst, | ||
456 | req->assoclen + req->cryptlen, | ||
514 | crypto_aead_authsize(aead), 1); | 457 | crypto_aead_authsize(aead), 1); |
458 | return 0; | ||
515 | } | 459 | } |
516 | 460 | ||
517 | static void gcm_enc_hash_done(struct aead_request *req, int err) | 461 | static int gcm_encrypt_continue(struct aead_request *req, u32 flags) |
518 | { | 462 | { |
519 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 463 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
464 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
520 | 465 | ||
521 | if (!err) | 466 | gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst); |
522 | gcm_enc_copy_hash(req, pctx); | 467 | gctx->cryptlen = req->cryptlen; |
468 | gctx->complete = gcm_enc_copy_hash; | ||
523 | 469 | ||
524 | aead_request_complete(req, err); | 470 | return gcm_hash(req, flags); |
525 | } | 471 | } |
526 | 472 | ||
527 | static void gcm_encrypt_done(struct crypto_async_request *areq, int err) | 473 | static void gcm_encrypt_done(struct crypto_async_request *areq, int err) |
528 | { | 474 | { |
529 | struct aead_request *req = areq->data; | 475 | struct aead_request *req = areq->data; |
530 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
531 | 476 | ||
532 | if (!err) { | 477 | if (err) |
533 | err = gcm_hash(req, pctx); | 478 | goto out; |
534 | if (err == -EINPROGRESS || err == -EBUSY) | 479 | |
535 | return; | 480 | err = gcm_encrypt_continue(req, 0); |
536 | else if (!err) { | 481 | if (err == -EINPROGRESS) |
537 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | 482 | return; |
538 | gcm_enc_copy_hash(req, pctx); | ||
539 | } | ||
540 | } | ||
541 | 483 | ||
484 | out: | ||
542 | aead_request_complete(req, err); | 485 | aead_request_complete(req, err); |
543 | } | 486 | } |
544 | 487 | ||
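The rewritten completion handlers above all share one shape: on a hard error, complete the request; otherwise call the matching *_continue() helper, and if that returns -EINPROGRESS an asynchronous sub-operation has taken ownership and its own callback will finish the chain. Synchronous steps are folded together with the GNU "?:" operator so the first non-zero status short-circuits. A toy model of that callback/continuation shape (names are illustrative, not kernel APIs):

/*
 * Illustrative model of the done()/continue() pattern above.
 * -EINPROGRESS means "an async step took over; do not complete yet".
 */
#include <stdio.h>

#define EINPROGRESS 115

static int step_a(void) { return 0; }	/* pretend-synchronous sub-request */
static int step_b(void) { return 0; }

static int crypt_continue(void)
{
	return step_a() ?: step_b();	/* first failure or async hand-off wins */
}

static void crypt_done(int err)		/* completion callback */
{
	if (err)
		goto out;

	err = crypt_continue();
	if (err == -EINPROGRESS)
		return;			/* the next callback finishes the request */
out:
	printf("request complete, err = %d\n", err);
}

int main(void)
{
	crypt_done(0);
	return 0;
}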
@@ -546,34 +489,19 @@ static int crypto_gcm_encrypt(struct aead_request *req) | |||
546 | { | 489 | { |
547 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 490 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
548 | struct ablkcipher_request *abreq = &pctx->u.abreq; | 491 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
549 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 492 | u32 flags = aead_request_flags(req); |
550 | int err; | ||
551 | |||
552 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); | ||
553 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
554 | gcm_encrypt_done, req); | ||
555 | |||
556 | gctx->src = req->dst; | ||
557 | gctx->cryptlen = req->cryptlen; | ||
558 | gctx->complete = gcm_enc_hash_done; | ||
559 | |||
560 | err = crypto_ablkcipher_encrypt(abreq); | ||
561 | if (err) | ||
562 | return err; | ||
563 | |||
564 | err = gcm_hash(req, pctx); | ||
565 | if (err) | ||
566 | return err; | ||
567 | 493 | ||
568 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | 494 | crypto_gcm_init_common(req); |
569 | gcm_enc_copy_hash(req, pctx); | 495 | crypto_gcm_init_crypt(req, req->cryptlen); |
496 | ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req); | ||
570 | 497 | ||
571 | return 0; | 498 | return crypto_ablkcipher_encrypt(abreq) ?: |
499 | gcm_encrypt_continue(req, flags); | ||
572 | } | 500 | } |
573 | 501 | ||
574 | static int crypto_gcm_verify(struct aead_request *req, | 502 | static int crypto_gcm_verify(struct aead_request *req) |
575 | struct crypto_gcm_req_priv_ctx *pctx) | ||
576 | { | 503 | { |
504 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
577 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 505 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
578 | u8 *auth_tag = pctx->auth_tag; | 506 | u8 *auth_tag = pctx->auth_tag; |
579 | u8 *iauth_tag = pctx->iauth_tag; | 507 | u8 *iauth_tag = pctx->iauth_tag; |
@@ -581,78 +509,57 @@ static int crypto_gcm_verify(struct aead_request *req, | |||
581 | unsigned int cryptlen = req->cryptlen - authsize; | 509 | unsigned int cryptlen = req->cryptlen - authsize; |
582 | 510 | ||
583 | crypto_xor(auth_tag, iauth_tag, 16); | 511 | crypto_xor(auth_tag, iauth_tag, 16); |
584 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); | 512 | scatterwalk_map_and_copy(iauth_tag, req->src, |
513 | req->assoclen + cryptlen, authsize, 0); | ||
585 | return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; | 514 | return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; |
586 | } | 515 | } |
587 | 516 | ||
588 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) | 517 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) |
589 | { | 518 | { |
590 | struct aead_request *req = areq->data; | 519 | struct aead_request *req = areq->data; |
591 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
592 | 520 | ||
593 | if (!err) | 521 | if (!err) |
594 | err = crypto_gcm_verify(req, pctx); | 522 | err = crypto_gcm_verify(req); |
595 | 523 | ||
596 | aead_request_complete(req, err); | 524 | aead_request_complete(req, err); |
597 | } | 525 | } |
598 | 526 | ||
599 | static void gcm_dec_hash_done(struct aead_request *req, int err) | 527 | static int gcm_dec_hash_continue(struct aead_request *req, u32 flags) |
600 | { | 528 | { |
601 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 529 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
602 | struct ablkcipher_request *abreq = &pctx->u.abreq; | 530 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
603 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 531 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
604 | 532 | ||
605 | if (!err) { | 533 | crypto_gcm_init_crypt(req, gctx->cryptlen); |
606 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 534 | ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req); |
607 | gcm_decrypt_done, req); | 535 | return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req); |
608 | crypto_gcm_init_crypt(abreq, req, gctx->cryptlen); | ||
609 | err = crypto_ablkcipher_decrypt(abreq); | ||
610 | if (err == -EINPROGRESS || err == -EBUSY) | ||
611 | return; | ||
612 | else if (!err) | ||
613 | err = crypto_gcm_verify(req, pctx); | ||
614 | } | ||
615 | |||
616 | aead_request_complete(req, err); | ||
617 | } | 536 | } |
618 | 537 | ||
619 | static int crypto_gcm_decrypt(struct aead_request *req) | 538 | static int crypto_gcm_decrypt(struct aead_request *req) |
620 | { | 539 | { |
621 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 540 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
622 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 541 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
623 | struct ablkcipher_request *abreq = &pctx->u.abreq; | ||
624 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | 542 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
625 | unsigned int authsize = crypto_aead_authsize(aead); | 543 | unsigned int authsize = crypto_aead_authsize(aead); |
626 | unsigned int cryptlen = req->cryptlen; | 544 | unsigned int cryptlen = req->cryptlen; |
627 | int err; | 545 | u32 flags = aead_request_flags(req); |
628 | 546 | ||
629 | if (cryptlen < authsize) | ||
630 | return -EINVAL; | ||
631 | cryptlen -= authsize; | 547 | cryptlen -= authsize; |
632 | 548 | ||
633 | gctx->src = req->src; | 549 | crypto_gcm_init_common(req); |
634 | gctx->cryptlen = cryptlen; | ||
635 | gctx->complete = gcm_dec_hash_done; | ||
636 | |||
637 | err = gcm_hash(req, pctx); | ||
638 | if (err) | ||
639 | return err; | ||
640 | 550 | ||
641 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 551 | gctx->src = sg_next(pctx->src); |
642 | gcm_decrypt_done, req); | 552 | gctx->cryptlen = cryptlen; |
643 | crypto_gcm_init_crypt(abreq, req, cryptlen); | 553 | gctx->complete = gcm_dec_hash_continue; |
644 | err = crypto_ablkcipher_decrypt(abreq); | ||
645 | if (err) | ||
646 | return err; | ||
647 | 554 | ||
648 | return crypto_gcm_verify(req, pctx); | 555 | return gcm_hash(req, flags); |
649 | } | 556 | } |
650 | 557 | ||
651 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | 558 | static int crypto_gcm_init_tfm(struct crypto_aead *tfm) |
652 | { | 559 | { |
653 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 560 | struct aead_instance *inst = aead_alg_instance(tfm); |
654 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); | 561 | struct gcm_instance_ctx *ictx = aead_instance_ctx(inst); |
655 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 562 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); |
656 | struct crypto_ablkcipher *ctr; | 563 | struct crypto_ablkcipher *ctr; |
657 | struct crypto_ahash *ghash; | 564 | struct crypto_ahash *ghash; |
658 | unsigned long align; | 565 | unsigned long align; |
@@ -670,14 +577,14 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | |||
670 | ctx->ctr = ctr; | 577 | ctx->ctr = ctr; |
671 | ctx->ghash = ghash; | 578 | ctx->ghash = ghash; |
672 | 579 | ||
673 | align = crypto_tfm_alg_alignmask(tfm); | 580 | align = crypto_aead_alignmask(tfm); |
674 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 581 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
675 | tfm->crt_aead.reqsize = align + | 582 | crypto_aead_set_reqsize(tfm, |
676 | offsetof(struct crypto_gcm_req_priv_ctx, u) + | 583 | align + offsetof(struct crypto_gcm_req_priv_ctx, u) + |
677 | max(sizeof(struct ablkcipher_request) + | 584 | max(sizeof(struct ablkcipher_request) + |
678 | crypto_ablkcipher_reqsize(ctr), | 585 | crypto_ablkcipher_reqsize(ctr), |
679 | sizeof(struct ahash_request) + | 586 | sizeof(struct ahash_request) + |
680 | crypto_ahash_reqsize(ghash)); | 587 | crypto_ahash_reqsize(ghash))); |
681 | 588 | ||
682 | return 0; | 589 | return 0; |
683 | 590 | ||
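The reqsize hunk above sizes the per-request context as alignment padding plus the fixed head of struct crypto_gcm_req_priv_ctx plus the larger of the two child requests; taking max() is safe because the ahreq/abreq union means only one child request is live at a time. A sketch of the same arithmetic with invented numbers:

/*
 * Illustrative arithmetic for the reqsize computation above; the child
 * request sizes are hypothetical, and "u" stands for the union member.
 */
#include <stdio.h>
#include <stddef.h>

struct priv_ctx {
	char iv[16];
	char auth_tag[16];
	char u[1];		/* union { ahash req, ablkcipher req } lives here */
};

int main(void)
{
	size_t abreq_size = 96, ahreq_size = 64;	/* made up */
	size_t child = abreq_size > ahreq_size ? abreq_size : ahreq_size;
	size_t reqsize = offsetof(struct priv_ctx, u) + child;

	printf("reqsize = %zu bytes\n", reqsize);
	return 0;
}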
@@ -686,53 +593,59 @@ err_free_hash: | |||
686 | return err; | 593 | return err; |
687 | } | 594 | } |
688 | 595 | ||
689 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) | 596 | static void crypto_gcm_exit_tfm(struct crypto_aead *tfm) |
690 | { | 597 | { |
691 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 598 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); |
692 | 599 | ||
693 | crypto_free_ahash(ctx->ghash); | 600 | crypto_free_ahash(ctx->ghash); |
694 | crypto_free_ablkcipher(ctx->ctr); | 601 | crypto_free_ablkcipher(ctx->ctr); |
695 | } | 602 | } |
696 | 603 | ||
697 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | 604 | static int crypto_gcm_create_common(struct crypto_template *tmpl, |
698 | const char *full_name, | 605 | struct rtattr **tb, |
699 | const char *ctr_name, | 606 | const char *full_name, |
700 | const char *ghash_name) | 607 | const char *ctr_name, |
608 | const char *ghash_name) | ||
701 | { | 609 | { |
702 | struct crypto_attr_type *algt; | 610 | struct crypto_attr_type *algt; |
703 | struct crypto_instance *inst; | 611 | struct aead_instance *inst; |
704 | struct crypto_alg *ctr; | 612 | struct crypto_alg *ctr; |
705 | struct crypto_alg *ghash_alg; | 613 | struct crypto_alg *ghash_alg; |
706 | struct ahash_alg *ghash_ahash_alg; | 614 | struct hash_alg_common *ghash; |
707 | struct gcm_instance_ctx *ctx; | 615 | struct gcm_instance_ctx *ctx; |
708 | int err; | 616 | int err; |
709 | 617 | ||
710 | algt = crypto_get_attr_type(tb); | 618 | algt = crypto_get_attr_type(tb); |
711 | if (IS_ERR(algt)) | 619 | if (IS_ERR(algt)) |
712 | return ERR_CAST(algt); | 620 | return PTR_ERR(algt); |
713 | 621 | ||
714 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 622 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
715 | return ERR_PTR(-EINVAL); | 623 | return -EINVAL; |
716 | 624 | ||
717 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, | 625 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, |
718 | CRYPTO_ALG_TYPE_HASH, | 626 | CRYPTO_ALG_TYPE_HASH, |
719 | CRYPTO_ALG_TYPE_AHASH_MASK); | 627 | CRYPTO_ALG_TYPE_AHASH_MASK); |
720 | if (IS_ERR(ghash_alg)) | 628 | if (IS_ERR(ghash_alg)) |
721 | return ERR_CAST(ghash_alg); | 629 | return PTR_ERR(ghash_alg); |
630 | |||
631 | ghash = __crypto_hash_alg_common(ghash_alg); | ||
722 | 632 | ||
723 | err = -ENOMEM; | 633 | err = -ENOMEM; |
724 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 634 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
725 | if (!inst) | 635 | if (!inst) |
726 | goto out_put_ghash; | 636 | goto out_put_ghash; |
727 | 637 | ||
728 | ctx = crypto_instance_ctx(inst); | 638 | ctx = aead_instance_ctx(inst); |
729 | ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); | 639 | err = crypto_init_ahash_spawn(&ctx->ghash, ghash, |
730 | err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, | 640 | aead_crypto_instance(inst)); |
731 | inst); | ||
732 | if (err) | 641 | if (err) |
733 | goto err_free_inst; | 642 | goto err_free_inst; |
734 | 643 | ||
735 | crypto_set_skcipher_spawn(&ctx->ctr, inst); | 644 | err = -EINVAL; |
645 | if (ghash->digestsize != 16) | ||
646 | goto err_drop_ghash; | ||
647 | |||
648 | crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); | ||
736 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, | 649 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
737 | crypto_requires_sync(algt->type, | 650 | crypto_requires_sync(algt->type, |
738 | algt->mask)); | 651 | algt->mask)); |
@@ -751,33 +664,38 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
751 | goto out_put_ctr; | 664 | goto out_put_ctr; |
752 | 665 | ||
753 | err = -ENAMETOOLONG; | 666 | err = -ENAMETOOLONG; |
754 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 667 | if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
755 | "gcm_base(%s,%s)", ctr->cra_driver_name, | 668 | "gcm_base(%s,%s)", ctr->cra_driver_name, |
756 | ghash_alg->cra_driver_name) >= | 669 | ghash_alg->cra_driver_name) >= |
757 | CRYPTO_MAX_ALG_NAME) | 670 | CRYPTO_MAX_ALG_NAME) |
758 | goto out_put_ctr; | 671 | goto out_put_ctr; |
759 | 672 | ||
760 | memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); | 673 | memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); |
761 | 674 | ||
762 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 675 | inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) & |
763 | inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; | 676 | CRYPTO_ALG_ASYNC; |
764 | inst->alg.cra_priority = ctr->cra_priority; | 677 | inst->alg.base.cra_priority = (ghash->base.cra_priority + |
765 | inst->alg.cra_blocksize = 1; | 678 | ctr->cra_priority) / 2; |
766 | inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); | 679 | inst->alg.base.cra_blocksize = 1; |
767 | inst->alg.cra_type = &crypto_aead_type; | 680 | inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | |
768 | inst->alg.cra_aead.ivsize = 16; | 681 | ctr->cra_alignmask; |
769 | inst->alg.cra_aead.maxauthsize = 16; | 682 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); |
770 | inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); | 683 | inst->alg.ivsize = 12; |
771 | inst->alg.cra_init = crypto_gcm_init_tfm; | 684 | inst->alg.maxauthsize = 16; |
772 | inst->alg.cra_exit = crypto_gcm_exit_tfm; | 685 | inst->alg.init = crypto_gcm_init_tfm; |
773 | inst->alg.cra_aead.setkey = crypto_gcm_setkey; | 686 | inst->alg.exit = crypto_gcm_exit_tfm; |
774 | inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize; | 687 | inst->alg.setkey = crypto_gcm_setkey; |
775 | inst->alg.cra_aead.encrypt = crypto_gcm_encrypt; | 688 | inst->alg.setauthsize = crypto_gcm_setauthsize; |
776 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; | 689 | inst->alg.encrypt = crypto_gcm_encrypt; |
690 | inst->alg.decrypt = crypto_gcm_decrypt; | ||
691 | |||
692 | err = aead_register_instance(tmpl, inst); | ||
693 | if (err) | ||
694 | goto out_put_ctr; | ||
777 | 695 | ||
778 | out: | 696 | out_put_ghash: |
779 | crypto_mod_put(ghash_alg); | 697 | crypto_mod_put(ghash_alg); |
780 | return inst; | 698 | return err; |
781 | 699 | ||
782 | out_put_ctr: | 700 | out_put_ctr: |
783 | crypto_drop_skcipher(&ctx->ctr); | 701 | crypto_drop_skcipher(&ctx->ctr); |
@@ -785,12 +703,10 @@ err_drop_ghash: | |||
785 | crypto_drop_ahash(&ctx->ghash); | 703 | crypto_drop_ahash(&ctx->ghash); |
786 | err_free_inst: | 704 | err_free_inst: |
787 | kfree(inst); | 705 | kfree(inst); |
788 | out_put_ghash: | 706 | goto out_put_ghash; |
789 | inst = ERR_PTR(err); | ||
790 | goto out; | ||
791 | } | 707 | } |
792 | 708 | ||
793 | static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) | 709 | static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) |
794 | { | 710 | { |
795 | const char *cipher_name; | 711 | const char *cipher_name; |
796 | char ctr_name[CRYPTO_MAX_ALG_NAME]; | 712 | char ctr_name[CRYPTO_MAX_ALG_NAME]; |
@@ -798,17 +714,18 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) | |||
798 | 714 | ||
799 | cipher_name = crypto_attr_alg_name(tb[1]); | 715 | cipher_name = crypto_attr_alg_name(tb[1]); |
800 | if (IS_ERR(cipher_name)) | 716 | if (IS_ERR(cipher_name)) |
801 | return ERR_CAST(cipher_name); | 717 | return PTR_ERR(cipher_name); |
802 | 718 | ||
803 | if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= | 719 | if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= |
804 | CRYPTO_MAX_ALG_NAME) | 720 | CRYPTO_MAX_ALG_NAME) |
805 | return ERR_PTR(-ENAMETOOLONG); | 721 | return -ENAMETOOLONG; |
806 | 722 | ||
807 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= | 723 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= |
808 | CRYPTO_MAX_ALG_NAME) | 724 | CRYPTO_MAX_ALG_NAME) |
809 | return ERR_PTR(-ENAMETOOLONG); | 725 | return -ENAMETOOLONG; |
810 | 726 | ||
811 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); | 727 | return crypto_gcm_create_common(tmpl, tb, full_name, |
728 | ctr_name, "ghash"); | ||
812 | } | 729 | } |
813 | 730 | ||
814 | static void crypto_gcm_free(struct crypto_instance *inst) | 731 | static void crypto_gcm_free(struct crypto_instance *inst) |
@@ -817,17 +734,18 @@ static void crypto_gcm_free(struct crypto_instance *inst) | |||
817 | 734 | ||
818 | crypto_drop_skcipher(&ctx->ctr); | 735 | crypto_drop_skcipher(&ctx->ctr); |
819 | crypto_drop_ahash(&ctx->ghash); | 736 | crypto_drop_ahash(&ctx->ghash); |
820 | kfree(inst); | 737 | kfree(aead_instance(inst)); |
821 | } | 738 | } |
822 | 739 | ||
823 | static struct crypto_template crypto_gcm_tmpl = { | 740 | static struct crypto_template crypto_gcm_tmpl = { |
824 | .name = "gcm", | 741 | .name = "gcm", |
825 | .alloc = crypto_gcm_alloc, | 742 | .create = crypto_gcm_create, |
826 | .free = crypto_gcm_free, | 743 | .free = crypto_gcm_free, |
827 | .module = THIS_MODULE, | 744 | .module = THIS_MODULE, |
828 | }; | 745 | }; |
829 | 746 | ||
830 | static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | 747 | static int crypto_gcm_base_create(struct crypto_template *tmpl, |
748 | struct rtattr **tb) | ||
831 | { | 749 | { |
832 | const char *ctr_name; | 750 | const char *ctr_name; |
833 | const char *ghash_name; | 751 | const char *ghash_name; |
@@ -835,22 +753,23 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
835 | 753 | ||
836 | ctr_name = crypto_attr_alg_name(tb[1]); | 754 | ctr_name = crypto_attr_alg_name(tb[1]); |
837 | if (IS_ERR(ctr_name)) | 755 | if (IS_ERR(ctr_name)) |
838 | return ERR_CAST(ctr_name); | 756 | return PTR_ERR(ctr_name); |
839 | 757 | ||
840 | ghash_name = crypto_attr_alg_name(tb[2]); | 758 | ghash_name = crypto_attr_alg_name(tb[2]); |
841 | if (IS_ERR(ghash_name)) | 759 | if (IS_ERR(ghash_name)) |
842 | return ERR_CAST(ghash_name); | 760 | return PTR_ERR(ghash_name); |
843 | 761 | ||
844 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", | 762 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", |
845 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) | 763 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) |
846 | return ERR_PTR(-ENAMETOOLONG); | 764 | return -ENAMETOOLONG; |
847 | 765 | ||
848 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); | 766 | return crypto_gcm_create_common(tmpl, tb, full_name, |
767 | ctr_name, ghash_name); | ||
849 | } | 768 | } |
850 | 769 | ||
851 | static struct crypto_template crypto_gcm_base_tmpl = { | 770 | static struct crypto_template crypto_gcm_base_tmpl = { |
852 | .name = "gcm_base", | 771 | .name = "gcm_base", |
853 | .alloc = crypto_gcm_base_alloc, | 772 | .create = crypto_gcm_base_create, |
854 | .free = crypto_gcm_free, | 773 | .free = crypto_gcm_free, |
855 | .module = THIS_MODULE, | 774 | .module = THIS_MODULE, |
856 | }; | 775 | }; |
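The template conversions above ("gcm", "gcm_base", and "rfc4106" below) swap the old .alloc hook, which returned a crypto_instance or an ERR_PTR-encoded error, for a .create hook that registers the instance itself via aead_register_instance() and returns a plain 0/-errno. That is why the ERR_CAST()/ERR_PTR() returns in these functions become PTR_ERR()/-errno returns. A toy contrast of the two error conventions (the ERR_PTR encoding here imitates the kernel's top-4095-addresses scheme but is a userspace mock, not the kernel headers):

/*
 * Illustrative contrast of pointer-encoded vs plain-int error returns.
 */
#include <stdio.h>

#define MAX_ERRNO 4095L

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* old style: hand back the object, or an error encoded in the pointer */
static void *old_alloc(int fail)
{
	static char inst[] = "instance";
	return fail ? ERR_PTR(-22) : inst;
}

/* new style: register internally, return 0 or -errno */
static int new_create(int fail)
{
	return fail ? -22 : 0;
}

int main(void)
{
	void *p = old_alloc(1);

	if (IS_ERR(p))
		printf("old: %ld, new: %d\n", PTR_ERR(p), new_create(1));
	return 0;
}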
@@ -911,7 +830,7 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) | |||
911 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, | 830 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, |
912 | req->base.data); | 831 | req->base.data); |
913 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); | 832 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); |
914 | aead_request_set_assoc(subreq, req->assoc, req->assoclen); | 833 | aead_request_set_ad(subreq, req->assoclen); |
915 | 834 | ||
916 | return subreq; | 835 | return subreq; |
917 | } | 836 | } |
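The crypto_rfc4106_crypt hunk just above is the new AEAD interface in miniature: the old API carried associated data in its own scatterlist (aead_request_set_assoc), while the new one keeps the AD inline at the front of src/dst and records only its length with aead_request_set_ad(); the GCM core then uses scatterwalk_ffwd() to skip past it, as seen earlier in crypto_gcm_init_common(). A plain-C picture of the single-buffer layout (mock buffer, not kernel scatterlists):

/*
 * Illustrative layout under the new AEAD convention:
 *   [ assoclen bytes of AD | cryptlen bytes of text | auth tag ]
 * all in one buffer.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[64];
	unsigned int assoclen = 8, cryptlen = 16;

	memset(buf, 'A', assoclen);			/* associated data */
	memset(buf + assoclen, 'P', cryptlen);		/* plaintext */

	/* the cipher operates at buf + assoclen; skipping the AD prefix
	 * is what scatterwalk_ffwd() does for scatterlists */
	printf("text begins at offset %u\n", assoclen);
	return 0;
}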
@@ -930,11 +849,11 @@ static int crypto_rfc4106_decrypt(struct aead_request *req) | |||
930 | return crypto_aead_decrypt(req); | 849 | return crypto_aead_decrypt(req); |
931 | } | 850 | } |
932 | 851 | ||
933 | static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm) | 852 | static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm) |
934 | { | 853 | { |
935 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 854 | struct aead_instance *inst = aead_alg_instance(tfm); |
936 | struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst); | 855 | struct crypto_aead_spawn *spawn = aead_instance_ctx(inst); |
937 | struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm); | 856 | struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm); |
938 | struct crypto_aead *aead; | 857 | struct crypto_aead *aead; |
939 | unsigned long align; | 858 | unsigned long align; |
940 | 859 | ||
@@ -946,126 +865,120 @@ static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm) | |||
946 | 865 | ||
947 | align = crypto_aead_alignmask(aead); | 866 | align = crypto_aead_alignmask(aead); |
948 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 867 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
949 | tfm->crt_aead.reqsize = sizeof(struct aead_request) + | 868 | crypto_aead_set_reqsize( |
950 | ALIGN(crypto_aead_reqsize(aead), | 869 | tfm, |
951 | crypto_tfm_ctx_alignment()) + | 870 | sizeof(struct aead_request) + |
952 | align + 16; | 871 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + |
872 | align + 12); | ||
953 | 873 | ||
954 | return 0; | 874 | return 0; |
955 | } | 875 | } |
956 | 876 | ||
957 | static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm) | 877 | static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm) |
958 | { | 878 | { |
959 | struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm); | 879 | struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm); |
960 | 880 | ||
961 | crypto_free_aead(ctx->child); | 881 | crypto_free_aead(ctx->child); |
962 | } | 882 | } |
963 | 883 | ||
964 | static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb) | 884 | static int crypto_rfc4106_create(struct crypto_template *tmpl, |
885 | struct rtattr **tb) | ||
965 | { | 886 | { |
966 | struct crypto_attr_type *algt; | 887 | struct crypto_attr_type *algt; |
967 | struct crypto_instance *inst; | 888 | struct aead_instance *inst; |
968 | struct crypto_aead_spawn *spawn; | 889 | struct crypto_aead_spawn *spawn; |
969 | struct crypto_alg *alg; | 890 | struct aead_alg *alg; |
970 | const char *ccm_name; | 891 | const char *ccm_name; |
971 | int err; | 892 | int err; |
972 | 893 | ||
973 | algt = crypto_get_attr_type(tb); | 894 | algt = crypto_get_attr_type(tb); |
974 | if (IS_ERR(algt)) | 895 | if (IS_ERR(algt)) |
975 | return ERR_CAST(algt); | 896 | return PTR_ERR(algt); |
976 | 897 | ||
977 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 898 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
978 | return ERR_PTR(-EINVAL); | 899 | return -EINVAL; |
979 | 900 | ||
980 | ccm_name = crypto_attr_alg_name(tb[1]); | 901 | ccm_name = crypto_attr_alg_name(tb[1]); |
981 | if (IS_ERR(ccm_name)) | 902 | if (IS_ERR(ccm_name)) |
982 | return ERR_CAST(ccm_name); | 903 | return PTR_ERR(ccm_name); |
983 | 904 | ||
984 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | 905 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); |
985 | if (!inst) | 906 | if (!inst) |
986 | return ERR_PTR(-ENOMEM); | 907 | return -ENOMEM; |
987 | 908 | ||
988 | spawn = crypto_instance_ctx(inst); | 909 | spawn = aead_instance_ctx(inst); |
989 | crypto_set_aead_spawn(spawn, inst); | 910 | crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); |
990 | err = crypto_grab_aead(spawn, ccm_name, 0, | 911 | err = crypto_grab_aead(spawn, ccm_name, 0, |
991 | crypto_requires_sync(algt->type, algt->mask)); | 912 | crypto_requires_sync(algt->type, algt->mask)); |
992 | if (err) | 913 | if (err) |
993 | goto out_free_inst; | 914 | goto out_free_inst; |
994 | 915 | ||
995 | alg = crypto_aead_spawn_alg(spawn); | 916 | alg = crypto_spawn_aead_alg(spawn); |
996 | 917 | ||
997 | err = -EINVAL; | 918 | err = -EINVAL; |
998 | 919 | ||
999 | /* We only support 16-byte blocks. */ | 920 | /* Underlying IV size must be 12. */ |
1000 | if (alg->cra_aead.ivsize != 16) | 921 | if (crypto_aead_alg_ivsize(alg) != 12) |
1001 | goto out_drop_alg; | 922 | goto out_drop_alg; |
1002 | 923 | ||
1003 | /* Not a stream cipher? */ | 924 | /* Not a stream cipher? */ |
1004 | if (alg->cra_blocksize != 1) | 925 | if (alg->base.cra_blocksize != 1) |
1005 | goto out_drop_alg; | 926 | goto out_drop_alg; |
1006 | 927 | ||
1007 | err = -ENAMETOOLONG; | 928 | err = -ENAMETOOLONG; |
1008 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 929 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
1009 | "rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || | 930 | "rfc4106(%s)", alg->base.cra_name) >= |
1010 | snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 931 | CRYPTO_MAX_ALG_NAME || |
1011 | "rfc4106(%s)", alg->cra_driver_name) >= | 932 | snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
933 | "rfc4106(%s)", alg->base.cra_driver_name) >= | ||
1012 | CRYPTO_MAX_ALG_NAME) | 934 | CRYPTO_MAX_ALG_NAME) |
1013 | goto out_drop_alg; | 935 | goto out_drop_alg; |
1014 | 936 | ||
1015 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 937 | inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
1016 | inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; | 938 | inst->alg.base.cra_priority = alg->base.cra_priority; |
1017 | inst->alg.cra_priority = alg->cra_priority; | 939 | inst->alg.base.cra_blocksize = 1; |
1018 | inst->alg.cra_blocksize = 1; | 940 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; |
1019 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
1020 | inst->alg.cra_type = &crypto_nivaead_type; | ||
1021 | 941 | ||
1022 | inst->alg.cra_aead.ivsize = 8; | 942 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); |
1023 | inst->alg.cra_aead.maxauthsize = 16; | ||
1024 | 943 | ||
1025 | inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); | 944 | inst->alg.ivsize = 8; |
945 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); | ||
1026 | 946 | ||
1027 | inst->alg.cra_init = crypto_rfc4106_init_tfm; | 947 | inst->alg.init = crypto_rfc4106_init_tfm; |
1028 | inst->alg.cra_exit = crypto_rfc4106_exit_tfm; | 948 | inst->alg.exit = crypto_rfc4106_exit_tfm; |
1029 | 949 | ||
1030 | inst->alg.cra_aead.setkey = crypto_rfc4106_setkey; | 950 | inst->alg.setkey = crypto_rfc4106_setkey; |
1031 | inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize; | 951 | inst->alg.setauthsize = crypto_rfc4106_setauthsize; |
1032 | inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt; | 952 | inst->alg.encrypt = crypto_rfc4106_encrypt; |
1033 | inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt; | 953 | inst->alg.decrypt = crypto_rfc4106_decrypt; |
1034 | 954 | ||
1035 | inst->alg.cra_aead.geniv = "seqiv"; | 955 | err = aead_register_instance(tmpl, inst); |
956 | if (err) | ||
957 | goto out_drop_alg; | ||
1036 | 958 | ||
1037 | out: | 959 | out: |
1038 | return inst; | 960 | return err; |
1039 | 961 | ||
1040 | out_drop_alg: | 962 | out_drop_alg: |
1041 | crypto_drop_aead(spawn); | 963 | crypto_drop_aead(spawn); |
1042 | out_free_inst: | 964 | out_free_inst: |
1043 | kfree(inst); | 965 | kfree(inst); |
1044 | inst = ERR_PTR(err); | ||
1045 | goto out; | 966 | goto out; |
1046 | } | 967 | } |
1047 | 968 | ||
1048 | static void crypto_rfc4106_free(struct crypto_instance *inst) | 969 | static void crypto_rfc4106_free(struct crypto_instance *inst) |
1049 | { | 970 | { |
1050 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 971 | crypto_drop_aead(crypto_instance_ctx(inst)); |
1051 | kfree(inst); | 972 | kfree(aead_instance(inst)); |
1052 | } | 973 | } |
1053 | 974 | ||
1054 | static struct crypto_template crypto_rfc4106_tmpl = { | 975 | static struct crypto_template crypto_rfc4106_tmpl = { |
1055 | .name = "rfc4106", | 976 | .name = "rfc4106", |
1056 | .alloc = crypto_rfc4106_alloc, | 977 | .create = crypto_rfc4106_create, |
1057 | .free = crypto_rfc4106_free, | 978 | .free = crypto_rfc4106_free, |
1058 | .module = THIS_MODULE, | 979 | .module = THIS_MODULE, |
1059 | }; | 980 | }; |
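The converted template now registers through aead_register_instance(), so callers reach it via the ordinary AEAD API. A minimal sketch (not part of this patch) of the new single-SG-list calling convention these templates register under; plain gcm(aes) is used to sidestep rfc4106's IPsec-specific assoclen conventions, and all key/IV values are illustrative:

/*
 * Hedged sketch of the new AEAD interface: one scatterlist carries
 * AD || plaintext, and the tag is written directly after the text.
 */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int gcm_aead_demo(void)
{
	u8 key[16] = { 0 };
	u8 iv[12] = { 0 };
	u8 buf[16 + 16 + 16] = { 0 };	/* AD || plaintext || room for tag */
	struct scatterlist sg;
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	/* Mask out async implementations so encrypt completes inline. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* One SG list covers AD and text; src == dst is allowed. */
	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_ad(req, 16);
	aead_request_set_crypt(req, &sg, &sg, 16, iv);
	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

An rfc4106(gcm(aes)) instance is driven the same way, with its 8-byte explicit IV and the 4-byte salt appended to the key.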
1060 | 981 | ||
1061 | static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx( | ||
1062 | struct aead_request *req) | ||
1063 | { | ||
1064 | unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); | ||
1065 | |||
1066 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | ||
1067 | } | ||
1068 | |||
1069 | static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, | 982 | static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, |
1070 | unsigned int keylen) | 983 | unsigned int keylen) |
1071 | { | 984 | { |
@@ -1100,83 +1013,35 @@ static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, | |||
1100 | return crypto_aead_setauthsize(ctx->child, authsize); | 1013 | return crypto_aead_setauthsize(ctx->child, authsize); |
1101 | } | 1014 | } |
1102 | 1015 | ||
1103 | static void crypto_rfc4543_done(struct crypto_async_request *areq, int err) | 1016 | static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) |
1104 | { | ||
1105 | struct aead_request *req = areq->data; | ||
1106 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1107 | struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); | ||
1108 | |||
1109 | if (!err) { | ||
1110 | scatterwalk_map_and_copy(rctx->auth_tag, req->dst, | ||
1111 | req->cryptlen, | ||
1112 | crypto_aead_authsize(aead), 1); | ||
1113 | } | ||
1114 | |||
1115 | aead_request_complete(req, err); | ||
1116 | } | ||
1117 | |||
1118 | static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, | ||
1119 | bool enc) | ||
1120 | { | 1017 | { |
1121 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1018 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
1122 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); | 1019 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); |
1123 | struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); | 1020 | struct crypto_rfc4543_req_ctx *rctx = aead_request_ctx(req); |
1124 | struct aead_request *subreq = &rctx->subreq; | 1021 | struct aead_request *subreq = &rctx->subreq; |
1125 | struct scatterlist *src = req->src; | ||
1126 | struct scatterlist *cipher = rctx->cipher; | ||
1127 | struct scatterlist *payload = rctx->payload; | ||
1128 | struct scatterlist *assoc = rctx->assoc; | ||
1129 | unsigned int authsize = crypto_aead_authsize(aead); | 1022 | unsigned int authsize = crypto_aead_authsize(aead); |
1130 | unsigned int assoclen = req->assoclen; | ||
1131 | struct page *srcp; | ||
1132 | u8 *vsrc; | ||
1133 | u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), | 1023 | u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), |
1134 | crypto_aead_alignmask(ctx->child) + 1); | 1024 | crypto_aead_alignmask(ctx->child) + 1); |
1025 | int err; | ||
1026 | |||
1027 | if (req->src != req->dst) { | ||
1028 | err = crypto_rfc4543_copy_src_to_dst(req, enc); | ||
1029 | if (err) | ||
1030 | return err; | ||
1031 | } | ||
1135 | 1032 | ||
1136 | memcpy(iv, ctx->nonce, 4); | 1033 | memcpy(iv, ctx->nonce, 4); |
1137 | memcpy(iv + 4, req->iv, 8); | 1034 | memcpy(iv + 4, req->iv, 8); |
1138 | 1035 | ||
1139 | /* construct cipher/plaintext */ | ||
1140 | if (enc) | ||
1141 | memset(rctx->auth_tag, 0, authsize); | ||
1142 | else | ||
1143 | scatterwalk_map_and_copy(rctx->auth_tag, src, | ||
1144 | req->cryptlen - authsize, | ||
1145 | authsize, 0); | ||
1146 | |||
1147 | sg_init_one(cipher, rctx->auth_tag, authsize); | ||
1148 | |||
1149 | /* construct the aad */ | ||
1150 | srcp = sg_page(src); | ||
1151 | vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset; | ||
1152 | |||
1153 | sg_init_table(payload, 2); | ||
1154 | sg_set_buf(payload, req->iv, 8); | ||
1155 | scatterwalk_crypto_chain(payload, src, vsrc == req->iv + 8, 2); | ||
1156 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); | ||
1157 | |||
1158 | if (req->assoc->length == req->assoclen) { | ||
1159 | sg_init_table(assoc, 2); | ||
1160 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, | ||
1161 | req->assoc->offset); | ||
1162 | } else { | ||
1163 | BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); | ||
1164 | |||
1165 | scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, | ||
1166 | req->assoclen, 0); | ||
1167 | |||
1168 | sg_init_table(assoc, 2); | ||
1169 | sg_set_buf(assoc, rctx->assocbuf, req->assoclen); | ||
1170 | } | ||
1171 | scatterwalk_crypto_chain(assoc, payload, 0, 2); | ||
1172 | |||
1173 | aead_request_set_tfm(subreq, ctx->child); | 1036 | aead_request_set_tfm(subreq, ctx->child); |
1174 | aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done, | 1037 | aead_request_set_callback(subreq, req->base.flags, |
1175 | req); | 1038 | req->base.complete, req->base.data); |
1176 | aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv); | 1039 | aead_request_set_crypt(subreq, req->src, req->dst, |
1177 | aead_request_set_assoc(subreq, assoc, assoclen); | 1040 | enc ? 0 : authsize, iv); |
1178 | 1041 | aead_request_set_ad(subreq, req->assoclen + req->cryptlen - | |
1179 | return subreq; | 1042 | subreq->cryptlen); |
1043 | |||
1044 | return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); | ||
1180 | } | 1045 | } |
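The associated-data length deserves a second look: aead_request_set_crypt() has just set subreq->cryptlen to 0 for encryption and to authsize for decryption, so req->assoclen + req->cryptlen - subreq->cryptlen hands the child GCM everything except the authentication tag as AD. That is exactly the RFC 4543 (GMAC) semantic under the new single-SG-list interface: the full payload is authenticated, nothing is encrypted.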
1181 | 1046 | ||
1182 | static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) | 1047 | static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) |
@@ -1184,7 +1049,8 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) | |||
1184 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1049 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
1185 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); | 1050 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); |
1186 | unsigned int authsize = crypto_aead_authsize(aead); | 1051 | unsigned int authsize = crypto_aead_authsize(aead); |
1187 | unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize); | 1052 | unsigned int nbytes = req->assoclen + req->cryptlen - |
1053 | (enc ? 0 : authsize); | ||
1188 | struct blkcipher_desc desc = { | 1054 | struct blkcipher_desc desc = { |
1189 | .tfm = ctx->null, | 1055 | .tfm = ctx->null, |
1190 | }; | 1056 | }; |
@@ -1194,49 +1060,20 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) | |||
1194 | 1060 | ||
1195 | static int crypto_rfc4543_encrypt(struct aead_request *req) | 1061 | static int crypto_rfc4543_encrypt(struct aead_request *req) |
1196 | { | 1062 | { |
1197 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1063 | return crypto_rfc4543_crypt(req, true); |
1198 | struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); | ||
1199 | struct aead_request *subreq; | ||
1200 | int err; | ||
1201 | |||
1202 | if (req->src != req->dst) { | ||
1203 | err = crypto_rfc4543_copy_src_to_dst(req, true); | ||
1204 | if (err) | ||
1205 | return err; | ||
1206 | } | ||
1207 | |||
1208 | subreq = crypto_rfc4543_crypt(req, true); | ||
1209 | err = crypto_aead_encrypt(subreq); | ||
1210 | if (err) | ||
1211 | return err; | ||
1212 | |||
1213 | scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen, | ||
1214 | crypto_aead_authsize(aead), 1); | ||
1215 | |||
1216 | return 0; | ||
1217 | } | 1064 | } |
1218 | 1065 | ||
1219 | static int crypto_rfc4543_decrypt(struct aead_request *req) | 1066 | static int crypto_rfc4543_decrypt(struct aead_request *req) |
1220 | { | 1067 | { |
1221 | int err; | 1068 | return crypto_rfc4543_crypt(req, false); |
1222 | |||
1223 | if (req->src != req->dst) { | ||
1224 | err = crypto_rfc4543_copy_src_to_dst(req, false); | ||
1225 | if (err) | ||
1226 | return err; | ||
1227 | } | ||
1228 | |||
1229 | req = crypto_rfc4543_crypt(req, false); | ||
1230 | |||
1231 | return crypto_aead_decrypt(req); | ||
1232 | } | 1069 | } |
1233 | 1070 | ||
1234 | static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) | 1071 | static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) |
1235 | { | 1072 | { |
1236 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 1073 | struct aead_instance *inst = aead_alg_instance(tfm); |
1237 | struct crypto_rfc4543_instance_ctx *ictx = crypto_instance_ctx(inst); | 1074 | struct crypto_rfc4543_instance_ctx *ictx = aead_instance_ctx(inst); |
1238 | struct crypto_aead_spawn *spawn = &ictx->aead; | 1075 | struct crypto_aead_spawn *spawn = &ictx->aead; |
1239 | struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); | 1076 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); |
1240 | struct crypto_aead *aead; | 1077 | struct crypto_aead *aead; |
1241 | struct crypto_blkcipher *null; | 1078 | struct crypto_blkcipher *null; |
1242 | unsigned long align; | 1079 | unsigned long align; |
@@ -1246,7 +1083,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) | |||
1246 | if (IS_ERR(aead)) | 1083 | if (IS_ERR(aead)) |
1247 | return PTR_ERR(aead); | 1084 | return PTR_ERR(aead); |
1248 | 1085 | ||
1249 | null = crypto_spawn_blkcipher(&ictx->null.base); | 1086 | null = crypto_get_default_null_skcipher(); |
1250 | err = PTR_ERR(null); | 1087 | err = PTR_ERR(null); |
1251 | if (IS_ERR(null)) | 1088 | if (IS_ERR(null)) |
1252 | goto err_free_aead; | 1089 | goto err_free_aead; |
@@ -1256,10 +1093,11 @@ static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) | |||
1256 | 1093 | ||
1257 | align = crypto_aead_alignmask(aead); | 1094 | align = crypto_aead_alignmask(aead); |
1258 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 1095 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
1259 | tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) + | 1096 | crypto_aead_set_reqsize( |
1260 | ALIGN(crypto_aead_reqsize(aead), | 1097 | tfm, |
1261 | crypto_tfm_ctx_alignment()) + | 1098 | sizeof(struct crypto_rfc4543_req_ctx) + |
1262 | align + 16; | 1099 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + |
1100 | align + 12); | ||
1263 | 1101 | ||
1264 | return 0; | 1102 | return 0; |
1265 | 1103 | ||
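The request size arithmetic carries over from the old code with one change: the trailing scratch area shrinks from 16 to 12 bytes, matching the 12-byte GCM IV that crypto_rfc4543_crypt() now assembles (4-byte nonce plus the caller's 8-byte IV) in the aligned space after the child request.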
@@ -1268,107 +1106,98 @@ err_free_aead: | |||
1268 | return err; | 1106 | return err; |
1269 | } | 1107 | } |
1270 | 1108 | ||
1271 | static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm) | 1109 | static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) |
1272 | { | 1110 | { |
1273 | struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); | 1111 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); |
1274 | 1112 | ||
1275 | crypto_free_aead(ctx->child); | 1113 | crypto_free_aead(ctx->child); |
1276 | crypto_free_blkcipher(ctx->null); | 1114 | crypto_put_default_null_skcipher(); |
1277 | } | 1115 | } |
1278 | 1116 | ||
1279 | static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) | 1117 | static int crypto_rfc4543_create(struct crypto_template *tmpl, |
1118 | struct rtattr **tb) | ||
1280 | { | 1119 | { |
1281 | struct crypto_attr_type *algt; | 1120 | struct crypto_attr_type *algt; |
1282 | struct crypto_instance *inst; | 1121 | struct aead_instance *inst; |
1283 | struct crypto_aead_spawn *spawn; | 1122 | struct crypto_aead_spawn *spawn; |
1284 | struct crypto_alg *alg; | 1123 | struct aead_alg *alg; |
1285 | struct crypto_rfc4543_instance_ctx *ctx; | 1124 | struct crypto_rfc4543_instance_ctx *ctx; |
1286 | const char *ccm_name; | 1125 | const char *ccm_name; |
1287 | int err; | 1126 | int err; |
1288 | 1127 | ||
1289 | algt = crypto_get_attr_type(tb); | 1128 | algt = crypto_get_attr_type(tb); |
1290 | if (IS_ERR(algt)) | 1129 | if (IS_ERR(algt)) |
1291 | return ERR_CAST(algt); | 1130 | return PTR_ERR(algt); |
1292 | 1131 | ||
1293 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 1132 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
1294 | return ERR_PTR(-EINVAL); | 1133 | return -EINVAL; |
1295 | 1134 | ||
1296 | ccm_name = crypto_attr_alg_name(tb[1]); | 1135 | ccm_name = crypto_attr_alg_name(tb[1]); |
1297 | if (IS_ERR(ccm_name)) | 1136 | if (IS_ERR(ccm_name)) |
1298 | return ERR_CAST(ccm_name); | 1137 | return PTR_ERR(ccm_name); |
1299 | 1138 | ||
1300 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 1139 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
1301 | if (!inst) | 1140 | if (!inst) |
1302 | return ERR_PTR(-ENOMEM); | 1141 | return -ENOMEM; |
1303 | 1142 | ||
1304 | ctx = crypto_instance_ctx(inst); | 1143 | ctx = aead_instance_ctx(inst); |
1305 | spawn = &ctx->aead; | 1144 | spawn = &ctx->aead; |
1306 | crypto_set_aead_spawn(spawn, inst); | 1145 | crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); |
1307 | err = crypto_grab_aead(spawn, ccm_name, 0, | 1146 | err = crypto_grab_aead(spawn, ccm_name, 0, |
1308 | crypto_requires_sync(algt->type, algt->mask)); | 1147 | crypto_requires_sync(algt->type, algt->mask)); |
1309 | if (err) | 1148 | if (err) |
1310 | goto out_free_inst; | 1149 | goto out_free_inst; |
1311 | 1150 | ||
1312 | alg = crypto_aead_spawn_alg(spawn); | 1151 | alg = crypto_spawn_aead_alg(spawn); |
1313 | |||
1314 | crypto_set_skcipher_spawn(&ctx->null, inst); | ||
1315 | err = crypto_grab_skcipher(&ctx->null, "ecb(cipher_null)", 0, | ||
1316 | CRYPTO_ALG_ASYNC); | ||
1317 | if (err) | ||
1318 | goto out_drop_alg; | ||
1319 | |||
1320 | crypto_skcipher_spawn_alg(&ctx->null); | ||
1321 | 1152 | ||
1322 | err = -EINVAL; | 1153 | err = -EINVAL; |
1323 | 1154 | ||
1324 | /* We only support 16-byte blocks. */ | 1155 | /* Underlying IV size must be 12. */ |
1325 | if (alg->cra_aead.ivsize != 16) | 1156 | if (crypto_aead_alg_ivsize(alg) != 12) |
1326 | goto out_drop_ecbnull; | 1157 | goto out_drop_alg; |
1327 | 1158 | ||
1328 | /* Not a stream cipher? */ | 1159 | /* Not a stream cipher? */ |
1329 | if (alg->cra_blocksize != 1) | 1160 | if (alg->base.cra_blocksize != 1) |
1330 | goto out_drop_ecbnull; | 1161 | goto out_drop_alg; |
1331 | 1162 | ||
1332 | err = -ENAMETOOLONG; | 1163 | err = -ENAMETOOLONG; |
1333 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 1164 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
1334 | "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || | 1165 | "rfc4543(%s)", alg->base.cra_name) >= |
1335 | snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 1166 | CRYPTO_MAX_ALG_NAME || |
1336 | "rfc4543(%s)", alg->cra_driver_name) >= | 1167 | snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
1168 | "rfc4543(%s)", alg->base.cra_driver_name) >= | ||
1337 | CRYPTO_MAX_ALG_NAME) | 1169 | CRYPTO_MAX_ALG_NAME) |
1338 | goto out_drop_ecbnull; | 1170 | goto out_drop_alg; |
1339 | 1171 | ||
1340 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 1172 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
1341 | inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; | 1173 | inst->alg.base.cra_priority = alg->base.cra_priority; |
1342 | inst->alg.cra_priority = alg->cra_priority; | 1174 | inst->alg.base.cra_blocksize = 1; |
1343 | inst->alg.cra_blocksize = 1; | 1175 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; |
1344 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
1345 | inst->alg.cra_type = &crypto_nivaead_type; | ||
1346 | 1176 | ||
1347 | inst->alg.cra_aead.ivsize = 8; | 1177 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); |
1348 | inst->alg.cra_aead.maxauthsize = 16; | ||
1349 | 1178 | ||
1350 | inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); | 1179 | inst->alg.ivsize = 8; |
1180 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); | ||
1351 | 1181 | ||
1352 | inst->alg.cra_init = crypto_rfc4543_init_tfm; | 1182 | inst->alg.init = crypto_rfc4543_init_tfm; |
1353 | inst->alg.cra_exit = crypto_rfc4543_exit_tfm; | 1183 | inst->alg.exit = crypto_rfc4543_exit_tfm; |
1354 | 1184 | ||
1355 | inst->alg.cra_aead.setkey = crypto_rfc4543_setkey; | 1185 | inst->alg.setkey = crypto_rfc4543_setkey; |
1356 | inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize; | 1186 | inst->alg.setauthsize = crypto_rfc4543_setauthsize; |
1357 | inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt; | 1187 | inst->alg.encrypt = crypto_rfc4543_encrypt; |
1358 | inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt; | 1188 | inst->alg.decrypt = crypto_rfc4543_decrypt; |
1359 | 1189 | ||
1360 | inst->alg.cra_aead.geniv = "seqiv"; | 1190 | err = aead_register_instance(tmpl, inst); |
1191 | if (err) | ||
1192 | goto out_drop_alg; | ||
1361 | 1193 | ||
1362 | out: | 1194 | out: |
1363 | return inst; | 1195 | return err; |
1364 | 1196 | ||
1365 | out_drop_ecbnull: | ||
1366 | crypto_drop_skcipher(&ctx->null); | ||
1367 | out_drop_alg: | 1197 | out_drop_alg: |
1368 | crypto_drop_aead(spawn); | 1198 | crypto_drop_aead(spawn); |
1369 | out_free_inst: | 1199 | out_free_inst: |
1370 | kfree(inst); | 1200 | kfree(inst); |
1371 | inst = ERR_PTR(err); | ||
1372 | goto out; | 1201 | goto out; |
1373 | } | 1202 | } |
1374 | 1203 | ||
@@ -1377,14 +1206,13 @@ static void crypto_rfc4543_free(struct crypto_instance *inst) | |||
1377 | struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst); | 1206 | struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst); |
1378 | 1207 | ||
1379 | crypto_drop_aead(&ctx->aead); | 1208 | crypto_drop_aead(&ctx->aead); |
1380 | crypto_drop_skcipher(&ctx->null); | ||
1381 | 1209 | ||
1382 | kfree(inst); | 1210 | kfree(aead_instance(inst)); |
1383 | } | 1211 | } |
1384 | 1212 | ||
1385 | static struct crypto_template crypto_rfc4543_tmpl = { | 1213 | static struct crypto_template crypto_rfc4543_tmpl = { |
1386 | .name = "rfc4543", | 1214 | .name = "rfc4543", |
1387 | .alloc = crypto_rfc4543_alloc, | 1215 | .create = crypto_rfc4543_create, |
1388 | .free = crypto_rfc4543_free, | 1216 | .free = crypto_rfc4543_free, |
1389 | .module = THIS_MODULE, | 1217 | .module = THIS_MODULE, |
1390 | }; | 1218 | }; |
@@ -1393,10 +1221,12 @@ static int __init crypto_gcm_module_init(void) | |||
1393 | { | 1221 | { |
1394 | int err; | 1222 | int err; |
1395 | 1223 | ||
1396 | gcm_zeroes = kzalloc(16, GFP_KERNEL); | 1224 | gcm_zeroes = kzalloc(sizeof(*gcm_zeroes), GFP_KERNEL); |
1397 | if (!gcm_zeroes) | 1225 | if (!gcm_zeroes) |
1398 | return -ENOMEM; | 1226 | return -ENOMEM; |
1399 | 1227 | ||
1228 | sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf)); | ||
1229 | |||
1400 | err = crypto_register_template(&crypto_gcm_base_tmpl); | 1230 | err = crypto_register_template(&crypto_gcm_base_tmpl); |
1401 | if (err) | 1231 | if (err) |
1402 | goto out; | 1232 | goto out; |
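A plausible reading of this hunk: gcm_zeroes is no longer a bare 16-byte buffer but a pointer to a small structure (defined earlier in the file, outside this excerpt) that bundles a zero-filled buffer with a pre-initialised scatterlist, so the null-skcipher copy path can chain the zero block into SG lists without rebuilding one per request.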
diff --git a/crypto/internal.h b/crypto/internal.h index bd39bfc92eab..00e42a3ed814 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/rwsem.h> | 26 | #include <linux/rwsem.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/fips.h> | ||
29 | 28 | ||
30 | /* Crypto notification events. */ | 29 | /* Crypto notification events. */ |
31 | enum { | 30 | enum { |
@@ -103,6 +102,8 @@ int crypto_register_notifier(struct notifier_block *nb); | |||
103 | int crypto_unregister_notifier(struct notifier_block *nb); | 102 | int crypto_unregister_notifier(struct notifier_block *nb); |
104 | int crypto_probing_notify(unsigned long val, void *v); | 103 | int crypto_probing_notify(unsigned long val, void *v); |
105 | 104 | ||
105 | unsigned int crypto_alg_extsize(struct crypto_alg *alg); | ||
106 | |||
106 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) | 107 | static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) |
107 | { | 108 | { |
108 | atomic_inc(&alg->cra_refcnt); | 109 | atomic_inc(&alg->cra_refcnt); |
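The newly exported helper is only declared in this hunk. A plausible definition, judging from how the per-type extsize hooks compute context sizes elsewhere in the API, is the context size padded for the algorithm's alignment mask (a hedged reconstruction; the actual body lives outside this diff):

unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
	/* Context size plus worst-case alignment slack for this algorithm. */
	return alg->cra_ctxsize +
	       (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1));
}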
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c new file mode 100644 index 000000000000..d3c30452edee --- /dev/null +++ b/crypto/jitterentropy.c | |||
@@ -0,0 +1,928 @@ | |||
1 | /* | ||
2 | * Non-physical true random number generator based on timing jitter. | ||
3 | * | ||
4 | * Copyright Stephan Mueller <smueller@chronox.de>, 2014 | ||
5 | * | ||
6 | * Design | ||
7 | * ====== | ||
8 | * | ||
9 | * See http://www.chronox.de/jent.html | ||
10 | * | ||
11 | * License | ||
12 | * ======= | ||
13 | * | ||
14 | * Redistribution and use in source and binary forms, with or without | ||
15 | * modification, are permitted provided that the following conditions | ||
16 | * are met: | ||
17 | * 1. Redistributions of source code must retain the above copyright | ||
18 | * notice, and the entire permission notice in its entirety, | ||
19 | * including the disclaimer of warranties. | ||
20 | * 2. Redistributions in binary form must reproduce the above copyright | ||
21 | * notice, this list of conditions and the following disclaimer in the | ||
22 | * documentation and/or other materials provided with the distribution. | ||
23 | * 3. The name of the author may not be used to endorse or promote | ||
24 | * products derived from this software without specific prior | ||
25 | * written permission. | ||
26 | * | ||
27 | * ALTERNATIVELY, this product may be distributed under the terms of | ||
28 | * the GNU General Public License, in which case the provisions of the GPL2 are | ||
29 | * required INSTEAD OF the above restrictions. (This clause is | ||
30 | * necessary due to a potential bad interaction between the GPL and | ||
31 | * the restrictions contained in a BSD-style copyright.) | ||
32 | * | ||
33 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
34 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
35 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF | ||
36 | * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE | ||
37 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
38 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT | ||
39 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
40 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
41 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
42 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | ||
43 | * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH | ||
44 | * DAMAGE. | ||
45 | */ | ||
46 | |||
47 | /* | ||
48 | * This Jitterentropy RNG is based on the jitterentropy library | ||
49 | * version 1.1.0 provided at http://www.chronox.de/jent.html | ||
50 | */ | ||
51 | |||
52 | #include <linux/module.h> | ||
53 | #include <linux/slab.h> | ||
54 | #include <linux/module.h> | ||
55 | #include <linux/fips.h> | ||
56 | #include <linux/time.h> | ||
57 | #include <linux/crypto.h> | ||
58 | #include <crypto/internal/rng.h> | ||
59 | |||
60 | /* The entropy pool */ | ||
61 | struct rand_data { | ||
62 | /* all data values that are vital to maintain the security | ||
63 | * of the RNG are marked as SENSITIVE. A user must not | ||
64 | * access that information while the RNG executes its loops to | ||
65 | * calculate the next random value. */ | ||
66 | __u64 data; /* SENSITIVE Actual random number */ | ||
67 | __u64 old_data; /* SENSITIVE Previous random number */ | ||
68 | __u64 prev_time; /* SENSITIVE Previous time stamp */ | ||
69 | #define DATA_SIZE_BITS ((sizeof(__u64)) * 8) | ||
70 | __u64 last_delta; /* SENSITIVE stuck test */ | ||
71 | __s64 last_delta2; /* SENSITIVE stuck test */ | ||
72 | unsigned int stuck:1; /* Time measurement stuck */ | ||
73 | unsigned int osr; /* Oversample rate */ | ||
74 | unsigned int stir:1; /* Post-processing stirring */ | ||
75 | unsigned int disable_unbias:1; /* Deactivate Von Neumann unbias */ | ||
76 | #define JENT_MEMORY_BLOCKS 64 | ||
77 | #define JENT_MEMORY_BLOCKSIZE 32 | ||
78 | #define JENT_MEMORY_ACCESSLOOPS 128 | ||
79 | #define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE) | ||
80 | unsigned char *mem; /* Memory access location with size of | ||
81 | * memblocks * memblocksize */ | ||
82 | unsigned int memlocation; /* Pointer to byte in *mem */ | ||
83 | unsigned int memblocks; /* Number of memory blocks in *mem */ | ||
84 | unsigned int memblocksize; /* Size of one memory block in bytes */ | ||
85 | unsigned int memaccessloops; /* Number of memory accesses per random | ||
86 | * bit generation */ | ||
87 | }; | ||
88 | |||
89 | /* Flags that can be used to initialize the RNG */ | ||
90 | #define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */ | ||
91 | #define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von Neumann unbiaser */ | ||
92 | #define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more | ||
93 | * entropy, saves MEMORY_SIZE RAM for | ||
94 | * entropy collector */ | ||
95 | |||
96 | #define DRIVER_NAME "jitterentropy" | ||
97 | |||
98 | /* -- error codes for init function -- */ | ||
99 | #define JENT_ENOTIME 1 /* Timer service not available */ | ||
100 | #define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */ | ||
101 | #define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */ | ||
102 | #define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */ | ||
103 | #define JENT_EVARVAR 5 /* Timer does not produce variations of | ||
104 | * variations (2nd derivation of time is | ||
105 | * zero). */ | ||
106 | #define JENT_EMINVARVAR 6 /* Timer variations of variations are too | ||
107 | * small. */ | ||
108 | |||
109 | /*************************************************************************** | ||
110 | * Helper functions | ||
111 | ***************************************************************************/ | ||
112 | |||
113 | static inline void jent_get_nstime(__u64 *out) | ||
114 | { | ||
115 | struct timespec ts; | ||
116 | __u64 tmp = 0; | ||
117 | |||
118 | tmp = random_get_entropy(); | ||
119 | |||
120 | /* | ||
121 | * If random_get_entropy does not return a value (which is possible on, | ||
122 | * for example, MIPS), invoke __getnstimeofday | ||
123 | * hoping that there are timers we can work with. | ||
124 | * | ||
125 | * The list of available timers can be obtained from | ||
126 | * /sys/devices/system/clocksource/clocksource0/available_clocksource | ||
127 | * and are registered with clocksource_register() | ||
128 | */ | ||
129 | if ((0 == tmp) && | ||
130 | (0 == __getnstimeofday(&ts))) { | ||
131 | tmp = ts.tv_sec; | ||
132 | tmp = tmp << 32; | ||
133 | tmp = tmp | ts.tv_nsec; | ||
134 | } | ||
135 | |||
136 | *out = tmp; | ||
137 | } | ||
138 | |||
139 | |||
140 | /** | ||
141 | * Update of the loop count used for the next round of | ||
142 | * an entropy collection. | ||
143 | * | ||
144 | * Input: | ||
145 | * @ec entropy collector struct -- may be NULL | ||
146 | * @bits is the number of low bits of the timer to consider | ||
147 | * @min is the number of bits we shift the timer value to the right at | ||
148 | * the end to make sure we have a guaranteed minimum value | ||
149 | * | ||
150 | * @return Newly calculated loop counter | ||
151 | */ | ||
152 | static __u64 jent_loop_shuffle(struct rand_data *ec, | ||
153 | unsigned int bits, unsigned int min) | ||
154 | { | ||
155 | __u64 time = 0; | ||
156 | __u64 shuffle = 0; | ||
157 | unsigned int i = 0; | ||
158 | unsigned int mask = (1<<bits) - 1; | ||
159 | |||
160 | jent_get_nstime(&time); | ||
161 | /* | ||
162 | * mix the current state of the random number into the shuffle | ||
163 | * calculation to balance that shuffle a bit more | ||
164 | */ | ||
165 | if (ec) | ||
166 | time ^= ec->data; | ||
167 | /* | ||
168 | * we fold the time value as much as possible to ensure that as many | ||
169 | * bits of the time stamp are included as possible | ||
170 | */ | ||
171 | for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) { | ||
172 | shuffle ^= time & mask; | ||
173 | time = time >> bits; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * We add a lower boundary value to ensure we have a minimum | ||
178 | * RNG loop count. | ||
179 | */ | ||
180 | return (shuffle + (1<<min)); | ||
181 | } | ||
182 | |||
183 | /*************************************************************************** | ||
184 | * Noise sources | ||
185 | ***************************************************************************/ | ||
186 | |||
187 | /* | ||
188 | * The disabling of the optimizations is performed as documented and assessed | ||
189 | * thoroughly in http://www.chronox.de/jent.html. However, instead of disabling | ||
190 | * the optimization of the entire C file, only the main functions the jitter is | ||
191 | * measured for are not optimized. These functions include the noise sources as | ||
192 | * well as the main functions triggering the noise sources. As the time | ||
193 | * measurement is done from one invocation of the jitter noise source to the | ||
194 | * next, even the execution jitter of the code invoking the noise sources | ||
195 | * contributes to the overall randomness as well. The behavior of the RNG and | ||
196 | * the statistical characteristics when only the mentioned functions are not | ||
197 | * optimized are almost equal to a completely non-optimized RNG compilation | ||
198 | * as tested with the test tools provided at the initially mentioned web site. | ||
199 | */ | ||
200 | |||
201 | /** | ||
202 | * CPU Jitter noise source -- this is the noise source based on the CPU | ||
203 | * execution time jitter | ||
204 | * | ||
205 | * This function folds the time into one bit units by iterating | ||
206 | * through the DATA_SIZE_BITS bit time value as follows: assume our time value | ||
207 | * is 0xabcd | ||
208 | * 1st loop, 1st shift generates 0xd000 | ||
209 | * 1st loop, 2nd shift generates 0x000d | ||
210 | * 2nd loop, 1st shift generates 0xcd00 | ||
211 | * 2nd loop, 2nd shift generates 0x000c | ||
212 | * 3rd loop, 1st shift generates 0xbcd0 | ||
213 | * 3rd loop, 2nd shift generates 0x000b | ||
214 | * 4th loop, 1st shift generates 0xabcd | ||
215 | * 4th loop, 2nd shift generates 0x000a | ||
216 | * Now, the values at the end of the 2nd shifts are XORed together. | ||
217 | * | ||
218 | * The code is deliberately inefficient and shall stay that way. This function | ||
219 | * is the root cause why the code shall be compiled without optimization. This | ||
220 | * function not only acts as folding operation, but this function's execution | ||
221 | * is used to measure the CPU execution time jitter. Any change to the loop in | ||
222 | * this function implies that careful retesting must be done. | ||
223 | * | ||
224 | * Input: | ||
225 | * @ec entropy collector struct -- may be NULL | ||
226 | * @time time stamp to be folded | ||
227 | * @loop_cnt if a value not equal to 0 is set, use the given value as number of | ||
228 | * loops to perform the folding | ||
229 | * | ||
230 | * Output: | ||
231 | * @folded result of folding operation | ||
232 | * | ||
233 | * @return Number of loops the folding operation is performed | ||
234 | */ | ||
235 | #pragma GCC push_options | ||
236 | #pragma GCC optimize ("-O0") | ||
237 | static __u64 jent_fold_time(struct rand_data *ec, __u64 time, | ||
238 | __u64 *folded, __u64 loop_cnt) | ||
239 | { | ||
240 | unsigned int i; | ||
241 | __u64 j = 0; | ||
242 | __u64 new = 0; | ||
243 | #define MAX_FOLD_LOOP_BIT 4 | ||
244 | #define MIN_FOLD_LOOP_BIT 0 | ||
245 | __u64 fold_loop_cnt = | ||
246 | jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT); | ||
247 | |||
248 | /* | ||
249 | * testing purposes -- allow test app to set the counter, not | ||
250 | * needed during runtime | ||
251 | */ | ||
252 | if (loop_cnt) | ||
253 | fold_loop_cnt = loop_cnt; | ||
254 | for (j = 0; j < fold_loop_cnt; j++) { | ||
255 | new = 0; | ||
256 | for (i = 1; (DATA_SIZE_BITS) >= i; i++) { | ||
257 | __u64 tmp = time << (DATA_SIZE_BITS - i); | ||
258 | |||
259 | tmp = tmp >> (DATA_SIZE_BITS - 1); | ||
260 | new ^= tmp; | ||
261 | } | ||
262 | } | ||
263 | *folded = new; | ||
264 | return fold_loop_cnt; | ||
265 | } | ||
266 | #pragma GCC pop_options | ||
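To make the folding concrete with the 0xabcd example from the comment above: each inner-loop iteration isolates one bit of the time value and XORs it in, so a single pass reduces the value to its parity. A tiny userspace demonstration (a hypothetical test harness, not part of the module):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t time = 0xabcd, folded = 0;
	unsigned int i;

	for (i = 1; i <= 64; i++) {
		uint64_t tmp = time << (64 - i);	/* isolate bit i-1 ... */
		tmp >>= 63;				/* ... in the LSB */
		folded ^= tmp;
	}
	/* 0xabcd has ten set bits, so the parity -- and the fold -- is 0. */
	printf("folded = %llu\n", (unsigned long long)folded);
	return 0;
}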
267 | |||
268 | /** | ||
269 | * Memory Access noise source -- this is a noise source based on variations in | ||
270 | * memory access times | ||
271 | * | ||
272 | * This function performs memory accesses which will add to the timing | ||
273 | * variations due to an unknown amount of CPU wait states that need to be | ||
274 | * added when accessing memory. The memory size should be larger than the L1 | ||
275 | * caches as outlined in the documentation and the associated testing. | ||
276 | * | ||
277 | * The L1 cache has a very high bandwidth, albeit its access rate is usually | ||
278 | * slower than accessing CPU registers. Therefore, L1 accesses only add minimal | ||
279 | * variations as the CPU hardly has to wait. Starting with L2, significant | ||
280 | * variations are added because L2 typically does not belong to the CPU any more | ||
281 | * and therefore a wider range of CPU wait states is necessary for accesses. | ||
282 | * L3 and real memory accesses have an even wider range of wait states. However, | ||
283 | * to reliably access either L3 or memory, the ec->mem memory must be quite | ||
284 | * large which is usually not desirable. | ||
285 | * | ||
286 | * Input: | ||
287 | * @ec Reference to the entropy collector with the memory access data -- if | ||
288 | * the reference to the memory block to be accessed is NULL, this noise | ||
289 | * source is disabled | ||
290 | * @loop_cnt if a value not equal to 0 is set, use the given value as number of | ||
291 | * loops to perform the folding | ||
292 | * | ||
293 | * @return Number of memory access operations | ||
294 | */ | ||
295 | #pragma GCC push_options | ||
296 | #pragma GCC optimize ("-O0") | ||
297 | static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) | ||
298 | { | ||
299 | unsigned char *tmpval = NULL; | ||
300 | unsigned int wrap = 0; | ||
301 | __u64 i = 0; | ||
302 | #define MAX_ACC_LOOP_BIT 7 | ||
303 | #define MIN_ACC_LOOP_BIT 0 | ||
304 | __u64 acc_loop_cnt = | ||
305 | jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); | ||
306 | |||
307 | if (NULL == ec || NULL == ec->mem) | ||
308 | return 0; | ||
309 | wrap = ec->memblocksize * ec->memblocks; | ||
310 | |||
311 | /* | ||
312 | * testing purposes -- allow test app to set the counter, not | ||
313 | * needed during runtime | ||
314 | */ | ||
315 | if (loop_cnt) | ||
316 | acc_loop_cnt = loop_cnt; | ||
317 | |||
318 | for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) { | ||
319 | tmpval = ec->mem + ec->memlocation; | ||
320 | /* | ||
321 | * memory access: just add 1 to one byte, | ||
322 | * wrap at 255 -- memory access implies read | ||
323 | * from and write to memory location | ||
324 | */ | ||
325 | *tmpval = (*tmpval + 1) & 0xff; | ||
326 | /* | ||
327 | * Addition of memblocksize - 1 to pointer | ||
328 | * with wrap around logic to ensure that every | ||
329 | * memory location is hit evenly | ||
330 | */ | ||
331 | ec->memlocation = ec->memlocation + ec->memblocksize - 1; | ||
332 | ec->memlocation = ec->memlocation % wrap; | ||
333 | } | ||
334 | return i; | ||
335 | } | ||
336 | #pragma GCC pop_options | ||
337 | |||
338 | /*************************************************************************** | ||
339 | * Start of entropy processing logic | ||
340 | ***************************************************************************/ | ||
341 | |||
342 | /** | ||
343 | * Stuck test by checking the: | ||
344 | * 1st derivation of the jitter measurement (time delta) | ||
345 | * 2nd derivation of the jitter measurement (delta of time deltas) | ||
346 | * 3rd derivation of the jitter measurement (delta of delta of time deltas) | ||
347 | * | ||
348 | * All values must always be non-zero. | ||
349 | * | ||
350 | * Input: | ||
351 | * @ec Reference to entropy collector | ||
352 | * @current_delta Jitter time delta | ||
353 | * | ||
354 | * @return | ||
355 | * Nothing -- instead of returning a value, the function flags a | ||
356 | * stuck measurement (reject bit) by setting ec->stuck to 1 | ||
357 | */ | ||
358 | static void jent_stuck(struct rand_data *ec, __u64 current_delta) | ||
359 | { | ||
360 | __s64 delta2 = ec->last_delta - current_delta; | ||
361 | __s64 delta3 = delta2 - ec->last_delta2; | ||
362 | |||
363 | ec->last_delta = current_delta; | ||
364 | ec->last_delta2 = delta2; | ||
365 | |||
366 | if (!current_delta || !delta2 || !delta3) | ||
367 | ec->stuck = 1; | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * This is the heart of the entropy generation: calculate time deltas and | ||
372 | * use the CPU jitter in the time deltas. The jitter is folded into one | ||
373 | * bit. You can call this function the "random bit generator" as it | ||
374 | * produces one random bit per invocation. | ||
375 | * | ||
376 | * WARNING: ensure that ->prev_time is primed before using the output | ||
377 | * of this function! This can be done by calling this function | ||
378 | * and not using its result. | ||
379 | * | ||
380 | * Input: | ||
381 | * @entropy_collector Reference to entropy collector | ||
382 | * | ||
383 | * @return One random bit | ||
384 | */ | ||
385 | #pragma GCC push_options | ||
386 | #pragma GCC optimize ("-O0") | ||
387 | static __u64 jent_measure_jitter(struct rand_data *ec) | ||
388 | { | ||
389 | __u64 time = 0; | ||
390 | __u64 data = 0; | ||
391 | __u64 current_delta = 0; | ||
392 | |||
393 | /* Invoke one noise source before time measurement to add variations */ | ||
394 | jent_memaccess(ec, 0); | ||
395 | |||
396 | /* | ||
397 | * Get time stamp and calculate time delta to previous | ||
398 | * invocation to measure the timing variations | ||
399 | */ | ||
400 | jent_get_nstime(&time); | ||
401 | current_delta = time - ec->prev_time; | ||
402 | ec->prev_time = time; | ||
403 | |||
404 | /* Now call the next noise source, which also folds the data */ | ||
405 | jent_fold_time(ec, current_delta, &data, 0); | ||
406 | |||
407 | /* | ||
408 | * Check whether we have a stuck measurement. The enforcement | ||
409 | * is performed after the stuck value has been mixed into the | ||
410 | * entropy pool. | ||
411 | */ | ||
412 | jent_stuck(ec, current_delta); | ||
413 | |||
414 | return data; | ||
415 | } | ||
416 | #pragma GCC pop_options | ||
417 | |||
418 | /** | ||
419 | * Von Neumann unbias as explained in RFC 4086 section 4.2. As shown in the | ||
420 | * documentation of that RNG, the bits from jent_measure_jitter are considered | ||
421 | * independent, which implies that the Von Neumann unbias operation is applicable. | ||
422 | * A proof of the Von Neumann unbias operation to remove skews is given in the | ||
423 | * document "A proposal for: Functionality classes for random number | ||
424 | * generators", version 2.0 by Werner Schindler, section 5.4.1. | ||
425 | * | ||
426 | * Input: | ||
427 | * @entropy_collector Reference to entropy collector | ||
428 | * | ||
429 | * @return One random bit | ||
430 | */ | ||
431 | static __u64 jent_unbiased_bit(struct rand_data *entropy_collector) | ||
432 | { | ||
433 | do { | ||
434 | __u64 a = jent_measure_jitter(entropy_collector); | ||
435 | __u64 b = jent_measure_jitter(entropy_collector); | ||
436 | |||
437 | if (a == b) | ||
438 | continue; | ||
439 | if (1 == a) | ||
440 | return 1; | ||
441 | else | ||
442 | return 0; | ||
443 | } while (1); | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * Shuffle the pool a bit by mixing some value with a bijective function (XOR) | ||
448 | * into the pool. | ||
449 | * | ||
450 | * The function generates a mixer value that depends on the bits set and the | ||
451 | * location of the set bits in the random number generated by the entropy | ||
452 | * source. Therefore, based on the generated random number, this mixer value | ||
453 | * can have 2**64 different values. That mixer value is initialized with the | ||
454 | * first two SHA-1 constants. After obtaining the mixer value, it is XORed into | ||
455 | * the random number. | ||
456 | * | ||
457 | * The mixer value is not assumed to contain any entropy. But due to the XOR | ||
458 | * operation, it can also not destroy any entropy present in the entropy pool. | ||
459 | * | ||
460 | * Input: | ||
461 | * @entropy_collector Reference to entropy collector | ||
462 | */ | ||
463 | static void jent_stir_pool(struct rand_data *entropy_collector) | ||
464 | { | ||
465 | /* | ||
466 | * to shut up GCC on 32 bit, we have to initialize the 64 bit variable | ||
467 | * with two 32 bit variables | ||
468 | */ | ||
469 | union c { | ||
470 | __u64 u64; | ||
471 | __u32 u32[2]; | ||
472 | }; | ||
473 | /* | ||
474 | * This constant is derived from the first two 32 bit initialization | ||
475 | * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 | ||
476 | */ | ||
477 | union c constant; | ||
478 | /* | ||
479 | * The start value of the mixer variable is derived from the third | ||
480 | * and fourth 32 bit initialization vector of SHA-1 as defined in | ||
481 | * FIPS 180-4 section 5.3.1 | ||
482 | */ | ||
483 | union c mixer; | ||
484 | unsigned int i = 0; | ||
485 | |||
486 | /* | ||
487 | * Store the SHA-1 constants in reverse order to make up the 64 bit | ||
488 | * value -- this applies to a little endian system, on a big endian | ||
489 | * system, it reverses as expected. But this really does not matter | ||
490 | * as we do not rely on the specific numbers. We just pick the SHA-1 | ||
491 | * constants as they have a good mix of bit set and unset. | ||
492 | */ | ||
493 | constant.u32[1] = 0x67452301; | ||
494 | constant.u32[0] = 0xefcdab89; | ||
495 | mixer.u32[1] = 0x98badcfe; | ||
496 | mixer.u32[0] = 0x10325476; | ||
497 | |||
498 | for (i = 0; i < DATA_SIZE_BITS; i++) { | ||
499 | /* | ||
500 | * get the i-th bit of the input random number and only XOR | ||
501 | * the constant into the mixer value when that bit is set | ||
502 | */ | ||
503 | if ((entropy_collector->data >> i) & 1) | ||
504 | mixer.u64 ^= constant.u64; | ||
505 | mixer.u64 = rol64(mixer.u64, 1); | ||
506 | } | ||
507 | entropy_collector->data ^= mixer.u64; | ||
508 | } | ||
509 | |||
510 | /** | ||
511 | * Generator of one 64 bit random number | ||
512 | * Function fills rand_data->data | ||
513 | * | ||
514 | * Input: | ||
515 | * @ec Reference to entropy collector | ||
516 | */ | ||
517 | #pragma GCC push_options | ||
518 | #pragma GCC optimize ("-O0") | ||
519 | static void jent_gen_entropy(struct rand_data *ec) | ||
520 | { | ||
521 | unsigned int k = 0; | ||
522 | |||
523 | /* priming of the ->prev_time value */ | ||
524 | jent_measure_jitter(ec); | ||
525 | |||
526 | while (1) { | ||
527 | __u64 data = 0; | ||
528 | |||
529 | if (ec->disable_unbias == 1) | ||
530 | data = jent_measure_jitter(ec); | ||
531 | else | ||
532 | data = jent_unbiased_bit(ec); | ||
533 | |||
534 | /* enforcement of the jent_stuck test */ | ||
535 | if (ec->stuck) { | ||
536 | /* | ||
537 | * We only mix in the bit considered not appropriate | ||
538 | * without the LFSR. The reason is that if we apply | ||
539 | * the LFSR and we do not rotate, the 2nd bit with LFSR | ||
540 | * will cancel out the first LFSR application on the | ||
541 | * bad bit. | ||
542 | * | ||
543 | * And we do not rotate as we apply the next bit to the | ||
544 | * current bit location again. | ||
545 | */ | ||
546 | ec->data ^= data; | ||
547 | ec->stuck = 0; | ||
548 | continue; | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * Fibonacci LFSR with polynomial of | ||
553 | * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is | ||
554 | * primitive according to | ||
555 | * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf | ||
556 | * (the shift values are the polynomial values minus one | ||
557 | * due to counting bits from 0 to 63). As the current | ||
558 | * position is always the LSB, the polynomial only needs | ||
559 | * to shift data in from the left without wrap. | ||
560 | */ | ||
561 | ec->data ^= data; | ||
562 | ec->data ^= ((ec->data >> 63) & 1); | ||
563 | ec->data ^= ((ec->data >> 60) & 1); | ||
564 | ec->data ^= ((ec->data >> 55) & 1); | ||
565 | ec->data ^= ((ec->data >> 30) & 1); | ||
566 | ec->data ^= ((ec->data >> 27) & 1); | ||
567 | ec->data ^= ((ec->data >> 22) & 1); | ||
568 | ec->data = rol64(ec->data, 1); | ||
569 | |||
570 | /* | ||
571 | * We multiply the loop value with ->osr to obtain the | ||
572 | * oversampling rate requested by the caller | ||
573 | */ | ||
574 | if (++k >= (DATA_SIZE_BITS * ec->osr)) | ||
575 | break; | ||
576 | } | ||
577 | if (ec->stir) | ||
578 | jent_stir_pool(ec); | ||
579 | } | ||
580 | #pragma GCC pop_options | ||
581 | |||
582 | /** | ||
583 | * The continuous test required by FIPS 140-2 -- the function automatically | ||
584 | * primes the test if needed. | ||
585 | * | ||
586 | * Return: | ||
587 | * Nothing -- on failure the function does not return but panics, | ||
588 | * as mandated for a FIPS 140-2 continuous test failure | ||
589 | */ | ||
590 | static void jent_fips_test(struct rand_data *ec) | ||
591 | { | ||
592 | if (!fips_enabled) | ||
593 | return; | ||
594 | |||
595 | /* prime the FIPS test */ | ||
596 | if (!ec->old_data) { | ||
597 | ec->old_data = ec->data; | ||
598 | jent_gen_entropy(ec); | ||
599 | } | ||
600 | |||
601 | if (ec->data == ec->old_data) | ||
602 | panic(DRIVER_NAME ": Duplicate output detected\n"); | ||
603 | |||
604 | ec->old_data = ec->data; | ||
605 | } | ||
606 | |||
607 | |||
608 | /** | ||
609 | * Entry function: Obtain entropy for the caller. | ||
610 | * | ||
611 | * This function invokes the entropy gathering logic as often as needed to | ||
612 | * generate as many bytes as requested by the caller. The entropy gathering | ||
613 | * logic creates 64 bits per invocation. | ||
614 | * | ||
615 | * This function truncates the last 64 bit entropy value output to the exact | ||
616 | * size specified by the caller. | ||
617 | * | ||
618 | * Input: | ||
619 | * @ec Reference to entropy collector | ||
620 | * @data pointer to buffer for storing random data -- buffer must already | ||
621 | * exist | ||
622 | * @len size of the buffer, which also specifies the requested number | ||
623 | * of random bytes | ||
624 | * | ||
625 | * @return 0 when request is fulfilled or an error | ||
626 | * | ||
627 | * The following error codes can occur: | ||
628 | * -EINVAL if entropy_collector is NULL | ||
629 | */ | ||
630 | static ssize_t jent_read_entropy(struct rand_data *ec, u8 *data, size_t len) | ||
631 | { | ||
632 | u8 *p = data; | ||
633 | |||
634 | if (!ec) | ||
635 | return -EINVAL; | ||
636 | |||
637 | while (0 < len) { | ||
638 | size_t tocopy; | ||
639 | |||
640 | jent_gen_entropy(ec); | ||
641 | jent_fips_test(ec); | ||
642 | if ((DATA_SIZE_BITS / 8) < len) | ||
643 | tocopy = (DATA_SIZE_BITS / 8); | ||
644 | else | ||
645 | tocopy = len; | ||
646 | memcpy(p, &ec->data, tocopy); | ||
647 | |||
648 | len -= tocopy; | ||
649 | p += tocopy; | ||
650 | } | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | /*************************************************************************** | ||
656 | * Initialization logic | ||
657 | ***************************************************************************/ | ||
658 | |||
659 | static struct rand_data *jent_entropy_collector_alloc(unsigned int osr, | ||
660 | unsigned int flags) | ||
661 | { | ||
662 | struct rand_data *entropy_collector; | ||
663 | |||
664 | entropy_collector = kzalloc(sizeof(struct rand_data), GFP_KERNEL); | ||
665 | if (!entropy_collector) | ||
666 | return NULL; | ||
667 | |||
668 | if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) { | ||
669 | /* Allocate memory for adding variations based on memory | ||
670 | * access | ||
671 | */ | ||
672 | entropy_collector->mem = kzalloc(JENT_MEMORY_SIZE, GFP_KERNEL); | ||
673 | if (!entropy_collector->mem) { | ||
674 | kfree(entropy_collector); | ||
675 | return NULL; | ||
676 | } | ||
677 | entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE; | ||
678 | entropy_collector->memblocks = JENT_MEMORY_BLOCKS; | ||
679 | entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS; | ||
680 | } | ||
681 | |||
682 | /* verify and set the oversampling rate */ | ||
683 | if (0 == osr) | ||
684 | osr = 1; /* minimum sampling rate is 1 */ | ||
685 | entropy_collector->osr = osr; | ||
686 | |||
687 | entropy_collector->stir = 1; | ||
688 | if (flags & JENT_DISABLE_STIR) | ||
689 | entropy_collector->stir = 0; | ||
690 | if (flags & JENT_DISABLE_UNBIAS) | ||
691 | entropy_collector->disable_unbias = 1; | ||
692 | |||
693 | /* fill the data pad with non-zero values */ | ||
694 | jent_gen_entropy(entropy_collector); | ||
695 | |||
696 | return entropy_collector; | ||
697 | } | ||
698 | |||
699 | static void jent_entropy_collector_free(struct rand_data *entropy_collector) | ||
700 | { | ||
701 | if (!entropy_collector) | ||
702 | return; | ||
703 | if (entropy_collector->mem) | ||
704 | kzfree(entropy_collector->mem); | ||
705 | entropy_collector->mem = NULL; | ||
706 | kzfree(entropy_collector); | ||
707 | } | ||
708 | |||
709 | static int jent_entropy_init(void) | ||
710 | { | ||
711 | int i; | ||
712 | __u64 delta_sum = 0; | ||
713 | __u64 old_delta = 0; | ||
714 | int time_backwards = 0; | ||
715 | int count_var = 0; | ||
716 | int count_mod = 0; | ||
717 | |||
718 | /* We could perform statistical tests here, but the problem is | ||
719 | * that we only have a few loop counts to do testing. These | ||
720 | * loop counts may show some slight skew and we produce | ||
721 | * false positives. | ||
722 | * | ||
723 | * Moreover, only old systems show potentially problematic | ||
724 | * jitter entropy that could potentially be caught here. But | ||
725 | * the RNG is intended for hardware that is currently available or | ||
726 | * widely used, not for old systems long out of favor. Thus, | ||
727 | * no statistical tests. | ||
728 | */ | ||
729 | |||
730 | /* | ||
731 | * We could add a check for system capabilities such as clock_getres or | ||
732 | * check for CONFIG_X86_TSC, but it does not make much sense as the | ||
733 | * following sanity checks verify that we have a high-resolution | ||
734 | * timer. | ||
735 | */ | ||
736 | /* | ||
737 | * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is | ||
738 | * definitely too little. | ||
739 | */ | ||
740 | #define TESTLOOPCOUNT 300 | ||
741 | #define CLEARCACHE 100 | ||
742 | for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { | ||
743 | __u64 time = 0; | ||
744 | __u64 time2 = 0; | ||
745 | __u64 folded = 0; | ||
746 | __u64 delta = 0; | ||
747 | unsigned int lowdelta = 0; | ||
748 | |||
749 | jent_get_nstime(&time); | ||
750 | jent_fold_time(NULL, time, &folded, 1<<MIN_FOLD_LOOP_BIT); | ||
751 | jent_get_nstime(&time2); | ||
752 | |||
753 | /* test whether timer works */ | ||
754 | if (!time || !time2) | ||
755 | return JENT_ENOTIME; | ||
756 | delta = time2 - time; | ||
757 | /* | ||
758 | * test whether timer is fine grained enough to provide | ||
759 | * delta even when called shortly after each other -- this | ||
760 | * implies that we also have a high resolution timer | ||
761 | */ | ||
762 | if (!delta) | ||
763 | return JENT_ECOARSETIME; | ||
764 | |||
765 | /* | ||
766 | * up to here we did not modify any variable that will be | ||
767 | * evaluated later, but we already performed some work. Thus we | ||
768 | * already have had an impact on the caches, branch prediction, | ||
769 | * etc. with the goal to clear it to get the worst case | ||
770 | * measurements. | ||
771 | */ | ||
772 | if (CLEARCACHE > i) | ||
773 | continue; | ||
774 | |||
775 | /* test whether we have an increasing timer */ | ||
776 | if (!(time2 > time)) | ||
777 | time_backwards++; | ||
778 | |||
779 | /* | ||
780 | * Avoid modulo of 64 bit integer to allow code to compile | ||
781 | * on 32 bit architectures. | ||
782 | */ | ||
783 | lowdelta = time2 - time; | ||
784 | if (!(lowdelta % 100)) | ||
785 | count_mod++; | ||
786 | |||
787 | /* | ||
788 | * ensure that we have a varying delta timer which is necessary | ||
789 | * for the calculation of entropy -- perform this check | ||
790 | * only after the first loop is executed as we need to prime | ||
791 | * the old_data value | ||
792 | */ | ||
793 | if (i) { | ||
794 | if (delta != old_delta) | ||
795 | count_var++; | ||
796 | if (delta > old_delta) | ||
797 | delta_sum += (delta - old_delta); | ||
798 | else | ||
799 | delta_sum += (old_delta - delta); | ||
800 | } | ||
801 | old_delta = delta; | ||
802 | } | ||
803 | |||
804 | /* | ||
805 | * we allow up to three times the time running backwards. | ||
806 | * CLOCK_REALTIME is affected by adjtime and NTP operations. Thus, | ||
807 | * if such an operation just happens to interfere with our test, it | ||
808 | * should not fail. The value of 3 should cover the NTP case being | ||
809 | * performed during our test run. | ||
810 | */ | ||
811 | if (3 < time_backwards) | ||
812 | return JENT_ENOMONOTONIC; | ||
813 | /* Error if the time variances are always identical */ | ||
814 | if (!delta_sum) | ||
815 | return JENT_EVARVAR; | ||
816 | |||
817 | /* | ||
818 | * Variations of deltas of time must on average be larger | ||
819 | * than 1 to ensure the entropy estimation | ||
820 | * implied with 1 is preserved | ||
821 | */ | ||
822 | if (delta_sum <= 1) | ||
823 | return JENT_EMINVARVAR; | ||
824 | |||
825 | /* | ||
826 | * Ensure that at least 10% of all measured deltas are not multiples | ||
827 | * of 100 -- on some platforms the counter increments in multiples | ||
828 | * of 100, which would leave no usable fine-grained variation | ||
829 | */ | ||
830 | if ((TESTLOOPCOUNT/10 * 9) < count_mod) | ||
831 | return JENT_ECOARSETIME; | ||
832 | |||
833 | return 0; | ||
834 | } | ||
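Taken together, jent_entropy_init() rejects the host when the timer is absent (JENT_ENOTIME), too coarse to resolve back-to-back reads (JENT_ECOARSETIME), non-monotonic more than three times (JENT_ENOMONOTONIC), produces deltas that never vary (JENT_EVARVAR) or vary on average by at most one (JENT_EMINVARVAR), or yields multiples of 100 in more than 90% of samples (the second JENT_ECOARSETIME return).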
835 | |||
836 | /*************************************************************************** | ||
837 | * Kernel crypto API interface | ||
838 | ***************************************************************************/ | ||
839 | |||
840 | struct jitterentropy { | ||
841 | spinlock_t jent_lock; | ||
842 | struct rand_data *entropy_collector; | ||
843 | }; | ||
844 | |||
845 | static int jent_kcapi_init(struct crypto_tfm *tfm) | ||
846 | { | ||
847 | struct jitterentropy *rng = crypto_tfm_ctx(tfm); | ||
848 | int ret = 0; | ||
849 | |||
850 | rng->entropy_collector = jent_entropy_collector_alloc(1, 0); | ||
851 | if (!rng->entropy_collector) | ||
852 | ret = -ENOMEM; | ||
853 | |||
854 | spin_lock_init(&rng->jent_lock); | ||
855 | return ret; | ||
856 | } | ||
857 | |||
858 | static void jent_kcapi_cleanup(struct crypto_tfm *tfm) | ||
859 | { | ||
860 | struct jitterentropy *rng = crypto_tfm_ctx(tfm); | ||
861 | |||
862 | spin_lock(&rng->jent_lock); | ||
863 | if (rng->entropy_collector) | ||
864 | jent_entropy_collector_free(rng->entropy_collector); | ||
865 | rng->entropy_collector = NULL; | ||
866 | spin_unlock(&rng->jent_lock); | ||
867 | } | ||
868 | |||
869 | static int jent_kcapi_random(struct crypto_rng *tfm, | ||
870 | const u8 *src, unsigned int slen, | ||
871 | u8 *rdata, unsigned int dlen) | ||
872 | { | ||
873 | struct jitterentropy *rng = crypto_rng_ctx(tfm); | ||
874 | int ret = 0; | ||
875 | |||
876 | spin_lock(&rng->jent_lock); | ||
877 | ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); | ||
878 | spin_unlock(&rng->jent_lock); | ||
879 | |||
880 | return ret; | ||
881 | } | ||
882 | |||
883 | static int jent_kcapi_reset(struct crypto_rng *tfm, | ||
884 | const u8 *seed, unsigned int slen) | ||
885 | { | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | static struct rng_alg jent_alg = { | ||
890 | .generate = jent_kcapi_random, | ||
891 | .seed = jent_kcapi_reset, | ||
892 | .seedsize = 0, | ||
893 | .base = { | ||
894 | .cra_name = "jitterentropy_rng", | ||
895 | .cra_driver_name = "jitterentropy_rng", | ||
896 | .cra_priority = 100, | ||
897 | .cra_ctxsize = sizeof(struct jitterentropy), | ||
898 | .cra_module = THIS_MODULE, | ||
899 | .cra_init = jent_kcapi_init, | ||
900 | .cra_exit = jent_kcapi_cleanup, | ||
901 | |||
902 | } | ||
903 | }; | ||
904 | |||
905 | static int __init jent_mod_init(void) | ||
906 | { | ||
907 | int ret = 0; | ||
908 | |||
909 | ret = jent_entropy_init(); | ||
910 | if (ret) { | ||
911 | pr_info(DRIVER_NAME ": Initialization failed: host does not meet timing requirements (error %d)\n", ret); | ||
912 | return -EFAULT; | ||
913 | } | ||
914 | return crypto_register_rng(&jent_alg); | ||
915 | } | ||
916 | |||
917 | static void __exit jent_mod_exit(void) | ||
918 | { | ||
919 | crypto_unregister_rng(&jent_alg); | ||
920 | } | ||
921 | |||
922 | module_init(jent_mod_init); | ||
923 | module_exit(jent_mod_exit); | ||
924 | |||
925 | MODULE_LICENSE("Dual BSD/GPL"); | ||
926 | MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); | ||
927 | MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter"); | ||
928 | MODULE_ALIAS_CRYPTO("jitterentropy_rng"); | ||
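With the rng_alg above registered, any kernel user can draw bytes from the Jitter RNG through the generic rng API. A minimal sketch, assuming only the crypto_alloc_rng() and crypto_rng_get_bytes() helpers that crypto/rng.c exports after this series (the demo function itself is hypothetical):

#include <crypto/rng.h>

static int jent_demo(void)
{
	struct crypto_rng *rng;
	u8 buf[16];
	int ret;

	rng = crypto_alloc_rng("jitterentropy_rng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* .seedsize is 0 and .seed is a no-op, so no reset is needed. */
	ret = crypto_rng_get_bytes(rng, buf, sizeof(buf));

	crypto_free_rng(rng);
	return ret;
}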
diff --git a/crypto/krng.c b/crypto/krng.c deleted file mode 100644 index 0224841b6579..000000000000 --- a/crypto/krng.c +++ /dev/null | |||
@@ -1,66 +0,0 @@ | |||
1 | /* | ||
2 | * RNG implementation using standard kernel RNG. | ||
3 | * | ||
4 | * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <crypto/internal/rng.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/random.h> | ||
18 | |||
19 | static int krng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) | ||
20 | { | ||
21 | get_random_bytes(rdata, dlen); | ||
22 | return 0; | ||
23 | } | ||
24 | |||
25 | static int krng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static struct crypto_alg krng_alg = { | ||
31 | .cra_name = "stdrng", | ||
32 | .cra_driver_name = "krng", | ||
33 | .cra_priority = 200, | ||
34 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | ||
35 | .cra_ctxsize = 0, | ||
36 | .cra_type = &crypto_rng_type, | ||
37 | .cra_module = THIS_MODULE, | ||
38 | .cra_u = { | ||
39 | .rng = { | ||
40 | .rng_make_random = krng_get_random, | ||
41 | .rng_reset = krng_reset, | ||
42 | .seedsize = 0, | ||
43 | } | ||
44 | } | ||
45 | }; | ||
46 | |||
47 | |||
48 | /* Module initialization */ | ||
49 | static int __init krng_mod_init(void) | ||
50 | { | ||
51 | return crypto_register_alg(&krng_alg); | ||
52 | } | ||
53 | |||
54 | static void __exit krng_mod_fini(void) | ||
55 | { | ||
56 | crypto_unregister_alg(&krng_alg); | ||
57 | return; | ||
58 | } | ||
59 | |||
60 | module_init(krng_mod_init); | ||
61 | module_exit(krng_mod_fini); | ||
62 | |||
63 | MODULE_LICENSE("GPL"); | ||
64 | MODULE_DESCRIPTION("Kernel Random Number Generator"); | ||
65 | MODULE_ALIAS_CRYPTO("stdrng"); | ||
66 | MODULE_ALIAS_CRYPTO("krng"); | ||
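For contrast with the legacy file deleted above, a hypothetical sketch of what the same get_random_bytes() wrapper would look like under the new rng_alg interface (see crypto/rng.c below); in this series the driver is removed rather than converted:

static int krng_generate(struct crypto_rng *tfm, const u8 *src,
			 unsigned int slen, u8 *rdata, unsigned int dlen)
{
	get_random_bytes(rdata, dlen);
	return 0;
}

static int krng_seed(struct crypto_rng *tfm, const u8 *seed,
		     unsigned int slen)
{
	return 0;
}

static struct rng_alg krng_new_alg = {
	.generate	= krng_generate,
	.seed		= krng_seed,
	.seedsize	= 0,
	.base		= {
		.cra_name		= "stdrng",
		.cra_driver_name	= "krng",
		.cra_priority		= 200,
		.cra_module		= THIS_MODULE,
	},
};

/* registration would be crypto_register_rng(&krng_new_alg); */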
diff --git a/crypto/md5.c b/crypto/md5.c index 36f5e5b103f3..33d17e9a8702 100644 --- a/crypto/md5.c +++ b/crypto/md5.c | |||
@@ -51,10 +51,10 @@ static int md5_init(struct shash_desc *desc) | |||
51 | { | 51 | { |
52 | struct md5_state *mctx = shash_desc_ctx(desc); | 52 | struct md5_state *mctx = shash_desc_ctx(desc); |
53 | 53 | ||
54 | mctx->hash[0] = 0x67452301; | 54 | mctx->hash[0] = MD5_H0; |
55 | mctx->hash[1] = 0xefcdab89; | 55 | mctx->hash[1] = MD5_H1; |
56 | mctx->hash[2] = 0x98badcfe; | 56 | mctx->hash[2] = MD5_H2; |
57 | mctx->hash[3] = 0x10325476; | 57 | mctx->hash[3] = MD5_H3; |
58 | mctx->byte_count = 0; | 58 | mctx->byte_count = 0; |
59 | 59 | ||
60 | return 0; | 60 | return 0; |
diff --git a/crypto/pcompress.c b/crypto/pcompress.c index 7140fe70c7af..7a13b4088857 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c | |||
@@ -38,11 +38,6 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) | ||
42 | { | ||
43 | return alg->cra_ctxsize; | ||
44 | } | ||
45 | |||
46 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) | 41 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) |
47 | { | 42 | { |
48 | return 0; | 43 | return 0; |
@@ -77,7 +72,7 @@ static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) | |||
77 | } | 72 | } |
78 | 73 | ||
79 | static const struct crypto_type crypto_pcomp_type = { | 74 | static const struct crypto_type crypto_pcomp_type = { |
80 | .extsize = crypto_pcomp_extsize, | 75 | .extsize = crypto_alg_extsize, |
81 | .init = crypto_pcomp_init, | 76 | .init = crypto_pcomp_init, |
82 | .init_tfm = crypto_pcomp_init_tfm, | 77 | .init_tfm = crypto_pcomp_init_tfm, |
83 | #ifdef CONFIG_PROC_FS | 78 | #ifdef CONFIG_PROC_FS |
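crypto_alg_extsize() is a shared helper that replaces per-type duplicates like the one deleted above. A minimal sketch of what it must at least do (the in-tree helper may additionally fold in the algorithm's alignment mask):

unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
	return alg->cra_ctxsize;
}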
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index c305d4112735..45e7d5155672 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #include <crypto/algapi.h> | 21 | #include <crypto/algapi.h> |
22 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
23 | #include <linux/atomic.h> | ||
23 | #include <linux/err.h> | 24 | #include <linux/err.h> |
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -60,8 +61,8 @@ static struct padata_pcrypt pdecrypt; | |||
60 | static struct kset *pcrypt_kset; | 61 | static struct kset *pcrypt_kset; |
61 | 62 | ||
62 | struct pcrypt_instance_ctx { | 63 | struct pcrypt_instance_ctx { |
63 | struct crypto_spawn spawn; | 64 | struct crypto_aead_spawn spawn; |
64 | unsigned int tfm_count; | 65 | atomic_t tfm_count; |
65 | }; | 66 | }; |
66 | 67 | ||
67 | struct pcrypt_aead_ctx { | 68 | struct pcrypt_aead_ctx { |
@@ -122,14 +123,6 @@ static void pcrypt_aead_serial(struct padata_priv *padata) | |||
122 | aead_request_complete(req->base.data, padata->info); | 123 | aead_request_complete(req->base.data, padata->info); |
123 | } | 124 | } |
124 | 125 | ||
125 | static void pcrypt_aead_giv_serial(struct padata_priv *padata) | ||
126 | { | ||
127 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
128 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); | ||
129 | |||
130 | aead_request_complete(req->areq.base.data, padata->info); | ||
131 | } | ||
132 | |||
133 | static void pcrypt_aead_done(struct crypto_async_request *areq, int err) | 126 | static void pcrypt_aead_done(struct crypto_async_request *areq, int err) |
134 | { | 127 | { |
135 | struct aead_request *req = areq->data; | 128 | struct aead_request *req = areq->data; |
@@ -175,7 +168,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req) | |||
175 | pcrypt_aead_done, req); | 168 | pcrypt_aead_done, req); |
176 | aead_request_set_crypt(creq, req->src, req->dst, | 169 | aead_request_set_crypt(creq, req->src, req->dst, |
177 | req->cryptlen, req->iv); | 170 | req->cryptlen, req->iv); |
178 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | 171 | aead_request_set_ad(creq, req->assoclen); |
179 | 172 | ||
180 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); | 173 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); |
181 | if (!err) | 174 | if (!err) |
@@ -217,7 +210,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req) | |||
217 | pcrypt_aead_done, req); | 210 | pcrypt_aead_done, req); |
218 | aead_request_set_crypt(creq, req->src, req->dst, | 211 | aead_request_set_crypt(creq, req->src, req->dst, |
219 | req->cryptlen, req->iv); | 212 | req->cryptlen, req->iv); |
220 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | 213 | aead_request_set_ad(creq, req->assoclen); |
221 | 214 | ||
222 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); | 215 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); |
223 | if (!err) | 216 | if (!err) |
@@ -226,182 +219,134 @@ static int pcrypt_aead_decrypt(struct aead_request *req) | |||
226 | return err; | 219 | return err; |
227 | } | 220 | } |
228 | 221 | ||
229 | static void pcrypt_aead_givenc(struct padata_priv *padata) | 222 | static int pcrypt_aead_init_tfm(struct crypto_aead *tfm) |
230 | { | ||
231 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
232 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); | ||
233 | |||
234 | padata->info = crypto_aead_givencrypt(req); | ||
235 | |||
236 | if (padata->info == -EINPROGRESS) | ||
237 | return; | ||
238 | |||
239 | padata_do_serial(padata); | ||
240 | } | ||
241 | |||
242 | static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) | ||
243 | { | ||
244 | int err; | ||
245 | struct aead_request *areq = &req->areq; | ||
246 | struct pcrypt_request *preq = aead_request_ctx(areq); | ||
247 | struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq); | ||
248 | struct padata_priv *padata = pcrypt_request_padata(preq); | ||
249 | struct crypto_aead *aead = aead_givcrypt_reqtfm(req); | ||
250 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | ||
251 | u32 flags = aead_request_flags(areq); | ||
252 | |||
253 | memset(padata, 0, sizeof(struct padata_priv)); | ||
254 | |||
255 | padata->parallel = pcrypt_aead_givenc; | ||
256 | padata->serial = pcrypt_aead_giv_serial; | ||
257 | |||
258 | aead_givcrypt_set_tfm(creq, ctx->child); | ||
259 | aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | ||
260 | pcrypt_aead_done, areq); | ||
261 | aead_givcrypt_set_crypt(creq, areq->src, areq->dst, | ||
262 | areq->cryptlen, areq->iv); | ||
263 | aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); | ||
264 | aead_givcrypt_set_giv(creq, req->giv, req->seq); | ||
265 | |||
266 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); | ||
267 | if (!err) | ||
268 | return -EINPROGRESS; | ||
269 | |||
270 | return err; | ||
271 | } | ||
272 | |||
273 | static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) | ||
274 | { | 223 | { |
275 | int cpu, cpu_index; | 224 | int cpu, cpu_index; |
276 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 225 | struct aead_instance *inst = aead_alg_instance(tfm); |
277 | struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst); | 226 | struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst); |
278 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 227 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); |
279 | struct crypto_aead *cipher; | 228 | struct crypto_aead *cipher; |
280 | 229 | ||
281 | ictx->tfm_count++; | 230 | cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) % |
282 | 231 | cpumask_weight(cpu_online_mask); | |
283 | cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask); | ||
284 | 232 | ||
285 | ctx->cb_cpu = cpumask_first(cpu_online_mask); | 233 | ctx->cb_cpu = cpumask_first(cpu_online_mask); |
286 | for (cpu = 0; cpu < cpu_index; cpu++) | 234 | for (cpu = 0; cpu < cpu_index; cpu++) |
287 | ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); | 235 | ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); |
288 | 236 | ||
289 | cipher = crypto_spawn_aead(crypto_instance_ctx(inst)); | 237 | cipher = crypto_spawn_aead(&ictx->spawn); |
290 | 238 | ||
291 | if (IS_ERR(cipher)) | 239 | if (IS_ERR(cipher)) |
292 | return PTR_ERR(cipher); | 240 | return PTR_ERR(cipher); |
293 | 241 | ||
294 | ctx->child = cipher; | 242 | ctx->child = cipher; |
295 | tfm->crt_aead.reqsize = sizeof(struct pcrypt_request) | 243 | crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) + |
296 | + sizeof(struct aead_givcrypt_request) | 244 | sizeof(struct aead_request) + |
297 | + crypto_aead_reqsize(cipher); | 245 | crypto_aead_reqsize(cipher)); |
298 | 246 | ||
299 | return 0; | 247 | return 0; |
300 | } | 248 | } |
301 | 249 | ||
302 | static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm) | 250 | static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) |
303 | { | 251 | { |
304 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 252 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); |
305 | 253 | ||
306 | crypto_free_aead(ctx->child); | 254 | crypto_free_aead(ctx->child); |
307 | } | 255 | } |
308 | 256 | ||
309 | static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg) | 257 | static int pcrypt_init_instance(struct crypto_instance *inst, |
258 | struct crypto_alg *alg) | ||
310 | { | 259 | { |
311 | struct crypto_instance *inst; | ||
312 | struct pcrypt_instance_ctx *ctx; | ||
313 | int err; | ||
314 | |||
315 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
316 | if (!inst) { | ||
317 | inst = ERR_PTR(-ENOMEM); | ||
318 | goto out; | ||
319 | } | ||
320 | |||
321 | err = -ENAMETOOLONG; | ||
322 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 260 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
323 | "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 261 | "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
324 | goto out_free_inst; | 262 | return -ENAMETOOLONG; |
325 | 263 | ||
326 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 264 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
327 | 265 | ||
328 | ctx = crypto_instance_ctx(inst); | ||
329 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
330 | CRYPTO_ALG_TYPE_MASK); | ||
331 | if (err) | ||
332 | goto out_free_inst; | ||
333 | |||
334 | inst->alg.cra_priority = alg->cra_priority + 100; | 266 | inst->alg.cra_priority = alg->cra_priority + 100; |
335 | inst->alg.cra_blocksize = alg->cra_blocksize; | 267 | inst->alg.cra_blocksize = alg->cra_blocksize; |
336 | inst->alg.cra_alignmask = alg->cra_alignmask; | 268 | inst->alg.cra_alignmask = alg->cra_alignmask; |
337 | 269 | ||
338 | out: | 270 | return 0; |
339 | return inst; | ||
340 | |||
341 | out_free_inst: | ||
342 | kfree(inst); | ||
343 | inst = ERR_PTR(err); | ||
344 | goto out; | ||
345 | } | 271 | } |
346 | 272 | ||
347 | static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, | 273 | static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, |
348 | u32 type, u32 mask) | 274 | u32 type, u32 mask) |
349 | { | 275 | { |
350 | struct crypto_instance *inst; | 276 | struct pcrypt_instance_ctx *ctx; |
351 | struct crypto_alg *alg; | 277 | struct aead_instance *inst; |
278 | struct aead_alg *alg; | ||
279 | const char *name; | ||
280 | int err; | ||
281 | |||
282 | name = crypto_attr_alg_name(tb[1]); | ||
283 | if (IS_ERR(name)) | ||
284 | return PTR_ERR(name); | ||
285 | |||
286 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
287 | if (!inst) | ||
288 | return -ENOMEM; | ||
289 | |||
290 | ctx = aead_instance_ctx(inst); | ||
291 | crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst)); | ||
292 | |||
293 | err = crypto_grab_aead(&ctx->spawn, name, 0, 0); | ||
294 | if (err) | ||
295 | goto out_free_inst; | ||
352 | 296 | ||
353 | alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); | 297 | alg = crypto_spawn_aead_alg(&ctx->spawn); |
354 | if (IS_ERR(alg)) | 298 | err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base); |
355 | return ERR_CAST(alg); | 299 | if (err) |
300 | goto out_drop_aead; | ||
356 | 301 | ||
357 | inst = pcrypt_alloc_instance(alg); | 302 | inst->alg.ivsize = crypto_aead_alg_ivsize(alg); |
358 | if (IS_ERR(inst)) | 303 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); |
359 | goto out_put_alg; | ||
360 | 304 | ||
361 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | 305 | inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); |
362 | inst->alg.cra_type = &crypto_aead_type; | ||
363 | 306 | ||
364 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | 307 | inst->alg.init = pcrypt_aead_init_tfm; |
365 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | 308 | inst->alg.exit = pcrypt_aead_exit_tfm; |
366 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | ||
367 | 309 | ||
368 | inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); | 310 | inst->alg.setkey = pcrypt_aead_setkey; |
311 | inst->alg.setauthsize = pcrypt_aead_setauthsize; | ||
312 | inst->alg.encrypt = pcrypt_aead_encrypt; | ||
313 | inst->alg.decrypt = pcrypt_aead_decrypt; | ||
369 | 314 | ||
370 | inst->alg.cra_init = pcrypt_aead_init_tfm; | 315 | err = aead_register_instance(tmpl, inst); |
371 | inst->alg.cra_exit = pcrypt_aead_exit_tfm; | 316 | if (err) |
317 | goto out_drop_aead; | ||
372 | 318 | ||
373 | inst->alg.cra_aead.setkey = pcrypt_aead_setkey; | 319 | out: |
374 | inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize; | 320 | return err; |
375 | inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt; | ||
376 | inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt; | ||
377 | inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt; | ||
378 | 321 | ||
379 | out_put_alg: | 322 | out_drop_aead: |
380 | crypto_mod_put(alg); | 323 | crypto_drop_aead(&ctx->spawn); |
381 | return inst; | 324 | out_free_inst: |
325 | kfree(inst); | ||
326 | goto out; | ||
382 | } | 327 | } |
383 | 328 | ||
384 | static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) | 329 | static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) |
385 | { | 330 | { |
386 | struct crypto_attr_type *algt; | 331 | struct crypto_attr_type *algt; |
387 | 332 | ||
388 | algt = crypto_get_attr_type(tb); | 333 | algt = crypto_get_attr_type(tb); |
389 | if (IS_ERR(algt)) | 334 | if (IS_ERR(algt)) |
390 | return ERR_CAST(algt); | 335 | return PTR_ERR(algt); |
391 | 336 | ||
392 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 337 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
393 | case CRYPTO_ALG_TYPE_AEAD: | 338 | case CRYPTO_ALG_TYPE_AEAD: |
394 | return pcrypt_alloc_aead(tb, algt->type, algt->mask); | 339 | return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); |
395 | } | 340 | } |
396 | 341 | ||
397 | return ERR_PTR(-EINVAL); | 342 | return -EINVAL; |
398 | } | 343 | } |
399 | 344 | ||
400 | static void pcrypt_free(struct crypto_instance *inst) | 345 | static void pcrypt_free(struct crypto_instance *inst) |
401 | { | 346 | { |
402 | struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); | 347 | struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); |
403 | 348 | ||
404 | crypto_drop_spawn(&ctx->spawn); | 349 | crypto_drop_aead(&ctx->spawn); |
405 | kfree(inst); | 350 | kfree(inst); |
406 | } | 351 | } |
407 | 352 | ||
@@ -516,7 +461,7 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) | |||
516 | 461 | ||
517 | static struct crypto_template pcrypt_tmpl = { | 462 | static struct crypto_template pcrypt_tmpl = { |
518 | .name = "pcrypt", | 463 | .name = "pcrypt", |
519 | .alloc = pcrypt_alloc, | 464 | .create = pcrypt_create, |
520 | .free = pcrypt_free, | 465 | .free = pcrypt_free, |
521 | .module = THIS_MODULE, | 466 | .module = THIS_MODULE, |
522 | }; | 467 | }; |
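Besides switching to the AEAD-specific spawn, the init_tfm conversion above turns tfm_count into an atomic_t so that concurrent tfm creation cannot hand two contexts the same callback CPU. The round-robin selection, isolated as a sketch:

/* Each new tfm atomically takes the next index and walks the
 * online-CPU mask to the corresponding callback CPU. */
static int pick_cb_cpu(atomic_t *tfm_count)
{
	int cpu, cpu_index, cb_cpu;

	cpu_index = (unsigned int)atomic_inc_return(tfm_count) %
		    cpumask_weight(cpu_online_mask);

	cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		cb_cpu = cpumask_next(cb_cpu, cpu_online_mask);

	return cb_cpu;
}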
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c new file mode 100644 index 000000000000..387b5c887a80 --- /dev/null +++ b/crypto/poly1305_generic.c | |||
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * Poly1305 authenticator algorithm, RFC7539 | ||
3 | * | ||
4 | * Copyright (C) 2015 Martin Willi | ||
5 | * | ||
6 | * Based on public domain code by Andrew Moon and Daniel J. Bernstein. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <crypto/algapi.h> | ||
15 | #include <crypto/internal/hash.h> | ||
16 | #include <linux/crypto.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | |||
20 | #define POLY1305_BLOCK_SIZE 16 | ||
21 | #define POLY1305_KEY_SIZE 32 | ||
22 | #define POLY1305_DIGEST_SIZE 16 | ||
23 | |||
24 | struct poly1305_desc_ctx { | ||
25 | /* key */ | ||
26 | u32 r[5]; | ||
27 | /* finalize key */ | ||
28 | u32 s[4]; | ||
29 | /* accumulator */ | ||
30 | u32 h[5]; | ||
31 | /* partial buffer */ | ||
32 | u8 buf[POLY1305_BLOCK_SIZE]; | ||
33 | /* bytes used in partial buffer */ | ||
34 | unsigned int buflen; | ||
35 | /* r key has been set */ | ||
36 | bool rset; | ||
37 | /* s key has been set */ | ||
38 | bool sset; | ||
39 | }; | ||
40 | |||
41 | static inline u64 mlt(u64 a, u64 b) | ||
42 | { | ||
43 | return a * b; | ||
44 | } | ||
45 | |||
46 | static inline u32 sr(u64 v, u_char n) | ||
47 | { | ||
48 | return v >> n; | ||
49 | } | ||
50 | |||
51 | static inline u32 and(u32 v, u32 mask) | ||
52 | { | ||
53 | return v & mask; | ||
54 | } | ||
55 | |||
56 | static inline u32 le32_to_cpuvp(const void *p) | ||
57 | { | ||
58 | return le32_to_cpup(p); | ||
59 | } | ||
60 | |||
61 | static int poly1305_init(struct shash_desc *desc) | ||
62 | { | ||
63 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | ||
64 | |||
65 | memset(dctx->h, 0, sizeof(dctx->h)); | ||
66 | dctx->buflen = 0; | ||
67 | dctx->rset = false; | ||
68 | dctx->sset = false; | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static int poly1305_setkey(struct crypto_shash *tfm, | ||
74 | const u8 *key, unsigned int keylen) | ||
75 | { | ||
76 | /* Poly1305 requires a unique key for each tag, which implies that | ||
77 | * we can't set it on the tfm that gets accessed by multiple users | ||
78 | * simultaneously. Instead we expect the key as the first 32 bytes in | ||
79 | * the update() call. */ | ||
80 | return -ENOTSUPP; | ||
81 | } | ||
82 | |||
83 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) | ||
84 | { | ||
85 | /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ | ||
86 | dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff; | ||
87 | dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03; | ||
88 | dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff; | ||
89 | dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff; | ||
90 | dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff; | ||
91 | } | ||
92 | |||
93 | static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) | ||
94 | { | ||
95 | dctx->s[0] = le32_to_cpuvp(key + 0); | ||
96 | dctx->s[1] = le32_to_cpuvp(key + 4); | ||
97 | dctx->s[2] = le32_to_cpuvp(key + 8); | ||
98 | dctx->s[3] = le32_to_cpuvp(key + 12); | ||
99 | } | ||
100 | |||
101 | static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | ||
102 | const u8 *src, unsigned int srclen, | ||
103 | u32 hibit) | ||
104 | { | ||
105 | u32 r0, r1, r2, r3, r4; | ||
106 | u32 s1, s2, s3, s4; | ||
107 | u32 h0, h1, h2, h3, h4; | ||
108 | u64 d0, d1, d2, d3, d4; | ||
109 | |||
110 | if (unlikely(!dctx->sset)) { | ||
111 | if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { | ||
112 | poly1305_setrkey(dctx, src); | ||
113 | src += POLY1305_BLOCK_SIZE; | ||
114 | srclen -= POLY1305_BLOCK_SIZE; | ||
115 | dctx->rset = true; | ||
116 | } | ||
117 | if (srclen >= POLY1305_BLOCK_SIZE) { | ||
118 | poly1305_setskey(dctx, src); | ||
119 | src += POLY1305_BLOCK_SIZE; | ||
120 | srclen -= POLY1305_BLOCK_SIZE; | ||
121 | dctx->sset = true; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | r0 = dctx->r[0]; | ||
126 | r1 = dctx->r[1]; | ||
127 | r2 = dctx->r[2]; | ||
128 | r3 = dctx->r[3]; | ||
129 | r4 = dctx->r[4]; | ||
130 | |||
131 | s1 = r1 * 5; | ||
132 | s2 = r2 * 5; | ||
133 | s3 = r3 * 5; | ||
134 | s4 = r4 * 5; | ||
135 | |||
136 | h0 = dctx->h[0]; | ||
137 | h1 = dctx->h[1]; | ||
138 | h2 = dctx->h[2]; | ||
139 | h3 = dctx->h[3]; | ||
140 | h4 = dctx->h[4]; | ||
141 | |||
142 | while (likely(srclen >= POLY1305_BLOCK_SIZE)) { | ||
143 | |||
144 | /* h += m[i] */ | ||
145 | h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff; | ||
146 | h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff; | ||
147 | h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff; | ||
148 | h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff; | ||
149 | h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit; | ||
150 | |||
151 | /* h *= r */ | ||
152 | d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + | ||
153 | mlt(h3, s2) + mlt(h4, s1); | ||
154 | d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + | ||
155 | mlt(h3, s3) + mlt(h4, s2); | ||
156 | d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + | ||
157 | mlt(h3, s4) + mlt(h4, s3); | ||
158 | d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + | ||
159 | mlt(h3, r0) + mlt(h4, s4); | ||
160 | d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + | ||
161 | mlt(h3, r1) + mlt(h4, r0); | ||
162 | |||
163 | /* (partial) h %= p */ | ||
164 | d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); | ||
165 | d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); | ||
166 | d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); | ||
167 | d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); | ||
168 | h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); | ||
169 | h1 += h0 >> 26; h0 = h0 & 0x3ffffff; | ||
170 | |||
171 | src += POLY1305_BLOCK_SIZE; | ||
172 | srclen -= POLY1305_BLOCK_SIZE; | ||
173 | } | ||
174 | |||
175 | dctx->h[0] = h0; | ||
176 | dctx->h[1] = h1; | ||
177 | dctx->h[2] = h2; | ||
178 | dctx->h[3] = h3; | ||
179 | dctx->h[4] = h4; | ||
180 | |||
181 | return srclen; | ||
182 | } | ||
183 | |||
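The s1..s4 = r1..r4 * 5 precomputation in poly1305_blocks() follows directly from the shape of the Poly1305 prime:

p = 2^{130} - 5 \quad\Rightarrow\quad 2^{130} \equiv 5 \pmod{p}

With h and r split into five 26-bit limbs, every product term h_i r_j with i + j >= 5 carries a factor

2^{26(i+j)} = 2^{130} \cdot 2^{26(i+j-5)} \equiv 5 \cdot 2^{26(i+j-5)} \pmod{p},

so the limbs that wrap around are multiplied by 5*r_j in place of r_j, keeping the partial reduction inside 64-bit accumulators.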
184 | static int poly1305_update(struct shash_desc *desc, | ||
185 | const u8 *src, unsigned int srclen) | ||
186 | { | ||
187 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | ||
188 | unsigned int bytes; | ||
189 | |||
190 | if (unlikely(dctx->buflen)) { | ||
191 | bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); | ||
192 | memcpy(dctx->buf + dctx->buflen, src, bytes); | ||
193 | src += bytes; | ||
194 | srclen -= bytes; | ||
195 | dctx->buflen += bytes; | ||
196 | |||
197 | if (dctx->buflen == POLY1305_BLOCK_SIZE) { | ||
198 | poly1305_blocks(dctx, dctx->buf, | ||
199 | POLY1305_BLOCK_SIZE, 1 << 24); | ||
200 | dctx->buflen = 0; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | if (likely(srclen >= POLY1305_BLOCK_SIZE)) { | ||
205 | bytes = poly1305_blocks(dctx, src, srclen, 1 << 24); | ||
206 | src += srclen - bytes; | ||
207 | srclen = bytes; | ||
208 | } | ||
209 | |||
210 | if (unlikely(srclen)) { | ||
211 | dctx->buflen = srclen; | ||
212 | memcpy(dctx->buf, src, srclen); | ||
213 | } | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int poly1305_final(struct shash_desc *desc, u8 *dst) | ||
219 | { | ||
220 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | ||
221 | __le32 *mac = (__le32 *)dst; | ||
222 | u32 h0, h1, h2, h3, h4; | ||
223 | u32 g0, g1, g2, g3, g4; | ||
224 | u32 mask; | ||
225 | u64 f = 0; | ||
226 | |||
227 | if (unlikely(!dctx->sset)) | ||
228 | return -ENOKEY; | ||
229 | |||
230 | if (unlikely(dctx->buflen)) { | ||
231 | dctx->buf[dctx->buflen++] = 1; | ||
232 | memset(dctx->buf + dctx->buflen, 0, | ||
233 | POLY1305_BLOCK_SIZE - dctx->buflen); | ||
234 | poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0); | ||
235 | } | ||
236 | |||
237 | /* fully carry h */ | ||
238 | h0 = dctx->h[0]; | ||
239 | h1 = dctx->h[1]; | ||
240 | h2 = dctx->h[2]; | ||
241 | h3 = dctx->h[3]; | ||
242 | h4 = dctx->h[4]; | ||
243 | |||
244 | h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; | ||
245 | h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; | ||
246 | h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; | ||
247 | h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; | ||
248 | h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; | ||
249 | |||
250 | /* compute h + -p */ | ||
251 | g0 = h0 + 5; | ||
252 | g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; | ||
253 | g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; | ||
254 | g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; | ||
255 | g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; | ||
256 | |||
257 | /* select h if h < p, or h + -p if h >= p */ | ||
258 | mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; | ||
259 | g0 &= mask; | ||
260 | g1 &= mask; | ||
261 | g2 &= mask; | ||
262 | g3 &= mask; | ||
263 | g4 &= mask; | ||
264 | mask = ~mask; | ||
265 | h0 = (h0 & mask) | g0; | ||
266 | h1 = (h1 & mask) | g1; | ||
267 | h2 = (h2 & mask) | g2; | ||
268 | h3 = (h3 & mask) | g3; | ||
269 | h4 = (h4 & mask) | g4; | ||
270 | |||
271 | /* h = h % (2^128) */ | ||
272 | h0 = (h0 >> 0) | (h1 << 26); | ||
273 | h1 = (h1 >> 6) | (h2 << 20); | ||
274 | h2 = (h2 >> 12) | (h3 << 14); | ||
275 | h3 = (h3 >> 18) | (h4 << 8); | ||
276 | |||
277 | /* mac = (h + s) % (2^128) */ | ||
278 | f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f); | ||
279 | f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f); | ||
280 | f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f); | ||
281 | f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f); | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static struct shash_alg poly1305_alg = { | ||
287 | .digestsize = POLY1305_DIGEST_SIZE, | ||
288 | .init = poly1305_init, | ||
289 | .update = poly1305_update, | ||
290 | .final = poly1305_final, | ||
291 | .setkey = poly1305_setkey, | ||
292 | .descsize = sizeof(struct poly1305_desc_ctx), | ||
293 | .base = { | ||
294 | .cra_name = "poly1305", | ||
295 | .cra_driver_name = "poly1305-generic", | ||
296 | .cra_priority = 100, | ||
297 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
298 | .cra_alignmask = sizeof(u32) - 1, | ||
299 | .cra_blocksize = POLY1305_BLOCK_SIZE, | ||
300 | .cra_module = THIS_MODULE, | ||
301 | }, | ||
302 | }; | ||
303 | |||
304 | static int __init poly1305_mod_init(void) | ||
305 | { | ||
306 | return crypto_register_shash(&poly1305_alg); | ||
307 | } | ||
308 | |||
309 | static void __exit poly1305_mod_exit(void) | ||
310 | { | ||
311 | crypto_unregister_shash(&poly1305_alg); | ||
312 | } | ||
313 | |||
314 | module_init(poly1305_mod_init); | ||
315 | module_exit(poly1305_mod_exit); | ||
316 | |||
317 | MODULE_LICENSE("GPL"); | ||
318 | MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); | ||
319 | MODULE_DESCRIPTION("Poly1305 authenticator"); | ||
320 | MODULE_ALIAS_CRYPTO("poly1305"); | ||
321 | MODULE_ALIAS_CRYPTO("poly1305-generic"); | ||
diff --git a/crypto/proc.c b/crypto/proc.c index 4ffe73b51612..2cc10c96d753 100644 --- a/crypto/proc.c +++ b/crypto/proc.c | |||
@@ -20,47 +20,8 @@ | |||
20 | #include <linux/rwsem.h> | 20 | #include <linux/rwsem.h> |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/sysctl.h> | ||
24 | #include "internal.h" | 23 | #include "internal.h" |
25 | 24 | ||
26 | #ifdef CONFIG_CRYPTO_FIPS | ||
27 | static struct ctl_table crypto_sysctl_table[] = { | ||
28 | { | ||
29 | .procname = "fips_enabled", | ||
30 | .data = &fips_enabled, | ||
31 | .maxlen = sizeof(int), | ||
32 | .mode = 0444, | ||
33 | .proc_handler = proc_dointvec | ||
34 | }, | ||
35 | {} | ||
36 | }; | ||
37 | |||
38 | static struct ctl_table crypto_dir_table[] = { | ||
39 | { | ||
40 | .procname = "crypto", | ||
41 | .mode = 0555, | ||
42 | .child = crypto_sysctl_table | ||
43 | }, | ||
44 | {} | ||
45 | }; | ||
46 | |||
47 | static struct ctl_table_header *crypto_sysctls; | ||
48 | |||
49 | static void crypto_proc_fips_init(void) | ||
50 | { | ||
51 | crypto_sysctls = register_sysctl_table(crypto_dir_table); | ||
52 | } | ||
53 | |||
54 | static void crypto_proc_fips_exit(void) | ||
55 | { | ||
56 | if (crypto_sysctls) | ||
57 | unregister_sysctl_table(crypto_sysctls); | ||
58 | } | ||
59 | #else | ||
60 | #define crypto_proc_fips_init() | ||
61 | #define crypto_proc_fips_exit() | ||
62 | #endif | ||
63 | |||
64 | static void *c_start(struct seq_file *m, loff_t *pos) | 25 | static void *c_start(struct seq_file *m, loff_t *pos) |
65 | { | 26 | { |
66 | down_read(&crypto_alg_sem); | 27 | down_read(&crypto_alg_sem); |
@@ -148,11 +109,9 @@ static const struct file_operations proc_crypto_ops = { | |||
148 | void __init crypto_init_proc(void) | 109 | void __init crypto_init_proc(void) |
149 | { | 110 | { |
150 | proc_create("crypto", 0, NULL, &proc_crypto_ops); | 111 | proc_create("crypto", 0, NULL, &proc_crypto_ops); |
151 | crypto_proc_fips_init(); | ||
152 | } | 112 | } |
153 | 113 | ||
154 | void __exit crypto_exit_proc(void) | 114 | void __exit crypto_exit_proc(void) |
155 | { | 115 | { |
156 | crypto_proc_fips_exit(); | ||
157 | remove_proc_entry("crypto", NULL); | 116 | remove_proc_entry("crypto", NULL); |
158 | } | 117 | } |
diff --git a/crypto/rng.c b/crypto/rng.c index e0a25c2456de..b81cffb13bab 100644 --- a/crypto/rng.c +++ b/crypto/rng.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * RNG operations. | 4 | * RNG operations. |
5 | * | 5 | * |
6 | * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> | 6 | * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> |
7 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
@@ -24,12 +25,19 @@ | |||
24 | #include <linux/cryptouser.h> | 25 | #include <linux/cryptouser.h> |
25 | #include <net/netlink.h> | 26 | #include <net/netlink.h> |
26 | 27 | ||
28 | #include "internal.h" | ||
29 | |||
27 | static DEFINE_MUTEX(crypto_default_rng_lock); | 30 | static DEFINE_MUTEX(crypto_default_rng_lock); |
28 | struct crypto_rng *crypto_default_rng; | 31 | struct crypto_rng *crypto_default_rng; |
29 | EXPORT_SYMBOL_GPL(crypto_default_rng); | 32 | EXPORT_SYMBOL_GPL(crypto_default_rng); |
30 | static int crypto_default_rng_refcnt; | 33 | static int crypto_default_rng_refcnt; |
31 | 34 | ||
32 | static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | 35 | static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) |
36 | { | ||
37 | return container_of(tfm, struct crypto_rng, base); | ||
38 | } | ||
39 | |||
40 | int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) | ||
33 | { | 41 | { |
34 | u8 *buf = NULL; | 42 | u8 *buf = NULL; |
35 | int err; | 43 | int err; |
@@ -43,21 +51,23 @@ static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | |||
43 | seed = buf; | 51 | seed = buf; |
44 | } | 52 | } |
45 | 53 | ||
46 | err = crypto_rng_alg(tfm)->rng_reset(tfm, seed, slen); | 54 | err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); |
47 | 55 | ||
48 | kfree(buf); | 56 | kzfree(buf); |
49 | return err; | 57 | return err; |
50 | } | 58 | } |
59 | EXPORT_SYMBOL_GPL(crypto_rng_reset); | ||
51 | 60 | ||
52 | static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | 61 | static int crypto_rng_init_tfm(struct crypto_tfm *tfm) |
53 | { | 62 | { |
54 | struct rng_alg *alg = &tfm->__crt_alg->cra_rng; | 63 | return 0; |
55 | struct rng_tfm *ops = &tfm->crt_rng; | 64 | } |
56 | 65 | ||
57 | ops->rng_gen_random = alg->rng_make_random; | 66 | static unsigned int seedsize(struct crypto_alg *alg) |
58 | ops->rng_reset = rngapi_reset; | 67 | { |
68 | struct rng_alg *ralg = container_of(alg, struct rng_alg, base); | ||
59 | 69 | ||
60 | return 0; | 70 | return ralg->seedsize; |
61 | } | 71 | } |
62 | 72 | ||
63 | #ifdef CONFIG_NET | 73 | #ifdef CONFIG_NET |
@@ -67,7 +77,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) | |||
67 | 77 | ||
68 | strncpy(rrng.type, "rng", sizeof(rrng.type)); | 78 | strncpy(rrng.type, "rng", sizeof(rrng.type)); |
69 | 79 | ||
70 | rrng.seedsize = alg->cra_rng.seedsize; | 80 | rrng.seedsize = seedsize(alg); |
71 | 81 | ||
72 | if (nla_put(skb, CRYPTOCFGA_REPORT_RNG, | 82 | if (nla_put(skb, CRYPTOCFGA_REPORT_RNG, |
73 | sizeof(struct crypto_report_rng), &rrng)) | 83 | sizeof(struct crypto_report_rng), &rrng)) |
@@ -89,24 +99,27 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) | |||
89 | static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) | 99 | static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) |
90 | { | 100 | { |
91 | seq_printf(m, "type : rng\n"); | 101 | seq_printf(m, "type : rng\n"); |
92 | seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize); | 102 | seq_printf(m, "seedsize : %u\n", seedsize(alg)); |
93 | } | ||
94 | |||
95 | static unsigned int crypto_rng_ctxsize(struct crypto_alg *alg, u32 type, | ||
96 | u32 mask) | ||
97 | { | ||
98 | return alg->cra_ctxsize; | ||
99 | } | 103 | } |
100 | 104 | ||
101 | const struct crypto_type crypto_rng_type = { | 105 | static const struct crypto_type crypto_rng_type = { |
102 | .ctxsize = crypto_rng_ctxsize, | 106 | .extsize = crypto_alg_extsize, |
103 | .init = crypto_init_rng_ops, | 107 | .init_tfm = crypto_rng_init_tfm, |
104 | #ifdef CONFIG_PROC_FS | 108 | #ifdef CONFIG_PROC_FS |
105 | .show = crypto_rng_show, | 109 | .show = crypto_rng_show, |
106 | #endif | 110 | #endif |
107 | .report = crypto_rng_report, | 111 | .report = crypto_rng_report, |
112 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
113 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
114 | .type = CRYPTO_ALG_TYPE_RNG, | ||
115 | .tfmsize = offsetof(struct crypto_rng, base), | ||
108 | }; | 116 | }; |
109 | EXPORT_SYMBOL_GPL(crypto_rng_type); | 117 | |
118 | struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) | ||
119 | { | ||
120 | return crypto_alloc_tfm(alg_name, &crypto_rng_type, type, mask); | ||
121 | } | ||
122 | EXPORT_SYMBOL_GPL(crypto_alloc_rng); | ||
110 | 123 | ||
111 | int crypto_get_default_rng(void) | 124 | int crypto_get_default_rng(void) |
112 | { | 125 | { |
@@ -142,13 +155,82 @@ EXPORT_SYMBOL_GPL(crypto_get_default_rng); | |||
142 | void crypto_put_default_rng(void) | 155 | void crypto_put_default_rng(void) |
143 | { | 156 | { |
144 | mutex_lock(&crypto_default_rng_lock); | 157 | mutex_lock(&crypto_default_rng_lock); |
145 | if (!--crypto_default_rng_refcnt) { | 158 | crypto_default_rng_refcnt--; |
146 | crypto_free_rng(crypto_default_rng); | ||
147 | crypto_default_rng = NULL; | ||
148 | } | ||
149 | mutex_unlock(&crypto_default_rng_lock); | 159 | mutex_unlock(&crypto_default_rng_lock); |
150 | } | 160 | } |
151 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); | 161 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); |
152 | 162 | ||
163 | #if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) | ||
164 | int crypto_del_default_rng(void) | ||
165 | { | ||
166 | int err = -EBUSY; | ||
167 | |||
168 | mutex_lock(&crypto_default_rng_lock); | ||
169 | if (crypto_default_rng_refcnt) | ||
170 | goto out; | ||
171 | |||
172 | crypto_free_rng(crypto_default_rng); | ||
173 | crypto_default_rng = NULL; | ||
174 | |||
175 | err = 0; | ||
176 | |||
177 | out: | ||
178 | mutex_unlock(&crypto_default_rng_lock); | ||
179 | |||
180 | return err; | ||
181 | } | ||
182 | EXPORT_SYMBOL_GPL(crypto_del_default_rng); | ||
183 | #endif | ||
184 | |||
185 | int crypto_register_rng(struct rng_alg *alg) | ||
186 | { | ||
187 | struct crypto_alg *base = &alg->base; | ||
188 | |||
189 | if (alg->seedsize > PAGE_SIZE / 8) | ||
190 | return -EINVAL; | ||
191 | |||
192 | base->cra_type = &crypto_rng_type; | ||
193 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
194 | base->cra_flags |= CRYPTO_ALG_TYPE_RNG; | ||
195 | |||
196 | return crypto_register_alg(base); | ||
197 | } | ||
198 | EXPORT_SYMBOL_GPL(crypto_register_rng); | ||
199 | |||
200 | void crypto_unregister_rng(struct rng_alg *alg) | ||
201 | { | ||
202 | crypto_unregister_alg(&alg->base); | ||
203 | } | ||
204 | EXPORT_SYMBOL_GPL(crypto_unregister_rng); | ||
205 | |||
206 | int crypto_register_rngs(struct rng_alg *algs, int count) | ||
207 | { | ||
208 | int i, ret; | ||
209 | |||
210 | for (i = 0; i < count; i++) { | ||
211 | ret = crypto_register_rng(algs + i); | ||
212 | if (ret) | ||
213 | goto err; | ||
214 | } | ||
215 | |||
216 | return 0; | ||
217 | |||
218 | err: | ||
219 | for (--i; i >= 0; --i) | ||
220 | crypto_unregister_rng(algs + i); | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | EXPORT_SYMBOL_GPL(crypto_register_rngs); | ||
225 | |||
226 | void crypto_unregister_rngs(struct rng_alg *algs, int count) | ||
227 | { | ||
228 | int i; | ||
229 | |||
230 | for (i = count - 1; i >= 0; --i) | ||
231 | crypto_unregister_rng(algs + i); | ||
232 | } | ||
233 | EXPORT_SYMBOL_GPL(crypto_unregister_rngs); | ||
234 | |||
153 | MODULE_LICENSE("GPL"); | 235 | MODULE_LICENSE("GPL"); |
154 | MODULE_DESCRIPTION("Random Number Generator"); | 236 | MODULE_DESCRIPTION("Random Number Generator"); |
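The crypto_put_default_rng() change above means that dropping the last user no longer frees the default RNG; the instance now survives until crypto_del_default_rng() explicitly retires it (and the latter refuses with -EBUSY while the refcount is nonzero). A caller sketch under that model:

static int demo_default_rng(u8 *buf, unsigned int len)
{
	int ret;

	ret = crypto_get_default_rng();
	if (ret)
		return ret;

	ret = crypto_rng_get_bytes(crypto_default_rng, buf, len);

	crypto_put_default_rng();	/* drops the refcount, keeps the tfm */
	return ret;
}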
diff --git a/crypto/rsa.c b/crypto/rsa.c new file mode 100644 index 000000000000..752af0656f2e --- /dev/null +++ b/crypto/rsa.c | |||
@@ -0,0 +1,315 @@ | |||
1 | /* RSA asymmetric public-key algorithm [RFC3447] | ||
2 | * | ||
3 | * Copyright (c) 2015, Intel Corporation | ||
4 | * Authors: Tadeusz Struk <tadeusz.struk@intel.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <crypto/internal/rsa.h> | ||
14 | #include <crypto/internal/akcipher.h> | ||
15 | #include <crypto/akcipher.h> | ||
16 | |||
17 | /* | ||
18 | * RSAEP function [RFC3447 sec 5.1.1] | ||
19 | * c = m^e mod n; | ||
20 | */ | ||
21 | static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m) | ||
22 | { | ||
23 | /* (1) Validate 0 <= m < n */ | ||
24 | if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) | ||
25 | return -EINVAL; | ||
26 | |||
27 | /* (2) c = m^e mod n */ | ||
28 | return mpi_powm(c, m, key->e, key->n); | ||
29 | } | ||
30 | |||
31 | /* | ||
32 | * RSADP function [RFC3447 sec 5.1.2] | ||
33 | * m = c^d mod n; | ||
34 | */ | ||
35 | static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c) | ||
36 | { | ||
37 | /* (1) Validate 0 <= c < n */ | ||
38 | if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) | ||
39 | return -EINVAL; | ||
40 | |||
41 | /* (2) m = c^d mod n */ | ||
42 | return mpi_powm(m, c, key->d, key->n); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * RSASP1 function [RFC3447 sec 5.2.1] | ||
47 | * s = m^d mod n | ||
48 | */ | ||
49 | static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m) | ||
50 | { | ||
51 | /* (1) Validate 0 <= m < n */ | ||
52 | if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) | ||
53 | return -EINVAL; | ||
54 | |||
55 | /* (2) s = m^d mod n */ | ||
56 | return mpi_powm(s, m, key->d, key->n); | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * RSAVP1 function [RFC3447 sec 5.2.2] | ||
61 | * m = s^e mod n; | ||
62 | */ | ||
63 | static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s) | ||
64 | { | ||
65 | /* (1) Validate 0 <= s < n */ | ||
66 | if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0) | ||
67 | return -EINVAL; | ||
68 | |||
69 | /* (2) m = s^e mod n */ | ||
70 | return mpi_powm(m, s, key->e, key->n); | ||
71 | } | ||
72 | |||
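The four primitives are mutually consistent because, for a well-formed key pair, e d \equiv 1 \pmod{\lambda(n)}, so for every message representative 0 <= m < n (with n a product of distinct primes):

(m^e)^d \equiv m^{ed} \equiv m^{1 + k\lambda(n)} \equiv m \pmod{n}

which is why RSADP inverts RSAEP and RSAVP1 recovers the representative that RSASP1 produced.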
73 | static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm) | ||
74 | { | ||
75 | return akcipher_tfm_ctx(tfm); | ||
76 | } | ||
77 | |||
78 | static int rsa_enc(struct akcipher_request *req) | ||
79 | { | ||
80 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
81 | const struct rsa_key *pkey = rsa_get_key(tfm); | ||
82 | MPI m, c = mpi_alloc(0); | ||
83 | int ret = 0; | ||
84 | int sign; | ||
85 | |||
86 | if (!c) | ||
87 | return -ENOMEM; | ||
88 | |||
89 | if (unlikely(!pkey->n || !pkey->e)) { | ||
90 | ret = -EINVAL; | ||
91 | goto err_free_c; | ||
92 | } | ||
93 | |||
94 | if (req->dst_len < mpi_get_size(pkey->n)) { | ||
95 | req->dst_len = mpi_get_size(pkey->n); | ||
96 | ret = -EOVERFLOW; | ||
97 | goto err_free_c; | ||
98 | } | ||
99 | |||
100 | m = mpi_read_raw_data(req->src, req->src_len); | ||
101 | if (!m) { | ||
102 | ret = -ENOMEM; | ||
103 | goto err_free_c; | ||
104 | } | ||
105 | |||
106 | ret = _rsa_enc(pkey, c, m); | ||
107 | if (ret) | ||
108 | goto err_free_m; | ||
109 | |||
110 | ret = mpi_read_buffer(c, req->dst, req->dst_len, &req->dst_len, &sign); | ||
111 | if (ret) | ||
112 | goto err_free_m; | ||
113 | |||
114 | if (sign < 0) { | ||
115 | ret = -EBADMSG; | ||
116 | goto err_free_m; | ||
117 | } | ||
118 | |||
119 | err_free_m: | ||
120 | mpi_free(m); | ||
121 | err_free_c: | ||
122 | mpi_free(c); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | static int rsa_dec(struct akcipher_request *req) | ||
127 | { | ||
128 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
129 | const struct rsa_key *pkey = rsa_get_key(tfm); | ||
130 | MPI c, m = mpi_alloc(0); | ||
131 | int ret = 0; | ||
132 | int sign; | ||
133 | |||
134 | if (!m) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | if (unlikely(!pkey->n || !pkey->d)) { | ||
138 | ret = -EINVAL; | ||
139 | goto err_free_m; | ||
140 | } | ||
141 | |||
142 | if (req->dst_len < mpi_get_size(pkey->n)) { | ||
143 | req->dst_len = mpi_get_size(pkey->n); | ||
144 | ret = -EOVERFLOW; | ||
145 | goto err_free_m; | ||
146 | } | ||
147 | |||
148 | c = mpi_read_raw_data(req->src, req->src_len); | ||
149 | if (!c) { | ||
150 | ret = -ENOMEM; | ||
151 | goto err_free_m; | ||
152 | } | ||
153 | |||
154 | ret = _rsa_dec(pkey, m, c); | ||
155 | if (ret) | ||
156 | goto err_free_c; | ||
157 | |||
158 | ret = mpi_read_buffer(m, req->dst, req->dst_len, &req->dst_len, &sign); | ||
159 | if (ret) | ||
160 | goto err_free_c; | ||
161 | |||
162 | if (sign < 0) { | ||
163 | ret = -EBADMSG; | ||
164 | goto err_free_c; | ||
165 | } | ||
166 | |||
167 | err_free_c: | ||
168 | mpi_free(c); | ||
169 | err_free_m: | ||
170 | mpi_free(m); | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static int rsa_sign(struct akcipher_request *req) | ||
175 | { | ||
176 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
177 | const struct rsa_key *pkey = rsa_get_key(tfm); | ||
178 | MPI m, s = mpi_alloc(0); | ||
179 | int ret = 0; | ||
180 | int sign; | ||
181 | |||
182 | if (!s) | ||
183 | return -ENOMEM; | ||
184 | |||
185 | if (unlikely(!pkey->n || !pkey->d)) { | ||
186 | ret = -EINVAL; | ||
187 | goto err_free_s; | ||
188 | } | ||
189 | |||
190 | if (req->dst_len < mpi_get_size(pkey->n)) { | ||
191 | req->dst_len = mpi_get_size(pkey->n); | ||
192 | ret = -EOVERFLOW; | ||
193 | goto err_free_s; | ||
194 | } | ||
195 | |||
196 | m = mpi_read_raw_data(req->src, req->src_len); | ||
197 | if (!m) { | ||
198 | ret = -ENOMEM; | ||
199 | goto err_free_s; | ||
200 | } | ||
201 | |||
202 | ret = _rsa_sign(pkey, s, m); | ||
203 | if (ret) | ||
204 | goto err_free_m; | ||
205 | |||
206 | ret = mpi_read_buffer(s, req->dst, req->dst_len, &req->dst_len, &sign); | ||
207 | if (ret) | ||
208 | goto err_free_m; | ||
209 | |||
210 | if (sign < 0) { | ||
211 | ret = -EBADMSG; | ||
212 | goto err_free_m; | ||
213 | } | ||
214 | |||
215 | err_free_m: | ||
216 | mpi_free(m); | ||
217 | err_free_s: | ||
218 | mpi_free(s); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | static int rsa_verify(struct akcipher_request *req) | ||
223 | { | ||
224 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
225 | const struct rsa_key *pkey = rsa_get_key(tfm); | ||
226 | MPI s, m = mpi_alloc(0); | ||
227 | int ret = 0; | ||
228 | int sign; | ||
229 | |||
230 | if (!m) | ||
231 | return -ENOMEM; | ||
232 | |||
233 | if (unlikely(!pkey->n || !pkey->e)) { | ||
234 | ret = -EINVAL; | ||
235 | goto err_free_m; | ||
236 | } | ||
237 | |||
238 | if (req->dst_len < mpi_get_size(pkey->n)) { | ||
239 | req->dst_len = mpi_get_size(pkey->n); | ||
240 | ret = -EOVERFLOW; | ||
241 | goto err_free_m; | ||
242 | } | ||
243 | |||
244 | s = mpi_read_raw_data(req->src, req->src_len); | ||
245 | if (!s) { | ||
246 | ret = -ENOMEM; | ||
247 | goto err_free_m; | ||
248 | } | ||
249 | |||
250 | ret = _rsa_verify(pkey, m, s); | ||
251 | if (ret) | ||
252 | goto err_free_s; | ||
253 | |||
254 | ret = mpi_read_buffer(m, req->dst, req->dst_len, &req->dst_len, &sign); | ||
255 | if (ret) | ||
256 | goto err_free_s; | ||
257 | |||
258 | if (sign < 0) { | ||
259 | ret = -EBADMSG; | ||
260 | goto err_free_s; | ||
261 | } | ||
262 | |||
263 | err_free_s: | ||
264 | mpi_free(s); | ||
265 | err_free_m: | ||
266 | mpi_free(m); | ||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static int rsa_setkey(struct crypto_akcipher *tfm, const void *key, | ||
271 | unsigned int keylen) | ||
272 | { | ||
273 | struct rsa_key *pkey = akcipher_tfm_ctx(tfm); | ||
274 | |||
275 | return rsa_parse_key(pkey, key, keylen); | ||
276 | } | ||
277 | |||
278 | static void rsa_exit_tfm(struct crypto_akcipher *tfm) | ||
279 | { | ||
280 | struct rsa_key *pkey = akcipher_tfm_ctx(tfm); | ||
281 | |||
282 | rsa_free_key(pkey); | ||
283 | } | ||
284 | |||
285 | static struct akcipher_alg rsa = { | ||
286 | .encrypt = rsa_enc, | ||
287 | .decrypt = rsa_dec, | ||
288 | .sign = rsa_sign, | ||
289 | .verify = rsa_verify, | ||
290 | .setkey = rsa_setkey, | ||
291 | .exit = rsa_exit_tfm, | ||
292 | .base = { | ||
293 | .cra_name = "rsa", | ||
294 | .cra_driver_name = "rsa-generic", | ||
295 | .cra_priority = 100, | ||
296 | .cra_module = THIS_MODULE, | ||
297 | .cra_ctxsize = sizeof(struct rsa_key), | ||
298 | }, | ||
299 | }; | ||
300 | |||
301 | static int rsa_init(void) | ||
302 | { | ||
303 | return crypto_register_akcipher(&rsa); | ||
304 | } | ||
305 | |||
306 | static void rsa_exit(void) | ||
307 | { | ||
308 | crypto_unregister_akcipher(&rsa); | ||
309 | } | ||
310 | |||
311 | module_init(rsa_init); | ||
312 | module_exit(rsa_exit); | ||
313 | MODULE_ALIAS_CRYPTO("rsa"); | ||
314 | MODULE_LICENSE("GPL"); | ||
315 | MODULE_DESCRIPTION("RSA generic algorithm"); | ||
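Note the -EOVERFLOW convention shared by all four operations above: a too-small dst_len is rewritten to mpi_get_size(pkey->n) before failing, so a caller can size its output buffer in two passes. A hypothetical caller sketch, assuming the akcipher request helpers introduced alongside this file:

static int rsa_probe_dst_size(struct crypto_akcipher *tfm, void *src,
			      unsigned int src_len)
{
	struct akcipher_request *req;
	int ret;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Deliberately undersized destination: 0 bytes. */
	akcipher_request_set_crypt(req, src, NULL, src_len, 0);

	ret = crypto_akcipher_encrypt(req);	/* dispatches to rsa_enc() */
	if (ret == -EOVERFLOW)
		pr_info("rsa: destination needs %u bytes\n", req->dst_len);

	akcipher_request_free(req);
	return ret;
}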
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c new file mode 100644 index 000000000000..3e8e0a9e5a8e --- /dev/null +++ b/crypto/rsa_helper.c | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * RSA key extract helper | ||
3 | * | ||
4 | * Copyright (c) 2015, Intel Corporation | ||
5 | * Authors: Tadeusz Struk <tadeusz.struk@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/export.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/fips.h> | ||
17 | #include <crypto/internal/rsa.h> | ||
18 | #include "rsakey-asn1.h" | ||
19 | |||
20 | int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, | ||
21 | const void *value, size_t vlen) | ||
22 | { | ||
23 | struct rsa_key *key = context; | ||
24 | |||
25 | key->n = mpi_read_raw_data(value, vlen); | ||
26 | |||
27 | if (!key->n) | ||
28 | return -ENOMEM; | ||
29 | |||
30 | /* In FIPS mode only allow key size 2K & 3K */ | ||
31 | if (fips_enabled && (mpi_get_size(key->n) != 256 && | ||
32 | mpi_get_size(key->n) != 384)) { | ||
33 | pr_err("RSA: key size not allowed in FIPS mode\n"); | ||
34 | mpi_free(key->n); | ||
35 | key->n = NULL; | ||
36 | return -EINVAL; | ||
37 | } | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | int rsa_get_e(void *context, size_t hdrlen, unsigned char tag, | ||
42 | const void *value, size_t vlen) | ||
43 | { | ||
44 | struct rsa_key *key = context; | ||
45 | |||
46 | key->e = mpi_read_raw_data(value, vlen); | ||
47 | |||
48 | if (!key->e) | ||
49 | return -ENOMEM; | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | int rsa_get_d(void *context, size_t hdrlen, unsigned char tag, | ||
55 | const void *value, size_t vlen) | ||
56 | { | ||
57 | struct rsa_key *key = context; | ||
58 | |||
59 | key->d = mpi_read_raw_data(value, vlen); | ||
60 | |||
61 | if (!key->d) | ||
62 | return -ENOMEM; | ||
63 | |||
64 | /* In FIPS mode only allow key size 2K & 3K */ | ||
65 | if (fips_enabled && (mpi_get_size(key->d) != 256 && | ||
66 | mpi_get_size(key->d) != 384)) { | ||
67 | pr_err("RSA: key size not allowed in FIPS mode\n"); | ||
68 | mpi_free(key->d); | ||
69 | key->d = NULL; | ||
70 | return -EINVAL; | ||
71 | } | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static void free_mpis(struct rsa_key *key) | ||
76 | { | ||
77 | mpi_free(key->n); | ||
78 | mpi_free(key->e); | ||
79 | mpi_free(key->d); | ||
80 | key->n = NULL; | ||
81 | key->e = NULL; | ||
82 | key->d = NULL; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * rsa_free_key() - frees rsa key allocated by rsa_parse_key() | ||
87 | * | ||
88 | * @key: struct rsa_key key representation | ||
89 | */ | ||
90 | void rsa_free_key(struct rsa_key *key) | ||
91 | { | ||
92 | free_mpis(key); | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(rsa_free_key); | ||
95 | |||
96 | /** | ||
97 | * rsa_parse_key() - extracts an rsa key from a BER-encoded buffer | ||
98 | * and stores it in the provided struct rsa_key | ||
99 | * | ||
100 | * @rsa_key: struct rsa_key key representation | ||
101 | * @key: key in BER format | ||
102 | * @key_len: length of key | ||
103 | * | ||
104 | * Return: 0 on success or error code in case of error | ||
105 | */ | ||
106 | int rsa_parse_key(struct rsa_key *rsa_key, const void *key, | ||
107 | unsigned int key_len) | ||
108 | { | ||
109 | int ret; | ||
110 | |||
111 | free_mpis(rsa_key); | ||
112 | ret = asn1_ber_decoder(&rsakey_decoder, rsa_key, key, key_len); | ||
113 | if (ret < 0) | ||
114 | goto error; | ||
115 | |||
116 | return 0; | ||
117 | error: | ||
118 | free_mpis(rsa_key); | ||
119 | return ret; | ||
120 | } | ||
121 | EXPORT_SYMBOL_GPL(rsa_parse_key); | ||
diff --git a/crypto/rsakey.asn1 b/crypto/rsakey.asn1 new file mode 100644 index 000000000000..3c7b5df7b428 --- /dev/null +++ b/crypto/rsakey.asn1 | |||
@@ -0,0 +1,5 @@ | |||
1 | RsaKey ::= SEQUENCE { | ||
2 | n INTEGER ({ rsa_get_n }), | ||
3 | e INTEGER ({ rsa_get_e }), | ||
4 | d INTEGER ({ rsa_get_d }) | ||
5 | } | ||
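For illustration, a toy RsaKey with n = 3233, e = 17, d = 2753 (far too small for real use) DER-encodes as:

	30 0b			SEQUENCE, 11 content bytes
	   02 02 0c a1		INTEGER n = 0x0ca1 (3233)
	   02 01 11		INTEGER e = 0x11 (17)
	   02 02 0a c1		INTEGER d = 0x0ac1 (2753)

rsa_parse_key() in crypto/rsa_helper.c walks exactly this structure, handing each INTEGER's value bytes to rsa_get_n(), rsa_get_e() and rsa_get_d() in turn.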
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 3bd749c7bb70..ea5815c5e128 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c | |||
@@ -54,7 +54,11 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, | |||
54 | struct page *page; | 54 | struct page *page; |
55 | 55 | ||
56 | page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); | 56 | page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); |
57 | if (!PageSlab(page)) | 57 | /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as |
58 | * PageSlab cannot be optimised away per se due to | ||
59 | * use of volatile pointer. | ||
60 | */ | ||
61 | if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page)) | ||
58 | flush_dcache_page(page); | 62 | flush_dcache_page(page); |
59 | } | 63 | } |
60 | 64 | ||
@@ -104,22 +108,18 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | |||
104 | unsigned int start, unsigned int nbytes, int out) | 108 | unsigned int start, unsigned int nbytes, int out) |
105 | { | 109 | { |
106 | struct scatter_walk walk; | 110 | struct scatter_walk walk; |
107 | unsigned int offset = 0; | 111 | struct scatterlist tmp[2]; |
108 | 112 | ||
109 | if (!nbytes) | 113 | if (!nbytes) |
110 | return; | 114 | return; |
111 | 115 | ||
112 | for (;;) { | 116 | sg = scatterwalk_ffwd(tmp, sg, start); |
113 | scatterwalk_start(&walk, sg); | ||
114 | |||
115 | if (start < offset + sg->length) | ||
116 | break; | ||
117 | 117 | ||
118 | offset += sg->length; | 118 | if (sg_page(sg) == virt_to_page(buf) && |
119 | sg = sg_next(sg); | 119 | sg->offset == offset_in_page(buf)) |
120 | } | 120 | return; |
121 | 121 | ||
122 | scatterwalk_advance(&walk, start - offset); | 122 | scatterwalk_start(&walk, sg); |
123 | scatterwalk_copychunks(buf, &walk, nbytes, out); | 123 | scatterwalk_copychunks(buf, &walk, nbytes, out); |
124 | scatterwalk_done(&walk, out, 0); | 124 | scatterwalk_done(&walk, out, 0); |
125 | } | 125 | } |
@@ -146,3 +146,26 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) | |||
146 | return n; | 146 | return n; |
147 | } | 147 | } |
148 | EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen); | 148 | EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen); |
149 | |||
150 | struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], | ||
151 | struct scatterlist *src, | ||
152 | unsigned int len) | ||
153 | { | ||
154 | for (;;) { | ||
155 | if (!len) | ||
156 | return src; | ||
157 | |||
158 | if (src->length > len) | ||
159 | break; | ||
160 | |||
161 | len -= src->length; | ||
162 | src = sg_next(src); | ||
163 | } | ||
164 | |||
165 | sg_init_table(dst, 2); | ||
166 | sg_set_page(dst, sg_page(src), src->length - len, src->offset + len); | ||
167 | scatterwalk_crypto_chain(dst, sg_next(src), 0, 2); | ||
168 | |||
169 | return dst; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(scatterwalk_ffwd); | ||
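scatterwalk_ffwd() lets a caller skip a prefix of a scatterlist without mutating it, which is what the scatterwalk_map_and_copy() rewrite above relies on. A usage sketch (req is a hypothetical AEAD request):

	struct scatterlist tmp[2];
	struct scatterlist *payload;

	/* Walk only the payload, leaving req->src untouched. */
	payload = scatterwalk_ffwd(tmp, req->src, req->assoclen);

The result is either a pointer into the original list (when the skipped prefix ends exactly on an element boundary) or tmp[], whose first entry covers the tail of the partial element and chains to the remainder of the source list.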
diff --git a/crypto/seqiv.c b/crypto/seqiv.c index b7bb9a2f4a31..122c56e3491b 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c | |||
@@ -13,9 +13,11 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <crypto/internal/aead.h> | 16 | #include <crypto/internal/geniv.h> |
17 | #include <crypto/internal/skcipher.h> | 17 | #include <crypto/internal/skcipher.h> |
18 | #include <crypto/null.h> | ||
18 | #include <crypto/rng.h> | 19 | #include <crypto/rng.h> |
20 | #include <crypto/scatterwalk.h> | ||
19 | #include <linux/err.h> | 21 | #include <linux/err.h> |
20 | #include <linux/init.h> | 22 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
@@ -24,11 +26,25 @@ | |||
24 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
25 | #include <linux/string.h> | 27 | #include <linux/string.h> |
26 | 28 | ||
29 | struct seqniv_request_ctx { | ||
30 | struct scatterlist dst[2]; | ||
31 | struct aead_request subreq; | ||
32 | }; | ||
33 | |||
27 | struct seqiv_ctx { | 34 | struct seqiv_ctx { |
28 | spinlock_t lock; | 35 | spinlock_t lock; |
29 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | 36 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); |
30 | }; | 37 | }; |
31 | 38 | ||
39 | struct seqiv_aead_ctx { | ||
40 | /* aead_geniv_ctx must be the first element */ | ||
41 | struct aead_geniv_ctx geniv; | ||
42 | struct crypto_blkcipher *null; | ||
43 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | ||
44 | }; | ||
45 | |||
46 | static void seqiv_free(struct crypto_instance *inst); | ||
47 | |||
32 | static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) | 48 | static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) |
33 | { | 49 | { |
34 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); | 50 | struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); |
@@ -81,6 +97,77 @@ static void seqiv_aead_complete(struct crypto_async_request *base, int err) | |||
81 | aead_givcrypt_complete(req, err); | 97 | aead_givcrypt_complete(req, err); |
82 | } | 98 | } |
83 | 99 | ||
100 | static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) | ||
101 | { | ||
102 | struct aead_request *subreq = aead_request_ctx(req); | ||
103 | struct crypto_aead *geniv; | ||
104 | |||
105 | if (err == -EINPROGRESS) | ||
106 | return; | ||
107 | |||
108 | if (err) | ||
109 | goto out; | ||
110 | |||
111 | geniv = crypto_aead_reqtfm(req); | ||
112 | memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv)); | ||
113 | |||
114 | out: | ||
115 | kzfree(subreq->iv); | ||
116 | } | ||
117 | |||
118 | static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, | ||
119 | int err) | ||
120 | { | ||
121 | struct aead_request *req = base->data; | ||
122 | |||
123 | seqiv_aead_encrypt_complete2(req, err); | ||
124 | aead_request_complete(req, err); | ||
125 | } | ||
126 | |||
127 | static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err) | ||
128 | { | ||
129 | unsigned int ivsize = 8; | ||
130 | u8 data[20]; | ||
131 | |||
132 | if (err == -EINPROGRESS) | ||
133 | return; | ||
134 | |||
135 | /* Swap IV and ESP header back to correct order. */ | ||
136 | scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0); | ||
137 | scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1); | ||
138 | scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1); | ||
139 | } | ||
140 | |||
141 | static void seqniv_aead_encrypt_complete(struct crypto_async_request *base, | ||
142 | int err) | ||
143 | { | ||
144 | struct aead_request *req = base->data; | ||
145 | |||
146 | seqniv_aead_encrypt_complete2(req, err); | ||
147 | aead_request_complete(req, err); | ||
148 | } | ||
149 | |||
150 | static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err) | ||
151 | { | ||
152 | u8 data[4]; | ||
153 | |||
154 | if (err == -EINPROGRESS) | ||
155 | return; | ||
156 | |||
157 | /* Move ESP header back to correct location. */ | ||
158 | scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0); | ||
159 | scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1); | ||
160 | } | ||
161 | |||
162 | static void seqniv_aead_decrypt_complete(struct crypto_async_request *base, | ||
163 | int err) | ||
164 | { | ||
165 | struct aead_request *req = base->data; | ||
166 | |||
167 | seqniv_aead_decrypt_complete2(req, err); | ||
168 | aead_request_complete(req, err); | ||
169 | } | ||
170 | |||
84 | static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, | 171 | static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, |
85 | unsigned int ivsize) | 172 | unsigned int ivsize) |
86 | { | 173 | { |
@@ -186,160 +273,477 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) | |||
186 | return err; | 273 | return err; |
187 | } | 274 | } |
188 | 275 | ||
189 | static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) | 276 | static int seqniv_aead_encrypt(struct aead_request *req) |
190 | { | 277 | { |
191 | struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); | 278 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
192 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 279 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); |
193 | int err = 0; | 280 | struct seqniv_request_ctx *rctx = aead_request_ctx(req); |
281 | struct aead_request *subreq = &rctx->subreq; | ||
282 | struct scatterlist *dst; | ||
283 | crypto_completion_t compl; | ||
284 | void *data; | ||
285 | unsigned int ivsize = 8; | ||
286 | u8 buf[20] __attribute__ ((aligned(__alignof__(u32)))); | ||
287 | int err; | ||
194 | 288 | ||
195 | spin_lock_bh(&ctx->lock); | 289 | if (req->cryptlen < ivsize) |
196 | if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first) | 290 | return -EINVAL; |
197 | goto unlock; | ||
198 | 291 | ||
199 | crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; | 292 | /* ESP AD is at most 12 bytes (ESN). */ |
200 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | 293 | if (req->assoclen > 12) |
201 | crypto_ablkcipher_ivsize(geniv)); | 294 | return -EINVAL; |
202 | 295 | ||
203 | unlock: | 296 | aead_request_set_tfm(subreq, ctx->geniv.child); |
204 | spin_unlock_bh(&ctx->lock); | ||
205 | 297 | ||
206 | if (err) | 298 | compl = seqniv_aead_encrypt_complete; |
207 | return err; | 299 | data = req; |
300 | |||
301 | if (req->src != req->dst) { | ||
302 | struct blkcipher_desc desc = { | ||
303 | .tfm = ctx->null, | ||
304 | }; | ||
305 | |||
306 | err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, | ||
307 | req->assoclen + req->cryptlen); | ||
308 | if (err) | ||
309 | return err; | ||
310 | } | ||
208 | 311 | ||
209 | return seqiv_givencrypt(req); | 312 | dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize); |
313 | |||
314 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
315 | aead_request_set_crypt(subreq, dst, dst, | ||
316 | req->cryptlen - ivsize, req->iv); | ||
317 | aead_request_set_ad(subreq, req->assoclen); | ||
318 | |||
319 | memcpy(buf, req->iv, ivsize); | ||
320 | crypto_xor(buf, ctx->salt, ivsize); | ||
321 | memcpy(req->iv, buf, ivsize); | ||
322 | |||
323 | /* Swap order of IV and ESP AD for ICV generation. */ | ||
324 | scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0); | ||
325 | scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1); | ||
326 | |||
327 | err = crypto_aead_encrypt(subreq); | ||
328 | seqniv_aead_encrypt_complete2(req, err); | ||
329 | return err; | ||
210 | } | 330 | } |
211 | 331 | ||
212 | static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req) | 332 | static int seqiv_aead_encrypt(struct aead_request *req) |
213 | { | 333 | { |
214 | struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); | 334 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
215 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | 335 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); |
216 | int err = 0; | 336 | struct aead_request *subreq = aead_request_ctx(req); |
337 | crypto_completion_t compl; | ||
338 | void *data; | ||
339 | u8 *info; | ||
340 | unsigned int ivsize = 8; | ||
341 | int err; | ||
217 | 342 | ||
218 | spin_lock_bh(&ctx->lock); | 343 | if (req->cryptlen < ivsize) |
219 | if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first) | 344 | return -EINVAL; |
220 | goto unlock; | ||
221 | 345 | ||
222 | crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt; | 346 | aead_request_set_tfm(subreq, ctx->geniv.child); |
223 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
224 | crypto_aead_ivsize(geniv)); | ||
225 | 347 | ||
226 | unlock: | 348 | compl = req->base.complete; |
227 | spin_unlock_bh(&ctx->lock); | 349 | data = req->base.data; |
350 | info = req->iv; | ||
228 | 351 | ||
229 | if (err) | 352 | if (req->src != req->dst) { |
230 | return err; | 353 | struct blkcipher_desc desc = { |
354 | .tfm = ctx->null, | ||
355 | }; | ||
231 | 356 | ||
232 | return seqiv_aead_givencrypt(req); | 357 | err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, |
358 | req->assoclen + req->cryptlen); | ||
359 | if (err) | ||
360 | return err; | ||
361 | } | ||
362 | |||
363 | if (unlikely(!IS_ALIGNED((unsigned long)info, | ||
364 | crypto_aead_alignmask(geniv) + 1))) { | ||
365 | info = kmalloc(ivsize, req->base.flags & | ||
366 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | ||
367 | GFP_ATOMIC); | ||
368 | if (!info) | ||
369 | return -ENOMEM; | ||
370 | |||
371 | memcpy(info, req->iv, ivsize); | ||
372 | compl = seqiv_aead_encrypt_complete; | ||
373 | data = req; | ||
374 | } | ||
375 | |||
376 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
377 | aead_request_set_crypt(subreq, req->dst, req->dst, | ||
378 | req->cryptlen - ivsize, info); | ||
379 | aead_request_set_ad(subreq, req->assoclen + ivsize); | ||
380 | |||
381 | crypto_xor(info, ctx->salt, ivsize); | ||
382 | scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); | ||
383 | |||
384 | err = crypto_aead_encrypt(subreq); | ||
385 | if (unlikely(info != req->iv)) | ||
386 | seqiv_aead_encrypt_complete2(req, err); | ||
387 | return err; | ||
388 | } | ||
389 | |||
390 | static int seqniv_aead_decrypt(struct aead_request *req) | ||
391 | { | ||
392 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
393 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | ||
394 | struct seqniv_request_ctx *rctx = aead_request_ctx(req); | ||
395 | struct aead_request *subreq = &rctx->subreq; | ||
396 | struct scatterlist *dst; | ||
397 | crypto_completion_t compl; | ||
398 | void *data; | ||
399 | unsigned int ivsize = 8; | ||
400 | u8 buf[20]; | ||
401 | int err; | ||
402 | |||
403 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) | ||
404 | return -EINVAL; | ||
405 | |||
406 | aead_request_set_tfm(subreq, ctx->geniv.child); | ||
407 | |||
408 | compl = req->base.complete; | ||
409 | data = req->base.data; | ||
410 | |||
411 | if (req->assoclen > 12) | ||
412 | return -EINVAL; | ||
413 | else if (req->assoclen > 8) { | ||
414 | compl = seqniv_aead_decrypt_complete; | ||
415 | data = req; | ||
416 | } | ||
417 | |||
418 | if (req->src != req->dst) { | ||
419 | struct blkcipher_desc desc = { | ||
420 | .tfm = ctx->null, | ||
421 | }; | ||
422 | |||
423 | err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, | ||
424 | req->assoclen + req->cryptlen); | ||
425 | if (err) | ||
426 | return err; | ||
427 | } | ||
428 | |||
429 | /* Move ESP AD forward for ICV generation. */ | ||
430 | scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0); | ||
431 | memcpy(req->iv, buf + req->assoclen, ivsize); | ||
432 | scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1); | ||
433 | |||
434 | dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize); | ||
435 | |||
436 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
437 | aead_request_set_crypt(subreq, dst, dst, | ||
438 | req->cryptlen - ivsize, req->iv); | ||
439 | aead_request_set_ad(subreq, req->assoclen); | ||
440 | |||
441 | err = crypto_aead_decrypt(subreq); | ||
442 | if (req->assoclen > 8) | ||
443 | seqniv_aead_decrypt_complete2(req, err); | ||
444 | return err; | ||
445 | } | ||
446 | |||
447 | static int seqiv_aead_decrypt(struct aead_request *req) | ||
448 | { | ||
449 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
450 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | ||
451 | struct aead_request *subreq = aead_request_ctx(req); | ||
452 | crypto_completion_t compl; | ||
453 | void *data; | ||
454 | unsigned int ivsize = 8; | ||
455 | |||
456 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) | ||
457 | return -EINVAL; | ||
458 | |||
459 | aead_request_set_tfm(subreq, ctx->geniv.child); | ||
460 | |||
461 | compl = req->base.complete; | ||
462 | data = req->base.data; | ||
463 | |||
464 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
465 | aead_request_set_crypt(subreq, req->src, req->dst, | ||
466 | req->cryptlen - ivsize, req->iv); | ||
467 | aead_request_set_ad(subreq, req->assoclen + ivsize); | ||
468 | |||
469 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); | ||
470 | if (req->src != req->dst) | ||
471 | scatterwalk_map_and_copy(req->iv, req->dst, | ||
472 | req->assoclen, ivsize, 1); | ||
473 | |||
474 | return crypto_aead_decrypt(subreq); | ||
233 | } | 475 | } |
234 | 476 | ||
235 | static int seqiv_init(struct crypto_tfm *tfm) | 477 | static int seqiv_init(struct crypto_tfm *tfm) |
236 | { | 478 | { |
237 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); | 479 | struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); |
238 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); | 480 | struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); |
481 | int err; | ||
239 | 482 | ||
240 | spin_lock_init(&ctx->lock); | 483 | spin_lock_init(&ctx->lock); |
241 | 484 | ||
242 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); | 485 | tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); |
243 | 486 | ||
244 | return skcipher_geniv_init(tfm); | 487 | err = 0; |
488 | if (!crypto_get_default_rng()) { | ||
489 | crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; | ||
490 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
491 | crypto_ablkcipher_ivsize(geniv)); | ||
492 | crypto_put_default_rng(); | ||
493 | } | ||
494 | |||
495 | return err ?: skcipher_geniv_init(tfm); | ||
245 | } | 496 | } |
246 | 497 | ||
247 | static int seqiv_aead_init(struct crypto_tfm *tfm) | 498 | static int seqiv_old_aead_init(struct crypto_tfm *tfm) |
248 | { | 499 | { |
249 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | 500 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); |
250 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | 501 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); |
502 | int err; | ||
251 | 503 | ||
252 | spin_lock_init(&ctx->lock); | 504 | spin_lock_init(&ctx->lock); |
253 | 505 | ||
254 | tfm->crt_aead.reqsize = sizeof(struct aead_request); | 506 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
507 | sizeof(struct aead_request)); | ||
508 | err = 0; | ||
509 | if (!crypto_get_default_rng()) { | ||
510 | geniv->givencrypt = seqiv_aead_givencrypt; | ||
511 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
512 | crypto_aead_ivsize(geniv)); | ||
513 | crypto_put_default_rng(); | ||
514 | } | ||
255 | 515 | ||
256 | return aead_geniv_init(tfm); | 516 | return err ?: aead_geniv_init(tfm); |
257 | } | 517 | } |
258 | 518 | ||
259 | static struct crypto_template seqiv_tmpl; | 519 | static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize) |
260 | |||
261 | static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) | ||
262 | { | 520 | { |
263 | struct crypto_instance *inst; | 521 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); |
522 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | ||
523 | int err; | ||
264 | 524 | ||
265 | inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0); | 525 | spin_lock_init(&ctx->geniv.lock); |
266 | 526 | ||
267 | if (IS_ERR(inst)) | 527 | crypto_aead_set_reqsize(geniv, reqsize);
528 | |||
529 | err = crypto_get_default_rng(); | ||
530 | if (err) | ||
268 | goto out; | 531 | goto out; |
269 | 532 | ||
270 | if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) { | 533 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, |
271 | skcipher_geniv_free(inst); | 534 | crypto_aead_ivsize(geniv)); |
272 | inst = ERR_PTR(-EINVAL); | 535 | crypto_put_default_rng(); |
536 | if (err) | ||
273 | goto out; | 537 | goto out; |
274 | } | ||
275 | 538 | ||
276 | inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; | 539 | ctx->null = crypto_get_default_null_skcipher(); |
540 | err = PTR_ERR(ctx->null); | ||
541 | if (IS_ERR(ctx->null)) | ||
542 | goto out; | ||
277 | 543 | ||
278 | inst->alg.cra_init = seqiv_init; | 544 | err = aead_geniv_init(tfm); |
279 | inst->alg.cra_exit = skcipher_geniv_exit; | 545 | if (err) |
546 | goto drop_null; | ||
280 | 547 | ||
281 | inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; | 548 | ctx->geniv.child = geniv->child; |
549 | geniv->child = geniv; | ||
282 | 550 | ||
283 | out: | 551 | out: |
284 | return inst; | 552 | return err; |
553 | |||
554 | drop_null: | ||
555 | crypto_put_default_null_skcipher(); | ||
556 | goto out; | ||
557 | } | ||
558 | |||
559 | static int seqiv_aead_init(struct crypto_tfm *tfm) | ||
560 | { | ||
561 | return seqiv_aead_init_common(tfm, sizeof(struct aead_request)); | ||
562 | } | ||
563 | |||
564 | static int seqniv_aead_init(struct crypto_tfm *tfm) | ||
565 | { | ||
566 | return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx)); | ||
567 | } | ||
568 | |||
569 | static void seqiv_aead_exit(struct crypto_tfm *tfm) | ||
570 | { | ||
571 | struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
572 | |||
573 | crypto_free_aead(ctx->geniv.child); | ||
574 | crypto_put_default_null_skcipher(); | ||
285 | } | 575 | } |
286 | 576 | ||
287 | static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) | 577 | static int seqiv_ablkcipher_create(struct crypto_template *tmpl, |
578 | struct rtattr **tb) | ||
288 | { | 579 | { |
289 | struct crypto_instance *inst; | 580 | struct crypto_instance *inst; |
581 | int err; | ||
290 | 582 | ||
291 | inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); | 583 | inst = skcipher_geniv_alloc(tmpl, tb, 0, 0); |
292 | 584 | ||
293 | if (IS_ERR(inst)) | 585 | if (IS_ERR(inst)) |
294 | goto out; | 586 | return PTR_ERR(inst); |
295 | 587 | ||
296 | if (inst->alg.cra_aead.ivsize < sizeof(u64)) { | 588 | err = -EINVAL; |
297 | aead_geniv_free(inst); | 589 | if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) |
298 | inst = ERR_PTR(-EINVAL); | 590 | goto free_inst; |
299 | goto out; | 591 | |
300 | } | 592 | inst->alg.cra_init = seqiv_init; |
593 | inst->alg.cra_exit = skcipher_geniv_exit; | ||
594 | |||
595 | inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; | ||
596 | inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); | ||
597 | |||
598 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | ||
301 | 599 | ||
302 | inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; | 600 | err = crypto_register_instance(tmpl, inst); |
601 | if (err) | ||
602 | goto free_inst; | ||
303 | 603 | ||
304 | inst->alg.cra_init = seqiv_aead_init; | 604 | out: |
605 | return err; | ||
606 | |||
607 | free_inst: | ||
608 | skcipher_geniv_free(inst); | ||
609 | goto out; | ||
610 | } | ||
611 | |||
612 | static int seqiv_old_aead_create(struct crypto_template *tmpl, | ||
613 | struct aead_instance *aead) | ||
614 | { | ||
615 | struct crypto_instance *inst = aead_crypto_instance(aead); | ||
616 | int err = -EINVAL; | ||
617 | |||
618 | if (inst->alg.cra_aead.ivsize < sizeof(u64)) | ||
619 | goto free_inst; | ||
620 | |||
621 | inst->alg.cra_init = seqiv_old_aead_init; | ||
305 | inst->alg.cra_exit = aead_geniv_exit; | 622 | inst->alg.cra_exit = aead_geniv_exit; |
306 | 623 | ||
307 | inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; | 624 | inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; |
625 | inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); | ||
626 | |||
627 | err = crypto_register_instance(tmpl, inst); | ||
628 | if (err) | ||
629 | goto free_inst; | ||
308 | 630 | ||
309 | out: | 631 | out: |
310 | return inst; | 632 | return err; |
633 | |||
634 | free_inst: | ||
635 | aead_geniv_free(aead); | ||
636 | goto out; | ||
311 | } | 637 | } |
312 | 638 | ||
313 | static struct crypto_instance *seqiv_alloc(struct rtattr **tb) | 639 | static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) |
640 | { | ||
641 | struct aead_instance *inst; | ||
642 | struct crypto_aead_spawn *spawn; | ||
643 | struct aead_alg *alg; | ||
644 | int err; | ||
645 | |||
646 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); | ||
647 | |||
648 | if (IS_ERR(inst)) | ||
649 | return PTR_ERR(inst); | ||
650 | |||
651 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | ||
652 | |||
653 | if (inst->alg.base.cra_aead.encrypt) | ||
654 | return seqiv_old_aead_create(tmpl, inst); | ||
655 | |||
656 | spawn = aead_instance_ctx(inst); | ||
657 | alg = crypto_spawn_aead_alg(spawn); | ||
658 | |||
659 | if (alg->base.cra_aead.encrypt) | ||
660 | goto done; | ||
661 | |||
662 | err = -EINVAL; | ||
663 | if (inst->alg.ivsize != sizeof(u64)) | ||
664 | goto free_inst; | ||
665 | |||
666 | inst->alg.encrypt = seqiv_aead_encrypt; | ||
667 | inst->alg.decrypt = seqiv_aead_decrypt; | ||
668 | |||
669 | inst->alg.base.cra_init = seqiv_aead_init; | ||
670 | inst->alg.base.cra_exit = seqiv_aead_exit; | ||
671 | |||
672 | inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); | ||
673 | inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; | ||
674 | |||
675 | done: | ||
676 | err = aead_register_instance(tmpl, inst); | ||
677 | if (err) | ||
678 | goto free_inst; | ||
679 | |||
680 | out: | ||
681 | return err; | ||
682 | |||
683 | free_inst: | ||
684 | aead_geniv_free(inst); | ||
685 | goto out; | ||
686 | } | ||
687 | |||
688 | static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
314 | { | 689 | { |
315 | struct crypto_attr_type *algt; | 690 | struct crypto_attr_type *algt; |
316 | struct crypto_instance *inst; | ||
317 | int err; | 691 | int err; |
318 | 692 | ||
319 | algt = crypto_get_attr_type(tb); | 693 | algt = crypto_get_attr_type(tb); |
320 | if (IS_ERR(algt)) | 694 | if (IS_ERR(algt)) |
321 | return ERR_CAST(algt); | 695 | return PTR_ERR(algt); |
322 | |||
323 | err = crypto_get_default_rng(); | ||
324 | if (err) | ||
325 | return ERR_PTR(err); | ||
326 | 696 | ||
327 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) | 697 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) |
328 | inst = seqiv_ablkcipher_alloc(tb); | 698 | err = seqiv_ablkcipher_create(tmpl, tb); |
329 | else | 699 | else |
330 | inst = seqiv_aead_alloc(tb); | 700 | err = seqiv_aead_create(tmpl, tb); |
331 | 701 | ||
702 | return err; | ||
703 | } | ||
704 | |||
705 | static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
706 | { | ||
707 | struct aead_instance *inst; | ||
708 | struct crypto_aead_spawn *spawn; | ||
709 | struct aead_alg *alg; | ||
710 | int err; | ||
711 | |||
712 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); | ||
713 | err = PTR_ERR(inst); | ||
332 | if (IS_ERR(inst)) | 714 | if (IS_ERR(inst)) |
333 | goto put_rng; | 715 | goto out; |
334 | 716 | ||
335 | inst->alg.cra_alignmask |= __alignof__(u32) - 1; | 717 | spawn = aead_instance_ctx(inst); |
336 | inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); | 718 | alg = crypto_spawn_aead_alg(spawn); |
719 | |||
720 | if (alg->base.cra_aead.encrypt) | ||
721 | goto done; | ||
722 | |||
723 | err = -EINVAL; | ||
724 | if (inst->alg.ivsize != sizeof(u64)) | ||
725 | goto free_inst; | ||
726 | |||
727 | inst->alg.encrypt = seqniv_aead_encrypt; | ||
728 | inst->alg.decrypt = seqniv_aead_decrypt; | ||
729 | |||
730 | inst->alg.base.cra_init = seqniv_aead_init; | ||
731 | inst->alg.base.cra_exit = seqiv_aead_exit; | ||
732 | |||
733 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | ||
734 | inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); | ||
735 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; | ||
736 | |||
737 | done: | ||
738 | err = aead_register_instance(tmpl, inst); | ||
739 | if (err) | ||
740 | goto free_inst; | ||
337 | 741 | ||
338 | out: | 742 | out: |
339 | return inst; | 743 | return err; |
340 | 744 | ||
341 | put_rng: | 745 | free_inst: |
342 | crypto_put_default_rng(); | 746 | aead_geniv_free(inst); |
343 | goto out; | 747 | goto out; |
344 | } | 748 | } |
345 | 749 | ||
@@ -348,24 +752,46 @@ static void seqiv_free(struct crypto_instance *inst) | |||
348 | if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) | 752 | if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) |
349 | skcipher_geniv_free(inst); | 753 | skcipher_geniv_free(inst); |
350 | else | 754 | else |
351 | aead_geniv_free(inst); | 755 | aead_geniv_free(aead_instance(inst)); |
352 | crypto_put_default_rng(); | ||
353 | } | 756 | } |
354 | 757 | ||
355 | static struct crypto_template seqiv_tmpl = { | 758 | static struct crypto_template seqiv_tmpl = { |
356 | .name = "seqiv", | 759 | .name = "seqiv", |
357 | .alloc = seqiv_alloc, | 760 | .create = seqiv_create, |
761 | .free = seqiv_free, | ||
762 | .module = THIS_MODULE, | ||
763 | }; | ||
764 | |||
765 | static struct crypto_template seqniv_tmpl = { | ||
766 | .name = "seqniv", | ||
767 | .create = seqniv_create, | ||
358 | .free = seqiv_free, | 768 | .free = seqiv_free, |
359 | .module = THIS_MODULE, | 769 | .module = THIS_MODULE, |
360 | }; | 770 | }; |
361 | 771 | ||
362 | static int __init seqiv_module_init(void) | 772 | static int __init seqiv_module_init(void) |
363 | { | 773 | { |
364 | return crypto_register_template(&seqiv_tmpl); | 774 | int err; |
775 | |||
776 | err = crypto_register_template(&seqiv_tmpl); | ||
777 | if (err) | ||
778 | goto out; | ||
779 | |||
780 | err = crypto_register_template(&seqniv_tmpl); | ||
781 | if (err) | ||
782 | goto out_undo_niv; | ||
783 | |||
784 | out: | ||
785 | return err; | ||
786 | |||
787 | out_undo_niv: | ||
788 | crypto_unregister_template(&seqiv_tmpl); | ||
789 | goto out; | ||
365 | } | 790 | } |
366 | 791 | ||
367 | static void __exit seqiv_module_exit(void) | 792 | static void __exit seqiv_module_exit(void) |
368 | { | 793 | { |
794 | crypto_unregister_template(&seqniv_tmpl); | ||
369 | crypto_unregister_template(&seqiv_tmpl); | 795 | crypto_unregister_template(&seqiv_tmpl); |
370 | } | 796 | } |
371 | 797 | ||
@@ -375,3 +801,4 @@ module_exit(seqiv_module_exit); | |||
375 | MODULE_LICENSE("GPL"); | 801 | MODULE_LICENSE("GPL"); |
376 | MODULE_DESCRIPTION("Sequence Number IV Generator"); | 802 | MODULE_DESCRIPTION("Sequence Number IV Generator"); |
377 | MODULE_ALIAS_CRYPTO("seqiv"); | 803 | MODULE_ALIAS_CRYPTO("seqiv"); |
804 | MODULE_ALIAS_CRYPTO("seqniv"); | ||
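
To summarise the new encrypt paths: the caller supplies the 64-bit sequence number in req->iv, seqiv whitens it with the per-transform random salt, and the result is written into the destination ahead of the ciphertext; seqniv additionally swaps the IV and the ESP header around the inner operation so the ICV covers the original layout. The IV derivation itself reduces to the following (a sketch, assuming the 8-byte IV enforced above):

static void seqiv_derive_iv_sketch(u8 iv[8], const u8 salt[8])
{
        /* iv holds the caller's sequence number on entry and the
         * on-the-wire IV on exit; crypto_xor() is the same helper
         * the code above uses. */
        crypto_xor(iv, salt, 8);
        /* ... then stored at offset req->assoclen of the destination */
}
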
diff --git a/crypto/shash.c b/crypto/shash.c index 47c713954bf3..ecb1e3d39bf0 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -520,11 +520,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) | |||
520 | return 0; | 520 | return 0; |
521 | } | 521 | } |
522 | 522 | ||
523 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg) | ||
524 | { | ||
525 | return alg->cra_ctxsize; | ||
526 | } | ||
527 | |||
528 | #ifdef CONFIG_NET | 523 | #ifdef CONFIG_NET |
529 | static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) | 524 | static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) |
530 | { | 525 | { |
@@ -564,7 +559,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) | |||
564 | 559 | ||
565 | static const struct crypto_type crypto_shash_type = { | 560 | static const struct crypto_type crypto_shash_type = { |
566 | .ctxsize = crypto_shash_ctxsize, | 561 | .ctxsize = crypto_shash_ctxsize, |
567 | .extsize = crypto_shash_extsize, | 562 | .extsize = crypto_alg_extsize, |
568 | .init = crypto_init_shash_ops, | 563 | .init = crypto_init_shash_ops, |
569 | .init_tfm = crypto_shash_init_tfm, | 564 | .init_tfm = crypto_shash_init_tfm, |
570 | #ifdef CONFIG_PROC_FS | 565 | #ifdef CONFIG_PROC_FS |
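
The deleted crypto_shash_extsize() returned cra_ctxsize verbatim, so the shash type can point at the shared crypto_alg_extsize() helper added elsewhere in this series. Presumably the common helper amounts to the same thing for types with no extra per-tfm state (a sketch of the deduplicated logic, not the authoritative implementation):

static unsigned int alg_extsize_sketch(struct crypto_alg *alg)
{
        return alg->cra_ctxsize;        /* what the removed hook computed */
}
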
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 1a2800107fc8..9f6f10b498ba 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -22,8 +22,10 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <crypto/aead.h> | ||
25 | #include <crypto/hash.h> | 26 | #include <crypto/hash.h> |
26 | #include <linux/err.h> | 27 | #include <linux/err.h> |
28 | #include <linux/fips.h> | ||
27 | #include <linux/init.h> | 29 | #include <linux/init.h> |
28 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
29 | #include <linux/module.h> | 31 | #include <linux/module.h> |
@@ -34,7 +36,6 @@ | |||
34 | #include <linux/timex.h> | 36 | #include <linux/timex.h> |
35 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
36 | #include "tcrypt.h" | 38 | #include "tcrypt.h" |
37 | #include "internal.h" | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * Need slab memory for testing (size in number of pages). | 41 | * Need slab memory for testing (size in number of pages). |
@@ -257,12 +258,12 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], | |||
257 | rem = buflen % PAGE_SIZE; | 258 | rem = buflen % PAGE_SIZE; |
258 | } | 259 | } |
259 | 260 | ||
260 | sg_init_table(sg, np); | 261 | sg_init_table(sg, np + 1); |
261 | np--; | 262 | np--; |
262 | for (k = 0; k < np; k++) | 263 | for (k = 0; k < np; k++) |
263 | sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); | 264 | sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); |
264 | 265 | ||
265 | sg_set_buf(&sg[k], xbuf[k], rem); | 266 | sg_set_buf(&sg[k + 1], xbuf[k], rem); |
266 | } | 267 | } |
267 | 268 | ||
268 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, | 269 | static void test_aead_speed(const char *algo, int enc, unsigned int secs, |
@@ -276,7 +277,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
276 | const char *key; | 277 | const char *key; |
277 | struct aead_request *req; | 278 | struct aead_request *req; |
278 | struct scatterlist *sg; | 279 | struct scatterlist *sg; |
279 | struct scatterlist *asg; | ||
280 | struct scatterlist *sgout; | 280 | struct scatterlist *sgout; |
281 | const char *e; | 281 | const char *e; |
282 | void *assoc; | 282 | void *assoc; |
@@ -308,11 +308,10 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
308 | if (testmgr_alloc_buf(xoutbuf)) | 308 | if (testmgr_alloc_buf(xoutbuf)) |
309 | goto out_nooutbuf; | 309 | goto out_nooutbuf; |
310 | 310 | ||
311 | sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL); | 311 | sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL); |
312 | if (!sg) | 312 | if (!sg) |
313 | goto out_nosg; | 313 | goto out_nosg; |
314 | asg = &sg[8]; | 314 | sgout = &sg[9]; |
315 | sgout = &asg[8]; | ||
316 | 315 | ||
317 | tfm = crypto_alloc_aead(algo, 0, 0); | 316 | tfm = crypto_alloc_aead(algo, 0, 0); |
318 | 317 | ||
@@ -338,7 +337,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
338 | do { | 337 | do { |
339 | assoc = axbuf[0]; | 338 | assoc = axbuf[0]; |
340 | memset(assoc, 0xff, aad_size); | 339 | memset(assoc, 0xff, aad_size); |
341 | sg_init_one(&asg[0], assoc, aad_size); | ||
342 | 340 | ||
343 | if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { | 341 | if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { |
344 | pr_err("template (%u) too big for tvmem (%lu)\n", | 342 | pr_err("template (%u) too big for tvmem (%lu)\n", |
@@ -374,14 +372,17 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
374 | goto out; | 372 | goto out; |
375 | } | 373 | } |
376 | 374 | ||
377 | sg_init_aead(&sg[0], xbuf, | 375 | sg_init_aead(sg, xbuf, |
378 | *b_size + (enc ? authsize : 0)); | 376 | *b_size + (enc ? authsize : 0)); |
379 | 377 | ||
380 | sg_init_aead(&sgout[0], xoutbuf, | 378 | sg_init_aead(sgout, xoutbuf, |
381 | *b_size + (enc ? authsize : 0)); | 379 | *b_size + (enc ? authsize : 0)); |
382 | 380 | ||
381 | sg_set_buf(&sg[0], assoc, aad_size); | ||
382 | sg_set_buf(&sgout[0], assoc, aad_size); | ||
383 | |||
383 | aead_request_set_crypt(req, sg, sgout, *b_size, iv); | 384 | aead_request_set_crypt(req, sg, sgout, *b_size, iv); |
384 | aead_request_set_assoc(req, asg, aad_size); | 385 | aead_request_set_ad(req, aad_size); |
385 | 386 | ||
386 | if (secs) | 387 | if (secs) |
387 | ret = test_aead_jiffies(req, enc, *b_size, | 388 | ret = test_aead_jiffies(req, enc, *b_size, |
@@ -808,7 +809,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen, | |||
808 | 809 | ||
809 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | 810 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
810 | time_before(jiffies, end); bcount++) { | 811 | time_before(jiffies, end); bcount++) { |
811 | ret = crypto_ahash_init(req); | 812 | ret = do_one_ahash_op(req, crypto_ahash_init(req)); |
812 | if (ret) | 813 | if (ret) |
813 | return ret; | 814 | return ret; |
814 | for (pcount = 0; pcount < blen; pcount += plen) { | 815 | for (pcount = 0; pcount < blen; pcount += plen) { |
@@ -877,7 +878,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen, | |||
877 | 878 | ||
878 | /* Warm-up run. */ | 879 | /* Warm-up run. */ |
879 | for (i = 0; i < 4; i++) { | 880 | for (i = 0; i < 4; i++) { |
880 | ret = crypto_ahash_init(req); | 881 | ret = do_one_ahash_op(req, crypto_ahash_init(req)); |
881 | if (ret) | 882 | if (ret) |
882 | goto out; | 883 | goto out; |
883 | for (pcount = 0; pcount < blen; pcount += plen) { | 884 | for (pcount = 0; pcount < blen; pcount += plen) { |
@@ -896,7 +897,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen, | |||
896 | 897 | ||
897 | start = get_cycles(); | 898 | start = get_cycles(); |
898 | 899 | ||
899 | ret = crypto_ahash_init(req); | 900 | ret = do_one_ahash_op(req, crypto_ahash_init(req)); |
900 | if (ret) | 901 | if (ret) |
901 | goto out; | 902 | goto out; |
902 | for (pcount = 0; pcount < blen; pcount += plen) { | 903 | for (pcount = 0; pcount < blen; pcount += plen) { |
@@ -1761,6 +1762,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
1761 | NULL, 0, 16, 8, aead_speed_template_20); | 1762 | NULL, 0, 16, 8, aead_speed_template_20); |
1762 | break; | 1763 | break; |
1763 | 1764 | ||
1765 | case 212: | ||
1766 | test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, | ||
1767 | NULL, 0, 16, 8, aead_speed_template_19); | ||
1768 | break; | ||
1769 | |||
1764 | case 300: | 1770 | case 300: |
1765 | if (alg) { | 1771 | if (alg) { |
1766 | test_hash_speed(alg, sec, generic_hash_speed_template); | 1772 | test_hash_speed(alg, sec, generic_hash_speed_template); |
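
Two things changed in the speed test above: the separate asg list is gone, and slot 0 of each scatterlist now carries the associated data, matching the new single-SG AEAD convention. A trimmed fragment of the resulting layout (names are the locals of test_aead_speed above; one payload page assumed):

        sg_init_table(sg, 2);                    /* 1 AD slot + 1 payload entry */
        sg_set_buf(&sg[0], assoc, aad_size);     /* associated data first */
        sg_set_buf(&sg[1], xbuf[0], PAGE_SIZE);  /* then plain/ciphertext */
        aead_request_set_crypt(req, sg, sg, PAGE_SIZE, iv);
        aead_request_set_ad(req, aad_size);      /* AD is a length, not a list */

The new case 212 is driven like the other speed modes, e.g. "modprobe tcrypt mode=212 sec=1" (parameter values illustrative).
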
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 6c7e21a09f78..6cc1b856871b 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h | |||
@@ -65,6 +65,7 @@ static u8 speed_template_32_64[] = {32, 64, 0}; | |||
65 | /* | 65 | /* |
66 | * AEAD speed tests | 66 | * AEAD speed tests |
67 | */ | 67 | */ |
68 | static u8 aead_speed_template_19[] = {19, 0}; | ||
68 | static u8 aead_speed_template_20[] = {20, 0}; | 69 | static u8 aead_speed_template_20[] = {20, 0}; |
69 | 70 | ||
70 | /* | 71 | /* |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f9bce3d7ee7f..975e1eac3e2d 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -20,14 +20,17 @@ | |||
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <crypto/aead.h> | ||
23 | #include <crypto/hash.h> | 24 | #include <crypto/hash.h> |
24 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/fips.h> | ||
25 | #include <linux/module.h> | 27 | #include <linux/module.h> |
26 | #include <linux/scatterlist.h> | 28 | #include <linux/scatterlist.h> |
27 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
28 | #include <linux/string.h> | 30 | #include <linux/string.h> |
29 | #include <crypto/rng.h> | 31 | #include <crypto/rng.h> |
30 | #include <crypto/drbg.h> | 32 | #include <crypto/drbg.h> |
33 | #include <crypto/akcipher.h> | ||
31 | 34 | ||
32 | #include "internal.h" | 35 | #include "internal.h" |
33 | 36 | ||
@@ -114,6 +117,11 @@ struct drbg_test_suite { | |||
114 | unsigned int count; | 117 | unsigned int count; |
115 | }; | 118 | }; |
116 | 119 | ||
120 | struct akcipher_test_suite { | ||
121 | struct akcipher_testvec *vecs; | ||
122 | unsigned int count; | ||
123 | }; | ||
124 | |||
117 | struct alg_test_desc { | 125 | struct alg_test_desc { |
118 | const char *alg; | 126 | const char *alg; |
119 | int (*test)(const struct alg_test_desc *desc, const char *driver, | 127 | int (*test)(const struct alg_test_desc *desc, const char *driver, |
@@ -128,6 +136,7 @@ struct alg_test_desc { | |||
128 | struct hash_test_suite hash; | 136 | struct hash_test_suite hash; |
129 | struct cprng_test_suite cprng; | 137 | struct cprng_test_suite cprng; |
130 | struct drbg_test_suite drbg; | 138 | struct drbg_test_suite drbg; |
139 | struct akcipher_test_suite akcipher; | ||
131 | } suite; | 140 | } suite; |
132 | }; | 141 | }; |
133 | 142 | ||
@@ -425,7 +434,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
425 | char *key; | 434 | char *key; |
426 | struct aead_request *req; | 435 | struct aead_request *req; |
427 | struct scatterlist *sg; | 436 | struct scatterlist *sg; |
428 | struct scatterlist *asg; | ||
429 | struct scatterlist *sgout; | 437 | struct scatterlist *sgout; |
430 | const char *e, *d; | 438 | const char *e, *d; |
431 | struct tcrypt_result result; | 439 | struct tcrypt_result result; |
@@ -452,11 +460,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
452 | goto out_nooutbuf; | 460 | goto out_nooutbuf; |
453 | 461 | ||
454 | /* avoid "the frame size is larger than 1024 bytes" compiler warning */ | 462 | /* avoid "the frame size is larger than 1024 bytes" compiler warning */ |
455 | sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 3 : 2), GFP_KERNEL); | 463 | sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 4 : 2), GFP_KERNEL); |
456 | if (!sg) | 464 | if (!sg) |
457 | goto out_nosg; | 465 | goto out_nosg; |
458 | asg = &sg[8]; | 466 | sgout = &sg[16]; |
459 | sgout = &asg[8]; | ||
460 | 467 | ||
461 | if (diff_dst) | 468 | if (diff_dst) |
462 | d = "-ddst"; | 469 | d = "-ddst"; |
@@ -535,23 +542,27 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
535 | goto out; | 542 | goto out; |
536 | } | 543 | } |
537 | 544 | ||
545 | k = !!template[i].alen; | ||
546 | sg_init_table(sg, k + 1); | ||
547 | sg_set_buf(&sg[0], assoc, template[i].alen); | ||
548 | sg_set_buf(&sg[k], input, | ||
549 | template[i].ilen + (enc ? authsize : 0)); | ||
550 | output = input; | ||
551 | |||
538 | if (diff_dst) { | 552 | if (diff_dst) { |
553 | sg_init_table(sgout, k + 1); | ||
554 | sg_set_buf(&sgout[0], assoc, template[i].alen); | ||
555 | |||
539 | output = xoutbuf[0]; | 556 | output = xoutbuf[0]; |
540 | output += align_offset; | 557 | output += align_offset; |
541 | sg_init_one(&sg[0], input, template[i].ilen); | 558 | sg_set_buf(&sgout[k], output, |
542 | sg_init_one(&sgout[0], output, template[i].rlen); | 559 | template[i].rlen + (enc ? 0 : authsize)); |
543 | } else { | ||
544 | sg_init_one(&sg[0], input, | ||
545 | template[i].ilen + (enc ? authsize : 0)); | ||
546 | output = input; | ||
547 | } | 560 | } |
548 | 561 | ||
549 | sg_init_one(&asg[0], assoc, template[i].alen); | ||
550 | |||
551 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 562 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
552 | template[i].ilen, iv); | 563 | template[i].ilen, iv); |
553 | 564 | ||
554 | aead_request_set_assoc(req, asg, template[i].alen); | 565 | aead_request_set_ad(req, template[i].alen); |
555 | 566 | ||
556 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | 567 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); |
557 | 568 | ||
@@ -631,9 +642,29 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
631 | authsize = abs(template[i].rlen - template[i].ilen); | 642 | authsize = abs(template[i].rlen - template[i].ilen); |
632 | 643 | ||
633 | ret = -EINVAL; | 644 | ret = -EINVAL; |
634 | sg_init_table(sg, template[i].np); | 645 | sg_init_table(sg, template[i].anp + template[i].np); |
635 | if (diff_dst) | 646 | if (diff_dst) |
636 | sg_init_table(sgout, template[i].np); | 647 | sg_init_table(sgout, template[i].anp + template[i].np); |
648 | |||
649 | ret = -EINVAL; | ||
650 | for (k = 0, temp = 0; k < template[i].anp; k++) { | ||
651 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
652 | template[i].atap[k] > PAGE_SIZE)) | ||
653 | goto out; | ||
654 | sg_set_buf(&sg[k], | ||
655 | memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + | ||
656 | offset_in_page(IDX[k]), | ||
657 | template[i].assoc + temp, | ||
658 | template[i].atap[k]), | ||
659 | template[i].atap[k]); | ||
660 | if (diff_dst) | ||
661 | sg_set_buf(&sgout[k], | ||
662 | axbuf[IDX[k] >> PAGE_SHIFT] + | ||
663 | offset_in_page(IDX[k]), | ||
664 | template[i].atap[k]); | ||
665 | temp += template[i].atap[k]; | ||
666 | } | ||
667 | |||
637 | for (k = 0, temp = 0; k < template[i].np; k++) { | 668 | for (k = 0, temp = 0; k < template[i].np; k++) { |
638 | if (WARN_ON(offset_in_page(IDX[k]) + | 669 | if (WARN_ON(offset_in_page(IDX[k]) + |
639 | template[i].tap[k] > PAGE_SIZE)) | 670 | template[i].tap[k] > PAGE_SIZE)) |
@@ -641,7 +672,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
641 | 672 | ||
642 | q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); | 673 | q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); |
643 | memcpy(q, template[i].input + temp, template[i].tap[k]); | 674 | memcpy(q, template[i].input + temp, template[i].tap[k]); |
644 | sg_set_buf(&sg[k], q, template[i].tap[k]); | 675 | sg_set_buf(&sg[template[i].anp + k], |
676 | q, template[i].tap[k]); | ||
645 | 677 | ||
646 | if (diff_dst) { | 678 | if (diff_dst) { |
647 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + | 679 | q = xoutbuf[IDX[k] >> PAGE_SHIFT] + |
@@ -649,7 +681,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
649 | 681 | ||
650 | memset(q, 0, template[i].tap[k]); | 682 | memset(q, 0, template[i].tap[k]); |
651 | 683 | ||
652 | sg_set_buf(&sgout[k], q, template[i].tap[k]); | 684 | sg_set_buf(&sgout[template[i].anp + k], |
685 | q, template[i].tap[k]); | ||
653 | } | 686 | } |
654 | 687 | ||
655 | n = template[i].tap[k]; | 688 | n = template[i].tap[k]; |
@@ -669,39 +702,24 @@ static int __test_aead(struct crypto_aead *tfm, int enc, | |||
669 | } | 702 | } |
670 | 703 | ||
671 | if (enc) { | 704 | if (enc) { |
672 | if (WARN_ON(sg[k - 1].offset + | 705 | if (WARN_ON(sg[template[i].anp + k - 1].offset + |
673 | sg[k - 1].length + authsize > | 706 | sg[template[i].anp + k - 1].length + |
674 | PAGE_SIZE)) { | 707 | authsize > PAGE_SIZE)) { |
675 | ret = -EINVAL; | 708 | ret = -EINVAL; |
676 | goto out; | 709 | goto out; |
677 | } | 710 | } |
678 | 711 | ||
679 | if (diff_dst) | 712 | if (diff_dst) |
680 | sgout[k - 1].length += authsize; | 713 | sgout[template[i].anp + k - 1].length += |
681 | else | 714 | authsize; |
682 | sg[k - 1].length += authsize; | 715 | sg[template[i].anp + k - 1].length += authsize; |
683 | } | ||
684 | |||
685 | sg_init_table(asg, template[i].anp); | ||
686 | ret = -EINVAL; | ||
687 | for (k = 0, temp = 0; k < template[i].anp; k++) { | ||
688 | if (WARN_ON(offset_in_page(IDX[k]) + | ||
689 | template[i].atap[k] > PAGE_SIZE)) | ||
690 | goto out; | ||
691 | sg_set_buf(&asg[k], | ||
692 | memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + | ||
693 | offset_in_page(IDX[k]), | ||
694 | template[i].assoc + temp, | ||
695 | template[i].atap[k]), | ||
696 | template[i].atap[k]); | ||
697 | temp += template[i].atap[k]; | ||
698 | } | 716 | } |
699 | 717 | ||
700 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 718 | aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
701 | template[i].ilen, | 719 | template[i].ilen, |
702 | iv); | 720 | iv); |
703 | 721 | ||
704 | aead_request_set_assoc(req, asg, template[i].alen); | 722 | aead_request_set_ad(req, template[i].alen); |
705 | 723 | ||
706 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | 724 | ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); |
707 | 725 | ||
@@ -1814,6 +1832,147 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver, | |||
1814 | 1832 | ||
1815 | } | 1833 | } |
1816 | 1834 | ||
1835 | static int do_test_rsa(struct crypto_akcipher *tfm, | ||
1836 | struct akcipher_testvec *vecs) | ||
1837 | { | ||
1838 | struct akcipher_request *req; | ||
1839 | void *outbuf_enc = NULL; | ||
1840 | void *outbuf_dec = NULL; | ||
1841 | struct tcrypt_result result; | ||
1842 | unsigned int out_len_max, out_len = 0; | ||
1843 | int err = -ENOMEM; | ||
1844 | |||
1845 | req = akcipher_request_alloc(tfm, GFP_KERNEL); | ||
1846 | if (!req) | ||
1847 | return err; | ||
1848 | |||
1849 | init_completion(&result.completion); | ||
1850 | err = crypto_akcipher_setkey(tfm, vecs->key, vecs->key_len); | ||
1851 | if (err) | ||
1852 | goto free_req; | ||
1853 | |||
1854 | akcipher_request_set_crypt(req, vecs->m, outbuf_enc, vecs->m_size, | ||
1855 | out_len); | ||
1856 | /* this encrypt is expected to fail; it sets the required buf len */ | ||
1857 | crypto_akcipher_encrypt(req); | ||
1858 | out_len = req->dst_len; | ||
1859 | if (!out_len) { | ||
1860 | err = -EINVAL; | ||
1861 | goto free_req; | ||
1862 | } | ||
1863 | |||
1864 | out_len_max = out_len; | ||
1865 | err = -ENOMEM; | ||
1866 | outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); | ||
1867 | if (!outbuf_enc) | ||
1868 | goto free_req; | ||
1869 | |||
1870 | akcipher_request_set_crypt(req, vecs->m, outbuf_enc, vecs->m_size, | ||
1871 | out_len); | ||
1872 | akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
1873 | tcrypt_complete, &result); | ||
1874 | |||
1875 | /* Run RSA encrypt - c = m^e mod n;*/ | ||
1876 | err = wait_async_op(&result, crypto_akcipher_encrypt(req)); | ||
1877 | if (err) { | ||
1878 | pr_err("alg: rsa: encrypt test failed. err %d\n", err); | ||
1879 | goto free_all; | ||
1880 | } | ||
1881 | if (out_len != vecs->c_size) { | ||
1882 | pr_err("alg: rsa: encrypt test failed. Invalid output len\n"); | ||
1883 | err = -EINVAL; | ||
1884 | goto free_all; | ||
1885 | } | ||
1886 | /* verify that encrypted message is equal to expected */ | ||
1887 | if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { | ||
1888 | pr_err("alg: rsa: encrypt test failed. Invalid output\n"); | ||
1889 | err = -EINVAL; | ||
1890 | goto free_all; | ||
1891 | } | ||
1892 | /* Don't invoke decrypt for vectors with public key */ | ||
1893 | if (vecs->public_key_vec) { | ||
1894 | err = 0; | ||
1895 | goto free_all; | ||
1896 | } | ||
1897 | outbuf_dec = kzalloc(out_len_max, GFP_KERNEL); | ||
1898 | if (!outbuf_dec) { | ||
1899 | err = -ENOMEM; | ||
1900 | goto free_all; | ||
1901 | } | ||
1902 | init_completion(&result.completion); | ||
1903 | akcipher_request_set_crypt(req, outbuf_enc, outbuf_dec, vecs->c_size, | ||
1904 | out_len); | ||
1905 | |||
1906 | /* Run RSA decrypt - m = c^d mod n;*/ | ||
1907 | err = wait_async_op(&result, crypto_akcipher_decrypt(req)); | ||
1908 | if (err) { | ||
1909 | pr_err("alg: rsa: decrypt test failed. err %d\n", err); | ||
1910 | goto free_all; | ||
1911 | } | ||
1912 | out_len = req->dst_len; | ||
1913 | if (out_len != vecs->m_size) { | ||
1914 | pr_err("alg: rsa: decrypt test failed. Invalid output len\n"); | ||
1915 | err = -EINVAL; | ||
1916 | goto free_all; | ||
1917 | } | ||
1918 | /* verify that decrypted message is equal to the original msg */ | ||
1919 | if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) { | ||
1920 | pr_err("alg: rsa: decrypt test failed. Invalid output\n"); | ||
1921 | err = -EINVAL; | ||
1922 | } | ||
1923 | free_all: | ||
1924 | kfree(outbuf_dec); | ||
1925 | kfree(outbuf_enc); | ||
1926 | free_req: | ||
1927 | akcipher_request_free(req); | ||
1928 | return err; | ||
1929 | } | ||
1930 | |||
1931 | static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs, | ||
1932 | unsigned int tcount) | ||
1933 | { | ||
1934 | int ret, i; | ||
1935 | |||
1936 | for (i = 0; i < tcount; i++) { | ||
1937 | ret = do_test_rsa(tfm, vecs++); | ||
1938 | if (ret) { | ||
1939 | pr_err("alg: rsa: test failed on vector %d, err=%d\n", | ||
1940 | i + 1, ret); | ||
1941 | return ret; | ||
1942 | } | ||
1943 | } | ||
1944 | return 0; | ||
1945 | } | ||
1946 | |||
1947 | static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, | ||
1948 | struct akcipher_testvec *vecs, unsigned int tcount) | ||
1949 | { | ||
1950 | if (strncmp(alg, "rsa", 3) == 0) | ||
1951 | return test_rsa(tfm, vecs, tcount); | ||
1952 | |||
1953 | return 0; | ||
1954 | } | ||
1955 | |||
1956 | static int alg_test_akcipher(const struct alg_test_desc *desc, | ||
1957 | const char *driver, u32 type, u32 mask) | ||
1958 | { | ||
1959 | struct crypto_akcipher *tfm; | ||
1960 | int err = 0; | ||
1961 | |||
1962 | tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | ||
1963 | if (IS_ERR(tfm)) { | ||
1964 | pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", | ||
1965 | driver, PTR_ERR(tfm)); | ||
1966 | return PTR_ERR(tfm); | ||
1967 | } | ||
1968 | if (desc->suite.akcipher.vecs) | ||
1969 | err = test_akcipher(tfm, desc->alg, desc->suite.akcipher.vecs, | ||
1970 | desc->suite.akcipher.count); | ||
1971 | |||
1972 | crypto_free_akcipher(tfm); | ||
1973 | return err; | ||
1974 | } | ||
1975 | |||
1817 | static int alg_test_null(const struct alg_test_desc *desc, | 1976 | static int alg_test_null(const struct alg_test_desc *desc, |
1818 | const char *driver, u32 type, u32 mask) | 1977 | const char *driver, u32 type, u32 mask) |
1819 | { | 1978 | { |
@@ -2297,6 +2456,21 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2297 | } | 2456 | } |
2298 | } | 2457 | } |
2299 | }, { | 2458 | }, { |
2459 | .alg = "chacha20", | ||
2460 | .test = alg_test_skcipher, | ||
2461 | .suite = { | ||
2462 | .cipher = { | ||
2463 | .enc = { | ||
2464 | .vecs = chacha20_enc_tv_template, | ||
2465 | .count = CHACHA20_ENC_TEST_VECTORS | ||
2466 | }, | ||
2467 | .dec = { | ||
2468 | .vecs = chacha20_enc_tv_template, | ||
2469 | .count = CHACHA20_ENC_TEST_VECTORS | ||
2470 | }, | ||
2471 | } | ||
2472 | } | ||
2473 | }, { | ||
2300 | .alg = "cmac(aes)", | 2474 | .alg = "cmac(aes)", |
2301 | .test = alg_test_hash, | 2475 | .test = alg_test_hash, |
2302 | .suite = { | 2476 | .suite = { |
@@ -2318,6 +2492,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2318 | .alg = "compress_null", | 2492 | .alg = "compress_null", |
2319 | .test = alg_test_null, | 2493 | .test = alg_test_null, |
2320 | }, { | 2494 | }, { |
2495 | .alg = "crc32", | ||
2496 | .test = alg_test_hash, | ||
2497 | .suite = { | ||
2498 | .hash = { | ||
2499 | .vecs = crc32_tv_template, | ||
2500 | .count = CRC32_TEST_VECTORS | ||
2501 | } | ||
2502 | } | ||
2503 | }, { | ||
2321 | .alg = "crc32c", | 2504 | .alg = "crc32c", |
2322 | .test = alg_test_crc32c, | 2505 | .test = alg_test_crc32c, |
2323 | .fips_allowed = 1, | 2506 | .fips_allowed = 1, |
@@ -3095,6 +3278,10 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3095 | } | 3278 | } |
3096 | } | 3279 | } |
3097 | }, { | 3280 | }, { |
3281 | .alg = "jitterentropy_rng", | ||
3282 | .fips_allowed = 1, | ||
3283 | .test = alg_test_null, | ||
3284 | }, { | ||
3098 | .alg = "lrw(aes)", | 3285 | .alg = "lrw(aes)", |
3099 | .test = alg_test_skcipher, | 3286 | .test = alg_test_skcipher, |
3100 | .suite = { | 3287 | .suite = { |
@@ -3276,6 +3463,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3276 | } | 3463 | } |
3277 | } | 3464 | } |
3278 | }, { | 3465 | }, { |
3466 | .alg = "poly1305", | ||
3467 | .test = alg_test_hash, | ||
3468 | .suite = { | ||
3469 | .hash = { | ||
3470 | .vecs = poly1305_tv_template, | ||
3471 | .count = POLY1305_TEST_VECTORS | ||
3472 | } | ||
3473 | } | ||
3474 | }, { | ||
3279 | .alg = "rfc3686(ctr(aes))", | 3475 | .alg = "rfc3686(ctr(aes))", |
3280 | .test = alg_test_skcipher, | 3476 | .test = alg_test_skcipher, |
3281 | .fips_allowed = 1, | 3477 | .fips_allowed = 1, |
@@ -3339,6 +3535,36 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3339 | } | 3535 | } |
3340 | } | 3536 | } |
3341 | }, { | 3537 | }, { |
3538 | .alg = "rfc7539(chacha20,poly1305)", | ||
3539 | .test = alg_test_aead, | ||
3540 | .suite = { | ||
3541 | .aead = { | ||
3542 | .enc = { | ||
3543 | .vecs = rfc7539_enc_tv_template, | ||
3544 | .count = RFC7539_ENC_TEST_VECTORS | ||
3545 | }, | ||
3546 | .dec = { | ||
3547 | .vecs = rfc7539_dec_tv_template, | ||
3548 | .count = RFC7539_DEC_TEST_VECTORS | ||
3549 | }, | ||
3550 | } | ||
3551 | } | ||
3552 | }, { | ||
3553 | .alg = "rfc7539esp(chacha20,poly1305)", | ||
3554 | .test = alg_test_aead, | ||
3555 | .suite = { | ||
3556 | .aead = { | ||
3557 | .enc = { | ||
3558 | .vecs = rfc7539esp_enc_tv_template, | ||
3559 | .count = RFC7539ESP_ENC_TEST_VECTORS | ||
3560 | }, | ||
3561 | .dec = { | ||
3562 | .vecs = rfc7539esp_dec_tv_template, | ||
3563 | .count = RFC7539ESP_DEC_TEST_VECTORS | ||
3564 | }, | ||
3565 | } | ||
3566 | } | ||
3567 | }, { | ||
3342 | .alg = "rmd128", | 3568 | .alg = "rmd128", |
3343 | .test = alg_test_hash, | 3569 | .test = alg_test_hash, |
3344 | .suite = { | 3570 | .suite = { |
@@ -3375,6 +3601,16 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3375 | } | 3601 | } |
3376 | } | 3602 | } |
3377 | }, { | 3603 | }, { |
3604 | .alg = "rsa", | ||
3605 | .test = alg_test_akcipher, | ||
3606 | .fips_allowed = 1, | ||
3607 | .suite = { | ||
3608 | .akcipher = { | ||
3609 | .vecs = rsa_tv_template, | ||
3610 | .count = RSA_TEST_VECTORS | ||
3611 | } | ||
3612 | } | ||
3613 | }, { | ||
3378 | .alg = "salsa20", | 3614 | .alg = "salsa20", |
3379 | .test = alg_test_skcipher, | 3615 | .test = alg_test_skcipher, |
3380 | .suite = { | 3616 | .suite = { |
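
For orientation, the calling convention do_test_rsa() exercises above: at this stage the akcipher request takes plain buffers, and a first encrypt against an empty destination is expected to fail while leaving the needed size in req->dst_len. A condensed caller-side sketch (error handling and the async completion wait are omitted, so this is not a drop-in test):

static void *rsa_encrypt_sketch(const void *key, unsigned int key_len,
                                void *msg, unsigned int msg_len)
{
        struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
        struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
        void *out;

        crypto_akcipher_setkey(tfm, key, key_len);
        akcipher_request_set_crypt(req, msg, NULL, msg_len, 0);
        crypto_akcipher_encrypt(req);             /* probe: fails ...      */
        out = kzalloc(req->dst_len, GFP_KERNEL);  /* ... but sets dst_len  */
        akcipher_request_set_crypt(req, msg, out, msg_len, req->dst_len);
        crypto_akcipher_encrypt(req);             /* the real operation    */
        akcipher_request_free(req);
        crypto_free_akcipher(tfm);
        return out;
}
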
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 62e2485bb428..868edf117041 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -46,6 +46,24 @@ struct hash_testvec { | |||
46 | unsigned char ksize; | 46 | unsigned char ksize; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /* | ||
50 | * cipher_testvec: structure to describe a cipher test | ||
51 | * @key: A pointer to a key used by the test | ||
52 | * @klen: The length of @key | ||
53 | * @iv: A pointer to the IV used by the test | ||
54 | * @input: A pointer to data used as input | ||
55 | * @ilen: The length of data in @input | ||
56 | * @result: A pointer to what the test needs to produce | ||
57 | * @rlen: The length of data in @result | ||
58 | * @fail: If set to one, the test needs to fail | ||
59 | * @wk: Does the test need CRYPTO_TFM_REQ_WEAK_KEY | ||
60 | * (e.g. the test needs to fail due to a weak key) | ||
61 | * @np: number of SG entries to distribute the data in (from 1 to MAX_TAP) | ||
62 | * @tap: How to distribute the data in @np SGs | ||
63 | * @also_non_np: if set to 1, the test will also be done without | ||
64 | * splitting the data in @np SGs | ||
65 | */ | ||
66 | |||
49 | struct cipher_testvec { | 67 | struct cipher_testvec { |
50 | char *key; | 68 | char *key; |
51 | char *iv; | 69 | char *iv; |
@@ -54,7 +72,7 @@ struct cipher_testvec { | |||
54 | unsigned short tap[MAX_TAP]; | 72 | unsigned short tap[MAX_TAP]; |
55 | int np; | 73 | int np; |
56 | unsigned char also_non_np; | 74 | unsigned char also_non_np; |
57 | unsigned char fail; | 75 | bool fail; |
58 | unsigned char wk; /* weak key flag */ | 76 | unsigned char wk; /* weak key flag */ |
59 | unsigned char klen; | 77 | unsigned char klen; |
60 | unsigned short ilen; | 78 | unsigned short ilen; |
@@ -71,7 +89,7 @@ struct aead_testvec { | |||
71 | unsigned char atap[MAX_TAP]; | 89 | unsigned char atap[MAX_TAP]; |
72 | int np; | 90 | int np; |
73 | int anp; | 91 | int anp; |
74 | unsigned char fail; | 92 | bool fail; |
75 | unsigned char novrfy; /* ccm dec verification failure expected */ | 93 | unsigned char novrfy; /* ccm dec verification failure expected */ |
76 | unsigned char wk; /* weak key flag */ | 94 | unsigned char wk; /* weak key flag */ |
77 | unsigned char klen; | 95 | unsigned char klen; |
@@ -107,9 +125,196 @@ struct drbg_testvec { | |||
107 | size_t expectedlen; | 125 | size_t expectedlen; |
108 | }; | 126 | }; |
109 | 127 | ||
128 | struct akcipher_testvec { | ||
129 | unsigned char *key; | ||
130 | unsigned char *m; | ||
131 | unsigned char *c; | ||
132 | unsigned int key_len; | ||
133 | unsigned int m_size; | ||
134 | unsigned int c_size; | ||
135 | bool public_key_vec; | ||
136 | }; | ||
137 | |||
110 | static char zeroed_string[48]; | 138 | static char zeroed_string[48]; |
111 | 139 | ||
112 | /* | 140 | /* |
141 | * RSA test vectors. Borrowed from OpenSSL. | ||
142 | */ | ||
143 | #ifdef CONFIG_CRYPTO_FIPS | ||
144 | #define RSA_TEST_VECTORS 2 | ||
145 | #else | ||
146 | #define RSA_TEST_VECTORS 4 | ||
147 | #endif | ||
148 | static struct akcipher_testvec rsa_tv_template[] = { | ||
149 | { | ||
150 | #ifndef CONFIG_CRYPTO_FIPS | ||
151 | .key = | ||
152 | "\x30\x81\x88" /* sequence of 136 bytes */ | ||
153 | "\x02\x41" /* modulus - integer of 65 bytes */ | ||
154 | "\x00\xAA\x36\xAB\xCE\x88\xAC\xFD\xFF\x55\x52\x3C\x7F\xC4\x52\x3F" | ||
155 | "\x90\xEF\xA0\x0D\xF3\x77\x4A\x25\x9F\x2E\x62\xB4\xC5\xD9\x9C\xB5" | ||
156 | "\xAD\xB3\x00\xA0\x28\x5E\x53\x01\x93\x0E\x0C\x70\xFB\x68\x76\x93" | ||
157 | "\x9C\xE6\x16\xCE\x62\x4A\x11\xE0\x08\x6D\x34\x1E\xBC\xAC\xA0\xA1" | ||
158 | "\xF5" | ||
159 | "\x02\x01\x11" /* public key - integer of 1 byte */ | ||
160 | "\x02\x40" /* private key - integer of 64 bytes */ | ||
161 | "\x0A\x03\x37\x48\x62\x64\x87\x69\x5F\x5F\x30\xBC\x38\xB9\x8B\x44" | ||
162 | "\xC2\xCD\x2D\xFF\x43\x40\x98\xCD\x20\xD8\xA1\x38\xD0\x90\xBF\x64" | ||
163 | "\x79\x7C\x3F\xA7\xA2\xCD\xCB\x3C\xD1\xE0\xBD\xBA\x26\x54\xB4\xF9" | ||
164 | "\xDF\x8E\x8A\xE5\x9D\x73\x3D\x9F\x33\xB3\x01\x62\x4A\xFD\x1D\x51", | ||
165 | .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", | ||
166 | .c = | ||
167 | "\x63\x1c\xcd\x7b\xe1\x7e\xe4\xde\xc9\xa8\x89\xa1\x74\xcb\x3c\x63" | ||
168 | "\x7d\x24\xec\x83\xc3\x15\xe4\x7f\x73\x05\x34\xd1\xec\x22\xbb\x8a" | ||
169 | "\x5e\x32\x39\x6d\xc1\x1d\x7d\x50\x3b\x9f\x7a\xad\xf0\x2e\x25\x53" | ||
170 | "\x9f\x6e\xbd\x4c\x55\x84\x0c\x9b\xcf\x1a\x4b\x51\x1e\x9e\x0c\x06", | ||
171 | .key_len = 139, | ||
172 | .m_size = 8, | ||
173 | .c_size = 64, | ||
174 | }, { | ||
175 | .key = | ||
176 | "\x30\x82\x01\x0B" /* sequence of 267 bytes */ | ||
177 | "\x02\x81\x81" /* modulus - integer of 129 bytes */ | ||
178 | "\x00\xBB\xF8\x2F\x09\x06\x82\xCE\x9C\x23\x38\xAC\x2B\x9D\xA8\x71" | ||
179 | "\xF7\x36\x8D\x07\xEE\xD4\x10\x43\xA4\x40\xD6\xB6\xF0\x74\x54\xF5" | ||
180 | "\x1F\xB8\xDF\xBA\xAF\x03\x5C\x02\xAB\x61\xEA\x48\xCE\xEB\x6F\xCD" | ||
181 | "\x48\x76\xED\x52\x0D\x60\xE1\xEC\x46\x19\x71\x9D\x8A\x5B\x8B\x80" | ||
182 | "\x7F\xAF\xB8\xE0\xA3\xDF\xC7\x37\x72\x3E\xE6\xB4\xB7\xD9\x3A\x25" | ||
183 | "\x84\xEE\x6A\x64\x9D\x06\x09\x53\x74\x88\x34\xB2\x45\x45\x98\x39" | ||
184 | "\x4E\xE0\xAA\xB1\x2D\x7B\x61\xA5\x1F\x52\x7A\x9A\x41\xF6\xC1\x68" | ||
185 | "\x7F\xE2\x53\x72\x98\xCA\x2A\x8F\x59\x46\xF8\xE5\xFD\x09\x1D\xBD" | ||
186 | "\xCB" | ||
187 | "\x02\x01\x11" /* public key - integer of 1 byte */ | ||
188 | "\x02\x81\x81" /* private key - integer of 129 bytes */ | ||
189 | "\x00\xA5\xDA\xFC\x53\x41\xFA\xF2\x89\xC4\xB9\x88\xDB\x30\xC1\xCD" | ||
190 | "\xF8\x3F\x31\x25\x1E\x06\x68\xB4\x27\x84\x81\x38\x01\x57\x96\x41" | ||
191 | "\xB2\x94\x10\xB3\xC7\x99\x8D\x6B\xC4\x65\x74\x5E\x5C\x39\x26\x69" | ||
192 | "\xD6\x87\x0D\xA2\xC0\x82\xA9\x39\xE3\x7F\xDC\xB8\x2E\xC9\x3E\xDA" | ||
193 | "\xC9\x7F\xF3\xAD\x59\x50\xAC\xCF\xBC\x11\x1C\x76\xF1\xA9\x52\x94" | ||
194 | "\x44\xE5\x6A\xAF\x68\xC5\x6C\x09\x2C\xD3\x8D\xC3\xBE\xF5\xD2\x0A" | ||
195 | "\x93\x99\x26\xED\x4F\x74\xA1\x3E\xDD\xFB\xE1\xA1\xCE\xCC\x48\x94" | ||
196 | "\xAF\x94\x28\xC2\xB7\xB8\x88\x3F\xE4\x46\x3A\x4B\xC8\x5B\x1C\xB3" | ||
197 | "\xC1", | ||
198 | .key_len = 271, | ||
199 | .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", | ||
200 | .c = | ||
201 | "\x74\x1b\x55\xac\x47\xb5\x08\x0a\x6e\x2b\x2d\xf7\x94\xb8\x8a\x95" | ||
202 | "\xed\xa3\x6b\xc9\x29\xee\xb2\x2c\x80\xc3\x39\x3b\x8c\x62\x45\x72" | ||
203 | "\xc2\x7f\x74\x81\x91\x68\x44\x48\x5a\xdc\xa0\x7e\xa7\x0b\x05\x7f" | ||
204 | "\x0e\xa0\x6c\xe5\x8f\x19\x4d\xce\x98\x47\x5f\xbd\x5f\xfe\xe5\x34" | ||
205 | "\x59\x89\xaf\xf0\xba\x44\xd7\xf1\x1a\x50\x72\xef\x5e\x4a\xb6\xb7" | ||
206 | "\x54\x34\xd1\xc4\x83\x09\xdf\x0f\x91\x5f\x7d\x91\x70\x2f\xd4\x13" | ||
207 | "\xcc\x5e\xa4\x6c\xc3\x4d\x28\xef\xda\xaf\xec\x14\x92\xfc\xa3\x75" | ||
208 | "\x13\xb4\xc1\xa1\x11\xfc\x40\x2f\x4c\x9d\xdf\x16\x76\x11\x20\x6b", | ||
209 | .m_size = 8, | ||
210 | .c_size = 128, | ||
211 | }, { | ||
212 | #endif | ||
213 | .key = | ||
214 | "\x30\x82\x02\x0D" /* sequence of 525 bytes */ | ||
215 | "\x02\x82\x01\x00" /* modulus - integer of 256 bytes */ | ||
216 | "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" | ||
217 | "\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA" | ||
218 | "\xC6\x67\xFF\x1D\x1E\x3C\x1D\xC1\xB5\x5F\x6C\xC0\xB2\x07\x3A\x6D" | ||
219 | "\x41\xE4\x25\x99\xAC\xFC\xD2\x0F\x02\xD3\xD1\x54\x06\x1A\x51\x77" | ||
220 | "\xBD\xB6\xBF\xEA\xA7\x5C\x06\xA9\x5D\x69\x84\x45\xD7\xF5\x05\xBA" | ||
221 | "\x47\xF0\x1B\xD7\x2B\x24\xEC\xCB\x9B\x1B\x10\x8D\x81\xA0\xBE\xB1" | ||
222 | "\x8C\x33\xE4\x36\xB8\x43\xEB\x19\x2A\x81\x8D\xDE\x81\x0A\x99\x48" | ||
223 | "\xB6\xF6\xBC\xCD\x49\x34\x3A\x8F\x26\x94\xE3\x28\x82\x1A\x7C\x8F" | ||
224 | "\x59\x9F\x45\xE8\x5D\x1A\x45\x76\x04\x56\x05\xA1\xD0\x1B\x8C\x77" | ||
225 | "\x6D\xAF\x53\xFA\x71\xE2\x67\xE0\x9A\xFE\x03\xA9\x85\xD2\xC9\xAA" | ||
226 | "\xBA\x2A\xBC\xF4\xA0\x08\xF5\x13\x98\x13\x5D\xF0\xD9\x33\x34\x2A" | ||
227 | "\x61\xC3\x89\x55\xF0\xAE\x1A\x9C\x22\xEE\x19\x05\x8D\x32\xFE\xEC" | ||
228 | "\x9C\x84\xBA\xB7\xF9\x6C\x3A\x4F\x07\xFC\x45\xEB\x12\xE5\x7B\xFD" | ||
229 | "\x55\xE6\x29\x69\xD1\xC2\xE8\xB9\x78\x59\xF6\x79\x10\xC6\x4E\xEB" | ||
230 | "\x6A\x5E\xB9\x9A\xC7\xC4\x5B\x63\xDA\xA3\x3F\x5E\x92\x7A\x81\x5E" | ||
231 | "\xD6\xB0\xE2\x62\x8F\x74\x26\xC2\x0C\xD3\x9A\x17\x47\xE6\x8E\xAB" | ||
232 | "\x02\x03\x01\x00\x01" /* public key - integer of 3 bytes */ | ||
233 | "\x02\x82\x01\x00" /* private key - integer of 256 bytes */ | ||
234 | "\x52\x41\xF4\xDA\x7B\xB7\x59\x55\xCA\xD4\x2F\x0F\x3A\xCB\xA4\x0D" | ||
235 | "\x93\x6C\xCC\x9D\xC1\xB2\xFB\xFD\xAE\x40\x31\xAC\x69\x52\x21\x92" | ||
236 | "\xB3\x27\xDF\xEA\xEE\x2C\x82\xBB\xF7\x40\x32\xD5\x14\xC4\x94\x12" | ||
237 | "\xEC\xB8\x1F\xCA\x59\xE3\xC1\x78\xF3\x85\xD8\x47\xA5\xD7\x02\x1A" | ||
238 | "\x65\x79\x97\x0D\x24\xF4\xF0\x67\x6E\x75\x2D\xBF\x10\x3D\xA8\x7D" | ||
239 | "\xEF\x7F\x60\xE4\xE6\x05\x82\x89\x5D\xDF\xC6\xD2\x6C\x07\x91\x33" | ||
240 | "\x98\x42\xF0\x02\x00\x25\x38\xC5\x85\x69\x8A\x7D\x2F\x95\x6C\x43" | ||
241 | "\x9A\xB8\x81\xE2\xD0\x07\x35\xAA\x05\x41\xC9\x1E\xAF\xE4\x04\x3B" | ||
242 | "\x19\xB8\x73\xA2\xAC\x4B\x1E\x66\x48\xD8\x72\x1F\xAC\xF6\xCB\xBC" | ||
243 | "\x90\x09\xCA\xEC\x0C\xDC\xF9\x2C\xD7\xEB\xAE\xA3\xA4\x47\xD7\x33" | ||
244 | "\x2F\x8A\xCA\xBC\x5E\xF0\x77\xE4\x97\x98\x97\xC7\x10\x91\x7D\x2A" | ||
245 | "\xA6\xFF\x46\x83\x97\xDE\xE9\xE2\x17\x03\x06\x14\xE2\xD7\xB1\x1D" | ||
246 | "\x77\xAF\x51\x27\x5B\x5E\x69\xB8\x81\xE6\x11\xC5\x43\x23\x81\x04" | ||
247 | "\x62\xFF\xE9\x46\xB8\xD8\x44\xDB\xA5\xCC\x31\x54\x34\xCE\x3E\x82" | ||
248 | "\xD6\xBF\x7A\x0B\x64\x21\x6D\x88\x7E\x5B\x45\x12\x1E\x63\x8D\x49" | ||
249 | "\xA7\x1D\xD9\x1E\x06\xCD\xE8\xBA\x2C\x8C\x69\x32\xEA\xBE\x60\x71", | ||
250 | .key_len = 529, | ||
251 | .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", | ||
252 | .c = | ||
253 | "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe" | ||
254 | "\xcf\x49\xbc\x48\xd3\x6c\x8f\x0a\x0e\xc1\x73\xbd\x7b\x55\x79\x36" | ||
255 | "\x0e\xa1\x87\x88\xb9\x2c\x90\xa6\x53\x5e\xe9\xef\xc4\xe2\x4d\xdd" | ||
256 | "\xf7\xa6\x69\x82\x3f\x56\xa4\x7b\xfb\x62\xe0\xae\xb8\xd3\x04\xb3" | ||
257 | "\xac\x5a\x15\x2a\xe3\x19\x9b\x03\x9a\x0b\x41\xda\x64\xec\x0a\x69" | ||
258 | "\xfc\xf2\x10\x92\xf3\xc1\xbf\x84\x7f\xfd\x2c\xae\xc8\xb5\xf6\x41" | ||
259 | "\x70\xc5\x47\x03\x8a\xf8\xff\x6f\x3f\xd2\x6f\x09\xb4\x22\xf3\x30" | ||
260 | "\xbe\xa9\x85\xcb\x9c\x8d\xf9\x8f\xeb\x32\x91\xa2\x25\x84\x8f\xf5" | ||
261 | "\xdc\xc7\x06\x9c\x2d\xe5\x11\x2c\x09\x09\x87\x09\xa9\xf6\x33\x73" | ||
262 | "\x90\xf1\x60\xf2\x65\xdd\x30\xa5\x66\xce\x62\x7b\xd0\xf8\x2d\x3d" | ||
263 | "\x19\x82\x77\xe3\x0a\x5f\x75\x2f\x8e\xb1\xe5\xe8\x91\x35\x1b\x3b" | ||
264 | "\x33\xb7\x66\x92\xd1\xf2\x8e\x6f\xe5\x75\x0c\xad\x36\xfb\x4e\xd0" | ||
265 | "\x66\x61\xbd\x49\xfe\xf4\x1a\xa2\x2b\x49\xfe\x03\x4c\x74\x47\x8d" | ||
266 | "\x9a\x66\xb2\x49\x46\x4d\x77\xea\x33\x4d\x6b\x3c\xb4\x49\x4a\xc6" | ||
267 | "\x7d\x3d\xb5\xb9\x56\x41\x15\x67\x0f\x94\x3c\x93\x65\x27\xe0\x21" | ||
268 | "\x5d\x59\xc3\x62\xd5\xa6\xda\x38\x26\x22\x5e\x34\x1c\x94\xaf\x98", | ||
269 | .m_size = 8, | ||
270 | .c_size = 256, | ||
271 | }, { | ||
272 | .key = | ||
273 | "\x30\x82\x01\x09" /* sequence of 265 bytes */ | ||
274 | "\x02\x82\x01\x00" /* modulus - integer of 256 bytes */ | ||
275 | "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" | ||
276 | "\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA" | ||
277 | "\xC6\x67\xFF\x1D\x1E\x3C\x1D\xC1\xB5\x5F\x6C\xC0\xB2\x07\x3A\x6D" | ||
278 | "\x41\xE4\x25\x99\xAC\xFC\xD2\x0F\x02\xD3\xD1\x54\x06\x1A\x51\x77" | ||
279 | "\xBD\xB6\xBF\xEA\xA7\x5C\x06\xA9\x5D\x69\x84\x45\xD7\xF5\x05\xBA" | ||
280 | "\x47\xF0\x1B\xD7\x2B\x24\xEC\xCB\x9B\x1B\x10\x8D\x81\xA0\xBE\xB1" | ||
281 | "\x8C\x33\xE4\x36\xB8\x43\xEB\x19\x2A\x81\x8D\xDE\x81\x0A\x99\x48" | ||
282 | "\xB6\xF6\xBC\xCD\x49\x34\x3A\x8F\x26\x94\xE3\x28\x82\x1A\x7C\x8F" | ||
283 | "\x59\x9F\x45\xE8\x5D\x1A\x45\x76\x04\x56\x05\xA1\xD0\x1B\x8C\x77" | ||
284 | "\x6D\xAF\x53\xFA\x71\xE2\x67\xE0\x9A\xFE\x03\xA9\x85\xD2\xC9\xAA" | ||
285 | "\xBA\x2A\xBC\xF4\xA0\x08\xF5\x13\x98\x13\x5D\xF0\xD9\x33\x34\x2A" | ||
286 | "\x61\xC3\x89\x55\xF0\xAE\x1A\x9C\x22\xEE\x19\x05\x8D\x32\xFE\xEC" | ||
287 | "\x9C\x84\xBA\xB7\xF9\x6C\x3A\x4F\x07\xFC\x45\xEB\x12\xE5\x7B\xFD" | ||
288 | "\x55\xE6\x29\x69\xD1\xC2\xE8\xB9\x78\x59\xF6\x79\x10\xC6\x4E\xEB" | ||
289 | "\x6A\x5E\xB9\x9A\xC7\xC4\x5B\x63\xDA\xA3\x3F\x5E\x92\x7A\x81\x5E" | ||
290 | "\xD6\xB0\xE2\x62\x8F\x74\x26\xC2\x0C\xD3\x9A\x17\x47\xE6\x8E\xAB" | ||
291 | "\x02\x03\x01\x00\x01", /* public key - integer of 3 bytes */ | ||
292 | .key_len = 269, | ||
293 | .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", | ||
294 | .c = | ||
295 | "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe" | ||
296 | "\xcf\x49\xbc\x48\xd3\x6c\x8f\x0a\x0e\xc1\x73\xbd\x7b\x55\x79\x36" | ||
297 | "\x0e\xa1\x87\x88\xb9\x2c\x90\xa6\x53\x5e\xe9\xef\xc4\xe2\x4d\xdd" | ||
298 | "\xf7\xa6\x69\x82\x3f\x56\xa4\x7b\xfb\x62\xe0\xae\xb8\xd3\x04\xb3" | ||
299 | "\xac\x5a\x15\x2a\xe3\x19\x9b\x03\x9a\x0b\x41\xda\x64\xec\x0a\x69" | ||
300 | "\xfc\xf2\x10\x92\xf3\xc1\xbf\x84\x7f\xfd\x2c\xae\xc8\xb5\xf6\x41" | ||
301 | "\x70\xc5\x47\x03\x8a\xf8\xff\x6f\x3f\xd2\x6f\x09\xb4\x22\xf3\x30" | ||
302 | "\xbe\xa9\x85\xcb\x9c\x8d\xf9\x8f\xeb\x32\x91\xa2\x25\x84\x8f\xf5" | ||
303 | "\xdc\xc7\x06\x9c\x2d\xe5\x11\x2c\x09\x09\x87\x09\xa9\xf6\x33\x73" | ||
304 | "\x90\xf1\x60\xf2\x65\xdd\x30\xa5\x66\xce\x62\x7b\xd0\xf8\x2d\x3d" | ||
305 | "\x19\x82\x77\xe3\x0a\x5f\x75\x2f\x8e\xb1\xe5\xe8\x91\x35\x1b\x3b" | ||
306 | "\x33\xb7\x66\x92\xd1\xf2\x8e\x6f\xe5\x75\x0c\xad\x36\xfb\x4e\xd0" | ||
307 | "\x66\x61\xbd\x49\xfe\xf4\x1a\xa2\x2b\x49\xfe\x03\x4c\x74\x47\x8d" | ||
308 | "\x9a\x66\xb2\x49\x46\x4d\x77\xea\x33\x4d\x6b\x3c\xb4\x49\x4a\xc6" | ||
309 | "\x7d\x3d\xb5\xb9\x56\x41\x15\x67\x0f\x94\x3c\x93\x65\x27\xe0\x21" | ||
310 | "\x5d\x59\xc3\x62\xd5\xa6\xda\x38\x26\x22\x5e\x34\x1c\x94\xaf\x98", | ||
311 | .m_size = 8, | ||
312 | .c_size = 256, | ||
313 | .public_key_vec = true, | ||
314 | } | ||
315 | }; | ||
316 | |||
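Each .key above is a DER-encoded SEQUENCE of integers (modulus, public exponent, and, except in the public-only case, the private exponent), and each .key_len is exactly the DER header plus the payload length the header announces: "\x30\x81\x88" is 3 header bytes plus 136 payload bytes (139), "\x30\x82\x01\x0B" is 4 plus 267 (271), and likewise 529 and 269 for the 2048-bit vectors. For the .public_key_vec entry the SEQUENCE simply stops after the exponent, which is why its key_len drops to 269. A standalone sketch of that arithmetic (hypothetical helper, not part of testmgr.h):

#include <stddef.h>

/* Total size of a DER SEQUENCE given its header bytes; covers only
 * the short and 1- or 2-byte long length forms used above. */
static size_t der_seq_total_len(const unsigned char *der)
{
	if (der[1] < 0x80)	/* short form: length in der[1] itself */
		return 2 + der[1];
	if (der[1] == 0x81)	/* \x30\x81\x88 -> 3 + 136 = 139 */
		return 3 + der[2];
	if (der[1] == 0x82)	/* \x30\x82\x01\x0B -> 4 + 267 = 271 */
		return 4 + (((size_t)der[2] << 8) | der[3]);
	return 0;		/* longer forms not used here */
}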
317 | /* | ||
113 | * MD4 test vectors from RFC1320 | 318 | * MD4 test vectors from RFC1320 |
114 | */ | 319 | */ |
115 | #define MD4_TEST_VECTORS 7 | 320 | #define MD4_TEST_VECTORS 7 |
@@ -1822,7 +2027,7 @@ static struct hash_testvec tgr128_tv_template[] = { | |||
1822 | }, | 2027 | }, |
1823 | }; | 2028 | }; |
1824 | 2029 | ||
1825 | #define GHASH_TEST_VECTORS 5 | 2030 | #define GHASH_TEST_VECTORS 6 |
1826 | 2031 | ||
1827 | static struct hash_testvec ghash_tv_template[] = | 2032 | static struct hash_testvec ghash_tv_template[] = |
1828 | { | 2033 | { |
@@ -1875,6 +2080,63 @@ static struct hash_testvec ghash_tv_template[] = | |||
1875 | .psize = 20, | 2080 | .psize = 20, |
1876 | .digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28" | 2081 | .digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28" |
1877 | "\x23\xf7\x93\xf7\x19\xf5\x96\xd9", | 2082 | "\x23\xf7\x93\xf7\x19\xf5\x96\xd9", |
2083 | }, { | ||
2084 | .key = "\x0a\x1b\x2c\x3d\x4e\x5f\x64\x71" | ||
2085 | "\x82\x93\xa4\xb5\xc6\xd7\xe8\xf9", | ||
2086 | .ksize = 16, | ||
2087 | .plaintext = "\x56\x6f\x72\x20\x6c\x61\x75\x74" | ||
2088 | "\x65\x72\x20\x4c\x61\x75\x73\x63" | ||
2089 | "\x68\x65\x6e\x20\x75\x6e\x64\x20" | ||
2090 | "\x53\x74\x61\x75\x6e\x65\x6e\x20" | ||
2091 | "\x73\x65\x69\x20\x73\x74\x69\x6c" | ||
2092 | "\x6c\x2c\x0a\x64\x75\x20\x6d\x65" | ||
2093 | "\x69\x6e\x20\x74\x69\x65\x66\x74" | ||
2094 | "\x69\x65\x66\x65\x73\x20\x4c\x65" | ||
2095 | "\x62\x65\x6e\x3b\x0a\x64\x61\x73" | ||
2096 | "\x73\x20\x64\x75\x20\x77\x65\x69" | ||
2097 | "\xc3\x9f\x74\x20\x77\x61\x73\x20" | ||
2098 | "\x64\x65\x72\x20\x57\x69\x6e\x64" | ||
2099 | "\x20\x64\x69\x72\x20\x77\x69\x6c" | ||
2100 | "\x6c\x2c\x0a\x65\x68\x20\x6e\x6f" | ||
2101 | "\x63\x68\x20\x64\x69\x65\x20\x42" | ||
2102 | "\x69\x72\x6b\x65\x6e\x20\x62\x65" | ||
2103 | "\x62\x65\x6e\x2e\x0a\x0a\x55\x6e" | ||
2104 | "\x64\x20\x77\x65\x6e\x6e\x20\x64" | ||
2105 | "\x69\x72\x20\x65\x69\x6e\x6d\x61" | ||
2106 | "\x6c\x20\x64\x61\x73\x20\x53\x63" | ||
2107 | "\x68\x77\x65\x69\x67\x65\x6e\x20" | ||
2108 | "\x73\x70\x72\x61\x63\x68\x2c\x0a" | ||
2109 | "\x6c\x61\x73\x73\x20\x64\x65\x69" | ||
2110 | "\x6e\x65\x20\x53\x69\x6e\x6e\x65" | ||
2111 | "\x20\x62\x65\x73\x69\x65\x67\x65" | ||
2112 | "\x6e\x2e\x0a\x4a\x65\x64\x65\x6d" | ||
2113 | "\x20\x48\x61\x75\x63\x68\x65\x20" | ||
2114 | "\x67\x69\x62\x74\x20\x64\x69\x63" | ||
2115 | "\x68\x2c\x20\x67\x69\x62\x20\x6e" | ||
2116 | "\x61\x63\x68\x2c\x0a\x65\x72\x20" | ||
2117 | "\x77\x69\x72\x64\x20\x64\x69\x63" | ||
2118 | "\x68\x20\x6c\x69\x65\x62\x65\x6e" | ||
2119 | "\x20\x75\x6e\x64\x20\x77\x69\x65" | ||
2120 | "\x67\x65\x6e\x2e\x0a\x0a\x55\x6e" | ||
2121 | "\x64\x20\x64\x61\x6e\x6e\x20\x6d" | ||
2122 | "\x65\x69\x6e\x65\x20\x53\x65\x65" | ||
2123 | "\x6c\x65\x20\x73\x65\x69\x74\x20" | ||
2124 | "\x77\x65\x69\x74\x2c\x20\x73\x65" | ||
2125 | "\x69\x20\x77\x65\x69\x74\x2c\x0a" | ||
2126 | "\x64\x61\x73\x73\x20\x64\x69\x72" | ||
2127 | "\x20\x64\x61\x73\x20\x4c\x65\x62" | ||
2128 | "\x65\x6e\x20\x67\x65\x6c\x69\x6e" | ||
2129 | "\x67\x65\x2c\x0a\x62\x72\x65\x69" | ||
2130 | "\x74\x65\x20\x64\x69\x63\x68\x20" | ||
2131 | "\x77\x69\x65\x20\x65\x69\x6e\x20" | ||
2132 | "\x46\x65\x69\x65\x72\x6b\x6c\x65" | ||
2133 | "\x69\x64\x0a\xc3\xbc\x62\x65\x72" | ||
2134 | "\x20\x64\x69\x65\x20\x73\x69\x6e" | ||
2135 | "\x6e\x65\x6e\x64\x65\x6e\x20\x44" | ||
2136 | "\x69\x6e\x67\x65\x2e\x2e\x2e\x0a", | ||
2137 | .psize = 400, | ||
2138 | .digest = "\xad\xb1\xc1\xe9\x56\x70\x31\x1d" | ||
2139 | "\xbb\x5b\xdf\x5e\x70\x72\x1a\x57", | ||
1878 | }, | 2140 | }, |
1879 | }; | 2141 | }; |
1880 | 2142 | ||
@@ -2969,6 +3231,254 @@ static struct hash_testvec hmac_sha512_tv_template[] = { | |||
2969 | }; | 3231 | }; |
2970 | 3232 | ||
2971 | /* | 3233 | /* |
3234 | * Poly1305 test vectors from RFC7539 A.3. | ||
3235 | */ | ||
3236 | |||
3237 | #define POLY1305_TEST_VECTORS 11 | ||
3238 | |||
3239 | static struct hash_testvec poly1305_tv_template[] = { | ||
3240 | { /* Test Vector #1 */ | ||
3241 | .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3242 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3243 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3244 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3245 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3246 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3247 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3248 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3249 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3250 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3251 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3252 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3253 | .psize = 96, | ||
3254 | .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3255 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3256 | }, { /* Test Vector #2 */ | ||
3257 | .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3258 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3259 | "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" | ||
3260 | "\xf0\xef\xca\x96\x22\x7a\x86\x3e" | ||
3261 | "\x41\x6e\x79\x20\x73\x75\x62\x6d" | ||
3262 | "\x69\x73\x73\x69\x6f\x6e\x20\x74" | ||
3263 | "\x6f\x20\x74\x68\x65\x20\x49\x45" | ||
3264 | "\x54\x46\x20\x69\x6e\x74\x65\x6e" | ||
3265 | "\x64\x65\x64\x20\x62\x79\x20\x74" | ||
3266 | "\x68\x65\x20\x43\x6f\x6e\x74\x72" | ||
3267 | "\x69\x62\x75\x74\x6f\x72\x20\x66" | ||
3268 | "\x6f\x72\x20\x70\x75\x62\x6c\x69" | ||
3269 | "\x63\x61\x74\x69\x6f\x6e\x20\x61" | ||
3270 | "\x73\x20\x61\x6c\x6c\x20\x6f\x72" | ||
3271 | "\x20\x70\x61\x72\x74\x20\x6f\x66" | ||
3272 | "\x20\x61\x6e\x20\x49\x45\x54\x46" | ||
3273 | "\x20\x49\x6e\x74\x65\x72\x6e\x65" | ||
3274 | "\x74\x2d\x44\x72\x61\x66\x74\x20" | ||
3275 | "\x6f\x72\x20\x52\x46\x43\x20\x61" | ||
3276 | "\x6e\x64\x20\x61\x6e\x79\x20\x73" | ||
3277 | "\x74\x61\x74\x65\x6d\x65\x6e\x74" | ||
3278 | "\x20\x6d\x61\x64\x65\x20\x77\x69" | ||
3279 | "\x74\x68\x69\x6e\x20\x74\x68\x65" | ||
3280 | "\x20\x63\x6f\x6e\x74\x65\x78\x74" | ||
3281 | "\x20\x6f\x66\x20\x61\x6e\x20\x49" | ||
3282 | "\x45\x54\x46\x20\x61\x63\x74\x69" | ||
3283 | "\x76\x69\x74\x79\x20\x69\x73\x20" | ||
3284 | "\x63\x6f\x6e\x73\x69\x64\x65\x72" | ||
3285 | "\x65\x64\x20\x61\x6e\x20\x22\x49" | ||
3286 | "\x45\x54\x46\x20\x43\x6f\x6e\x74" | ||
3287 | "\x72\x69\x62\x75\x74\x69\x6f\x6e" | ||
3288 | "\x22\x2e\x20\x53\x75\x63\x68\x20" | ||
3289 | "\x73\x74\x61\x74\x65\x6d\x65\x6e" | ||
3290 | "\x74\x73\x20\x69\x6e\x63\x6c\x75" | ||
3291 | "\x64\x65\x20\x6f\x72\x61\x6c\x20" | ||
3292 | "\x73\x74\x61\x74\x65\x6d\x65\x6e" | ||
3293 | "\x74\x73\x20\x69\x6e\x20\x49\x45" | ||
3294 | "\x54\x46\x20\x73\x65\x73\x73\x69" | ||
3295 | "\x6f\x6e\x73\x2c\x20\x61\x73\x20" | ||
3296 | "\x77\x65\x6c\x6c\x20\x61\x73\x20" | ||
3297 | "\x77\x72\x69\x74\x74\x65\x6e\x20" | ||
3298 | "\x61\x6e\x64\x20\x65\x6c\x65\x63" | ||
3299 | "\x74\x72\x6f\x6e\x69\x63\x20\x63" | ||
3300 | "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" | ||
3301 | "\x74\x69\x6f\x6e\x73\x20\x6d\x61" | ||
3302 | "\x64\x65\x20\x61\x74\x20\x61\x6e" | ||
3303 | "\x79\x20\x74\x69\x6d\x65\x20\x6f" | ||
3304 | "\x72\x20\x70\x6c\x61\x63\x65\x2c" | ||
3305 | "\x20\x77\x68\x69\x63\x68\x20\x61" | ||
3306 | "\x72\x65\x20\x61\x64\x64\x72\x65" | ||
3307 | "\x73\x73\x65\x64\x20\x74\x6f", | ||
3308 | .psize = 407, | ||
3309 | .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" | ||
3310 | "\xf0\xef\xca\x96\x22\x7a\x86\x3e", | ||
3311 | }, { /* Test Vector #3 */ | ||
3312 | .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" | ||
3313 | "\xf0\xef\xca\x96\x22\x7a\x86\x3e" | ||
3314 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3315 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3316 | "\x41\x6e\x79\x20\x73\x75\x62\x6d" | ||
3317 | "\x69\x73\x73\x69\x6f\x6e\x20\x74" | ||
3318 | "\x6f\x20\x74\x68\x65\x20\x49\x45" | ||
3319 | "\x54\x46\x20\x69\x6e\x74\x65\x6e" | ||
3320 | "\x64\x65\x64\x20\x62\x79\x20\x74" | ||
3321 | "\x68\x65\x20\x43\x6f\x6e\x74\x72" | ||
3322 | "\x69\x62\x75\x74\x6f\x72\x20\x66" | ||
3323 | "\x6f\x72\x20\x70\x75\x62\x6c\x69" | ||
3324 | "\x63\x61\x74\x69\x6f\x6e\x20\x61" | ||
3325 | "\x73\x20\x61\x6c\x6c\x20\x6f\x72" | ||
3326 | "\x20\x70\x61\x72\x74\x20\x6f\x66" | ||
3327 | "\x20\x61\x6e\x20\x49\x45\x54\x46" | ||
3328 | "\x20\x49\x6e\x74\x65\x72\x6e\x65" | ||
3329 | "\x74\x2d\x44\x72\x61\x66\x74\x20" | ||
3330 | "\x6f\x72\x20\x52\x46\x43\x20\x61" | ||
3331 | "\x6e\x64\x20\x61\x6e\x79\x20\x73" | ||
3332 | "\x74\x61\x74\x65\x6d\x65\x6e\x74" | ||
3333 | "\x20\x6d\x61\x64\x65\x20\x77\x69" | ||
3334 | "\x74\x68\x69\x6e\x20\x74\x68\x65" | ||
3335 | "\x20\x63\x6f\x6e\x74\x65\x78\x74" | ||
3336 | "\x20\x6f\x66\x20\x61\x6e\x20\x49" | ||
3337 | "\x45\x54\x46\x20\x61\x63\x74\x69" | ||
3338 | "\x76\x69\x74\x79\x20\x69\x73\x20" | ||
3339 | "\x63\x6f\x6e\x73\x69\x64\x65\x72" | ||
3340 | "\x65\x64\x20\x61\x6e\x20\x22\x49" | ||
3341 | "\x45\x54\x46\x20\x43\x6f\x6e\x74" | ||
3342 | "\x72\x69\x62\x75\x74\x69\x6f\x6e" | ||
3343 | "\x22\x2e\x20\x53\x75\x63\x68\x20" | ||
3344 | "\x73\x74\x61\x74\x65\x6d\x65\x6e" | ||
3345 | "\x74\x73\x20\x69\x6e\x63\x6c\x75" | ||
3346 | "\x64\x65\x20\x6f\x72\x61\x6c\x20" | ||
3347 | "\x73\x74\x61\x74\x65\x6d\x65\x6e" | ||
3348 | "\x74\x73\x20\x69\x6e\x20\x49\x45" | ||
3349 | "\x54\x46\x20\x73\x65\x73\x73\x69" | ||
3350 | "\x6f\x6e\x73\x2c\x20\x61\x73\x20" | ||
3351 | "\x77\x65\x6c\x6c\x20\x61\x73\x20" | ||
3352 | "\x77\x72\x69\x74\x74\x65\x6e\x20" | ||
3353 | "\x61\x6e\x64\x20\x65\x6c\x65\x63" | ||
3354 | "\x74\x72\x6f\x6e\x69\x63\x20\x63" | ||
3355 | "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" | ||
3356 | "\x74\x69\x6f\x6e\x73\x20\x6d\x61" | ||
3357 | "\x64\x65\x20\x61\x74\x20\x61\x6e" | ||
3358 | "\x79\x20\x74\x69\x6d\x65\x20\x6f" | ||
3359 | "\x72\x20\x70\x6c\x61\x63\x65\x2c" | ||
3360 | "\x20\x77\x68\x69\x63\x68\x20\x61" | ||
3361 | "\x72\x65\x20\x61\x64\x64\x72\x65" | ||
3362 | "\x73\x73\x65\x64\x20\x74\x6f", | ||
3363 | .psize = 407, | ||
3364 | .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf" | ||
3365 | "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0", | ||
3366 | }, { /* Test Vector #4 */ | ||
3367 | .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
3368 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
3369 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
3370 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" | ||
3371 | "\x27\x54\x77\x61\x73\x20\x62\x72" | ||
3372 | "\x69\x6c\x6c\x69\x67\x2c\x20\x61" | ||
3373 | "\x6e\x64\x20\x74\x68\x65\x20\x73" | ||
3374 | "\x6c\x69\x74\x68\x79\x20\x74\x6f" | ||
3375 | "\x76\x65\x73\x0a\x44\x69\x64\x20" | ||
3376 | "\x67\x79\x72\x65\x20\x61\x6e\x64" | ||
3377 | "\x20\x67\x69\x6d\x62\x6c\x65\x20" | ||
3378 | "\x69\x6e\x20\x74\x68\x65\x20\x77" | ||
3379 | "\x61\x62\x65\x3a\x0a\x41\x6c\x6c" | ||
3380 | "\x20\x6d\x69\x6d\x73\x79\x20\x77" | ||
3381 | "\x65\x72\x65\x20\x74\x68\x65\x20" | ||
3382 | "\x62\x6f\x72\x6f\x67\x6f\x76\x65" | ||
3383 | "\x73\x2c\x0a\x41\x6e\x64\x20\x74" | ||
3384 | "\x68\x65\x20\x6d\x6f\x6d\x65\x20" | ||
3385 | "\x72\x61\x74\x68\x73\x20\x6f\x75" | ||
3386 | "\x74\x67\x72\x61\x62\x65\x2e", | ||
3387 | .psize = 159, | ||
3388 | .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61" | ||
3389 | "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62", | ||
3390 | }, { /* Test Vector #5 */ | ||
3391 | .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" | ||
3392 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3393 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3394 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3395 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3396 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
3397 | .psize = 48, | ||
3398 | .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" | ||
3399 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3400 | }, { /* Test Vector #6 */ | ||
3401 | .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" | ||
3402 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3403 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3404 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3405 | "\x02\x00\x00\x00\x00\x00\x00\x00" | ||
3406 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3407 | .psize = 48, | ||
3408 | .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" | ||
3409 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3410 | }, { /* Test Vector #7 */ | ||
3411 | .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3412 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3413 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3414 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3415 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3416 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3417 | "\xf0\xff\xff\xff\xff\xff\xff\xff" | ||
3418 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3419 | "\x11\x00\x00\x00\x00\x00\x00\x00" | ||
3420 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3421 | .psize = 80, | ||
3422 | .digest = "\x05\x00\x00\x00\x00\x00\x00\x00" | ||
3423 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3424 | }, { /* Test Vector #8 */ | ||
3425 | .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3426 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3427 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3428 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3429 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3430 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
3431 | "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe" | ||
3432 | "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" | ||
3433 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
3434 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
3435 | .psize = 80, | ||
3436 | .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3437 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3438 | }, { /* Test Vector #9 */ | ||
3439 | .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" | ||
3440 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3441 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3442 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3443 | "\xfd\xff\xff\xff\xff\xff\xff\xff" | ||
3444 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
3445 | .psize = 48, | ||
3446 | .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff" | ||
3447 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
3448 | }, { /* Test Vector #10 */ | ||
3449 | .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3450 | "\x04\x00\x00\x00\x00\x00\x00\x00" | ||
3451 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3452 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3453 | "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" | ||
3454 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3455 | "\x33\x94\xd7\x50\x5e\x43\x79\xcd" | ||
3456 | "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3457 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3458 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3459 | "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3460 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3461 | .psize = 96, | ||
3462 | .digest = "\x14\x00\x00\x00\x00\x00\x00\x00" | ||
3463 | "\x55\x00\x00\x00\x00\x00\x00\x00", | ||
3464 | }, { /* Test Vector #11 */ | ||
3465 | .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3466 | "\x04\x00\x00\x00\x00\x00\x00\x00" | ||
3467 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3468 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3469 | "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" | ||
3470 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3471 | "\x33\x94\xd7\x50\x5e\x43\x79\xcd" | ||
3472 | "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
3473 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
3474 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3475 | .psize = 80, | ||
3476 | .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" | ||
3477 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
3478 | }, | ||
3479 | }; | ||
3480 | |||
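A reading note on the Poly1305 vectors: the template carries the 32-byte one-time key (r || s) as the first 32 bytes of .plaintext rather than via setkey, so every .psize is the RFC 7539 message length plus 32 (Test Vector #1: 32 + 64 = 96; #2 and #3: 32 + 375 = 407). A minimal sketch of a caller honoring that convention, assuming the shash API of this kernel era (hypothetical function, kernel context):

#include <crypto/hash.h>

/* Digest one message under a one-time Poly1305 key by feeding the
 * key as the first 32 bytes of data, as the vectors above do. */
static int poly1305_digest(const u8 key[32], const u8 *msg,
			   unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm = crypto_alloc_shash("poly1305", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, key, 32) ?:
		      crypto_shash_update(desc, msg, len) ?:
		      crypto_shash_final(desc, out);
	}
	crypto_free_shash(tfm);
	return err;
}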
3481 | /* | ||
2972 | * DES test vectors. | 3482 | * DES test vectors. |
2973 | */ | 3483 | */ |
2974 | #define DES_ENC_TEST_VECTORS 11 | 3484 | #define DES_ENC_TEST_VECTORS 11 |
@@ -3018,7 +3528,7 @@ static struct cipher_testvec des_enc_tv_template[] = { | |||
3018 | "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", | 3528 | "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", |
3019 | .rlen = 24, | 3529 | .rlen = 24, |
3020 | }, { /* Weak key */ | 3530 | }, { /* Weak key */ |
3021 | .fail = 1, | 3531 | .fail = true, |
3022 | .wk = 1, | 3532 | .wk = 1, |
3023 | .key = "\x01\x01\x01\x01\x01\x01\x01\x01", | 3533 | .key = "\x01\x01\x01\x01\x01\x01\x01\x01", |
3024 | .klen = 8, | 3534 | .klen = 8, |
@@ -13629,8 +14139,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { | |||
13629 | #define AES_CTR_3686_DEC_TEST_VECTORS 6 | 14139 | #define AES_CTR_3686_DEC_TEST_VECTORS 6 |
13630 | #define AES_GCM_ENC_TEST_VECTORS 9 | 14140 | #define AES_GCM_ENC_TEST_VECTORS 9 |
13631 | #define AES_GCM_DEC_TEST_VECTORS 8 | 14141 | #define AES_GCM_DEC_TEST_VECTORS 8 |
13632 | #define AES_GCM_4106_ENC_TEST_VECTORS 7 | 14142 | #define AES_GCM_4106_ENC_TEST_VECTORS 23 |
13633 | #define AES_GCM_4106_DEC_TEST_VECTORS 7 | 14143 | #define AES_GCM_4106_DEC_TEST_VECTORS 23 |
13634 | #define AES_GCM_4543_ENC_TEST_VECTORS 1 | 14144 | #define AES_GCM_4543_ENC_TEST_VECTORS 1 |
13635 | #define AES_GCM_4543_DEC_TEST_VECTORS 2 | 14145 | #define AES_GCM_4543_DEC_TEST_VECTORS 2 |
13636 | #define AES_CCM_ENC_TEST_VECTORS 8 | 14146 | #define AES_CCM_ENC_TEST_VECTORS 8 |
@@ -19789,6 +20299,428 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
19789 | "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" | 20299 | "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" |
19790 | "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", | 20300 | "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", |
19791 | .rlen = 208, | 20301 | .rlen = 208, |
20302 | }, { /* From draft-mcgrew-gcm-test-01 */ | ||
20303 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
20304 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
20305 | "\x2E\x44\x3B\x68", | ||
20306 | .klen = 20, | ||
20307 | .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", | ||
20308 | .input = "\x45\x00\x00\x48\x69\x9A\x00\x00" | ||
20309 | "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" | ||
20310 | "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" | ||
20311 | "\x38\xD3\x01\x00\x00\x01\x00\x00" | ||
20312 | "\x00\x00\x00\x00\x04\x5F\x73\x69" | ||
20313 | "\x70\x04\x5F\x75\x64\x70\x03\x73" | ||
20314 | "\x69\x70\x09\x63\x79\x62\x65\x72" | ||
20315 | "\x63\x69\x74\x79\x02\x64\x6B\x00" | ||
20316 | "\x00\x21\x00\x01\x01\x02\x02\x01", | ||
20317 | .ilen = 72, | ||
20318 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
20319 | "\x00\x00\x00\x00", | ||
20320 | .alen = 12, | ||
20321 | .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" | ||
20322 | "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" | ||
20323 | "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" | ||
20324 | "\x34\x85\x09\xFA\x13\xCE\xAC\x34" | ||
20325 | "\xCF\xA2\x43\x6F\x14\xA3\xF3\xCF" | ||
20326 | "\x65\x92\x5B\xF1\xF4\xA1\x3C\x5D" | ||
20327 | "\x15\xB2\x1E\x18\x84\xF5\xFF\x62" | ||
20328 | "\x47\xAE\xAB\xB7\x86\xB9\x3B\xCE" | ||
20329 | "\x61\xBC\x17\xD7\x68\xFD\x97\x32" | ||
20330 | "\x45\x90\x18\x14\x8F\x6C\xBE\x72" | ||
20331 | "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4", | ||
20332 | .rlen = 88, | ||
20333 | }, { | ||
20334 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
20335 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
20336 | "\xCA\xFE\xBA\xBE", | ||
20337 | .klen = 20, | ||
20338 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
20339 | .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00" | ||
20340 | "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" | ||
20341 | "\xC0\xA8\x01\x01\x0A\x98\x00\x35" | ||
20342 | "\x00\x2A\x23\x43\xB2\xD0\x01\x00" | ||
20343 | "\x00\x01\x00\x00\x00\x00\x00\x00" | ||
20344 | "\x03\x73\x69\x70\x09\x63\x79\x62" | ||
20345 | "\x65\x72\x63\x69\x74\x79\x02\x64" | ||
20346 | "\x6B\x00\x00\x01\x00\x01\x00\x01", | ||
20347 | .ilen = 64, | ||
20348 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | ||
20349 | .alen = 8, | ||
20350 | .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" | ||
20351 | "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" | ||
20352 | "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" | ||
20353 | "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C" | ||
20354 | "\x3D\xE8\x18\x27\xC1\x0E\x9A\x4F" | ||
20355 | "\x51\x33\x0D\x0E\xEC\x41\x66\x42" | ||
20356 | "\xCF\xBB\x85\xA5\xB4\x7E\x48\xA4" | ||
20357 | "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1" | ||
20358 | "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4" | ||
20359 | "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A", | ||
20360 | .rlen = 80, | ||
20361 | }, { | ||
20362 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20363 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20364 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20365 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20366 | "\x11\x22\x33\x44", | ||
20367 | .klen = 36, | ||
20368 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
20369 | .input = "\x45\x00\x00\x30\x69\xA6\x40\x00" | ||
20370 | "\x80\x06\x26\x90\xC0\xA8\x01\x02" | ||
20371 | "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" | ||
20372 | "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" | ||
20373 | "\x70\x02\x40\x00\x20\xBF\x00\x00" | ||
20374 | "\x02\x04\x05\xB4\x01\x01\x04\x02" | ||
20375 | "\x01\x02\x02\x01", | ||
20376 | .ilen = 52, | ||
20377 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02", | ||
20378 | .alen = 8, | ||
20379 | .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" | ||
20380 | "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" | ||
20381 | "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" | ||
20382 | "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63" | ||
20383 | "\x74\x8A\x63\x79\x85\x77\x1D\x34" | ||
20384 | "\x7F\x05\x45\x65\x9F\x14\xE9\x9D" | ||
20385 | "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE" | ||
20386 | "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49" | ||
20387 | "\x15\x95\x6C\x96", | ||
20388 | .rlen = 68, | ||
20389 | }, { | ||
20390 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
20391 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
20392 | "\x00\x00\x00\x00", | ||
20393 | .klen = 20, | ||
20394 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
20395 | .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00" | ||
20396 | "\x80\x01\xCB\x7A\x40\x67\x93\x18" | ||
20397 | "\x01\x01\x01\x01\x08\x00\x07\x5C" | ||
20398 | "\x02\x00\x44\x00\x61\x62\x63\x64" | ||
20399 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
20400 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
20401 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
20402 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
20403 | .ilen = 64, | ||
20404 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
20405 | .alen = 8, | ||
20406 | .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" | ||
20407 | "\x73\x29\x09\xC3\x31\xD5\x6D\x60" | ||
20408 | "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" | ||
20409 | "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84" | ||
20410 | "\x45\x64\x76\x49\x27\x19\xFF\xB6" | ||
20411 | "\x4D\xE7\xD9\xDC\xA1\xE1\xD8\x94" | ||
20412 | "\xBC\x3B\xD5\x78\x73\xED\x4D\x18" | ||
20413 | "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3" | ||
20414 | "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9" | ||
20415 | "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D", | ||
20416 | .rlen = 80, | ||
20417 | }, { | ||
20418 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
20419 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
20420 | "\x57\x69\x0E\x43", | ||
20421 | .klen = 20, | ||
20422 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
20423 | .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00" | ||
20424 | "\x80\x01\xCB\x7C\x40\x67\x93\x18" | ||
20425 | "\x01\x01\x01\x01\x08\x00\x08\x5C" | ||
20426 | "\x02\x00\x43\x00\x61\x62\x63\x64" | ||
20427 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
20428 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
20429 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
20430 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
20431 | .ilen = 64, | ||
20432 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
20433 | "\x10\x10\x10\x10", | ||
20434 | .alen = 12, | ||
20435 | .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" | ||
20436 | "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" | ||
20437 | "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" | ||
20438 | "\x0D\x11\x38\xEC\x9C\x35\x79\x17" | ||
20439 | "\x65\xAC\xBD\x87\x01\xAD\x79\x84" | ||
20440 | "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" | ||
20441 | "\x17\x55\xE6\x66\x2B\x4C\x8D\x0D" | ||
20442 | "\x1F\x5E\x22\x73\x95\x30\x32\x0A" | ||
20443 | "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA" | ||
20444 | "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48", | ||
20445 | .rlen = 80, | ||
20446 | }, { | ||
20447 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
20448 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
20449 | "\x57\x69\x0E\x43", | ||
20450 | .klen = 20, | ||
20451 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
20452 | .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00" | ||
20453 | "\x80\x01\x44\x1F\x40\x67\x93\xB6" | ||
20454 | "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" | ||
20455 | "\x01\x02\x02\x01", | ||
20456 | .ilen = 28, | ||
20457 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
20458 | "\x10\x10\x10\x10", | ||
20459 | .alen = 12, | ||
20460 | .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" | ||
20461 | "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" | ||
20462 | "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" | ||
20463 | "\x0E\x13\x79\xED\x36\x9F\x07\x1F" | ||
20464 | "\x35\xE0\x34\xBE\x95\xF1\x12\xE4" | ||
20465 | "\xE7\xD0\x5D\x35", | ||
20466 | .rlen = 44, | ||
20467 | }, { | ||
20468 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
20469 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
20470 | "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
20471 | "\xCA\xFE\xBA\xBE", | ||
20472 | .klen = 28, | ||
20473 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
20474 | .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00" | ||
20475 | "\x40\x06\x78\x80\x0A\x01\x03\x8F" | ||
20476 | "\x0A\x01\x06\x12\x80\x23\x06\xB8" | ||
20477 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" | ||
20478 | "\x50\x10\x16\xD0\x75\x68\x00\x01", | ||
20479 | .ilen = 40, | ||
20480 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | ||
20481 | .alen = 8, | ||
20482 | .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" | ||
20483 | "\x0E\x59\x8B\x81\x22\xDE\x02\x42" | ||
20484 | "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" | ||
20485 | "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0" | ||
20486 | "\x31\x5B\x27\x45\x21\x44\xCC\x77" | ||
20487 | "\x95\x45\x7B\x96\x52\x03\x7F\x53" | ||
20488 | "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36", | ||
20489 | .rlen = 56, | ||
20490 | }, { | ||
20491 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20492 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20493 | "\xDE\xCA\xF8\x88", | ||
20494 | .klen = 20, | ||
20495 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
20496 | .input = "\x45\x00\x00\x49\x33\xBA\x00\x00" | ||
20497 | "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" | ||
20498 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
20499 | "\x00\x35\xDD\x7B\x80\x03\x02\xD5" | ||
20500 | "\x00\x00\x4E\x20\x00\x1E\x8C\x18" | ||
20501 | "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" | ||
20502 | "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" | ||
20503 | "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" | ||
20504 | "\x9B\x62\x66\xC0\x47\x22\xB1\x49" | ||
20505 | "\x23\x01\x01\x01", | ||
20506 | .ilen = 76, | ||
20507 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
20508 | "\x00\x00\x00\x01", | ||
20509 | .alen = 12, | ||
20510 | .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" | ||
20511 | "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" | ||
20512 | "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" | ||
20513 | "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F" | ||
20514 | "\x91\x90\xFE\xEB\xAF\x2C\xB0\x19" | ||
20515 | "\x84\xE6\x58\x63\x96\x5D\x74\x72" | ||
20516 | "\xB7\x9D\xA3\x45\xE0\xE7\x80\x19" | ||
20517 | "\x1F\x0D\x2F\x0E\x0F\x49\x6C\x22" | ||
20518 | "\x6F\x21\x27\xB2\x7D\xB3\x57\x24" | ||
20519 | "\xE7\x84\x5D\x68\x65\x1F\x57\xE6" | ||
20520 | "\x5F\x35\x4F\x75\xFF\x17\x01\x57" | ||
20521 | "\x69\x62\x34\x36", | ||
20522 | .rlen = 92, | ||
20523 | }, { | ||
20524 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20525 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20526 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20527 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20528 | "\x73\x61\x6C\x74", | ||
20529 | .klen = 36, | ||
20530 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
20531 | .input = "\x45\x08\x00\x28\x73\x2C\x00\x00" | ||
20532 | "\x40\x06\xE9\xF9\x0A\x01\x06\x12" | ||
20533 | "\x0A\x01\x03\x8F\x06\xB8\x80\x23" | ||
20534 | "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" | ||
20535 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", | ||
20536 | .ilen = 40, | ||
20537 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
20538 | "\xDD\x0D\xB9\x9B", | ||
20539 | .alen = 12, | ||
20540 | .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" | ||
20541 | "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" | ||
20542 | "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" | ||
20543 | "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F" | ||
20544 | "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E" | ||
20545 | "\x45\x16\x26\xC2\x41\x57\x71\xE3" | ||
20546 | "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35", | ||
20547 | .rlen = 56, | ||
20548 | }, { | ||
20549 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
20550 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
20551 | "\x57\x69\x0E\x43", | ||
20552 | .klen = 20, | ||
20553 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
20554 | .input = "\x45\x00\x00\x49\x33\x3E\x00\x00" | ||
20555 | "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" | ||
20556 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
20557 | "\x00\x35\xCB\x45\x80\x03\x02\x5B" | ||
20558 | "\x00\x00\x01\xE0\x00\x1E\x8C\x18" | ||
20559 | "\xD6\x57\x59\xD5\x22\x84\xA0\x35" | ||
20560 | "\x2C\x71\x47\x5C\x88\x80\x39\x1C" | ||
20561 | "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" | ||
20562 | "\x5A\xE2\x70\xC0\x38\x99\x49\x39" | ||
20563 | "\x15\x01\x01\x01", | ||
20564 | .ilen = 76, | ||
20565 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
20566 | "\x10\x10\x10\x10", | ||
20567 | .alen = 12, | ||
20568 | .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" | ||
20569 | "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" | ||
20570 | "\x3D\x77\x84\xB6\x07\x32\x3D\x22" | ||
20571 | "\x0F\x24\xB0\xA9\x7D\x54\x18\x28" | ||
20572 | "\x00\xCA\xDB\x0F\x68\xD9\x9E\xF0" | ||
20573 | "\xE0\xC0\xC8\x9A\xE9\xBE\xA8\x88" | ||
20574 | "\x4E\x52\xD6\x5B\xC1\xAF\xD0\x74" | ||
20575 | "\x0F\x74\x24\x44\x74\x7B\x5B\x39" | ||
20576 | "\xAB\x53\x31\x63\xAA\xD4\x55\x0E" | ||
20577 | "\xE5\x16\x09\x75\xCD\xB6\x08\xC5" | ||
20578 | "\x76\x91\x89\x60\x97\x63\xB8\xE1" | ||
20579 | "\x8C\xAA\x81\xE2", | ||
20580 | .rlen = 92, | ||
20581 | }, { | ||
20582 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20583 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20584 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20585 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20586 | "\x73\x61\x6C\x74", | ||
20587 | .klen = 36, | ||
20588 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
20589 | .input = "\x63\x69\x73\x63\x6F\x01\x72\x75" | ||
20590 | "\x6C\x65\x73\x01\x74\x68\x65\x01" | ||
20591 | "\x6E\x65\x74\x77\x65\x01\x64\x65" | ||
20592 | "\x66\x69\x6E\x65\x01\x74\x68\x65" | ||
20593 | "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" | ||
20594 | "\x67\x69\x65\x73\x01\x74\x68\x61" | ||
20595 | "\x74\x77\x69\x6C\x6C\x01\x64\x65" | ||
20596 | "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" | ||
20597 | "\x72\x72\x6F\x77\x01\x02\x02\x01", | ||
20598 | .ilen = 72, | ||
20599 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
20600 | "\xDD\x0D\xB9\x9B", | ||
20601 | .alen = 12, | ||
20602 | .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" | ||
20603 | "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" | ||
20604 | "\x7B\x43\xF8\x26\xFB\x56\x83\x12" | ||
20605 | "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18" | ||
20606 | "\xD0\xA6\xDF\x10\xE5\x48\x7D\xF0" | ||
20607 | "\x74\x11\x3E\x14\xC6\x41\x02\x4E" | ||
20608 | "\x3E\x67\x73\xD9\x1A\x62\xEE\x42" | ||
20609 | "\x9B\x04\x3A\x10\xE3\xEF\xE6\xB0" | ||
20610 | "\x12\xA4\x93\x63\x41\x23\x64\xF8" | ||
20611 | "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B" | ||
20612 | "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76", | ||
20613 | .rlen = 88, | ||
20614 | }, { | ||
20615 | .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" | ||
20616 | "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" | ||
20617 | "\xD9\x66\x42\x67", | ||
20618 | .klen = 20, | ||
20619 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | ||
20620 | .input = "\x01\x02\x02\x01", | ||
20621 | .ilen = 4, | ||
20622 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF", | ||
20623 | .alen = 8, | ||
20624 | .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" | ||
20625 | "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" | ||
20626 | "\x04\xBE\xF2\x70", | ||
20627 | .rlen = 20, | ||
20628 | }, { | ||
20629 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20630 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20631 | "\xDE\xCA\xF8\x88", | ||
20632 | .klen = 20, | ||
20633 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
20634 | .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72" | ||
20635 | "\x01\x6E\x6F\x74\x01\x74\x6F\x01" | ||
20636 | "\x62\x65\x00\x01", | ||
20637 | .ilen = 20, | ||
20638 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
20639 | "\x00\x00\x00\x01", | ||
20640 | .alen = 12, | ||
20641 | .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" | ||
20642 | "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" | ||
20643 | "\x43\x33\x21\x64\x41\x25\x03\x52" | ||
20644 | "\x43\x03\xED\x3C\x6C\x5F\x28\x38" | ||
20645 | "\x43\xAF\x8C\x3E", | ||
20646 | .rlen = 36, | ||
20647 | }, { | ||
20648 | .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" | ||
20649 | "\x6D\x61\x72\x69\x6A\x75\x61\x6E" | ||
20650 | "\x61\x61\x6E\x64\x64\x6F\x69\x74" | ||
20651 | "\x62\x65\x66\x6F\x72\x65\x69\x61" | ||
20652 | "\x74\x75\x72\x6E", | ||
20653 | .klen = 36, | ||
20654 | .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", | ||
20655 | .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
20656 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
20657 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
20658 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
20659 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
20660 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
20661 | "\x01\x02\x02\x01", | ||
20662 | .ilen = 52, | ||
20663 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" | ||
20664 | "\xFF\xFF\xFF\xFF", | ||
20665 | .alen = 12, | ||
20666 | .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" | ||
20667 | "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" | ||
20668 | "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" | ||
20669 | "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86" | ||
20670 | "\x4C\xD3\xD7\x8C\xA4\xAE\x68\xA3" | ||
20671 | "\x2B\x42\x45\x8F\xB5\x7D\xBE\x82" | ||
20672 | "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2" | ||
20673 | "\x94\x5F\x66\x93\x68\x66\x1A\x32" | ||
20674 | "\x9F\xB4\xC0\x53", | ||
20675 | .rlen = 68, | ||
20676 | }, { | ||
20677 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
20678 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
20679 | "\x57\x69\x0E\x43", | ||
20680 | .klen = 20, | ||
20681 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
20682 | .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
20683 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
20684 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
20685 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
20686 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
20687 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
20688 | "\x01\x02\x02\x01", | ||
20689 | .ilen = 52, | ||
20690 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" | ||
20691 | "\x10\x10\x10\x10", | ||
20692 | .alen = 12, | ||
20693 | .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" | ||
20694 | "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" | ||
20695 | "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" | ||
20696 | "\x0D\x11\x7C\xEC\x9C\x35\x79\x17" | ||
20697 | "\x65\xAC\xBD\x87\x01\xAD\x79\x84" | ||
20698 | "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" | ||
20699 | "\x63\x21\x93\x06\x84\xEE\xCA\xDB" | ||
20700 | "\x56\x91\x25\x46\xE7\xA9\x5C\x97" | ||
20701 | "\x40\xD7\xCB\x05", | ||
20702 | .rlen = 68, | ||
20703 | }, { | ||
20704 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
20705 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
20706 | "\x22\x43\x3C\x64", | ||
20707 | .klen = 20, | ||
20708 | .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", | ||
20709 | .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00" | ||
20710 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
20711 | "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" | ||
20712 | "\x71\x72\x73\x74\x01\x02\x02\x01", | ||
20713 | .ilen = 32, | ||
20714 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
20715 | "\x00\x00\x00\x07", | ||
20716 | .alen = 12, | ||
20717 | .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" | ||
20718 | "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" | ||
20719 | "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" | ||
20720 | "\xAC\xDA\x68\x94\xBC\x61\x90\x69" | ||
20721 | "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7" | ||
20722 | "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0", | ||
20723 | .rlen = 48, | ||
19792 | } | 20724 | } |
19793 | }; | 20725 | }; |
19794 | 20726 | ||
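In the RFC 4106 vectors above, .klen is always the AES key length plus four (16+4=20, 24+4=28, 32+4=36): the trailing four key bytes are the nonce salt, while the 8-byte .iv is the explicit, per-packet half of the 12-byte GCM nonce. A standalone sketch of that assembly (hypothetical helper, following RFC 4106):

#include <stdint.h>
#include <string.h>

/* Build the 12-byte GCM nonce from an rfc4106-style key blob:
 * salt (last 4 key bytes) || explicit 8-byte IV from the packet. */
static void rfc4106_nonce(const uint8_t *key, unsigned int klen,
			  const uint8_t iv[8], uint8_t nonce[12])
{
	memcpy(nonce, key + klen - 4, 4);
	memcpy(nonce + 4, iv, 8);
}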
@@ -19964,7 +20896,428 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
19964 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20896 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
19965 | "\xff\xff\xff\xff\xff\xff\xff\xff", | 20897 | "\xff\xff\xff\xff\xff\xff\xff\xff", |
19966 | .rlen = 192, | 20898 | .rlen = 192, |
19967 | 20899 | }, { | |
20900 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
20901 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
20902 | "\x2E\x44\x3B\x68", | ||
20903 | .klen = 20, | ||
20904 | .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", | ||
20905 | .result = "\x45\x00\x00\x48\x69\x9A\x00\x00" | ||
20906 | "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" | ||
20907 | "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" | ||
20908 | "\x38\xD3\x01\x00\x00\x01\x00\x00" | ||
20909 | "\x00\x00\x00\x00\x04\x5F\x73\x69" | ||
20910 | "\x70\x04\x5F\x75\x64\x70\x03\x73" | ||
20911 | "\x69\x70\x09\x63\x79\x62\x65\x72" | ||
20912 | "\x63\x69\x74\x79\x02\x64\x6B\x00" | ||
20913 | "\x00\x21\x00\x01\x01\x02\x02\x01", | ||
20914 | .rlen = 72, | ||
20915 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
20916 | "\x00\x00\x00\x00", | ||
20917 | .alen = 12, | ||
20918 | .input = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" | ||
20919 | "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" | ||
20920 | "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" | ||
20921 | "\x34\x85\x09\xFA\x13\xCE\xAC\x34" | ||
20922 | "\xCF\xA2\x43\x6F\x14\xA3\xF3\xCF" | ||
20923 | "\x65\x92\x5B\xF1\xF4\xA1\x3C\x5D" | ||
20924 | "\x15\xB2\x1E\x18\x84\xF5\xFF\x62" | ||
20925 | "\x47\xAE\xAB\xB7\x86\xB9\x3B\xCE" | ||
20926 | "\x61\xBC\x17\xD7\x68\xFD\x97\x32" | ||
20927 | "\x45\x90\x18\x14\x8F\x6C\xBE\x72" | ||
20928 | "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4", | ||
20929 | .ilen = 88, | ||
20930 | }, { | ||
20931 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
20932 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
20933 | "\xCA\xFE\xBA\xBE", | ||
20934 | .klen = 20, | ||
20935 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
20936 | .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00" | ||
20937 | "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" | ||
20938 | "\xC0\xA8\x01\x01\x0A\x98\x00\x35" | ||
20939 | "\x00\x2A\x23\x43\xB2\xD0\x01\x00" | ||
20940 | "\x00\x01\x00\x00\x00\x00\x00\x00" | ||
20941 | "\x03\x73\x69\x70\x09\x63\x79\x62" | ||
20942 | "\x65\x72\x63\x69\x74\x79\x02\x64" | ||
20943 | "\x6B\x00\x00\x01\x00\x01\x00\x01", | ||
20944 | .rlen = 64, | ||
20945 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | ||
20946 | .alen = 8, | ||
20947 | .input = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" | ||
20948 | "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" | ||
20949 | "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" | ||
20950 | "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C" | ||
20951 | "\x3D\xE8\x18\x27\xC1\x0E\x9A\x4F" | ||
20952 | "\x51\x33\x0D\x0E\xEC\x41\x66\x42" | ||
20953 | "\xCF\xBB\x85\xA5\xB4\x7E\x48\xA4" | ||
20954 | "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1" | ||
20955 | "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4" | ||
20956 | "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A", | ||
20957 | .ilen = 80, | ||
20958 | }, { | ||
20959 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20960 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20961 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
20962 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
20963 | "\x11\x22\x33\x44", | ||
20964 | .klen = 36, | ||
20965 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
20966 | .result = "\x45\x00\x00\x30\x69\xA6\x40\x00" | ||
20967 | "\x80\x06\x26\x90\xC0\xA8\x01\x02" | ||
20968 | "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" | ||
20969 | "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" | ||
20970 | "\x70\x02\x40\x00\x20\xBF\x00\x00" | ||
20971 | "\x02\x04\x05\xB4\x01\x01\x04\x02" | ||
20972 | "\x01\x02\x02\x01", | ||
20973 | .rlen = 52, | ||
20974 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02", | ||
20975 | .alen = 8, | ||
20976 | .input = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" | ||
20977 | "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" | ||
20978 | "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" | ||
20979 | "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63" | ||
20980 | "\x74\x8A\x63\x79\x85\x77\x1D\x34" | ||
20981 | "\x7F\x05\x45\x65\x9F\x14\xE9\x9D" | ||
20982 | "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE" | ||
20983 | "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49" | ||
20984 | "\x15\x95\x6C\x96", | ||
20985 | .ilen = 68, | ||
20986 | }, { | ||
20987 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
20988 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
20989 | "\x00\x00\x00\x00", | ||
20990 | .klen = 20, | ||
20991 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
20992 | .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00" | ||
20993 | "\x80\x01\xCB\x7A\x40\x67\x93\x18" | ||
20994 | "\x01\x01\x01\x01\x08\x00\x07\x5C" | ||
20995 | "\x02\x00\x44\x00\x61\x62\x63\x64" | ||
20996 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
20997 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
20998 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
20999 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
21000 | .rlen = 64, | ||
21001 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
21002 | .alen = 8, | ||
21003 | .input = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" | ||
21004 | "\x73\x29\x09\xC3\x31\xD5\x6D\x60" | ||
21005 | "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" | ||
21006 | "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84" | ||
21007 | "\x45\x64\x76\x49\x27\x19\xFF\xB6" | ||
21008 | "\x4D\xE7\xD9\xDC\xA1\xE1\xD8\x94" | ||
21009 | "\xBC\x3B\xD5\x78\x73\xED\x4D\x18" | ||
21010 | "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3" | ||
21011 | "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9" | ||
21012 | "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D", | ||
21013 | .ilen = 80, | ||
21014 | }, { | ||
21015 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
21016 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
21017 | "\x57\x69\x0E\x43", | ||
21018 | .klen = 20, | ||
21019 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
21020 | .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00" | ||
21021 | "\x80\x01\xCB\x7C\x40\x67\x93\x18" | ||
21022 | "\x01\x01\x01\x01\x08\x00\x08\x5C" | ||
21023 | "\x02\x00\x43\x00\x61\x62\x63\x64" | ||
21024 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
21025 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
21026 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
21027 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
21028 | .rlen = 64, | ||
21029 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
21030 | "\x10\x10\x10\x10", | ||
21031 | .alen = 12, | ||
21032 | .input = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" | ||
21033 | "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" | ||
21034 | "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" | ||
21035 | "\x0D\x11\x38\xEC\x9C\x35\x79\x17" | ||
21036 | "\x65\xAC\xBD\x87\x01\xAD\x79\x84" | ||
21037 | "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" | ||
21038 | "\x17\x55\xE6\x66\x2B\x4C\x8D\x0D" | ||
21039 | "\x1F\x5E\x22\x73\x95\x30\x32\x0A" | ||
21040 | "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA" | ||
21041 | "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48", | ||
21042 | .ilen = 80, | ||
21043 | }, { | ||
21044 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
21045 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
21046 | "\x57\x69\x0E\x43", | ||
21047 | .klen = 20, | ||
21048 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
21049 | .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00" | ||
21050 | "\x80\x01\x44\x1F\x40\x67\x93\xB6" | ||
21051 | "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" | ||
21052 | "\x01\x02\x02\x01", | ||
21053 | .rlen = 28, | ||
21054 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
21055 | "\x10\x10\x10\x10", | ||
21056 | .alen = 12, | ||
21057 | .input = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" | ||
21058 | "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" | ||
21059 | "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" | ||
21060 | "\x0E\x13\x79\xED\x36\x9F\x07\x1F" | ||
21061 | "\x35\xE0\x34\xBE\x95\xF1\x12\xE4" | ||
21062 | "\xE7\xD0\x5D\x35", | ||
21063 | .ilen = 44, | ||
21064 | }, { | ||
21065 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
21066 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
21067 | "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
21068 | "\xCA\xFE\xBA\xBE", | ||
21069 | .klen = 28, | ||
21070 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
21071 | .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00" | ||
21072 | "\x40\x06\x78\x80\x0A\x01\x03\x8F" | ||
21073 | "\x0A\x01\x06\x12\x80\x23\x06\xB8" | ||
21074 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" | ||
21075 | "\x50\x10\x16\xD0\x75\x68\x00\x01", | ||
21076 | .rlen = 40, | ||
21077 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | ||
21078 | .alen = 8, | ||
21079 | .input = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" | ||
21080 | "\x0E\x59\x8B\x81\x22\xDE\x02\x42" | ||
21081 | "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" | ||
21082 | "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0" | ||
21083 | "\x31\x5B\x27\x45\x21\x44\xCC\x77" | ||
21084 | "\x95\x45\x7B\x96\x52\x03\x7F\x53" | ||
21085 | "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36", | ||
21086 | .ilen = 56, | ||
21087 | }, { | ||
21088 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
21089 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
21090 | "\xDE\xCA\xF8\x88", | ||
21091 | .klen = 20, | ||
21092 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
21093 | .result = "\x45\x00\x00\x49\x33\xBA\x00\x00" | ||
21094 | "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" | ||
21095 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
21096 | "\x00\x35\xDD\x7B\x80\x03\x02\xD5" | ||
21097 | "\x00\x00\x4E\x20\x00\x1E\x8C\x18" | ||
21098 | "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" | ||
21099 | "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" | ||
21100 | "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" | ||
21101 | "\x9B\x62\x66\xC0\x47\x22\xB1\x49" | ||
21102 | "\x23\x01\x01\x01", | ||
21103 | .rlen = 76, | ||
21104 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
21105 | "\x00\x00\x00\x01", | ||
21106 | .alen = 12, | ||
21107 | .input = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" | ||
21108 | "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" | ||
21109 | "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" | ||
21110 | "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F" | ||
21111 | "\x91\x90\xFE\xEB\xAF\x2C\xB0\x19" | ||
21112 | "\x84\xE6\x58\x63\x96\x5D\x74\x72" | ||
21113 | "\xB7\x9D\xA3\x45\xE0\xE7\x80\x19" | ||
21114 | "\x1F\x0D\x2F\x0E\x0F\x49\x6C\x22" | ||
21115 | "\x6F\x21\x27\xB2\x7D\xB3\x57\x24" | ||
21116 | "\xE7\x84\x5D\x68\x65\x1F\x57\xE6" | ||
21117 | "\x5F\x35\x4F\x75\xFF\x17\x01\x57" | ||
21118 | "\x69\x62\x34\x36", | ||
21119 | .ilen = 92, | ||
21120 | }, { | ||
21121 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
21122 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
21123 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
21124 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
21125 | "\x73\x61\x6C\x74", | ||
21126 | .klen = 36, | ||
21127 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
21128 | .result = "\x45\x08\x00\x28\x73\x2C\x00\x00" | ||
21129 | "\x40\x06\xE9\xF9\x0A\x01\x06\x12" | ||
21130 | "\x0A\x01\x03\x8F\x06\xB8\x80\x23" | ||
21131 | "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" | ||
21132 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", | ||
21133 | .rlen = 40, | ||
21134 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
21135 | "\xDD\x0D\xB9\x9B", | ||
21136 | .alen = 12, | ||
21137 | .input = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" | ||
21138 | "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" | ||
21139 | "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" | ||
21140 | "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F" | ||
21141 | "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E" | ||
21142 | "\x45\x16\x26\xC2\x41\x57\x71\xE3" | ||
21143 | "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35", | ||
21144 | .ilen = 56, | ||
21145 | }, { | ||
21146 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
21147 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
21148 | "\x57\x69\x0E\x43", | ||
21149 | .klen = 20, | ||
21150 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
21151 | .result = "\x45\x00\x00\x49\x33\x3E\x00\x00" | ||
21152 | "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" | ||
21153 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
21154 | "\x00\x35\xCB\x45\x80\x03\x02\x5B" | ||
21155 | "\x00\x00\x01\xE0\x00\x1E\x8C\x18" | ||
21156 | "\xD6\x57\x59\xD5\x22\x84\xA0\x35" | ||
21157 | "\x2C\x71\x47\x5C\x88\x80\x39\x1C" | ||
21158 | "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" | ||
21159 | "\x5A\xE2\x70\xC0\x38\x99\x49\x39" | ||
21160 | "\x15\x01\x01\x01", | ||
21161 | .rlen = 76, | ||
21162 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
21163 | "\x10\x10\x10\x10", | ||
21164 | .alen = 12, | ||
21165 | .input = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" | ||
21166 | "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" | ||
21167 | "\x3D\x77\x84\xB6\x07\x32\x3D\x22" | ||
21168 | "\x0F\x24\xB0\xA9\x7D\x54\x18\x28" | ||
21169 | "\x00\xCA\xDB\x0F\x68\xD9\x9E\xF0" | ||
21170 | "\xE0\xC0\xC8\x9A\xE9\xBE\xA8\x88" | ||
21171 | "\x4E\x52\xD6\x5B\xC1\xAF\xD0\x74" | ||
21172 | "\x0F\x74\x24\x44\x74\x7B\x5B\x39" | ||
21173 | "\xAB\x53\x31\x63\xAA\xD4\x55\x0E" | ||
21174 | "\xE5\x16\x09\x75\xCD\xB6\x08\xC5" | ||
21175 | "\x76\x91\x89\x60\x97\x63\xB8\xE1" | ||
21176 | "\x8C\xAA\x81\xE2", | ||
21177 | .ilen = 92, | ||
21178 | }, { | ||
21179 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
21180 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
21181 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
21182 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
21183 | "\x73\x61\x6C\x74", | ||
21184 | .klen = 36, | ||
21185 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
21186 | .result = "\x63\x69\x73\x63\x6F\x01\x72\x75" | ||
21187 | "\x6C\x65\x73\x01\x74\x68\x65\x01" | ||
21188 | "\x6E\x65\x74\x77\x65\x01\x64\x65" | ||
21189 | "\x66\x69\x6E\x65\x01\x74\x68\x65" | ||
21190 | "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" | ||
21191 | "\x67\x69\x65\x73\x01\x74\x68\x61" | ||
21192 | "\x74\x77\x69\x6C\x6C\x01\x64\x65" | ||
21193 | "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" | ||
21194 | "\x72\x72\x6F\x77\x01\x02\x02\x01", | ||
21195 | .rlen = 72, | ||
21196 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
21197 | "\xDD\x0D\xB9\x9B", | ||
21198 | .alen = 12, | ||
21199 | .input = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" | ||
21200 | "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" | ||
21201 | "\x7B\x43\xF8\x26\xFB\x56\x83\x12" | ||
21202 | "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18" | ||
21203 | "\xD0\xA6\xDF\x10\xE5\x48\x7D\xF0" | ||
21204 | "\x74\x11\x3E\x14\xC6\x41\x02\x4E" | ||
21205 | "\x3E\x67\x73\xD9\x1A\x62\xEE\x42" | ||
21206 | "\x9B\x04\x3A\x10\xE3\xEF\xE6\xB0" | ||
21207 | "\x12\xA4\x93\x63\x41\x23\x64\xF8" | ||
21208 | "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B" | ||
21209 | "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76", | ||
21210 | .ilen = 88, | ||
21211 | }, { | ||
21212 | .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" | ||
21213 | "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" | ||
21214 | "\xD9\x66\x42\x67", | ||
21215 | .klen = 20, | ||
21216 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | ||
21217 | .result = "\x01\x02\x02\x01", | ||
21218 | .rlen = 4, | ||
21219 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF", | ||
21220 | .alen = 8, | ||
21221 | .input = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" | ||
21222 | "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" | ||
21223 | "\x04\xBE\xF2\x70", | ||
21224 | .ilen = 20, | ||
21225 | }, { | ||
21226 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
21227 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
21228 | "\xDE\xCA\xF8\x88", | ||
21229 | .klen = 20, | ||
21230 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
21231 | .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72" | ||
21232 | "\x01\x6E\x6F\x74\x01\x74\x6F\x01" | ||
21233 | "\x62\x65\x00\x01", | ||
21234 | .rlen = 20, | ||
21235 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
21236 | "\x00\x00\x00\x01", | ||
21237 | .alen = 12, | ||
21238 | .input = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" | ||
21239 | "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" | ||
21240 | "\x43\x33\x21\x64\x41\x25\x03\x52" | ||
21241 | "\x43\x03\xED\x3C\x6C\x5F\x28\x38" | ||
21242 | "\x43\xAF\x8C\x3E", | ||
21243 | .ilen = 36, | ||
21244 | }, { | ||
21245 | .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" | ||
21246 | "\x6D\x61\x72\x69\x6A\x75\x61\x6E" | ||
21247 | "\x61\x61\x6E\x64\x64\x6F\x69\x74" | ||
21248 | "\x62\x65\x66\x6F\x72\x65\x69\x61" | ||
21249 | "\x74\x75\x72\x6E", | ||
21250 | .klen = 36, | ||
21251 | .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", | ||
21252 | .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
21253 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
21254 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
21255 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
21256 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
21257 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
21258 | "\x01\x02\x02\x01", | ||
21259 | .rlen = 52, | ||
21260 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" | ||
21261 | "\xFF\xFF\xFF\xFF", | ||
21262 | .alen = 12, | ||
21263 | .input = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" | ||
21264 | "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" | ||
21265 | "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" | ||
21266 | "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86" | ||
21267 | "\x4C\xD3\xD7\x8C\xA4\xAE\x68\xA3" | ||
21268 | "\x2B\x42\x45\x8F\xB5\x7D\xBE\x82" | ||
21269 | "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2" | ||
21270 | "\x94\x5F\x66\x93\x68\x66\x1A\x32" | ||
21271 | "\x9F\xB4\xC0\x53", | ||
21272 | .ilen = 68, | ||
21273 | }, { | ||
21274 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
21275 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
21276 | "\x57\x69\x0E\x43", | ||
21277 | .klen = 20, | ||
21278 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
21279 | .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
21280 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
21281 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
21282 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
21283 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
21284 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
21285 | "\x01\x02\x02\x01", | ||
21286 | .rlen = 52, | ||
21287 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" | ||
21288 | "\x10\x10\x10\x10", | ||
21289 | .alen = 12, | ||
21290 | .input = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" | ||
21291 | "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" | ||
21292 | "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" | ||
21293 | "\x0D\x11\x7C\xEC\x9C\x35\x79\x17" | ||
21294 | "\x65\xAC\xBD\x87\x01\xAD\x79\x84" | ||
21295 | "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" | ||
21296 | "\x63\x21\x93\x06\x84\xEE\xCA\xDB" | ||
21297 | "\x56\x91\x25\x46\xE7\xA9\x5C\x97" | ||
21298 | "\x40\xD7\xCB\x05", | ||
21299 | .ilen = 68, | ||
21300 | }, { | ||
21301 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
21302 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
21303 | "\x22\x43\x3C\x64", | ||
21304 | .klen = 20, | ||
21305 | .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", | ||
21306 | .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00" | ||
21307 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
21308 | "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" | ||
21309 | "\x71\x72\x73\x74\x01\x02\x02\x01", | ||
21310 | .rlen = 32, | ||
21311 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
21312 | "\x00\x00\x00\x07", | ||
21313 | .alen = 12, | ||
21314 | .input = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" | ||
21315 | "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" | ||
21316 | "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" | ||
21317 | "\xAC\xDA\x68\x94\xBC\x61\x90\x69" | ||
21318 | "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7" | ||
21319 | "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0", | ||
21320 | .ilen = 48, | ||
19968 | } | 21321 | } |
19969 | }; | 21322 | }; |
19970 | 21323 | ||
@@ -19975,8 +21328,9 @@ static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = { | |||
19975 | "\x22\x43\x3c\x64", | 21328 | "\x22\x43\x3c\x64", |
19976 | .klen = 20, | 21329 | .klen = 20, |
19977 | .iv = zeroed_string, | 21330 | .iv = zeroed_string, |
19978 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07", | 21331 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" |
19979 | .alen = 8, | 21332 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
21333 | .alen = 16, | ||
19980 | .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" | 21334 | .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" |
19981 | "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" | 21335 | "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" |
19982 | "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" | 21336 | "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" |
@@ -20005,8 +21359,9 @@ static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = { | |||
20005 | "\x22\x43\x3c\x64", | 21359 | "\x22\x43\x3c\x64", |
20006 | .klen = 20, | 21360 | .klen = 20, |
20007 | .iv = zeroed_string, | 21361 | .iv = zeroed_string, |
20008 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07", | 21362 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" |
20009 | .alen = 8, | 21363 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
21364 | .alen = 16, | ||
20010 | .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" | 21365 | .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" |
20011 | "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" | 21366 | "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" |
20012 | "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" | 21367 | "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" |
@@ -20031,8 +21386,9 @@ static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = { | |||
20031 | "\x22\x43\x3c\x64", | 21386 | "\x22\x43\x3c\x64", |
20032 | .klen = 20, | 21387 | .klen = 20, |
20033 | .iv = zeroed_string, | 21388 | .iv = zeroed_string, |
20034 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07", | 21389 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" |
20035 | .alen = 8, | 21390 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
21391 | .alen = 16, | ||
20036 | .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" | 21392 | .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" |
20037 | "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" | 21393 | "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" |
20038 | "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" | 21394 | "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" |
@@ -20704,6 +22060,454 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
20704 | }; | 22060 | }; |
20705 | 22061 | ||
20706 | /* | 22062 | /* |
22063 | * ChaCha20-Poly1305 AEAD test vectors from RFC7539 sections 2.8.2 and A.5. | ||
22064 | */ | ||
22065 | #define RFC7539_ENC_TEST_VECTORS 2 | ||
22066 | #define RFC7539_DEC_TEST_VECTORS 2 | ||
22067 | static struct aead_testvec rfc7539_enc_tv_template[] = { | ||
22068 | { | ||
22069 | .key = "\x80\x81\x82\x83\x84\x85\x86\x87" | ||
22070 | "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" | ||
22071 | "\x90\x91\x92\x93\x94\x95\x96\x97" | ||
22072 | "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f", | ||
22073 | .klen = 32, | ||
22074 | .iv = "\x07\x00\x00\x00\x40\x41\x42\x43" | ||
22075 | "\x44\x45\x46\x47", | ||
22076 | .assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3" | ||
22077 | "\xc4\xc5\xc6\xc7", | ||
22078 | .alen = 12, | ||
22079 | .input = "\x4c\x61\x64\x69\x65\x73\x20\x61" | ||
22080 | "\x6e\x64\x20\x47\x65\x6e\x74\x6c" | ||
22081 | "\x65\x6d\x65\x6e\x20\x6f\x66\x20" | ||
22082 | "\x74\x68\x65\x20\x63\x6c\x61\x73" | ||
22083 | "\x73\x20\x6f\x66\x20\x27\x39\x39" | ||
22084 | "\x3a\x20\x49\x66\x20\x49\x20\x63" | ||
22085 | "\x6f\x75\x6c\x64\x20\x6f\x66\x66" | ||
22086 | "\x65\x72\x20\x79\x6f\x75\x20\x6f" | ||
22087 | "\x6e\x6c\x79\x20\x6f\x6e\x65\x20" | ||
22088 | "\x74\x69\x70\x20\x66\x6f\x72\x20" | ||
22089 | "\x74\x68\x65\x20\x66\x75\x74\x75" | ||
22090 | "\x72\x65\x2c\x20\x73\x75\x6e\x73" | ||
22091 | "\x63\x72\x65\x65\x6e\x20\x77\x6f" | ||
22092 | "\x75\x6c\x64\x20\x62\x65\x20\x69" | ||
22093 | "\x74\x2e", | ||
22094 | .ilen = 114, | ||
22095 | .result = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb" | ||
22096 | "\x7b\x86\xaf\xbc\x53\xef\x7e\xc2" | ||
22097 | "\xa4\xad\xed\x51\x29\x6e\x08\xfe" | ||
22098 | "\xa9\xe2\xb5\xa7\x36\xee\x62\xd6" | ||
22099 | "\x3d\xbe\xa4\x5e\x8c\xa9\x67\x12" | ||
22100 | "\x82\xfa\xfb\x69\xda\x92\x72\x8b" | ||
22101 | "\x1a\x71\xde\x0a\x9e\x06\x0b\x29" | ||
22102 | "\x05\xd6\xa5\xb6\x7e\xcd\x3b\x36" | ||
22103 | "\x92\xdd\xbd\x7f\x2d\x77\x8b\x8c" | ||
22104 | "\x98\x03\xae\xe3\x28\x09\x1b\x58" | ||
22105 | "\xfa\xb3\x24\xe4\xfa\xd6\x75\x94" | ||
22106 | "\x55\x85\x80\x8b\x48\x31\xd7\xbc" | ||
22107 | "\x3f\xf4\xde\xf0\x8e\x4b\x7a\x9d" | ||
22108 | "\xe5\x76\xd2\x65\x86\xce\xc6\x4b" | ||
22109 | "\x61\x16\x1a\xe1\x0b\x59\x4f\x09" | ||
22110 | "\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60" | ||
22111 | "\x06\x91", | ||
22112 | .rlen = 130, | ||
22113 | }, { | ||
22114 | .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
22115 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
22116 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
22117 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", | ||
22118 | .klen = 32, | ||
22119 | .iv = "\x00\x00\x00\x00\x01\x02\x03\x04" | ||
22120 | "\x05\x06\x07\x08", | ||
22121 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" | ||
22122 | "\x00\x00\x4e\x91", | ||
22123 | .alen = 12, | ||
22124 | .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74" | ||
22125 | "\x2d\x44\x72\x61\x66\x74\x73\x20" | ||
22126 | "\x61\x72\x65\x20\x64\x72\x61\x66" | ||
22127 | "\x74\x20\x64\x6f\x63\x75\x6d\x65" | ||
22128 | "\x6e\x74\x73\x20\x76\x61\x6c\x69" | ||
22129 | "\x64\x20\x66\x6f\x72\x20\x61\x20" | ||
22130 | "\x6d\x61\x78\x69\x6d\x75\x6d\x20" | ||
22131 | "\x6f\x66\x20\x73\x69\x78\x20\x6d" | ||
22132 | "\x6f\x6e\x74\x68\x73\x20\x61\x6e" | ||
22133 | "\x64\x20\x6d\x61\x79\x20\x62\x65" | ||
22134 | "\x20\x75\x70\x64\x61\x74\x65\x64" | ||
22135 | "\x2c\x20\x72\x65\x70\x6c\x61\x63" | ||
22136 | "\x65\x64\x2c\x20\x6f\x72\x20\x6f" | ||
22137 | "\x62\x73\x6f\x6c\x65\x74\x65\x64" | ||
22138 | "\x20\x62\x79\x20\x6f\x74\x68\x65" | ||
22139 | "\x72\x20\x64\x6f\x63\x75\x6d\x65" | ||
22140 | "\x6e\x74\x73\x20\x61\x74\x20\x61" | ||
22141 | "\x6e\x79\x20\x74\x69\x6d\x65\x2e" | ||
22142 | "\x20\x49\x74\x20\x69\x73\x20\x69" | ||
22143 | "\x6e\x61\x70\x70\x72\x6f\x70\x72" | ||
22144 | "\x69\x61\x74\x65\x20\x74\x6f\x20" | ||
22145 | "\x75\x73\x65\x20\x49\x6e\x74\x65" | ||
22146 | "\x72\x6e\x65\x74\x2d\x44\x72\x61" | ||
22147 | "\x66\x74\x73\x20\x61\x73\x20\x72" | ||
22148 | "\x65\x66\x65\x72\x65\x6e\x63\x65" | ||
22149 | "\x20\x6d\x61\x74\x65\x72\x69\x61" | ||
22150 | "\x6c\x20\x6f\x72\x20\x74\x6f\x20" | ||
22151 | "\x63\x69\x74\x65\x20\x74\x68\x65" | ||
22152 | "\x6d\x20\x6f\x74\x68\x65\x72\x20" | ||
22153 | "\x74\x68\x61\x6e\x20\x61\x73\x20" | ||
22154 | "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" | ||
22155 | "\x20\x69\x6e\x20\x70\x72\x6f\x67" | ||
22156 | "\x72\x65\x73\x73\x2e\x2f\xe2\x80" | ||
22157 | "\x9d", | ||
22158 | .ilen = 265, | ||
22159 | .result = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" | ||
22160 | "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" | ||
22161 | "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" | ||
22162 | "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" | ||
22163 | "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" | ||
22164 | "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" | ||
22165 | "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" | ||
22166 | "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" | ||
22167 | "\x33\x2f\x83\x0e\x71\x0b\x97\xce" | ||
22168 | "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" | ||
22169 | "\x14\xad\x17\x6e\x00\x8d\x33\xbd" | ||
22170 | "\x60\xf9\x82\xb1\xff\x37\xc8\x55" | ||
22171 | "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" | ||
22172 | "\xc1\x86\x32\x4e\x2b\x35\x06\x38" | ||
22173 | "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" | ||
22174 | "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" | ||
22175 | "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" | ||
22176 | "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" | ||
22177 | "\x90\x40\xc5\xa4\x04\x33\x22\x5e" | ||
22178 | "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" | ||
22179 | "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" | ||
22180 | "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" | ||
22181 | "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" | ||
22182 | "\x1b\x42\x22\x73\xf5\x48\x27\x1a" | ||
22183 | "\x0b\xb2\x31\x60\x53\xfa\x76\x99" | ||
22184 | "\x19\x55\xeb\xd6\x31\x59\x43\x4e" | ||
22185 | "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" | ||
22186 | "\x73\xa6\x72\x76\x27\x09\x7a\x10" | ||
22187 | "\x49\xe6\x17\xd9\x1d\x36\x10\x94" | ||
22188 | "\xfa\x68\xf0\xff\x77\x98\x71\x30" | ||
22189 | "\x30\x5b\xea\xba\x2e\xda\x04\xdf" | ||
22190 | "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" | ||
22191 | "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" | ||
22192 | "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" | ||
22193 | "\x22\x39\x23\x36\xfe\xa1\x85\x1f" | ||
22194 | "\x38", | ||
22195 | .rlen = 281, | ||
22196 | }, | ||
22197 | }; | ||
22198 | |||
22199 | static struct aead_testvec rfc7539_dec_tv_template[] = { | ||
22200 | { | ||
22201 | .key = "\x80\x81\x82\x83\x84\x85\x86\x87" | ||
22202 | "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" | ||
22203 | "\x90\x91\x92\x93\x94\x95\x96\x97" | ||
22204 | "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f", | ||
22205 | .klen = 32, | ||
22206 | .iv = "\x07\x00\x00\x00\x40\x41\x42\x43" | ||
22207 | "\x44\x45\x46\x47", | ||
22208 | .assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3" | ||
22209 | "\xc4\xc5\xc6\xc7", | ||
22210 | .alen = 12, | ||
22211 | .input = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb" | ||
22212 | "\x7b\x86\xaf\xbc\x53\xef\x7e\xc2" | ||
22213 | "\xa4\xad\xed\x51\x29\x6e\x08\xfe" | ||
22214 | "\xa9\xe2\xb5\xa7\x36\xee\x62\xd6" | ||
22215 | "\x3d\xbe\xa4\x5e\x8c\xa9\x67\x12" | ||
22216 | "\x82\xfa\xfb\x69\xda\x92\x72\x8b" | ||
22217 | "\x1a\x71\xde\x0a\x9e\x06\x0b\x29" | ||
22218 | "\x05\xd6\xa5\xb6\x7e\xcd\x3b\x36" | ||
22219 | "\x92\xdd\xbd\x7f\x2d\x77\x8b\x8c" | ||
22220 | "\x98\x03\xae\xe3\x28\x09\x1b\x58" | ||
22221 | "\xfa\xb3\x24\xe4\xfa\xd6\x75\x94" | ||
22222 | "\x55\x85\x80\x8b\x48\x31\xd7\xbc" | ||
22223 | "\x3f\xf4\xde\xf0\x8e\x4b\x7a\x9d" | ||
22224 | "\xe5\x76\xd2\x65\x86\xce\xc6\x4b" | ||
22225 | "\x61\x16\x1a\xe1\x0b\x59\x4f\x09" | ||
22226 | "\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60" | ||
22227 | "\x06\x91", | ||
22228 | .ilen = 130, | ||
22229 | .result = "\x4c\x61\x64\x69\x65\x73\x20\x61" | ||
22230 | "\x6e\x64\x20\x47\x65\x6e\x74\x6c" | ||
22231 | "\x65\x6d\x65\x6e\x20\x6f\x66\x20" | ||
22232 | "\x74\x68\x65\x20\x63\x6c\x61\x73" | ||
22233 | "\x73\x20\x6f\x66\x20\x27\x39\x39" | ||
22234 | "\x3a\x20\x49\x66\x20\x49\x20\x63" | ||
22235 | "\x6f\x75\x6c\x64\x20\x6f\x66\x66" | ||
22236 | "\x65\x72\x20\x79\x6f\x75\x20\x6f" | ||
22237 | "\x6e\x6c\x79\x20\x6f\x6e\x65\x20" | ||
22238 | "\x74\x69\x70\x20\x66\x6f\x72\x20" | ||
22239 | "\x74\x68\x65\x20\x66\x75\x74\x75" | ||
22240 | "\x72\x65\x2c\x20\x73\x75\x6e\x73" | ||
22241 | "\x63\x72\x65\x65\x6e\x20\x77\x6f" | ||
22242 | "\x75\x6c\x64\x20\x62\x65\x20\x69" | ||
22243 | "\x74\x2e", | ||
22244 | .rlen = 114, | ||
22245 | }, { | ||
22246 | .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
22247 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
22248 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
22249 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", | ||
22250 | .klen = 32, | ||
22251 | .iv = "\x00\x00\x00\x00\x01\x02\x03\x04" | ||
22252 | "\x05\x06\x07\x08", | ||
22253 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" | ||
22254 | "\x00\x00\x4e\x91", | ||
22255 | .alen = 12, | ||
22256 | .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" | ||
22257 | "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" | ||
22258 | "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" | ||
22259 | "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" | ||
22260 | "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" | ||
22261 | "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" | ||
22262 | "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" | ||
22263 | "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" | ||
22264 | "\x33\x2f\x83\x0e\x71\x0b\x97\xce" | ||
22265 | "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" | ||
22266 | "\x14\xad\x17\x6e\x00\x8d\x33\xbd" | ||
22267 | "\x60\xf9\x82\xb1\xff\x37\xc8\x55" | ||
22268 | "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" | ||
22269 | "\xc1\x86\x32\x4e\x2b\x35\x06\x38" | ||
22270 | "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" | ||
22271 | "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" | ||
22272 | "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" | ||
22273 | "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" | ||
22274 | "\x90\x40\xc5\xa4\x04\x33\x22\x5e" | ||
22275 | "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" | ||
22276 | "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" | ||
22277 | "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" | ||
22278 | "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" | ||
22279 | "\x1b\x42\x22\x73\xf5\x48\x27\x1a" | ||
22280 | "\x0b\xb2\x31\x60\x53\xfa\x76\x99" | ||
22281 | "\x19\x55\xeb\xd6\x31\x59\x43\x4e" | ||
22282 | "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" | ||
22283 | "\x73\xa6\x72\x76\x27\x09\x7a\x10" | ||
22284 | "\x49\xe6\x17\xd9\x1d\x36\x10\x94" | ||
22285 | "\xfa\x68\xf0\xff\x77\x98\x71\x30" | ||
22286 | "\x30\x5b\xea\xba\x2e\xda\x04\xdf" | ||
22287 | "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" | ||
22288 | "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" | ||
22289 | "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" | ||
22290 | "\x22\x39\x23\x36\xfe\xa1\x85\x1f" | ||
22291 | "\x38", | ||
22292 | .ilen = 281, | ||
22293 | .result = "\x49\x6e\x74\x65\x72\x6e\x65\x74" | ||
22294 | "\x2d\x44\x72\x61\x66\x74\x73\x20" | ||
22295 | "\x61\x72\x65\x20\x64\x72\x61\x66" | ||
22296 | "\x74\x20\x64\x6f\x63\x75\x6d\x65" | ||
22297 | "\x6e\x74\x73\x20\x76\x61\x6c\x69" | ||
22298 | "\x64\x20\x66\x6f\x72\x20\x61\x20" | ||
22299 | "\x6d\x61\x78\x69\x6d\x75\x6d\x20" | ||
22300 | "\x6f\x66\x20\x73\x69\x78\x20\x6d" | ||
22301 | "\x6f\x6e\x74\x68\x73\x20\x61\x6e" | ||
22302 | "\x64\x20\x6d\x61\x79\x20\x62\x65" | ||
22303 | "\x20\x75\x70\x64\x61\x74\x65\x64" | ||
22304 | "\x2c\x20\x72\x65\x70\x6c\x61\x63" | ||
22305 | "\x65\x64\x2c\x20\x6f\x72\x20\x6f" | ||
22306 | "\x62\x73\x6f\x6c\x65\x74\x65\x64" | ||
22307 | "\x20\x62\x79\x20\x6f\x74\x68\x65" | ||
22308 | "\x72\x20\x64\x6f\x63\x75\x6d\x65" | ||
22309 | "\x6e\x74\x73\x20\x61\x74\x20\x61" | ||
22310 | "\x6e\x79\x20\x74\x69\x6d\x65\x2e" | ||
22311 | "\x20\x49\x74\x20\x69\x73\x20\x69" | ||
22312 | "\x6e\x61\x70\x70\x72\x6f\x70\x72" | ||
22313 | "\x69\x61\x74\x65\x20\x74\x6f\x20" | ||
22314 | "\x75\x73\x65\x20\x49\x6e\x74\x65" | ||
22315 | "\x72\x6e\x65\x74\x2d\x44\x72\x61" | ||
22316 | "\x66\x74\x73\x20\x61\x73\x20\x72" | ||
22317 | "\x65\x66\x65\x72\x65\x6e\x63\x65" | ||
22318 | "\x20\x6d\x61\x74\x65\x72\x69\x61" | ||
22319 | "\x6c\x20\x6f\x72\x20\x74\x6f\x20" | ||
22320 | "\x63\x69\x74\x65\x20\x74\x68\x65" | ||
22321 | "\x6d\x20\x6f\x74\x68\x65\x72\x20" | ||
22322 | "\x74\x68\x61\x6e\x20\x61\x73\x20" | ||
22323 | "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" | ||
22324 | "\x20\x69\x6e\x20\x70\x72\x6f\x67" | ||
22325 | "\x72\x65\x73\x73\x2e\x2f\xe2\x80" | ||
22326 | "\x9d", | ||
22327 | .rlen = 265, | ||
22328 | }, | ||
22329 | }; | ||
22330 | |||
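Each rfc7539 entry above pairs a key, 12-byte nonce, associated data and plaintext with the expected ciphertext plus 16-byte Poly1305 tag. As a rough illustration of how one encryption vector maps onto the new single-SG-list AEAD interface, here is a minimal sketch; it is not part of the patch, the demo_ helper name is invented, error paths are trimmed, and synchronous completion is assumed:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: run one rfc7539 encryption test vector. */
static int demo_rfc7539_encrypt(const struct aead_testvec *tv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	char *buf;
	int err;

	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, tv->key, tv->klen);
	if (err)
		goto out_tfm;
	err = crypto_aead_setauthsize(tfm, 16);		/* Poly1305 tag */
	if (err)
		goto out_tfm;

	/* One buffer and SG list for AD || plaintext || room for the tag. */
	buf = kzalloc(tv->alen + tv->ilen + 16, GFP_KERNEL);
	memcpy(buf, tv->assoc, tv->alen);
	memcpy(buf + tv->alen, tv->input, tv->ilen);
	sg_init_one(&sg, buf, tv->alen + tv->ilen + 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	aead_request_set_ad(req, tv->alen);
	aead_request_set_crypt(req, &sg, &sg, tv->ilen, (u8 *)tv->iv);
	err = crypto_aead_encrypt(req);
	/* On success, buf + tv->alen holds tv->rlen bytes matching tv->result. */

	aead_request_free(req);
	kfree(buf);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}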
22331 | /* | ||
22332 | * ChaCha20-Poly1305 IPsec ESP test vectors from draft-irtf-cfrg-chacha20-poly1305 | ||
22333 | */ | ||
22334 | #define RFC7539ESP_DEC_TEST_VECTORS 1 | ||
22335 | #define RFC7539ESP_ENC_TEST_VECTORS 1 | ||
22336 | static struct aead_testvec rfc7539esp_enc_tv_template[] = { | ||
22337 | { | ||
22338 | .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
22339 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
22340 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
22341 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" | ||
22342 | "\x00\x00\x00\x00", | ||
22343 | .klen = 36, | ||
22344 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
22345 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" | ||
22346 | "\x00\x00\x4e\x91", | ||
22347 | .alen = 12, | ||
22348 | .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74" | ||
22349 | "\x2d\x44\x72\x61\x66\x74\x73\x20" | ||
22350 | "\x61\x72\x65\x20\x64\x72\x61\x66" | ||
22351 | "\x74\x20\x64\x6f\x63\x75\x6d\x65" | ||
22352 | "\x6e\x74\x73\x20\x76\x61\x6c\x69" | ||
22353 | "\x64\x20\x66\x6f\x72\x20\x61\x20" | ||
22354 | "\x6d\x61\x78\x69\x6d\x75\x6d\x20" | ||
22355 | "\x6f\x66\x20\x73\x69\x78\x20\x6d" | ||
22356 | "\x6f\x6e\x74\x68\x73\x20\x61\x6e" | ||
22357 | "\x64\x20\x6d\x61\x79\x20\x62\x65" | ||
22358 | "\x20\x75\x70\x64\x61\x74\x65\x64" | ||
22359 | "\x2c\x20\x72\x65\x70\x6c\x61\x63" | ||
22360 | "\x65\x64\x2c\x20\x6f\x72\x20\x6f" | ||
22361 | "\x62\x73\x6f\x6c\x65\x74\x65\x64" | ||
22362 | "\x20\x62\x79\x20\x6f\x74\x68\x65" | ||
22363 | "\x72\x20\x64\x6f\x63\x75\x6d\x65" | ||
22364 | "\x6e\x74\x73\x20\x61\x74\x20\x61" | ||
22365 | "\x6e\x79\x20\x74\x69\x6d\x65\x2e" | ||
22366 | "\x20\x49\x74\x20\x69\x73\x20\x69" | ||
22367 | "\x6e\x61\x70\x70\x72\x6f\x70\x72" | ||
22368 | "\x69\x61\x74\x65\x20\x74\x6f\x20" | ||
22369 | "\x75\x73\x65\x20\x49\x6e\x74\x65" | ||
22370 | "\x72\x6e\x65\x74\x2d\x44\x72\x61" | ||
22371 | "\x66\x74\x73\x20\x61\x73\x20\x72" | ||
22372 | "\x65\x66\x65\x72\x65\x6e\x63\x65" | ||
22373 | "\x20\x6d\x61\x74\x65\x72\x69\x61" | ||
22374 | "\x6c\x20\x6f\x72\x20\x74\x6f\x20" | ||
22375 | "\x63\x69\x74\x65\x20\x74\x68\x65" | ||
22376 | "\x6d\x20\x6f\x74\x68\x65\x72\x20" | ||
22377 | "\x74\x68\x61\x6e\x20\x61\x73\x20" | ||
22378 | "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" | ||
22379 | "\x20\x69\x6e\x20\x70\x72\x6f\x67" | ||
22380 | "\x72\x65\x73\x73\x2e\x2f\xe2\x80" | ||
22381 | "\x9d", | ||
22382 | .ilen = 265, | ||
22383 | .result = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" | ||
22384 | "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" | ||
22385 | "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" | ||
22386 | "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" | ||
22387 | "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" | ||
22388 | "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" | ||
22389 | "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" | ||
22390 | "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" | ||
22391 | "\x33\x2f\x83\x0e\x71\x0b\x97\xce" | ||
22392 | "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" | ||
22393 | "\x14\xad\x17\x6e\x00\x8d\x33\xbd" | ||
22394 | "\x60\xf9\x82\xb1\xff\x37\xc8\x55" | ||
22395 | "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" | ||
22396 | "\xc1\x86\x32\x4e\x2b\x35\x06\x38" | ||
22397 | "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" | ||
22398 | "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" | ||
22399 | "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" | ||
22400 | "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" | ||
22401 | "\x90\x40\xc5\xa4\x04\x33\x22\x5e" | ||
22402 | "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" | ||
22403 | "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" | ||
22404 | "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" | ||
22405 | "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" | ||
22406 | "\x1b\x42\x22\x73\xf5\x48\x27\x1a" | ||
22407 | "\x0b\xb2\x31\x60\x53\xfa\x76\x99" | ||
22408 | "\x19\x55\xeb\xd6\x31\x59\x43\x4e" | ||
22409 | "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" | ||
22410 | "\x73\xa6\x72\x76\x27\x09\x7a\x10" | ||
22411 | "\x49\xe6\x17\xd9\x1d\x36\x10\x94" | ||
22412 | "\xfa\x68\xf0\xff\x77\x98\x71\x30" | ||
22413 | "\x30\x5b\xea\xba\x2e\xda\x04\xdf" | ||
22414 | "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" | ||
22415 | "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" | ||
22416 | "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" | ||
22417 | "\x22\x39\x23\x36\xfe\xa1\x85\x1f" | ||
22418 | "\x38", | ||
22419 | .rlen = 281, | ||
22420 | }, | ||
22421 | }; | ||
22422 | |||
22423 | static struct aead_testvec rfc7539esp_dec_tv_template[] = { | ||
22424 | { | ||
22425 | .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
22426 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
22427 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
22428 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" | ||
22429 | "\x00\x00\x00\x00", | ||
22430 | .klen = 36, | ||
22431 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
22432 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" | ||
22433 | "\x00\x00\x4e\x91", | ||
22434 | .alen = 12, | ||
22435 | .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" | ||
22436 | "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" | ||
22437 | "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" | ||
22438 | "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" | ||
22439 | "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" | ||
22440 | "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" | ||
22441 | "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" | ||
22442 | "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" | ||
22443 | "\x33\x2f\x83\x0e\x71\x0b\x97\xce" | ||
22444 | "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" | ||
22445 | "\x14\xad\x17\x6e\x00\x8d\x33\xbd" | ||
22446 | "\x60\xf9\x82\xb1\xff\x37\xc8\x55" | ||
22447 | "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" | ||
22448 | "\xc1\x86\x32\x4e\x2b\x35\x06\x38" | ||
22449 | "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" | ||
22450 | "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" | ||
22451 | "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" | ||
22452 | "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" | ||
22453 | "\x90\x40\xc5\xa4\x04\x33\x22\x5e" | ||
22454 | "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" | ||
22455 | "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" | ||
22456 | "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" | ||
22457 | "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" | ||
22458 | "\x1b\x42\x22\x73\xf5\x48\x27\x1a" | ||
22459 | "\x0b\xb2\x31\x60\x53\xfa\x76\x99" | ||
22460 | "\x19\x55\xeb\xd6\x31\x59\x43\x4e" | ||
22461 | "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" | ||
22462 | "\x73\xa6\x72\x76\x27\x09\x7a\x10" | ||
22463 | "\x49\xe6\x17\xd9\x1d\x36\x10\x94" | ||
22464 | "\xfa\x68\xf0\xff\x77\x98\x71\x30" | ||
22465 | "\x30\x5b\xea\xba\x2e\xda\x04\xdf" | ||
22466 | "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" | ||
22467 | "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" | ||
22468 | "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" | ||
22469 | "\x22\x39\x23\x36\xfe\xa1\x85\x1f" | ||
22470 | "\x38", | ||
22471 | .ilen = 281, | ||
22472 | .result = "\x49\x6e\x74\x65\x72\x6e\x65\x74" | ||
22473 | "\x2d\x44\x72\x61\x66\x74\x73\x20" | ||
22474 | "\x61\x72\x65\x20\x64\x72\x61\x66" | ||
22475 | "\x74\x20\x64\x6f\x63\x75\x6d\x65" | ||
22476 | "\x6e\x74\x73\x20\x76\x61\x6c\x69" | ||
22477 | "\x64\x20\x66\x6f\x72\x20\x61\x20" | ||
22478 | "\x6d\x61\x78\x69\x6d\x75\x6d\x20" | ||
22479 | "\x6f\x66\x20\x73\x69\x78\x20\x6d" | ||
22480 | "\x6f\x6e\x74\x68\x73\x20\x61\x6e" | ||
22481 | "\x64\x20\x6d\x61\x79\x20\x62\x65" | ||
22482 | "\x20\x75\x70\x64\x61\x74\x65\x64" | ||
22483 | "\x2c\x20\x72\x65\x70\x6c\x61\x63" | ||
22484 | "\x65\x64\x2c\x20\x6f\x72\x20\x6f" | ||
22485 | "\x62\x73\x6f\x6c\x65\x74\x65\x64" | ||
22486 | "\x20\x62\x79\x20\x6f\x74\x68\x65" | ||
22487 | "\x72\x20\x64\x6f\x63\x75\x6d\x65" | ||
22488 | "\x6e\x74\x73\x20\x61\x74\x20\x61" | ||
22489 | "\x6e\x79\x20\x74\x69\x6d\x65\x2e" | ||
22490 | "\x20\x49\x74\x20\x69\x73\x20\x69" | ||
22491 | "\x6e\x61\x70\x70\x72\x6f\x70\x72" | ||
22492 | "\x69\x61\x74\x65\x20\x74\x6f\x20" | ||
22493 | "\x75\x73\x65\x20\x49\x6e\x74\x65" | ||
22494 | "\x72\x6e\x65\x74\x2d\x44\x72\x61" | ||
22495 | "\x66\x74\x73\x20\x61\x73\x20\x72" | ||
22496 | "\x65\x66\x65\x72\x65\x6e\x63\x65" | ||
22497 | "\x20\x6d\x61\x74\x65\x72\x69\x61" | ||
22498 | "\x6c\x20\x6f\x72\x20\x74\x6f\x20" | ||
22499 | "\x63\x69\x74\x65\x20\x74\x68\x65" | ||
22500 | "\x6d\x20\x6f\x74\x68\x65\x72\x20" | ||
22501 | "\x74\x68\x61\x6e\x20\x61\x73\x20" | ||
22502 | "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" | ||
22503 | "\x20\x69\x6e\x20\x70\x72\x6f\x67" | ||
22504 | "\x72\x65\x73\x73\x2e\x2f\xe2\x80" | ||
22505 | "\x9d", | ||
22506 | .rlen = 265, | ||
22507 | }, | ||
22508 | }; | ||
22509 | |||
22510 | /* | ||
20707 | * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode) | 22511 | * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode) |
20708 | * test vectors, taken from Appendix B.2.9 and B.2.10: | 22512 | * test vectors, taken from Appendix B.2.9 and B.2.10: |
20709 | * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf | 22513 | * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf |
@@ -28370,6 +30174,183 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = { | |||
28370 | }, | 30174 | }, |
28371 | }; | 30175 | }; |
28372 | 30176 | ||
30177 | #define CHACHA20_ENC_TEST_VECTORS 3 | ||
30178 | static struct cipher_testvec chacha20_enc_tv_template[] = { | ||
30179 | { /* RFC7539 A.2. Test Vector #1 */ | ||
30180 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30181 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30182 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30183 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
30184 | .klen = 32, | ||
30185 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30186 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
30187 | .input = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30188 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30189 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30190 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30191 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30192 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30193 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30194 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
30195 | .ilen = 64, | ||
30196 | .result = "\x76\xb8\xe0\xad\xa0\xf1\x3d\x90" | ||
30197 | "\x40\x5d\x6a\xe5\x53\x86\xbd\x28" | ||
30198 | "\xbd\xd2\x19\xb8\xa0\x8d\xed\x1a" | ||
30199 | "\xa8\x36\xef\xcc\x8b\x77\x0d\xc7" | ||
30200 | "\xda\x41\x59\x7c\x51\x57\x48\x8d" | ||
30201 | "\x77\x24\xe0\x3f\xb8\xd8\x4a\x37" | ||
30202 | "\x6a\x43\xb8\xf4\x15\x18\xa1\x1c" | ||
30203 | "\xc3\x87\xb6\x69\xb2\xee\x65\x86", | ||
30204 | .rlen = 64, | ||
30205 | }, { /* RFC7539 A.2. Test Vector #2 */ | ||
30206 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30207 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30208 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
30209 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
30210 | .klen = 32, | ||
30211 | .iv = "\x01\x00\x00\x00\x00\x00\x00\x00" | ||
30212 | "\x00\x00\x00\x00\x00\x00\x00\x02", | ||
30213 | .input = "\x41\x6e\x79\x20\x73\x75\x62\x6d" | ||
30214 | "\x69\x73\x73\x69\x6f\x6e\x20\x74" | ||
30215 | "\x6f\x20\x74\x68\x65\x20\x49\x45" | ||
30216 | "\x54\x46\x20\x69\x6e\x74\x65\x6e" | ||
30217 | "\x64\x65\x64\x20\x62\x79\x20\x74" | ||
30218 | "\x68\x65\x20\x43\x6f\x6e\x74\x72" | ||
30219 | "\x69\x62\x75\x74\x6f\x72\x20\x66" | ||
30220 | "\x6f\x72\x20\x70\x75\x62\x6c\x69" | ||
30221 | "\x63\x61\x74\x69\x6f\x6e\x20\x61" | ||
30222 | "\x73\x20\x61\x6c\x6c\x20\x6f\x72" | ||
30223 | "\x20\x70\x61\x72\x74\x20\x6f\x66" | ||
30224 | "\x20\x61\x6e\x20\x49\x45\x54\x46" | ||
30225 | "\x20\x49\x6e\x74\x65\x72\x6e\x65" | ||
30226 | "\x74\x2d\x44\x72\x61\x66\x74\x20" | ||
30227 | "\x6f\x72\x20\x52\x46\x43\x20\x61" | ||
30228 | "\x6e\x64\x20\x61\x6e\x79\x20\x73" | ||
30229 | "\x74\x61\x74\x65\x6d\x65\x6e\x74" | ||
30230 | "\x20\x6d\x61\x64\x65\x20\x77\x69" | ||
30231 | "\x74\x68\x69\x6e\x20\x74\x68\x65" | ||
30232 | "\x20\x63\x6f\x6e\x74\x65\x78\x74" | ||
30233 | "\x20\x6f\x66\x20\x61\x6e\x20\x49" | ||
30234 | "\x45\x54\x46\x20\x61\x63\x74\x69" | ||
30235 | "\x76\x69\x74\x79\x20\x69\x73\x20" | ||
30236 | "\x63\x6f\x6e\x73\x69\x64\x65\x72" | ||
30237 | "\x65\x64\x20\x61\x6e\x20\x22\x49" | ||
30238 | "\x45\x54\x46\x20\x43\x6f\x6e\x74" | ||
30239 | "\x72\x69\x62\x75\x74\x69\x6f\x6e" | ||
30240 | "\x22\x2e\x20\x53\x75\x63\x68\x20" | ||
30241 | "\x73\x74\x61\x74\x65\x6d\x65\x6e" | ||
30242 | "\x74\x73\x20\x69\x6e\x63\x6c\x75" | ||
30243 | "\x64\x65\x20\x6f\x72\x61\x6c\x20" | ||
30244 | "\x73\x74\x61\x74\x65\x6d\x65\x6e" | ||
30245 | "\x74\x73\x20\x69\x6e\x20\x49\x45" | ||
30246 | "\x54\x46\x20\x73\x65\x73\x73\x69" | ||
30247 | "\x6f\x6e\x73\x2c\x20\x61\x73\x20" | ||
30248 | "\x77\x65\x6c\x6c\x20\x61\x73\x20" | ||
30249 | "\x77\x72\x69\x74\x74\x65\x6e\x20" | ||
30250 | "\x61\x6e\x64\x20\x65\x6c\x65\x63" | ||
30251 | "\x74\x72\x6f\x6e\x69\x63\x20\x63" | ||
30252 | "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" | ||
30253 | "\x74\x69\x6f\x6e\x73\x20\x6d\x61" | ||
30254 | "\x64\x65\x20\x61\x74\x20\x61\x6e" | ||
30255 | "\x79\x20\x74\x69\x6d\x65\x20\x6f" | ||
30256 | "\x72\x20\x70\x6c\x61\x63\x65\x2c" | ||
30257 | "\x20\x77\x68\x69\x63\x68\x20\x61" | ||
30258 | "\x72\x65\x20\x61\x64\x64\x72\x65" | ||
30259 | "\x73\x73\x65\x64\x20\x74\x6f", | ||
30260 | .ilen = 375, | ||
30261 | .result = "\xa3\xfb\xf0\x7d\xf3\xfa\x2f\xde" | ||
30262 | "\x4f\x37\x6c\xa2\x3e\x82\x73\x70" | ||
30263 | "\x41\x60\x5d\x9f\x4f\x4f\x57\xbd" | ||
30264 | "\x8c\xff\x2c\x1d\x4b\x79\x55\xec" | ||
30265 | "\x2a\x97\x94\x8b\xd3\x72\x29\x15" | ||
30266 | "\xc8\xf3\xd3\x37\xf7\xd3\x70\x05" | ||
30267 | "\x0e\x9e\x96\xd6\x47\xb7\xc3\x9f" | ||
30268 | "\x56\xe0\x31\xca\x5e\xb6\x25\x0d" | ||
30269 | "\x40\x42\xe0\x27\x85\xec\xec\xfa" | ||
30270 | "\x4b\x4b\xb5\xe8\xea\xd0\x44\x0e" | ||
30271 | "\x20\xb6\xe8\xdb\x09\xd8\x81\xa7" | ||
30272 | "\xc6\x13\x2f\x42\x0e\x52\x79\x50" | ||
30273 | "\x42\xbd\xfa\x77\x73\xd8\xa9\x05" | ||
30274 | "\x14\x47\xb3\x29\x1c\xe1\x41\x1c" | ||
30275 | "\x68\x04\x65\x55\x2a\xa6\xc4\x05" | ||
30276 | "\xb7\x76\x4d\x5e\x87\xbe\xa8\x5a" | ||
30277 | "\xd0\x0f\x84\x49\xed\x8f\x72\xd0" | ||
30278 | "\xd6\x62\xab\x05\x26\x91\xca\x66" | ||
30279 | "\x42\x4b\xc8\x6d\x2d\xf8\x0e\xa4" | ||
30280 | "\x1f\x43\xab\xf9\x37\xd3\x25\x9d" | ||
30281 | "\xc4\xb2\xd0\xdf\xb4\x8a\x6c\x91" | ||
30282 | "\x39\xdd\xd7\xf7\x69\x66\xe9\x28" | ||
30283 | "\xe6\x35\x55\x3b\xa7\x6c\x5c\x87" | ||
30284 | "\x9d\x7b\x35\xd4\x9e\xb2\xe6\x2b" | ||
30285 | "\x08\x71\xcd\xac\x63\x89\x39\xe2" | ||
30286 | "\x5e\x8a\x1e\x0e\xf9\xd5\x28\x0f" | ||
30287 | "\xa8\xca\x32\x8b\x35\x1c\x3c\x76" | ||
30288 | "\x59\x89\xcb\xcf\x3d\xaa\x8b\x6c" | ||
30289 | "\xcc\x3a\xaf\x9f\x39\x79\xc9\x2b" | ||
30290 | "\x37\x20\xfc\x88\xdc\x95\xed\x84" | ||
30291 | "\xa1\xbe\x05\x9c\x64\x99\xb9\xfd" | ||
30292 | "\xa2\x36\xe7\xe8\x18\xb0\x4b\x0b" | ||
30293 | "\xc3\x9c\x1e\x87\x6b\x19\x3b\xfe" | ||
30294 | "\x55\x69\x75\x3f\x88\x12\x8c\xc0" | ||
30295 | "\x8a\xaa\x9b\x63\xd1\xa1\x6f\x80" | ||
30296 | "\xef\x25\x54\xd7\x18\x9c\x41\x1f" | ||
30297 | "\x58\x69\xca\x52\xc5\xb8\x3f\xa3" | ||
30298 | "\x6f\xf2\x16\xb9\xc1\xd3\x00\x62" | ||
30299 | "\xbe\xbc\xfd\x2d\xc5\xbc\xe0\x91" | ||
30300 | "\x19\x34\xfd\xa7\x9a\x86\xf6\xe6" | ||
30301 | "\x98\xce\xd7\x59\xc3\xff\x9b\x64" | ||
30302 | "\x77\x33\x8f\x3d\xa4\xf9\xcd\x85" | ||
30303 | "\x14\xea\x99\x82\xcc\xaf\xb3\x41" | ||
30304 | "\xb2\x38\x4d\xd9\x02\xf3\xd1\xab" | ||
30305 | "\x7a\xc6\x1d\xd2\x9c\x6f\x21\xba" | ||
30306 | "\x5b\x86\x2f\x37\x30\xe3\x7c\xfd" | ||
30307 | "\xc4\xfd\x80\x6c\x22\xf2\x21", | ||
30308 | .rlen = 375, | ||
30309 | }, { /* RFC7539 A.2. Test Vector #3 */ | ||
30310 | .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
30311 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
30312 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
30313 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", | ||
30314 | .klen = 32, | ||
30315 | .iv = "\x2a\x00\x00\x00\x00\x00\x00\x00" | ||
30316 | "\x00\x00\x00\x00\x00\x00\x00\x02", | ||
30317 | .input = "\x27\x54\x77\x61\x73\x20\x62\x72" | ||
30318 | "\x69\x6c\x6c\x69\x67\x2c\x20\x61" | ||
30319 | "\x6e\x64\x20\x74\x68\x65\x20\x73" | ||
30320 | "\x6c\x69\x74\x68\x79\x20\x74\x6f" | ||
30321 | "\x76\x65\x73\x0a\x44\x69\x64\x20" | ||
30322 | "\x67\x79\x72\x65\x20\x61\x6e\x64" | ||
30323 | "\x20\x67\x69\x6d\x62\x6c\x65\x20" | ||
30324 | "\x69\x6e\x20\x74\x68\x65\x20\x77" | ||
30325 | "\x61\x62\x65\x3a\x0a\x41\x6c\x6c" | ||
30326 | "\x20\x6d\x69\x6d\x73\x79\x20\x77" | ||
30327 | "\x65\x72\x65\x20\x74\x68\x65\x20" | ||
30328 | "\x62\x6f\x72\x6f\x67\x6f\x76\x65" | ||
30329 | "\x73\x2c\x0a\x41\x6e\x64\x20\x74" | ||
30330 | "\x68\x65\x20\x6d\x6f\x6d\x65\x20" | ||
30331 | "\x72\x61\x74\x68\x73\x20\x6f\x75" | ||
30332 | "\x74\x67\x72\x61\x62\x65\x2e", | ||
30333 | .ilen = 127, | ||
30334 | .result = "\x62\xe6\x34\x7f\x95\xed\x87\xa4" | ||
30335 | "\x5f\xfa\xe7\x42\x6f\x27\xa1\xdf" | ||
30336 | "\x5f\xb6\x91\x10\x04\x4c\x0d\x73" | ||
30337 | "\x11\x8e\xff\xa9\x5b\x01\xe5\xcf" | ||
30338 | "\x16\x6d\x3d\xf2\xd7\x21\xca\xf9" | ||
30339 | "\xb2\x1e\x5f\xb1\x4c\x61\x68\x71" | ||
30340 | "\xfd\x84\xc5\x4f\x9d\x65\xb2\x83" | ||
30341 | "\x19\x6c\x7f\xe4\xf6\x05\x53\xeb" | ||
30342 | "\xf3\x9c\x64\x02\xc4\x22\x34\xe3" | ||
30343 | "\x2a\x35\x6b\x3e\x76\x43\x12\xa6" | ||
30344 | "\x1a\x55\x32\x05\x57\x16\xea\xd6" | ||
30345 | "\x96\x25\x68\xf8\x7d\x3f\x3f\x77" | ||
30346 | "\x04\xc6\xa8\xd1\xbc\xd1\xbf\x4d" | ||
30347 | "\x50\xd6\x15\x4b\x6d\xa7\x31\xb1" | ||
30348 | "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36" | ||
30349 | "\x75\x7a\x79\x7a\xc1\x88\xd1", | ||
30350 | .rlen = 127, | ||
30351 | }, | ||
30352 | }; | ||
30353 | |||
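Note the 16-byte .iv on these chacha20 entries even though RFC7539 defines a 96-bit nonce: the four leading bytes carry the initial block counter, which is why Test Vector #2 (counter = 1) begins with \x01. A minimal sketch of that presumed layout, assuming the RFC7539 state-word order (not spelled out in this hunk; the demo_ helper is invented):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Hypothetical helper: load ChaCha20 state words 12-15 from a testvec .iv. */
static void demo_chacha20_iv_to_state(u32 state[16], const u8 iv[16])
{
	state[12] = get_unaligned_le32(iv + 0);		/* initial block counter */
	state[13] = get_unaligned_le32(iv + 4);		/* nonce word 0 */
	state[14] = get_unaligned_le32(iv + 8);		/* nonce word 1 */
	state[15] = get_unaligned_le32(iv + 12);	/* nonce word 2 */
}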
28373 | /* | 30354 | /* |
28374 | * CTS (Cipher Text Stealing) mode tests | 30355 | * CTS (Cipher Text Stealing) mode tests |
28375 | */ | 30356 | */ |
@@ -28591,7 +30572,7 @@ struct comp_testvec { | |||
28591 | }; | 30572 | }; |
28592 | 30573 | ||
28593 | struct pcomp_testvec { | 30574 | struct pcomp_testvec { |
28594 | void *params; | 30575 | const void *params; |
28595 | unsigned int paramsize; | 30576 | unsigned int paramsize; |
28596 | int inlen, outlen; | 30577 | int inlen, outlen; |
28597 | char input[COMP_BUF_SIZE]; | 30578 | char input[COMP_BUF_SIZE]; |
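The hunk above const-qualifies the pcomp params blob, matching the zlib_compress_setup signature change at the end of this diff. The practical effect, sketched below under the assumption that crypto_compress_setup takes the same const pointer (the demo_ name is invented), is that parameter blobs can be forwarded from read-only storage without casts:

/* Hypothetical forwarding helper: no const cast needed any more. */
static int demo_setup(struct crypto_pcomp *tfm,
		      const void *params, unsigned int len)
{
	return crypto_compress_setup(tfm, params, len);
}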
@@ -28946,6 +30927,440 @@ static struct hash_testvec michael_mic_tv_template[] = { | |||
28946 | }; | 30927 | }; |
28947 | 30928 | ||
28948 | /* | 30929 | /* |
30930 | * CRC32 test vectors | ||
30931 | */ | ||
30932 | #define CRC32_TEST_VECTORS 14 | ||
30933 | |||
30934 | static struct hash_testvec crc32_tv_template[] = { | ||
30935 | { | ||
30936 | .key = "\x87\xa9\xcb\xed", | ||
30937 | .ksize = 4, | ||
30938 | .psize = 0, | ||
30939 | .digest = "\x87\xa9\xcb\xed", | ||
30940 | }, | ||
30941 | { | ||
30942 | .key = "\xff\xff\xff\xff", | ||
30943 | .ksize = 4, | ||
30944 | .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
30945 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" | ||
30946 | "\x11\x12\x13\x14\x15\x16\x17\x18" | ||
30947 | "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" | ||
30948 | "\x21\x22\x23\x24\x25\x26\x27\x28", | ||
30949 | .psize = 40, | ||
30950 | .digest = "\x3a\xdf\x4b\xb0", | ||
30951 | }, | ||
30952 | { | ||
30953 | .key = "\xff\xff\xff\xff", | ||
30954 | .ksize = 4, | ||
30955 | .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" | ||
30956 | "\x31\x32\x33\x34\x35\x36\x37\x38" | ||
30957 | "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" | ||
30958 | "\x41\x42\x43\x44\x45\x46\x47\x48" | ||
30959 | "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50", | ||
30960 | .psize = 40, | ||
30961 | .digest = "\xa9\x7a\x7f\x7b", | ||
30962 | }, | ||
30963 | { | ||
30964 | .key = "\xff\xff\xff\xff", | ||
30965 | .ksize = 4, | ||
30966 | .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58" | ||
30967 | "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" | ||
30968 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
30969 | "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" | ||
30970 | "\x71\x72\x73\x74\x75\x76\x77\x78", | ||
30971 | .psize = 40, | ||
30972 | .digest = "\xba\xd3\xf8\x1c", | ||
30973 | }, | ||
30974 | { | ||
30975 | .key = "\xff\xff\xff\xff", | ||
30976 | .ksize = 4, | ||
30977 | .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" | ||
30978 | "\x81\x82\x83\x84\x85\x86\x87\x88" | ||
30979 | "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" | ||
30980 | "\x91\x92\x93\x94\x95\x96\x97\x98" | ||
30981 | "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0", | ||
30982 | .psize = 40, | ||
30983 | .digest = "\xa8\xa9\xc2\x02", | ||
30984 | }, | ||
30985 | { | ||
30986 | .key = "\xff\xff\xff\xff", | ||
30987 | .ksize = 4, | ||
30988 | .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" | ||
30989 | "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" | ||
30990 | "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" | ||
30991 | "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" | ||
30992 | "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8", | ||
30993 | .psize = 40, | ||
30994 | .digest = "\x27\xf0\x57\xe2", | ||
30995 | }, | ||
30996 | { | ||
30997 | .key = "\xff\xff\xff\xff", | ||
30998 | .ksize = 4, | ||
30999 | .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" | ||
31000 | "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" | ||
31001 | "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" | ||
31002 | "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" | ||
31003 | "\xe9\xea\xeb\xec\xed\xee\xef\xf0", | ||
31004 | .psize = 40, | ||
31005 | .digest = "\x49\x78\x10\x08", | ||
31006 | }, | ||
31007 | { | ||
31008 | .key = "\x80\xea\xd3\xf1", | ||
31009 | .ksize = 4, | ||
31010 | .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" | ||
31011 | "\x31\x32\x33\x34\x35\x36\x37\x38" | ||
31012 | "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" | ||
31013 | "\x41\x42\x43\x44\x45\x46\x47\x48" | ||
31014 | "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50", | ||
31015 | .psize = 40, | ||
31016 | .digest = "\x9a\xb1\xdc\xf0", | ||
31017 | }, | ||
31018 | { | ||
31019 | .key = "\xf3\x4a\x1d\x5d", | ||
31020 | .ksize = 4, | ||
31021 | .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58" | ||
31022 | "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" | ||
31023 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
31024 | "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" | ||
31025 | "\x71\x72\x73\x74\x75\x76\x77\x78", | ||
31026 | .psize = 40, | ||
31027 | .digest = "\xb4\x97\xcc\xd4", | ||
31028 | }, | ||
31029 | { | ||
31030 | .key = "\x2e\x80\x04\x59", | ||
31031 | .ksize = 4, | ||
31032 | .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" | ||
31033 | "\x81\x82\x83\x84\x85\x86\x87\x88" | ||
31034 | "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" | ||
31035 | "\x91\x92\x93\x94\x95\x96\x97\x98" | ||
31036 | "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0", | ||
31037 | .psize = 40, | ||
31038 | .digest = "\x67\x9b\xfa\x79", | ||
31039 | }, | ||
31040 | { | ||
31041 | .key = "\xa6\xcc\x19\x85", | ||
31042 | .ksize = 4, | ||
31043 | .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" | ||
31044 | "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" | ||
31045 | "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" | ||
31046 | "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" | ||
31047 | "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8", | ||
31048 | .psize = 40, | ||
31049 | .digest = "\x24\xb5\x16\xef", | ||
31050 | }, | ||
31051 | { | ||
31052 | .key = "\x41\xfc\xfe\x2d", | ||
31053 | .ksize = 4, | ||
31054 | .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" | ||
31055 | "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" | ||
31056 | "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" | ||
31057 | "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" | ||
31058 | "\xe9\xea\xeb\xec\xed\xee\xef\xf0", | ||
31059 | .psize = 40, | ||
31060 | .digest = "\x15\x94\x80\x39", | ||
31061 | }, | ||
31062 | { | ||
31063 | .key = "\xff\xff\xff\xff", | ||
31064 | .ksize = 4, | ||
31065 | .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" | ||
31066 | "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" | ||
31067 | "\x11\x12\x13\x14\x15\x16\x17\x18" | ||
31068 | "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" | ||
31069 | "\x21\x22\x23\x24\x25\x26\x27\x28" | ||
31070 | "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" | ||
31071 | "\x31\x32\x33\x34\x35\x36\x37\x38" | ||
31072 | "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" | ||
31073 | "\x41\x42\x43\x44\x45\x46\x47\x48" | ||
31074 | "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50" | ||
31075 | "\x51\x52\x53\x54\x55\x56\x57\x58" | ||
31076 | "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" | ||
31077 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
31078 | "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" | ||
31079 | "\x71\x72\x73\x74\x75\x76\x77\x78" | ||
31080 | "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" | ||
31081 | "\x81\x82\x83\x84\x85\x86\x87\x88" | ||
31082 | "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" | ||
31083 | "\x91\x92\x93\x94\x95\x96\x97\x98" | ||
31084 | "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0" | ||
31085 | "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" | ||
31086 | "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" | ||
31087 | "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" | ||
31088 | "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" | ||
31089 | "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8" | ||
31090 | "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" | ||
31091 | "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" | ||
31092 | "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" | ||
31093 | "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" | ||
31094 | "\xe9\xea\xeb\xec\xed\xee\xef\xf0", | ||
31095 | .psize = 240, | ||
31096 | .digest = "\x6c\xc6\x56\xde", | ||
31097 | .np = 2, | ||
31098 | .tap = { 31, 209 } | ||
31099 | }, { | ||
31100 | .key = "\xff\xff\xff\xff", | ||
31101 | .ksize = 4, | ||
31102 | .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" | ||
31103 | "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" | ||
31104 | "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a" | ||
31105 | "\xa1\x38\xcf\x43\xda\x71\x08\x7c" | ||
31106 | "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee" | ||
31107 | "\x85\x1c\x90\x27\xbe\x32\xc9\x60" | ||
31108 | "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2" | ||
31109 | "\x46\xdd\x74\x0b\x7f\x16\xad\x21" | ||
31110 | "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93" | ||
31111 | "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05" | ||
31112 | "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77" | ||
31113 | "\x0e\x82\x19\xb0\x24\xbb\x52\xe9" | ||
31114 | "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38" | ||
31115 | "\xcf\x66\xfd\x71\x08\x9f\x13\xaa" | ||
31116 | "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c" | ||
31117 | "\xb3\x27\xbe\x55\xec\x60\xf7\x8e" | ||
31118 | "\x02\x99\x30\xc7\x3b\xd2\x69\x00" | ||
31119 | "\x74\x0b\xa2\x16\xad\x44\xdb\x4f" | ||
31120 | "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1" | ||
31121 | "\x58\xef\x63\xfa\x91\x05\x9c\x33" | ||
31122 | "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5" | ||
31123 | "\x19\xb0\x47\xde\x52\xe9\x80\x17" | ||
31124 | "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66" | ||
31125 | "\xfd\x94\x08\x9f\x36\xcd\x41\xd8" | ||
31126 | "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a" | ||
31127 | "\xe1\x55\xec\x83\x1a\x8e\x25\xbc" | ||
31128 | "\x30\xc7\x5e\xf5\x69\x00\x97\x0b" | ||
31129 | "\xa2\x39\xd0\x44\xdb\x72\x09\x7d" | ||
31130 | "\x14\xab\x1f\xb6\x4d\xe4\x58\xef" | ||
31131 | "\x86\x1d\x91\x28\xbf\x33\xca\x61" | ||
31132 | "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3" | ||
31133 | "\x47\xde\x75\x0c\x80\x17\xae\x22" | ||
31134 | "\xb9\x50\xe7\x5b\xf2\x89\x20\x94" | ||
31135 | "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06" | ||
31136 | "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78" | ||
31137 | "\x0f\x83\x1a\xb1\x25\xbc\x53\xea" | ||
31138 | "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39" | ||
31139 | "\xd0\x67\xfe\x72\x09\xa0\x14\xab" | ||
31140 | "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d" | ||
31141 | "\xb4\x28\xbf\x56\xed\x61\xf8\x8f" | ||
31142 | "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01" | ||
31143 | "\x75\x0c\xa3\x17\xae\x45\xdc\x50" | ||
31144 | "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2" | ||
31145 | "\x59\xf0\x64\xfb\x92\x06\x9d\x34" | ||
31146 | "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6" | ||
31147 | "\x1a\xb1\x48\xdf\x53\xea\x81\x18" | ||
31148 | "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67" | ||
31149 | "\xfe\x95\x09\xa0\x37\xce\x42\xd9" | ||
31150 | "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b" | ||
31151 | "\xe2\x56\xed\x84\x1b\x8f\x26\xbd" | ||
31152 | "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c" | ||
31153 | "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e" | ||
31154 | "\x15\xac\x20\xb7\x4e\xe5\x59\xf0" | ||
31155 | "\x87\x1e\x92\x29\xc0\x34\xcb\x62" | ||
31156 | "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4" | ||
31157 | "\x48\xdf\x76\x0d\x81\x18\xaf\x23" | ||
31158 | "\xba\x51\xe8\x5c\xf3\x8a\x21\x95" | ||
31159 | "\x2c\xc3\x37\xce\x65\xfc\x70\x07" | ||
31160 | "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79" | ||
31161 | "\x10\x84\x1b\xb2\x26\xbd\x54\xeb" | ||
31162 | "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a" | ||
31163 | "\xd1\x68\xff\x73\x0a\xa1\x15\xac" | ||
31164 | "\x43\xda\x4e\xe5\x7c\x13\x87\x1e" | ||
31165 | "\xb5\x29\xc0\x57\xee\x62\xf9\x90" | ||
31166 | "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02" | ||
31167 | "\x76\x0d\xa4\x18\xaf\x46\xdd\x51" | ||
31168 | "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3" | ||
31169 | "\x5a\xf1\x65\xfc\x93\x07\x9e\x35" | ||
31170 | "\xcc\x40\xd7\x6e\x05\x79\x10\xa7" | ||
31171 | "\x1b\xb2\x49\xe0\x54\xeb\x82\x19" | ||
31172 | "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68" | ||
31173 | "\xff\x96\x0a\xa1\x38\xcf\x43\xda" | ||
31174 | "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c" | ||
31175 | "\xe3\x57\xee\x85\x1c\x90\x27\xbe" | ||
31176 | "\x32\xc9\x60\xf7\x6b\x02\x99\x0d" | ||
31177 | "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f" | ||
31178 | "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1" | ||
31179 | "\x88\x1f\x93\x2a\xc1\x35\xcc\x63" | ||
31180 | "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5" | ||
31181 | "\x49\xe0\x77\x0e\x82\x19\xb0\x24" | ||
31182 | "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96" | ||
31183 | "\x2d\xc4\x38\xcf\x66\xfd\x71\x08" | ||
31184 | "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a" | ||
31185 | "\x11\x85\x1c\xb3\x27\xbe\x55\xec" | ||
31186 | "\x60\xf7\x8e\x02\x99\x30\xc7\x3b" | ||
31187 | "\xd2\x69\x00\x74\x0b\xa2\x16\xad" | ||
31188 | "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f" | ||
31189 | "\xb6\x2a\xc1\x58\xef\x63\xfa\x91" | ||
31190 | "\x05\x9c\x33\xca\x3e\xd5\x6c\x03" | ||
31191 | "\x77\x0e\xa5\x19\xb0\x47\xde\x52" | ||
31192 | "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4" | ||
31193 | "\x5b\xf2\x66\xfd\x94\x08\x9f\x36" | ||
31194 | "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8" | ||
31195 | "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a" | ||
31196 | "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69" | ||
31197 | "\x00\x97\x0b\xa2\x39\xd0\x44\xdb" | ||
31198 | "\x72\x09\x7d\x14\xab\x1f\xb6\x4d" | ||
31199 | "\xe4\x58\xef\x86\x1d\x91\x28\xbf" | ||
31200 | "\x33\xca\x61\xf8\x6c\x03\x9a\x0e" | ||
31201 | "\xa5\x3c\xd3\x47\xde\x75\x0c\x80" | ||
31202 | "\x17\xae\x22\xb9\x50\xe7\x5b\xf2" | ||
31203 | "\x89\x20\x94\x2b\xc2\x36\xcd\x64" | ||
31204 | "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6" | ||
31205 | "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25" | ||
31206 | "\xbc\x53\xea\x5e\xf5\x8c\x00\x97" | ||
31207 | "\x2e\xc5\x39\xd0\x67\xfe\x72\x09" | ||
31208 | "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b" | ||
31209 | "\x12\x86\x1d\xb4\x28\xbf\x56\xed" | ||
31210 | "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c" | ||
31211 | "\xd3\x6a\x01\x75\x0c\xa3\x17\xae" | ||
31212 | "\x45\xdc\x50\xe7\x7e\x15\x89\x20" | ||
31213 | "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92" | ||
31214 | "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04" | ||
31215 | "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53" | ||
31216 | "\xea\x81\x18\x8c\x23\xba\x2e\xc5" | ||
31217 | "\x5c\xf3\x67\xfe\x95\x09\xa0\x37" | ||
31218 | "\xce\x42\xd9\x70\x07\x7b\x12\xa9" | ||
31219 | "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b" | ||
31220 | "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a" | ||
31221 | "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc" | ||
31222 | "\x73\x0a\x7e\x15\xac\x20\xb7\x4e" | ||
31223 | "\xe5\x59\xf0\x87\x1e\x92\x29\xc0" | ||
31224 | "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f" | ||
31225 | "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81" | ||
31226 | "\x18\xaf\x23\xba\x51\xe8\x5c\xf3" | ||
31227 | "\x8a\x21\x95\x2c\xc3\x37\xce\x65" | ||
31228 | "\xfc\x70\x07\x9e\x12\xa9\x40\xd7" | ||
31229 | "\x4b\xe2\x79\x10\x84\x1b\xb2\x26" | ||
31230 | "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98" | ||
31231 | "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a" | ||
31232 | "\xa1\x15\xac\x43\xda\x4e\xe5\x7c" | ||
31233 | "\x13\x87\x1e\xb5\x29\xc0\x57\xee" | ||
31234 | "\x62\xf9\x90\x04\x9b\x32\xc9\x3d" | ||
31235 | "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf" | ||
31236 | "\x46\xdd\x51\xe8\x7f\x16\x8a\x21" | ||
31237 | "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93" | ||
31238 | "\x07\x9e\x35\xcc\x40\xd7\x6e\x05" | ||
31239 | "\x79\x10\xa7\x1b\xb2\x49\xe0\x54" | ||
31240 | "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6" | ||
31241 | "\x5d\xf4\x68\xff\x96\x0a\xa1\x38" | ||
31242 | "\xcf\x43\xda\x71\x08\x7c\x13\xaa" | ||
31243 | "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c" | ||
31244 | "\x90\x27\xbe\x32\xc9\x60\xf7\x6b" | ||
31245 | "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd" | ||
31246 | "\x74\x0b\x7f\x16\xad\x21\xb8\x4f" | ||
31247 | "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1" | ||
31248 | "\x35\xcc\x63\xfa\x6e\x05\x9c\x10" | ||
31249 | "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82" | ||
31250 | "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4" | ||
31251 | "\x8b\x22\x96\x2d\xc4\x38\xcf\x66" | ||
31252 | "\xfd\x71\x08\x9f\x13\xaa\x41\xd8" | ||
31253 | "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27" | ||
31254 | "\xbe\x55\xec\x60\xf7\x8e\x02\x99" | ||
31255 | "\x30\xc7\x3b\xd2\x69\x00\x74\x0b" | ||
31256 | "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d" | ||
31257 | "\x14\x88\x1f\xb6\x2a\xc1\x58\xef" | ||
31258 | "\x63\xfa\x91\x05\x9c\x33\xca\x3e" | ||
31259 | "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0" | ||
31260 | "\x47\xde\x52\xe9\x80\x17\x8b\x22" | ||
31261 | "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94" | ||
31262 | "\x08\x9f\x36\xcd\x41\xd8\x6f\x06" | ||
31263 | "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55" | ||
31264 | "\xec\x83\x1a\x8e\x25\xbc\x30\xc7" | ||
31265 | "\x5e\xf5\x69\x00\x97\x0b\xa2\x39" | ||
31266 | "\xd0\x44\xdb\x72\x09\x7d\x14\xab" | ||
31267 | "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d" | ||
31268 | "\x91\x28\xbf\x33\xca\x61\xf8\x6c" | ||
31269 | "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde" | ||
31270 | "\x75\x0c\x80\x17\xae\x22\xb9\x50" | ||
31271 | "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2" | ||
31272 | "\x36\xcd\x64\xfb\x6f\x06\x9d\x11" | ||
31273 | "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83" | ||
31274 | "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5" | ||
31275 | "\x8c\x00\x97\x2e\xc5\x39\xd0\x67" | ||
31276 | "\xfe\x72\x09\xa0\x14\xab\x42\xd9" | ||
31277 | "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28" | ||
31278 | "\xbf\x56\xed\x61\xf8\x8f\x03\x9a" | ||
31279 | "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c" | ||
31280 | "\xa3\x17\xae\x45\xdc\x50\xe7\x7e" | ||
31281 | "\x15\x89\x20\xb7\x2b\xc2\x59\xf0" | ||
31282 | "\x64\xfb\x92\x06\x9d\x34\xcb\x3f" | ||
31283 | "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1" | ||
31284 | "\x48\xdf\x53\xea\x81\x18\x8c\x23" | ||
31285 | "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95" | ||
31286 | "\x09\xa0\x37\xce\x42\xd9\x70\x07" | ||
31287 | "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56" | ||
31288 | "\xed\x84\x1b\x8f\x26\xbd\x31\xc8" | ||
31289 | "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a" | ||
31290 | "\xd1\x45\xdc\x73\x0a\x7e\x15\xac" | ||
31291 | "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e" | ||
31292 | "\x92\x29\xc0\x34\xcb\x62\xf9\x6d" | ||
31293 | "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf" | ||
31294 | "\x76\x0d\x81\x18\xaf\x23\xba\x51" | ||
31295 | "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3" | ||
31296 | "\x37\xce\x65\xfc\x70\x07\x9e\x12" | ||
31297 | "\xa9\x40\xd7\x4b\xe2\x79\x10\x84" | ||
31298 | "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6" | ||
31299 | "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68" | ||
31300 | "\xff\x73\x0a\xa1\x15\xac\x43\xda" | ||
31301 | "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29" | ||
31302 | "\xc0\x57\xee\x62\xf9\x90\x04\x9b" | ||
31303 | "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d" | ||
31304 | "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f" | ||
31305 | "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1" | ||
31306 | "\x65\xfc\x93\x07\x9e\x35\xcc\x40" | ||
31307 | "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2" | ||
31308 | "\x49\xe0\x54\xeb\x82\x19\x8d\x24" | ||
31309 | "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96" | ||
31310 | "\x0a\xa1\x38\xcf\x43\xda\x71\x08" | ||
31311 | "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57" | ||
31312 | "\xee\x85\x1c\x90\x27\xbe\x32\xc9" | ||
31313 | "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b" | ||
31314 | "\xd2\x46\xdd\x74\x0b\x7f\x16\xad" | ||
31315 | "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f" | ||
31316 | "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e" | ||
31317 | "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0" | ||
31318 | "\x77\x0e\x82\x19\xb0\x24\xbb\x52" | ||
31319 | "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4" | ||
31320 | "\x38\xcf\x66\xfd\x71\x08\x9f\x13" | ||
31321 | "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85" | ||
31322 | "\x1c\xb3\x27\xbe\x55\xec\x60\xf7" | ||
31323 | "\x8e\x02\x99\x30\xc7\x3b\xd2\x69" | ||
31324 | "\x00\x74\x0b\xa2\x16\xad\x44\xdb" | ||
31325 | "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a" | ||
31326 | "\xc1\x58\xef\x63\xfa\x91\x05\x9c" | ||
31327 | "\x33\xca\x3e\xd5\x6c\x03\x77\x0e" | ||
31328 | "\xa5\x19\xb0\x47\xde\x52\xe9\x80" | ||
31329 | "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2" | ||
31330 | "\x66\xfd\x94\x08\x9f\x36\xcd\x41" | ||
31331 | "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3" | ||
31332 | "\x4a\xe1\x55\xec\x83\x1a\x8e\x25" | ||
31333 | "\xbc\x30\xc7\x5e\xf5\x69\x00\x97" | ||
31334 | "\x0b\xa2\x39\xd0\x44\xdb\x72\x09" | ||
31335 | "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58" | ||
31336 | "\xef\x86\x1d\x91\x28\xbf\x33\xca" | ||
31337 | "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c" | ||
31338 | "\xd3\x47\xde\x75\x0c\x80\x17\xae" | ||
31339 | "\x22\xb9\x50\xe7\x5b\xf2\x89\x20" | ||
31340 | "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f" | ||
31341 | "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1" | ||
31342 | "\x78\x0f\x83\x1a\xb1\x25\xbc\x53" | ||
31343 | "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5" | ||
31344 | "\x39\xd0\x67\xfe\x72\x09\xa0\x14" | ||
31345 | "\xab\x42\xd9\x4d\xe4\x7b\x12\x86" | ||
31346 | "\x1d\xb4\x28\xbf\x56\xed\x61\xf8" | ||
31347 | "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a" | ||
31348 | "\x01\x75\x0c\xa3\x17\xae\x45\xdc" | ||
31349 | "\x50\xe7\x7e\x15\x89\x20\xb7\x2b" | ||
31350 | "\xc2\x59\xf0\x64\xfb\x92\x06\x9d" | ||
31351 | "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f" | ||
31352 | "\xa6\x1a\xb1\x48\xdf\x53\xea\x81" | ||
31353 | "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3" | ||
31354 | "\x67\xfe\x95\x09\xa0\x37\xce\x42" | ||
31355 | "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4" | ||
31356 | "\x4b\xe2\x56\xed\x84\x1b\x8f\x26" | ||
31357 | "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98", | ||
31358 | .psize = 2048, | ||
31359 | .digest = "\xfb\x3a\x7a\xda", | ||
31360 | } | ||
31361 | }; | ||
31362 | |||
31363 | /* | ||
28949 | * CRC32C test vectors | 31364 | * CRC32C test vectors |
28950 | */ | 31365 | */ |
28951 | #define CRC32C_TEST_VECTORS 15 | 31366 | #define CRC32C_TEST_VECTORS 15 |
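The vectors just above exercise the generic "crc32" hash: testmgr feeds the 2048-byte pattern through the algorithm and compares the 4-byte result against .digest. As a hedged sketch only (check_crc32_vector() is an illustrative name, not part of the patch; testmgr's own test_hash() is the real consumer), the same check expressed through the shash API could look like:

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/string.h>

    /*
     * Illustrative only: digest one buffer with the "crc32" shash and
     * compare it against the expected 4-byte digest, mirroring what
     * testmgr does with the vectors above.
     */
    static int check_crc32_vector(const u8 *plaintext, unsigned int psize,
                                  const u8 *digest)
    {
            struct crypto_shash *tfm;
            u8 out[4];
            int err;

            tfm = crypto_alloc_shash("crc32", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    desc->flags = 0;
                    err = crypto_shash_digest(desc, plaintext, psize, out);
            }

            crypto_free_shash(tfm);
            if (err)
                    return err;
            return memcmp(out, digest, sizeof(out)) ? -EINVAL : 0;
    }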
diff --git a/crypto/zlib.c b/crypto/zlib.c index 0eefa9d237ac..d51a30a29e42 100644 --- a/crypto/zlib.c +++ b/crypto/zlib.c | |||
@@ -78,7 +78,7 @@ static void zlib_exit(struct crypto_tfm *tfm) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | 80 | ||
81 | static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, | 81 | static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params, |
82 | unsigned int len) | 82 | unsigned int len) |
83 | { | 83 | { |
84 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | 84 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); |
@@ -209,7 +209,7 @@ static int zlib_compress_final(struct crypto_pcomp *tfm, | |||
209 | } | 209 | } |
210 | 210 | ||
211 | 211 | ||
212 | static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params, | 212 | static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params, |
213 | unsigned int len) | 213 | unsigned int len) |
214 | { | 214 | { |
215 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); | 215 | struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); |
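The two zlib hunks are pure const-correctness: the setup path only parses params and never writes through the pointer, so it can be const-qualified. A minimal sketch of what this buys a caller, assuming the matching crypto_compress_setup() wrapper was constified in the same series (my_zlib_params and my_setup are illustrative names, and the blob below is a placeholder, not real zlib attributes):

    #include <crypto/compress.h>

    /* Placeholder; real callers pack netlink-style attributes here. */
    static const u8 my_zlib_params[] = { 0x00 };

    static int my_setup(struct crypto_pcomp *tfm)
    {
            /*
             * With const void *params the table can live in rodata and
             * be handed straight to the API; before this change a
             * writable copy was needed just to satisfy the prototype.
             */
            return crypto_compress_setup(tfm, my_zlib_params,
                                         sizeof(my_zlib_params));
    }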
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 6f047dcb94c2..c43c3d2baf73 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/of_address.h> | 57 | #include <linux/of_address.h> |
58 | #include <linux/debugfs.h> | 58 | #include <linux/debugfs.h> |
59 | #include <linux/log2.h> | 59 | #include <linux/log2.h> |
60 | #include <linux/memblock.h> | ||
60 | #include <linux/syscore_ops.h> | 61 | #include <linux/syscore_ops.h> |
61 | 62 | ||
62 | /* | 63 | /* |
@@ -152,13 +153,39 @@ struct mvebu_mbus_state { | |||
152 | 153 | ||
153 | static struct mvebu_mbus_state mbus_state; | 154 | static struct mvebu_mbus_state mbus_state; |
154 | 155 | ||
156 | /* | ||
157 | * We provide two variants of the mv_mbus_dram_info() function: | ||
158 | * | ||
159 | * - The normal one, where the described DRAM ranges may overlap with | ||
160 | * the I/O windows, but for which the DRAM ranges are guaranteed to | ||
161 | * have a power of two size. Such ranges are suitable for the DMA | ||
162 | * masters that only DMA between the RAM and the device, which is | ||
163 | * actually all devices except the crypto engines. | ||
164 | * | ||
165 | * - The 'nooverlap' one, where the described DRAM ranges are | ||
166 | * guaranteed to not overlap with the I/O windows, but for which the | ||
167 | * DRAM ranges will not have power of two sizes. They will only be | ||
168 | * aligned on a 64 KB boundary, and have a size multiple of 64 | ||
169 | * KB. Such ranges are suitable for the DMA masters that DMA between | ||
170 | * the crypto SRAM (which is mapped through an I/O window) and a | ||
171 | * device. This is the case for the crypto engines. | ||
172 | */ | ||
173 | |||
155 | static struct mbus_dram_target_info mvebu_mbus_dram_info; | 174 | static struct mbus_dram_target_info mvebu_mbus_dram_info; |
175 | static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap; | ||
176 | |||
156 | const struct mbus_dram_target_info *mv_mbus_dram_info(void) | 177 | const struct mbus_dram_target_info *mv_mbus_dram_info(void) |
157 | { | 178 | { |
158 | return &mvebu_mbus_dram_info; | 179 | return &mvebu_mbus_dram_info; |
159 | } | 180 | } |
160 | EXPORT_SYMBOL_GPL(mv_mbus_dram_info); | 181 | EXPORT_SYMBOL_GPL(mv_mbus_dram_info); |
161 | 182 | ||
183 | const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) | ||
184 | { | ||
185 | return &mvebu_mbus_dram_info_nooverlap; | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap); | ||
188 | |||
162 | /* Checks whether the given window has remap capability */ | 189 | /* Checks whether the given window has remap capability */ |
163 | static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, | 190 | static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, |
164 | const int win) | 191 | const int win) |
@@ -576,6 +603,95 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) | |||
576 | return MVEBU_MBUS_NO_REMAP; | 603 | return MVEBU_MBUS_NO_REMAP; |
577 | } | 604 | } |
578 | 605 | ||
606 | /* | ||
607 | * Use the memblock information to find the MBus bridge hole in the | ||
608 | * physical address space. | ||
609 | */ | ||
610 | static void __init | ||
611 | mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) | ||
612 | { | ||
613 | struct memblock_region *r; | ||
614 | uint64_t s = 0; | ||
615 | |||
616 | for_each_memblock(memory, r) { | ||
617 | /* | ||
618 | * This part of the memory is above 4 GB, so we don't | ||
619 | * care for the MBus bridge hole. | ||
620 | */ | ||
621 | if (r->base >= 0x100000000ULL) | ||
622 | continue; | ||
623 | |||
624 | /* | ||
625 | * The MBus bridge hole is at the end of the RAM under | ||
626 | * the 4 GB limit. | ||
627 | */ | ||
628 | if (r->base + r->size > s) | ||
629 | s = r->base + r->size; | ||
630 | } | ||
631 | |||
632 | *start = s; | ||
633 | *end = 0x100000000ULL; | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * This function fills in the mvebu_mbus_dram_info_nooverlap data | ||
638 | * structure, by looking at the mvebu_mbus_dram_info data, and | ||
639 | * removing the parts of it that overlap with I/O windows. | ||
640 | */ | ||
641 | static void __init | ||
642 | mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus) | ||
643 | { | ||
644 | uint64_t mbus_bridge_base, mbus_bridge_end; | ||
645 | int cs_nooverlap = 0; | ||
646 | int i; | ||
647 | |||
648 | mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); | ||
649 | |||
650 | for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) { | ||
651 | struct mbus_dram_window *w; | ||
652 | u64 base, size, end; | ||
653 | |||
654 | w = &mvebu_mbus_dram_info.cs[i]; | ||
655 | base = w->base; | ||
656 | size = w->size; | ||
657 | end = base + size; | ||
658 | |||
659 | /* | ||
660 | * The CS is fully enclosed inside the MBus bridge | ||
661 | * area, so ignore it. | ||
662 | */ | ||
663 | if (base >= mbus_bridge_base && end <= mbus_bridge_end) | ||
664 | continue; | ||
665 | |||
666 | /* | ||
667 | * Beginning of CS overlaps with end of MBus, raise CS | ||
668 | * base address, and shrink its size. | ||
669 | */ | ||
670 | if (base >= mbus_bridge_base && end > mbus_bridge_end) { | ||
671 | size -= mbus_bridge_end - base; | ||
672 | base = mbus_bridge_end; | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * End of CS overlaps with beginning of MBus, shrink | ||
677 | * CS size. | ||
678 | */ | ||
679 | if (base < mbus_bridge_base && end > mbus_bridge_base) | ||
680 | size -= end - mbus_bridge_base; | ||
681 | |||
682 | w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++]; | ||
683 | w->cs_index = i; | ||
684 | w->mbus_attr = 0xf & ~(1 << i); | ||
685 | if (mbus->hw_io_coherency) | ||
686 | w->mbus_attr |= ATTR_HW_COHERENCY; | ||
687 | w->base = base; | ||
688 | w->size = size; | ||
689 | } | ||
690 | |||
691 | mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR; | ||
692 | mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap; | ||
693 | } | ||
694 | |||
579 | static void __init | 695 | static void __init |
580 | mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) | 696 | mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) |
581 | { | 697 | { |
@@ -964,6 +1080,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, | |||
964 | mvebu_mbus_disable_window(mbus, win); | 1080 | mvebu_mbus_disable_window(mbus, win); |
965 | 1081 | ||
966 | mbus->soc->setup_cpu_target(mbus); | 1082 | mbus->soc->setup_cpu_target(mbus); |
1083 | mvebu_mbus_setup_cpu_target_nooverlap(mbus); | ||
967 | 1084 | ||
968 | if (is_coherent) | 1085 | if (is_coherent) |
969 | writel(UNIT_SYNC_BARRIER_ALL, | 1086 | writel(UNIT_SYNC_BARRIER_ALL, |
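Taken together, the mvebu-mbus additions compute the bridge hole as [end of RAM below 4 GB, 4 GB) and then clip every DRAM window against it, so a crypto engine that programs its decoding windows from mv_mbus_dram_info_nooverlap() can never alias the I/O window that maps its SRAM. The clipping reduces to plain interval arithmetic; here is a standalone restatement (hedged sketch: clip_window() is an illustrative name, and the base < hole_end guard is added for generality beyond the driver's hole-ends-at-4-GB case):

    #include <stdint.h>

    /*
     * Clip [base, base + size) against the hole [hole_start, hole_end).
     * Returns -1 when the window is swallowed whole. As in the driver,
     * only the part below the hole survives a full straddle; with the
     * hole ending at 4 GB there is nothing above it to keep anyway.
     */
    static int clip_window(uint64_t base, uint64_t size,
                           uint64_t hole_start, uint64_t hole_end,
                           uint64_t *out_base, uint64_t *out_size)
    {
            uint64_t end = base + size;

            /* Fully enclosed in the hole: drop the window. */
            if (base >= hole_start && end <= hole_end)
                    return -1;

            /* Starts inside the hole: raise the base, shrink the size. */
            if (base >= hole_start && base < hole_end) {
                    size -= hole_end - base;
                    base = hole_end;
            }

            /* Ends inside the hole: shrink the size. */
            if (base < hole_start && end > hole_start)
                    size -= end - hole_start;

            *out_base = base;
            *out_size = size;
            return 0;
    }

On the consumer side, a DMA master moving data between a device and the crypto SRAM asks for mv_mbus_dram_info_nooverlap(); every other master keeps using mv_mbus_dram_info() and its power-of-two ranges.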
diff --git a/drivers/char/random.c b/drivers/char/random.c index 9cd6968e2f92..d0da5d852d41 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); | |||
409 | static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); | 409 | static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); |
410 | static struct fasync_struct *fasync; | 410 | static struct fasync_struct *fasync; |
411 | 411 | ||
412 | static DEFINE_SPINLOCK(random_ready_list_lock); | ||
413 | static LIST_HEAD(random_ready_list); | ||
414 | |||
412 | /********************************************************************** | 415 | /********************************************************************** |
413 | * | 416 | * |
414 | * OS independent entropy store. Here are the functions which handle | 417 | * OS independent entropy store. Here are the functions which handle |
@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f) | |||
589 | f->count++; | 592 | f->count++; |
590 | } | 593 | } |
591 | 594 | ||
595 | static void process_random_ready_list(void) | ||
596 | { | ||
597 | unsigned long flags; | ||
598 | struct random_ready_callback *rdy, *tmp; | ||
599 | |||
600 | spin_lock_irqsave(&random_ready_list_lock, flags); | ||
601 | list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { | ||
602 | struct module *owner = rdy->owner; | ||
603 | |||
604 | list_del_init(&rdy->list); | ||
605 | rdy->func(rdy); | ||
606 | module_put(owner); | ||
607 | } | ||
608 | spin_unlock_irqrestore(&random_ready_list_lock, flags); | ||
609 | } | ||
610 | |||
592 | /* | 611 | /* |
593 | * Credit (or debit) the entropy store with n bits of entropy. | 612 | * Credit (or debit) the entropy store with n bits of entropy. |
594 | * Use credit_entropy_bits_safe() if the value comes from userspace | 613 | * Use credit_entropy_bits_safe() if the value comes from userspace |
@@ -660,7 +679,8 @@ retry: | |||
660 | r->entropy_total = 0; | 679 | r->entropy_total = 0; |
661 | if (r == &nonblocking_pool) { | 680 | if (r == &nonblocking_pool) { |
662 | prandom_reseed_late(); | 681 | prandom_reseed_late(); |
663 | wake_up_interruptible(&urandom_init_wait); | 682 | process_random_ready_list(); |
683 | wake_up_all(&urandom_init_wait); | ||
664 | pr_notice("random: %s pool is initialized\n", r->name); | 684 | pr_notice("random: %s pool is initialized\n", r->name); |
665 | } | 685 | } |
666 | } | 686 | } |
@@ -1245,6 +1265,64 @@ void get_random_bytes(void *buf, int nbytes) | |||
1245 | EXPORT_SYMBOL(get_random_bytes); | 1265 | EXPORT_SYMBOL(get_random_bytes); |
1246 | 1266 | ||
1247 | /* | 1267 | /* |
1268 | * Add a callback function that will be invoked when the nonblocking | ||
1269 | * pool is initialised. | ||
1270 | * | ||
1271 | * returns: 0 if callback is successfully added | ||
1272 | * -EALREADY if pool is already initialised (callback not called) | ||
1273 | * -ENOENT if module for callback is not alive | ||
1274 | */ | ||
1275 | int add_random_ready_callback(struct random_ready_callback *rdy) | ||
1276 | { | ||
1277 | struct module *owner; | ||
1278 | unsigned long flags; | ||
1279 | int err = -EALREADY; | ||
1280 | |||
1281 | if (likely(nonblocking_pool.initialized)) | ||
1282 | return err; | ||
1283 | |||
1284 | owner = rdy->owner; | ||
1285 | if (!try_module_get(owner)) | ||
1286 | return -ENOENT; | ||
1287 | |||
1288 | spin_lock_irqsave(&random_ready_list_lock, flags); | ||
1289 | if (nonblocking_pool.initialized) | ||
1290 | goto out; | ||
1291 | |||
1292 | owner = NULL; | ||
1293 | |||
1294 | list_add(&rdy->list, &random_ready_list); | ||
1295 | err = 0; | ||
1296 | |||
1297 | out: | ||
1298 | spin_unlock_irqrestore(&random_ready_list_lock, flags); | ||
1299 | |||
1300 | module_put(owner); | ||
1301 | |||
1302 | return err; | ||
1303 | } | ||
1304 | EXPORT_SYMBOL(add_random_ready_callback); | ||
1305 | |||
1306 | /* | ||
1307 | * Delete a previously registered readiness callback function. | ||
1308 | */ | ||
1309 | void del_random_ready_callback(struct random_ready_callback *rdy) | ||
1310 | { | ||
1311 | unsigned long flags; | ||
1312 | struct module *owner = NULL; | ||
1313 | |||
1314 | spin_lock_irqsave(&random_ready_list_lock, flags); | ||
1315 | if (!list_empty(&rdy->list)) { | ||
1316 | list_del_init(&rdy->list); | ||
1317 | owner = rdy->owner; | ||
1318 | } | ||
1319 | spin_unlock_irqrestore(&random_ready_list_lock, flags); | ||
1320 | |||
1321 | module_put(owner); | ||
1322 | } | ||
1323 | EXPORT_SYMBOL(del_random_ready_callback); | ||
1324 | |||
1325 | /* | ||
1248 | * This function will use the architecture-specific hardware random | 1326 | * This function will use the architecture-specific hardware random |
1249 | * number generator if it is available. The arch-specific hw RNG will | 1327 | * number generator if it is available. The arch-specific hw RNG will |
1250 | * almost certainly be faster than what we can do in software, but it | 1328 | * almost certainly be faster than what we can do in software, but it |
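add_random_ready_callback() and del_random_ready_callback() give crypto code a way to defer work until the nonblocking pool is seeded; in this series the DRBG is the intended user. A hedged usage sketch (the my_* names are illustrative; the struct fields match the accesses visible in the hunk above):

    #include <linux/module.h>
    #include <linux/random.h>

    static void my_seed_ready(struct random_ready_callback *rdy)
    {
            /* Nonblocking pool is now initialised: safe to (re)seed. */
    }

    static struct random_ready_callback my_rdy = {
            .owner = THIS_MODULE,
            .func  = my_seed_ready,
    };

    static int __init my_init(void)
    {
            int err = add_random_ready_callback(&my_rdy);

            if (err == -EALREADY)
                    return 0;  /* pool already seeded; no callback coming */
            return err;        /* 0 on success, -ENOENT if module is dying */
    }

    static void __exit my_exit(void)
    {
            /* No-op if the callback already fired and unlinked itself. */
            del_random_ready_callback(&my_rdy);
    }

    module_init(my_init);
    module_exit(my_exit);

Note that process_random_ready_list() drops the module reference only after invoking func, and list_del_init() leaves the node empty, which is what makes the unconditional del_random_ready_callback() in the exit path safe.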
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 033c0c86f6ec..4044125fb5d5 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -162,10 +162,10 @@ config CRYPTO_GHASH_S390 | |||
162 | config CRYPTO_DEV_MV_CESA | 162 | config CRYPTO_DEV_MV_CESA |
163 | tristate "Marvell's Cryptographic Engine" | 163 | tristate "Marvell's Cryptographic Engine" |
164 | depends on PLAT_ORION | 164 | depends on PLAT_ORION |
165 | select CRYPTO_ALGAPI | ||
166 | select CRYPTO_AES | 165 | select CRYPTO_AES |
167 | select CRYPTO_BLKCIPHER2 | 166 | select CRYPTO_BLKCIPHER |
168 | select CRYPTO_HASH | 167 | select CRYPTO_HASH |
168 | select SRAM | ||
169 | help | 169 | help |
170 | This driver allows you to utilize the Cryptographic Engines and | 170 | This driver allows you to utilize the Cryptographic Engines and |
171 | Security Accelerator (CESA) which can be found on the Marvell Orion | 171 | Security Accelerator (CESA) which can be found on the Marvell Orion |
@@ -173,10 +173,27 @@ config CRYPTO_DEV_MV_CESA | |||
173 | 173 | ||
174 | Currently the driver supports AES in ECB and CBC mode without DMA. | 174 | Currently the driver supports AES in ECB and CBC mode without DMA. |
175 | 175 | ||
176 | config CRYPTO_DEV_MARVELL_CESA | ||
177 | tristate "New Marvell's Cryptographic Engine driver" | ||
178 | depends on PLAT_ORION || ARCH_MVEBU | ||
179 | select CRYPTO_AES | ||
180 | select CRYPTO_DES | ||
181 | select CRYPTO_BLKCIPHER | ||
182 | select CRYPTO_HASH | ||
183 | select SRAM | ||
184 | help | ||
185 | This driver allows you to utilize the Cryptographic Engines and | ||
186 | Security Accelerator (CESA) which can be found on the Armada 370. | ||
187 | This driver supports CPU offload through DMA transfers. | ||
188 | |||
189 | This driver is aimed at replacing the mv_cesa driver. This will only | ||
190 | happen once it has received proper testing. | ||
191 | |||
176 | config CRYPTO_DEV_NIAGARA2 | 192 | config CRYPTO_DEV_NIAGARA2 |
177 | tristate "Niagara2 Stream Processing Unit driver" | 193 | tristate "Niagara2 Stream Processing Unit driver" |
178 | select CRYPTO_DES | 194 | select CRYPTO_DES |
179 | select CRYPTO_ALGAPI | 195 | select CRYPTO_BLKCIPHER |
196 | select CRYPTO_HASH | ||
180 | depends on SPARC64 | 197 | depends on SPARC64 |
181 | help | 198 | help |
182 | Each core of a Niagara2 processor contains a Stream | 199 | Each core of a Niagara2 processor contains a Stream |
@@ -189,7 +206,6 @@ config CRYPTO_DEV_NIAGARA2 | |||
189 | config CRYPTO_DEV_HIFN_795X | 206 | config CRYPTO_DEV_HIFN_795X |
190 | tristate "Driver HIFN 795x crypto accelerator chips" | 207 | tristate "Driver HIFN 795x crypto accelerator chips" |
191 | select CRYPTO_DES | 208 | select CRYPTO_DES |
192 | select CRYPTO_ALGAPI | ||
193 | select CRYPTO_BLKCIPHER | 209 | select CRYPTO_BLKCIPHER |
194 | select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG | 210 | select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG |
195 | depends on PCI | 211 | depends on PCI |
@@ -208,8 +224,10 @@ source drivers/crypto/caam/Kconfig | |||
208 | 224 | ||
209 | config CRYPTO_DEV_TALITOS | 225 | config CRYPTO_DEV_TALITOS |
210 | tristate "Talitos Freescale Security Engine (SEC)" | 226 | tristate "Talitos Freescale Security Engine (SEC)" |
211 | select CRYPTO_ALGAPI | 227 | select CRYPTO_AEAD |
212 | select CRYPTO_AUTHENC | 228 | select CRYPTO_AUTHENC |
229 | select CRYPTO_BLKCIPHER | ||
230 | select CRYPTO_HASH | ||
213 | select HW_RANDOM | 231 | select HW_RANDOM |
214 | depends on FSL_SOC | 232 | depends on FSL_SOC |
215 | help | 233 | help |
@@ -222,11 +240,29 @@ config CRYPTO_DEV_TALITOS | |||
222 | To compile this driver as a module, choose M here: the module | 240 | To compile this driver as a module, choose M here: the module |
223 | will be called talitos. | 241 | will be called talitos. |
224 | 242 | ||
243 | config CRYPTO_DEV_TALITOS1 | ||
244 | bool "SEC1 (SEC 1.0 and SEC Lite 1.2)" | ||
245 | depends on CRYPTO_DEV_TALITOS | ||
246 | depends on PPC_8xx || PPC_82xx | ||
247 | default y | ||
248 | help | ||
249 | Say 'Y' here to use the Freescale Security Engine (SEC) version 1.0 | ||
250 | found on MPC82xx or the Freescale Security Engine (SEC Lite) | ||
251 | version 1.2 found on MPC8xx | ||
252 | |||
253 | config CRYPTO_DEV_TALITOS2 | ||
254 | bool "SEC2+ (SEC version 2.0 or upper)" | ||
255 | depends on CRYPTO_DEV_TALITOS | ||
256 | default y if !PPC_8xx | ||
257 | help | ||
258 | Say 'Y' here to use the Freescale Security Engine (SEC) | ||
259 | version 2 and following as found on MPC83xx, MPC85xx, etc ... | ||
260 | |||
225 | config CRYPTO_DEV_IXP4XX | 261 | config CRYPTO_DEV_IXP4XX |
226 | tristate "Driver for IXP4xx crypto hardware acceleration" | 262 | tristate "Driver for IXP4xx crypto hardware acceleration" |
227 | depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE | 263 | depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE |
228 | select CRYPTO_DES | 264 | select CRYPTO_DES |
229 | select CRYPTO_ALGAPI | 265 | select CRYPTO_AEAD |
230 | select CRYPTO_AUTHENC | 266 | select CRYPTO_AUTHENC |
231 | select CRYPTO_BLKCIPHER | 267 | select CRYPTO_BLKCIPHER |
232 | help | 268 | help |
@@ -236,7 +272,6 @@ config CRYPTO_DEV_PPC4XX | |||
236 | tristate "Driver AMCC PPC4xx crypto accelerator" | 272 | tristate "Driver AMCC PPC4xx crypto accelerator" |
237 | depends on PPC && 4xx | 273 | depends on PPC && 4xx |
238 | select CRYPTO_HASH | 274 | select CRYPTO_HASH |
239 | select CRYPTO_ALGAPI | ||
240 | select CRYPTO_BLKCIPHER | 275 | select CRYPTO_BLKCIPHER |
241 | help | 276 | help |
242 | This option allows you to have support for AMCC crypto acceleration. | 277 | This option allows you to have support for AMCC crypto acceleration. |
@@ -257,7 +292,7 @@ config CRYPTO_DEV_OMAP_AES | |||
257 | tristate "Support for OMAP AES hw engine" | 292 | tristate "Support for OMAP AES hw engine" |
258 | depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS | 293 | depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS |
259 | select CRYPTO_AES | 294 | select CRYPTO_AES |
260 | select CRYPTO_BLKCIPHER2 | 295 | select CRYPTO_BLKCIPHER |
261 | help | 296 | help |
262 | OMAP processors have AES module accelerator. Select this if you | 297 | OMAP processors have AES module accelerator. Select this if you |
263 | want to use the OMAP module for AES algorithms. | 298 | want to use the OMAP module for AES algorithms. |
@@ -266,7 +301,7 @@ config CRYPTO_DEV_OMAP_DES | |||
266 | tristate "Support for OMAP DES3DES hw engine" | 301 | tristate "Support for OMAP DES3DES hw engine" |
267 | depends on ARCH_OMAP2PLUS | 302 | depends on ARCH_OMAP2PLUS |
268 | select CRYPTO_DES | 303 | select CRYPTO_DES |
269 | select CRYPTO_BLKCIPHER2 | 304 | select CRYPTO_BLKCIPHER |
270 | help | 305 | help |
271 | OMAP processors have DES/3DES module accelerator. Select this if you | 306 | OMAP processors have DES/3DES module accelerator. Select this if you |
272 | want to use the OMAP module for DES and 3DES algorithms. Currently | 307 | want to use the OMAP module for DES and 3DES algorithms. Currently |
@@ -276,9 +311,10 @@ config CRYPTO_DEV_OMAP_DES | |||
276 | config CRYPTO_DEV_PICOXCELL | 311 | config CRYPTO_DEV_PICOXCELL |
277 | tristate "Support for picoXcell IPSEC and Layer2 crypto engines" | 312 | tristate "Support for picoXcell IPSEC and Layer2 crypto engines" |
278 | depends on ARCH_PICOXCELL && HAVE_CLK | 313 | depends on ARCH_PICOXCELL && HAVE_CLK |
314 | select CRYPTO_AEAD | ||
279 | select CRYPTO_AES | 315 | select CRYPTO_AES |
280 | select CRYPTO_AUTHENC | 316 | select CRYPTO_AUTHENC |
281 | select CRYPTO_ALGAPI | 317 | select CRYPTO_BLKCIPHER |
282 | select CRYPTO_DES | 318 | select CRYPTO_DES |
283 | select CRYPTO_CBC | 319 | select CRYPTO_CBC |
284 | select CRYPTO_ECB | 320 | select CRYPTO_ECB |
@@ -304,7 +340,6 @@ config CRYPTO_DEV_S5P | |||
304 | tristate "Support for Samsung S5PV210/Exynos crypto accelerator" | 340 | tristate "Support for Samsung S5PV210/Exynos crypto accelerator" |
305 | depends on ARCH_S5PV210 || ARCH_EXYNOS | 341 | depends on ARCH_S5PV210 || ARCH_EXYNOS |
306 | select CRYPTO_AES | 342 | select CRYPTO_AES |
307 | select CRYPTO_ALGAPI | ||
308 | select CRYPTO_BLKCIPHER | 343 | select CRYPTO_BLKCIPHER |
309 | help | 344 | help |
310 | This option allows you to have support for S5P crypto acceleration. | 345 | This option allows you to have support for S5P crypto acceleration. |
@@ -312,11 +347,13 @@ config CRYPTO_DEV_S5P | |||
312 | algorithms execution. | 347 | algorithms execution. |
313 | 348 | ||
314 | config CRYPTO_DEV_NX | 349 | config CRYPTO_DEV_NX |
315 | bool "Support for IBM Power7+ in-Nest cryptographic acceleration" | 350 | bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration" |
316 | depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN | 351 | depends on PPC64 |
317 | default n | ||
318 | help | 352 | help |
319 | Support for Power7+ in-Nest cryptographic acceleration. | 353 | This enables support for the NX hardware cryptographic accelerator |
354 | coprocessor that is in IBM PowerPC P7+ or later processors. This | ||
355 | does not actually enable any drivers; it only allows you to select | ||
356 | which acceleration type (encryption and/or compression) to enable. | ||
320 | 357 | ||
321 | if CRYPTO_DEV_NX | 358 | if CRYPTO_DEV_NX |
322 | source "drivers/crypto/nx/Kconfig" | 359 | source "drivers/crypto/nx/Kconfig" |
@@ -325,7 +362,6 @@ endif | |||
325 | config CRYPTO_DEV_UX500 | 362 | config CRYPTO_DEV_UX500 |
326 | tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" | 363 | tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" |
327 | depends on ARCH_U8500 | 364 | depends on ARCH_U8500 |
328 | select CRYPTO_ALGAPI | ||
329 | help | 365 | help |
330 | Driver for ST-Ericsson UX500 crypto engine. | 366 | Driver for ST-Ericsson UX500 crypto engine. |
331 | 367 | ||
@@ -343,10 +379,7 @@ config CRYPTO_DEV_BFIN_CRC | |||
343 | config CRYPTO_DEV_ATMEL_AES | 379 | config CRYPTO_DEV_ATMEL_AES |
344 | tristate "Support for Atmel AES hw accelerator" | 380 | tristate "Support for Atmel AES hw accelerator" |
345 | depends on ARCH_AT91 | 381 | depends on ARCH_AT91 |
346 | select CRYPTO_CBC | ||
347 | select CRYPTO_ECB | ||
348 | select CRYPTO_AES | 382 | select CRYPTO_AES |
349 | select CRYPTO_ALGAPI | ||
350 | select CRYPTO_BLKCIPHER | 383 | select CRYPTO_BLKCIPHER |
351 | select AT_HDMAC | 384 | select AT_HDMAC |
352 | help | 385 | help |
@@ -361,9 +394,6 @@ config CRYPTO_DEV_ATMEL_TDES | |||
361 | tristate "Support for Atmel DES/TDES hw accelerator" | 394 | tristate "Support for Atmel DES/TDES hw accelerator" |
362 | depends on ARCH_AT91 | 395 | depends on ARCH_AT91 |
363 | select CRYPTO_DES | 396 | select CRYPTO_DES |
364 | select CRYPTO_CBC | ||
365 | select CRYPTO_ECB | ||
366 | select CRYPTO_ALGAPI | ||
367 | select CRYPTO_BLKCIPHER | 397 | select CRYPTO_BLKCIPHER |
368 | help | 398 | help |
369 | Some Atmel processors have DES/TDES hw accelerator. | 399 | Some Atmel processors have DES/TDES hw accelerator. |
@@ -376,10 +406,7 @@ config CRYPTO_DEV_ATMEL_TDES | |||
376 | config CRYPTO_DEV_ATMEL_SHA | 406 | config CRYPTO_DEV_ATMEL_SHA |
377 | tristate "Support for Atmel SHA hw accelerator" | 407 | tristate "Support for Atmel SHA hw accelerator" |
378 | depends on ARCH_AT91 | 408 | depends on ARCH_AT91 |
379 | select CRYPTO_SHA1 | 409 | select CRYPTO_HASH |
380 | select CRYPTO_SHA256 | ||
381 | select CRYPTO_SHA512 | ||
382 | select CRYPTO_ALGAPI | ||
383 | help | 410 | help |
384 | Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512 | 411 | Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512 |
385 | hw accelerator. | 412 | hw accelerator. |
@@ -392,7 +419,6 @@ config CRYPTO_DEV_ATMEL_SHA | |||
392 | config CRYPTO_DEV_CCP | 419 | config CRYPTO_DEV_CCP |
393 | bool "Support for AMD Cryptographic Coprocessor" | 420 | bool "Support for AMD Cryptographic Coprocessor" |
394 | depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM | 421 | depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM |
395 | default n | ||
396 | help | 422 | help |
397 | The AMD Cryptographic Coprocessor provides hardware support | 423 | The AMD Cryptographic Coprocessor provides hardware support |
398 | for encryption, hashing and related operations. | 424 | for encryption, hashing and related operations. |
@@ -404,13 +430,11 @@ endif | |||
404 | config CRYPTO_DEV_MXS_DCP | 430 | config CRYPTO_DEV_MXS_DCP |
405 | tristate "Support for Freescale MXS DCP" | 431 | tristate "Support for Freescale MXS DCP" |
406 | depends on ARCH_MXS | 432 | depends on ARCH_MXS |
407 | select CRYPTO_SHA1 | ||
408 | select CRYPTO_SHA256 | ||
409 | select CRYPTO_CBC | 433 | select CRYPTO_CBC |
410 | select CRYPTO_ECB | 434 | select CRYPTO_ECB |
411 | select CRYPTO_AES | 435 | select CRYPTO_AES |
412 | select CRYPTO_BLKCIPHER | 436 | select CRYPTO_BLKCIPHER |
413 | select CRYPTO_ALGAPI | 437 | select CRYPTO_HASH |
414 | help | 438 | help |
415 | The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB | 439 | The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB |
416 | co-processor on the die. | 440 | co-processor on the die. |
@@ -429,7 +453,6 @@ config CRYPTO_DEV_QCE | |||
429 | select CRYPTO_CBC | 453 | select CRYPTO_CBC |
430 | select CRYPTO_XTS | 454 | select CRYPTO_XTS |
431 | select CRYPTO_CTR | 455 | select CRYPTO_CTR |
432 | select CRYPTO_ALGAPI | ||
433 | select CRYPTO_BLKCIPHER | 456 | select CRYPTO_BLKCIPHER |
434 | help | 457 | help |
435 | This driver supports Qualcomm crypto engine accelerator | 458 | This driver supports Qualcomm crypto engine accelerator |
@@ -439,7 +462,6 @@ config CRYPTO_DEV_QCE | |||
439 | config CRYPTO_DEV_VMX | 462 | config CRYPTO_DEV_VMX |
440 | bool "Support for VMX cryptographic acceleration instructions" | 463 | bool "Support for VMX cryptographic acceleration instructions" |
441 | depends on PPC64 | 464 | depends on PPC64 |
442 | default n | ||
443 | help | 465 | help |
444 | Support for VMX cryptographic acceleration instructions. | 466 | Support for VMX cryptographic acceleration instructions. |
445 | 467 | ||
@@ -449,7 +471,6 @@ config CRYPTO_DEV_IMGTEC_HASH | |||
449 | tristate "Imagination Technologies hardware hash accelerator" | 471 | tristate "Imagination Technologies hardware hash accelerator" |
450 | depends on MIPS || COMPILE_TEST | 472 | depends on MIPS || COMPILE_TEST |
451 | depends on HAS_DMA | 473 | depends on HAS_DMA |
452 | select CRYPTO_ALGAPI | ||
453 | select CRYPTO_MD5 | 474 | select CRYPTO_MD5 |
454 | select CRYPTO_SHA1 | 475 | select CRYPTO_SHA1 |
455 | select CRYPTO_SHA256 | 476 | select CRYPTO_SHA256 |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index fb84be7e6be5..e35c07a8da85 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o | |||
9 | obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o | 9 | obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o |
10 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o | 10 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o |
11 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o | 11 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o |
12 | obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ | ||
12 | obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o | 13 | obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o |
13 | obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o | 14 | obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o |
14 | n2_crypto-y := n2_core.o n2_asm.o | 15 | n2_crypto-y := n2_core.o n2_asm.o |
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e7555ff4cafd..e286e285aa8a 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
@@ -45,7 +45,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE | |||
45 | config CRYPTO_DEV_FSL_CAAM_INTC | 45 | config CRYPTO_DEV_FSL_CAAM_INTC |
46 | bool "Job Ring interrupt coalescing" | 46 | bool "Job Ring interrupt coalescing" |
47 | depends on CRYPTO_DEV_FSL_CAAM_JR | 47 | depends on CRYPTO_DEV_FSL_CAAM_JR |
48 | default n | ||
49 | help | 48 | help |
50 | Enable the Job Ring's interrupt coalescing feature. | 49 | Enable the Job Ring's interrupt coalescing feature. |
51 | 50 | ||
@@ -77,8 +76,9 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
77 | tristate "Register algorithm implementations with the Crypto API" | 76 | tristate "Register algorithm implementations with the Crypto API" |
78 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR | 77 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
79 | default y | 78 | default y |
80 | select CRYPTO_ALGAPI | 79 | select CRYPTO_AEAD |
81 | select CRYPTO_AUTHENC | 80 | select CRYPTO_AUTHENC |
81 | select CRYPTO_BLKCIPHER | ||
82 | help | 82 | help |
83 | Selecting this will offload crypto for users of the | 83 | Selecting this will offload crypto for users of the |
84 | scatterlist crypto API (such as the linux native IPSec | 84 | scatterlist crypto API (such as the linux native IPSec |
@@ -115,7 +115,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API | |||
115 | config CRYPTO_DEV_FSL_CAAM_DEBUG | 115 | config CRYPTO_DEV_FSL_CAAM_DEBUG |
116 | bool "Enable debug output in CAAM driver" | 116 | bool "Enable debug output in CAAM driver" |
117 | depends on CRYPTO_DEV_FSL_CAAM | 117 | depends on CRYPTO_DEV_FSL_CAAM |
118 | default n | ||
119 | help | 118 | help |
120 | Selecting this will enable printing of various debug | 119 | Selecting this will enable printing of various debug |
121 | information in the CAAM driver. | 120 | information in the CAAM driver. |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 29071a156cbe..daca933a82ec 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -65,6 +65,10 @@ | |||
65 | /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 65 | /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
66 | #define CAAM_MAX_IV_LENGTH 16 | 66 | #define CAAM_MAX_IV_LENGTH 16 |
67 | 67 | ||
68 | #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) | ||
69 | #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ | ||
70 | CAAM_CMD_SZ * 4) | ||
71 | |||
68 | /* length of descriptors text */ | 72 | /* length of descriptors text */ |
69 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | 73 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) |
70 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) | 74 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) |
@@ -79,18 +83,16 @@ | |||
79 | #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) | 83 | #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) |
80 | 84 | ||
81 | #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) | 85 | #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) |
82 | #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ) | 86 | #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) |
83 | #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ) | 87 | #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) |
84 | 88 | ||
85 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) | 89 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) |
86 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ) | 90 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) |
87 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ) | 91 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) |
88 | #define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ) | ||
89 | 92 | ||
90 | #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) | 93 | #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) |
91 | #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ) | 94 | #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) |
92 | #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ) | 95 | #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) |
93 | #define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ) | ||
94 | 96 | ||
95 | #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) | 97 | #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) |
96 | #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ | 98 | #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ |
@@ -98,8 +100,7 @@ | |||
98 | #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ | 100 | #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ |
99 | 15 * CAAM_CMD_SZ) | 101 | 15 * CAAM_CMD_SZ) |
100 | 102 | ||
101 | #define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \ | 103 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) |
102 | CAAM_MAX_KEY_SIZE) | ||
103 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) | 104 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) |
104 | 105 | ||
105 | #ifdef DEBUG | 106 | #ifdef DEBUG |
@@ -258,7 +259,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, | |||
258 | 259 | ||
259 | static int aead_null_set_sh_desc(struct crypto_aead *aead) | 260 | static int aead_null_set_sh_desc(struct crypto_aead *aead) |
260 | { | 261 | { |
261 | struct aead_tfm *tfm = &aead->base.crt_aead; | 262 | unsigned int ivsize = crypto_aead_ivsize(aead); |
262 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 263 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
263 | struct device *jrdev = ctx->jrdev; | 264 | struct device *jrdev = ctx->jrdev; |
264 | bool keys_fit_inline = false; | 265 | bool keys_fit_inline = false; |
@@ -273,7 +274,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
273 | ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) | 274 | ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) |
274 | keys_fit_inline = true; | 275 | keys_fit_inline = true; |
275 | 276 | ||
276 | /* aead_encrypt shared descriptor */ | 277 | /* old_aead_encrypt shared descriptor */ |
277 | desc = ctx->sh_desc_enc; | 278 | desc = ctx->sh_desc_enc; |
278 | 279 | ||
279 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 280 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
@@ -362,7 +363,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
362 | 363 | ||
363 | desc = ctx->sh_desc_dec; | 364 | desc = ctx->sh_desc_dec; |
364 | 365 | ||
365 | /* aead_decrypt shared descriptor */ | 366 | /* old_aead_decrypt shared descriptor */ |
366 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 367 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
367 | 368 | ||
368 | /* Skip if already shared */ | 369 | /* Skip if already shared */ |
@@ -383,7 +384,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
383 | 384 | ||
384 | /* assoclen + cryptlen = seqinlen - ivsize - authsize */ | 385 | /* assoclen + cryptlen = seqinlen - ivsize - authsize */ |
385 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, | 386 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, |
386 | ctx->authsize + tfm->ivsize); | 387 | ctx->authsize + ivsize); |
387 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | 388 | /* assoclen = (assoclen + cryptlen) - cryptlen */ |
388 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | 389 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
389 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); | 390 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); |
@@ -449,7 +450,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
449 | 450 | ||
450 | static int aead_set_sh_desc(struct crypto_aead *aead) | 451 | static int aead_set_sh_desc(struct crypto_aead *aead) |
451 | { | 452 | { |
452 | struct aead_tfm *tfm = &aead->base.crt_aead; | 453 | unsigned int ivsize = crypto_aead_ivsize(aead); |
453 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 454 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
454 | struct crypto_tfm *ctfm = crypto_aead_tfm(aead); | 455 | struct crypto_tfm *ctfm = crypto_aead_tfm(aead); |
455 | const char *alg_name = crypto_tfm_alg_name(ctfm); | 456 | const char *alg_name = crypto_tfm_alg_name(ctfm); |
@@ -496,7 +497,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
496 | CAAM_DESC_BYTES_MAX) | 497 | CAAM_DESC_BYTES_MAX) |
497 | keys_fit_inline = true; | 498 | keys_fit_inline = true; |
498 | 499 | ||
499 | /* aead_encrypt shared descriptor */ | 500 | /* old_aead_encrypt shared descriptor */ |
500 | desc = ctx->sh_desc_enc; | 501 | desc = ctx->sh_desc_enc; |
501 | 502 | ||
502 | /* Note: Context registers are saved. */ | 503 | /* Note: Context registers are saved. */ |
@@ -510,7 +511,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
510 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | 511 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); |
511 | 512 | ||
512 | /* assoclen + cryptlen = seqinlen - ivsize */ | 513 | /* assoclen + cryptlen = seqinlen - ivsize */ |
513 | append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); | 514 | append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); |
514 | 515 | ||
515 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | 516 | /* assoclen = (assoclen + cryptlen) - cryptlen */ |
516 | append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); | 517 | append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); |
@@ -518,7 +519,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
518 | /* read assoc before reading payload */ | 519 | /* read assoc before reading payload */ |
519 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 520 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
520 | KEY_VLF); | 521 | KEY_VLF); |
521 | aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); | 522 | aead_append_ld_iv(desc, ivsize, ctx1_iv_off); |
522 | 523 | ||
523 | /* Load Counter into CONTEXT1 reg */ | 524 | /* Load Counter into CONTEXT1 reg */ |
524 | if (is_rfc3686) | 525 | if (is_rfc3686) |
@@ -565,7 +566,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
565 | CAAM_DESC_BYTES_MAX) | 566 | CAAM_DESC_BYTES_MAX) |
566 | keys_fit_inline = true; | 567 | keys_fit_inline = true; |
567 | 568 | ||
568 | /* aead_decrypt shared descriptor */ | 569 | /* old_aead_decrypt shared descriptor */ |
569 | desc = ctx->sh_desc_dec; | 570 | desc = ctx->sh_desc_dec; |
570 | 571 | ||
571 | /* Note: Context registers are saved. */ | 572 | /* Note: Context registers are saved. */ |
@@ -577,7 +578,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
577 | 578 | ||
578 | /* assoclen + cryptlen = seqinlen - ivsize - authsize */ | 579 | /* assoclen + cryptlen = seqinlen - ivsize - authsize */ |
579 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, | 580 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, |
580 | ctx->authsize + tfm->ivsize); | 581 | ctx->authsize + ivsize); |
581 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | 582 | /* assoclen = (assoclen + cryptlen) - cryptlen */ |
582 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | 583 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
583 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); | 584 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); |
@@ -586,7 +587,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
586 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 587 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
587 | KEY_VLF); | 588 | KEY_VLF); |
588 | 589 | ||
589 | aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); | 590 | aead_append_ld_iv(desc, ivsize, ctx1_iv_off); |
590 | 591 | ||
591 | /* Load Counter into CONTEXT1 reg */ | 592 | /* Load Counter into CONTEXT1 reg */ |
592 | if (is_rfc3686) | 593 | if (is_rfc3686) |
@@ -645,20 +646,20 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
645 | /* Generate IV */ | 646 | /* Generate IV */ |
646 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | 647 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | |
647 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | 648 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | |
648 | NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); | 649 | NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); |
649 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | 650 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | |
650 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | 651 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); |
651 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | 652 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); |
652 | append_move(desc, MOVE_WAITCOMP | | 653 | append_move(desc, MOVE_WAITCOMP | |
653 | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | | 654 | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | |
654 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | 655 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | |
655 | (tfm->ivsize << MOVE_LEN_SHIFT)); | 656 | (ivsize << MOVE_LEN_SHIFT)); |
656 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | 657 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); |
657 | 658 | ||
658 | /* Copy IV to class 1 context */ | 659 | /* Copy IV to class 1 context */ |
659 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | | 660 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | |
660 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | 661 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | |
661 | (tfm->ivsize << MOVE_LEN_SHIFT)); | 662 | (ivsize << MOVE_LEN_SHIFT)); |
662 | 663 | ||
663 | /* Return to encryption */ | 664 | /* Return to encryption */ |
664 | append_operation(desc, ctx->class2_alg_type | | 665 | append_operation(desc, ctx->class2_alg_type | |
@@ -676,10 +677,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
676 | 677 | ||
677 | /* Copy iv from outfifo to class 2 fifo */ | 678 | /* Copy iv from outfifo to class 2 fifo */ |
678 | moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | | 679 | moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | |
679 | NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); | 680 | NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); |
680 | append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | | 681 | append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | |
681 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | 682 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); |
682 | append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | | 683 | append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | |
683 | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); | 684 | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); |
684 | 685 | ||
685 | /* Load Counter into CONTEXT1 reg */ | 686 | /* Load Counter into CONTEXT1 reg */ |
@@ -698,7 +699,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
698 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 699 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
699 | 700 | ||
700 | /* No need to reload iv */ | 701 | /* No need to reload iv */ |
701 | append_seq_fifo_load(desc, tfm->ivsize, | 702 | append_seq_fifo_load(desc, ivsize, |
702 | FIFOLD_CLASS_SKIP); | 703 | FIFOLD_CLASS_SKIP); |
703 | 704 | ||
704 | /* Will read cryptlen */ | 705 | /* Will read cryptlen */ |
@@ -738,7 +739,6 @@ static int aead_setauthsize(struct crypto_aead *authenc, | |||
738 | 739 | ||
739 | static int gcm_set_sh_desc(struct crypto_aead *aead) | 740 | static int gcm_set_sh_desc(struct crypto_aead *aead) |
740 | { | 741 | { |
741 | struct aead_tfm *tfm = &aead->base.crt_aead; | ||
742 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 742 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
743 | struct device *jrdev = ctx->jrdev; | 743 | struct device *jrdev = ctx->jrdev; |
744 | bool keys_fit_inline = false; | 744 | bool keys_fit_inline = false; |
@@ -754,7 +754,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
754 | * Job Descriptor and Shared Descriptor | 754 | * Job Descriptor and Shared Descriptor |
755 | * must fit into the 64-word Descriptor h/w Buffer | 755 | * must fit into the 64-word Descriptor h/w Buffer |
756 | */ | 756 | */ |
757 | if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN + | 757 | if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN + |
758 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 758 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) |
759 | keys_fit_inline = true; | 759 | keys_fit_inline = true; |
760 | 760 | ||
@@ -777,34 +777,34 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
777 | append_operation(desc, ctx->class1_alg_type | | 777 | append_operation(desc, ctx->class1_alg_type | |
778 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 778 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
779 | 779 | ||
780 | /* cryptlen = seqoutlen - authsize */ | 780 | /* if assoclen + cryptlen is ZERO, skip to ICV write */ |
781 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | 781 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
782 | zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | | ||
783 | JUMP_COND_MATH_Z); | ||
782 | 784 | ||
783 | /* assoclen + cryptlen = seqinlen - ivsize */ | 785 | /* if assoclen is ZERO, skip reading the assoc data */ |
784 | append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); | 786 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
787 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
788 | JUMP_COND_MATH_Z); | ||
785 | 789 | ||
786 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | 790 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
787 | append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); | 791 | |
792 | /* skip assoc data */ | ||
793 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
794 | |||
795 | /* cryptlen = seqinlen - assoclen */ | ||
796 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
788 | 797 | ||
789 | /* if cryptlen is ZERO jump to zero-payload commands */ | 798 | /* if cryptlen is ZERO jump to zero-payload commands */ |
790 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
791 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | 799 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | |
792 | JUMP_COND_MATH_Z); | 800 | JUMP_COND_MATH_Z); |
793 | /* read IV */ | ||
794 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | | ||
795 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); | ||
796 | |||
797 | /* if assoclen is ZERO, skip reading the assoc data */ | ||
798 | append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); | ||
799 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
800 | JUMP_COND_MATH_Z); | ||
801 | 801 | ||
802 | /* read assoc data */ | 802 | /* read assoc data */ |
803 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 803 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
804 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | 804 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); |
805 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); | 805 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); |
806 | 806 | ||
807 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 807 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
808 | 808 | ||
809 | /* write encrypted data */ | 809 | /* write encrypted data */ |
810 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 810 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
@@ -814,31 +814,17 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
814 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | 814 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); |
815 | 815 | ||
816 | /* jump the zero-payload commands */ | 816 | /* jump the zero-payload commands */ |
817 | append_jump(desc, JUMP_TEST_ALL | 7); | 817 | append_jump(desc, JUMP_TEST_ALL | 2); |
818 | 818 | ||
819 | /* zero-payload commands */ | 819 | /* zero-payload commands */ |
820 | set_jump_tgt_here(desc, zero_payload_jump_cmd); | 820 | set_jump_tgt_here(desc, zero_payload_jump_cmd); |
821 | 821 | ||
822 | /* if assoclen is ZERO, jump to IV reading - is the only input data */ | ||
823 | append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); | ||
824 | zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | | ||
825 | JUMP_COND_MATH_Z); | ||
826 | /* read IV */ | ||
827 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | | ||
828 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); | ||
829 | |||
830 | /* read assoc data */ | 822 | /* read assoc data */ |
831 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 823 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
832 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); | 824 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); |
833 | 825 | ||
834 | /* jump to ICV writing */ | 826 | /* There is no input data */ |
835 | append_jump(desc, JUMP_TEST_ALL | 2); | ||
836 | |||
837 | /* read IV - is the only input data */ | ||
838 | set_jump_tgt_here(desc, zero_assoc_jump_cmd2); | 827 | set_jump_tgt_here(desc, zero_assoc_jump_cmd2); |
839 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | | ||
840 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | | ||
841 | FIFOLD_TYPE_LAST1); | ||
842 | 828 | ||
843 | /* write ICV */ | 829 | /* write ICV */ |
844 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | | 830 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | |
@@ -862,7 +848,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
862 | * must all fit into the 64-word Descriptor h/w Buffer | 848 | * must all fit into the 64-word Descriptor h/w Buffer |
863 | */ | 849 | */ |
864 | keys_fit_inline = false; | 850 | keys_fit_inline = false; |
865 | if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN + | 851 | if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN + |
866 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 852 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) |
867 | keys_fit_inline = true; | 853 | keys_fit_inline = true; |
868 | 854 | ||
@@ -886,33 +872,30 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
886 | append_operation(desc, ctx->class1_alg_type | | 872 | append_operation(desc, ctx->class1_alg_type | |
887 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | 873 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
888 | 874 | ||
889 | /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ | 875 | /* if assoclen is ZERO, skip reading the assoc data */ |
890 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, | 876 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
891 | ctx->authsize + tfm->ivsize); | 877 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | |
892 | 878 | JUMP_COND_MATH_Z); | |
893 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | ||
894 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
895 | append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ); | ||
896 | 879 | ||
897 | /* read IV */ | 880 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
898 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | | ||
899 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); | ||
900 | 881 | ||
901 | /* jump to zero-payload command if cryptlen is zero */ | 882 | /* skip assoc data */ |
902 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); | 883 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
903 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | ||
904 | JUMP_COND_MATH_Z); | ||
905 | 884 | ||
906 | append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); | ||
907 | /* if asoclen is ZERO, skip reading assoc data */ | ||
908 | zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | | ||
909 | JUMP_COND_MATH_Z); | ||
910 | /* read assoc data */ | 885 | /* read assoc data */ |
911 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 886 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
912 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | 887 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); |
888 | |||
913 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); | 889 | set_jump_tgt_here(desc, zero_assoc_jump_cmd1); |
914 | 890 | ||
915 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | 891 | /* cryptlen = seqoutlen - assoclen */ |
892 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
893 | |||
894 | /* jump to zero-payload command if cryptlen is zero */ | ||
895 | zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | | ||
896 | JUMP_COND_MATH_Z); | ||
897 | |||
898 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
916 | 899 | ||
917 | /* store encrypted data */ | 900 | /* store encrypted data */ |
918 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 901 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
@@ -921,21 +904,9 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) | |||
921 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 904 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
922 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); | 905 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); |
923 | 906 | ||
924 | /* jump the zero-payload commands */ | ||
925 | append_jump(desc, JUMP_TEST_ALL | 4); | ||
926 | |||
927 | /* zero-payload command */ | 907 | /* zero-payload command */ |
928 | set_jump_tgt_here(desc, zero_payload_jump_cmd); | 908 | set_jump_tgt_here(desc, zero_payload_jump_cmd); |
929 | 909 | ||
930 | /* if assoclen is ZERO, jump to ICV reading */ | ||
931 | append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); | ||
932 | zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | | ||
933 | JUMP_COND_MATH_Z); | ||
934 | /* read assoc data */ | ||
935 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
936 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
937 | set_jump_tgt_here(desc, zero_assoc_jump_cmd2); | ||
938 | |||
939 | /* read ICV */ | 910 | /* read ICV */ |
940 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | | 911 | append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | |
941 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 912 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
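The rewritten GCM descriptors above lean on the new AEAD convention: the source buffer carries the associated data followed by the payload, REG3 holds assoclen (loaded by the job descriptor, see init_aead_job() further down), and cryptlen is derived from the sequence lengths instead of being passed in. A minimal user-space sketch of that arithmetic, with illustrative values rather than the hardware's MATH registers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int assoclen = 16, cryptlen = 64, authsize = 16;

		/* Encrypt: in = AD || PT, out = AD || CT || ICV */
		unsigned int enc_in  = assoclen + cryptlen;
		unsigned int enc_out = assoclen + cryptlen + authsize;

		/* Decrypt: in = AD || CT || ICV, out = AD || PT */
		unsigned int dec_in  = assoclen + cryptlen + authsize;
		unsigned int dec_out = assoclen + cryptlen;

		/* The decrypt descriptor's "cryptlen = seqoutlen - assoclen" */
		printf("cryptlen  = %u\n", dec_out - assoclen);
		/* ICV bytes consumed beyond what is written back */
		printf("icv bytes = %u\n", dec_in - dec_out);
		printf("enc adds  = %u\n", enc_out - enc_in);
		return 0;
	}
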
@@ -968,13 +939,11 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) | |||
968 | 939 | ||
969 | static int rfc4106_set_sh_desc(struct crypto_aead *aead) | 940 | static int rfc4106_set_sh_desc(struct crypto_aead *aead) |
970 | { | 941 | { |
971 | struct aead_tfm *tfm = &aead->base.crt_aead; | ||
972 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 942 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
973 | struct device *jrdev = ctx->jrdev; | 943 | struct device *jrdev = ctx->jrdev; |
974 | bool keys_fit_inline = false; | 944 | bool keys_fit_inline = false; |
975 | u32 *key_jump_cmd, *move_cmd, *write_iv_cmd; | 945 | u32 *key_jump_cmd; |
976 | u32 *desc; | 946 | u32 *desc; |
977 | u32 geniv; | ||
978 | 947 | ||
979 | if (!ctx->enckeylen || !ctx->authsize) | 948 | if (!ctx->enckeylen || !ctx->authsize) |
980 | return 0; | 949 | return 0; |
@@ -984,7 +953,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
984 | * Job Descriptor and Shared Descriptor | 953 | * Job Descriptor and Shared Descriptor |
985 | * must fit into the 64-word Descriptor h/w Buffer | 954 | * must fit into the 64-word Descriptor h/w Buffer |
986 | */ | 955 | */ |
987 | if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN + | 956 | if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN + |
988 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 957 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) |
989 | keys_fit_inline = true; | 958 | keys_fit_inline = true; |
990 | 959 | ||
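The keys_fit_inline checks here and in the other shared-descriptor constructors all follow the same pattern: inline the key only if the shared descriptor, the job I/O commands and the key itself fit into the 64-word descriptor buffer. A hedged sketch of that test; the lengths below are made-up placeholders, not the driver's DESC_* constants:

	#include <stdbool.h>
	#include <stdio.h>

	#define CAAM_CMD_SZ		4			/* bytes per descriptor word */
	#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* 64-word h/w buffer */

	static bool keys_fit_inline(unsigned int sh_desc_len,
				    unsigned int job_io_len,
				    unsigned int enckeylen)
	{
		return sh_desc_len + job_io_len + enckeylen <= CAAM_DESC_BYTES_MAX;
	}

	int main(void)
	{
		/* e.g. an RFC4106 encrypt descriptor with a 32-byte AES key */
		printf("key inlined: %s\n",
		       keys_fit_inline(144, 60, 32) ? "yes" : "no");
		return 0;
	}
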
@@ -1007,29 +976,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
1007 | append_operation(desc, ctx->class1_alg_type | | 976 | append_operation(desc, ctx->class1_alg_type | |
1008 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 977 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
1009 | 978 | ||
1010 | /* cryptlen = seqoutlen - authsize */ | 979 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
1011 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
1012 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 980 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
1013 | 981 | ||
1014 | /* assoclen + cryptlen = seqinlen - ivsize */ | 982 | /* Skip assoc data */ |
1015 | append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); | 983 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
1016 | |||
1017 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | ||
1018 | append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); | ||
1019 | |||
1020 | /* Read Salt */ | ||
1021 | append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), | ||
1022 | 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); | ||
1023 | /* Read AES-GCM-ESP IV */ | ||
1024 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | | ||
1025 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); | ||
1026 | 984 | ||
1027 | /* Read assoc data */ | 985 | /* Read assoc data */ |
1028 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 986 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
1029 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | 987 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); |
1030 | 988 | ||
989 | /* cryptlen = seqoutlen - assoclen */ | ||
990 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
991 | |||
1031 | /* Will read cryptlen bytes */ | 992 | /* Will read cryptlen bytes */ |
1032 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 993 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
1033 | 994 | ||
1034 | /* Write encrypted data */ | 995 | /* Write encrypted data */ |
1035 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 996 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
@@ -1083,30 +1044,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
1083 | append_operation(desc, ctx->class1_alg_type | | 1044 | append_operation(desc, ctx->class1_alg_type | |
1084 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | 1045 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
1085 | 1046 | ||
1086 | /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ | 1047 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
1087 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, | 1048 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
1088 | ctx->authsize + tfm->ivsize); | ||
1089 | |||
1090 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | ||
1091 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
1092 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); | ||
1093 | |||
1094 | /* Will write cryptlen bytes */ | ||
1095 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
1096 | 1049 | ||
1097 | /* Read Salt */ | 1050 | /* Skip assoc data */ |
1098 | append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), | 1051 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
1099 | 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); | ||
1100 | /* Read AES-GCM-ESP IV */ | ||
1101 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | | ||
1102 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); | ||
1103 | 1052 | ||
1104 | /* Read assoc data */ | 1053 | /* Read assoc data */ |
1105 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 1054 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
1106 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | 1055 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); |
1107 | 1056 | ||
1057 | /* Will write cryptlen bytes */ | ||
1058 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
1059 | |||
1108 | /* Will read cryptlen bytes */ | 1060 | /* Will read cryptlen bytes */ |
1109 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | 1061 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
1110 | 1062 | ||
1111 | /* Store payload data */ | 1063 | /* Store payload data */ |
1112 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 1064 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
@@ -1132,107 +1084,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
1132 | desc_bytes(desc), 1); | 1084 | desc_bytes(desc), 1); |
1133 | #endif | 1085 | #endif |
1134 | 1086 | ||
1135 | /* | ||
1136 | * Job Descriptor and Shared Descriptors | ||
1137 | * must all fit into the 64-word Descriptor h/w Buffer | ||
1138 | */ | ||
1139 | keys_fit_inline = false; | ||
1140 | if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN + | ||
1141 | ctx->split_key_pad_len + ctx->enckeylen <= | ||
1142 | CAAM_DESC_BYTES_MAX) | ||
1143 | keys_fit_inline = true; | ||
1144 | |||
1145 | /* rfc4106_givencrypt shared descriptor */ | ||
1146 | desc = ctx->sh_desc_givenc; | ||
1147 | |||
1148 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
1149 | |||
1150 | /* Skip key loading if it is loaded due to sharing */ | ||
1151 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
1152 | JUMP_COND_SHRD); | ||
1153 | if (keys_fit_inline) | ||
1154 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
1155 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
1156 | else | ||
1157 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
1158 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
1159 | set_jump_tgt_here(desc, key_jump_cmd); | ||
1160 | |||
1161 | /* Generate IV */ | ||
1162 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | ||
1163 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | ||
1164 | NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
1165 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | ||
1166 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
1167 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
1168 | move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF | | ||
1169 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1170 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
1171 | |||
1172 | /* Copy generated IV to OFIFO */ | ||
1173 | write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO | | ||
1174 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1175 | |||
1176 | /* Class 1 operation */ | ||
1177 | append_operation(desc, ctx->class1_alg_type | | ||
1178 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
1179 | |||
1180 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
1181 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
1182 | |||
1183 | /* assoclen = seqinlen - (ivsize + cryptlen) */ | ||
1184 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
1185 | |||
1186 | /* Will write ivsize + cryptlen */ | ||
1187 | append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); | ||
1188 | |||
1189 | /* Read Salt and generated IV */ | ||
1190 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | | ||
1191 | FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12); | ||
1192 | /* Append Salt */ | ||
1193 | append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); | ||
1194 | set_move_tgt_here(desc, move_cmd); | ||
1195 | set_move_tgt_here(desc, write_iv_cmd); | ||
1196 | /* Blank commands. Will be overwritten by generated IV. */ | ||
1197 | append_cmd(desc, 0x00000000); | ||
1198 | append_cmd(desc, 0x00000000); | ||
1199 | /* End of blank commands */ | ||
1200 | |||
1201 | /* No need to reload iv */ | ||
1202 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); | ||
1203 | |||
1204 | /* Read assoc data */ | ||
1205 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
1206 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | ||
1207 | |||
1208 | /* Will read cryptlen */ | ||
1209 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
1210 | |||
1211 | /* Store generated IV and encrypted data */ | ||
1212 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | ||
1213 | |||
1214 | /* Read payload data */ | ||
1215 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
1216 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); | ||
1217 | |||
1218 | /* Write ICV */ | ||
1219 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | | ||
1220 | LDST_SRCDST_BYTE_CONTEXT); | ||
1221 | |||
1222 | ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, | ||
1223 | desc_bytes(desc), | ||
1224 | DMA_TO_DEVICE); | ||
1225 | if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { | ||
1226 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
1227 | return -ENOMEM; | ||
1228 | } | ||
1229 | #ifdef DEBUG | ||
1230 | print_hex_dump(KERN_ERR, | ||
1231 | "rfc4106 givenc shdesc@"__stringify(__LINE__)": ", | ||
1232 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
1233 | desc_bytes(desc), 1); | ||
1234 | #endif | ||
1235 | |||
1236 | return 0; | 1087 | return 0; |
1237 | } | 1088 | } |
1238 | 1089 | ||
@@ -1249,14 +1100,12 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, | |||
1249 | 1100 | ||
1250 | static int rfc4543_set_sh_desc(struct crypto_aead *aead) | 1101 | static int rfc4543_set_sh_desc(struct crypto_aead *aead) |
1251 | { | 1102 | { |
1252 | struct aead_tfm *tfm = &aead->base.crt_aead; | ||
1253 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1103 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
1254 | struct device *jrdev = ctx->jrdev; | 1104 | struct device *jrdev = ctx->jrdev; |
1255 | bool keys_fit_inline = false; | 1105 | bool keys_fit_inline = false; |
1256 | u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd; | 1106 | u32 *key_jump_cmd; |
1257 | u32 *read_move_cmd, *write_move_cmd; | 1107 | u32 *read_move_cmd, *write_move_cmd; |
1258 | u32 *desc; | 1108 | u32 *desc; |
1259 | u32 geniv; | ||
1260 | 1109 | ||
1261 | if (!ctx->enckeylen || !ctx->authsize) | 1110 | if (!ctx->enckeylen || !ctx->authsize) |
1262 | return 0; | 1111 | return 0; |
@@ -1266,7 +1115,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1266 | * Job Descriptor and Shared Descriptor | 1115 | * Job Descriptor and Shared Descriptor |
1267 | * must fit into the 64-word Descriptor h/w Buffer | 1116 | * must fit into the 64-word Descriptor h/w Buffer |
1268 | */ | 1117 | */ |
1269 | if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN + | 1118 | if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN + |
1270 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 1119 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) |
1271 | keys_fit_inline = true; | 1120 | keys_fit_inline = true; |
1272 | 1121 | ||
@@ -1289,48 +1138,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1289 | append_operation(desc, ctx->class1_alg_type | | 1138 | append_operation(desc, ctx->class1_alg_type | |
1290 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 1139 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
1291 | 1140 | ||
1292 | /* Load AES-GMAC ESP IV into Math1 register */ | 1141 | /* assoclen + cryptlen = seqinlen */ |
1293 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | | 1142 | append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); |
1294 | LDST_CLASS_DECO | tfm->ivsize); | ||
1295 | |||
1296 | /* Wait for the DMA transaction to finish */ | ||
1297 | append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | | ||
1298 | (1 << JUMP_OFFSET_SHIFT)); | ||
1299 | |||
1300 | /* Overwrite blank immediate AES-GMAC ESP IV data */ | ||
1301 | write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | | ||
1302 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1303 | |||
1304 | /* Overwrite blank immediate AAD data */ | ||
1305 | write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | | ||
1306 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1307 | |||
1308 | /* cryptlen = seqoutlen - authsize */ | ||
1309 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
1310 | |||
1311 | /* assoclen = (seqinlen - ivsize) - cryptlen */ | ||
1312 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
1313 | |||
1314 | /* Read Salt and AES-GMAC ESP IV */ | ||
1315 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | ||
1316 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); | ||
1317 | /* Append Salt */ | ||
1318 | append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); | ||
1319 | set_move_tgt_here(desc, write_iv_cmd); | ||
1320 | /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ | ||
1321 | append_cmd(desc, 0x00000000); | ||
1322 | append_cmd(desc, 0x00000000); | ||
1323 | /* End of blank commands */ | ||
1324 | |||
1325 | /* Read assoc data */ | ||
1326 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
1327 | FIFOLD_TYPE_AAD); | ||
1328 | |||
1329 | /* Will read cryptlen bytes */ | ||
1330 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
1331 | |||
1332 | /* Will write cryptlen bytes */ | ||
1333 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
1334 | 1143 | ||
1335 | /* | 1144 | /* |
1336 | * MOVE_LEN opcode is not available in all SEC HW revisions, | 1145 | * MOVE_LEN opcode is not available in all SEC HW revisions, |
@@ -1342,16 +1151,13 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1342 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | 1151 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | |
1343 | (0x8 << MOVE_LEN_SHIFT)); | 1152 | (0x8 << MOVE_LEN_SHIFT)); |
1344 | 1153 | ||
1345 | /* Authenticate AES-GMAC ESP IV */ | 1154 | /* Will read assoclen + cryptlen bytes */ |
1346 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | 1155 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
1347 | FIFOLD_TYPE_AAD | tfm->ivsize); | ||
1348 | set_move_tgt_here(desc, write_aad_cmd); | ||
1349 | /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ | ||
1350 | append_cmd(desc, 0x00000000); | ||
1351 | append_cmd(desc, 0x00000000); | ||
1352 | /* End of blank commands */ | ||
1353 | 1156 | ||
1354 | /* Read and write cryptlen bytes */ | 1157 | /* Will write assoclen + cryptlen bytes */ |
1158 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
1159 | |||
1160 | /* Read and write assoclen + cryptlen bytes */ | ||
1355 | aead_append_src_dst(desc, FIFOLD_TYPE_AAD); | 1161 | aead_append_src_dst(desc, FIFOLD_TYPE_AAD); |
1356 | 1162 | ||
1357 | set_move_tgt_here(desc, read_move_cmd); | 1163 | set_move_tgt_here(desc, read_move_cmd); |
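The read_move_cmd/write_move_cmd pair used above works around SEC revisions that lack the MOVE_LEN opcode by self-patching the descriptor: a MOVE command is appended before its target offset is known, and set_move_tgt_here() later writes the resolved offset into the already-emitted word. A toy model of that idea, with invented opcodes and field layout:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t desc[64];		/* stand-in command buffer */
	static unsigned int desc_words;

	static uint32_t *append_move_stub(uint32_t opcode)
	{
		desc[desc_words] = opcode;	/* offset field left zero for now */
		return &desc[desc_words++];
	}

	static void set_tgt_here(uint32_t *cmd)
	{
		*cmd |= desc_words & 0xff;	/* patch in the current word index */
	}

	int main(void)
	{
		uint32_t *read_move = append_move_stub(0x78000000);
		uint32_t *write_move = append_move_stub(0x79000000);

		desc[desc_words++] = 0x22220000;	/* commands appended later */
		desc[desc_words++] = 0x33330000;

		set_tgt_here(read_move);	/* both now resolve to word 4 */
		set_tgt_here(write_move);
		printf("read: 0x%08x write: 0x%08x\n", *read_move, *write_move);
		return 0;
	}
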
@@ -1382,7 +1188,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1382 | * must all fit into the 64-word Descriptor h/w Buffer | 1188 | * must all fit into the 64-word Descriptor h/w Buffer |
1383 | */ | 1189 | */ |
1384 | keys_fit_inline = false; | 1190 | keys_fit_inline = false; |
1385 | if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN + | 1191 | if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN + |
1386 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | 1192 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) |
1387 | keys_fit_inline = true; | 1193 | keys_fit_inline = true; |
1388 | 1194 | ||
@@ -1405,28 +1211,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1405 | append_operation(desc, ctx->class1_alg_type | | 1211 | append_operation(desc, ctx->class1_alg_type | |
1406 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | 1212 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
1407 | 1213 | ||
1408 | /* Load AES-GMAC ESP IV into Math1 register */ | 1214 | /* assoclen + cryptlen = seqoutlen */ |
1409 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | | 1215 | append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
1410 | LDST_CLASS_DECO | tfm->ivsize); | ||
1411 | |||
1412 | /* Wait for the DMA transaction to finish */ | ||
1413 | append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | | ||
1414 | (1 << JUMP_OFFSET_SHIFT)); | ||
1415 | |||
1416 | /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */ | ||
1417 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize); | ||
1418 | |||
1419 | /* Overwrite blank immediate AES-GMAC ESP IV data */ | ||
1420 | write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | | ||
1421 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1422 | |||
1423 | /* Overwrite blank immediate AAD data */ | ||
1424 | write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | | ||
1425 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1426 | |||
1427 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | ||
1428 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
1429 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); | ||
1430 | 1216 | ||
1431 | /* | 1217 | /* |
1432 | * MOVE_LEN opcode is not available in all SEC HW revisions, | 1218 | * MOVE_LEN opcode is not available in all SEC HW revisions, |
@@ -1438,40 +1224,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1438 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | 1224 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | |
1439 | (0x8 << MOVE_LEN_SHIFT)); | 1225 | (0x8 << MOVE_LEN_SHIFT)); |
1440 | 1226 | ||
1441 | /* Read Salt and AES-GMAC ESP IV */ | 1227 | /* Will read assoclen + cryptlen bytes */ |
1442 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | 1228 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
1443 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); | ||
1444 | /* Append Salt */ | ||
1445 | append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); | ||
1446 | set_move_tgt_here(desc, write_iv_cmd); | ||
1447 | /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ | ||
1448 | append_cmd(desc, 0x00000000); | ||
1449 | append_cmd(desc, 0x00000000); | ||
1450 | /* End of blank commands */ | ||
1451 | |||
1452 | /* Read assoc data */ | ||
1453 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
1454 | FIFOLD_TYPE_AAD); | ||
1455 | |||
1456 | /* Will read cryptlen bytes */ | ||
1457 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | ||
1458 | 1229 | ||
1459 | /* Will write cryptlen bytes */ | 1230 | /* Will write assoclen + cryptlen bytes */ |
1460 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); | 1231 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
1461 | |||
1462 | /* Authenticate AES-GMAC ESP IV */ | ||
1463 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | ||
1464 | FIFOLD_TYPE_AAD | tfm->ivsize); | ||
1465 | set_move_tgt_here(desc, write_aad_cmd); | ||
1466 | /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ | ||
1467 | append_cmd(desc, 0x00000000); | ||
1468 | append_cmd(desc, 0x00000000); | ||
1469 | /* End of blank commands */ | ||
1470 | 1232 | ||
1471 | /* Store payload data */ | 1233 | /* Store payload data */ |
1472 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 1234 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
1473 | 1235 | ||
1474 | /* In-snoop cryptlen data */ | 1236 | /* In-snoop assoclen + cryptlen data */ |
1475 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | | 1237 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | |
1476 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); | 1238 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); |
1477 | 1239 | ||
@@ -1499,135 +1261,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) | |||
1499 | desc_bytes(desc), 1); | 1261 | desc_bytes(desc), 1); |
1500 | #endif | 1262 | #endif |
1501 | 1263 | ||
1502 | /* | ||
1503 | * Job Descriptor and Shared Descriptors | ||
1504 | * must all fit into the 64-word Descriptor h/w Buffer | ||
1505 | */ | ||
1506 | keys_fit_inline = false; | ||
1507 | if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN + | ||
1508 | ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | ||
1509 | keys_fit_inline = true; | ||
1510 | |||
1511 | /* rfc4543_givencrypt shared descriptor */ | ||
1512 | desc = ctx->sh_desc_givenc; | ||
1513 | |||
1514 | init_sh_desc(desc, HDR_SHARE_SERIAL); | ||
1515 | |||
1516 | /* Skip key loading if it is loaded due to sharing */ | ||
1517 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
1518 | JUMP_COND_SHRD); | ||
1519 | if (keys_fit_inline) | ||
1520 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
1521 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
1522 | else | ||
1523 | append_key(desc, ctx->key_dma, ctx->enckeylen, | ||
1524 | CLASS_1 | KEY_DEST_CLASS_REG); | ||
1525 | set_jump_tgt_here(desc, key_jump_cmd); | ||
1526 | |||
1527 | /* Generate IV */ | ||
1528 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | ||
1529 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | ||
1530 | NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); | ||
1531 | append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | | ||
1532 | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); | ||
1533 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
1534 | /* Move generated IV to Math1 register */ | ||
1535 | append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 | | ||
1536 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1537 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | ||
1538 | |||
1539 | /* Overwrite blank immediate AES-GMAC IV data */ | ||
1540 | write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | | ||
1541 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1542 | |||
1543 | /* Overwrite blank immediate AAD data */ | ||
1544 | write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | | ||
1545 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1546 | |||
1547 | /* Copy generated IV to OFIFO */ | ||
1548 | append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO | | ||
1549 | (tfm->ivsize << MOVE_LEN_SHIFT)); | ||
1550 | |||
1551 | /* Class 1 operation */ | ||
1552 | append_operation(desc, ctx->class1_alg_type | | ||
1553 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
1554 | |||
1555 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
1556 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
1557 | |||
1558 | /* assoclen = seqinlen - (ivsize + cryptlen) */ | ||
1559 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
1560 | |||
1561 | /* Will write ivsize + cryptlen */ | ||
1562 | append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); | ||
1563 | |||
1564 | /* | ||
1565 | * MOVE_LEN opcode is not available in all SEC HW revisions, | ||
1566 | * thus need to do some magic, i.e. self-patch the descriptor | ||
1567 | * buffer. | ||
1568 | */ | ||
1569 | read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | | ||
1570 | (0x6 << MOVE_LEN_SHIFT)); | ||
1571 | write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | | ||
1572 | (0x8 << MOVE_LEN_SHIFT)); | ||
1573 | |||
1574 | /* Read Salt and AES-GMAC generated IV */ | ||
1575 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | ||
1576 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); | ||
1577 | /* Append Salt */ | ||
1578 | append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); | ||
1579 | set_move_tgt_here(desc, write_iv_cmd); | ||
1580 | /* Blank commands. Will be overwritten by AES-GMAC generated IV. */ | ||
1581 | append_cmd(desc, 0x00000000); | ||
1582 | append_cmd(desc, 0x00000000); | ||
1583 | /* End of blank commands */ | ||
1584 | |||
1585 | /* No need to reload iv */ | ||
1586 | append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); | ||
1587 | |||
1588 | /* Read assoc data */ | ||
1589 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | ||
1590 | FIFOLD_TYPE_AAD); | ||
1591 | |||
1592 | /* Will read cryptlen */ | ||
1593 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | ||
1594 | |||
1595 | /* Authenticate AES-GMAC IV */ | ||
1596 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | ||
1597 | FIFOLD_TYPE_AAD | tfm->ivsize); | ||
1598 | set_move_tgt_here(desc, write_aad_cmd); | ||
1599 | /* Blank commands. Will be overwritten by AES-GMAC IV. */ | ||
1600 | append_cmd(desc, 0x00000000); | ||
1601 | append_cmd(desc, 0x00000000); | ||
1602 | /* End of blank commands */ | ||
1603 | |||
1604 | /* Read and write cryptlen bytes */ | ||
1605 | aead_append_src_dst(desc, FIFOLD_TYPE_AAD); | ||
1606 | |||
1607 | set_move_tgt_here(desc, read_move_cmd); | ||
1608 | set_move_tgt_here(desc, write_move_cmd); | ||
1609 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | ||
1610 | /* Move payload data to OFIFO */ | ||
1611 | append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); | ||
1612 | |||
1613 | /* Write ICV */ | ||
1614 | append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | | ||
1615 | LDST_SRCDST_BYTE_CONTEXT); | ||
1616 | |||
1617 | ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, | ||
1618 | desc_bytes(desc), | ||
1619 | DMA_TO_DEVICE); | ||
1620 | if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { | ||
1621 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
1622 | return -ENOMEM; | ||
1623 | } | ||
1624 | #ifdef DEBUG | ||
1625 | print_hex_dump(KERN_ERR, | ||
1626 | "rfc4543 givenc shdesc@"__stringify(__LINE__)": ", | ||
1627 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
1628 | desc_bytes(desc), 1); | ||
1629 | #endif | ||
1630 | |||
1631 | return 0; | 1264 | return 0; |
1632 | } | 1265 | } |
1633 | 1266 | ||
@@ -2100,7 +1733,7 @@ struct aead_edesc { | |||
2100 | int sec4_sg_bytes; | 1733 | int sec4_sg_bytes; |
2101 | dma_addr_t sec4_sg_dma; | 1734 | dma_addr_t sec4_sg_dma; |
2102 | struct sec4_sg_entry *sec4_sg; | 1735 | struct sec4_sg_entry *sec4_sg; |
2103 | u32 hw_desc[0]; | 1736 | u32 hw_desc[]; |
2104 | }; | 1737 | }; |
2105 | 1738 | ||
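Alongside the behavioural changes, hw_desc[0] becomes hw_desc[], trading the old GNU zero-length array for a C99 flexible array member; either way the extended descriptor is one allocation, with the S/G table carved out right after the descriptor words as aead_edesc_alloc() below does. A self-contained sketch of that layout, with made-up sizes:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for struct aead_edesc, not the driver's type. */
	struct edesc {
		int sec4_sg_bytes;
		void *sec4_sg;
		uint32_t hw_desc[];	/* flexible array member (was [0]) */
	};

	int main(void)
	{
		size_t desc_bytes = 16 * sizeof(uint32_t);
		size_t sg_bytes = 64;

		struct edesc *e = calloc(1, sizeof(*e) + desc_bytes + sg_bytes);
		if (!e)
			return 1;

		/* S/G table lives immediately after the descriptor words. */
		e->sec4_sg = (uint8_t *)e + sizeof(*e) + desc_bytes;
		e->sec4_sg_bytes = (int)sg_bytes;

		printf("one allocation: %zu bytes\n",
		       sizeof(*e) + desc_bytes + sg_bytes);
		free(e);
		return 0;
	}
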
2106 | /* | 1739 | /* |
@@ -2154,6 +1787,16 @@ static void aead_unmap(struct device *dev, | |||
2154 | struct aead_edesc *edesc, | 1787 | struct aead_edesc *edesc, |
2155 | struct aead_request *req) | 1788 | struct aead_request *req) |
2156 | { | 1789 | { |
1790 | caam_unmap(dev, req->src, req->dst, | ||
1791 | edesc->src_nents, edesc->src_chained, edesc->dst_nents, | ||
1792 | edesc->dst_chained, 0, 0, | ||
1793 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); | ||
1794 | } | ||
1795 | |||
1796 | static void old_aead_unmap(struct device *dev, | ||
1797 | struct aead_edesc *edesc, | ||
1798 | struct aead_request *req) | ||
1799 | { | ||
2157 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1800 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2158 | int ivsize = crypto_aead_ivsize(aead); | 1801 | int ivsize = crypto_aead_ivsize(aead); |
2159 | 1802 | ||
@@ -2184,6 +1827,28 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
2184 | { | 1827 | { |
2185 | struct aead_request *req = context; | 1828 | struct aead_request *req = context; |
2186 | struct aead_edesc *edesc; | 1829 | struct aead_edesc *edesc; |
1830 | |||
1831 | #ifdef DEBUG | ||
1832 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
1833 | #endif | ||
1834 | |||
1835 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | ||
1836 | |||
1837 | if (err) | ||
1838 | caam_jr_strstatus(jrdev, err); | ||
1839 | |||
1840 | aead_unmap(jrdev, edesc, req); | ||
1841 | |||
1842 | kfree(edesc); | ||
1843 | |||
1844 | aead_request_complete(req, err); | ||
1845 | } | ||
1846 | |||
1847 | static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
1848 | void *context) | ||
1849 | { | ||
1850 | struct aead_request *req = context; | ||
1851 | struct aead_edesc *edesc; | ||
2187 | #ifdef DEBUG | 1852 | #ifdef DEBUG |
2188 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1853 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2189 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1854 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
@@ -2198,7 +1863,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
2198 | if (err) | 1863 | if (err) |
2199 | caam_jr_strstatus(jrdev, err); | 1864 | caam_jr_strstatus(jrdev, err); |
2200 | 1865 | ||
2201 | aead_unmap(jrdev, edesc, req); | 1866 | old_aead_unmap(jrdev, edesc, req); |
2202 | 1867 | ||
2203 | #ifdef DEBUG | 1868 | #ifdef DEBUG |
2204 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", | 1869 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", |
@@ -2223,6 +1888,34 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
2223 | { | 1888 | { |
2224 | struct aead_request *req = context; | 1889 | struct aead_request *req = context; |
2225 | struct aead_edesc *edesc; | 1890 | struct aead_edesc *edesc; |
1891 | |||
1892 | #ifdef DEBUG | ||
1893 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
1894 | #endif | ||
1895 | |||
1896 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | ||
1897 | |||
1898 | if (err) | ||
1899 | caam_jr_strstatus(jrdev, err); | ||
1900 | |||
1901 | aead_unmap(jrdev, edesc, req); | ||
1902 | |||
1903 | /* | ||
1904 | * verify hw auth check passed else return -EBADMSG | ||
1905 | */ | ||
1906 | if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) | ||
1907 | err = -EBADMSG; | ||
1908 | |||
1909 | kfree(edesc); | ||
1910 | |||
1911 | aead_request_complete(req, err); | ||
1912 | } | ||
1913 | |||
1914 | static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
1915 | void *context) | ||
1916 | { | ||
1917 | struct aead_request *req = context; | ||
1918 | struct aead_edesc *edesc; | ||
2226 | #ifdef DEBUG | 1919 | #ifdef DEBUG |
2227 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1920 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2228 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1921 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
@@ -2246,7 +1939,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
2246 | if (err) | 1939 | if (err) |
2247 | caam_jr_strstatus(jrdev, err); | 1940 | caam_jr_strstatus(jrdev, err); |
2248 | 1941 | ||
2249 | aead_unmap(jrdev, edesc, req); | 1942 | old_aead_unmap(jrdev, edesc, req); |
2250 | 1943 | ||
2251 | /* | 1944 | /* |
2252 | * verify hw auth check passed else return -EBADMSG | 1945 | * verify hw auth check passed else return -EBADMSG |
@@ -2342,10 +2035,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
2342 | /* | 2035 | /* |
2343 | * Fill in aead job descriptor | 2036 | * Fill in aead job descriptor |
2344 | */ | 2037 | */ |
2345 | static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | 2038 | static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr, |
2346 | struct aead_edesc *edesc, | 2039 | struct aead_edesc *edesc, |
2347 | struct aead_request *req, | 2040 | struct aead_request *req, |
2348 | bool all_contig, bool encrypt) | 2041 | bool all_contig, bool encrypt) |
2349 | { | 2042 | { |
2350 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2043 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2351 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 2044 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
@@ -2425,6 +2118,97 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, | |||
2425 | } | 2118 | } |
2426 | 2119 | ||
2427 | /* | 2120 | /* |
2121 | * Fill in aead job descriptor | ||
2122 | */ | ||
2123 | static void init_aead_job(struct aead_request *req, | ||
2124 | struct aead_edesc *edesc, | ||
2125 | bool all_contig, bool encrypt) | ||
2126 | { | ||
2127 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2128 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
2129 | int authsize = ctx->authsize; | ||
2130 | u32 *desc = edesc->hw_desc; | ||
2131 | u32 out_options, in_options; | ||
2132 | dma_addr_t dst_dma, src_dma; | ||
2133 | int len, sec4_sg_index = 0; | ||
2134 | dma_addr_t ptr; | ||
2135 | u32 *sh_desc; | ||
2136 | |||
2137 | sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; | ||
2138 | ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; | ||
2139 | |||
2140 | len = desc_len(sh_desc); | ||
2141 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
2142 | |||
2143 | if (all_contig) { | ||
2144 | src_dma = sg_dma_address(req->src); | ||
2145 | in_options = 0; | ||
2146 | } else { | ||
2147 | src_dma = edesc->sec4_sg_dma; | ||
2148 | sec4_sg_index += edesc->src_nents; | ||
2149 | in_options = LDST_SGF; | ||
2150 | } | ||
2151 | |||
2152 | append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, | ||
2153 | in_options); | ||
2154 | |||
2155 | dst_dma = src_dma; | ||
2156 | out_options = in_options; | ||
2157 | |||
2158 | if (unlikely(req->src != req->dst)) { | ||
2159 | if (!edesc->dst_nents) { | ||
2160 | dst_dma = sg_dma_address(req->dst); | ||
2161 | } else { | ||
2162 | dst_dma = edesc->sec4_sg_dma + | ||
2163 | sec4_sg_index * | ||
2164 | sizeof(struct sec4_sg_entry); | ||
2165 | out_options = LDST_SGF; | ||
2166 | } | ||
2167 | } | ||
2168 | |||
2169 | if (encrypt) | ||
2170 | append_seq_out_ptr(desc, dst_dma, | ||
2171 | req->assoclen + req->cryptlen + authsize, | ||
2172 | out_options); | ||
2173 | else | ||
2174 | append_seq_out_ptr(desc, dst_dma, | ||
2175 | req->assoclen + req->cryptlen - authsize, | ||
2176 | out_options); | ||
2177 | |||
2178 | /* REG3 = assoclen */ | ||
2179 | append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); | ||
2180 | } | ||
2181 | |||
2182 | static void init_gcm_job(struct aead_request *req, | ||
2183 | struct aead_edesc *edesc, | ||
2184 | bool all_contig, bool encrypt) | ||
2185 | { | ||
2186 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2187 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
2188 | unsigned int ivsize = crypto_aead_ivsize(aead); | ||
2189 | u32 *desc = edesc->hw_desc; | ||
2190 | bool generic_gcm = (ivsize == 12); | ||
2191 | unsigned int last; | ||
2192 | |||
2193 | init_aead_job(req, edesc, all_contig, encrypt); | ||
2194 | |||
2195 | /* BUG This should not be specific to generic GCM. */ | ||
2196 | last = 0; | ||
2197 | if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) | ||
2198 | last = FIFOLD_TYPE_LAST1; | ||
2199 | |||
2200 | /* Read GCM IV */ | ||
2201 | append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | | ||
2202 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); | ||
2203 | /* Append Salt */ | ||
2204 | if (!generic_gcm) | ||
2205 | append_data(desc, ctx->key + ctx->enckeylen, 4); | ||
2206 | /* Append IV */ | ||
2207 | append_data(desc, req->iv, ivsize); | ||
2208 | /* End of blank commands */ | ||
2209 | } | ||
2210 | |||
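init_gcm_job() feeds the cipher a 12-byte IV as immediate data: generic GCM uses the 12-byte request IV directly, while the RFC4106/RFC4543 flavours keep a 4-byte salt after the encryption key and prepend it to the 8-byte per-request IV. A hedged user-space sketch of that assembly (the salt||IV layout follows RFC 4106; the helper name is invented):

	#include <stdio.h>
	#include <string.h>

	static void build_gcm_iv(unsigned char out[12], const unsigned char *salt,
				 const unsigned char *req_iv, unsigned int ivsize)
	{
		if (ivsize == 12) {		/* generic GCM */
			memcpy(out, req_iv, 12);
		} else {			/* rfc4106/rfc4543: salt || IV */
			memcpy(out, salt, 4);
			memcpy(out + 4, req_iv, 8);
		}
	}

	int main(void)
	{
		unsigned char salt[4] = {1, 2, 3, 4};
		unsigned char iv[8] = {9, 9, 9, 9, 9, 9, 9, 9};
		unsigned char out[12];

		build_gcm_iv(out, salt, iv, 8);
		for (int i = 0; i < 12; i++)
			printf("%02x", out[i]);
		printf("\n");
		return 0;
	}
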
2211 | /* | ||
2428 | * Fill in aead givencrypt job descriptor | 2212 | * Fill in aead givencrypt job descriptor |
2429 | */ | 2213 | */ |
2430 | static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | 2214 | static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, |
@@ -2608,9 +2392,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
2608 | /* | 2392 | /* |
2609 | * allocate and map the aead extended descriptor | 2393 | * allocate and map the aead extended descriptor |
2610 | */ | 2394 | */ |
2611 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | 2395 | static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req, |
2612 | int desc_bytes, bool *all_contig_ptr, | 2396 | int desc_bytes, |
2613 | bool encrypt) | 2397 | bool *all_contig_ptr, |
2398 | bool encrypt) | ||
2614 | { | 2399 | { |
2615 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2400 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
2616 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 2401 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
@@ -2713,10 +2498,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
2713 | sec4_sg_index = 0; | 2498 | sec4_sg_index = 0; |
2714 | if (!all_contig) { | 2499 | if (!all_contig) { |
2715 | if (!is_gcm) { | 2500 | if (!is_gcm) { |
2716 | sg_to_sec4_sg(req->assoc, | 2501 | sg_to_sec4_sg_len(req->assoc, req->assoclen, |
2717 | assoc_nents, | 2502 | edesc->sec4_sg + sec4_sg_index); |
2718 | edesc->sec4_sg + | ||
2719 | sec4_sg_index, 0); | ||
2720 | sec4_sg_index += assoc_nents; | 2503 | sec4_sg_index += assoc_nents; |
2721 | } | 2504 | } |
2722 | 2505 | ||
@@ -2725,10 +2508,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
2725 | sec4_sg_index += 1; | 2508 | sec4_sg_index += 1; |
2726 | 2509 | ||
2727 | if (is_gcm) { | 2510 | if (is_gcm) { |
2728 | sg_to_sec4_sg(req->assoc, | 2511 | sg_to_sec4_sg_len(req->assoc, req->assoclen, |
2729 | assoc_nents, | 2512 | edesc->sec4_sg + sec4_sg_index); |
2730 | edesc->sec4_sg + | ||
2731 | sec4_sg_index, 0); | ||
2732 | sec4_sg_index += assoc_nents; | 2513 | sec4_sg_index += assoc_nents; |
2733 | } | 2514 | } |
2734 | 2515 | ||
@@ -2752,7 +2533,124 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
2752 | return edesc; | 2533 | return edesc; |
2753 | } | 2534 | } |
2754 | 2535 | ||
2755 | static int aead_encrypt(struct aead_request *req) | 2536 | /* |
2537 | * allocate and map the aead extended descriptor | ||
2538 | */ | ||
2539 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | ||
2540 | int desc_bytes, bool *all_contig_ptr, | ||
2541 | bool encrypt) | ||
2542 | { | ||
2543 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2544 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
2545 | struct device *jrdev = ctx->jrdev; | ||
2546 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
2547 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
2548 | int src_nents, dst_nents = 0; | ||
2549 | struct aead_edesc *edesc; | ||
2550 | int sgc; | ||
2551 | bool all_contig = true; | ||
2552 | bool src_chained = false, dst_chained = false; | ||
2553 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | ||
2554 | unsigned int authsize = ctx->authsize; | ||
2555 | |||
2556 | if (unlikely(req->dst != req->src)) { | ||
2557 | src_nents = sg_count(req->src, req->assoclen + req->cryptlen, | ||
2558 | &src_chained); | ||
2559 | dst_nents = sg_count(req->dst, | ||
2560 | req->assoclen + req->cryptlen + | ||
2561 | (encrypt ? authsize : (-authsize)), | ||
2562 | &dst_chained); | ||
2563 | } else { | ||
2564 | src_nents = sg_count(req->src, | ||
2565 | req->assoclen + req->cryptlen + | ||
2566 | (encrypt ? authsize : 0), | ||
2567 | &src_chained); | ||
2568 | } | ||
2569 | |||
2570 | /* Check if data are contiguous. */ | ||
2571 | all_contig = !src_nents; | ||
2572 | if (!all_contig) { | ||
2573 | src_nents = src_nents ? : 1; | ||
2574 | sec4_sg_len = src_nents; | ||
2575 | } | ||
2576 | |||
2577 | sec4_sg_len += dst_nents; | ||
2578 | |||
2579 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | ||
2580 | |||
2581 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
2582 | edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes + | ||
2583 | sec4_sg_bytes, GFP_DMA | flags); | ||
2584 | if (!edesc) { | ||
2585 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
2586 | return ERR_PTR(-ENOMEM); | ||
2587 | } | ||
2588 | |||
2589 | if (likely(req->src == req->dst)) { | ||
2590 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
2591 | DMA_BIDIRECTIONAL, src_chained); | ||
2592 | if (unlikely(!sgc)) { | ||
2593 | dev_err(jrdev, "unable to map source\n"); | ||
2594 | kfree(edesc); | ||
2595 | return ERR_PTR(-ENOMEM); | ||
2596 | } | ||
2597 | } else { | ||
2598 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
2599 | DMA_TO_DEVICE, src_chained); | ||
2600 | if (unlikely(!sgc)) { | ||
2601 | dev_err(jrdev, "unable to map source\n"); | ||
2602 | kfree(edesc); | ||
2603 | return ERR_PTR(-ENOMEM); | ||
2604 | } | ||
2605 | |||
2606 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, | ||
2607 | DMA_FROM_DEVICE, dst_chained); | ||
2608 | if (unlikely(!sgc)) { | ||
2609 | dev_err(jrdev, "unable to map destination\n"); | ||
2610 | dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
2611 | DMA_TO_DEVICE, src_chained); | ||
2612 | kfree(edesc); | ||
2613 | return ERR_PTR(-ENOMEM); | ||
2614 | } | ||
2615 | } | ||
2616 | |||
2617 | edesc->src_nents = src_nents; | ||
2618 | edesc->src_chained = src_chained; | ||
2619 | edesc->dst_nents = dst_nents; | ||
2620 | edesc->dst_chained = dst_chained; | ||
2621 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + | ||
2622 | desc_bytes; | ||
2623 | *all_contig_ptr = all_contig; | ||
2624 | |||
2625 | sec4_sg_index = 0; | ||
2626 | if (!all_contig) { | ||
2627 | sg_to_sec4_sg_last(req->src, src_nents, | ||
2628 | edesc->sec4_sg + sec4_sg_index, 0); | ||
2629 | sec4_sg_index += src_nents; | ||
2630 | } | ||
2631 | if (dst_nents) { | ||
2632 | sg_to_sec4_sg_last(req->dst, dst_nents, | ||
2633 | edesc->sec4_sg + sec4_sg_index, 0); | ||
2634 | } | ||
2635 | |||
2636 | if (!sec4_sg_bytes) | ||
2637 | return edesc; | ||
2638 | |||
2639 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
2640 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
2641 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
2642 | dev_err(jrdev, "unable to map S/G table\n"); | ||
2643 | aead_unmap(jrdev, edesc, req); | ||
2644 | kfree(edesc); | ||
2645 | return ERR_PTR(-ENOMEM); | ||
2646 | } | ||
2647 | |||
2648 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
2649 | |||
2650 | return edesc; | ||
2651 | } | ||
2652 | |||
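The new aead_edesc_alloc() above no longer tracks the associated data separately: sg_count() returning 0 for the source means the whole AD-plus-payload buffer is contiguous and no input S/G table is needed at all. A simplified model of how the entry count and the all_contig flag fall out; the names mirror the driver but the logic is a sketch:

	#include <stdbool.h>
	#include <stdio.h>

	static int sec4_sg_entries(int src_nents, int dst_nents, bool *all_contig)
	{
		int len = 0;

		*all_contig = !src_nents;	/* 0 entries == contiguous */
		if (!*all_contig)
			len += src_nents ? src_nents : 1;
		len += dst_nents;		/* 0 when dst == src */
		return len;
	}

	int main(void)
	{
		bool contig;
		int n = sec4_sg_entries(3, 2, &contig);

		printf("contig=%d, S/G entries=%d\n", contig, n);
		return 0;
	}
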
2653 | static int gcm_encrypt(struct aead_request *req) | ||
2756 | { | 2654 | { |
2757 | struct aead_edesc *edesc; | 2655 | struct aead_edesc *edesc; |
2758 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2656 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
@@ -2763,14 +2661,12 @@ static int aead_encrypt(struct aead_request *req) | |||
2763 | int ret = 0; | 2661 | int ret = 0; |
2764 | 2662 | ||
2765 | /* allocate extended descriptor */ | 2663 | /* allocate extended descriptor */ |
2766 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * | 2664 | edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true); |
2767 | CAAM_CMD_SZ, &all_contig, true); | ||
2768 | if (IS_ERR(edesc)) | 2665 | if (IS_ERR(edesc)) |
2769 | return PTR_ERR(edesc); | 2666 | return PTR_ERR(edesc); |
2770 | 2667 | ||
2771 | /* Create and submit job descriptor */ | 2668 | /* Create and submit job descriptor */ |
2772 | init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, | 2669 | init_gcm_job(req, edesc, all_contig, true); |
2773 | all_contig, true); | ||
2774 | #ifdef DEBUG | 2670 | #ifdef DEBUG |
2775 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 2671 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
2776 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 2672 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
@@ -2789,7 +2685,7 @@ static int aead_encrypt(struct aead_request *req) | |||
2789 | return ret; | 2685 | return ret; |
2790 | } | 2686 | } |
2791 | 2687 | ||
2792 | static int aead_decrypt(struct aead_request *req) | 2688 | static int old_aead_encrypt(struct aead_request *req) |
2793 | { | 2689 | { |
2794 | struct aead_edesc *edesc; | 2690 | struct aead_edesc *edesc; |
2795 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2691 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
@@ -2800,8 +2696,80 @@ static int aead_decrypt(struct aead_request *req) | |||
2800 | int ret = 0; | 2696 | int ret = 0; |
2801 | 2697 | ||
2802 | /* allocate extended descriptor */ | 2698 | /* allocate extended descriptor */ |
2803 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * | 2699 | edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
2804 | CAAM_CMD_SZ, &all_contig, false); | 2700 | CAAM_CMD_SZ, &all_contig, true); |
2701 | if (IS_ERR(edesc)) | ||
2702 | return PTR_ERR(edesc); | ||
2703 | |||
2704 | /* Create and submit job descriptor */ | ||
2705 | old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, | ||
2706 | all_contig, true); | ||
2707 | #ifdef DEBUG | ||
2708 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | ||
2709 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
2710 | desc_bytes(edesc->hw_desc), 1); | ||
2711 | #endif | ||
2712 | |||
2713 | desc = edesc->hw_desc; | ||
2714 | ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); | ||
2715 | if (!ret) { | ||
2716 | ret = -EINPROGRESS; | ||
2717 | } else { | ||
2718 | old_aead_unmap(jrdev, edesc, req); | ||
2719 | kfree(edesc); | ||
2720 | } | ||
2721 | |||
2722 | return ret; | ||
2723 | } | ||
2724 | |||
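gcm_encrypt(), gcm_decrypt() and their old_ counterparts all share the job-ring contract visible here: caam_jr_enqueue() returning 0 hands ownership to the completion callback and the caller reports -EINPROGRESS, while a non-zero return means the caller unmaps and frees synchronously. A stub model of that contract; the ring below completes inline, unlike the real asynchronous hardware:

	#include <errno.h>
	#include <stdio.h>

	typedef void (*jr_cbk)(int err, void *context);

	/* Stub ring: invokes the callback immediately and reports success. */
	static int fake_jr_enqueue(jr_cbk cbk, void *context)
	{
		cbk(0, context);
		return 0;
	}

	static void done(int err, void *context)
	{
		/* real driver: unmap DMA, kfree(edesc), aead_request_complete() */
		printf("request %p completed, err=%d\n", context, err);
	}

	int main(void)
	{
		int req = 0;
		int ret = fake_jr_enqueue(done, &req);

		ret = ret ? ret : -EINPROGRESS;	/* mirror the driver's convention */
		printf("submit path returned %d\n", ret);
		return 0;
	}
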
2725 | static int gcm_decrypt(struct aead_request *req) | ||
2726 | { | ||
2727 | struct aead_edesc *edesc; | ||
2728 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2729 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
2730 | struct device *jrdev = ctx->jrdev; | ||
2731 | bool all_contig; | ||
2732 | u32 *desc; | ||
2733 | int ret = 0; | ||
2734 | |||
2735 | /* allocate extended descriptor */ | ||
2736 | edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false); | ||
2737 | if (IS_ERR(edesc)) | ||
2738 | return PTR_ERR(edesc); | ||
2739 | |||
2740 | /* Create and submit job descriptor*/ | ||
2741 | init_gcm_job(req, edesc, all_contig, false); | ||
2742 | #ifdef DEBUG | ||
2743 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | ||
2744 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
2745 | desc_bytes(edesc->hw_desc), 1); | ||
2746 | #endif | ||
2747 | |||
2748 | desc = edesc->hw_desc; | ||
2749 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | ||
2750 | if (!ret) { | ||
2751 | ret = -EINPROGRESS; | ||
2752 | } else { | ||
2753 | aead_unmap(jrdev, edesc, req); | ||
2754 | kfree(edesc); | ||
2755 | } | ||
2756 | |||
2757 | return ret; | ||
2758 | } | ||
2759 | |||
2760 | static int old_aead_decrypt(struct aead_request *req) | ||
2761 | { | ||
2762 | struct aead_edesc *edesc; | ||
2763 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2764 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
2765 | struct device *jrdev = ctx->jrdev; | ||
2766 | bool all_contig; | ||
2767 | u32 *desc; | ||
2768 | int ret = 0; | ||
2769 | |||
2770 | /* allocate extended descriptor */ | ||
2771 | edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * | ||
2772 | CAAM_CMD_SZ, &all_contig, false); | ||
2805 | if (IS_ERR(edesc)) | 2773 | if (IS_ERR(edesc)) |
2806 | return PTR_ERR(edesc); | 2774 | return PTR_ERR(edesc); |
2807 | 2775 | ||
@@ -2812,8 +2780,8 @@ static int aead_decrypt(struct aead_request *req) | |||
2812 | #endif | 2780 | #endif |
2813 | 2781 | ||
2814 | /* Create and submit job descriptor*/ | 2782 | /* Create and submit job descriptor*/ |
2815 | init_aead_job(ctx->sh_desc_dec, | 2783 | old_init_aead_job(ctx->sh_desc_dec, |
2816 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); | 2784 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); |
2817 | #ifdef DEBUG | 2785 | #ifdef DEBUG |
2818 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 2786 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
2819 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 2787 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
@@ -2821,11 +2789,11 @@ static int aead_decrypt(struct aead_request *req) | |||
2821 | #endif | 2789 | #endif |
2822 | 2790 | ||
2823 | desc = edesc->hw_desc; | 2791 | desc = edesc->hw_desc; |
2824 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | 2792 | ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req); |
2825 | if (!ret) { | 2793 | if (!ret) { |
2826 | ret = -EINPROGRESS; | 2794 | ret = -EINPROGRESS; |
2827 | } else { | 2795 | } else { |
2828 | aead_unmap(jrdev, edesc, req); | 2796 | old_aead_unmap(jrdev, edesc, req); |
2829 | kfree(edesc); | 2797 | kfree(edesc); |
2830 | } | 2798 | } |
2831 | 2799 | ||
@@ -2953,8 +2921,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
2953 | sec4_sg_index = 0; | 2921 | sec4_sg_index = 0; |
2954 | if (!(contig & GIV_SRC_CONTIG)) { | 2922 | if (!(contig & GIV_SRC_CONTIG)) { |
2955 | if (!is_gcm) { | 2923 | if (!is_gcm) { |
2956 | sg_to_sec4_sg(req->assoc, assoc_nents, | 2924 | sg_to_sec4_sg_len(req->assoc, req->assoclen, |
2957 | edesc->sec4_sg + sec4_sg_index, 0); | 2925 | edesc->sec4_sg + sec4_sg_index); |
2958 | sec4_sg_index += assoc_nents; | 2926 | sec4_sg_index += assoc_nents; |
2959 | } | 2927 | } |
2960 | 2928 | ||
@@ -2963,8 +2931,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
2963 | sec4_sg_index += 1; | 2931 | sec4_sg_index += 1; |
2964 | 2932 | ||
2965 | if (is_gcm) { | 2933 | if (is_gcm) { |
2966 | sg_to_sec4_sg(req->assoc, assoc_nents, | 2934 | sg_to_sec4_sg_len(req->assoc, req->assoclen, |
2967 | edesc->sec4_sg + sec4_sg_index, 0); | 2935 | edesc->sec4_sg + sec4_sg_index); |
2968 | sec4_sg_index += assoc_nents; | 2936 | sec4_sg_index += assoc_nents; |
2969 | } | 2937 | } |
2970 | 2938 | ||
@@ -2999,7 +2967,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | |||
2999 | return edesc; | 2967 | return edesc; |
3000 | } | 2968 | } |
3001 | 2969 | ||
3002 | static int aead_givencrypt(struct aead_givcrypt_request *areq) | 2970 | static int old_aead_givencrypt(struct aead_givcrypt_request *areq) |
3003 | { | 2971 | { |
3004 | struct aead_request *req = &areq->areq; | 2972 | struct aead_request *req = &areq->areq; |
3005 | struct aead_edesc *edesc; | 2973 | struct aead_edesc *edesc; |
@@ -3033,11 +3001,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) | |||
3033 | #endif | 3001 | #endif |
3034 | 3002 | ||
3035 | desc = edesc->hw_desc; | 3003 | desc = edesc->hw_desc; |
3036 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | 3004 | ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); |
3037 | if (!ret) { | 3005 | if (!ret) { |
3038 | ret = -EINPROGRESS; | 3006 | ret = -EINPROGRESS; |
3039 | } else { | 3007 | } else { |
3040 | aead_unmap(jrdev, edesc, req); | 3008 | old_aead_unmap(jrdev, edesc, req); |
3041 | kfree(edesc); | 3009 | kfree(edesc); |
3042 | } | 3010 | } |
3043 | 3011 | ||
@@ -3046,7 +3014,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) | |||
3046 | 3014 | ||
3047 | static int aead_null_givencrypt(struct aead_givcrypt_request *areq) | 3015 | static int aead_null_givencrypt(struct aead_givcrypt_request *areq) |
3048 | { | 3016 | { |
3049 | return aead_encrypt(&areq->areq); | 3017 | return old_aead_encrypt(&areq->areq); |
3050 | } | 3018 | } |
3051 | 3019 | ||
3052 | /* | 3020 | /* |
@@ -3379,11 +3347,7 @@ struct caam_alg_template { | |||
3379 | u32 type; | 3347 | u32 type; |
3380 | union { | 3348 | union { |
3381 | struct ablkcipher_alg ablkcipher; | 3349 | struct ablkcipher_alg ablkcipher; |
3382 | struct aead_alg aead; | 3350 | struct old_aead_alg aead; |
3383 | struct blkcipher_alg blkcipher; | ||
3384 | struct cipher_alg cipher; | ||
3385 | struct compress_alg compress; | ||
3386 | struct rng_alg rng; | ||
3387 | } template_u; | 3351 | } template_u; |
3388 | u32 class1_alg_type; | 3352 | u32 class1_alg_type; |
3389 | u32 class2_alg_type; | 3353 | u32 class2_alg_type; |
@@ -3400,8 +3364,8 @@ static struct caam_alg_template driver_algs[] = { | |||
3400 | .template_aead = { | 3364 | .template_aead = { |
3401 | .setkey = aead_setkey, | 3365 | .setkey = aead_setkey, |
3402 | .setauthsize = aead_setauthsize, | 3366 | .setauthsize = aead_setauthsize, |
3403 | .encrypt = aead_encrypt, | 3367 | .encrypt = old_aead_encrypt, |
3404 | .decrypt = aead_decrypt, | 3368 | .decrypt = old_aead_decrypt, |
3405 | .givencrypt = aead_null_givencrypt, | 3369 | .givencrypt = aead_null_givencrypt, |
3406 | .geniv = "<built-in>", | 3370 | .geniv = "<built-in>", |
3407 | .ivsize = NULL_IV_SIZE, | 3371 | .ivsize = NULL_IV_SIZE, |
@@ -3419,8 +3383,8 @@ static struct caam_alg_template driver_algs[] = { | |||
3419 | .template_aead = { | 3383 | .template_aead = { |
3420 | .setkey = aead_setkey, | 3384 | .setkey = aead_setkey, |
3421 | .setauthsize = aead_setauthsize, | 3385 | .setauthsize = aead_setauthsize, |
3422 | .encrypt = aead_encrypt, | 3386 | .encrypt = old_aead_encrypt, |
3423 | .decrypt = aead_decrypt, | 3387 | .decrypt = old_aead_decrypt, |
3424 | .givencrypt = aead_null_givencrypt, | 3388 | .givencrypt = aead_null_givencrypt, |
3425 | .geniv = "<built-in>", | 3389 | .geniv = "<built-in>", |
3426 | .ivsize = NULL_IV_SIZE, | 3390 | .ivsize = NULL_IV_SIZE, |
@@ -3438,8 +3402,8 @@ static struct caam_alg_template driver_algs[] = { | |||
3438 | .template_aead = { | 3402 | .template_aead = { |
3439 | .setkey = aead_setkey, | 3403 | .setkey = aead_setkey, |
3440 | .setauthsize = aead_setauthsize, | 3404 | .setauthsize = aead_setauthsize, |
3441 | .encrypt = aead_encrypt, | 3405 | .encrypt = old_aead_encrypt, |
3442 | .decrypt = aead_decrypt, | 3406 | .decrypt = old_aead_decrypt, |
3443 | .givencrypt = aead_null_givencrypt, | 3407 | .givencrypt = aead_null_givencrypt, |
3444 | .geniv = "<built-in>", | 3408 | .geniv = "<built-in>", |
3445 | .ivsize = NULL_IV_SIZE, | 3409 | .ivsize = NULL_IV_SIZE, |
@@ -3458,8 +3422,8 @@ static struct caam_alg_template driver_algs[] = { | |||
3458 | .template_aead = { | 3422 | .template_aead = { |
3459 | .setkey = aead_setkey, | 3423 | .setkey = aead_setkey, |
3460 | .setauthsize = aead_setauthsize, | 3424 | .setauthsize = aead_setauthsize, |
3461 | .encrypt = aead_encrypt, | 3425 | .encrypt = old_aead_encrypt, |
3462 | .decrypt = aead_decrypt, | 3426 | .decrypt = old_aead_decrypt, |
3463 | .givencrypt = aead_null_givencrypt, | 3427 | .givencrypt = aead_null_givencrypt, |
3464 | .geniv = "<built-in>", | 3428 | .geniv = "<built-in>", |
3465 | .ivsize = NULL_IV_SIZE, | 3429 | .ivsize = NULL_IV_SIZE, |
@@ -3478,8 +3442,8 @@ static struct caam_alg_template driver_algs[] = { | |||
3478 | .template_aead = { | 3442 | .template_aead = { |
3479 | .setkey = aead_setkey, | 3443 | .setkey = aead_setkey, |
3480 | .setauthsize = aead_setauthsize, | 3444 | .setauthsize = aead_setauthsize, |
3481 | .encrypt = aead_encrypt, | 3445 | .encrypt = old_aead_encrypt, |
3482 | .decrypt = aead_decrypt, | 3446 | .decrypt = old_aead_decrypt, |
3483 | .givencrypt = aead_null_givencrypt, | 3447 | .givencrypt = aead_null_givencrypt, |
3484 | .geniv = "<built-in>", | 3448 | .geniv = "<built-in>", |
3485 | .ivsize = NULL_IV_SIZE, | 3449 | .ivsize = NULL_IV_SIZE, |
@@ -3498,8 +3462,8 @@ static struct caam_alg_template driver_algs[] = { | |||
3498 | .template_aead = { | 3462 | .template_aead = { |
3499 | .setkey = aead_setkey, | 3463 | .setkey = aead_setkey, |
3500 | .setauthsize = aead_setauthsize, | 3464 | .setauthsize = aead_setauthsize, |
3501 | .encrypt = aead_encrypt, | 3465 | .encrypt = old_aead_encrypt, |
3502 | .decrypt = aead_decrypt, | 3466 | .decrypt = old_aead_decrypt, |
3503 | .givencrypt = aead_null_givencrypt, | 3467 | .givencrypt = aead_null_givencrypt, |
3504 | .geniv = "<built-in>", | 3468 | .geniv = "<built-in>", |
3505 | .ivsize = NULL_IV_SIZE, | 3469 | .ivsize = NULL_IV_SIZE, |
@@ -3518,9 +3482,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3518 | .template_aead = { | 3482 | .template_aead = { |
3519 | .setkey = aead_setkey, | 3483 | .setkey = aead_setkey, |
3520 | .setauthsize = aead_setauthsize, | 3484 | .setauthsize = aead_setauthsize, |
3521 | .encrypt = aead_encrypt, | 3485 | .encrypt = old_aead_encrypt, |
3522 | .decrypt = aead_decrypt, | 3486 | .decrypt = old_aead_decrypt, |
3523 | .givencrypt = aead_givencrypt, | 3487 | .givencrypt = old_aead_givencrypt, |
3524 | .geniv = "<built-in>", | 3488 | .geniv = "<built-in>", |
3525 | .ivsize = AES_BLOCK_SIZE, | 3489 | .ivsize = AES_BLOCK_SIZE, |
3526 | .maxauthsize = MD5_DIGEST_SIZE, | 3490 | .maxauthsize = MD5_DIGEST_SIZE, |
@@ -3537,9 +3501,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3537 | .template_aead = { | 3501 | .template_aead = { |
3538 | .setkey = aead_setkey, | 3502 | .setkey = aead_setkey, |
3539 | .setauthsize = aead_setauthsize, | 3503 | .setauthsize = aead_setauthsize, |
3540 | .encrypt = aead_encrypt, | 3504 | .encrypt = old_aead_encrypt, |
3541 | .decrypt = aead_decrypt, | 3505 | .decrypt = old_aead_decrypt, |
3542 | .givencrypt = aead_givencrypt, | 3506 | .givencrypt = old_aead_givencrypt, |
3543 | .geniv = "<built-in>", | 3507 | .geniv = "<built-in>", |
3544 | .ivsize = AES_BLOCK_SIZE, | 3508 | .ivsize = AES_BLOCK_SIZE, |
3545 | .maxauthsize = SHA1_DIGEST_SIZE, | 3509 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -3556,9 +3520,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3556 | .template_aead = { | 3520 | .template_aead = { |
3557 | .setkey = aead_setkey, | 3521 | .setkey = aead_setkey, |
3558 | .setauthsize = aead_setauthsize, | 3522 | .setauthsize = aead_setauthsize, |
3559 | .encrypt = aead_encrypt, | 3523 | .encrypt = old_aead_encrypt, |
3560 | .decrypt = aead_decrypt, | 3524 | .decrypt = old_aead_decrypt, |
3561 | .givencrypt = aead_givencrypt, | 3525 | .givencrypt = old_aead_givencrypt, |
3562 | .geniv = "<built-in>", | 3526 | .geniv = "<built-in>", |
3563 | .ivsize = AES_BLOCK_SIZE, | 3527 | .ivsize = AES_BLOCK_SIZE, |
3564 | .maxauthsize = SHA224_DIGEST_SIZE, | 3528 | .maxauthsize = SHA224_DIGEST_SIZE, |
@@ -3576,9 +3540,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3576 | .template_aead = { | 3540 | .template_aead = { |
3577 | .setkey = aead_setkey, | 3541 | .setkey = aead_setkey, |
3578 | .setauthsize = aead_setauthsize, | 3542 | .setauthsize = aead_setauthsize, |
3579 | .encrypt = aead_encrypt, | 3543 | .encrypt = old_aead_encrypt, |
3580 | .decrypt = aead_decrypt, | 3544 | .decrypt = old_aead_decrypt, |
3581 | .givencrypt = aead_givencrypt, | 3545 | .givencrypt = old_aead_givencrypt, |
3582 | .geniv = "<built-in>", | 3546 | .geniv = "<built-in>", |
3583 | .ivsize = AES_BLOCK_SIZE, | 3547 | .ivsize = AES_BLOCK_SIZE, |
3584 | .maxauthsize = SHA256_DIGEST_SIZE, | 3548 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -3596,9 +3560,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3596 | .template_aead = { | 3560 | .template_aead = { |
3597 | .setkey = aead_setkey, | 3561 | .setkey = aead_setkey, |
3598 | .setauthsize = aead_setauthsize, | 3562 | .setauthsize = aead_setauthsize, |
3599 | .encrypt = aead_encrypt, | 3563 | .encrypt = old_aead_encrypt, |
3600 | .decrypt = aead_decrypt, | 3564 | .decrypt = old_aead_decrypt, |
3601 | .givencrypt = aead_givencrypt, | 3565 | .givencrypt = old_aead_givencrypt, |
3602 | .geniv = "<built-in>", | 3566 | .geniv = "<built-in>", |
3603 | .ivsize = AES_BLOCK_SIZE, | 3567 | .ivsize = AES_BLOCK_SIZE, |
3604 | .maxauthsize = SHA384_DIGEST_SIZE, | 3568 | .maxauthsize = SHA384_DIGEST_SIZE, |
@@ -3617,9 +3581,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3617 | .template_aead = { | 3581 | .template_aead = { |
3618 | .setkey = aead_setkey, | 3582 | .setkey = aead_setkey, |
3619 | .setauthsize = aead_setauthsize, | 3583 | .setauthsize = aead_setauthsize, |
3620 | .encrypt = aead_encrypt, | 3584 | .encrypt = old_aead_encrypt, |
3621 | .decrypt = aead_decrypt, | 3585 | .decrypt = old_aead_decrypt, |
3622 | .givencrypt = aead_givencrypt, | 3586 | .givencrypt = old_aead_givencrypt, |
3623 | .geniv = "<built-in>", | 3587 | .geniv = "<built-in>", |
3624 | .ivsize = AES_BLOCK_SIZE, | 3588 | .ivsize = AES_BLOCK_SIZE, |
3625 | .maxauthsize = SHA512_DIGEST_SIZE, | 3589 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -3637,9 +3601,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3637 | .template_aead = { | 3601 | .template_aead = { |
3638 | .setkey = aead_setkey, | 3602 | .setkey = aead_setkey, |
3639 | .setauthsize = aead_setauthsize, | 3603 | .setauthsize = aead_setauthsize, |
3640 | .encrypt = aead_encrypt, | 3604 | .encrypt = old_aead_encrypt, |
3641 | .decrypt = aead_decrypt, | 3605 | .decrypt = old_aead_decrypt, |
3642 | .givencrypt = aead_givencrypt, | 3606 | .givencrypt = old_aead_givencrypt, |
3643 | .geniv = "<built-in>", | 3607 | .geniv = "<built-in>", |
3644 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3608 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3645 | .maxauthsize = MD5_DIGEST_SIZE, | 3609 | .maxauthsize = MD5_DIGEST_SIZE, |
@@ -3656,9 +3620,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3656 | .template_aead = { | 3620 | .template_aead = { |
3657 | .setkey = aead_setkey, | 3621 | .setkey = aead_setkey, |
3658 | .setauthsize = aead_setauthsize, | 3622 | .setauthsize = aead_setauthsize, |
3659 | .encrypt = aead_encrypt, | 3623 | .encrypt = old_aead_encrypt, |
3660 | .decrypt = aead_decrypt, | 3624 | .decrypt = old_aead_decrypt, |
3661 | .givencrypt = aead_givencrypt, | 3625 | .givencrypt = old_aead_givencrypt, |
3662 | .geniv = "<built-in>", | 3626 | .geniv = "<built-in>", |
3663 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3627 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3664 | .maxauthsize = SHA1_DIGEST_SIZE, | 3628 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -3675,9 +3639,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3675 | .template_aead = { | 3639 | .template_aead = { |
3676 | .setkey = aead_setkey, | 3640 | .setkey = aead_setkey, |
3677 | .setauthsize = aead_setauthsize, | 3641 | .setauthsize = aead_setauthsize, |
3678 | .encrypt = aead_encrypt, | 3642 | .encrypt = old_aead_encrypt, |
3679 | .decrypt = aead_decrypt, | 3643 | .decrypt = old_aead_decrypt, |
3680 | .givencrypt = aead_givencrypt, | 3644 | .givencrypt = old_aead_givencrypt, |
3681 | .geniv = "<built-in>", | 3645 | .geniv = "<built-in>", |
3682 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3646 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3683 | .maxauthsize = SHA224_DIGEST_SIZE, | 3647 | .maxauthsize = SHA224_DIGEST_SIZE, |
@@ -3695,9 +3659,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3695 | .template_aead = { | 3659 | .template_aead = { |
3696 | .setkey = aead_setkey, | 3660 | .setkey = aead_setkey, |
3697 | .setauthsize = aead_setauthsize, | 3661 | .setauthsize = aead_setauthsize, |
3698 | .encrypt = aead_encrypt, | 3662 | .encrypt = old_aead_encrypt, |
3699 | .decrypt = aead_decrypt, | 3663 | .decrypt = old_aead_decrypt, |
3700 | .givencrypt = aead_givencrypt, | 3664 | .givencrypt = old_aead_givencrypt, |
3701 | .geniv = "<built-in>", | 3665 | .geniv = "<built-in>", |
3702 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3666 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3703 | .maxauthsize = SHA256_DIGEST_SIZE, | 3667 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -3715,9 +3679,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3715 | .template_aead = { | 3679 | .template_aead = { |
3716 | .setkey = aead_setkey, | 3680 | .setkey = aead_setkey, |
3717 | .setauthsize = aead_setauthsize, | 3681 | .setauthsize = aead_setauthsize, |
3718 | .encrypt = aead_encrypt, | 3682 | .encrypt = old_aead_encrypt, |
3719 | .decrypt = aead_decrypt, | 3683 | .decrypt = old_aead_decrypt, |
3720 | .givencrypt = aead_givencrypt, | 3684 | .givencrypt = old_aead_givencrypt, |
3721 | .geniv = "<built-in>", | 3685 | .geniv = "<built-in>", |
3722 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3686 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3723 | .maxauthsize = SHA384_DIGEST_SIZE, | 3687 | .maxauthsize = SHA384_DIGEST_SIZE, |
@@ -3735,9 +3699,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3735 | .template_aead = { | 3699 | .template_aead = { |
3736 | .setkey = aead_setkey, | 3700 | .setkey = aead_setkey, |
3737 | .setauthsize = aead_setauthsize, | 3701 | .setauthsize = aead_setauthsize, |
3738 | .encrypt = aead_encrypt, | 3702 | .encrypt = old_aead_encrypt, |
3739 | .decrypt = aead_decrypt, | 3703 | .decrypt = old_aead_decrypt, |
3740 | .givencrypt = aead_givencrypt, | 3704 | .givencrypt = old_aead_givencrypt, |
3741 | .geniv = "<built-in>", | 3705 | .geniv = "<built-in>", |
3742 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3706 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3743 | .maxauthsize = SHA512_DIGEST_SIZE, | 3707 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -3755,9 +3719,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3755 | .template_aead = { | 3719 | .template_aead = { |
3756 | .setkey = aead_setkey, | 3720 | .setkey = aead_setkey, |
3757 | .setauthsize = aead_setauthsize, | 3721 | .setauthsize = aead_setauthsize, |
3758 | .encrypt = aead_encrypt, | 3722 | .encrypt = old_aead_encrypt, |
3759 | .decrypt = aead_decrypt, | 3723 | .decrypt = old_aead_decrypt, |
3760 | .givencrypt = aead_givencrypt, | 3724 | .givencrypt = old_aead_givencrypt, |
3761 | .geniv = "<built-in>", | 3725 | .geniv = "<built-in>", |
3762 | .ivsize = DES_BLOCK_SIZE, | 3726 | .ivsize = DES_BLOCK_SIZE, |
3763 | .maxauthsize = MD5_DIGEST_SIZE, | 3727 | .maxauthsize = MD5_DIGEST_SIZE, |
@@ -3774,9 +3738,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3774 | .template_aead = { | 3738 | .template_aead = { |
3775 | .setkey = aead_setkey, | 3739 | .setkey = aead_setkey, |
3776 | .setauthsize = aead_setauthsize, | 3740 | .setauthsize = aead_setauthsize, |
3777 | .encrypt = aead_encrypt, | 3741 | .encrypt = old_aead_encrypt, |
3778 | .decrypt = aead_decrypt, | 3742 | .decrypt = old_aead_decrypt, |
3779 | .givencrypt = aead_givencrypt, | 3743 | .givencrypt = old_aead_givencrypt, |
3780 | .geniv = "<built-in>", | 3744 | .geniv = "<built-in>", |
3781 | .ivsize = DES_BLOCK_SIZE, | 3745 | .ivsize = DES_BLOCK_SIZE, |
3782 | .maxauthsize = SHA1_DIGEST_SIZE, | 3746 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -3793,9 +3757,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3793 | .template_aead = { | 3757 | .template_aead = { |
3794 | .setkey = aead_setkey, | 3758 | .setkey = aead_setkey, |
3795 | .setauthsize = aead_setauthsize, | 3759 | .setauthsize = aead_setauthsize, |
3796 | .encrypt = aead_encrypt, | 3760 | .encrypt = old_aead_encrypt, |
3797 | .decrypt = aead_decrypt, | 3761 | .decrypt = old_aead_decrypt, |
3798 | .givencrypt = aead_givencrypt, | 3762 | .givencrypt = old_aead_givencrypt, |
3799 | .geniv = "<built-in>", | 3763 | .geniv = "<built-in>", |
3800 | .ivsize = DES_BLOCK_SIZE, | 3764 | .ivsize = DES_BLOCK_SIZE, |
3801 | .maxauthsize = SHA224_DIGEST_SIZE, | 3765 | .maxauthsize = SHA224_DIGEST_SIZE, |
@@ -3813,9 +3777,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3813 | .template_aead = { | 3777 | .template_aead = { |
3814 | .setkey = aead_setkey, | 3778 | .setkey = aead_setkey, |
3815 | .setauthsize = aead_setauthsize, | 3779 | .setauthsize = aead_setauthsize, |
3816 | .encrypt = aead_encrypt, | 3780 | .encrypt = old_aead_encrypt, |
3817 | .decrypt = aead_decrypt, | 3781 | .decrypt = old_aead_decrypt, |
3818 | .givencrypt = aead_givencrypt, | 3782 | .givencrypt = old_aead_givencrypt, |
3819 | .geniv = "<built-in>", | 3783 | .geniv = "<built-in>", |
3820 | .ivsize = DES_BLOCK_SIZE, | 3784 | .ivsize = DES_BLOCK_SIZE, |
3821 | .maxauthsize = SHA256_DIGEST_SIZE, | 3785 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -3833,9 +3797,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3833 | .template_aead = { | 3797 | .template_aead = { |
3834 | .setkey = aead_setkey, | 3798 | .setkey = aead_setkey, |
3835 | .setauthsize = aead_setauthsize, | 3799 | .setauthsize = aead_setauthsize, |
3836 | .encrypt = aead_encrypt, | 3800 | .encrypt = old_aead_encrypt, |
3837 | .decrypt = aead_decrypt, | 3801 | .decrypt = old_aead_decrypt, |
3838 | .givencrypt = aead_givencrypt, | 3802 | .givencrypt = old_aead_givencrypt, |
3839 | .geniv = "<built-in>", | 3803 | .geniv = "<built-in>", |
3840 | .ivsize = DES_BLOCK_SIZE, | 3804 | .ivsize = DES_BLOCK_SIZE, |
3841 | .maxauthsize = SHA384_DIGEST_SIZE, | 3805 | .maxauthsize = SHA384_DIGEST_SIZE, |
@@ -3853,9 +3817,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3853 | .template_aead = { | 3817 | .template_aead = { |
3854 | .setkey = aead_setkey, | 3818 | .setkey = aead_setkey, |
3855 | .setauthsize = aead_setauthsize, | 3819 | .setauthsize = aead_setauthsize, |
3856 | .encrypt = aead_encrypt, | 3820 | .encrypt = old_aead_encrypt, |
3857 | .decrypt = aead_decrypt, | 3821 | .decrypt = old_aead_decrypt, |
3858 | .givencrypt = aead_givencrypt, | 3822 | .givencrypt = old_aead_givencrypt, |
3859 | .geniv = "<built-in>", | 3823 | .geniv = "<built-in>", |
3860 | .ivsize = DES_BLOCK_SIZE, | 3824 | .ivsize = DES_BLOCK_SIZE, |
3861 | .maxauthsize = SHA512_DIGEST_SIZE, | 3825 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -3873,9 +3837,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3873 | .template_aead = { | 3837 | .template_aead = { |
3874 | .setkey = aead_setkey, | 3838 | .setkey = aead_setkey, |
3875 | .setauthsize = aead_setauthsize, | 3839 | .setauthsize = aead_setauthsize, |
3876 | .encrypt = aead_encrypt, | 3840 | .encrypt = old_aead_encrypt, |
3877 | .decrypt = aead_decrypt, | 3841 | .decrypt = old_aead_decrypt, |
3878 | .givencrypt = aead_givencrypt, | 3842 | .givencrypt = old_aead_givencrypt, |
3879 | .geniv = "<built-in>", | 3843 | .geniv = "<built-in>", |
3880 | .ivsize = CTR_RFC3686_IV_SIZE, | 3844 | .ivsize = CTR_RFC3686_IV_SIZE, |
3881 | .maxauthsize = MD5_DIGEST_SIZE, | 3845 | .maxauthsize = MD5_DIGEST_SIZE, |
@@ -3892,9 +3856,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3892 | .template_aead = { | 3856 | .template_aead = { |
3893 | .setkey = aead_setkey, | 3857 | .setkey = aead_setkey, |
3894 | .setauthsize = aead_setauthsize, | 3858 | .setauthsize = aead_setauthsize, |
3895 | .encrypt = aead_encrypt, | 3859 | .encrypt = old_aead_encrypt, |
3896 | .decrypt = aead_decrypt, | 3860 | .decrypt = old_aead_decrypt, |
3897 | .givencrypt = aead_givencrypt, | 3861 | .givencrypt = old_aead_givencrypt, |
3898 | .geniv = "<built-in>", | 3862 | .geniv = "<built-in>", |
3899 | .ivsize = CTR_RFC3686_IV_SIZE, | 3863 | .ivsize = CTR_RFC3686_IV_SIZE, |
3900 | .maxauthsize = SHA1_DIGEST_SIZE, | 3864 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -3911,9 +3875,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3911 | .template_aead = { | 3875 | .template_aead = { |
3912 | .setkey = aead_setkey, | 3876 | .setkey = aead_setkey, |
3913 | .setauthsize = aead_setauthsize, | 3877 | .setauthsize = aead_setauthsize, |
3914 | .encrypt = aead_encrypt, | 3878 | .encrypt = old_aead_encrypt, |
3915 | .decrypt = aead_decrypt, | 3879 | .decrypt = old_aead_decrypt, |
3916 | .givencrypt = aead_givencrypt, | 3880 | .givencrypt = old_aead_givencrypt, |
3917 | .geniv = "<built-in>", | 3881 | .geniv = "<built-in>", |
3918 | .ivsize = CTR_RFC3686_IV_SIZE, | 3882 | .ivsize = CTR_RFC3686_IV_SIZE, |
3919 | .maxauthsize = SHA224_DIGEST_SIZE, | 3883 | .maxauthsize = SHA224_DIGEST_SIZE, |
@@ -3931,9 +3895,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3931 | .template_aead = { | 3895 | .template_aead = { |
3932 | .setkey = aead_setkey, | 3896 | .setkey = aead_setkey, |
3933 | .setauthsize = aead_setauthsize, | 3897 | .setauthsize = aead_setauthsize, |
3934 | .encrypt = aead_encrypt, | 3898 | .encrypt = old_aead_encrypt, |
3935 | .decrypt = aead_decrypt, | 3899 | .decrypt = old_aead_decrypt, |
3936 | .givencrypt = aead_givencrypt, | 3900 | .givencrypt = old_aead_givencrypt, |
3937 | .geniv = "<built-in>", | 3901 | .geniv = "<built-in>", |
3938 | .ivsize = CTR_RFC3686_IV_SIZE, | 3902 | .ivsize = CTR_RFC3686_IV_SIZE, |
3939 | .maxauthsize = SHA256_DIGEST_SIZE, | 3903 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -3951,9 +3915,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3951 | .template_aead = { | 3915 | .template_aead = { |
3952 | .setkey = aead_setkey, | 3916 | .setkey = aead_setkey, |
3953 | .setauthsize = aead_setauthsize, | 3917 | .setauthsize = aead_setauthsize, |
3954 | .encrypt = aead_encrypt, | 3918 | .encrypt = old_aead_encrypt, |
3955 | .decrypt = aead_decrypt, | 3919 | .decrypt = old_aead_decrypt, |
3956 | .givencrypt = aead_givencrypt, | 3920 | .givencrypt = old_aead_givencrypt, |
3957 | .geniv = "<built-in>", | 3921 | .geniv = "<built-in>", |
3958 | .ivsize = CTR_RFC3686_IV_SIZE, | 3922 | .ivsize = CTR_RFC3686_IV_SIZE, |
3959 | .maxauthsize = SHA384_DIGEST_SIZE, | 3923 | .maxauthsize = SHA384_DIGEST_SIZE, |
@@ -3971,9 +3935,9 @@ static struct caam_alg_template driver_algs[] = { | |||
3971 | .template_aead = { | 3935 | .template_aead = { |
3972 | .setkey = aead_setkey, | 3936 | .setkey = aead_setkey, |
3973 | .setauthsize = aead_setauthsize, | 3937 | .setauthsize = aead_setauthsize, |
3974 | .encrypt = aead_encrypt, | 3938 | .encrypt = old_aead_encrypt, |
3975 | .decrypt = aead_decrypt, | 3939 | .decrypt = old_aead_decrypt, |
3976 | .givencrypt = aead_givencrypt, | 3940 | .givencrypt = old_aead_givencrypt, |
3977 | .geniv = "<built-in>", | 3941 | .geniv = "<built-in>", |
3978 | .ivsize = CTR_RFC3686_IV_SIZE, | 3942 | .ivsize = CTR_RFC3686_IV_SIZE, |
3979 | .maxauthsize = SHA512_DIGEST_SIZE, | 3943 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -3983,58 +3947,6 @@ static struct caam_alg_template driver_algs[] = { | |||
3983 | OP_ALG_AAI_HMAC_PRECOMP, | 3947 | OP_ALG_AAI_HMAC_PRECOMP, |
3984 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 3948 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, |
3985 | }, | 3949 | }, |
3986 | { | ||
3987 | .name = "rfc4106(gcm(aes))", | ||
3988 | .driver_name = "rfc4106-gcm-aes-caam", | ||
3989 | .blocksize = 1, | ||
3990 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
3991 | .template_aead = { | ||
3992 | .setkey = rfc4106_setkey, | ||
3993 | .setauthsize = rfc4106_setauthsize, | ||
3994 | .encrypt = aead_encrypt, | ||
3995 | .decrypt = aead_decrypt, | ||
3996 | .givencrypt = aead_givencrypt, | ||
3997 | .geniv = "<built-in>", | ||
3998 | .ivsize = 8, | ||
3999 | .maxauthsize = AES_BLOCK_SIZE, | ||
4000 | }, | ||
4001 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
4002 | }, | ||
4003 | { | ||
4004 | .name = "rfc4543(gcm(aes))", | ||
4005 | .driver_name = "rfc4543-gcm-aes-caam", | ||
4006 | .blocksize = 1, | ||
4007 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
4008 | .template_aead = { | ||
4009 | .setkey = rfc4543_setkey, | ||
4010 | .setauthsize = rfc4543_setauthsize, | ||
4011 | .encrypt = aead_encrypt, | ||
4012 | .decrypt = aead_decrypt, | ||
4013 | .givencrypt = aead_givencrypt, | ||
4014 | .geniv = "<built-in>", | ||
4015 | .ivsize = 8, | ||
4016 | .maxauthsize = AES_BLOCK_SIZE, | ||
4017 | }, | ||
4018 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
4019 | }, | ||
4020 | /* Galois Counter Mode */ | ||
4021 | { | ||
4022 | .name = "gcm(aes)", | ||
4023 | .driver_name = "gcm-aes-caam", | ||
4024 | .blocksize = 1, | ||
4025 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
4026 | .template_aead = { | ||
4027 | .setkey = gcm_setkey, | ||
4028 | .setauthsize = gcm_setauthsize, | ||
4029 | .encrypt = aead_encrypt, | ||
4030 | .decrypt = aead_decrypt, | ||
4031 | .givencrypt = NULL, | ||
4032 | .geniv = "<built-in>", | ||
4033 | .ivsize = 12, | ||
4034 | .maxauthsize = AES_BLOCK_SIZE, | ||
4035 | }, | ||
4036 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
4037 | }, | ||
4038 | /* ablkcipher descriptor */ | 3950 | /* ablkcipher descriptor */ |
4039 | { | 3951 | { |
4040 | .name = "cbc(aes)", | 3952 | .name = "cbc(aes)", |
@@ -4124,21 +4036,84 @@ static struct caam_alg_template driver_algs[] = { | |||
4124 | } | 4036 | } |
4125 | }; | 4037 | }; |
4126 | 4038 | ||
4127 | struct caam_crypto_alg { | 4039 | struct caam_alg_entry { |
4128 | struct list_head entry; | ||
4129 | int class1_alg_type; | 4040 | int class1_alg_type; |
4130 | int class2_alg_type; | 4041 | int class2_alg_type; |
4131 | int alg_op; | 4042 | int alg_op; |
4043 | }; | ||
4044 | |||
4045 | struct caam_aead_alg { | ||
4046 | struct aead_alg aead; | ||
4047 | struct caam_alg_entry caam; | ||
4048 | bool registered; | ||
4049 | }; | ||
4050 | |||
4051 | static struct caam_aead_alg driver_aeads[] = { | ||
4052 | { | ||
4053 | .aead = { | ||
4054 | .base = { | ||
4055 | .cra_name = "rfc4106(gcm(aes))", | ||
4056 | .cra_driver_name = "rfc4106-gcm-aes-caam", | ||
4057 | .cra_blocksize = 1, | ||
4058 | }, | ||
4059 | .setkey = rfc4106_setkey, | ||
4060 | .setauthsize = rfc4106_setauthsize, | ||
4061 | .encrypt = gcm_encrypt, | ||
4062 | .decrypt = gcm_decrypt, | ||
4063 | .ivsize = 8, | ||
4064 | .maxauthsize = AES_BLOCK_SIZE, | ||
4065 | }, | ||
4066 | .caam = { | ||
4067 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
4068 | }, | ||
4069 | }, | ||
4070 | { | ||
4071 | .aead = { | ||
4072 | .base = { | ||
4073 | .cra_name = "rfc4543(gcm(aes))", | ||
4074 | .cra_driver_name = "rfc4543-gcm-aes-caam", | ||
4075 | .cra_blocksize = 1, | ||
4076 | }, | ||
4077 | .setkey = rfc4543_setkey, | ||
4078 | .setauthsize = rfc4543_setauthsize, | ||
4079 | .encrypt = gcm_encrypt, | ||
4080 | .decrypt = gcm_decrypt, | ||
4081 | .ivsize = 8, | ||
4082 | .maxauthsize = AES_BLOCK_SIZE, | ||
4083 | }, | ||
4084 | .caam = { | ||
4085 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
4086 | }, | ||
4087 | }, | ||
4088 | /* Galois Counter Mode */ | ||
4089 | { | ||
4090 | .aead = { | ||
4091 | .base = { | ||
4092 | .cra_name = "gcm(aes)", | ||
4093 | .cra_driver_name = "gcm-aes-caam", | ||
4094 | .cra_blocksize = 1, | ||
4095 | }, | ||
4096 | .setkey = gcm_setkey, | ||
4097 | .setauthsize = gcm_setauthsize, | ||
4098 | .encrypt = gcm_encrypt, | ||
4099 | .decrypt = gcm_decrypt, | ||
4100 | .ivsize = 12, | ||
4101 | .maxauthsize = AES_BLOCK_SIZE, | ||
4102 | }, | ||
4103 | .caam = { | ||
4104 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
4105 | }, | ||
4106 | }, | ||
4107 | }; | ||
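The three GCM entries above are the first in this driver to register through the new aead_alg interface, where associated data and payload travel in a single scatterlist. A minimal consumer-side sketch of that convention follows; it is not part of the patch, the buffer layout and error handling are illustrative, and an async transform would need a completion callback rather than the direct return used here.

#include <crypto/aead.h>

/* Sketch only: encrypt with the new single-SG AEAD convention.
 * "sg" holds AD || plaintext (assoclen + ptlen bytes); on encrypt,
 * GCM writes ciphertext plus the 16-byte tag back into the same
 * list, so the caller must leave room for the tag.
 */
static int example_gcm_encrypt(struct scatterlist *sg, unsigned int assoclen,
			       unsigned int ptlen, const u8 *key,
			       unsigned int keylen, u8 iv[12])
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	aead_request_set_ad(req, assoclen);		/* how much of sg is AD */
	aead_request_set_crypt(req, sg, sg, ptlen, iv);	/* in-place */
	err = crypto_aead_encrypt(req);			/* -EINPROGRESS if async */

	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}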
4108 | |||
4109 | struct caam_crypto_alg { | ||
4132 | struct crypto_alg crypto_alg; | 4110 | struct crypto_alg crypto_alg; |
4111 | struct list_head entry; | ||
4112 | struct caam_alg_entry caam; | ||
4133 | }; | 4113 | }; |
4134 | 4114 | ||
4135 | static int caam_cra_init(struct crypto_tfm *tfm) | 4115 | static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) |
4136 | { | 4116 | { |
4137 | struct crypto_alg *alg = tfm->__crt_alg; | ||
4138 | struct caam_crypto_alg *caam_alg = | ||
4139 | container_of(alg, struct caam_crypto_alg, crypto_alg); | ||
4140 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | ||
4141 | |||
4142 | ctx->jrdev = caam_jr_alloc(); | 4117 | ctx->jrdev = caam_jr_alloc(); |
4143 | if (IS_ERR(ctx->jrdev)) { | 4118 | if (IS_ERR(ctx->jrdev)) { |
4144 | pr_err("Job Ring Device allocation for transform failed\n"); | 4119 | pr_err("Job Ring Device allocation for transform failed\n"); |
@@ -4146,17 +4121,35 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
4146 | } | 4121 | } |
4147 | 4122 | ||
4148 | /* copy descriptor header template value */ | 4123 | /* copy descriptor header template value */ |
4149 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | 4124 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
4150 | ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; | 4125 | ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
4151 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; | 4126 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op; |
4152 | 4127 | ||
4153 | return 0; | 4128 | return 0; |
4154 | } | 4129 | } |
4155 | 4130 | ||
4156 | static void caam_cra_exit(struct crypto_tfm *tfm) | 4131 | static int caam_cra_init(struct crypto_tfm *tfm) |
4157 | { | 4132 | { |
4133 | struct crypto_alg *alg = tfm->__crt_alg; | ||
4134 | struct caam_crypto_alg *caam_alg = | ||
4135 | container_of(alg, struct caam_crypto_alg, crypto_alg); | ||
4158 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 4136 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
4159 | 4137 | ||
4138 | return caam_init_common(ctx, &caam_alg->caam); | ||
4139 | } | ||
4140 | |||
4141 | static int caam_aead_init(struct crypto_aead *tfm) | ||
4142 | { | ||
4143 | struct aead_alg *alg = crypto_aead_alg(tfm); | ||
4144 | struct caam_aead_alg *caam_alg = | ||
4145 | container_of(alg, struct caam_aead_alg, aead); | ||
4146 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); | ||
4147 | |||
4148 | return caam_init_common(ctx, &caam_alg->caam); | ||
4149 | } | ||
4150 | |||
4151 | static void caam_exit_common(struct caam_ctx *ctx) | ||
4152 | { | ||
4160 | if (ctx->sh_desc_enc_dma && | 4153 | if (ctx->sh_desc_enc_dma && |
4161 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) | 4154 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) |
4162 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, | 4155 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, |
@@ -4179,10 +4172,28 @@ static void caam_cra_exit(struct crypto_tfm *tfm) | |||
4179 | caam_jr_free(ctx->jrdev); | 4172 | caam_jr_free(ctx->jrdev); |
4180 | } | 4173 | } |
4181 | 4174 | ||
4175 | static void caam_cra_exit(struct crypto_tfm *tfm) | ||
4176 | { | ||
4177 | caam_exit_common(crypto_tfm_ctx(tfm)); | ||
4178 | } | ||
4179 | |||
4180 | static void caam_aead_exit(struct crypto_aead *tfm) | ||
4181 | { | ||
4182 | caam_exit_common(crypto_aead_ctx(tfm)); | ||
4183 | } | ||
4184 | |||
4182 | static void __exit caam_algapi_exit(void) | 4185 | static void __exit caam_algapi_exit(void) |
4183 | { | 4186 | { |
4184 | 4187 | ||
4185 | struct caam_crypto_alg *t_alg, *n; | 4188 | struct caam_crypto_alg *t_alg, *n; |
4189 | int i; | ||
4190 | |||
4191 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { | ||
4192 | struct caam_aead_alg *t_alg = driver_aeads + i; | ||
4193 | |||
4194 | if (t_alg->registered) | ||
4195 | crypto_unregister_aead(&t_alg->aead); | ||
4196 | } | ||
4186 | 4197 | ||
4187 | if (!alg_list.next) | 4198 | if (!alg_list.next) |
4188 | return; | 4199 | return; |
@@ -4235,13 +4246,26 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template | |||
4235 | break; | 4246 | break; |
4236 | } | 4247 | } |
4237 | 4248 | ||
4238 | t_alg->class1_alg_type = template->class1_alg_type; | 4249 | t_alg->caam.class1_alg_type = template->class1_alg_type; |
4239 | t_alg->class2_alg_type = template->class2_alg_type; | 4250 | t_alg->caam.class2_alg_type = template->class2_alg_type; |
4240 | t_alg->alg_op = template->alg_op; | 4251 | t_alg->caam.alg_op = template->alg_op; |
4241 | 4252 | ||
4242 | return t_alg; | 4253 | return t_alg; |
4243 | } | 4254 | } |
4244 | 4255 | ||
4256 | static void caam_aead_alg_init(struct caam_aead_alg *t_alg) | ||
4257 | { | ||
4258 | struct aead_alg *alg = &t_alg->aead; | ||
4259 | |||
4260 | alg->base.cra_module = THIS_MODULE; | ||
4261 | alg->base.cra_priority = CAAM_CRA_PRIORITY; | ||
4262 | alg->base.cra_ctxsize = sizeof(struct caam_ctx); | ||
4263 | alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; | ||
4264 | |||
4265 | alg->init = caam_aead_init; | ||
4266 | alg->exit = caam_aead_exit; | ||
4267 | } | ||
4268 | |||
4245 | static int __init caam_algapi_init(void) | 4269 | static int __init caam_algapi_init(void) |
4246 | { | 4270 | { |
4247 | struct device_node *dev_node; | 4271 | struct device_node *dev_node; |
@@ -4249,6 +4273,7 @@ static int __init caam_algapi_init(void) | |||
4249 | struct device *ctrldev; | 4273 | struct device *ctrldev; |
4250 | void *priv; | 4274 | void *priv; |
4251 | int i = 0, err = 0; | 4275 | int i = 0, err = 0; |
4276 | bool registered = false; | ||
4252 | 4277 | ||
4253 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 4278 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
4254 | if (!dev_node) { | 4279 | if (!dev_node) { |
@@ -4295,10 +4320,30 @@ static int __init caam_algapi_init(void) | |||
4295 | pr_warn("%s alg registration failed\n", | 4320 | pr_warn("%s alg registration failed\n", |
4296 | t_alg->crypto_alg.cra_driver_name); | 4321 | t_alg->crypto_alg.cra_driver_name); |
4297 | kfree(t_alg); | 4322 | kfree(t_alg); |
4298 | } else | 4323 | continue; |
4299 | list_add_tail(&t_alg->entry, &alg_list); | 4324 | } |
4325 | |||
4326 | list_add_tail(&t_alg->entry, &alg_list); | ||
4327 | registered = true; | ||
4300 | } | 4328 | } |
4301 | if (!list_empty(&alg_list)) | 4329 | |
4330 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { | ||
4331 | struct caam_aead_alg *t_alg = driver_aeads + i; | ||
4332 | |||
4333 | caam_aead_alg_init(t_alg); | ||
4334 | |||
4335 | err = crypto_register_aead(&t_alg->aead); | ||
4336 | if (err) { | ||
4337 | pr_warn("%s alg registration failed\n", | ||
4338 | t_alg->aead.base.cra_driver_name); | ||
4339 | continue; | ||
4340 | } | ||
4341 | |||
4342 | t_alg->registered = true; | ||
4343 | registered = true; | ||
4344 | } | ||
4345 | |||
4346 | if (registered) | ||
4302 | pr_info("caam algorithms registered in /proc/crypto\n"); | 4347 | pr_info("caam algorithms registered in /proc/crypto\n"); |
4303 | 4348 | ||
4304 | return err; | 4349 | return err; |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 332c8ef8dae2..dae1e8099969 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
835 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | 835 | src_map_to_sec4_sg(jrdev, req->src, src_nents, |
836 | edesc->sec4_sg + sec4_sg_src_index, | 836 | edesc->sec4_sg + sec4_sg_src_index, |
837 | chained); | 837 | chained); |
838 | if (*next_buflen) { | 838 | if (*next_buflen) |
839 | scatterwalk_map_and_copy(next_buf, req->src, | 839 | scatterwalk_map_and_copy(next_buf, req->src, |
840 | to_hash - *buflen, | 840 | to_hash - *buflen, |
841 | *next_buflen, 0); | 841 | *next_buflen, 0); |
842 | state->current_buf = !state->current_buf; | ||
843 | } | ||
844 | } else { | 842 | } else { |
845 | (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= | 843 | (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= |
846 | SEC4_SG_LEN_FIN; | 844 | SEC4_SG_LEN_FIN; |
847 | } | 845 | } |
848 | 846 | ||
847 | state->current_buf = !state->current_buf; | ||
848 | |||
849 | sh_len = desc_len(sh_desc); | 849 | sh_len = desc_len(sh_desc); |
850 | desc = edesc->hw_desc; | 850 | desc = edesc->hw_desc; |
851 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | 851 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | |
@@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1268 | scatterwalk_map_and_copy(next_buf, req->src, | 1268 | scatterwalk_map_and_copy(next_buf, req->src, |
1269 | to_hash - *buflen, | 1269 | to_hash - *buflen, |
1270 | *next_buflen, 0); | 1270 | *next_buflen, 0); |
1271 | state->current_buf = !state->current_buf; | ||
1272 | } | 1271 | } |
1273 | 1272 | ||
1273 | state->current_buf = !state->current_buf; | ||
1274 | |||
1274 | sh_len = desc_len(sh_desc); | 1275 | sh_len = desc_len(sh_desc); |
1275 | desc = edesc->hw_desc; | 1276 | desc = edesc->hw_desc; |
1276 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | | 1277 | init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index acd7743e2603..f57f395db33f 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <crypto/des.h> | 32 | #include <crypto/des.h> |
33 | #include <crypto/sha.h> | 33 | #include <crypto/sha.h> |
34 | #include <crypto/md5.h> | 34 | #include <crypto/md5.h> |
35 | #include <crypto/aead.h> | 35 | #include <crypto/internal/aead.h> |
36 | #include <crypto/authenc.h> | 36 | #include <crypto/authenc.h> |
37 | #include <crypto/scatterwalk.h> | 37 | #include <crypto/scatterwalk.h> |
38 | #include <crypto/internal/skcipher.h> | 38 | #include <crypto/internal/skcipher.h> |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index efba4ccd4fac..efacab7539ef 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -301,7 +301,7 @@ static int caam_remove(struct platform_device *pdev) | |||
301 | #endif | 301 | #endif |
302 | 302 | ||
303 | /* Unmap controller region */ | 303 | /* Unmap controller region */ |
304 | iounmap(&ctrl); | 304 | iounmap(ctrl); |
305 | 305 | ||
306 | return ret; | 306 | return ret; |
307 | } | 307 | } |
@@ -496,7 +496,7 @@ static int caam_probe(struct platform_device *pdev) | |||
496 | sizeof(struct platform_device *) * rspec, | 496 | sizeof(struct platform_device *) * rspec, |
497 | GFP_KERNEL); | 497 | GFP_KERNEL); |
498 | if (ctrlpriv->jrpdev == NULL) { | 498 | if (ctrlpriv->jrpdev == NULL) { |
499 | iounmap(&ctrl); | 499 | iounmap(ctrl); |
500 | return -ENOMEM; | 500 | return -ENOMEM; |
501 | } | 501 | } |
502 | 502 | ||
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 378ddc17f60e..672c97489505 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -83,35 +83,35 @@ | |||
83 | #endif | 83 | #endif |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | /* | ||
87 | * The only users of these wr/rd_reg64 functions is the Job Ring (JR). | ||
88 | * The DMA address registers in the JR are a pair of 32-bit registers. | ||
89 | * The layout is: | ||
90 | * | ||
91 | * base + 0x0000 : most-significant 32 bits | ||
92 | * base + 0x0004 : least-significant 32 bits | ||
93 | * | ||
94 | * The 32-bit version of this core therefore has to write to base + 0x0004 | ||
95 | * to set the 32-bit wide DMA address. This seems to be independent of the | ||
96 | * endianness of the written/read data. | ||
97 | */ | ||
98 | |||
86 | #ifndef CONFIG_64BIT | 99 | #ifndef CONFIG_64BIT |
87 | #ifdef __BIG_ENDIAN | 100 | #define REG64_MS32(reg) ((u32 __iomem *)(reg)) |
88 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | 101 | #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1) |
89 | { | ||
90 | wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); | ||
91 | wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull); | ||
92 | } | ||
93 | 102 | ||
94 | static inline u64 rd_reg64(u64 __iomem *reg) | ||
95 | { | ||
96 | return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | | ||
97 | ((u64)rd_reg32((u32 __iomem *)reg + 1)); | ||
98 | } | ||
99 | #else | ||
100 | #ifdef __LITTLE_ENDIAN | ||
101 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | 103 | static inline void wr_reg64(u64 __iomem *reg, u64 data) |
102 | { | 104 | { |
103 | wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); | 105 | wr_reg32(REG64_MS32(reg), data >> 32); |
104 | wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); | 106 | wr_reg32(REG64_LS32(reg), data); |
105 | } | 107 | } |
106 | 108 | ||
107 | static inline u64 rd_reg64(u64 __iomem *reg) | 109 | static inline u64 rd_reg64(u64 __iomem *reg) |
108 | { | 110 | { |
109 | return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | | 111 | return ((u64)rd_reg32(REG64_MS32(reg)) << 32 | |
110 | ((u64)rd_reg32((u32 __iomem *)reg)); | 112 | (u64)rd_reg32(REG64_LS32(reg))); |
111 | } | 113 | } |
112 | #endif | 114 | #endif |
113 | #endif | ||
114 | #endif | ||
115 | 115 | ||
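The REG64_MS32()/REG64_LS32() macros make the register layout described in the new comment mechanical. A worked example with an illustrative value, showing what wr_reg64() expands to on a 32-bit build:

u64 addr = 0x123456789ULL;	/* a 33-bit bus address, for illustration */

wr_reg64(reg, addr);
/* ...is equivalent to: */
wr_reg32(REG64_MS32(reg), addr >> 32);	/* base + 0x0000 <- 0x00000001 */
wr_reg32(REG64_LS32(reg), addr);	/* base + 0x0004 <- 0x23456789 */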
116 | /* | 116 | /* |
117 | * jr_outentry | 117 | * jr_outentry |
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index 3b918218aa4c..b68b74cc7b77 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
@@ -55,6 +55,21 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, | |||
55 | sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; | 55 | sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; |
56 | } | 56 | } |
57 | 57 | ||
58 | static inline struct sec4_sg_entry *sg_to_sec4_sg_len( | ||
59 | struct scatterlist *sg, unsigned int total, | ||
60 | struct sec4_sg_entry *sec4_sg_ptr) | ||
61 | { | ||
62 | do { | ||
63 | unsigned int len = min(sg_dma_len(sg), total); | ||
64 | |||
65 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0); | ||
66 | sec4_sg_ptr++; | ||
67 | sg = sg_next(sg); | ||
68 | total -= len; | ||
69 | } while (total); | ||
70 | return sec4_sg_ptr - 1; | ||
71 | } | ||
72 | |||
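Unlike sg_to_sec4_sg_last() above, the new helper is byte-bounded and returns a pointer to the last entry it wrote. A usage sketch, assuming the list was already DMA-mapped (so sg_dma_address()/sg_dma_len() are valid); the FIN marking mirrors sg_to_sec4_sg_last() and is an assumption about the intended use of the return value:

/* Sketch: cover exactly "nbytes" of a mapped scatterlist. "tbl" points
 * into a sec4_sg_entry array sized for the worst case.
 */
struct sec4_sg_entry *last = sg_to_sec4_sg_len(src, nbytes, tbl);

last->len |= SEC4_SG_LEN_FIN;	/* assumed: mark the final entry */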
58 | /* count number of elements in scatterlist */ | 73 | /* count number of elements in scatterlist */ |
59 | static inline int __sg_count(struct scatterlist *sg_list, int nbytes, | 74 | static inline int __sg_count(struct scatterlist *sg_list, int nbytes, |
60 | bool *chained) | 75 | bool *chained) |
@@ -85,34 +100,41 @@ static inline int sg_count(struct scatterlist *sg_list, int nbytes, | |||
85 | return sg_nents; | 100 | return sg_nents; |
86 | } | 101 | } |
87 | 102 | ||
88 | static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, | 103 | static inline void dma_unmap_sg_chained( |
89 | unsigned int nents, enum dma_data_direction dir, | 104 | struct device *dev, struct scatterlist *sg, unsigned int nents, |
90 | bool chained) | 105 | enum dma_data_direction dir, bool chained) |
91 | { | 106 | { |
92 | if (unlikely(chained)) { | 107 | if (unlikely(chained)) { |
93 | int i; | 108 | int i; |
94 | for (i = 0; i < nents; i++) { | 109 | for (i = 0; i < nents; i++) { |
95 | dma_map_sg(dev, sg, 1, dir); | 110 | dma_unmap_sg(dev, sg, 1, dir); |
96 | sg = sg_next(sg); | 111 | sg = sg_next(sg); |
97 | } | 112 | } |
98 | } else { | 113 | } else if (nents) { |
99 | dma_map_sg(dev, sg, nents, dir); | 114 | dma_unmap_sg(dev, sg, nents, dir); |
100 | } | 115 | } |
101 | return nents; | ||
102 | } | 116 | } |
103 | 117 | ||
104 | static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, | 118 | static inline int dma_map_sg_chained( |
105 | unsigned int nents, enum dma_data_direction dir, | 119 | struct device *dev, struct scatterlist *sg, unsigned int nents, |
106 | bool chained) | 120 | enum dma_data_direction dir, bool chained) |
107 | { | 121 | { |
122 | struct scatterlist *first = sg; | ||
123 | |||
108 | if (unlikely(chained)) { | 124 | if (unlikely(chained)) { |
109 | int i; | 125 | int i; |
110 | for (i = 0; i < nents; i++) { | 126 | for (i = 0; i < nents; i++) { |
111 | dma_unmap_sg(dev, sg, 1, dir); | 127 | if (!dma_map_sg(dev, sg, 1, dir)) { |
128 | dma_unmap_sg_chained(dev, first, i, dir, | ||
129 | chained); | ||
130 | nents = 0; | ||
131 | break; | ||
132 | } | ||
133 | |||
112 | sg = sg_next(sg); | 134 | sg = sg_next(sg); |
113 | } | 135 | } |
114 | } else { | 136 | } else |
115 | dma_unmap_sg(dev, sg, nents, dir); | 137 | nents = dma_map_sg(dev, sg, nents, dir); |
116 | } | 138 | |
117 | return nents; | 139 | return nents; |
118 | } | 140 | } |
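dma_map_sg_chained() now mirrors dma_map_sg() semantics: a per-entry mapping failure unwinds the entries already mapped and the function returns 0, so callers can keep the usual zero-means-error check. An illustrative calling convention (variable names borrowed from the caam code above, not a quote from it):

int mapped = dma_map_sg_chained(jrdev, req->src, src_nents,
				DMA_TO_DEVICE, chained);
if (!mapped)
	return -ENOMEM;
/* ... hand the buffers to the hardware ... */
dma_unmap_sg_chained(jrdev, req->src, src_nents, DMA_TO_DEVICE, chained);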
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 7639ffc36c68..ae38f6b6cc10 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig | |||
@@ -13,7 +13,6 @@ config CRYPTO_DEV_CCP_CRYPTO | |||
13 | tristate "Encryption and hashing acceleration support" | 13 | tristate "Encryption and hashing acceleration support" |
14 | depends on CRYPTO_DEV_CCP_DD | 14 | depends on CRYPTO_DEV_CCP_DD |
15 | default m | 15 | default m |
16 | select CRYPTO_ALGAPI | ||
17 | select CRYPTO_HASH | 16 | select CRYPTO_HASH |
18 | select CRYPTO_BLKCIPHER | 17 | select CRYPTO_BLKCIPHER |
19 | select CRYPTO_AUTHENC | 18 | select CRYPTO_AUTHENC |
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 71f2e3c89424..d09c6c4af4aa 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -52,8 +52,7 @@ struct ccp_dm_workarea { | |||
52 | 52 | ||
53 | struct ccp_sg_workarea { | 53 | struct ccp_sg_workarea { |
54 | struct scatterlist *sg; | 54 | struct scatterlist *sg; |
55 | unsigned int nents; | 55 | int nents; |
56 | unsigned int length; | ||
57 | 56 | ||
58 | struct scatterlist *dma_sg; | 57 | struct scatterlist *dma_sg; |
59 | struct device *dma_dev; | 58 | struct device *dma_dev; |
@@ -496,8 +495,10 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, | |||
496 | if (!sg) | 495 | if (!sg) |
497 | return 0; | 496 | return 0; |
498 | 497 | ||
499 | wa->nents = sg_nents(sg); | 498 | wa->nents = sg_nents_for_len(sg, len); |
500 | wa->length = sg->length; | 499 | if (wa->nents < 0) |
500 | return wa->nents; | ||
501 | |||
501 | wa->bytes_left = len; | 502 | wa->bytes_left = len; |
502 | wa->sg_used = 0; | 503 | wa->sg_used = 0; |
503 | 504 | ||
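sg_nents_for_len() counts only the entries needed to cover len bytes and returns -EINVAL when the scatterlist is too short, which is why wa->nents becomes a signed int above. The pattern it enables:

int nents = sg_nents_for_len(sg, len);	/* < 0 if sg covers fewer than len bytes */

if (nents < 0)
	return nents;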
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index b1c20b2b5647..c0aa5c5c5f9d 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c | |||
@@ -174,8 +174,6 @@ static int ccp_platform_probe(struct platform_device *pdev) | |||
174 | } | 174 | } |
175 | ccp->io_regs = ccp->io_map; | 175 | ccp->io_regs = ccp->io_map; |
176 | 176 | ||
177 | if (!dev->dma_mask) | ||
178 | dev->dma_mask = &dev->coherent_dma_mask; | ||
179 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); | 177 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); |
180 | if (ret) { | 178 | if (ret) { |
181 | dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); | 179 | dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 48f453555f1f..7ba495f75370 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <crypto/aes.h> | 25 | #include <crypto/aes.h> |
26 | #include <crypto/sha.h> | 26 | #include <crypto/sha.h> |
27 | #include <crypto/algapi.h> | 27 | #include <crypto/algapi.h> |
28 | #include <crypto/aead.h> | 28 | #include <crypto/internal/aead.h> |
29 | #include <crypto/authenc.h> | 29 | #include <crypto/authenc.h> |
30 | #include <crypto/scatterwalk.h> | 30 | #include <crypto/scatterwalk.h> |
31 | 31 | ||
@@ -575,7 +575,8 @@ static int init_tfm_ablk(struct crypto_tfm *tfm) | |||
575 | 575 | ||
576 | static int init_tfm_aead(struct crypto_tfm *tfm) | 576 | static int init_tfm_aead(struct crypto_tfm *tfm) |
577 | { | 577 | { |
578 | tfm->crt_aead.reqsize = sizeof(struct aead_ctx); | 578 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
579 | sizeof(struct aead_ctx)); | ||
579 | return init_tfm(tfm); | 580 | return init_tfm(tfm); |
580 | } | 581 | } |
581 | 582 | ||
@@ -1096,7 +1097,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) | |||
1096 | { | 1097 | { |
1097 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); | 1098 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); |
1098 | u32 *flags = &tfm->base.crt_flags; | 1099 | u32 *flags = &tfm->base.crt_flags; |
1099 | unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize; | 1100 | unsigned digest_len = crypto_aead_maxauthsize(tfm); |
1100 | int ret; | 1101 | int ret; |
1101 | 1102 | ||
1102 | if (!ctx->enckey_len && !ctx->authkey_len) | 1103 | if (!ctx->enckey_len && !ctx->authkey_len) |
@@ -1138,7 +1139,7 @@ out: | |||
1138 | 1139 | ||
1139 | static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | 1140 | static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) |
1140 | { | 1141 | { |
1141 | int max = crypto_aead_alg(tfm)->maxauthsize >> 2; | 1142 | int max = crypto_aead_maxauthsize(tfm) >> 2; |
1142 | 1143 | ||
1143 | if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3)) | 1144 | if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3)) |
1144 | return -EINVAL; | 1145 | return -EINVAL; |
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile new file mode 100644 index 000000000000..0c12b13574dc --- /dev/null +++ b/drivers/crypto/marvell/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o | ||
2 | marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o | ||
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c new file mode 100644 index 000000000000..a432633bced4 --- /dev/null +++ b/drivers/crypto/marvell/cesa.c | |||
@@ -0,0 +1,548 @@ | |||
1 | /* | ||
2 | * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA) | ||
3 | * that can be found on the following platforms: Orion, Kirkwood, Armada. This | ||
4 | * driver supports the TDMA engine on platforms on which it is available. | ||
5 | * | ||
6 | * Author: Boris Brezillon <boris.brezillon@free-electrons.com> | ||
7 | * Author: Arnaud Ebalard <arno@natisbad.org> | ||
8 | * | ||
9 | * This work is based on an initial version written by | ||
10 | * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License version 2 as published | ||
14 | * by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/delay.h> | ||
18 | #include <linux/genalloc.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/kthread.h> | ||
22 | #include <linux/mbus.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/scatterlist.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/of.h> | ||
29 | #include <linux/of_platform.h> | ||
30 | #include <linux/of_irq.h> | ||
31 | |||
32 | #include "cesa.h" | ||
33 | |||
34 | static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); | ||
35 | module_param_named(allhwsupport, allhwsupport, int, 0444); | ||
36 | MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)"); | ||
37 | |||
38 | struct mv_cesa_dev *cesa_dev; | ||
39 | |||
40 | static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) | ||
41 | { | ||
42 | struct crypto_async_request *req, *backlog; | ||
43 | struct mv_cesa_ctx *ctx; | ||
44 | |||
45 | spin_lock_bh(&cesa_dev->lock); | ||
46 | backlog = crypto_get_backlog(&cesa_dev->queue); | ||
47 | req = crypto_dequeue_request(&cesa_dev->queue); | ||
48 | engine->req = req; | ||
49 | spin_unlock_bh(&cesa_dev->lock); | ||
50 | |||
51 | if (!req) | ||
52 | return; | ||
53 | |||
54 | if (backlog) | ||
55 | backlog->complete(backlog, -EINPROGRESS); | ||
56 | |||
57 | ctx = crypto_tfm_ctx(req->tfm); | ||
58 | ctx->ops->prepare(req, engine); | ||
59 | ctx->ops->step(req); | ||
60 | } | ||
61 | |||
62 | static irqreturn_t mv_cesa_int(int irq, void *priv) | ||
63 | { | ||
64 | struct mv_cesa_engine *engine = priv; | ||
65 | struct crypto_async_request *req; | ||
66 | struct mv_cesa_ctx *ctx; | ||
67 | u32 status, mask; | ||
68 | irqreturn_t ret = IRQ_NONE; | ||
69 | |||
70 | while (true) { | ||
71 | int res; | ||
72 | |||
73 | mask = mv_cesa_get_int_mask(engine); | ||
74 | status = readl(engine->regs + CESA_SA_INT_STATUS); | ||
75 | |||
76 | if (!(status & mask)) | ||
77 | break; | ||
78 | |||
79 | /* | ||
80 | * TODO: avoid clearing the FPGA_INT_STATUS if this is not | ||
81 | * relevant on some platforms. | ||
82 | */ | ||
83 | writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); | ||
84 | writel(~status, engine->regs + CESA_SA_INT_STATUS); | ||
85 | |||
86 | ret = IRQ_HANDLED; | ||
87 | spin_lock_bh(&engine->lock); | ||
88 | req = engine->req; | ||
89 | spin_unlock_bh(&engine->lock); | ||
90 | if (req) { | ||
91 | ctx = crypto_tfm_ctx(req->tfm); | ||
92 | res = ctx->ops->process(req, status & mask); | ||
93 | if (res != -EINPROGRESS) { | ||
94 | spin_lock_bh(&engine->lock); | ||
95 | engine->req = NULL; | ||
96 | mv_cesa_dequeue_req_unlocked(engine); | ||
97 | spin_unlock_bh(&engine->lock); | ||
98 | ctx->ops->cleanup(req); | ||
99 | local_bh_disable(); | ||
100 | req->complete(req, res); | ||
101 | local_bh_enable(); | ||
102 | } else { | ||
103 | ctx->ops->step(req); | ||
104 | } | ||
105 | } | ||
106 | } | ||
107 | |||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | int mv_cesa_queue_req(struct crypto_async_request *req) | ||
112 | { | ||
113 | int ret; | ||
114 | int i; | ||
115 | |||
116 | spin_lock_bh(&cesa_dev->lock); | ||
117 | ret = crypto_enqueue_request(&cesa_dev->queue, req); | ||
118 | spin_unlock_bh(&cesa_dev->lock); | ||
119 | |||
120 | if (ret != -EINPROGRESS) | ||
121 | return ret; | ||
122 | |||
123 | for (i = 0; i < cesa_dev->caps->nengines; i++) { | ||
124 | spin_lock_bh(&cesa_dev->engines[i].lock); | ||
125 | if (!cesa_dev->engines[i].req) | ||
126 | mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]); | ||
127 | spin_unlock_bh(&cesa_dev->engines[i].lock); | ||
128 | } | ||
129 | |||
130 | return -EINPROGRESS; | ||
131 | } | ||
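mv_cesa_queue_req() only enqueues and pokes idle engines; the real work is driven by the per-transform ops invoked from mv_cesa_dequeue_req_unlocked() and mv_cesa_int(). The lifecycle those call sites imply, as an editorial summary (the op names are taken from the code above):

/*
 * 1. submit:   mv_cesa_queue_req(req)            -> -EINPROGRESS (or backlog)
 * 2. dispatch: ctx->ops->prepare(req, engine);      bind request to an engine
 *              ctx->ops->step(req);                 start the first HW operation
 * 3. IRQ:      res = ctx->ops->process(req, status);
 *              res == -EINPROGRESS               -> ctx->ops->step(req) again
 * 4. done:     ctx->ops->cleanup(req);
 *              req->complete(req, res);             with BHs disabled
 */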
132 | |||
133 | static int mv_cesa_add_algs(struct mv_cesa_dev *cesa) | ||
134 | { | ||
135 | int ret; | ||
136 | int i, j; | ||
137 | |||
138 | for (i = 0; i < cesa->caps->ncipher_algs; i++) { | ||
139 | ret = crypto_register_alg(cesa->caps->cipher_algs[i]); | ||
140 | if (ret) | ||
141 | goto err_unregister_crypto; | ||
142 | } | ||
143 | |||
144 | for (i = 0; i < cesa->caps->nahash_algs; i++) { | ||
145 | ret = crypto_register_ahash(cesa->caps->ahash_algs[i]); | ||
146 | if (ret) | ||
147 | goto err_unregister_ahash; | ||
148 | } | ||
149 | |||
150 | return 0; | ||
151 | |||
152 | err_unregister_ahash: | ||
153 | for (j = 0; j < i; j++) | ||
154 | crypto_unregister_ahash(cesa->caps->ahash_algs[j]); | ||
155 | i = cesa->caps->ncipher_algs; | ||
156 | |||
157 | err_unregister_crypto: | ||
158 | for (j = 0; j < i; j++) | ||
159 | crypto_unregister_alg(cesa->caps->cipher_algs[j]); | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa) | ||
165 | { | ||
166 | int i; | ||
167 | |||
168 | for (i = 0; i < cesa->caps->nahash_algs; i++) | ||
169 | crypto_unregister_ahash(cesa->caps->ahash_algs[i]); | ||
170 | |||
171 | for (i = 0; i < cesa->caps->ncipher_algs; i++) | ||
172 | crypto_unregister_alg(cesa->caps->cipher_algs[i]); | ||
173 | } | ||
174 | |||
175 | static struct crypto_alg *orion_cipher_algs[] = { | ||
176 | &mv_cesa_ecb_des_alg, | ||
177 | &mv_cesa_cbc_des_alg, | ||
178 | &mv_cesa_ecb_des3_ede_alg, | ||
179 | &mv_cesa_cbc_des3_ede_alg, | ||
180 | &mv_cesa_ecb_aes_alg, | ||
181 | &mv_cesa_cbc_aes_alg, | ||
182 | }; | ||
183 | |||
184 | static struct ahash_alg *orion_ahash_algs[] = { | ||
185 | &mv_md5_alg, | ||
186 | &mv_sha1_alg, | ||
187 | &mv_ahmac_md5_alg, | ||
188 | &mv_ahmac_sha1_alg, | ||
189 | }; | ||
190 | |||
191 | static struct crypto_alg *armada_370_cipher_algs[] = { | ||
192 | &mv_cesa_ecb_des_alg, | ||
193 | &mv_cesa_cbc_des_alg, | ||
194 | &mv_cesa_ecb_des3_ede_alg, | ||
195 | &mv_cesa_cbc_des3_ede_alg, | ||
196 | &mv_cesa_ecb_aes_alg, | ||
197 | &mv_cesa_cbc_aes_alg, | ||
198 | }; | ||
199 | |||
200 | static struct ahash_alg *armada_370_ahash_algs[] = { | ||
201 | &mv_md5_alg, | ||
202 | &mv_sha1_alg, | ||
203 | &mv_sha256_alg, | ||
204 | &mv_ahmac_md5_alg, | ||
205 | &mv_ahmac_sha1_alg, | ||
206 | &mv_ahmac_sha256_alg, | ||
207 | }; | ||
208 | |||
209 | static const struct mv_cesa_caps orion_caps = { | ||
210 | .nengines = 1, | ||
211 | .cipher_algs = orion_cipher_algs, | ||
212 | .ncipher_algs = ARRAY_SIZE(orion_cipher_algs), | ||
213 | .ahash_algs = orion_ahash_algs, | ||
214 | .nahash_algs = ARRAY_SIZE(orion_ahash_algs), | ||
215 | .has_tdma = false, | ||
216 | }; | ||
217 | |||
218 | static const struct mv_cesa_caps kirkwood_caps = { | ||
219 | .nengines = 1, | ||
220 | .cipher_algs = orion_cipher_algs, | ||
221 | .ncipher_algs = ARRAY_SIZE(orion_cipher_algs), | ||
222 | .ahash_algs = orion_ahash_algs, | ||
223 | .nahash_algs = ARRAY_SIZE(orion_ahash_algs), | ||
224 | .has_tdma = true, | ||
225 | }; | ||
226 | |||
227 | static const struct mv_cesa_caps armada_370_caps = { | ||
228 | .nengines = 1, | ||
229 | .cipher_algs = armada_370_cipher_algs, | ||
230 | .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), | ||
231 | .ahash_algs = armada_370_ahash_algs, | ||
232 | .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), | ||
233 | .has_tdma = true, | ||
234 | }; | ||
235 | |||
236 | static const struct mv_cesa_caps armada_xp_caps = { | ||
237 | .nengines = 2, | ||
238 | .cipher_algs = armada_370_cipher_algs, | ||
239 | .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), | ||
240 | .ahash_algs = armada_370_ahash_algs, | ||
241 | .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), | ||
242 | .has_tdma = true, | ||
243 | }; | ||
244 | |||
245 | static const struct of_device_id mv_cesa_of_match_table[] = { | ||
246 | { .compatible = "marvell,orion-crypto", .data = &orion_caps }, | ||
247 | { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps }, | ||
248 | { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps }, | ||
249 | { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, | ||
250 | { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps }, | ||
251 | { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps }, | ||
252 | { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps }, | ||
253 | {} | ||
254 | }; | ||
255 | MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); | ||
256 | |||
257 | static void | ||
258 | mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine, | ||
259 | const struct mbus_dram_target_info *dram) | ||
260 | { | ||
261 | void __iomem *iobase = engine->regs; | ||
262 | int i; | ||
263 | |||
264 | for (i = 0; i < 4; i++) { | ||
265 | writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i)); | ||
266 | writel(0, iobase + CESA_TDMA_WINDOW_BASE(i)); | ||
267 | } | ||
268 | |||
269 | for (i = 0; i < dram->num_cs; i++) { | ||
270 | const struct mbus_dram_window *cs = dram->cs + i; | ||
271 | |||
272 | writel(((cs->size - 1) & 0xffff0000) | | ||
273 | (cs->mbus_attr << 8) | | ||
274 | (dram->mbus_dram_target_id << 4) | 1, | ||
275 | iobase + CESA_TDMA_WINDOW_CTRL(i)); | ||
276 | writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i)); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) | ||
281 | { | ||
282 | struct device *dev = cesa->dev; | ||
283 | struct mv_cesa_dev_dma *dma; | ||
284 | |||
285 | if (!cesa->caps->has_tdma) | ||
286 | return 0; | ||
287 | |||
288 | dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); | ||
289 | if (!dma) | ||
290 | return -ENOMEM; | ||
291 | |||
292 | dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev, | ||
293 | sizeof(struct mv_cesa_tdma_desc), | ||
294 | 16, 0); | ||
295 | if (!dma->tdma_desc_pool) | ||
296 | return -ENOMEM; | ||
297 | |||
298 | dma->op_pool = dmam_pool_create("cesa_op", dev, | ||
299 | sizeof(struct mv_cesa_op_ctx), 16, 0); | ||
300 | if (!dma->op_pool) | ||
301 | return -ENOMEM; | ||
302 | |||
303 | dma->cache_pool = dmam_pool_create("cesa_cache", dev, | ||
304 | CESA_MAX_HASH_BLOCK_SIZE, 1, 0); | ||
305 | if (!dma->cache_pool) | ||
306 | return -ENOMEM; | ||
307 | |||
308 | dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); | ||
309 | if (!dma->padding_pool) | ||
310 | return -ENOMEM; | ||
311 | |||
312 | cesa->dma = dma; | ||
313 | |||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static int mv_cesa_get_sram(struct platform_device *pdev, int idx) | ||
318 | { | ||
319 | struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); | ||
320 | struct mv_cesa_engine *engine = &cesa->engines[idx]; | ||
321 | const char *res_name = "sram"; | ||
322 | struct resource *res; | ||
323 | |||
324 | engine->pool = of_get_named_gen_pool(cesa->dev->of_node, | ||
325 | "marvell,crypto-srams", | ||
326 | idx); | ||
327 | if (engine->pool) { | ||
328 | engine->sram = gen_pool_dma_alloc(engine->pool, | ||
329 | cesa->sram_size, | ||
330 | &engine->sram_dma); | ||
331 | if (engine->sram) | ||
332 | return 0; | ||
333 | |||
334 | engine->pool = NULL; | ||
335 | return -ENOMEM; | ||
336 | } | ||
337 | |||
338 | if (cesa->caps->nengines > 1) { | ||
339 | if (!idx) | ||
340 | res_name = "sram0"; | ||
341 | else | ||
342 | res_name = "sram1"; | ||
343 | } | ||
344 | |||
345 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
346 | res_name); | ||
347 | if (!res || resource_size(res) < cesa->sram_size) | ||
348 | return -EINVAL; | ||
349 | |||
350 | engine->sram = devm_ioremap_resource(cesa->dev, res); | ||
351 | if (IS_ERR(engine->sram)) | ||
352 | return PTR_ERR(engine->sram); | ||
353 | |||
354 | engine->sram_dma = phys_to_dma(cesa->dev, | ||
355 | (phys_addr_t)res->start); | ||
356 | |||
357 | return 0; | ||
358 | } | ||
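/*
 * Editorial sketch (not in the original patch): mv_cesa_get_sram() accepts
 * either "marvell,crypto-srams" phandles or plain memory resources. A
 * minimal, hypothetical device tree node matching the properties used above
 * could look like this (interrupts and clocks omitted):
 *
 *	crypto@90000 {
 *		compatible = "marvell,armada-370-crypto";
 *		reg = <0x90000 0x10000>;
 *		reg-names = "regs";
 *		marvell,crypto-srams = <&crypto_sram>;
 *		marvell,crypto-sram-size = <0x800>;
 *	};
 */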
359 | |||
360 | static void mv_cesa_put_sram(struct platform_device *pdev, int idx) | ||
361 | { | ||
362 | struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); | ||
363 | struct mv_cesa_engine *engine = &cesa->engines[idx]; | ||
364 | |||
365 | if (!engine->pool) | ||
366 | return; | ||
367 | |||
368 | gen_pool_free(engine->pool, (unsigned long)engine->sram, | ||
369 | cesa->sram_size); | ||
370 | } | ||
371 | |||
372 | static int mv_cesa_probe(struct platform_device *pdev) | ||
373 | { | ||
374 | const struct mv_cesa_caps *caps = &orion_caps; | ||
375 | const struct mbus_dram_target_info *dram; | ||
376 | const struct of_device_id *match; | ||
377 | struct device *dev = &pdev->dev; | ||
378 | struct mv_cesa_dev *cesa; | ||
379 | struct mv_cesa_engine *engines; | ||
380 | struct resource *res; | ||
381 | int irq, ret, i; | ||
382 | u32 sram_size; | ||
383 | |||
384 | if (cesa_dev) { | ||
385 | dev_err(&pdev->dev, "Only one CESA device is supported\n"); | ||
386 | return -EEXIST; | ||
387 | } | ||
388 | |||
389 | if (dev->of_node) { | ||
390 | match = of_match_node(mv_cesa_of_match_table, dev->of_node); | ||
391 | if (!match || !match->data) | ||
392 | return -ENOTSUPP; | ||
393 | |||
394 | caps = match->data; | ||
395 | } | ||
396 | |||
397 | if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport) | ||
398 | return -ENOTSUPP; | ||
399 | |||
400 | cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); | ||
401 | if (!cesa) | ||
402 | return -ENOMEM; | ||
403 | |||
404 | cesa->caps = caps; | ||
405 | cesa->dev = dev; | ||
406 | |||
407 | sram_size = CESA_SA_DEFAULT_SRAM_SIZE; | ||
408 | of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size", | ||
409 | &sram_size); | ||
410 | if (sram_size < CESA_SA_MIN_SRAM_SIZE) | ||
411 | sram_size = CESA_SA_MIN_SRAM_SIZE; | ||
412 | |||
413 | cesa->sram_size = sram_size; | ||
414 | cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines), | ||
415 | GFP_KERNEL); | ||
416 | if (!cesa->engines) | ||
417 | return -ENOMEM; | ||
418 | |||
419 | spin_lock_init(&cesa->lock); | ||
420 | crypto_init_queue(&cesa->queue, 50); | ||
421 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
422 | cesa->regs = devm_ioremap_resource(dev, res); | ||
423 | if (IS_ERR(cesa->regs)) | ||
424 | return PTR_ERR(cesa->regs); | ||
425 | |||
426 | ret = mv_cesa_dev_dma_init(cesa); | ||
427 | if (ret) | ||
428 | return ret; | ||
429 | |||
430 | dram = mv_mbus_dram_info_nooverlap(); | ||
431 | |||
432 | platform_set_drvdata(pdev, cesa); | ||
433 | |||
434 | for (i = 0; i < caps->nengines; i++) { | ||
435 | struct mv_cesa_engine *engine = &cesa->engines[i]; | ||
436 | char res_name[7]; | ||
437 | |||
438 | engine->id = i; | ||
439 | spin_lock_init(&engine->lock); | ||
440 | |||
441 | ret = mv_cesa_get_sram(pdev, i); | ||
442 | if (ret) | ||
443 | goto err_cleanup; | ||
444 | |||
445 | irq = platform_get_irq(pdev, i); | ||
446 | if (irq < 0) { | ||
447 | ret = irq; | ||
448 | goto err_cleanup; | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Not all platforms can gate the CESA clocks: do not complain | ||
453 | * if the clock does not exist. | ||
454 | */ | ||
455 | snprintf(res_name, sizeof(res_name), "cesa%d", i); | ||
456 | engine->clk = devm_clk_get(dev, res_name); | ||
457 | if (IS_ERR(engine->clk)) { | ||
458 | engine->clk = devm_clk_get(dev, NULL); | ||
459 | if (IS_ERR(engine->clk)) | ||
460 | engine->clk = NULL; | ||
461 | } | ||
462 | |||
463 | snprintf(res_name, sizeof(res_name), "cesaz%d", i); | ||
464 | engine->zclk = devm_clk_get(dev, res_name); | ||
465 | if (IS_ERR(engine->zclk)) | ||
466 | engine->zclk = NULL; | ||
467 | |||
468 | ret = clk_prepare_enable(engine->clk); | ||
469 | if (ret) | ||
470 | goto err_cleanup; | ||
471 | |||
472 | ret = clk_prepare_enable(engine->zclk); | ||
473 | if (ret) | ||
474 | goto err_cleanup; | ||
475 | |||
476 | engine->regs = cesa->regs + CESA_ENGINE_OFF(i); | ||
477 | |||
478 | if (dram && cesa->caps->has_tdma) | ||
479 | mv_cesa_conf_mbus_windows(&cesa->engines[i], dram); | ||
480 | |||
481 | writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); | ||
482 | writel(CESA_SA_CFG_STOP_DIG_ERR, | ||
483 | cesa->engines[i].regs + CESA_SA_CFG); | ||
484 | writel(engine->sram_dma & CESA_SA_SRAM_MSK, | ||
485 | cesa->engines[i].regs + CESA_SA_DESC_P0); | ||
486 | |||
487 | ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int, | ||
488 | IRQF_ONESHOT, | ||
489 | dev_name(&pdev->dev), | ||
490 | &cesa->engines[i]); | ||
491 | if (ret) | ||
492 | goto err_cleanup; | ||
493 | } | ||
494 | |||
495 | cesa_dev = cesa; | ||
496 | |||
497 | ret = mv_cesa_add_algs(cesa); | ||
498 | if (ret) { | ||
499 | cesa_dev = NULL; | ||
500 | goto err_cleanup; | ||
501 | } | ||
502 | |||
503 | dev_info(dev, "CESA device successfully registered\n"); | ||
504 | |||
505 | return 0; | ||
506 | |||
507 | err_cleanup: | ||
508 | for (i = 0; i < caps->nengines; i++) { | ||
509 | clk_disable_unprepare(cesa->engines[i].zclk); | ||
510 | clk_disable_unprepare(cesa->engines[i].clk); | ||
511 | mv_cesa_put_sram(pdev, i); | ||
512 | } | ||
513 | |||
514 | return ret; | ||
515 | } | ||
516 | |||
517 | static int mv_cesa_remove(struct platform_device *pdev) | ||
518 | { | ||
519 | struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); | ||
520 | int i; | ||
521 | |||
522 | mv_cesa_remove_algs(cesa); | ||
523 | |||
524 | for (i = 0; i < cesa->caps->nengines; i++) { | ||
525 | clk_disable_unprepare(cesa->engines[i].zclk); | ||
526 | clk_disable_unprepare(cesa->engines[i].clk); | ||
527 | mv_cesa_put_sram(pdev, i); | ||
528 | } | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static struct platform_driver marvell_cesa = { | ||
534 | .probe = mv_cesa_probe, | ||
535 | .remove = mv_cesa_remove, | ||
536 | .driver = { | ||
537 | .owner = THIS_MODULE, | ||
538 | .name = "marvell-cesa", | ||
539 | .of_match_table = mv_cesa_of_match_table, | ||
540 | }, | ||
541 | }; | ||
542 | module_platform_driver(marvell_cesa); | ||
543 | |||
544 | MODULE_ALIAS("platform:mv_crypto"); | ||
545 | MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); | ||
546 | MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>"); | ||
547 | MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); | ||
548 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h new file mode 100644 index 000000000000..b60698b30d30 --- /dev/null +++ b/drivers/crypto/marvell/cesa.h | |||
@@ -0,0 +1,791 @@ | |||
1 | #ifndef __MARVELL_CESA_H__ | ||
2 | #define __MARVELL_CESA_H__ | ||
3 | |||
4 | #include <crypto/algapi.h> | ||
5 | #include <crypto/hash.h> | ||
6 | #include <crypto/internal/hash.h> | ||
7 | |||
8 | #include <linux/crypto.h> | ||
9 | #include <linux/dmapool.h> | ||
10 | |||
11 | #define CESA_ENGINE_OFF(i) (((i) * 0x2000)) | ||
12 | |||
13 | #define CESA_TDMA_BYTE_CNT 0x800 | ||
14 | #define CESA_TDMA_SRC_ADDR 0x810 | ||
15 | #define CESA_TDMA_DST_ADDR 0x820 | ||
16 | #define CESA_TDMA_NEXT_ADDR 0x830 | ||
17 | |||
18 | #define CESA_TDMA_CONTROL 0x840 | ||
19 | #define CESA_TDMA_DST_BURST GENMASK(2, 0) | ||
20 | #define CESA_TDMA_DST_BURST_32B 3 | ||
21 | #define CESA_TDMA_DST_BURST_128B 4 | ||
22 | #define CESA_TDMA_OUT_RD_EN BIT(4) | ||
23 | #define CESA_TDMA_SRC_BURST GENMASK(8, 6) | ||
24 | #define CESA_TDMA_SRC_BURST_32B (3 << 6) | ||
25 | #define CESA_TDMA_SRC_BURST_128B (4 << 6) | ||
26 | #define CESA_TDMA_CHAIN BIT(9) | ||
27 | #define CESA_TDMA_BYTE_SWAP BIT(11) | ||
28 | #define CESA_TDMA_NO_BYTE_SWAP BIT(11) | ||
29 | #define CESA_TDMA_EN BIT(12) | ||
30 | #define CESA_TDMA_FETCH_ND BIT(13) | ||
31 | #define CESA_TDMA_ACT BIT(14) | ||
32 | |||
33 | #define CESA_TDMA_CUR 0x870 | ||
34 | #define CESA_TDMA_ERROR_CAUSE 0x8c8 | ||
35 | #define CESA_TDMA_ERROR_MSK 0x8cc | ||
36 | |||
37 | #define CESA_TDMA_WINDOW_BASE(x) (((x) * 0x8) + 0xa00) | ||
38 | #define CESA_TDMA_WINDOW_CTRL(x) (((x) * 0x8) + 0xa04) | ||
39 | |||
40 | #define CESA_IVDIG(x) (0xdd00 + ((x) * 4) + \ | ||
41 | (((x) < 5) ? 0 : 0x14)) | ||
42 | |||
43 | #define CESA_SA_CMD 0xde00 | ||
44 | #define CESA_SA_CMD_EN_CESA_SA_ACCL0 BIT(0) | ||
45 | #define CESA_SA_CMD_EN_CESA_SA_ACCL1 BIT(1) | ||
46 | #define CESA_SA_CMD_DISABLE_SEC BIT(2) | ||
47 | |||
48 | #define CESA_SA_DESC_P0 0xde04 | ||
49 | |||
50 | #define CESA_SA_DESC_P1 0xde14 | ||
51 | |||
52 | #define CESA_SA_CFG 0xde08 | ||
53 | #define CESA_SA_CFG_STOP_DIG_ERR GENMASK(1, 0) | ||
54 | #define CESA_SA_CFG_DIG_ERR_CONT 0 | ||
55 | #define CESA_SA_CFG_DIG_ERR_SKIP 1 | ||
56 | #define CESA_SA_CFG_DIG_ERR_STOP 3 | ||
57 | #define CESA_SA_CFG_CH0_W_IDMA BIT(7) | ||
58 | #define CESA_SA_CFG_CH1_W_IDMA BIT(8) | ||
59 | #define CESA_SA_CFG_ACT_CH0_IDMA BIT(9) | ||
60 | #define CESA_SA_CFG_ACT_CH1_IDMA BIT(10) | ||
61 | #define CESA_SA_CFG_MULTI_PKT BIT(11) | ||
62 | #define CESA_SA_CFG_PARA_DIS BIT(13) | ||
63 | |||
64 | #define CESA_SA_ACCEL_STATUS 0xde0c | ||
65 | #define CESA_SA_ST_ACT_0 BIT(0) | ||
66 | #define CESA_SA_ST_ACT_1 BIT(1) | ||
67 | |||
68 | /* | ||
69 |  * CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only | ||
70 |  * in Errata 4.12. It appears to have been part of an IRQ controller in the | ||
71 |  * FPGA that someone forgot to remove when switching to the final core and | ||
72 |  * moving to CESA_SA_INT_STATUS. | ||
73 | */ | ||
74 | #define CESA_SA_FPGA_INT_STATUS 0xdd68 | ||
75 | #define CESA_SA_INT_STATUS 0xde20 | ||
76 | #define CESA_SA_INT_AUTH_DONE BIT(0) | ||
77 | #define CESA_SA_INT_DES_E_DONE BIT(1) | ||
78 | #define CESA_SA_INT_AES_E_DONE BIT(2) | ||
79 | #define CESA_SA_INT_AES_D_DONE BIT(3) | ||
80 | #define CESA_SA_INT_ENC_DONE BIT(4) | ||
81 | #define CESA_SA_INT_ACCEL0_DONE BIT(5) | ||
82 | #define CESA_SA_INT_ACCEL1_DONE BIT(6) | ||
83 | #define CESA_SA_INT_ACC0_IDMA_DONE BIT(7) | ||
84 | #define CESA_SA_INT_ACC1_IDMA_DONE BIT(8) | ||
85 | #define CESA_SA_INT_IDMA_DONE BIT(9) | ||
86 | #define CESA_SA_INT_IDMA_OWN_ERR BIT(10) | ||
87 | |||
88 | #define CESA_SA_INT_MSK 0xde24 | ||
89 | |||
90 | #define CESA_SA_DESC_CFG_OP_MAC_ONLY 0 | ||
91 | #define CESA_SA_DESC_CFG_OP_CRYPT_ONLY 1 | ||
92 | #define CESA_SA_DESC_CFG_OP_MAC_CRYPT 2 | ||
93 | #define CESA_SA_DESC_CFG_OP_CRYPT_MAC 3 | ||
94 | #define CESA_SA_DESC_CFG_OP_MSK GENMASK(1, 0) | ||
95 | #define CESA_SA_DESC_CFG_MACM_SHA256 (1 << 4) | ||
96 | #define CESA_SA_DESC_CFG_MACM_HMAC_SHA256 (3 << 4) | ||
97 | #define CESA_SA_DESC_CFG_MACM_MD5 (4 << 4) | ||
98 | #define CESA_SA_DESC_CFG_MACM_SHA1 (5 << 4) | ||
99 | #define CESA_SA_DESC_CFG_MACM_HMAC_MD5 (6 << 4) | ||
100 | #define CESA_SA_DESC_CFG_MACM_HMAC_SHA1 (7 << 4) | ||
101 | #define CESA_SA_DESC_CFG_MACM_MSK GENMASK(6, 4) | ||
102 | #define CESA_SA_DESC_CFG_CRYPTM_DES (1 << 8) | ||
103 | #define CESA_SA_DESC_CFG_CRYPTM_3DES (2 << 8) | ||
104 | #define CESA_SA_DESC_CFG_CRYPTM_AES (3 << 8) | ||
105 | #define CESA_SA_DESC_CFG_CRYPTM_MSK GENMASK(9, 8) | ||
106 | #define CESA_SA_DESC_CFG_DIR_ENC (0 << 12) | ||
107 | #define CESA_SA_DESC_CFG_DIR_DEC (1 << 12) | ||
108 | #define CESA_SA_DESC_CFG_CRYPTCM_ECB (0 << 16) | ||
109 | #define CESA_SA_DESC_CFG_CRYPTCM_CBC (1 << 16) | ||
110 | #define CESA_SA_DESC_CFG_CRYPTCM_MSK BIT(16) | ||
111 | #define CESA_SA_DESC_CFG_3DES_EEE (0 << 20) | ||
112 | #define CESA_SA_DESC_CFG_3DES_EDE (1 << 20) | ||
113 | #define CESA_SA_DESC_CFG_AES_LEN_128 (0 << 24) | ||
114 | #define CESA_SA_DESC_CFG_AES_LEN_192 (1 << 24) | ||
115 | #define CESA_SA_DESC_CFG_AES_LEN_256 (2 << 24) | ||
116 | #define CESA_SA_DESC_CFG_AES_LEN_MSK GENMASK(25, 24) | ||
117 | #define CESA_SA_DESC_CFG_NOT_FRAG (0 << 30) | ||
118 | #define CESA_SA_DESC_CFG_FIRST_FRAG (1 << 30) | ||
119 | #define CESA_SA_DESC_CFG_LAST_FRAG (2 << 30) | ||
120 | #define CESA_SA_DESC_CFG_MID_FRAG (3 << 30) | ||
121 | #define CESA_SA_DESC_CFG_FRAG_MSK GENMASK(31, 30) | ||
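/*
 * Editorial sketch (not in the original patch): the CESA_SA_DESC_CFG_* values
 * above are OR-ed together to form the config word of a security accelerator
 * descriptor. For instance, a CBC AES-128 encryption setup is built roughly
 * like this (cipher.c combines these fields via mv_cesa_set_op_cfg() and
 * mv_cesa_update_op_cfg()):
 *
 *	u32 cfg = CESA_SA_DESC_CFG_OP_CRYPT_ONLY |
 *		  CESA_SA_DESC_CFG_CRYPTM_AES |
 *		  CESA_SA_DESC_CFG_CRYPTCM_CBC |
 *		  CESA_SA_DESC_CFG_DIR_ENC |
 *		  CESA_SA_DESC_CFG_AES_LEN_128;
 */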
122 | |||
123 | /* Cipher memory map: | ||
124 | * /-----------\ 0 | ||
125 | * | ACCEL CFG | 4 * 8 | ||
126 | * |-----------| 0x20 | ||
127 | * | CRYPT KEY | 8 * 4 | ||
128 | * |-----------| 0x40 | ||
129 | * | IV IN | 4 * 4 | ||
130 | * |-----------| 0x40 (inplace) | ||
131 | * | IV BUF | 4 * 4 | ||
132 | * |-----------| 0x80 | ||
133 | * | DATA IN | 16 * x (max ->max_req_size) | ||
134 | * |-----------| 0x80 (inplace operation) | ||
135 | * | DATA OUT | 16 * x (max ->max_req_size) | ||
136 | * \-----------/ SRAM size | ||
137 | */ | ||
138 | |||
139 | /* | ||
140 | * Hashing memory map: | ||
141 | * /-----------\ 0 | ||
142 | * | ACCEL CFG | 4 * 8 | ||
143 | * |-----------| 0x20 | ||
144 | * | Inner IV | 8 * 4 | ||
145 | * |-----------| 0x40 | ||
146 | * | Outer IV | 8 * 4 | ||
147 | * |-----------| 0x60 | ||
148 | * | Output BUF| 8 * 4 | ||
149 | * |-----------| 0x80 | ||
150 | * | DATA IN | 64 * x (max ->max_req_size) | ||
151 | * \-----------/ SRAM size | ||
152 | */ | ||
153 | |||
154 | #define CESA_SA_CFG_SRAM_OFFSET 0x00 | ||
155 | #define CESA_SA_DATA_SRAM_OFFSET 0x80 | ||
156 | |||
157 | #define CESA_SA_CRYPT_KEY_SRAM_OFFSET 0x20 | ||
158 | #define CESA_SA_CRYPT_IV_SRAM_OFFSET 0x40 | ||
159 | |||
160 | #define CESA_SA_MAC_IIV_SRAM_OFFSET 0x20 | ||
161 | #define CESA_SA_MAC_OIV_SRAM_OFFSET 0x40 | ||
162 | #define CESA_SA_MAC_DIG_SRAM_OFFSET 0x60 | ||
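/*
 * Editorial sketch (not in the original patch): for standard (non-TDMA)
 * requests the driver fills the SRAM directly using these offsets, copying
 * the operation descriptor at offset 0 and the payload at the data offset,
 * as mv_cesa_ablkcipher_std_step() does in cipher.c:
 *
 *	memcpy(engine->sram, &op, sizeof(op));
 *	sg_pcopy_to_buffer(req->src, nents,
 *			   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
 *			   len, offset);
 */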
163 | |||
164 | #define CESA_SA_DESC_CRYPT_DATA(offset) \ | ||
165 | cpu_to_le32((CESA_SA_DATA_SRAM_OFFSET + (offset)) | \ | ||
166 | ((CESA_SA_DATA_SRAM_OFFSET + (offset)) << 16)) | ||
167 | |||
168 | #define CESA_SA_DESC_CRYPT_IV(offset) \ | ||
169 | cpu_to_le32((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) | \ | ||
170 | ((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) << 16)) | ||
171 | |||
172 | #define CESA_SA_DESC_CRYPT_KEY(offset) \ | ||
173 | cpu_to_le32(CESA_SA_CRYPT_KEY_SRAM_OFFSET + (offset)) | ||
174 | |||
175 | #define CESA_SA_DESC_MAC_DATA(offset) \ | ||
176 | cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset)) | ||
177 | #define CESA_SA_DESC_MAC_DATA_MSK GENMASK(15, 0) | ||
178 | |||
179 | #define CESA_SA_DESC_MAC_TOTAL_LEN(total_len) cpu_to_le32((total_len) << 16) | ||
180 | #define CESA_SA_DESC_MAC_TOTAL_LEN_MSK GENMASK(31, 16) | ||
181 | |||
182 | #define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX 0xffff | ||
183 | |||
184 | #define CESA_SA_DESC_MAC_DIGEST(offset) \ | ||
185 | cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset)) | ||
186 | #define CESA_SA_DESC_MAC_DIGEST_MSK GENMASK(15, 0) | ||
187 | |||
188 | #define CESA_SA_DESC_MAC_FRAG_LEN(frag_len) cpu_to_le32((frag_len) << 16) | ||
189 | #define CESA_SA_DESC_MAC_FRAG_LEN_MSK GENMASK(31, 16) | ||
190 | |||
191 | #define CESA_SA_DESC_MAC_IV(offset) \ | ||
192 | cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) | \ | ||
193 | ((CESA_SA_MAC_OIV_SRAM_OFFSET + (offset)) << 16)) | ||
194 | |||
195 | #define CESA_SA_SRAM_SIZE 2048 | ||
196 | #define CESA_SA_SRAM_PAYLOAD_SIZE (cesa_dev->sram_size - \ | ||
197 | CESA_SA_DATA_SRAM_OFFSET) | ||
198 | |||
199 | #define CESA_SA_DEFAULT_SRAM_SIZE 2048 | ||
200 | #define CESA_SA_MIN_SRAM_SIZE 1024 | ||
201 | |||
202 | #define CESA_SA_SRAM_MSK (2048 - 1) | ||
203 | |||
204 | #define CESA_MAX_HASH_BLOCK_SIZE 64 | ||
205 | #define CESA_HASH_BLOCK_SIZE_MSK (CESA_MAX_HASH_BLOCK_SIZE - 1) | ||
206 | |||
207 | /** | ||
208 | * struct mv_cesa_sec_accel_desc - security accelerator descriptor | ||
209 | * @config: engine config | ||
210 | * @enc_p: input and output data pointers for a cipher operation | ||
211 | * @enc_len: cipher operation length | ||
212 | * @enc_key_p: cipher key pointer | ||
213 | * @enc_iv: cipher IV pointers | ||
214 | * @mac_src_p: input pointer and total hash length | ||
215 | * @mac_digest: digest pointer and hash operation length | ||
216 | * @mac_iv: hmac IV pointers | ||
217 | * | ||
218 | * Structure passed to the CESA engine to describe the crypto operation | ||
219 | * to be executed. | ||
220 | */ | ||
221 | struct mv_cesa_sec_accel_desc { | ||
222 | u32 config; | ||
223 | u32 enc_p; | ||
224 | u32 enc_len; | ||
225 | u32 enc_key_p; | ||
226 | u32 enc_iv; | ||
227 | u32 mac_src_p; | ||
228 | u32 mac_digest; | ||
229 | u32 mac_iv; | ||
230 | }; | ||
231 | |||
232 | /** | ||
233 | * struct mv_cesa_blkcipher_op_ctx - cipher operation context | ||
234 | * @key: cipher key | ||
235 | * @iv: cipher IV | ||
236 | * | ||
237 | * Context associated to a cipher operation. | ||
238 | */ | ||
239 | struct mv_cesa_blkcipher_op_ctx { | ||
240 | u32 key[8]; | ||
241 | u32 iv[4]; | ||
242 | }; | ||
243 | |||
244 | /** | ||
245 |  * struct mv_cesa_hash_op_ctx - hash or HMAC operation context | ||
246 |  * @iv: hash initialization vectors (initial or intermediate state) | ||
247 |  * @hash: intermediate digest value | ||
248 |  * | ||
249 |  * Context associated to a hash or HMAC operation. | ||
250 | */ | ||
251 | struct mv_cesa_hash_op_ctx { | ||
252 | u32 iv[16]; | ||
253 | u32 hash[8]; | ||
254 | }; | ||
255 | |||
256 | /** | ||
257 | * struct mv_cesa_op_ctx - crypto operation context | ||
258 | * @desc: CESA descriptor | ||
259 | * @ctx: context associated to the crypto operation | ||
260 | * | ||
261 | * Context associated to a crypto operation. | ||
262 | */ | ||
263 | struct mv_cesa_op_ctx { | ||
264 | struct mv_cesa_sec_accel_desc desc; | ||
265 | union { | ||
266 | struct mv_cesa_blkcipher_op_ctx blkcipher; | ||
267 | struct mv_cesa_hash_op_ctx hash; | ||
268 | } ctx; | ||
269 | }; | ||
270 | |||
271 | /* TDMA descriptor flags */ | ||
272 | #define CESA_TDMA_DST_IN_SRAM BIT(31) | ||
273 | #define CESA_TDMA_SRC_IN_SRAM BIT(30) | ||
274 | #define CESA_TDMA_TYPE_MSK GENMASK(29, 0) | ||
275 | #define CESA_TDMA_DUMMY 0 | ||
276 | #define CESA_TDMA_DATA 1 | ||
277 | #define CESA_TDMA_OP 2 | ||
278 | |||
279 | /** | ||
280 | * struct mv_cesa_tdma_desc - TDMA descriptor | ||
281 | * @byte_cnt: number of bytes to transfer | ||
282 | * @src: DMA address of the source | ||
283 | * @dst: DMA address of the destination | ||
284 | * @next_dma: DMA address of the next TDMA descriptor | ||
285 | * @cur_dma: DMA address of this TDMA descriptor | ||
286 | * @next: pointer to the next TDMA descriptor | ||
287 | * @op: CESA operation attached to this TDMA descriptor | ||
288 | * @data: raw data attached to this TDMA descriptor | ||
289 | * @flags: flags describing the TDMA transfer. See the | ||
290 | * "TDMA descriptor flags" section above | ||
291 | * | ||
292 | * TDMA descriptor used to create a transfer chain describing a crypto | ||
293 | * operation. | ||
294 | */ | ||
295 | struct mv_cesa_tdma_desc { | ||
296 | u32 byte_cnt; | ||
297 | u32 src; | ||
298 | u32 dst; | ||
299 | u32 next_dma; | ||
300 | u32 cur_dma; | ||
301 | struct mv_cesa_tdma_desc *next; | ||
302 | union { | ||
303 | struct mv_cesa_op_ctx *op; | ||
304 | void *data; | ||
305 | }; | ||
306 | u32 flags; | ||
307 | }; | ||
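/*
 * Editorial sketch (not in the original patch): a chain of such descriptors
 * is built with the mv_cesa_dma_add_*() helpers declared at the end of this
 * header, e.g. (error handling and iterator setup elided):
 *
 *	struct mv_cesa_tdma_chain chain;
 *	struct mv_cesa_op_ctx *op;
 *
 *	mv_cesa_tdma_desc_iter_init(&chain);
 *	op = mv_cesa_dma_add_op(&chain, tmpl, false, flags);
 *	mv_cesa_dma_add_op_transfers(&chain, &iter.base, &iter.src, flags);
 *	mv_cesa_dma_add_dummy_launch(&chain, flags);
 *	mv_cesa_dma_add_op_transfers(&chain, &iter.base, &iter.dst, flags);
 */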
308 | |||
309 | /** | ||
310 | * struct mv_cesa_sg_dma_iter - scatter-gather iterator | ||
311 | * @dir: transfer direction | ||
312 | * @sg: scatter list | ||
313 | * @offset: current position in the scatter list | ||
314 | * @op_offset: current position in the crypto operation | ||
315 | * | ||
316 |  * Iterator used to walk a scatterlist while creating a TDMA chain for | ||
317 |  * a crypto operation. | ||
318 | */ | ||
319 | struct mv_cesa_sg_dma_iter { | ||
320 | enum dma_data_direction dir; | ||
321 | struct scatterlist *sg; | ||
322 | unsigned int offset; | ||
323 | unsigned int op_offset; | ||
324 | }; | ||
325 | |||
326 | /** | ||
327 | * struct mv_cesa_dma_iter - crypto operation iterator | ||
328 | * @len: the crypto operation length | ||
329 | * @offset: current position in the crypto operation | ||
330 |  * @op_len: sub-operation length (the crypto engine can only act on data | ||
331 |  * chunks that fit in its internal SRAM payload area) | ||
332 | * | ||
333 | * Iterator used to create a TDMA chain for a given crypto operation. | ||
334 | */ | ||
335 | struct mv_cesa_dma_iter { | ||
336 | unsigned int len; | ||
337 | unsigned int offset; | ||
338 | unsigned int op_len; | ||
339 | }; | ||
340 | |||
341 | /** | ||
342 | * struct mv_cesa_tdma_chain - TDMA chain | ||
343 | * @first: first entry in the TDMA chain | ||
344 | * @last: last entry in the TDMA chain | ||
345 | * | ||
346 | * Stores a TDMA chain for a specific crypto operation. | ||
347 | */ | ||
348 | struct mv_cesa_tdma_chain { | ||
349 | struct mv_cesa_tdma_desc *first; | ||
350 | struct mv_cesa_tdma_desc *last; | ||
351 | }; | ||
352 | |||
353 | struct mv_cesa_engine; | ||
354 | |||
355 | /** | ||
356 | * struct mv_cesa_caps - CESA device capabilities | ||
357 |  * @nengines: number of engines | ||
358 | * @has_tdma: whether this device has a TDMA block | ||
359 | * @cipher_algs: supported cipher algorithms | ||
360 | * @ncipher_algs: number of supported cipher algorithms | ||
361 | * @ahash_algs: supported hash algorithms | ||
362 | * @nahash_algs: number of supported hash algorithms | ||
363 | * | ||
364 | * Structure used to describe CESA device capabilities. | ||
365 | */ | ||
366 | struct mv_cesa_caps { | ||
367 | int nengines; | ||
368 | bool has_tdma; | ||
369 | struct crypto_alg **cipher_algs; | ||
370 | int ncipher_algs; | ||
371 | struct ahash_alg **ahash_algs; | ||
372 | int nahash_algs; | ||
373 | }; | ||
374 | |||
375 | /** | ||
376 | * struct mv_cesa_dev_dma - DMA pools | ||
377 | * @tdma_desc_pool: TDMA desc pool | ||
378 | * @op_pool: crypto operation pool | ||
379 | * @cache_pool: data cache pool (used by hash implementation when the | ||
380 | * hash request is smaller than the hash block size) | ||
381 | * @padding_pool: padding pool (used by hash implementation when hardware | ||
382 | * padding cannot be used) | ||
383 | * | ||
384 | * Structure containing the different DMA pools used by this driver. | ||
385 | */ | ||
386 | struct mv_cesa_dev_dma { | ||
387 | struct dma_pool *tdma_desc_pool; | ||
388 | struct dma_pool *op_pool; | ||
389 | struct dma_pool *cache_pool; | ||
390 | struct dma_pool *padding_pool; | ||
391 | }; | ||
392 | |||
393 | /** | ||
394 | * struct mv_cesa_dev - CESA device | ||
395 | * @caps: device capabilities | ||
396 | * @regs: device registers | ||
397 | * @sram_size: usable SRAM size | ||
398 | * @lock: device lock | ||
399 | * @queue: crypto request queue | ||
400 | * @engines: array of engines | ||
401 | * @dma: dma pools | ||
402 | * | ||
403 | * Structure storing CESA device information. | ||
404 | */ | ||
405 | struct mv_cesa_dev { | ||
406 | const struct mv_cesa_caps *caps; | ||
407 | void __iomem *regs; | ||
408 | struct device *dev; | ||
409 | unsigned int sram_size; | ||
410 | spinlock_t lock; | ||
411 | struct crypto_queue queue; | ||
412 | struct mv_cesa_engine *engines; | ||
413 | struct mv_cesa_dev_dma *dma; | ||
414 | }; | ||
415 | |||
416 | /** | ||
417 | * struct mv_cesa_engine - CESA engine | ||
418 | * @id: engine id | ||
419 | * @regs: engine registers | ||
420 | * @sram: SRAM memory region | ||
421 | * @sram_dma: DMA address of the SRAM memory region | ||
422 | * @lock: engine lock | ||
423 | * @req: current crypto request | ||
424 | * @clk: engine clk | ||
425 | * @zclk: engine zclk | ||
426 | * @max_req_len: maximum chunk length (useful to create the TDMA chain) | ||
427 | * @int_mask: interrupt mask cache | ||
428 | * @pool: memory pool pointing to the memory region reserved in | ||
429 | * SRAM | ||
430 | * | ||
431 | * Structure storing CESA engine information. | ||
432 | */ | ||
433 | struct mv_cesa_engine { | ||
434 | int id; | ||
435 | void __iomem *regs; | ||
436 | void __iomem *sram; | ||
437 | dma_addr_t sram_dma; | ||
438 | spinlock_t lock; | ||
439 | struct crypto_async_request *req; | ||
440 | struct clk *clk; | ||
441 | struct clk *zclk; | ||
442 | size_t max_req_len; | ||
443 | u32 int_mask; | ||
444 | struct gen_pool *pool; | ||
445 | }; | ||
446 | |||
447 | /** | ||
448 | * struct mv_cesa_req_ops - CESA request operations | ||
449 | * @prepare: prepare a request to be executed on the specified engine | ||
450 |  * @process: process a request chunk result (should return 0 if the | ||
451 |  * operation is complete, -EINPROGRESS if it needs more | ||
452 |  * steps, or an error code) | ||
453 | * @step: launch the crypto operation on the next chunk | ||
454 | * @cleanup: cleanup the crypto request (release associated data) | ||
455 | */ | ||
456 | struct mv_cesa_req_ops { | ||
457 | void (*prepare)(struct crypto_async_request *req, | ||
458 | struct mv_cesa_engine *engine); | ||
459 | int (*process)(struct crypto_async_request *req, u32 status); | ||
460 | void (*step)(struct crypto_async_request *req); | ||
461 | void (*cleanup)(struct crypto_async_request *req); | ||
462 | }; | ||
463 | |||
464 | /** | ||
465 | * struct mv_cesa_ctx - CESA operation context | ||
466 | * @ops: crypto operations | ||
467 | * | ||
468 | * Base context structure inherited by operation specific ones. | ||
469 | */ | ||
470 | struct mv_cesa_ctx { | ||
471 | const struct mv_cesa_req_ops *ops; | ||
472 | }; | ||
473 | |||
474 | /** | ||
475 | * struct mv_cesa_hash_ctx - CESA hash operation context | ||
476 | * @base: base context structure | ||
477 | * | ||
478 | * Hash context structure. | ||
479 | */ | ||
480 | struct mv_cesa_hash_ctx { | ||
481 | struct mv_cesa_ctx base; | ||
482 | }; | ||
483 | |||
484 | /** | ||
485 |  * struct mv_cesa_hmac_ctx - CESA HMAC operation context | ||
486 | * @base: base context structure | ||
487 | * @iv: initialization vectors | ||
488 | * | ||
489 | * HMAC context structure. | ||
490 | */ | ||
491 | struct mv_cesa_hmac_ctx { | ||
492 | struct mv_cesa_ctx base; | ||
493 | u32 iv[16]; | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * enum mv_cesa_req_type - request type definitions | ||
498 | * @CESA_STD_REQ: standard request | ||
499 | * @CESA_DMA_REQ: DMA request | ||
500 | */ | ||
501 | enum mv_cesa_req_type { | ||
502 | CESA_STD_REQ, | ||
503 | CESA_DMA_REQ, | ||
504 | }; | ||
505 | |||
506 | /** | ||
507 | * struct mv_cesa_req - CESA request | ||
508 | * @type: request type | ||
509 | * @engine: engine associated with this request | ||
510 | */ | ||
511 | struct mv_cesa_req { | ||
512 | enum mv_cesa_req_type type; | ||
513 | struct mv_cesa_engine *engine; | ||
514 | }; | ||
515 | |||
516 | /** | ||
517 | * struct mv_cesa_tdma_req - CESA TDMA request | ||
518 | * @base: base information | ||
519 | * @chain: TDMA chain | ||
520 | */ | ||
521 | struct mv_cesa_tdma_req { | ||
522 | struct mv_cesa_req base; | ||
523 | struct mv_cesa_tdma_chain chain; | ||
524 | }; | ||
525 | |||
526 | /** | ||
527 | * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard | ||
528 | * requests | ||
529 | * @iter: sg mapping iterator | ||
530 | * @offset: current offset in the SG entry mapped in memory | ||
531 | */ | ||
532 | struct mv_cesa_sg_std_iter { | ||
533 | struct sg_mapping_iter iter; | ||
534 | unsigned int offset; | ||
535 | }; | ||
536 | |||
537 | /** | ||
538 | * struct mv_cesa_ablkcipher_std_req - cipher standard request | ||
539 | * @base: base information | ||
540 | * @op: operation context | ||
541 | * @offset: current operation offset | ||
542 | * @size: size of the crypto operation | ||
543 | */ | ||
544 | struct mv_cesa_ablkcipher_std_req { | ||
545 | struct mv_cesa_req base; | ||
546 | struct mv_cesa_op_ctx op; | ||
547 | unsigned int offset; | ||
548 | unsigned int size; | ||
549 | bool skip_ctx; | ||
550 | }; | ||
551 | |||
552 | /** | ||
553 | * struct mv_cesa_ablkcipher_req - cipher request | ||
554 | * @req: type specific request information | ||
555 | * @src_nents: number of entries in the src sg list | ||
556 | * @dst_nents: number of entries in the dest sg list | ||
557 | */ | ||
558 | struct mv_cesa_ablkcipher_req { | ||
559 | union { | ||
560 | struct mv_cesa_req base; | ||
561 | struct mv_cesa_tdma_req dma; | ||
562 | struct mv_cesa_ablkcipher_std_req std; | ||
563 | } req; | ||
564 | int src_nents; | ||
565 | int dst_nents; | ||
566 | }; | ||
567 | |||
568 | /** | ||
569 | * struct mv_cesa_ahash_std_req - standard hash request | ||
570 | * @base: base information | ||
571 | * @offset: current operation offset | ||
572 | */ | ||
573 | struct mv_cesa_ahash_std_req { | ||
574 | struct mv_cesa_req base; | ||
575 | unsigned int offset; | ||
576 | }; | ||
577 | |||
578 | /** | ||
579 | * struct mv_cesa_ahash_dma_req - DMA hash request | ||
580 | * @base: base information | ||
581 | * @padding: padding buffer | ||
582 | * @padding_dma: DMA address of the padding buffer | ||
583 | * @cache_dma: DMA address of the cache buffer | ||
584 | */ | ||
585 | struct mv_cesa_ahash_dma_req { | ||
586 | struct mv_cesa_tdma_req base; | ||
587 | u8 *padding; | ||
588 | dma_addr_t padding_dma; | ||
589 | dma_addr_t cache_dma; | ||
590 | }; | ||
591 | |||
592 | /** | ||
593 | * struct mv_cesa_ahash_req - hash request | ||
594 | * @req: type specific request information | ||
595 | * @cache: cache buffer | ||
596 | * @cache_ptr: write pointer in the cache buffer | ||
597 | * @len: hash total length | ||
598 | * @src_nents: number of entries in the scatterlist | ||
599 |  * @last_req: defines whether the current request is the last one | ||
600 |  * or not | ||
601 | * @state: hash state | ||
602 | */ | ||
603 | struct mv_cesa_ahash_req { | ||
604 | union { | ||
605 | struct mv_cesa_req base; | ||
606 | struct mv_cesa_ahash_dma_req dma; | ||
607 | struct mv_cesa_ahash_std_req std; | ||
608 | } req; | ||
609 | struct mv_cesa_op_ctx op_tmpl; | ||
610 | u8 *cache; | ||
611 | unsigned int cache_ptr; | ||
612 | u64 len; | ||
613 | int src_nents; | ||
614 | bool last_req; | ||
615 | __be32 state[8]; | ||
616 | }; | ||
617 | |||
618 | /* CESA functions */ | ||
619 | |||
620 | extern struct mv_cesa_dev *cesa_dev; | ||
621 | |||
622 | static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op, | ||
623 | u32 cfg, u32 mask) | ||
624 | { | ||
625 | op->desc.config &= cpu_to_le32(~mask); | ||
626 | op->desc.config |= cpu_to_le32(cfg); | ||
627 | } | ||
628 | |||
629 | static inline u32 mv_cesa_get_op_cfg(struct mv_cesa_op_ctx *op) | ||
630 | { | ||
631 | return le32_to_cpu(op->desc.config); | ||
632 | } | ||
633 | |||
634 | static inline void mv_cesa_set_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg) | ||
635 | { | ||
636 | op->desc.config = cpu_to_le32(cfg); | ||
637 | } | ||
638 | |||
639 | static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine, | ||
640 | struct mv_cesa_op_ctx *op) | ||
641 | { | ||
642 | u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK; | ||
643 | |||
644 | op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset); | ||
645 | op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset); | ||
646 | op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset); | ||
647 | op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK; | ||
648 | op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset); | ||
649 | op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK; | ||
650 | op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset); | ||
651 | op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset); | ||
652 | } | ||
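/*
 * Editorial note (not in the original patch): the security accelerator
 * descriptor addresses data by offsets inside the engine's SRAM window, so
 * mv_cesa_adjust_op() rebinds a template operation to a given engine by
 * masking that engine's SRAM DMA address with CESA_SA_SRAM_MSK and patching
 * every pointer field accordingly.
 */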
653 | |||
654 | static inline void mv_cesa_set_crypt_op_len(struct mv_cesa_op_ctx *op, int len) | ||
655 | { | ||
656 | op->desc.enc_len = cpu_to_le32(len); | ||
657 | } | ||
658 | |||
659 | static inline void mv_cesa_set_mac_op_total_len(struct mv_cesa_op_ctx *op, | ||
660 | int len) | ||
661 | { | ||
662 | op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK; | ||
663 | op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len); | ||
664 | } | ||
665 | |||
666 | static inline void mv_cesa_set_mac_op_frag_len(struct mv_cesa_op_ctx *op, | ||
667 | int len) | ||
668 | { | ||
669 | op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK; | ||
670 | op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len); | ||
671 | } | ||
672 | |||
673 | static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine, | ||
674 | u32 int_mask) | ||
675 | { | ||
676 | if (int_mask == engine->int_mask) | ||
677 | return; | ||
678 | |||
679 | writel(int_mask, engine->regs + CESA_SA_INT_MSK); | ||
680 | engine->int_mask = int_mask; | ||
681 | } | ||
682 | |||
683 | static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) | ||
684 | { | ||
685 | return engine->int_mask; | ||
686 | } | ||
687 | |||
688 | int mv_cesa_queue_req(struct crypto_async_request *req); | ||
689 | |||
690 | /* TDMA functions */ | ||
691 | |||
692 | static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, | ||
693 | unsigned int len) | ||
694 | { | ||
695 | iter->len = len; | ||
696 | iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE); | ||
697 | iter->offset = 0; | ||
698 | } | ||
699 | |||
700 | static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter, | ||
701 | struct scatterlist *sg, | ||
702 | enum dma_data_direction dir) | ||
703 | { | ||
704 | iter->op_offset = 0; | ||
705 | iter->offset = 0; | ||
706 | iter->sg = sg; | ||
707 | iter->dir = dir; | ||
708 | } | ||
709 | |||
710 | static inline unsigned int | ||
711 | mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter, | ||
712 | struct mv_cesa_sg_dma_iter *sgiter) | ||
713 | { | ||
714 | return min(iter->op_len - sgiter->op_offset, | ||
715 | sg_dma_len(sgiter->sg) - sgiter->offset); | ||
716 | } | ||
717 | |||
718 | bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain, | ||
719 | struct mv_cesa_sg_dma_iter *sgiter, | ||
720 | unsigned int len); | ||
721 | |||
722 | static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter) | ||
723 | { | ||
724 | iter->offset += iter->op_len; | ||
725 | iter->op_len = min(iter->len - iter->offset, | ||
726 | CESA_SA_SRAM_PAYLOAD_SIZE); | ||
727 | |||
728 | return iter->op_len; | ||
729 | } | ||
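/*
 * Editorial sketch (not in the original patch): the iterators above are used
 * together to split a request into SRAM-sized sub-operations, e.g.:
 *
 *	struct mv_cesa_dma_iter iter;
 *	struct mv_cesa_sg_dma_iter sgiter;
 *
 *	mv_cesa_req_dma_iter_init(&iter, req->nbytes);
 *	mv_cesa_sg_dma_iter_init(&sgiter, req->src, DMA_TO_DEVICE);
 *	do {
 *		... build descriptors covering iter.op_len bytes ...
 *	} while (mv_cesa_req_dma_iter_next_op(&iter));
 */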
730 | |||
731 | void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq); | ||
732 | |||
733 | static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, | ||
734 | u32 status) | ||
735 | { | ||
736 | if (!(status & CESA_SA_INT_ACC0_IDMA_DONE)) | ||
737 | return -EINPROGRESS; | ||
738 | |||
739 | if (status & CESA_SA_INT_IDMA_OWN_ERR) | ||
740 | return -EINVAL; | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, | ||
746 | struct mv_cesa_engine *engine); | ||
747 | |||
748 | void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq); | ||
749 | |||
750 | static inline void | ||
751 | mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) | ||
752 | { | ||
753 | memset(chain, 0, sizeof(*chain)); | ||
754 | } | ||
755 | |||
756 | struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, | ||
757 | const struct mv_cesa_op_ctx *op_templ, | ||
758 | bool skip_ctx, | ||
759 | gfp_t flags); | ||
760 | |||
761 | int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, | ||
762 | dma_addr_t dst, dma_addr_t src, u32 size, | ||
763 | u32 flags, gfp_t gfp_flags); | ||
764 | |||
765 | int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, | ||
766 | u32 flags); | ||
767 | |||
768 | int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags); | ||
769 | |||
770 | int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, | ||
771 | struct mv_cesa_dma_iter *dma_iter, | ||
772 | struct mv_cesa_sg_dma_iter *sgiter, | ||
773 | gfp_t gfp_flags); | ||
774 | |||
775 | /* Algorithm definitions */ | ||
776 | |||
777 | extern struct ahash_alg mv_md5_alg; | ||
778 | extern struct ahash_alg mv_sha1_alg; | ||
779 | extern struct ahash_alg mv_sha256_alg; | ||
780 | extern struct ahash_alg mv_ahmac_md5_alg; | ||
781 | extern struct ahash_alg mv_ahmac_sha1_alg; | ||
782 | extern struct ahash_alg mv_ahmac_sha256_alg; | ||
783 | |||
784 | extern struct crypto_alg mv_cesa_ecb_des_alg; | ||
785 | extern struct crypto_alg mv_cesa_cbc_des_alg; | ||
786 | extern struct crypto_alg mv_cesa_ecb_des3_ede_alg; | ||
787 | extern struct crypto_alg mv_cesa_cbc_des3_ede_alg; | ||
788 | extern struct crypto_alg mv_cesa_ecb_aes_alg; | ||
789 | extern struct crypto_alg mv_cesa_cbc_aes_alg; | ||
790 | |||
791 | #endif /* __MARVELL_CESA_H__ */ | ||
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c new file mode 100644 index 000000000000..0745cf3b9c0e --- /dev/null +++ b/drivers/crypto/marvell/cipher.c | |||
@@ -0,0 +1,797 @@ | |||
1 | /* | ||
2 | * Cipher algorithms supported by the CESA: DES, 3DES and AES. | ||
3 | * | ||
4 | * Author: Boris Brezillon <boris.brezillon@free-electrons.com> | ||
5 | * Author: Arnaud Ebalard <arno@natisbad.org> | ||
6 | * | ||
7 | * This work is based on an initial version written by | ||
8 | * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published | ||
12 | * by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <crypto/aes.h> | ||
16 | #include <crypto/des.h> | ||
17 | |||
18 | #include "cesa.h" | ||
19 | |||
20 | struct mv_cesa_des_ctx { | ||
21 | struct mv_cesa_ctx base; | ||
22 | u8 key[DES_KEY_SIZE]; | ||
23 | }; | ||
24 | |||
25 | struct mv_cesa_des3_ctx { | ||
26 | struct mv_cesa_ctx base; | ||
27 | u8 key[DES3_EDE_KEY_SIZE]; | ||
28 | }; | ||
29 | |||
30 | struct mv_cesa_aes_ctx { | ||
31 | struct mv_cesa_ctx base; | ||
32 | struct crypto_aes_ctx aes; | ||
33 | }; | ||
34 | |||
35 | struct mv_cesa_ablkcipher_dma_iter { | ||
36 | struct mv_cesa_dma_iter base; | ||
37 | struct mv_cesa_sg_dma_iter src; | ||
38 | struct mv_cesa_sg_dma_iter dst; | ||
39 | }; | ||
40 | |||
41 | static inline void | ||
42 | mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, | ||
43 | struct ablkcipher_request *req) | ||
44 | { | ||
45 | mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); | ||
46 | mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); | ||
47 | mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); | ||
48 | } | ||
49 | |||
50 | static inline bool | ||
51 | mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) | ||
52 | { | ||
53 | iter->src.op_offset = 0; | ||
54 | iter->dst.op_offset = 0; | ||
55 | |||
56 | return mv_cesa_req_dma_iter_next_op(&iter->base); | ||
57 | } | ||
58 | |||
59 | static inline void | ||
60 | mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) | ||
61 | { | ||
62 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
63 | |||
64 | if (req->dst != req->src) { | ||
65 | dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, | ||
66 | DMA_FROM_DEVICE); | ||
67 | dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, | ||
68 | DMA_TO_DEVICE); | ||
69 | } else { | ||
70 | dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, | ||
71 | DMA_BIDIRECTIONAL); | ||
72 | } | ||
73 | mv_cesa_dma_cleanup(&creq->req.dma); | ||
74 | } | ||
75 | |||
76 | static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) | ||
77 | { | ||
78 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
79 | |||
80 | if (creq->req.base.type == CESA_DMA_REQ) | ||
81 | mv_cesa_ablkcipher_dma_cleanup(req); | ||
82 | } | ||
83 | |||
84 | static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) | ||
85 | { | ||
86 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
87 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; | ||
88 | struct mv_cesa_engine *engine = sreq->base.engine; | ||
89 | size_t len = min_t(size_t, req->nbytes - sreq->offset, | ||
90 | CESA_SA_SRAM_PAYLOAD_SIZE); | ||
91 | |||
92 | len = sg_pcopy_to_buffer(req->src, creq->src_nents, | ||
93 | engine->sram + CESA_SA_DATA_SRAM_OFFSET, | ||
94 | len, sreq->offset); | ||
95 | |||
96 | sreq->size = len; | ||
97 | mv_cesa_set_crypt_op_len(&sreq->op, len); | ||
98 | |||
99 | /* FIXME: only update enc_len field */ | ||
100 | if (!sreq->skip_ctx) { | ||
101 | memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); | ||
102 | sreq->skip_ctx = true; | ||
103 | } else { | ||
104 | memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc)); | ||
105 | } | ||
106 | |||
107 | mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); | ||
108 | writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); | ||
109 | writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); | ||
110 | } | ||
111 | |||
112 | static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, | ||
113 | u32 status) | ||
114 | { | ||
115 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
116 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; | ||
117 | struct mv_cesa_engine *engine = sreq->base.engine; | ||
118 | size_t len; | ||
119 | |||
120 | len = sg_pcopy_from_buffer(req->dst, creq->dst_nents, | ||
121 | engine->sram + CESA_SA_DATA_SRAM_OFFSET, | ||
122 | sreq->size, sreq->offset); | ||
123 | |||
124 | sreq->offset += len; | ||
125 | if (sreq->offset < req->nbytes) | ||
126 | return -EINPROGRESS; | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, | ||
132 | u32 status) | ||
133 | { | ||
134 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | ||
135 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | ||
136 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; | ||
137 | struct mv_cesa_engine *engine = sreq->base.engine; | ||
138 | int ret; | ||
139 | |||
140 | if (creq->req.base.type == CESA_DMA_REQ) | ||
141 | ret = mv_cesa_dma_process(&creq->req.dma, status); | ||
142 | else | ||
143 | ret = mv_cesa_ablkcipher_std_process(ablkreq, status); | ||
144 | |||
145 | if (ret) | ||
146 | return ret; | ||
147 | |||
148 | memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, | ||
149 | crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq))); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) | ||
155 | { | ||
156 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | ||
157 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | ||
158 | |||
159 | if (creq->req.base.type == CESA_DMA_REQ) | ||
160 | mv_cesa_dma_step(&creq->req.dma); | ||
161 | else | ||
162 | mv_cesa_ablkcipher_std_step(ablkreq); | ||
163 | } | ||
164 | |||
165 | static inline void | ||
166 | mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) | ||
167 | { | ||
168 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
169 | struct mv_cesa_tdma_req *dreq = &creq->req.dma; | ||
170 | |||
171 | mv_cesa_dma_prepare(dreq, dreq->base.engine); | ||
172 | } | ||
173 | |||
174 | static inline void | ||
175 | mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) | ||
176 | { | ||
177 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
178 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; | ||
179 | struct mv_cesa_engine *engine = sreq->base.engine; | ||
180 | |||
181 | sreq->size = 0; | ||
182 | sreq->offset = 0; | ||
183 | mv_cesa_adjust_op(engine, &sreq->op); | ||
184 | memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); | ||
185 | } | ||
186 | |||
187 | static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, | ||
188 | struct mv_cesa_engine *engine) | ||
189 | { | ||
190 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | ||
191 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); | ||
192 | |||
193 | creq->req.base.engine = engine; | ||
194 | |||
195 | if (creq->req.base.type == CESA_DMA_REQ) | ||
196 | mv_cesa_ablkcipher_dma_prepare(ablkreq); | ||
197 | else | ||
198 | mv_cesa_ablkcipher_std_prepare(ablkreq); | ||
199 | } | ||
200 | |||
201 | static inline void | ||
202 | mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) | ||
203 | { | ||
204 | struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); | ||
205 | |||
206 | mv_cesa_ablkcipher_cleanup(ablkreq); | ||
207 | } | ||
208 | |||
209 | static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { | ||
210 | .step = mv_cesa_ablkcipher_step, | ||
211 | .process = mv_cesa_ablkcipher_process, | ||
212 | .prepare = mv_cesa_ablkcipher_prepare, | ||
213 | .cleanup = mv_cesa_ablkcipher_req_cleanup, | ||
214 | }; | ||
215 | |||
216 | static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) | ||
217 | { | ||
218 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
219 | |||
220 | ctx->base.ops = &mv_cesa_ablkcipher_req_ops; | ||
221 | |||
222 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
228 | unsigned int len) | ||
229 | { | ||
230 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
231 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
232 | int remaining; | ||
233 | int offset; | ||
234 | int ret; | ||
235 | int i; | ||
236 | |||
237 | ret = crypto_aes_expand_key(&ctx->aes, key, len); | ||
238 | if (ret) { | ||
239 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
240 | return ret; | ||
241 | } | ||
242 | |||
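	/*
	 * Editorial note (not in the original patch): the engine appears to
	 * expect the raw tail of the expanded key schedule as its decryption
	 * key. crypto_aes_expand_key() already stores the last round key in
	 * key_dec[0..3]; the loop below copies the preceding
	 * (key_length - 16) / 4 words from key_enc into key_dec[4..].
	 */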
243 | remaining = (ctx->aes.key_length - 16) / 4; | ||
244 | offset = ctx->aes.key_length + 24 - remaining; | ||
245 | for (i = 0; i < remaining; i++) | ||
246 | ctx->aes.key_dec[4 + i] = | ||
247 | cpu_to_le32(ctx->aes.key_enc[offset + i]); | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
253 | unsigned int len) | ||
254 | { | ||
255 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
256 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); | ||
257 | u32 tmp[DES_EXPKEY_WORDS]; | ||
258 | int ret; | ||
259 | |||
260 | if (len != DES_KEY_SIZE) { | ||
261 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
262 | return -EINVAL; | ||
263 | } | ||
264 | |||
265 | ret = des_ekey(tmp, key); | ||
266 | if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
267 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | |||
271 | memcpy(ctx->key, key, DES_KEY_SIZE); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher, | ||
277 | const u8 *key, unsigned int len) | ||
278 | { | ||
279 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
280 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); | ||
281 | |||
282 | if (len != DES3_EDE_KEY_SIZE) { | ||
283 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
284 | return -EINVAL; | ||
285 | } | ||
286 | |||
287 | memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); | ||
288 | |||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, | ||
293 | const struct mv_cesa_op_ctx *op_templ) | ||
294 | { | ||
295 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
296 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
297 | GFP_KERNEL : GFP_ATOMIC; | ||
298 | struct mv_cesa_tdma_req *dreq = &creq->req.dma; | ||
299 | struct mv_cesa_ablkcipher_dma_iter iter; | ||
300 | struct mv_cesa_tdma_chain chain; | ||
301 | bool skip_ctx = false; | ||
302 | int ret; | ||
303 | |||
304 | dreq->base.type = CESA_DMA_REQ; | ||
305 | dreq->chain.first = NULL; | ||
306 | dreq->chain.last = NULL; | ||
307 | |||
308 | if (req->src != req->dst) { | ||
309 | ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, | ||
310 | DMA_TO_DEVICE); | ||
311 | if (!ret) | ||
312 | return -ENOMEM; | ||
313 | |||
314 | ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents, | ||
315 | DMA_FROM_DEVICE); | ||
316 | if (!ret) { | ||
317 | ret = -ENOMEM; | ||
318 | goto err_unmap_src; | ||
319 | } | ||
320 | } else { | ||
321 | ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, | ||
322 | DMA_BIDIRECTIONAL); | ||
323 | if (!ret) | ||
324 | return -ENOMEM; | ||
325 | } | ||
326 | |||
327 | mv_cesa_tdma_desc_iter_init(&chain); | ||
328 | mv_cesa_ablkcipher_req_iter_init(&iter, req); | ||
329 | |||
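	/*
	 * Editorial note (not in the original patch): each SRAM-sized chunk
	 * produces its own sequence in the TDMA chain: an operation
	 * descriptor, the input data transfers, a dummy descriptor that
	 * launches the crypto engine, and the output data transfers.
	 */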
330 | do { | ||
331 | struct mv_cesa_op_ctx *op; | ||
332 | |||
333 | op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags); | ||
334 | if (IS_ERR(op)) { | ||
335 | ret = PTR_ERR(op); | ||
336 | goto err_free_tdma; | ||
337 | } | ||
338 | skip_ctx = true; | ||
339 | |||
340 | mv_cesa_set_crypt_op_len(op, iter.base.op_len); | ||
341 | |||
342 | /* Add input transfers */ | ||
343 | ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, | ||
344 | &iter.src, flags); | ||
345 | if (ret) | ||
346 | goto err_free_tdma; | ||
347 | |||
348 | /* Add dummy desc to launch the crypto operation */ | ||
349 | ret = mv_cesa_dma_add_dummy_launch(&chain, flags); | ||
350 | if (ret) | ||
351 | goto err_free_tdma; | ||
352 | |||
353 | /* Add output transfers */ | ||
354 | ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, | ||
355 | &iter.dst, flags); | ||
356 | if (ret) | ||
357 | goto err_free_tdma; | ||
358 | |||
359 | } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); | ||
360 | |||
361 | dreq->chain = chain; | ||
362 | |||
363 | return 0; | ||
364 | |||
365 | err_free_tdma: | ||
366 | mv_cesa_dma_cleanup(dreq); | ||
367 | if (req->dst != req->src) | ||
368 | dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, | ||
369 | DMA_FROM_DEVICE); | ||
370 | |||
371 | err_unmap_src: | ||
372 | dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, | ||
373 | req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); | ||
374 | |||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | static inline int | ||
379 | mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, | ||
380 | const struct mv_cesa_op_ctx *op_templ) | ||
381 | { | ||
382 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
383 | struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; | ||
384 | |||
385 | sreq->base.type = CESA_STD_REQ; | ||
386 | sreq->op = *op_templ; | ||
387 | sreq->skip_ctx = false; | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, | ||
393 | struct mv_cesa_op_ctx *tmpl) | ||
394 | { | ||
395 | struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); | ||
396 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
397 | unsigned int blksize = crypto_ablkcipher_blocksize(tfm); | ||
398 | int ret; | ||
399 | |||
400 | if (!IS_ALIGNED(req->nbytes, blksize)) | ||
401 | return -EINVAL; | ||
402 | |||
403 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); | ||
404 | creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); | ||
405 | |||
406 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, | ||
407 | CESA_SA_DESC_CFG_OP_MSK); | ||
408 | |||
409 | /* TODO: add a threshold for DMA usage */ | ||
410 | if (cesa_dev->caps->has_tdma) | ||
411 | ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); | ||
412 | else | ||
413 | ret = mv_cesa_ablkcipher_std_req_init(req, tmpl); | ||
414 | |||
415 | return ret; | ||
416 | } | ||
417 | |||
418 | static int mv_cesa_des_op(struct ablkcipher_request *req, | ||
419 | struct mv_cesa_op_ctx *tmpl) | ||
420 | { | ||
421 | struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
422 | int ret; | ||
423 | |||
424 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, | ||
425 | CESA_SA_DESC_CFG_CRYPTM_MSK); | ||
426 | |||
427 | memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); | ||
428 | |||
429 | ret = mv_cesa_ablkcipher_req_init(req, tmpl); | ||
430 | if (ret) | ||
431 | return ret; | ||
432 | |||
433 | ret = mv_cesa_queue_req(&req->base); | ||
434 | if (ret && ret != -EINPROGRESS) | ||
435 | mv_cesa_ablkcipher_cleanup(req); | ||
436 | |||
437 | return ret; | ||
438 | } | ||
439 | |||
440 | static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) | ||
441 | { | ||
442 | struct mv_cesa_op_ctx tmpl; | ||
443 | |||
444 | mv_cesa_set_op_cfg(&tmpl, | ||
445 | CESA_SA_DESC_CFG_CRYPTCM_ECB | | ||
446 | CESA_SA_DESC_CFG_DIR_ENC); | ||
447 | |||
448 | return mv_cesa_des_op(req, &tmpl); | ||
449 | } | ||
450 | |||
451 | static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) | ||
452 | { | ||
453 | struct mv_cesa_op_ctx tmpl; | ||
454 | |||
455 | mv_cesa_set_op_cfg(&tmpl, | ||
456 | CESA_SA_DESC_CFG_CRYPTCM_ECB | | ||
457 | CESA_SA_DESC_CFG_DIR_DEC); | ||
458 | |||
459 | return mv_cesa_des_op(req, &tmpl); | ||
460 | } | ||
461 | |||
462 | struct crypto_alg mv_cesa_ecb_des_alg = { | ||
463 | .cra_name = "ecb(des)", | ||
464 | .cra_driver_name = "mv-ecb-des", | ||
465 | .cra_priority = 300, | ||
466 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
467 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
468 | .cra_blocksize = DES_BLOCK_SIZE, | ||
469 | .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), | ||
470 | .cra_alignmask = 0, | ||
471 | .cra_type = &crypto_ablkcipher_type, | ||
472 | .cra_module = THIS_MODULE, | ||
473 | .cra_init = mv_cesa_ablkcipher_cra_init, | ||
474 | .cra_u = { | ||
475 | .ablkcipher = { | ||
476 | .min_keysize = DES_KEY_SIZE, | ||
477 | .max_keysize = DES_KEY_SIZE, | ||
478 | .setkey = mv_cesa_des_setkey, | ||
479 | .encrypt = mv_cesa_ecb_des_encrypt, | ||
480 | .decrypt = mv_cesa_ecb_des_decrypt, | ||
481 | }, | ||
482 | }, | ||
483 | }; | ||
484 | |||
485 | static int mv_cesa_cbc_des_op(struct ablkcipher_request *req, | ||
486 | struct mv_cesa_op_ctx *tmpl) | ||
487 | { | ||
488 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, | ||
489 | CESA_SA_DESC_CFG_CRYPTCM_MSK); | ||
490 | |||
491 | memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE); | ||
492 | |||
493 | return mv_cesa_des_op(req, tmpl); | ||
494 | } | ||
495 | |||
496 | static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) | ||
497 | { | ||
498 | struct mv_cesa_op_ctx tmpl; | ||
499 | |||
500 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); | ||
501 | |||
502 | return mv_cesa_cbc_des_op(req, &tmpl); | ||
503 | } | ||
504 | |||
505 | static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) | ||
506 | { | ||
507 | struct mv_cesa_op_ctx tmpl; | ||
508 | |||
509 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); | ||
510 | |||
511 | return mv_cesa_cbc_des_op(req, &tmpl); | ||
512 | } | ||
513 | |||
514 | struct crypto_alg mv_cesa_cbc_des_alg = { | ||
515 | .cra_name = "cbc(des)", | ||
516 | .cra_driver_name = "mv-cbc-des", | ||
517 | .cra_priority = 300, | ||
518 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
519 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
520 | .cra_blocksize = DES_BLOCK_SIZE, | ||
521 | .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), | ||
522 | .cra_alignmask = 0, | ||
523 | .cra_type = &crypto_ablkcipher_type, | ||
524 | .cra_module = THIS_MODULE, | ||
525 | .cra_init = mv_cesa_ablkcipher_cra_init, | ||
526 | .cra_u = { | ||
527 | .ablkcipher = { | ||
528 | .min_keysize = DES_KEY_SIZE, | ||
529 | .max_keysize = DES_KEY_SIZE, | ||
530 | .ivsize = DES_BLOCK_SIZE, | ||
531 | .setkey = mv_cesa_des_setkey, | ||
532 | .encrypt = mv_cesa_cbc_des_encrypt, | ||
533 | .decrypt = mv_cesa_cbc_des_decrypt, | ||
534 | }, | ||
535 | }, | ||
536 | }; | ||
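A minimal caller-side sketch of driving this "cbc(des)" alg through the 4.2-era ablkcipher API; 'key', 'buf', 'len' and the completion callback 'done_cb' are assumed placeholders (not part of this patch) and error handling is trimmed:

	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	u8 iv[DES_BLOCK_SIZE] = { 0 };
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(des)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_ablkcipher_setkey(tfm, key, DES_KEY_SIZE);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	/* mv-cbc-des is CRYPTO_ALG_ASYNC: expect -EINPROGRESS and wait
	 * for done_cb to complete 'done' before reading the result. */
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					done_cb, &done);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv); /* in place */

	ret = crypto_ablkcipher_encrypt(req);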
537 | |||
538 | static int mv_cesa_des3_op(struct ablkcipher_request *req, | ||
539 | struct mv_cesa_op_ctx *tmpl) | ||
540 | { | ||
541 | struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
542 | int ret; | ||
543 | |||
544 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES, | ||
545 | CESA_SA_DESC_CFG_CRYPTM_MSK); | ||
546 | |||
547 | memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); | ||
548 | |||
549 | ret = mv_cesa_ablkcipher_req_init(req, tmpl); | ||
550 | if (ret) | ||
551 | return ret; | ||
552 | |||
553 | ret = mv_cesa_queue_req(&req->base); | ||
554 | if (ret && ret != -EINPROGRESS) | ||
555 | mv_cesa_ablkcipher_cleanup(req); | ||
556 | |||
557 | return ret; | ||
558 | } | ||
559 | |||
560 | static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) | ||
561 | { | ||
562 | struct mv_cesa_op_ctx tmpl; | ||
563 | |||
564 | mv_cesa_set_op_cfg(&tmpl, | ||
565 | CESA_SA_DESC_CFG_CRYPTCM_ECB | | ||
566 | CESA_SA_DESC_CFG_3DES_EDE | | ||
567 | CESA_SA_DESC_CFG_DIR_ENC); | ||
568 | |||
569 | return mv_cesa_des3_op(req, &tmpl); | ||
570 | } | ||
571 | |||
572 | static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req) | ||
573 | { | ||
574 | struct mv_cesa_op_ctx tmpl; | ||
575 | |||
576 | mv_cesa_set_op_cfg(&tmpl, | ||
577 | CESA_SA_DESC_CFG_CRYPTCM_ECB | | ||
578 | CESA_SA_DESC_CFG_3DES_EDE | | ||
579 | CESA_SA_DESC_CFG_DIR_DEC); | ||
580 | |||
581 | return mv_cesa_des3_op(req, &tmpl); | ||
582 | } | ||
583 | |||
584 | struct crypto_alg mv_cesa_ecb_des3_ede_alg = { | ||
585 | .cra_name = "ecb(des3_ede)", | ||
586 | .cra_driver_name = "mv-ecb-des3-ede", | ||
587 | .cra_priority = 300, | ||
588 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
589 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
590 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
591 | .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), | ||
592 | .cra_alignmask = 0, | ||
593 | .cra_type = &crypto_ablkcipher_type, | ||
594 | .cra_module = THIS_MODULE, | ||
595 | .cra_init = mv_cesa_ablkcipher_cra_init, | ||
596 | .cra_u = { | ||
597 | .ablkcipher = { | ||
598 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
599 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
600 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
601 | .setkey = mv_cesa_des3_ede_setkey, | ||
602 | .encrypt = mv_cesa_ecb_des3_ede_encrypt, | ||
603 | .decrypt = mv_cesa_ecb_des3_ede_decrypt, | ||
604 | }, | ||
605 | }, | ||
606 | }; | ||
607 | |||
608 | static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req, | ||
609 | struct mv_cesa_op_ctx *tmpl) | ||
610 | { | ||
611 | memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE); | ||
612 | |||
613 | return mv_cesa_des3_op(req, tmpl); | ||
614 | } | ||
615 | |||
616 | static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req) | ||
617 | { | ||
618 | struct mv_cesa_op_ctx tmpl; | ||
619 | |||
620 | mv_cesa_set_op_cfg(&tmpl, | ||
621 | CESA_SA_DESC_CFG_CRYPTCM_CBC | | ||
622 | CESA_SA_DESC_CFG_3DES_EDE | | ||
623 | CESA_SA_DESC_CFG_DIR_ENC); | ||
624 | |||
625 | return mv_cesa_cbc_des3_op(req, &tmpl); | ||
626 | } | ||
627 | |||
628 | static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req) | ||
629 | { | ||
630 | struct mv_cesa_op_ctx tmpl; | ||
631 | |||
632 | mv_cesa_set_op_cfg(&tmpl, | ||
633 | CESA_SA_DESC_CFG_CRYPTCM_CBC | | ||
634 | CESA_SA_DESC_CFG_3DES_EDE | | ||
635 | CESA_SA_DESC_CFG_DIR_DEC); | ||
636 | |||
637 | return mv_cesa_cbc_des3_op(req, &tmpl); | ||
638 | } | ||
639 | |||
640 | struct crypto_alg mv_cesa_cbc_des3_ede_alg = { | ||
641 | .cra_name = "cbc(des3_ede)", | ||
642 | .cra_driver_name = "mv-cbc-des3-ede", | ||
643 | .cra_priority = 300, | ||
644 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
645 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
646 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
647 | .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), | ||
648 | .cra_alignmask = 0, | ||
649 | .cra_type = &crypto_ablkcipher_type, | ||
650 | .cra_module = THIS_MODULE, | ||
651 | .cra_init = mv_cesa_ablkcipher_cra_init, | ||
652 | .cra_u = { | ||
653 | .ablkcipher = { | ||
654 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
655 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
656 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
657 | .setkey = mv_cesa_des3_ede_setkey, | ||
658 | .encrypt = mv_cesa_cbc_des3_ede_encrypt, | ||
659 | .decrypt = mv_cesa_cbc_des3_ede_decrypt, | ||
660 | }, | ||
661 | }, | ||
662 | }; | ||
663 | |||
664 | static int mv_cesa_aes_op(struct ablkcipher_request *req, | ||
665 | struct mv_cesa_op_ctx *tmpl) | ||
666 | { | ||
667 | struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
668 | int ret, i; | ||
669 | u32 *key; | ||
670 | u32 cfg; | ||
671 | |||
672 | cfg = CESA_SA_DESC_CFG_CRYPTM_AES; | ||
673 | |||
674 | if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC) | ||
675 | key = ctx->aes.key_dec; | ||
676 | else | ||
677 | key = ctx->aes.key_enc; | ||
678 | |||
679 | for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++) | ||
680 | tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]); | ||
681 | |||
682 | if (ctx->aes.key_length == 24) | ||
683 | cfg |= CESA_SA_DESC_CFG_AES_LEN_192; | ||
684 | else if (ctx->aes.key_length == 32) | ||
685 | cfg |= CESA_SA_DESC_CFG_AES_LEN_256; | ||
686 | |||
687 | mv_cesa_update_op_cfg(tmpl, cfg, | ||
688 | CESA_SA_DESC_CFG_CRYPTM_MSK | | ||
689 | CESA_SA_DESC_CFG_AES_LEN_MSK); | ||
690 | |||
691 | ret = mv_cesa_ablkcipher_req_init(req, tmpl); | ||
692 | if (ret) | ||
693 | return ret; | ||
694 | |||
695 | ret = mv_cesa_queue_req(&req->base); | ||
696 | if (ret && ret != -EINPROGRESS) | ||
697 | mv_cesa_ablkcipher_cleanup(req); | ||
698 | |||
699 | return ret; | ||
700 | } | ||
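Context (hedged): ctx->aes is a struct crypto_aes_ctx, presumably filled in the driver's setkey path (not in this hunk) by something like

	ret = crypto_aes_expand_key(&ctx->aes, key, keylen);

which derives both the key_enc and key_dec round-key schedules, so the function above only has to pick the schedule matching the direction and tell the engine whether the key is 128 (default), 192 (24-byte) or 256 (32-byte) bits wide.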
701 | |||
702 | static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) | ||
703 | { | ||
704 | struct mv_cesa_op_ctx tmpl; | ||
705 | |||
706 | mv_cesa_set_op_cfg(&tmpl, | ||
707 | CESA_SA_DESC_CFG_CRYPTCM_ECB | | ||
708 | CESA_SA_DESC_CFG_DIR_ENC); | ||
709 | |||
710 | return mv_cesa_aes_op(req, &tmpl); | ||
711 | } | ||
712 | |||
713 | static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req) | ||
714 | { | ||
715 | struct mv_cesa_op_ctx tmpl; | ||
716 | |||
717 | mv_cesa_set_op_cfg(&tmpl, | ||
718 | CESA_SA_DESC_CFG_CRYPTCM_ECB | | ||
719 | CESA_SA_DESC_CFG_DIR_DEC); | ||
720 | |||
721 | return mv_cesa_aes_op(req, &tmpl); | ||
722 | } | ||
723 | |||
724 | struct crypto_alg mv_cesa_ecb_aes_alg = { | ||
725 | .cra_name = "ecb(aes)", | ||
726 | .cra_driver_name = "mv-ecb-aes", | ||
727 | .cra_priority = 300, | ||
728 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
729 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
730 | .cra_blocksize = AES_BLOCK_SIZE, | ||
731 | .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), | ||
732 | .cra_alignmask = 0, | ||
733 | .cra_type = &crypto_ablkcipher_type, | ||
734 | .cra_module = THIS_MODULE, | ||
735 | .cra_init = mv_cesa_ablkcipher_cra_init, | ||
736 | .cra_u = { | ||
737 | .ablkcipher = { | ||
738 | .min_keysize = AES_MIN_KEY_SIZE, | ||
739 | .max_keysize = AES_MAX_KEY_SIZE, | ||
740 | .setkey = mv_cesa_aes_setkey, | ||
741 | .encrypt = mv_cesa_ecb_aes_encrypt, | ||
742 | .decrypt = mv_cesa_ecb_aes_decrypt, | ||
743 | }, | ||
744 | }, | ||
745 | }; | ||
746 | |||
747 | static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req, | ||
748 | struct mv_cesa_op_ctx *tmpl) | ||
749 | { | ||
750 | mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, | ||
751 | CESA_SA_DESC_CFG_CRYPTCM_MSK); | ||
752 | memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE); | ||
753 | |||
754 | return mv_cesa_aes_op(req, tmpl); | ||
755 | } | ||
756 | |||
757 | static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req) | ||
758 | { | ||
759 | struct mv_cesa_op_ctx tmpl; | ||
760 | |||
761 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); | ||
762 | |||
763 | return mv_cesa_cbc_aes_op(req, &tmpl); | ||
764 | } | ||
765 | |||
766 | static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req) | ||
767 | { | ||
768 | struct mv_cesa_op_ctx tmpl; | ||
769 | |||
770 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); | ||
771 | |||
772 | return mv_cesa_cbc_aes_op(req, &tmpl); | ||
773 | } | ||
774 | |||
775 | struct crypto_alg mv_cesa_cbc_aes_alg = { | ||
776 | .cra_name = "cbc(aes)", | ||
777 | .cra_driver_name = "mv-cbc-aes", | ||
778 | .cra_priority = 300, | ||
779 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
780 | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, | ||
781 | .cra_blocksize = AES_BLOCK_SIZE, | ||
782 | .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), | ||
783 | .cra_alignmask = 0, | ||
784 | .cra_type = &crypto_ablkcipher_type, | ||
785 | .cra_module = THIS_MODULE, | ||
786 | .cra_init = mv_cesa_ablkcipher_cra_init, | ||
787 | .cra_u = { | ||
788 | .ablkcipher = { | ||
789 | .min_keysize = AES_MIN_KEY_SIZE, | ||
790 | .max_keysize = AES_MAX_KEY_SIZE, | ||
791 | .ivsize = AES_BLOCK_SIZE, | ||
792 | .setkey = mv_cesa_aes_setkey, | ||
793 | .encrypt = mv_cesa_cbc_aes_encrypt, | ||
794 | .decrypt = mv_cesa_cbc_aes_decrypt, | ||
795 | }, | ||
796 | }, | ||
797 | }; | ||
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c new file mode 100644 index 000000000000..ae9272eb9c1a --- /dev/null +++ b/drivers/crypto/marvell/hash.c | |||
@@ -0,0 +1,1441 @@ | |||
1 | /* | ||
2 | * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256. | ||
3 | * | ||
4 | * Author: Boris Brezillon <boris.brezillon@free-electrons.com> | ||
5 | * Author: Arnaud Ebalard <arno@natisbad.org> | ||
6 | * | ||
7 | * This work is based on an initial version written by | ||
8 | * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published | ||
12 | * by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <crypto/md5.h> | ||
16 | #include <crypto/sha.h> | ||
17 | |||
18 | #include "cesa.h" | ||
19 | |||
20 | struct mv_cesa_ahash_dma_iter { | ||
21 | struct mv_cesa_dma_iter base; | ||
22 | struct mv_cesa_sg_dma_iter src; | ||
23 | }; | ||
24 | |||
25 | static inline void | ||
26 | mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter, | ||
27 | struct ahash_request *req) | ||
28 | { | ||
29 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
30 | unsigned int len = req->nbytes; | ||
31 | |||
32 | if (!creq->last_req) | ||
33 | len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK; | ||
34 | |||
35 | mv_cesa_req_dma_iter_init(&iter->base, len); | ||
36 | mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); | ||
37 | iter->src.op_offset = creq->cache_ptr; | ||
38 | } | ||
39 | |||
40 | static inline bool | ||
41 | mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter) | ||
42 | { | ||
43 | iter->src.op_offset = 0; | ||
44 | |||
45 | return mv_cesa_req_dma_iter_next_op(&iter->base); | ||
46 | } | ||
47 | |||
48 | static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq, | ||
49 | gfp_t flags) | ||
50 | { | ||
51 | struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma; | ||
52 | |||
53 | creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags, | ||
54 | &dreq->cache_dma); | ||
55 | if (!creq->cache) | ||
56 | return -ENOMEM; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq, | ||
62 | gfp_t flags) | ||
63 | { | ||
64 | creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags); | ||
65 | if (!creq->cache) | ||
66 | return -ENOMEM; | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static int mv_cesa_ahash_alloc_cache(struct ahash_request *req) | ||
72 | { | ||
73 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
74 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
75 | GFP_KERNEL : GFP_ATOMIC; | ||
76 | int ret; | ||
77 | |||
78 | if (creq->cache) | ||
79 | return 0; | ||
80 | |||
81 | if (creq->req.base.type == CESA_DMA_REQ) | ||
82 | ret = mv_cesa_ahash_dma_alloc_cache(creq, flags); | ||
83 | else | ||
84 | ret = mv_cesa_ahash_std_alloc_cache(creq, flags); | ||
85 | |||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq) | ||
90 | { | ||
91 | dma_pool_free(cesa_dev->dma->cache_pool, creq->cache, | ||
92 | creq->req.dma.cache_dma); | ||
93 | } | ||
94 | |||
95 | static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq) | ||
96 | { | ||
97 | kfree(creq->cache); | ||
98 | } | ||
99 | |||
100 | static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq) | ||
101 | { | ||
102 | if (!creq->cache) | ||
103 | return; | ||
104 | |||
105 | if (creq->req.base.type == CESA_DMA_REQ) | ||
106 | mv_cesa_ahash_dma_free_cache(creq); | ||
107 | else | ||
108 | mv_cesa_ahash_std_free_cache(creq); | ||
109 | |||
110 | creq->cache = NULL; | ||
111 | } | ||
112 | |||
113 | static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req, | ||
114 | gfp_t flags) | ||
115 | { | ||
116 | if (req->padding) | ||
117 | return 0; | ||
118 | |||
119 | req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags, | ||
120 | &req->padding_dma); | ||
121 | if (!req->padding) | ||
122 | return -ENOMEM; | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req) | ||
128 | { | ||
129 | if (!req->padding) | ||
130 | return; | ||
131 | |||
132 | dma_pool_free(cesa_dev->dma->padding_pool, req->padding, | ||
133 | req->padding_dma); | ||
134 | req->padding = NULL; | ||
135 | } | ||
136 | |||
137 | static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req) | ||
138 | { | ||
139 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
140 | |||
141 | mv_cesa_ahash_dma_free_padding(&creq->req.dma); | ||
142 | } | ||
143 | |||
144 | static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req) | ||
145 | { | ||
146 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
147 | |||
148 | dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); | ||
149 | mv_cesa_dma_cleanup(&creq->req.dma.base); | ||
150 | } | ||
151 | |||
152 | static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) | ||
153 | { | ||
154 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
155 | |||
156 | if (creq->req.base.type == CESA_DMA_REQ) | ||
157 | mv_cesa_ahash_dma_cleanup(req); | ||
158 | } | ||
159 | |||
160 | static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) | ||
161 | { | ||
162 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
163 | |||
164 | mv_cesa_ahash_free_cache(creq); | ||
165 | |||
166 | if (creq->req.base.type == CESA_DMA_REQ) | ||
167 | mv_cesa_ahash_dma_last_cleanup(req); | ||
168 | } | ||
169 | |||
170 | static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq) | ||
171 | { | ||
172 | unsigned int index, padlen; | ||
173 | |||
174 | index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; | ||
175 | padlen = (index < 56) ? (56 - index) : (64 + 56 - index); | ||
176 | |||
177 | return padlen; | ||
178 | } | ||
179 | |||
180 | static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf) | ||
181 | { | ||
182 | __be64 bits = cpu_to_be64(creq->len << 3); | ||
183 | unsigned int index, padlen; | ||
184 | |||
185 | buf[0] = 0x80; | ||
186 | /* Pad out to 56 mod 64 */ | ||
187 | index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; | ||
188 | padlen = mv_cesa_ahash_pad_len(creq); | ||
189 | memset(buf + 1, 0, padlen - 1); | ||
190 | memcpy(buf + padlen, &bits, sizeof(bits)); | ||
191 | |||
192 | return padlen + 8; | ||
193 | } | ||
194 | |||
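Worked example for the two padding helpers above, taking creq->len = 100 bytes:

	index  = 100 & 63 = 36
	padlen = 56 - 36  = 20		(index < 56 branch)

so mv_cesa_ahash_pad_req() emits padlen + 8 = 28 bytes: 0x80, nineteen zero bytes, then the 64-bit bit count (100 << 3), and 100 + 28 = 128 is exactly two 64-byte blocks. One caveat worth noting: the count is stored with cpu_to_be64() for all algorithms, which matches SHA-1/SHA-256, while MD5 as specified expects a little-endian length field.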
195 | static void mv_cesa_ahash_std_step(struct ahash_request *req) | ||
196 | { | ||
197 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
198 | struct mv_cesa_ahash_std_req *sreq = &creq->req.std; | ||
199 | struct mv_cesa_engine *engine = sreq->base.engine; | ||
200 | struct mv_cesa_op_ctx *op; | ||
201 | unsigned int new_cache_ptr = 0; | ||
202 | u32 frag_mode; | ||
203 | size_t len; | ||
204 | |||
205 | if (creq->cache_ptr) | ||
206 | memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache, | ||
207 | creq->cache_ptr); | ||
208 | |||
209 | len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset, | ||
210 | CESA_SA_SRAM_PAYLOAD_SIZE); | ||
211 | |||
212 | if (!creq->last_req) { | ||
213 | new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK; | ||
214 | len &= ~CESA_HASH_BLOCK_SIZE_MSK; | ||
215 | } | ||
216 | |||
217 | if (len - creq->cache_ptr) | ||
218 | sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents, | ||
219 | engine->sram + | ||
220 | CESA_SA_DATA_SRAM_OFFSET + | ||
221 | creq->cache_ptr, | ||
222 | len - creq->cache_ptr, | ||
223 | sreq->offset); | ||
224 | |||
225 | op = &creq->op_tmpl; | ||
226 | |||
227 | frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK; | ||
228 | |||
229 | if (creq->last_req && sreq->offset == req->nbytes && | ||
230 | creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { | ||
231 | if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) | ||
232 | frag_mode = CESA_SA_DESC_CFG_NOT_FRAG; | ||
233 | else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG) | ||
234 | frag_mode = CESA_SA_DESC_CFG_LAST_FRAG; | ||
235 | } | ||
236 | |||
237 | if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG || | ||
238 | frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) { | ||
239 | if (len && | ||
240 | creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { | ||
241 | mv_cesa_set_mac_op_total_len(op, creq->len); | ||
242 | } else { | ||
243 | int trailerlen = mv_cesa_ahash_pad_len(creq) + 8; | ||
244 | |||
245 | if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) { | ||
246 | len &= CESA_HASH_BLOCK_SIZE_MSK; | ||
247 | new_cache_ptr = 64 - trailerlen; | ||
248 | memcpy(creq->cache, | ||
249 | engine->sram + | ||
250 | CESA_SA_DATA_SRAM_OFFSET + len, | ||
251 | new_cache_ptr); | ||
252 | } else { | ||
253 | len += mv_cesa_ahash_pad_req(creq, | ||
254 | engine->sram + len + | ||
255 | CESA_SA_DATA_SRAM_OFFSET); | ||
256 | } | ||
257 | |||
258 | if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) | ||
259 | frag_mode = CESA_SA_DESC_CFG_MID_FRAG; | ||
260 | else | ||
261 | frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | mv_cesa_set_mac_op_frag_len(op, len); | ||
266 | mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK); | ||
267 | |||
268 | /* FIXME: only update enc_len field */ | ||
269 | memcpy(engine->sram, op, sizeof(*op)); | ||
270 | |||
271 | if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) | ||
272 | mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, | ||
273 | CESA_SA_DESC_CFG_FRAG_MSK); | ||
274 | |||
275 | creq->cache_ptr = new_cache_ptr; | ||
276 | |||
277 | mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); | ||
278 | writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); | ||
279 | writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); | ||
280 | } | ||
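/* Flow summary (sketch): each mv_cesa_ahash_std_step() pass stages at
 * most CESA_SA_SRAM_PAYLOAD_SIZE bytes in engine SRAM (cached leftovers
 * first, then fresh scatterlist data), resolves the fragment mode
 * (NOT/FIRST/MID/LAST_FRAG), appends the padding directly in SRAM when
 * the trailer still fits, stashes a trailing partial block back into
 * creq->cache when it does not, and finally kicks accelerator channel 0
 * via CESA_SA_CMD.
 */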
281 | |||
282 | static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) | ||
283 | { | ||
284 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
285 | struct mv_cesa_ahash_std_req *sreq = &creq->req.std; | ||
286 | |||
287 | if (sreq->offset < (req->nbytes - creq->cache_ptr)) | ||
288 | return -EINPROGRESS; | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) | ||
294 | { | ||
295 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
296 | struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; | ||
297 | |||
298 | mv_cesa_dma_prepare(dreq, dreq->base.engine); | ||
299 | } | ||
300 | |||
301 | static void mv_cesa_ahash_std_prepare(struct ahash_request *req) | ||
302 | { | ||
303 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
304 | struct mv_cesa_ahash_std_req *sreq = &creq->req.std; | ||
305 | struct mv_cesa_engine *engine = sreq->base.engine; | ||
306 | |||
307 | sreq->offset = 0; | ||
308 | mv_cesa_adjust_op(engine, &creq->op_tmpl); | ||
309 | memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); | ||
310 | } | ||
311 | |||
312 | static void mv_cesa_ahash_step(struct crypto_async_request *req) | ||
313 | { | ||
314 | struct ahash_request *ahashreq = ahash_request_cast(req); | ||
315 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); | ||
316 | |||
317 | if (creq->req.base.type == CESA_DMA_REQ) | ||
318 | mv_cesa_dma_step(&creq->req.dma.base); | ||
319 | else | ||
320 | mv_cesa_ahash_std_step(ahashreq); | ||
321 | } | ||
322 | |||
323 | static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) | ||
324 | { | ||
325 | struct ahash_request *ahashreq = ahash_request_cast(req); | ||
326 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); | ||
327 | struct mv_cesa_engine *engine = creq->req.base.engine; | ||
328 | unsigned int digsize; | ||
329 | int ret, i; | ||
330 | |||
331 | if (creq->req.base.type == CESA_DMA_REQ) | ||
332 | ret = mv_cesa_dma_process(&creq->req.dma.base, status); | ||
333 | else | ||
334 | ret = mv_cesa_ahash_std_process(ahashreq, status); | ||
335 | |||
336 | if (ret == -EINPROGRESS) | ||
337 | return ret; | ||
338 | |||
339 | digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); | ||
340 | for (i = 0; i < digsize / 4; i++) | ||
341 | creq->state[i] = readl(engine->regs + CESA_IVDIG(i)); | ||
342 | |||
343 | if (creq->cache_ptr) | ||
344 | sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, | ||
345 | creq->cache, | ||
346 | creq->cache_ptr, | ||
347 | ahashreq->nbytes - creq->cache_ptr); | ||
348 | |||
349 | if (creq->last_req) { | ||
350 | for (i = 0; i < digsize / 4; i++) { | ||
351 | /* | ||
352 | * The hardware produces the MD5 digest in a different | ||
353 | * endianness than the SHA-1 and SHA-256 digests. | ||
354 | */ | ||
355 | if (digsize == MD5_DIGEST_SIZE) | ||
356 | creq->state[i] = cpu_to_le32(creq->state[i]); | ||
357 | else | ||
358 | creq->state[i] = cpu_to_be32(creq->state[i]); | ||
359 | } | ||
360 | |||
361 | memcpy(ahashreq->result, creq->state, digsize); | ||
362 | } | ||
363 | |||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | static void mv_cesa_ahash_prepare(struct crypto_async_request *req, | ||
368 | struct mv_cesa_engine *engine) | ||
369 | { | ||
370 | struct ahash_request *ahashreq = ahash_request_cast(req); | ||
371 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); | ||
372 | unsigned int digsize; | ||
373 | int i; | ||
374 | |||
375 | creq->req.base.engine = engine; | ||
376 | |||
377 | if (creq->req.base.type == CESA_DMA_REQ) | ||
378 | mv_cesa_ahash_dma_prepare(ahashreq); | ||
379 | else | ||
380 | mv_cesa_ahash_std_prepare(ahashreq); | ||
381 | |||
382 | digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); | ||
383 | for (i = 0; i < digsize / 4; i++) | ||
384 | writel(creq->state[i], | ||
385 | engine->regs + CESA_IVDIG(i)); | ||
386 | } | ||
387 | |||
388 | static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) | ||
389 | { | ||
390 | struct ahash_request *ahashreq = ahash_request_cast(req); | ||
391 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); | ||
392 | |||
393 | if (creq->last_req) | ||
394 | mv_cesa_ahash_last_cleanup(ahashreq); | ||
395 | |||
396 | mv_cesa_ahash_cleanup(ahashreq); | ||
397 | } | ||
398 | |||
399 | static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { | ||
400 | .step = mv_cesa_ahash_step, | ||
401 | .process = mv_cesa_ahash_process, | ||
402 | .prepare = mv_cesa_ahash_prepare, | ||
403 | .cleanup = mv_cesa_ahash_req_cleanup, | ||
404 | }; | ||
405 | |||
406 | static int mv_cesa_ahash_init(struct ahash_request *req, | ||
407 | struct mv_cesa_op_ctx *tmpl) | ||
408 | { | ||
409 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
410 | |||
411 | memset(creq, 0, sizeof(*creq)); | ||
412 | mv_cesa_update_op_cfg(tmpl, | ||
413 | CESA_SA_DESC_CFG_OP_MAC_ONLY | | ||
414 | CESA_SA_DESC_CFG_FIRST_FRAG, | ||
415 | CESA_SA_DESC_CFG_OP_MSK | | ||
416 | CESA_SA_DESC_CFG_FRAG_MSK); | ||
417 | mv_cesa_set_mac_op_total_len(tmpl, 0); | ||
418 | mv_cesa_set_mac_op_frag_len(tmpl, 0); | ||
419 | creq->op_tmpl = *tmpl; | ||
420 | creq->len = 0; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) | ||
426 | { | ||
427 | struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
428 | |||
429 | ctx->base.ops = &mv_cesa_ahash_req_ops; | ||
430 | |||
431 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
432 | sizeof(struct mv_cesa_ahash_req)); | ||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) | ||
437 | { | ||
438 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
439 | int ret; | ||
440 | |||
441 | if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) && | ||
442 | !creq->last_req) { | ||
443 | ret = mv_cesa_ahash_alloc_cache(req); | ||
444 | if (ret) | ||
445 | return ret; | ||
446 | } | ||
447 | |||
448 | if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) { | ||
449 | *cached = true; | ||
450 | |||
451 | if (!req->nbytes) | ||
452 | return 0; | ||
453 | |||
454 | sg_pcopy_to_buffer(req->src, creq->src_nents, | ||
455 | creq->cache + creq->cache_ptr, | ||
456 | req->nbytes, 0); | ||
457 | |||
458 | creq->cache_ptr += req->nbytes; | ||
459 | } | ||
460 | |||
461 | return 0; | ||
462 | } | ||
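/* Illustrative trace (64-byte block size): three update() calls of 10,
 * 30 and 100 bytes.
 *   update(10):  0 + 10  < 64 -> fully cached, cache_ptr = 10
 *   update(30):  10 + 30 < 64 -> cached again,  cache_ptr = 40
 *   update(100): 40 + 100 = 140 -> 128 bytes go to the engine and the
 *                trailing 12 stay in the cache for the next call.
 */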
463 | |||
464 | static struct mv_cesa_op_ctx * | ||
465 | mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, | ||
466 | struct mv_cesa_ahash_dma_iter *dma_iter, | ||
467 | struct mv_cesa_ahash_req *creq, | ||
468 | gfp_t flags) | ||
469 | { | ||
470 | struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; | ||
471 | struct mv_cesa_op_ctx *op = NULL; | ||
472 | int ret; | ||
473 | |||
474 | if (!creq->cache_ptr) | ||
475 | return NULL; | ||
476 | |||
477 | ret = mv_cesa_dma_add_data_transfer(chain, | ||
478 | CESA_SA_DATA_SRAM_OFFSET, | ||
479 | ahashdreq->cache_dma, | ||
480 | creq->cache_ptr, | ||
481 | CESA_TDMA_DST_IN_SRAM, | ||
482 | flags); | ||
483 | if (ret) | ||
484 | return ERR_PTR(ret); | ||
485 | |||
486 | if (!dma_iter->base.op_len) { | ||
487 | op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); | ||
488 | if (IS_ERR(op)) | ||
489 | return op; | ||
490 | |||
491 | mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr); | ||
492 | |||
493 | /* Add dummy desc to launch crypto operation */ | ||
494 | ret = mv_cesa_dma_add_dummy_launch(chain, flags); | ||
495 | if (ret) | ||
496 | return ERR_PTR(ret); | ||
497 | } | ||
498 | |||
499 | return op; | ||
500 | } | ||
501 | |||
502 | static struct mv_cesa_op_ctx * | ||
503 | mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain, | ||
504 | struct mv_cesa_ahash_dma_iter *dma_iter, | ||
505 | struct mv_cesa_ahash_req *creq, | ||
506 | gfp_t flags) | ||
507 | { | ||
508 | struct mv_cesa_op_ctx *op; | ||
509 | int ret; | ||
510 | |||
511 | op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); | ||
512 | if (IS_ERR(op)) | ||
513 | return op; | ||
514 | |||
515 | mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len); | ||
516 | |||
517 | if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) == | ||
518 | CESA_SA_DESC_CFG_FIRST_FRAG) | ||
519 | mv_cesa_update_op_cfg(&creq->op_tmpl, | ||
520 | CESA_SA_DESC_CFG_MID_FRAG, | ||
521 | CESA_SA_DESC_CFG_FRAG_MSK); | ||
522 | |||
523 | /* Add input transfers */ | ||
524 | ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base, | ||
525 | &dma_iter->src, flags); | ||
526 | if (ret) | ||
527 | return ERR_PTR(ret); | ||
528 | |||
529 | /* Add dummy desc to launch crypto operation */ | ||
530 | ret = mv_cesa_dma_add_dummy_launch(chain, flags); | ||
531 | if (ret) | ||
532 | return ERR_PTR(ret); | ||
533 | |||
534 | return op; | ||
535 | } | ||
536 | |||
537 | static struct mv_cesa_op_ctx * | ||
538 | mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, | ||
539 | struct mv_cesa_ahash_dma_iter *dma_iter, | ||
540 | struct mv_cesa_ahash_req *creq, | ||
541 | struct mv_cesa_op_ctx *op, | ||
542 | gfp_t flags) | ||
543 | { | ||
544 | struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; | ||
545 | unsigned int len, trailerlen, padoff = 0; | ||
546 | int ret; | ||
547 | |||
548 | if (!creq->last_req) | ||
549 | return op; | ||
550 | |||
551 | if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { | ||
552 | u32 frag = CESA_SA_DESC_CFG_NOT_FRAG; | ||
553 | |||
554 | if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) != | ||
555 | CESA_SA_DESC_CFG_FIRST_FRAG) | ||
556 | frag = CESA_SA_DESC_CFG_LAST_FRAG; | ||
557 | |||
558 | mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK); | ||
559 | |||
560 | return op; | ||
561 | } | ||
562 | |||
563 | ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags); | ||
564 | if (ret) | ||
565 | return ERR_PTR(ret); | ||
566 | |||
567 | trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding); | ||
568 | |||
569 | if (op) { | ||
570 | len = min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len, | ||
571 | trailerlen); | ||
572 | if (len) { | ||
573 | ret = mv_cesa_dma_add_data_transfer(chain, | ||
574 | CESA_SA_DATA_SRAM_OFFSET + | ||
575 | dma_iter->base.op_len, | ||
576 | ahashdreq->padding_dma, | ||
577 | len, CESA_TDMA_DST_IN_SRAM, | ||
578 | flags); | ||
579 | if (ret) | ||
580 | return ERR_PTR(ret); | ||
581 | |||
582 | mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, | ||
583 | CESA_SA_DESC_CFG_FRAG_MSK); | ||
584 | mv_cesa_set_mac_op_frag_len(op, | ||
585 | dma_iter->base.op_len + len); | ||
586 | padoff += len; | ||
587 | } | ||
588 | } | ||
589 | |||
590 | if (padoff >= trailerlen) | ||
591 | return op; | ||
592 | |||
593 | if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) != | ||
594 | CESA_SA_DESC_CFG_FIRST_FRAG) | ||
595 | mv_cesa_update_op_cfg(&creq->op_tmpl, | ||
596 | CESA_SA_DESC_CFG_MID_FRAG, | ||
597 | CESA_SA_DESC_CFG_FRAG_MSK); | ||
598 | |||
599 | op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); | ||
600 | if (IS_ERR(op)) | ||
601 | return op; | ||
602 | |||
603 | mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff); | ||
604 | |||
605 | ret = mv_cesa_dma_add_data_transfer(chain, | ||
606 | CESA_SA_DATA_SRAM_OFFSET, | ||
607 | ahashdreq->padding_dma + | ||
608 | padoff, | ||
609 | trailerlen - padoff, | ||
610 | CESA_TDMA_DST_IN_SRAM, | ||
611 | flags); | ||
612 | if (ret) | ||
613 | return ERR_PTR(ret); | ||
614 | |||
615 | /* Add dummy desc to launch crypto operation */ | ||
616 | ret = mv_cesa_dma_add_dummy_launch(chain, flags); | ||
617 | if (ret) | ||
618 | return ERR_PTR(ret); | ||
619 | |||
620 | return op; | ||
621 | } | ||
622 | |||
623 | static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) | ||
624 | { | ||
625 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
626 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
627 | GFP_KERNEL : GFP_ATOMIC; | ||
628 | struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; | ||
629 | struct mv_cesa_tdma_req *dreq = &ahashdreq->base; | ||
630 | struct mv_cesa_tdma_chain chain; | ||
631 | struct mv_cesa_ahash_dma_iter iter; | ||
632 | struct mv_cesa_op_ctx *op = NULL; | ||
633 | int ret; | ||
634 | |||
635 | dreq->chain.first = NULL; | ||
636 | dreq->chain.last = NULL; | ||
637 | |||
638 | if (creq->src_nents) { | ||
639 | ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, | ||
640 | DMA_TO_DEVICE); | ||
641 | if (!ret) { | ||
642 | ret = -ENOMEM; | ||
643 | goto err; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | mv_cesa_tdma_desc_iter_init(&chain); | ||
648 | mv_cesa_ahash_req_iter_init(&iter, req); | ||
649 | |||
650 | op = mv_cesa_ahash_dma_add_cache(&chain, &iter, | ||
651 | creq, flags); | ||
652 | if (IS_ERR(op)) { | ||
653 | ret = PTR_ERR(op); | ||
654 | goto err_free_tdma; | ||
655 | } | ||
656 | |||
657 | do { | ||
658 | if (!iter.base.op_len) | ||
659 | break; | ||
660 | |||
661 | op = mv_cesa_ahash_dma_add_data(&chain, &iter, | ||
662 | creq, flags); | ||
663 | if (IS_ERR(op)) { | ||
664 | ret = PTR_ERR(op); | ||
665 | goto err_free_tdma; | ||
666 | } | ||
667 | } while (mv_cesa_ahash_req_iter_next_op(&iter)); | ||
668 | |||
669 | op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags); | ||
670 | if (IS_ERR(op)) { | ||
671 | ret = PTR_ERR(op); | ||
672 | goto err_free_tdma; | ||
673 | } | ||
674 | |||
675 | if (op) { | ||
676 | /* Add dummy desc to wait for crypto operation end */ | ||
677 | ret = mv_cesa_dma_add_dummy_end(&chain, flags); | ||
678 | if (ret) | ||
679 | goto err_free_tdma; | ||
680 | } | ||
681 | |||
682 | if (!creq->last_req) | ||
683 | creq->cache_ptr = req->nbytes + creq->cache_ptr - | ||
684 | iter.base.len; | ||
685 | else | ||
686 | creq->cache_ptr = 0; | ||
687 | |||
688 | dreq->chain = chain; | ||
689 | |||
690 | return 0; | ||
691 | |||
692 | err_free_tdma: | ||
693 | mv_cesa_dma_cleanup(dreq); | ||
694 | dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); | ||
695 | |||
696 | err: | ||
697 | mv_cesa_ahash_last_cleanup(req); | ||
698 | |||
699 | return ret; | ||
700 | } | ||
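/* Chain built above, in order: an optional cache->SRAM transfer, one
 * {op descriptor, data transfers, dummy launch} group per SRAM-sized
 * chunk, an optional padding group for the final request, and a dummy
 * end descriptor used to detect completion. Errors unwind through
 * err_free_tdma, freeing the TDMA descriptors and unmapping req->src.
 */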
701 | |||
702 | static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) | ||
703 | { | ||
704 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
705 | int ret; | ||
706 | |||
707 | if (cesa_dev->caps->has_tdma) | ||
708 | creq->req.base.type = CESA_DMA_REQ; | ||
709 | else | ||
710 | creq->req.base.type = CESA_STD_REQ; | ||
711 | |||
712 | creq->src_nents = sg_nents_for_len(req->src, req->nbytes); | ||
713 | |||
714 | ret = mv_cesa_ahash_cache_req(req, cached); | ||
715 | if (ret) | ||
716 | return ret; | ||
717 | |||
718 | if (*cached) | ||
719 | return 0; | ||
720 | |||
721 | if (creq->req.base.type == CESA_DMA_REQ) | ||
722 | ret = mv_cesa_ahash_dma_req_init(req); | ||
723 | |||
724 | return ret; | ||
725 | } | ||
726 | |||
727 | static int mv_cesa_ahash_update(struct ahash_request *req) | ||
728 | { | ||
729 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
730 | bool cached = false; | ||
731 | int ret; | ||
732 | |||
733 | creq->len += req->nbytes; | ||
734 | ret = mv_cesa_ahash_req_init(req, &cached); | ||
735 | if (ret) | ||
736 | return ret; | ||
737 | |||
738 | if (cached) | ||
739 | return 0; | ||
740 | |||
741 | ret = mv_cesa_queue_req(&req->base); | ||
742 | if (ret && ret != -EINPROGRESS) { | ||
743 | mv_cesa_ahash_cleanup(req); | ||
744 | return ret; | ||
745 | } | ||
746 | |||
747 | return ret; | ||
748 | } | ||
749 | |||
750 | static int mv_cesa_ahash_final(struct ahash_request *req) | ||
751 | { | ||
752 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
753 | struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; | ||
754 | bool cached = false; | ||
755 | int ret; | ||
756 | |||
757 | mv_cesa_set_mac_op_total_len(tmpl, creq->len); | ||
758 | creq->last_req = true; | ||
759 | req->nbytes = 0; | ||
760 | |||
761 | ret = mv_cesa_ahash_req_init(req, &cached); | ||
762 | if (ret) | ||
763 | return ret; | ||
764 | |||
765 | if (cached) | ||
766 | return 0; | ||
767 | |||
768 | ret = mv_cesa_queue_req(&req->base); | ||
769 | if (ret && ret != -EINPROGRESS) | ||
770 | mv_cesa_ahash_cleanup(req); | ||
771 | |||
772 | return ret; | ||
773 | } | ||
774 | |||
775 | static int mv_cesa_ahash_finup(struct ahash_request *req) | ||
776 | { | ||
777 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
778 | struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; | ||
779 | bool cached = false; | ||
780 | int ret; | ||
781 | |||
782 | creq->len += req->nbytes; | ||
783 | mv_cesa_set_mac_op_total_len(tmpl, creq->len); | ||
784 | creq->last_req = true; | ||
785 | |||
786 | ret = mv_cesa_ahash_req_init(req, &cached); | ||
787 | if (ret) | ||
788 | return ret; | ||
789 | |||
790 | if (cached) | ||
791 | return 0; | ||
792 | |||
793 | ret = mv_cesa_queue_req(&req->base); | ||
794 | if (ret && ret != -EINPROGRESS) | ||
795 | mv_cesa_ahash_cleanup(req); | ||
796 | |||
797 | return ret; | ||
798 | } | ||
799 | |||
800 | static int mv_cesa_md5_init(struct ahash_request *req) | ||
801 | { | ||
802 | struct mv_cesa_op_ctx tmpl; | ||
803 | |||
804 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); | ||
805 | |||
806 | mv_cesa_ahash_init(req, &tmpl); | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | static int mv_cesa_md5_export(struct ahash_request *req, void *out) | ||
812 | { | ||
813 | struct md5_state *out_state = out; | ||
814 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
815 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
816 | unsigned int digsize = crypto_ahash_digestsize(ahash); | ||
817 | |||
818 | out_state->byte_count = creq->len; | ||
819 | memcpy(out_state->hash, creq->state, digsize); | ||
820 | memset(out_state->block, 0, sizeof(out_state->block)); | ||
821 | if (creq->cache) | ||
822 | memcpy(out_state->block, creq->cache, creq->cache_ptr); | ||
823 | |||
824 | return 0; | ||
825 | } | ||
826 | |||
827 | static int mv_cesa_md5_import(struct ahash_request *req, const void *in) | ||
828 | { | ||
829 | const struct md5_state *in_state = in; | ||
830 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
831 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
832 | unsigned int digsize = crypto_ahash_digestsize(ahash); | ||
833 | unsigned int cache_ptr; | ||
834 | int ret; | ||
835 | |||
836 | creq->len = in_state->byte_count; | ||
837 | memcpy(creq->state, in_state->hash, digsize); | ||
838 | creq->cache_ptr = 0; | ||
839 | |||
840 | cache_ptr = creq->len % sizeof(in_state->block); | ||
841 | if (!cache_ptr) | ||
842 | return 0; | ||
843 | |||
844 | ret = mv_cesa_ahash_alloc_cache(req); | ||
845 | if (ret) | ||
846 | return ret; | ||
847 | |||
848 | memcpy(creq->cache, in_state->block, cache_ptr); | ||
849 | creq->cache_ptr = cache_ptr; | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | static int mv_cesa_md5_digest(struct ahash_request *req) | ||
855 | { | ||
856 | int ret; | ||
857 | |||
858 | ret = mv_cesa_md5_init(req); | ||
859 | if (ret) | ||
860 | return ret; | ||
861 | |||
862 | return mv_cesa_ahash_finup(req); | ||
863 | } | ||
864 | |||
865 | struct ahash_alg mv_md5_alg = { | ||
866 | .init = mv_cesa_md5_init, | ||
867 | .update = mv_cesa_ahash_update, | ||
868 | .final = mv_cesa_ahash_final, | ||
869 | .finup = mv_cesa_ahash_finup, | ||
870 | .digest = mv_cesa_md5_digest, | ||
871 | .export = mv_cesa_md5_export, | ||
872 | .import = mv_cesa_md5_import, | ||
873 | .halg = { | ||
874 | .digestsize = MD5_DIGEST_SIZE, | ||
875 | .base = { | ||
876 | .cra_name = "md5", | ||
877 | .cra_driver_name = "mv-md5", | ||
878 | .cra_priority = 300, | ||
879 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
880 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
881 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | ||
882 | .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), | ||
883 | .cra_init = mv_cesa_ahash_cra_init, | ||
884 | .cra_module = THIS_MODULE, | ||
885 | } | ||
886 | } | ||
887 | }; | ||
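A matching caller-side sketch for this ahash alg; 'data' and 'len' are assumed placeholders and, as the CRYPTO_ALG_ASYNC flag implies, completion handling (elided here) follows the same pattern as mv_cesa_ahash_result further down in this file:

	struct crypto_ahash *tfm = crypto_alloc_ahash("md5", 0, 0);
	struct ahash_request *req;
	struct scatterlist sg;
	u8 digest[MD5_DIGEST_SIZE];
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);
	ret = crypto_ahash_digest(req);	/* -EINPROGRESS when queued */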
888 | |||
889 | static int mv_cesa_sha1_init(struct ahash_request *req) | ||
890 | { | ||
891 | struct mv_cesa_op_ctx tmpl; | ||
892 | |||
893 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); | ||
894 | |||
895 | mv_cesa_ahash_init(req, &tmpl); | ||
896 | |||
897 | return 0; | ||
898 | } | ||
899 | |||
900 | static int mv_cesa_sha1_export(struct ahash_request *req, void *out) | ||
901 | { | ||
902 | struct sha1_state *out_state = out; | ||
903 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
904 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
905 | unsigned int digsize = crypto_ahash_digestsize(ahash); | ||
906 | |||
907 | out_state->count = creq->len; | ||
908 | memcpy(out_state->state, creq->state, digsize); | ||
909 | memset(out_state->buffer, 0, sizeof(out_state->buffer)); | ||
910 | if (creq->cache) | ||
911 | memcpy(out_state->buffer, creq->cache, creq->cache_ptr); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static int mv_cesa_sha1_import(struct ahash_request *req, const void *in) | ||
917 | { | ||
918 | const struct sha1_state *in_state = in; | ||
919 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
920 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
921 | unsigned int digsize = crypto_ahash_digestsize(ahash); | ||
922 | unsigned int cache_ptr; | ||
923 | int ret; | ||
924 | |||
925 | creq->len = in_state->count; | ||
926 | memcpy(creq->state, in_state->state, digsize); | ||
927 | creq->cache_ptr = 0; | ||
928 | |||
929 | cache_ptr = creq->len % SHA1_BLOCK_SIZE; | ||
930 | if (!cache_ptr) | ||
931 | return 0; | ||
932 | |||
933 | ret = mv_cesa_ahash_alloc_cache(req); | ||
934 | if (ret) | ||
935 | return ret; | ||
936 | |||
937 | memcpy(creq->cache, in_state->buffer, cache_ptr); | ||
938 | creq->cache_ptr = cache_ptr; | ||
939 | |||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | static int mv_cesa_sha1_digest(struct ahash_request *req) | ||
944 | { | ||
945 | int ret; | ||
946 | |||
947 | ret = mv_cesa_sha1_init(req); | ||
948 | if (ret) | ||
949 | return ret; | ||
950 | |||
951 | return mv_cesa_ahash_finup(req); | ||
952 | } | ||
953 | |||
954 | struct ahash_alg mv_sha1_alg = { | ||
955 | .init = mv_cesa_sha1_init, | ||
956 | .update = mv_cesa_ahash_update, | ||
957 | .final = mv_cesa_ahash_final, | ||
958 | .finup = mv_cesa_ahash_finup, | ||
959 | .digest = mv_cesa_sha1_digest, | ||
960 | .export = mv_cesa_sha1_export, | ||
961 | .import = mv_cesa_sha1_import, | ||
962 | .halg = { | ||
963 | .digestsize = SHA1_DIGEST_SIZE, | ||
964 | .base = { | ||
965 | .cra_name = "sha1", | ||
966 | .cra_driver_name = "mv-sha1", | ||
967 | .cra_priority = 300, | ||
968 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
969 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
970 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
971 | .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), | ||
972 | .cra_init = mv_cesa_ahash_cra_init, | ||
973 | .cra_module = THIS_MODULE, | ||
974 | } | ||
975 | } | ||
976 | }; | ||
977 | |||
978 | static int mv_cesa_sha256_init(struct ahash_request *req) | ||
979 | { | ||
980 | struct mv_cesa_op_ctx tmpl; | ||
981 | |||
982 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); | ||
983 | |||
984 | mv_cesa_ahash_init(req, &tmpl); | ||
985 | |||
986 | return 0; | ||
987 | } | ||
988 | |||
989 | static int mv_cesa_sha256_digest(struct ahash_request *req) | ||
990 | { | ||
991 | int ret; | ||
992 | |||
993 | ret = mv_cesa_sha256_init(req); | ||
994 | if (ret) | ||
995 | return ret; | ||
996 | |||
997 | return mv_cesa_ahash_finup(req); | ||
998 | } | ||
999 | |||
1000 | static int mv_cesa_sha256_export(struct ahash_request *req, void *out) | ||
1001 | { | ||
1002 | struct sha256_state *out_state = out; | ||
1003 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1004 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
1005 | unsigned int ds = crypto_ahash_digestsize(ahash); | ||
1006 | |||
1007 | out_state->count = creq->len; | ||
1008 | memcpy(out_state->state, creq->state, ds); | ||
1009 | memset(out_state->buf, 0, sizeof(out_state->buf)); | ||
1010 | if (creq->cache) | ||
1011 | memcpy(out_state->buf, creq->cache, creq->cache_ptr); | ||
1012 | |||
1013 | return 0; | ||
1014 | } | ||
1015 | |||
1016 | static int mv_cesa_sha256_import(struct ahash_request *req, const void *in) | ||
1017 | { | ||
1018 | const struct sha256_state *in_state = in; | ||
1019 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | ||
1020 | struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); | ||
1021 | unsigned int digsize = crypto_ahash_digestsize(ahash); | ||
1022 | unsigned int cache_ptr; | ||
1023 | int ret; | ||
1024 | |||
1025 | creq->len = in_state->count; | ||
1026 | memcpy(creq->state, in_state->state, digsize); | ||
1027 | creq->cache_ptr = 0; | ||
1028 | |||
1029 | cache_ptr = creq->len % SHA256_BLOCK_SIZE; | ||
1030 | if (!cache_ptr) | ||
1031 | return 0; | ||
1032 | |||
1033 | ret = mv_cesa_ahash_alloc_cache(req); | ||
1034 | if (ret) | ||
1035 | return ret; | ||
1036 | |||
1037 | memcpy(creq->cache, in_state->buf, cache_ptr); | ||
1038 | creq->cache_ptr = cache_ptr; | ||
1039 | |||
1040 | return 0; | ||
1041 | } | ||
1042 | |||
1043 | struct ahash_alg mv_sha256_alg = { | ||
1044 | .init = mv_cesa_sha256_init, | ||
1045 | .update = mv_cesa_ahash_update, | ||
1046 | .final = mv_cesa_ahash_final, | ||
1047 | .finup = mv_cesa_ahash_finup, | ||
1048 | .digest = mv_cesa_sha256_digest, | ||
1049 | .export = mv_cesa_sha256_export, | ||
1050 | .import = mv_cesa_sha256_import, | ||
1051 | .halg = { | ||
1052 | .digestsize = SHA256_DIGEST_SIZE, | ||
1053 | .base = { | ||
1054 | .cra_name = "sha256", | ||
1055 | .cra_driver_name = "mv-sha256", | ||
1056 | .cra_priority = 300, | ||
1057 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1058 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1059 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1060 | .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), | ||
1061 | .cra_init = mv_cesa_ahash_cra_init, | ||
1062 | .cra_module = THIS_MODULE, | ||
1063 | } | ||
1064 | } | ||
1065 | }; | ||
1066 | |||
1067 | struct mv_cesa_ahash_result { | ||
1068 | struct completion completion; | ||
1069 | int error; | ||
1070 | }; | ||
1071 | |||
1072 | static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req, | ||
1073 | int error) | ||
1074 | { | ||
1075 | struct mv_cesa_ahash_result *result = req->data; | ||
1076 | |||
1077 | if (error == -EINPROGRESS) | ||
1078 | return; | ||
1079 | |||
1080 | result->error = error; | ||
1081 | complete(&result->completion); | ||
1082 | } | ||
1083 | |||
1084 | static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad, | ||
1085 | void *state, unsigned int blocksize) | ||
1086 | { | ||
1087 | struct mv_cesa_ahash_result result; | ||
1088 | struct scatterlist sg; | ||
1089 | int ret; | ||
1090 | |||
1091 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
1092 | mv_cesa_hmac_ahash_complete, &result); | ||
1093 | sg_init_one(&sg, pad, blocksize); | ||
1094 | ahash_request_set_crypt(req, &sg, pad, blocksize); | ||
1095 | init_completion(&result.completion); | ||
1096 | |||
1097 | ret = crypto_ahash_init(req); | ||
1098 | if (ret) | ||
1099 | return ret; | ||
1100 | |||
1101 | ret = crypto_ahash_update(req); | ||
1102 | if (ret && ret != -EINPROGRESS) | ||
1103 | return ret; | ||
1104 | |||
1105 | wait_for_completion_interruptible(&result.completion); | ||
1106 | if (result.error) | ||
1107 | return result.error; | ||
1108 | |||
1109 | ret = crypto_ahash_export(req, state); | ||
1110 | if (ret) | ||
1111 | return ret; | ||
1112 | |||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1116 | static int mv_cesa_ahmac_pad_init(struct ahash_request *req, | ||
1117 | const u8 *key, unsigned int keylen, | ||
1118 | u8 *ipad, u8 *opad, | ||
1119 | unsigned int blocksize) | ||
1120 | { | ||
1121 | struct mv_cesa_ahash_result result; | ||
1122 | struct scatterlist sg; | ||
1123 | int ret; | ||
1124 | int i; | ||
1125 | |||
1126 | if (keylen <= blocksize) { | ||
1127 | memcpy(ipad, key, keylen); | ||
1128 | } else { | ||
1129 | u8 *keydup = kmemdup(key, keylen, GFP_KERNEL); | ||
1130 | |||
1131 | if (!keydup) | ||
1132 | return -ENOMEM; | ||
1133 | |||
1134 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
1135 | mv_cesa_hmac_ahash_complete, | ||
1136 | &result); | ||
1137 | sg_init_one(&sg, keydup, keylen); | ||
1138 | ahash_request_set_crypt(req, &sg, ipad, keylen); | ||
1139 | init_completion(&result.completion); | ||
1140 | |||
1141 | ret = crypto_ahash_digest(req); | ||
1142 | if (ret == -EINPROGRESS) { | ||
1143 | wait_for_completion_interruptible(&result.completion); | ||
1144 | ret = result.error; | ||
1145 | } | ||
1146 | |||
1147 | /* Zero the key copy so no key material leaks. */ | ||
1148 | memset(keydup, 0, keylen); | ||
1149 | kfree(keydup); | ||
1150 | |||
1151 | if (ret) | ||
1152 | return ret; | ||
1153 | |||
1154 | keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); | ||
1155 | } | ||
1156 | |||
1157 | memset(ipad + keylen, 0, blocksize - keylen); | ||
1158 | memcpy(opad, ipad, blocksize); | ||
1159 | |||
1160 | for (i = 0; i < blocksize; i++) { | ||
1161 | ipad[i] ^= 0x36; | ||
1162 | opad[i] ^= 0x5c; | ||
1163 | } | ||
1164 | |||
1165 | return 0; | ||
1166 | } | ||
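RFC 2104 recap for the pads built above:

	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))

mv_cesa_ahmac_setkey() below runs one block of K^ipad and one of K^opad through a fallback ahash via mv_cesa_ahmac_iv_state_init() and exports the two intermediate states, so the engine can later resume from precomputed state instead of rehashing the key on every request.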
1167 | |||
1168 | static int mv_cesa_ahmac_setkey(const char *hash_alg_name, | ||
1169 | const u8 *key, unsigned int keylen, | ||
1170 | void *istate, void *ostate) | ||
1171 | { | ||
1172 | struct ahash_request *req; | ||
1173 | struct crypto_ahash *tfm; | ||
1174 | unsigned int blocksize; | ||
1175 | u8 *ipad = NULL; | ||
1176 | u8 *opad; | ||
1177 | int ret; | ||
1178 | |||
1179 | tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH, | ||
1180 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
1181 | if (IS_ERR(tfm)) | ||
1182 | return PTR_ERR(tfm); | ||
1183 | |||
1184 | req = ahash_request_alloc(tfm, GFP_KERNEL); | ||
1185 | if (!req) { | ||
1186 | ret = -ENOMEM; | ||
1187 | goto free_ahash; | ||
1188 | } | ||
1189 | |||
1190 | crypto_ahash_clear_flags(tfm, ~0); | ||
1191 | |||
1192 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
1193 | |||
1194 | ipad = kzalloc(2 * blocksize, GFP_KERNEL); | ||
1195 | if (!ipad) { | ||
1196 | ret = -ENOMEM; | ||
1197 | goto free_req; | ||
1198 | } | ||
1199 | |||
1200 | opad = ipad + blocksize; | ||
1201 | |||
1202 | ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize); | ||
1203 | if (ret) | ||
1204 | goto free_ipad; | ||
1205 | |||
1206 | ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize); | ||
1207 | if (ret) | ||
1208 | goto free_ipad; | ||
1209 | |||
1210 | ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize); | ||
1211 | |||
1212 | free_ipad: | ||
1213 | kfree(ipad); | ||
1214 | free_req: | ||
1215 | ahash_request_free(req); | ||
1216 | free_ahash: | ||
1217 | crypto_free_ahash(tfm); | ||
1218 | |||
1219 | return ret; | ||
1220 | } | ||
1221 | |||
1222 | static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm) | ||
1223 | { | ||
1224 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1225 | |||
1226 | ctx->base.ops = &mv_cesa_ahash_req_ops; | ||
1227 | |||
1228 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1229 | sizeof(struct mv_cesa_ahash_req)); | ||
1230 | return 0; | ||
1231 | } | ||
1232 | |||
1233 | static int mv_cesa_ahmac_md5_init(struct ahash_request *req) | ||
1234 | { | ||
1235 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
1236 | struct mv_cesa_op_ctx tmpl; | ||
1237 | |||
1238 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5); | ||
1239 | memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); | ||
1240 | |||
1241 | mv_cesa_ahash_init(req, &tmpl); | ||
1242 | |||
1243 | return 0; | ||
1244 | } | ||
1245 | |||
1246 | static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
1247 | unsigned int keylen) | ||
1248 | { | ||
1249 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | ||
1250 | struct md5_state istate, ostate; | ||
1251 | int ret, i; | ||
1252 | |||
1253 | ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate); | ||
1254 | if (ret) | ||
1255 | return ret; | ||
1256 | |||
1257 | for (i = 0; i < ARRAY_SIZE(istate.hash); i++) | ||
1258 | ctx->iv[i] = be32_to_cpu(istate.hash[i]); | ||
1259 | |||
1260 | for (i = 0; i < ARRAY_SIZE(ostate.hash); i++) | ||
1261 | ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]); | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | static int mv_cesa_ahmac_md5_digest(struct ahash_request *req) | ||
1267 | { | ||
1268 | int ret; | ||
1269 | |||
1270 | ret = mv_cesa_ahmac_md5_init(req); | ||
1271 | if (ret) | ||
1272 | return ret; | ||
1273 | |||
1274 | return mv_cesa_ahash_finup(req); | ||
1275 | } | ||
1276 | |||
1277 | struct ahash_alg mv_ahmac_md5_alg = { | ||
1278 | .init = mv_cesa_ahmac_md5_init, | ||
1279 | .update = mv_cesa_ahash_update, | ||
1280 | .final = mv_cesa_ahash_final, | ||
1281 | .finup = mv_cesa_ahash_finup, | ||
1282 | .digest = mv_cesa_ahmac_md5_digest, | ||
1283 | .setkey = mv_cesa_ahmac_md5_setkey, | ||
1284 | .export = mv_cesa_md5_export, | ||
1285 | .import = mv_cesa_md5_import, | ||
1286 | .halg = { | ||
1287 | .digestsize = MD5_DIGEST_SIZE, | ||
1288 | .statesize = sizeof(struct md5_state), | ||
1289 | .base = { | ||
1290 | .cra_name = "hmac(md5)", | ||
1291 | .cra_driver_name = "mv-hmac-md5", | ||
1292 | .cra_priority = 300, | ||
1293 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1294 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1295 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | ||
1296 | .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), | ||
1297 | .cra_init = mv_cesa_ahmac_cra_init, | ||
1298 | .cra_module = THIS_MODULE, | ||
1299 | } | ||
1300 | } | ||
1301 | }; | ||
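Using "hmac(md5)" only adds a key step to the "md5" sketch shown after mv_md5_alg above (same assumed placeholders):

	tfm = crypto_alloc_ahash("hmac(md5)", 0, 0);
	crypto_ahash_setkey(tfm, key, keylen);	/* mv_cesa_ahmac_md5_setkey */
	ret = crypto_ahash_digest(req);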
1302 | |||
1303 | static int mv_cesa_ahmac_sha1_init(struct ahash_request *req) | ||
1304 | { | ||
1305 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
1306 | struct mv_cesa_op_ctx tmpl; | ||
1307 | |||
1308 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1); | ||
1309 | memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); | ||
1310 | |||
1311 | mv_cesa_ahash_init(req, &tmpl); | ||
1312 | |||
1313 | return 0; | ||
1314 | } | ||
1315 | |||
1316 | static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
1317 | unsigned int keylen) | ||
1318 | { | ||
1319 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | ||
1320 | struct sha1_state istate, ostate; | ||
1321 | int ret, i; | ||
1322 | |||
1323 | ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate); | ||
1324 | if (ret) | ||
1325 | return ret; | ||
1326 | |||
1327 | for (i = 0; i < ARRAY_SIZE(istate.state); i++) | ||
1328 | ctx->iv[i] = be32_to_cpu(istate.state[i]); | ||
1329 | |||
1330 | for (i = 0; i < ARRAY_SIZE(ostate.state); i++) | ||
1331 | ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]); | ||
1332 | |||
1333 | return 0; | ||
1334 | } | ||
1335 | |||
1336 | static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req) | ||
1337 | { | ||
1338 | int ret; | ||
1339 | |||
1340 | ret = mv_cesa_ahmac_sha1_init(req); | ||
1341 | if (ret) | ||
1342 | return ret; | ||
1343 | |||
1344 | return mv_cesa_ahash_finup(req); | ||
1345 | } | ||
1346 | |||
1347 | struct ahash_alg mv_ahmac_sha1_alg = { | ||
1348 | .init = mv_cesa_ahmac_sha1_init, | ||
1349 | .update = mv_cesa_ahash_update, | ||
1350 | .final = mv_cesa_ahash_final, | ||
1351 | .finup = mv_cesa_ahash_finup, | ||
1352 | .digest = mv_cesa_ahmac_sha1_digest, | ||
1353 | .setkey = mv_cesa_ahmac_sha1_setkey, | ||
1354 | .export = mv_cesa_sha1_export, | ||
1355 | .import = mv_cesa_sha1_import, | ||
1356 | .halg = { | ||
1357 | .digestsize = SHA1_DIGEST_SIZE, | ||
1358 | .statesize = sizeof(struct sha1_state), | ||
1359 | .base = { | ||
1360 | .cra_name = "hmac(sha1)", | ||
1361 | .cra_driver_name = "mv-hmac-sha1", | ||
1362 | .cra_priority = 300, | ||
1363 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1364 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1365 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1366 | .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), | ||
1367 | .cra_init = mv_cesa_ahmac_cra_init, | ||
1368 | .cra_module = THIS_MODULE, | ||
1369 | } | ||
1370 | } | ||
1371 | }; | ||
1372 | |||
1373 | static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
1374 | unsigned int keylen) | ||
1375 | { | ||
1376 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | ||
1377 | struct sha256_state istate, ostate; | ||
1378 | int ret, i; | ||
1379 | |||
1380 | ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate); | ||
1381 | if (ret) | ||
1382 | return ret; | ||
1383 | |||
1384 | for (i = 0; i < ARRAY_SIZE(istate.state); i++) | ||
1385 | ctx->iv[i] = be32_to_cpu(istate.state[i]); | ||
1386 | |||
1387 | for (i = 0; i < ARRAY_SIZE(ostate.state); i++) | ||
1388 | ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]); | ||
1389 | |||
1390 | return 0; | ||
1391 | } | ||
1392 | |||
1393 | static int mv_cesa_ahmac_sha256_init(struct ahash_request *req) | ||
1394 | { | ||
1395 | struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
1396 | struct mv_cesa_op_ctx tmpl; | ||
1397 | |||
1398 | mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256); | ||
1399 | memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); | ||
1400 | |||
1401 | mv_cesa_ahash_init(req, &tmpl); | ||
1402 | |||
1403 | return 0; | ||
1404 | } | ||
1405 | |||
1406 | static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req) | ||
1407 | { | ||
1408 | int ret; | ||
1409 | |||
1410 | ret = mv_cesa_ahmac_sha256_init(req); | ||
1411 | if (ret) | ||
1412 | return ret; | ||
1413 | |||
1414 | return mv_cesa_ahash_finup(req); | ||
1415 | } | ||
1416 | |||
1417 | struct ahash_alg mv_ahmac_sha256_alg = { | ||
1418 | .init = mv_cesa_ahmac_sha256_init, | ||
1419 | .update = mv_cesa_ahash_update, | ||
1420 | .final = mv_cesa_ahash_final, | ||
1421 | .finup = mv_cesa_ahash_finup, | ||
1422 | .digest = mv_cesa_ahmac_sha256_digest, | ||
1423 | .setkey = mv_cesa_ahmac_sha256_setkey, | ||
1424 | .export = mv_cesa_sha256_export, | ||
1425 | .import = mv_cesa_sha256_import, | ||
1426 | .halg = { | ||
1427 | .digestsize = SHA256_DIGEST_SIZE, | ||
1428 | .statesize = sizeof(struct sha256_state), | ||
1429 | .base = { | ||
1430 | .cra_name = "hmac(sha256)", | ||
1431 | .cra_driver_name = "mv-hmac-sha256", | ||
1432 | .cra_priority = 300, | ||
1433 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1434 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1435 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
1436 | .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), | ||
1437 | .cra_init = mv_cesa_ahmac_cra_init, | ||
1438 | .cra_module = THIS_MODULE, | ||
1439 | } | ||
1440 | } | ||
1441 | }; | ||
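These HMAC transformations are exposed as ordinary ahash algorithms, so consumers go through the generic crypto API rather than anything driver-specific. A minimal sketch of a synchronous caller for "hmac(sha256)" (illustrative only — hmac_sha256_digest, hmac_result and hmac_complete are hypothetical names, error paths are trimmed to the essentials, and msg/out must be DMA-addressable, i.e. not on the stack):

    #include <crypto/hash.h>
    #include <linux/scatterlist.h>
    #include <linux/completion.h>

    struct hmac_result {
            struct completion completion;
            int err;
    };

    static void hmac_complete(struct crypto_async_request *req, int err)
    {
            struct hmac_result *res = req->data;

            if (err == -EINPROGRESS)
                    return;         /* backlogged request started; keep waiting */
            res->err = err;
            complete(&res->completion);
    }

    static int hmac_sha256_digest(const u8 *key, unsigned int keylen,
                                  const u8 *msg, unsigned int len, u8 *out)
    {
            struct crypto_ahash *tfm;
            struct ahash_request *req;
            struct scatterlist sg;
            struct hmac_result res;
            int ret;

            tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_ahash_setkey(tfm, key, keylen);
            if (ret)
                    goto out_tfm;

            req = ahash_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_tfm;
            }

            init_completion(&res.completion);
            ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       hmac_complete, &res);
            sg_init_one(&sg, msg, len);
            ahash_request_set_crypt(req, &sg, out, len);

            /* async drivers like this one return -EINPROGRESS here */
            ret = crypto_ahash_digest(req);
            if (ret == -EINPROGRESS || ret == -EBUSY) {
                    wait_for_completion(&res.completion);
                    ret = res.err;
            }

            ahash_request_free(req);
    out_tfm:
            crypto_free_ahash(tfm);
            return ret;
    }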
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c new file mode 100644 index 000000000000..64a366c50174 --- /dev/null +++ b/drivers/crypto/marvell/tdma.c | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * Provide TDMA helper functions used by cipher and hash algorithm | ||
3 | * implementations. | ||
4 | * | ||
5 | * Author: Boris Brezillon <boris.brezillon@free-electrons.com> | ||
6 | * Author: Arnaud Ebalard <arno@natisbad.org> | ||
7 | * | ||
8 | * This work is based on an initial version written by | ||
9 | * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License version 2 as published | ||
13 | * by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include "cesa.h" | ||
17 | |||
18 | bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter, | ||
19 | struct mv_cesa_sg_dma_iter *sgiter, | ||
20 | unsigned int len) | ||
21 | { | ||
22 | if (!sgiter->sg) | ||
23 | return false; | ||
24 | |||
25 | sgiter->op_offset += len; | ||
26 | sgiter->offset += len; | ||
27 | if (sgiter->offset == sg_dma_len(sgiter->sg)) { | ||
28 | if (sg_is_last(sgiter->sg)) | ||
29 | return false; | ||
30 | sgiter->offset = 0; | ||
31 | sgiter->sg = sg_next(sgiter->sg); | ||
32 | } | ||
33 | |||
34 | if (sgiter->op_offset == iter->op_len) | ||
35 | return false; | ||
36 | |||
37 | return true; | ||
38 | } | ||
39 | |||
40 | void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) | ||
41 | { | ||
42 | struct mv_cesa_engine *engine = dreq->base.engine; | ||
43 | |||
44 | writel(0, engine->regs + CESA_SA_CFG); | ||
45 | |||
46 | mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); | ||
47 | writel(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B | | ||
48 | CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN, | ||
49 | engine->regs + CESA_TDMA_CONTROL); | ||
50 | |||
51 | writel(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT | | ||
52 | CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS, | ||
53 | engine->regs + CESA_SA_CFG); | ||
54 | writel(dreq->chain.first->cur_dma, | ||
55 | engine->regs + CESA_TDMA_NEXT_ADDR); | ||
56 | writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); | ||
57 | } | ||
58 | |||
59 | void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) | ||
60 | { | ||
61 | struct mv_cesa_tdma_desc *tdma; | ||
62 | |||
63 | for (tdma = dreq->chain.first; tdma;) { | ||
64 | struct mv_cesa_tdma_desc *old_tdma = tdma; | ||
65 | |||
66 | if (tdma->flags & CESA_TDMA_OP) | ||
67 | dma_pool_free(cesa_dev->dma->op_pool, tdma->op, | ||
68 | le32_to_cpu(tdma->src)); | ||
69 | |||
70 | tdma = tdma->next; | ||
71 | dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, | ||
72 | le32_to_cpu(old_tdma->cur_dma)); | ||
73 | } | ||
74 | |||
75 | dreq->chain.first = NULL; | ||
76 | dreq->chain.last = NULL; | ||
77 | } | ||
78 | |||
79 | void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, | ||
80 | struct mv_cesa_engine *engine) | ||
81 | { | ||
82 | struct mv_cesa_tdma_desc *tdma; | ||
83 | |||
84 | for (tdma = dreq->chain.first; tdma; tdma = tdma->next) { | ||
85 | if (tdma->flags & CESA_TDMA_DST_IN_SRAM) | ||
86 | tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma); | ||
87 | |||
88 | if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) | ||
89 | tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); | ||
90 | |||
91 | if (tdma->flags & CESA_TDMA_OP) | ||
92 | mv_cesa_adjust_op(engine, tdma->op); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | static struct mv_cesa_tdma_desc * | ||
97 | mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) | ||
98 | { | ||
99 | struct mv_cesa_tdma_desc *new_tdma = NULL; | ||
100 | dma_addr_t dma_handle; | ||
101 | |||
102 | new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags, | ||
103 | &dma_handle); | ||
104 | if (!new_tdma) | ||
105 | return ERR_PTR(-ENOMEM); | ||
106 | |||
107 | memset(new_tdma, 0, sizeof(*new_tdma)); | ||
108 | new_tdma->cur_dma = cpu_to_le32(dma_handle); | ||
109 | if (chain->last) { | ||
110 | chain->last->next_dma = new_tdma->cur_dma; | ||
111 | chain->last->next = new_tdma; | ||
112 | } else { | ||
113 | chain->first = new_tdma; | ||
114 | } | ||
115 | |||
116 | chain->last = new_tdma; | ||
117 | |||
118 | return new_tdma; | ||
119 | } | ||
120 | |||
121 | struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, | ||
122 | const struct mv_cesa_op_ctx *op_templ, | ||
123 | bool skip_ctx, | ||
124 | gfp_t flags) | ||
125 | { | ||
126 | struct mv_cesa_tdma_desc *tdma; | ||
127 | struct mv_cesa_op_ctx *op; | ||
128 | dma_addr_t dma_handle; | ||
129 | |||
130 | tdma = mv_cesa_dma_add_desc(chain, flags); | ||
131 | if (IS_ERR(tdma)) | ||
132 | return ERR_CAST(tdma); | ||
133 | |||
134 | op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle); | ||
135 | if (!op) | ||
136 | return ERR_PTR(-ENOMEM); | ||
137 | |||
138 | *op = *op_templ; | ||
139 | |||
140 | tdma = chain->last; | ||
141 | tdma->op = op; | ||
142 | tdma->byte_cnt = (skip_ctx ? sizeof(op->desc) : sizeof(*op)) | BIT(31); | ||
143 | tdma->src = dma_handle; | ||
144 | tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; | ||
145 | |||
146 | return op; | ||
147 | } | ||
148 | |||
149 | int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, | ||
150 | dma_addr_t dst, dma_addr_t src, u32 size, | ||
151 | u32 flags, gfp_t gfp_flags) | ||
152 | { | ||
153 | struct mv_cesa_tdma_desc *tdma; | ||
154 | |||
155 | tdma = mv_cesa_dma_add_desc(chain, gfp_flags); | ||
156 | if (IS_ERR(tdma)) | ||
157 | return PTR_ERR(tdma); | ||
158 | |||
159 | tdma->byte_cnt = size | BIT(31); | ||
160 | tdma->src = src; | ||
161 | tdma->dst = dst; | ||
162 | |||
163 | flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); | ||
164 | tdma->flags = flags | CESA_TDMA_DATA; | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, | ||
170 | u32 flags) | ||
171 | { | ||
172 | struct mv_cesa_tdma_desc *tdma; | ||
173 | |||
174 | tdma = mv_cesa_dma_add_desc(chain, flags); | ||
175 | if (IS_ERR(tdma)) | ||
176 | return PTR_ERR(tdma); | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags) | ||
182 | { | ||
183 | struct mv_cesa_tdma_desc *tdma; | ||
184 | |||
185 | tdma = mv_cesa_dma_add_desc(chain, flags); | ||
186 | if (IS_ERR(tdma)) | ||
187 | return PTR_ERR(tdma); | ||
188 | |||
189 | tdma->byte_cnt = BIT(31); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, | ||
195 | struct mv_cesa_dma_iter *dma_iter, | ||
196 | struct mv_cesa_sg_dma_iter *sgiter, | ||
197 | gfp_t gfp_flags) | ||
198 | { | ||
199 | u32 flags = sgiter->dir == DMA_TO_DEVICE ? | ||
200 | CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM; | ||
201 | unsigned int len; | ||
202 | |||
203 | do { | ||
204 | dma_addr_t dst, src; | ||
205 | int ret; | ||
206 | |||
207 | len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter); | ||
208 | if (sgiter->dir == DMA_TO_DEVICE) { | ||
209 | dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; | ||
210 | src = sg_dma_address(sgiter->sg) + sgiter->offset; | ||
211 | } else { | ||
212 | dst = sg_dma_address(sgiter->sg) + sgiter->offset; | ||
213 | src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; | ||
214 | } | ||
215 | |||
216 | ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len, | ||
217 | flags, gfp_flags); | ||
218 | if (ret) | ||
219 | return ret; | ||
220 | |||
221 | } while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len)); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
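Taken together, these helpers let the cipher and hash code assemble a complete TDMA program before kicking the engine with mv_cesa_dma_step(). A rough sketch of the expected call order (illustrative only — build_one_chain and its iterator arguments are hypothetical; the real callers live in the cipher and hash files of this driver, and on error they release the partial chain with mv_cesa_dma_cleanup()):

    static int build_one_chain(struct mv_cesa_tdma_chain *chain,
                               const struct mv_cesa_op_ctx *tmpl,
                               struct mv_cesa_dma_iter *iter,
                               struct mv_cesa_sg_dma_iter *src_iter,
                               struct mv_cesa_sg_dma_iter *dst_iter,
                               gfp_t flags)
    {
            struct mv_cesa_op_ctx *op;
            int ret;

            /* operation descriptor is copied into SRAM first */
            op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
            if (IS_ERR(op))
                    return PTR_ERR(op);

            /* copy the input data into SRAM ... */
            ret = mv_cesa_dma_add_op_transfers(chain, iter, src_iter, flags);
            if (ret)
                    return ret;

            /* ... let the engine process it ... */
            ret = mv_cesa_dma_add_dummy_launch(chain, flags);
            if (ret)
                    return ret;

            /* ... copy the result back out of SRAM ... */
            ret = mv_cesa_dma_add_op_transfers(chain, iter, dst_iter, flags);
            if (ret)
                    return ret;

            /* ... and terminate the chain */
            return mv_cesa_dma_add_dummy_end(chain, flags);
    }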
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index f91f15ddee92..5bcd575fa96f 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <crypto/aes.h> | 9 | #include <crypto/aes.h> |
10 | #include <crypto/algapi.h> | 10 | #include <crypto/algapi.h> |
11 | #include <linux/crypto.h> | 11 | #include <linux/crypto.h> |
12 | #include <linux/genalloc.h> | ||
12 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | 14 | #include <linux/io.h> |
14 | #include <linux/kthread.h> | 15 | #include <linux/kthread.h> |
@@ -29,6 +30,8 @@ | |||
29 | #define MAX_HW_HASH_SIZE 0xFFFF | 30 | #define MAX_HW_HASH_SIZE 0xFFFF |
30 | #define MV_CESA_EXPIRE 500 /* msec */ | 31 | #define MV_CESA_EXPIRE 500 /* msec */ |
31 | 32 | ||
33 | #define MV_CESA_DEFAULT_SRAM_SIZE 2048 | ||
34 | |||
32 | /* | 35 | /* |
33 | * STM: | 36 | * STM: |
34 | * /---------------------------------------\ | 37 | * /---------------------------------------\ |
@@ -83,6 +86,8 @@ struct req_progress { | |||
83 | struct crypto_priv { | 86 | struct crypto_priv { |
84 | void __iomem *reg; | 87 | void __iomem *reg; |
85 | void __iomem *sram; | 88 | void __iomem *sram; |
89 | struct gen_pool *sram_pool; | ||
90 | dma_addr_t sram_dma; | ||
86 | int irq; | 91 | int irq; |
87 | struct clk *clk; | 92 | struct clk *clk; |
88 | struct task_struct *queue_th; | 93 | struct task_struct *queue_th; |
@@ -595,7 +600,7 @@ static int queue_manag(void *data) | |||
595 | cpg->eng_st = ENGINE_IDLE; | 600 | cpg->eng_st = ENGINE_IDLE; |
596 | do { | 601 | do { |
597 | struct crypto_async_request *async_req = NULL; | 602 | struct crypto_async_request *async_req = NULL; |
598 | struct crypto_async_request *backlog; | 603 | struct crypto_async_request *backlog = NULL; |
599 | 604 | ||
600 | __set_current_state(TASK_INTERRUPTIBLE); | 605 | __set_current_state(TASK_INTERRUPTIBLE); |
601 | 606 | ||
@@ -1019,6 +1024,39 @@ static struct ahash_alg mv_hmac_sha1_alg = { | |||
1019 | } | 1024 | } |
1020 | }; | 1025 | }; |
1021 | 1026 | ||
1027 | static int mv_cesa_get_sram(struct platform_device *pdev, | ||
1028 | struct crypto_priv *cp) | ||
1029 | { | ||
1030 | struct resource *res; | ||
1031 | u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE; | ||
1032 | |||
1033 | of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size", | ||
1034 | &sram_size); | ||
1035 | |||
1036 | cp->sram_size = sram_size; | ||
1037 | cp->sram_pool = of_get_named_gen_pool(pdev->dev.of_node, | ||
1038 | "marvell,crypto-srams", 0); | ||
1039 | if (cp->sram_pool) { | ||
1040 | cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size, | ||
1041 | &cp->sram_dma); | ||
1042 | if (cp->sram) | ||
1043 | return 0; | ||
1044 | |||
1045 | return -ENOMEM; | ||
1046 | } | ||
1047 | |||
1048 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
1049 | "sram"); | ||
1050 | if (!res || resource_size(res) < cp->sram_size) | ||
1051 | return -EINVAL; | ||
1052 | |||
1053 | cp->sram = devm_ioremap_resource(&pdev->dev, res); | ||
1054 | if (IS_ERR(cp->sram)) | ||
1055 | return PTR_ERR(cp->sram); | ||
1056 | |||
1057 | return 0; | ||
1058 | } | ||
1059 | |||
1022 | static int mv_probe(struct platform_device *pdev) | 1060 | static int mv_probe(struct platform_device *pdev) |
1023 | { | 1061 | { |
1024 | struct crypto_priv *cp; | 1062 | struct crypto_priv *cp; |
@@ -1041,24 +1079,17 @@ static int mv_probe(struct platform_device *pdev) | |||
1041 | 1079 | ||
1042 | spin_lock_init(&cp->lock); | 1080 | spin_lock_init(&cp->lock); |
1043 | crypto_init_queue(&cp->queue, 50); | 1081 | crypto_init_queue(&cp->queue, 50); |
1044 | cp->reg = ioremap(res->start, resource_size(res)); | 1082 | cp->reg = devm_ioremap_resource(&pdev->dev, res); |
1045 | if (!cp->reg) { | 1083 | if (IS_ERR(cp->reg)) { |
1046 | ret = -ENOMEM; | 1084 | ret = PTR_ERR(cp->reg); |
1047 | goto err; | 1085 | goto err; |
1048 | } | 1086 | } |
1049 | 1087 | ||
1050 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); | 1088 | ret = mv_cesa_get_sram(pdev, cp); |
1051 | if (!res) { | 1089 | if (ret) |
1052 | ret = -ENXIO; | 1090 | goto err; |
1053 | goto err_unmap_reg; | 1091 | |
1054 | } | ||
1055 | cp->sram_size = resource_size(res); | ||
1056 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | 1092 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; |
1057 | cp->sram = ioremap(res->start, cp->sram_size); | ||
1058 | if (!cp->sram) { | ||
1059 | ret = -ENOMEM; | ||
1060 | goto err_unmap_reg; | ||
1061 | } | ||
1062 | 1093 | ||
1063 | if (pdev->dev.of_node) | 1094 | if (pdev->dev.of_node) |
1064 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 1095 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); |
@@ -1066,7 +1097,7 @@ static int mv_probe(struct platform_device *pdev) | |||
1066 | irq = platform_get_irq(pdev, 0); | 1097 | irq = platform_get_irq(pdev, 0); |
1067 | if (irq < 0 || irq == NO_IRQ) { | 1098 | if (irq < 0 || irq == NO_IRQ) { |
1068 | ret = irq; | 1099 | ret = irq; |
1069 | goto err_unmap_sram; | 1100 | goto err; |
1070 | } | 1101 | } |
1071 | cp->irq = irq; | 1102 | cp->irq = irq; |
1072 | 1103 | ||
@@ -1076,7 +1107,7 @@ static int mv_probe(struct platform_device *pdev) | |||
1076 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | 1107 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); |
1077 | if (IS_ERR(cp->queue_th)) { | 1108 | if (IS_ERR(cp->queue_th)) { |
1078 | ret = PTR_ERR(cp->queue_th); | 1109 | ret = PTR_ERR(cp->queue_th); |
1079 | goto err_unmap_sram; | 1110 | goto err; |
1080 | } | 1111 | } |
1081 | 1112 | ||
1082 | ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), | 1113 | ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), |
@@ -1134,10 +1165,6 @@ err_irq: | |||
1134 | } | 1165 | } |
1135 | err_thread: | 1166 | err_thread: |
1136 | kthread_stop(cp->queue_th); | 1167 | kthread_stop(cp->queue_th); |
1137 | err_unmap_sram: | ||
1138 | iounmap(cp->sram); | ||
1139 | err_unmap_reg: | ||
1140 | iounmap(cp->reg); | ||
1141 | err: | 1168 | err: |
1142 | kfree(cp); | 1169 | kfree(cp); |
1143 | cpg = NULL; | 1170 | cpg = NULL; |
@@ -1157,8 +1184,6 @@ static int mv_remove(struct platform_device *pdev) | |||
1157 | kthread_stop(cp->queue_th); | 1184 | kthread_stop(cp->queue_th); |
1158 | free_irq(cp->irq, cp); | 1185 | free_irq(cp->irq, cp); |
1159 | memset(cp->sram, 0, cp->sram_size); | 1186 | memset(cp->sram, 0, cp->sram_size); |
1160 | iounmap(cp->sram); | ||
1161 | iounmap(cp->reg); | ||
1162 | 1187 | ||
1163 | if (!IS_ERR(cp->clk)) { | 1188 | if (!IS_ERR(cp->clk)) { |
1164 | clk_disable_unprepare(cp->clk); | 1189 | clk_disable_unprepare(cp->clk); |
@@ -1172,6 +1197,8 @@ static int mv_remove(struct platform_device *pdev) | |||
1172 | 1197 | ||
1173 | static const struct of_device_id mv_cesa_of_match_table[] = { | 1198 | static const struct of_device_id mv_cesa_of_match_table[] = { |
1174 | { .compatible = "marvell,orion-crypto", }, | 1199 | { .compatible = "marvell,orion-crypto", }, |
1200 | { .compatible = "marvell,kirkwood-crypto", }, | ||
1201 | { .compatible = "marvell,dove-crypto", }, | ||
1175 | {} | 1202 | {} |
1176 | }; | 1203 | }; |
1177 | MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); | 1204 | MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); |
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 10a9aeff1666..2e8dab9d4263 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -1281,10 +1281,10 @@ static const char md5_zero[MD5_DIGEST_SIZE] = { | |||
1281 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | 1281 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, |
1282 | }; | 1282 | }; |
1283 | static const u32 md5_init[MD5_HASH_WORDS] = { | 1283 | static const u32 md5_init[MD5_HASH_WORDS] = { |
1284 | cpu_to_le32(0x67452301), | 1284 | cpu_to_le32(MD5_H0), |
1285 | cpu_to_le32(0xefcdab89), | 1285 | cpu_to_le32(MD5_H1), |
1286 | cpu_to_le32(0x98badcfe), | 1286 | cpu_to_le32(MD5_H2), |
1287 | cpu_to_le32(0x10325476), | 1287 | cpu_to_le32(MD5_H3), |
1288 | }; | 1288 | }; |
1289 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | 1289 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { |
1290 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | 1290 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, |
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index f82616621ae1..e421c96c763a 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig | |||
@@ -1,26 +1,55 @@ | |||
1 | |||
1 | config CRYPTO_DEV_NX_ENCRYPT | 2 | config CRYPTO_DEV_NX_ENCRYPT |
2 | tristate "Encryption acceleration support" | 3 | tristate "Encryption acceleration support on pSeries platform" |
3 | depends on PPC64 && IBMVIO | 4 | depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN |
4 | default y | 5 | default y |
5 | select CRYPTO_AES | 6 | select CRYPTO_AES |
6 | select CRYPTO_CBC | ||
7 | select CRYPTO_ECB | ||
8 | select CRYPTO_CCM | 7 | select CRYPTO_CCM |
9 | select CRYPTO_GCM | ||
10 | select CRYPTO_AUTHENC | ||
11 | select CRYPTO_XCBC | ||
12 | select CRYPTO_SHA256 | ||
13 | select CRYPTO_SHA512 | ||
14 | help | 8 | help |
15 | Support for Power7+ in-Nest encryption acceleration. This | 9 | Support for PowerPC Nest (NX) encryption acceleration. This |
16 | module supports acceleration for AES and SHA2 algorithms. If you | 10 | module supports acceleration for AES and SHA2 algorithms on |
17 | choose 'M' here, this module will be called nx_crypto. | 11 | the pSeries platform. If you choose 'M' here, this module |
12 | will be called nx_crypto. | ||
18 | 13 | ||
19 | config CRYPTO_DEV_NX_COMPRESS | 14 | config CRYPTO_DEV_NX_COMPRESS |
20 | tristate "Compression acceleration support" | 15 | tristate "Compression acceleration support" |
21 | depends on PPC64 && IBMVIO | ||
22 | default y | 16 | default y |
23 | help | 17 | help |
24 | Support for Power7+ in-Nest compression acceleration. This | 18 | Support for PowerPC Nest (NX) compression acceleration. This |
25 | module supports acceleration for AES and SHA2 algorithms. If you | 19 | module supports acceleration for compressing memory with the 842 |
26 | choose 'M' here, this module will be called nx_compress. | 20 | algorithm. One of the platform drivers must be selected also. |
21 | If you choose 'M' here, this module will be called nx_compress. | ||
22 | |||
23 | if CRYPTO_DEV_NX_COMPRESS | ||
24 | |||
25 | config CRYPTO_DEV_NX_COMPRESS_PSERIES | ||
26 | tristate "Compression acceleration support on pSeries platform" | ||
27 | depends on PPC_PSERIES && IBMVIO | ||
28 | default y | ||
29 | help | ||
30 | Support for PowerPC Nest (NX) compression acceleration. This | ||
31 | module supports acceleration for compressing memory with the 842 | ||
32 | algorithm. This supports NX hardware on the pSeries platform. | ||
33 | If you choose 'M' here, this module will be called nx_compress_pseries. | ||
34 | |||
35 | config CRYPTO_DEV_NX_COMPRESS_POWERNV | ||
36 | tristate "Compression acceleration support on PowerNV platform" | ||
37 | depends on PPC_POWERNV | ||
38 | default y | ||
39 | help | ||
40 | Support for PowerPC Nest (NX) compression acceleration. This | ||
41 | module supports acceleration for compressing memory with the 842 | ||
42 | algorithm. This supports NX hardware on the PowerNV platform. | ||
43 | If you choose 'M' here, this module will be called nx_compress_powernv. | ||
44 | |||
45 | config CRYPTO_DEV_NX_COMPRESS_CRYPTO | ||
46 | tristate "Compression acceleration cryptographic interface" | ||
47 | select CRYPTO_ALGAPI | ||
48 | select 842_DECOMPRESS | ||
49 | default y | ||
50 | help | ||
51 | Support for PowerPC Nest (NX) accelerators using the cryptographic | ||
52 | API. If you choose 'M' here, this module will be called | ||
53 | nx_compress_crypto. | ||
54 | |||
55 | endif | ||
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index bb770ea45ce9..e1684f5adb11 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile | |||
@@ -10,5 +10,12 @@ nx-crypto-objs := nx.o \ | |||
10 | nx-sha256.o \ | 10 | nx-sha256.o \ |
11 | nx-sha512.o | 11 | nx-sha512.o |
12 | 12 | ||
13 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o | 13 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o |
14 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o | ||
15 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o | ||
16 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o | ||
14 | nx-compress-objs := nx-842.o | 17 | nx-compress-objs := nx-842.o |
18 | nx-compress-platform-objs := nx-842-platform.o | ||
19 | nx-compress-pseries-objs := nx-842-pseries.o | ||
20 | nx-compress-powernv-objs := nx-842-powernv.o | ||
21 | nx-compress-crypto-objs := nx-842-crypto.o | ||
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c new file mode 100644 index 000000000000..d53a1dcd7b4e --- /dev/null +++ b/drivers/crypto/nx/nx-842-crypto.c | |||
@@ -0,0 +1,580 @@ | |||
1 | /* | ||
2 | * Cryptographic API for the NX-842 hardware compression. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * Copyright (C) IBM Corporation, 2011-2015 | ||
15 | * | ||
16 | * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | ||
17 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
18 | * | ||
19 | * Rewrite: Dan Streetman <ddstreet@ieee.org> | ||
20 | * | ||
21 | * This is an interface to the NX-842 compression hardware in PowerPC | ||
22 | * processors. Most of the complexity of this driver is due to the fact that | ||
23 | * the NX-842 compression hardware requires the input and output data buffers | ||
24 | * to be specifically aligned, to be a specific multiple in length, and within | ||
25 | * specific minimum and maximum lengths. Those restrictions, provided by the | ||
26 | * nx-842 driver via nx842_constraints, mean this driver must use bounce | ||
27 | * buffers and headers to correct misaligned in or out buffers, and to split | ||
28 | * input buffers that are too large. | ||
29 | * | ||
30 | * This driver will fall back to software decompression if the hardware | ||
31 | * decompression fails, so this driver's decompression should never fail as | ||
32 | * long as the provided compressed buffer is valid. Any compressed buffer | ||
33 | * created by this driver will have a header (except ones where the input | ||
34 | * perfectly matches the constraints); so users of this driver cannot simply | ||
35 | * pass a compressed buffer created by this driver over to the 842 software | ||
36 | * decompression library. Instead, users must use this driver to decompress; | ||
37 | * if the hardware fails or is unavailable, the compressed buffer will be | ||
38 | * parsed and the header removed, and the raw 842 buffer(s) passed to the 842 | ||
39 | * software decompression library. | ||
40 | * | ||
41 | * This does not fall back to software compression, however, since the caller | ||
42 | * of this function is specifically requesting hardware compression; if the | ||
43 | * hardware compression fails, the caller can fall back to software | ||
44 | * compression, and the raw 842 compressed buffer that the software compressor | ||
45 | * creates can be passed to this driver for hardware decompression; any | ||
46 | * buffer without our specific header magic is assumed to be a raw 842 buffer | ||
47 | * and passed directly to the hardware. Note that the software compression | ||
48 | * library will produce a compressed buffer that is incompatible with the | ||
49 | * hardware decompressor if the original input buffer length is not a multiple | ||
50 | * of 8; if such a compressed buffer is passed to this driver for | ||
51 | * decompression, the hardware will reject it and this driver will then pass | ||
52 | * it over to the software library for decompression. | ||
53 | */ | ||
54 | |||
55 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
56 | |||
57 | #include <linux/init.h> | ||
58 | #include <linux/module.h> | ||
59 | #include <linux/crypto.h> | ||
60 | #include <linux/vmalloc.h> | ||
61 | #include <linux/sw842.h> | ||
62 | #include <linux/ratelimit.h> | ||
63 | |||
64 | #include "nx-842.h" | ||
65 | |||
66 | /* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit | ||
67 | * template (see lib/842/842.h), so this magic number will never appear at | ||
68 | * the start of a raw 842 compressed buffer. That is important, as any buffer | ||
69 | * passed to us without this magic is assumed to be a raw 842 compressed | ||
70 | * buffer, and passed directly to the hardware to decompress. | ||
71 | */ | ||
72 | #define NX842_CRYPTO_MAGIC (0xf842) | ||
73 | #define NX842_CRYPTO_GROUP_MAX (0x20) | ||
74 | #define NX842_CRYPTO_HEADER_SIZE(g) \ | ||
75 | (sizeof(struct nx842_crypto_header) + \ | ||
76 | sizeof(struct nx842_crypto_header_group) * (g)) | ||
77 | #define NX842_CRYPTO_HEADER_MAX_SIZE \ | ||
78 | NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX) | ||
79 | |||
80 | /* bounce buffer size */ | ||
81 | #define BOUNCE_BUFFER_ORDER (2) | ||
82 | #define BOUNCE_BUFFER_SIZE \ | ||
83 | ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER)) | ||
84 | |||
85 | /* wait longer on compress; decompress can fall back to sw if hw is busy */ | ||
86 | #define COMP_BUSY_TIMEOUT (250) /* ms */ | ||
87 | #define DECOMP_BUSY_TIMEOUT (50) /* ms */ | ||
88 | |||
89 | struct nx842_crypto_header_group { | ||
90 | __be16 padding; /* unused bytes at start of group */ | ||
91 | __be32 compressed_length; /* compressed bytes in group */ | ||
92 | __be32 uncompressed_length; /* bytes after decompression */ | ||
93 | } __packed; | ||
94 | |||
95 | struct nx842_crypto_header { | ||
96 | __be16 magic; /* NX842_CRYPTO_MAGIC */ | ||
97 | __be16 ignore; /* decompressed end bytes to ignore */ | ||
98 | u8 groups; /* total groups in this header */ | ||
99 | struct nx842_crypto_header_group group[]; | ||
100 | } __packed; | ||
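Packed as above, a group descriptor occupies 2 + 4 + 4 = 10 bytes and the fixed header 2 + 2 + 1 = 5 bytes, so NX842_CRYPTO_HEADER_MAX_SIZE comes to 5 + 0x20 * 10 = 325 bytes. A standalone restatement (userspace types, illustrative only) that pins those sizes down at compile time:

    #include <stdint.h>

    struct group {                    /* mirrors nx842_crypto_header_group */
            uint16_t padding;
            uint32_t compressed_length;
            uint32_t uncompressed_length;
    } __attribute__((packed));

    struct header {                   /* mirrors nx842_crypto_header */
            uint16_t magic;
            uint16_t ignore;
            uint8_t groups;
            struct group group[];
    } __attribute__((packed));

    /* 5-byte fixed part + 0x20 groups of 10 bytes each */
    _Static_assert(sizeof(struct header) + 0x20 * sizeof(struct group) == 325,
                   "NX842_CRYPTO_HEADER_MAX_SIZE should be 325 bytes");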
101 | |||
102 | struct nx842_crypto_param { | ||
103 | u8 *in; | ||
104 | unsigned int iremain; | ||
105 | u8 *out; | ||
106 | unsigned int oremain; | ||
107 | unsigned int ototal; | ||
108 | }; | ||
109 | |||
110 | static int update_param(struct nx842_crypto_param *p, | ||
111 | unsigned int slen, unsigned int dlen) | ||
112 | { | ||
113 | if (p->iremain < slen) | ||
114 | return -EOVERFLOW; | ||
115 | if (p->oremain < dlen) | ||
116 | return -ENOSPC; | ||
117 | |||
118 | p->in += slen; | ||
119 | p->iremain -= slen; | ||
120 | p->out += dlen; | ||
121 | p->oremain -= dlen; | ||
122 | p->ototal += dlen; | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | struct nx842_crypto_ctx { | ||
128 | u8 *wmem; | ||
129 | u8 *sbounce, *dbounce; | ||
130 | |||
131 | struct nx842_crypto_header header; | ||
132 | struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; | ||
133 | }; | ||
134 | |||
135 | static int nx842_crypto_init(struct crypto_tfm *tfm) | ||
136 | { | ||
137 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
138 | |||
139 | ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL); | ||
140 | ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); | ||
141 | ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); | ||
142 | if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { | ||
143 | kfree(ctx->wmem); | ||
144 | free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); | ||
145 | free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); | ||
146 | return -ENOMEM; | ||
147 | } | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static void nx842_crypto_exit(struct crypto_tfm *tfm) | ||
153 | { | ||
154 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
155 | |||
156 | kfree(ctx->wmem); | ||
157 | free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); | ||
158 | free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); | ||
159 | } | ||
160 | |||
161 | static int read_constraints(struct nx842_constraints *c) | ||
162 | { | ||
163 | int ret; | ||
164 | |||
165 | ret = nx842_constraints(c); | ||
166 | if (ret) { | ||
167 | pr_err_ratelimited("could not get nx842 constraints : %d\n", | ||
168 | ret); | ||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | /* limit maximum, to always have enough bounce buffer to decompress */ | ||
173 | if (c->maximum > BOUNCE_BUFFER_SIZE) { | ||
174 | c->maximum = BOUNCE_BUFFER_SIZE; | ||
175 | pr_info_once("limiting nx842 maximum to %x\n", c->maximum); | ||
176 | } | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) | ||
182 | { | ||
183 | int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); | ||
184 | |||
185 | /* compress should have added space for header */ | ||
186 | if (s > be16_to_cpu(hdr->group[0].padding)) { | ||
187 | pr_err("Internal error: no space for header\n"); | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | |||
191 | memcpy(buf, hdr, s); | ||
192 | |||
193 | print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int compress(struct nx842_crypto_ctx *ctx, | ||
199 | struct nx842_crypto_param *p, | ||
200 | struct nx842_crypto_header_group *g, | ||
201 | struct nx842_constraints *c, | ||
202 | u16 *ignore, | ||
203 | unsigned int hdrsize) | ||
204 | { | ||
205 | unsigned int slen = p->iremain, dlen = p->oremain, tmplen; | ||
206 | unsigned int adj_slen = slen; | ||
207 | u8 *src = p->in, *dst = p->out; | ||
208 | int ret, dskip = 0; | ||
209 | ktime_t timeout; | ||
210 | |||
211 | if (p->iremain == 0) | ||
212 | return -EOVERFLOW; | ||
213 | |||
214 | if (p->oremain == 0 || hdrsize + c->minimum > dlen) | ||
215 | return -ENOSPC; | ||
216 | |||
217 | if (slen % c->multiple) | ||
218 | adj_slen = round_up(slen, c->multiple); | ||
219 | if (slen < c->minimum) | ||
220 | adj_slen = c->minimum; | ||
221 | if (slen > c->maximum) | ||
222 | adj_slen = slen = c->maximum; | ||
223 | if (adj_slen > slen || (u64)src % c->alignment) { | ||
224 | adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); | ||
225 | slen = min(slen, BOUNCE_BUFFER_SIZE); | ||
226 | if (adj_slen > slen) | ||
227 | memset(ctx->sbounce + slen, 0, adj_slen - slen); | ||
228 | memcpy(ctx->sbounce, src, slen); | ||
229 | src = ctx->sbounce; | ||
230 | slen = adj_slen; | ||
231 | pr_debug("using comp sbounce buffer, len %x\n", slen); | ||
232 | } | ||
233 | |||
234 | dst += hdrsize; | ||
235 | dlen -= hdrsize; | ||
236 | |||
237 | if ((u64)dst % c->alignment) { | ||
238 | dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); | ||
239 | dst += dskip; | ||
240 | dlen -= dskip; | ||
241 | } | ||
242 | if (dlen % c->multiple) | ||
243 | dlen = round_down(dlen, c->multiple); | ||
244 | if (dlen < c->minimum) { | ||
245 | nospc: | ||
246 | dst = ctx->dbounce; | ||
247 | dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); | ||
248 | dlen = round_down(dlen, c->multiple); | ||
249 | dskip = 0; | ||
250 | pr_debug("using comp dbounce buffer, len %x\n", dlen); | ||
251 | } | ||
252 | if (dlen > c->maximum) | ||
253 | dlen = c->maximum; | ||
254 | |||
255 | tmplen = dlen; | ||
256 | timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); | ||
257 | do { | ||
258 | dlen = tmplen; /* reset dlen, if we're retrying */ | ||
259 | ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem); | ||
260 | /* possibly we should reduce the slen here, instead of | ||
261 | * retrying with the dbounce buffer? | ||
262 | */ | ||
263 | if (ret == -ENOSPC && dst != ctx->dbounce) | ||
264 | goto nospc; | ||
265 | } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); | ||
266 | if (ret) | ||
267 | return ret; | ||
268 | |||
269 | dskip += hdrsize; | ||
270 | |||
271 | if (dst == ctx->dbounce) | ||
272 | memcpy(p->out + dskip, dst, dlen); | ||
273 | |||
274 | g->padding = cpu_to_be16(dskip); | ||
275 | g->compressed_length = cpu_to_be32(dlen); | ||
276 | g->uncompressed_length = cpu_to_be32(slen); | ||
277 | |||
278 | if (p->iremain < slen) { | ||
279 | *ignore = slen - p->iremain; | ||
280 | slen = p->iremain; | ||
281 | } | ||
282 | |||
283 | pr_debug("compress slen %x ignore %x dlen %x padding %x\n", | ||
284 | slen, *ignore, dlen, dskip); | ||
285 | |||
286 | return update_param(p, slen, dskip + dlen); | ||
287 | } | ||
288 | |||
289 | static int nx842_crypto_compress(struct crypto_tfm *tfm, | ||
290 | const u8 *src, unsigned int slen, | ||
291 | u8 *dst, unsigned int *dlen) | ||
292 | { | ||
293 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
294 | struct nx842_crypto_header *hdr = &ctx->header; | ||
295 | struct nx842_crypto_param p; | ||
296 | struct nx842_constraints c; | ||
297 | unsigned int groups, hdrsize, h; | ||
298 | int ret, n; | ||
299 | bool add_header; | ||
300 | u16 ignore = 0; | ||
301 | |||
302 | p.in = (u8 *)src; | ||
303 | p.iremain = slen; | ||
304 | p.out = dst; | ||
305 | p.oremain = *dlen; | ||
306 | p.ototal = 0; | ||
307 | |||
308 | *dlen = 0; | ||
309 | |||
310 | ret = read_constraints(&c); | ||
311 | if (ret) | ||
312 | return ret; | ||
313 | |||
314 | groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, | ||
315 | DIV_ROUND_UP(p.iremain, c.maximum)); | ||
316 | hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); | ||
317 | |||
318 | /* skip adding header if the buffers meet all constraints */ | ||
319 | add_header = (p.iremain % c.multiple || | ||
320 | p.iremain < c.minimum || | ||
321 | p.iremain > c.maximum || | ||
322 | (u64)p.in % c.alignment || | ||
323 | p.oremain % c.multiple || | ||
324 | p.oremain < c.minimum || | ||
325 | p.oremain > c.maximum || | ||
326 | (u64)p.out % c.alignment); | ||
327 | |||
328 | hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); | ||
329 | hdr->groups = 0; | ||
330 | hdr->ignore = 0; | ||
331 | |||
332 | while (p.iremain > 0) { | ||
333 | n = hdr->groups++; | ||
334 | if (hdr->groups > NX842_CRYPTO_GROUP_MAX) | ||
335 | return -ENOSPC; | ||
336 | |||
337 | /* header goes before first group */ | ||
338 | h = !n && add_header ? hdrsize : 0; | ||
339 | |||
340 | if (ignore) | ||
341 | pr_warn("interal error, ignore is set %x\n", ignore); | ||
342 | |||
343 | ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); | ||
344 | if (ret) | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | if (!add_header && hdr->groups > 1) { | ||
349 | pr_err("Internal error: No header but multiple groups\n"); | ||
350 | return -EINVAL; | ||
351 | } | ||
352 | |||
353 | /* ignore indicates the input stream needed to be padded */ | ||
354 | hdr->ignore = cpu_to_be16(ignore); | ||
355 | if (ignore) | ||
356 | pr_debug("marked %d bytes as ignore\n", ignore); | ||
357 | |||
358 | if (add_header) | ||
359 | ret = nx842_crypto_add_header(hdr, dst); | ||
360 | if (ret) | ||
361 | return ret; | ||
362 | |||
363 | *dlen = p.ototal; | ||
364 | |||
365 | pr_debug("compress total slen %x dlen %x\n", slen, *dlen); | ||
366 | |||
367 | return 0; | ||
368 | } | ||
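To make the group arithmetic above concrete: with c.maximum clamped to BOUNCE_BUFFER_SIZE (16 KiB assuming a 4 KiB PAGE_SIZE, given BOUNCE_BUFFER_ORDER of 2), a 100 KiB input gives DIV_ROUND_UP(102400, 16384) = 7 groups, and hdrsize = NX842_CRYPTO_HEADER_SIZE(7) = 5 + 7 * 10 = 75 bytes reserved in front of the first group's compressed data.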
369 | |||
370 | static int decompress(struct nx842_crypto_ctx *ctx, | ||
371 | struct nx842_crypto_param *p, | ||
372 | struct nx842_crypto_header_group *g, | ||
373 | struct nx842_constraints *c, | ||
374 | u16 ignore, | ||
375 | bool usehw) | ||
376 | { | ||
377 | unsigned int slen = be32_to_cpu(g->compressed_length); | ||
378 | unsigned int required_len = be32_to_cpu(g->uncompressed_length); | ||
379 | unsigned int dlen = p->oremain, tmplen; | ||
380 | unsigned int adj_slen = slen; | ||
381 | u8 *src = p->in, *dst = p->out; | ||
382 | u16 padding = be16_to_cpu(g->padding); | ||
383 | int ret, spadding = 0, dpadding = 0; | ||
384 | ktime_t timeout; | ||
385 | |||
386 | if (!slen || !required_len) | ||
387 | return -EINVAL; | ||
388 | |||
389 | if (p->iremain <= 0 || padding + slen > p->iremain) | ||
390 | return -EOVERFLOW; | ||
391 | |||
392 | if (p->oremain <= 0 || required_len - ignore > p->oremain) | ||
393 | return -ENOSPC; | ||
394 | |||
395 | src += padding; | ||
396 | |||
397 | if (!usehw) | ||
398 | goto usesw; | ||
399 | |||
400 | if (slen % c->multiple) | ||
401 | adj_slen = round_up(slen, c->multiple); | ||
402 | if (slen < c->minimum) | ||
403 | adj_slen = c->minimum; | ||
404 | if (slen > c->maximum) | ||
405 | goto usesw; | ||
406 | if (slen < adj_slen || (u64)src % c->alignment) { | ||
407 | /* we can append padding bytes because the 842 format defines | ||
408 | * an "end" template (see lib/842/842_decompress.c) and will | ||
409 | * ignore any bytes following it. | ||
410 | */ | ||
411 | if (slen < adj_slen) | ||
412 | memset(ctx->sbounce + slen, 0, adj_slen - slen); | ||
413 | memcpy(ctx->sbounce, src, slen); | ||
414 | src = ctx->sbounce; | ||
415 | spadding = adj_slen - slen; | ||
416 | slen = adj_slen; | ||
417 | pr_debug("using decomp sbounce buffer, len %x\n", slen); | ||
418 | } | ||
419 | |||
420 | if (dlen % c->multiple) | ||
421 | dlen = round_down(dlen, c->multiple); | ||
422 | if (dlen < required_len || (u64)dst % c->alignment) { | ||
423 | dst = ctx->dbounce; | ||
424 | dlen = min(required_len, BOUNCE_BUFFER_SIZE); | ||
425 | pr_debug("using decomp dbounce buffer, len %x\n", dlen); | ||
426 | } | ||
427 | if (dlen < c->minimum) | ||
428 | goto usesw; | ||
429 | if (dlen > c->maximum) | ||
430 | dlen = c->maximum; | ||
431 | |||
432 | tmplen = dlen; | ||
433 | timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); | ||
434 | do { | ||
435 | dlen = tmplen; /* reset dlen, if we're retrying */ | ||
436 | ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem); | ||
437 | } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); | ||
438 | if (ret) { | ||
439 | usesw: | ||
440 | /* reset everything, sw doesn't have constraints */ | ||
441 | src = p->in + padding; | ||
442 | slen = be32_to_cpu(g->compressed_length); | ||
443 | spadding = 0; | ||
444 | dst = p->out; | ||
445 | dlen = p->oremain; | ||
446 | dpadding = 0; | ||
447 | if (dlen < required_len) { /* have ignore bytes */ | ||
448 | dst = ctx->dbounce; | ||
449 | dlen = BOUNCE_BUFFER_SIZE; | ||
450 | } | ||
451 | pr_info_ratelimited("using software 842 decompression\n"); | ||
452 | ret = sw842_decompress(src, slen, dst, &dlen); | ||
453 | } | ||
454 | if (ret) | ||
455 | return ret; | ||
456 | |||
457 | slen -= spadding; | ||
458 | |||
459 | dlen -= ignore; | ||
460 | if (ignore) | ||
461 | pr_debug("ignoring last %x bytes\n", ignore); | ||
462 | |||
463 | if (dst == ctx->dbounce) | ||
464 | memcpy(p->out, dst, dlen); | ||
465 | |||
466 | pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", | ||
467 | slen, padding, dlen, ignore); | ||
468 | |||
469 | return update_param(p, slen + padding, dlen); | ||
470 | } | ||
471 | |||
472 | static int nx842_crypto_decompress(struct crypto_tfm *tfm, | ||
473 | const u8 *src, unsigned int slen, | ||
474 | u8 *dst, unsigned int *dlen) | ||
475 | { | ||
476 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
477 | struct nx842_crypto_header *hdr; | ||
478 | struct nx842_crypto_param p; | ||
479 | struct nx842_constraints c; | ||
480 | int n, ret, hdr_len; | ||
481 | u16 ignore = 0; | ||
482 | bool usehw = true; | ||
483 | |||
484 | p.in = (u8 *)src; | ||
485 | p.iremain = slen; | ||
486 | p.out = dst; | ||
487 | p.oremain = *dlen; | ||
488 | p.ototal = 0; | ||
489 | |||
490 | *dlen = 0; | ||
491 | |||
492 | if (read_constraints(&c)) | ||
493 | usehw = false; | ||
494 | |||
495 | hdr = (struct nx842_crypto_header *)src; | ||
496 | |||
497 | /* If it doesn't start with our header magic number, assume it's a raw | ||
498 | * 842 compressed buffer and pass it directly to the hardware driver | ||
499 | */ | ||
500 | if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { | ||
501 | struct nx842_crypto_header_group g = { | ||
502 | .padding = 0, | ||
503 | .compressed_length = cpu_to_be32(p.iremain), | ||
504 | .uncompressed_length = cpu_to_be32(p.oremain), | ||
505 | }; | ||
506 | |||
507 | ret = decompress(ctx, &p, &g, &c, 0, usehw); | ||
508 | if (ret) | ||
509 | return ret; | ||
510 | |||
511 | *dlen = p.ototal; | ||
512 | |||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | if (!hdr->groups) { | ||
517 | pr_err("header has no groups\n"); | ||
518 | return -EINVAL; | ||
519 | } | ||
520 | if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { | ||
521 | pr_err("header has too many groups %x, max %x\n", | ||
522 | hdr->groups, NX842_CRYPTO_GROUP_MAX); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | |||
526 | hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); | ||
527 | if (hdr_len > slen) | ||
528 | return -EOVERFLOW; | ||
529 | |||
530 | memcpy(&ctx->header, src, hdr_len); | ||
531 | hdr = &ctx->header; | ||
532 | |||
533 | for (n = 0; n < hdr->groups; n++) { | ||
534 | /* ignore applies to last group */ | ||
535 | if (n + 1 == hdr->groups) | ||
536 | ignore = be16_to_cpu(hdr->ignore); | ||
537 | |||
538 | ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw); | ||
539 | if (ret) | ||
540 | return ret; | ||
541 | } | ||
542 | |||
543 | *dlen = p.ototal; | ||
544 | |||
545 | pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static struct crypto_alg alg = { | ||
551 | .cra_name = "842", | ||
552 | .cra_driver_name = "842-nx", | ||
553 | .cra_priority = 300, | ||
554 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | ||
555 | .cra_ctxsize = sizeof(struct nx842_crypto_ctx), | ||
556 | .cra_module = THIS_MODULE, | ||
557 | .cra_init = nx842_crypto_init, | ||
558 | .cra_exit = nx842_crypto_exit, | ||
559 | .cra_u = { .compress = { | ||
560 | .coa_compress = nx842_crypto_compress, | ||
561 | .coa_decompress = nx842_crypto_decompress } } | ||
562 | }; | ||
563 | |||
564 | static int __init nx842_crypto_mod_init(void) | ||
565 | { | ||
566 | return crypto_register_alg(&alg); | ||
567 | } | ||
568 | module_init(nx842_crypto_mod_init); | ||
569 | |||
570 | static void __exit nx842_crypto_mod_exit(void) | ||
571 | { | ||
572 | crypto_unregister_alg(&alg); | ||
573 | } | ||
574 | module_exit(nx842_crypto_mod_exit); | ||
575 | |||
576 | MODULE_LICENSE("GPL"); | ||
577 | MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface"); | ||
578 | MODULE_ALIAS_CRYPTO("842"); | ||
579 | MODULE_ALIAS_CRYPTO("842-nx"); | ||
580 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
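Because the algorithm registers as an ordinary CRYPTO_ALG_TYPE_COMPRESS transform, callers use the generic compression API and never touch the nx-specific entry points directly. A minimal sketch (compress_842 is a hypothetical wrapper; error handling trimmed):

    #include <linux/crypto.h>
    #include <linux/err.h>

    static int compress_842(const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen)
    {
            struct crypto_comp *tfm;
            int ret;

            /* resolves to whichever "842" implementation has the highest
             * cra_priority; this driver registers "842-nx" at 300 */
            tfm = crypto_alloc_comp("842", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* on entry *dlen is the output buffer size; on success it is
             * updated to the number of bytes actually produced */
            ret = crypto_comp_compress(tfm, src, slen, dst, dlen);

            crypto_free_comp(tfm);
            return ret;
    }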
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c new file mode 100644 index 000000000000..664f13dd06ed --- /dev/null +++ b/drivers/crypto/nx/nx-842-platform.c | |||
@@ -0,0 +1,84 @@ | |||
1 | |||
2 | #include "nx-842.h" | ||
3 | |||
4 | /* This pointer is kept here, separate from the main nx-842.c driver, | ||
5 | * because the main driver loads the platform drivers during its init() and | ||
6 | * expects one (or none) of them to set this pointer to its driver. The | ||
7 | * pointer can't live in the main nx-842 driver itself: it wouldn't be | ||
8 | * accessible until the main driver finished loading, and the main driver | ||
9 | * can't finish loading while it is still waiting for the platform driver | ||
10 | * to load. So place it here. | ||
11 | */ | ||
12 | static struct nx842_driver *driver; | ||
13 | static DEFINE_SPINLOCK(driver_lock); | ||
14 | |||
15 | struct nx842_driver *nx842_platform_driver(void) | ||
16 | { | ||
17 | return driver; | ||
18 | } | ||
19 | EXPORT_SYMBOL_GPL(nx842_platform_driver); | ||
20 | |||
21 | bool nx842_platform_driver_set(struct nx842_driver *_driver) | ||
22 | { | ||
23 | bool ret = false; | ||
24 | |||
25 | spin_lock(&driver_lock); | ||
26 | |||
27 | if (!driver) { | ||
28 | driver = _driver; | ||
29 | ret = true; | ||
30 | } else | ||
31 | WARN(1, "can't set platform driver, already set to %s\n", | ||
32 | driver->name); | ||
33 | |||
34 | spin_unlock(&driver_lock); | ||
35 | return ret; | ||
36 | } | ||
37 | EXPORT_SYMBOL_GPL(nx842_platform_driver_set); | ||
38 | |||
39 | /* only call this from the platform driver exit function */ | ||
40 | void nx842_platform_driver_unset(struct nx842_driver *_driver) | ||
41 | { | ||
42 | spin_lock(&driver_lock); | ||
43 | |||
44 | if (driver == _driver) | ||
45 | driver = NULL; | ||
46 | else if (driver) | ||
47 | WARN(1, "can't unset platform driver %s, currently set to %s\n", | ||
48 | _driver->name, driver->name); | ||
49 | else | ||
50 | WARN(1, "can't unset platform driver, already unset\n"); | ||
51 | |||
52 | spin_unlock(&driver_lock); | ||
53 | } | ||
54 | EXPORT_SYMBOL_GPL(nx842_platform_driver_unset); | ||
55 | |||
56 | bool nx842_platform_driver_get(void) | ||
57 | { | ||
58 | bool ret = false; | ||
59 | |||
60 | spin_lock(&driver_lock); | ||
61 | |||
62 | if (driver) | ||
63 | ret = try_module_get(driver->owner); | ||
64 | |||
65 | spin_unlock(&driver_lock); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | EXPORT_SYMBOL_GPL(nx842_platform_driver_get); | ||
70 | |||
71 | void nx842_platform_driver_put(void) | ||
72 | { | ||
73 | spin_lock(&driver_lock); | ||
74 | |||
75 | if (driver) | ||
76 | module_put(driver->owner); | ||
77 | |||
78 | spin_unlock(&driver_lock); | ||
79 | } | ||
80 | EXPORT_SYMBOL_GPL(nx842_platform_driver_put); | ||
81 | |||
82 | MODULE_LICENSE("GPL"); | ||
83 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
84 | MODULE_DESCRIPTION("842 H/W Compression platform driver"); | ||
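In other words, a platform module is expected to claim the single driver slot at load time and release it on unload. A hedged sketch of that handshake (my_driver and the module hooks are hypothetical; the real powernv implementation follows in the next file):

    static struct nx842_driver my_driver = {
            .name = "842-my-platform",      /* hypothetical */
            .owner = THIS_MODULE,
            /* compress/decompress ops and constraints as declared in nx-842.h */
    };

    static int __init my_mod_init(void)
    {
            /* fails if another platform module already claimed the slot */
            if (!nx842_platform_driver_set(&my_driver))
                    return -EBUSY;
            return 0;
    }
    module_init(my_mod_init);

    static void __exit my_mod_exit(void)
    {
            nx842_platform_driver_unset(&my_driver);
    }
    module_exit(my_mod_exit);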
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c new file mode 100644 index 000000000000..33b3b0abf4ae --- /dev/null +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -0,0 +1,637 @@ | |||
1 | /* | ||
2 | * Driver for IBM PowerNV 842 compression accelerator | ||
3 | * | ||
4 | * Copyright (C) 2015 Dan Streetman, IBM Corp | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
19 | #include "nx-842.h" | ||
20 | |||
21 | #include <linux/timer.h> | ||
22 | |||
23 | #include <asm/prom.h> | ||
24 | #include <asm/icswx.h> | ||
25 | |||
26 | MODULE_LICENSE("GPL"); | ||
27 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
28 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors"); | ||
29 | |||
30 | #define WORKMEM_ALIGN (CRB_ALIGN) | ||
31 | #define CSB_WAIT_MAX (5000) /* ms */ | ||
32 | |||
33 | struct nx842_workmem { | ||
34 | /* Below fields must be properly aligned */ | ||
35 | struct coprocessor_request_block crb; /* CRB_ALIGN align */ | ||
36 | struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */ | ||
37 | struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */ | ||
38 | /* Above fields must be properly aligned */ | ||
39 | |||
40 | ktime_t start; | ||
41 | |||
42 | char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ | ||
43 | } __packed __aligned(WORKMEM_ALIGN); | ||
44 | |||
45 | struct nx842_coproc { | ||
46 | unsigned int chip_id; | ||
47 | unsigned int ct; | ||
48 | unsigned int ci; | ||
49 | struct list_head list; | ||
50 | }; | ||
51 | |||
52 | /* no cpu hotplug on powernv, so this list never changes after init */ | ||
53 | static LIST_HEAD(nx842_coprocs); | ||
54 | static unsigned int nx842_ct; | ||
55 | |||
56 | /** | ||
57 | * setup_indirect_dde - Setup an indirect DDE | ||
58 | * | ||
59 | * The DDE is set up with the DDE count, byte count, and address of | ||
60 | * first direct DDE in the list. | ||
61 | */ | ||
62 | static void setup_indirect_dde(struct data_descriptor_entry *dde, | ||
63 | struct data_descriptor_entry *ddl, | ||
64 | unsigned int dde_count, unsigned int byte_count) | ||
65 | { | ||
66 | dde->flags = 0; | ||
67 | dde->count = dde_count; | ||
68 | dde->index = 0; | ||
69 | dde->length = cpu_to_be32(byte_count); | ||
70 | dde->address = cpu_to_be64(nx842_get_pa(ddl)); | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * setup_direct_dde - Setup single DDE from buffer | ||
75 | * | ||
76 | * The DDE is set up with the buffer and length. The buffer must be properly | ||
77 | * aligned. The used length is returned. | ||
78 | * Returns: | ||
79 | * N Successfully set up DDE with N bytes | ||
80 | */ | ||
81 | static unsigned int setup_direct_dde(struct data_descriptor_entry *dde, | ||
82 | unsigned long pa, unsigned int len) | ||
83 | { | ||
84 | unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa)); | ||
85 | |||
86 | dde->flags = 0; | ||
87 | dde->count = 0; | ||
88 | dde->index = 0; | ||
89 | dde->length = cpu_to_be32(l); | ||
90 | dde->address = cpu_to_be64(pa); | ||
91 | |||
92 | return l; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * setup_ddl - Setup DDL from buffer | ||
97 | * | ||
98 | * Returns: | ||
99 | * 0 Successfully set up DDL | ||
100 | */ | ||
101 | static int setup_ddl(struct data_descriptor_entry *dde, | ||
102 | struct data_descriptor_entry *ddl, | ||
103 | unsigned char *buf, unsigned int len, | ||
104 | bool in) | ||
105 | { | ||
106 | unsigned long pa = nx842_get_pa(buf); | ||
107 | int i, ret, total_len = len; | ||
108 | |||
109 | if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) { | ||
110 | pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n", | ||
111 | in ? "input" : "output", pa, DDE_BUFFER_ALIGN); | ||
112 | return -EINVAL; | ||
113 | } | ||
114 | |||
115 | /* we only need to check the last multiple: the buffer must be | ||
116 | * DDE_BUFFER_ALIGN aligned, which is itself a multiple of | ||
117 | * DDE_BUFFER_SIZE_MULT, so every DDE buffer before the last page is | ||
118 | * guaranteed to be a multiple of DDE_BUFFER_SIZE_MULT. | ||
119 | */ | ||
120 | if (len % DDE_BUFFER_LAST_MULT) { | ||
121 | pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n", | ||
122 | in ? "input" : "output", len, DDE_BUFFER_LAST_MULT); | ||
123 | if (in) | ||
124 | return -EINVAL; | ||
125 | len = round_down(len, DDE_BUFFER_LAST_MULT); | ||
126 | } | ||
127 | |||
128 | /* use a single direct DDE */ | ||
129 | if (len <= LEN_ON_PAGE(pa)) { | ||
130 | ret = setup_direct_dde(dde, pa, len); | ||
131 | WARN_ON(ret < len); | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* use the DDL */ | ||
136 | for (i = 0; i < DDL_LEN_MAX && len > 0; i++) { | ||
137 | ret = setup_direct_dde(&ddl[i], pa, len); | ||
138 | buf += ret; | ||
139 | len -= ret; | ||
140 | pa = nx842_get_pa(buf); | ||
141 | } | ||
142 | |||
143 | if (len > 0) { | ||
144 | pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n", | ||
145 | total_len, in ? "input" : "output", len); | ||
146 | if (in) | ||
147 | return -EMSGSIZE; | ||
148 | total_len -= len; | ||
149 | } | ||
150 | setup_indirect_dde(dde, ddl, i, total_len); | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | #define CSB_ERR(csb, msg, ...) \ | ||
156 | pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \ | ||
157 | ##__VA_ARGS__, (csb)->flags, \ | ||
158 | (csb)->cs, (csb)->cc, (csb)->ce, \ | ||
159 | be32_to_cpu((csb)->count)) | ||
160 | |||
161 | #define CSB_ERR_ADDR(csb, msg, ...) \ | ||
162 | CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ | ||
163 | (unsigned long)be64_to_cpu((csb)->address)) | ||
164 | |||
165 | /** | ||
166 | * wait_for_csb - Wait for the CSB valid bit and decode the completion code | ||
167 | */ | ||
168 | static int wait_for_csb(struct nx842_workmem *wmem, | ||
169 | struct coprocessor_status_block *csb) | ||
170 | { | ||
171 | ktime_t start = wmem->start, now = ktime_get(); | ||
172 | ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); | ||
173 | |||
174 | while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { | ||
175 | cpu_relax(); | ||
176 | now = ktime_get(); | ||
177 | if (ktime_after(now, timeout)) | ||
178 | break; | ||
179 | } | ||
180 | |||
181 | /* hw has updated csb and output buffer */ | ||
182 | barrier(); | ||
183 | |||
184 | /* check CSB flags */ | ||
185 | if (!(csb->flags & CSB_V)) { | ||
186 | CSB_ERR(csb, "CSB still not valid after %ld us, giving up", | ||
187 | (long)ktime_us_delta(now, start)); | ||
188 | return -ETIMEDOUT; | ||
189 | } | ||
190 | if (csb->flags & CSB_F) { | ||
191 | CSB_ERR(csb, "Invalid CSB format"); | ||
192 | return -EPROTO; | ||
193 | } | ||
194 | if (csb->flags & CSB_CH) { | ||
195 | CSB_ERR(csb, "Invalid CSB chaining state"); | ||
196 | return -EPROTO; | ||
197 | } | ||
198 | |||
199 | /* verify CSB completion sequence is 0 */ | ||
200 | if (csb->cs) { | ||
201 | CSB_ERR(csb, "Invalid CSB completion sequence"); | ||
202 | return -EPROTO; | ||
203 | } | ||
204 | |||
205 | /* check CSB Completion Code */ | ||
206 | switch (csb->cc) { | ||
207 | /* no error */ | ||
208 | case CSB_CC_SUCCESS: | ||
209 | break; | ||
210 | case CSB_CC_TPBC_GT_SPBC: | ||
211 | /* not an error, but the compressed data is | ||
212 | * larger than the uncompressed data :( | ||
213 | */ | ||
214 | break; | ||
215 | |||
216 | /* input data errors */ | ||
217 | case CSB_CC_OPERAND_OVERLAP: | ||
218 | /* input and output buffers overlap */ | ||
219 | CSB_ERR(csb, "Operand Overlap error"); | ||
220 | return -EINVAL; | ||
221 | case CSB_CC_INVALID_OPERAND: | ||
222 | CSB_ERR(csb, "Invalid operand"); | ||
223 | return -EINVAL; | ||
224 | case CSB_CC_NOSPC: | ||
225 | /* output buffer too small */ | ||
226 | return -ENOSPC; | ||
227 | case CSB_CC_ABORT: | ||
228 | CSB_ERR(csb, "Function aborted"); | ||
229 | return -EINTR; | ||
230 | case CSB_CC_CRC_MISMATCH: | ||
231 | CSB_ERR(csb, "CRC mismatch"); | ||
232 | return -EINVAL; | ||
233 | case CSB_CC_TEMPL_INVALID: | ||
234 | CSB_ERR(csb, "Compressed data template invalid"); | ||
235 | return -EINVAL; | ||
236 | case CSB_CC_TEMPL_OVERFLOW: | ||
237 | CSB_ERR(csb, "Compressed data template shows data past end"); | ||
238 | return -EINVAL; | ||
239 | |||
240 | /* these should not happen */ | ||
241 | case CSB_CC_INVALID_ALIGN: | ||
242 | /* setup_ddl should have detected this */ | ||
243 | CSB_ERR_ADDR(csb, "Invalid alignment"); | ||
244 | return -EINVAL; | ||
245 | case CSB_CC_DATA_LENGTH: | ||
246 | /* setup_ddl should have detected this */ | ||
247 | CSB_ERR(csb, "Invalid data length"); | ||
248 | return -EINVAL; | ||
249 | case CSB_CC_WR_TRANSLATION: | ||
250 | case CSB_CC_TRANSLATION: | ||
251 | case CSB_CC_TRANSLATION_DUP1: | ||
252 | case CSB_CC_TRANSLATION_DUP2: | ||
253 | case CSB_CC_TRANSLATION_DUP3: | ||
254 | case CSB_CC_TRANSLATION_DUP4: | ||
255 | case CSB_CC_TRANSLATION_DUP5: | ||
256 | case CSB_CC_TRANSLATION_DUP6: | ||
257 | /* should not happen, we use physical addrs */ | ||
258 | CSB_ERR_ADDR(csb, "Translation error"); | ||
259 | return -EPROTO; | ||
260 | case CSB_CC_WR_PROTECTION: | ||
261 | case CSB_CC_PROTECTION: | ||
262 | case CSB_CC_PROTECTION_DUP1: | ||
263 | case CSB_CC_PROTECTION_DUP2: | ||
264 | case CSB_CC_PROTECTION_DUP3: | ||
265 | case CSB_CC_PROTECTION_DUP4: | ||
266 | case CSB_CC_PROTECTION_DUP5: | ||
267 | case CSB_CC_PROTECTION_DUP6: | ||
268 | /* should not happen, we use physical addrs */ | ||
269 | CSB_ERR_ADDR(csb, "Protection error"); | ||
270 | return -EPROTO; | ||
271 | case CSB_CC_PRIVILEGE: | ||
272 | /* shouldn't happen, we're in HYP mode */ | ||
273 | CSB_ERR(csb, "Insufficient Privilege error"); | ||
274 | return -EPROTO; | ||
275 | case CSB_CC_EXCESSIVE_DDE: | ||
276 | /* shouldn't happen, setup_ddl doesn't use many dde's */ | ||
277 | CSB_ERR(csb, "Too many DDEs in DDL"); | ||
278 | return -EINVAL; | ||
279 | case CSB_CC_TRANSPORT: | ||
280 | /* shouldn't happen, we setup CRB correctly */ | ||
281 | CSB_ERR(csb, "Invalid CRB"); | ||
282 | return -EINVAL; | ||
283 | case CSB_CC_SEGMENTED_DDL: | ||
284 | /* shouldn't happen, setup_ddl creates DDL right */ | ||
285 | CSB_ERR(csb, "Segmented DDL error"); | ||
286 | return -EINVAL; | ||
287 | case CSB_CC_DDE_OVERFLOW: | ||
288 | /* shouldn't happen, setup_ddl creates DDL right */ | ||
289 | CSB_ERR(csb, "DDE overflow error"); | ||
290 | return -EINVAL; | ||
291 | case CSB_CC_SESSION: | ||
292 | /* should not happen with ICSWX */ | ||
293 | CSB_ERR(csb, "Session violation error"); | ||
294 | return -EPROTO; | ||
295 | case CSB_CC_CHAIN: | ||
296 | /* should not happen, we don't use chained CRBs */ | ||
297 | CSB_ERR(csb, "Chained CRB error"); | ||
298 | return -EPROTO; | ||
299 | case CSB_CC_SEQUENCE: | ||
300 | /* should not happen, we don't use chained CRBs */ | ||
301 | CSB_ERR(csb, "CRB sequence number error"); | ||
302 | return -EPROTO; | ||
303 | case CSB_CC_UNKNOWN_CODE: | ||
304 | CSB_ERR(csb, "Unknown subfunction code"); | ||
305 | return -EPROTO; | ||
306 | |||
307 | /* hardware errors */ | ||
308 | case CSB_CC_RD_EXTERNAL: | ||
309 | case CSB_CC_RD_EXTERNAL_DUP1: | ||
310 | case CSB_CC_RD_EXTERNAL_DUP2: | ||
311 | case CSB_CC_RD_EXTERNAL_DUP3: | ||
312 | CSB_ERR_ADDR(csb, "Read error outside coprocessor"); | ||
313 | return -EPROTO; | ||
314 | case CSB_CC_WR_EXTERNAL: | ||
315 | CSB_ERR_ADDR(csb, "Write error outside coprocessor"); | ||
316 | return -EPROTO; | ||
317 | case CSB_CC_INTERNAL: | ||
318 | CSB_ERR(csb, "Internal error in coprocessor"); | ||
319 | return -EPROTO; | ||
320 | case CSB_CC_PROVISION: | ||
321 | CSB_ERR(csb, "Storage provision error"); | ||
322 | return -EPROTO; | ||
323 | case CSB_CC_HW: | ||
324 | CSB_ERR(csb, "Correctable hardware error"); | ||
325 | return -EPROTO; | ||
326 | |||
327 | default: | ||
328 | CSB_ERR(csb, "Invalid CC %d", csb->cc); | ||
329 | return -EPROTO; | ||
330 | } | ||
331 | |||
332 | /* check Completion Extension state */ | ||
333 | if (csb->ce & CSB_CE_TERMINATION) { | ||
334 | CSB_ERR(csb, "CSB request was terminated"); | ||
335 | return -EPROTO; | ||
336 | } | ||
337 | if (csb->ce & CSB_CE_INCOMPLETE) { | ||
338 | CSB_ERR(csb, "CSB request not complete"); | ||
339 | return -EPROTO; | ||
340 | } | ||
341 | if (!(csb->ce & CSB_CE_TPBC)) { | ||
342 | CSB_ERR(csb, "TPBC not provided, unknown target length"); | ||
343 | return -EPROTO; | ||
344 | } | ||
345 | |||
346 | /* successful completion */ | ||
347 | pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count, | ||
348 | (unsigned long)ktime_us_delta(now, start)); | ||
349 | |||
350 | return 0; | ||
351 | } | ||
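The loop above is a generic bounded busy-wait: spin on a hardware-owned flag with cpu_relax(), give up after a wall-clock deadline, then re-check the flag once more so a poller preempted right at the deadline does not report a completion as a timeout. A minimal self-contained sketch of the same pattern follows; the function name and the 500 ms budget are illustrative, not taken from this driver:

#include <linux/errno.h>
#include <linux/ktime.h>

static int poll_flag_bounded(volatile u8 *flags, u8 done_bit)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), 500); /* illustrative budget */

	while (!(ACCESS_ONCE(*flags) & done_bit)) {
		cpu_relax();			/* be kind to the SMT sibling */
		if (ktime_after(ktime_get(), deadline))
			break;
	}
	/* final re-check, as above: we may have raced with the completion */
	return (ACCESS_ONCE(*flags) & done_bit) ? 0 : -ETIMEDOUT;
}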
352 | |||
353 | /** | ||
354 | * nx842_powernv_function - compress/decompress data using the 842 algorithm | ||
355 | * | ||
356 | * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. | ||
357 | * This compresses or decompresses the provided input buffer into the provided | ||
358 | * output buffer. | ||
359 | * | ||
360 | * Upon return from this function @outlen contains the length of the | ||
361 | * output data. If there is an error then @outlen will be 0 and an | ||
362 | * error will be specified by the return code from this function. | ||
363 | * | ||
364 | * The @workmem buffer should only be used by one function call at a time. | ||
365 | * | ||
366 | * @in: input buffer pointer | ||
367 | * @inlen: input buffer size | ||
368 | * @out: output buffer pointer | ||
369 | * @outlenp: output buffer size pointer | ||
370 | * @workmem: working memory buffer pointer, size determined by | ||
371 | * nx842_powernv_driver.workmem_size | ||
372 | * @fc: function code, see CCW Function Codes in nx-842.h | ||
373 | * | ||
374 | * Returns: | ||
375 | * 0 Success, output of length @outlenp stored in the buffer at @out | ||
376 | * -ENODEV Hardware unavailable | ||
377 | * -ENOSPC Output buffer is too small | ||
378 | * -EMSGSIZE Input buffer too large | ||
379 | * -EINVAL buffer constraints do not fit nx842_constraints | ||
380 | * -EPROTO hardware error during operation | ||
381 | * -ETIMEDOUT hardware did not complete operation in reasonable time | ||
382 | * -EINTR operation was aborted | ||
383 | */ | ||
384 | static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, | ||
385 | unsigned char *out, unsigned int *outlenp, | ||
386 | void *workmem, int fc) | ||
387 | { | ||
388 | struct coprocessor_request_block *crb; | ||
389 | struct coprocessor_status_block *csb; | ||
390 | struct nx842_workmem *wmem; | ||
391 | int ret; | ||
392 | u64 csb_addr; | ||
393 | u32 ccw; | ||
394 | unsigned int outlen = *outlenp; | ||
395 | |||
396 | wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); | ||
397 | |||
398 | *outlenp = 0; | ||
399 | |||
400 | /* shouldn't happen, we don't load without a coproc */ | ||
401 | if (!nx842_ct) { | ||
402 | pr_err_ratelimited("coprocessor CT is 0"); | ||
403 | return -ENODEV; | ||
404 | } | ||
405 | |||
406 | crb = &wmem->crb; | ||
407 | csb = &crb->csb; | ||
408 | |||
409 | /* Clear any previous values */ | ||
410 | memset(crb, 0, sizeof(*crb)); | ||
411 | |||
412 | /* set up DDLs */ | ||
413 | ret = setup_ddl(&crb->source, wmem->ddl_in, | ||
414 | (unsigned char *)in, inlen, true); | ||
415 | if (ret) | ||
416 | return ret; | ||
417 | ret = setup_ddl(&crb->target, wmem->ddl_out, | ||
418 | out, outlen, false); | ||
419 | if (ret) | ||
420 | return ret; | ||
421 | |||
422 | /* set up CCW */ | ||
423 | ccw = 0; | ||
424 | ccw = SET_FIELD(ccw, CCW_CT, nx842_ct); | ||
425 | ccw = SET_FIELD(ccw, CCW_CI_842, 0); /* use 0 for hw auto-selection */ | ||
426 | ccw = SET_FIELD(ccw, CCW_FC_842, fc); | ||
427 | |||
428 | /* set up CRB's CSB addr */ | ||
429 | csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; | ||
430 | csb_addr |= CRB_CSB_AT; /* Addrs are phys */ | ||
431 | crb->csb_addr = cpu_to_be64(csb_addr); | ||
432 | |||
433 | wmem->start = ktime_get(); | ||
434 | |||
435 | /* do ICSWX */ | ||
436 | ret = icswx(cpu_to_be32(ccw), crb); | ||
437 | |||
438 | pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret, | ||
439 | (unsigned int)ccw, | ||
440 | (unsigned int)be32_to_cpu(crb->ccw)); | ||
441 | |||
442 | switch (ret) { | ||
443 | case ICSWX_INITIATED: | ||
444 | ret = wait_for_csb(wmem, csb); | ||
445 | break; | ||
446 | case ICSWX_BUSY: | ||
447 | pr_debug_ratelimited("842 Coprocessor busy\n"); | ||
448 | ret = -EBUSY; | ||
449 | break; | ||
450 | case ICSWX_REJECTED: | ||
451 | pr_err_ratelimited("ICSWX rejected\n"); | ||
452 | ret = -EPROTO; | ||
453 | break; | ||
454 | default: | ||
455 | pr_err_ratelimited("Invalid ICSWX return code %x\n", ret); | ||
456 | ret = -EPROTO; | ||
457 | break; | ||
458 | } | ||
459 | |||
460 | if (!ret) | ||
461 | *outlenp = be32_to_cpu(csb->count); | ||
462 | |||
463 | return ret; | ||
464 | } | ||
465 | |||
466 | /** | ||
467 | * nx842_powernv_compress - Compress data using the 842 algorithm | ||
468 | * | ||
469 | * Compression provided by the NX842 coprocessor on IBM PowerNV systems. | ||
470 | * The input buffer is compressed and the result is stored in the | ||
471 | * provided output buffer. | ||
472 | * | ||
473 | * Upon return from this function @outlen contains the length of the | ||
474 | * compressed data. If there is an error then @outlen will be 0 and an | ||
475 | * error will be specified by the return code from this function. | ||
476 | * | ||
477 | * @in: input buffer pointer | ||
478 | * @inlen: input buffer size | ||
479 | * @out: output buffer pointer | ||
480 | * @outlenp: output buffer size pointer | ||
481 | * @wmem: working memory buffer pointer, size determined by | ||
482 | * nx842_powernv_driver.workmem_size | ||
483 | * | ||
484 | * Returns: see @nx842_powernv_function() | ||
485 | */ | ||
486 | static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, | ||
487 | unsigned char *out, unsigned int *outlenp, | ||
488 | void *wmem) | ||
489 | { | ||
490 | return nx842_powernv_function(in, inlen, out, outlenp, | ||
491 | wmem, CCW_FC_842_COMP_NOCRC); | ||
492 | } | ||
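For orientation, a hypothetical caller of this wrapper could look like the sketch below. It is not part of the driver: it assumes sizeof(struct nx842_workmem) (the driver's .workmem_size) already includes enough slack for the PTR_ALIGN() done inside nx842_powernv_function(), and that the page-sized buffers satisfy nx842_powernv_constraints.

static int example_compress_page(const void *src, void *dst,
				 unsigned int *dlen)
{
	/* same size as nx842_powernv_driver.workmem_size */
	void *wmem = kmalloc(sizeof(struct nx842_workmem), GFP_KERNEL);
	int ret;

	if (!wmem)
		return -ENOMEM;
	*dlen = PAGE_SIZE;	/* output space available on entry */
	ret = nx842_powernv_compress(src, PAGE_SIZE, dst, dlen, wmem);
	/* on success, *dlen now holds the compressed length */
	kfree(wmem);
	return ret;
}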
493 | |||
494 | /** | ||
495 | * nx842_powernv_decompress - Decompress data using the 842 algorithm | ||
496 | * | ||
497 | * Decompression provided by the NX842 coprocessor on IBM PowerNV systems. | ||
498 | * The input buffer is decompressed and the result is stored in the | ||
499 | * provided output buffer. | ||
500 | * | ||
501 | * Upon return from this function @outlen contains the length of the | ||
502 | * decompressed data. If there is an error then @outlen will be 0 and an | ||
503 | * error will be specified by the return code from this function. | ||
504 | * | ||
505 | * @in: input buffer pointer | ||
506 | * @inlen: input buffer size | ||
507 | * @out: output buffer pointer | ||
508 | * @outlenp: output buffer size pointer | ||
509 | * @wmem: working memory buffer pointer, size determined by | ||
510 | * nx842_powernv_driver.workmem_size | ||
511 | * | ||
512 | * Returns: see @nx842_powernv_function() | ||
513 | */ | ||
514 | static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, | ||
515 | unsigned char *out, unsigned int *outlenp, | ||
516 | void *wmem) | ||
517 | { | ||
518 | return nx842_powernv_function(in, inlen, out, outlenp, | ||
519 | wmem, CCW_FC_842_DECOMP_NOCRC); | ||
520 | } | ||
521 | |||
522 | static int __init nx842_powernv_probe(struct device_node *dn) | ||
523 | { | ||
524 | struct nx842_coproc *coproc; | ||
525 | struct property *ct_prop, *ci_prop; | ||
526 | unsigned int ct, ci; | ||
527 | int chip_id; | ||
528 | |||
529 | chip_id = of_get_ibm_chip_id(dn); | ||
530 | if (chip_id < 0) { | ||
531 | pr_err("ibm,chip-id missing\n"); | ||
532 | return -EINVAL; | ||
533 | } | ||
534 | ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL); | ||
535 | if (!ct_prop) { | ||
536 | pr_err("ibm,842-coprocessor-type missing\n"); | ||
537 | return -EINVAL; | ||
538 | } | ||
539 | ct = be32_to_cpu(*(unsigned int *)ct_prop->value); | ||
540 | ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL); | ||
541 | if (!ci_prop) { | ||
542 | pr_err("ibm,842-coprocessor-instance missing\n"); | ||
543 | return -EINVAL; | ||
544 | } | ||
545 | ci = be32_to_cpu(*(unsigned int *)ci_prop->value); | ||
546 | |||
547 | coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); | ||
548 | if (!coproc) | ||
549 | return -ENOMEM; | ||
550 | |||
551 | coproc->chip_id = chip_id; | ||
552 | coproc->ct = ct; | ||
553 | coproc->ci = ci; | ||
554 | INIT_LIST_HEAD(&coproc->list); | ||
555 | list_add(&coproc->list, &nx842_coprocs); | ||
556 | |||
557 | pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); | ||
558 | |||
559 | if (!nx842_ct) | ||
560 | nx842_ct = ct; | ||
561 | else if (nx842_ct != ct) | ||
562 | pr_err("NX842 chip %d, CT %d != first found CT %d\n", | ||
563 | chip_id, ct, nx842_ct); | ||
564 | |||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static struct nx842_constraints nx842_powernv_constraints = { | ||
569 | .alignment = DDE_BUFFER_ALIGN, | ||
570 | .multiple = DDE_BUFFER_LAST_MULT, | ||
571 | .minimum = DDE_BUFFER_LAST_MULT, | ||
572 | .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE, | ||
573 | }; | ||
574 | |||
575 | static struct nx842_driver nx842_powernv_driver = { | ||
576 | .name = KBUILD_MODNAME, | ||
577 | .owner = THIS_MODULE, | ||
578 | .workmem_size = sizeof(struct nx842_workmem), | ||
579 | .constraints = &nx842_powernv_constraints, | ||
580 | .compress = nx842_powernv_compress, | ||
581 | .decompress = nx842_powernv_decompress, | ||
582 | }; | ||
583 | |||
584 | static __init int nx842_powernv_init(void) | ||
585 | { | ||
586 | struct device_node *dn; | ||
587 | |||
588 | /* verify workmem size/align restrictions */ | ||
589 | BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); | ||
590 | BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN); | ||
591 | BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN); | ||
592 | /* verify buffer size/align restrictions */ | ||
593 | BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN); | ||
594 | BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); | ||
595 | BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); | ||
596 | |||
597 | pr_info("loading\n"); | ||
598 | |||
599 | for_each_compatible_node(dn, NULL, "ibm,power-nx") | ||
600 | nx842_powernv_probe(dn); | ||
601 | |||
602 | if (!nx842_ct) { | ||
603 | pr_err("no coprocessors found\n"); | ||
604 | return -ENODEV; | ||
605 | } | ||
606 | |||
607 | if (!nx842_platform_driver_set(&nx842_powernv_driver)) { | ||
608 | struct nx842_coproc *coproc, *n; | ||
609 | |||
610 | list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { | ||
611 | list_del(&coproc->list); | ||
612 | kfree(coproc); | ||
613 | } | ||
614 | |||
615 | return -EEXIST; | ||
616 | } | ||
617 | |||
618 | pr_info("loaded\n"); | ||
619 | |||
620 | return 0; | ||
621 | } | ||
622 | module_init(nx842_powernv_init); | ||
623 | |||
624 | static void __exit nx842_powernv_exit(void) | ||
625 | { | ||
626 | struct nx842_coproc *coproc, *n; | ||
627 | |||
628 | nx842_platform_driver_unset(&nx842_powernv_driver); | ||
629 | |||
630 | list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { | ||
631 | list_del(&coproc->list); | ||
632 | kfree(coproc); | ||
633 | } | ||
634 | |||
635 | pr_info("unloaded\n"); | ||
636 | } | ||
637 | module_exit(nx842_powernv_exit); | ||
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c new file mode 100644 index 000000000000..3040a6091bf2 --- /dev/null +++ b/drivers/crypto/nx/nx-842-pseries.c | |||
@@ -0,0 +1,1140 @@ | |||
1 | /* | ||
2 | * Driver for IBM Power 842 compression accelerator | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2012 | ||
19 | * | ||
20 | * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | ||
21 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
22 | */ | ||
23 | |||
24 | #include <asm/vio.h> | ||
25 | |||
26 | #include "nx-842.h" | ||
27 | #include "nx_csbcpb.h" /* struct nx_csbcpb */ | ||
28 | |||
29 | MODULE_LICENSE("GPL"); | ||
30 | MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); | ||
31 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); | ||
32 | |||
33 | static struct nx842_constraints nx842_pseries_constraints = { | ||
34 | .alignment = DDE_BUFFER_ALIGN, | ||
35 | .multiple = DDE_BUFFER_LAST_MULT, | ||
36 | .minimum = DDE_BUFFER_LAST_MULT, | ||
37 | .maximum = PAGE_SIZE, /* dynamic, max_sync_size */ | ||
38 | }; | ||
39 | |||
40 | static int check_constraints(unsigned long buf, unsigned int *len, bool in) | ||
41 | { | ||
42 | if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) { | ||
43 | pr_debug("%s buffer 0x%lx not aligned to 0x%x\n", | ||
44 | in ? "input" : "output", buf, | ||
45 | nx842_pseries_constraints.alignment); | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | if (*len % nx842_pseries_constraints.multiple) { | ||
49 | pr_debug("%s buffer len 0x%x not multiple of 0x%x\n", | ||
50 | in ? "input" : "output", *len, | ||
51 | nx842_pseries_constraints.multiple); | ||
52 | if (in) | ||
53 | return -EINVAL; | ||
54 | *len = round_down(*len, nx842_pseries_constraints.multiple); | ||
55 | } | ||
56 | if (*len < nx842_pseries_constraints.minimum) { | ||
57 | pr_debug("%s buffer len 0x%x under minimum 0x%x\n", | ||
58 | in ? "input" : "output", *len, | ||
59 | nx842_pseries_constraints.minimum); | ||
60 | return -EINVAL; | ||
61 | } | ||
62 | if (*len > nx842_pseries_constraints.maximum) { | ||
63 | pr_debug("%s buffer len 0x%x over maximum 0x%x\n", | ||
64 | in ? "input" : "output", *len, | ||
65 | nx842_pseries_constraints.maximum); | ||
66 | if (in) | ||
67 | return -EINVAL; | ||
68 | *len = nx842_pseries_constraints.maximum; | ||
69 | } | ||
70 | return 0; | ||
71 | } | ||
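Alignment and minimum-length violations fail for both directions; for the multiple and maximum checks, input buffers are rejected while output lengths are quietly adjusted downward. A worked example, assuming .multiple is 8 (a guess for illustration; the real DDE_BUFFER_LAST_MULT value comes from nx-842.h):

/*
 *   output buffer, *len = 4093 -> trimmed: *len = round_down(4093, 8) = 4088
 *   input buffer,  *len = 4093 -> rejected with -EINVAL
 */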
72 | |||
73 | /* I assume we need to align the CSB? */ | ||
74 | #define WORKMEM_ALIGN (256) | ||
75 | |||
76 | struct nx842_workmem { | ||
77 | /* scatterlist */ | ||
78 | char slin[4096]; | ||
79 | char slout[4096]; | ||
80 | /* coprocessor status/parameter block */ | ||
81 | struct nx_csbcpb csbcpb; | ||
82 | |||
83 | char padding[WORKMEM_ALIGN]; | ||
84 | } __aligned(WORKMEM_ALIGN); | ||
85 | |||
86 | /* Macros for fields within nx_csbcpb */ | ||
87 | /* Check the valid bit within the csbcpb valid field */ | ||
88 | #define NX842_CSBCPB_VALID_CHK(x) (x & BIT_MASK(7)) | ||
89 | |||
90 | /* CE macros operate on the completion_extension field bits in the csbcpb. | ||
91 | * CE0 0=full completion, 1=partial completion | ||
92 | * CE1 0=CE0 indicates completion, 1=termination (output may be modified) | ||
93 | * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ | ||
94 | #define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) | ||
95 | #define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) | ||
96 | #define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) | ||
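The IBM documentation numbers these bits from the most-significant end, which is why CE0 maps to BIT_MASK(7). A worked decode of a hypothetical value:

/*
 * completion_extension = 0xa0 = 0b10100000
 *   CE0 (0x80) set   -> partial completion
 *   CE1 (0x40) clear -> CE0 indicates completion, not termination
 *   CE2 (0x20) set   -> processed_byte_count counts target (output) bytes
 */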
97 | |||
98 | /* The NX unit accepts data only on 4K page boundaries */ | ||
99 | #define NX842_HW_PAGE_SIZE (4096) | ||
100 | #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) | ||
101 | |||
102 | enum nx842_status { | ||
103 | UNAVAILABLE, | ||
104 | AVAILABLE | ||
105 | }; | ||
106 | |||
107 | struct ibm_nx842_counters { | ||
108 | atomic64_t comp_complete; | ||
109 | atomic64_t comp_failed; | ||
110 | atomic64_t decomp_complete; | ||
111 | atomic64_t decomp_failed; | ||
112 | atomic64_t swdecomp; | ||
113 | atomic64_t comp_times[32]; | ||
114 | atomic64_t decomp_times[32]; | ||
115 | }; | ||
116 | |||
117 | static struct nx842_devdata { | ||
118 | struct vio_dev *vdev; | ||
119 | struct device *dev; | ||
120 | struct ibm_nx842_counters *counters; | ||
121 | unsigned int max_sg_len; | ||
122 | unsigned int max_sync_size; | ||
123 | unsigned int max_sync_sg; | ||
124 | enum nx842_status status; | ||
125 | } __rcu *devdata; | ||
126 | static DEFINE_SPINLOCK(devdata_mutex); | ||
127 | |||
128 | #define NX842_COUNTER_INC(_x) \ | ||
129 | static inline void nx842_inc_##_x( \ | ||
130 | const struct nx842_devdata *dev) { \ | ||
131 | if (dev) \ | ||
132 | atomic64_inc(&dev->counters->_x); \ | ||
133 | } | ||
134 | NX842_COUNTER_INC(comp_complete); | ||
135 | NX842_COUNTER_INC(comp_failed); | ||
136 | NX842_COUNTER_INC(decomp_complete); | ||
137 | NX842_COUNTER_INC(decomp_failed); | ||
138 | NX842_COUNTER_INC(swdecomp); | ||
139 | |||
140 | #define NX842_HIST_SLOTS 16 | ||
141 | |||
142 | static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) | ||
143 | { | ||
144 | int bucket = fls(time); | ||
145 | |||
146 | if (bucket) | ||
147 | bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); | ||
148 | |||
149 | atomic64_inc(&times[bucket]); | ||
150 | } | ||
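fls() returns the 1-based index of the highest set bit, so bucket b collects latencies in the range [2^b, 2^(b+1)-1] microseconds, and everything at or above 2^15 us is clamped into the last slot. A worked trace:

/*
 *   time =       0 -> fls = 0  -> bucket 0  (shares the 0-1us slot)
 *   time =       1 -> fls = 1  -> bucket 0  (0-1us)
 *   time =     100 -> fls = 7  -> bucket 6  (64-127us)
 *   time = 1000000 -> fls = 20 -> clamped to bucket 15 (NX842_HIST_SLOTS - 1)
 */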
151 | |||
152 | /* NX unit operation flags */ | ||
153 | #define NX842_OP_COMPRESS 0x0 | ||
154 | #define NX842_OP_CRC 0x1 | ||
155 | #define NX842_OP_DECOMPRESS 0x2 | ||
156 | #define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) | ||
157 | #define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) | ||
158 | #define NX842_OP_ASYNC (1<<23) | ||
159 | #define NX842_OP_NOTIFY (1<<22) | ||
160 | #define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) | ||
161 | |||
162 | static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) | ||
163 | { | ||
164 | /* No use of DMA mappings within the driver. */ | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | struct nx842_slentry { | ||
169 | __be64 ptr; /* Real address (use __pa()) */ | ||
170 | __be64 len; | ||
171 | }; | ||
172 | |||
173 | /* pHyp scatterlist entry */ | ||
174 | struct nx842_scatterlist { | ||
175 | int entry_nr; /* number of slentries */ | ||
176 | struct nx842_slentry *entries; /* ptr to array of slentries */ | ||
177 | }; | ||
178 | |||
179 | /* Does not include sizeof(entry_nr) in the size */ | ||
180 | static inline unsigned long nx842_get_scatterlist_size( | ||
181 | struct nx842_scatterlist *sl) | ||
182 | { | ||
183 | return sl->entry_nr * sizeof(struct nx842_slentry); | ||
184 | } | ||
185 | |||
186 | static int nx842_build_scatterlist(unsigned long buf, int len, | ||
187 | struct nx842_scatterlist *sl) | ||
188 | { | ||
189 | unsigned long entrylen; | ||
190 | struct nx842_slentry *entry; | ||
191 | |||
192 | sl->entry_nr = 0; | ||
193 | |||
194 | entry = sl->entries; | ||
195 | while (len) { | ||
196 | entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf)); | ||
197 | entrylen = min_t(int, len, | ||
198 | LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE)); | ||
199 | entry->len = cpu_to_be64(entrylen); | ||
200 | |||
201 | len -= entrylen; | ||
202 | buf += entrylen; | ||
203 | |||
204 | sl->entry_nr++; | ||
205 | entry++; | ||
206 | } | ||
207 | |||
208 | return 0; | ||
209 | } | ||
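Each slentry is capped at the distance from buf to the next hardware page boundary, so no entry ever crosses a 4K page. LEN_ON_SIZE() comes from nx-842.h; a plausible shape for that macro, shown purely for illustration, plus a worked split:

/*
 * #define LEN_ON_SIZE(pa, size)	((size) - ((pa) & ((size) - 1)))
 *
 * buf = 0x1000f00, len = 0x600 produces two entries:
 *   { .ptr = pa(0x1000f00), .len = 0x100 }	(up to the 4K boundary)
 *   { .ptr = pa(0x1001000), .len = 0x500 }	(the remainder)
 */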
210 | |||
211 | static int nx842_validate_result(struct device *dev, | ||
212 | struct cop_status_block *csb) | ||
213 | { | ||
214 | /* The csb must be valid after returning from vio_h_cop_sync */ | ||
215 | if (!NX842_CSBCPB_VALID_CHK(csb->valid)) { | ||
216 | dev_err(dev, "%s: csbcpb not valid upon completion.\n", | ||
217 | __func__); | ||
218 | dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", | ||
219 | csb->valid, | ||
220 | csb->crb_seq_number, | ||
221 | csb->completion_code, | ||
222 | csb->completion_extension); | ||
223 | dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", | ||
224 | be32_to_cpu(csb->processed_byte_count), | ||
225 | (unsigned long)be64_to_cpu(csb->address)); | ||
226 | return -EIO; | ||
227 | } | ||
228 | |||
229 | /* Check return values from the hardware in the CSB */ | ||
230 | switch (csb->completion_code) { | ||
231 | case 0: /* Completed without error */ | ||
232 | break; | ||
233 | case 64: /* Target bytes > Source bytes during compression */ | ||
234 | case 13: /* Output buffer too small */ | ||
235 | dev_dbg(dev, "%s: Compression output larger than input\n", | ||
236 | __func__); | ||
237 | return -ENOSPC; | ||
238 | case 66: /* Input data contains an illegal template field */ | ||
239 | case 67: /* Template indicates data past the end of the input stream */ | ||
240 | dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", | ||
241 | __func__, csb->completion_code); | ||
242 | return -EINVAL; | ||
243 | default: | ||
244 | dev_dbg(dev, "%s: Unspecified error (code:%d)\n", | ||
245 | __func__, csb->completion_code); | ||
246 | return -EIO; | ||
247 | } | ||
248 | |||
249 | /* Hardware sanity check */ | ||
250 | if (!NX842_CSBCPB_CE2(csb->completion_extension)) { | ||
251 | dev_err(dev, "%s: No error returned by hardware, but " | ||
252 | "data returned is unusable, contact support.\n" | ||
253 | "(Additional info: csbcbp->processed bytes " | ||
254 | "does not specify processed bytes for the " | ||
255 | "target buffer.)\n", __func__); | ||
256 | return -EIO; | ||
257 | } | ||
258 | |||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * nx842_pseries_compress - Compress data using the 842 algorithm | ||
264 | * | ||
265 | * Compression provided by the NX842 coprocessor on IBM Power systems. | ||
266 | * The input buffer is compressed and the result is stored in the | ||
267 | * provided output buffer. | ||
268 | * | ||
269 | * Upon return from this function @outlen contains the length of the | ||
270 | * compressed data. If there is an error then @outlen will be 0 and an | ||
271 | * error will be specified by the return code from this function. | ||
272 | * | ||
273 | * @in: Pointer to input buffer | ||
274 | * @inlen: Length of input buffer | ||
275 | * @out: Pointer to output buffer | ||
276 | * @outlen: Length of output buffer | ||
277 | * @wmem: ptr to buffer for working memory, size determined by | ||
278 | * nx842_pseries_driver.workmem_size | ||
279 | * | ||
280 | * Returns: | ||
281 | * 0 Success, output of length @outlen stored in the buffer at @out | ||
282 | * -ENOMEM Unable to allocate internal buffers | ||
283 | * -ENOSPC Output buffer is too small | ||
284 | * -EIO Internal error | ||
285 | * -ENODEV Hardware unavailable | ||
286 | */ | ||
287 | static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, | ||
288 | unsigned char *out, unsigned int *outlen, | ||
289 | void *wmem) | ||
290 | { | ||
291 | struct nx842_devdata *local_devdata; | ||
292 | struct device *dev = NULL; | ||
293 | struct nx842_workmem *workmem; | ||
294 | struct nx842_scatterlist slin, slout; | ||
295 | struct nx_csbcpb *csbcpb; | ||
296 | int ret = 0, max_sync_size; | ||
297 | unsigned long inbuf, outbuf; | ||
298 | struct vio_pfo_op op = { | ||
299 | .done = NULL, | ||
300 | .handle = 0, | ||
301 | .timeout = 0, | ||
302 | }; | ||
303 | unsigned long start = get_tb(); | ||
304 | |||
305 | inbuf = (unsigned long)in; | ||
306 | if (check_constraints(inbuf, &inlen, true)) | ||
307 | return -EINVAL; | ||
308 | |||
309 | outbuf = (unsigned long)out; | ||
310 | if (check_constraints(outbuf, outlen, false)) | ||
311 | return -EINVAL; | ||
312 | |||
313 | rcu_read_lock(); | ||
314 | local_devdata = rcu_dereference(devdata); | ||
315 | if (!local_devdata || !local_devdata->dev) { | ||
316 | rcu_read_unlock(); | ||
317 | return -ENODEV; | ||
318 | } | ||
319 | max_sync_size = local_devdata->max_sync_size; | ||
320 | dev = local_devdata->dev; | ||
321 | |||
322 | /* Init scatterlist */ | ||
323 | workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); | ||
324 | slin.entries = (struct nx842_slentry *)workmem->slin; | ||
325 | slout.entries = (struct nx842_slentry *)workmem->slout; | ||
326 | |||
327 | /* Init operation */ | ||
328 | op.flags = NX842_OP_COMPRESS; | ||
329 | csbcpb = &workmem->csbcpb; | ||
330 | memset(csbcpb, 0, sizeof(*csbcpb)); | ||
331 | op.csbcpb = nx842_get_pa(csbcpb); | ||
332 | |||
333 | if ((inbuf & NX842_HW_PAGE_MASK) == | ||
334 | ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { | ||
335 | /* Create direct DDE */ | ||
336 | op.in = nx842_get_pa((void *)inbuf); | ||
337 | op.inlen = inlen; | ||
338 | } else { | ||
339 | /* Create indirect DDE (scatterlist) */ | ||
340 | nx842_build_scatterlist(inbuf, inlen, &slin); | ||
341 | op.in = nx842_get_pa(slin.entries); | ||
342 | op.inlen = -nx842_get_scatterlist_size(&slin); | ||
343 | } | ||
344 | |||
345 | if ((outbuf & NX842_HW_PAGE_MASK) == | ||
346 | ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) { | ||
347 | /* Create direct DDE */ | ||
348 | op.out = nx842_get_pa((void *)outbuf); | ||
349 | op.outlen = *outlen; | ||
350 | } else { | ||
351 | /* Create indirect DDE (scatterlist) */ | ||
352 | nx842_build_scatterlist(outbuf, *outlen, &slout); | ||
353 | op.out = nx842_get_pa(slout.entries); | ||
354 | op.outlen = -nx842_get_scatterlist_size(&slout); | ||
355 | } | ||
356 | |||
357 | dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n", | ||
358 | __func__, (unsigned long)op.in, (long)op.inlen, | ||
359 | (unsigned long)op.out, (long)op.outlen); | ||
360 | |||
361 | /* Send request to pHyp */ | ||
362 | ret = vio_h_cop_sync(local_devdata->vdev, &op); | ||
363 | |||
364 | /* Check for pHyp error */ | ||
365 | if (ret) { | ||
366 | dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", | ||
367 | __func__, ret, op.hcall_err); | ||
368 | ret = -EIO; | ||
369 | goto unlock; | ||
370 | } | ||
371 | |||
372 | /* Check for hardware error */ | ||
373 | ret = nx842_validate_result(dev, &csbcpb->csb); | ||
374 | if (ret) | ||
375 | goto unlock; | ||
376 | |||
377 | *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count); | ||
378 | dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen); | ||
379 | |||
380 | unlock: | ||
381 | if (ret) | ||
382 | nx842_inc_comp_failed(local_devdata); | ||
383 | else { | ||
384 | nx842_inc_comp_complete(local_devdata); | ||
385 | ibm_nx842_incr_hist(local_devdata->counters->comp_times, | ||
386 | (get_tb() - start) / tb_ticks_per_usec); | ||
387 | } | ||
388 | rcu_read_unlock(); | ||
389 | return ret; | ||
390 | } | ||
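The direct-vs-indirect DDE choice above (repeated in the decompress path below) reduces to asking whether the first and last bytes of the buffer share one 4K page frame. A standalone sketch of that predicate:

static inline bool nx842_fits_one_page(unsigned long buf, unsigned int len)
{
	/* assumes len > 0; same page iff both ends have the same page address */
	return (buf & NX842_HW_PAGE_MASK) ==
	       ((buf + len - 1) & NX842_HW_PAGE_MASK);
}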
391 | |||
392 | /** | ||
393 | * nx842_pseries_decompress - Decompress data using the 842 algorithm | ||
394 | * | ||
395 | * Decompression provided by the NX842 coprocessor on IBM Power systems. | ||
396 | * The input buffer is decompressed and the result is stored in the | ||
397 | * provided output buffer. The size allocated to the output buffer is | ||
398 | * provided by the caller of this function in @outlen. Upon return from | ||
399 | * this function @outlen contains the length of the decompressed data. | ||
400 | * If there is an error then @outlen will be 0 and an error will be | ||
401 | * specified by the return code from this function. | ||
402 | * | ||
403 | * @in: Pointer to input buffer | ||
404 | * @inlen: Length of input buffer | ||
405 | * @out: Pointer to output buffer | ||
406 | * @outlen: Length of output buffer | ||
407 | * @wmem: ptr to buffer for working memory, size determined by | ||
408 | * nx842_pseries_driver.workmem_size | ||
409 | * | ||
410 | * Returns: | ||
411 | * 0 Success, output of length @outlen stored in the buffer at @out | ||
412 | * -ENODEV Hardware decompression device is unavailable | ||
413 | * -ENOMEM Unable to allocate internal buffers | ||
414 | * -ENOSPC Output buffer is too small | ||
415 | * -EINVAL Bad input data encountered when attempting decompress | ||
416 | * -EIO Internal error | ||
417 | */ | ||
418 | static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, | ||
419 | unsigned char *out, unsigned int *outlen, | ||
420 | void *wmem) | ||
421 | { | ||
422 | struct nx842_devdata *local_devdata; | ||
423 | struct device *dev = NULL; | ||
424 | struct nx842_workmem *workmem; | ||
425 | struct nx842_scatterlist slin, slout; | ||
426 | struct nx_csbcpb *csbcpb; | ||
427 | int ret = 0, max_sync_size; | ||
428 | unsigned long inbuf, outbuf; | ||
429 | struct vio_pfo_op op = { | ||
430 | .done = NULL, | ||
431 | .handle = 0, | ||
432 | .timeout = 0, | ||
433 | }; | ||
434 | unsigned long start = get_tb(); | ||
435 | |||
436 | /* Ensure page alignment and size */ | ||
437 | inbuf = (unsigned long)in; | ||
438 | if (check_constraints(inbuf, &inlen, true)) | ||
439 | return -EINVAL; | ||
440 | |||
441 | outbuf = (unsigned long)out; | ||
442 | if (check_constraints(outbuf, outlen, false)) | ||
443 | return -EINVAL; | ||
444 | |||
445 | rcu_read_lock(); | ||
446 | local_devdata = rcu_dereference(devdata); | ||
447 | if (!local_devdata || !local_devdata->dev) { | ||
448 | rcu_read_unlock(); | ||
449 | return -ENODEV; | ||
450 | } | ||
451 | max_sync_size = local_devdata->max_sync_size; | ||
452 | dev = local_devdata->dev; | ||
453 | |||
454 | workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); | ||
455 | |||
456 | /* Init scatterlist */ | ||
457 | slin.entries = (struct nx842_slentry *)workmem->slin; | ||
458 | slout.entries = (struct nx842_slentry *)workmem->slout; | ||
459 | |||
460 | /* Init operation */ | ||
461 | op.flags = NX842_OP_DECOMPRESS; | ||
462 | csbcpb = &workmem->csbcpb; | ||
463 | memset(csbcpb, 0, sizeof(*csbcpb)); | ||
464 | op.csbcpb = nx842_get_pa(csbcpb); | ||
465 | |||
466 | if ((inbuf & NX842_HW_PAGE_MASK) == | ||
467 | ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { | ||
468 | /* Create direct DDE */ | ||
469 | op.in = nx842_get_pa((void *)inbuf); | ||
470 | op.inlen = inlen; | ||
471 | } else { | ||
472 | /* Create indirect DDE (scatterlist) */ | ||
473 | nx842_build_scatterlist(inbuf, inlen, &slin); | ||
474 | op.in = nx842_get_pa(slin.entries); | ||
475 | op.inlen = -nx842_get_scatterlist_size(&slin); | ||
476 | } | ||
477 | |||
478 | if ((outbuf & NX842_HW_PAGE_MASK) == | ||
479 | ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) { | ||
480 | /* Create direct DDE */ | ||
481 | op.out = nx842_get_pa((void *)outbuf); | ||
482 | op.outlen = *outlen; | ||
483 | } else { | ||
484 | /* Create indirect DDE (scatterlist) */ | ||
485 | nx842_build_scatterlist(outbuf, *outlen, &slout); | ||
486 | op.out = nx842_get_pa(slout.entries); | ||
487 | op.outlen = -nx842_get_scatterlist_size(&slout); | ||
488 | } | ||
489 | |||
490 | dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n", | ||
491 | __func__, (unsigned long)op.in, (long)op.inlen, | ||
492 | (unsigned long)op.out, (long)op.outlen); | ||
493 | |||
494 | /* Send request to pHyp */ | ||
495 | ret = vio_h_cop_sync(local_devdata->vdev, &op); | ||
496 | |||
497 | /* Check for pHyp error */ | ||
498 | if (ret) { | ||
499 | dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", | ||
500 | __func__, ret, op.hcall_err); | ||
501 | goto unlock; | ||
502 | } | ||
503 | |||
504 | /* Check for hardware error */ | ||
505 | ret = nx842_validate_result(dev, &csbcpb->csb); | ||
506 | if (ret) | ||
507 | goto unlock; | ||
508 | |||
509 | *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count); | ||
510 | |||
511 | unlock: | ||
512 | if (ret) | ||
513 | /* decompress fail */ | ||
514 | nx842_inc_decomp_failed(local_devdata); | ||
515 | else { | ||
516 | nx842_inc_decomp_complete(local_devdata); | ||
517 | ibm_nx842_incr_hist(local_devdata->counters->decomp_times, | ||
518 | (get_tb() - start) / tb_ticks_per_usec); | ||
519 | } | ||
520 | |||
521 | rcu_read_unlock(); | ||
522 | return ret; | ||
523 | } | ||
524 | |||
525 | /** | ||
526 | * nx842_OF_set_defaults -- Set default (disabled) values for devdata | ||
527 | * | ||
528 | * @devdata - struct nx842_devdata to update | ||
529 | * | ||
530 | * Returns: | ||
531 | * 0 on success | ||
532 | * -ENOENT if @devdata ptr is NULL | ||
533 | */ | ||
534 | static int nx842_OF_set_defaults(struct nx842_devdata *devdata) | ||
535 | { | ||
536 | if (devdata) { | ||
537 | devdata->max_sync_size = 0; | ||
538 | devdata->max_sync_sg = 0; | ||
539 | devdata->max_sg_len = 0; | ||
540 | devdata->status = UNAVAILABLE; | ||
541 | return 0; | ||
542 | } else | ||
543 | return -ENOENT; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * nx842_OF_upd_status -- Update the device info from OF status prop | ||
548 | * | ||
549 | * The status property indicates whether the accelerator is enabled. The | ||
550 | * device's presence in the OF tree shows that the hardware exists; the | ||
551 | * device is considered enabled only when the status value is 'okay'. | ||
552 | * Otherwise the device driver treats the device as disabled. | ||
553 | * | ||
554 | * @devdata - struct nx842_devdata to update | ||
555 | * @prop - struct property pointer containing the status value for the update | ||
556 | * | ||
557 | * Returns: | ||
558 | * 0 - Device is available | ||
559 | * -EINVAL - Device is not available | ||
560 | */ | ||
561 | static int nx842_OF_upd_status(struct nx842_devdata *devdata, | ||
562 | struct property *prop) { | ||
563 | int ret = 0; | ||
564 | const char *status = (const char *)prop->value; | ||
565 | |||
566 | if (!strncmp(status, "okay", (size_t)prop->length)) { | ||
567 | devdata->status = AVAILABLE; | ||
568 | } else { | ||
569 | dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", | ||
570 | __func__, status); | ||
571 | devdata->status = UNAVAILABLE; | ||
572 | } | ||
573 | |||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | /** | ||
578 | * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop | ||
579 | * | ||
580 | * Definition of the 'ibm,max-sg-len' OF property: | ||
581 | * This field indicates the maximum byte length of a scatter list | ||
582 | * for the platform facility. It is a single cell encoded as with encode-int. | ||
583 | * | ||
584 | * Example: | ||
585 | * # od -x ibm,max-sg-len | ||
586 | * 0000000 0000 0ff0 | ||
587 | * | ||
588 | * In this example, the maximum byte length of a scatter list is | ||
589 | * 0x0ff0 (4,080). | ||
590 | * | ||
591 | * @devdata - struct nx842_devdata to update | ||
592 | * @prop - struct property pointer containing the max-sg-len value for the update | ||
593 | * | ||
594 | * Returns: | ||
595 | * 0 on success | ||
596 | * -EINVAL on failure | ||
597 | */ | ||
598 | static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, | ||
599 | struct property *prop) { | ||
600 | int ret = 0; | ||
601 | const unsigned int maxsglen = of_read_number(prop->value, 1); | ||
602 | |||
603 | if (prop->length != sizeof(maxsglen)) { | ||
604 | dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); | ||
605 | dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, | ||
606 | prop->length, sizeof(maxsglen)); | ||
607 | ret = -EINVAL; | ||
608 | } else { | ||
609 | devdata->max_sg_len = min_t(unsigned int, | ||
610 | maxsglen, NX842_HW_PAGE_SIZE); | ||
611 | } | ||
612 | |||
613 | return ret; | ||
614 | } | ||
615 | |||
616 | /** | ||
617 | * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop | ||
618 | * | ||
619 | * Definition of the 'ibm,max-sync-cop' OF property: | ||
620 | * Two series of cells. The first series of cells represents the maximums | ||
621 | * that can be synchronously compressed. The second series of cells | ||
622 | * represents the maximums that can be synchronously decompressed. | ||
623 | * 1. The first cell in each series contains the count of the number of | ||
624 | * (data length, scatter list element count) pairs that follow, each | ||
625 | * being of the form | ||
626 | * a. One cell data byte length | ||
627 | * b. One cell total number of scatter list elements | ||
628 | * | ||
629 | * Example: | ||
630 | * # od -x ibm,max-sync-cop | ||
631 | * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 | ||
632 | * 0000020 0000 1000 0000 01fe | ||
633 | * | ||
634 | * In this example, compression supports 0x1000 (4,096) data byte length | ||
635 | * and 0x1fe (510) total scatter list elements. Decompression supports | ||
636 | * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list | ||
637 | * elements. | ||
638 | * | ||
639 | * @devdata - struct nx842_devdata to update | ||
640 | * @prop - struct property pointer containing the max-sync-cop value for the update | ||
641 | * | ||
642 | * Returns: | ||
643 | * 0 on success | ||
644 | * -EINVAL on failure | ||
645 | */ | ||
646 | static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, | ||
647 | struct property *prop) { | ||
648 | int ret = 0; | ||
649 | unsigned int comp_data_limit, decomp_data_limit; | ||
650 | unsigned int comp_sg_limit, decomp_sg_limit; | ||
651 | const struct maxsynccop_t { | ||
652 | __be32 comp_elements; | ||
653 | __be32 comp_data_limit; | ||
654 | __be32 comp_sg_limit; | ||
655 | __be32 decomp_elements; | ||
656 | __be32 decomp_data_limit; | ||
657 | __be32 decomp_sg_limit; | ||
658 | } *maxsynccop; | ||
659 | |||
660 | if (prop->length != sizeof(*maxsynccop)) { | ||
661 | dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); | ||
662 | dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, | ||
663 | sizeof(*maxsynccop)); | ||
664 | ret = -EINVAL; | ||
665 | goto out; | ||
666 | } | ||
667 | |||
668 | maxsynccop = (const struct maxsynccop_t *)prop->value; | ||
669 | comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit); | ||
670 | comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit); | ||
671 | decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit); | ||
672 | decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit); | ||
673 | |||
674 | /* Use one limit rather than separate limits for compression and | ||
675 | * decompression. Cap the value so as not to exceed the size that | ||
676 | * the header can support, and require at least the hardware page | ||
677 | * size (4K), as checked below */ | ||
678 | devdata->max_sync_size = min(comp_data_limit, decomp_data_limit); | ||
679 | |||
680 | devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, | ||
681 | 65536); | ||
682 | |||
683 | if (devdata->max_sync_size < 4096) { | ||
684 | dev_err(devdata->dev, "%s: hardware max data size (%u) is " | ||
685 | "less than the driver minimum, unable to use " | ||
686 | "the hardware device\n", | ||
687 | __func__, devdata->max_sync_size); | ||
688 | ret = -EINVAL; | ||
689 | goto out; | ||
690 | } | ||
691 | |||
692 | nx842_pseries_constraints.maximum = devdata->max_sync_size; | ||
693 | |||
694 | devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit); | ||
695 | if (devdata->max_sync_sg < 1) { | ||
696 | dev_err(devdata->dev, "%s: hardware max sg size (%u) is " | ||
697 | "less than the driver minimum, unable to use " | ||
698 | "the hardware device\n", | ||
699 | __func__, devdata->max_sync_sg); | ||
700 | ret = -EINVAL; | ||
701 | goto out; | ||
702 | } | ||
703 | |||
704 | out: | ||
705 | return ret; | ||
706 | } | ||
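Tying the od(1) example in the comment block back to struct maxsynccop_t, the six big-endian cells decode as follows:

/*
 *   comp_elements     = 0x00000001
 *   comp_data_limit   = 0x00001000	(4096)
 *   comp_sg_limit     = 0x000001fe	(510)
 *   decomp_elements   = 0x00000001
 *   decomp_data_limit = 0x00001000	(4096)
 *   decomp_sg_limit   = 0x000001fe	(510)
 *
 * so max_sync_size = min(4096, 4096) = 4096 (meeting the 4096 floor),
 * nx842_pseries_constraints.maximum becomes 4096, and
 * max_sync_sg = min(510, 510) = 510.
 */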
707 | |||
708 | /** | ||
709 | * | ||
710 | * nx842_OF_upd -- Handle OF properties updates for the device. | ||
711 | * | ||
712 | * Set all properties from the OF tree. Optionally, a new property | ||
713 | * can be provided by the @new_prop pointer to overwrite an existing value. | ||
714 | * The device will remain disabled until all values are valid, this function | ||
715 | * will return an error for updates unless all values are valid. | ||
716 | * | ||
717 | * @new_prop: If not NULL, this property is being updated. If NULL, update | ||
718 | * all properties from the current values in the OF tree. | ||
719 | * | ||
720 | * Returns: | ||
721 | * 0 - Success | ||
722 | * -ENOMEM - Could not allocate memory for new devdata structure | ||
723 | * -EINVAL - property value not found, new_prop is not a recognized | ||
724 | * property for the device or property value is not valid. | ||
725 | * -ENODEV - Device is not available | ||
726 | */ | ||
727 | static int nx842_OF_upd(struct property *new_prop) | ||
728 | { | ||
729 | struct nx842_devdata *old_devdata = NULL; | ||
730 | struct nx842_devdata *new_devdata = NULL; | ||
731 | struct device_node *of_node = NULL; | ||
732 | struct property *status = NULL; | ||
733 | struct property *maxsglen = NULL; | ||
734 | struct property *maxsyncop = NULL; | ||
735 | int ret = 0; | ||
736 | unsigned long flags; | ||
737 | |||
738 | spin_lock_irqsave(&devdata_mutex, flags); | ||
739 | old_devdata = rcu_dereference_check(devdata, | ||
740 | lockdep_is_held(&devdata_mutex)); | ||
741 | if (old_devdata) | ||
742 | of_node = old_devdata->dev->of_node; | ||
743 | |||
744 | if (!old_devdata || !of_node) { | ||
745 | pr_err("%s: device is not available\n", __func__); | ||
746 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
747 | return -ENODEV; | ||
748 | } | ||
749 | |||
750 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
751 | if (!new_devdata) { | ||
752 | dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
753 | ret = -ENOMEM; | ||
754 | goto error_out; | ||
755 | } | ||
756 | |||
757 | memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); | ||
758 | new_devdata->counters = old_devdata->counters; | ||
759 | |||
760 | /* Set ptrs for existing properties */ | ||
761 | status = of_find_property(of_node, "status", NULL); | ||
762 | maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); | ||
763 | maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); | ||
764 | if (!status || !maxsglen || !maxsyncop) { | ||
765 | dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); | ||
766 | ret = -EINVAL; | ||
767 | goto error_out; | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * If this is a property update, there are only certain properties that | ||
772 | * we care about. Bail if it isn't in the below list | ||
773 | */ | ||
774 | if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) && | ||
775 | strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) && | ||
776 | strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) | ||
777 | goto out; | ||
778 | |||
779 | /* Perform property updates */ | ||
780 | ret = nx842_OF_upd_status(new_devdata, status); | ||
781 | if (ret) | ||
782 | goto error_out; | ||
783 | |||
784 | ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); | ||
785 | if (ret) | ||
786 | goto error_out; | ||
787 | |||
788 | ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); | ||
789 | if (ret) | ||
790 | goto error_out; | ||
791 | |||
792 | out: | ||
793 | dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", | ||
794 | __func__, new_devdata->max_sync_size, | ||
795 | old_devdata->max_sync_size); | ||
796 | dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", | ||
797 | __func__, new_devdata->max_sync_sg, | ||
798 | old_devdata->max_sync_sg); | ||
799 | dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", | ||
800 | __func__, new_devdata->max_sg_len, | ||
801 | old_devdata->max_sg_len); | ||
802 | |||
803 | rcu_assign_pointer(devdata, new_devdata); | ||
804 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
805 | synchronize_rcu(); | ||
806 | dev_set_drvdata(new_devdata->dev, new_devdata); | ||
807 | kfree(old_devdata); | ||
808 | return 0; | ||
809 | |||
810 | error_out: | ||
811 | if (new_devdata) { | ||
812 | dev_info(old_devdata->dev, "%s: device disabled\n", __func__); | ||
813 | nx842_OF_set_defaults(new_devdata); | ||
814 | rcu_assign_pointer(devdata, new_devdata); | ||
815 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
816 | synchronize_rcu(); | ||
817 | dev_set_drvdata(new_devdata->dev, new_devdata); | ||
818 | kfree(old_devdata); | ||
819 | } else { | ||
820 | dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); | ||
821 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
822 | } | ||
823 | |||
824 | if (!ret) | ||
825 | ret = -EINVAL; | ||
826 | return ret; | ||
827 | } | ||
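Stripped of the property parsing, nx842_OF_upd() follows the classic RCU copy-update sequence: copy the shared structure under the updater lock, modify the copy, publish it with rcu_assign_pointer(), wait out pre-existing readers, then free the old copy. A self-contained sketch of that sequence, with illustrative names (struct cfg, example_cfg, example_lock) rather than this driver's:

struct cfg { int field; };
static struct cfg __rcu *example_cfg;	/* assumed initialized at probe */
static DEFINE_SPINLOCK(example_lock);

static int example_update(int value)
{
	struct cfg *old, *new;
	unsigned long flags;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	spin_lock_irqsave(&example_lock, flags);	/* exclude other updaters */
	old = rcu_dereference_protected(example_cfg,
					lockdep_is_held(&example_lock));
	*new = *old;					/* copy ...		  */
	new->field = value;				/* ... then modify	  */
	rcu_assign_pointer(example_cfg, new);		/* publish		  */
	spin_unlock_irqrestore(&example_lock, flags);

	synchronize_rcu();				/* drain existing readers */
	kfree(old);					/* now safe to reclaim	  */
	return 0;
}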
828 | |||
829 | /** | ||
830 | * nx842_OF_notifier - Process updates to OF properties for the device | ||
831 | * | ||
832 | * @np: notifier block | ||
833 | * @action: notifier action | ||
834 | * @data: struct of_reconfig_data pointer if action is | ||
835 | * OF_RECONFIG_UPDATE_PROPERTY | ||
836 | * | ||
837 | * Returns: | ||
838 | * NOTIFY_OK, always; errors from the underlying nx842_OF_upd() | ||
839 | * call are logged but not propagated through the notifier chain, | ||
840 | * so notifier_to_errno() will never report a failure here | ||
841 | */ | ||
842 | static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, | ||
843 | void *data) | ||
844 | { | ||
845 | struct of_reconfig_data *upd = data; | ||
846 | struct nx842_devdata *local_devdata; | ||
847 | struct device_node *node = NULL; | ||
848 | |||
849 | rcu_read_lock(); | ||
850 | local_devdata = rcu_dereference(devdata); | ||
851 | if (local_devdata) | ||
852 | node = local_devdata->dev->of_node; | ||
853 | |||
854 | if (local_devdata && | ||
855 | action == OF_RECONFIG_UPDATE_PROPERTY && | ||
856 | !strcmp(upd->dn->name, node->name)) { | ||
857 | rcu_read_unlock(); | ||
858 | nx842_OF_upd(upd->prop); | ||
859 | } else | ||
860 | rcu_read_unlock(); | ||
861 | |||
862 | return NOTIFY_OK; | ||
863 | } | ||
864 | |||
865 | static struct notifier_block nx842_of_nb = { | ||
866 | .notifier_call = nx842_OF_notifier, | ||
867 | }; | ||
868 | |||
869 | #define nx842_counter_read(_name) \ | ||
870 | static ssize_t nx842_##_name##_show(struct device *dev, \ | ||
871 | struct device_attribute *attr, \ | ||
872 | char *buf) { \ | ||
873 | struct nx842_devdata *local_devdata; \ | ||
874 | int p = 0; \ | ||
875 | rcu_read_lock(); \ | ||
876 | local_devdata = rcu_dereference(devdata); \ | ||
877 | if (local_devdata) \ | ||
878 | p = snprintf(buf, PAGE_SIZE, "%ld\n", \ | ||
879 | atomic64_read(&local_devdata->counters->_name)); \ | ||
880 | rcu_read_unlock(); \ | ||
881 | return p; \ | ||
882 | } | ||
883 | |||
884 | #define NX842DEV_COUNTER_ATTR_RO(_name) \ | ||
885 | nx842_counter_read(_name); \ | ||
886 | static struct device_attribute dev_attr_##_name = __ATTR(_name, \ | ||
887 | 0444, \ | ||
888 | nx842_##_name##_show,\ | ||
889 | NULL); | ||
890 | |||
891 | NX842DEV_COUNTER_ATTR_RO(comp_complete); | ||
892 | NX842DEV_COUNTER_ATTR_RO(comp_failed); | ||
893 | NX842DEV_COUNTER_ATTR_RO(decomp_complete); | ||
894 | NX842DEV_COUNTER_ATTR_RO(decomp_failed); | ||
895 | NX842DEV_COUNTER_ATTR_RO(swdecomp); | ||
896 | |||
897 | static ssize_t nx842_timehist_show(struct device *, | ||
898 | struct device_attribute *, char *); | ||
899 | |||
900 | static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, | ||
901 | nx842_timehist_show, NULL); | ||
902 | static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, | ||
903 | 0444, nx842_timehist_show, NULL); | ||
904 | |||
905 | static ssize_t nx842_timehist_show(struct device *dev, | ||
906 | struct device_attribute *attr, char *buf) { | ||
907 | char *p = buf; | ||
908 | struct nx842_devdata *local_devdata; | ||
909 | atomic64_t *times; | ||
910 | int bytes_remain = PAGE_SIZE; | ||
911 | int bytes; | ||
912 | int i; | ||
913 | |||
914 | rcu_read_lock(); | ||
915 | local_devdata = rcu_dereference(devdata); | ||
916 | if (!local_devdata) { | ||
917 | rcu_read_unlock(); | ||
918 | return 0; | ||
919 | } | ||
920 | |||
921 | if (attr == &dev_attr_comp_times) | ||
922 | times = local_devdata->counters->comp_times; | ||
923 | else if (attr == &dev_attr_decomp_times) | ||
924 | times = local_devdata->counters->decomp_times; | ||
925 | else { | ||
926 | rcu_read_unlock(); | ||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { | ||
931 | bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", | ||
932 | i ? (2<<(i-1)) : 0, (2<<i)-1, | ||
933 | atomic64_read(&times[i])); | ||
934 | bytes_remain -= bytes; | ||
935 | p += bytes; | ||
936 | } | ||
937 | /* The last bucket holds everything over | ||
938 | * 2<<(NX842_HIST_SLOTS - 2) us */ | ||
939 | bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n", | ||
940 | 2<<(NX842_HIST_SLOTS - 2), | ||
941 | atomic64_read(&times[(NX842_HIST_SLOTS - 1)])); | ||
942 | p += bytes; | ||
943 | |||
944 | rcu_read_unlock(); | ||
945 | return p - buf; | ||
946 | } | ||
947 | |||
948 | static struct attribute *nx842_sysfs_entries[] = { | ||
949 | &dev_attr_comp_complete.attr, | ||
950 | &dev_attr_comp_failed.attr, | ||
951 | &dev_attr_decomp_complete.attr, | ||
952 | &dev_attr_decomp_failed.attr, | ||
953 | &dev_attr_swdecomp.attr, | ||
954 | &dev_attr_comp_times.attr, | ||
955 | &dev_attr_decomp_times.attr, | ||
956 | NULL, | ||
957 | }; | ||
958 | |||
959 | static struct attribute_group nx842_attribute_group = { | ||
960 | .name = NULL, /* put in device directory */ | ||
961 | .attrs = nx842_sysfs_entries, | ||
962 | }; | ||
963 | |||
964 | static struct nx842_driver nx842_pseries_driver = { | ||
965 | .name = KBUILD_MODNAME, | ||
966 | .owner = THIS_MODULE, | ||
967 | .workmem_size = sizeof(struct nx842_workmem), | ||
968 | .constraints = &nx842_pseries_constraints, | ||
969 | .compress = nx842_pseries_compress, | ||
970 | .decompress = nx842_pseries_decompress, | ||
971 | }; | ||
972 | |||
973 | static int __init nx842_probe(struct vio_dev *viodev, | ||
974 | const struct vio_device_id *id) | ||
975 | { | ||
976 | struct nx842_devdata *old_devdata, *new_devdata = NULL; | ||
977 | unsigned long flags; | ||
978 | int ret = 0; | ||
979 | |||
980 | spin_lock_irqsave(&devdata_mutex, flags); | ||
981 | old_devdata = rcu_dereference_check(devdata, | ||
982 | lockdep_is_held(&devdata_mutex)); | ||
983 | |||
984 | if (old_devdata && old_devdata->vdev != NULL) { | ||
985 | dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); | ||
986 | ret = -1; | ||
987 | goto error_unlock; | ||
988 | } | ||
989 | |||
990 | dev_set_drvdata(&viodev->dev, NULL); | ||
991 | |||
992 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
993 | if (!new_devdata) { | ||
994 | dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
995 | ret = -ENOMEM; | ||
996 | goto error_unlock; | ||
997 | } | ||
998 | |||
999 | new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), | ||
1000 | GFP_NOFS); | ||
1001 | if (!new_devdata->counters) { | ||
1002 | dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); | ||
1003 | ret = -ENOMEM; | ||
1004 | goto error_unlock; | ||
1005 | } | ||
1006 | |||
1007 | new_devdata->vdev = viodev; | ||
1008 | new_devdata->dev = &viodev->dev; | ||
1009 | nx842_OF_set_defaults(new_devdata); | ||
1010 | |||
1011 | rcu_assign_pointer(devdata, new_devdata); | ||
1012 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1013 | synchronize_rcu(); | ||
1014 | kfree(old_devdata); | ||
1015 | |||
1016 | of_reconfig_notifier_register(&nx842_of_nb); | ||
1017 | |||
1018 | ret = nx842_OF_upd(NULL); | ||
1019 | if (ret && ret != -ENODEV) { | ||
1020 | dev_err(&viodev->dev, "could not parse device tree. %d\n", ret); | ||
1021 | ret = -1; | ||
1022 | goto error; | ||
1023 | } | ||
1024 | |||
1025 | rcu_read_lock(); | ||
1026 | dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); | ||
1027 | rcu_read_unlock(); | ||
1028 | |||
1029 | if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { | ||
1030 | dev_err(&viodev->dev, "could not create sysfs device attributes\n"); | ||
1031 | ret = -1; | ||
1032 | goto error; | ||
1033 | } | ||
1034 | |||
1035 | return 0; | ||
1036 | |||
1037 | error_unlock: | ||
1038 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1039 | if (new_devdata) | ||
1040 | kfree(new_devdata->counters); | ||
1041 | kfree(new_devdata); | ||
1042 | error: | ||
1043 | return ret; | ||
1044 | } | ||
1045 | |||
1046 | static int __exit nx842_remove(struct vio_dev *viodev) | ||
1047 | { | ||
1048 | struct nx842_devdata *old_devdata; | ||
1049 | unsigned long flags; | ||
1050 | |||
1051 | pr_info("Removing IBM Power 842 compression device\n"); | ||
1052 | sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); | ||
1053 | |||
1054 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1055 | old_devdata = rcu_dereference_check(devdata, | ||
1056 | lockdep_is_held(&devdata_mutex)); | ||
1057 | of_reconfig_notifier_unregister(&nx842_of_nb); | ||
1058 | RCU_INIT_POINTER(devdata, NULL); | ||
1059 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1060 | synchronize_rcu(); | ||
1061 | dev_set_drvdata(&viodev->dev, NULL); | ||
1062 | if (old_devdata) | ||
1063 | kfree(old_devdata->counters); | ||
1064 | kfree(old_devdata); | ||
1065 | |||
1066 | return 0; | ||
1067 | } | ||
1068 | |||
1069 | static struct vio_device_id nx842_vio_driver_ids[] = { | ||
1070 | {"ibm,compression-v1", "ibm,compression"}, | ||
1071 | {"", ""}, | ||
1072 | }; | ||
1073 | |||
1074 | static struct vio_driver nx842_vio_driver = { | ||
1075 | .name = KBUILD_MODNAME, | ||
1076 | .probe = nx842_probe, | ||
1077 | .remove = __exit_p(nx842_remove), | ||
1078 | .get_desired_dma = nx842_get_desired_dma, | ||
1079 | .id_table = nx842_vio_driver_ids, | ||
1080 | }; | ||
1081 | |||
1082 | static int __init nx842_init(void) | ||
1083 | { | ||
1084 | struct nx842_devdata *new_devdata; | ||
1085 | int ret; | ||
1086 | |||
1087 | pr_info("Registering IBM Power 842 compression driver\n"); | ||
1088 | |||
1089 | if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) | ||
1090 | return -ENODEV; | ||
1091 | |||
1092 | RCU_INIT_POINTER(devdata, NULL); | ||
1093 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); | ||
1094 | if (!new_devdata) { | ||
1095 | pr_err("Could not allocate memory for device data\n"); | ||
1096 | return -ENOMEM; | ||
1097 | } | ||
1098 | new_devdata->status = UNAVAILABLE; | ||
1099 | RCU_INIT_POINTER(devdata, new_devdata); | ||
1100 | |||
1101 | ret = vio_register_driver(&nx842_vio_driver); | ||
1102 | if (ret) { | ||
1103 | pr_err("Could not register VIO driver %d\n", ret); | ||
1104 | |||
1105 | kfree(new_devdata); | ||
1106 | return ret; | ||
1107 | } | ||
1108 | |||
1109 | if (!nx842_platform_driver_set(&nx842_pseries_driver)) { | ||
1110 | vio_unregister_driver(&nx842_vio_driver); | ||
1111 | kfree(new_devdata); | ||
1112 | return -EEXIST; | ||
1113 | } | ||
1114 | |||
1115 | return 0; | ||
1116 | } | ||
1117 | |||
1118 | module_init(nx842_init); | ||
1119 | |||
1120 | static void __exit nx842_exit(void) | ||
1121 | { | ||
1122 | struct nx842_devdata *old_devdata; | ||
1123 | unsigned long flags; | ||
1124 | |||
1125 | pr_info("Exiting IBM Power 842 compression driver\n"); | ||
1126 | nx842_platform_driver_unset(&nx842_pseries_driver); | ||
1127 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1128 | old_devdata = rcu_dereference_check(devdata, | ||
1129 | lockdep_is_held(&devdata_mutex)); | ||
1130 | RCU_INIT_POINTER(devdata, NULL); | ||
1131 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1132 | synchronize_rcu(); | ||
1133 | if (old_devdata && old_devdata->dev) | ||
1134 | dev_set_drvdata(old_devdata->dev, NULL); | ||
1135 | kfree(old_devdata); | ||
1136 | vio_unregister_driver(&nx842_vio_driver); | ||
1137 | } | ||
1138 | |||
1139 | module_exit(nx842_exit); | ||
1140 | |||
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 887196e9b50c..6e5e0d60d0c8 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c | |||
@@ -1,5 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Driver for IBM Power 842 compression accelerator | 2 | * Driver frontend for IBM Power 842 compression accelerator |
3 | * | ||
4 | * Copyright (C) 2015 Dan Streetman, IBM Corp | ||
5 | * | ||
6 | * Designer of the Power data compression engine: | ||
7 | * Bulent Abali <abali@us.ibm.com> | ||
3 | * | 8 | * |
4 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
@@ -10,1594 +15,89 @@ | |||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2012 | ||
19 | * | ||
20 | * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | ||
21 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
22 | */ | 18 | */ |
23 | 19 | ||
24 | #include <linux/kernel.h> | 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
25 | #include <linux/module.h> | ||
26 | #include <linux/nx842.h> | ||
27 | #include <linux/of.h> | ||
28 | #include <linux/slab.h> | ||
29 | |||
30 | #include <asm/page.h> | ||
31 | #include <asm/vio.h> | ||
32 | 21 | ||
33 | #include "nx_csbcpb.h" /* struct nx_csbcpb */ | 22 | #include "nx-842.h" |
34 | 23 | ||
35 | #define MODULE_NAME "nx-compress" | ||
36 | MODULE_LICENSE("GPL"); | 24 | MODULE_LICENSE("GPL"); |
37 | MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); | 25 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); |
38 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); | 26 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); |
39 | 27 | ||
40 | #define SHIFT_4K 12 | 28 | /** |
41 | #define SHIFT_64K 16 | 29 | * nx842_constraints |
42 | #define SIZE_4K (1UL << SHIFT_4K) | 30 | * |
43 | #define SIZE_64K (1UL << SHIFT_64K) | 31 | * This provides the driver's constraints. Different nx842 implementations |
44 | 32 | * may have varying requirements. The constraints are: | |
45 | /* IO buffer must be 128 byte aligned */ | 33 | * @alignment: All buffers should be aligned to this |
46 | #define IO_BUFFER_ALIGN 128 | 34 | * @multiple: All buffer lengths should be a multiple of this |
47 | 35 | * @minimum: Buffer lengths must not be less than this amount | |
48 | struct nx842_header { | 36 | * @maximum: Buffer lengths must not be more than this amount |
49 | int blocks_nr; /* number of compressed blocks */ | 37 | * |
50 | int offset; /* offset of the first block (from beginning of header) */ | 38 | * The constraints apply to all buffers and lengths, both input and output, |
51 | int sizes[0]; /* size of compressed blocks */ | 39 | * for both compression and decompression, except for the minimum which |
52 | }; | 40 | * only applies to compression input and decompression output; the |
53 | 41 | * compressed data can be less than the minimum constraint. It can be | |
54 | static inline int nx842_header_size(const struct nx842_header *hdr) | 42 | * assumed that compressed data will always adhere to the multiple |
55 | { | 43 | * constraint. |
56 | return sizeof(struct nx842_header) + | 44 | * |
57 | hdr->blocks_nr * sizeof(hdr->sizes[0]); | 45 | * The driver may succeed even if these constraints are violated; |
58 | } | 46 | * however the driver can return failure or suffer reduced performance |
59 | 47 | * if any constraint is not met. | |
60 | /* Macros for fields within nx_csbcpb */ | ||
61 | /* Check the valid bit within the csbcpb valid field */ | ||
62 | #define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) | ||
63 | |||
64 | /* CE macros operate on the completion_extension field bits in the csbcpb. | ||
65 | * CE0 0=full completion, 1=partial completion | ||
66 | * CE1 0=CE0 indicates completion, 1=termination (output may be modified) | ||
67 | * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ | ||
68 | #define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) | ||
69 | #define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) | ||
70 | #define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) | ||
71 | |||
72 | /* The NX unit accepts data only on 4K page boundaries */ | ||
73 | #define NX842_HW_PAGE_SHIFT SHIFT_4K | ||
74 | #define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) | ||
75 | #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) | ||
76 | |||
77 | enum nx842_status { | ||
78 | UNAVAILABLE, | ||
79 | AVAILABLE | ||
80 | }; | ||
81 | |||
82 | struct ibm_nx842_counters { | ||
83 | atomic64_t comp_complete; | ||
84 | atomic64_t comp_failed; | ||
85 | atomic64_t decomp_complete; | ||
86 | atomic64_t decomp_failed; | ||
87 | atomic64_t swdecomp; | ||
88 | atomic64_t comp_times[32]; | ||
89 | atomic64_t decomp_times[32]; | ||
90 | }; | ||
91 | |||
92 | static struct nx842_devdata { | ||
93 | struct vio_dev *vdev; | ||
94 | struct device *dev; | ||
95 | struct ibm_nx842_counters *counters; | ||
96 | unsigned int max_sg_len; | ||
97 | unsigned int max_sync_size; | ||
98 | unsigned int max_sync_sg; | ||
99 | enum nx842_status status; | ||
100 | } __rcu *devdata; | ||
101 | static DEFINE_SPINLOCK(devdata_mutex); | ||
102 | |||
103 | #define NX842_COUNTER_INC(_x) \ | ||
104 | static inline void nx842_inc_##_x( \ | ||
105 | const struct nx842_devdata *dev) { \ | ||
106 | if (dev) \ | ||
107 | atomic64_inc(&dev->counters->_x); \ | ||
108 | } | ||
109 | NX842_COUNTER_INC(comp_complete); | ||
110 | NX842_COUNTER_INC(comp_failed); | ||
111 | NX842_COUNTER_INC(decomp_complete); | ||
112 | NX842_COUNTER_INC(decomp_failed); | ||
113 | NX842_COUNTER_INC(swdecomp); | ||
114 | |||
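For reference, a hedged illustration of what the NX842_COUNTER_INC macro above generates: the comp_failed instance expands to an inline helper equivalent to this open-coded form (all names are taken from the macro itself).

/* equivalent open-coded expansion of NX842_COUNTER_INC(comp_failed) */
static inline void nx842_inc_comp_failed(const struct nx842_devdata *dev)
{
	if (dev)
		atomic64_inc(&dev->counters->comp_failed);
}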
115 | #define NX842_HIST_SLOTS 16 | ||
116 | |||
117 | static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) | ||
118 | { | ||
119 | int bucket = fls(time); | ||
120 | |||
121 | if (bucket) | ||
122 | bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); | ||
123 | |||
124 | atomic64_inc(×[bucket]); | ||
125 | } | ||
126 | |||
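A hedged worked example of the bucket math in ibm_nx842_incr_hist() above, assuming a 5 microsecond operation:

unsigned int time = 5;			/* elapsed time in microseconds */
int bucket = fls(time);			/* fls(5) == 3 (highest set bit) */

if (bucket)
	bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);	/* bucket == 2 */
/* nx842_timehist_show() below prints bucket 2 as the "4-7us" row:
 * lower bound 2<<(2-1) == 4, upper bound (2<<2)-1 == 7 */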
127 | /* NX unit operation flags */ | ||
128 | #define NX842_OP_COMPRESS 0x0 | ||
129 | #define NX842_OP_CRC 0x1 | ||
130 | #define NX842_OP_DECOMPRESS 0x2 | ||
131 | #define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) | ||
132 | #define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) | ||
133 | #define NX842_OP_ASYNC (1<<23) | ||
134 | #define NX842_OP_NOTIFY (1<<22) | ||
135 | #define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) | ||
136 | |||
137 | static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) | ||
138 | { | ||
139 | /* No use of DMA mappings within the driver. */ | ||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | struct nx842_slentry { | ||
144 | unsigned long ptr; /* Real address (use __pa()) */ | ||
145 | unsigned long len; | ||
146 | }; | ||
147 | |||
148 | /* pHyp scatterlist entry */ | ||
149 | struct nx842_scatterlist { | ||
150 | int entry_nr; /* number of slentries */ | ||
151 | struct nx842_slentry *entries; /* ptr to array of slentries */ | ||
152 | }; | ||
153 | |||
154 | /* Does not include sizeof(entry_nr) in the size */ | ||
155 | static inline unsigned long nx842_get_scatterlist_size( | ||
156 | struct nx842_scatterlist *sl) | ||
157 | { | ||
158 | return sl->entry_nr * sizeof(struct nx842_slentry); | ||
159 | } | ||
160 | |||
161 | static inline unsigned long nx842_get_pa(void *addr) | ||
162 | { | ||
163 | if (is_vmalloc_addr(addr)) | ||
164 | return page_to_phys(vmalloc_to_page(addr)) | ||
165 | + offset_in_page(addr); | ||
166 | else | ||
167 | return __pa(addr); | ||
168 | } | ||
169 | |||
170 | static int nx842_build_scatterlist(unsigned long buf, int len, | ||
171 | struct nx842_scatterlist *sl) | ||
172 | { | ||
173 | unsigned long nextpage; | ||
174 | struct nx842_slentry *entry; | ||
175 | |||
176 | sl->entry_nr = 0; | ||
177 | |||
178 | entry = sl->entries; | ||
179 | while (len) { | ||
180 | entry->ptr = nx842_get_pa((void *)buf); | ||
181 | nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); | ||
182 | if (nextpage < buf + len) { | ||
183 | /* we aren't at the end yet */ | ||
184 | if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) | ||
185 | /* aligned start: take a full hardware page */ | ||
186 | entry->len = NX842_HW_PAGE_SIZE; | ||
187 | else | ||
188 | /* unaligned start: take up to the next page boundary */ | ||
189 | entry->len = nextpage - buf; | ||
190 | } else { | ||
191 | /* at the end */ | ||
192 | entry->len = len; | ||
193 | } | ||
194 | |||
195 | len -= entry->len; | ||
196 | buf += entry->len; | ||
197 | sl->entry_nr++; | ||
198 | entry++; | ||
199 | } | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
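A minimal usage sketch for nx842_build_scatterlist() above, assuming a hypothetical 6000-byte buffer "data" that starts mid-page; each resulting entry stops at a 4K hardware page boundary, so 6000 bytes can span at most three pages:

/* entries[] sized for the worst case: 6000 bytes across 3 pages */
struct nx842_slentry entries[3];
struct nx842_scatterlist sl = { .entries = entries };
unsigned long buf = (unsigned long)data;	/* "data" is an assumed buffer */

nx842_build_scatterlist(buf, 6000, &sl);
/* sl.entry_nr segments now cover [buf, buf + 6000), none crossing an
 * NX842_HW_PAGE_SIZE boundary; each entry->ptr holds a real address */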
204 | /* | ||
205 | * Working memory for software decompression | ||
206 | */ | ||
207 | struct sw842_fifo { | ||
208 | union { | ||
209 | char f8[256][8]; | ||
210 | char f4[512][4]; | ||
211 | }; | ||
212 | char f2[256][2]; | ||
213 | unsigned char f84_full; | ||
214 | unsigned char f2_full; | ||
215 | unsigned char f8_count; | ||
216 | unsigned char f2_count; | ||
217 | unsigned int f4_count; | ||
218 | }; | ||
219 | |||
220 | /* | ||
221 | * Working memory for crypto API | ||
222 | */ | 48 | */ |
223 | struct nx842_workmem { | 49 | int nx842_constraints(struct nx842_constraints *c) |
224 | char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ | ||
225 | union { | ||
226 | /* hardware working memory */ | ||
227 | struct { | ||
228 | /* scatterlist */ | ||
229 | char slin[SIZE_4K]; | ||
230 | char slout[SIZE_4K]; | ||
231 | /* coprocessor status/parameter block */ | ||
232 | struct nx_csbcpb csbcpb; | ||
233 | }; | ||
234 | /* software working memory */ | ||
235 | struct sw842_fifo swfifo; /* software decompression fifo */ | ||
236 | }; | ||
237 | }; | ||
238 | |||
239 | int nx842_get_workmem_size(void) | ||
240 | { | ||
241 | return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(nx842_get_workmem_size); | ||
244 | |||
245 | int nx842_get_workmem_size_aligned(void) | ||
246 | { | ||
247 | return sizeof(struct nx842_workmem); | ||
248 | } | ||
249 | EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); | ||
250 | |||
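A hedged allocation sketch for the working-memory API above (left column): nx842_get_workmem_size() already includes NX842_HW_PAGE_SIZE of slack, so the driver can align the area internally and the caller needs no special alignment.

void *wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);

if (!wmem)
	return -ENOMEM;		/* in an assumed caller returning int */
/* pass wmem as the last argument of nx842_compress()/nx842_decompress();
 * the driver ALIGN()s it up to NX842_HW_PAGE_SIZE internally */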
251 | static int nx842_validate_result(struct device *dev, | ||
252 | struct cop_status_block *csb) | ||
253 | { | 50 | { |
254 | /* The csb must be valid after returning from vio_h_cop_sync */ | 51 | memcpy(c, nx842_platform_driver()->constraints, sizeof(*c)); |
255 | if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { | ||
256 | dev_err(dev, "%s: csbcpb not valid upon completion.\n", | ||
257 | __func__); | ||
258 | dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", | ||
259 | csb->valid, | ||
260 | csb->crb_seq_number, | ||
261 | csb->completion_code, | ||
262 | csb->completion_extension); | ||
263 | dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", | ||
264 | csb->processed_byte_count, | ||
265 | (unsigned long)csb->address); | ||
266 | return -EIO; | ||
267 | } | ||
268 | |||
269 | /* Check return values from the hardware in the CSB */ | ||
270 | switch (csb->completion_code) { | ||
271 | case 0: /* Completed without error */ | ||
272 | break; | ||
273 | case 64: /* Target bytes > Source bytes during compression */ | ||
274 | case 13: /* Output buffer too small */ | ||
275 | dev_dbg(dev, "%s: Compression output larger than input\n", | ||
276 | __func__); | ||
277 | return -ENOSPC; | ||
278 | case 66: /* Input data contains an illegal template field */ | ||
279 | case 67: /* Template indicates data past the end of the input stream */ | ||
280 | dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", | ||
281 | __func__, csb->completion_code); | ||
282 | return -EINVAL; | ||
283 | default: | ||
284 | dev_dbg(dev, "%s: Unspecified error (code:%d)\n", | ||
285 | __func__, csb->completion_code); | ||
286 | return -EIO; | ||
287 | } | ||
288 | |||
289 | /* Hardware sanity check */ | ||
290 | if (!NX842_CSBCPB_CE2(csb->completion_extension)) { | ||
291 | dev_err(dev, "%s: No error returned by hardware, but " | ||
292 | "data returned is unusable, contact support.\n" | ||
293 | "(Additional info: csbcpb->processed bytes " | ||
294 | "does not specify processed bytes for the " | ||
295 | "target buffer.)\n", __func__); | ||
296 | return -EIO; | ||
297 | } | ||
298 | |||
299 | return 0; | 52 | return 0; |
300 | } | 53 | } |
54 | EXPORT_SYMBOL_GPL(nx842_constraints); | ||
301 | 55 | ||
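A hedged sketch of how a caller might honor the constraints documented above before submitting buffers; the four field names come from the kernel-doc, and the helper name itself is hypothetical:

static int nx842_check_buffer(const void *buf, unsigned int len)
{
	struct nx842_constraints c;
	int ret = nx842_constraints(&c);

	if (ret)
		return ret;
	if (!IS_ALIGNED((unsigned long)buf, c.alignment))
		return -EINVAL;		/* start address must be aligned */
	if (len % c.multiple)
		return -EINVAL;		/* length must be a multiple */
	if (len < c.minimum || len > c.maximum)
		return -EINVAL;		/* length must be within bounds */
	return 0;
}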
302 | /** | 56 | /** |
303 | * nx842_compress - Compress data using the 842 algorithm | 57 | * nx842_workmem_size |
304 | * | ||
305 | * Compression is provided by the NX842 coprocessor on IBM Power systems. | ||
306 | * The input buffer is compressed and the result is stored in the | ||
307 | * provided output buffer. | ||
308 | * | ||
309 | * Upon return from this function @outlen contains the length of the | ||
310 | * compressed data. If there is an error then @outlen will be 0 and an | ||
311 | * error will be specified by the return code from this function. | ||
312 | * | ||
313 | * @in: Pointer to input buffer, must be page aligned | ||
314 | * @inlen: Length of input buffer, must be PAGE_SIZE | ||
315 | * @out: Pointer to output buffer | ||
316 | * @outlen: Length of output buffer | ||
317 | * @wmem: ptr to buffer for working memory, size determined by | ||
318 | * nx842_get_workmem_size() | ||
319 | * | 58 | * |
320 | * Returns: | 59 | * Get the amount of working memory the driver requires. |
321 | * 0 Success, output of length @outlen stored in the buffer at @out | ||
322 | * -ENOMEM Unable to allocate internal buffers | ||
323 | * -ENOSPC Output buffer is too small | ||
324 | * -EMSGSIZE XXX Difficult to describe this limitation | ||
325 | * -EIO Internal error | ||
326 | * -ENODEV Hardware unavailable | ||
327 | */ | 60 | */ |
328 | int nx842_compress(const unsigned char *in, unsigned int inlen, | 61 | size_t nx842_workmem_size(void) |
329 | unsigned char *out, unsigned int *outlen, void *wmem) | ||
330 | { | 62 | { |
331 | struct nx842_header *hdr; | 63 | return nx842_platform_driver()->workmem_size; |
332 | struct nx842_devdata *local_devdata; | ||
333 | struct device *dev = NULL; | ||
334 | struct nx842_workmem *workmem; | ||
335 | struct nx842_scatterlist slin, slout; | ||
336 | struct nx_csbcpb *csbcpb; | ||
337 | int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; | ||
338 | unsigned long inbuf, outbuf, padding; | ||
339 | struct vio_pfo_op op = { | ||
340 | .done = NULL, | ||
341 | .handle = 0, | ||
342 | .timeout = 0, | ||
343 | }; | ||
344 | unsigned long start_time = get_tb(); | ||
345 | |||
346 | /* | ||
347 | * Make sure the input buffer is 64k page aligned. This is assumed, | ||
348 | * since this driver is designed for page compression only (for now), | ||
349 | * and it lets us use direct DDE(s) for the input because the | ||
350 | * alignment is guaranteed. | ||
351 | */ | ||
352 | inbuf = (unsigned long)in; | ||
353 | if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) | ||
354 | return -EINVAL; | ||
355 | |||
356 | rcu_read_lock(); | ||
357 | local_devdata = rcu_dereference(devdata); | ||
358 | if (!local_devdata || !local_devdata->dev) { | ||
359 | rcu_read_unlock(); | ||
360 | return -ENODEV; | ||
361 | } | ||
362 | max_sync_size = local_devdata->max_sync_size; | ||
363 | dev = local_devdata->dev; | ||
364 | |||
365 | /* Create the header */ | ||
366 | hdr = (struct nx842_header *)out; | ||
367 | hdr->blocks_nr = PAGE_SIZE / max_sync_size; | ||
368 | hdrsize = nx842_header_size(hdr); | ||
369 | outbuf = (unsigned long)out + hdrsize; | ||
370 | bytesleft = *outlen - hdrsize; | ||
371 | |||
372 | /* Init scatterlist */ | ||
373 | workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, | ||
374 | NX842_HW_PAGE_SIZE); | ||
375 | slin.entries = (struct nx842_slentry *)workmem->slin; | ||
376 | slout.entries = (struct nx842_slentry *)workmem->slout; | ||
377 | |||
378 | /* Init operation */ | ||
379 | op.flags = NX842_OP_COMPRESS; | ||
380 | csbcpb = &workmem->csbcpb; | ||
381 | memset(csbcpb, 0, sizeof(*csbcpb)); | ||
382 | op.csbcpb = nx842_get_pa(csbcpb); | ||
383 | op.out = nx842_get_pa(slout.entries); | ||
384 | |||
385 | for (i = 0; i < hdr->blocks_nr; i++) { | ||
386 | /* | ||
387 | * Aligning the output blocks to 128 bytes does waste space, | ||
388 | * but it prevents the need for bounce buffers and memory | ||
389 | * copies. It also simplifies the code a lot. In the worst | ||
390 | * case (64k page, 4k max_sync_size), you lose up to | ||
391 | * (128*16)/64k = ~3% the compression factor. For 64k | ||
392 | * max_sync_size, the loss would be at most 128/64k = ~0.2%. | ||
393 | */ | ||
394 | padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; | ||
395 | outbuf += padding; | ||
396 | bytesleft -= padding; | ||
397 | if (i == 0) | ||
398 | /* save offset into first block in header */ | ||
399 | hdr->offset = padding + hdrsize; | ||
400 | |||
401 | if (bytesleft <= 0) { | ||
402 | ret = -ENOSPC; | ||
403 | goto unlock; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * NOTE: If the default max_sync_size is changed from 4k | ||
408 | * to 64k, remove the "likely" case below, since a | ||
409 | * scatterlist will always be needed. | ||
410 | */ | ||
411 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { | ||
412 | /* Create direct DDE */ | ||
413 | op.in = nx842_get_pa((void *)inbuf); | ||
414 | op.inlen = max_sync_size; | ||
415 | |||
416 | } else { | ||
417 | /* Create indirect DDE (scatterlist) */ | ||
418 | nx842_build_scatterlist(inbuf, max_sync_size, &slin); | ||
419 | op.in = nx842_get_pa(slin.entries); | ||
420 | op.inlen = -nx842_get_scatterlist_size(&slin); | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect | ||
425 | * DDE is required for the outbuf. | ||
426 | * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must | ||
427 | * also be page aligned (1 in 128/4k=32 chance) in order | ||
428 | * to use a direct DDE. | ||
429 | * This is unlikely, just use an indirect DDE always. | ||
430 | */ | ||
431 | nx842_build_scatterlist(outbuf, | ||
432 | min(bytesleft, max_sync_size), &slout); | ||
433 | /* op.out set before loop */ | ||
434 | op.outlen = -nx842_get_scatterlist_size(&slout); | ||
435 | |||
436 | /* Send request to pHyp */ | ||
437 | ret = vio_h_cop_sync(local_devdata->vdev, &op); | ||
438 | |||
439 | /* Check for pHyp error */ | ||
440 | if (ret) { | ||
441 | dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", | ||
442 | __func__, ret, op.hcall_err); | ||
443 | ret = -EIO; | ||
444 | goto unlock; | ||
445 | } | ||
446 | |||
447 | /* Check for hardware error */ | ||
448 | ret = nx842_validate_result(dev, &csbcpb->csb); | ||
449 | if (ret && ret != -ENOSPC) | ||
450 | goto unlock; | ||
451 | |||
452 | /* Handle incompressible data */ | ||
453 | if (unlikely(ret == -ENOSPC)) { | ||
454 | if (bytesleft < max_sync_size) { | ||
455 | /* | ||
456 | * Not enough space left in the output buffer | ||
457 | * to store uncompressed block | ||
458 | */ | ||
459 | goto unlock; | ||
460 | } else { | ||
461 | /* Store incompressible block */ | ||
462 | memcpy((void *)outbuf, (void *)inbuf, | ||
463 | max_sync_size); | ||
464 | hdr->sizes[i] = -max_sync_size; | ||
465 | outbuf += max_sync_size; | ||
466 | bytesleft -= max_sync_size; | ||
467 | /* Reset ret, incompressible data handled */ | ||
468 | ret = 0; | ||
469 | } | ||
470 | } else { | ||
471 | /* Normal case, compression was successful */ | ||
472 | size = csbcpb->csb.processed_byte_count; | ||
473 | dev_dbg(dev, "%s: processed_bytes=%d\n", | ||
474 | __func__, size); | ||
475 | hdr->sizes[i] = size; | ||
476 | outbuf += size; | ||
477 | bytesleft -= size; | ||
478 | } | ||
479 | |||
480 | inbuf += max_sync_size; | ||
481 | } | ||
482 | |||
483 | *outlen = (unsigned int)(outbuf - (unsigned long)out); | ||
484 | |||
485 | unlock: | ||
486 | if (ret) | ||
487 | nx842_inc_comp_failed(local_devdata); | ||
488 | else { | ||
489 | nx842_inc_comp_complete(local_devdata); | ||
490 | ibm_nx842_incr_hist(local_devdata->counters->comp_times, | ||
491 | (get_tb() - start_time) / tb_ticks_per_usec); | ||
492 | } | ||
493 | rcu_read_unlock(); | ||
494 | return ret; | ||
495 | } | 64 | } |
496 | EXPORT_SYMBOL_GPL(nx842_compress); | 65 | EXPORT_SYMBOL_GPL(nx842_workmem_size); |
497 | |||
498 | static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, | ||
499 | const void *); | ||
500 | 66 | ||
501 | /** | 67 | int nx842_compress(const unsigned char *in, unsigned int ilen, |
502 | * nx842_decompress - Decompress data using the 842 algorithm | 68 | unsigned char *out, unsigned int *olen, void *wmem) |
503 | * | ||
504 | * Decompression is provided by the NX842 coprocessor on IBM Power systems. | ||
505 | * The input buffer is decompressed and the result is stored in the | ||
506 | * provided output buffer. The size allocated to the output buffer is | ||
507 | * provided by the caller of this function in @outlen. Upon return from | ||
508 | * this function @outlen contains the length of the decompressed data. | ||
509 | * If there is an error then @outlen will be 0 and an error will be | ||
510 | * specified by the return code from this function. | ||
511 | * | ||
512 | * @in: Pointer to input buffer, will use bounce buffer if not 128 byte | ||
513 | * aligned | ||
514 | * @inlen: Length of input buffer | ||
515 | * @out: Pointer to output buffer, must be page aligned | ||
516 | * @outlen: Length of output buffer, must be PAGE_SIZE | ||
517 | * @wmem: ptr to buffer for working memory, size determined by | ||
518 | * nx842_get_workmem_size() | ||
519 | * | ||
520 | * Returns: | ||
521 | * 0 Success, output of length @outlen stored in the buffer at @out | ||
522 | * -ENODEV Hardware decompression device is unavailable | ||
523 | * -ENOMEM Unable to allocate internal buffers | ||
524 | * -ENOSPC Output buffer is too small | ||
525 | * -EINVAL Bad input data encountered when attempting decompress | ||
526 | * -EIO Internal error | ||
527 | */ | ||
528 | int nx842_decompress(const unsigned char *in, unsigned int inlen, | ||
529 | unsigned char *out, unsigned int *outlen, void *wmem) | ||
530 | { | 69 | { |
531 | struct nx842_header *hdr; | 70 | return nx842_platform_driver()->compress(in, ilen, out, olen, wmem); |
532 | struct nx842_devdata *local_devdata; | ||
533 | struct device *dev = NULL; | ||
534 | struct nx842_workmem *workmem; | ||
535 | struct nx842_scatterlist slin, slout; | ||
536 | struct nx_csbcpb *csbcpb; | ||
537 | int ret = 0, i, size, max_sync_size; | ||
538 | unsigned long inbuf, outbuf; | ||
539 | struct vio_pfo_op op = { | ||
540 | .done = NULL, | ||
541 | .handle = 0, | ||
542 | .timeout = 0, | ||
543 | }; | ||
544 | unsigned long start_time = get_tb(); | ||
545 | |||
546 | /* Ensure page alignment and size */ | ||
547 | outbuf = (unsigned long)out; | ||
548 | if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) | ||
549 | return -EINVAL; | ||
550 | |||
551 | rcu_read_lock(); | ||
552 | local_devdata = rcu_dereference(devdata); | ||
553 | if (local_devdata) | ||
554 | dev = local_devdata->dev; | ||
555 | |||
556 | /* Get header */ | ||
557 | hdr = (struct nx842_header *)in; | ||
558 | |||
559 | workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, | ||
560 | NX842_HW_PAGE_SIZE); | ||
561 | |||
562 | inbuf = (unsigned long)in + hdr->offset; | ||
563 | if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { | ||
564 | /* Copy block(s) into bounce buffer for alignment */ | ||
565 | memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); | ||
566 | inbuf = (unsigned long)workmem->bounce; | ||
567 | } | ||
568 | |||
569 | /* Init scatterlist */ | ||
570 | slin.entries = (struct nx842_slentry *)workmem->slin; | ||
571 | slout.entries = (struct nx842_slentry *)workmem->slout; | ||
572 | |||
573 | /* Init operation */ | ||
574 | op.flags = NX842_OP_DECOMPRESS; | ||
575 | csbcpb = &workmem->csbcpb; | ||
576 | memset(csbcpb, 0, sizeof(*csbcpb)); | ||
577 | op.csbcpb = nx842_get_pa(csbcpb); | ||
578 | |||
579 | /* | ||
580 | * max_sync_size may have changed since compression, | ||
581 | * so we can't read it from the device info. We need | ||
582 | * to derive it from hdr->blocks_nr. | ||
583 | */ | ||
584 | max_sync_size = PAGE_SIZE / hdr->blocks_nr; | ||
585 | |||
586 | for (i = 0; i < hdr->blocks_nr; i++) { | ||
587 | /* Skip padding */ | ||
588 | inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); | ||
589 | |||
590 | if (hdr->sizes[i] < 0) { | ||
591 | /* Negative sizes indicate uncompressed data blocks */ | ||
592 | size = abs(hdr->sizes[i]); | ||
593 | memcpy((void *)outbuf, (void *)inbuf, size); | ||
594 | outbuf += size; | ||
595 | inbuf += size; | ||
596 | continue; | ||
597 | } | ||
598 | |||
599 | if (!dev) | ||
600 | goto sw; | ||
601 | |||
602 | /* | ||
603 | * The better the compression, the more likely the "likely" | ||
604 | * case becomes. | ||
605 | */ | ||
606 | if (likely((inbuf & NX842_HW_PAGE_MASK) == | ||
607 | ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { | ||
608 | /* Create direct DDE */ | ||
609 | op.in = nx842_get_pa((void *)inbuf); | ||
610 | op.inlen = hdr->sizes[i]; | ||
611 | } else { | ||
612 | /* Create indirect DDE (scatterlist) */ | ||
613 | nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); | ||
614 | op.in = nx842_get_pa(slin.entries); | ||
615 | op.inlen = -nx842_get_scatterlist_size(&slin); | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * NOTE: If the default max_sync_size is changed from 4k | ||
620 | * to 64k, remove the "likely" case below, since a | ||
621 | * scatterlist will always be needed. | ||
622 | */ | ||
623 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { | ||
624 | /* Create direct DDE */ | ||
625 | op.out = nx842_get_pa((void *)outbuf); | ||
626 | op.outlen = max_sync_size; | ||
627 | } else { | ||
628 | /* Create indirect DDE (scatterlist) */ | ||
629 | nx842_build_scatterlist(outbuf, max_sync_size, &slout); | ||
630 | op.out = nx842_get_pa(slout.entries); | ||
631 | op.outlen = -nx842_get_scatterlist_size(&slout); | ||
632 | } | ||
633 | |||
634 | /* Send request to pHyp */ | ||
635 | ret = vio_h_cop_sync(local_devdata->vdev, &op); | ||
636 | |||
637 | /* Check for pHyp error */ | ||
638 | if (ret) { | ||
639 | dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", | ||
640 | __func__, ret, op.hcall_err); | ||
641 | dev = NULL; | ||
642 | goto sw; | ||
643 | } | ||
644 | |||
645 | /* Check for hardware error */ | ||
646 | ret = nx842_validate_result(dev, &csbcpb->csb); | ||
647 | if (ret) { | ||
648 | dev = NULL; | ||
649 | goto sw; | ||
650 | } | ||
651 | |||
652 | /* HW decompression success */ | ||
653 | inbuf += hdr->sizes[i]; | ||
654 | outbuf += csbcpb->csb.processed_byte_count; | ||
655 | continue; | ||
656 | |||
657 | sw: | ||
658 | /* software decompression */ | ||
659 | size = max_sync_size; | ||
660 | ret = sw842_decompress( | ||
661 | (unsigned char *)inbuf, hdr->sizes[i], | ||
662 | (unsigned char *)outbuf, &size, wmem); | ||
663 | if (ret) | ||
664 | pr_debug("%s: sw842_decompress failed with %d\n", | ||
665 | __func__, ret); | ||
666 | |||
667 | if (ret) { | ||
668 | if (ret != -ENOSPC && ret != -EINVAL && | ||
669 | ret != -EMSGSIZE) | ||
670 | ret = -EIO; | ||
671 | goto unlock; | ||
672 | } | ||
673 | |||
674 | /* SW decompression success */ | ||
675 | inbuf += hdr->sizes[i]; | ||
676 | outbuf += size; | ||
677 | } | ||
678 | |||
679 | *outlen = (unsigned int)(outbuf - (unsigned long)out); | ||
680 | |||
681 | unlock: | ||
682 | if (ret) | ||
683 | /* decompress fail */ | ||
684 | nx842_inc_decomp_failed(local_devdata); | ||
685 | else { | ||
686 | if (!dev) | ||
687 | /* software decompress */ | ||
688 | nx842_inc_swdecomp(local_devdata); | ||
689 | nx842_inc_decomp_complete(local_devdata); | ||
690 | ibm_nx842_incr_hist(local_devdata->counters->decomp_times, | ||
691 | (get_tb() - start_time) / tb_ticks_per_usec); | ||
692 | } | ||
693 | |||
694 | rcu_read_unlock(); | ||
695 | return ret; | ||
696 | } | 71 | } |
697 | EXPORT_SYMBOL_GPL(nx842_decompress); | 72 | EXPORT_SYMBOL_GPL(nx842_compress); |
698 | 73 | ||
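A hedged round-trip sketch against the new frontend entry points just exported (right column); error handling is abbreviated, and "src" is an assumed page-aligned PAGE_SIZE source buffer:

unsigned int clen = PAGE_SIZE, dlen = PAGE_SIZE;
void *wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
unsigned char *dst = kmalloc(PAGE_SIZE, GFP_KERNEL);
unsigned char *chk = kmalloc(PAGE_SIZE, GFP_KERNEL);

if (wmem && dst && chk &&
    !nx842_compress(src, PAGE_SIZE, dst, &clen, wmem) &&
    !nx842_decompress(dst, clen, chk, &dlen, wmem))
	WARN_ON(dlen != PAGE_SIZE || memcmp(src, chk, PAGE_SIZE));
kfree(chk);
kfree(dst);
kfree(wmem);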
699 | /** | 74 | int nx842_decompress(const unsigned char *in, unsigned int ilen, |
700 | * nx842_OF_set_defaults -- Set default (disabled) values for devdata | 75 | unsigned char *out, unsigned int *olen, void *wmem) |
701 | * | ||
702 | * @devdata - struct nx842_devdata to update | ||
703 | * | ||
704 | * Returns: | ||
705 | * 0 on success | ||
706 | * -ENOENT if @devdata ptr is NULL | ||
707 | */ | ||
708 | static int nx842_OF_set_defaults(struct nx842_devdata *devdata) | ||
709 | { | 76 | { |
710 | if (devdata) { | 77 | return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem); |
711 | devdata->max_sync_size = 0; | ||
712 | devdata->max_sync_sg = 0; | ||
713 | devdata->max_sg_len = 0; | ||
714 | devdata->status = UNAVAILABLE; | ||
715 | return 0; | ||
716 | } else | ||
717 | return -ENOENT; | ||
718 | } | ||
719 | |||
720 | /** | ||
721 | * nx842_OF_upd_status -- Update the device info from OF status prop | ||
722 | * | ||
723 | * The status property indicates whether the accelerator is enabled. | ||
724 | * Presence of the device in the OF tree indicates that the hardware | ||
725 | * exists; the device is enabled only when the status value is | ||
726 | * 'okay'. Otherwise the device driver will be disabled. | ||
727 | * | ||
728 | * @devdata - struct nx842_devdata to update | ||
729 | * @prop - struct property pointer containing the status value for the update | ||
730 | * | ||
731 | * Returns: | ||
732 | * 0 - Device is available | ||
733 | * -EINVAL - Device is not available | ||
734 | */ | ||
735 | static int nx842_OF_upd_status(struct nx842_devdata *devdata, | ||
736 | struct property *prop) { | ||
737 | int ret = 0; | ||
738 | const char *status = (const char *)prop->value; | ||
739 | |||
740 | if (!strncmp(status, "okay", (size_t)prop->length)) { | ||
741 | devdata->status = AVAILABLE; | ||
742 | } else { | ||
743 | dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", | ||
744 | __func__, status); | ||
745 | devdata->status = UNAVAILABLE; | ||
746 | } | ||
747 | |||
748 | return ret; | ||
749 | } | ||
750 | |||
751 | /** | ||
752 | * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop | ||
753 | * | ||
754 | * Definition of the 'ibm,max-sg-len' OF property: | ||
755 | * This field indicates the maximum byte length of a scatter list | ||
756 | * for the platform facility. It is a single cell encoded as with encode-int. | ||
757 | * | ||
758 | * Example: | ||
759 | * # od -x ibm,max-sg-len | ||
760 | * 0000000 0000 0ff0 | ||
761 | * | ||
762 | * In this example, the maximum byte length of a scatter list is | ||
763 | * 0x0ff0 (4,080). | ||
764 | * | ||
765 | * @devdata - struct nx842_devdata to update | ||
766 | * @prop - struct property pointer containing the max-sg-len value for the update | ||
767 | * | ||
768 | * Returns: | ||
769 | * 0 on success | ||
770 | * -EINVAL on failure | ||
771 | */ | ||
772 | static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, | ||
773 | struct property *prop) { | ||
774 | int ret = 0; | ||
775 | const int *maxsglen = prop->value; | ||
776 | |||
777 | if (prop->length != sizeof(*maxsglen)) { | ||
778 | dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); | ||
779 | dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, | ||
780 | prop->length, sizeof(*maxsglen)); | ||
781 | ret = -EINVAL; | ||
782 | } else { | ||
783 | devdata->max_sg_len = (unsigned int)min(*maxsglen, | ||
784 | (int)NX842_HW_PAGE_SIZE); | ||
785 | } | ||
786 | |||
787 | return ret; | ||
788 | } | ||
789 | |||
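Since ibm,max-sg-len is a single cell encoded with encode-int, the same value could also be read with the generic OF helper; a hedged alternative sketch, reusing the of_node and devdata names from the surrounding code:

u32 max_sg_len;

if (!of_property_read_u32(of_node, "ibm,max-sg-len", &max_sg_len))
	devdata->max_sg_len = min_t(u32, max_sg_len, NX842_HW_PAGE_SIZE);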
790 | /** | ||
791 | * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop | ||
792 | * | ||
793 | * Definition of the 'ibm,max-sync-cop' OF property: | ||
794 | * Two series of cells. The first series of cells represents the maximums | ||
795 | * that can be synchronously compressed. The second series of cells | ||
796 | * represents the maximums that can be synchronously decompressed. | ||
797 | * 1. The first cell in each series contains the count of the | ||
798 | * (data byte length, scatter list element count) pairs that | ||
799 | * follow, each pair being of the form | ||
800 | * a. One cell: data byte length | ||
801 | * b. One cell: total number of scatter list elements | ||
802 | * | ||
803 | * Example: | ||
804 | * # od -x ibm,max-sync-cop | ||
805 | * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 | ||
806 | * 0000020 0000 1000 0000 01fe | ||
807 | * | ||
808 | * In this example, compression supports 0x1000 (4,096) data byte length | ||
809 | * and 0x1fe (510) total scatter list elements. Decompression supports | ||
810 | * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list | ||
811 | * elements. | ||
812 | * | ||
813 | * @devdata - struct nx842_devdata to update | ||
814 | * @prop - struct property pointer containing the maxsyncop value for the update | ||
815 | * | ||
816 | * Returns: | ||
817 | * 0 on success | ||
818 | * -EINVAL on failure | ||
819 | */ | ||
820 | static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, | ||
821 | struct property *prop) { | ||
822 | int ret = 0; | ||
823 | const struct maxsynccop_t { | ||
824 | int comp_elements; | ||
825 | int comp_data_limit; | ||
826 | int comp_sg_limit; | ||
827 | int decomp_elements; | ||
828 | int decomp_data_limit; | ||
829 | int decomp_sg_limit; | ||
830 | } *maxsynccop; | ||
831 | |||
832 | if (prop->length != sizeof(*maxsynccop)) { | ||
833 | dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); | ||
834 | dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, | ||
835 | sizeof(*maxsynccop)); | ||
836 | ret = -EINVAL; | ||
837 | goto out; | ||
838 | } | ||
839 | |||
840 | maxsynccop = (const struct maxsynccop_t *)prop->value; | ||
841 | |||
842 | /* Use one limit rather than separate limits for compression and | ||
843 | * decompression. Set a maximum for this so as not to exceed the | ||
844 | * size that the header can support and round the value down to | ||
845 | * the hardware page size (4K) */ | ||
846 | devdata->max_sync_size = | ||
847 | (unsigned int)min(maxsynccop->comp_data_limit, | ||
848 | maxsynccop->decomp_data_limit); | ||
849 | |||
850 | devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, | ||
851 | SIZE_64K); | ||
852 | |||
853 | if (devdata->max_sync_size < SIZE_4K) { | ||
854 | dev_err(devdata->dev, "%s: hardware max data size (%u) is " | ||
855 | "less than the driver minimum, unable to use " | ||
856 | "the hardware device\n", | ||
857 | __func__, devdata->max_sync_size); | ||
858 | ret = -EINVAL; | ||
859 | goto out; | ||
860 | } | ||
861 | |||
862 | devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, | ||
863 | maxsynccop->decomp_sg_limit); | ||
864 | if (devdata->max_sync_sg < 1) { | ||
865 | dev_err(devdata->dev, "%s: hardware max sg size (%u) is " | ||
866 | "less than the driver minimum, unable to use " | ||
867 | "the hardware device\n", | ||
868 | __func__, devdata->max_sync_sg); | ||
869 | ret = -EINVAL; | ||
870 | goto out; | ||
871 | } | ||
872 | |||
873 | out: | ||
874 | return ret; | ||
875 | } | 78 | } |
79 | EXPORT_SYMBOL_GPL(nx842_decompress); | ||
876 | 80 | ||
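A hedged decoding of the od -x example above: the eight cells map onto struct maxsynccop_t in declaration order, so the single limit the driver derives works out as follows.

const struct maxsynccop_t *m = prop->value;
unsigned int sz;

/* example property: comp_data_limit == decomp_data_limit == 0x1000 */
sz = min(m->comp_data_limit, m->decomp_data_limit);	/* 4096 */
sz = min_t(unsigned int, sz, SIZE_64K);			/* still 4096 */
/* 4096 >= SIZE_4K, so the hardware device is usable */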
877 | /** | 81 | static __init int nx842_init(void) |
878 | * | ||
879 | * nx842_OF_upd -- Handle OF property updates for the device. | ||
880 | * | ||
881 | * Set all properties from the OF tree. Optionally, a new property | ||
882 | * can be provided by the @new_prop pointer to overwrite an existing value. | ||
883 | * The device will remain disabled until all values are valid; this function | ||
884 | * returns an error for updates unless all values are valid. | ||
885 | * | ||
886 | * @new_prop: If not NULL, this property is being updated. If NULL, update | ||
887 | * all properties from the current values in the OF tree. | ||
888 | * | ||
889 | * Returns: | ||
890 | * 0 - Success | ||
891 | * -ENOMEM - Could not allocate memory for new devdata structure | ||
892 | * -EINVAL - property value not found, new_prop is not a recognized | ||
893 | * property for the device or property value is not valid. | ||
894 | * -ENODEV - Device is not available | ||
895 | */ | ||
896 | static int nx842_OF_upd(struct property *new_prop) | ||
897 | { | 82 | { |
898 | struct nx842_devdata *old_devdata = NULL; | 83 | request_module("nx-compress-powernv"); |
899 | struct nx842_devdata *new_devdata = NULL; | 84 | request_module("nx-compress-pseries"); |
900 | struct device_node *of_node = NULL; | ||
901 | struct property *status = NULL; | ||
902 | struct property *maxsglen = NULL; | ||
903 | struct property *maxsyncop = NULL; | ||
904 | int ret = 0; | ||
905 | unsigned long flags; | ||
906 | |||
907 | spin_lock_irqsave(&devdata_mutex, flags); | ||
908 | old_devdata = rcu_dereference_check(devdata, | ||
909 | lockdep_is_held(&devdata_mutex)); | ||
910 | if (old_devdata) | ||
911 | of_node = old_devdata->dev->of_node; | ||
912 | 85 | ||
913 | if (!old_devdata || !of_node) { | 86 | /* we prevent loading if there's no platform driver, and we pin the
914 | pr_err("%s: device is not available\n", __func__); | 87 | * module that set it so it cannot unload; hence the functions above
915 | spin_unlock_irqrestore(&devdata_mutex, flags); | 88 | * need not check whether it is still set
916 | return -ENODEV; | ||
917 | } | ||
918 | |||
919 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
920 | if (!new_devdata) { | ||
921 | dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
922 | ret = -ENOMEM; | ||
923 | goto error_out; | ||
924 | } | ||
925 | |||
926 | memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); | ||
927 | new_devdata->counters = old_devdata->counters; | ||
928 | |||
929 | /* Set ptrs for existing properties */ | ||
930 | status = of_find_property(of_node, "status", NULL); | ||
931 | maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); | ||
932 | maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); | ||
933 | if (!status || !maxsglen || !maxsyncop) { | ||
934 | dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); | ||
935 | ret = -EINVAL; | ||
936 | goto error_out; | ||
937 | } | ||
938 | |||
939 | /* | ||
940 | * If this is a property update, there are only certain properties that | ||
941 | * we care about. Bail if it isn't in the below list | ||
942 | */ | 89 | */ |
943 | if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || | 90 | if (!nx842_platform_driver_get()) { |
944 | strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || | 91 | pr_err("no nx842 driver found.\n"); |
945 | strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) | 92 | return -ENODEV; |
946 | goto out; | ||
947 | |||
948 | /* Perform property updates */ | ||
949 | ret = nx842_OF_upd_status(new_devdata, status); | ||
950 | if (ret) | ||
951 | goto error_out; | ||
952 | |||
953 | ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); | ||
954 | if (ret) | ||
955 | goto error_out; | ||
956 | |||
957 | ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); | ||
958 | if (ret) | ||
959 | goto error_out; | ||
960 | |||
961 | out: | ||
962 | dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", | ||
963 | __func__, new_devdata->max_sync_size, | ||
964 | old_devdata->max_sync_size); | ||
965 | dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", | ||
966 | __func__, new_devdata->max_sync_sg, | ||
967 | old_devdata->max_sync_sg); | ||
968 | dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", | ||
969 | __func__, new_devdata->max_sg_len, | ||
970 | old_devdata->max_sg_len); | ||
971 | |||
972 | rcu_assign_pointer(devdata, new_devdata); | ||
973 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
974 | synchronize_rcu(); | ||
975 | dev_set_drvdata(new_devdata->dev, new_devdata); | ||
976 | kfree(old_devdata); | ||
977 | return 0; | ||
978 | |||
979 | error_out: | ||
980 | if (new_devdata) { | ||
981 | dev_info(old_devdata->dev, "%s: device disabled\n", __func__); | ||
982 | nx842_OF_set_defaults(new_devdata); | ||
983 | rcu_assign_pointer(devdata, new_devdata); | ||
984 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
985 | synchronize_rcu(); | ||
986 | dev_set_drvdata(new_devdata->dev, new_devdata); | ||
987 | kfree(old_devdata); | ||
988 | } else { | ||
989 | dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); | ||
990 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
991 | } | ||
992 | |||
993 | if (!ret) | ||
994 | ret = -EINVAL; | ||
995 | return ret; | ||
996 | } | ||
997 | |||
998 | /** | ||
999 | * nx842_OF_notifier - Process updates to OF properties for the device | ||
1000 | * | ||
1001 | * @np: notifier block | ||
1002 | * @action: notifier action | ||
1003 | * @data: struct of_reconfig_data pointer if action is | ||
1004 | * OF_RECONFIG_UPDATE_PROPERTY | ||
1005 | * | ||
1006 | * Returns: | ||
1007 | * NOTIFY_OK on success | ||
1008 | * NOTIFY_BAD encoded with error number on failure, use | ||
1009 | * notifier_to_errno() to decode this value | ||
1010 | */ | ||
1011 | static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, | ||
1012 | void *data) | ||
1013 | { | ||
1014 | struct of_reconfig_data *upd = data; | ||
1015 | struct nx842_devdata *local_devdata; | ||
1016 | struct device_node *node = NULL; | ||
1017 | |||
1018 | rcu_read_lock(); | ||
1019 | local_devdata = rcu_dereference(devdata); | ||
1020 | if (local_devdata) | ||
1021 | node = local_devdata->dev->of_node; | ||
1022 | |||
1023 | if (local_devdata && | ||
1024 | action == OF_RECONFIG_UPDATE_PROPERTY && | ||
1025 | !strcmp(upd->dn->name, node->name)) { | ||
1026 | rcu_read_unlock(); | ||
1027 | nx842_OF_upd(upd->prop); | ||
1028 | } else | ||
1029 | rcu_read_unlock(); | ||
1030 | |||
1031 | return NOTIFY_OK; | ||
1032 | } | ||
1033 | |||
1034 | static struct notifier_block nx842_of_nb = { | ||
1035 | .notifier_call = nx842_OF_notifier, | ||
1036 | }; | ||
1037 | |||
1038 | #define nx842_counter_read(_name) \ | ||
1039 | static ssize_t nx842_##_name##_show(struct device *dev, \ | ||
1040 | struct device_attribute *attr, \ | ||
1041 | char *buf) { \ | ||
1042 | struct nx842_devdata *local_devdata; \ | ||
1043 | int p = 0; \ | ||
1044 | rcu_read_lock(); \ | ||
1045 | local_devdata = rcu_dereference(devdata); \ | ||
1046 | if (local_devdata) \ | ||
1047 | p = snprintf(buf, PAGE_SIZE, "%ld\n", \ | ||
1048 | atomic64_read(&local_devdata->counters->_name)); \ | ||
1049 | rcu_read_unlock(); \ | ||
1050 | return p; \ | ||
1051 | } | ||
1052 | |||
1053 | #define NX842DEV_COUNTER_ATTR_RO(_name) \ | ||
1054 | nx842_counter_read(_name); \ | ||
1055 | static struct device_attribute dev_attr_##_name = __ATTR(_name, \ | ||
1056 | 0444, \ | ||
1057 | nx842_##_name##_show,\ | ||
1058 | NULL); | ||
1059 | |||
1060 | NX842DEV_COUNTER_ATTR_RO(comp_complete); | ||
1061 | NX842DEV_COUNTER_ATTR_RO(comp_failed); | ||
1062 | NX842DEV_COUNTER_ATTR_RO(decomp_complete); | ||
1063 | NX842DEV_COUNTER_ATTR_RO(decomp_failed); | ||
1064 | NX842DEV_COUNTER_ATTR_RO(swdecomp); | ||
1065 | |||
1066 | static ssize_t nx842_timehist_show(struct device *, | ||
1067 | struct device_attribute *, char *); | ||
1068 | |||
1069 | static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, | ||
1070 | nx842_timehist_show, NULL); | ||
1071 | static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, | ||
1072 | 0444, nx842_timehist_show, NULL); | ||
1073 | |||
1074 | static ssize_t nx842_timehist_show(struct device *dev, | ||
1075 | struct device_attribute *attr, char *buf) { | ||
1076 | char *p = buf; | ||
1077 | struct nx842_devdata *local_devdata; | ||
1078 | atomic64_t *times; | ||
1079 | int bytes_remain = PAGE_SIZE; | ||
1080 | int bytes; | ||
1081 | int i; | ||
1082 | |||
1083 | rcu_read_lock(); | ||
1084 | local_devdata = rcu_dereference(devdata); | ||
1085 | if (!local_devdata) { | ||
1086 | rcu_read_unlock(); | ||
1087 | return 0; | ||
1088 | } | ||
1089 | |||
1090 | if (attr == &dev_attr_comp_times) | ||
1091 | times = local_devdata->counters->comp_times; | ||
1092 | else if (attr == &dev_attr_decomp_times) | ||
1093 | times = local_devdata->counters->decomp_times; | ||
1094 | else { | ||
1095 | rcu_read_unlock(); | ||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { | ||
1100 | bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", | ||
1101 | i ? (2<<(i-1)) : 0, (2<<i)-1, | ||
1102 | atomic64_read(×[i])); | ||
1103 | bytes_remain -= bytes; | ||
1104 | p += bytes; | ||
1105 | } | ||
1106 | /* The last bucket holds everything over | ||
1107 | * 2<<(NX842_HIST_SLOTS - 2) us */ | ||
1108 | bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n", | ||
1109 | 2<<(NX842_HIST_SLOTS - 2), | ||
1110 | atomic64_read(×[(NX842_HIST_SLOTS - 1)])); | ||
1111 | p += bytes; | ||
1112 | |||
1113 | rcu_read_unlock(); | ||
1114 | return p - buf; | ||
1115 | } | ||
1116 | |||
1117 | static struct attribute *nx842_sysfs_entries[] = { | ||
1118 | &dev_attr_comp_complete.attr, | ||
1119 | &dev_attr_comp_failed.attr, | ||
1120 | &dev_attr_decomp_complete.attr, | ||
1121 | &dev_attr_decomp_failed.attr, | ||
1122 | &dev_attr_swdecomp.attr, | ||
1123 | &dev_attr_comp_times.attr, | ||
1124 | &dev_attr_decomp_times.attr, | ||
1125 | NULL, | ||
1126 | }; | ||
1127 | |||
1128 | static struct attribute_group nx842_attribute_group = { | ||
1129 | .name = NULL, /* put in device directory */ | ||
1130 | .attrs = nx842_sysfs_entries, | ||
1131 | }; | ||
1132 | |||
1133 | static int __init nx842_probe(struct vio_dev *viodev, | ||
1134 | const struct vio_device_id *id) | ||
1135 | { | ||
1136 | struct nx842_devdata *old_devdata, *new_devdata = NULL; | ||
1137 | unsigned long flags; | ||
1138 | int ret = 0; | ||
1139 | |||
1140 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1141 | old_devdata = rcu_dereference_check(devdata, | ||
1142 | lockdep_is_held(&devdata_mutex)); | ||
1143 | |||
1144 | if (old_devdata && old_devdata->vdev != NULL) { | ||
1145 | dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); | ||
1146 | ret = -1; | ||
1147 | goto error_unlock; | ||
1148 | } | ||
1149 | |||
1150 | dev_set_drvdata(&viodev->dev, NULL); | ||
1151 | |||
1152 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
1153 | if (!new_devdata) { | ||
1154 | dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
1155 | ret = -ENOMEM; | ||
1156 | goto error_unlock; | ||
1157 | } | ||
1158 | |||
1159 | new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), | ||
1160 | GFP_NOFS); | ||
1161 | if (!new_devdata->counters) { | ||
1162 | dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); | ||
1163 | ret = -ENOMEM; | ||
1164 | goto error_unlock; | ||
1165 | } | ||
1166 | |||
1167 | new_devdata->vdev = viodev; | ||
1168 | new_devdata->dev = &viodev->dev; | ||
1169 | nx842_OF_set_defaults(new_devdata); | ||
1170 | |||
1171 | rcu_assign_pointer(devdata, new_devdata); | ||
1172 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1173 | synchronize_rcu(); | ||
1174 | kfree(old_devdata); | ||
1175 | |||
1176 | of_reconfig_notifier_register(&nx842_of_nb); | ||
1177 | |||
1178 | ret = nx842_OF_upd(NULL); | ||
1179 | if (ret && ret != -ENODEV) { | ||
1180 | dev_err(&viodev->dev, "could not parse device tree. %d\n", ret); | ||
1181 | ret = -1; | ||
1182 | goto error; | ||
1183 | } | ||
1184 | |||
1185 | rcu_read_lock(); | ||
1186 | dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); | ||
1187 | rcu_read_unlock(); | ||
1188 | |||
1189 | if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { | ||
1190 | dev_err(&viodev->dev, "could not create sysfs device attributes\n"); | ||
1191 | ret = -1; | ||
1192 | goto error; | ||
1193 | } | 93 | } |
1194 | 94 | ||
1195 | return 0; | 95 | return 0; |
1196 | |||
1197 | error_unlock: | ||
1198 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1199 | if (new_devdata) | ||
1200 | kfree(new_devdata->counters); | ||
1201 | kfree(new_devdata); | ||
1202 | error: | ||
1203 | return ret; | ||
1204 | } | ||
1205 | |||
1206 | static int __exit nx842_remove(struct vio_dev *viodev) | ||
1207 | { | ||
1208 | struct nx842_devdata *old_devdata; | ||
1209 | unsigned long flags; | ||
1210 | |||
1211 | pr_info("Removing IBM Power 842 compression device\n"); | ||
1212 | sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); | ||
1213 | |||
1214 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1215 | old_devdata = rcu_dereference_check(devdata, | ||
1216 | lockdep_is_held(&devdata_mutex)); | ||
1217 | of_reconfig_notifier_unregister(&nx842_of_nb); | ||
1218 | RCU_INIT_POINTER(devdata, NULL); | ||
1219 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1220 | synchronize_rcu(); | ||
1221 | dev_set_drvdata(&viodev->dev, NULL); | ||
1222 | if (old_devdata) | ||
1223 | kfree(old_devdata->counters); | ||
1224 | kfree(old_devdata); | ||
1225 | return 0; | ||
1226 | } | ||
1227 | |||
1228 | static struct vio_device_id nx842_driver_ids[] = { | ||
1229 | {"ibm,compression-v1", "ibm,compression"}, | ||
1230 | {"", ""}, | ||
1231 | }; | ||
1232 | |||
1233 | static struct vio_driver nx842_driver = { | ||
1234 | .name = MODULE_NAME, | ||
1235 | .probe = nx842_probe, | ||
1236 | .remove = __exit_p(nx842_remove), | ||
1237 | .get_desired_dma = nx842_get_desired_dma, | ||
1238 | .id_table = nx842_driver_ids, | ||
1239 | }; | ||
1240 | |||
1241 | static int __init nx842_init(void) | ||
1242 | { | ||
1243 | struct nx842_devdata *new_devdata; | ||
1244 | pr_info("Registering IBM Power 842 compression driver\n"); | ||
1245 | |||
1246 | RCU_INIT_POINTER(devdata, NULL); | ||
1247 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); | ||
1248 | if (!new_devdata) { | ||
1249 | pr_err("Could not allocate memory for device data\n"); | ||
1250 | return -ENOMEM; | ||
1251 | } | ||
1252 | new_devdata->status = UNAVAILABLE; | ||
1253 | RCU_INIT_POINTER(devdata, new_devdata); | ||
1254 | |||
1255 | return vio_register_driver(&nx842_driver); | ||
1256 | } | 96 | } |
1257 | |||
1258 | module_init(nx842_init); | 97 | module_init(nx842_init); |
1259 | 98 | ||
1260 | static void __exit nx842_exit(void) | 99 | static void __exit nx842_exit(void) |
1261 | { | 100 | { |
1262 | struct nx842_devdata *old_devdata; | 101 | nx842_platform_driver_put(); |
1263 | unsigned long flags; | ||
1264 | |||
1265 | pr_info("Exiting IBM Power 842 compression driver\n"); | ||
1266 | spin_lock_irqsave(&devdata_mutex, flags); | ||
1267 | old_devdata = rcu_dereference_check(devdata, | ||
1268 | lockdep_is_held(&devdata_mutex)); | ||
1269 | RCU_INIT_POINTER(devdata, NULL); | ||
1270 | spin_unlock_irqrestore(&devdata_mutex, flags); | ||
1271 | synchronize_rcu(); | ||
1272 | if (old_devdata) | ||
1273 | dev_set_drvdata(old_devdata->dev, NULL); | ||
1274 | kfree(old_devdata); | ||
1275 | vio_unregister_driver(&nx842_driver); | ||
1276 | } | 102 | } |
1277 | |||
1278 | module_exit(nx842_exit); | 103 | module_exit(nx842_exit); |
1279 | |||
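A hedged sketch of the registration handshake the comment in the new nx842_init() describes: a backend module (such as the pseries driver above) claims the platform slot at init time, and the frontend pins whichever backend won via nx842_platform_driver_get(). The function names come from this diff; the init/exit wrappers are illustrative.

/* in a backend module's init path (cf. the pseries code above) */
static int __init nx842_backend_init(void)
{
	if (!nx842_platform_driver_set(&nx842_pseries_driver))
		return -EEXIST;	/* another backend already registered */
	return 0;
}

/* in the backend's exit path */
static void __exit nx842_backend_exit(void)
{
	nx842_platform_driver_unset(&nx842_pseries_driver);
}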
1280 | /********************************* | ||
1281 | * 842 software decompressor | ||
1282 | *********************************/ | ||
1283 | typedef int (*sw842_template_op)(const char **, int *, unsigned char **, | ||
1284 | struct sw842_fifo *); | ||
1285 | |||
1286 | static int sw842_data8(const char **, int *, unsigned char **, | ||
1287 | struct sw842_fifo *); | ||
1288 | static int sw842_data4(const char **, int *, unsigned char **, | ||
1289 | struct sw842_fifo *); | ||
1290 | static int sw842_data2(const char **, int *, unsigned char **, | ||
1291 | struct sw842_fifo *); | ||
1292 | static int sw842_ptr8(const char **, int *, unsigned char **, | ||
1293 | struct sw842_fifo *); | ||
1294 | static int sw842_ptr4(const char **, int *, unsigned char **, | ||
1295 | struct sw842_fifo *); | ||
1296 | static int sw842_ptr2(const char **, int *, unsigned char **, | ||
1297 | struct sw842_fifo *); | ||
1298 | |||
1299 | /* special templates */ | ||
1300 | #define SW842_TMPL_REPEAT 0x1B | ||
1301 | #define SW842_TMPL_ZEROS 0x1C | ||
1302 | #define SW842_TMPL_EOF 0x1E | ||
1303 | |||
1304 | static sw842_template_op sw842_tmpl_ops[26][4] = { | ||
1305 | { sw842_data8, NULL}, /* 0 (00000) */ | ||
1306 | { sw842_data4, sw842_data2, sw842_ptr2, NULL}, | ||
1307 | { sw842_data4, sw842_ptr2, sw842_data2, NULL}, | ||
1308 | { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, | ||
1309 | { sw842_data4, sw842_ptr4, NULL}, | ||
1310 | { sw842_data2, sw842_ptr2, sw842_data4, NULL}, | ||
1311 | { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, | ||
1312 | { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, | ||
1313 | { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,}, | ||
1314 | { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, | ||
1315 | { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ | ||
1316 | { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, | ||
1317 | { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, | ||
1318 | { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, | ||
1319 | { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, | ||
1320 | { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, | ||
1321 | { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, | ||
1322 | { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, | ||
1323 | { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, | ||
1324 | { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, | ||
1325 | { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ | ||
1326 | { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, | ||
1327 | { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, | ||
1328 | { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, | ||
1329 | { sw842_ptr4, sw842_ptr4, NULL}, | ||
1330 | { sw842_ptr8, NULL} | ||
1331 | }; | ||
1332 | |||
1333 | /* Software decompress helpers */ | ||
1334 | |||
1335 | static uint8_t sw842_get_byte(const char *buf, int bit) | ||
1336 | { | ||
1337 | uint8_t tmpl; | ||
1338 | uint16_t tmp; | ||
1339 | tmp = htons(*(uint16_t *)(buf)); | ||
1340 | tmp = (uint16_t)(tmp << bit); | ||
1341 | tmp = ntohs(tmp); | ||
1342 | memcpy(&tmpl, &tmp, 1); | ||
1343 | return tmpl; | ||
1344 | } | ||
1345 | |||
1346 | static uint8_t sw842_get_template(const char **buf, int *bit) | ||
1347 | { | ||
1348 | uint8_t byte; | ||
1349 | byte = sw842_get_byte(*buf, *bit); | ||
1350 | byte = byte >> 3; | ||
1351 | byte &= 0x1F; | ||
1352 | *buf += (*bit + 5) / 8; | ||
1353 | *bit = (*bit + 5) % 8; | ||
1354 | return byte; | ||
1355 | } | ||
1356 | |||
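A hedged worked example of the bit-stream cursor math in sw842_get_template() above, starting three bits into a byte; "stream" is an assumed input buffer:

const char *p = stream;
int bit = 3;
uint8_t tmpl = sw842_get_template(&p, &bit);

/* sw842_get_byte() left-aligned the field by shifting the 16 loaded
 * bits up by 3; >> 3 then keeps the top 5 bits as the template. The
 * cursor advanced 5 bits: p += (3 + 5) / 8 == 1, bit = (3 + 5) % 8 == 0 */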
1357 | /* repeat_count happens to be 5-bit too (like the template) */ | ||
1358 | static uint8_t sw842_get_repeat_count(const char **buf, int *bit) | ||
1359 | { | ||
1360 | uint8_t byte; | ||
1361 | byte = sw842_get_byte(*buf, *bit); | ||
1362 | byte = byte >> 2; | ||
1363 | byte &= 0x3F; | ||
1364 | *buf += (*bit + 6) / 8; | ||
1365 | *bit = (*bit + 6) % 8; | ||
1366 | return byte; | ||
1367 | } | ||
1368 | |||
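Both readers above share one bit-cursor idiom: load two bytes so a field can straddle a byte boundary, shift off the bits already consumed, mask down to the field width, and advance buf/bit by that width (5 bits for the template, 6 for the repeat count). A minimal userspace sketch of the idiom, with get_bits as a made-up stand-in for the pair of helpers:

#include <stdint.h>
#include <stdio.h>

/* Pull an n-bit big-endian field (n <= 8) starting at bit offset *bit
 * within *buf, then advance the cursor by n bits, the same walk as
 * sw842_get_template() (n = 5) and sw842_get_repeat_count() (n = 6). */
static uint8_t get_bits(const uint8_t **buf, int *bit, int n)
{
    /* load two bytes so the field may straddle a byte boundary */
    uint16_t window = ((uint16_t)(*buf)[0] << 8) | (*buf)[1];

    window = (uint16_t)(window << *bit);  /* drop consumed bits  */
    window >>= 16 - n;                    /* right-align field   */

    *buf += (*bit + n) / 8;               /* advance whole bytes */
    *bit  = (*bit + n) % 8;               /* sub-byte remainder  */
    return (uint8_t)window;
}

int main(void)
{
    const uint8_t stream[] = { 0xD2, 0x80 };   /* 11010 010100 00000 */
    const uint8_t *p = stream;
    int bit = 0;

    printf("template:     %u\n", get_bits(&p, &bit, 5));  /* 26 */
    printf("repeat count: %u\n", get_bits(&p, &bit, 6));  /* 20 */
    return 0;
}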
1369 | static uint8_t sw842_get_ptr2(const char **buf, int *bit) | ||
1370 | { | ||
1371 | uint8_t ptr; | ||
1372 | ptr = sw842_get_byte(*buf, *bit); | ||
1373 | (*buf)++; | ||
1374 | return ptr; | ||
1375 | } | ||
1376 | |||
1377 | static uint16_t sw842_get_ptr4(const char **buf, int *bit, | ||
1378 | struct sw842_fifo *fifo) | ||
1379 | { | ||
1380 | uint16_t ptr; | ||
1381 | ptr = htons(*(uint16_t *)(*buf)); | ||
1382 | ptr = (uint16_t)(ptr << *bit); | ||
1383 | ptr = ptr >> 7; | ||
1384 | ptr &= 0x01FF; | ||
1385 | *buf += (*bit + 9) / 8; | ||
1386 | *bit = (*bit + 9) % 8; | ||
1387 | return ptr; | ||
1388 | } | ||
1389 | |||
1390 | static uint8_t sw842_get_ptr8(const char **buf, int *bit, | ||
1391 | struct sw842_fifo *fifo) | ||
1392 | { | ||
1393 | return sw842_get_ptr2(buf, bit); | ||
1394 | } | ||
1395 | |||
1396 | /* Software decompress template ops */ | ||
1397 | |||
1398 | static int sw842_data8(const char **inbuf, int *inbit, | ||
1399 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1400 | { | ||
1401 | int ret; | ||
1402 | |||
1403 | ret = sw842_data4(inbuf, inbit, outbuf, fifo); | ||
1404 | if (ret) | ||
1405 | return ret; | ||
1406 | ret = sw842_data4(inbuf, inbit, outbuf, fifo); | ||
1407 | return ret; | ||
1408 | } | ||
1409 | |||
1410 | static int sw842_data4(const char **inbuf, int *inbit, | ||
1411 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1412 | { | ||
1413 | int ret; | ||
1414 | |||
1415 | ret = sw842_data2(inbuf, inbit, outbuf, fifo); | ||
1416 | if (ret) | ||
1417 | return ret; | ||
1418 | ret = sw842_data2(inbuf, inbit, outbuf, fifo); | ||
1419 | return ret; | ||
1420 | } | ||
1421 | |||
1422 | static int sw842_data2(const char **inbuf, int *inbit, | ||
1423 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1424 | { | ||
1425 | **outbuf = sw842_get_byte(*inbuf, *inbit); | ||
1426 | (*inbuf)++; | ||
1427 | (*outbuf)++; | ||
1428 | **outbuf = sw842_get_byte(*inbuf, *inbit); | ||
1429 | (*inbuf)++; | ||
1430 | (*outbuf)++; | ||
1431 | return 0; | ||
1432 | } | ||
1433 | |||
1434 | static int sw842_ptr8(const char **inbuf, int *inbit, | ||
1435 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1436 | { | ||
1437 | uint8_t ptr; | ||
1438 | ptr = sw842_get_ptr8(inbuf, inbit, fifo); | ||
1439 | if (!fifo->f84_full && (ptr >= fifo->f8_count)) | ||
1440 | return 1; | ||
1441 | memcpy(*outbuf, fifo->f8[ptr], 8); | ||
1442 | *outbuf += 8; | ||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | static int sw842_ptr4(const char **inbuf, int *inbit, | ||
1447 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1448 | { | ||
1449 | uint16_t ptr; | ||
1450 | ptr = sw842_get_ptr4(inbuf, inbit, fifo); | ||
1451 | if (!fifo->f84_full && (ptr >= fifo->f4_count)) | ||
1452 | return 1; | ||
1453 | memcpy(*outbuf, fifo->f4[ptr], 4); | ||
1454 | *outbuf += 4; | ||
1455 | return 0; | ||
1456 | } | ||
1457 | |||
1458 | static int sw842_ptr2(const char **inbuf, int *inbit, | ||
1459 | unsigned char **outbuf, struct sw842_fifo *fifo) | ||
1460 | { | ||
1461 | uint8_t ptr; | ||
1462 | ptr = sw842_get_ptr2(inbuf, inbit); | ||
1463 | if (!fifo->f2_full && (ptr >= fifo->f2_count)) | ||
1464 | return 1; | ||
1465 | memcpy(*outbuf, fifo->f2[ptr], 2); | ||
1466 | *outbuf += 2; | ||
1467 | return 0; | ||
1468 | } | ||
1469 | |||
1470 | static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) | ||
1471 | { | ||
1472 | unsigned char initial_f2count = fifo->f2_count; | ||
1473 | |||
1474 | memcpy(fifo->f8[fifo->f8_count], buf, 8); | ||
1475 | fifo->f4_count += 2; | ||
1476 | fifo->f8_count += 1; | ||
1477 | |||
1478 | if (!fifo->f84_full && fifo->f4_count >= 512) { | ||
1479 | fifo->f84_full = 1; | ||
1480 | fifo->f4_count /= 512; | ||
1481 | } | ||
1482 | |||
1483 | memcpy(fifo->f2[fifo->f2_count++], buf, 2); | ||
1484 | memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); | ||
1485 | memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); | ||
1486 | memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); | ||
1487 | if (fifo->f2_count < initial_f2count) | ||
1488 | fifo->f2_full = 1; | ||
1489 | } | ||
1490 | |||
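The f2_full test at the end of sw842_copy_to_fifo() leans on f2_count being an 8-bit counter: if the count is smaller after four increments than before them, the counter must have wrapped past 255, so the 256-entry two-byte fifo is full and old entries are being overwritten. A standalone sketch of that wraparound test (types and values here are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t count = 254;          /* pretend the fifo is nearly full */
    uint8_t before = count;
    int full = 0;
    int i;

    /* four pushes, as sw842_copy_to_fifo() does per 8-byte chunk */
    for (i = 0; i < 4; i++)
        count++;                  /* uint8_t silently wraps 255 -> 0 */

    if (count < before)           /* wrapped, so the ring is full */
        full = 1;

    printf("count=%u full=%d\n", count, full);  /* count=2 full=1 */
    return 0;
}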
1491 | static int sw842_decompress(const unsigned char *src, int srclen, | ||
1492 | unsigned char *dst, int *destlen, | ||
1493 | const void *wrkmem) | ||
1494 | { | ||
1495 | uint8_t tmpl; | ||
1496 | const char *inbuf; | ||
1497 | int inbit = 0; | ||
1498 | unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; | ||
1499 | const char *inbuf_end; | ||
1500 | sw842_template_op op; | ||
1501 | int opindex; | ||
1502 | int i, repeat_count; | ||
1503 | struct sw842_fifo *fifo; | ||
1504 | int ret = 0; | ||
1505 | |||
1506 | fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; | ||
1507 | memset(fifo, 0, sizeof(*fifo)); | ||
1508 | |||
1509 | origbuf = NULL; | ||
1510 | inbuf = src; | ||
1511 | inbuf_end = src + srclen; | ||
1512 | outbuf = dst; | ||
1513 | outbuf_end = dst + *destlen; | ||
1514 | |||
1515 | while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { | ||
1516 | if (inbuf >= inbuf_end) { | ||
1517 | ret = -EINVAL; | ||
1518 | goto out; | ||
1519 | } | ||
1520 | |||
1521 | opindex = 0; | ||
1522 | prevbuf = origbuf; | ||
1523 | origbuf = outbuf; | ||
1524 | switch (tmpl) { | ||
1525 | case SW842_TMPL_REPEAT: | ||
1526 | if (prevbuf == NULL) { | ||
1527 | ret = -EINVAL; | ||
1528 | goto out; | ||
1529 | } | ||
1530 | |||
1531 | repeat_count = sw842_get_repeat_count(&inbuf, | ||
1532 | &inbit) + 1; | ||
1533 | |||
1534 | /* Did the repeat count advance past the end of input */ | ||
1535 | if (inbuf > inbuf_end) { | ||
1536 | ret = -EINVAL; | ||
1537 | goto out; | ||
1538 | } | ||
1539 | |||
1540 | for (i = 0; i < repeat_count; i++) { | ||
1541 | /* Would this overflow the output buffer */ | ||
1542 | if ((outbuf + 8) > outbuf_end) { | ||
1543 | ret = -ENOSPC; | ||
1544 | goto out; | ||
1545 | } | ||
1546 | |||
1547 | memcpy(outbuf, prevbuf, 8); | ||
1548 | sw842_copy_to_fifo(outbuf, fifo); | ||
1549 | outbuf += 8; | ||
1550 | } | ||
1551 | break; | ||
1552 | |||
1553 | case SW842_TMPL_ZEROS: | ||
1554 | /* Would this overflow the output buffer */ | ||
1555 | if ((outbuf + 8) > outbuf_end) { | ||
1556 | ret = -ENOSPC; | ||
1557 | goto out; | ||
1558 | } | ||
1559 | |||
1560 | memset(outbuf, 0, 8); | ||
1561 | sw842_copy_to_fifo(outbuf, fifo); | ||
1562 | outbuf += 8; | ||
1563 | break; | ||
1564 | |||
1565 | default: | ||
1566 | if (tmpl > 25) { | ||
1567 | ret = -EINVAL; | ||
1568 | goto out; | ||
1569 | } | ||
1570 | |||
1571 | /* Does this go past the end of the input buffer */ | ||
1572 | if ((inbuf + 2) > inbuf_end) { | ||
1573 | ret = -EINVAL; | ||
1574 | goto out; | ||
1575 | } | ||
1576 | |||
1577 | /* Would this overflow the output buffer */ | ||
1578 | if ((outbuf + 8) > outbuf_end) { | ||
1579 | ret = -ENOSPC; | ||
1580 | goto out; | ||
1581 | } | ||
1582 | |||
1583 | while (opindex < 4 && | ||
1584 | (op = sw842_tmpl_ops[tmpl][opindex++]) | ||
1585 | != NULL) { | ||
1586 | ret = (*op)(&inbuf, &inbit, &outbuf, fifo); | ||
1587 | if (ret) { | ||
1588 | ret = -EINVAL; | ||
1589 | goto out; | ||
1590 | } | ||
1591 | sw842_copy_to_fifo(origbuf, fifo); | ||
1592 | } | ||
1593 | } | ||
1594 | } | ||
1595 | |||
1596 | out: | ||
1597 | if (!ret) | ||
1598 | *destlen = (unsigned int)(outbuf - dst); | ||
1599 | else | ||
1600 | *destlen = 0; | ||
1601 | |||
1602 | return ret; | ||
1603 | } | ||
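Note the calling convention of sw842_decompress(): *destlen carries the capacity of dst on entry and is overwritten with the bytes actually produced on success, or zeroed on error (-EINVAL for malformed input, -ENOSPC when the output would overflow). A toy stand-in that follows the same convention, purely to illustrate the contract (the "decompression" below is just a copy, not an 842 decoder):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in with sw842_decompress()'s in/out-length convention:
 * *destlen holds the capacity of dst on entry and the bytes produced
 * on success; it is zeroed on failure. */
static int toy_decompress(const unsigned char *src, int srclen,
                          unsigned char *dst, int *destlen)
{
    if (srclen > *destlen) {
        *destlen = 0;
        return -ENOSPC;           /* output buffer too small */
    }
    memcpy(dst, src, srclen);
    *destlen = srclen;
    return 0;
}

int main(void)
{
    unsigned char in[8] = "12345678", out[16];
    int outlen = sizeof(out);

    if (toy_decompress(in, sizeof(in), out, &outlen) == 0)
        printf("produced %d bytes\n", outlen);   /* 8 */
    return 0;
}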
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h new file mode 100644 index 000000000000..ac0ea79d0f8b --- /dev/null +++ b/drivers/crypto/nx/nx-842.h | |||
@@ -0,0 +1,144 @@ | |||
1 | |||
2 | #ifndef __NX_842_H__ | ||
3 | #define __NX_842_H__ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/sw842.h> | ||
8 | #include <linux/of.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/io.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/ratelimit.h> | ||
13 | |||
14 | /* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers | ||
15 | * | ||
16 | * From NX P8 workbook, sec 4.9.1 "842 details" | ||
17 | * Each DDE buffer is 128 byte aligned | ||
18 | * Each DDE buffer size is a multiple of 32 bytes (except the last) | ||
19 | * The last DDE buffer size is a multiple of 8 bytes | ||
20 | */ | ||
21 | #define DDE_BUFFER_ALIGN (128) | ||
22 | #define DDE_BUFFER_SIZE_MULT (32) | ||
23 | #define DDE_BUFFER_LAST_MULT (8) | ||
24 | |||
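Those three limits translate directly into checks a caller must satisfy before handing buffers to the coprocessor. A minimal sketch of such a check (dde_buffer_ok is a hypothetical helper; only the constants mirror the defines above):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DDE_BUFFER_ALIGN     128
#define DDE_BUFFER_SIZE_MULT  32
#define DDE_BUFFER_LAST_MULT   8

/* Hypothetical validity check for one DDE buffer against the
 * NX P8 rules quoted above. */
static bool dde_buffer_ok(uintptr_t addr, size_t len, bool is_last)
{
    if (addr & (DDE_BUFFER_ALIGN - 1))
        return false;             /* must be 128-byte aligned */
    if (is_last)
        return (len % DDE_BUFFER_LAST_MULT) == 0;
    return (len % DDE_BUFFER_SIZE_MULT) == 0;
}

int main(void)
{
    printf("%d\n", dde_buffer_ok(0x1000, 64, false));  /* 1 */
    printf("%d\n", dde_buffer_ok(0x1008, 64, false));  /* 0: misaligned */
    printf("%d\n", dde_buffer_ok(0x1000, 24, true));   /* 1: last, 8*3 */
    return 0;
}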
25 | /* Arbitrary DDL length limit | ||
26 | * Allows max buffer size of MAX-1 to MAX pages | ||
27 | * (depending on alignment) | ||
28 | */ | ||
29 | #define DDL_LEN_MAX (17) | ||
30 | |||
31 | /* CCW 842 CI/FC masks | ||
32 | * NX P8 workbook, section 4.3.1, figure 4-6 | ||
33 | * "CI/FC Boundary by NX CT type" | ||
34 | */ | ||
35 | #define CCW_CI_842 (0x00003ff8) | ||
36 | #define CCW_FC_842 (0x00000007) | ||
37 | |||
38 | /* CCW Function Codes (FC) for 842 | ||
39 | * NX P8 workbook, section 4.9, table 4-28 | ||
40 | * "Function Code Definitions for 842 Memory Compression" | ||
41 | */ | ||
42 | #define CCW_FC_842_COMP_NOCRC (0) | ||
43 | #define CCW_FC_842_COMP_CRC (1) | ||
44 | #define CCW_FC_842_DECOMP_NOCRC (2) | ||
45 | #define CCW_FC_842_DECOMP_CRC (3) | ||
46 | #define CCW_FC_842_MOVE (4) | ||
47 | |||
48 | /* CSB CC Error Types for 842 | ||
49 | * NX P8 workbook, section 4.10.3, table 4-30 | ||
50 | * "Reported Error Types Summary Table" | ||
51 | */ | ||
52 | /* These are all duplicates of existing codes defined in icswx.h. */ | ||
53 | #define CSB_CC_TRANSLATION_DUP1 (80) | ||
54 | #define CSB_CC_TRANSLATION_DUP2 (82) | ||
55 | #define CSB_CC_TRANSLATION_DUP3 (84) | ||
56 | #define CSB_CC_TRANSLATION_DUP4 (86) | ||
57 | #define CSB_CC_TRANSLATION_DUP5 (92) | ||
58 | #define CSB_CC_TRANSLATION_DUP6 (94) | ||
59 | #define CSB_CC_PROTECTION_DUP1 (81) | ||
60 | #define CSB_CC_PROTECTION_DUP2 (83) | ||
61 | #define CSB_CC_PROTECTION_DUP3 (85) | ||
62 | #define CSB_CC_PROTECTION_DUP4 (87) | ||
63 | #define CSB_CC_PROTECTION_DUP5 (93) | ||
64 | #define CSB_CC_PROTECTION_DUP6 (95) | ||
65 | #define CSB_CC_RD_EXTERNAL_DUP1 (89) | ||
66 | #define CSB_CC_RD_EXTERNAL_DUP2 (90) | ||
67 | #define CSB_CC_RD_EXTERNAL_DUP3 (91) | ||
68 | /* These are specific to NX */ | ||
69 | /* 842 codes */ | ||
70 | #define CSB_CC_TPBC_GT_SPBC (64) /* no error, but >1 comp ratio */ | ||
71 | #define CSB_CC_CRC_MISMATCH (65) /* decomp crc mismatch */ | ||
72 | #define CSB_CC_TEMPL_INVALID (66) /* decomp invalid template value */ | ||
73 | #define CSB_CC_TEMPL_OVERFLOW (67) /* decomp template shows data after end */ | ||
74 | /* sym crypt codes */ | ||
75 | #define CSB_CC_DECRYPT_OVERFLOW (64) | ||
76 | /* asym crypt codes */ | ||
77 | #define CSB_CC_MINV_OVERFLOW (128) | ||
78 | /* These are reserved for hypervisor use */ | ||
79 | #define CSB_CC_HYP_RESERVE_START (240) | ||
80 | #define CSB_CC_HYP_RESERVE_END (253) | ||
81 | #define CSB_CC_HYP_NO_HW (254) | ||
82 | #define CSB_CC_HYP_HANG_ABORTED (255) | ||
83 | |||
84 | /* CCB Completion Modes (CM) for 842 | ||
85 | * NX P8 workbook, section 4.3, figure 4-5 | ||
86 | * "CRB Details - Normal Cop_Req (CL=00, C=1)" | ||
87 | */ | ||
88 | #define CCB_CM_EXTRA_WRITE (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_STORE) | ||
89 | #define CCB_CM_INTERRUPT (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_INTERRUPT) | ||
90 | |||
91 | #define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1))) | ||
92 | #define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE) | ||
93 | |||
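LEN_ON_SIZE() computes how many bytes remain from a physical address up to the next size boundary, which is what keeps each DDE entry within a single page. A quick userspace check, assuming 4K pages for the example (EX_PAGE_SIZE is a local stand-in for the kernel's PAGE_SIZE):

#include <stdio.h>

#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1)))
#define EX_PAGE_SIZE 4096UL       /* assumed 4K pages for the example */
#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, EX_PAGE_SIZE)

int main(void)
{
    /* 0x1f40 is 0xf40 (3904) bytes into its page: 4096 - 3904 = 192 */
    printf("%lu\n", LEN_ON_PAGE(0x1f40UL));   /* 192 */
    /* exactly on a boundary, the whole next page remains */
    printf("%lu\n", LEN_ON_PAGE(0x2000UL));   /* 4096 */
    return 0;
}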
94 | static inline unsigned long nx842_get_pa(void *addr) | ||
95 | { | ||
96 | if (!is_vmalloc_addr(addr)) | ||
97 | return __pa(addr); | ||
98 | |||
99 | return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); | ||
100 | } | ||
101 | |||
102 | /* Get/Set bit fields */ | ||
103 | #define MASK_LSH(m) (__builtin_ffsl(m) - 1) | ||
104 | #define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) | ||
105 | #define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) | ||
106 | |||
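MASK_LSH() derives the shift from the mask itself (__builtin_ffsl, a GCC/Clang builtin, returns the 1-based index of the lowest set bit), so GET_FIELD/SET_FIELD work for any contiguous mask with no separate shift constant. A quick userspace check of both macros:

#include <stdio.h>

#define MASK_LSH(m)          (__builtin_ffsl(m) - 1)
#define GET_FIELD(v, m)      (((v) & (m)) >> MASK_LSH(m))
#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))

int main(void)
{
    unsigned long v = 0x12345678UL;

    /* the byte occupying bits 8..15 of v */
    printf("get: 0x%lx\n", GET_FIELD(v, 0x0000ff00UL));          /* 0x56 */
    printf("set: 0x%lx\n", SET_FIELD(v, 0x0000ff00UL, 0xabUL));  /* 0x1234ab78 */
    return 0;
}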
107 | struct nx842_constraints { | ||
108 | int alignment; | ||
109 | int multiple; | ||
110 | int minimum; | ||
111 | int maximum; | ||
112 | }; | ||
113 | |||
114 | struct nx842_driver { | ||
115 | char *name; | ||
116 | struct module *owner; | ||
117 | size_t workmem_size; | ||
118 | |||
119 | struct nx842_constraints *constraints; | ||
120 | |||
121 | int (*compress)(const unsigned char *in, unsigned int in_len, | ||
122 | unsigned char *out, unsigned int *out_len, | ||
123 | void *wrkmem); | ||
124 | int (*decompress)(const unsigned char *in, unsigned int in_len, | ||
125 | unsigned char *out, unsigned int *out_len, | ||
126 | void *wrkmem); | ||
127 | }; | ||
128 | |||
129 | struct nx842_driver *nx842_platform_driver(void); | ||
130 | bool nx842_platform_driver_set(struct nx842_driver *driver); | ||
131 | void nx842_platform_driver_unset(struct nx842_driver *driver); | ||
132 | bool nx842_platform_driver_get(void); | ||
133 | void nx842_platform_driver_put(void); | ||
134 | |||
135 | size_t nx842_workmem_size(void); | ||
136 | |||
137 | int nx842_constraints(struct nx842_constraints *constraints); | ||
138 | |||
139 | int nx842_compress(const unsigned char *in, unsigned int in_len, | ||
140 | unsigned char *out, unsigned int *out_len, void *wrkmem); | ||
141 | int nx842_decompress(const unsigned char *in, unsigned int in_len, | ||
142 | unsigned char *out, unsigned int *out_len, void *wrkmem); | ||
143 | |||
144 | #endif /* __NX_842_H__ */ | ||
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 88c562434bc0..08ac6d48688c 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c | |||
@@ -93,17 +93,6 @@ out: | |||
93 | return rc; | 93 | return rc; |
94 | } | 94 | } |
95 | 95 | ||
96 | static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, | ||
97 | unsigned int authsize) | ||
98 | { | ||
99 | if (authsize > crypto_aead_alg(tfm)->maxauthsize) | ||
100 | return -EINVAL; | ||
101 | |||
102 | crypto_aead_crt(tfm)->authsize = authsize; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, | 96 | static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, |
108 | unsigned int authsize) | 97 | unsigned int authsize) |
109 | { | 98 | { |
@@ -116,8 +105,6 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, | |||
116 | return -EINVAL; | 105 | return -EINVAL; |
117 | } | 106 | } |
118 | 107 | ||
119 | crypto_aead_crt(tfm)->authsize = authsize; | ||
120 | |||
121 | return 0; | 108 | return 0; |
122 | } | 109 | } |
123 | 110 | ||
@@ -134,7 +121,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, | |||
134 | unsigned int max_sg_len; | 121 | unsigned int max_sg_len; |
135 | 122 | ||
136 | if (nbytes <= AES_BLOCK_SIZE) { | 123 | if (nbytes <= AES_BLOCK_SIZE) { |
137 | scatterwalk_start(&walk, req->assoc); | 124 | scatterwalk_start(&walk, req->src); |
138 | scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); | 125 | scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); |
139 | scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); | 126 | scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); |
140 | return 0; | 127 | return 0; |
@@ -159,7 +146,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, | |||
159 | NX_PAGE_SIZE * (max_sg_len - 1)); | 146 | NX_PAGE_SIZE * (max_sg_len - 1)); |
160 | 147 | ||
161 | nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, | 148 | nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, |
162 | req->assoc, processed, &to_process); | 149 | req->src, processed, &to_process); |
163 | 150 | ||
164 | if ((to_process + processed) < nbytes) | 151 | if ((to_process + processed) < nbytes) |
165 | NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; | 152 | NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; |
@@ -225,7 +212,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc) | |||
225 | NX_PAGE_SIZE * (max_sg_len - 1)); | 212 | NX_PAGE_SIZE * (max_sg_len - 1)); |
226 | 213 | ||
227 | nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, | 214 | nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, |
228 | req->assoc, processed, &to_process); | 215 | req->src, processed, &to_process); |
229 | 216 | ||
230 | if ((to_process + processed) < nbytes) | 217 | if ((to_process + processed) < nbytes) |
231 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 218 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
@@ -377,7 +364,8 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
377 | csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; | 364 | csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; |
378 | desc.tfm = (struct crypto_blkcipher *) req->base.tfm; | 365 | desc.tfm = (struct crypto_blkcipher *) req->base.tfm; |
379 | rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, | 366 | rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, |
380 | req->src, &to_process, processed, | 367 | req->src, &to_process, |
368 | processed + req->assoclen, | ||
381 | csbcpb->cpb.aes_gcm.iv_or_cnt); | 369 | csbcpb->cpb.aes_gcm.iv_or_cnt); |
382 | 370 | ||
383 | if (rc) | 371 | if (rc) |
@@ -412,17 +400,19 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
412 | mac: | 400 | mac: |
413 | if (enc) { | 401 | if (enc) { |
414 | /* copy out the auth tag */ | 402 | /* copy out the auth tag */ |
415 | scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, | 403 | scatterwalk_map_and_copy( |
416 | req->dst, nbytes, | 404 | csbcpb->cpb.aes_gcm.out_pat_or_mac, |
417 | crypto_aead_authsize(crypto_aead_reqtfm(req)), | 405 | req->dst, req->assoclen + nbytes, |
418 | SCATTERWALK_TO_SG); | 406 | crypto_aead_authsize(crypto_aead_reqtfm(req)), |
407 | SCATTERWALK_TO_SG); | ||
419 | } else { | 408 | } else { |
420 | u8 *itag = nx_ctx->priv.gcm.iauth_tag; | 409 | u8 *itag = nx_ctx->priv.gcm.iauth_tag; |
421 | u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; | 410 | u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; |
422 | 411 | ||
423 | scatterwalk_map_and_copy(itag, req->src, nbytes, | 412 | scatterwalk_map_and_copy( |
424 | crypto_aead_authsize(crypto_aead_reqtfm(req)), | 413 | itag, req->src, req->assoclen + nbytes, |
425 | SCATTERWALK_FROM_SG); | 414 | crypto_aead_authsize(crypto_aead_reqtfm(req)), |
415 | SCATTERWALK_FROM_SG); | ||
426 | rc = memcmp(itag, otag, | 416 | rc = memcmp(itag, otag, |
427 | crypto_aead_authsize(crypto_aead_reqtfm(req))) ? | 417 | crypto_aead_authsize(crypto_aead_reqtfm(req))) ? |
428 | -EBADMSG : 0; | 418 | -EBADMSG : 0; |
@@ -481,45 +471,39 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req) | |||
481 | * during encrypt/decrypt doesn't solve this problem, because it calls | 471 | * during encrypt/decrypt doesn't solve this problem, because it calls |
482 | * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, | 472 | * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, |
483 | * but instead uses this tfm->blocksize. */ | 473 | * but instead uses this tfm->blocksize. */ |
484 | struct crypto_alg nx_gcm_aes_alg = { | 474 | struct aead_alg nx_gcm_aes_alg = { |
485 | .cra_name = "gcm(aes)", | 475 | .base = { |
486 | .cra_driver_name = "gcm-aes-nx", | 476 | .cra_name = "gcm(aes)", |
487 | .cra_priority = 300, | 477 | .cra_driver_name = "gcm-aes-nx", |
488 | .cra_flags = CRYPTO_ALG_TYPE_AEAD, | 478 | .cra_priority = 300, |
489 | .cra_blocksize = 1, | 479 | .cra_blocksize = 1, |
490 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 480 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
491 | .cra_type = &crypto_aead_type, | 481 | .cra_module = THIS_MODULE, |
492 | .cra_module = THIS_MODULE, | 482 | }, |
493 | .cra_init = nx_crypto_ctx_aes_gcm_init, | 483 | .init = nx_crypto_ctx_aes_gcm_init, |
494 | .cra_exit = nx_crypto_ctx_exit, | 484 | .exit = nx_crypto_ctx_aead_exit, |
495 | .cra_aead = { | 485 | .ivsize = 12, |
496 | .ivsize = AES_BLOCK_SIZE, | 486 | .maxauthsize = AES_BLOCK_SIZE, |
497 | .maxauthsize = AES_BLOCK_SIZE, | 487 | .setkey = gcm_aes_nx_set_key, |
498 | .setkey = gcm_aes_nx_set_key, | 488 | .encrypt = gcm_aes_nx_encrypt, |
499 | .setauthsize = gcm_aes_nx_setauthsize, | 489 | .decrypt = gcm_aes_nx_decrypt, |
500 | .encrypt = gcm_aes_nx_encrypt, | ||
501 | .decrypt = gcm_aes_nx_decrypt, | ||
502 | } | ||
503 | }; | 490 | }; |
504 | 491 | ||
505 | struct crypto_alg nx_gcm4106_aes_alg = { | 492 | struct aead_alg nx_gcm4106_aes_alg = { |
506 | .cra_name = "rfc4106(gcm(aes))", | 493 | .base = { |
507 | .cra_driver_name = "rfc4106-gcm-aes-nx", | 494 | .cra_name = "rfc4106(gcm(aes))", |
508 | .cra_priority = 300, | 495 | .cra_driver_name = "rfc4106-gcm-aes-nx", |
509 | .cra_flags = CRYPTO_ALG_TYPE_AEAD, | 496 | .cra_priority = 300, |
510 | .cra_blocksize = 1, | 497 | .cra_blocksize = 1, |
511 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 498 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
512 | .cra_type = &crypto_nivaead_type, | 499 | .cra_module = THIS_MODULE, |
513 | .cra_module = THIS_MODULE, | 500 | }, |
514 | .cra_init = nx_crypto_ctx_aes_gcm_init, | 501 | .init = nx_crypto_ctx_aes_gcm_init, |
515 | .cra_exit = nx_crypto_ctx_exit, | 502 | .exit = nx_crypto_ctx_aead_exit, |
516 | .cra_aead = { | 503 | .ivsize = 8, |
517 | .ivsize = 8, | 504 | .maxauthsize = AES_BLOCK_SIZE, |
518 | .maxauthsize = AES_BLOCK_SIZE, | 505 | .setkey = gcm4106_aes_nx_set_key, |
519 | .geniv = "seqiv", | 506 | .setauthsize = gcm4106_aes_nx_setauthsize, |
520 | .setkey = gcm4106_aes_nx_set_key, | 507 | .encrypt = gcm4106_aes_nx_encrypt, |
521 | .setauthsize = gcm4106_aes_nx_setauthsize, | 508 | .decrypt = gcm4106_aes_nx_decrypt, |
522 | .encrypt = gcm4106_aes_nx_encrypt, | ||
523 | .decrypt = gcm4106_aes_nx_decrypt, | ||
524 | } | ||
525 | }; | 509 | }; |
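The conversion above is representative of the whole AEAD rework in this cycle: the cra_aead union inside struct crypto_alg goes away, drivers fill in struct aead_alg directly, the generic fields move under .base, and init/exit take a struct crypto_aead rather than a struct crypto_tfm. A skeletal new-style definition modeled on the hunk above, where every my_* symbol is a placeholder rather than real driver code:

/* Sketch of a new-interface AEAD definition; my_* symbols are
 * placeholders and not part of any real driver. */
static struct aead_alg my_gcm_alg = {
    .base = {
        .cra_name        = "gcm(aes)",
        .cra_driver_name = "gcm-aes-mydrv",
        .cra_priority    = 300,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct my_ctx),
        .cra_module      = THIS_MODULE,
    },
    .init        = my_aead_init,   /* int  (*)(struct crypto_aead *) */
    .exit        = my_aead_exit,   /* void (*)(struct crypto_aead *) */
    .ivsize      = 12,             /* GCM nonce length               */
    .maxauthsize = AES_BLOCK_SIZE,
    .setkey      = my_setkey,
    .encrypt     = my_encrypt,
    .decrypt     = my_decrypt,
};

The same conversion is why the nx_gca()/gmac() hunks above walk req->src instead of req->assoc: under the new interface the associated data and the text travel in one scatterlist.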
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 23621da624c3..4e91bdb83c59 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c | |||
@@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_desc *desc) | |||
33 | { | 33 | { |
34 | struct sha256_state *sctx = shash_desc_ctx(desc); | 34 | struct sha256_state *sctx = shash_desc_ctx(desc); |
35 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 35 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
36 | struct nx_sg *out_sg; | ||
36 | int len; | 37 | int len; |
37 | int rc; | 38 | u32 max_sg_len; |
38 | 39 | ||
39 | nx_ctx_init(nx_ctx, HCOP_FC_SHA); | 40 | nx_ctx_init(nx_ctx, HCOP_FC_SHA); |
40 | 41 | ||
@@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_desc *desc) | |||
44 | 45 | ||
45 | NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); | 46 | NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); |
46 | 47 | ||
48 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
49 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
50 | max_sg_len = min_t(u64, max_sg_len, | ||
51 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
52 | |||
47 | len = SHA256_DIGEST_SIZE; | 53 | len = SHA256_DIGEST_SIZE; |
48 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, | 54 | out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, |
49 | &nx_ctx->op.outlen, | 55 | &len, max_sg_len); |
50 | &len, | 56 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
51 | (u8 *) sctx->state, | ||
52 | NX_DS_SHA256); | ||
53 | 57 | ||
54 | if (rc) | 58 | if (len != SHA256_DIGEST_SIZE) |
55 | goto out; | 59 | return -EINVAL; |
56 | 60 | ||
57 | sctx->state[0] = __cpu_to_be32(SHA256_H0); | 61 | sctx->state[0] = __cpu_to_be32(SHA256_H0); |
58 | sctx->state[1] = __cpu_to_be32(SHA256_H1); | 62 | sctx->state[1] = __cpu_to_be32(SHA256_H1); |
@@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_desc *desc) | |||
64 | sctx->state[7] = __cpu_to_be32(SHA256_H7); | 68 | sctx->state[7] = __cpu_to_be32(SHA256_H7); |
65 | sctx->count = 0; | 69 | sctx->count = 0; |
66 | 70 | ||
67 | out: | ||
68 | return 0; | 71 | return 0; |
69 | } | 72 | } |
70 | 73 | ||
@@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, | |||
74 | struct sha256_state *sctx = shash_desc_ctx(desc); | 77 | struct sha256_state *sctx = shash_desc_ctx(desc); |
75 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 78 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
76 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 79 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
80 | struct nx_sg *in_sg; | ||
77 | u64 to_process = 0, leftover, total; | 81 | u64 to_process = 0, leftover, total; |
78 | unsigned long irq_flags; | 82 | unsigned long irq_flags; |
79 | int rc = 0; | 83 | int rc = 0; |
80 | int data_len; | 84 | int data_len; |
85 | u32 max_sg_len; | ||
81 | u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); | 86 | u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); |
82 | 87 | ||
83 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | 88 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
@@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, | |||
97 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 102 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
98 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | 103 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
99 | 104 | ||
105 | in_sg = nx_ctx->in_sg; | ||
106 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
107 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
108 | max_sg_len = min_t(u64, max_sg_len, | ||
109 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
110 | |||
100 | do { | 111 | do { |
101 | /* | 112 | /* |
102 | * to_process: the SHA256_BLOCK_SIZE data chunk to process in | 113 | * to_process: the SHA256_BLOCK_SIZE data chunk to process in |
@@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, | |||
108 | 119 | ||
109 | if (buf_len) { | 120 | if (buf_len) { |
110 | data_len = buf_len; | 121 | data_len = buf_len; |
111 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, | 122 | in_sg = nx_build_sg_list(nx_ctx->in_sg, |
112 | &nx_ctx->op.inlen, | 123 | (u8 *) sctx->buf, |
113 | &data_len, | 124 | &data_len, |
114 | (u8 *) sctx->buf, | 125 | max_sg_len); |
115 | NX_DS_SHA256); | ||
116 | 126 | ||
117 | if (rc || data_len != buf_len) | 127 | if (data_len != buf_len) { |
128 | rc = -EINVAL; | ||
118 | goto out; | 129 | goto out; |
130 | } | ||
119 | } | 131 | } |
120 | 132 | ||
121 | data_len = to_process - buf_len; | 133 | data_len = to_process - buf_len; |
122 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, | 134 | in_sg = nx_build_sg_list(in_sg, (u8 *) data, |
123 | &nx_ctx->op.inlen, | 135 | &data_len, max_sg_len); |
124 | &data_len, | ||
125 | (u8 *) data, | ||
126 | NX_DS_SHA256); | ||
127 | 136 | ||
128 | if (rc) | 137 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
129 | goto out; | ||
130 | 138 | ||
131 | to_process = (data_len + buf_len); | 139 | to_process = (data_len + buf_len); |
132 | leftover = total - to_process; | 140 | leftover = total - to_process; |
@@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) | |||
173 | struct sha256_state *sctx = shash_desc_ctx(desc); | 181 | struct sha256_state *sctx = shash_desc_ctx(desc); |
174 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 182 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
175 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 183 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
184 | struct nx_sg *in_sg, *out_sg; | ||
176 | unsigned long irq_flags; | 185 | unsigned long irq_flags; |
177 | int rc; | 186 | u32 max_sg_len; |
187 | int rc = 0; | ||
178 | int len; | 188 | int len; |
179 | 189 | ||
180 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | 190 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
181 | 191 | ||
192 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
193 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
194 | max_sg_len = min_t(u64, max_sg_len, | ||
195 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
196 | |||
182 | /* final is represented by continuing the operation and indicating that | 197 | /* final is represented by continuing the operation and indicating that |
183 | * this is not an intermediate operation */ | 198 | * this is not an intermediate operation */ |
184 | if (sctx->count >= SHA256_BLOCK_SIZE) { | 199 | if (sctx->count >= SHA256_BLOCK_SIZE) { |
@@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) | |||
195 | csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); | 210 | csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); |
196 | 211 | ||
197 | len = sctx->count & (SHA256_BLOCK_SIZE - 1); | 212 | len = sctx->count & (SHA256_BLOCK_SIZE - 1); |
198 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, | 213 | in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, |
199 | &nx_ctx->op.inlen, | 214 | &len, max_sg_len); |
200 | &len, | ||
201 | (u8 *) sctx->buf, | ||
202 | NX_DS_SHA256); | ||
203 | 215 | ||
204 | if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) | 216 | if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { |
217 | rc = -EINVAL; | ||
205 | goto out; | 218 | goto out; |
219 | } | ||
206 | 220 | ||
207 | len = SHA256_DIGEST_SIZE; | 221 | len = SHA256_DIGEST_SIZE; |
208 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, | 222 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); |
209 | &nx_ctx->op.outlen, | ||
210 | &len, | ||
211 | out, | ||
212 | NX_DS_SHA256); | ||
213 | 223 | ||
214 | if (rc || len != SHA256_DIGEST_SIZE) | 224 | if (len != SHA256_DIGEST_SIZE) { |
225 | rc = -EINVAL; | ||
215 | goto out; | 226 | goto out; |
227 | } | ||
216 | 228 | ||
229 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); | ||
230 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); | ||
217 | if (!nx_ctx->op.outlen) { | 231 | if (!nx_ctx->op.outlen) { |
218 | rc = -EINVAL; | 232 | rc = -EINVAL; |
219 | goto out; | 233 | goto out; |
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index b3adf1022673..e6a58d2ee628 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c | |||
@@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_desc *desc) | |||
32 | { | 32 | { |
33 | struct sha512_state *sctx = shash_desc_ctx(desc); | 33 | struct sha512_state *sctx = shash_desc_ctx(desc); |
34 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 34 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
35 | struct nx_sg *out_sg; | ||
35 | int len; | 36 | int len; |
36 | int rc; | 37 | u32 max_sg_len; |
37 | 38 | ||
38 | nx_ctx_init(nx_ctx, HCOP_FC_SHA); | 39 | nx_ctx_init(nx_ctx, HCOP_FC_SHA); |
39 | 40 | ||
@@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_desc *desc) | |||
43 | 44 | ||
44 | NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); | 45 | NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); |
45 | 46 | ||
47 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
48 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
49 | max_sg_len = min_t(u64, max_sg_len, | ||
50 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
51 | |||
46 | len = SHA512_DIGEST_SIZE; | 52 | len = SHA512_DIGEST_SIZE; |
47 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, | 53 | out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, |
48 | &nx_ctx->op.outlen, | 54 | &len, max_sg_len); |
49 | &len, | 55 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
50 | (u8 *)sctx->state, | ||
51 | NX_DS_SHA512); | ||
52 | 56 | ||
53 | if (rc || len != SHA512_DIGEST_SIZE) | 57 | if (len != SHA512_DIGEST_SIZE) |
54 | goto out; | 58 | return -EINVAL; |
55 | 59 | ||
56 | sctx->state[0] = __cpu_to_be64(SHA512_H0); | 60 | sctx->state[0] = __cpu_to_be64(SHA512_H0); |
57 | sctx->state[1] = __cpu_to_be64(SHA512_H1); | 61 | sctx->state[1] = __cpu_to_be64(SHA512_H1); |
@@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_desc *desc) | |||
63 | sctx->state[7] = __cpu_to_be64(SHA512_H7); | 67 | sctx->state[7] = __cpu_to_be64(SHA512_H7); |
64 | sctx->count[0] = 0; | 68 | sctx->count[0] = 0; |
65 | 69 | ||
66 | out: | ||
67 | return 0; | 70 | return 0; |
68 | } | 71 | } |
69 | 72 | ||
@@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, | |||
73 | struct sha512_state *sctx = shash_desc_ctx(desc); | 76 | struct sha512_state *sctx = shash_desc_ctx(desc); |
74 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 77 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
75 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 78 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
79 | struct nx_sg *in_sg; | ||
76 | u64 to_process, leftover = 0, total; | 80 | u64 to_process, leftover = 0, total; |
77 | unsigned long irq_flags; | 81 | unsigned long irq_flags; |
78 | int rc = 0; | 82 | int rc = 0; |
79 | int data_len; | 83 | int data_len; |
84 | u32 max_sg_len; | ||
80 | u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); | 85 | u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); |
81 | 86 | ||
82 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | 87 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
@@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, | |||
96 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; | 101 | NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; |
97 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | 102 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
98 | 103 | ||
104 | in_sg = nx_ctx->in_sg; | ||
105 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
106 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
107 | max_sg_len = min_t(u64, max_sg_len, | ||
108 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
109 | |||
99 | do { | 110 | do { |
100 | /* | 111 | /* |
101 | * to_process: the SHA512_BLOCK_SIZE data chunk to process in | 112 | * to_process: the SHA512_BLOCK_SIZE data chunk to process in |
@@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, | |||
108 | 119 | ||
109 | if (buf_len) { | 120 | if (buf_len) { |
110 | data_len = buf_len; | 121 | data_len = buf_len; |
111 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, | 122 | in_sg = nx_build_sg_list(nx_ctx->in_sg, |
112 | &nx_ctx->op.inlen, | 123 | (u8 *) sctx->buf, |
113 | &data_len, | 124 | &data_len, max_sg_len); |
114 | (u8 *) sctx->buf, | ||
115 | NX_DS_SHA512); | ||
116 | 125 | ||
117 | if (rc || data_len != buf_len) | 126 | if (data_len != buf_len) { |
127 | rc = -EINVAL; | ||
118 | goto out; | 128 | goto out; |
129 | } | ||
119 | } | 130 | } |
120 | 131 | ||
121 | data_len = to_process - buf_len; | 132 | data_len = to_process - buf_len; |
122 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, | 133 | in_sg = nx_build_sg_list(in_sg, (u8 *) data, |
123 | &nx_ctx->op.inlen, | 134 | &data_len, max_sg_len); |
124 | &data_len, | ||
125 | (u8 *) data, | ||
126 | NX_DS_SHA512); | ||
127 | 135 | ||
128 | if (rc || data_len != (to_process - buf_len)) | 136 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
137 | |||
138 | if (data_len != (to_process - buf_len)) { | ||
139 | rc = -EINVAL; | ||
129 | goto out; | 140 | goto out; |
141 | } | ||
130 | 142 | ||
131 | to_process = (data_len + buf_len); | 143 | to_process = (data_len + buf_len); |
132 | leftover = total - to_process; | 144 | leftover = total - to_process; |
@@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) | |||
172 | struct sha512_state *sctx = shash_desc_ctx(desc); | 184 | struct sha512_state *sctx = shash_desc_ctx(desc); |
173 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); | 185 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); |
174 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; | 186 | struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; |
187 | struct nx_sg *in_sg, *out_sg; | ||
188 | u32 max_sg_len; | ||
175 | u64 count0; | 189 | u64 count0; |
176 | unsigned long irq_flags; | 190 | unsigned long irq_flags; |
177 | int rc; | 191 | int rc = 0; |
178 | int len; | 192 | int len; |
179 | 193 | ||
180 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | 194 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
181 | 195 | ||
196 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
197 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
198 | max_sg_len = min_t(u64, max_sg_len, | ||
199 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
200 | |||
182 | /* final is represented by continuing the operation and indicating that | 201 | /* final is represented by continuing the operation and indicating that |
183 | * this is not an intermediate operation */ | 202 | * this is not an intermediate operation */ |
184 | if (sctx->count[0] >= SHA512_BLOCK_SIZE) { | 203 | if (sctx->count[0] >= SHA512_BLOCK_SIZE) { |
@@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) | |||
200 | csbcpb->cpb.sha512.message_bit_length_lo = count0; | 219 | csbcpb->cpb.sha512.message_bit_length_lo = count0; |
201 | 220 | ||
202 | len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); | 221 | len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); |
203 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, | 222 | in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, |
204 | &nx_ctx->op.inlen, | 223 | max_sg_len); |
205 | &len, | ||
206 | (u8 *)sctx->buf, | ||
207 | NX_DS_SHA512); | ||
208 | 224 | ||
209 | if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) | 225 | if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { |
226 | rc = -EINVAL; | ||
210 | goto out; | 227 | goto out; |
228 | } | ||
211 | 229 | ||
212 | len = SHA512_DIGEST_SIZE; | 230 | len = SHA512_DIGEST_SIZE; |
213 | rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, | 231 | out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, |
214 | &nx_ctx->op.outlen, | 232 | max_sg_len); |
215 | &len, | ||
216 | out, | ||
217 | NX_DS_SHA512); | ||
218 | 233 | ||
219 | if (rc) | 234 | nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); |
220 | goto out; | 235 | nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); |
221 | 236 | ||
222 | if (!nx_ctx->op.outlen) { | 237 | if (!nx_ctx->op.outlen) { |
223 | rc = -EINVAL; | 238 | rc = -EINVAL; |
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 1da6dc59d0dd..f6198f29a4a8 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
@@ -19,8 +19,8 @@ | |||
19 | * Author: Kent Yoder <yoder1@us.ibm.com> | 19 | * Author: Kent Yoder <yoder1@us.ibm.com> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <crypto/internal/aead.h> | ||
22 | #include <crypto/internal/hash.h> | 23 | #include <crypto/internal/hash.h> |
23 | #include <crypto/hash.h> | ||
24 | #include <crypto/aes.h> | 24 | #include <crypto/aes.h> |
25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
26 | #include <crypto/algapi.h> | 26 | #include <crypto/algapi.h> |
@@ -29,10 +29,10 @@ | |||
29 | #include <linux/moduleparam.h> | 29 | #include <linux/moduleparam.h> |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/crypto.h> | ||
33 | #include <linux/scatterlist.h> | 32 | #include <linux/scatterlist.h> |
34 | #include <linux/device.h> | 33 | #include <linux/device.h> |
35 | #include <linux/of.h> | 34 | #include <linux/of.h> |
35 | #include <linux/types.h> | ||
36 | #include <asm/hvcall.h> | 36 | #include <asm/hvcall.h> |
37 | #include <asm/vio.h> | 37 | #include <asm/vio.h> |
38 | 38 | ||
@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, | |||
215 | * @delta: is the amount we need to crop in order to bound the list. | 215 | * @delta: is the amount we need to crop in order to bound the list. |
216 | * | 216 | * |
217 | */ | 217 | */ |
218 | static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta) | 218 | static long int trim_sg_list(struct nx_sg *sg, |
219 | struct nx_sg *end, | ||
220 | unsigned int delta, | ||
221 | unsigned int *nbytes) | ||
219 | { | 222 | { |
223 | long int oplen; | ||
224 | long int data_back; | ||
225 | unsigned int is_delta = delta; | ||
226 | |||
220 | while (delta && end > sg) { | 227 | while (delta && end > sg) { |
221 | struct nx_sg *last = end - 1; | 228 | struct nx_sg *last = end - 1; |
222 | 229 | ||
@@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d | |||
228 | delta -= last->len; | 235 | delta -= last->len; |
229 | } | 236 | } |
230 | } | 237 | } |
231 | return (sg - end) * sizeof(struct nx_sg); | ||
232 | } | ||
233 | 238 | ||
234 | /** | 239 | /* There are cases where we need to crop list in order to make it |
235 | * nx_sha_build_sg_list - walk and build sg list to sha modes | 240 | * a block size multiple, but we also need to align data. In order to |
236 | * using right bounds and limits. | 241 | * do that, we need to calculate how much we need to put back to be
237 | * @nx_ctx: NX crypto context for the lists we're building | 242 | * processed.
238 | * @nx_sg: current sg list in or out list | 243 | */ |
239 | * @op_len: current op_len to be used in order to build a sg list | 244 | oplen = (sg - end) * sizeof(struct nx_sg); |
240 | * @nbytes: number or bytes to be processed | 245 | if (is_delta) { |
241 | * @offset: buf offset | 246 | data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len; |
242 | * @mode: SHA256 or SHA512 | 247 | data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1)); |
243 | */ | 248 | *nbytes -= data_back; |
244 | int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx, | ||
245 | struct nx_sg *nx_in_outsg, | ||
246 | s64 *op_len, | ||
247 | unsigned int *nbytes, | ||
248 | u8 *offset, | ||
249 | u32 mode) | ||
250 | { | ||
251 | unsigned int delta = 0; | ||
252 | unsigned int total = *nbytes; | ||
253 | struct nx_sg *nx_insg = nx_in_outsg; | ||
254 | unsigned int max_sg_len; | ||
255 | |||
256 | max_sg_len = min_t(u64, nx_ctx->ap->sglen, | ||
257 | nx_driver.of.max_sg_len/sizeof(struct nx_sg)); | ||
258 | max_sg_len = min_t(u64, max_sg_len, | ||
259 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | ||
260 | |||
261 | *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); | ||
262 | nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len); | ||
263 | |||
264 | switch (mode) { | ||
265 | case NX_DS_SHA256: | ||
266 | if (*nbytes < total) | ||
267 | delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1)); | ||
268 | break; | ||
269 | case NX_DS_SHA512: | ||
270 | if (*nbytes < total) | ||
271 | delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1)); | ||
272 | break; | ||
273 | default: | ||
274 | return -EINVAL; | ||
275 | } | 249 | } |
276 | *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta); | ||
277 | 250 | ||
278 | return 0; | 251 | return oplen; |
279 | } | 252 | } |
280 | 253 | ||
281 | /** | 254 | /** |
@@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, | |||
330 | /* these lengths should be negative, which will indicate to phyp that | 303 | /* these lengths should be negative, which will indicate to phyp that |
331 | * the input and output parameters are scatterlists, not linear | 304 | * the input and output parameters are scatterlists, not linear |
332 | * buffers */ | 305 | * buffers */ |
333 | nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta); | 306 | nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes); |
334 | nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta); | 307 | nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes); |
335 | 308 | ||
336 | return 0; | 309 | return 0; |
337 | } | 310 | } |
@@ -426,6 +399,13 @@ static void nx_of_update_msc(struct device *dev, | |||
426 | goto next_loop; | 399 | goto next_loop; |
427 | } | 400 | } |
428 | 401 | ||
402 | if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) { | ||
403 | dev_warn(dev, "bogus sglen/databytelen: " | ||
404 | "%u/%u (ignored)\n", trip->sglen, | ||
405 | trip->databytelen); | ||
406 | goto next_loop; | ||
407 | } | ||
408 | |||
429 | switch (trip->keybitlen) { | 409 | switch (trip->keybitlen) { |
430 | case 128: | 410 | case 128: |
431 | case 160: | 411 | case 160: |
@@ -518,6 +498,72 @@ static void nx_of_init(struct device *dev, struct nx_of *props) | |||
518 | nx_of_update_msc(dev, p, props); | 498 | nx_of_update_msc(dev, p, props); |
519 | } | 499 | } |
520 | 500 | ||
501 | static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot) | ||
502 | { | ||
503 | struct alg_props *props = &nx_driver.of.ap[fc][mode][slot]; | ||
504 | |||
505 | if (!props->sglen || props->databytelen < NX_PAGE_SIZE) { | ||
506 | if (dev) | ||
507 | dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: " | ||
508 | "%u/%u (ignored)\n", fc, mode, slot, | ||
509 | props->sglen, props->databytelen); | ||
510 | return false; | ||
511 | } | ||
512 | |||
513 | return true; | ||
514 | } | ||
515 | |||
516 | static bool nx_check_props(struct device *dev, u32 fc, u32 mode) | ||
517 | { | ||
518 | int i; | ||
519 | |||
520 | for (i = 0; i < 3; i++) | ||
521 | if (!nx_check_prop(dev, fc, mode, i)) | ||
522 | return false; | ||
523 | |||
524 | return true; | ||
525 | } | ||
526 | |||
527 | static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode) | ||
528 | { | ||
529 | return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? | ||
530 | crypto_register_alg(alg) : 0; | ||
531 | } | ||
532 | |||
533 | static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode) | ||
534 | { | ||
535 | return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? | ||
536 | crypto_register_aead(alg) : 0; | ||
537 | } | ||
538 | |||
539 | static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot) | ||
540 | { | ||
541 | return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev, | ||
542 | fc, mode, slot) : | ||
543 | nx_check_props(&nx_driver.viodev->dev, fc, mode)) ? | ||
544 | crypto_register_shash(alg) : 0; | ||
545 | } | ||
546 | |||
547 | static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode) | ||
548 | { | ||
549 | if (nx_check_props(NULL, fc, mode)) | ||
550 | crypto_unregister_alg(alg); | ||
551 | } | ||
552 | |||
553 | static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode) | ||
554 | { | ||
555 | if (nx_check_props(NULL, fc, mode)) | ||
556 | crypto_unregister_aead(alg); | ||
557 | } | ||
558 | |||
559 | static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode, | ||
560 | int slot) | ||
561 | { | ||
562 | if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) : | ||
563 | nx_check_props(NULL, fc, mode)) | ||
564 | crypto_unregister_shash(alg); | ||
565 | } | ||
566 | |||
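These wrappers encode a single policy: an algorithm whose device properties fail validation is skipped silently (the register call "succeeds" with 0) instead of failing the whole probe, and the matching unregister applies the same test so teardown stays symmetric. A toy sketch of that gate (props_ok is a stub standing in for nx_check_props()):

#include <stdbool.h>
#include <stdio.h>

/* stub: pretend mode 2 has bogus device properties */
static bool props_ok(int fc, int mode) { return fc == 0 && mode != 2; }

/* Register only when the properties validate; skipping is not an
 * error, mirroring "nx_check_props(...) ? crypto_register_alg(alg) : 0". */
static int register_if_supported(const char *name, int fc, int mode)
{
    if (!props_ok(fc, mode)) {
        printf("%s: skipped (bad device properties)\n", name);
        return 0;                 /* deliberately not a failure */
    }
    printf("%s: registered\n", name);
    return 0;
}

int main(void)
{
    register_if_supported("ecb(aes)", 0, 1);
    register_if_supported("gcm(aes)", 0, 2);   /* silently skipped */
    return 0;
}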
521 | /** | 567 | /** |
522 | * nx_register_algs - register algorithms with the crypto API | 568 | * nx_register_algs - register algorithms with the crypto API |
523 | * | 569 | * |
@@ -542,72 +588,77 @@ static int nx_register_algs(void) | |||
542 | 588 | ||
543 | nx_driver.of.status = NX_OKAY; | 589 | nx_driver.of.status = NX_OKAY; |
544 | 590 | ||
545 | rc = crypto_register_alg(&nx_ecb_aes_alg); | 591 | rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); |
546 | if (rc) | 592 | if (rc) |
547 | goto out; | 593 | goto out; |
548 | 594 | ||
549 | rc = crypto_register_alg(&nx_cbc_aes_alg); | 595 | rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); |
550 | if (rc) | 596 | if (rc) |
551 | goto out_unreg_ecb; | 597 | goto out_unreg_ecb; |
552 | 598 | ||
553 | rc = crypto_register_alg(&nx_ctr_aes_alg); | 599 | rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); |
554 | if (rc) | 600 | if (rc) |
555 | goto out_unreg_cbc; | 601 | goto out_unreg_cbc; |
556 | 602 | ||
557 | rc = crypto_register_alg(&nx_ctr3686_aes_alg); | 603 | rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); |
558 | if (rc) | 604 | if (rc) |
559 | goto out_unreg_ctr; | 605 | goto out_unreg_ctr; |
560 | 606 | ||
561 | rc = crypto_register_alg(&nx_gcm_aes_alg); | 607 | rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
562 | if (rc) | 608 | if (rc) |
563 | goto out_unreg_ctr3686; | 609 | goto out_unreg_ctr3686; |
564 | 610 | ||
565 | rc = crypto_register_alg(&nx_gcm4106_aes_alg); | 611 | rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
566 | if (rc) | 612 | if (rc) |
567 | goto out_unreg_gcm; | 613 | goto out_unreg_gcm; |
568 | 614 | ||
569 | rc = crypto_register_alg(&nx_ccm_aes_alg); | 615 | rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
570 | if (rc) | 616 | if (rc) |
571 | goto out_unreg_gcm4106; | 617 | goto out_unreg_gcm4106; |
572 | 618 | ||
573 | rc = crypto_register_alg(&nx_ccm4309_aes_alg); | 619 | rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
574 | if (rc) | 620 | if (rc) |
575 | goto out_unreg_ccm; | 621 | goto out_unreg_ccm; |
576 | 622 | ||
577 | rc = crypto_register_shash(&nx_shash_sha256_alg); | 623 | rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, |
624 | NX_PROPS_SHA256); | ||
578 | if (rc) | 625 | if (rc) |
579 | goto out_unreg_ccm4309; | 626 | goto out_unreg_ccm4309; |
580 | 627 | ||
581 | rc = crypto_register_shash(&nx_shash_sha512_alg); | 628 | rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, |
629 | NX_PROPS_SHA512); | ||
582 | if (rc) | 630 | if (rc) |
583 | goto out_unreg_s256; | 631 | goto out_unreg_s256; |
584 | 632 | ||
585 | rc = crypto_register_shash(&nx_shash_aes_xcbc_alg); | 633 | rc = nx_register_shash(&nx_shash_aes_xcbc_alg, |
634 | NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); | ||
586 | if (rc) | 635 | if (rc) |
587 | goto out_unreg_s512; | 636 | goto out_unreg_s512; |
588 | 637 | ||
589 | goto out; | 638 | goto out; |
590 | 639 | ||
591 | out_unreg_s512: | 640 | out_unreg_s512: |
592 | crypto_unregister_shash(&nx_shash_sha512_alg); | 641 | nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, |
642 | NX_PROPS_SHA512); | ||
593 | out_unreg_s256: | 643 | out_unreg_s256: |
594 | crypto_unregister_shash(&nx_shash_sha256_alg); | 644 | nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, |
645 | NX_PROPS_SHA256); | ||
595 | out_unreg_ccm4309: | 646 | out_unreg_ccm4309: |
596 | crypto_unregister_alg(&nx_ccm4309_aes_alg); | 647 | nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
597 | out_unreg_ccm: | 648 | out_unreg_ccm: |
598 | crypto_unregister_alg(&nx_ccm_aes_alg); | 649 | nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
599 | out_unreg_gcm4106: | 650 | out_unreg_gcm4106: |
600 | crypto_unregister_alg(&nx_gcm4106_aes_alg); | 651 | nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
601 | out_unreg_gcm: | 652 | out_unreg_gcm: |
602 | crypto_unregister_alg(&nx_gcm_aes_alg); | 653 | nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
603 | out_unreg_ctr3686: | 654 | out_unreg_ctr3686: |
604 | crypto_unregister_alg(&nx_ctr3686_aes_alg); | 655 | nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); |
605 | out_unreg_ctr: | 656 | out_unreg_ctr: |
606 | crypto_unregister_alg(&nx_ctr_aes_alg); | 657 | nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); |
607 | out_unreg_cbc: | 658 | out_unreg_cbc: |
608 | crypto_unregister_alg(&nx_cbc_aes_alg); | 659 | nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); |
609 | out_unreg_ecb: | 660 | out_unreg_ecb: |
610 | crypto_unregister_alg(&nx_ecb_aes_alg); | 661 | nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); |
611 | out: | 662 | out: |
612 | return rc; | 663 | return rc; |
613 | } | 664 | } |
@@ -666,9 +717,9 @@ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) | |||
666 | NX_MODE_AES_CCM); | 717 | NX_MODE_AES_CCM); |
667 | } | 718 | } |
668 | 719 | ||
669 | int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm) | 720 | int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) |
670 | { | 721 | { |
671 | return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, | 722 | return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, |
672 | NX_MODE_AES_GCM); | 723 | NX_MODE_AES_GCM); |
673 | } | 724 | } |
674 | 725 | ||
@@ -720,6 +771,13 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm) | |||
720 | nx_ctx->out_sg = NULL; | 771 | nx_ctx->out_sg = NULL; |
721 | } | 772 | } |
722 | 773 | ||
774 | void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) | ||
775 | { | ||
776 | struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); | ||
777 | |||
778 | kzfree(nx_ctx->kmem); | ||
779 | } | ||
780 | |||
723 | static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) | 781 | static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) |
724 | { | 782 | { |
725 | dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", | 783 | dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", |
@@ -746,17 +804,24 @@ static int nx_remove(struct vio_dev *viodev) | |||
746 | if (nx_driver.of.status == NX_OKAY) { | 804 | if (nx_driver.of.status == NX_OKAY) { |
747 | NX_DEBUGFS_FINI(&nx_driver); | 805 | NX_DEBUGFS_FINI(&nx_driver); |
748 | 806 | ||
749 | crypto_unregister_alg(&nx_ccm_aes_alg); | 807 | nx_unregister_shash(&nx_shash_aes_xcbc_alg, |
750 | crypto_unregister_alg(&nx_ccm4309_aes_alg); | 808 | NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); |
751 | crypto_unregister_alg(&nx_gcm_aes_alg); | 809 | nx_unregister_shash(&nx_shash_sha512_alg, |
752 | crypto_unregister_alg(&nx_gcm4106_aes_alg); | 810 | NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); |
753 | crypto_unregister_alg(&nx_ctr_aes_alg); | 811 | nx_unregister_shash(&nx_shash_sha256_alg, |
754 | crypto_unregister_alg(&nx_ctr3686_aes_alg); | 812 | NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); |
755 | crypto_unregister_alg(&nx_cbc_aes_alg); | 813 | nx_unregister_alg(&nx_ccm4309_aes_alg, |
756 | crypto_unregister_alg(&nx_ecb_aes_alg); | 814 | NX_FC_AES, NX_MODE_AES_CCM); |
757 | crypto_unregister_shash(&nx_shash_sha256_alg); | 815 | nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
758 | crypto_unregister_shash(&nx_shash_sha512_alg); | 816 | nx_unregister_aead(&nx_gcm4106_aes_alg, |
759 | crypto_unregister_shash(&nx_shash_aes_xcbc_alg); | 817 | NX_FC_AES, NX_MODE_AES_GCM); |
818 | nx_unregister_aead(&nx_gcm_aes_alg, | ||
819 | NX_FC_AES, NX_MODE_AES_GCM); | ||
820 | nx_unregister_alg(&nx_ctr3686_aes_alg, | ||
821 | NX_FC_AES, NX_MODE_AES_CTR); | ||
822 | nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); | ||
823 | nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); | ||
824 | nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); | ||
760 | } | 825 | } |
761 | 826 | ||
762 | return 0; | 827 | return 0; |
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 6c9ecaaead52..de3ea8738146 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h | |||
@@ -143,18 +143,17 @@ struct nx_crypto_ctx { | |||
143 | 143 | ||
144 | /* prototypes */ | 144 | /* prototypes */ |
145 | int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); | 145 | int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); |
146 | int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm); | 146 | int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); |
147 | int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); | 147 | int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); |
148 | int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); | 148 | int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); |
149 | int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm); | 149 | int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm); |
150 | int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm); | 150 | int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm); |
151 | int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); | 151 | int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); |
152 | void nx_crypto_ctx_exit(struct crypto_tfm *tfm); | 152 | void nx_crypto_ctx_exit(struct crypto_tfm *tfm); |
153 | void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm); | ||
153 | void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); | 154 | void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); |
154 | int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, | 155 | int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, |
155 | u32 may_sleep); | 156 | u32 may_sleep); |
156 | int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *, | ||
157 | s64 *, unsigned int *, u8 *, u32); | ||
158 | struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); | 157 | struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); |
159 | int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, | 158 | int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, |
160 | struct scatterlist *, struct scatterlist *, unsigned int *, | 159 | struct scatterlist *, struct scatterlist *, unsigned int *, |
@@ -178,8 +177,8 @@ void nx_debugfs_fini(struct nx_crypto_driver *); | |||
178 | 177 | ||
179 | extern struct crypto_alg nx_cbc_aes_alg; | 178 | extern struct crypto_alg nx_cbc_aes_alg; |
180 | extern struct crypto_alg nx_ecb_aes_alg; | 179 | extern struct crypto_alg nx_ecb_aes_alg; |
181 | extern struct crypto_alg nx_gcm_aes_alg; | 180 | extern struct aead_alg nx_gcm_aes_alg; |
182 | extern struct crypto_alg nx_gcm4106_aes_alg; | 181 | extern struct aead_alg nx_gcm4106_aes_alg; |
183 | extern struct crypto_alg nx_ctr_aes_alg; | 182 | extern struct crypto_alg nx_ctr_aes_alg; |
184 | extern struct crypto_alg nx_ctr3686_aes_alg; | 183 | extern struct crypto_alg nx_ctr3686_aes_alg; |
185 | extern struct crypto_alg nx_ccm_aes_alg; | 184 | extern struct crypto_alg nx_ccm_aes_alg; |
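nx_gcm_aes_alg and nx_gcm4106_aes_alg switching from struct crypto_alg to struct aead_alg is the visible edge of this cycle's new AEAD interface. A sketch of what such a declaration looks like under that interface, with placeholder handlers (the GCM sizes and .base fields are standard; every "sketch_" name is invented here, not the nx driver's):

    #include <crypto/internal/aead.h>

    /* Placeholder handlers; the real driver supplies its own. */
    static int sketch_setkey(struct crypto_aead *tfm, const u8 *key,
                             unsigned int keylen);
    static int sketch_setauthsize(struct crypto_aead *tfm, unsigned int sz);
    static int sketch_encrypt(struct aead_request *req);
    static int sketch_decrypt(struct aead_request *req);

    static struct aead_alg sketch_gcm_alg = {
            .setkey         = sketch_setkey,
            .setauthsize    = sketch_setauthsize,
            .encrypt        = sketch_encrypt,
            .decrypt        = sketch_decrypt,
            .ivsize         = 12,   /* GCM nonce */
            .maxauthsize    = 16,   /* full AES-block tag */
            .base = {
                    .cra_name        = "gcm(aes)",
                    .cra_driver_name = "gcm-aes-sketch",
                    .cra_priority    = 300,
                    .cra_blocksize   = 1,
                    .cra_module      = THIS_MODULE,
            },
    };

Such an alg is registered with crypto_register_aead() rather than crypto_register_alg().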
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4d63e0d4da9a..b2024c95a3cf 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -362,7 +362,13 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) | |||
362 | 362 | ||
363 | static int omap_sham_hw_init(struct omap_sham_dev *dd) | 363 | static int omap_sham_hw_init(struct omap_sham_dev *dd) |
364 | { | 364 | { |
365 | pm_runtime_get_sync(dd->dev); | 365 | int err; |
366 | |||
367 | err = pm_runtime_get_sync(dd->dev); | ||
368 | if (err < 0) { | ||
369 | dev_err(dd->dev, "failed to get sync: %d\n", err); | ||
370 | return err; | ||
371 | } | ||
366 | 372 | ||
367 | if (!test_bit(FLAGS_INIT, &dd->flags)) { | 373 | if (!test_bit(FLAGS_INIT, &dd->flags)) { |
368 | set_bit(FLAGS_INIT, &dd->flags); | 374 | set_bit(FLAGS_INIT, &dd->flags); |
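pm_runtime_get_sync() returns a negative errno when the resume fails, and it leaves the usage count incremented even then, so a device that never powered up must not be touched afterwards. The hunk above logs and bails out; a fuller form of the pattern (the pm_runtime_put_noidle() rebalance is common practice elsewhere, not something this patch adds) looks like:

    err = pm_runtime_get_sync(dev);
    if (err < 0) {
            pm_runtime_put_noidle(dev);     /* get_sync still took a reference */
            dev_err(dev, "failed to get sync: %d\n", err);
            return err;
    }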
@@ -1793,6 +1799,10 @@ static const struct of_device_id omap_sham_of_match[] = { | |||
1793 | .data = &omap_sham_pdata_omap2, | 1799 | .data = &omap_sham_pdata_omap2, |
1794 | }, | 1800 | }, |
1795 | { | 1801 | { |
1802 | .compatible = "ti,omap3-sham", | ||
1803 | .data = &omap_sham_pdata_omap2, | ||
1804 | }, | ||
1805 | { | ||
1796 | .compatible = "ti,omap4-sham", | 1806 | .compatible = "ti,omap4-sham", |
1797 | .data = &omap_sham_pdata_omap4, | 1807 | .data = &omap_sham_pdata_omap4, |
1798 | }, | 1808 | }, |
@@ -1947,7 +1957,13 @@ static int omap_sham_probe(struct platform_device *pdev) | |||
1947 | 1957 | ||
1948 | pm_runtime_enable(dev); | 1958 | pm_runtime_enable(dev); |
1949 | pm_runtime_irq_safe(dev); | 1959 | pm_runtime_irq_safe(dev); |
1950 | pm_runtime_get_sync(dev); | 1960 | |
1961 | err = pm_runtime_get_sync(dev); | ||
1962 | if (err < 0) { | ||
1963 | dev_err(dev, "failed to get sync: %d\n", err); | ||
1964 | goto err_pm; | ||
1965 | } | ||
1966 | |||
1951 | rev = omap_sham_read(dd, SHA_REG_REV(dd)); | 1967 | rev = omap_sham_read(dd, SHA_REG_REV(dd)); |
1952 | pm_runtime_put_sync(&pdev->dev); | 1968 | pm_runtime_put_sync(&pdev->dev); |
1953 | 1969 | ||
@@ -1977,6 +1993,7 @@ err_algs: | |||
1977 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | 1993 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
1978 | crypto_unregister_ahash( | 1994 | crypto_unregister_ahash( |
1979 | &dd->pdata->algs_info[i].algs_list[j]); | 1995 | &dd->pdata->algs_info[i].algs_list[j]); |
1996 | err_pm: | ||
1980 | pm_runtime_disable(dev); | 1997 | pm_runtime_disable(dev); |
1981 | if (dd->dma_lch) | 1998 | if (dd->dma_lch) |
1982 | dma_release_channel(dd->dma_lch); | 1999 | dma_release_channel(dd->dma_lch); |
@@ -2019,7 +2036,11 @@ static int omap_sham_suspend(struct device *dev) | |||
2019 | 2036 | ||
2020 | static int omap_sham_resume(struct device *dev) | 2037 | static int omap_sham_resume(struct device *dev) |
2021 | { | 2038 | { |
2022 | pm_runtime_get_sync(dev); | 2039 | int err = pm_runtime_get_sync(dev); |
2040 | if (err < 0) { | ||
2041 | dev_err(dev, "failed to get sync: %d\n", err); | ||
2042 | return err; | ||
2043 | } | ||
2023 | return 0; | 2044 | return 0; |
2024 | } | 2045 | } |
2025 | #endif | 2046 | #endif |
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 5da5b98b8f29..4f56f3681abd 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | #include <crypto/aead.h> | 18 | #include <crypto/internal/aead.h> |
19 | #include <crypto/aes.h> | 19 | #include <crypto/aes.h> |
20 | #include <crypto/algapi.h> | 20 | #include <crypto/algapi.h> |
21 | #include <crypto/authenc.h> | 21 | #include <crypto/authenc.h> |
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/rtnetlink.h> | 40 | #include <linux/rtnetlink.h> |
41 | #include <linux/scatterlist.h> | 41 | #include <linux/scatterlist.h> |
42 | #include <linux/sched.h> | 42 | #include <linux/sched.h> |
43 | #include <linux/sizes.h> | ||
43 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
44 | #include <linux/timer.h> | 45 | #include <linux/timer.h> |
45 | 46 | ||
@@ -261,18 +262,9 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, | |||
261 | } | 262 | } |
262 | 263 | ||
263 | /* Count the number of scatterlist entries in a scatterlist. */ | 264 | /* Count the number of scatterlist entries in a scatterlist. */ |
264 | static int sg_count(struct scatterlist *sg_list, int nbytes) | 265 | static inline int sg_count(struct scatterlist *sg_list, int nbytes) |
265 | { | 266 | { |
266 | struct scatterlist *sg = sg_list; | 267 | return sg_nents_for_len(sg_list, nbytes); |
267 | int sg_nents = 0; | ||
268 | |||
269 | while (nbytes > 0) { | ||
270 | ++sg_nents; | ||
271 | nbytes -= sg->length; | ||
272 | sg = sg_next(sg); | ||
273 | } | ||
274 | |||
275 | return sg_nents; | ||
276 | } | 268 | } |
277 | 269 | ||
278 | static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) | 270 | static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) |
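Replacing the open-coded loop with sg_nents_for_len() also gains validation: the helper reports how many entries are needed to cover nbytes and returns -EINVAL when the scatterlist is too short, where the old loop would have walked off the end of the list. Roughly what the helper does (the authoritative version is in lib/scatterlist.c):

    static int sg_nents_for_len_sketch(struct scatterlist *sg, u64 len)
    {
            int nents;

            if (!len)
                    return 0;

            for (nents = 0; sg; sg = sg_next(sg)) {
                    nents++;
                    if (sg->length >= len)
                            return nents;
                    len -= sg->length;
            }

            return -EINVAL;         /* list shorter than len bytes */
    }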
@@ -326,6 +318,7 @@ static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) | |||
326 | struct spacc_ddt *src_ddt, *dst_ddt; | 318 | struct spacc_ddt *src_ddt, *dst_ddt; |
327 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); | 319 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); |
328 | unsigned nents = sg_count(areq->src, areq->cryptlen); | 320 | unsigned nents = sg_count(areq->src, areq->cryptlen); |
321 | unsigned total; | ||
329 | dma_addr_t iv_addr; | 322 | dma_addr_t iv_addr; |
330 | struct scatterlist *cur; | 323 | struct scatterlist *cur; |
331 | int i, dst_ents, src_ents, assoc_ents; | 324 | int i, dst_ents, src_ents, assoc_ents; |
@@ -369,11 +362,18 @@ static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) | |||
369 | * Map the associated data. For decryption we don't copy the | 362 | * Map the associated data. For decryption we don't copy the |
370 | * associated data. | 363 | * associated data. |
371 | */ | 364 | */ |
365 | total = areq->assoclen; | ||
372 | for_each_sg(areq->assoc, cur, assoc_ents, i) { | 366 | for_each_sg(areq->assoc, cur, assoc_ents, i) { |
373 | ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); | 367 | unsigned len = sg_dma_len(cur); |
368 | |||
369 | if (len > total) | ||
370 | len = total; | ||
371 | |||
372 | total -= len; | ||
373 | |||
374 | ddt_set(src_ddt++, sg_dma_address(cur), len); | ||
374 | if (req->is_encrypt) | 375 | if (req->is_encrypt) |
375 | ddt_set(dst_ddt++, sg_dma_address(cur), | 376 | ddt_set(dst_ddt++, sg_dma_address(cur), len); |
376 | sg_dma_len(cur)); | ||
377 | } | 377 | } |
378 | ddt_set(src_ddt++, iv_addr, ivsize); | 378 | ddt_set(src_ddt++, iv_addr, ivsize); |
379 | 379 | ||
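The clamp above matters because DMA mapping works on whole scatterlist entries: the last entry covering the associated data may run past assoclen, and without the clamp the hardware DDT would describe bytes that are not AAD. The generic shape of such a budgeted walk, with a hypothetical emit_hw_entry() standing in for ddt_set():

    unsigned int budget = assoclen;         /* bytes of AAD to describe */
    struct scatterlist *sg;
    int i;

    for_each_sg(assoc, sg, nents, i) {
            unsigned int len = min_t(unsigned int, budget, sg_dma_len(sg));

            emit_hw_entry(sg_dma_address(sg), len); /* hypothetical */
            budget -= len;
            if (!budget)
                    break;
    }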
@@ -790,7 +790,8 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm) | |||
790 | 790 | ||
791 | get_random_bytes(ctx->salt, sizeof(ctx->salt)); | 791 | get_random_bytes(ctx->salt, sizeof(ctx->salt)); |
792 | 792 | ||
793 | tfm->crt_aead.reqsize = sizeof(struct spacc_req); | 793 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
794 | sizeof(struct spacc_req)); | ||
794 | 795 | ||
795 | return 0; | 796 | return 0; |
796 | } | 797 | } |
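Writing tfm->crt_aead.reqsize directly is gone along with the old AEAD type; the internal header now provides an accessor, and __crypto_aead_cast() converts the generic crypto_tfm handed to a cra_init callback. The same idiom, sketched with an invented context type:

    #include <crypto/internal/aead.h>

    struct sketch_req { int dummy; };       /* per-request driver state */

    static int sketch_aead_cra_init(struct crypto_tfm *tfm)
    {
            /* reserve driver context behind every aead_request */
            crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
                                    sizeof(struct sketch_req));
            return 0;
    }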
@@ -1754,15 +1755,15 @@ static int spacc_probe(struct platform_device *pdev) | |||
1754 | return PTR_ERR(engine->clk); | 1755 | return PTR_ERR(engine->clk); |
1755 | } | 1756 | } |
1756 | 1757 | ||
1757 | if (clk_enable(engine->clk)) { | 1758 | if (clk_prepare_enable(engine->clk)) { |
1758 | dev_info(&pdev->dev, "unable to enable clk\n"); | 1759 | dev_info(&pdev->dev, "unable to prepare/enable clk\n"); |
1759 | clk_put(engine->clk); | 1760 | clk_put(engine->clk); |
1760 | return -EIO; | 1761 | return -EIO; |
1761 | } | 1762 | } |
1762 | 1763 | ||
1763 | err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); | 1764 | err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); |
1764 | if (err) { | 1765 | if (err) { |
1765 | clk_disable(engine->clk); | 1766 | clk_disable_unprepare(engine->clk); |
1766 | clk_put(engine->clk); | 1767 | clk_put(engine->clk); |
1767 | return err; | 1768 | return err; |
1768 | } | 1769 | } |
@@ -1830,7 +1831,7 @@ static int spacc_remove(struct platform_device *pdev) | |||
1830 | crypto_unregister_alg(&alg->alg); | 1831 | crypto_unregister_alg(&alg->alg); |
1831 | } | 1832 | } |
1832 | 1833 | ||
1833 | clk_disable(engine->clk); | 1834 | clk_disable_unprepare(engine->clk); |
1834 | clk_put(engine->clk); | 1835 | clk_put(engine->clk); |
1835 | 1836 | ||
1836 | return 0; | 1837 | return 0; |
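On common-clock platforms a clock must be prepared (which may sleep) before it can be enabled (which is atomic); clk_prepare_enable() and clk_disable_unprepare() bundle the two steps, which is why every enable/disable pair in this driver is upgraded together. The usual probe-side shape, as a sketch:

    static int sketch_probe(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, NULL);
            int ret;

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_prepare_enable(clk);  /* prepare (may sleep) + enable */
            if (ret)
                    return ret;

            /* ... hardware bring-up ... */

            clk_disable_unprepare(clk);     /* inverse pair on teardown */
            return 0;
    }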
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 49bede2a9f77..6fdb9e8b22a7 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig | |||
@@ -2,9 +2,8 @@ config CRYPTO_DEV_QAT | |||
2 | tristate | 2 | tristate |
3 | select CRYPTO_AEAD | 3 | select CRYPTO_AEAD |
4 | select CRYPTO_AUTHENC | 4 | select CRYPTO_AUTHENC |
5 | select CRYPTO_ALGAPI | 5 | select CRYPTO_BLKCIPHER |
6 | select CRYPTO_AES | 6 | select CRYPTO_HMAC |
7 | select CRYPTO_CBC | ||
8 | select CRYPTO_SHA1 | 7 | select CRYPTO_SHA1 |
9 | select CRYPTO_SHA256 | 8 | select CRYPTO_SHA256 |
10 | select CRYPTO_SHA512 | 9 | select CRYPTO_SHA512 |
@@ -13,7 +12,6 @@ config CRYPTO_DEV_QAT | |||
13 | config CRYPTO_DEV_QAT_DH895xCC | 12 | config CRYPTO_DEV_QAT_DH895xCC |
14 | tristate "Support for Intel(R) DH895xCC" | 13 | tristate "Support for Intel(R) DH895xCC" |
15 | depends on X86 && PCI | 14 | depends on X86 && PCI |
16 | default n | ||
17 | select CRYPTO_DEV_QAT | 15 | select CRYPTO_DEV_QAT |
18 | help | 16 | help |
19 | Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology | 17 | Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology |
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index f22ce7169fa5..5fe902967620 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
@@ -48,7 +48,6 @@ | |||
48 | #define ADF_ACCEL_DEVICES_H_ | 48 | #define ADF_ACCEL_DEVICES_H_ |
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
51 | #include <linux/proc_fs.h> | ||
52 | #include <linux/io.h> | 51 | #include <linux/io.h> |
53 | #include "adf_cfg_common.h" | 52 | #include "adf_cfg_common.h" |
54 | 53 | ||
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h index 0c38a155a865..ef5988afd4c6 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h | |||
@@ -54,14 +54,6 @@ struct adf_user_cfg_key_val { | |||
54 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; | 54 | char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; |
55 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; | 55 | char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; |
56 | union { | 56 | union { |
57 | char *user_val_ptr; | ||
58 | uint64_t padding1; | ||
59 | }; | ||
60 | union { | ||
61 | struct adf_user_cfg_key_val *prev; | ||
62 | uint64_t padding2; | ||
63 | }; | ||
64 | union { | ||
65 | struct adf_user_cfg_key_val *next; | 57 | struct adf_user_cfg_key_val *next; |
66 | uint64_t padding3; | 58 | uint64_t padding3; |
67 | }; | 59 | }; |
@@ -75,10 +67,6 @@ struct adf_user_cfg_section { | |||
75 | uint64_t padding1; | 67 | uint64_t padding1; |
76 | }; | 68 | }; |
77 | union { | 69 | union { |
78 | struct adf_user_cfg_section *prev; | ||
79 | uint64_t padding2; | ||
80 | }; | ||
81 | union { | ||
82 | struct adf_user_cfg_section *next; | 70 | struct adf_user_cfg_section *next; |
83 | uint64_t padding3; | 71 | uint64_t padding3; |
84 | }; | 72 | }; |
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 0666ee6a3360..27e16c09230b 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
@@ -53,6 +53,13 @@ | |||
53 | #include "icp_qat_fw_loader_handle.h" | 53 | #include "icp_qat_fw_loader_handle.h" |
54 | #include "icp_qat_hal.h" | 54 | #include "icp_qat_hal.h" |
55 | 55 | ||
56 | #define ADF_MAJOR_VERSION 0 | ||
57 | #define ADF_MINOR_VERSION 1 | ||
58 | #define ADF_BUILD_VERSION 3 | ||
59 | #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ | ||
60 | __stringify(ADF_MINOR_VERSION) "." \ | ||
61 | __stringify(ADF_BUILD_VERSION) | ||
62 | |||
56 | #define ADF_STATUS_RESTARTING 0 | 63 | #define ADF_STATUS_RESTARTING 0 |
57 | #define ADF_STATUS_STARTING 1 | 64 | #define ADF_STATUS_STARTING 1 |
58 | #define ADF_STATUS_CONFIGURED 2 | 65 | #define ADF_STATUS_CONFIGURED 2 |
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index cb5f066e93a6..e056b9e9bf8a 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
@@ -504,3 +504,4 @@ MODULE_LICENSE("Dual BSD/GPL"); | |||
504 | MODULE_AUTHOR("Intel"); | 504 | MODULE_AUTHOR("Intel"); |
505 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | 505 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); |
506 | MODULE_ALIAS_CRYPTO("intel_qat"); | 506 | MODULE_ALIAS_CRYPTO("intel_qat"); |
507 | MODULE_VERSION(ADF_DRV_VERSION); | ||
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 1dc5b0a17cf7..067402c7c2a9 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #include <linux/module.h> | 47 | #include <linux/module.h> |
48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
49 | #include <linux/crypto.h> | 49 | #include <linux/crypto.h> |
50 | #include <crypto/aead.h> | 50 | #include <crypto/internal/aead.h> |
51 | #include <crypto/aes.h> | 51 | #include <crypto/aes.h> |
52 | #include <crypto/sha.h> | 52 | #include <crypto/sha.h> |
53 | #include <crypto/hash.h> | 53 | #include <crypto/hash.h> |
@@ -653,7 +653,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | |||
653 | } | 653 | } |
654 | 654 | ||
655 | static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | 655 | static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
656 | struct scatterlist *assoc, | 656 | struct scatterlist *assoc, int assoclen, |
657 | struct scatterlist *sgl, | 657 | struct scatterlist *sgl, |
658 | struct scatterlist *sglout, uint8_t *iv, | 658 | struct scatterlist *sglout, uint8_t *iv, |
659 | uint8_t ivlen, | 659 | uint8_t ivlen, |
@@ -685,15 +685,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
685 | for_each_sg(assoc, sg, assoc_n, i) { | 685 | for_each_sg(assoc, sg, assoc_n, i) { |
686 | if (!sg->length) | 686 | if (!sg->length) |
687 | continue; | 687 | continue; |
688 | bufl->bufers[bufs].addr = dma_map_single(dev, | 688 | |
689 | sg_virt(sg), | 689 | if (assoclen <= 0) |
690 | sg->length, | 690 | break; |
691 | DMA_BIDIRECTIONAL); | 691 | |
692 | bufl->bufers[bufs].len = sg->length; | 692 | bufl->bufers[bufs].addr = |
693 | dma_map_single(dev, sg_virt(sg), | ||
694 | min_t(int, assoclen, sg->length), | ||
695 | DMA_BIDIRECTIONAL); | ||
696 | bufl->bufers[bufs].len = min_t(int, assoclen, sg->length); | ||
693 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | 697 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) |
694 | goto err; | 698 | goto err; |
695 | bufs++; | 699 | bufs++; |
700 | assoclen -= sg->length; | ||
696 | } | 701 | } |
702 | |||
697 | if (ivlen) { | 703 | if (ivlen) { |
698 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, | 704 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, |
699 | DMA_BIDIRECTIONAL); | 705 | DMA_BIDIRECTIONAL); |
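The min_t(int, ...) choice is deliberate: sg->length is unsigned, so a plain comparison would promote assoclen to unsigned, and a small or negative remainder would read as a huge length. Forcing both operands to int keeps the comparison signed; isolated:

    #include <linux/kernel.h>

    static int clamp_assoc_len(int remaining, unsigned int sg_len)
    {
            /* min() would compare unsigned; min_t(int, ...) stays signed */
            return min_t(int, remaining, sg_len);
    }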
@@ -845,8 +851,9 @@ static int qat_alg_aead_dec(struct aead_request *areq) | |||
845 | int digst_size = crypto_aead_crt(aead_tfm)->authsize; | 851 | int digst_size = crypto_aead_crt(aead_tfm)->authsize; |
846 | int ret, ctr = 0; | 852 | int ret, ctr = 0; |
847 | 853 | ||
848 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, | 854 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, |
849 | areq->iv, AES_BLOCK_SIZE, qat_req); | 855 | areq->src, areq->dst, areq->iv, |
856 | AES_BLOCK_SIZE, qat_req); | ||
850 | if (unlikely(ret)) | 857 | if (unlikely(ret)) |
851 | return ret; | 858 | return ret; |
852 | 859 | ||
@@ -889,8 +896,9 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, | |||
889 | struct icp_qat_fw_la_bulk_req *msg; | 896 | struct icp_qat_fw_la_bulk_req *msg; |
890 | int ret, ctr = 0; | 897 | int ret, ctr = 0; |
891 | 898 | ||
892 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, | 899 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, |
893 | iv, AES_BLOCK_SIZE, qat_req); | 900 | areq->src, areq->dst, iv, AES_BLOCK_SIZE, |
901 | qat_req); | ||
894 | if (unlikely(ret)) | 902 | if (unlikely(ret)) |
895 | return ret; | 903 | return ret; |
896 | 904 | ||
@@ -1017,7 +1025,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
1017 | struct icp_qat_fw_la_bulk_req *msg; | 1025 | struct icp_qat_fw_la_bulk_req *msg; |
1018 | int ret, ctr = 0; | 1026 | int ret, ctr = 0; |
1019 | 1027 | ||
1020 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, | 1028 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, |
1021 | NULL, 0, qat_req); | 1029 | NULL, 0, qat_req); |
1022 | if (unlikely(ret)) | 1030 | if (unlikely(ret)) |
1023 | return ret; | 1031 | return ret; |
@@ -1055,7 +1063,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
1055 | struct icp_qat_fw_la_bulk_req *msg; | 1063 | struct icp_qat_fw_la_bulk_req *msg; |
1056 | int ret, ctr = 0; | 1064 | int ret, ctr = 0; |
1057 | 1065 | ||
1058 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, | 1066 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, |
1059 | NULL, 0, qat_req); | 1067 | NULL, 0, qat_req); |
1060 | if (unlikely(ret)) | 1068 | if (unlikely(ret)) |
1061 | return ret; | 1069 | return ret; |
@@ -1094,8 +1102,9 @@ static int qat_alg_aead_init(struct crypto_tfm *tfm, | |||
1094 | return -EFAULT; | 1102 | return -EFAULT; |
1095 | spin_lock_init(&ctx->lock); | 1103 | spin_lock_init(&ctx->lock); |
1096 | ctx->qat_hash_alg = hash; | 1104 | ctx->qat_hash_alg = hash; |
1097 | tfm->crt_aead.reqsize = sizeof(struct aead_request) + | 1105 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), |
1098 | sizeof(struct qat_crypto_request); | 1106 | sizeof(struct aead_request) + |
1107 | sizeof(struct qat_crypto_request)); | ||
1099 | ctx->tfm = tfm; | 1108 | ctx->tfm = tfm; |
1100 | return 0; | 1109 | return 0; |
1101 | } | 1110 | } |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 9decea2779c6..1bde45b7a3c5 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | |||
@@ -300,6 +300,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
300 | if (ret) | 300 | if (ret) |
301 | goto out_err; | 301 | goto out_err; |
302 | 302 | ||
303 | pcie_set_readrq(pdev, 1024); | ||
304 | |||
303 | /* enable PCI device */ | 305 | /* enable PCI device */ |
304 | if (pci_enable_device(pdev)) { | 306 | if (pci_enable_device(pdev)) { |
305 | ret = -EFAULT; | 307 | ret = -EFAULT; |
@@ -417,5 +419,6 @@ module_exit(adfdrv_release); | |||
417 | 419 | ||
418 | MODULE_LICENSE("Dual BSD/GPL"); | 420 | MODULE_LICENSE("Dual BSD/GPL"); |
419 | MODULE_AUTHOR("Intel"); | 421 | MODULE_AUTHOR("Intel"); |
420 | MODULE_FIRMWARE("qat_895xcc.bin"); | 422 | MODULE_FIRMWARE(ADF_DH895XCC_FW); |
421 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); | 423 | MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); |
424 | MODULE_VERSION(ADF_DRV_VERSION); | ||
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 6be377f6b9e7..397a500b3d8a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -1578,8 +1578,12 @@ static int sahara_probe(struct platform_device *pdev) | |||
1578 | 1578 | ||
1579 | init_completion(&dev->dma_completion); | 1579 | init_completion(&dev->dma_completion); |
1580 | 1580 | ||
1581 | clk_prepare_enable(dev->clk_ipg); | 1581 | err = clk_prepare_enable(dev->clk_ipg); |
1582 | clk_prepare_enable(dev->clk_ahb); | 1582 | if (err) |
1583 | goto err_link; | ||
1584 | err = clk_prepare_enable(dev->clk_ahb); | ||
1585 | if (err) | ||
1586 | goto clk_ipg_disable; | ||
1583 | 1587 | ||
1584 | version = sahara_read(dev, SAHARA_REG_VERSION); | 1588 | version = sahara_read(dev, SAHARA_REG_VERSION); |
1585 | if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) { | 1589 | if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) { |
@@ -1619,10 +1623,11 @@ err_algs: | |||
1619 | dma_free_coherent(&pdev->dev, | 1623 | dma_free_coherent(&pdev->dev, |
1620 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | 1624 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), |
1621 | dev->hw_link[0], dev->hw_phys_link[0]); | 1625 | dev->hw_link[0], dev->hw_phys_link[0]); |
1622 | clk_disable_unprepare(dev->clk_ipg); | ||
1623 | clk_disable_unprepare(dev->clk_ahb); | ||
1624 | kthread_stop(dev->kthread); | 1626 | kthread_stop(dev->kthread); |
1625 | dev_ptr = NULL; | 1627 | dev_ptr = NULL; |
1628 | clk_disable_unprepare(dev->clk_ahb); | ||
1629 | clk_ipg_disable: | ||
1630 | clk_disable_unprepare(dev->clk_ipg); | ||
1626 | err_link: | 1631 | err_link: |
1627 | dma_free_coherent(&pdev->dev, | 1632 | dma_free_coherent(&pdev->dev, |
1628 | 2 * AES_KEYSIZE_128, | 1633 | 2 * AES_KEYSIZE_128, |
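The reworked error path follows the standard kernel unwind idiom: acquire resources top to bottom, then release them bottom to top through a ladder of labels, each label undoing exactly the steps that succeeded before the failure. Abstracted with invented acquire/release names:

    static int sketch_probe(void)
    {
            int err;

            err = acquire_a();              /* hypothetical resources */
            if (err)
                    return err;

            err = acquire_b();
            if (err)
                    goto release_a;

            err = acquire_c();
            if (err)
                    goto release_b;

            return 0;

    release_b:                              /* unwind in reverse order */
            release_b_res();
    release_a:
            release_a_res();
            return err;
    }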
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 857414afa29a..83aca95a95bc 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #include <crypto/des.h> | 46 | #include <crypto/des.h> |
47 | #include <crypto/sha.h> | 47 | #include <crypto/sha.h> |
48 | #include <crypto/md5.h> | 48 | #include <crypto/md5.h> |
49 | #include <crypto/aead.h> | 49 | #include <crypto/internal/aead.h> |
50 | #include <crypto/authenc.h> | 50 | #include <crypto/authenc.h> |
51 | #include <crypto/skcipher.h> | 51 | #include <crypto/skcipher.h> |
52 | #include <crypto/hash.h> | 52 | #include <crypto/hash.h> |
@@ -55,49 +55,92 @@ | |||
55 | 55 | ||
56 | #include "talitos.h" | 56 | #include "talitos.h" |
57 | 57 | ||
58 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | 58 | static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, |
59 | bool is_sec1) | ||
59 | { | 60 | { |
60 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | 61 | ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); |
61 | talitos_ptr->eptr = upper_32_bits(dma_addr); | 62 | if (!is_sec1) |
63 | ptr->eptr = upper_32_bits(dma_addr); | ||
64 | } | ||
65 | |||
66 | static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, | ||
67 | bool is_sec1) | ||
68 | { | ||
69 | if (is_sec1) { | ||
70 | ptr->res = 0; | ||
71 | ptr->len1 = cpu_to_be16(len); | ||
72 | } else { | ||
73 | ptr->len = cpu_to_be16(len); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr, | ||
78 | bool is_sec1) | ||
79 | { | ||
80 | if (is_sec1) | ||
81 | return be16_to_cpu(ptr->len1); | ||
82 | else | ||
83 | return be16_to_cpu(ptr->len); | ||
84 | } | ||
85 | |||
86 | static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) | ||
87 | { | ||
88 | if (!is_sec1) | ||
89 | ptr->j_extent = 0; | ||
62 | } | 90 | } |
63 | 91 | ||
64 | /* | 92 | /* |
65 | * map virtual single (contiguous) pointer to h/w descriptor pointer | 93 | * map virtual single (contiguous) pointer to h/w descriptor pointer |
66 | */ | 94 | */ |
67 | static void map_single_talitos_ptr(struct device *dev, | 95 | static void map_single_talitos_ptr(struct device *dev, |
68 | struct talitos_ptr *talitos_ptr, | 96 | struct talitos_ptr *ptr, |
69 | unsigned short len, void *data, | 97 | unsigned int len, void *data, |
70 | unsigned char extent, | ||
71 | enum dma_data_direction dir) | 98 | enum dma_data_direction dir) |
72 | { | 99 | { |
73 | dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); | 100 | dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); |
101 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
102 | bool is_sec1 = has_ftr_sec1(priv); | ||
74 | 103 | ||
75 | talitos_ptr->len = cpu_to_be16(len); | 104 | to_talitos_ptr_len(ptr, len, is_sec1); |
76 | to_talitos_ptr(talitos_ptr, dma_addr); | 105 | to_talitos_ptr(ptr, dma_addr, is_sec1); |
77 | talitos_ptr->j_extent = extent; | 106 | to_talitos_ptr_extent_clear(ptr, is_sec1); |
78 | } | 107 | } |
79 | 108 | ||
80 | /* | 109 | /* |
81 | * unmap bus single (contiguous) h/w descriptor pointer | 110 | * unmap bus single (contiguous) h/w descriptor pointer |
82 | */ | 111 | */ |
83 | static void unmap_single_talitos_ptr(struct device *dev, | 112 | static void unmap_single_talitos_ptr(struct device *dev, |
84 | struct talitos_ptr *talitos_ptr, | 113 | struct talitos_ptr *ptr, |
85 | enum dma_data_direction dir) | 114 | enum dma_data_direction dir) |
86 | { | 115 | { |
87 | dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr), | 116 | struct talitos_private *priv = dev_get_drvdata(dev); |
88 | be16_to_cpu(talitos_ptr->len), dir); | 117 | bool is_sec1 = has_ftr_sec1(priv); |
118 | |||
119 | dma_unmap_single(dev, be32_to_cpu(ptr->ptr), | ||
120 | from_talitos_ptr_len(ptr, is_sec1), dir); | ||
89 | } | 121 | } |
90 | 122 | ||
91 | static int reset_channel(struct device *dev, int ch) | 123 | static int reset_channel(struct device *dev, int ch) |
92 | { | 124 | { |
93 | struct talitos_private *priv = dev_get_drvdata(dev); | 125 | struct talitos_private *priv = dev_get_drvdata(dev); |
94 | unsigned int timeout = TALITOS_TIMEOUT; | 126 | unsigned int timeout = TALITOS_TIMEOUT; |
127 | bool is_sec1 = has_ftr_sec1(priv); | ||
95 | 128 | ||
96 | setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET); | 129 | if (is_sec1) { |
130 | setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, | ||
131 | TALITOS1_CCCR_LO_RESET); | ||
97 | 132 | ||
98 | while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET) | 133 | while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) & |
99 | && --timeout) | 134 | TALITOS1_CCCR_LO_RESET) && --timeout) |
100 | cpu_relax(); | 135 | cpu_relax(); |
136 | } else { | ||
137 | setbits32(priv->chan[ch].reg + TALITOS_CCCR, | ||
138 | TALITOS2_CCCR_RESET); | ||
139 | |||
140 | while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & | ||
141 | TALITOS2_CCCR_RESET) && --timeout) | ||
142 | cpu_relax(); | ||
143 | } | ||
101 | 144 | ||
102 | if (timeout == 0) { | 145 | if (timeout == 0) { |
103 | dev_err(dev, "failed to reset channel %d\n", ch); | 146 | dev_err(dev, "failed to reset channel %d\n", ch); |
@@ -120,11 +163,12 @@ static int reset_device(struct device *dev) | |||
120 | { | 163 | { |
121 | struct talitos_private *priv = dev_get_drvdata(dev); | 164 | struct talitos_private *priv = dev_get_drvdata(dev); |
122 | unsigned int timeout = TALITOS_TIMEOUT; | 165 | unsigned int timeout = TALITOS_TIMEOUT; |
123 | u32 mcr = TALITOS_MCR_SWR; | 166 | bool is_sec1 = has_ftr_sec1(priv); |
167 | u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR; | ||
124 | 168 | ||
125 | setbits32(priv->reg + TALITOS_MCR, mcr); | 169 | setbits32(priv->reg + TALITOS_MCR, mcr); |
126 | 170 | ||
127 | while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR) | 171 | while ((in_be32(priv->reg + TALITOS_MCR) & mcr) |
128 | && --timeout) | 172 | && --timeout) |
129 | cpu_relax(); | 173 | cpu_relax(); |
130 | 174 | ||
@@ -148,6 +192,7 @@ static int init_device(struct device *dev) | |||
148 | { | 192 | { |
149 | struct talitos_private *priv = dev_get_drvdata(dev); | 193 | struct talitos_private *priv = dev_get_drvdata(dev); |
150 | int ch, err; | 194 | int ch, err; |
195 | bool is_sec1 = has_ftr_sec1(priv); | ||
151 | 196 | ||
152 | /* | 197 | /* |
153 | * Master reset | 198 | * Master reset |
@@ -171,12 +216,19 @@ static int init_device(struct device *dev) | |||
171 | } | 216 | } |
172 | 217 | ||
173 | /* enable channel done and error interrupts */ | 218 | /* enable channel done and error interrupts */ |
174 | setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); | 219 | if (is_sec1) { |
175 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); | 220 | clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT); |
221 | clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); | ||
222 | /* disable parity error check in DEU (test vectors appear erroneous) */ ||
223 | setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE); | ||
224 | } else { | ||
225 | setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT); | ||
226 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); | ||
227 | } | ||
176 | 228 | ||
177 | /* disable integrity check error interrupts (use writeback instead) */ | 229 | /* disable integrity check error interrupts (use writeback instead) */ |
178 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) | 230 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) |
179 | setbits32(priv->reg + TALITOS_MDEUICR_LO, | 231 | setbits32(priv->reg_mdeu + TALITOS_EUICR_LO, |
180 | TALITOS_MDEUICR_LO_ICE); | 232 | TALITOS_MDEUICR_LO_ICE); |
181 | 233 | ||
182 | return 0; | 234 | return 0; |
@@ -204,6 +256,7 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
204 | struct talitos_request *request; | 256 | struct talitos_request *request; |
205 | unsigned long flags; | 257 | unsigned long flags; |
206 | int head; | 258 | int head; |
259 | bool is_sec1 = has_ftr_sec1(priv); | ||
207 | 260 | ||
208 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); | 261 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); |
209 | 262 | ||
@@ -217,8 +270,17 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
217 | request = &priv->chan[ch].fifo[head]; | 270 | request = &priv->chan[ch].fifo[head]; |
218 | 271 | ||
219 | /* map descriptor and save caller data */ | 272 | /* map descriptor and save caller data */ |
220 | request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), | 273 | if (is_sec1) { |
221 | DMA_BIDIRECTIONAL); | 274 | desc->hdr1 = desc->hdr; |
275 | desc->next_desc = 0; | ||
276 | request->dma_desc = dma_map_single(dev, &desc->hdr1, | ||
277 | TALITOS_DESC_SIZE, | ||
278 | DMA_BIDIRECTIONAL); | ||
279 | } else { | ||
280 | request->dma_desc = dma_map_single(dev, desc, | ||
281 | TALITOS_DESC_SIZE, | ||
282 | DMA_BIDIRECTIONAL); | ||
283 | } | ||
222 | request->callback = callback; | 284 | request->callback = callback; |
223 | request->context = context; | 285 | request->context = context; |
224 | 286 | ||
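SEC1 also expects the descriptor image to begin at what SEC2 treats as the second header word, so talitos_submit() duplicates hdr into hdr1, clears the SEC1-only next_desc chain pointer, and maps the buffer starting at &desc->hdr1. That only works with a layout roughly like this sketch (again, talitos.h is authoritative):

    struct talitos_desc_sketch {
            __be32 hdr;                     /* SEC2 header */
            union {
                    __be32 hdr_lo;          /* SEC2 low header word */
                    __be32 hdr1;            /* SEC1 header: desc starts here */
            };
            struct talitos_ptr ptr[7];      /* pointer/length pairs */
            __be32 next_desc;               /* SEC1 descriptor chaining */
    };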
@@ -250,16 +312,21 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
250 | struct talitos_request *request, saved_req; | 312 | struct talitos_request *request, saved_req; |
251 | unsigned long flags; | 313 | unsigned long flags; |
252 | int tail, status; | 314 | int tail, status; |
315 | bool is_sec1 = has_ftr_sec1(priv); | ||
253 | 316 | ||
254 | spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); | 317 | spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
255 | 318 | ||
256 | tail = priv->chan[ch].tail; | 319 | tail = priv->chan[ch].tail; |
257 | while (priv->chan[ch].fifo[tail].desc) { | 320 | while (priv->chan[ch].fifo[tail].desc) { |
321 | __be32 hdr; | ||
322 | |||
258 | request = &priv->chan[ch].fifo[tail]; | 323 | request = &priv->chan[ch].fifo[tail]; |
259 | 324 | ||
260 | /* descriptors with their done bits set don't get the error */ | 325 | /* descriptors with their done bits set don't get the error */ |
261 | rmb(); | 326 | rmb(); |
262 | if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) | 327 | hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr; |
328 | |||
329 | if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) | ||
263 | status = 0; | 330 | status = 0; |
264 | else | 331 | else |
265 | if (!error) | 332 | if (!error) |
@@ -268,7 +335,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
268 | status = error; | 335 | status = error; |
269 | 336 | ||
270 | dma_unmap_single(dev, request->dma_desc, | 337 | dma_unmap_single(dev, request->dma_desc, |
271 | sizeof(struct talitos_desc), | 338 | TALITOS_DESC_SIZE, |
272 | DMA_BIDIRECTIONAL); | 339 | DMA_BIDIRECTIONAL); |
273 | 340 | ||
274 | /* copy entries so we can call callback outside lock */ | 341 | /* copy entries so we can call callback outside lock */ |
@@ -302,8 +369,37 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
302 | /* | 369 | /* |
303 | * process completed requests for channels that have done status | 370 | * process completed requests for channels that have done status |
304 | */ | 371 | */ |
305 | #define DEF_TALITOS_DONE(name, ch_done_mask) \ | 372 | #define DEF_TALITOS1_DONE(name, ch_done_mask) \ |
306 | static void talitos_done_##name(unsigned long data) \ | 373 | static void talitos1_done_##name(unsigned long data) \ |
374 | { \ | ||
375 | struct device *dev = (struct device *)data; \ | ||
376 | struct talitos_private *priv = dev_get_drvdata(dev); \ | ||
377 | unsigned long flags; \ | ||
378 | \ | ||
379 | if (ch_done_mask & 0x10000000) \ | ||
380 | flush_channel(dev, 0, 0, 0); \ | ||
381 | if (priv->num_channels == 1) \ | ||
382 | goto out; \ | ||
383 | if (ch_done_mask & 0x40000000) \ | ||
384 | flush_channel(dev, 1, 0, 0); \ | ||
385 | if (ch_done_mask & 0x00010000) \ | ||
386 | flush_channel(dev, 2, 0, 0); \ | ||
387 | if (ch_done_mask & 0x00040000) \ | ||
388 | flush_channel(dev, 3, 0, 0); \ | ||
389 | \ | ||
390 | out: \ | ||
391 | /* At this point, all completed channels have been processed */ \ | ||
392 | /* Unmask done interrupts for channels completed later on. */ \ | ||
393 | spin_lock_irqsave(&priv->reg_lock, flags); \ | ||
394 | clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ | ||
395 | clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \ | ||
396 | spin_unlock_irqrestore(&priv->reg_lock, flags); \ | ||
397 | } | ||
398 | |||
399 | DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) | ||
400 | |||
401 | #define DEF_TALITOS2_DONE(name, ch_done_mask) \ | ||
402 | static void talitos2_done_##name(unsigned long data) \ | ||
307 | { \ | 403 | { \ |
308 | struct device *dev = (struct device *)data; \ | 404 | struct device *dev = (struct device *)data; \ |
309 | struct talitos_private *priv = dev_get_drvdata(dev); \ | 405 | struct talitos_private *priv = dev_get_drvdata(dev); \ |
@@ -325,12 +421,13 @@ out: \ | |||
325 | /* Unmask done interrupts for channels completed later on. */ \ | 421 | /* Unmask done interrupts for channels completed later on. */ \ |
326 | spin_lock_irqsave(&priv->reg_lock, flags); \ | 422 | spin_lock_irqsave(&priv->reg_lock, flags); \ |
327 | setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ | 423 | setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ |
328 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ | 424 | setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \ |
329 | spin_unlock_irqrestore(&priv->reg_lock, flags); \ | 425 | spin_unlock_irqrestore(&priv->reg_lock, flags); \ |
330 | } | 426 | } |
331 | DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) | 427 | |
332 | DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) | 428 | DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) |
333 | DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE) | 429 | DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) |
430 | DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) | ||
334 | 431 | ||
335 | /* | 432 | /* |
336 | * locate current (offending) descriptor | 433 | * locate current (offending) descriptor |
@@ -377,44 +474,44 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) | |||
377 | switch (desc_hdr & DESC_HDR_SEL0_MASK) { | 474 | switch (desc_hdr & DESC_HDR_SEL0_MASK) { |
378 | case DESC_HDR_SEL0_AFEU: | 475 | case DESC_HDR_SEL0_AFEU: |
379 | dev_err(dev, "AFEUISR 0x%08x_%08x\n", | 476 | dev_err(dev, "AFEUISR 0x%08x_%08x\n", |
380 | in_be32(priv->reg + TALITOS_AFEUISR), | 477 | in_be32(priv->reg_afeu + TALITOS_EUISR), |
381 | in_be32(priv->reg + TALITOS_AFEUISR_LO)); | 478 | in_be32(priv->reg_afeu + TALITOS_EUISR_LO)); |
382 | break; | 479 | break; |
383 | case DESC_HDR_SEL0_DEU: | 480 | case DESC_HDR_SEL0_DEU: |
384 | dev_err(dev, "DEUISR 0x%08x_%08x\n", | 481 | dev_err(dev, "DEUISR 0x%08x_%08x\n", |
385 | in_be32(priv->reg + TALITOS_DEUISR), | 482 | in_be32(priv->reg_deu + TALITOS_EUISR), |
386 | in_be32(priv->reg + TALITOS_DEUISR_LO)); | 483 | in_be32(priv->reg_deu + TALITOS_EUISR_LO)); |
387 | break; | 484 | break; |
388 | case DESC_HDR_SEL0_MDEUA: | 485 | case DESC_HDR_SEL0_MDEUA: |
389 | case DESC_HDR_SEL0_MDEUB: | 486 | case DESC_HDR_SEL0_MDEUB: |
390 | dev_err(dev, "MDEUISR 0x%08x_%08x\n", | 487 | dev_err(dev, "MDEUISR 0x%08x_%08x\n", |
391 | in_be32(priv->reg + TALITOS_MDEUISR), | 488 | in_be32(priv->reg_mdeu + TALITOS_EUISR), |
392 | in_be32(priv->reg + TALITOS_MDEUISR_LO)); | 489 | in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); |
393 | break; | 490 | break; |
394 | case DESC_HDR_SEL0_RNG: | 491 | case DESC_HDR_SEL0_RNG: |
395 | dev_err(dev, "RNGUISR 0x%08x_%08x\n", | 492 | dev_err(dev, "RNGUISR 0x%08x_%08x\n", |
396 | in_be32(priv->reg + TALITOS_RNGUISR), | 493 | in_be32(priv->reg_rngu + TALITOS_ISR), |
397 | in_be32(priv->reg + TALITOS_RNGUISR_LO)); | 494 | in_be32(priv->reg_rngu + TALITOS_ISR_LO)); |
398 | break; | 495 | break; |
399 | case DESC_HDR_SEL0_PKEU: | 496 | case DESC_HDR_SEL0_PKEU: |
400 | dev_err(dev, "PKEUISR 0x%08x_%08x\n", | 497 | dev_err(dev, "PKEUISR 0x%08x_%08x\n", |
401 | in_be32(priv->reg + TALITOS_PKEUISR), | 498 | in_be32(priv->reg_pkeu + TALITOS_EUISR), |
402 | in_be32(priv->reg + TALITOS_PKEUISR_LO)); | 499 | in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); |
403 | break; | 500 | break; |
404 | case DESC_HDR_SEL0_AESU: | 501 | case DESC_HDR_SEL0_AESU: |
405 | dev_err(dev, "AESUISR 0x%08x_%08x\n", | 502 | dev_err(dev, "AESUISR 0x%08x_%08x\n", |
406 | in_be32(priv->reg + TALITOS_AESUISR), | 503 | in_be32(priv->reg_aesu + TALITOS_EUISR), |
407 | in_be32(priv->reg + TALITOS_AESUISR_LO)); | 504 | in_be32(priv->reg_aesu + TALITOS_EUISR_LO)); |
408 | break; | 505 | break; |
409 | case DESC_HDR_SEL0_CRCU: | 506 | case DESC_HDR_SEL0_CRCU: |
410 | dev_err(dev, "CRCUISR 0x%08x_%08x\n", | 507 | dev_err(dev, "CRCUISR 0x%08x_%08x\n", |
411 | in_be32(priv->reg + TALITOS_CRCUISR), | 508 | in_be32(priv->reg_crcu + TALITOS_EUISR), |
412 | in_be32(priv->reg + TALITOS_CRCUISR_LO)); | 509 | in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); |
413 | break; | 510 | break; |
414 | case DESC_HDR_SEL0_KEU: | 511 | case DESC_HDR_SEL0_KEU: |
415 | dev_err(dev, "KEUISR 0x%08x_%08x\n", | 512 | dev_err(dev, "KEUISR 0x%08x_%08x\n", |
416 | in_be32(priv->reg + TALITOS_KEUISR), | 513 | in_be32(priv->reg_pkeu + TALITOS_EUISR), |
417 | in_be32(priv->reg + TALITOS_KEUISR_LO)); | 514 | in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); |
418 | break; | 515 | break; |
419 | } | 516 | } |
420 | 517 | ||
@@ -422,13 +519,13 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) | |||
422 | case DESC_HDR_SEL1_MDEUA: | 519 | case DESC_HDR_SEL1_MDEUA: |
423 | case DESC_HDR_SEL1_MDEUB: | 520 | case DESC_HDR_SEL1_MDEUB: |
424 | dev_err(dev, "MDEUISR 0x%08x_%08x\n", | 521 | dev_err(dev, "MDEUISR 0x%08x_%08x\n", |
425 | in_be32(priv->reg + TALITOS_MDEUISR), | 522 | in_be32(priv->reg_mdeu + TALITOS_EUISR), |
426 | in_be32(priv->reg + TALITOS_MDEUISR_LO)); | 523 | in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); |
427 | break; | 524 | break; |
428 | case DESC_HDR_SEL1_CRCU: | 525 | case DESC_HDR_SEL1_CRCU: |
429 | dev_err(dev, "CRCUISR 0x%08x_%08x\n", | 526 | dev_err(dev, "CRCUISR 0x%08x_%08x\n", |
430 | in_be32(priv->reg + TALITOS_CRCUISR), | 527 | in_be32(priv->reg_crcu + TALITOS_EUISR), |
431 | in_be32(priv->reg + TALITOS_CRCUISR_LO)); | 528 | in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); |
432 | break; | 529 | break; |
433 | } | 530 | } |
434 | 531 | ||
@@ -445,17 +542,24 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) | |||
445 | { | 542 | { |
446 | struct talitos_private *priv = dev_get_drvdata(dev); | 543 | struct talitos_private *priv = dev_get_drvdata(dev); |
447 | unsigned int timeout = TALITOS_TIMEOUT; | 544 | unsigned int timeout = TALITOS_TIMEOUT; |
448 | int ch, error, reset_dev = 0, reset_ch = 0; | 545 | int ch, error, reset_dev = 0; |
449 | u32 v, v_lo; | 546 | u32 v_lo; |
547 | bool is_sec1 = has_ftr_sec1(priv); | ||
548 | int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ | ||
450 | 549 | ||
451 | for (ch = 0; ch < priv->num_channels; ch++) { | 550 | for (ch = 0; ch < priv->num_channels; ch++) { |
452 | /* skip channels without errors */ | 551 | /* skip channels without errors */ |
453 | if (!(isr & (1 << (ch * 2 + 1)))) | 552 | if (is_sec1) { |
454 | continue; | 553 | /* bits 29, 31, 17, 19 */ |
554 | if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6)))) | ||
555 | continue; | ||
556 | } else { | ||
557 | if (!(isr & (1 << (ch * 2 + 1)))) | ||
558 | continue; | ||
559 | } | ||
455 | 560 | ||
456 | error = -EINVAL; | 561 | error = -EINVAL; |
457 | 562 | ||
458 | v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR); | ||
459 | v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); | 563 | v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); |
460 | 564 | ||
461 | if (v_lo & TALITOS_CCPSR_LO_DOF) { | 565 | if (v_lo & TALITOS_CCPSR_LO_DOF) { |
@@ -471,23 +575,28 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) | |||
471 | if (v_lo & TALITOS_CCPSR_LO_MDTE) | 575 | if (v_lo & TALITOS_CCPSR_LO_MDTE) |
472 | dev_err(dev, "master data transfer error\n"); | 576 | dev_err(dev, "master data transfer error\n"); |
473 | if (v_lo & TALITOS_CCPSR_LO_SGDLZ) | 577 | if (v_lo & TALITOS_CCPSR_LO_SGDLZ) |
474 | dev_err(dev, "s/g data length zero error\n"); | 578 | dev_err(dev, is_sec1 ? "pointeur not complete error\n" |
579 | : "s/g data length zero error\n"); | ||
475 | if (v_lo & TALITOS_CCPSR_LO_FPZ) | 580 | if (v_lo & TALITOS_CCPSR_LO_FPZ) |
476 | dev_err(dev, "fetch pointer zero error\n"); | 581 | dev_err(dev, is_sec1 ? "parity error\n" |
582 | : "fetch pointer zero error\n"); | ||
477 | if (v_lo & TALITOS_CCPSR_LO_IDH) | 583 | if (v_lo & TALITOS_CCPSR_LO_IDH) |
478 | dev_err(dev, "illegal descriptor header error\n"); | 584 | dev_err(dev, "illegal descriptor header error\n"); |
479 | if (v_lo & TALITOS_CCPSR_LO_IEU) | 585 | if (v_lo & TALITOS_CCPSR_LO_IEU) |
480 | dev_err(dev, "invalid execution unit error\n"); | 586 | dev_err(dev, is_sec1 ? "static assignment error\n" |
587 | : "invalid exec unit error\n"); | ||
481 | if (v_lo & TALITOS_CCPSR_LO_EU) | 588 | if (v_lo & TALITOS_CCPSR_LO_EU) |
482 | report_eu_error(dev, ch, current_desc_hdr(dev, ch)); | 589 | report_eu_error(dev, ch, current_desc_hdr(dev, ch)); |
483 | if (v_lo & TALITOS_CCPSR_LO_GB) | 590 | if (!is_sec1) { |
484 | dev_err(dev, "gather boundary error\n"); | 591 | if (v_lo & TALITOS_CCPSR_LO_GB) |
485 | if (v_lo & TALITOS_CCPSR_LO_GRL) | 592 | dev_err(dev, "gather boundary error\n"); |
486 | dev_err(dev, "gather return/length error\n"); | 593 | if (v_lo & TALITOS_CCPSR_LO_GRL) |
487 | if (v_lo & TALITOS_CCPSR_LO_SB) | 594 | dev_err(dev, "gather return/length error\n"); |
488 | dev_err(dev, "scatter boundary error\n"); | 595 | if (v_lo & TALITOS_CCPSR_LO_SB) |
489 | if (v_lo & TALITOS_CCPSR_LO_SRL) | 596 | dev_err(dev, "scatter boundary error\n"); |
490 | dev_err(dev, "scatter return/length error\n"); | 597 | if (v_lo & TALITOS_CCPSR_LO_SRL) |
598 | dev_err(dev, "scatter return/length error\n"); | ||
599 | } | ||
491 | 600 | ||
492 | flush_channel(dev, ch, error, reset_ch); | 601 | flush_channel(dev, ch, error, reset_ch); |
493 | 602 | ||
@@ -495,10 +604,10 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) | |||
495 | reset_channel(dev, ch); | 604 | reset_channel(dev, ch); |
496 | } else { | 605 | } else { |
497 | setbits32(priv->chan[ch].reg + TALITOS_CCCR, | 606 | setbits32(priv->chan[ch].reg + TALITOS_CCCR, |
498 | TALITOS_CCCR_CONT); | 607 | TALITOS2_CCCR_CONT); |
499 | setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); | 608 | setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); |
500 | while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & | 609 | while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & |
501 | TALITOS_CCCR_CONT) && --timeout) | 610 | TALITOS2_CCCR_CONT) && --timeout) |
502 | cpu_relax(); | 611 | cpu_relax(); |
503 | if (timeout == 0) { | 612 | if (timeout == 0) { |
504 | dev_err(dev, "failed to restart channel %d\n", | 613 | dev_err(dev, "failed to restart channel %d\n", |
@@ -507,9 +616,14 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) | |||
507 | } | 616 | } |
508 | } | 617 | } |
509 | } | 618 | } |
510 | if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) { | 619 | if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || |
511 | dev_err(dev, "done overflow, internal time out, or rngu error: " | 620 | (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { |
512 | "ISR 0x%08x_%08x\n", isr, isr_lo); | 621 | if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) |
622 | dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", | ||
623 | isr, isr_lo); | ||
624 | else | ||
625 | dev_err(dev, "done overflow, internal time out, or " | ||
626 | "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); | ||
513 | 627 | ||
514 | /* purge request queues */ | 628 | /* purge request queues */ |
515 | for (ch = 0; ch < priv->num_channels; ch++) | 629 | for (ch = 0; ch < priv->num_channels; ch++) |
@@ -520,8 +634,43 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) | |||
520 | } | 634 | } |
521 | } | 635 | } |
522 | 636 | ||
523 | #define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ | 637 | #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ |
524 | static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ | 638 | static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ |
639 | { \ | ||
640 | struct device *dev = data; \ | ||
641 | struct talitos_private *priv = dev_get_drvdata(dev); \ | ||
642 | u32 isr, isr_lo; \ | ||
643 | unsigned long flags; \ | ||
644 | \ | ||
645 | spin_lock_irqsave(&priv->reg_lock, flags); \ | ||
646 | isr = in_be32(priv->reg + TALITOS_ISR); \ | ||
647 | isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ | ||
648 | /* Acknowledge interrupt */ \ | ||
649 | out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ | ||
650 | out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ | ||
651 | \ | ||
652 | if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \ | ||
653 | spin_unlock_irqrestore(&priv->reg_lock, flags); \ | ||
654 | talitos_error(dev, isr & ch_err_mask, isr_lo); \ | ||
655 | } \ | ||
656 | else { \ | ||
657 | if (likely(isr & ch_done_mask)) { \ | ||
658 | /* mask further done interrupts. */ \ | ||
659 | setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ | ||
660 | /* done_task will unmask done interrupts at exit */ \ | ||
661 | tasklet_schedule(&priv->done_task[tlet]); \ | ||
662 | } \ | ||
663 | spin_unlock_irqrestore(&priv->reg_lock, flags); \ | ||
664 | } \ | ||
665 | \ | ||
666 | return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ | ||
667 | IRQ_NONE; \ | ||
668 | } | ||
669 | |||
670 | DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0) | ||
671 | |||
672 | #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ | ||
673 | static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \ | ||
525 | { \ | 674 | { \ |
526 | struct device *dev = data; \ | 675 | struct device *dev = data; \ |
527 | struct talitos_private *priv = dev_get_drvdata(dev); \ | 676 | struct talitos_private *priv = dev_get_drvdata(dev); \ |
@@ -552,9 +701,12 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ | |||
552 | return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ | 701 | return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ |
553 | IRQ_NONE; \ | 702 | IRQ_NONE; \ |
554 | } | 703 | } |
555 | DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0) | 704 | |
556 | DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0) | 705 | DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0) |
557 | DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1) | 706 | DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR, |
707 | 0) | ||
708 | DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR, | ||
709 | 1) | ||
558 | 710 | ||
559 | /* | 711 | /* |
560 | * hwrng | 712 | * hwrng |
@@ -567,7 +719,7 @@ static int talitos_rng_data_present(struct hwrng *rng, int wait) | |||
567 | int i; | 719 | int i; |
568 | 720 | ||
569 | for (i = 0; i < 20; i++) { | 721 | for (i = 0; i < 20; i++) { |
570 | ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) & | 722 | ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) & |
571 | TALITOS_RNGUSR_LO_OFL; | 723 | TALITOS_RNGUSR_LO_OFL; |
572 | if (ofl || !wait) | 724 | if (ofl || !wait) |
573 | break; | 725 | break; |
@@ -583,8 +735,8 @@ static int talitos_rng_data_read(struct hwrng *rng, u32 *data) | |||
583 | struct talitos_private *priv = dev_get_drvdata(dev); | 735 | struct talitos_private *priv = dev_get_drvdata(dev); |
584 | 736 | ||
585 | /* rng fifo requires 64-bit accesses */ | 737 | /* rng fifo requires 64-bit accesses */ |
586 | *data = in_be32(priv->reg + TALITOS_RNGU_FIFO); | 738 | *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO); |
587 | *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO); | 739 | *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO); |
588 | 740 | ||
589 | return sizeof(u32); | 741 | return sizeof(u32); |
590 | } | 742 | } |
@@ -595,8 +747,9 @@ static int talitos_rng_init(struct hwrng *rng) | |||
595 | struct talitos_private *priv = dev_get_drvdata(dev); | 747 | struct talitos_private *priv = dev_get_drvdata(dev); |
596 | unsigned int timeout = TALITOS_TIMEOUT; | 748 | unsigned int timeout = TALITOS_TIMEOUT; |
597 | 749 | ||
598 | setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR); | 750 | setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR); |
599 | while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD) | 751 | while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO) |
752 | & TALITOS_RNGUSR_LO_RD) | ||
600 | && --timeout) | 753 | && --timeout) |
601 | cpu_relax(); | 754 | cpu_relax(); |
602 | if (timeout == 0) { | 755 | if (timeout == 0) { |
@@ -605,7 +758,7 @@ static int talitos_rng_init(struct hwrng *rng) | |||
605 | } | 758 | } |
606 | 759 | ||
607 | /* start generating */ | 760 | /* start generating */ |
608 | setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0); | 761 | setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0); |
609 | 762 | ||
610 | return 0; | 763 | return 0; |
611 | } | 764 | } |
@@ -661,7 +814,7 @@ struct talitos_ahash_req_ctx { | |||
661 | unsigned int first; | 814 | unsigned int first; |
662 | unsigned int last; | 815 | unsigned int last; |
663 | unsigned int to_hash_later; | 816 | unsigned int to_hash_later; |
664 | u64 nbuf; | 817 | unsigned int nbuf; |
665 | struct scatterlist bufsl[2]; | 818 | struct scatterlist bufsl[2]; |
666 | struct scatterlist *psrc; | 819 | struct scatterlist *psrc; |
667 | }; | 820 | }; |
@@ -712,9 +865,10 @@ badkey: | |||
712 | * @dst_chained: whether dst is chained or not | 865 | * @dst_chained: whether dst is chained or not |
713 | * @iv_dma: dma address of iv for checking continuity and link table | 866 | * @iv_dma: dma address of iv for checking continuity and link table |
714 | * @dma_len: length of dma mapped link_tbl space | 867 | * @dma_len: length of dma mapped link_tbl space |
715 | * @dma_link_tbl: bus physical address of link_tbl | 868 | * @dma_link_tbl: bus physical address of link_tbl/buf |
716 | * @desc: h/w descriptor | 869 | * @desc: h/w descriptor |
717 | * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) | 870 | * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) |
871 | * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1) | ||
718 | * | 872 | * |
719 | * if decrypting (with authcheck), or either one of src_nents or dst_nents | 873 | * if decrypting (with authcheck), or either one of src_nents or dst_nents |
720 | * is greater than 1, an integrity check value is concatenated to the end | 874 | * is greater than 1, an integrity check value is concatenated to the end |
@@ -731,7 +885,10 @@ struct talitos_edesc { | |||
731 | int dma_len; | 885 | int dma_len; |
732 | dma_addr_t dma_link_tbl; | 886 | dma_addr_t dma_link_tbl; |
733 | struct talitos_desc desc; | 887 | struct talitos_desc desc; |
734 | struct talitos_ptr link_tbl[0]; | 888 | union { |
889 | struct talitos_ptr link_tbl[0]; | ||
890 | u8 buf[0]; | ||
891 | }; | ||
735 | }; | 892 | }; |
736 | 893 | ||
737 | static int talitos_map_sg(struct device *dev, struct scatterlist *sg, | 894 | static int talitos_map_sg(struct device *dev, struct scatterlist *sg, |
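The zero-length-array union lets one trailing allocation serve both generations: SEC2+ appends scatter/gather link tables, while SEC1 (which has no link-table support) appends a flat bounce buffer instead. Roughly how such an edesc gets sized (the driver's real arithmetic also reserves assoc entries and ICV space):

    /* sketch: one allocation, two views of the trailing storage */
    alen = is_sec1 ? src_len + dst_len              /* flat buf for SEC1 */
                   : (src_nents + dst_nents + 2) *
                     sizeof(struct talitos_ptr);    /* SEC2 link tables */
    edesc = kmalloc(sizeof(*edesc) + alen, GFP_ATOMIC);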
@@ -907,8 +1064,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
907 | { | 1064 | { |
908 | int n_sg = sg_count; | 1065 | int n_sg = sg_count; |
909 | 1066 | ||
910 | while (n_sg--) { | 1067 | while (sg && n_sg--) { |
911 | to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); | 1068 | to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0); |
912 | link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); | 1069 | link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); |
913 | link_tbl_ptr->j_extent = 0; | 1070 | link_tbl_ptr->j_extent = 0; |
914 | link_tbl_ptr++; | 1071 | link_tbl_ptr++; |
@@ -925,7 +1082,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
925 | sg_count--; | 1082 | sg_count--; |
926 | link_tbl_ptr--; | 1083 | link_tbl_ptr--; |
927 | } | 1084 | } |
928 | be16_add_cpu(&link_tbl_ptr->len, cryptlen); | 1085 | link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len) |
1086 | + cryptlen); | ||
929 | 1087 | ||
930 | /* tag end of link table */ | 1088 | /* tag end of link table */ |
931 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | 1089 | link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; |
@@ -953,7 +1111,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
953 | 1111 | ||
954 | /* hmac key */ | 1112 | /* hmac key */ |
955 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 1113 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, |
956 | 0, DMA_TO_DEVICE); | 1114 | DMA_TO_DEVICE); |
957 | 1115 | ||
958 | /* hmac data */ | 1116 | /* hmac data */ |
959 | desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); | 1117 | desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); |
@@ -962,7 +1120,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
962 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | 1120 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
963 | 1121 | ||
964 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * | 1122 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * |
965 | sizeof(struct talitos_ptr)); | 1123 | sizeof(struct talitos_ptr), 0); |
966 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; | 1124 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; |
967 | 1125 | ||
968 | /* assoc_nents - 1 entries for assoc, 1 for IV */ | 1126 | /* assoc_nents - 1 entries for assoc, 1 for IV */ |
@@ -973,7 +1131,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
973 | tbl_ptr += sg_count - 1; | 1131 | tbl_ptr += sg_count - 1; |
974 | tbl_ptr->j_extent = 0; | 1132 | tbl_ptr->j_extent = 0; |
975 | tbl_ptr++; | 1133 | tbl_ptr++; |
976 | to_talitos_ptr(tbl_ptr, edesc->iv_dma); | 1134 | to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0); |
977 | tbl_ptr->len = cpu_to_be16(ivsize); | 1135 | tbl_ptr->len = cpu_to_be16(ivsize); |
978 | tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | 1136 | tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; |
979 | 1137 | ||
@@ -982,14 +1140,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
982 | } else { | 1140 | } else { |
983 | if (areq->assoclen) | 1141 | if (areq->assoclen) |
984 | to_talitos_ptr(&desc->ptr[1], | 1142 | to_talitos_ptr(&desc->ptr[1], |
985 | sg_dma_address(areq->assoc)); | 1143 | sg_dma_address(areq->assoc), 0); |
986 | else | 1144 | else |
987 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); | 1145 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0); |
988 | desc->ptr[1].j_extent = 0; | 1146 | desc->ptr[1].j_extent = 0; |
989 | } | 1147 | } |
990 | 1148 | ||
991 | /* cipher iv */ | 1149 | /* cipher iv */ |
992 | to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); | 1150 | to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); |
993 | desc->ptr[2].len = cpu_to_be16(ivsize); | 1151 | desc->ptr[2].len = cpu_to_be16(ivsize); |
994 | desc->ptr[2].j_extent = 0; | 1152 | desc->ptr[2].j_extent = 0; |
995 | /* Sync needed for the aead_givencrypt case */ | 1153 | /* Sync needed for the aead_givencrypt case */ |
@@ -997,7 +1155,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
997 | 1155 | ||
998 | /* cipher key */ | 1156 | /* cipher key */ |
999 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, | 1157 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, |
1000 | (char *)&ctx->key + ctx->authkeylen, 0, | 1158 | (char *)&ctx->key + ctx->authkeylen, |
1001 | DMA_TO_DEVICE); | 1159 | DMA_TO_DEVICE); |
1002 | 1160 | ||
1003 | /* | 1161 | /* |
@@ -1015,7 +1173,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1015 | edesc->src_chained); | 1173 | edesc->src_chained); |
1016 | 1174 | ||
1017 | if (sg_count == 1) { | 1175 | if (sg_count == 1) { |
1018 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); | 1176 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); |
1019 | } else { | 1177 | } else { |
1020 | sg_link_tbl_len = cryptlen; | 1178 | sg_link_tbl_len = cryptlen; |
1021 | 1179 | ||
@@ -1026,14 +1184,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1026 | &edesc->link_tbl[0]); | 1184 | &edesc->link_tbl[0]); |
1027 | if (sg_count > 1) { | 1185 | if (sg_count > 1) { |
1028 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1186 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1029 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); | 1187 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0); |
1030 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1188 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1031 | edesc->dma_len, | 1189 | edesc->dma_len, |
1032 | DMA_BIDIRECTIONAL); | 1190 | DMA_BIDIRECTIONAL); |
1033 | } else { | 1191 | } else { |
1034 | /* Only one segment now, so no link tbl needed */ | 1192 | /* Only one segment now, so no link tbl needed */ |
1035 | to_talitos_ptr(&desc->ptr[4], | 1193 | to_talitos_ptr(&desc->ptr[4], |
1036 | sg_dma_address(areq->src)); | 1194 | sg_dma_address(areq->src), 0); |
1037 | } | 1195 | } |
1038 | } | 1196 | } |
1039 | 1197 | ||
@@ -1047,13 +1205,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1047 | DMA_FROM_DEVICE, edesc->dst_chained); | 1205 | DMA_FROM_DEVICE, edesc->dst_chained); |
1048 | 1206 | ||
1049 | if (sg_count == 1) { | 1207 | if (sg_count == 1) { |
1050 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); | 1208 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); |
1051 | } else { | 1209 | } else { |
1052 | int tbl_off = edesc->src_nents + 1; | 1210 | int tbl_off = edesc->src_nents + 1; |
1053 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | 1211 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
1054 | 1212 | ||
1055 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + | 1213 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + |
1056 | tbl_off * sizeof(struct talitos_ptr)); | 1214 | tbl_off * sizeof(struct talitos_ptr), 0); |
1057 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 1215 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
1058 | tbl_ptr); | 1216 | tbl_ptr); |
1059 | 1217 | ||
@@ -1068,14 +1226,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1068 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + | 1226 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + |
1069 | (tbl_off + edesc->dst_nents + 1 + | 1227 | (tbl_off + edesc->dst_nents + 1 + |
1070 | edesc->assoc_nents) * | 1228 | edesc->assoc_nents) * |
1071 | sizeof(struct talitos_ptr)); | 1229 | sizeof(struct talitos_ptr), 0); |
1072 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1230 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1073 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1231 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
1074 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1232 | edesc->dma_len, DMA_BIDIRECTIONAL); |
1075 | } | 1233 | } |
1076 | 1234 | ||
1077 | /* iv out */ | 1235 | /* iv out */ |
1078 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, | 1236 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, |
1079 | DMA_FROM_DEVICE); | 1237 | DMA_FROM_DEVICE); |
1080 | 1238 | ||
1081 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); | 1239 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
@@ -1095,7 +1253,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) | |||
1095 | int sg_nents = 0; | 1253 | int sg_nents = 0; |
1096 | 1254 | ||
1097 | *chained = false; | 1255 | *chained = false; |
1098 | while (nbytes > 0) { | 1256 | while (nbytes > 0 && sg) { |
1099 | sg_nents++; | 1257 | sg_nents++; |
1100 | nbytes -= sg->length; | 1258 | nbytes -= sg->length; |
1101 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | 1259 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
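
The new sg test added above makes the walk stop when the scatterlist runs out before nbytes does, instead of stepping past the end of the list. A standalone sketch of the hardened loop, using the generic sg_next() helper in place of the driver's chained-list special case:

	#include <linux/scatterlist.h>

	/* Count entries covering nbytes, bailing out cleanly on a short list */
	static int sg_count_sketch(struct scatterlist *sg, int nbytes)
	{
		int sg_nents = 0;

		while (nbytes > 0 && sg) {
			sg_nents++;
			nbytes -= sg->length;
			sg = sg_next(sg);
		}
		return sg_nents;
	}
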
@@ -1128,8 +1286,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1128 | dma_addr_t iv_dma = 0; | 1286 | dma_addr_t iv_dma = 0; |
1129 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1287 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1130 | GFP_ATOMIC; | 1288 | GFP_ATOMIC; |
1289 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1290 | bool is_sec1 = has_ftr_sec1(priv); | ||
1291 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; | ||
1131 | 1292 | ||
1132 | if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { | 1293 | if (cryptlen + authsize > max_len) { |
1133 | dev_err(dev, "length exceeds h/w max limit\n"); | 1294 | dev_err(dev, "length exceeds h/w max limit\n"); |
1134 | return ERR_PTR(-EINVAL); | 1295 | return ERR_PTR(-EINVAL); |
1135 | } | 1296 | } |
@@ -1173,8 +1334,12 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1173 | */ | 1334 | */ |
1174 | alloc_len = sizeof(struct talitos_edesc); | 1335 | alloc_len = sizeof(struct talitos_edesc); |
1175 | if (assoc_nents || src_nents || dst_nents) { | 1336 | if (assoc_nents || src_nents || dst_nents) { |
1176 | dma_len = (src_nents + dst_nents + 2 + assoc_nents) * | 1337 | if (is_sec1) |
1177 | sizeof(struct talitos_ptr) + authsize; | 1338 | dma_len = (src_nents ? cryptlen : 0) + |
1339 | (dst_nents ? cryptlen : 0); | ||
1340 | else | ||
1341 | dma_len = (src_nents + dst_nents + 2 + assoc_nents) * | ||
1342 | sizeof(struct talitos_ptr) + authsize; | ||
1178 | alloc_len += dma_len; | 1343 | alloc_len += dma_len; |
1179 | } else { | 1344 | } else { |
1180 | dma_len = 0; | 1345 | dma_len = 0; |
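
To make the two sizing policies above concrete, take src_nents = 3, dst_nents = 2, assoc_nents = 0, authsize = 16 and cryptlen = 1500, with the 8-byte struct talitos_ptr defined later in this patch (the numbers are illustrative only, not from the source):

	/* Illustrative arithmetic only; the values are made up */
	static void dma_len_example(void)
	{
		/* SEC1: a cryptlen-sized bounce area for each direction in use */
		int sec1_dma_len = 1500 + 1500;			/* = 3000 bytes */

		/* SEC2: one 8-byte talitos_ptr per segment, +2, plus the 16-byte ICV */
		int sec2_dma_len = (3 + 2 + 2 + 0) * 8 + 16;	/* = 72 bytes */

		(void)sec1_dma_len;
		(void)sec2_dma_len;
	}
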
@@ -1327,16 +1492,43 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1327 | return 0; | 1492 | return 0; |
1328 | } | 1493 | } |
1329 | 1494 | ||
1495 | static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, | ||
1496 | struct scatterlist *dst, unsigned int len, | ||
1497 | struct talitos_edesc *edesc) | ||
1498 | { | ||
1499 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1500 | bool is_sec1 = has_ftr_sec1(priv); | ||
1501 | |||
1502 | if (is_sec1) { | ||
1503 | if (!edesc->src_nents) { | ||
1504 | dma_unmap_sg(dev, src, 1, | ||
1505 | dst != src ? DMA_TO_DEVICE | ||
1506 | : DMA_BIDIRECTIONAL); | ||
1507 | } | ||
1508 | if (dst && edesc->dst_nents) { | ||
1509 | dma_sync_single_for_device(dev, | ||
1510 | edesc->dma_link_tbl + len, | ||
1511 | len, DMA_FROM_DEVICE); | ||
1512 | sg_copy_from_buffer(dst, edesc->dst_nents ? : 1, | ||
1513 | edesc->buf + len, len); | ||
1514 | } else if (dst && dst != src) { | ||
1515 | dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE); | ||
1516 | } | ||
1517 | } else { | ||
1518 | talitos_sg_unmap(dev, edesc, src, dst); | ||
1519 | } | ||
1520 | } | ||
1521 | |||
1330 | static void common_nonsnoop_unmap(struct device *dev, | 1522 | static void common_nonsnoop_unmap(struct device *dev, |
1331 | struct talitos_edesc *edesc, | 1523 | struct talitos_edesc *edesc, |
1332 | struct ablkcipher_request *areq) | 1524 | struct ablkcipher_request *areq) |
1333 | { | 1525 | { |
1334 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | 1526 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); |
1527 | |||
1528 | unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); | ||
1335 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | 1529 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); |
1336 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); | 1530 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); |
1337 | 1531 | ||
1338 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); | ||
1339 | |||
1340 | if (edesc->dma_len) | 1532 | if (edesc->dma_len) |
1341 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 1533 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
1342 | DMA_BIDIRECTIONAL); | 1534 | DMA_BIDIRECTIONAL); |
@@ -1358,6 +1550,102 @@ static void ablkcipher_done(struct device *dev, | |||
1358 | areq->base.complete(&areq->base, err); | 1550 | areq->base.complete(&areq->base, err); |
1359 | } | 1551 | } |
1360 | 1552 | ||
1553 | int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, | ||
1554 | unsigned int len, struct talitos_edesc *edesc, | ||
1555 | enum dma_data_direction dir, struct talitos_ptr *ptr) | ||
1556 | { | ||
1557 | int sg_count; | ||
1558 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1559 | bool is_sec1 = has_ftr_sec1(priv); | ||
1560 | |||
1561 | to_talitos_ptr_len(ptr, len, is_sec1); | ||
1562 | |||
1563 | if (is_sec1) { | ||
1564 | sg_count = edesc->src_nents ? : 1; | ||
1565 | |||
1566 | if (sg_count == 1) { | ||
1567 | dma_map_sg(dev, src, 1, dir); | ||
1568 | to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); | ||
1569 | } else { | ||
1570 | sg_copy_to_buffer(src, sg_count, edesc->buf, len); | ||
1571 | to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1); | ||
1572 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
1573 | len, DMA_TO_DEVICE); | ||
1574 | } | ||
1575 | } else { | ||
1576 | to_talitos_ptr_extent_clear(ptr, is_sec1); | ||
1577 | |||
1578 | sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, | ||
1579 | edesc->src_chained); | ||
1580 | |||
1581 | if (sg_count == 1) { | ||
1582 | to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); | ||
1583 | } else { | ||
1584 | sg_count = sg_to_link_tbl(src, sg_count, len, | ||
1585 | &edesc->link_tbl[0]); | ||
1586 | if (sg_count > 1) { | ||
1587 | to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); | ||
1588 | ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1589 | dma_sync_single_for_device(dev, | ||
1590 | edesc->dma_link_tbl, | ||
1591 | edesc->dma_len, | ||
1592 | DMA_BIDIRECTIONAL); | ||
1593 | } else { | ||
1594 | /* Only one segment now, so no link tbl needed */ | ||
1595 | to_talitos_ptr(ptr, sg_dma_address(src), | ||
1596 | is_sec1); | ||
1597 | } | ||
1598 | } | ||
1599 | } | ||
1600 | return sg_count; | ||
1601 | } | ||
1602 | |||
1603 | void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, | ||
1604 | unsigned int len, struct talitos_edesc *edesc, | ||
1605 | enum dma_data_direction dir, | ||
1606 | struct talitos_ptr *ptr, int sg_count) | ||
1607 | { | ||
1608 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1609 | bool is_sec1 = has_ftr_sec1(priv); | ||
1610 | |||
1611 | if (dir != DMA_NONE) | ||
1612 | sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, | ||
1613 | dir, edesc->dst_chained); | ||
1614 | |||
1615 | to_talitos_ptr_len(ptr, len, is_sec1); | ||
1616 | |||
1617 | if (is_sec1) { | ||
1618 | if (sg_count == 1) { | ||
1619 | if (dir != DMA_NONE) | ||
1620 | dma_map_sg(dev, dst, 1, dir); | ||
1621 | to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); | ||
1622 | } else { | ||
1623 | to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1); | ||
1624 | dma_sync_single_for_device(dev, | ||
1625 | edesc->dma_link_tbl + len, | ||
1626 | len, DMA_FROM_DEVICE); | ||
1627 | } | ||
1628 | } else { | ||
1629 | to_talitos_ptr_extent_clear(ptr, is_sec1); | ||
1630 | |||
1631 | if (sg_count == 1) { | ||
1632 | to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); | ||
1633 | } else { | ||
1634 | struct talitos_ptr *link_tbl_ptr = | ||
1635 | &edesc->link_tbl[edesc->src_nents + 1]; | ||
1636 | |||
1637 | to_talitos_ptr(ptr, edesc->dma_link_tbl + | ||
1638 | (edesc->src_nents + 1) * | ||
1639 | sizeof(struct talitos_ptr), 0); | ||
1640 | ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1641 | sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); | ||
1642 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
1643 | edesc->dma_len, | ||
1644 | DMA_BIDIRECTIONAL); | ||
1645 | } | ||
1646 | } | ||
1647 | } | ||
1648 | |||
1361 | static int common_nonsnoop(struct talitos_edesc *edesc, | 1649 | static int common_nonsnoop(struct talitos_edesc *edesc, |
1362 | struct ablkcipher_request *areq, | 1650 | struct ablkcipher_request *areq, |
1363 | void (*callback) (struct device *dev, | 1651 | void (*callback) (struct device *dev, |
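
The SEC1 branches of the two helpers above implement a bounce buffer: map_sg_in_talitos_ptr() linearizes a multi-entry source into edesc->buf before submission, and unmap_sg_talitos_ptr() (earlier in this patch) scatters the engine's output back out of edesc->buf + len. A minimal sketch of that round trip; sg_copy_to_buffer()/sg_copy_from_buffer() are the real kernel helpers, the wrappers are hypothetical:

	#include <linux/scatterlist.h>

	static void sec1_gather_in(struct scatterlist *src, int nents,
				   u8 *buf, unsigned int len)
	{
		/* gather: SG entries -> first half of the contiguous buffer */
		sg_copy_to_buffer(src, nents, buf, len);
	}

	static void sec1_scatter_out(struct scatterlist *dst, int nents,
				     u8 *buf, unsigned int len)
	{
		/* scatter: second half of the buffer -> destination SG entries */
		sg_copy_from_buffer(dst, nents, buf + len, len);
	}
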
@@ -1371,83 +1659,41 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1371 | unsigned int cryptlen = areq->nbytes; | 1659 | unsigned int cryptlen = areq->nbytes; |
1372 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); | 1660 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); |
1373 | int sg_count, ret; | 1661 | int sg_count, ret; |
1662 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1663 | bool is_sec1 = has_ftr_sec1(priv); | ||
1374 | 1664 | ||
1375 | /* first DWORD empty */ | 1665 | /* first DWORD empty */ |
1376 | desc->ptr[0].len = 0; | 1666 | desc->ptr[0] = zero_entry; |
1377 | to_talitos_ptr(&desc->ptr[0], 0); | ||
1378 | desc->ptr[0].j_extent = 0; | ||
1379 | 1667 | ||
1380 | /* cipher iv */ | 1668 | /* cipher iv */ |
1381 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); | 1669 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); |
1382 | desc->ptr[1].len = cpu_to_be16(ivsize); | 1670 | to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); |
1383 | desc->ptr[1].j_extent = 0; | 1671 | to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); |
1384 | 1672 | ||
1385 | /* cipher key */ | 1673 | /* cipher key */ |
1386 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | 1674 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, |
1387 | (char *)&ctx->key, 0, DMA_TO_DEVICE); | 1675 | (char *)&ctx->key, DMA_TO_DEVICE); |
1388 | 1676 | ||
1389 | /* | 1677 | /* |
1390 | * cipher in | 1678 | * cipher in |
1391 | */ | 1679 | */ |
1392 | desc->ptr[3].len = cpu_to_be16(cryptlen); | 1680 | sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc, |
1393 | desc->ptr[3].j_extent = 0; | 1681 | (areq->src == areq->dst) ? |
1394 | 1682 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE, | |
1395 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, | 1683 | &desc->ptr[3]); |
1396 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | ||
1397 | : DMA_TO_DEVICE, | ||
1398 | edesc->src_chained); | ||
1399 | |||
1400 | if (sg_count == 1) { | ||
1401 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); | ||
1402 | } else { | ||
1403 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | ||
1404 | &edesc->link_tbl[0]); | ||
1405 | if (sg_count > 1) { | ||
1406 | to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); | ||
1407 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1408 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
1409 | edesc->dma_len, | ||
1410 | DMA_BIDIRECTIONAL); | ||
1411 | } else { | ||
1412 | /* Only one segment now, so no link tbl needed */ | ||
1413 | to_talitos_ptr(&desc->ptr[3], | ||
1414 | sg_dma_address(areq->src)); | ||
1415 | } | ||
1416 | } | ||
1417 | 1684 | ||
1418 | /* cipher out */ | 1685 | /* cipher out */ |
1419 | desc->ptr[4].len = cpu_to_be16(cryptlen); | 1686 | map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc, |
1420 | desc->ptr[4].j_extent = 0; | 1687 | (areq->src == areq->dst) ? DMA_NONE |
1421 | 1688 | : DMA_FROM_DEVICE, | |
1422 | if (areq->src != areq->dst) | 1689 | &desc->ptr[4], sg_count); |
1423 | sg_count = talitos_map_sg(dev, areq->dst, | ||
1424 | edesc->dst_nents ? : 1, | ||
1425 | DMA_FROM_DEVICE, edesc->dst_chained); | ||
1426 | |||
1427 | if (sg_count == 1) { | ||
1428 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); | ||
1429 | } else { | ||
1430 | struct talitos_ptr *link_tbl_ptr = | ||
1431 | &edesc->link_tbl[edesc->src_nents + 1]; | ||
1432 | |||
1433 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + | ||
1434 | (edesc->src_nents + 1) * | ||
1435 | sizeof(struct talitos_ptr)); | ||
1436 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1437 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | ||
1438 | link_tbl_ptr); | ||
1439 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | ||
1440 | edesc->dma_len, DMA_BIDIRECTIONAL); | ||
1441 | } | ||
1442 | 1690 | ||
1443 | /* iv out */ | 1691 | /* iv out */ |
1444 | map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, | 1692 | map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, |
1445 | DMA_FROM_DEVICE); | 1693 | DMA_FROM_DEVICE); |
1446 | 1694 | ||
1447 | /* last DWORD empty */ | 1695 | /* last DWORD empty */ |
1448 | desc->ptr[6].len = 0; | 1696 | desc->ptr[6] = zero_entry; |
1449 | to_talitos_ptr(&desc->ptr[6], 0); | ||
1450 | desc->ptr[6].j_extent = 0; | ||
1451 | 1697 | ||
1452 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); | 1698 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1453 | if (ret != -EINPROGRESS) { | 1699 | if (ret != -EINPROGRESS) { |
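
After this refactoring, common_nonsnoop() reads as a straight walk over the seven descriptor slots. A hypothetical summary of the assignments made above (the enum is illustrative; the driver indexes desc->ptr[] directly):

	enum nonsnoop_slot {
		SLOT_UNUSED0  = 0,	/* ptr[0]: first DWORD empty       */
		SLOT_IV_IN    = 1,	/* ptr[1]: cipher IV               */
		SLOT_KEY      = 2,	/* ptr[2]: cipher key              */
		SLOT_DATA_IN  = 3,	/* ptr[3]: cipher in (SG/link/buf) */
		SLOT_DATA_OUT = 4,	/* ptr[4]: cipher out              */
		SLOT_IV_OUT   = 5,	/* ptr[5]: IV writeback            */
		SLOT_UNUSED6  = 6,	/* ptr[6]: last DWORD empty        */
	};
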
@@ -1507,20 +1753,22 @@ static void common_nonsnoop_hash_unmap(struct device *dev, | |||
1507 | struct ahash_request *areq) | 1753 | struct ahash_request *areq) |
1508 | { | 1754 | { |
1509 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1755 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1756 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1757 | bool is_sec1 = has_ftr_sec1(priv); | ||
1510 | 1758 | ||
1511 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | 1759 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); |
1512 | 1760 | ||
1761 | unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); | ||
1762 | |||
1513 | /* When using hashctx-in, must unmap it. */ | 1763 | /* When using hashctx-in, must unmap it. */ |
1514 | if (edesc->desc.ptr[1].len) | 1764 | if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) |
1515 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], | 1765 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], |
1516 | DMA_TO_DEVICE); | 1766 | DMA_TO_DEVICE); |
1517 | 1767 | ||
1518 | if (edesc->desc.ptr[2].len) | 1768 | if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1)) |
1519 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], | 1769 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], |
1520 | DMA_TO_DEVICE); | 1770 | DMA_TO_DEVICE); |
1521 | 1771 | ||
1522 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); | ||
1523 | |||
1524 | if (edesc->dma_len) | 1772 | if (edesc->dma_len) |
1525 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 1773 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
1526 | DMA_BIDIRECTIONAL); | 1774 | DMA_BIDIRECTIONAL); |
@@ -1548,6 +1796,27 @@ static void ahash_done(struct device *dev, | |||
1548 | areq->base.complete(&areq->base, err); | 1796 | areq->base.complete(&areq->base, err); |
1549 | } | 1797 | } |
1550 | 1798 | ||
1799 | /* | ||
1800 | * SEC1 doesn't like hashing a 0-sized message, so we do the padding | ||
1801 | * ourselves and submit a padded block | ||
1802 | */ | ||
1803 | void talitos_handle_buggy_hash(struct talitos_ctx *ctx, | ||
1804 | struct talitos_edesc *edesc, | ||
1805 | struct talitos_ptr *ptr) | ||
1806 | { | ||
1807 | static u8 padded_hash[64] = { | ||
1808 | 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1809 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1810 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1811 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1812 | }; | ||
1813 | |||
1814 | pr_err_once("Bug in SEC1, padding ourselves\n"); | ||
1815 | edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD; | ||
1816 | map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash), | ||
1817 | (char *)padded_hash, DMA_TO_DEVICE); | ||
1818 | } | ||
1819 | |||
1551 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, | 1820 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, |
1552 | struct ahash_request *areq, unsigned int length, | 1821 | struct ahash_request *areq, unsigned int length, |
1553 | void (*callback) (struct device *dev, | 1822 | void (*callback) (struct device *dev, |
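
The constant above is exactly the Merkle-Damgård padding of an empty message for 64-byte-block digests (MD5, SHA-1, SHA-256): a 0x80 end-of-message marker, zero fill, and a zero 64-bit length field; clearing DESC_HDR_MODE0_MDEU_PAD then makes the engine hash the pre-padded block as-is. A sketch of how the block could be derived rather than hard-coded (hypothetical helper, not driver code):

	#include <string.h>

	static void build_empty_msg_padding(unsigned char block[64])
	{
		memset(block, 0, 64);
		block[0] = 0x80;	/* leading "1" bit right after the empty message */
		/* trailing 8 bytes encode the message bit length, which is 0 */
	}
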
@@ -1559,7 +1828,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1559 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1828 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1560 | struct device *dev = ctx->dev; | 1829 | struct device *dev = ctx->dev; |
1561 | struct talitos_desc *desc = &edesc->desc; | 1830 | struct talitos_desc *desc = &edesc->desc; |
1562 | int sg_count, ret; | 1831 | int ret; |
1832 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1833 | bool is_sec1 = has_ftr_sec1(priv); | ||
1563 | 1834 | ||
1564 | /* first DWORD empty */ | 1835 | /* first DWORD empty */ |
1565 | desc->ptr[0] = zero_entry; | 1836 | desc->ptr[0] = zero_entry; |
@@ -1568,7 +1839,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1568 | if (!req_ctx->first || req_ctx->swinit) { | 1839 | if (!req_ctx->first || req_ctx->swinit) { |
1569 | map_single_talitos_ptr(dev, &desc->ptr[1], | 1840 | map_single_talitos_ptr(dev, &desc->ptr[1], |
1570 | req_ctx->hw_context_size, | 1841 | req_ctx->hw_context_size, |
1571 | (char *)req_ctx->hw_context, 0, | 1842 | (char *)req_ctx->hw_context, |
1572 | DMA_TO_DEVICE); | 1843 | DMA_TO_DEVICE); |
1573 | req_ctx->swinit = 0; | 1844 | req_ctx->swinit = 0; |
1574 | } else { | 1845 | } else { |
@@ -1580,38 +1851,15 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1580 | /* HMAC key */ | 1851 | /* HMAC key */ |
1581 | if (ctx->keylen) | 1852 | if (ctx->keylen) |
1582 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | 1853 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, |
1583 | (char *)&ctx->key, 0, DMA_TO_DEVICE); | 1854 | (char *)&ctx->key, DMA_TO_DEVICE); |
1584 | else | 1855 | else |
1585 | desc->ptr[2] = zero_entry; | 1856 | desc->ptr[2] = zero_entry; |
1586 | 1857 | ||
1587 | /* | 1858 | /* |
1588 | * data in | 1859 | * data in |
1589 | */ | 1860 | */ |
1590 | desc->ptr[3].len = cpu_to_be16(length); | 1861 | map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, |
1591 | desc->ptr[3].j_extent = 0; | 1862 | DMA_TO_DEVICE, &desc->ptr[3]); |
1592 | |||
1593 | sg_count = talitos_map_sg(dev, req_ctx->psrc, | ||
1594 | edesc->src_nents ? : 1, | ||
1595 | DMA_TO_DEVICE, edesc->src_chained); | ||
1596 | |||
1597 | if (sg_count == 1) { | ||
1598 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); | ||
1599 | } else { | ||
1600 | sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, | ||
1601 | &edesc->link_tbl[0]); | ||
1602 | if (sg_count > 1) { | ||
1603 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1604 | to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); | ||
1605 | dma_sync_single_for_device(ctx->dev, | ||
1606 | edesc->dma_link_tbl, | ||
1607 | edesc->dma_len, | ||
1608 | DMA_BIDIRECTIONAL); | ||
1609 | } else { | ||
1610 | /* Only one segment now, so no link tbl needed */ | ||
1611 | to_talitos_ptr(&desc->ptr[3], | ||
1612 | sg_dma_address(req_ctx->psrc)); | ||
1613 | } | ||
1614 | } | ||
1615 | 1863 | ||
1616 | /* fifth DWORD empty */ | 1864 | /* fifth DWORD empty */ |
1617 | desc->ptr[4] = zero_entry; | 1865 | desc->ptr[4] = zero_entry; |
@@ -1620,15 +1868,18 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1620 | if (req_ctx->last) | 1868 | if (req_ctx->last) |
1621 | map_single_talitos_ptr(dev, &desc->ptr[5], | 1869 | map_single_talitos_ptr(dev, &desc->ptr[5], |
1622 | crypto_ahash_digestsize(tfm), | 1870 | crypto_ahash_digestsize(tfm), |
1623 | areq->result, 0, DMA_FROM_DEVICE); | 1871 | areq->result, DMA_FROM_DEVICE); |
1624 | else | 1872 | else |
1625 | map_single_talitos_ptr(dev, &desc->ptr[5], | 1873 | map_single_talitos_ptr(dev, &desc->ptr[5], |
1626 | req_ctx->hw_context_size, | 1874 | req_ctx->hw_context_size, |
1627 | req_ctx->hw_context, 0, DMA_FROM_DEVICE); | 1875 | req_ctx->hw_context, DMA_FROM_DEVICE); |
1628 | 1876 | ||
1629 | /* last DWORD empty */ | 1877 | /* last DWORD empty */ |
1630 | desc->ptr[6] = zero_entry; | 1878 | desc->ptr[6] = zero_entry; |
1631 | 1879 | ||
1880 | if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) | ||
1881 | talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); | ||
1882 | |||
1632 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); | 1883 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1633 | if (ret != -EINPROGRESS) { | 1884 | if (ret != -EINPROGRESS) { |
1634 | common_nonsnoop_hash_unmap(dev, edesc, areq); | 1885 | common_nonsnoop_hash_unmap(dev, edesc, areq); |
@@ -2561,6 +2812,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
2561 | break; | 2812 | break; |
2562 | default: | 2813 | default: |
2563 | dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); | 2814 | dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); |
2815 | kfree(t_alg); | ||
2564 | return ERR_PTR(-EINVAL); | 2816 | return ERR_PTR(-EINVAL); |
2565 | } | 2817 | } |
2566 | 2818 | ||
@@ -2581,29 +2833,35 @@ static int talitos_probe_irq(struct platform_device *ofdev) | |||
2581 | struct device_node *np = ofdev->dev.of_node; | 2833 | struct device_node *np = ofdev->dev.of_node; |
2582 | struct talitos_private *priv = dev_get_drvdata(dev); | 2834 | struct talitos_private *priv = dev_get_drvdata(dev); |
2583 | int err; | 2835 | int err; |
2836 | bool is_sec1 = has_ftr_sec1(priv); | ||
2584 | 2837 | ||
2585 | priv->irq[0] = irq_of_parse_and_map(np, 0); | 2838 | priv->irq[0] = irq_of_parse_and_map(np, 0); |
2586 | if (!priv->irq[0]) { | 2839 | if (!priv->irq[0]) { |
2587 | dev_err(dev, "failed to map irq\n"); | 2840 | dev_err(dev, "failed to map irq\n"); |
2588 | return -EINVAL; | 2841 | return -EINVAL; |
2589 | } | 2842 | } |
2843 | if (is_sec1) { | ||
2844 | err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0, | ||
2845 | dev_driver_string(dev), dev); | ||
2846 | goto primary_out; | ||
2847 | } | ||
2590 | 2848 | ||
2591 | priv->irq[1] = irq_of_parse_and_map(np, 1); | 2849 | priv->irq[1] = irq_of_parse_and_map(np, 1); |
2592 | 2850 | ||
2593 | /* get the primary irq line */ | 2851 | /* get the primary irq line */ |
2594 | if (!priv->irq[1]) { | 2852 | if (!priv->irq[1]) { |
2595 | err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0, | 2853 | err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0, |
2596 | dev_driver_string(dev), dev); | 2854 | dev_driver_string(dev), dev); |
2597 | goto primary_out; | 2855 | goto primary_out; |
2598 | } | 2856 | } |
2599 | 2857 | ||
2600 | err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0, | 2858 | err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0, |
2601 | dev_driver_string(dev), dev); | 2859 | dev_driver_string(dev), dev); |
2602 | if (err) | 2860 | if (err) |
2603 | goto primary_out; | 2861 | goto primary_out; |
2604 | 2862 | ||
2605 | /* get the secondary irq line */ | 2863 | /* get the secondary irq line */ |
2606 | err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0, | 2864 | err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0, |
2607 | dev_driver_string(dev), dev); | 2865 | dev_driver_string(dev), dev); |
2608 | if (err) { | 2866 | if (err) { |
2609 | dev_err(dev, "failed to request secondary irq\n"); | 2867 | dev_err(dev, "failed to request secondary irq\n"); |
@@ -2630,6 +2888,7 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2630 | struct talitos_private *priv; | 2888 | struct talitos_private *priv; |
2631 | const unsigned int *prop; | 2889 | const unsigned int *prop; |
2632 | int i, err; | 2890 | int i, err; |
2891 | int stride; | ||
2633 | 2892 | ||
2634 | priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); | 2893 | priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); |
2635 | if (!priv) | 2894 | if (!priv) |
@@ -2643,20 +2902,6 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2643 | 2902 | ||
2644 | spin_lock_init(&priv->reg_lock); | 2903 | spin_lock_init(&priv->reg_lock); |
2645 | 2904 | ||
2646 | err = talitos_probe_irq(ofdev); | ||
2647 | if (err) | ||
2648 | goto err_out; | ||
2649 | |||
2650 | if (!priv->irq[1]) { | ||
2651 | tasklet_init(&priv->done_task[0], talitos_done_4ch, | ||
2652 | (unsigned long)dev); | ||
2653 | } else { | ||
2654 | tasklet_init(&priv->done_task[0], talitos_done_ch0_2, | ||
2655 | (unsigned long)dev); | ||
2656 | tasklet_init(&priv->done_task[1], talitos_done_ch1_3, | ||
2657 | (unsigned long)dev); | ||
2658 | } | ||
2659 | |||
2660 | priv->reg = of_iomap(np, 0); | 2905 | priv->reg = of_iomap(np, 0); |
2661 | if (!priv->reg) { | 2906 | if (!priv->reg) { |
2662 | dev_err(dev, "failed to of_iomap\n"); | 2907 | dev_err(dev, "failed to of_iomap\n"); |
@@ -2696,6 +2941,53 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2696 | TALITOS_FTR_SHA224_HWINIT | | 2941 | TALITOS_FTR_SHA224_HWINIT | |
2697 | TALITOS_FTR_HMAC_OK; | 2942 | TALITOS_FTR_HMAC_OK; |
2698 | 2943 | ||
2944 | if (of_device_is_compatible(np, "fsl,sec1.0")) | ||
2945 | priv->features |= TALITOS_FTR_SEC1; | ||
2946 | |||
2947 | if (of_device_is_compatible(np, "fsl,sec1.2")) { | ||
2948 | priv->reg_deu = priv->reg + TALITOS12_DEU; | ||
2949 | priv->reg_aesu = priv->reg + TALITOS12_AESU; | ||
2950 | priv->reg_mdeu = priv->reg + TALITOS12_MDEU; | ||
2951 | stride = TALITOS1_CH_STRIDE; | ||
2952 | } else if (of_device_is_compatible(np, "fsl,sec1.0")) { | ||
2953 | priv->reg_deu = priv->reg + TALITOS10_DEU; | ||
2954 | priv->reg_aesu = priv->reg + TALITOS10_AESU; | ||
2955 | priv->reg_mdeu = priv->reg + TALITOS10_MDEU; | ||
2956 | priv->reg_afeu = priv->reg + TALITOS10_AFEU; | ||
2957 | priv->reg_rngu = priv->reg + TALITOS10_RNGU; | ||
2958 | priv->reg_pkeu = priv->reg + TALITOS10_PKEU; | ||
2959 | stride = TALITOS1_CH_STRIDE; | ||
2960 | } else { | ||
2961 | priv->reg_deu = priv->reg + TALITOS2_DEU; | ||
2962 | priv->reg_aesu = priv->reg + TALITOS2_AESU; | ||
2963 | priv->reg_mdeu = priv->reg + TALITOS2_MDEU; | ||
2964 | priv->reg_afeu = priv->reg + TALITOS2_AFEU; | ||
2965 | priv->reg_rngu = priv->reg + TALITOS2_RNGU; | ||
2966 | priv->reg_pkeu = priv->reg + TALITOS2_PKEU; | ||
2967 | priv->reg_keu = priv->reg + TALITOS2_KEU; | ||
2968 | priv->reg_crcu = priv->reg + TALITOS2_CRCU; | ||
2969 | stride = TALITOS2_CH_STRIDE; | ||
2970 | } | ||
2971 | |||
2972 | err = talitos_probe_irq(ofdev); | ||
2973 | if (err) | ||
2974 | goto err_out; | ||
2975 | |||
2976 | if (of_device_is_compatible(np, "fsl,sec1.0")) { | ||
2977 | tasklet_init(&priv->done_task[0], talitos1_done_4ch, | ||
2978 | (unsigned long)dev); | ||
2979 | } else { | ||
2980 | if (!priv->irq[1]) { | ||
2981 | tasklet_init(&priv->done_task[0], talitos2_done_4ch, | ||
2982 | (unsigned long)dev); | ||
2983 | } else { | ||
2984 | tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, | ||
2985 | (unsigned long)dev); | ||
2986 | tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, | ||
2987 | (unsigned long)dev); | ||
2988 | } | ||
2989 | } | ||
2990 | |||
2699 | priv->chan = kzalloc(sizeof(struct talitos_channel) * | 2991 | priv->chan = kzalloc(sizeof(struct talitos_channel) * |
2700 | priv->num_channels, GFP_KERNEL); | 2992 | priv->num_channels, GFP_KERNEL); |
2701 | if (!priv->chan) { | 2993 | if (!priv->chan) { |
@@ -2707,7 +2999,7 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2707 | priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); | 2999 | priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); |
2708 | 3000 | ||
2709 | for (i = 0; i < priv->num_channels; i++) { | 3001 | for (i = 0; i < priv->num_channels; i++) { |
2710 | priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1); | 3002 | priv->chan[i].reg = priv->reg + stride * (i + 1); |
2711 | if (!priv->irq[1] || !(i & 1)) | 3003 | if (!priv->irq[1] || !(i & 1)) |
2712 | priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; | 3004 | priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; |
2713 | 3005 | ||
@@ -2794,9 +3086,16 @@ err_out: | |||
2794 | } | 3086 | } |
2795 | 3087 | ||
2796 | static const struct of_device_id talitos_match[] = { | 3088 | static const struct of_device_id talitos_match[] = { |
3089 | #ifdef CONFIG_CRYPTO_DEV_TALITOS1 | ||
3090 | { | ||
3091 | .compatible = "fsl,sec1.0", | ||
3092 | }, | ||
3093 | #endif | ||
3094 | #ifdef CONFIG_CRYPTO_DEV_TALITOS2 | ||
2797 | { | 3095 | { |
2798 | .compatible = "fsl,sec2.0", | 3096 | .compatible = "fsl,sec2.0", |
2799 | }, | 3097 | }, |
3098 | #endif | ||
2800 | {}, | 3099 | {}, |
2801 | }; | 3100 | }; |
2802 | MODULE_DEVICE_TABLE(of, talitos_match); | 3101 | MODULE_DEVICE_TABLE(of, talitos_match); |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 61a14054aa39..314daf55e7f7 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -29,7 +29,8 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define TALITOS_TIMEOUT 100000 | 31 | #define TALITOS_TIMEOUT 100000 |
32 | #define TALITOS_MAX_DATA_LEN 65535 | 32 | #define TALITOS1_MAX_DATA_LEN 32768 |
33 | #define TALITOS2_MAX_DATA_LEN 65535 | ||
33 | 34 | ||
34 | #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) | 35 | #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) |
35 | #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) | 36 | #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) |
@@ -37,9 +38,17 @@ | |||
37 | 38 | ||
38 | /* descriptor pointer entry */ | 39 | /* descriptor pointer entry */ |
39 | struct talitos_ptr { | 40 | struct talitos_ptr { |
40 | __be16 len; /* length */ | 41 | union { |
41 | u8 j_extent; /* jump to sg link table and/or extent */ | 42 | struct { /* SEC2 format */ |
42 | u8 eptr; /* extended address */ | 43 | __be16 len; /* length */ |
44 | u8 j_extent; /* jump to sg link table and/or extent */ | ||
45 | u8 eptr; /* extended address */ | ||
46 | }; | ||
47 | struct { /* SEC1 format */ | ||
48 | __be16 res; | ||
49 | __be16 len1; /* length */ | ||
50 | }; | ||
51 | }; | ||
43 | __be32 ptr; /* address */ | 52 | __be32 ptr; /* address */ |
44 | }; | 53 | }; |
45 | 54 | ||
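
Given the union above, both the SEC2 view (len/j_extent/eptr) and the SEC1 view (res/len1) occupy the same leading 32 bits ahead of the shared address word. A hedged layout check on a standalone mirror of the struct (plain integer types stand in for __be16/__be32):

	#include <stddef.h>

	struct ptr_sketch {		/* mirrors the union above */
		union {
			struct {	/* SEC2 */
				unsigned short len;
				unsigned char j_extent;
				unsigned char eptr;
			};
			struct {	/* SEC1 */
				unsigned short res;
				unsigned short len1;
			};
		};
		unsigned int ptr;
	};

	_Static_assert(sizeof(struct ptr_sketch) == 8,
		       "two 32-bit words: length/flags plus address");
	_Static_assert(offsetof(struct ptr_sketch, len1) == 2,
		       "SEC1 length sits in the second half-word");
	_Static_assert(offsetof(struct ptr_sketch, ptr) == 4,
		       "the address word is shared by both formats");
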
@@ -53,10 +62,16 @@ static const struct talitos_ptr zero_entry = { | |||
53 | /* descriptor */ | 62 | /* descriptor */ |
54 | struct talitos_desc { | 63 | struct talitos_desc { |
55 | __be32 hdr; /* header high bits */ | 64 | __be32 hdr; /* header high bits */ |
56 | __be32 hdr_lo; /* header low bits */ | 65 | union { |
66 | __be32 hdr_lo; /* header low bits */ | ||
67 | __be32 hdr1; /* header for SEC1 */ | ||
68 | }; | ||
57 | struct talitos_ptr ptr[7]; /* ptr/len pair array */ | 69 | struct talitos_ptr ptr[7]; /* ptr/len pair array */ |
70 | __be32 next_desc; /* next descriptor (SEC1) */ | ||
58 | }; | 71 | }; |
59 | 72 | ||
73 | #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) | ||
74 | |||
60 | /** | 75 | /** |
61 | * talitos_request - descriptor submission request | 76 | * talitos_request - descriptor submission request |
62 | * @desc: descriptor pointer (kernel virtual) | 77 | * @desc: descriptor pointer (kernel virtual) |
@@ -97,6 +112,14 @@ struct talitos_private { | |||
97 | struct device *dev; | 112 | struct device *dev; |
98 | struct platform_device *ofdev; | 113 | struct platform_device *ofdev; |
99 | void __iomem *reg; | 114 | void __iomem *reg; |
115 | void __iomem *reg_deu; | ||
116 | void __iomem *reg_aesu; | ||
117 | void __iomem *reg_mdeu; | ||
118 | void __iomem *reg_afeu; | ||
119 | void __iomem *reg_rngu; | ||
120 | void __iomem *reg_pkeu; | ||
121 | void __iomem *reg_keu; | ||
122 | void __iomem *reg_crcu; | ||
100 | int irq[2]; | 123 | int irq[2]; |
101 | 124 | ||
102 | /* SEC global registers lock */ | 125 | /* SEC global registers lock */ |
@@ -144,49 +167,80 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
144 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | 167 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
145 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | 168 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 |
146 | #define TALITOS_FTR_HMAC_OK 0x00000008 | 169 | #define TALITOS_FTR_HMAC_OK 0x00000008 |
170 | #define TALITOS_FTR_SEC1 0x00000010 | ||
171 | |||
172 | /* | ||
173 | * If both CONFIG_CRYPTO_DEV_TALITOS1 and CONFIG_CRYPTO_DEV_TALITOS2 are | ||
174 | * defined, we check the features which are set according to the device tree. | ||
175 | * Otherwise, we answer true or false directly. | ||
176 | */ | ||
177 | static inline bool has_ftr_sec1(struct talitos_private *priv) | ||
178 | { | ||
179 | #if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) | ||
180 | return priv->features & TALITOS_FTR_SEC1 ? true : false; | ||
181 | #elif defined(CONFIG_CRYPTO_DEV_TALITOS1) | ||
182 | return true; | ||
183 | #else | ||
184 | return false; | ||
185 | #endif | ||
186 | } | ||
147 | 187 | ||
148 | /* | 188 | /* |
149 | * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register | 189 | * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register |
150 | */ | 190 | */ |
151 | 191 | ||
192 | #define ISR1_FORMAT(x) (((x) << 28) | ((x) << 16)) | ||
193 | #define ISR2_FORMAT(x) (((x) << 4) | (x)) | ||
194 | |||
152 | /* global register offset addresses */ | 195 | /* global register offset addresses */ |
153 | #define TALITOS_MCR 0x1030 /* master control register */ | 196 | #define TALITOS_MCR 0x1030 /* master control register */ |
154 | #define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */ | 197 | #define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */ |
155 | #define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */ | 198 | #define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */ |
156 | #define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */ | 199 | #define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */ |
157 | #define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */ | 200 | #define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */ |
158 | #define TALITOS_MCR_SWR 0x1 /* s/w reset */ | 201 | #define TALITOS1_MCR_SWR 0x1000000 /* s/w reset */ |
202 | #define TALITOS2_MCR_SWR 0x1 /* s/w reset */ | ||
159 | #define TALITOS_MCR_LO 0x1034 | 203 | #define TALITOS_MCR_LO 0x1034 |
160 | #define TALITOS_IMR 0x1008 /* interrupt mask register */ | 204 | #define TALITOS_IMR 0x1008 /* interrupt mask register */ |
161 | #define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ | 205 | /* enable channel IRQs */ |
162 | #define TALITOS_IMR_DONE 0x00055 /* done IRQs */ | 206 | #define TALITOS1_IMR_INIT ISR1_FORMAT(0xf) |
207 | #define TALITOS1_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ | ||
208 | /* enable channel IRQs */ | ||
209 | #define TALITOS2_IMR_INIT (ISR2_FORMAT(0xf) | 0x10000) | ||
210 | #define TALITOS2_IMR_DONE ISR2_FORMAT(0x5) /* done IRQs */ | ||
163 | #define TALITOS_IMR_LO 0x100C | 211 | #define TALITOS_IMR_LO 0x100C |
164 | #define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ | 212 | #define TALITOS1_IMR_LO_INIT 0x2000000 /* allow RNGU error IRQs */ |
213 | #define TALITOS2_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ | ||
165 | #define TALITOS_ISR 0x1010 /* interrupt status register */ | 214 | #define TALITOS_ISR 0x1010 /* interrupt status register */ |
166 | #define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */ | 215 | #define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ |
167 | #define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */ | 216 | #define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ |
168 | #define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */ | 217 | #define TALITOS1_ISR_TEA_ERR 0x00000040 |
169 | #define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */ | 218 | #define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ |
170 | #define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */ | 219 | #define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ |
171 | #define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */ | 220 | #define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ |
221 | #define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ | ||
222 | #define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ | ||
223 | #define TALITOS2_ISR_CH_1_3_DONE ISR2_FORMAT(0x4) /* ch 1, 3 done mask */ | ||
172 | #define TALITOS_ISR_LO 0x1014 | 224 | #define TALITOS_ISR_LO 0x1014 |
173 | #define TALITOS_ICR 0x1018 /* interrupt clear register */ | 225 | #define TALITOS_ICR 0x1018 /* interrupt clear register */ |
174 | #define TALITOS_ICR_LO 0x101C | 226 | #define TALITOS_ICR_LO 0x101C |
175 | 227 | ||
176 | /* channel register address stride */ | 228 | /* channel register address stride */ |
177 | #define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */ | 229 | #define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */ |
178 | #define TALITOS_CH_STRIDE 0x100 | 230 | #define TALITOS1_CH_STRIDE 0x1000 |
231 | #define TALITOS2_CH_STRIDE 0x100 | ||
179 | 232 | ||
180 | /* channel configuration register */ | 233 | /* channel configuration register */ |
181 | #define TALITOS_CCCR 0x8 | 234 | #define TALITOS_CCCR 0x8 |
182 | #define TALITOS_CCCR_CONT 0x2 /* channel continue */ | 235 | #define TALITOS2_CCCR_CONT 0x2 /* channel continue on SEC2 */ |
183 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ | 236 | #define TALITOS2_CCCR_RESET 0x1 /* channel reset on SEC2 */ |
184 | #define TALITOS_CCCR_LO 0xc | 237 | #define TALITOS_CCCR_LO 0xc |
185 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ | 238 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ |
186 | #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ | 239 | #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ |
187 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ | 240 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ |
188 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ | 241 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ |
189 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ | 242 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ |
243 | #define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */ | ||
190 | 244 | ||
191 | /* CCPSR: channel pointer status register */ | 245 | /* CCPSR: channel pointer status register */ |
192 | #define TALITOS_CCPSR 0x10 | 246 | #define TALITOS_CCPSR 0x10 |
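
The ISR1_FORMAT/ISR2_FORMAT helpers above replicate one 4-bit channel mask at the bit positions each controller family uses: SEC1 spreads it across the two high half-words, SEC2 packs it into the low byte pair, which reproduces the pre-patch SEC2 constants. A standalone sanity sketch of the expansions (not driver code; unsigned arguments avoid the signed-shift corner case):

	#define ISR1_FORMAT(x) (((x) << 28) | ((x) << 16))
	#define ISR2_FORMAT(x) (((x) << 4) | (x))

	_Static_assert(ISR1_FORMAT(0xfU) == 0xf00f0000U, "SEC1 enable mask");
	_Static_assert(ISR1_FORMAT(0x5U) == 0x50050000U, "SEC1 done mask");
	_Static_assert(ISR2_FORMAT(0xfU) == 0xffU, "SEC2 4-channel mask");
	_Static_assert(ISR2_FORMAT(0xaU) == 0xaaU, "old TALITOS_ISR_4CHERR");
	_Static_assert(ISR2_FORMAT(0x5U) == 0x55U, "old TALITOS_ISR_4CHDONE");
	_Static_assert((ISR2_FORMAT(0xfU) | 0x10000U) == 0x100ffU,
		       "old TALITOS_IMR_INIT preserved");
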
@@ -224,37 +278,48 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
224 | #define TALITOS_SCATTER 0xe0 | 278 | #define TALITOS_SCATTER 0xe0 |
225 | #define TALITOS_SCATTER_LO 0xe4 | 279 | #define TALITOS_SCATTER_LO 0xe4 |
226 | 280 | ||
281 | /* execution unit registers base */ | ||
282 | #define TALITOS2_DEU 0x2000 | ||
283 | #define TALITOS2_AESU 0x4000 | ||
284 | #define TALITOS2_MDEU 0x6000 | ||
285 | #define TALITOS2_AFEU 0x8000 | ||
286 | #define TALITOS2_RNGU 0xa000 | ||
287 | #define TALITOS2_PKEU 0xc000 | ||
288 | #define TALITOS2_KEU 0xe000 | ||
289 | #define TALITOS2_CRCU 0xf000 | ||
290 | |||
291 | #define TALITOS12_AESU 0x4000 | ||
292 | #define TALITOS12_DEU 0x5000 | ||
293 | #define TALITOS12_MDEU 0x6000 | ||
294 | |||
295 | #define TALITOS10_AFEU 0x8000 | ||
296 | #define TALITOS10_DEU 0xa000 | ||
297 | #define TALITOS10_MDEU 0xc000 | ||
298 | #define TALITOS10_RNGU 0xe000 | ||
299 | #define TALITOS10_PKEU 0x10000 | ||
300 | #define TALITOS10_AESU 0x12000 | ||
301 | |||
227 | /* execution unit interrupt status registers */ | 302 | /* execution unit interrupt status registers */ |
228 | #define TALITOS_DEUISR 0x2030 /* DES unit */ | 303 | #define TALITOS_EUDSR 0x10 /* data size */ |
229 | #define TALITOS_DEUISR_LO 0x2034 | 304 | #define TALITOS_EUDSR_LO 0x14 |
230 | #define TALITOS_AESUISR 0x4030 /* AES unit */ | 305 | #define TALITOS_EURCR 0x18 /* reset control */
231 | #define TALITOS_AESUISR_LO 0x4034 | 306 | #define TALITOS_EURCR_LO 0x1c |
232 | #define TALITOS_MDEUISR 0x6030 /* message digest unit */ | 307 | #define TALITOS_EUSR 0x28 /* rng status */ |
233 | #define TALITOS_MDEUISR_LO 0x6034 | 308 | #define TALITOS_EUSR_LO 0x2c |
234 | #define TALITOS_MDEUICR 0x6038 /* interrupt control */ | 309 | #define TALITOS_EUISR 0x30 |
235 | #define TALITOS_MDEUICR_LO 0x603c | 310 | #define TALITOS_EUISR_LO 0x34 |
311 | #define TALITOS_EUICR 0x38 /* int. control */ | ||
312 | #define TALITOS_EUICR_LO 0x3c | ||
313 | #define TALITOS_EU_FIFO 0x800 /* output FIFO */ | ||
314 | #define TALITOS_EU_FIFO_LO 0x804 /* output FIFO */ | ||
315 | /* DES unit */ | ||
316 | #define TALITOS1_DEUICR_KPE 0x00200000 /* Key Parity Error */ | ||
317 | /* message digest unit */ | ||
236 | #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ | 318 | #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ |
237 | #define TALITOS_AFEUISR 0x8030 /* arc4 unit */ | 319 | /* random number unit */ |
238 | #define TALITOS_AFEUISR_LO 0x8034 | ||
239 | #define TALITOS_RNGUISR 0xa030 /* random number unit */ | ||
240 | #define TALITOS_RNGUISR_LO 0xa034 | ||
241 | #define TALITOS_RNGUSR 0xa028 /* rng status */ | ||
242 | #define TALITOS_RNGUSR_LO 0xa02c | ||
243 | #define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */ | 320 | #define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */ |
244 | #define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */ | 321 | #define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */ |
245 | #define TALITOS_RNGUDSR 0xa010 /* data size */ | ||
246 | #define TALITOS_RNGUDSR_LO 0xa014 | ||
247 | #define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */ | ||
248 | #define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */ | ||
249 | #define TALITOS_RNGURCR 0xa018 /* reset control */ | ||
250 | #define TALITOS_RNGURCR_LO 0xa01c | ||
251 | #define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */ | 322 | #define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */ |
252 | #define TALITOS_PKEUISR 0xc030 /* public key unit */ | ||
253 | #define TALITOS_PKEUISR_LO 0xc034 | ||
254 | #define TALITOS_KEUISR 0xe030 /* kasumi unit */ | ||
255 | #define TALITOS_KEUISR_LO 0xe034 | ||
256 | #define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ | ||
257 | #define TALITOS_CRCUISR_LO 0xf034 | ||
258 | 323 | ||
259 | #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 | 324 | #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 |
260 | #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 | 325 | #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 |
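
The rework above replaces per-unit absolute offsets (e.g. TALITOS_DEUISR at 0x2030) with a per-unit base pointer in struct talitos_private plus small common offsets, so the same TALITOS_EUISR constant works for every execution unit on both the SEC1 and SEC2 maps. A hedged before/after sketch, assuming the reg_deu field added in this patch (in_be32() is the PowerPC MMIO accessor this driver uses):

	#include <asm/io.h>		/* in_be32() */

	/* Hypothetical helper, not from the driver */
	static u32 read_deu_isr(struct talitos_private *priv)
	{
		/* old scheme: in_be32(priv->reg + TALITOS_DEUISR), i.e. reg + 0x2030 */
		/* new scheme: unit base resolved at probe time plus a shared offset  */
		return in_be32(priv->reg_deu + TALITOS_EUISR);
	}
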
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index b35e5c4b025a..30796441b0a6 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig | |||
@@ -7,6 +7,8 @@ | |||
7 | config CRYPTO_DEV_UX500_CRYP | 7 | config CRYPTO_DEV_UX500_CRYP |
8 | tristate "UX500 crypto driver for CRYP block" | 8 | tristate "UX500 crypto driver for CRYP block" |
9 | depends on CRYPTO_DEV_UX500 | 9 | depends on CRYPTO_DEV_UX500 |
10 | select CRYPTO_ALGAPI | ||
11 | select CRYPTO_BLKCIPHER | ||
10 | select CRYPTO_DES | 12 | select CRYPTO_DES |
11 | help | 13 | help |
12 | This selects the crypto driver for the UX500_CRYP hardware. It supports | 14 | This selects the crypto driver for the UX500_CRYP hardware. It supports |
@@ -16,7 +18,6 @@ config CRYPTO_DEV_UX500_HASH | |||
16 | tristate "UX500 crypto driver for HASH block" | 18 | tristate "UX500 crypto driver for HASH block" |
17 | depends on CRYPTO_DEV_UX500 | 19 | depends on CRYPTO_DEV_UX500 |
18 | select CRYPTO_HASH | 20 | select CRYPTO_HASH |
19 | select CRYPTO_HMAC | ||
20 | help | 21 | help |
21 | This selects the hash driver for the UX500_HASH hardware. | 22 | This selects the hash driver for the UX500_HASH hardware. |
22 | Depends on UX500/STM DMA if running in DMA mode. | 23 | Depends on UX500/STM DMA if running in DMA mode. |
@@ -24,7 +25,6 @@ config CRYPTO_DEV_UX500_HASH | |||
24 | config CRYPTO_DEV_UX500_DEBUG | 25 | config CRYPTO_DEV_UX500_DEBUG |
25 | bool "Activate ux500 platform debug-mode for crypto and hash block" | 26 | bool "Activate ux500 platform debug-mode for crypto and hash block" |
26 | depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH | 27 | depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH |
27 | default n | ||
28 | help | 28 | help |
29 | Say Y if you want to add debug prints to ux500_hash and | 29 | Say Y if you want to add debug prints to ux500_hash and |
30 | ux500_cryp devices. | 30 | ux500_cryp devices. |
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index 771babf16aa0..89d8208d9851 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config CRYPTO_DEV_VMX_ENCRYPT | 1 | config CRYPTO_DEV_VMX_ENCRYPT |
2 | tristate "Encryption acceleration support on P8 CPU" | 2 | tristate "Encryption acceleration support on P8 CPU" |
3 | depends on PPC64 && CRYPTO_DEV_VMX | 3 | depends on CRYPTO_DEV_VMX |
4 | default y | 4 | default y |
5 | help | 5 | help |
6 | Support for VMX cryptographic acceleration instructions on Power8 CPU. | 6 | Support for VMX cryptographic acceleration instructions on Power8 CPU. |
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index c699c6e6c82e..d28ab96a2475 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile | |||
@@ -4,7 +4,7 @@ vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o gha | |||
4 | ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) | 4 | ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) |
5 | TARGET := linux-ppc64le | 5 | TARGET := linux-ppc64le |
6 | else | 6 | else |
7 | TARGET := linux-pcc64 | 7 | TARGET := linux-ppc64 |
8 | endif | 8 | endif |
9 | 9 | ||
10 | quiet_cmd_perl = PERL $@ | 10 | quiet_cmd_perl = PERL $@ |
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index a9064e36e7b5..e79e567e43aa 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c | |||
@@ -30,116 +30,118 @@ | |||
30 | #include "aesp8-ppc.h" | 30 | #include "aesp8-ppc.h" |
31 | 31 | ||
32 | struct p8_aes_ctx { | 32 | struct p8_aes_ctx { |
33 | struct crypto_cipher *fallback; | 33 | struct crypto_cipher *fallback; |
34 | struct aes_key enc_key; | 34 | struct aes_key enc_key; |
35 | struct aes_key dec_key; | 35 | struct aes_key dec_key; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | static int p8_aes_init(struct crypto_tfm *tfm) | 38 | static int p8_aes_init(struct crypto_tfm *tfm) |
39 | { | 39 | { |
40 | const char *alg; | 40 | const char *alg; |
41 | struct crypto_cipher *fallback; | 41 | struct crypto_cipher *fallback; |
42 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 42 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
43 | 43 | ||
44 | if (!(alg = crypto_tfm_alg_name(tfm))) { | 44 | if (!(alg = crypto_tfm_alg_name(tfm))) { |
45 | printk(KERN_ERR "Failed to get algorithm name.\n"); | 45 | printk(KERN_ERR "Failed to get algorithm name.\n"); |
46 | return -ENOENT; | 46 | return -ENOENT; |
47 | } | 47 | } |
48 | 48 | ||
49 | fallback = crypto_alloc_cipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); | 49 | fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
50 | if (IS_ERR(fallback)) { | 50 | if (IS_ERR(fallback)) { |
51 | printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", | 51 | printk(KERN_ERR |
52 | alg, PTR_ERR(fallback)); | 52 | "Failed to allocate transformation for '%s': %ld\n", |
53 | return PTR_ERR(fallback); | 53 | alg, PTR_ERR(fallback)); |
54 | } | 54 | return PTR_ERR(fallback); |
55 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | 55 | } |
56 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); | 56 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", |
57 | 57 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); | |
58 | crypto_cipher_set_flags(fallback, | 58 | |
59 | crypto_cipher_get_flags((struct crypto_cipher *) tfm)); | 59 | crypto_cipher_set_flags(fallback, |
60 | ctx->fallback = fallback; | 60 | crypto_cipher_get_flags((struct |
61 | 61 | crypto_cipher *) | |
62 | return 0; | 62 | tfm)); |
63 | ctx->fallback = fallback; | ||
64 | |||
65 | return 0; | ||
63 | } | 66 | } |
64 | 67 | ||
65 | static void p8_aes_exit(struct crypto_tfm *tfm) | 68 | static void p8_aes_exit(struct crypto_tfm *tfm) |
66 | { | 69 | { |
67 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 70 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
68 | 71 | ||
69 | if (ctx->fallback) { | 72 | if (ctx->fallback) { |
70 | crypto_free_cipher(ctx->fallback); | 73 | crypto_free_cipher(ctx->fallback); |
71 | ctx->fallback = NULL; | 74 | ctx->fallback = NULL; |
72 | } | 75 | } |
73 | } | 76 | } |
74 | 77 | ||
75 | static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, | 78 | static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, |
76 | unsigned int keylen) | 79 | unsigned int keylen) |
77 | { | 80 | { |
78 | int ret; | 81 | int ret; |
79 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 82 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
80 | 83 | ||
81 | preempt_disable(); | 84 | preempt_disable(); |
82 | pagefault_disable(); | 85 | pagefault_disable(); |
83 | enable_kernel_altivec(); | 86 | enable_kernel_altivec(); |
84 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); | 87 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
85 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); | 88 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); |
86 | pagefault_enable(); | 89 | pagefault_enable(); |
87 | preempt_enable(); | 90 | preempt_enable(); |
88 | 91 | ||
89 | ret += crypto_cipher_setkey(ctx->fallback, key, keylen); | 92 | ret += crypto_cipher_setkey(ctx->fallback, key, keylen); |
90 | return ret; | 93 | return ret; |
91 | } | 94 | } |
92 | 95 | ||
93 | static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | 96 | static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
94 | { | 97 | { |
95 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 98 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
96 | 99 | ||
97 | if (in_interrupt()) { | 100 | if (in_interrupt()) { |
98 | crypto_cipher_encrypt_one(ctx->fallback, dst, src); | 101 | crypto_cipher_encrypt_one(ctx->fallback, dst, src); |
99 | } else { | 102 | } else { |
100 | preempt_disable(); | 103 | preempt_disable(); |
101 | pagefault_disable(); | 104 | pagefault_disable(); |
102 | enable_kernel_altivec(); | 105 | enable_kernel_altivec(); |
103 | aes_p8_encrypt(src, dst, &ctx->enc_key); | 106 | aes_p8_encrypt(src, dst, &ctx->enc_key); |
104 | pagefault_enable(); | 107 | pagefault_enable(); |
105 | preempt_enable(); | 108 | preempt_enable(); |
106 | } | 109 | } |
107 | } | 110 | } |
108 | 111 | ||
109 | static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | 112 | static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
110 | { | 113 | { |
111 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 114 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
112 | 115 | ||
113 | if (in_interrupt()) { | 116 | if (in_interrupt()) { |
114 | crypto_cipher_decrypt_one(ctx->fallback, dst, src); | 117 | crypto_cipher_decrypt_one(ctx->fallback, dst, src); |
115 | } else { | 118 | } else { |
116 | preempt_disable(); | 119 | preempt_disable(); |
117 | pagefault_disable(); | 120 | pagefault_disable(); |
118 | enable_kernel_altivec(); | 121 | enable_kernel_altivec(); |
119 | aes_p8_decrypt(src, dst, &ctx->dec_key); | 122 | aes_p8_decrypt(src, dst, &ctx->dec_key); |
120 | pagefault_enable(); | 123 | pagefault_enable(); |
121 | preempt_enable(); | 124 | preempt_enable(); |
122 | } | 125 | } |
123 | } | 126 | } |
124 | 127 | ||
125 | struct crypto_alg p8_aes_alg = { | 128 | struct crypto_alg p8_aes_alg = { |
126 | .cra_name = "aes", | 129 | .cra_name = "aes", |
127 | .cra_driver_name = "p8_aes", | 130 | .cra_driver_name = "p8_aes", |
128 | .cra_module = THIS_MODULE, | 131 | .cra_module = THIS_MODULE, |
129 | .cra_priority = 1000, | 132 | .cra_priority = 1000, |
130 | .cra_type = NULL, | 133 | .cra_type = NULL, |
131 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, | 134 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, |
132 | .cra_alignmask = 0, | 135 | .cra_alignmask = 0, |
133 | .cra_blocksize = AES_BLOCK_SIZE, | 136 | .cra_blocksize = AES_BLOCK_SIZE, |
134 | .cra_ctxsize = sizeof(struct p8_aes_ctx), | 137 | .cra_ctxsize = sizeof(struct p8_aes_ctx), |
135 | .cra_init = p8_aes_init, | 138 | .cra_init = p8_aes_init, |
136 | .cra_exit = p8_aes_exit, | 139 | .cra_exit = p8_aes_exit, |
137 | .cra_cipher = { | 140 | .cra_cipher = { |
138 | .cia_min_keysize = AES_MIN_KEY_SIZE, | 141 | .cia_min_keysize = AES_MIN_KEY_SIZE, |
139 | .cia_max_keysize = AES_MAX_KEY_SIZE, | 142 | .cia_max_keysize = AES_MAX_KEY_SIZE, |
140 | .cia_setkey = p8_aes_setkey, | 143 | .cia_setkey = p8_aes_setkey, |
141 | .cia_encrypt = p8_aes_encrypt, | 144 | .cia_encrypt = p8_aes_encrypt, |
142 | .cia_decrypt = p8_aes_decrypt, | 145 | .cia_decrypt = p8_aes_decrypt, |
143 | }, | 146 | }, |
144 | }; | 147 | }; |
145 | |||
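Because p8_aes registers with cra_priority 1000, well above the generic software AES, a caller that allocates "aes" by name transparently picks up the VMX implementation whenever this module is loaded. A minimal consumer sketch, not part of this patch; the all-zero key is illustrative only, and the snippet assumes <linux/crypto.h> and <crypto/aes.h>:

static int aes_one_block_example(void)
{
	struct crypto_cipher *tfm;
	u8 key[AES_MIN_KEY_SIZE] = { 0 };	/* illustrative AES-128 key */
	u8 in[AES_BLOCK_SIZE] = { 0 };
	u8 out[AES_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);	/* highest priority wins */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one block */

	crypto_free_cipher(tfm);
	return err;
}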
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 477284abdd11..7299995c78ec 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
@@ -31,160 +31,168 @@ | |||
31 | #include "aesp8-ppc.h" | 31 | #include "aesp8-ppc.h" |
32 | 32 | ||
33 | struct p8_aes_cbc_ctx { | 33 | struct p8_aes_cbc_ctx { |
34 | struct crypto_blkcipher *fallback; | 34 | struct crypto_blkcipher *fallback; |
35 | struct aes_key enc_key; | 35 | struct aes_key enc_key; |
36 | struct aes_key dec_key; | 36 | struct aes_key dec_key; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static int p8_aes_cbc_init(struct crypto_tfm *tfm) | 39 | static int p8_aes_cbc_init(struct crypto_tfm *tfm) |
40 | { | 40 | { |
41 | const char *alg; | 41 | const char *alg; |
42 | struct crypto_blkcipher *fallback; | 42 | struct crypto_blkcipher *fallback; |
43 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 43 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); |
44 | 44 | ||
45 | if (!(alg = crypto_tfm_alg_name(tfm))) { | 45 | if (!(alg = crypto_tfm_alg_name(tfm))) { |
46 | printk(KERN_ERR "Failed to get algorithm name.\n"); | 46 | printk(KERN_ERR "Failed to get algorithm name.\n"); |
47 | return -ENOENT; | 47 | return -ENOENT; |
48 | } | 48 | } |
49 | 49 | ||
50 | fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); | 50 | fallback = |
51 | if (IS_ERR(fallback)) { | 51 | crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
52 | printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", | 52 | if (IS_ERR(fallback)) { |
53 | alg, PTR_ERR(fallback)); | 53 | printk(KERN_ERR |
54 | return PTR_ERR(fallback); | 54 | "Failed to allocate transformation for '%s': %ld\n", |
55 | } | 55 | alg, PTR_ERR(fallback)); |
56 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | 56 | return PTR_ERR(fallback); |
57 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); | 57 | } |
58 | 58 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | |
59 | crypto_blkcipher_set_flags(fallback, | 59 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); |
60 | crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); | 60 | |
61 | ctx->fallback = fallback; | 61 | crypto_blkcipher_set_flags( |
62 | 62 | fallback, | |
63 | return 0; | 63 | crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); |
64 | ctx->fallback = fallback; | ||
65 | |||
66 | return 0; | ||
64 | } | 67 | } |
65 | 68 | ||
66 | static void p8_aes_cbc_exit(struct crypto_tfm *tfm) | 69 | static void p8_aes_cbc_exit(struct crypto_tfm *tfm) |
67 | { | 70 | { |
68 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 71 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); |
69 | 72 | ||
70 | if (ctx->fallback) { | 73 | if (ctx->fallback) { |
71 | crypto_free_blkcipher(ctx->fallback); | 74 | crypto_free_blkcipher(ctx->fallback); |
72 | ctx->fallback = NULL; | 75 | ctx->fallback = NULL; |
73 | } | 76 | } |
74 | } | 77 | } |
75 | 78 | ||
76 | static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, | 79 | static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, |
77 | unsigned int keylen) | 80 | unsigned int keylen) |
78 | { | 81 | { |
79 | int ret; | 82 | int ret; |
80 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 83 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); |
81 | 84 | ||
82 | preempt_disable(); | 85 | preempt_disable(); |
83 | pagefault_disable(); | 86 | pagefault_disable(); |
84 | enable_kernel_altivec(); | 87 | enable_kernel_altivec(); |
85 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); | 88 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
86 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); | 89 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); |
87 | pagefault_enable(); | 90 | pagefault_enable(); |
88 | preempt_enable(); | 91 | preempt_enable(); |
89 | 92 | ||
90 | ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); | 93 | ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); |
91 | return ret; | 94 | return ret; |
92 | } | 95 | } |
93 | 96 | ||
94 | static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | 97 | static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, |
95 | struct scatterlist *dst, struct scatterlist *src, | 98 | struct scatterlist *dst, |
96 | unsigned int nbytes) | 99 | struct scatterlist *src, unsigned int nbytes) |
97 | { | 100 | { |
98 | int ret; | 101 | int ret; |
99 | struct blkcipher_walk walk; | 102 | struct blkcipher_walk walk; |
100 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( | 103 | struct p8_aes_cbc_ctx *ctx = |
101 | crypto_blkcipher_tfm(desc->tfm)); | 104 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); |
102 | struct blkcipher_desc fallback_desc = { | 105 | struct blkcipher_desc fallback_desc = { |
103 | .tfm = ctx->fallback, | 106 | .tfm = ctx->fallback, |
104 | .info = desc->info, | 107 | .info = desc->info, |
105 | .flags = desc->flags | 108 | .flags = desc->flags |
106 | }; | 109 | }; |
107 | 110 | ||
108 | if (in_interrupt()) { | 111 | if (in_interrupt()) { |
109 | ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); | 112 | ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, |
110 | } else { | 113 | nbytes); |
111 | preempt_disable(); | 114 | } else { |
112 | pagefault_disable(); | 115 | preempt_disable(); |
113 | enable_kernel_altivec(); | 116 | pagefault_disable(); |
114 | 117 | enable_kernel_altivec(); | |
115 | blkcipher_walk_init(&walk, dst, src, nbytes); | 118 | |
116 | ret = blkcipher_walk_virt(desc, &walk); | 119 | blkcipher_walk_init(&walk, dst, src, nbytes); |
117 | while ((nbytes = walk.nbytes)) { | 120 | ret = blkcipher_walk_virt(desc, &walk); |
118 | aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | 121 | while ((nbytes = walk.nbytes)) { |
119 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); | 122 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
123 | walk.dst.virt.addr, | ||
124 | nbytes & AES_BLOCK_MASK, | ||
125 | &ctx->enc_key, walk.iv, 1); | ||
120 | nbytes &= AES_BLOCK_SIZE - 1; | 126 | nbytes &= AES_BLOCK_SIZE - 1; |
121 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 127 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
122 | } | 128 | } |
123 | 129 | ||
124 | pagefault_enable(); | 130 | pagefault_enable(); |
125 | preempt_enable(); | 131 | preempt_enable(); |
126 | } | 132 | } |
127 | 133 | ||
128 | return ret; | 134 | return ret; |
129 | } | 135 | } |
130 | 136 | ||
131 | static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | 137 | static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, |
132 | struct scatterlist *dst, struct scatterlist *src, | 138 | struct scatterlist *dst, |
133 | unsigned int nbytes) | 139 | struct scatterlist *src, unsigned int nbytes) |
134 | { | 140 | { |
135 | int ret; | 141 | int ret; |
136 | struct blkcipher_walk walk; | 142 | struct blkcipher_walk walk; |
137 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( | 143 | struct p8_aes_cbc_ctx *ctx = |
138 | crypto_blkcipher_tfm(desc->tfm)); | 144 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); |
139 | struct blkcipher_desc fallback_desc = { | 145 | struct blkcipher_desc fallback_desc = { |
140 | .tfm = ctx->fallback, | 146 | .tfm = ctx->fallback, |
141 | .info = desc->info, | 147 | .info = desc->info, |
142 | .flags = desc->flags | 148 | .flags = desc->flags |
143 | }; | 149 | }; |
144 | 150 | ||
145 | if (in_interrupt()) { | 151 | if (in_interrupt()) { |
146 | ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); | 152 | ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, |
147 | } else { | 153 | nbytes); |
148 | preempt_disable(); | 154 | } else { |
149 | pagefault_disable(); | 155 | preempt_disable(); |
150 | enable_kernel_altivec(); | 156 | pagefault_disable(); |
151 | 157 | enable_kernel_altivec(); | |
152 | blkcipher_walk_init(&walk, dst, src, nbytes); | 158 | |
153 | ret = blkcipher_walk_virt(desc, &walk); | 159 | blkcipher_walk_init(&walk, dst, src, nbytes); |
154 | while ((nbytes = walk.nbytes)) { | 160 | ret = blkcipher_walk_virt(desc, &walk); |
155 | aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | 161 | while ((nbytes = walk.nbytes)) { |
156 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); | 162 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
163 | walk.dst.virt.addr, | ||
164 | nbytes & AES_BLOCK_MASK, | ||
165 | &ctx->dec_key, walk.iv, 0); | ||
157 | nbytes &= AES_BLOCK_SIZE - 1; | 166 | nbytes &= AES_BLOCK_SIZE - 1; |
158 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 167 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
159 | } | 168 | } |
160 | 169 | ||
161 | pagefault_enable(); | 170 | pagefault_enable(); |
162 | preempt_enable(); | 171 | preempt_enable(); |
163 | } | 172 | } |
164 | 173 | ||
165 | return ret; | 174 | return ret; |
166 | } | 175 | } |
167 | 176 | ||
168 | 177 | ||
169 | struct crypto_alg p8_aes_cbc_alg = { | 178 | struct crypto_alg p8_aes_cbc_alg = { |
170 | .cra_name = "cbc(aes)", | 179 | .cra_name = "cbc(aes)", |
171 | .cra_driver_name = "p8_aes_cbc", | 180 | .cra_driver_name = "p8_aes_cbc", |
172 | .cra_module = THIS_MODULE, | 181 | .cra_module = THIS_MODULE, |
173 | .cra_priority = 1000, | 182 | .cra_priority = 1000, |
174 | .cra_type = &crypto_blkcipher_type, | 183 | .cra_type = &crypto_blkcipher_type, |
175 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, | 184 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, |
176 | .cra_alignmask = 0, | 185 | .cra_alignmask = 0, |
177 | .cra_blocksize = AES_BLOCK_SIZE, | 186 | .cra_blocksize = AES_BLOCK_SIZE, |
178 | .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), | 187 | .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), |
179 | .cra_init = p8_aes_cbc_init, | 188 | .cra_init = p8_aes_cbc_init, |
180 | .cra_exit = p8_aes_cbc_exit, | 189 | .cra_exit = p8_aes_cbc_exit, |
181 | .cra_blkcipher = { | 190 | .cra_blkcipher = { |
182 | .ivsize = 0, | 191 | .ivsize = 0, |
183 | .min_keysize = AES_MIN_KEY_SIZE, | 192 | .min_keysize = AES_MIN_KEY_SIZE, |
184 | .max_keysize = AES_MAX_KEY_SIZE, | 193 | .max_keysize = AES_MAX_KEY_SIZE, |
185 | .setkey = p8_aes_cbc_setkey, | 194 | .setkey = p8_aes_cbc_setkey, |
186 | .encrypt = p8_aes_cbc_encrypt, | 195 | .encrypt = p8_aes_cbc_encrypt, |
187 | .decrypt = p8_aes_cbc_decrypt, | 196 | .decrypt = p8_aes_cbc_decrypt, |
188 | }, | 197 | }, |
189 | }; | 198 | }; |
190 | |||
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 96dbee4bf4a6..7adae42a7b79 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c | |||
@@ -30,138 +30,147 @@ | |||
30 | #include "aesp8-ppc.h" | 30 | #include "aesp8-ppc.h" |
31 | 31 | ||
32 | struct p8_aes_ctr_ctx { | 32 | struct p8_aes_ctr_ctx { |
33 | struct crypto_blkcipher *fallback; | 33 | struct crypto_blkcipher *fallback; |
34 | struct aes_key enc_key; | 34 | struct aes_key enc_key; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) | 37 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) |
38 | { | 38 | { |
39 | const char *alg; | 39 | const char *alg; |
40 | struct crypto_blkcipher *fallback; | 40 | struct crypto_blkcipher *fallback; |
41 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 41 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
42 | 42 | ||
43 | if (!(alg = crypto_tfm_alg_name(tfm))) { | 43 | if (!(alg = crypto_tfm_alg_name(tfm))) { |
44 | printk(KERN_ERR "Failed to get algorithm name.\n"); | 44 | printk(KERN_ERR "Failed to get algorithm name.\n"); |
45 | return -ENOENT; | 45 | return -ENOENT; |
46 | } | 46 | } |
47 | 47 | ||
48 | fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); | 48 | fallback = |
49 | if (IS_ERR(fallback)) { | 49 | crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
50 | printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", | 50 | if (IS_ERR(fallback)) { |
51 | alg, PTR_ERR(fallback)); | 51 | printk(KERN_ERR |
52 | return PTR_ERR(fallback); | 52 | "Failed to allocate transformation for '%s': %ld\n", |
53 | } | 53 | alg, PTR_ERR(fallback)); |
54 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | 54 | return PTR_ERR(fallback); |
55 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); | 55 | } |
56 | 56 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | |
57 | crypto_blkcipher_set_flags(fallback, | 57 | crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); |
58 | crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); | 58 | |
59 | ctx->fallback = fallback; | 59 | crypto_blkcipher_set_flags( |
60 | 60 | fallback, | |
61 | return 0; | 61 | crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); |
62 | ctx->fallback = fallback; | ||
63 | |||
64 | return 0; | ||
62 | } | 65 | } |
63 | 66 | ||
64 | static void p8_aes_ctr_exit(struct crypto_tfm *tfm) | 67 | static void p8_aes_ctr_exit(struct crypto_tfm *tfm) |
65 | { | 68 | { |
66 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 69 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
67 | 70 | ||
68 | if (ctx->fallback) { | 71 | if (ctx->fallback) { |
69 | crypto_free_blkcipher(ctx->fallback); | 72 | crypto_free_blkcipher(ctx->fallback); |
70 | ctx->fallback = NULL; | 73 | ctx->fallback = NULL; |
71 | } | 74 | } |
72 | } | 75 | } |
73 | 76 | ||
74 | static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, | 77 | static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, |
75 | unsigned int keylen) | 78 | unsigned int keylen) |
76 | { | 79 | { |
77 | int ret; | 80 | int ret; |
78 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 81 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
79 | 82 | ||
80 | pagefault_disable(); | 83 | pagefault_disable(); |
81 | enable_kernel_altivec(); | 84 | enable_kernel_altivec(); |
82 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); | 85 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
83 | pagefault_enable(); | 86 | pagefault_enable(); |
84 | 87 | ||
85 | ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); | 88 | ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); |
86 | return ret; | 89 | return ret; |
87 | } | 90 | } |
88 | 91 | ||
89 | static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, | 92 | static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, |
90 | struct blkcipher_walk *walk) | 93 | struct blkcipher_walk *walk) |
91 | { | 94 | { |
92 | u8 *ctrblk = walk->iv; | 95 | u8 *ctrblk = walk->iv; |
93 | u8 keystream[AES_BLOCK_SIZE]; | 96 | u8 keystream[AES_BLOCK_SIZE]; |
94 | u8 *src = walk->src.virt.addr; | 97 | u8 *src = walk->src.virt.addr; |
95 | u8 *dst = walk->dst.virt.addr; | 98 | u8 *dst = walk->dst.virt.addr; |
96 | unsigned int nbytes = walk->nbytes; | 99 | unsigned int nbytes = walk->nbytes; |
97 | 100 | ||
98 | pagefault_disable(); | 101 | pagefault_disable(); |
99 | enable_kernel_altivec(); | 102 | enable_kernel_altivec(); |
100 | aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); | 103 | aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); |
101 | pagefault_enable(); | 104 | pagefault_enable(); |
102 | 105 | ||
103 | crypto_xor(keystream, src, nbytes); | 106 | crypto_xor(keystream, src, nbytes); |
104 | memcpy(dst, keystream, nbytes); | 107 | memcpy(dst, keystream, nbytes); |
105 | crypto_inc(ctrblk, AES_BLOCK_SIZE); | 108 | crypto_inc(ctrblk, AES_BLOCK_SIZE); |
106 | } | 109 | } |
107 | 110 | ||
108 | static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, | 111 | static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, |
109 | struct scatterlist *dst, struct scatterlist *src, | 112 | struct scatterlist *dst, |
110 | unsigned int nbytes) | 113 | struct scatterlist *src, unsigned int nbytes) |
111 | { | 114 | { |
112 | int ret; | 115 | int ret; |
113 | struct blkcipher_walk walk; | 116 | struct blkcipher_walk walk; |
114 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx( | 117 | struct p8_aes_ctr_ctx *ctx = |
115 | crypto_blkcipher_tfm(desc->tfm)); | 118 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); |
116 | struct blkcipher_desc fallback_desc = { | 119 | struct blkcipher_desc fallback_desc = { |
117 | .tfm = ctx->fallback, | 120 | .tfm = ctx->fallback, |
118 | .info = desc->info, | 121 | .info = desc->info, |
119 | .flags = desc->flags | 122 | .flags = desc->flags |
120 | }; | 123 | }; |
121 | 124 | ||
122 | if (in_interrupt()) { | 125 | if (in_interrupt()) { |
123 | ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); | 126 | ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, |
124 | } else { | 127 | nbytes); |
125 | blkcipher_walk_init(&walk, dst, src, nbytes); | 128 | } else { |
126 | ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); | 129 | blkcipher_walk_init(&walk, dst, src, nbytes); |
127 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | 130 | ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); |
128 | pagefault_disable(); | 131 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
129 | enable_kernel_altivec(); | 132 | pagefault_disable(); |
130 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, | 133 | enable_kernel_altivec(); |
131 | (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); | 134 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, |
132 | pagefault_enable(); | 135 | walk.dst.virt.addr, |
133 | 136 | (nbytes & | |
134 | crypto_inc(walk.iv, AES_BLOCK_SIZE); | 137 | AES_BLOCK_MASK) / |
135 | nbytes &= AES_BLOCK_SIZE - 1; | 138 | AES_BLOCK_SIZE, |
136 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 139 | &ctx->enc_key, |
137 | } | 140 | walk.iv); |
138 | if (walk.nbytes) { | 141 | pagefault_enable(); |
139 | p8_aes_ctr_final(ctx, &walk); | 142 | |
140 | ret = blkcipher_walk_done(desc, &walk, 0); | 143 | crypto_inc(walk.iv, AES_BLOCK_SIZE); |
141 | } | 144 | nbytes &= AES_BLOCK_SIZE - 1; |
142 | } | 145 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
143 | 146 | } | |
144 | return ret; | 147 | if (walk.nbytes) { |
148 | p8_aes_ctr_final(ctx, &walk); | ||
149 | ret = blkcipher_walk_done(desc, &walk, 0); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | return ret; | ||
145 | } | 154 | } |
146 | 155 | ||
147 | struct crypto_alg p8_aes_ctr_alg = { | 156 | struct crypto_alg p8_aes_ctr_alg = { |
148 | .cra_name = "ctr(aes)", | 157 | .cra_name = "ctr(aes)", |
149 | .cra_driver_name = "p8_aes_ctr", | 158 | .cra_driver_name = "p8_aes_ctr", |
150 | .cra_module = THIS_MODULE, | 159 | .cra_module = THIS_MODULE, |
151 | .cra_priority = 1000, | 160 | .cra_priority = 1000, |
152 | .cra_type = &crypto_blkcipher_type, | 161 | .cra_type = &crypto_blkcipher_type, |
153 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, | 162 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, |
154 | .cra_alignmask = 0, | 163 | .cra_alignmask = 0, |
155 | .cra_blocksize = 1, | 164 | .cra_blocksize = 1, |
156 | .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), | 165 | .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), |
157 | .cra_init = p8_aes_ctr_init, | 166 | .cra_init = p8_aes_ctr_init, |
158 | .cra_exit = p8_aes_ctr_exit, | 167 | .cra_exit = p8_aes_ctr_exit, |
159 | .cra_blkcipher = { | 168 | .cra_blkcipher = { |
160 | .ivsize = 0, | 169 | .ivsize = 0, |
161 | .min_keysize = AES_MIN_KEY_SIZE, | 170 | .min_keysize = AES_MIN_KEY_SIZE, |
162 | .max_keysize = AES_MAX_KEY_SIZE, | 171 | .max_keysize = AES_MAX_KEY_SIZE, |
163 | .setkey = p8_aes_ctr_setkey, | 172 | .setkey = p8_aes_ctr_setkey, |
164 | .encrypt = p8_aes_ctr_crypt, | 173 | .encrypt = p8_aes_ctr_crypt, |
165 | .decrypt = p8_aes_ctr_crypt, | 174 | .decrypt = p8_aes_ctr_crypt, |
166 | }, | 175 | }, |
167 | }; | 176 | }; |
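The block/tail split in p8_aes_ctr_crypt() is easiest to see with concrete numbers; the 100-byte request below is an arbitrary example, assuming AES_BLOCK_SIZE == 16:

/*
 * nbytes = 100:
 *   full blocks: (100 & AES_BLOCK_MASK) / AES_BLOCK_SIZE = 96 / 16 = 6,
 *                handled by aes_p8_ctr32_encrypt_blocks();
 *   tail:        100 & (AES_BLOCK_SIZE - 1) = 4 bytes, handled by
 *                p8_aes_ctr_final(), which encrypts a single counter
 *                block and XORs only those 4 keystream bytes into dst.
 */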
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h index e963945a83e1..4cd34ee54a94 100644 --- a/drivers/crypto/vmx/aesp8-ppc.h +++ b/drivers/crypto/vmx/aesp8-ppc.h | |||
@@ -4,17 +4,18 @@ | |||
4 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) | 4 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) |
5 | 5 | ||
6 | struct aes_key { | 6 | struct aes_key { |
7 | u8 key[AES_MAX_KEYLENGTH]; | 7 | u8 key[AES_MAX_KEYLENGTH]; |
8 | int rounds; | 8 | int rounds; |
9 | }; | 9 | }; |
10 | 10 | ||
11 | int aes_p8_set_encrypt_key(const u8 *userKey, const int bits, | 11 | int aes_p8_set_encrypt_key(const u8 *userKey, const int bits, |
12 | struct aes_key *key); | 12 | struct aes_key *key); |
13 | int aes_p8_set_decrypt_key(const u8 *userKey, const int bits, | 13 | int aes_p8_set_decrypt_key(const u8 *userKey, const int bits, |
14 | struct aes_key *key); | 14 | struct aes_key *key); |
15 | void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key); | 15 | void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key); |
16 | void aes_p8_decrypt(const u8 *in, u8 *out,const struct aes_key *key); | 16 | void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key); |
17 | void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len, | 17 | void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len, |
18 | const struct aes_key *key, u8 *iv, const int enc); | 18 | const struct aes_key *key, u8 *iv, const int enc); |
19 | void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, | 19 | void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, |
20 | size_t len, const struct aes_key *key, const u8 *iv); | 20 | size_t len, const struct aes_key *key, |
21 | const u8 *iv); | ||
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index f255ec4a04d4..b5e29002b666 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c | |||
@@ -39,184 +39,188 @@ | |||
39 | void gcm_init_p8(u128 htable[16], const u64 Xi[2]); | 39 | void gcm_init_p8(u128 htable[16], const u64 Xi[2]); |
40 | void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); | 40 | void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); |
41 | void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], | 41 | void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], |
42 | const u8 *in,size_t len); | 42 | const u8 *in, size_t len); |
43 | 43 | ||
44 | struct p8_ghash_ctx { | 44 | struct p8_ghash_ctx { |
45 | u128 htable[16]; | 45 | u128 htable[16]; |
46 | struct crypto_shash *fallback; | 46 | struct crypto_shash *fallback; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct p8_ghash_desc_ctx { | 49 | struct p8_ghash_desc_ctx { |
50 | u64 shash[2]; | 50 | u64 shash[2]; |
51 | u8 buffer[GHASH_DIGEST_SIZE]; | 51 | u8 buffer[GHASH_DIGEST_SIZE]; |
52 | int bytes; | 52 | int bytes; |
53 | struct shash_desc fallback_desc; | 53 | struct shash_desc fallback_desc; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | static int p8_ghash_init_tfm(struct crypto_tfm *tfm) | 56 | static int p8_ghash_init_tfm(struct crypto_tfm *tfm) |
57 | { | 57 | { |
58 | const char *alg; | 58 | const char *alg; |
59 | struct crypto_shash *fallback; | 59 | struct crypto_shash *fallback; |
60 | struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); | 60 | struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); |
61 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); | 61 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); |
62 | 62 | ||
63 | if (!(alg = crypto_tfm_alg_name(tfm))) { | 63 | if (!(alg = crypto_tfm_alg_name(tfm))) { |
64 | printk(KERN_ERR "Failed to get algorithm name.\n"); | 64 | printk(KERN_ERR "Failed to get algorithm name.\n"); |
65 | return -ENOENT; | 65 | return -ENOENT; |
66 | } | 66 | } |
67 | 67 | ||
68 | fallback = crypto_alloc_shash(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); | 68 | fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
69 | if (IS_ERR(fallback)) { | 69 | if (IS_ERR(fallback)) { |
70 | printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", | 70 | printk(KERN_ERR |
71 | alg, PTR_ERR(fallback)); | 71 | "Failed to allocate transformation for '%s': %ld\n", |
72 | return PTR_ERR(fallback); | 72 | alg, PTR_ERR(fallback)); |
73 | } | 73 | return PTR_ERR(fallback); |
74 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", | 74 | } |
75 | crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); | 75 | printk(KERN_INFO "Using '%s' as fallback implementation.\n", |
76 | 76 | crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); | |
77 | crypto_shash_set_flags(fallback, | 77 | |
78 | crypto_shash_get_flags((struct crypto_shash *) tfm)); | 78 | crypto_shash_set_flags(fallback, |
79 | ctx->fallback = fallback; | 79 | crypto_shash_get_flags((struct crypto_shash |
80 | 80 | *) tfm)); | |
81 | shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) | 81 | ctx->fallback = fallback; |
82 | + crypto_shash_descsize(fallback); | 82 | |
83 | 83 | shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) | |
84 | return 0; | 84 | + crypto_shash_descsize(fallback); |
85 | |||
86 | return 0; | ||
85 | } | 87 | } |
86 | 88 | ||
87 | static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) | 89 | static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) |
88 | { | 90 | { |
89 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); | 91 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); |
90 | 92 | ||
91 | if (ctx->fallback) { | 93 | if (ctx->fallback) { |
92 | crypto_free_shash(ctx->fallback); | 94 | crypto_free_shash(ctx->fallback); |
93 | ctx->fallback = NULL; | 95 | ctx->fallback = NULL; |
94 | } | 96 | } |
95 | } | 97 | } |
96 | 98 | ||
97 | static int p8_ghash_init(struct shash_desc *desc) | 99 | static int p8_ghash_init(struct shash_desc *desc) |
98 | { | 100 | { |
99 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); | 101 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); |
100 | struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); | 102 | struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
101 | 103 | ||
102 | dctx->bytes = 0; | 104 | dctx->bytes = 0; |
103 | memset(dctx->shash, 0, GHASH_DIGEST_SIZE); | 105 | memset(dctx->shash, 0, GHASH_DIGEST_SIZE); |
104 | dctx->fallback_desc.tfm = ctx->fallback; | 106 | dctx->fallback_desc.tfm = ctx->fallback; |
105 | dctx->fallback_desc.flags = desc->flags; | 107 | dctx->fallback_desc.flags = desc->flags; |
106 | return crypto_shash_init(&dctx->fallback_desc); | 108 | return crypto_shash_init(&dctx->fallback_desc); |
107 | } | 109 | } |
108 | 110 | ||
109 | static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, | 111 | static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, |
110 | unsigned int keylen) | 112 | unsigned int keylen) |
111 | { | 113 | { |
112 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); | 114 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); |
113 | 115 | ||
114 | if (keylen != GHASH_KEY_LEN) | 116 | if (keylen != GHASH_KEY_LEN) |
115 | return -EINVAL; | 117 | return -EINVAL; |
116 | 118 | ||
117 | preempt_disable(); | 119 | preempt_disable(); |
118 | pagefault_disable(); | 120 | pagefault_disable(); |
119 | enable_kernel_altivec(); | 121 | enable_kernel_altivec(); |
120 | enable_kernel_fp(); | 122 | enable_kernel_fp(); |
121 | gcm_init_p8(ctx->htable, (const u64 *) key); | 123 | gcm_init_p8(ctx->htable, (const u64 *) key); |
122 | pagefault_enable(); | 124 | pagefault_enable(); |
123 | preempt_enable(); | 125 | preempt_enable(); |
124 | return crypto_shash_setkey(ctx->fallback, key, keylen); | 126 | return crypto_shash_setkey(ctx->fallback, key, keylen); |
125 | } | 127 | } |
126 | 128 | ||
127 | static int p8_ghash_update(struct shash_desc *desc, | 129 | static int p8_ghash_update(struct shash_desc *desc, |
128 | const u8 *src, unsigned int srclen) | 130 | const u8 *src, unsigned int srclen) |
129 | { | 131 | { |
130 | unsigned int len; | 132 | unsigned int len; |
131 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); | 133 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); |
132 | struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); | 134 | struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
133 | 135 | ||
134 | if (IN_INTERRUPT) { | 136 | if (IN_INTERRUPT) { |
135 | return crypto_shash_update(&dctx->fallback_desc, src, srclen); | 137 | return crypto_shash_update(&dctx->fallback_desc, src, |
136 | } else { | 138 | srclen); |
137 | if (dctx->bytes) { | 139 | } else { |
138 | if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { | 140 | if (dctx->bytes) { |
139 | memcpy(dctx->buffer + dctx->bytes, src, srclen); | 141 | if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { |
140 | dctx->bytes += srclen; | 142 | memcpy(dctx->buffer + dctx->bytes, src, |
141 | return 0; | 143 | srclen); |
142 | } | 144 | dctx->bytes += srclen; |
143 | memcpy(dctx->buffer + dctx->bytes, src, | 145 | return 0; |
144 | GHASH_DIGEST_SIZE - dctx->bytes); | 146 | } |
145 | preempt_disable(); | 147 | memcpy(dctx->buffer + dctx->bytes, src, |
146 | pagefault_disable(); | 148 | GHASH_DIGEST_SIZE - dctx->bytes); |
147 | enable_kernel_altivec(); | 149 | preempt_disable(); |
148 | enable_kernel_fp(); | 150 | pagefault_disable(); |
149 | gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, | 151 | enable_kernel_altivec(); |
150 | GHASH_DIGEST_SIZE); | 152 | enable_kernel_fp(); |
151 | pagefault_enable(); | 153 | gcm_ghash_p8(dctx->shash, ctx->htable, |
152 | preempt_enable(); | 154 | dctx->buffer, GHASH_DIGEST_SIZE); |
153 | src += GHASH_DIGEST_SIZE - dctx->bytes; | 155 | pagefault_enable(); |
154 | srclen -= GHASH_DIGEST_SIZE - dctx->bytes; | 156 | preempt_enable(); |
155 | dctx->bytes = 0; | 157 | src += GHASH_DIGEST_SIZE - dctx->bytes; |
156 | } | 158 | srclen -= GHASH_DIGEST_SIZE - dctx->bytes; |
157 | len = srclen & ~(GHASH_DIGEST_SIZE - 1); | 159 | dctx->bytes = 0; |
158 | if (len) { | 160 | } |
159 | preempt_disable(); | 161 | len = srclen & ~(GHASH_DIGEST_SIZE - 1); |
160 | pagefault_disable(); | 162 | if (len) { |
161 | enable_kernel_altivec(); | 163 | preempt_disable(); |
162 | enable_kernel_fp(); | 164 | pagefault_disable(); |
163 | gcm_ghash_p8(dctx->shash, ctx->htable, src, len); | 165 | enable_kernel_altivec(); |
164 | pagefault_enable(); | 166 | enable_kernel_fp(); |
165 | preempt_enable(); | 167 | gcm_ghash_p8(dctx->shash, ctx->htable, src, len); |
166 | src += len; | 168 | pagefault_enable(); |
167 | srclen -= len; | 169 | preempt_enable(); |
168 | } | 170 | src += len; |
169 | if (srclen) { | 171 | srclen -= len; |
170 | memcpy(dctx->buffer, src, srclen); | 172 | } |
171 | dctx->bytes = srclen; | 173 | if (srclen) { |
172 | } | 174 | memcpy(dctx->buffer, src, srclen); |
173 | return 0; | 175 | dctx->bytes = srclen; |
174 | } | 176 | } |
177 | return 0; | ||
178 | } | ||
175 | } | 179 | } |
176 | 180 | ||
177 | static int p8_ghash_final(struct shash_desc *desc, u8 *out) | 181 | static int p8_ghash_final(struct shash_desc *desc, u8 *out) |
178 | { | 182 | { |
179 | int i; | 183 | int i; |
180 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); | 184 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); |
181 | struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); | 185 | struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
182 | 186 | ||
183 | if (IN_INTERRUPT) { | 187 | if (IN_INTERRUPT) { |
184 | return crypto_shash_final(&dctx->fallback_desc, out); | 188 | return crypto_shash_final(&dctx->fallback_desc, out); |
185 | } else { | 189 | } else { |
186 | if (dctx->bytes) { | 190 | if (dctx->bytes) { |
187 | for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) | 191 | for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) |
188 | dctx->buffer[i] = 0; | 192 | dctx->buffer[i] = 0; |
189 | preempt_disable(); | 193 | preempt_disable(); |
190 | pagefault_disable(); | 194 | pagefault_disable(); |
191 | enable_kernel_altivec(); | 195 | enable_kernel_altivec(); |
192 | enable_kernel_fp(); | 196 | enable_kernel_fp(); |
193 | gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, | 197 | gcm_ghash_p8(dctx->shash, ctx->htable, |
194 | GHASH_DIGEST_SIZE); | 198 | dctx->buffer, GHASH_DIGEST_SIZE); |
195 | pagefault_enable(); | 199 | pagefault_enable(); |
196 | preempt_enable(); | 200 | preempt_enable(); |
197 | dctx->bytes = 0; | 201 | dctx->bytes = 0; |
198 | } | 202 | } |
199 | memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); | 203 | memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); |
200 | return 0; | 204 | return 0; |
201 | } | 205 | } |
202 | } | 206 | } |
203 | 207 | ||
204 | struct shash_alg p8_ghash_alg = { | 208 | struct shash_alg p8_ghash_alg = { |
205 | .digestsize = GHASH_DIGEST_SIZE, | 209 | .digestsize = GHASH_DIGEST_SIZE, |
206 | .init = p8_ghash_init, | 210 | .init = p8_ghash_init, |
207 | .update = p8_ghash_update, | 211 | .update = p8_ghash_update, |
208 | .final = p8_ghash_final, | 212 | .final = p8_ghash_final, |
209 | .setkey = p8_ghash_setkey, | 213 | .setkey = p8_ghash_setkey, |
210 | .descsize = sizeof(struct p8_ghash_desc_ctx), | 214 | .descsize = sizeof(struct p8_ghash_desc_ctx), |
211 | .base = { | 215 | .base = { |
212 | .cra_name = "ghash", | 216 | .cra_name = "ghash", |
213 | .cra_driver_name = "p8_ghash", | 217 | .cra_driver_name = "p8_ghash", |
214 | .cra_priority = 1000, | 218 | .cra_priority = 1000, |
215 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, | 219 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, |
216 | .cra_blocksize = GHASH_BLOCK_SIZE, | 220 | .cra_blocksize = GHASH_BLOCK_SIZE, |
217 | .cra_ctxsize = sizeof(struct p8_ghash_ctx), | 221 | .cra_ctxsize = sizeof(struct p8_ghash_ctx), |
218 | .cra_module = THIS_MODULE, | 222 | .cra_module = THIS_MODULE, |
219 | .cra_init = p8_ghash_init_tfm, | 223 | .cra_init = p8_ghash_init_tfm, |
220 | .cra_exit = p8_ghash_exit_tfm, | 224 | .cra_exit = p8_ghash_exit_tfm, |
221 | }, | 225 | }, |
222 | }; | 226 | }; |
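The partial-block buffering in p8_ghash_update() is easiest to follow with concrete sizes; the numbers below are an arbitrary example, assuming GHASH_DIGEST_SIZE == 16:

/*
 * Suppose dctx->bytes == 12 and a 20-byte update arrives:
 *   1. 4 bytes are copied to fill dctx->buffer to 16 and the buffer
 *      is hashed via gcm_ghash_p8(); src advances by 4, srclen drops
 *      to 16, dctx->bytes resets to 0.
 *   2. len = 16 & ~15 = 16, so the remaining 16 bytes are hashed
 *      directly from src in a single call.
 *   3. srclen is now 0, so nothing is re-buffered for the next update.
 */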
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index 44d8d5cfe40d..e163d5770438 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c | |||
@@ -32,57 +32,57 @@ extern struct crypto_alg p8_aes_alg; | |||
32 | extern struct crypto_alg p8_aes_cbc_alg; | 32 | extern struct crypto_alg p8_aes_cbc_alg; |
33 | extern struct crypto_alg p8_aes_ctr_alg; | 33 | extern struct crypto_alg p8_aes_ctr_alg; |
34 | static struct crypto_alg *algs[] = { | 34 | static struct crypto_alg *algs[] = { |
35 | &p8_aes_alg, | 35 | &p8_aes_alg, |
36 | &p8_aes_cbc_alg, | 36 | &p8_aes_cbc_alg, |
37 | &p8_aes_ctr_alg, | 37 | &p8_aes_ctr_alg, |
38 | NULL, | 38 | NULL, |
39 | }; | 39 | }; |
40 | 40 | ||
41 | int __init p8_init(void) | 41 | int __init p8_init(void) |
42 | { | 42 | { |
43 | int ret = 0; | 43 | int ret = 0; |
44 | struct crypto_alg **alg_it; | 44 | struct crypto_alg **alg_it; |
45 | 45 | ||
46 | if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) | 46 | if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) |
47 | return -ENODEV; | 47 | return -ENODEV; |
48 | 48 | ||
49 | for (alg_it = algs; *alg_it; alg_it++) { | 49 | for (alg_it = algs; *alg_it; alg_it++) { |
50 | ret = crypto_register_alg(*alg_it); | 50 | ret = crypto_register_alg(*alg_it); |
51 | printk(KERN_INFO "crypto_register_alg '%s' = %d\n", | 51 | printk(KERN_INFO "crypto_register_alg '%s' = %d\n", |
52 | (*alg_it)->cra_name, ret); | 52 | (*alg_it)->cra_name, ret); |
53 | if (ret) { | 53 | if (ret) { |
54 | for (alg_it--; alg_it >= algs; alg_it--) | 54 | for (alg_it--; alg_it >= algs; alg_it--) |
55 | crypto_unregister_alg(*alg_it); | 55 | crypto_unregister_alg(*alg_it); |
56 | break; | 56 | break; |
57 | } | 57 | } |
58 | } | 58 | } |
59 | if (ret) | 59 | if (ret) |
60 | return ret; | 60 | return ret; |
61 | 61 | ||
62 | ret = crypto_register_shash(&p8_ghash_alg); | 62 | ret = crypto_register_shash(&p8_ghash_alg); |
63 | if (ret) { | 63 | if (ret) { |
64 | for (alg_it = algs; *alg_it; alg_it++) | 64 | for (alg_it = algs; *alg_it; alg_it++) |
65 | crypto_unregister_alg(*alg_it); | 65 | crypto_unregister_alg(*alg_it); |
66 | } | 66 | } |
67 | return ret; | 67 | return ret; |
68 | } | 68 | } |
69 | 69 | ||
70 | void __exit p8_exit(void) | 70 | void __exit p8_exit(void) |
71 | { | 71 | { |
72 | struct crypto_alg **alg_it; | 72 | struct crypto_alg **alg_it; |
73 | 73 | ||
74 | for (alg_it = algs; *alg_it; alg_it++) { | 74 | for (alg_it = algs; *alg_it; alg_it++) { |
75 | printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); | 75 | printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); |
76 | crypto_unregister_alg(*alg_it); | 76 | crypto_unregister_alg(*alg_it); |
77 | } | 77 | } |
78 | crypto_unregister_shash(&p8_ghash_alg); | 78 | crypto_unregister_shash(&p8_ghash_alg); |
79 | } | 79 | } |
80 | 80 | ||
81 | module_init(p8_init); | 81 | module_init(p8_init); |
82 | module_exit(p8_exit); | 82 | module_exit(p8_exit); |
83 | 83 | ||
84 | MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>"); | 84 | MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>"); |
85 | MODULE_DESCRIPTION("IBM VMX cryptogaphic acceleration instructions support on Power 8"); | 85 | MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions " |
86 | "support on Power 8"); | ||
86 | MODULE_LICENSE("GPL"); | 87 | MODULE_LICENSE("GPL"); |
87 | MODULE_VERSION("1.0.0"); | 88 | MODULE_VERSION("1.0.0"); |
88 | |||
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 94b19be67574..7169ad04acc0 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h | |||
@@ -18,6 +18,65 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | 19 | ||
20 | /** | 20 | /** |
21 | * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API | ||
22 | * | ||
23 | * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD | ||
24 | * (listed as type "aead" in /proc/crypto) | ||
25 | * | ||
26 | * The most prominent examples of this type of encryption are GCM and CCM. ||
27 | * However, the kernel supports other types of AEAD ciphers which are defined | ||
28 | * with the following cipher string: | ||
29 | * | ||
30 | * authenc(keyed message digest, block cipher) | ||
31 | * | ||
32 | * For example: authenc(hmac(sha256), cbc(aes)) | ||
33 | * | ||
34 | * The example code provided for the asynchronous block cipher operation | ||
35 | * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged ||
36 | * for the *aead* pendants discussed in the following. In addition, for the AEAD ||
37 | * operation, the aead_request_set_assoc function must be used to set the | ||
38 | * pointer to the associated data memory location before performing the | ||
39 | * encryption or decryption operation. In case of an encryption, the associated | ||
40 | * data memory is filled during the encryption operation. For decryption, the | ||
41 | * associated data memory must contain data that is used to verify the integrity | ||
42 | * of the decrypted data. Another deviation from the asynchronous block cipher | ||
43 | * operation is that the caller should explicitly check for a -EBADMSG return ||
44 | * code from crypto_aead_decrypt. That error indicates an authentication error, i.e. ||
45 | * a breach in the integrity of the message. In essence, that -EBADMSG error | ||
46 | * code is the key bonus an AEAD cipher has over "standard" block chaining | ||
47 | * modes. | ||
48 | */ | ||
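A minimal sketch of the allocation flow described above; the authenc() composition string is just one example of a supported combination, and the helper name is hypothetical:

static int aead_alloc_example(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* e.g. -ENOENT if unavailable */

	/* set key and tag size, build and submit requests ... */

	crypto_free_aead(tfm);
	return 0;
}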
49 | |||
50 | /** | ||
51 | * struct aead_request - AEAD request | ||
52 | * @base: Common attributes for async crypto requests | ||
53 | * @old: Boolean indicating whether the old or new AEAD API is used ||
54 | * @assoclen: Length in bytes of associated data for authentication | ||
55 | * @cryptlen: Length of data to be encrypted or decrypted | ||
56 | * @iv: Initialisation vector | ||
57 | * @assoc: Associated data | ||
58 | * @src: Source data | ||
59 | * @dst: Destination data | ||
60 | * @__ctx: Start of private context data | ||
61 | */ | ||
62 | struct aead_request { | ||
63 | struct crypto_async_request base; | ||
64 | |||
65 | bool old; | ||
66 | |||
67 | unsigned int assoclen; | ||
68 | unsigned int cryptlen; | ||
69 | |||
70 | u8 *iv; | ||
71 | |||
72 | struct scatterlist *assoc; | ||
73 | struct scatterlist *src; | ||
74 | struct scatterlist *dst; | ||
75 | |||
76 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
77 | }; | ||
78 | |||
79 | /** | ||
21 | * struct aead_givcrypt_request - AEAD request with IV generation | 80 | * struct aead_givcrypt_request - AEAD request with IV generation |
22 | * @seq: Sequence number for IV generation | 81 | * @seq: Sequence number for IV generation |
23 | * @giv: Space for generated IV | 82 | * @giv: Space for generated IV |
@@ -30,6 +89,474 @@ struct aead_givcrypt_request { | |||
30 | struct aead_request areq; | 89 | struct aead_request areq; |
31 | }; | 90 | }; |
32 | 91 | ||
92 | /** | ||
93 | * struct aead_alg - AEAD cipher definition | ||
94 | * @maxauthsize: Set the maximum authentication tag size supported by the | ||
95 | * transformation. A transformation may support smaller tag sizes. | ||
96 | * As the authentication tag is a message digest to ensure the | ||
97 | * integrity of the encrypted data, a consumer typically wants the | ||
98 | * largest authentication tag possible as defined by this | ||
99 | * variable. | ||
100 | * @setauthsize: Set authentication size for the AEAD transformation. This | ||
101 | * function is used to specify the consumer requested size of the | ||
102 | * authentication tag to be either generated by the transformation | ||
103 | * during encryption or the size of the authentication tag to be | ||
104 | * supplied during the decryption operation. This function is also | ||
105 | * responsible for checking the authentication tag size for | ||
106 | * validity. | ||
107 | * @setkey: see struct ablkcipher_alg | ||
108 | * @encrypt: see struct ablkcipher_alg | ||
109 | * @decrypt: see struct ablkcipher_alg | ||
110 | * @geniv: see struct ablkcipher_alg | ||
111 | * @ivsize: see struct ablkcipher_alg | ||
112 | * @init: Initialize the cryptographic transformation object. This function | ||
113 | * is used to initialize the cryptographic transformation object. | ||
114 | * This function is called only once at the instantiation time, right | ||
115 | * after the transformation context was allocated. In case the | ||
116 | * cryptographic hardware has some special requirements which need to | ||
117 | * be handled by software, this function shall check for the precise | ||
118 | * requirement of the transformation and put any software fallbacks | ||
119 | * in place. | ||
120 | * @exit: Deinitialize the cryptographic transformation object. This is a | ||
121 | * counterpart to @init, used to remove various changes set in | ||
122 | * @init. | ||
123 | * | ||
124 | * All fields except @ivsize are mandatory and must be filled. ||
125 | */ | ||
126 | struct aead_alg { | ||
127 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | ||
128 | unsigned int keylen); | ||
129 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); | ||
130 | int (*encrypt)(struct aead_request *req); | ||
131 | int (*decrypt)(struct aead_request *req); | ||
132 | int (*init)(struct crypto_aead *tfm); | ||
133 | void (*exit)(struct crypto_aead *tfm); | ||
134 | |||
135 | const char *geniv; | ||
136 | |||
137 | unsigned int ivsize; | ||
138 | unsigned int maxauthsize; | ||
139 | |||
140 | struct crypto_alg base; | ||
141 | }; | ||
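A minimal sketch of how a driver might fill this structure; every my_* name and the numeric choices are hypothetical, and registration is assumed to go through the crypto_register_aead() helper introduced alongside this interface:

static struct aead_alg my_aead_alg = {
	.setkey		= my_setkey,
	.setauthsize	= my_setauthsize,
	.encrypt	= my_encrypt,
	.decrypt	= my_decrypt,
	.init		= my_init,	/* e.g. set up a software fallback */
	.exit		= my_exit,	/* undo whatever @init put in place */
	.ivsize		= 12,		/* e.g. a GCM-style nonce */
	.maxauthsize	= 16,
	.base		= {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mydriver",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_module		= THIS_MODULE,
	},
};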
142 | |||
143 | struct crypto_aead { | ||
144 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | ||
145 | unsigned int keylen); | ||
146 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); | ||
147 | int (*encrypt)(struct aead_request *req); | ||
148 | int (*decrypt)(struct aead_request *req); | ||
149 | int (*givencrypt)(struct aead_givcrypt_request *req); | ||
150 | int (*givdecrypt)(struct aead_givcrypt_request *req); | ||
151 | |||
152 | struct crypto_aead *child; | ||
153 | |||
154 | unsigned int authsize; | ||
155 | unsigned int reqsize; | ||
156 | |||
157 | struct crypto_tfm base; | ||
158 | }; | ||
159 | |||
160 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) | ||
161 | { | ||
162 | return container_of(tfm, struct crypto_aead, base); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * crypto_alloc_aead() - allocate AEAD cipher handle | ||
167 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
168 | * AEAD cipher | ||
169 | * @type: specifies the type of the cipher | ||
170 | * @mask: specifies the mask for the cipher | ||
171 | * | ||
172 | * Allocate a cipher handle for an AEAD. The returned struct | ||
173 | * crypto_aead is the cipher handle that is required for any subsequent | ||
174 | * API invocation for that AEAD. | ||
175 | * | ||
176 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
177 | * of an error, PTR_ERR() returns the error code. | ||
178 | */ | ||
179 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); | ||
180 | |||
181 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | ||
182 | { | ||
183 | return &tfm->base; | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * crypto_free_aead() - zeroize and free aead handle | ||
188 | * @tfm: cipher handle to be freed | ||
189 | */ | ||
190 | static inline void crypto_free_aead(struct crypto_aead *tfm) | ||
191 | { | ||
192 | crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); | ||
193 | } | ||
194 | |||
195 | static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm) | ||
196 | { | ||
197 | return tfm; | ||
198 | } | ||
199 | |||
200 | static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm) | ||
201 | { | ||
202 | return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead; | ||
203 | } | ||
204 | |||
205 | static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) | ||
206 | { | ||
207 | return container_of(crypto_aead_tfm(tfm)->__crt_alg, | ||
208 | struct aead_alg, base); | ||
209 | } | ||
210 | |||
211 | static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) | ||
212 | { | ||
213 | return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize : | ||
214 | alg->ivsize; | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * crypto_aead_ivsize() - obtain IV size | ||
219 | * @tfm: cipher handle | ||
220 | * | ||
221 | * The size of the IV for the aead referenced by the cipher handle is | ||
222 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
223 | * | ||
224 | * Return: IV size in bytes | ||
225 | */ | ||
226 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) | ||
227 | { | ||
228 | return crypto_aead_alg_ivsize(crypto_aead_alg(tfm)); | ||
229 | } | ||
230 | |||
231 | /** | ||
232 | * crypto_aead_authsize() - obtain maximum authentication data size | ||
233 | * @tfm: cipher handle | ||
234 | * | ||
235 | * The maximum size of the authentication data for the AEAD cipher referenced | ||
236 | * by the AEAD cipher handle is returned. The authentication data size may be | ||
237 | * zero if the cipher implements a hard-coded maximum. | ||
238 | * | ||
239 | * The authentication data may also be known as "tag value". | ||
240 | * | ||
241 | * Return: authentication data size / tag size in bytes | ||
242 | */ | ||
243 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) | ||
244 | { | ||
245 | return tfm->authsize; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * crypto_aead_blocksize() - obtain block size of cipher | ||
250 | * @tfm: cipher handle | ||
251 | * | ||
252 | * The block size for the AEAD referenced with the cipher handle is returned. | ||
253 | * The caller may use that information to allocate appropriate memory for the | ||
254 | * data returned by the encryption or decryption operation. ||
255 | * | ||
256 | * Return: block size of cipher | ||
257 | */ | ||
258 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) | ||
259 | { | ||
260 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); | ||
261 | } | ||
262 | |||
263 | static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) | ||
264 | { | ||
265 | return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); | ||
266 | } | ||
267 | |||
268 | static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) | ||
269 | { | ||
270 | return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); | ||
271 | } | ||
272 | |||
273 | static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) | ||
274 | { | ||
275 | crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); | ||
276 | } | ||
277 | |||
278 | static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) | ||
279 | { | ||
280 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * crypto_aead_setkey() - set key for cipher | ||
285 | * @tfm: cipher handle | ||
286 | * @key: buffer holding the key | ||
287 | * @keylen: length of the key in bytes | ||
288 | * | ||
289 | * The caller provided key is set for the AEAD referenced by the cipher | ||
290 | * handle. | ||
291 | * | ||
292 | * Note: the key length determines the cipher variant. Many block ciphers ||
293 | * implement different variants depending on the key size, such as AES-128 vs. ||
294 | * AES-192 vs. AES-256. When providing a 16-byte key for an AES cipher handle, ||
295 | * AES-128 is performed. ||
296 | * | ||
297 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
298 | */ | ||
299 | int crypto_aead_setkey(struct crypto_aead *tfm, | ||
300 | const u8 *key, unsigned int keylen); | ||
301 | |||
302 | /** | ||
303 | * crypto_aead_setauthsize() - set authentication data size | ||
304 | * @tfm: cipher handle | ||
305 | * @authsize: size of the authentication data / tag in bytes | ||
306 | * | ||
307 | * Set the authentication data size / tag size. AEAD requires an authentication | ||
308 | * tag (or MAC) in addition to the associated data. | ||
309 | * | ||
310 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
311 | */ | ||
312 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); | ||
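The two calls above are typically paired right after allocation; tfm is an already-allocated AEAD handle, and the 16-byte key and 16-byte tag are illustrative choices (get_random_bytes() needs <linux/random.h>):

u8 key[16];
int err;

get_random_bytes(key, sizeof(key));	/* illustrative key source */
err = crypto_aead_setkey(tfm, key, sizeof(key));
if (!err)
	err = crypto_aead_setauthsize(tfm, 16);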
313 | |||
314 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | ||
315 | { | ||
316 | return __crypto_aead_cast(req->base.tfm); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * crypto_aead_encrypt() - encrypt plaintext | ||
321 | * @req: reference to the aead_request handle that holds all information | ||
322 | * needed to perform the cipher operation | ||
323 | * | ||
324 | * Encrypt plaintext data using the aead_request handle. That data structure | ||
325 | * and how it is filled with data is discussed with the aead_request_* | ||
326 | * functions. | ||
327 | * | ||
328 | * IMPORTANT NOTE The encryption operation creates the authentication data / | ||
329 | * tag. That data is concatenated with the created ciphertext. | ||
330 | * The ciphertext memory size is therefore the given number of | ||
331 | * block cipher blocks + the size defined by the | ||
332 | * crypto_aead_setauthsize invocation. The caller must ensure | ||
333 | * that sufficient memory is available for the ciphertext and | ||
334 | * the authentication tag. | ||
335 | * | ||
336 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
337 | */ | ||
338 | static inline int crypto_aead_encrypt(struct aead_request *req) | ||
339 | { | ||
340 | return crypto_aead_reqtfm(req)->encrypt(req); | ||
341 | } | ||
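Spelling out the sizing rule from the note above; cryptlen stands for the payload length the caller will pass in. For stream-like modes such as GCM this is simply the payload plus the tag; block-based modes round the payload up to full blocks first:

/* the destination must hold the ciphertext plus the appended tag */
unsigned int dst_len = cryptlen + crypto_aead_authsize(tfm);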
342 | |||
343 | /** | ||
344 | * crypto_aead_decrypt() - decrypt ciphertext | ||
345 | * @req: reference to the aead_request handle that holds all information ||
346 | * needed to perform the cipher operation | ||
347 | * | ||
348 | * Decrypt ciphertext data using the aead_request handle. That data structure | ||
349 | * and how it is filled with data is discussed with the aead_request_* | ||
350 | * functions. | ||
351 | * | ||
352 | * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the | ||
353 | * authentication data / tag. That authentication data / tag | ||
354 | * must have the size defined by the crypto_aead_setauthsize | ||
355 | * invocation. | ||
356 | * | ||
357 | * | ||
358 | * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD | ||
359 | * cipher operation performs the authentication of the data during the | ||
360 | * decryption operation. Therefore, the function returns this error if | ||
361 | * the authentication of the ciphertext was unsuccessful (i.e. the | ||
362 | * integrity of the ciphertext or the associated data was violated); | ||
363 | * < 0 if an error occurred. | ||
364 | */ | ||
365 | static inline int crypto_aead_decrypt(struct aead_request *req) | ||
366 | { | ||
367 | if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req))) | ||
368 | return -EINVAL; | ||
369 | |||
370 | return crypto_aead_reqtfm(req)->decrypt(req); | ||
371 | } | ||
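Checking for the authentication failure described above; req and err are assumed from the surrounding context, and the log messages are illustrative:

err = crypto_aead_decrypt(req);
if (err == -EBADMSG)
	pr_warn("aead: tag mismatch, message integrity violated\n");
else if (err)
	pr_err("aead: decryption failed: %d\n", err);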
372 | |||
373 | /** | ||
374 | * DOC: Asynchronous AEAD Request Handle | ||
375 | * | ||
376 | * The aead_request data structure contains all pointers to data required for | ||
377 | * the AEAD cipher operation. This includes the cipher handle (which can be | ||
378 | * used by multiple aead_request instances), pointer to plaintext and | ||
379 | * ciphertext, asynchronous callback function, etc. It acts as a handle to the | ||
380 | * aead_request_* API calls in a similar way as AEAD handle to the | ||
381 | * crypto_aead_* API calls. | ||
382 | */ | ||
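A sketch of the request lifecycle this handle supports, using the conventional completion pattern to wait synchronously; struct my_result and my_done() are hypothetical helpers, not part of this header, and associated data would additionally be attached via aead_request_set_assoc():

struct my_result {
	struct completion done;
	int err;
};

static void my_done(struct crypto_async_request *base, int err)
{
	struct my_result *res = base->data;

	if (err == -EINPROGRESS)	/* backlogged request made progress */
		return;
	res->err = err;
	complete(&res->done);
}

static int my_encrypt_sync(struct crypto_aead *tfm, struct scatterlist *src,
			   struct scatterlist *dst, unsigned int cryptlen,
			   u8 *iv)
{
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
	struct my_result res;
	int err;

	if (!req)
		return -ENOMEM;

	init_completion(&res.done);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  my_done, &res);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}

	aead_request_free(req);
	return err;
}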
383 | |||
384 | /** | ||
385 | * crypto_aead_reqsize() - obtain size of the request data structure | ||
386 | * @tfm: cipher handle | ||
387 | * | ||
388 | * Return: number of bytes | ||
389 | */ | ||
390 | unsigned int crypto_aead_reqsize(struct crypto_aead *tfm); | ||
391 | |||
392 | /** | ||
393 | * aead_request_set_tfm() - update cipher handle reference in request | ||
394 | * @req: request handle to be modified | ||
395 | * @tfm: cipher handle that shall be added to the request handle | ||
396 | * | ||
397 | * Allow the caller to replace the existing aead handle in the request | ||
398 | * data structure with a different one. | ||
399 | */ | ||
400 | static inline void aead_request_set_tfm(struct aead_request *req, | ||
401 | struct crypto_aead *tfm) | ||
402 | { | ||
403 | req->base.tfm = crypto_aead_tfm(tfm->child); | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * aead_request_alloc() - allocate request data structure | ||
408 | * @tfm: cipher handle to be registered with the request | ||
409 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
410 | * | ||
411 | * Allocate the request data structure that must be used with the AEAD | ||
412 | * encrypt and decrypt API calls. During the allocation, the provided aead | ||
413 | * handle is registered in the request data structure. | ||
414 | * | ||
415 | * Return: allocated request handle in case of success, or NULL in case | ||
416 | * of an error. | ||
417 | */ | ||
418 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | ||
419 | gfp_t gfp) | ||
420 | { | ||
421 | struct aead_request *req; | ||
422 | |||
423 | req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); | ||
424 | |||
425 | if (likely(req)) | ||
426 | aead_request_set_tfm(req, tfm); | ||
427 | |||
428 | return req; | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * aead_request_free() - zeroize and free request data structure | ||
433 | * @req: request data structure cipher handle to be freed | ||
434 | */ | ||
435 | static inline void aead_request_free(struct aead_request *req) | ||
436 | { | ||
437 | kzfree(req); | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * aead_request_set_callback() - set asynchronous callback function | ||
442 | * @req: request handle | ||
443 | * @flags: specify zero or an ORing of the flags | ||
444 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
445 | * increase the wait queue beyond the initial maximum size; | ||
446 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
447 | * @compl: callback function pointer to be registered with the request handle | ||
448 | * @data: The data pointer refers to memory that is not used by the kernel | ||
449 | * crypto API, but provided to the callback function for it to use. Here, | ||
450 | * the caller can provide a reference to memory the callback function can | ||
451 | * operate on. As the callback function is invoked asynchronously to the | ||
452 | * related functionality, it may need to access data structures of the | ||
453 | * related functionality which can be referenced using this pointer. The | ||
454 | * callback function can access the memory via the "data" field in the | ||
455 | * crypto_async_request data structure provided to the callback function. | ||
456 | * | ||
457 | * This function sets the callback that is triggered once the cipher | ||
458 | * operation completes. | ||
459 | * | ||
460 | * The callback function is registered with the aead_request handle and | ||
461 | * must comply with the following template | ||
462 | * | ||
463 | * void callback_function(struct crypto_async_request *req, int error) | ||
464 | */ | ||
465 | static inline void aead_request_set_callback(struct aead_request *req, | ||
466 | u32 flags, | ||
467 | crypto_completion_t compl, | ||
468 | void *data) | ||
469 | { | ||
470 | req->base.complete = compl; | ||
471 | req->base.data = data; | ||
472 | req->base.flags = flags; | ||
473 | } | ||
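A hypothetical callback matching that template, using the common wait-for-completion pattern; struct my_result and my_op_done are illustrative names, not part of the API:

    #include <linux/completion.h>
    #include <linux/crypto.h>

    struct my_result {
            struct completion completion;
            int err;
    };

    /* template: void callback_function(struct crypto_async_request *req, int error) */
    static void my_op_done(struct crypto_async_request *areq, int error)
    {
            struct my_result *res = areq->data;     /* the @data pointer set above */

            if (error == -EINPROGRESS)
                    return;                         /* backlogged request was queued */

            res->err = error;
            complete(&res->completion);
    }

The caller would pass a pointer to its struct my_result as @data and wait on the completion after receiving -EINPROGRESS or -EBUSY from the encrypt/decrypt call.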
474 | |||
475 | /** | ||
476 | * aead_request_set_crypt - set data buffers | ||
477 | * @req: request handle | ||
478 | * @src: source scatter / gather list | ||
479 | * @dst: destination scatter / gather list | ||
480 | * @cryptlen: number of bytes to process from @src | ||
481 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
482 | * by crypto_aead_ivsize() | ||
483 | * | ||
484 | * Setting the source data and destination data scatter / gather lists which | ||
485 | * hold the associated data concatenated with the plaintext or ciphertext. See | ||
486 | * below for the authentication tag. | ||
487 | * | ||
488 | * For encryption, the source is treated as the plaintext and the | ||
489 | * destination is the ciphertext. For a decryption operation, the use is | ||
490 | * reversed - the source is the ciphertext and the destination is the plaintext. | ||
491 | * | ||
492 | * For both src/dst the layout is associated data, plain/cipher text, | ||
493 | * authentication tag. | ||
494 | * | ||
495 | * The content of the AD in the destination buffer after processing | ||
496 | * will either be untouched, or it will contain a copy of the AD | ||
497 | * from the source buffer. In order to ensure that it always has | ||
498 | * a copy of the AD, the user must copy the AD over either before | ||
499 | * or after processing. Of course this is not relevant if the user | ||
500 | * is doing in-place processing where src == dst. | ||
501 | * | ||
502 | * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, | ||
503 | * the caller must concatenate the ciphertext followed by the | ||
504 | * authentication tag and provide the entire data stream to the | ||
505 | * decryption operation (i.e. the data length used for the | ||
506 | * initialization of the scatterlist and the data length for the | ||
507 | * decryption operation is identical). For encryption, however, | ||
508 | * the authentication tag is created while encrypting the data. | ||
509 | * The destination buffer must hold sufficient space for the | ||
510 | * ciphertext and the authentication tag while the encryption | ||
511 | * invocation must only point to the plaintext data size. The | ||
512 | * following code snippet illustrates the memory usage | ||
513 | * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL); | ||
514 | * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); | ||
515 | * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); | ||
516 | */ | ||
517 | static inline void aead_request_set_crypt(struct aead_request *req, | ||
518 | struct scatterlist *src, | ||
519 | struct scatterlist *dst, | ||
520 | unsigned int cryptlen, u8 *iv) | ||
521 | { | ||
522 | req->src = src; | ||
523 | req->dst = dst; | ||
524 | req->cryptlen = cryptlen; | ||
525 | req->iv = iv; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * aead_request_set_assoc() - set the associated data scatter / gather list | ||
530 | * @req: request handle | ||
531 | * @assoc: associated data scatter / gather list | ||
532 | * @assoclen: number of bytes to process from @assoc | ||
533 | * | ||
534 | * Obsolete, do not use. | ||
535 | */ | ||
536 | static inline void aead_request_set_assoc(struct aead_request *req, | ||
537 | struct scatterlist *assoc, | ||
538 | unsigned int assoclen) | ||
539 | { | ||
540 | req->assoc = assoc; | ||
541 | req->assoclen = assoclen; | ||
542 | req->old = true; | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * aead_request_set_ad - set associated data information | ||
547 | * @req: request handle | ||
548 | * @assoclen: number of bytes in associated data | ||
549 | * | ||
550 | * Setting the AD information. This function sets the length of | ||
551 | * the associated data. | ||
552 | */ | ||
553 | static inline void aead_request_set_ad(struct aead_request *req, | ||
554 | unsigned int assoclen) | ||
555 | { | ||
556 | req->assoclen = assoclen; | ||
557 | req->old = false; | ||
558 | } | ||
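Tying the request helpers above together, a hedged end-to-end encryption sketch with "gcm(aes)"; the single src == dst buffer laid out as AD || plaintext || 16 spare tag bytes, the NULL callback, the assumption of synchronous completion, and iv pointing at crypto_aead_ivsize() bytes are all simplifications of a real consumer:

    #include <crypto/aead.h>
    #include <linux/scatterlist.h>

    static int demo_aead_encrypt(const u8 *key, unsigned int keylen,
                                 u8 *buf, unsigned int assoclen,
                                 unsigned int ptlen, u8 *iv)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen);
            if (err)
                    goto out_tfm;
            err = crypto_aead_setauthsize(tfm, 16);
            if (err)
                    goto out_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_tfm;
            }

            /* buf holds AD || plaintext and has room for the 16 byte tag */
            sg_init_one(&sg, buf, assoclen + ptlen + 16);
            aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
            aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
            aead_request_set_ad(req, assoclen);

            err = crypto_aead_encrypt(req);   /* may also return -EINPROGRESS */

            aead_request_free(req);
    out_tfm:
            crypto_free_aead(tfm);
            return err;
    }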
559 | |||
33 | static inline struct crypto_aead *aead_givcrypt_reqtfm( | 560 | static inline struct crypto_aead *aead_givcrypt_reqtfm( |
34 | struct aead_givcrypt_request *req) | 561 | struct aead_givcrypt_request *req) |
35 | { | 562 | { |
@@ -38,14 +565,12 @@ static inline struct crypto_aead *aead_givcrypt_reqtfm( | |||
38 | 565 | ||
39 | static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req) | 566 | static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req) |
40 | { | 567 | { |
41 | struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); | 568 | return aead_givcrypt_reqtfm(req)->givencrypt(req); |
42 | return crt->givencrypt(req); | ||
43 | }; | 569 | }; |
44 | 570 | ||
45 | static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) | 571 | static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) |
46 | { | 572 | { |
47 | struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); | 573 | return aead_givcrypt_reqtfm(req)->givdecrypt(req); |
48 | return crt->givdecrypt(req); | ||
49 | }; | 574 | }; |
50 | 575 | ||
51 | static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, | 576 | static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, |
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h new file mode 100644 index 000000000000..69d163e39101 --- /dev/null +++ b/include/crypto/akcipher.h | |||
@@ -0,0 +1,340 @@ | |||
1 | /* | ||
2 | * Public Key Encryption | ||
3 | * | ||
4 | * Copyright (c) 2015, Intel Corporation | ||
5 | * Authors: Tadeusz Struk <tadeusz.struk@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef _CRYPTO_AKCIPHER_H | ||
14 | #define _CRYPTO_AKCIPHER_H | ||
15 | #include <linux/crypto.h> | ||
16 | |||
17 | /** | ||
18 | * struct akcipher_request - public key request | ||
19 | * | ||
20 | * @base: Common attributes for async crypto requests | ||
21 | * @src: Pointer to memory containing the input parameters | ||
22 | * The format of the parameter(s) is expected to be Octet String | ||
23 | * @dst: Pointer to memory where the result will be stored | ||
24 | * @src_len: Size of the input parameter | ||
25 | * @dst_len: Size of the output buffer. It needs to be at least | ||
26 | * as big as the expected result depending on the operation | ||
27 | * After the operation it will be updated with the actual size of the | ||
28 | * result. In case of error, where the dst_len was insufficient, | ||
29 | * it will be updated to the size required for the operation. | ||
30 | * @__ctx: Start of private context data | ||
31 | */ | ||
32 | struct akcipher_request { | ||
33 | struct crypto_async_request base; | ||
34 | void *src; | ||
35 | void *dst; | ||
36 | unsigned int src_len; | ||
37 | unsigned int dst_len; | ||
38 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
39 | }; | ||
40 | |||
41 | /** | ||
42 | * struct crypto_akcipher - user-instantiated objects which encapsulate | ||
43 | * algorithms and core processing logic | ||
44 | * | ||
45 | * @base: Common crypto API algorithm data structure | ||
46 | */ | ||
47 | struct crypto_akcipher { | ||
48 | struct crypto_tfm base; | ||
49 | }; | ||
50 | |||
51 | /** | ||
52 | * struct akcipher_alg - generic public key algorithm | ||
53 | * | ||
54 | * @sign: Function performs a sign operation as defined by public key | ||
55 | * algorithm. In case of error, where the dst_len was insufficient, | ||
56 | * the req->dst_len will be updated to the size required for the | ||
57 | * operation | ||
58 | * @verify: Function performs a verify operation as defined by public key | ||
59 | * algorithm. In case of error, where the dst_len was insufficient, | ||
60 | * the req->dst_len will be updated to the size required for the | ||
61 | * operation | ||
62 | * @encrypt: Function performs an encrypt operation as defined by public key | ||
63 | * algorithm. In case of error, where the dst_len was insufficient, | ||
64 | * the req->dst_len will be updated to the size required for the | ||
65 | * operation | ||
66 | * @decrypt: Function performs a decrypt operation as defined by public key | ||
67 | * algorithm. In case of error, where the dst_len was insufficient, | ||
68 | * the req->dst_len will be updated to the size required for the | ||
69 | * operation | ||
70 | * @setkey: Function invokes the algorithm specific set key function, which | ||
71 | * knows how to decode and interpret the BER encoded key | ||
72 | * @init: Initialize the cryptographic transformation object. | ||
73 | * This function is used to initialize the cryptographic | ||
74 | * transformation object. This function is called only once at | ||
75 | * the instantiation time, right after the transformation context | ||
76 | * was allocated. In case the cryptographic hardware has some | ||
77 | * special requirements which need to be handled by software, this | ||
78 | * function shall check for the precise requirement of the | ||
79 | * transformation and put any software fallbacks in place. | ||
80 | * @exit: Deinitialize the cryptographic transformation object. This is a | ||
81 | * counterpart to @init, used to remove various changes set in | ||
82 | * @init. | ||
83 | * | ||
84 | * @reqsize: Request context size required by algorithm implementation | ||
85 | * @base: Common crypto API algorithm data structure | ||
86 | */ | ||
87 | struct akcipher_alg { | ||
88 | int (*sign)(struct akcipher_request *req); | ||
89 | int (*verify)(struct akcipher_request *req); | ||
90 | int (*encrypt)(struct akcipher_request *req); | ||
91 | int (*decrypt)(struct akcipher_request *req); | ||
92 | int (*setkey)(struct crypto_akcipher *tfm, const void *key, | ||
93 | unsigned int keylen); | ||
94 | int (*init)(struct crypto_akcipher *tfm); | ||
95 | void (*exit)(struct crypto_akcipher *tfm); | ||
96 | |||
97 | unsigned int reqsize; | ||
98 | struct crypto_alg base; | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * DOC: Generic Public Key API | ||
103 | * | ||
104 | * The Public Key API is used with the algorithms of type | ||
105 | * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto) | ||
106 | */ | ||
107 | |||
108 | /** | ||
109 | * crypto_alloc_akcipher() -- allocate AKCIPHER tfm handle | ||
110 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
111 | * public key algorithm e.g. "rsa" | ||
112 | * @type: specifies the type of the algorithm | ||
113 | * @mask: specifies the mask for the algorithm | ||
114 | * | ||
115 | * Allocate a handle for public key algorithm. The returned struct | ||
116 | * crypto_akcipher is the handle that is required for any subsequent | ||
117 | * API invocation for the public key operations. | ||
118 | * | ||
119 | * Return: allocated handle in case of success; IS_ERR() is true in case | ||
120 | * of an error, PTR_ERR() returns the error code. | ||
121 | */ | ||
122 | struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, | ||
123 | u32 mask); | ||
124 | |||
125 | static inline struct crypto_tfm *crypto_akcipher_tfm( | ||
126 | struct crypto_akcipher *tfm) | ||
127 | { | ||
128 | return &tfm->base; | ||
129 | } | ||
130 | |||
131 | static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg) | ||
132 | { | ||
133 | return container_of(alg, struct akcipher_alg, base); | ||
134 | } | ||
135 | |||
136 | static inline struct crypto_akcipher *__crypto_akcipher_tfm( | ||
137 | struct crypto_tfm *tfm) | ||
138 | { | ||
139 | return container_of(tfm, struct crypto_akcipher, base); | ||
140 | } | ||
141 | |||
142 | static inline struct akcipher_alg *crypto_akcipher_alg( | ||
143 | struct crypto_akcipher *tfm) | ||
144 | { | ||
145 | return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg); | ||
146 | } | ||
147 | |||
148 | static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm) | ||
149 | { | ||
150 | return crypto_akcipher_alg(tfm)->reqsize; | ||
151 | } | ||
152 | |||
153 | static inline void akcipher_request_set_tfm(struct akcipher_request *req, | ||
154 | struct crypto_akcipher *tfm) | ||
155 | { | ||
156 | req->base.tfm = crypto_akcipher_tfm(tfm); | ||
157 | } | ||
158 | |||
159 | static inline struct crypto_akcipher *crypto_akcipher_reqtfm( | ||
160 | struct akcipher_request *req) | ||
161 | { | ||
162 | return __crypto_akcipher_tfm(req->base.tfm); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * crypto_free_akcipher() -- free AKCIPHER tfm handle | ||
167 | * | ||
168 | * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() | ||
169 | */ | ||
170 | static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) | ||
171 | { | ||
172 | crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm)); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * akcipher_request_alloc() -- allocates public key request | ||
177 | * | ||
178 | * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() | ||
179 | * @gfp: allocation flags | ||
180 | * | ||
181 | * Return: allocated handle in case of success or NULL in case of an error. | ||
182 | */ | ||
183 | static inline struct akcipher_request *akcipher_request_alloc( | ||
184 | struct crypto_akcipher *tfm, gfp_t gfp) | ||
185 | { | ||
186 | struct akcipher_request *req; | ||
187 | |||
188 | req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp); | ||
189 | if (likely(req)) | ||
190 | akcipher_request_set_tfm(req, tfm); | ||
191 | |||
192 | return req; | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * akcipher_request_free() -- zeroize and free public key request | ||
197 | * | ||
198 | * @req: request to free | ||
199 | */ | ||
200 | static inline void akcipher_request_free(struct akcipher_request *req) | ||
201 | { | ||
202 | kzfree(req); | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * akcipher_request_set_callback() -- Sets an asynchronous callback. | ||
207 | * | ||
208 | * Callback will be called when an asynchronous operation on a given | ||
209 | * request is finished. | ||
210 | * | ||
211 | * @req: request that the callback will be set for | ||
212 | * @flgs: specify for instance if the operation may backlog | ||
213 | * @cmpl: callback which will be called | ||
214 | * @data: private data used by the caller | ||
215 | */ | ||
216 | static inline void akcipher_request_set_callback(struct akcipher_request *req, | ||
217 | u32 flgs, | ||
218 | crypto_completion_t cmpl, | ||
219 | void *data) | ||
220 | { | ||
221 | req->base.complete = cmpl; | ||
222 | req->base.data = data; | ||
223 | req->base.flags = flgs; | ||
224 | } | ||
225 | |||
226 | /** | ||
227 | * akcipher_request_set_crypt() -- Sets request parameters | ||
228 | * | ||
229 | * Sets parameters required by crypto operation | ||
230 | * | ||
231 | * @req: public key request | ||
232 | * @src: ptr to input parameter | ||
233 | * @dst: ptr of output parameter | ||
234 | * @src_len: size of the input buffer | ||
235 | * @dst_len: size of the output buffer. It will be updated by the | ||
236 | * implementation to reflect the actual size of the result | ||
237 | */ | ||
238 | static inline void akcipher_request_set_crypt(struct akcipher_request *req, | ||
239 | void *src, void *dst, | ||
240 | unsigned int src_len, | ||
241 | unsigned int dst_len) | ||
242 | { | ||
243 | req->src = src; | ||
244 | req->dst = dst; | ||
245 | req->src_len = src_len; | ||
246 | req->dst_len = dst_len; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * crypto_akcipher_encrypt() -- Invoke public key encrypt operation | ||
251 | * | ||
252 | * Function invokes the specific public key encrypt operation for a given | ||
253 | * public key algorithm | ||
254 | * | ||
255 | * @req: asymmetric key request | ||
256 | * | ||
257 | * Return: zero on success; error code in case of error | ||
258 | */ | ||
259 | static inline int crypto_akcipher_encrypt(struct akcipher_request *req) | ||
260 | { | ||
261 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
262 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); | ||
263 | |||
264 | return alg->encrypt(req); | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * crypto_akcipher_decrypt() -- Invoke public key decrypt operation | ||
269 | * | ||
270 | * Function invokes the specific public key decrypt operation for a given | ||
271 | * public key algorithm | ||
272 | * | ||
273 | * @req: asymmetric key request | ||
274 | * | ||
275 | * Return: zero on success; error code in case of error | ||
276 | */ | ||
277 | static inline int crypto_akcipher_decrypt(struct akcipher_request *req) | ||
278 | { | ||
279 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
280 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); | ||
281 | |||
282 | return alg->decrypt(req); | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | * crypto_akcipher_sign() -- Invoke public key sign operation | ||
287 | * | ||
288 | * Function invokes the specific public key sign operation for a given | ||
289 | * public key algorithm | ||
290 | * | ||
291 | * @req: asymmetric key request | ||
292 | * | ||
293 | * Return: zero on success; error code in case of error | ||
294 | */ | ||
295 | static inline int crypto_akcipher_sign(struct akcipher_request *req) | ||
296 | { | ||
297 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
298 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); | ||
299 | |||
300 | return alg->sign(req); | ||
301 | } | ||
302 | |||
303 | /** | ||
304 | * crypto_akcipher_verify() -- Invoke public key verify operation | ||
305 | * | ||
306 | * Function invokes the specific public key verify operation for a given | ||
307 | * public key algorithm | ||
308 | * | ||
309 | * @req: asymmetric key request | ||
310 | * | ||
311 | * Return: zero on success; error code in case of error | ||
312 | */ | ||
313 | static inline int crypto_akcipher_verify(struct akcipher_request *req) | ||
314 | { | ||
315 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
316 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); | ||
317 | |||
318 | return alg->verify(req); | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * crypto_akcipher_setkey() -- Invoke public key setkey operation | ||
323 | * | ||
324 | * Function invokes the algorithm specific set key function, which knows | ||
325 | * how to decode and interpret the encoded key | ||
326 | * | ||
327 | * @tfm: tfm handle | ||
328 | * @key: BER encoded private or public key | ||
329 | * @keylen: length of the key | ||
330 | * | ||
331 | * Return: zero on success; error code in case of error | ||
332 | */ | ||
333 | static inline int crypto_akcipher_setkey(struct crypto_akcipher *tfm, void *key, | ||
334 | unsigned int keylen) | ||
335 | { | ||
336 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); | ||
337 | |||
338 | return alg->setkey(tfm, key, keylen); | ||
339 | } | ||
340 | #endif | ||
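As a sketch of how these pieces combine, a hedged RSA encryption flow; the BER key blob, the buffer sizes and the assumption of synchronous completion are simplifications (note that @key is not const in this version of crypto_akcipher_setkey()):

    #include <crypto/akcipher.h>

    static int demo_rsa_encrypt(void *ber_key, unsigned int keylen,
                                void *in, unsigned int in_len,
                                void *out, unsigned int out_len)
    {
            struct crypto_akcipher *tfm;
            struct akcipher_request *req;
            int err;

            tfm = crypto_alloc_akcipher("rsa", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_akcipher_setkey(tfm, ber_key, keylen);
            if (err)
                    goto out_tfm;

            req = akcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_tfm;
            }

            akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          NULL, NULL);
            akcipher_request_set_crypt(req, in, out, in_len, out_len);

            err = crypto_akcipher_encrypt(req);
            /* on success, req->dst_len holds the actual result size */

            akcipher_request_free(req);
    out_tfm:
            crypto_free_akcipher(tfm);
            return err;
    }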
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 0ecb7688af71..d4ebf6e9af6a 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | 19 | ||
20 | struct crypto_aead; | ||
20 | struct module; | 21 | struct module; |
21 | struct rtattr; | 22 | struct rtattr; |
22 | struct seq_file; | 23 | struct seq_file; |
@@ -126,7 +127,6 @@ struct ablkcipher_walk { | |||
126 | }; | 127 | }; |
127 | 128 | ||
128 | extern const struct crypto_type crypto_ablkcipher_type; | 129 | extern const struct crypto_type crypto_ablkcipher_type; |
129 | extern const struct crypto_type crypto_aead_type; | ||
130 | extern const struct crypto_type crypto_blkcipher_type; | 130 | extern const struct crypto_type crypto_blkcipher_type; |
131 | 131 | ||
132 | void crypto_mod_put(struct crypto_alg *alg); | 132 | void crypto_mod_put(struct crypto_alg *alg); |
@@ -144,6 +144,8 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | |||
144 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | 144 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, |
145 | struct crypto_instance *inst, | 145 | struct crypto_instance *inst, |
146 | const struct crypto_type *frontend); | 146 | const struct crypto_type *frontend); |
147 | int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, | ||
148 | u32 type, u32 mask); | ||
147 | 149 | ||
148 | void crypto_drop_spawn(struct crypto_spawn *spawn); | 150 | void crypto_drop_spawn(struct crypto_spawn *spawn); |
149 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | 151 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, |
@@ -239,22 +241,6 @@ static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) | |||
239 | return crypto_tfm_ctx_aligned(&tfm->base); | 241 | return crypto_tfm_ctx_aligned(&tfm->base); |
240 | } | 242 | } |
241 | 243 | ||
242 | static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) | ||
243 | { | ||
244 | return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead; | ||
245 | } | ||
246 | |||
247 | static inline void *crypto_aead_ctx(struct crypto_aead *tfm) | ||
248 | { | ||
249 | return crypto_tfm_ctx(&tfm->base); | ||
250 | } | ||
251 | |||
252 | static inline struct crypto_instance *crypto_aead_alg_instance( | ||
253 | struct crypto_aead *aead) | ||
254 | { | ||
255 | return crypto_tfm_alg_instance(&aead->base); | ||
256 | } | ||
257 | |||
258 | static inline struct crypto_blkcipher *crypto_spawn_blkcipher( | 244 | static inline struct crypto_blkcipher *crypto_spawn_blkcipher( |
259 | struct crypto_spawn *spawn) | 245 | struct crypto_spawn *spawn) |
260 | { | 246 | { |
@@ -363,21 +349,6 @@ static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, | |||
363 | return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); | 349 | return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); |
364 | } | 350 | } |
365 | 351 | ||
366 | static inline void *aead_request_ctx(struct aead_request *req) | ||
367 | { | ||
368 | return req->__ctx; | ||
369 | } | ||
370 | |||
371 | static inline void aead_request_complete(struct aead_request *req, int err) | ||
372 | { | ||
373 | req->base.complete(&req->base, err); | ||
374 | } | ||
375 | |||
376 | static inline u32 aead_request_flags(struct aead_request *req) | ||
377 | { | ||
378 | return req->base.flags; | ||
379 | } | ||
380 | |||
381 | static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, | 352 | static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, |
382 | u32 type, u32 mask) | 353 | u32 type, u32 mask) |
383 | { | 354 | { |
diff --git a/include/crypto/compress.h b/include/crypto/compress.h index 86163ef24219..5b67af834d83 100644 --- a/include/crypto/compress.h +++ b/include/crypto/compress.h | |||
@@ -55,14 +55,14 @@ struct crypto_pcomp { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | struct pcomp_alg { | 57 | struct pcomp_alg { |
58 | int (*compress_setup)(struct crypto_pcomp *tfm, void *params, | 58 | int (*compress_setup)(struct crypto_pcomp *tfm, const void *params, |
59 | unsigned int len); | 59 | unsigned int len); |
60 | int (*compress_init)(struct crypto_pcomp *tfm); | 60 | int (*compress_init)(struct crypto_pcomp *tfm); |
61 | int (*compress_update)(struct crypto_pcomp *tfm, | 61 | int (*compress_update)(struct crypto_pcomp *tfm, |
62 | struct comp_request *req); | 62 | struct comp_request *req); |
63 | int (*compress_final)(struct crypto_pcomp *tfm, | 63 | int (*compress_final)(struct crypto_pcomp *tfm, |
64 | struct comp_request *req); | 64 | struct comp_request *req); |
65 | int (*decompress_setup)(struct crypto_pcomp *tfm, void *params, | 65 | int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params, |
66 | unsigned int len); | 66 | unsigned int len); |
67 | int (*decompress_init)(struct crypto_pcomp *tfm); | 67 | int (*decompress_init)(struct crypto_pcomp *tfm); |
68 | int (*decompress_update)(struct crypto_pcomp *tfm, | 68 | int (*decompress_update)(struct crypto_pcomp *tfm, |
@@ -97,7 +97,7 @@ static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int crypto_compress_setup(struct crypto_pcomp *tfm, | 99 | static inline int crypto_compress_setup(struct crypto_pcomp *tfm, |
100 | void *params, unsigned int len) | 100 | const void *params, unsigned int len) |
101 | { | 101 | { |
102 | return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len); | 102 | return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len); |
103 | } | 103 | } |
@@ -120,7 +120,7 @@ static inline int crypto_compress_final(struct crypto_pcomp *tfm, | |||
120 | } | 120 | } |
121 | 121 | ||
122 | static inline int crypto_decompress_setup(struct crypto_pcomp *tfm, | 122 | static inline int crypto_decompress_setup(struct crypto_pcomp *tfm, |
123 | void *params, unsigned int len) | 123 | const void *params, unsigned int len) |
124 | { | 124 | { |
125 | return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len); | 125 | return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len); |
126 | } | 126 | } |
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index ba98918bbd9b..1547f540c920 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <crypto/aead.h> | ||
17 | #include <crypto/hash.h> | 18 | #include <crypto/hash.h> |
18 | 19 | ||
19 | struct cryptd_ablkcipher { | 20 | struct cryptd_ablkcipher { |
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 5186f750c713..9756c70899d8 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h | |||
@@ -49,8 +49,9 @@ | |||
49 | #include <crypto/internal/rng.h> | 49 | #include <crypto/internal/rng.h> |
50 | #include <crypto/rng.h> | 50 | #include <crypto/rng.h> |
51 | #include <linux/fips.h> | 51 | #include <linux/fips.h> |
52 | #include <linux/spinlock.h> | 52 | #include <linux/mutex.h> |
53 | #include <linux/list.h> | 53 | #include <linux/list.h> |
54 | #include <linux/workqueue.h> | ||
54 | 55 | ||
55 | /* | 56 | /* |
56 | * Concatenation Helper and string operation helper | 57 | * Concatenation Helper and string operation helper |
@@ -104,12 +105,13 @@ struct drbg_test_data { | |||
104 | }; | 105 | }; |
105 | 106 | ||
106 | struct drbg_state { | 107 | struct drbg_state { |
107 | spinlock_t drbg_lock; /* lock around DRBG */ | 108 | struct mutex drbg_mutex; /* lock around DRBG */ |
108 | unsigned char *V; /* internal state 10.1.1.1 1a) */ | 109 | unsigned char *V; /* internal state 10.1.1.1 1a) */ |
109 | /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ | 110 | /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ |
110 | unsigned char *C; | 111 | unsigned char *C; |
111 | /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ | 112 | /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ |
112 | size_t reseed_ctr; | 113 | size_t reseed_ctr; |
114 | size_t reseed_threshold; | ||
113 | /* some memory the DRBG can use for its operation */ | 115 | /* some memory the DRBG can use for its operation */ |
114 | unsigned char *scratchpad; | 116 | unsigned char *scratchpad; |
115 | void *priv_data; /* Cipher handle */ | 117 | void *priv_data; /* Cipher handle */ |
@@ -119,9 +121,12 @@ struct drbg_state { | |||
119 | bool fips_primed; /* Continuous test primed? */ | 121 | bool fips_primed; /* Continuous test primed? */ |
120 | unsigned char *prev; /* FIPS 140-2 continuous test value */ | 122 | unsigned char *prev; /* FIPS 140-2 continuous test value */ |
121 | #endif | 123 | #endif |
124 | struct work_struct seed_work; /* asynchronous seeding support */ | ||
125 | struct crypto_rng *jent; | ||
122 | const struct drbg_state_ops *d_ops; | 126 | const struct drbg_state_ops *d_ops; |
123 | const struct drbg_core *core; | 127 | const struct drbg_core *core; |
124 | struct drbg_test_data *test_data; | 128 | struct drbg_string test_data; |
129 | struct random_ready_callback random_ready; | ||
125 | }; | 130 | }; |
126 | 131 | ||
127 | static inline __u8 drbg_statelen(struct drbg_state *drbg) | 132 | static inline __u8 drbg_statelen(struct drbg_state *drbg) |
@@ -177,19 +182,8 @@ static inline size_t drbg_max_requests(struct drbg_state *drbg) | |||
177 | } | 182 | } |
178 | 183 | ||
179 | /* | 184 | /* |
180 | * kernel crypto API input data structure for DRBG generate in case dlen | ||
181 | * is set to 0 | ||
182 | */ | ||
183 | struct drbg_gen { | ||
184 | unsigned char *outbuf; /* output buffer for random numbers */ | ||
185 | unsigned int outlen; /* size of output buffer */ | ||
186 | struct drbg_string *addtl; /* additional information string */ | ||
187 | struct drbg_test_data *test_data; /* test data */ | ||
188 | }; | ||
189 | |||
190 | /* | ||
191 | * This is a wrapper to the kernel crypto API function of | 185 | * This is a wrapper to the kernel crypto API function of |
192 | * crypto_rng_get_bytes() to allow the caller to provide additional data. | 186 | * crypto_rng_generate() to allow the caller to provide additional data. |
193 | * | 187 | * |
194 | * @drng DRBG handle -- see crypto_rng_get_bytes | 188 | * @drng DRBG handle -- see crypto_rng_get_bytes |
195 | * @outbuf output buffer -- see crypto_rng_get_bytes | 189 | * @outbuf output buffer -- see crypto_rng_get_bytes |
@@ -204,21 +198,15 @@ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, | |||
204 | unsigned char *outbuf, unsigned int outlen, | 198 | unsigned char *outbuf, unsigned int outlen, |
205 | struct drbg_string *addtl) | 199 | struct drbg_string *addtl) |
206 | { | 200 | { |
207 | int ret; | 201 | return crypto_rng_generate(drng, addtl->buf, addtl->len, |
208 | struct drbg_gen genbuf; | 202 | outbuf, outlen); |
209 | genbuf.outbuf = outbuf; | ||
210 | genbuf.outlen = outlen; | ||
211 | genbuf.addtl = addtl; | ||
212 | genbuf.test_data = NULL; | ||
213 | ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0); | ||
214 | return ret; | ||
215 | } | 203 | } |
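A hedged consumer sketch of the rewritten wrapper; the "drbg_nopr_hmac_sha256" instantiation name and the additional-input string are illustrative:

    #include <crypto/drbg.h>

    static int demo_drbg_addtl(u8 *out, unsigned int outlen)
    {
            struct crypto_rng *drng;
            struct drbg_string addtl;
            static const u8 label[] = "context label";
            int err;

            drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
            if (IS_ERR(drng))
                    return PTR_ERR(drng);

            drbg_string_fill(&addtl, label, sizeof(label) - 1);
            err = crypto_drbg_get_bytes_addtl(drng, out, outlen, &addtl);

            crypto_free_rng(drng);
            return err;
    }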
216 | 204 | ||
217 | /* | 205 | /* |
218 | * TEST code | 206 | * TEST code |
219 | * | 207 | * |
220 | * This is a wrapper to the kernel crypto API function of | 208 | * This is a wrapper to the kernel crypto API function of |
221 | * crypto_rng_get_bytes() to allow the caller to provide additional data and | 209 | * crypto_rng_generate() to allow the caller to provide additional data and |
222 | * allow furnishing of test_data | 210 | * allow furnishing of test_data |
223 | * | 211 | * |
224 | * @drng DRBG handle -- see crypto_rng_get_bytes | 212 | * @drng DRBG handle -- see crypto_rng_get_bytes |
@@ -236,14 +224,10 @@ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, | |||
236 | struct drbg_string *addtl, | 224 | struct drbg_string *addtl, |
237 | struct drbg_test_data *test_data) | 225 | struct drbg_test_data *test_data) |
238 | { | 226 | { |
239 | int ret; | 227 | crypto_rng_set_entropy(drng, test_data->testentropy->buf, |
240 | struct drbg_gen genbuf; | 228 | test_data->testentropy->len); |
241 | genbuf.outbuf = outbuf; | 229 | return crypto_rng_generate(drng, addtl->buf, addtl->len, |
242 | genbuf.outlen = outlen; | 230 | outbuf, outlen); |
243 | genbuf.addtl = addtl; | ||
244 | genbuf.test_data = test_data; | ||
245 | ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0); | ||
246 | return ret; | ||
247 | } | 231 | } |
248 | 232 | ||
249 | /* | 233 | /* |
@@ -264,14 +248,9 @@ static inline int crypto_drbg_reset_test(struct crypto_rng *drng, | |||
264 | struct drbg_string *pers, | 248 | struct drbg_string *pers, |
265 | struct drbg_test_data *test_data) | 249 | struct drbg_test_data *test_data) |
266 | { | 250 | { |
267 | int ret; | 251 | crypto_rng_set_entropy(drng, test_data->testentropy->buf, |
268 | struct drbg_gen genbuf; | 252 | test_data->testentropy->len); |
269 | genbuf.outbuf = NULL; | 253 | return crypto_rng_reset(drng, pers->buf, pers->len); |
270 | genbuf.outlen = 0; | ||
271 | genbuf.addtl = pers; | ||
272 | genbuf.test_data = test_data; | ||
273 | ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0); | ||
274 | return ret; | ||
275 | } | 254 | } |
276 | 255 | ||
277 | /* DRBG type flags */ | 256 | /* DRBG type flags */ |
diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 98abda9ed3aa..57c8a6ee33c2 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h | |||
@@ -66,7 +66,7 @@ struct ahash_request { | |||
66 | /** | 66 | /** |
67 | * struct ahash_alg - asynchronous message digest definition | 67 | * struct ahash_alg - asynchronous message digest definition |
68 | * @init: Initialize the transformation context. Intended only to initialize the | 68 | * @init: Initialize the transformation context. Intended only to initialize the |
69 | * state of the HASH transformation at the begining. This shall fill in | 69 | * state of the HASH transformation at the beginning. This shall fill in |
70 | * the internal structures used during the entire duration of the whole | 70 | * the internal structures used during the entire duration of the whole |
71 | * transformation. No data processing happens at this point. | 71 | * transformation. No data processing happens at this point. |
72 | * @update: Push a chunk of data into the driver for transformation. This | 72 | * @update: Push a chunk of data into the driver for transformation. This |
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index 2eba340230a7..4b2547186519 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h | |||
@@ -15,16 +15,75 @@ | |||
15 | 15 | ||
16 | #include <crypto/aead.h> | 16 | #include <crypto/aead.h> |
17 | #include <crypto/algapi.h> | 17 | #include <crypto/algapi.h> |
18 | #include <linux/stddef.h> | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | 20 | ||
20 | struct rtattr; | 21 | struct rtattr; |
21 | 22 | ||
23 | struct aead_instance { | ||
24 | union { | ||
25 | struct { | ||
26 | char head[offsetof(struct aead_alg, base)]; | ||
27 | struct crypto_instance base; | ||
28 | } s; | ||
29 | struct aead_alg alg; | ||
30 | }; | ||
31 | }; | ||
32 | |||
22 | struct crypto_aead_spawn { | 33 | struct crypto_aead_spawn { |
23 | struct crypto_spawn base; | 34 | struct crypto_spawn base; |
24 | }; | 35 | }; |
25 | 36 | ||
37 | extern const struct crypto_type crypto_aead_type; | ||
26 | extern const struct crypto_type crypto_nivaead_type; | 38 | extern const struct crypto_type crypto_nivaead_type; |
27 | 39 | ||
40 | static inline void *crypto_aead_ctx(struct crypto_aead *tfm) | ||
41 | { | ||
42 | return crypto_tfm_ctx(&tfm->base); | ||
43 | } | ||
44 | |||
45 | static inline struct crypto_instance *crypto_aead_alg_instance( | ||
46 | struct crypto_aead *aead) | ||
47 | { | ||
48 | return crypto_tfm_alg_instance(&aead->base); | ||
49 | } | ||
50 | |||
51 | static inline struct crypto_instance *aead_crypto_instance( | ||
52 | struct aead_instance *inst) | ||
53 | { | ||
54 | return container_of(&inst->alg.base, struct crypto_instance, alg); | ||
55 | } | ||
56 | |||
57 | static inline struct aead_instance *aead_instance(struct crypto_instance *inst) | ||
58 | { | ||
59 | return container_of(&inst->alg, struct aead_instance, alg.base); | ||
60 | } | ||
61 | |||
62 | static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) | ||
63 | { | ||
64 | return aead_instance(crypto_aead_alg_instance(aead)); | ||
65 | } | ||
66 | |||
67 | static inline void *aead_instance_ctx(struct aead_instance *inst) | ||
68 | { | ||
69 | return crypto_instance_ctx(aead_crypto_instance(inst)); | ||
70 | } | ||
71 | |||
72 | static inline void *aead_request_ctx(struct aead_request *req) | ||
73 | { | ||
74 | return req->__ctx; | ||
75 | } | ||
76 | |||
77 | static inline void aead_request_complete(struct aead_request *req, int err) | ||
78 | { | ||
79 | req->base.complete(&req->base, err); | ||
80 | } | ||
81 | |||
82 | static inline u32 aead_request_flags(struct aead_request *req) | ||
83 | { | ||
84 | return req->base.flags; | ||
85 | } | ||
86 | |||
28 | static inline void crypto_set_aead_spawn( | 87 | static inline void crypto_set_aead_spawn( |
29 | struct crypto_aead_spawn *spawn, struct crypto_instance *inst) | 88 | struct crypto_aead_spawn *spawn, struct crypto_instance *inst) |
30 | { | 89 | { |
@@ -47,24 +106,27 @@ static inline struct crypto_alg *crypto_aead_spawn_alg( | |||
47 | return spawn->base.alg; | 106 | return spawn->base.alg; |
48 | } | 107 | } |
49 | 108 | ||
109 | static inline struct aead_alg *crypto_spawn_aead_alg( | ||
110 | struct crypto_aead_spawn *spawn) | ||
111 | { | ||
112 | return container_of(spawn->base.alg, struct aead_alg, base); | ||
113 | } | ||
114 | |||
50 | static inline struct crypto_aead *crypto_spawn_aead( | 115 | static inline struct crypto_aead *crypto_spawn_aead( |
51 | struct crypto_aead_spawn *spawn) | 116 | struct crypto_aead_spawn *spawn) |
52 | { | 117 | { |
53 | return __crypto_aead_cast( | 118 | return crypto_spawn_tfm2(&spawn->base); |
54 | crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD, | ||
55 | CRYPTO_ALG_TYPE_MASK)); | ||
56 | } | 119 | } |
57 | 120 | ||
58 | struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, | 121 | struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, |
59 | struct rtattr **tb, u32 type, | 122 | struct rtattr **tb, u32 type, u32 mask); |
60 | u32 mask); | 123 | void aead_geniv_free(struct aead_instance *inst); |
61 | void aead_geniv_free(struct crypto_instance *inst); | ||
62 | int aead_geniv_init(struct crypto_tfm *tfm); | 124 | int aead_geniv_init(struct crypto_tfm *tfm); |
63 | void aead_geniv_exit(struct crypto_tfm *tfm); | 125 | void aead_geniv_exit(struct crypto_tfm *tfm); |
64 | 126 | ||
65 | static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) | 127 | static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) |
66 | { | 128 | { |
67 | return crypto_aead_crt(geniv)->base; | 129 | return geniv->child; |
68 | } | 130 | } |
69 | 131 | ||
70 | static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) | 132 | static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) |
@@ -78,5 +140,29 @@ static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req, | |||
78 | aead_request_complete(&req->areq, err); | 140 | aead_request_complete(&req->areq, err); |
79 | } | 141 | } |
80 | 142 | ||
143 | static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, | ||
144 | unsigned int reqsize) | ||
145 | { | ||
146 | crypto_aead_crt(aead)->reqsize = reqsize; | ||
147 | } | ||
148 | |||
149 | static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) | ||
150 | { | ||
151 | return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize : | ||
152 | alg->maxauthsize; | ||
153 | } | ||
154 | |||
155 | static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) | ||
156 | { | ||
157 | return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); | ||
158 | } | ||
159 | |||
160 | int crypto_register_aead(struct aead_alg *alg); | ||
161 | void crypto_unregister_aead(struct aead_alg *alg); | ||
162 | int crypto_register_aeads(struct aead_alg *algs, int count); | ||
163 | void crypto_unregister_aeads(struct aead_alg *algs, int count); | ||
164 | int aead_register_instance(struct crypto_template *tmpl, | ||
165 | struct aead_instance *inst); | ||
166 | |||
81 | #endif /* _CRYPTO_INTERNAL_AEAD_H */ | 167 | #endif /* _CRYPTO_INTERNAL_AEAD_H */ |
82 | 168 | ||
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h new file mode 100644 index 000000000000..9a2bda15e454 --- /dev/null +++ b/include/crypto/internal/akcipher.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Public Key Encryption | ||
3 | * | ||
4 | * Copyright (c) 2015, Intel Corporation | ||
5 | * Authors: Tadeusz Struk <tadeusz.struk@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef _CRYPTO_AKCIPHER_INT_H | ||
14 | #define _CRYPTO_AKCIPHER_INT_H | ||
15 | #include <crypto/akcipher.h> | ||
16 | |||
17 | /* | ||
18 | * Transform internal helpers. | ||
19 | */ | ||
20 | static inline void *akcipher_request_ctx(struct akcipher_request *req) | ||
21 | { | ||
22 | return req->__ctx; | ||
23 | } | ||
24 | |||
25 | static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) | ||
26 | { | ||
27 | return tfm->base.__crt_ctx; | ||
28 | } | ||
29 | |||
30 | static inline void akcipher_request_complete(struct akcipher_request *req, | ||
31 | int err) | ||
32 | { | ||
33 | req->base.complete(&req->base, err); | ||
34 | } | ||
35 | |||
36 | static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) | ||
37 | { | ||
38 | return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; | ||
39 | } | ||
40 | |||
41 | /** | ||
42 | * crypto_register_akcipher() -- Register public key algorithm | ||
43 | * | ||
44 | * Function registers an implementation of a public key algorithm | ||
45 | * | ||
46 | * @alg: algorithm definition | ||
47 | * | ||
48 | * Return: zero on success; error code in case of error | ||
49 | */ | ||
50 | int crypto_register_akcipher(struct akcipher_alg *alg); | ||
51 | |||
52 | /** | ||
53 | * crypto_unregister_akcipher() -- Unregister public key algorithm | ||
54 | * | ||
55 | * Function unregisters an implementation of a public key algorithm | ||
56 | * | ||
57 | * @alg: algorithm definition | ||
58 | */ | ||
59 | void crypto_unregister_akcipher(struct akcipher_alg *alg); | ||
60 | #endif | ||
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h new file mode 100644 index 000000000000..9ca9b871aba5 --- /dev/null +++ b/include/crypto/internal/geniv.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * geniv: IV generation | ||
3 | * | ||
4 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the Free | ||
8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #ifndef _CRYPTO_INTERNAL_GENIV_H | ||
14 | #define _CRYPTO_INTERNAL_GENIV_H | ||
15 | |||
16 | #include <crypto/internal/aead.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | |||
19 | struct aead_geniv_ctx { | ||
20 | spinlock_t lock; | ||
21 | struct crypto_aead *child; | ||
22 | }; | ||
23 | |||
24 | #endif /* _CRYPTO_INTERNAL_GENIV_H */ | ||
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h index 896973369573..a52ef3483dd7 100644 --- a/include/crypto/internal/rng.h +++ b/include/crypto/internal/rng.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * RNG: Random Number Generator algorithms under the crypto API | 2 | * RNG: Random Number Generator algorithms under the crypto API |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> | 4 | * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> |
5 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free |
@@ -16,11 +17,29 @@ | |||
16 | #include <crypto/algapi.h> | 17 | #include <crypto/algapi.h> |
17 | #include <crypto/rng.h> | 18 | #include <crypto/rng.h> |
18 | 19 | ||
19 | extern const struct crypto_type crypto_rng_type; | 20 | int crypto_register_rng(struct rng_alg *alg); |
21 | void crypto_unregister_rng(struct rng_alg *alg); | ||
22 | int crypto_register_rngs(struct rng_alg *algs, int count); | ||
23 | void crypto_unregister_rngs(struct rng_alg *algs, int count); | ||
24 | |||
25 | #if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE) | ||
26 | int crypto_del_default_rng(void); | ||
27 | #else | ||
28 | static inline int crypto_del_default_rng(void) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | #endif | ||
20 | 33 | ||
21 | static inline void *crypto_rng_ctx(struct crypto_rng *tfm) | 34 | static inline void *crypto_rng_ctx(struct crypto_rng *tfm) |
22 | { | 35 | { |
23 | return crypto_tfm_ctx(&tfm->base); | 36 | return crypto_tfm_ctx(&tfm->base); |
24 | } | 37 | } |
25 | 38 | ||
39 | static inline void crypto_rng_set_entropy(struct crypto_rng *tfm, | ||
40 | const u8 *data, unsigned int len) | ||
41 | { | ||
42 | crypto_rng_alg(tfm)->set_ent(tfm, data, len); | ||
43 | } | ||
44 | |||
26 | #endif | 45 | #endif |
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h new file mode 100644 index 000000000000..a8c86365439f --- /dev/null +++ b/include/crypto/internal/rsa.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * RSA internal helpers | ||
3 | * | ||
4 | * Copyright (c) 2015, Intel Corporation | ||
5 | * Authors: Tadeusz Struk <tadeusz.struk@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef _RSA_HELPER_ | ||
14 | #define _RSA_HELPER_ | ||
15 | #include <linux/mpi.h> | ||
16 | |||
17 | struct rsa_key { | ||
18 | MPI n; | ||
19 | MPI e; | ||
20 | MPI d; | ||
21 | }; | ||
22 | |||
23 | int rsa_parse_key(struct rsa_key *rsa_key, const void *key, | ||
24 | unsigned int key_len); | ||
25 | |||
26 | void rsa_free_key(struct rsa_key *rsa_key); | ||
27 | #endif | ||
diff --git a/include/crypto/md5.h b/include/crypto/md5.h index 65f299b08b0d..146af825eedb 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h | |||
@@ -8,6 +8,11 @@ | |||
8 | #define MD5_BLOCK_WORDS 16 | 8 | #define MD5_BLOCK_WORDS 16 |
9 | #define MD5_HASH_WORDS 4 | 9 | #define MD5_HASH_WORDS 4 |
10 | 10 | ||
11 | #define MD5_H0 0x67452301UL | ||
12 | #define MD5_H1 0xefcdab89UL | ||
13 | #define MD5_H2 0x98badcfeUL | ||
14 | #define MD5_H3 0x10325476UL | ||
15 | |||
11 | struct md5_state { | 16 | struct md5_state { |
12 | u32 hash[MD5_HASH_WORDS]; | 17 | u32 hash[MD5_HASH_WORDS]; |
13 | u32 block[MD5_BLOCK_WORDS]; | 18 | u32 block[MD5_BLOCK_WORDS]; |
diff --git a/include/crypto/null.h b/include/crypto/null.h index b7c864cc70df..06dc30d9f56e 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h | |||
@@ -8,4 +8,7 @@ | |||
8 | #define NULL_DIGEST_SIZE 0 | 8 | #define NULL_DIGEST_SIZE 0 |
9 | #define NULL_IV_SIZE 0 | 9 | #define NULL_IV_SIZE 0 |
10 | 10 | ||
11 | struct crypto_blkcipher *crypto_get_default_null_skcipher(void); | ||
12 | void crypto_put_default_null_skcipher(void); | ||
13 | |||
11 | #endif | 14 | #endif |
diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 6e28ea5be9f1..b95ede354a66 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * RNG: Random Number Generator algorithms under the crypto API | 2 | * RNG: Random Number Generator algorithms under the crypto API |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> | 4 | * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> |
5 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free |
@@ -15,6 +16,50 @@ | |||
15 | 16 | ||
16 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
17 | 18 | ||
19 | struct crypto_rng; | ||
20 | |||
21 | /** | ||
22 | * struct rng_alg - random number generator definition | ||
23 | * | ||
24 | * @generate: The function defined by this variable obtains a | ||
25 | * random number. The random number generator transform | ||
26 | * must generate the random number out of the context | ||
27 | * provided with this call, plus any additional data | ||
28 | * if provided to the call. | ||
29 | * @seed: Seed or reseed the random number generator. With the | ||
30 | * invocation of this function call, the random number | ||
31 | * generator shall become ready for generation. If the | ||
32 | * random number generator requires a seed for setting | ||
33 | * up a new state, the seed must be provided by the | ||
34 | * consumer while invoking this function. The required | ||
35 | * size of the seed is defined with @seedsize. | ||
36 | * @set_ent: Set entropy that would otherwise be obtained from | ||
37 | * entropy source. Internal use only. | ||
38 | * @seedsize: The seed size required for a random number generator | ||
39 | * initialization is defined with this variable. Some | ||
40 | * random number generators do not require a seed | ||
41 | * as the seeding is implemented internally without | ||
42 | * the need of support by the consumer. In this case, | ||
43 | * the seed size is set to zero. | ||
44 | * @base: Common crypto API algorithm data structure. | ||
45 | */ | ||
46 | struct rng_alg { | ||
47 | int (*generate)(struct crypto_rng *tfm, | ||
48 | const u8 *src, unsigned int slen, | ||
49 | u8 *dst, unsigned int dlen); | ||
50 | int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); | ||
51 | void (*set_ent)(struct crypto_rng *tfm, const u8 *data, | ||
52 | unsigned int len); | ||
53 | |||
54 | unsigned int seedsize; | ||
55 | |||
56 | struct crypto_alg base; | ||
57 | }; | ||
58 | |||
59 | struct crypto_rng { | ||
60 | struct crypto_tfm base; | ||
61 | }; | ||
62 | |||
18 | extern struct crypto_rng *crypto_default_rng; | 63 | extern struct crypto_rng *crypto_default_rng; |
19 | 64 | ||
20 | int crypto_get_default_rng(void); | 65 | int crypto_get_default_rng(void); |
@@ -27,11 +72,6 @@ void crypto_put_default_rng(void); | |||
27 | * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) | 72 | * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) |
28 | */ | 73 | */ |
29 | 74 | ||
30 | static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) | ||
31 | { | ||
32 | return (struct crypto_rng *)tfm; | ||
33 | } | ||
34 | |||
35 | /** | 75 | /** |
36 | * crypto_alloc_rng() -- allocate RNG handle | 76 | * crypto_alloc_rng() -- allocate RNG handle |
37 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | 77 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the |
@@ -52,15 +92,7 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) | |||
52 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | 92 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case |
53 | * of an error, PTR_ERR() returns the error code. | 93 | * of an error, PTR_ERR() returns the error code. |
54 | */ | 94 | */ |
55 | static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name, | 95 | struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask); |
56 | u32 type, u32 mask) | ||
57 | { | ||
58 | type &= ~CRYPTO_ALG_TYPE_MASK; | ||
59 | type |= CRYPTO_ALG_TYPE_RNG; | ||
60 | mask |= CRYPTO_ALG_TYPE_MASK; | ||
61 | |||
62 | return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask)); | ||
63 | } | ||
64 | 96 | ||
65 | static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) | 97 | static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) |
66 | { | 98 | { |
@@ -77,12 +109,8 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) | |||
77 | */ | 109 | */ |
78 | static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) | 110 | static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) |
79 | { | 111 | { |
80 | return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; | 112 | return container_of(crypto_rng_tfm(tfm)->__crt_alg, |
81 | } | 113 | struct rng_alg, base); |
82 | |||
83 | static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm) | ||
84 | { | ||
85 | return &crypto_rng_tfm(tfm)->crt_rng; | ||
86 | } | 114 | } |
87 | 115 | ||
88 | /** | 116 | /** |
@@ -91,7 +119,28 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm) | |||
91 | */ | 119 | */ |
92 | static inline void crypto_free_rng(struct crypto_rng *tfm) | 120 | static inline void crypto_free_rng(struct crypto_rng *tfm) |
93 | { | 121 | { |
94 | crypto_free_tfm(crypto_rng_tfm(tfm)); | 122 | crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); |
123 | } | ||
124 | |||
125 | /** | ||
126 | * crypto_rng_generate() - get random number | ||
127 | * @tfm: cipher handle | ||
128 | * @src: Input buffer holding additional data, may be NULL | ||
129 | * @slen: Length of additional data | ||
130 | * @dst: output buffer holding the random numbers | ||
131 | * @dlen: length of the output buffer | ||
132 | * | ||
133 | * This function fills the caller-allocated buffer with random | ||
134 | * numbers using the random number generator referenced by the | ||
135 | * cipher handle. | ||
136 | * | ||
137 | * Return: 0 if the function was successful; < 0 if an error occurred | ||
138 | */ | ||
139 | static inline int crypto_rng_generate(struct crypto_rng *tfm, | ||
140 | const u8 *src, unsigned int slen, | ||
141 | u8 *dst, unsigned int dlen) | ||
142 | { | ||
143 | return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); | ||
95 | } | 144 | } |
96 | 145 | ||
97 | /** | 146 | /** |
@@ -108,7 +157,7 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) | |||
108 | static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, | 157 | static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, |
109 | u8 *rdata, unsigned int dlen) | 158 | u8 *rdata, unsigned int dlen) |
110 | { | 159 | { |
111 | return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen); | 160 | return crypto_rng_generate(tfm, NULL, 0, rdata, dlen); |
112 | } | 161 | } |
113 | 162 | ||
114 | /** | 163 | /** |
@@ -128,11 +177,8 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, | |||
128 | * | 177 | * |
129 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | 178 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred |
130 | */ | 179 | */ |
131 | static inline int crypto_rng_reset(struct crypto_rng *tfm, | 180 | int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, |
132 | u8 *seed, unsigned int slen) | 181 | unsigned int slen); |
133 | { | ||
134 | return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen); | ||
135 | } | ||
136 | 182 | ||
137 | /** | 183 | /** |
138 | * crypto_rng_seedsize() - obtain seed size of RNG | 184 | * crypto_rng_seedsize() - obtain seed size of RNG |
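Example (illustrative, not from the patch): the converted interface is driven entirely through rng_alg->generate, with crypto_rng_get_bytes() reduced to crypto_rng_generate() without additional input. A minimal consumer sketch, assuming the generic "stdrng" name resolves to the default RNG and that a NULL seed makes crypto_rng_reset() pull seed material from get_random_bytes(), both per this series:

#include <crypto/rng.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
        struct crypto_rng *rng;
        int err;

        rng = crypto_alloc_rng("stdrng", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* NULL seed: the core fills seedsize bytes from
         * get_random_bytes() before seeding the instance. */
        err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
        if (!err)
                err = crypto_rng_get_bytes(rng, buf, len);

        crypto_free_rng(rng);
        return err;
}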
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 20e4226a2e14..96670e7e7c14 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h | |||
@@ -102,4 +102,8 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | |||
102 | 102 | ||
103 | int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); | 103 | int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes); |
104 | 104 | ||
105 | struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], | ||
106 | struct scatterlist *src, | ||
107 | unsigned int len); | ||
108 | |||
105 | #endif /* _CRYPTO_SCATTERWALK_H */ | 109 | #endif /* _CRYPTO_SCATTERWALK_H */ |
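scatterwalk_ffwd() is the helper the rewritten AEAD paths use to skip over the associated data in a combined scatterlist. A hedged usage sketch ("req" stands for an aead_request set up by the caller; the two-entry dst array is scratch the helper may chain through, so it must outlive the returned pointer):

        struct scatterlist chain[2];
        struct scatterlist *text;

        /* Fast-forward past assoclen bytes of AD; "text" now starts
         * at the plain/cipher text, either inside the original list
         * or inside "chain". */
        text = scatterwalk_ffwd(chain, req->src, req->assoclen);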
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 10df5d2d093a..81ef938b0a8e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -53,6 +53,7 @@ | |||
53 | #define CRYPTO_ALG_TYPE_SHASH 0x00000009 | 53 | #define CRYPTO_ALG_TYPE_SHASH 0x00000009 |
54 | #define CRYPTO_ALG_TYPE_AHASH 0x0000000a | 54 | #define CRYPTO_ALG_TYPE_AHASH 0x0000000a |
55 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c | 55 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c |
56 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d | ||
56 | #define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f | 57 | #define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f |
57 | 58 | ||
58 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e | 59 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e |
@@ -101,6 +102,12 @@ | |||
101 | #define CRYPTO_ALG_INTERNAL 0x00002000 | 102 | #define CRYPTO_ALG_INTERNAL 0x00002000 |
102 | 103 | ||
103 | /* | 104 | /* |
105 | * Temporary flag used to prevent legacy AEAD implementations from | ||
106 | * being used by user-space. | ||
107 | */ | ||
108 | #define CRYPTO_ALG_AEAD_NEW 0x00004000 | ||
109 | |||
110 | /* | ||
104 | * Transform masks and values (for crt_flags). | 111 | * Transform masks and values (for crt_flags). |
105 | */ | 112 | */ |
106 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 | 113 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 |
@@ -138,9 +145,9 @@ struct crypto_async_request; | |||
138 | struct crypto_aead; | 145 | struct crypto_aead; |
139 | struct crypto_blkcipher; | 146 | struct crypto_blkcipher; |
140 | struct crypto_hash; | 147 | struct crypto_hash; |
141 | struct crypto_rng; | ||
142 | struct crypto_tfm; | 148 | struct crypto_tfm; |
143 | struct crypto_type; | 149 | struct crypto_type; |
150 | struct aead_request; | ||
144 | struct aead_givcrypt_request; | 151 | struct aead_givcrypt_request; |
145 | struct skcipher_givcrypt_request; | 152 | struct skcipher_givcrypt_request; |
146 | 153 | ||
@@ -175,32 +182,6 @@ struct ablkcipher_request { | |||
175 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | 182 | void *__ctx[] CRYPTO_MINALIGN_ATTR; |
176 | }; | 183 | }; |
177 | 184 | ||
178 | /** | ||
179 | * struct aead_request - AEAD request | ||
180 | * @base: Common attributes for async crypto requests | ||
181 | * @assoclen: Length in bytes of associated data for authentication | ||
182 | * @cryptlen: Length of data to be encrypted or decrypted | ||
183 | * @iv: Initialisation vector | ||
184 | * @assoc: Associated data | ||
185 | * @src: Source data | ||
186 | * @dst: Destination data | ||
187 | * @__ctx: Start of private context data | ||
188 | */ | ||
189 | struct aead_request { | ||
190 | struct crypto_async_request base; | ||
191 | |||
192 | unsigned int assoclen; | ||
193 | unsigned int cryptlen; | ||
194 | |||
195 | u8 *iv; | ||
196 | |||
197 | struct scatterlist *assoc; | ||
198 | struct scatterlist *src; | ||
199 | struct scatterlist *dst; | ||
200 | |||
201 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
202 | }; | ||
203 | |||
204 | struct blkcipher_desc { | 185 | struct blkcipher_desc { |
205 | struct crypto_blkcipher *tfm; | 186 | struct crypto_blkcipher *tfm; |
206 | void *info; | 187 | void *info; |
@@ -294,7 +275,7 @@ struct ablkcipher_alg { | |||
294 | }; | 275 | }; |
295 | 276 | ||
296 | /** | 277 | /** |
297 | * struct aead_alg - AEAD cipher definition | 278 | * struct old_aead_alg - AEAD cipher definition |
298 | * @maxauthsize: Set the maximum authentication tag size supported by the | 279 | * @maxauthsize: Set the maximum authentication tag size supported by the |
299 | * transformation. A transformation may support smaller tag sizes. | 280 | * transformation. A transformation may support smaller tag sizes. |
300 | * As the authentication tag is a message digest to ensure the | 281 | * As the authentication tag is a message digest to ensure the |
@@ -319,7 +300,7 @@ struct ablkcipher_alg { | |||
319 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | 300 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are |
320 | * mandatory and must be filled. | 301 | * mandatory and must be filled. |
321 | */ | 302 | */ |
322 | struct aead_alg { | 303 | struct old_aead_alg { |
323 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | 304 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, |
324 | unsigned int keylen); | 305 | unsigned int keylen); |
325 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); | 306 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); |
@@ -426,40 +407,12 @@ struct compress_alg { | |||
426 | unsigned int slen, u8 *dst, unsigned int *dlen); | 407 | unsigned int slen, u8 *dst, unsigned int *dlen); |
427 | }; | 408 | }; |
428 | 409 | ||
429 | /** | ||
430 | * struct rng_alg - random number generator definition | ||
431 | * @rng_make_random: The function defined by this variable obtains a random | ||
432 | * number. The random number generator transform must generate | ||
433 | * the random number out of the context provided with this | ||
434 | * call. | ||
435 | * @rng_reset: Reset of the random number generator by clearing the entire state. | ||
436 | * With the invocation of this function call, the random number | ||
437 | * generator shall completely reinitialize its state. If the random | ||
438 | * number generator requires a seed for setting up a new state, | ||
439 | * the seed must be provided by the consumer while invoking this | ||
440 | * function. The required size of the seed is defined with | ||
441 | * @seedsize . | ||
442 | * @seedsize: The seed size required for a random number generator | ||
443 | * initialization defined with this variable. Some random number | ||
444 | * generators like the SP800-90A DRBG do not require a seed as the | ||
445 | * seeding is implemented internally without the need for support by | ||
446 | * the consumer. In this case, the seed size is set to zero. | ||
447 | */ | ||
448 | struct rng_alg { | ||
449 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, | ||
450 | unsigned int dlen); | ||
451 | int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); | ||
452 | |||
453 | unsigned int seedsize; | ||
454 | }; | ||
455 | |||
456 | 410 | ||
457 | #define cra_ablkcipher cra_u.ablkcipher | 411 | #define cra_ablkcipher cra_u.ablkcipher |
458 | #define cra_aead cra_u.aead | 412 | #define cra_aead cra_u.aead |
459 | #define cra_blkcipher cra_u.blkcipher | 413 | #define cra_blkcipher cra_u.blkcipher |
460 | #define cra_cipher cra_u.cipher | 414 | #define cra_cipher cra_u.cipher |
461 | #define cra_compress cra_u.compress | 415 | #define cra_compress cra_u.compress |
462 | #define cra_rng cra_u.rng | ||
463 | 416 | ||
464 | /** | 417 | /** |
465 | * struct crypto_alg - definition of a cryptographic cipher algorithm | 418 | * struct crypto_alg - definition of a cryptographic cipher algorithm
@@ -505,7 +458,7 @@ struct rng_alg { | |||
505 | * transformation algorithm. | 458 | * transformation algorithm. |
506 | * @cra_type: Type of the cryptographic transformation. This is a pointer to | 459 | * @cra_type: Type of the cryptographic transformation. This is a pointer to |
507 | * struct crypto_type, which implements callbacks common for all | 460 | * struct crypto_type, which implements callbacks common for all |
508 | * trasnformation types. There are multiple options: | 461 | * transformation types. There are multiple options: |
509 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, | 462 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, |
510 | * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. | 463 | * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. |
511 | * This field might be empty. In that case, there are no common | 464 | * This field might be empty. In that case, there are no common |
@@ -555,11 +508,10 @@ struct crypto_alg { | |||
555 | 508 | ||
556 | union { | 509 | union { |
557 | struct ablkcipher_alg ablkcipher; | 510 | struct ablkcipher_alg ablkcipher; |
558 | struct aead_alg aead; | 511 | struct old_aead_alg aead; |
559 | struct blkcipher_alg blkcipher; | 512 | struct blkcipher_alg blkcipher; |
560 | struct cipher_alg cipher; | 513 | struct cipher_alg cipher; |
561 | struct compress_alg compress; | 514 | struct compress_alg compress; |
562 | struct rng_alg rng; | ||
563 | } cra_u; | 515 | } cra_u; |
564 | 516 | ||
565 | int (*cra_init)(struct crypto_tfm *tfm); | 517 | int (*cra_init)(struct crypto_tfm *tfm); |
@@ -567,7 +519,7 @@ struct crypto_alg { | |||
567 | void (*cra_destroy)(struct crypto_alg *alg); | 519 | void (*cra_destroy)(struct crypto_alg *alg); |
568 | 520 | ||
569 | struct module *cra_module; | 521 | struct module *cra_module; |
570 | }; | 522 | } CRYPTO_MINALIGN_ATTR; |
571 | 523 | ||
572 | /* | 524 | /* |
573 | * Algorithm registration interface. | 525 | * Algorithm registration interface. |
@@ -602,21 +554,6 @@ struct ablkcipher_tfm { | |||
602 | unsigned int reqsize; | 554 | unsigned int reqsize; |
603 | }; | 555 | }; |
604 | 556 | ||
605 | struct aead_tfm { | ||
606 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | ||
607 | unsigned int keylen); | ||
608 | int (*encrypt)(struct aead_request *req); | ||
609 | int (*decrypt)(struct aead_request *req); | ||
610 | int (*givencrypt)(struct aead_givcrypt_request *req); | ||
611 | int (*givdecrypt)(struct aead_givcrypt_request *req); | ||
612 | |||
613 | struct crypto_aead *base; | ||
614 | |||
615 | unsigned int ivsize; | ||
616 | unsigned int authsize; | ||
617 | unsigned int reqsize; | ||
618 | }; | ||
619 | |||
620 | struct blkcipher_tfm { | 557 | struct blkcipher_tfm { |
621 | void *iv; | 558 | void *iv; |
622 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, | 559 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, |
@@ -655,19 +592,11 @@ struct compress_tfm { | |||
655 | u8 *dst, unsigned int *dlen); | 592 | u8 *dst, unsigned int *dlen); |
656 | }; | 593 | }; |
657 | 594 | ||
658 | struct rng_tfm { | ||
659 | int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, | ||
660 | unsigned int dlen); | ||
661 | int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); | ||
662 | }; | ||
663 | |||
664 | #define crt_ablkcipher crt_u.ablkcipher | 595 | #define crt_ablkcipher crt_u.ablkcipher |
665 | #define crt_aead crt_u.aead | ||
666 | #define crt_blkcipher crt_u.blkcipher | 596 | #define crt_blkcipher crt_u.blkcipher |
667 | #define crt_cipher crt_u.cipher | 597 | #define crt_cipher crt_u.cipher |
668 | #define crt_hash crt_u.hash | 598 | #define crt_hash crt_u.hash |
669 | #define crt_compress crt_u.compress | 599 | #define crt_compress crt_u.compress |
670 | #define crt_rng crt_u.rng | ||
671 | 600 | ||
672 | struct crypto_tfm { | 601 | struct crypto_tfm { |
673 | 602 | ||
@@ -675,12 +604,10 @@ struct crypto_tfm { | |||
675 | 604 | ||
676 | union { | 605 | union { |
677 | struct ablkcipher_tfm ablkcipher; | 606 | struct ablkcipher_tfm ablkcipher; |
678 | struct aead_tfm aead; | ||
679 | struct blkcipher_tfm blkcipher; | 607 | struct blkcipher_tfm blkcipher; |
680 | struct cipher_tfm cipher; | 608 | struct cipher_tfm cipher; |
681 | struct hash_tfm hash; | 609 | struct hash_tfm hash; |
682 | struct compress_tfm compress; | 610 | struct compress_tfm compress; |
683 | struct rng_tfm rng; | ||
684 | } crt_u; | 611 | } crt_u; |
685 | 612 | ||
686 | void (*exit)(struct crypto_tfm *tfm); | 613 | void (*exit)(struct crypto_tfm *tfm); |
@@ -694,10 +621,6 @@ struct crypto_ablkcipher { | |||
694 | struct crypto_tfm base; | 621 | struct crypto_tfm base; |
695 | }; | 622 | }; |
696 | 623 | ||
697 | struct crypto_aead { | ||
698 | struct crypto_tfm base; | ||
699 | }; | ||
700 | |||
701 | struct crypto_blkcipher { | 624 | struct crypto_blkcipher { |
702 | struct crypto_tfm base; | 625 | struct crypto_tfm base; |
703 | }; | 626 | }; |
@@ -714,10 +637,6 @@ struct crypto_hash { | |||
714 | struct crypto_tfm base; | 637 | struct crypto_tfm base; |
715 | }; | 638 | }; |
716 | 639 | ||
717 | struct crypto_rng { | ||
718 | struct crypto_tfm base; | ||
719 | }; | ||
720 | |||
721 | enum { | 640 | enum { |
722 | CRYPTOA_UNSPEC, | 641 | CRYPTOA_UNSPEC, |
723 | CRYPTOA_ALG, | 642 | CRYPTOA_ALG, |
@@ -1194,400 +1113,6 @@ static inline void ablkcipher_request_set_crypt( | |||
1194 | } | 1113 | } |
1195 | 1114 | ||
1196 | /** | 1115 | /** |
1197 | * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API | ||
1198 | * | ||
1199 | * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD | ||
1200 | * (listed as type "aead" in /proc/crypto) | ||
1201 | * | ||
1202 | * The most prominent examples for this type of encryption are GCM and CCM. | ||
1203 | * However, the kernel supports other types of AEAD ciphers which are defined | ||
1204 | * with the following cipher string: | ||
1205 | * | ||
1206 | * authenc(keyed message digest, block cipher) | ||
1207 | * | ||
1208 | * For example: authenc(hmac(sha256), cbc(aes)) | ||
1209 | * | ||
1210 | * The example code provided for the asynchronous block cipher operation | ||
1211 | * applies here as well. Naturally all *ablkcipher* symbols must be exchanged | ||
1212 | * for the *aead* pendants discussed in the following. In addition, for the AEAD | ||
1213 | * operation, the aead_request_set_assoc function must be used to set the | ||
1214 | * pointer to the associated data memory location before performing the | ||
1215 | * encryption or decryption operation. For encryption, the associated data | ||
1216 | * is read as input and covered by the authentication tag. For decryption, | ||
1217 | * the same associated data must be supplied so that the integrity of the | ||
1218 | * decrypted data can be verified. Another deviation from the asynchronous | ||
1219 | * block cipher operation is that the caller should explicitly check for | ||
1220 | * -EBADMSG returned by crypto_aead_decrypt. That error indicates an | ||
1221 | * authentication failure, i.e. a breach in the integrity of the message. | ||
1222 | * In essence, that -EBADMSG error code is the key bonus an AEAD cipher | ||
1223 | * has over "standard" block chaining modes. | ||
1224 | */ | ||
1225 | |||
1226 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) | ||
1227 | { | ||
1228 | return (struct crypto_aead *)tfm; | ||
1229 | } | ||
1230 | |||
1231 | /** | ||
1232 | * crypto_alloc_aead() - allocate AEAD cipher handle | ||
1233 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1234 | * AEAD cipher | ||
1235 | * @type: specifies the type of the cipher | ||
1236 | * @mask: specifies the mask for the cipher | ||
1237 | * | ||
1238 | * Allocate a cipher handle for an AEAD. The returned struct | ||
1239 | * crypto_aead is the cipher handle that is required for any subsequent | ||
1240 | * API invocation for that AEAD. | ||
1241 | * | ||
1242 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1243 | * of an error, PTR_ERR() returns the error code. | ||
1244 | */ | ||
1245 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); | ||
1246 | |||
1247 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | ||
1248 | { | ||
1249 | return &tfm->base; | ||
1250 | } | ||
1251 | |||
1252 | /** | ||
1253 | * crypto_free_aead() - zeroize and free aead handle | ||
1254 | * @tfm: cipher handle to be freed | ||
1255 | */ | ||
1256 | static inline void crypto_free_aead(struct crypto_aead *tfm) | ||
1257 | { | ||
1258 | crypto_free_tfm(crypto_aead_tfm(tfm)); | ||
1259 | } | ||
1260 | |||
1261 | static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) | ||
1262 | { | ||
1263 | return &crypto_aead_tfm(tfm)->crt_aead; | ||
1264 | } | ||
1265 | |||
1266 | /** | ||
1267 | * crypto_aead_ivsize() - obtain IV size | ||
1268 | * @tfm: cipher handle | ||
1269 | * | ||
1270 | * The size of the IV for the aead referenced by the cipher handle is | ||
1271 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
1272 | * | ||
1273 | * Return: IV size in bytes | ||
1274 | */ | ||
1275 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) | ||
1276 | { | ||
1277 | return crypto_aead_crt(tfm)->ivsize; | ||
1278 | } | ||
1279 | |||
1280 | /** | ||
1281 | * crypto_aead_authsize() - obtain maximum authentication data size | ||
1282 | * @tfm: cipher handle | ||
1283 | * | ||
1284 | * The maximum size of the authentication data for the AEAD cipher referenced | ||
1285 | * by the AEAD cipher handle is returned. The authentication data size may be | ||
1286 | * zero if the cipher implements a hard-coded maximum. | ||
1287 | * | ||
1288 | * The authentication data may also be known as "tag value". | ||
1289 | * | ||
1290 | * Return: authentication data size / tag size in bytes | ||
1291 | */ | ||
1292 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) | ||
1293 | { | ||
1294 | return crypto_aead_crt(tfm)->authsize; | ||
1295 | } | ||
1296 | |||
1297 | /** | ||
1298 | * crypto_aead_blocksize() - obtain block size of cipher | ||
1299 | * @tfm: cipher handle | ||
1300 | * | ||
1301 | * The block size for the AEAD referenced with the cipher handle is returned. | ||
1302 | * The caller may use that information to allocate appropriate memory for the | ||
1303 | * data returned by the encryption or decryption operation. | ||
1304 | * | ||
1305 | * Return: block size of cipher | ||
1306 | */ | ||
1307 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) | ||
1308 | { | ||
1309 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); | ||
1310 | } | ||
1311 | |||
1312 | static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) | ||
1313 | { | ||
1314 | return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); | ||
1315 | } | ||
1316 | |||
1317 | static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) | ||
1318 | { | ||
1319 | return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); | ||
1320 | } | ||
1321 | |||
1322 | static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) | ||
1323 | { | ||
1324 | crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); | ||
1325 | } | ||
1326 | |||
1327 | static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) | ||
1328 | { | ||
1329 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); | ||
1330 | } | ||
1331 | |||
1332 | /** | ||
1333 | * crypto_aead_setkey() - set key for cipher | ||
1334 | * @tfm: cipher handle | ||
1335 | * @key: buffer holding the key | ||
1336 | * @keylen: length of the key in bytes | ||
1337 | * | ||
1338 | * The caller provided key is set for the AEAD referenced by the cipher | ||
1339 | * handle. | ||
1340 | * | ||
1341 | * Note, the key length determines the cipher type. Many block ciphers implement | ||
1342 | * different variants depending on the key size, such as AES-128 vs AES-192 | ||
1343 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
1344 | * is performed. | ||
1345 | * | ||
1346 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
1347 | */ | ||
1348 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | ||
1349 | unsigned int keylen) | ||
1350 | { | ||
1351 | struct aead_tfm *crt = crypto_aead_crt(tfm); | ||
1352 | |||
1353 | return crt->setkey(crt->base, key, keylen); | ||
1354 | } | ||
1355 | |||
1356 | /** | ||
1357 | * crypto_aead_setauthsize() - set authentication data size | ||
1358 | * @tfm: cipher handle | ||
1359 | * @authsize: size of the authentication data / tag in bytes | ||
1360 | * | ||
1361 | * Set the authentication data size / tag size. AEAD requires an authentication | ||
1362 | * tag (or MAC) in addition to the associated data. | ||
1363 | * | ||
1364 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
1365 | */ | ||
1366 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); | ||
1367 | |||
1368 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | ||
1369 | { | ||
1370 | return __crypto_aead_cast(req->base.tfm); | ||
1371 | } | ||
1372 | |||
1373 | /** | ||
1374 | * crypto_aead_encrypt() - encrypt plaintext | ||
1375 | * @req: reference to the aead_request handle that holds all information | ||
1376 | * needed to perform the cipher operation | ||
1377 | * | ||
1378 | * Encrypt plaintext data using the aead_request handle. That data structure | ||
1379 | * and how it is filled with data is discussed with the aead_request_* | ||
1380 | * functions. | ||
1381 | * | ||
1382 | * IMPORTANT NOTE The encryption operation creates the authentication data / | ||
1383 | * tag. That data is concatenated with the created ciphertext. | ||
1384 | * The ciphertext memory size is therefore the given number of | ||
1385 | * block cipher blocks + the size defined by the | ||
1386 | * crypto_aead_setauthsize invocation. The caller must ensure | ||
1387 | * that sufficient memory is available for the ciphertext and | ||
1388 | * the authentication tag. | ||
1389 | * | ||
1390 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1391 | */ | ||
1392 | static inline int crypto_aead_encrypt(struct aead_request *req) | ||
1393 | { | ||
1394 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); | ||
1395 | } | ||
1396 | |||
1397 | /** | ||
1398 | * crypto_aead_decrypt() - decrypt ciphertext | ||
1399 | * @req: reference to the aead_request handle that holds all information | ||
1400 | * needed to perform the cipher operation | ||
1401 | * | ||
1402 | * Decrypt ciphertext data using the aead_request handle. That data structure | ||
1403 | * and how it is filled with data is discussed with the aead_request_* | ||
1404 | * functions. | ||
1405 | * | ||
1406 | * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the | ||
1407 | * authentication data / tag. That authentication data / tag | ||
1408 | * must have the size defined by the crypto_aead_setauthsize | ||
1409 | * invocation. | ||
1410 | * | ||
1412 | * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD | ||
1413 | * cipher operation performs the authentication of the data during the | ||
1414 | * decryption operation. Therefore, the function returns this error if | ||
1415 | * the authentication of the ciphertext was unsuccessful (i.e. the | ||
1416 | * integrity of the ciphertext or the associated data was violated); | ||
1417 | * < 0 if an error occurred. | ||
1418 | */ | ||
1419 | static inline int crypto_aead_decrypt(struct aead_request *req) | ||
1420 | { | ||
1421 | if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req))) | ||
1422 | return -EINVAL; | ||
1423 | |||
1424 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); | ||
1425 | } | ||
1426 | |||
1427 | /** | ||
1428 | * DOC: Asynchronous AEAD Request Handle | ||
1429 | * | ||
1430 | * The aead_request data structure contains all pointers to data required for | ||
1431 | * the AEAD cipher operation. This includes the cipher handle (which can be | ||
1432 | * used by multiple aead_request instances), pointer to plaintext and | ||
1433 | * ciphertext, asynchronous callback function, etc. It acts as a handle to the | ||
1434 | * aead_request_* API calls in a similar way as AEAD handle to the | ||
1435 | * crypto_aead_* API calls. | ||
1436 | */ | ||
1437 | |||
1438 | /** | ||
1439 | * crypto_aead_reqsize() - obtain size of the request data structure | ||
1440 | * @tfm: cipher handle | ||
1441 | * | ||
1442 | * Return: number of bytes | ||
1443 | */ | ||
1444 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) | ||
1445 | { | ||
1446 | return crypto_aead_crt(tfm)->reqsize; | ||
1447 | } | ||
1448 | |||
1449 | /** | ||
1450 | * aead_request_set_tfm() - update cipher handle reference in request | ||
1451 | * @req: request handle to be modified | ||
1452 | * @tfm: cipher handle that shall be added to the request handle | ||
1453 | * | ||
1454 | * Allow the caller to replace the existing aead handle in the request | ||
1455 | * data structure with a different one. | ||
1456 | */ | ||
1457 | static inline void aead_request_set_tfm(struct aead_request *req, | ||
1458 | struct crypto_aead *tfm) | ||
1459 | { | ||
1460 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); | ||
1461 | } | ||
1462 | |||
1463 | /** | ||
1464 | * aead_request_alloc() - allocate request data structure | ||
1465 | * @tfm: cipher handle to be registered with the request | ||
1466 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
1467 | * | ||
1468 | * Allocate the request data structure that must be used with the AEAD | ||
1469 | * encrypt and decrypt API calls. During the allocation, the provided aead | ||
1470 | * handle is registered in the request data structure. | ||
1471 | * | ||
1472 | * Return: allocated request handle in case of success; IS_ERR() is true in case | ||
1473 | * of an error, PTR_ERR() returns the error code. | ||
1474 | */ | ||
1475 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | ||
1476 | gfp_t gfp) | ||
1477 | { | ||
1478 | struct aead_request *req; | ||
1479 | |||
1480 | req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); | ||
1481 | |||
1482 | if (likely(req)) | ||
1483 | aead_request_set_tfm(req, tfm); | ||
1484 | |||
1485 | return req; | ||
1486 | } | ||
1487 | |||
1488 | /** | ||
1489 | * aead_request_free() - zeroize and free request data structure | ||
1490 | * @req: request data structure cipher handle to be freed | ||
1491 | */ | ||
1492 | static inline void aead_request_free(struct aead_request *req) | ||
1493 | { | ||
1494 | kzfree(req); | ||
1495 | } | ||
1496 | |||
1497 | /** | ||
1498 | * aead_request_set_callback() - set asynchronous callback function | ||
1499 | * @req: request handle | ||
1500 | * @flags: specify zero or an ORing of the flags | ||
1501 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
1502 | * increase the wait queue beyond the initial maximum size; | ||
1503 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
1504 | * @compl: callback function pointer to be registered with the request handle | ||
1505 | * @data: The data pointer refers to memory that is not used by the kernel | ||
1506 | * crypto API, but provided to the callback function for it to use. Here, | ||
1507 | * the caller can provide a reference to memory the callback function can | ||
1508 | * operate on. As the callback function is invoked asynchronously to the | ||
1509 | * related functionality, it may need to access data structures of the | ||
1510 | * related functionality which can be referenced using this pointer. The | ||
1511 | * callback function can access the memory via the "data" field in the | ||
1512 | * crypto_async_request data structure provided to the callback function. | ||
1513 | * | ||
1514 | * This call sets the callback function that is triggered once the cipher | ||
1515 | * operation completes. | ||
1516 | * | ||
1517 | * The callback function is registered with the aead_request handle and | ||
1518 | * must comply with the following template | ||
1519 | * | ||
1520 | * void callback_function(struct crypto_async_request *req, int error) | ||
1521 | */ | ||
1522 | static inline void aead_request_set_callback(struct aead_request *req, | ||
1523 | u32 flags, | ||
1524 | crypto_completion_t compl, | ||
1525 | void *data) | ||
1526 | { | ||
1527 | req->base.complete = compl; | ||
1528 | req->base.data = data; | ||
1529 | req->base.flags = flags; | ||
1530 | } | ||
1531 | |||
1532 | /** | ||
1533 | * aead_request_set_crypt - set data buffers | ||
1534 | * @req: request handle | ||
1535 | * @src: source scatter / gather list | ||
1536 | * @dst: destination scatter / gather list | ||
1537 | * @cryptlen: number of bytes to process from @src | ||
1538 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
1539 | * by crypto_aead_ivsize() | ||
1540 | * | ||
1541 | * Setting the source data and destination data scatter / gather lists. | ||
1542 | * | ||
1543 | * For encryption, the source is treated as the plaintext and the | ||
1544 | * destination is the ciphertext. For a decryption operation, the use is | ||
1545 | * reversed - the source is the ciphertext and the destination is the plaintext. | ||
1546 | * | ||
1547 | * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, | ||
1548 | * the caller must concatenate the ciphertext followed by the | ||
1549 | * authentication tag and provide the entire data stream to the | ||
1550 | * decryption operation (i.e. the data length used for the | ||
1551 | * initialization of the scatterlist and the data length for the | ||
1552 | * decryption operation are identical). For encryption, however, | ||
1553 | * the authentication tag is created while encrypting the data. | ||
1554 | * The destination buffer must hold sufficient space for the | ||
1555 | * ciphertext and the authentication tag while the encryption | ||
1556 | * invocation must only point to the plaintext data size. The | ||
1557 | * following code snippet illustrates the memory usage | ||
1558 | * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL); | ||
1559 | * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); | ||
1560 | * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); | ||
1561 | */ | ||
1562 | static inline void aead_request_set_crypt(struct aead_request *req, | ||
1563 | struct scatterlist *src, | ||
1564 | struct scatterlist *dst, | ||
1565 | unsigned int cryptlen, u8 *iv) | ||
1566 | { | ||
1567 | req->src = src; | ||
1568 | req->dst = dst; | ||
1569 | req->cryptlen = cryptlen; | ||
1570 | req->iv = iv; | ||
1571 | } | ||
1572 | |||
1573 | /** | ||
1574 | * aead_request_set_assoc() - set the associated data scatter / gather list | ||
1575 | * @req: request handle | ||
1576 | * @assoc: associated data scatter / gather list | ||
1577 | * @assoclen: number of bytes to process from @assoc | ||
1578 | * | ||
1579 | * For encryption, the memory is filled with the associated data. For | ||
1580 | * decryption, the memory must point to the associated data. | ||
1581 | */ | ||
1582 | static inline void aead_request_set_assoc(struct aead_request *req, | ||
1583 | struct scatterlist *assoc, | ||
1584 | unsigned int assoclen) | ||
1585 | { | ||
1586 | req->assoc = assoc; | ||
1587 | req->assoclen = assoclen; | ||
1588 | } | ||
1589 | |||
1590 | /** | ||
1591 | * DOC: Synchronous Block Cipher API | 1116 | * DOC: Synchronous Block Cipher API |
1592 | * | 1117 | * |
1593 | * The synchronous block cipher API is used with the ciphers of type | 1118 | * The synchronous block cipher API is used with the ciphers of type |
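The DOC block deleted above describes the legacy AEAD calling convention. A hedged fragment showing that flow end to end, to make the -EBADMSG contract concrete (key, IV, and scatterlist setup plus allocation error checks are omitted; an asynchronous implementation can also return -EINPROGRESS/-EBUSY, which a real caller must handle):

        struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
        int err;

        crypto_aead_setkey(tfm, key, keylen);
        crypto_aead_setauthsize(tfm, 16);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        aead_request_set_assoc(req, &ad_sg, adlen);
        /* src holds ciphertext || tag; dst receives the plaintext */
        aead_request_set_crypt(req, &src_sg, &dst_sg, ctlen + 16, iv);

        err = crypto_aead_decrypt(req);
        if (err == -EBADMSG)
                pr_warn("authentication failed, discarding message\n");

        aead_request_free(req);
        crypto_free_aead(tfm);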
diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 611b69fa8594..1f7bc630d225 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h | |||
@@ -54,11 +54,16 @@ struct mbus_dram_target_info | |||
54 | */ | 54 | */ |
55 | #ifdef CONFIG_PLAT_ORION | 55 | #ifdef CONFIG_PLAT_ORION |
56 | extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); | 56 | extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); |
57 | extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void); | ||
57 | #else | 58 | #else |
58 | static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) | 59 | static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) |
59 | { | 60 | { |
60 | return NULL; | 61 | return NULL; |
61 | } | 62 | } |
63 | static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) | ||
64 | { | ||
65 | return NULL; | ||
66 | } | ||
62 | #endif | 67 | #endif |
63 | 68 | ||
64 | int mvebu_mbus_save_cpu_target(u32 *store_addr); | 69 | int mvebu_mbus_save_cpu_target(u32 *store_addr); |
diff --git a/include/linux/module.h b/include/linux/module.h index c883b86ea964..1e5436042eb0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -655,4 +655,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, | |||
655 | static inline void module_bug_cleanup(struct module *mod) {} | 655 | static inline void module_bug_cleanup(struct module *mod) {} |
656 | #endif /* CONFIG_GENERIC_BUG */ | 656 | #endif /* CONFIG_GENERIC_BUG */ |
657 | 657 | ||
658 | #ifdef CONFIG_MODULE_SIG | ||
659 | static inline bool module_sig_ok(struct module *module) | ||
660 | { | ||
661 | return module->sig_ok; | ||
662 | } | ||
663 | #else /* !CONFIG_MODULE_SIG */ | ||
664 | static inline bool module_sig_ok(struct module *module) | ||
665 | { | ||
666 | return true; | ||
667 | } | ||
668 | #endif /* CONFIG_MODULE_SIG */ | ||
669 | |||
658 | #endif /* _LINUX_MODULE_H */ | 670 | #endif /* _LINUX_MODULE_H */ |
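module_sig_ok() gives subsystems a CONFIG_MODULE_SIG-independent way to ask whether a module's signature verified (it is unconditionally true when signing is compiled out). A hedged sketch of the intended check; the demo function and error code are illustrative, not from the patch:

        static int demo_trust_module(struct module *mod)
        {
                /* Reject material from a module that failed signature
                 * verification; always passes without CONFIG_MODULE_SIG. */
                if (!module_sig_ok(mod))
                        return -EKEYREJECTED;
                return 0;
        }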
diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 5af1b81def49..641b7d6fd096 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h | |||
@@ -81,6 +81,8 @@ MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); | |||
81 | int mpi_fromstr(MPI val, const char *str); | 81 | int mpi_fromstr(MPI val, const char *str); |
82 | u32 mpi_get_keyid(MPI a, u32 *keyid); | 82 | u32 mpi_get_keyid(MPI a, u32 *keyid); |
83 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); | 83 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); |
84 | int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | ||
85 | int *sign); | ||
84 | void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); | 86 | void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); |
85 | int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); | 87 | int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); |
86 | 88 | ||
@@ -142,4 +144,17 @@ int mpi_rshift(MPI x, MPI a, unsigned n); | |||
142 | /*-- mpi-inv.c --*/ | 144 | /*-- mpi-inv.c --*/ |
143 | int mpi_invm(MPI x, MPI u, MPI v); | 145 | int mpi_invm(MPI x, MPI u, MPI v); |
144 | 146 | ||
147 | /* inline functions */ | ||
148 | |||
149 | /** | ||
150 | * mpi_get_size() - returns max size required to store the number | ||
151 | * | ||
152 | * @a: A multi-precision integer for which we want to allocate a buffer | ||
153 | * | ||
154 | * Return: size required to store the number | ||
155 | */ | ||
156 | static inline unsigned int mpi_get_size(MPI a) | ||
157 | { | ||
158 | return a->nlimbs * BYTES_PER_MPI_LIMB; | ||
159 | } | ||
145 | #endif /*G10_MPI_H */ | 160 | #endif /*G10_MPI_H */ |
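mpi_get_size() pairs naturally with the new mpi_read_buffer(): size the destination with the (upper-bound) limb count, then serialize into it. A hedged sketch with error handling trimmed:

        unsigned int buf_len = mpi_get_size(a); /* upper bound, not exact */
        unsigned int nbytes;
        int sign;
        u8 *buf = kmalloc(buf_len, GFP_KERNEL);

        if (buf && !mpi_read_buffer(a, buf, buf_len, &nbytes, &sign))
                pr_debug("MPI serialized to %u of at most %u bytes\n",
                         nbytes, buf_len);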
diff --git a/include/linux/nx842.h b/include/linux/nx842.h deleted file mode 100644 index a4d324c6406a..000000000000 --- a/include/linux/nx842.h +++ /dev/null | |||
@@ -1,11 +0,0 @@ | |||
1 | #ifndef __NX842_H__ | ||
2 | #define __NX842_H__ | ||
3 | |||
4 | int nx842_get_workmem_size(void); | ||
5 | int nx842_get_workmem_size_aligned(void); | ||
6 | int nx842_compress(const unsigned char *in, unsigned int in_len, | ||
7 | unsigned char *out, unsigned int *out_len, void *wrkmem); | ||
8 | int nx842_decompress(const unsigned char *in, unsigned int in_len, | ||
9 | unsigned char *out, unsigned int *out_len, void *wrkmem); | ||
10 | |||
11 | #endif | ||
diff --git a/include/linux/random.h b/include/linux/random.h index b05856e16b75..e651874df2c9 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
@@ -6,14 +6,23 @@ | |||
6 | #ifndef _LINUX_RANDOM_H | 6 | #ifndef _LINUX_RANDOM_H |
7 | #define _LINUX_RANDOM_H | 7 | #define _LINUX_RANDOM_H |
8 | 8 | ||
9 | #include <linux/list.h> | ||
9 | #include <uapi/linux/random.h> | 10 | #include <uapi/linux/random.h> |
10 | 11 | ||
12 | struct random_ready_callback { | ||
13 | struct list_head list; | ||
14 | void (*func)(struct random_ready_callback *rdy); | ||
15 | struct module *owner; | ||
16 | }; | ||
17 | |||
11 | extern void add_device_randomness(const void *, unsigned int); | 18 | extern void add_device_randomness(const void *, unsigned int); |
12 | extern void add_input_randomness(unsigned int type, unsigned int code, | 19 | extern void add_input_randomness(unsigned int type, unsigned int code, |
13 | unsigned int value); | 20 | unsigned int value); |
14 | extern void add_interrupt_randomness(int irq, int irq_flags); | 21 | extern void add_interrupt_randomness(int irq, int irq_flags); |
15 | 22 | ||
16 | extern void get_random_bytes(void *buf, int nbytes); | 23 | extern void get_random_bytes(void *buf, int nbytes); |
24 | extern int add_random_ready_callback(struct random_ready_callback *rdy); | ||
25 | extern void del_random_ready_callback(struct random_ready_callback *rdy); | ||
17 | extern void get_random_bytes_arch(void *buf, int nbytes); | 26 | extern void get_random_bytes_arch(void *buf, int nbytes); |
18 | void generate_random_uuid(unsigned char uuid_out[16]); | 27 | void generate_random_uuid(unsigned char uuid_out[16]); |
19 | extern int random_int_secret_init(void); | 28 | extern int random_int_secret_init(void); |
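The random-ready callback is what lets the DRBG defer reseeding until the nonblocking pool is actually initialized. A hedged consumer sketch; it assumes add_random_ready_callback() returns -EALREADY once the pool is ready (per this series), and the callback must be removed with del_random_ready_callback() if its owner unloads first:

        static void demo_random_ready(struct random_ready_callback *rdy)
        {
                /* Pool is fully seeded now; safe to (re)seed from it. */
        }

        static struct random_ready_callback demo_rdy = {
                .func  = demo_random_ready,
                .owner = THIS_MODULE,
        };

        static int demo_setup(void)
        {
                int err = add_random_ready_callback(&demo_rdy);

                if (err == -EALREADY)
                        return 0;       /* already seeded; proceed directly */
                return err;             /* 0: callback fires later */
        }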
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index ed8f9e70df9b..a0edb992c9c3 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -221,6 +221,7 @@ static inline void *sg_virt(struct scatterlist *sg) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | int sg_nents(struct scatterlist *sg); | 223 | int sg_nents(struct scatterlist *sg); |
224 | int sg_nents_for_len(struct scatterlist *sg, u64 len); | ||
224 | struct scatterlist *sg_next(struct scatterlist *); | 225 | struct scatterlist *sg_next(struct scatterlist *); |
225 | struct scatterlist *sg_last(struct scatterlist *s, unsigned int); | 226 | struct scatterlist *sg_last(struct scatterlist *s, unsigned int); |
226 | void sg_init_table(struct scatterlist *, unsigned int); | 227 | void sg_init_table(struct scatterlist *, unsigned int); |
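sg_nents_for_len() bounds the walk by a byte count instead of running to the end of the list as sg_nents() does. A short sketch of the contract (negative errno when the list is shorter than len):

        int n = sg_nents_for_len(sg, len);

        if (n < 0)
                return n;       /* list does not cover len bytes */
        /* the first n entries of "sg" cover the first len bytes */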
diff --git a/include/linux/sw842.h b/include/linux/sw842.h new file mode 100644 index 000000000000..109ba041c2ae --- /dev/null +++ b/include/linux/sw842.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef __SW842_H__ | ||
2 | #define __SW842_H__ | ||
3 | |||
4 | #define SW842_MEM_COMPRESS (0xf000) | ||
5 | |||
6 | int sw842_compress(const u8 *src, unsigned int srclen, | ||
7 | u8 *dst, unsigned int *destlen, void *wmem); | ||
8 | |||
9 | int sw842_decompress(const u8 *src, unsigned int srclen, | ||
10 | u8 *dst, unsigned int *destlen); | ||
11 | |||
12 | #endif | ||
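A hedged round trip through the new software 842 library; cbuf/dbuf and their sizes are caller-provided, and srclen must be a multiple of 8 unless the sw-only "short data" template (see lib/842/842.h below) is acceptable to the eventual decompressor:

        unsigned int clen = cbuf_size;  /* in: space available, out: bytes used */
        unsigned int dlen = dbuf_size;
        void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
        int err = wmem ? sw842_compress(src, srclen, cbuf, &clen, wmem) : -ENOMEM;

        if (!err)
                err = sw842_decompress(cbuf, clen, dbuf, &dlen);
        kfree(wmem);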
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 36ac102c97c7..f0ee97eec24d 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -168,6 +168,7 @@ struct xfrm_state { | |||
168 | struct xfrm_algo *ealg; | 168 | struct xfrm_algo *ealg; |
169 | struct xfrm_algo *calg; | 169 | struct xfrm_algo *calg; |
170 | struct xfrm_algo_aead *aead; | 170 | struct xfrm_algo_aead *aead; |
171 | const char *geniv; | ||
171 | 172 | ||
172 | /* Data for encapsulator */ | 173 | /* Data for encapsulator */ |
173 | struct xfrm_encap_tmpl *encap; | 174 | struct xfrm_encap_tmpl *encap; |
@@ -1314,6 +1315,7 @@ static inline int xfrm_id_proto_match(u8 proto, u8 userproto) | |||
1314 | * xfrm algorithm information | 1315 | * xfrm algorithm information |
1315 | */ | 1316 | */ |
1316 | struct xfrm_algo_aead_info { | 1317 | struct xfrm_algo_aead_info { |
1318 | char *geniv; | ||
1317 | u16 icv_truncbits; | 1319 | u16 icv_truncbits; |
1318 | }; | 1320 | }; |
1319 | 1321 | ||
@@ -1323,6 +1325,7 @@ struct xfrm_algo_auth_info { | |||
1323 | }; | 1325 | }; |
1324 | 1326 | ||
1325 | struct xfrm_algo_encr_info { | 1327 | struct xfrm_algo_encr_info { |
1328 | char *geniv; | ||
1326 | u16 blockbits; | 1329 | u16 blockbits; |
1327 | u16 defkeybits; | 1330 | u16 defkeybits; |
1328 | }; | 1331 | }; |
diff --git a/include/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 4abf2ea6a887..2e67bb64c1da 100644 --- a/include/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h | |||
@@ -25,6 +25,7 @@ enum { | |||
25 | CRYPTO_MSG_DELALG, | 25 | CRYPTO_MSG_DELALG, |
26 | CRYPTO_MSG_UPDATEALG, | 26 | CRYPTO_MSG_UPDATEALG, |
27 | CRYPTO_MSG_GETALG, | 27 | CRYPTO_MSG_GETALG, |
28 | CRYPTO_MSG_DELRNG, | ||
28 | __CRYPTO_MSG_MAX | 29 | __CRYPTO_MSG_MAX |
29 | }; | 30 | }; |
30 | #define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) | 31 | #define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) |
@@ -43,6 +44,7 @@ enum crypto_attr_type_t { | |||
43 | CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */ | 44 | CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */ |
44 | CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ | 45 | CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ |
45 | CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ | 46 | CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ |
47 | CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ | ||
46 | __CRYPTOCFGA_MAX | 48 | __CRYPTOCFGA_MAX |
47 | 49 | ||
48 | #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) | 50 | #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) |
@@ -101,5 +103,9 @@ struct crypto_report_rng { | |||
101 | unsigned int seedsize; | 103 | unsigned int seedsize; |
102 | }; | 104 | }; |
103 | 105 | ||
106 | struct crypto_report_akcipher { | ||
107 | char type[CRYPTO_MAX_NAME]; | ||
108 | }; | ||
109 | |||
104 | #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ | 110 | #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ |
105 | sizeof(struct crypto_report_blkcipher)) | 111 | sizeof(struct crypto_report_blkcipher)) |
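CRYPTO_MSG_DELRNG drops the instantiated default RNG so that, for example, a newly preferred implementation can be picked up on next use. A hedged userspace sketch against the now-uapi header; it assumes the message carries no payload beyond the netlink header and requires CAP_NET_ADMIN, and ack handling is trimmed:

        #include <sys/socket.h>
        #include <unistd.h>
        #include <linux/netlink.h>
        #include <linux/cryptouser.h>

        int del_default_rng(void)
        {
                struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
                struct nlmsghdr nlh = {
                        .nlmsg_len   = NLMSG_LENGTH(0),
                        .nlmsg_type  = CRYPTO_MSG_DELRNG,
                        .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
                };
                int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

                if (fd < 0 || sendto(fd, &nlh, nlh.nlmsg_len, 0,
                                     (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                        if (fd >= 0)
                                close(fd);
                        return -1;
                }
                close(fd);
                return 0;
        }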
diff --git a/lib/842/842.h b/lib/842/842.h new file mode 100644 index 000000000000..7c200030acf7 --- /dev/null +++ b/lib/842/842.h | |||
@@ -0,0 +1,127 @@ | |||
1 | |||
2 | #ifndef __842_H__ | ||
3 | #define __842_H__ | ||
4 | |||
5 | /* The 842 compressed format is made up of multiple blocks, each of | ||
6 | * which has the format: | ||
7 | * | ||
8 | * <template>[arg1][arg2][arg3][arg4] | ||
9 | * | ||
10 | * where there are between 0 and 4 template args, depending on the specific | ||
11 | * template operation. For normal operations, each arg is either a specific | ||
12 | * number of data bytes to add to the output buffer, or an index pointing | ||
13 | * to a previously-written number of data bytes to copy to the output buffer. | ||
14 | * | ||
15 | * The template code is a 5-bit value. This code indicates what to do with | ||
16 | * the following data. Template codes from 0 to 0x19 should use the template | ||
17 | * table, the static "decomp_ops" table used in decompress. For each template | ||
18 | * (table row), there are between 1 and 4 actions; each action corresponds to | ||
19 | * an arg following the template code bits. Each action is either a "data" | ||
20 | * type action or an "index" type action, and each action results in 2, 4, or 8 | ||
21 | * bytes being written to the output buffer. Each template (i.e. all actions | ||
22 | * in the table row) will add up to 8 bytes being written to the output buffer. | ||
23 | * Any row with less than 4 actions is padded with noop actions, indicated by | ||
24 | * N0 (for which there is no corresponding arg in the compressed data buffer). | ||
25 | * | ||
26 | * "Data" actions, indicated in the table by D2, D4, and D8, mean that the | ||
27 | * corresponding arg is 2, 4, or 8 bytes, respectively, of data in the | ||
28 | * compressed buffer that should be copied directly to the output buffer. | ||
29 | * | ||
30 | * "Index" actions, indicated in the table by I2, I4, and I8, mean the | ||
31 | * corresponding arg is an index parameter that points to, respectively, a 2, | ||
32 | * 4, or 8 byte value already in the output buffer, that should be copied to | ||
33 | * the end of the output buffer. Essentially, the index points to a position | ||
34 | * in a ring buffer that contains the last N bytes of output buffer data. | ||
35 | * The number of bits for each index's arg is: 8 bits for I2, 9 bits for I4, | ||
36 | * and 8 bits for I8. Since each index points to a 2, 4, or 8 byte section, | ||
37 | * this means that I2 can reference 512 bytes ((2^8 bits = 256) * 2 bytes), I4 | ||
38 | * can reference 2048 bytes ((2^9 = 512) * 4 bytes), and I8 can reference 2048 | ||
39 | * bytes ((2^8 = 256) * 8 bytes). Think of it as a kind-of ring buffer for | ||
40 | * each of I2, I4, and I8 that are updated for each byte written to the output | ||
41 | * buffer. In this implementation, the output buffer is directly used for each | ||
42 | * index; there is no additional memory required. Note that the index is into | ||
43 | * a ring buffer, not a sliding window; for example, if there have been 260 | ||
44 | * bytes written to the output buffer, an I2 index of 0 would index to byte 256 | ||
45 | * in the output buffer, while an I2 index of 16 would index to byte 16 in the | ||
46 | * output buffer. | ||
47 | * | ||
48 | * There are also 3 special template codes; 0x1b for "repeat", 0x1c for | ||
49 | * "zeros", and 0x1e for "end". The "repeat" operation is followed by a 6 bit | ||
50 | * arg N indicating how many times to repeat. The last 8 bytes written to the | ||
51 | * output buffer are written again to the output buffer, N + 1 times. The | ||
52 | * "zeros" operation, which has no arg bits, writes 8 zeros to the output | ||
53 | * buffer. The "end" operation, which also has no arg bits, signals the end | ||
54 | * of the compressed data. There may be some number of padding (don't care, | ||
55 | * but usually 0) bits after the "end" operation bits, to fill the buffer | ||
56 | * length to a specific byte multiple (usually a multiple of 8, 16, or 32 | ||
57 | * bytes). | ||
58 | * | ||
59 | * This software implementation also uses one of the undefined template values, | ||
60 | * 0x1d as a special "short data" template code, to represent less than 8 bytes | ||
61 | * of uncompressed data. It is followed by a 3 bit arg N indicating how many | ||
62 | * data bytes will follow, and then N bytes of data, which should be copied to | ||
63 | * the output buffer. This allows the software 842 compressor to accept input | ||
64 | * buffers that are not an exact multiple of 8 bytes long. However, those | ||
65 | * compressed buffers containing this sw-only template will be rejected by | ||
66 | * the 842 hardware decompressor, and must be decompressed with this software | ||
67 | * library. The 842 software compression module includes a parameter to | ||
68 | * disable using this sw-only "short data" template, and instead simply | ||
69 | * reject any input buffer that is not a multiple of 8 bytes long. | ||
70 | * | ||
71 | * After all actions for each operation code are processed, another template | ||
72 | * code is in the next 5 bits. The decompression ends once the "end" template | ||
73 | * code is detected. | ||
74 | */ | ||
75 | |||
76 | #include <linux/module.h> | ||
77 | #include <linux/kernel.h> | ||
78 | #include <linux/bitops.h> | ||
79 | #include <asm/unaligned.h> | ||
80 | |||
81 | #include <linux/sw842.h> | ||
82 | |||
83 | /* special templates */ | ||
84 | #define OP_REPEAT (0x1B) | ||
85 | #define OP_ZEROS (0x1C) | ||
86 | #define OP_END (0x1E) | ||
87 | |||
88 | /* sw only template - this is not in the hw design; it's used only by this | ||
89 | * software compressor and decompressor, to allow input buffers that aren't | ||
90 | * a multiple of 8. | ||
91 | */ | ||
92 | #define OP_SHORT_DATA (0x1D) | ||
93 | |||
94 | /* additional bits of each op param */ | ||
95 | #define OP_BITS (5) | ||
96 | #define REPEAT_BITS (6) | ||
97 | #define SHORT_DATA_BITS (3) | ||
98 | #define I2_BITS (8) | ||
99 | #define I4_BITS (9) | ||
100 | #define I8_BITS (8) | ||
101 | |||
102 | #define REPEAT_BITS_MAX (0x3f) | ||
103 | #define SHORT_DATA_BITS_MAX (0x7) | ||
104 | |||
105 | /* Arbitrary values used to indicate action */ | ||
106 | #define OP_ACTION (0x70) | ||
107 | #define OP_ACTION_INDEX (0x10) | ||
108 | #define OP_ACTION_DATA (0x20) | ||
109 | #define OP_ACTION_NOOP (0x40) | ||
110 | #define OP_AMOUNT (0x0f) | ||
111 | #define OP_AMOUNT_0 (0x00) | ||
112 | #define OP_AMOUNT_2 (0x02) | ||
113 | #define OP_AMOUNT_4 (0x04) | ||
114 | #define OP_AMOUNT_8 (0x08) | ||
115 | |||
116 | #define D2 (OP_ACTION_DATA | OP_AMOUNT_2) | ||
117 | #define D4 (OP_ACTION_DATA | OP_AMOUNT_4) | ||
118 | #define D8 (OP_ACTION_DATA | OP_AMOUNT_8) | ||
119 | #define I2 (OP_ACTION_INDEX | OP_AMOUNT_2) | ||
120 | #define I4 (OP_ACTION_INDEX | OP_AMOUNT_4) | ||
121 | #define I8 (OP_ACTION_INDEX | OP_AMOUNT_8) | ||
122 | #define N0 (OP_ACTION_NOOP | OP_AMOUNT_0) | ||
123 | |||
124 | /* the max of the regular templates - not including the special templates */ | ||
125 | #define OPS_MAX (0x1a) | ||
126 | |||
127 | #endif | ||
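To make the template-then-args layout described above concrete, a toy extractor for the next 5-bit template code from the big-endian bitstream, using OP_BITS from this header. Illustrative only: the in-kernel decompressor keeps a streaming cursor instead of recomputing byte/bit positions, and this toy assumes in[byte + 1] is readable, which the format's end padding permits:

        static u8 demo_next_template(const u8 *in, unsigned long bitpos)
        {
                unsigned long byte = bitpos / 8;
                unsigned int shift = bitpos % 8;
                u16 window = ((u16)in[byte] << 8) | in[byte + 1];

                /* Keep the OP_BITS bits starting "shift" bits into the window. */
                return (window >> (16 - OP_BITS - shift)) & 0x1f;
        }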
diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c new file mode 100644 index 000000000000..7ce68948e68c --- /dev/null +++ b/lib/842/842_compress.c | |||
@@ -0,0 +1,626 @@ | |||
1 | /* | ||
2 | * 842 Software Compression | ||
3 | * | ||
4 | * Copyright (C) 2015 Dan Streetman, IBM Corp | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * See 842.h for details of the 842 compressed format. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | #define MODULE_NAME "842_compress" | ||
21 | |||
22 | #include <linux/hashtable.h> | ||
23 | |||
24 | #include "842.h" | ||
25 | #include "842_debugfs.h" | ||
26 | |||
27 | #define SW842_HASHTABLE8_BITS (10) | ||
28 | #define SW842_HASHTABLE4_BITS (11) | ||
29 | #define SW842_HASHTABLE2_BITS (10) | ||
30 | |||
31 | /* By default, we allow compressing input buffers of any length, but we must | ||
32 | * use the non-standard "short data" template so the decompressor can correctly | ||
33 | * reproduce the uncompressed data buffer at the right length. However the | ||
34 | * hardware 842 compressor will not recognize the "short data" template, and | ||
35 | * will fail to decompress any compressed buffer containing it (I have no idea | ||
36 | * why anyone would want to use software to compress and hardware to decompress | ||
37 | * but that's beside the point). This parameter forces the compression | ||
38 | * function to simply reject any input buffer that isn't a multiple of 8 bytes | ||
39 | * long, instead of using the "short data" template, so that all compressed | ||
40 | * buffers produced by this function will be decompressible by the 842 hardware | ||
41 | * decompressor. Unless you have a specific need for that, leave this disabled | ||
42 | * so that any length buffer can be compressed. | ||
43 | */ | ||
44 | static bool sw842_strict; | ||
45 | module_param_named(strict, sw842_strict, bool, 0644); | ||
46 | |||
47 | static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */ | ||
48 | { I8, N0, N0, N0, 0x19 }, /* 8 */ | ||
49 | { I4, I4, N0, N0, 0x18 }, /* 18 */ | ||
50 | { I4, I2, I2, N0, 0x17 }, /* 25 */ | ||
51 | { I2, I2, I4, N0, 0x13 }, /* 25 */ | ||
52 | { I2, I2, I2, I2, 0x12 }, /* 32 */ | ||
53 | { I4, I2, D2, N0, 0x16 }, /* 33 */ | ||
54 | { I4, D2, I2, N0, 0x15 }, /* 33 */ | ||
55 | { I2, D2, I4, N0, 0x0e }, /* 33 */ | ||
56 | { D2, I2, I4, N0, 0x09 }, /* 33 */ | ||
57 | { I2, I2, I2, D2, 0x11 }, /* 40 */ | ||
58 | { I2, I2, D2, I2, 0x10 }, /* 40 */ | ||
59 | { I2, D2, I2, I2, 0x0d }, /* 40 */ | ||
60 | { D2, I2, I2, I2, 0x08 }, /* 40 */ | ||
61 | { I4, D4, N0, N0, 0x14 }, /* 41 */ | ||
62 | { D4, I4, N0, N0, 0x04 }, /* 41 */ | ||
63 | { I2, I2, D4, N0, 0x0f }, /* 48 */ | ||
64 | { I2, D2, I2, D2, 0x0c }, /* 48 */ | ||
65 | { I2, D4, I2, N0, 0x0b }, /* 48 */ | ||
66 | { D2, I2, I2, D2, 0x07 }, /* 48 */ | ||
67 | { D2, I2, D2, I2, 0x06 }, /* 48 */ | ||
68 | { D4, I2, I2, N0, 0x03 }, /* 48 */ | ||
69 | { I2, D2, D4, N0, 0x0a }, /* 56 */ | ||
70 | { D2, I2, D4, N0, 0x05 }, /* 56 */ | ||
71 | { D4, I2, D2, N0, 0x02 }, /* 56 */ | ||
72 | { D4, D2, I2, N0, 0x01 }, /* 56 */ | ||
73 | { D8, N0, N0, N0, 0x00 }, /* 64 */ | ||
74 | }; | ||
75 | |||
76 | struct sw842_hlist_node8 { | ||
77 | struct hlist_node node; | ||
78 | u64 data; | ||
79 | u8 index; | ||
80 | }; | ||
81 | |||
82 | struct sw842_hlist_node4 { | ||
83 | struct hlist_node node; | ||
84 | u32 data; | ||
85 | u16 index; | ||
86 | }; | ||
87 | |||
88 | struct sw842_hlist_node2 { | ||
89 | struct hlist_node node; | ||
90 | u16 data; | ||
91 | u8 index; | ||
92 | }; | ||
93 | |||
94 | #define INDEX_NOT_FOUND (-1) | ||
95 | #define INDEX_NOT_CHECKED (-2) | ||
96 | |||
97 | struct sw842_param { | ||
98 | u8 *in; | ||
99 | u8 *instart; | ||
100 | u64 ilen; | ||
101 | u8 *out; | ||
102 | u64 olen; | ||
103 | u8 bit; | ||
104 | u64 data8[1]; | ||
105 | u32 data4[2]; | ||
106 | u16 data2[4]; | ||
107 | int index8[1]; | ||
108 | int index4[2]; | ||
109 | int index2[4]; | ||
110 | DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS); | ||
111 | DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS); | ||
112 | DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS); | ||
113 | struct sw842_hlist_node8 node8[1 << I8_BITS]; | ||
114 | struct sw842_hlist_node4 node4[1 << I4_BITS]; | ||
115 | struct sw842_hlist_node2 node2[1 << I2_BITS]; | ||
116 | }; | ||
117 | |||
118 | #define get_input_data(p, o, b) \ | ||
119 | be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o)))) | ||
120 | |||
121 | #define init_hashtable_nodes(p, b) do { \ | ||
122 | int _i; \ | ||
123 | hash_init((p)->htable##b); \ | ||
124 | for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \ | ||
125 | (p)->node##b[_i].index = _i; \ | ||
126 | (p)->node##b[_i].data = 0; \ | ||
127 | INIT_HLIST_NODE(&(p)->node##b[_i].node); \ | ||
128 | } \ | ||
129 | } while (0) | ||
130 | |||
131 | #define find_index(p, b, n) ({ \ | ||
132 | struct sw842_hlist_node##b *_n; \ | ||
133 | p->index##b[n] = INDEX_NOT_FOUND; \ | ||
134 | hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ | ||
135 | if (p->data##b[n] == _n->data) { \ | ||
136 | p->index##b[n] = _n->index; \ | ||
137 | break; \ | ||
138 | } \ | ||
139 | } \ | ||
140 | p->index##b[n] >= 0; \ | ||
141 | }) | ||
142 | |||
143 | #define check_index(p, b, n) \ | ||
144 | ((p)->index##b[n] == INDEX_NOT_CHECKED \ | ||
145 | ? find_index(p, b, n) \ | ||
146 | : (p)->index##b[n] >= 0) | ||
147 | |||
148 | #define replace_hash(p, b, i, d) do { \ | ||
149 | struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \ | ||
150 | hash_del(&_n->node); \ | ||
151 | _n->data = (p)->data##b[d]; \ | ||
152 | pr_debug("add hash index%x %x pos %x data %lx\n", b, \ | ||
153 | (unsigned int)_n->index, \ | ||
154 | (unsigned int)((p)->in - (p)->instart), \ | ||
155 | (unsigned long)_n->data); \ | ||
156 | hash_add((p)->htable##b, &_n->node, _n->data); \ | ||
157 | } while (0) | ||
158 | |||
159 | static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; | ||
160 | |||
161 | static int add_bits(struct sw842_param *p, u64 d, u8 n); | ||
162 | |||
163 | static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) | ||
164 | { | ||
165 | int ret; | ||
166 | |||
167 | if (n <= s) | ||
168 | return -EINVAL; | ||
169 | |||
170 | ret = add_bits(p, d >> s, n - s); | ||
171 | if (ret) | ||
172 | return ret; | ||
173 | return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); | ||
174 | } | ||
175 | |||
176 | static int add_bits(struct sw842_param *p, u64 d, u8 n) | ||
177 | { | ||
178 | int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; | ||
179 | u64 o; | ||
180 | u8 *out = p->out; | ||
181 | |||
182 | pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); | ||
183 | |||
184 | if (n > 64) | ||
185 | return -EINVAL; | ||
186 | |||
187 | /* split this up if writing > 8 bytes (i.e. n == 64 && p->bit > 0), | ||
188 | * or if we're at the end of the output buffer and would write past the end | ||
189 | */ | ||
190 | if (bits > 64) | ||
191 | return __split_add_bits(p, d, n, 32); | ||
192 | else if (p->olen < 8 && bits > 32 && bits <= 56) | ||
193 | return __split_add_bits(p, d, n, 16); | ||
194 | else if (p->olen < 4 && bits > 16 && bits <= 24) | ||
195 | return __split_add_bits(p, d, n, 8); | ||
196 | |||
197 | if (DIV_ROUND_UP(bits, 8) > p->olen) | ||
198 | return -ENOSPC; | ||
199 | |||
200 | o = *out & bmask[b]; | ||
201 | d <<= s; | ||
202 | |||
203 | if (bits <= 8) | ||
204 | *out = o | d; | ||
205 | else if (bits <= 16) | ||
206 | put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out); | ||
207 | else if (bits <= 24) | ||
208 | put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out); | ||
209 | else if (bits <= 32) | ||
210 | put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out); | ||
211 | else if (bits <= 40) | ||
212 | put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out); | ||
213 | else if (bits <= 48) | ||
214 | put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out); | ||
215 | else if (bits <= 56) | ||
216 | put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out); | ||
217 | else | ||
218 | put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out); | ||
219 | |||
220 | p->bit += n; | ||
221 | |||
222 | if (p->bit > 7) { | ||
223 | p->out += p->bit / 8; | ||
224 | p->olen -= p->bit / 8; | ||
225 | p->bit %= 8; | ||
226 | } | ||
227 | |||
228 | return 0; | ||
229 | } | ||
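For readers new to the bit-packing convention, here is a minimal bit-at-a-time model of what add_bits() does, assuming a zero-initialized output buffer; it trades the word-sized writes above for clarity:

	struct bitbuf {
		u8 *out;
		unsigned int bit;	/* bits already used in *out, 0..7 */
	};

	static void put_bits_slow(struct bitbuf *b, u64 d, unsigned int n)
	{
		while (n--) {
			if (d & (1ULL << n))		/* append MSB-first */
				*b->out |= 0x80 >> b->bit;
			if (++b->bit == 8) {
				b->bit = 0;
				b->out++;
			}
		}
	}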
230 | |||
231 | static int add_template(struct sw842_param *p, u8 c) | ||
232 | { | ||
233 | int ret, i, b = 0; | ||
234 | u8 *t = comp_ops[c]; | ||
235 | bool inv = false; | ||
236 | |||
237 | if (c >= OPS_MAX) | ||
238 | return -EINVAL; | ||
239 | |||
240 | pr_debug("template %x\n", t[4]); | ||
241 | |||
242 | ret = add_bits(p, t[4], OP_BITS); | ||
243 | if (ret) | ||
244 | return ret; | ||
245 | |||
246 | for (i = 0; i < 4; i++) { | ||
247 | pr_debug("op %x\n", t[i]); | ||
248 | |||
249 | switch (t[i] & OP_AMOUNT) { | ||
250 | case OP_AMOUNT_8: | ||
251 | if (b) | ||
252 | inv = true; | ||
253 | else if (t[i] & OP_ACTION_INDEX) | ||
254 | ret = add_bits(p, p->index8[0], I8_BITS); | ||
255 | else if (t[i] & OP_ACTION_DATA) | ||
256 | ret = add_bits(p, p->data8[0], 64); | ||
257 | else | ||
258 | inv = true; | ||
259 | break; | ||
260 | case OP_AMOUNT_4: | ||
261 | if (b == 2 && t[i] & OP_ACTION_DATA) | ||
262 | ret = add_bits(p, get_input_data(p, 2, 32), 32); | ||
263 | else if (b != 0 && b != 4) | ||
264 | inv = true; | ||
265 | else if (t[i] & OP_ACTION_INDEX) | ||
266 | ret = add_bits(p, p->index4[b >> 2], I4_BITS); | ||
267 | else if (t[i] & OP_ACTION_DATA) | ||
268 | ret = add_bits(p, p->data4[b >> 2], 32); | ||
269 | else | ||
270 | inv = true; | ||
271 | break; | ||
272 | case OP_AMOUNT_2: | ||
273 | if (b != 0 && b != 2 && b != 4 && b != 6) | ||
274 | inv = true; | ||
275 | else if (t[i] & OP_ACTION_INDEX) | ||
276 | ret = add_bits(p, p->index2[b >> 1], I2_BITS); | ||
277 | else if (t[i] & OP_ACTION_DATA) | ||
278 | ret = add_bits(p, p->data2[b >> 1], 16); | ||
279 | else | ||
280 | inv = true; | ||
281 | break; | ||
282 | case OP_AMOUNT_0: | ||
283 | inv = (b != 8) || !(t[i] & OP_ACTION_NOOP); | ||
284 | break; | ||
285 | default: | ||
286 | inv = true; | ||
287 | break; | ||
288 | } | ||
289 | |||
290 | if (ret) | ||
291 | return ret; | ||
292 | |||
293 | if (inv) { | ||
294 | pr_err("Invalid templ %x op %d : %x %x %x %x\n", | ||
295 | c, i, t[0], t[1], t[2], t[3]); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | |||
299 | b += t[i] & OP_AMOUNT; | ||
300 | } | ||
301 | |||
302 | if (b != 8) { | ||
303 | pr_err("Invalid template %x len %x : %x %x %x %x\n", | ||
304 | c, b, t[0], t[1], t[2], t[3]); | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | |||
308 | if (sw842_template_counts) | ||
309 | atomic_inc(&template_count[t[4]]); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static int add_repeat_template(struct sw842_param *p, u8 r) | ||
315 | { | ||
316 | int ret; | ||
317 | |||
318 | /* repeat param is 0-based */ | ||
319 | if (!r || --r > REPEAT_BITS_MAX) | ||
320 | return -EINVAL; | ||
321 | |||
322 | ret = add_bits(p, OP_REPEAT, OP_BITS); | ||
323 | if (ret) | ||
324 | return ret; | ||
325 | |||
326 | ret = add_bits(p, r, REPEAT_BITS); | ||
327 | if (ret) | ||
328 | return ret; | ||
329 | |||
330 | if (sw842_template_counts) | ||
331 | atomic_inc(&template_repeat_count); | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | static int add_short_data_template(struct sw842_param *p, u8 b) | ||
337 | { | ||
338 | int ret, i; | ||
339 | |||
340 | if (!b || b > SHORT_DATA_BITS_MAX) | ||
341 | return -EINVAL; | ||
342 | |||
343 | ret = add_bits(p, OP_SHORT_DATA, OP_BITS); | ||
344 | if (ret) | ||
345 | return ret; | ||
346 | |||
347 | ret = add_bits(p, b, SHORT_DATA_BITS); | ||
348 | if (ret) | ||
349 | return ret; | ||
350 | |||
351 | for (i = 0; i < b; i++) { | ||
352 | ret = add_bits(p, p->in[i], 8); | ||
353 | if (ret) | ||
354 | return ret; | ||
355 | } | ||
356 | |||
357 | if (sw842_template_counts) | ||
358 | atomic_inc(&template_short_data_count); | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static int add_zeros_template(struct sw842_param *p) | ||
364 | { | ||
365 | int ret = add_bits(p, OP_ZEROS, OP_BITS); | ||
366 | |||
367 | if (ret) | ||
368 | return ret; | ||
369 | |||
370 | if (sw842_template_counts) | ||
371 | atomic_inc(&template_zeros_count); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | static int add_end_template(struct sw842_param *p) | ||
377 | { | ||
378 | int ret = add_bits(p, OP_END, OP_BITS); | ||
379 | |||
380 | if (ret) | ||
381 | return ret; | ||
382 | |||
383 | if (sw842_template_counts) | ||
384 | atomic_inc(&template_end_count); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static bool check_template(struct sw842_param *p, u8 c) | ||
390 | { | ||
391 | u8 *t = comp_ops[c]; | ||
392 | int i, match, b = 0; | ||
393 | |||
394 | if (c >= OPS_MAX) | ||
395 | return false; | ||
396 | |||
397 | for (i = 0; i < 4; i++) { | ||
398 | if (t[i] & OP_ACTION_INDEX) { | ||
399 | if (t[i] & OP_AMOUNT_2) | ||
400 | match = check_index(p, 2, b >> 1); | ||
401 | else if (t[i] & OP_AMOUNT_4) | ||
402 | match = check_index(p, 4, b >> 2); | ||
403 | else if (t[i] & OP_AMOUNT_8) | ||
404 | match = check_index(p, 8, 0); | ||
405 | else | ||
406 | return false; | ||
407 | if (!match) | ||
408 | return false; | ||
409 | } | ||
410 | |||
411 | b += t[i] & OP_AMOUNT; | ||
412 | } | ||
413 | |||
414 | return true; | ||
415 | } | ||
416 | |||
417 | static void get_next_data(struct sw842_param *p) | ||
418 | { | ||
419 | p->data8[0] = get_input_data(p, 0, 64); | ||
420 | p->data4[0] = get_input_data(p, 0, 32); | ||
421 | p->data4[1] = get_input_data(p, 4, 32); | ||
422 | p->data2[0] = get_input_data(p, 0, 16); | ||
423 | p->data2[1] = get_input_data(p, 2, 16); | ||
424 | p->data2[2] = get_input_data(p, 4, 16); | ||
425 | p->data2[3] = get_input_data(p, 6, 16); | ||
426 | } | ||
427 | |||
428 | /* update the hashtable entries. | ||
429 | * only call this after finding/adding the current template | ||
430 | * the dataN fields for the current 8 byte block must be already updated | ||
431 | */ | ||
432 | static void update_hashtables(struct sw842_param *p) | ||
433 | { | ||
434 | u64 pos = p->in - p->instart; | ||
435 | u64 n8 = (pos >> 3) % (1 << I8_BITS); | ||
436 | u64 n4 = (pos >> 2) % (1 << I4_BITS); | ||
437 | u64 n2 = (pos >> 1) % (1 << I2_BITS); | ||
438 | |||
439 | replace_hash(p, 8, n8, 0); | ||
440 | replace_hash(p, 4, n4, 0); | ||
441 | replace_hash(p, 4, n4, 1); | ||
442 | replace_hash(p, 2, n2, 0); | ||
443 | replace_hash(p, 2, n2, 1); | ||
444 | replace_hash(p, 2, n2, 2); | ||
445 | replace_hash(p, 2, n2, 3); | ||
446 | } | ||
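A worked example of the slot arithmetic, again assuming I8_BITS = 8, I4_BITS = 9 and I2_BITS = 8 from 842.h:

	/* At input position pos = 0x1230 (8-byte aligned):
	 *   n8 = (0x1230 >> 3) % 256 = 0x46
	 *   n4 = (0x1230 >> 2) % 512 = 0x8c
	 *   n2 = (0x1230 >> 1) % 256 = 0x18
	 * so the block's one 8-byte, two 4-byte and four 2-byte values land
	 * in ring slots n8, n4..n4+1 and n2..n2+3, matching the
	 * replace_hash() calls above.
	 */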
447 | |||
448 | /* find the next template to use, and add it | ||
449 | * the p->dataN fields must already be set for the current 8 byte block | ||
450 | */ | ||
451 | static int process_next(struct sw842_param *p) | ||
452 | { | ||
453 | int ret, i; | ||
454 | |||
455 | p->index8[0] = INDEX_NOT_CHECKED; | ||
456 | p->index4[0] = INDEX_NOT_CHECKED; | ||
457 | p->index4[1] = INDEX_NOT_CHECKED; | ||
458 | p->index2[0] = INDEX_NOT_CHECKED; | ||
459 | p->index2[1] = INDEX_NOT_CHECKED; | ||
460 | p->index2[2] = INDEX_NOT_CHECKED; | ||
461 | p->index2[3] = INDEX_NOT_CHECKED; | ||
462 | |||
463 | /* check up to OPS_MAX - 1; last op is our fallback */ | ||
464 | for (i = 0; i < OPS_MAX - 1; i++) { | ||
465 | if (check_template(p, i)) | ||
466 | break; | ||
467 | } | ||
468 | |||
469 | ret = add_template(p, i); | ||
470 | if (ret) | ||
471 | return ret; | ||
472 | |||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * sw842_compress | ||
478 | * | ||
479 | * Compress the uncompressed buffer of length @ilen at @in to the output buffer | ||
480 | * @out, using no more than @olen bytes, using the 842 compression format. | ||
481 | * | ||
482 | * Returns: 0 on success, error on failure. The @olen parameter | ||
483 | * will contain the number of output bytes written on success, or | ||
484 | * 0 on error. | ||
485 | */ | ||
486 | int sw842_compress(const u8 *in, unsigned int ilen, | ||
487 | u8 *out, unsigned int *olen, void *wmem) | ||
488 | { | ||
489 | struct sw842_param *p = (struct sw842_param *)wmem; | ||
490 | int ret; | ||
491 | u64 last, next, pad, total; | ||
492 | u8 repeat_count = 0; | ||
493 | |||
494 | BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS); | ||
495 | |||
496 | init_hashtable_nodes(p, 8); | ||
497 | init_hashtable_nodes(p, 4); | ||
498 | init_hashtable_nodes(p, 2); | ||
499 | |||
500 | p->in = (u8 *)in; | ||
501 | p->instart = p->in; | ||
502 | p->ilen = ilen; | ||
503 | p->out = out; | ||
504 | p->olen = *olen; | ||
505 | p->bit = 0; | ||
506 | |||
507 | total = p->olen; | ||
508 | |||
509 | *olen = 0; | ||
510 | |||
511 | /* if using strict mode, we can only compress a multiple of 8 */ | ||
512 | if (sw842_strict && (ilen % 8)) { | ||
513 | pr_err("Using strict mode, can't compress len %d\n", ilen); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | /* let's compress at least 8 bytes, mkay? */ | ||
518 | if (unlikely(ilen < 8)) | ||
519 | goto skip_comp; | ||
520 | |||
521 | /* make initial 'last' different so we don't match the first time */ | ||
522 | last = ~get_unaligned((u64 *)p->in); | ||
523 | |||
524 | while (p->ilen > 7) { | ||
525 | next = get_unaligned((u64 *)p->in); | ||
526 | |||
527 | /* must get the next data, as we need to update the hashtable | ||
528 | * entries with the new data every time | ||
529 | */ | ||
530 | get_next_data(p); | ||
531 | |||
532 | /* we don't care about endianness in last or next; | ||
533 | * we're just comparing 8 bytes to another 8 bytes, | ||
534 | * they're both the same endianness | ||
535 | */ | ||
536 | if (next == last) { | ||
537 | /* repeat count bits are 0-based, so we stop at +1 */ | ||
538 | if (++repeat_count <= REPEAT_BITS_MAX) | ||
539 | goto repeat; | ||
540 | } | ||
541 | if (repeat_count) { | ||
542 | ret = add_repeat_template(p, repeat_count); | ||
543 | repeat_count = 0; | ||
544 | if (next == last) /* reached max repeat bits */ | ||
545 | goto repeat; | ||
546 | } | ||
547 | |||
548 | if (next == 0) | ||
549 | ret = add_zeros_template(p); | ||
550 | else | ||
551 | ret = process_next(p); | ||
552 | |||
553 | if (ret) | ||
554 | return ret; | ||
555 | |||
556 | repeat: | ||
557 | last = next; | ||
558 | update_hashtables(p); | ||
559 | p->in += 8; | ||
560 | p->ilen -= 8; | ||
561 | } | ||
562 | |||
563 | if (repeat_count) { | ||
564 | ret = add_repeat_template(p, repeat_count); | ||
565 | if (ret) | ||
566 | return ret; | ||
567 | } | ||
568 | |||
569 | skip_comp: | ||
570 | if (p->ilen > 0) { | ||
571 | ret = add_short_data_template(p, p->ilen); | ||
572 | if (ret) | ||
573 | return ret; | ||
574 | |||
575 | p->in += p->ilen; | ||
576 | p->ilen = 0; | ||
577 | } | ||
578 | |||
579 | ret = add_end_template(p); | ||
580 | if (ret) | ||
581 | return ret; | ||
582 | |||
583 | if (p->bit) { | ||
584 | p->out++; | ||
585 | p->olen--; | ||
586 | p->bit = 0; | ||
587 | } | ||
588 | |||
589 | /* pad compressed length to multiple of 8 */ | ||
590 | pad = (8 - ((total - p->olen) % 8)) % 8; | ||
591 | if (pad) { | ||
592 | if (pad > p->olen) /* we were so close! */ | ||
593 | return -ENOSPC; | ||
594 | memset(p->out, 0, pad); | ||
595 | p->out += pad; | ||
596 | p->olen -= pad; | ||
597 | } | ||
598 | |||
599 | if (unlikely((total - p->olen) > UINT_MAX)) | ||
600 | return -ENOSPC; | ||
601 | |||
602 | *olen = total - p->olen; | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | EXPORT_SYMBOL_GPL(sw842_compress); | ||
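A hedged usage sketch of the exported entry point; SW842_MEM_COMPRESS comes from 842.h, and everything else here (names, sizing policy) is illustrative:

	static int compress_one(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int dlen)
	{
		unsigned int olen = dlen;
		void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
		int ret;

		if (!wmem)
			return -ENOMEM;

		ret = sw842_compress(src, slen, dst, &olen, wmem);
		kfree(wmem);
		return ret ? ret : olen;	/* compressed size on success */
	}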
607 | |||
608 | static int __init sw842_init(void) | ||
609 | { | ||
610 | if (sw842_template_counts) | ||
611 | sw842_debugfs_create(); | ||
612 | |||
613 | return 0; | ||
614 | } | ||
615 | module_init(sw842_init); | ||
616 | |||
617 | static void __exit sw842_exit(void) | ||
618 | { | ||
619 | if (sw842_template_counts) | ||
620 | sw842_debugfs_remove(); | ||
621 | } | ||
622 | module_exit(sw842_exit); | ||
623 | |||
624 | MODULE_LICENSE("GPL"); | ||
625 | MODULE_DESCRIPTION("Software 842 Compressor"); | ||
626 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h new file mode 100644 index 000000000000..e7f3bffaf255 --- /dev/null +++ b/lib/842/842_debugfs.h | |||
@@ -0,0 +1,52 @@ | |||
1 | |||
2 | #ifndef __842_DEBUGFS_H__ | ||
3 | #define __842_DEBUGFS_H__ | ||
4 | |||
5 | #include <linux/debugfs.h> | ||
6 | |||
7 | static bool sw842_template_counts; | ||
8 | module_param_named(template_counts, sw842_template_counts, bool, 0444); | ||
9 | |||
10 | static atomic_t template_count[OPS_MAX], template_repeat_count, | ||
11 | template_zeros_count, template_short_data_count, template_end_count; | ||
12 | |||
13 | static struct dentry *sw842_debugfs_root; | ||
14 | |||
15 | static int __init sw842_debugfs_create(void) | ||
16 | { | ||
17 | umode_t m = S_IRUGO | S_IWUSR; | ||
18 | int i; | ||
19 | |||
20 | if (!debugfs_initialized()) | ||
21 | return -ENODEV; | ||
22 | |||
23 | sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL); | ||
24 | if (IS_ERR(sw842_debugfs_root)) | ||
25 | return PTR_ERR(sw842_debugfs_root); | ||
26 | |||
27 | for (i = 0; i < ARRAY_SIZE(template_count); i++) { | ||
28 | char name[32]; | ||
29 | |||
30 | snprintf(name, 32, "template_%02x", i); | ||
31 | debugfs_create_atomic_t(name, m, sw842_debugfs_root, | ||
32 | &template_count[i]); | ||
33 | } | ||
34 | debugfs_create_atomic_t("template_repeat", m, sw842_debugfs_root, | ||
35 | &template_repeat_count); | ||
36 | debugfs_create_atomic_t("template_zeros", m, sw842_debugfs_root, | ||
37 | &template_zeros_count); | ||
38 | debugfs_create_atomic_t("template_short_data", m, sw842_debugfs_root, | ||
39 | &template_short_data_count); | ||
40 | debugfs_create_atomic_t("template_end", m, sw842_debugfs_root, | ||
41 | &template_end_count); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static void __exit sw842_debugfs_remove(void) | ||
47 | { | ||
48 | if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root)) | ||
49 | debugfs_remove_recursive(sw842_debugfs_root); | ||
50 | } | ||
51 | |||
52 | #endif | ||
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c new file mode 100644 index 000000000000..5446ff0c9ba0 --- /dev/null +++ b/lib/842/842_decompress.c | |||
@@ -0,0 +1,405 @@ | |||
1 | /* | ||
2 | * 842 Software Decompression | ||
3 | * | ||
4 | * Copyright (C) 2015 Dan Streetman, IBM Corp | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * See 842.h for details of the 842 compressed format. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | #define MODULE_NAME "842_decompress" | ||
21 | |||
22 | #include "842.h" | ||
23 | #include "842_debugfs.h" | ||
24 | |||
25 | /* rolling fifo sizes */ | ||
26 | #define I2_FIFO_SIZE (2 * (1 << I2_BITS)) | ||
27 | #define I4_FIFO_SIZE (4 * (1 << I4_BITS)) | ||
28 | #define I8_FIFO_SIZE (8 * (1 << I8_BITS)) | ||
29 | |||
30 | static u8 decomp_ops[OPS_MAX][4] = { | ||
31 | { D8, N0, N0, N0 }, | ||
32 | { D4, D2, I2, N0 }, | ||
33 | { D4, I2, D2, N0 }, | ||
34 | { D4, I2, I2, N0 }, | ||
35 | { D4, I4, N0, N0 }, | ||
36 | { D2, I2, D4, N0 }, | ||
37 | { D2, I2, D2, I2 }, | ||
38 | { D2, I2, I2, D2 }, | ||
39 | { D2, I2, I2, I2 }, | ||
40 | { D2, I2, I4, N0 }, | ||
41 | { I2, D2, D4, N0 }, | ||
42 | { I2, D4, I2, N0 }, | ||
43 | { I2, D2, I2, D2 }, | ||
44 | { I2, D2, I2, I2 }, | ||
45 | { I2, D2, I4, N0 }, | ||
46 | { I2, I2, D4, N0 }, | ||
47 | { I2, I2, D2, I2 }, | ||
48 | { I2, I2, I2, D2 }, | ||
49 | { I2, I2, I2, I2 }, | ||
50 | { I2, I2, I4, N0 }, | ||
51 | { I4, D4, N0, N0 }, | ||
52 | { I4, D2, I2, N0 }, | ||
53 | { I4, I2, D2, N0 }, | ||
54 | { I4, I2, I2, N0 }, | ||
55 | { I4, I4, N0, N0 }, | ||
56 | { I8, N0, N0, N0 } | ||
57 | }; | ||
58 | |||
59 | struct sw842_param { | ||
60 | u8 *in; | ||
61 | u8 bit; | ||
62 | u64 ilen; | ||
63 | u8 *out; | ||
64 | u8 *ostart; | ||
65 | u64 olen; | ||
66 | }; | ||
67 | |||
68 | #define beN_to_cpu(d, s) \ | ||
69 | ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ | ||
70 | (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ | ||
71 | (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ | ||
72 | WARN(1, "pr_debug param err invalid size %x\n", s)) | ||
73 | |||
74 | static int next_bits(struct sw842_param *p, u64 *d, u8 n); | ||
75 | |||
76 | static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s) | ||
77 | { | ||
78 | u64 tmp = 0; | ||
79 | int ret; | ||
80 | |||
81 | if (n <= s) { | ||
82 | pr_debug("split_next_bits invalid n %u s %u\n", n, s); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | ret = next_bits(p, &tmp, n - s); | ||
87 | if (ret) | ||
88 | return ret; | ||
89 | ret = next_bits(p, d, s); | ||
90 | if (ret) | ||
91 | return ret; | ||
92 | *d |= tmp << s; | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int next_bits(struct sw842_param *p, u64 *d, u8 n) | ||
97 | { | ||
98 | u8 *in = p->in, b = p->bit, bits = b + n; | ||
99 | |||
100 | if (n > 64) { | ||
101 | pr_debug("next_bits invalid n %u\n", n); | ||
102 | return -EINVAL; | ||
103 | } | ||
104 | |||
105 | /* split this up if reading > 8 bytes, or if we're at the end of | ||
106 | * the input buffer and would read past the end | ||
107 | */ | ||
108 | if (bits > 64) | ||
109 | return __split_next_bits(p, d, n, 32); | ||
110 | else if (p->ilen < 8 && bits > 32 && bits <= 56) | ||
111 | return __split_next_bits(p, d, n, 16); | ||
112 | else if (p->ilen < 4 && bits > 16 && bits <= 24) | ||
113 | return __split_next_bits(p, d, n, 8); | ||
114 | |||
115 | if (DIV_ROUND_UP(bits, 8) > p->ilen) | ||
116 | return -EOVERFLOW; | ||
117 | |||
118 | if (bits <= 8) | ||
119 | *d = *in >> (8 - bits); | ||
120 | else if (bits <= 16) | ||
121 | *d = be16_to_cpu(get_unaligned((__be16 *)in)) >> (16 - bits); | ||
122 | else if (bits <= 32) | ||
123 | *d = be32_to_cpu(get_unaligned((__be32 *)in)) >> (32 - bits); | ||
124 | else | ||
125 | *d = be64_to_cpu(get_unaligned((__be64 *)in)) >> (64 - bits); | ||
126 | |||
127 | *d &= GENMASK_ULL(n - 1, 0); | ||
128 | |||
129 | p->bit += n; | ||
130 | |||
131 | if (p->bit > 7) { | ||
132 | p->in += p->bit / 8; | ||
133 | p->ilen -= p->bit / 8; | ||
134 | p->bit %= 8; | ||
135 | } | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int do_data(struct sw842_param *p, u8 n) | ||
141 | { | ||
142 | u64 v; | ||
143 | int ret; | ||
144 | |||
145 | if (n > p->olen) | ||
146 | return -ENOSPC; | ||
147 | |||
148 | ret = next_bits(p, &v, n * 8); | ||
149 | if (ret) | ||
150 | return ret; | ||
151 | |||
152 | switch (n) { | ||
153 | case 2: | ||
154 | put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out); | ||
155 | break; | ||
156 | case 4: | ||
157 | put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out); | ||
158 | break; | ||
159 | case 8: | ||
160 | put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out); | ||
161 | break; | ||
162 | default: | ||
163 | return -EINVAL; | ||
164 | } | ||
165 | |||
166 | p->out += n; | ||
167 | p->olen -= n; | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize) | ||
173 | { | ||
174 | u64 index, offset, total = round_down(p->out - p->ostart, 8); | ||
175 | int ret; | ||
176 | |||
177 | ret = next_bits(p, &index, bits); | ||
178 | if (ret) | ||
179 | return ret; | ||
180 | |||
181 | offset = index * size; | ||
182 | |||
183 | /* a ring buffer of fsize is used; correct the offset */ | ||
184 | if (total > fsize) { | ||
185 | /* this is where the current fifo is */ | ||
186 | u64 section = round_down(total, fsize); | ||
187 | /* the current pos in the fifo */ | ||
188 | u64 pos = total - section; | ||
189 | |||
190 | /* if the offset is past/at the pos, we need to | ||
191 | * go back to the last fifo section | ||
192 | */ | ||
193 | if (offset >= pos) | ||
194 | section -= fsize; | ||
195 | |||
196 | offset += section; | ||
197 | } | ||
198 | |||
199 | if (offset + size > total) { | ||
200 | pr_debug("index%x %lx points past end %lx\n", size, | ||
201 | (unsigned long)offset, (unsigned long)total); | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | |||
205 | pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", | ||
206 | size, (unsigned long)index, (unsigned long)(index * size), | ||
207 | (unsigned long)offset, (unsigned long)total, | ||
208 | (unsigned long)beN_to_cpu(&p->ostart[offset], size)); | ||
209 | |||
210 | memcpy(p->out, &p->ostart[offset], size); | ||
211 | p->out += size; | ||
212 | p->olen -= size; | ||
213 | |||
214 | return 0; | ||
215 | } | ||
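A worked example of the fifo correction, for 2-byte indexes with I2_BITS = 8 assumed (so I2_FIFO_SIZE = 512):

	/* With total = 1296 bytes already written:
	 *   section = round_down(1296, 512) = 1024, pos = 272.
	 * An index offset of 100 (< pos) lies in the current section:
	 *   100 + 1024 = 1124.
	 * An offset of 300 (>= pos) must come from the previous section:
	 *   300 + (1024 - 512) = 812.
	 */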
216 | |||
217 | static int do_index(struct sw842_param *p, u8 n) | ||
218 | { | ||
219 | switch (n) { | ||
220 | case 2: | ||
221 | return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE); | ||
222 | case 4: | ||
223 | return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE); | ||
224 | case 8: | ||
225 | return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE); | ||
226 | default: | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | static int do_op(struct sw842_param *p, u8 o) | ||
232 | { | ||
233 | int i, ret = 0; | ||
234 | |||
235 | if (o >= OPS_MAX) | ||
236 | return -EINVAL; | ||
237 | |||
238 | for (i = 0; i < 4; i++) { | ||
239 | u8 op = decomp_ops[o][i]; | ||
240 | |||
241 | pr_debug("op is %x\n", op); | ||
242 | |||
243 | switch (op & OP_ACTION) { | ||
244 | case OP_ACTION_DATA: | ||
245 | ret = do_data(p, op & OP_AMOUNT); | ||
246 | break; | ||
247 | case OP_ACTION_INDEX: | ||
248 | ret = do_index(p, op & OP_AMOUNT); | ||
249 | break; | ||
250 | case OP_ACTION_NOOP: | ||
251 | break; | ||
252 | default: | ||
253 | pr_err("Internal error, invalid op %x\n", op); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | |||
257 | if (ret) | ||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | if (sw842_template_counts) | ||
262 | atomic_inc(&template_count[o]); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * sw842_decompress | ||
269 | * | ||
270 | * Decompress the 842-compressed buffer of length @ilen at @in | ||
271 | * to the output buffer @out, using no more than @olen bytes. | ||
272 | * | ||
273 | * The compressed buffer must be only a single 842-compressed buffer, | ||
274 | * with the standard format described in the comments in 842.h | ||
275 | * Processing will stop when the 842 "END" template is detected, | ||
276 | * not when the end of the buffer is reached. | ||
277 | * | ||
278 | * Returns: 0 on success, error on failure. The @olen parameter | ||
279 | * will contain the number of output bytes written on success, or | ||
280 | * 0 on error. | ||
281 | */ | ||
282 | int sw842_decompress(const u8 *in, unsigned int ilen, | ||
283 | u8 *out, unsigned int *olen) | ||
284 | { | ||
285 | struct sw842_param p; | ||
286 | int ret; | ||
287 | u64 op, rep, tmp, bytes, total; | ||
288 | |||
289 | p.in = (u8 *)in; | ||
290 | p.bit = 0; | ||
291 | p.ilen = ilen; | ||
292 | p.out = out; | ||
293 | p.ostart = out; | ||
294 | p.olen = *olen; | ||
295 | |||
296 | total = p.olen; | ||
297 | |||
298 | *olen = 0; | ||
299 | |||
300 | do { | ||
301 | ret = next_bits(&p, &op, OP_BITS); | ||
302 | if (ret) | ||
303 | return ret; | ||
304 | |||
305 | pr_debug("template is %lx\n", (unsigned long)op); | ||
306 | |||
307 | switch (op) { | ||
308 | case OP_REPEAT: | ||
309 | ret = next_bits(&p, &rep, REPEAT_BITS); | ||
310 | if (ret) | ||
311 | return ret; | ||
312 | |||
313 | if (p.out == out) /* no previous bytes */ | ||
314 | return -EINVAL; | ||
315 | |||
316 | /* copy rep + 1 */ | ||
317 | rep++; | ||
318 | |||
319 | if (rep * 8 > p.olen) | ||
320 | return -ENOSPC; | ||
321 | |||
322 | while (rep-- > 0) { | ||
323 | memcpy(p.out, p.out - 8, 8); | ||
324 | p.out += 8; | ||
325 | p.olen -= 8; | ||
326 | } | ||
327 | |||
328 | if (sw842_template_counts) | ||
329 | atomic_inc(&template_repeat_count); | ||
330 | |||
331 | break; | ||
332 | case OP_ZEROS: | ||
333 | if (8 > p.olen) | ||
334 | return -ENOSPC; | ||
335 | |||
336 | memset(p.out, 0, 8); | ||
337 | p.out += 8; | ||
338 | p.olen -= 8; | ||
339 | |||
340 | if (sw842_template_counts) | ||
341 | atomic_inc(&template_zeros_count); | ||
342 | |||
343 | break; | ||
344 | case OP_SHORT_DATA: | ||
345 | ret = next_bits(&p, &bytes, SHORT_DATA_BITS); | ||
346 | if (ret) | ||
347 | return ret; | ||
348 | |||
349 | if (!bytes || bytes > SHORT_DATA_BITS_MAX) | ||
350 | return -EINVAL; | ||
351 | |||
352 | while (bytes-- > 0) { | ||
353 | ret = next_bits(&p, &tmp, 8); | ||
354 | if (ret) | ||
355 | return ret; | ||
356 | *p.out = (u8)tmp; | ||
357 | p.out++; | ||
358 | p.olen--; | ||
359 | } | ||
360 | |||
361 | if (sw842_template_counts) | ||
362 | atomic_inc(&template_short_data_count); | ||
363 | |||
364 | break; | ||
365 | case OP_END: | ||
366 | if (sw842_template_counts) | ||
367 | atomic_inc(&template_end_count); | ||
368 | |||
369 | break; | ||
370 | default: /* use template */ | ||
371 | ret = do_op(&p, op); | ||
372 | if (ret) | ||
373 | return ret; | ||
374 | break; | ||
375 | } | ||
376 | } while (op != OP_END); | ||
377 | |||
378 | if (unlikely((total - p.olen) > UINT_MAX)) | ||
379 | return -ENOSPC; | ||
380 | |||
381 | *olen = total - p.olen; | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | EXPORT_SYMBOL_GPL(sw842_decompress); | ||
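And the matching caller-side sketch for decompression; decompress_one() is a hypothetical wrapper around the exported function:

	static int decompress_one(const u8 *comp, unsigned int clen,
				  u8 *dst, unsigned int dlen)
	{
		unsigned int olen = dlen;
		int ret = sw842_decompress(comp, clen, dst, &olen);

		return ret ? ret : olen;	/* decompressed size on success */
	}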
386 | |||
387 | static int __init sw842_init(void) | ||
388 | { | ||
389 | if (sw842_template_counts) | ||
390 | sw842_debugfs_create(); | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | module_init(sw842_init); | ||
395 | |||
396 | static void __exit sw842_exit(void) | ||
397 | { | ||
398 | if (sw842_template_counts) | ||
399 | sw842_debugfs_remove(); | ||
400 | } | ||
401 | module_exit(sw842_exit); | ||
402 | |||
403 | MODULE_LICENSE("GPL"); | ||
404 | MODULE_DESCRIPTION("Software 842 Decompressor"); | ||
405 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
diff --git a/lib/842/Makefile b/lib/842/Makefile new file mode 100644 index 000000000000..5d24c0baff2e --- /dev/null +++ b/lib/842/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_842_COMPRESS) += 842_compress.o | ||
2 | obj-$(CONFIG_842_DECOMPRESS) += 842_decompress.o | ||
diff --git a/lib/Kconfig b/lib/Kconfig index 601965a948e8..34e332b8d326 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -212,6 +212,12 @@ config RANDOM32_SELFTEST | |||
212 | # | 212 | # |
213 | # compression support is select'ed if needed | 213 | # compression support is select'ed if needed |
214 | # | 214 | # |
215 | config 842_COMPRESS | ||
216 | tristate | ||
217 | |||
218 | config 842_DECOMPRESS | ||
219 | tristate | ||
220 | |||
215 | config ZLIB_INFLATE | 221 | config ZLIB_INFLATE |
216 | tristate | 222 | tristate |
217 | 223 | ||
diff --git a/lib/Makefile b/lib/Makefile index 6c37933336a0..ff37c8c2f7b2 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -78,6 +78,8 @@ obj-$(CONFIG_LIBCRC32C) += libcrc32c.o | |||
78 | obj-$(CONFIG_CRC8) += crc8.o | 78 | obj-$(CONFIG_CRC8) += crc8.o |
79 | obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o | 79 | obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o |
80 | 80 | ||
81 | obj-$(CONFIG_842_COMPRESS) += 842/ | ||
82 | obj-$(CONFIG_842_DECOMPRESS) += 842/ | ||
81 | obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ | 83 | obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ |
82 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ | 84 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ |
83 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ | 85 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ |
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 4cc6442733f4..bc0a1da8afba 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c | |||
@@ -128,28 +128,36 @@ leave: | |||
128 | } | 128 | } |
129 | EXPORT_SYMBOL_GPL(mpi_read_from_buffer); | 129 | EXPORT_SYMBOL_GPL(mpi_read_from_buffer); |
130 | 130 | ||
131 | /**************** | 131 | /** |
132 | * Return an allocated buffer with the MPI (msb first). | 132 | * mpi_read_buffer() - read MPI to a buffer provided by user (msb first) |
133 | * NBYTES receives the length of this buffer. Caller must free the | 133 | * |
134 | * return string (This function does return a 0 byte buffer with NBYTES | 134 | * @a: a multi precision integer |
135 | * set to zero if the value of A is zero. If sign is not NULL, it will | 135 | * @buf: buffer to which the output will be written. Needs to be at |
136 | * be set to the sign of the A. | 136 | * least mpi_get_size(a) long. |
137 | * @buf_len: size of the buf. | ||
138 | * @nbytes: receives the actual length of the data written. | ||
139 | * @sign: if not NULL, it will be set to the sign of a. | ||
140 | * | ||
141 | * Return: 0 on success or error code in case of error | ||
137 | */ | 142 | */ |
138 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | 143 | int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, |
144 | int *sign) | ||
139 | { | 145 | { |
140 | uint8_t *p, *buffer; | 146 | uint8_t *p; |
141 | mpi_limb_t alimb; | 147 | mpi_limb_t alimb; |
148 | unsigned int n = mpi_get_size(a); | ||
142 | int i; | 149 | int i; |
143 | unsigned int n; | 150 | |
151 | if (buf_len < n || !buf) | ||
152 | return -EINVAL; | ||
144 | 153 | ||
145 | if (sign) | 154 | if (sign) |
146 | *sign = a->sign; | 155 | *sign = a->sign; |
147 | *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB; | 156 | |
148 | if (!n) | 157 | if (nbytes) |
149 | n++; /* avoid zero length allocation */ | 158 | *nbytes = n; |
150 | p = buffer = kmalloc(n, GFP_KERNEL); | 159 | |
151 | if (!p) | 160 | p = buf; |
152 | return NULL; | ||
153 | 161 | ||
154 | for (i = a->nlimbs - 1; i >= 0; i--) { | 162 | for (i = a->nlimbs - 1; i >= 0; i--) { |
155 | alimb = a->d[i]; | 163 | alimb = a->d[i]; |
@@ -171,15 +179,56 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | |||
171 | #error please implement for this limb size. | 179 | #error please implement for this limb size. |
172 | #endif | 180 | #endif |
173 | } | 181 | } |
182 | return 0; | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(mpi_read_buffer); | ||
185 | |||
186 | /* | ||
187 | * mpi_get_buffer() - Returns an allocated buffer with the MPI (msb first). | ||
188 | * Caller must free the returned buffer. | ||
189 | * This function returns a 0-byte buffer with nbytes set to zero if the | ||
190 | * value of a is zero. | ||
191 | * | ||
192 | * @a: a multi precision integer. | ||
193 | * @nbytes: receives the length of this buffer. | ||
194 | * @sign: if not NULL, it will be set to the sign of a. | ||
195 | * | ||
196 | * Return: Pointer to MPI buffer or NULL on error | ||
197 | */ | ||
198 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | ||
199 | { | ||
200 | uint8_t *buf, *p; | ||
201 | unsigned int n; | ||
202 | int ret; | ||
203 | |||
204 | if (!nbytes) | ||
205 | return NULL; | ||
206 | |||
207 | n = mpi_get_size(a); | ||
208 | |||
209 | if (!n) | ||
210 | n++; | ||
211 | |||
212 | buf = kmalloc(n, GFP_KERNEL); | ||
213 | |||
214 | if (!buf) | ||
215 | return NULL; | ||
216 | |||
217 | ret = mpi_read_buffer(a, buf, n, nbytes, sign); | ||
218 | |||
219 | if (ret) { | ||
220 | kfree(buf); | ||
221 | return NULL; | ||
222 | } | ||
174 | 223 | ||
175 | /* this is sub-optimal but we need to do the shift operation | 224 | /* this is sub-optimal but we need to do the shift operation |
176 | * because the caller has to free the returned buffer */ | 225 | * because the caller has to free the returned buffer */ |
177 | for (p = buffer; !*p && *nbytes; p++, --*nbytes) | 226 | for (p = buf; !*p && *nbytes; p++, --*nbytes) |
178 | ; | 227 | ; |
179 | if (p != buffer) | 228 | if (p != buf) |
180 | memmove(buffer, p, *nbytes); | 229 | memmove(buf, p, *nbytes); |
181 | 230 | ||
182 | return buffer; | 231 | return buf; |
183 | } | 232 | } |
184 | EXPORT_SYMBOL_GPL(mpi_get_buffer); | 233 | EXPORT_SYMBOL_GPL(mpi_get_buffer); |
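A sketch contrasting the two entry points after this change; mpi_to_buf() is a hypothetical caller, and mpi_get_size() is used as it appears in this hunk:

	static int mpi_to_buf(MPI a)
	{
		unsigned int n = mpi_get_size(a), written;
		uint8_t *buf = kmalloc(n, GFP_KERNEL);
		int sign, ret;

		if (!buf)
			return -ENOMEM;

		/* caller-owned buffer, no hidden allocation */
		ret = mpi_read_buffer(a, buf, n, &written, &sign);
		kfree(buf);
		return ret;
	}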
185 | 234 | ||
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c index bf076d281d40..314f4dfa603e 100644 --- a/lib/mpi/mpiutil.c +++ b/lib/mpi/mpiutil.c | |||
@@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a) | |||
69 | if (!a) | 69 | if (!a) |
70 | return; | 70 | return; |
71 | 71 | ||
72 | kfree(a); | 72 | kzfree(a); |
73 | } | 73 | } |
74 | 74 | ||
75 | void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) | 75 | void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) |
@@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs) | |||
95 | if (!p) | 95 | if (!p) |
96 | return -ENOMEM; | 96 | return -ENOMEM; |
97 | memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); | 97 | memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); |
98 | kfree(a->d); | 98 | kzfree(a->d); |
99 | a->d = p; | 99 | a->d = p; |
100 | } else { | 100 | } else { |
101 | a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); | 101 | a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); |
@@ -112,7 +112,7 @@ void mpi_free(MPI a) | |||
112 | return; | 112 | return; |
113 | 113 | ||
114 | if (a->flags & 4) | 114 | if (a->flags & 4) |
115 | kfree(a->d); | 115 | kzfree(a->d); |
116 | else | 116 | else |
117 | mpi_free_limb_space(a->d); | 117 | mpi_free_limb_space(a->d); |
118 | 118 | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c9f2e8c6ccc9..99fbc2f238c4 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -56,6 +56,38 @@ int sg_nents(struct scatterlist *sg) | |||
56 | } | 56 | } |
57 | EXPORT_SYMBOL(sg_nents); | 57 | EXPORT_SYMBOL(sg_nents); |
58 | 58 | ||
59 | /** | ||
60 | * sg_nents_for_len - return total count of entries in scatterlist | ||
61 | * needed to satisfy the supplied length | ||
62 | * @sg: The scatterlist | ||
63 | * @len: The total required length | ||
64 | * | ||
65 | * Description: | ||
66 | * Determines the number of entries in sg that are required to meet | ||
67 | * the supplied length, taking into account chaining as well | ||
68 | * | ||
69 | * Returns: | ||
70 | * the number of sg entries needed, negative error on failure | ||
71 | * | ||
72 | **/ | ||
73 | int sg_nents_for_len(struct scatterlist *sg, u64 len) | ||
74 | { | ||
75 | int nents; | ||
76 | u64 total; | ||
77 | |||
78 | if (!len) | ||
79 | return 0; | ||
80 | |||
81 | for (nents = 0, total = 0; sg; sg = sg_next(sg)) { | ||
82 | nents++; | ||
83 | total += sg->length; | ||
84 | if (total >= len) | ||
85 | return nents; | ||
86 | } | ||
87 | |||
88 | return -EINVAL; | ||
89 | } | ||
90 | EXPORT_SYMBOL(sg_nents_for_len); | ||
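A short usage sketch; the surrounding names are illustrative, but the negative-return convention is the one documented above:

	static int map_exact_len(struct scatterlist *sg, u64 need)
	{
		int nents = sg_nents_for_len(sg, need);

		if (nents < 0)
			return nents;	/* -EINVAL: list shorter than need */

		/* only the first nents entries need to be mapped/walked */
		return nents;
	}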
59 | 91 | ||
60 | /** | 92 | /** |
61 | * sg_last - return the last scatterlist entry in a list | 93 | * sg_last - return the last scatterlist entry in a list |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 30b544f025ac..477937465a20 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -49,7 +49,7 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen) | |||
49 | len = ALIGN(len, crypto_tfm_ctx_alignment()); | 49 | len = ALIGN(len, crypto_tfm_ctx_alignment()); |
50 | } | 50 | } |
51 | 51 | ||
52 | len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); | 52 | len += sizeof(struct aead_request) + crypto_aead_reqsize(aead); |
53 | len = ALIGN(len, __alignof__(struct scatterlist)); | 53 | len = ALIGN(len, __alignof__(struct scatterlist)); |
54 | 54 | ||
55 | len += sizeof(struct scatterlist) * nfrags; | 55 | len += sizeof(struct scatterlist) * nfrags; |
@@ -68,17 +68,6 @@ static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) | |||
68 | crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; | 68 | crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline struct aead_givcrypt_request *esp_tmp_givreq( | ||
72 | struct crypto_aead *aead, u8 *iv) | ||
73 | { | ||
74 | struct aead_givcrypt_request *req; | ||
75 | |||
76 | req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), | ||
77 | crypto_tfm_ctx_alignment()); | ||
78 | aead_givcrypt_set_tfm(req, aead); | ||
79 | return req; | ||
80 | } | ||
81 | |||
82 | static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) | 71 | static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) |
83 | { | 72 | { |
84 | struct aead_request *req; | 73 | struct aead_request *req; |
@@ -97,14 +86,6 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, | |||
97 | __alignof__(struct scatterlist)); | 86 | __alignof__(struct scatterlist)); |
98 | } | 87 | } |
99 | 88 | ||
100 | static inline struct scatterlist *esp_givreq_sg( | ||
101 | struct crypto_aead *aead, struct aead_givcrypt_request *req) | ||
102 | { | ||
103 | return (void *)ALIGN((unsigned long)(req + 1) + | ||
104 | crypto_aead_reqsize(aead), | ||
105 | __alignof__(struct scatterlist)); | ||
106 | } | ||
107 | |||
108 | static void esp_output_done(struct crypto_async_request *base, int err) | 89 | static void esp_output_done(struct crypto_async_request *base, int err) |
109 | { | 90 | { |
110 | struct sk_buff *skb = base->data; | 91 | struct sk_buff *skb = base->data; |
@@ -113,14 +94,37 @@ static void esp_output_done(struct crypto_async_request *base, int err) | |||
113 | xfrm_output_resume(skb, err); | 94 | xfrm_output_resume(skb, err); |
114 | } | 95 | } |
115 | 96 | ||
97 | /* Move ESP header back into place. */ | ||
98 | static void esp_restore_header(struct sk_buff *skb, unsigned int offset) | ||
99 | { | ||
100 | struct ip_esp_hdr *esph = (void *)(skb->data + offset); | ||
101 | void *tmp = ESP_SKB_CB(skb)->tmp; | ||
102 | __be32 *seqhi = esp_tmp_seqhi(tmp); | ||
103 | |||
104 | esph->seq_no = esph->spi; | ||
105 | esph->spi = *seqhi; | ||
106 | } | ||
107 | |||
108 | static void esp_output_restore_header(struct sk_buff *skb) | ||
109 | { | ||
110 | esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32)); | ||
111 | } | ||
112 | |||
113 | static void esp_output_done_esn(struct crypto_async_request *base, int err) | ||
114 | { | ||
115 | struct sk_buff *skb = base->data; | ||
116 | |||
117 | esp_output_restore_header(skb); | ||
118 | esp_output_done(base, err); | ||
119 | } | ||
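A layout sketch of the ESN trick used here (offsets relative to the transport header; a hedged reading of the code, not an authoritative wire diagram):

	/*   normal header:            [ spi ][ seq_lo ][ IV ][ payload ]
	 *   during crypto:   [ spi ][ seq_hi ][ seq_lo ][ IV ][ payload ]
	 *                      ^-- shifted 4 bytes before the header
	 *
	 * spi, seq_hi and seq_lo become contiguous, so a single scatterlist
	 * run covers the whole associated data; esp_restore_header() then
	 * moves spi back and restores the clobbered 4 bytes from seqhi.
	 */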
120 | |||
116 | static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | 121 | static int esp_output(struct xfrm_state *x, struct sk_buff *skb) |
117 | { | 122 | { |
118 | int err; | 123 | int err; |
119 | struct ip_esp_hdr *esph; | 124 | struct ip_esp_hdr *esph; |
120 | struct crypto_aead *aead; | 125 | struct crypto_aead *aead; |
121 | struct aead_givcrypt_request *req; | 126 | struct aead_request *req; |
122 | struct scatterlist *sg; | 127 | struct scatterlist *sg; |
123 | struct scatterlist *asg; | ||
124 | struct sk_buff *trailer; | 128 | struct sk_buff *trailer; |
125 | void *tmp; | 129 | void *tmp; |
126 | u8 *iv; | 130 | u8 *iv; |
@@ -129,17 +133,19 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
129 | int clen; | 133 | int clen; |
130 | int alen; | 134 | int alen; |
131 | int plen; | 135 | int plen; |
136 | int ivlen; | ||
132 | int tfclen; | 137 | int tfclen; |
133 | int nfrags; | 138 | int nfrags; |
134 | int assoclen; | 139 | int assoclen; |
135 | int sglists; | ||
136 | int seqhilen; | 140 | int seqhilen; |
137 | __be32 *seqhi; | 141 | __be32 *seqhi; |
142 | __be64 seqno; | ||
138 | 143 | ||
139 | /* skb is pure payload to encrypt */ | 144 | /* skb is pure payload to encrypt */ |
140 | 145 | ||
141 | aead = x->data; | 146 | aead = x->data; |
142 | alen = crypto_aead_authsize(aead); | 147 | alen = crypto_aead_authsize(aead); |
148 | ivlen = crypto_aead_ivsize(aead); | ||
143 | 149 | ||
144 | tfclen = 0; | 150 | tfclen = 0; |
145 | if (x->tfcpad) { | 151 | if (x->tfcpad) { |
@@ -160,16 +166,14 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
160 | nfrags = err; | 166 | nfrags = err; |
161 | 167 | ||
162 | assoclen = sizeof(*esph); | 168 | assoclen = sizeof(*esph); |
163 | sglists = 1; | ||
164 | seqhilen = 0; | 169 | seqhilen = 0; |
165 | 170 | ||
166 | if (x->props.flags & XFRM_STATE_ESN) { | 171 | if (x->props.flags & XFRM_STATE_ESN) { |
167 | sglists += 2; | ||
168 | seqhilen += sizeof(__be32); | 172 | seqhilen += sizeof(__be32); |
169 | assoclen += seqhilen; | 173 | assoclen += seqhilen; |
170 | } | 174 | } |
171 | 175 | ||
172 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 176 | tmp = esp_alloc_tmp(aead, nfrags, seqhilen); |
173 | if (!tmp) { | 177 | if (!tmp) { |
174 | err = -ENOMEM; | 178 | err = -ENOMEM; |
175 | goto error; | 179 | goto error; |
@@ -177,9 +181,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
177 | 181 | ||
178 | seqhi = esp_tmp_seqhi(tmp); | 182 | seqhi = esp_tmp_seqhi(tmp); |
179 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 183 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
180 | req = esp_tmp_givreq(aead, iv); | 184 | req = esp_tmp_req(aead, iv); |
181 | asg = esp_givreq_sg(aead, req); | 185 | sg = esp_req_sg(aead, req); |
182 | sg = asg + sglists; | ||
183 | 186 | ||
184 | /* Fill padding... */ | 187 | /* Fill padding... */ |
185 | tail = skb_tail_pointer(trailer); | 188 | tail = skb_tail_pointer(trailer); |
@@ -235,37 +238,53 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
235 | *skb_mac_header(skb) = IPPROTO_UDP; | 238 | *skb_mac_header(skb) = IPPROTO_UDP; |
236 | } | 239 | } |
237 | 240 | ||
238 | esph->spi = x->id.spi; | ||
239 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); | 241 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); |
240 | 242 | ||
243 | aead_request_set_callback(req, 0, esp_output_done, skb); | ||
244 | |||
245 | /* For ESN we move the header forward by 4 bytes to | ||
246 | * accommodate the high bits. We will move it back after | ||
247 | * encryption. | ||
248 | */ | ||
249 | if ((x->props.flags & XFRM_STATE_ESN)) { | ||
250 | esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); | ||
251 | *seqhi = esph->spi; | ||
252 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); | ||
253 | aead_request_set_callback(req, 0, esp_output_done_esn, skb); | ||
254 | } | ||
255 | |||
256 | esph->spi = x->id.spi; | ||
257 | |||
241 | sg_init_table(sg, nfrags); | 258 | sg_init_table(sg, nfrags); |
242 | skb_to_sgvec(skb, sg, | 259 | skb_to_sgvec(skb, sg, |
243 | esph->enc_data + crypto_aead_ivsize(aead) - skb->data, | 260 | (unsigned char *)esph - skb->data, |
244 | clen + alen); | 261 | assoclen + ivlen + clen + alen); |
245 | 262 | ||
246 | if ((x->props.flags & XFRM_STATE_ESN)) { | 263 | aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); |
247 | sg_init_table(asg, 3); | 264 | aead_request_set_ad(req, assoclen); |
248 | sg_set_buf(asg, &esph->spi, sizeof(__be32)); | 265 | |
249 | *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); | 266 | seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + |
250 | sg_set_buf(asg + 1, seqhi, seqhilen); | 267 | ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); |
251 | sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); | 268 | |
252 | } else | 269 | memset(iv, 0, ivlen); |
253 | sg_init_one(asg, esph, sizeof(*esph)); | 270 | memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8), |
254 | 271 | min(ivlen, 8)); | |
255 | aead_givcrypt_set_callback(req, 0, esp_output_done, skb); | ||
256 | aead_givcrypt_set_crypt(req, sg, sg, clen, iv); | ||
257 | aead_givcrypt_set_assoc(req, asg, assoclen); | ||
258 | aead_givcrypt_set_giv(req, esph->enc_data, | ||
259 | XFRM_SKB_CB(skb)->seq.output.low + | ||
260 | ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); | ||
261 | 272 | ||
262 | ESP_SKB_CB(skb)->tmp = tmp; | 273 | ESP_SKB_CB(skb)->tmp = tmp; |
263 | err = crypto_aead_givencrypt(req); | 274 | err = crypto_aead_encrypt(req); |
264 | if (err == -EINPROGRESS) | 275 | |
276 | switch (err) { | ||
277 | case -EINPROGRESS: | ||
265 | goto error; | 278 | goto error; |
266 | 279 | ||
267 | if (err == -EBUSY) | 280 | case -EBUSY: |
268 | err = NET_XMIT_DROP; | 281 | err = NET_XMIT_DROP; |
282 | break; | ||
283 | |||
284 | case 0: | ||
285 | if ((x->props.flags & XFRM_STATE_ESN)) | ||
286 | esp_output_restore_header(skb); | ||
287 | } | ||
269 | 288 | ||
270 | kfree(tmp); | 289 | kfree(tmp); |
271 | 290 | ||
@@ -364,6 +383,20 @@ static void esp_input_done(struct crypto_async_request *base, int err) | |||
364 | xfrm_input_resume(skb, esp_input_done2(skb, err)); | 383 | xfrm_input_resume(skb, esp_input_done2(skb, err)); |
365 | } | 384 | } |
366 | 385 | ||
386 | static void esp_input_restore_header(struct sk_buff *skb) | ||
387 | { | ||
388 | esp_restore_header(skb, 0); | ||
389 | __skb_pull(skb, 4); | ||
390 | } | ||
391 | |||
392 | static void esp_input_done_esn(struct crypto_async_request *base, int err) | ||
393 | { | ||
394 | struct sk_buff *skb = base->data; | ||
395 | |||
396 | esp_input_restore_header(skb); | ||
397 | esp_input_done(base, err); | ||
398 | } | ||
399 | |||
367 | /* | 400 | /* |
368 | * Note: detecting truncated vs. non-truncated authentication data is very | 401 | * Note: detecting truncated vs. non-truncated authentication data is very |
369 | * expensive, so we only support truncated data, which is the recommended | 402 | * expensive, so we only support truncated data, which is the recommended |
@@ -375,19 +408,18 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
375 | struct crypto_aead *aead = x->data; | 408 | struct crypto_aead *aead = x->data; |
376 | struct aead_request *req; | 409 | struct aead_request *req; |
377 | struct sk_buff *trailer; | 410 | struct sk_buff *trailer; |
378 | int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); | 411 | int ivlen = crypto_aead_ivsize(aead); |
412 | int elen = skb->len - sizeof(*esph) - ivlen; | ||
379 | int nfrags; | 413 | int nfrags; |
380 | int assoclen; | 414 | int assoclen; |
381 | int sglists; | ||
382 | int seqhilen; | 415 | int seqhilen; |
383 | __be32 *seqhi; | 416 | __be32 *seqhi; |
384 | void *tmp; | 417 | void *tmp; |
385 | u8 *iv; | 418 | u8 *iv; |
386 | struct scatterlist *sg; | 419 | struct scatterlist *sg; |
387 | struct scatterlist *asg; | ||
388 | int err = -EINVAL; | 420 | int err = -EINVAL; |
389 | 421 | ||
390 | if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) | 422 | if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) |
391 | goto out; | 423 | goto out; |
392 | 424 | ||
393 | if (elen <= 0) | 425 | if (elen <= 0) |
@@ -400,17 +432,15 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
400 | nfrags = err; | 432 | nfrags = err; |
401 | 433 | ||
402 | assoclen = sizeof(*esph); | 434 | assoclen = sizeof(*esph); |
403 | sglists = 1; | ||
404 | seqhilen = 0; | 435 | seqhilen = 0; |
405 | 436 | ||
406 | if (x->props.flags & XFRM_STATE_ESN) { | 437 | if (x->props.flags & XFRM_STATE_ESN) { |
407 | sglists += 2; | ||
408 | seqhilen += sizeof(__be32); | 438 | seqhilen += sizeof(__be32); |
409 | assoclen += seqhilen; | 439 | assoclen += seqhilen; |
410 | } | 440 | } |
411 | 441 | ||
412 | err = -ENOMEM; | 442 | err = -ENOMEM; |
413 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 443 | tmp = esp_alloc_tmp(aead, nfrags, seqhilen); |
414 | if (!tmp) | 444 | if (!tmp) |
415 | goto out; | 445 | goto out; |
416 | 446 | ||
@@ -418,36 +448,39 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
418 | seqhi = esp_tmp_seqhi(tmp); | 448 | seqhi = esp_tmp_seqhi(tmp); |
419 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 449 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
420 | req = esp_tmp_req(aead, iv); | 450 | req = esp_tmp_req(aead, iv); |
421 | asg = esp_req_sg(aead, req); | 451 | sg = esp_req_sg(aead, req); |
422 | sg = asg + sglists; | ||
423 | 452 | ||
424 | skb->ip_summed = CHECKSUM_NONE; | 453 | skb->ip_summed = CHECKSUM_NONE; |
425 | 454 | ||
426 | esph = (struct ip_esp_hdr *)skb->data; | 455 | esph = (struct ip_esp_hdr *)skb->data; |
427 | 456 | ||
428 | /* Get ivec. This can be wrong, check against another impls. */ | 457 | aead_request_set_callback(req, 0, esp_input_done, skb); |
429 | iv = esph->enc_data; | ||
430 | |||
431 | sg_init_table(sg, nfrags); | ||
432 | skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); | ||
433 | 458 | ||
459 | /* For ESN we move the header forward by 4 bytes to | ||
460 | * accommodate the high bits. We will move it back after | ||
461 | * decryption. | ||
462 | */ | ||
434 | if ((x->props.flags & XFRM_STATE_ESN)) { | 463 | if ((x->props.flags & XFRM_STATE_ESN)) { |
435 | sg_init_table(asg, 3); | 464 | esph = (void *)skb_push(skb, 4); |
436 | sg_set_buf(asg, &esph->spi, sizeof(__be32)); | 465 | *seqhi = esph->spi; |
437 | *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; | 466 | esph->spi = esph->seq_no; |
438 | sg_set_buf(asg + 1, seqhi, seqhilen); | 467 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); |
439 | sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); | 468 | aead_request_set_callback(req, 0, esp_input_done_esn, skb); |
440 | } else | 469 | } |
441 | sg_init_one(asg, esph, sizeof(*esph)); | ||
442 | 470 | ||
443 | aead_request_set_callback(req, 0, esp_input_done, skb); | 471 | sg_init_table(sg, nfrags); |
444 | aead_request_set_crypt(req, sg, sg, elen, iv); | 472 | skb_to_sgvec(skb, sg, 0, skb->len); |
445 | aead_request_set_assoc(req, asg, assoclen); | 473 | |
474 | aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); | ||
475 | aead_request_set_ad(req, assoclen); | ||
446 | 476 | ||
447 | err = crypto_aead_decrypt(req); | 477 | err = crypto_aead_decrypt(req); |
448 | if (err == -EINPROGRESS) | 478 | if (err == -EINPROGRESS) |
449 | goto out; | 479 | goto out; |
450 | 480 | ||
481 | if ((x->props.flags & XFRM_STATE_ESN)) | ||
482 | esp_input_restore_header(skb); | ||
483 | |||
451 | err = esp_input_done2(skb, err); | 484 | err = esp_input_done2(skb, err); |
452 | 485 | ||
453 | out: | 486 | out: |
@@ -519,10 +552,16 @@ static void esp_destroy(struct xfrm_state *x) | |||
519 | 552 | ||
520 | static int esp_init_aead(struct xfrm_state *x) | 553 | static int esp_init_aead(struct xfrm_state *x) |
521 | { | 554 | { |
555 | char aead_name[CRYPTO_MAX_ALG_NAME]; | ||
522 | struct crypto_aead *aead; | 556 | struct crypto_aead *aead; |
523 | int err; | 557 | int err; |
524 | 558 | ||
525 | aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); | 559 | err = -ENAMETOOLONG; |
560 | if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", | ||
561 | x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
562 | goto error; | ||
563 | |||
564 | aead = crypto_alloc_aead(aead_name, 0, 0); | ||
526 | err = PTR_ERR(aead); | 565 | err = PTR_ERR(aead); |
527 | if (IS_ERR(aead)) | 566 | if (IS_ERR(aead)) |
528 | goto error; | 567 | goto error; |
@@ -561,15 +600,19 @@ static int esp_init_authenc(struct xfrm_state *x) | |||
561 | 600 | ||
562 | if ((x->props.flags & XFRM_STATE_ESN)) { | 601 | if ((x->props.flags & XFRM_STATE_ESN)) { |
563 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, | 602 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, |
564 | "authencesn(%s,%s)", | 603 | "%s%sauthencesn(%s,%s)%s", |
604 | x->geniv ?: "", x->geniv ? "(" : "", | ||
565 | x->aalg ? x->aalg->alg_name : "digest_null", | 605 | x->aalg ? x->aalg->alg_name : "digest_null", |
566 | x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) | 606 | x->ealg->alg_name, |
607 | x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) | ||
567 | goto error; | 608 | goto error; |
568 | } else { | 609 | } else { |
569 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, | 610 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, |
570 | "authenc(%s,%s)", | 611 | "%s%sauthenc(%s,%s)%s", |
612 | x->geniv ?: "", x->geniv ? "(" : "", | ||
571 | x->aalg ? x->aalg->alg_name : "digest_null", | 613 | x->aalg ? x->aalg->alg_name : "digest_null", |
572 | x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) | 614 | x->ealg->alg_name, |
615 | x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) | ||
573 | goto error; | 616 | goto error; |
574 | } | 617 | } |
575 | 618 | ||
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7c07ce36aae2..060a60b2f8a6 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -76,7 +76,7 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen) | |||
76 | len = ALIGN(len, crypto_tfm_ctx_alignment()); | 76 | len = ALIGN(len, crypto_tfm_ctx_alignment()); |
77 | } | 77 | } |
78 | 78 | ||
79 | len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); | 79 | len += sizeof(struct aead_request) + crypto_aead_reqsize(aead); |
80 | len = ALIGN(len, __alignof__(struct scatterlist)); | 80 | len = ALIGN(len, __alignof__(struct scatterlist)); |
81 | 81 | ||
82 | len += sizeof(struct scatterlist) * nfrags; | 82 | len += sizeof(struct scatterlist) * nfrags; |
@@ -96,17 +96,6 @@ static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) | |||
96 | crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; | 96 | crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline struct aead_givcrypt_request *esp_tmp_givreq( | ||
100 | struct crypto_aead *aead, u8 *iv) | ||
101 | { | ||
102 | struct aead_givcrypt_request *req; | ||
103 | |||
104 | req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), | ||
105 | crypto_tfm_ctx_alignment()); | ||
106 | aead_givcrypt_set_tfm(req, aead); | ||
107 | return req; | ||
108 | } | ||
109 | |||
110 | static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) | 99 | static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) |
111 | { | 100 | { |
112 | struct aead_request *req; | 101 | struct aead_request *req; |
@@ -125,14 +114,6 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, | |||
125 | __alignof__(struct scatterlist)); | 114 | __alignof__(struct scatterlist)); |
126 | } | 115 | } |
127 | 116 | ||
128 | static inline struct scatterlist *esp_givreq_sg( | ||
129 | struct crypto_aead *aead, struct aead_givcrypt_request *req) | ||
130 | { | ||
131 | return (void *)ALIGN((unsigned long)(req + 1) + | ||
132 | crypto_aead_reqsize(aead), | ||
133 | __alignof__(struct scatterlist)); | ||
134 | } | ||
135 | |||
136 | static void esp_output_done(struct crypto_async_request *base, int err) | 117 | static void esp_output_done(struct crypto_async_request *base, int err) |
137 | { | 118 | { |
138 | struct sk_buff *skb = base->data; | 119 | struct sk_buff *skb = base->data; |
@@ -141,32 +122,57 @@ static void esp_output_done(struct crypto_async_request *base, int err) | |||
141 | xfrm_output_resume(skb, err); | 122 | xfrm_output_resume(skb, err); |
142 | } | 123 | } |
143 | 124 | ||
125 | /* Move ESP header back into place. */ | ||
126 | static void esp_restore_header(struct sk_buff *skb, unsigned int offset) | ||
127 | { | ||
128 | struct ip_esp_hdr *esph = (void *)(skb->data + offset); | ||
129 | void *tmp = ESP_SKB_CB(skb)->tmp; | ||
130 | __be32 *seqhi = esp_tmp_seqhi(tmp); | ||
131 | |||
132 | esph->seq_no = esph->spi; | ||
133 | esph->spi = *seqhi; | ||
134 | } | ||
135 | |||
136 | static void esp_output_restore_header(struct sk_buff *skb) | ||
137 | { | ||
138 | esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32)); | ||
139 | } | ||
140 | |||
141 | static void esp_output_done_esn(struct crypto_async_request *base, int err) | ||
142 | { | ||
143 | struct sk_buff *skb = base->data; | ||
144 | |||
145 | esp_output_restore_header(skb); | ||
146 | esp_output_done(base, err); | ||
147 | } | ||
148 | |||
144 | static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | 149 | static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) |
145 | { | 150 | { |
146 | int err; | 151 | int err; |
147 | struct ip_esp_hdr *esph; | 152 | struct ip_esp_hdr *esph; |
148 | struct crypto_aead *aead; | 153 | struct crypto_aead *aead; |
149 | struct aead_givcrypt_request *req; | 154 | struct aead_request *req; |
150 | struct scatterlist *sg; | 155 | struct scatterlist *sg; |
151 | struct scatterlist *asg; | ||
152 | struct sk_buff *trailer; | 156 | struct sk_buff *trailer; |
153 | void *tmp; | 157 | void *tmp; |
154 | int blksize; | 158 | int blksize; |
155 | int clen; | 159 | int clen; |
156 | int alen; | 160 | int alen; |
157 | int plen; | 161 | int plen; |
162 | int ivlen; | ||
158 | int tfclen; | 163 | int tfclen; |
159 | int nfrags; | 164 | int nfrags; |
160 | int assoclen; | 165 | int assoclen; |
161 | int sglists; | ||
162 | int seqhilen; | 166 | int seqhilen; |
163 | u8 *iv; | 167 | u8 *iv; |
164 | u8 *tail; | 168 | u8 *tail; |
165 | __be32 *seqhi; | 169 | __be32 *seqhi; |
170 | __be64 seqno; | ||
166 | 171 | ||
167 | /* skb is pure payload to encrypt */ | 172 | /* skb is pure payload to encrypt */ |
168 | aead = x->data; | 173 | aead = x->data; |
169 | alen = crypto_aead_authsize(aead); | 174 | alen = crypto_aead_authsize(aead); |
175 | ivlen = crypto_aead_ivsize(aead); | ||
170 | 176 | ||
171 | tfclen = 0; | 177 | tfclen = 0; |
172 | if (x->tfcpad) { | 178 | if (x->tfcpad) { |
@@ -187,16 +193,14 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
187 | nfrags = err; | 193 | nfrags = err; |
188 | 194 | ||
189 | assoclen = sizeof(*esph); | 195 | assoclen = sizeof(*esph); |
190 | sglists = 1; | ||
191 | seqhilen = 0; | 196 | seqhilen = 0; |
192 | 197 | ||
193 | if (x->props.flags & XFRM_STATE_ESN) { | 198 | if (x->props.flags & XFRM_STATE_ESN) { |
194 | sglists += 2; | ||
195 | seqhilen += sizeof(__be32); | 199 | seqhilen += sizeof(__be32); |
196 | assoclen += seqhilen; | 200 | assoclen += seqhilen; |
197 | } | 201 | } |
198 | 202 | ||
199 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 203 | tmp = esp_alloc_tmp(aead, nfrags, seqhilen); |
200 | if (!tmp) { | 204 | if (!tmp) { |
201 | err = -ENOMEM; | 205 | err = -ENOMEM; |
202 | goto error; | 206 | goto error; |
@@ -204,9 +208,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
204 | 208 | ||
205 | seqhi = esp_tmp_seqhi(tmp); | 209 | seqhi = esp_tmp_seqhi(tmp); |
206 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 210 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
207 | req = esp_tmp_givreq(aead, iv); | 211 | req = esp_tmp_req(aead, iv); |
208 | asg = esp_givreq_sg(aead, req); | 212 | sg = esp_req_sg(aead, req); |
209 | sg = asg + sglists; | ||
210 | 213 | ||
211 | /* Fill padding... */ | 214 | /* Fill padding... */ |
212 | tail = skb_tail_pointer(trailer); | 215 | tail = skb_tail_pointer(trailer); |
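All four pointers above are carved out of a single allocation. A condensed sketch of the sizing logic, in the spirit of esp_alloc_tmp() after this change (the helper name, the simplified alignment handling and the GFP flag are assumptions of the sketch):

#include <crypto/aead.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * One buffer, carved in order:
 *
 *   tmp -> [ seqhi (seqhilen) ][ IV, alignmask-aligned ]
 *          [ struct aead_request + crypto_aead_reqsize(aead) ]
 *          [ struct scatterlist sg[nfrags] ]
 *
 * The separate givcrypt request and the extra "asg" entries no longer
 * need to be budgeted for.
 */
static void *example_alloc_tmp(struct crypto_aead *aead, int nfrags,
			       int seqhilen)
{
	unsigned int len = seqhilen;

	if (crypto_aead_ivsize(aead)) {
		len = ALIGN(len, crypto_aead_alignmask(aead) + 1);
		len += crypto_aead_ivsize(aead);
	}

	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}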
@@ -227,37 +230,53 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
227 | esph = ip_esp_hdr(skb); | 230 | esph = ip_esp_hdr(skb); |
228 | *skb_mac_header(skb) = IPPROTO_ESP; | 231 | *skb_mac_header(skb) = IPPROTO_ESP; |
229 | 232 | ||
230 | esph->spi = x->id.spi; | ||
231 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); | 233 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); |
232 | 234 | ||
235 | aead_request_set_callback(req, 0, esp_output_done, skb); | ||
236 | |||
237 | /* For ESN we move the header forward by 4 bytes to | ||
238 | * accommodate the high bits. We will move it back after | ||
239 | * encryption. | ||
240 | */ | ||
241 | if ((x->props.flags & XFRM_STATE_ESN)) { | ||
242 | esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); | ||
243 | *seqhi = esph->spi; | ||
244 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); | ||
245 | aead_request_set_callback(req, 0, esp_output_done_esn, skb); | ||
246 | } | ||
247 | |||
248 | esph->spi = x->id.spi; | ||
249 | |||
233 | sg_init_table(sg, nfrags); | 250 | sg_init_table(sg, nfrags); |
234 | skb_to_sgvec(skb, sg, | 251 | skb_to_sgvec(skb, sg, |
235 | esph->enc_data + crypto_aead_ivsize(aead) - skb->data, | 252 | (unsigned char *)esph - skb->data, |
236 | clen + alen); | 253 | assoclen + ivlen + clen + alen); |
237 | 254 | ||
238 | if ((x->props.flags & XFRM_STATE_ESN)) { | 255 | aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); |
239 | sg_init_table(asg, 3); | 256 | aead_request_set_ad(req, assoclen); |
240 | sg_set_buf(asg, &esph->spi, sizeof(__be32)); | 257 | |
241 | *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi); | 258 | seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + |
242 | sg_set_buf(asg + 1, seqhi, seqhilen); | 259 | ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); |
243 | sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); | 260 | |
244 | } else | 261 | memset(iv, 0, ivlen); |
245 | sg_init_one(asg, esph, sizeof(*esph)); | 262 | memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8), |
246 | 263 | min(ivlen, 8)); | |
247 | aead_givcrypt_set_callback(req, 0, esp_output_done, skb); | ||
248 | aead_givcrypt_set_crypt(req, sg, sg, clen, iv); | ||
249 | aead_givcrypt_set_assoc(req, asg, assoclen); | ||
250 | aead_givcrypt_set_giv(req, esph->enc_data, | ||
251 | XFRM_SKB_CB(skb)->seq.output.low + | ||
252 | ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); | ||
253 | 264 | ||
254 | ESP_SKB_CB(skb)->tmp = tmp; | 265 | ESP_SKB_CB(skb)->tmp = tmp; |
255 | err = crypto_aead_givencrypt(req); | 266 | err = crypto_aead_encrypt(req); |
256 | if (err == -EINPROGRESS) | 267 | |
268 | switch (err) { | ||
269 | case -EINPROGRESS: | ||
257 | goto error; | 270 | goto error; |
258 | 271 | ||
259 | if (err == -EBUSY) | 272 | case -EBUSY: |
260 | err = NET_XMIT_DROP; | 273 | err = NET_XMIT_DROP; |
274 | break; | ||
275 | |||
276 | case 0: | ||
277 | if ((x->props.flags & XFRM_STATE_ESN)) | ||
278 | esp_output_restore_header(skb); | ||
279 | } | ||
261 | 280 | ||
262 | kfree(tmp); | 281 | kfree(tmp); |
263 | 282 | ||
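The memset()/memcpy() pair replaces the old aead_givcrypt_set_giv(): the caller now packs the 64-bit (ESN-extended) sequence number into the trailing bytes of the IV and lets the geniv template consume it. A sketch of the packing, with an illustrative helper name:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void example_fill_iv(u8 *iv, unsigned int ivlen, u32 lo, u32 hi)
{
	__be64 seqno = cpu_to_be64(((u64)hi << 32) + lo);
	unsigned int n = min_t(unsigned int, ivlen, 8);

	memset(iv, 0, ivlen);
	/* Right-align the big-endian counter: an 8-byte IV takes the
	 * whole counter, a shorter one keeps only its low-order bytes. */
	memcpy(iv + ivlen - n, (u8 *)&seqno + 8 - n, n);
}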
@@ -318,25 +337,38 @@ static void esp_input_done(struct crypto_async_request *base, int err) | |||
318 | xfrm_input_resume(skb, esp_input_done2(skb, err)); | 337 | xfrm_input_resume(skb, esp_input_done2(skb, err)); |
319 | } | 338 | } |
320 | 339 | ||
340 | static void esp_input_restore_header(struct sk_buff *skb) | ||
341 | { | ||
342 | esp_restore_header(skb, 0); | ||
343 | __skb_pull(skb, 4); | ||
344 | } | ||
345 | |||
346 | static void esp_input_done_esn(struct crypto_async_request *base, int err) | ||
347 | { | ||
348 | struct sk_buff *skb = base->data; | ||
349 | |||
350 | esp_input_restore_header(skb); | ||
351 | esp_input_done(base, err); | ||
352 | } | ||
353 | |||
321 | static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) | 354 | static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) |
322 | { | 355 | { |
323 | struct ip_esp_hdr *esph; | 356 | struct ip_esp_hdr *esph; |
324 | struct crypto_aead *aead = x->data; | 357 | struct crypto_aead *aead = x->data; |
325 | struct aead_request *req; | 358 | struct aead_request *req; |
326 | struct sk_buff *trailer; | 359 | struct sk_buff *trailer; |
327 | int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); | 360 | int ivlen = crypto_aead_ivsize(aead); |
361 | int elen = skb->len - sizeof(*esph) - ivlen; | ||
328 | int nfrags; | 362 | int nfrags; |
329 | int assoclen; | 363 | int assoclen; |
330 | int sglists; | ||
331 | int seqhilen; | 364 | int seqhilen; |
332 | int ret = 0; | 365 | int ret = 0; |
333 | void *tmp; | 366 | void *tmp; |
334 | __be32 *seqhi; | 367 | __be32 *seqhi; |
335 | u8 *iv; | 368 | u8 *iv; |
336 | struct scatterlist *sg; | 369 | struct scatterlist *sg; |
337 | struct scatterlist *asg; | ||
338 | 370 | ||
339 | if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) { | 371 | if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) { |
340 | ret = -EINVAL; | 372 | ret = -EINVAL; |
341 | goto out; | 373 | goto out; |
342 | } | 374 | } |
@@ -355,16 +387,14 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
355 | ret = -ENOMEM; | 387 | ret = -ENOMEM; |
356 | 388 | ||
357 | assoclen = sizeof(*esph); | 389 | assoclen = sizeof(*esph); |
358 | sglists = 1; | ||
359 | seqhilen = 0; | 390 | seqhilen = 0; |
360 | 391 | ||
361 | if (x->props.flags & XFRM_STATE_ESN) { | 392 | if (x->props.flags & XFRM_STATE_ESN) { |
362 | sglists += 2; | ||
363 | seqhilen += sizeof(__be32); | 393 | seqhilen += sizeof(__be32); |
364 | assoclen += seqhilen; | 394 | assoclen += seqhilen; |
365 | } | 395 | } |
366 | 396 | ||
367 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 397 | tmp = esp_alloc_tmp(aead, nfrags, seqhilen); |
368 | if (!tmp) | 398 | if (!tmp) |
369 | goto out; | 399 | goto out; |
370 | 400 | ||
@@ -372,36 +402,39 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
372 | seqhi = esp_tmp_seqhi(tmp); | 402 | seqhi = esp_tmp_seqhi(tmp); |
373 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 403 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
374 | req = esp_tmp_req(aead, iv); | 404 | req = esp_tmp_req(aead, iv); |
375 | asg = esp_req_sg(aead, req); | 405 | sg = esp_req_sg(aead, req); |
376 | sg = asg + sglists; | ||
377 | 406 | ||
378 | skb->ip_summed = CHECKSUM_NONE; | 407 | skb->ip_summed = CHECKSUM_NONE; |
379 | 408 | ||
380 | esph = (struct ip_esp_hdr *)skb->data; | 409 | esph = (struct ip_esp_hdr *)skb->data; |
381 | 410 | ||
382 | /* Get ivec. This can be wrong, check against another impls. */ | 411 | aead_request_set_callback(req, 0, esp_input_done, skb); |
383 | iv = esph->enc_data; | ||
384 | |||
385 | sg_init_table(sg, nfrags); | ||
386 | skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); | ||
387 | 412 | ||
413 | /* For ESN we move the header forward by 4 bytes to | ||
414 | * accommodate the high bits. We will move it back after | ||
415 | * decryption. | ||
416 | */ | ||
388 | if ((x->props.flags & XFRM_STATE_ESN)) { | 417 | if ((x->props.flags & XFRM_STATE_ESN)) { |
389 | sg_init_table(asg, 3); | 418 | esph = (void *)skb_push(skb, 4); |
390 | sg_set_buf(asg, &esph->spi, sizeof(__be32)); | 419 | *seqhi = esph->spi; |
391 | *seqhi = XFRM_SKB_CB(skb)->seq.input.hi; | 420 | esph->spi = esph->seq_no; |
392 | sg_set_buf(asg + 1, seqhi, seqhilen); | 421 | esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); |
393 | sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32)); | 422 | aead_request_set_callback(req, 0, esp_input_done_esn, skb); |
394 | } else | 423 | } |
395 | sg_init_one(asg, esph, sizeof(*esph)); | ||
396 | 424 | ||
397 | aead_request_set_callback(req, 0, esp_input_done, skb); | 425 | sg_init_table(sg, nfrags); |
398 | aead_request_set_crypt(req, sg, sg, elen, iv); | 426 | skb_to_sgvec(skb, sg, 0, skb->len); |
399 | aead_request_set_assoc(req, asg, assoclen); | 427 | |
428 | aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); | ||
429 | aead_request_set_ad(req, assoclen); | ||
400 | 430 | ||
401 | ret = crypto_aead_decrypt(req); | 431 | ret = crypto_aead_decrypt(req); |
402 | if (ret == -EINPROGRESS) | 432 | if (ret == -EINPROGRESS) |
403 | goto out; | 433 | goto out; |
404 | 434 | ||
435 | if ((x->props.flags & XFRM_STATE_ESN)) | ||
436 | esp_input_restore_header(skb); | ||
437 | |||
405 | ret = esp_input_done2(skb, ret); | 438 | ret = esp_input_done2(skb, ret); |
406 | 439 | ||
407 | out: | 440 | out: |
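The skb_push() dance above is easiest to follow as a byte-layout picture. A hedged sketch ("save" marks the 4 clobbered bytes of the preceding header, parked in *seqhi until esp_input_restore_header() runs):

/*
 *   on entry:         skb->data -> [ spi ][ seq_lo ][ IV | ct | ICV ]
 *   skb_push(skb, 4): skb->data -> [ save ][ spi ][ seq_lo ][ ... ]
 *   after shuffle:    skb->data -> [ spi ][ seq_hi ][ seq_lo ][ ... ]
 *                                  |<----- assoclen == 12 ----->|
 *
 * The transform authenticates spi || seq_hi || seq_lo in place, with
 * no extra scatterlist entries; once decryption completes, the restore
 * helper swaps the words back and pulls the 4 bytes off again.
 */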
@@ -461,10 +494,16 @@ static void esp6_destroy(struct xfrm_state *x) | |||
461 | 494 | ||
462 | static int esp_init_aead(struct xfrm_state *x) | 495 | static int esp_init_aead(struct xfrm_state *x) |
463 | { | 496 | { |
497 | char aead_name[CRYPTO_MAX_ALG_NAME]; | ||
464 | struct crypto_aead *aead; | 498 | struct crypto_aead *aead; |
465 | int err; | 499 | int err; |
466 | 500 | ||
467 | aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); | 501 | err = -ENAMETOOLONG; |
502 | if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", | ||
503 | x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
504 | goto error; | ||
505 | |||
506 | aead = crypto_alloc_aead(aead_name, 0, 0); | ||
468 | err = PTR_ERR(aead); | 507 | err = PTR_ERR(aead); |
469 | if (IS_ERR(aead)) | 508 | if (IS_ERR(aead)) |
470 | goto error; | 509 | goto error; |
@@ -503,15 +542,19 @@ static int esp_init_authenc(struct xfrm_state *x) | |||
503 | 542 | ||
504 | if ((x->props.flags & XFRM_STATE_ESN)) { | 543 | if ((x->props.flags & XFRM_STATE_ESN)) { |
505 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, | 544 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, |
506 | "authencesn(%s,%s)", | 545 | "%s%sauthencesn(%s,%s)%s", |
546 | x->geniv ?: "", x->geniv ? "(" : "", | ||
507 | x->aalg ? x->aalg->alg_name : "digest_null", | 547 | x->aalg ? x->aalg->alg_name : "digest_null", |
508 | x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) | 548 | x->ealg->alg_name, |
549 | x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) | ||
509 | goto error; | 550 | goto error; |
510 | } else { | 551 | } else { |
511 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, | 552 | if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, |
512 | "authenc(%s,%s)", | 553 | "%s%sauthenc(%s,%s)%s", |
554 | x->geniv ?: "", x->geniv ? "(" : "", | ||
513 | x->aalg ? x->aalg->alg_name : "digest_null", | 555 | x->aalg ? x->aalg->alg_name : "digest_null", |
514 | x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) | 556 | x->ealg->alg_name, |
557 | x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) | ||
515 | goto error; | 558 | goto error; |
516 | } | 559 | } |
517 | 560 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index f0d52d721b3a..3c5b8ce38ef4 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -1190,6 +1190,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, | |||
1190 | memcpy(x->ealg->alg_key, key+1, keysize); | 1190 | memcpy(x->ealg->alg_key, key+1, keysize); |
1191 | } | 1191 | } |
1192 | x->props.ealgo = sa->sadb_sa_encrypt; | 1192 | x->props.ealgo = sa->sadb_sa_encrypt; |
1193 | x->geniv = a->uinfo.encr.geniv; | ||
1193 | } | 1194 | } |
1194 | } | 1195 | } |
1195 | /* x->algo.flags = sa->sadb_sa_flags; */ | 1196 | /* x->algo.flags = sa->sadb_sa_flags; */ |
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index 208df7c0b6ea..7663c28ba353 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c | |||
@@ -11,9 +11,8 @@ | |||
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/crypto.h> | ||
15 | #include <linux/err.h> | 14 | #include <linux/err.h> |
16 | #include <crypto/aes.h> | 15 | #include <crypto/aead.h> |
17 | 16 | ||
18 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
19 | #include "key.h" | 18 | #include "key.h" |
@@ -23,7 +22,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
23 | u8 *data, size_t data_len, u8 *mic, | 22 | u8 *data, size_t data_len, u8 *mic, |
24 | size_t mic_len) | 23 | size_t mic_len) |
25 | { | 24 | { |
26 | struct scatterlist assoc, pt, ct[2]; | 25 | struct scatterlist sg[3]; |
27 | 26 | ||
28 | char aead_req_data[sizeof(struct aead_request) + | 27 | char aead_req_data[sizeof(struct aead_request) + |
29 | crypto_aead_reqsize(tfm)] | 28 | crypto_aead_reqsize(tfm)] |
@@ -32,15 +31,14 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
32 | 31 | ||
33 | memset(aead_req, 0, sizeof(aead_req_data)); | 32 | memset(aead_req, 0, sizeof(aead_req_data)); |
34 | 33 | ||
35 | sg_init_one(&pt, data, data_len); | 34 | sg_init_table(sg, 3); |
36 | sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); | 35 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); |
37 | sg_init_table(ct, 2); | 36 | sg_set_buf(&sg[1], data, data_len); |
38 | sg_set_buf(&ct[0], data, data_len); | 37 | sg_set_buf(&sg[2], mic, mic_len); |
39 | sg_set_buf(&ct[1], mic, mic_len); | ||
40 | 38 | ||
41 | aead_request_set_tfm(aead_req, tfm); | 39 | aead_request_set_tfm(aead_req, tfm); |
42 | aead_request_set_assoc(aead_req, &assoc, assoc.length); | 40 | aead_request_set_crypt(aead_req, sg, sg, data_len, b_0); |
43 | aead_request_set_crypt(aead_req, &pt, ct, data_len, b_0); | 41 | aead_request_set_ad(aead_req, sg[0].length); |
44 | 42 | ||
45 | crypto_aead_encrypt(aead_req); | 43 | crypto_aead_encrypt(aead_req); |
46 | } | 44 | } |
@@ -49,7 +47,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
49 | u8 *data, size_t data_len, u8 *mic, | 47 | u8 *data, size_t data_len, u8 *mic, |
50 | size_t mic_len) | 48 | size_t mic_len) |
51 | { | 49 | { |
52 | struct scatterlist assoc, pt, ct[2]; | 50 | struct scatterlist sg[3]; |
53 | char aead_req_data[sizeof(struct aead_request) + | 51 | char aead_req_data[sizeof(struct aead_request) + |
54 | crypto_aead_reqsize(tfm)] | 52 | crypto_aead_reqsize(tfm)] |
55 | __aligned(__alignof__(struct aead_request)); | 53 | __aligned(__alignof__(struct aead_request)); |
@@ -60,15 +58,14 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
60 | 58 | ||
61 | memset(aead_req, 0, sizeof(aead_req_data)); | 59 | memset(aead_req, 0, sizeof(aead_req_data)); |
62 | 60 | ||
63 | sg_init_one(&pt, data, data_len); | 61 | sg_init_table(sg, 3); |
64 | sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); | 62 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); |
65 | sg_init_table(ct, 2); | 63 | sg_set_buf(&sg[1], data, data_len); |
66 | sg_set_buf(&ct[0], data, data_len); | 64 | sg_set_buf(&sg[2], mic, mic_len); |
67 | sg_set_buf(&ct[1], mic, mic_len); | ||
68 | 65 | ||
69 | aead_request_set_tfm(aead_req, tfm); | 66 | aead_request_set_tfm(aead_req, tfm); |
70 | aead_request_set_assoc(aead_req, &assoc, assoc.length); | 67 | aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); |
71 | aead_request_set_crypt(aead_req, ct, &pt, data_len + mic_len, b_0); | 68 | aead_request_set_ad(aead_req, sg[0].length); |
72 | 69 | ||
73 | return crypto_aead_decrypt(aead_req); | 70 | return crypto_aead_decrypt(aead_req); |
74 | } | 71 | } |
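Both hunks reduce to the same calling convention: associated data, payload and MIC sit in one scatterlist, and aead_request_set_ad() marks how much of it is AD. A self-contained, hedged sketch of that convention (the algorithm choice, helper name and the synchronous-only mask are illustrative, not taken from the patch):

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_ccm_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
			       u8 *aad, unsigned int aadlen,
			       u8 *data, unsigned int datalen,
			       u8 *mic, unsigned int miclen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg[3];
	int err;

	/* CRYPTO_ALG_ASYNC in the mask selects a synchronous
	 * implementation, as the mac80211 key setup does. */
	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, miclen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* One scatterlist for everything: [ AD | plaintext | MIC ]. */
	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], aad, aadlen);
	sg_set_buf(&sg[1], data, datalen);
	sg_set_buf(&sg[2], mic, miclen);

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, sg, sg, datalen, iv);
	aead_request_set_ad(req, aadlen);

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}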
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c index fd278bbe1b0d..3afe361fd27c 100644 --- a/net/mac80211/aes_gcm.c +++ b/net/mac80211/aes_gcm.c | |||
@@ -8,9 +8,8 @@ | |||
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/crypto.h> | ||
12 | #include <linux/err.h> | 11 | #include <linux/err.h> |
13 | #include <crypto/aes.h> | 12 | #include <crypto/aead.h> |
14 | 13 | ||
15 | #include <net/mac80211.h> | 14 | #include <net/mac80211.h> |
16 | #include "key.h" | 15 | #include "key.h" |
@@ -19,7 +18,7 @@ | |||
19 | void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | 18 | void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, |
20 | u8 *data, size_t data_len, u8 *mic) | 19 | u8 *data, size_t data_len, u8 *mic) |
21 | { | 20 | { |
22 | struct scatterlist assoc, pt, ct[2]; | 21 | struct scatterlist sg[3]; |
23 | 22 | ||
24 | char aead_req_data[sizeof(struct aead_request) + | 23 | char aead_req_data[sizeof(struct aead_request) + |
25 | crypto_aead_reqsize(tfm)] | 24 | crypto_aead_reqsize(tfm)] |
@@ -28,15 +27,14 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | |||
28 | 27 | ||
29 | memset(aead_req, 0, sizeof(aead_req_data)); | 28 | memset(aead_req, 0, sizeof(aead_req_data)); |
30 | 29 | ||
31 | sg_init_one(&pt, data, data_len); | 30 | sg_init_table(sg, 3); |
32 | sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); | 31 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); |
33 | sg_init_table(ct, 2); | 32 | sg_set_buf(&sg[1], data, data_len); |
34 | sg_set_buf(&ct[0], data, data_len); | 33 | sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); |
35 | sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN); | ||
36 | 34 | ||
37 | aead_request_set_tfm(aead_req, tfm); | 35 | aead_request_set_tfm(aead_req, tfm); |
38 | aead_request_set_assoc(aead_req, &assoc, assoc.length); | 36 | aead_request_set_crypt(aead_req, sg, sg, data_len, j_0); |
39 | aead_request_set_crypt(aead_req, &pt, ct, data_len, j_0); | 37 | aead_request_set_ad(aead_req, sg[0].length); |
40 | 38 | ||
41 | crypto_aead_encrypt(aead_req); | 39 | crypto_aead_encrypt(aead_req); |
42 | } | 40 | } |
@@ -44,7 +42,7 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | |||
44 | int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | 42 | int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, |
45 | u8 *data, size_t data_len, u8 *mic) | 43 | u8 *data, size_t data_len, u8 *mic) |
46 | { | 44 | { |
47 | struct scatterlist assoc, pt, ct[2]; | 45 | struct scatterlist sg[3]; |
48 | char aead_req_data[sizeof(struct aead_request) + | 46 | char aead_req_data[sizeof(struct aead_request) + |
49 | crypto_aead_reqsize(tfm)] | 47 | crypto_aead_reqsize(tfm)] |
50 | __aligned(__alignof__(struct aead_request)); | 48 | __aligned(__alignof__(struct aead_request)); |
@@ -55,16 +53,15 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | |||
55 | 53 | ||
56 | memset(aead_req, 0, sizeof(aead_req_data)); | 54 | memset(aead_req, 0, sizeof(aead_req_data)); |
57 | 55 | ||
58 | sg_init_one(&pt, data, data_len); | 56 | sg_init_table(sg, 3); |
59 | sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad)); | 57 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); |
60 | sg_init_table(ct, 2); | 58 | sg_set_buf(&sg[1], data, data_len); |
61 | sg_set_buf(&ct[0], data, data_len); | 59 | sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); |
62 | sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN); | ||
63 | 60 | ||
64 | aead_request_set_tfm(aead_req, tfm); | 61 | aead_request_set_tfm(aead_req, tfm); |
65 | aead_request_set_assoc(aead_req, &assoc, assoc.length); | 62 | aead_request_set_crypt(aead_req, sg, sg, |
66 | aead_request_set_crypt(aead_req, ct, &pt, | ||
67 | data_len + IEEE80211_GCMP_MIC_LEN, j_0); | 63 | data_len + IEEE80211_GCMP_MIC_LEN, j_0); |
64 | aead_request_set_ad(aead_req, sg[0].length); | ||
68 | 65 | ||
69 | return crypto_aead_decrypt(aead_req); | 66 | return crypto_aead_decrypt(aead_req); |
70 | } | 67 | } |
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c index f1321b7d6506..3ddd927aaf30 100644 --- a/net/mac80211/aes_gmac.c +++ b/net/mac80211/aes_gmac.c | |||
@@ -9,8 +9,8 @@ | |||
9 | 9 | ||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/crypto.h> | ||
13 | #include <linux/err.h> | 12 | #include <linux/err.h> |
13 | #include <crypto/aead.h> | ||
14 | #include <crypto/aes.h> | 14 | #include <crypto/aes.h> |
15 | 15 | ||
16 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
@@ -24,7 +24,7 @@ | |||
24 | int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, | 24 | int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, |
25 | const u8 *data, size_t data_len, u8 *mic) | 25 | const u8 *data, size_t data_len, u8 *mic) |
26 | { | 26 | { |
27 | struct scatterlist sg[3], ct[1]; | 27 | struct scatterlist sg[4]; |
28 | char aead_req_data[sizeof(struct aead_request) + | 28 | char aead_req_data[sizeof(struct aead_request) + |
29 | crypto_aead_reqsize(tfm)] | 29 | crypto_aead_reqsize(tfm)] |
30 | __aligned(__alignof__(struct aead_request)); | 30 | __aligned(__alignof__(struct aead_request)); |
@@ -37,21 +37,19 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, | |||
37 | memset(aead_req, 0, sizeof(aead_req_data)); | 37 | memset(aead_req, 0, sizeof(aead_req_data)); |
38 | 38 | ||
39 | memset(zero, 0, GMAC_MIC_LEN); | 39 | memset(zero, 0, GMAC_MIC_LEN); |
40 | sg_init_table(sg, 3); | 40 | sg_init_table(sg, 4); |
41 | sg_set_buf(&sg[0], aad, AAD_LEN); | 41 | sg_set_buf(&sg[0], aad, AAD_LEN); |
42 | sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); | 42 | sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); |
43 | sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); | 43 | sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); |
44 | sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); | ||
44 | 45 | ||
45 | memcpy(iv, nonce, GMAC_NONCE_LEN); | 46 | memcpy(iv, nonce, GMAC_NONCE_LEN); |
46 | memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN); | 47 | memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN); |
47 | iv[AES_BLOCK_SIZE - 1] = 0x01; | 48 | iv[AES_BLOCK_SIZE - 1] = 0x01; |
48 | 49 | ||
49 | sg_init_table(ct, 1); | ||
50 | sg_set_buf(&ct[0], mic, GMAC_MIC_LEN); | ||
51 | |||
52 | aead_request_set_tfm(aead_req, tfm); | 50 | aead_request_set_tfm(aead_req, tfm); |
53 | aead_request_set_assoc(aead_req, sg, AAD_LEN + data_len); | 51 | aead_request_set_crypt(aead_req, sg, sg, 0, iv); |
54 | aead_request_set_crypt(aead_req, NULL, ct, 0, iv); | 52 | aead_request_set_ad(aead_req, AAD_LEN + data_len); |
55 | 53 | ||
56 | crypto_aead_encrypt(aead_req); | 54 | crypto_aead_encrypt(aead_req); |
57 | 55 | ||
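The GMAC conversion leans on a corner case of the new interface: with cryptlen == 0 the entire source is associated data, so "gcm(aes)" degenerates into pure authentication and encryption merely emits the tag. A minimal sketch, assuming a synchronously allocated "gcm(aes)" tfm and a 16-byte tag (helper name illustrative):

#include <crypto/aead.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_gmac(struct crypto_aead *tfm, u8 *iv,
			u8 *buf, unsigned int buflen, u8 *tag)
{
	struct scatterlist sg[2];
	struct aead_request *req;
	int err;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* All of buf is AD; the transform only appends a tag to dst. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], buf, buflen);
	sg_set_buf(&sg[1], tag, 16);

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, sg, sg, 0, iv);
	aead_request_set_ad(req, buflen);

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
	return err;
}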
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 5b2be12832e6..985e9394e2af 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c | |||
@@ -17,8 +17,9 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/bug.h> | 18 | #include <linux/bug.h> |
19 | #include <linux/completion.h> | 19 | #include <linux/completion.h> |
20 | #include <linux/crypto.h> | ||
20 | #include <linux/ieee802154.h> | 21 | #include <linux/ieee802154.h> |
21 | #include <crypto/algapi.h> | 22 | #include <crypto/aead.h> |
22 | 23 | ||
23 | #include "ieee802154_i.h" | 24 | #include "ieee802154_i.h" |
24 | #include "llsec.h" | 25 | #include "llsec.h" |
@@ -649,7 +650,7 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, | |||
649 | u8 iv[16]; | 650 | u8 iv[16]; |
650 | unsigned char *data; | 651 | unsigned char *data; |
651 | int authlen, assoclen, datalen, rc; | 652 | int authlen, assoclen, datalen, rc; |
652 | struct scatterlist src, assoc[2], dst[2]; | 653 | struct scatterlist sg; |
653 | struct aead_request *req; | 654 | struct aead_request *req; |
654 | 655 | ||
655 | authlen = ieee802154_sechdr_authtag_len(&hdr->sec); | 656 | authlen = ieee802154_sechdr_authtag_len(&hdr->sec); |
@@ -659,30 +660,23 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, | |||
659 | if (!req) | 660 | if (!req) |
660 | return -ENOMEM; | 661 | return -ENOMEM; |
661 | 662 | ||
662 | sg_init_table(assoc, 2); | ||
663 | sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len); | ||
664 | assoclen = skb->mac_len; | 663 | assoclen = skb->mac_len; |
665 | 664 | ||
666 | data = skb_mac_header(skb) + skb->mac_len; | 665 | data = skb_mac_header(skb) + skb->mac_len; |
667 | datalen = skb_tail_pointer(skb) - data; | 666 | datalen = skb_tail_pointer(skb) - data; |
668 | 667 | ||
669 | if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) { | 668 | skb_put(skb, authlen); |
670 | sg_set_buf(&assoc[1], data, 0); | 669 | |
671 | } else { | 670 | sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen + authlen); |
672 | sg_set_buf(&assoc[1], data, datalen); | 671 | |
672 | if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) { | ||
673 | assoclen += datalen; | 673 | assoclen += datalen; |
674 | datalen = 0; | 674 | datalen = 0; |
675 | } | 675 | } |
676 | 676 | ||
677 | sg_init_one(&src, data, datalen); | ||
678 | |||
679 | sg_init_table(dst, 2); | ||
680 | sg_set_buf(&dst[0], data, datalen); | ||
681 | sg_set_buf(&dst[1], skb_put(skb, authlen), authlen); | ||
682 | |||
683 | aead_request_set_callback(req, 0, NULL, NULL); | 677 | aead_request_set_callback(req, 0, NULL, NULL); |
684 | aead_request_set_assoc(req, assoc, assoclen); | 678 | aead_request_set_crypt(req, &sg, &sg, datalen, iv); |
685 | aead_request_set_crypt(req, &src, dst, datalen, iv); | 679 | aead_request_set_ad(req, assoclen); |
686 | 680 | ||
687 | rc = crypto_aead_encrypt(req); | 681 | rc = crypto_aead_encrypt(req); |
688 | 682 | ||
@@ -858,7 +852,7 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, | |||
858 | u8 iv[16]; | 852 | u8 iv[16]; |
859 | unsigned char *data; | 853 | unsigned char *data; |
860 | int authlen, datalen, assoclen, rc; | 854 | int authlen, datalen, assoclen, rc; |
861 | struct scatterlist src, assoc[2]; | 855 | struct scatterlist sg; |
862 | struct aead_request *req; | 856 | struct aead_request *req; |
863 | 857 | ||
864 | authlen = ieee802154_sechdr_authtag_len(&hdr->sec); | 858 | authlen = ieee802154_sechdr_authtag_len(&hdr->sec); |
@@ -868,27 +862,21 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, | |||
868 | if (!req) | 862 | if (!req) |
869 | return -ENOMEM; | 863 | return -ENOMEM; |
870 | 864 | ||
871 | sg_init_table(assoc, 2); | ||
872 | sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len); | ||
873 | assoclen = skb->mac_len; | 865 | assoclen = skb->mac_len; |
874 | 866 | ||
875 | data = skb_mac_header(skb) + skb->mac_len; | 867 | data = skb_mac_header(skb) + skb->mac_len; |
876 | datalen = skb_tail_pointer(skb) - data; | 868 | datalen = skb_tail_pointer(skb) - data; |
877 | 869 | ||
878 | if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) { | 870 | sg_init_one(&sg, skb_mac_header(skb), assoclen + datalen); |
879 | sg_set_buf(&assoc[1], data, 0); | 871 | |
880 | } else { | 872 | if (!(hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC)) { |
881 | sg_set_buf(&assoc[1], data, datalen - authlen); | ||
882 | assoclen += datalen - authlen; | 873 | assoclen += datalen - authlen; |
883 | data += datalen - authlen; | ||
884 | datalen = authlen; | 874 | datalen = authlen; |
885 | } | 875 | } |
886 | 876 | ||
887 | sg_init_one(&src, data, datalen); | ||
888 | |||
889 | aead_request_set_callback(req, 0, NULL, NULL); | 877 | aead_request_set_callback(req, 0, NULL, NULL); |
890 | aead_request_set_assoc(req, assoc, assoclen); | 878 | aead_request_set_crypt(req, &sg, &sg, datalen, iv); |
891 | aead_request_set_crypt(req, &src, &src, datalen, iv); | 879 | aead_request_set_ad(req, assoclen); |
892 | 880 | ||
893 | rc = crypto_aead_decrypt(req); | 881 | rc = crypto_aead_decrypt(req); |
894 | 882 | ||
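llsec_do_decrypt_auth() shows the mirror-image rule: on the decrypt side cryptlen must also cover the authentication tag, and a failed verification surfaces as -EBADMSG from crypto_aead_decrypt(). A hedged sketch, assuming header, ciphertext and tag sit contiguously in a single buffer (helper name illustrative):

#include <crypto/aead.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_aead_decrypt(struct crypto_aead *tfm, u8 *iv, u8 *buf,
				unsigned int hdrlen, unsigned int ctlen,
				unsigned int taglen)
{
	struct scatterlist sg;
	struct aead_request *req;
	int err;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* [ header | ciphertext | tag ] is contiguous, so one entry
	 * covers associated data and crypt region alike. */
	sg_init_one(&sg, buf, hdrlen + ctlen + taglen);

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, ctlen + taglen, iv);
	aead_request_set_ad(req, hdrlen);

	err = crypto_aead_decrypt(req);	/* -EBADMSG on a bad tag */
	aead_request_free(req);
	return err;
}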
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 12e82a5e4ad5..42f7c76cf853 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -31,6 +31,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
31 | 31 | ||
32 | .uinfo = { | 32 | .uinfo = { |
33 | .aead = { | 33 | .aead = { |
34 | .geniv = "seqniv", | ||
34 | .icv_truncbits = 64, | 35 | .icv_truncbits = 64, |
35 | } | 36 | } |
36 | }, | 37 | }, |
@@ -49,6 +50,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
49 | 50 | ||
50 | .uinfo = { | 51 | .uinfo = { |
51 | .aead = { | 52 | .aead = { |
53 | .geniv = "seqniv", | ||
52 | .icv_truncbits = 96, | 54 | .icv_truncbits = 96, |
53 | } | 55 | } |
54 | }, | 56 | }, |
@@ -67,6 +69,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
67 | 69 | ||
68 | .uinfo = { | 70 | .uinfo = { |
69 | .aead = { | 71 | .aead = { |
72 | .geniv = "seqniv", | ||
70 | .icv_truncbits = 128, | 73 | .icv_truncbits = 128, |
71 | } | 74 | } |
72 | }, | 75 | }, |
@@ -85,6 +88,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
85 | 88 | ||
86 | .uinfo = { | 89 | .uinfo = { |
87 | .aead = { | 90 | .aead = { |
91 | .geniv = "seqniv", | ||
88 | .icv_truncbits = 64, | 92 | .icv_truncbits = 64, |
89 | } | 93 | } |
90 | }, | 94 | }, |
@@ -103,6 +107,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
103 | 107 | ||
104 | .uinfo = { | 108 | .uinfo = { |
105 | .aead = { | 109 | .aead = { |
110 | .geniv = "seqniv", | ||
106 | .icv_truncbits = 96, | 111 | .icv_truncbits = 96, |
107 | } | 112 | } |
108 | }, | 113 | }, |
@@ -121,6 +126,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
121 | 126 | ||
122 | .uinfo = { | 127 | .uinfo = { |
123 | .aead = { | 128 | .aead = { |
129 | .geniv = "seqniv", | ||
124 | .icv_truncbits = 128, | 130 | .icv_truncbits = 128, |
125 | } | 131 | } |
126 | }, | 132 | }, |
@@ -139,6 +145,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
139 | 145 | ||
140 | .uinfo = { | 146 | .uinfo = { |
141 | .aead = { | 147 | .aead = { |
148 | .geniv = "seqiv", | ||
142 | .icv_truncbits = 128, | 149 | .icv_truncbits = 128, |
143 | } | 150 | } |
144 | }, | 151 | }, |
@@ -152,6 +159,18 @@ static struct xfrm_algo_desc aead_list[] = { | |||
152 | .sadb_alg_maxbits = 256 | 159 | .sadb_alg_maxbits = 256 |
153 | } | 160 | } |
154 | }, | 161 | }, |
162 | { | ||
163 | .name = "rfc7539esp(chacha20,poly1305)", | ||
164 | |||
165 | .uinfo = { | ||
166 | .aead = { | ||
167 | .geniv = "seqniv", | ||
168 | .icv_truncbits = 128, | ||
169 | } | ||
170 | }, | ||
171 | |||
172 | .pfkey_supported = 0, | ||
173 | }, | ||
155 | }; | 174 | }; |
156 | 175 | ||
157 | static struct xfrm_algo_desc aalg_list[] = { | 176 | static struct xfrm_algo_desc aalg_list[] = { |
@@ -353,6 +372,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
353 | 372 | ||
354 | .uinfo = { | 373 | .uinfo = { |
355 | .encr = { | 374 | .encr = { |
375 | .geniv = "echainiv", | ||
356 | .blockbits = 64, | 376 | .blockbits = 64, |
357 | .defkeybits = 64, | 377 | .defkeybits = 64, |
358 | } | 378 | } |
@@ -373,6 +393,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
373 | 393 | ||
374 | .uinfo = { | 394 | .uinfo = { |
375 | .encr = { | 395 | .encr = { |
396 | .geniv = "echainiv", | ||
376 | .blockbits = 64, | 397 | .blockbits = 64, |
377 | .defkeybits = 192, | 398 | .defkeybits = 192, |
378 | } | 399 | } |
@@ -393,6 +414,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
393 | 414 | ||
394 | .uinfo = { | 415 | .uinfo = { |
395 | .encr = { | 416 | .encr = { |
417 | .geniv = "echainiv", | ||
396 | .blockbits = 64, | 418 | .blockbits = 64, |
397 | .defkeybits = 128, | 419 | .defkeybits = 128, |
398 | } | 420 | } |
@@ -413,6 +435,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
413 | 435 | ||
414 | .uinfo = { | 436 | .uinfo = { |
415 | .encr = { | 437 | .encr = { |
438 | .geniv = "echainiv", | ||
416 | .blockbits = 64, | 439 | .blockbits = 64, |
417 | .defkeybits = 128, | 440 | .defkeybits = 128, |
418 | } | 441 | } |
@@ -433,6 +456,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
433 | 456 | ||
434 | .uinfo = { | 457 | .uinfo = { |
435 | .encr = { | 458 | .encr = { |
459 | .geniv = "echainiv", | ||
436 | .blockbits = 128, | 460 | .blockbits = 128, |
437 | .defkeybits = 128, | 461 | .defkeybits = 128, |
438 | } | 462 | } |
@@ -453,6 +477,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
453 | 477 | ||
454 | .uinfo = { | 478 | .uinfo = { |
455 | .encr = { | 479 | .encr = { |
480 | .geniv = "echainiv", | ||
456 | .blockbits = 128, | 481 | .blockbits = 128, |
457 | .defkeybits = 128, | 482 | .defkeybits = 128, |
458 | } | 483 | } |
@@ -473,6 +498,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
473 | 498 | ||
474 | .uinfo = { | 499 | .uinfo = { |
475 | .encr = { | 500 | .encr = { |
501 | .geniv = "echainiv", | ||
476 | .blockbits = 128, | 502 | .blockbits = 128, |
477 | .defkeybits = 128, | 503 | .defkeybits = 128, |
478 | } | 504 | } |
@@ -493,6 +519,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
493 | 519 | ||
494 | .uinfo = { | 520 | .uinfo = { |
495 | .encr = { | 521 | .encr = { |
522 | .geniv = "echainiv", | ||
496 | .blockbits = 128, | 523 | .blockbits = 128, |
497 | .defkeybits = 128, | 524 | .defkeybits = 128, |
498 | } | 525 | } |
@@ -512,6 +539,7 @@ static struct xfrm_algo_desc ealg_list[] = { | |||
512 | 539 | ||
513 | .uinfo = { | 540 | .uinfo = { |
514 | .encr = { | 541 | .encr = { |
542 | .geniv = "seqiv", | ||
515 | .blockbits = 128, | 543 | .blockbits = 128, |
516 | .defkeybits = 160, /* 128-bit key + 32-bit nonce */ | 544 | .defkeybits = 160, /* 128-bit key + 32-bit nonce */ |
517 | } | 545 | } |
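Together with the esp_init_aead()/esp_init_authenc() changes earlier in the series, the .geniv defaults added in this file decide which template ends up wrapping each SA's transform. A hedged summary of the mapping, with illustrative inner algorithm names (the specific entry-to-algorithm pairing is inferred, not spelled out in the hunks above):

/*
 * .geniv default   typical entries              resulting lookup (example)
 * "seqniv"         rfc4106/rfc4309/rfc7539esp   "seqniv(rfc4106(gcm(aes)))"
 * "seqiv"          rfc4543, rfc3686(ctr(aes))   "seqiv(rfc4543(gcm(aes)))"
 * "echainiv"       the CBC-mode ealgs           "echainiv(authenc(hmac(sha1),cbc(aes)))"
 *
 * seqiv and seqniv derive each packet's IV from its sequence number,
 * which suits counter-style modes; echainiv generates the
 * unpredictable IVs that CBC-style ciphers require.
 */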
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2091664295ba..bd16c6c7e1e7 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -289,6 +289,31 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, | |||
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | 291 | ||
292 | static int attach_crypt(struct xfrm_state *x, struct nlattr *rta) | ||
293 | { | ||
294 | struct xfrm_algo *p, *ualg; | ||
295 | struct xfrm_algo_desc *algo; | ||
296 | |||
297 | if (!rta) | ||
298 | return 0; | ||
299 | |||
300 | ualg = nla_data(rta); | ||
301 | |||
302 | algo = xfrm_ealg_get_byname(ualg->alg_name, 1); | ||
303 | if (!algo) | ||
304 | return -ENOSYS; | ||
305 | x->props.ealgo = algo->desc.sadb_alg_id; | ||
306 | |||
307 | p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); | ||
308 | if (!p) | ||
309 | return -ENOMEM; | ||
310 | |||
311 | strcpy(p->alg_name, algo->name); | ||
312 | x->ealg = p; | ||
313 | x->geniv = algo->uinfo.encr.geniv; | ||
314 | return 0; | ||
315 | } | ||
316 | |||
292 | static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, | 317 | static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, |
293 | struct nlattr *rta) | 318 | struct nlattr *rta) |
294 | { | 319 | { |
@@ -349,8 +374,7 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, | |||
349 | return 0; | 374 | return 0; |
350 | } | 375 | } |
351 | 376 | ||
352 | static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props, | 377 | static int attach_aead(struct xfrm_state *x, struct nlattr *rta) |
353 | struct nlattr *rta) | ||
354 | { | 378 | { |
355 | struct xfrm_algo_aead *p, *ualg; | 379 | struct xfrm_algo_aead *p, *ualg; |
356 | struct xfrm_algo_desc *algo; | 380 | struct xfrm_algo_desc *algo; |
@@ -363,14 +387,15 @@ static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props, | |||
363 | algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); | 387 | algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); |
364 | if (!algo) | 388 | if (!algo) |
365 | return -ENOSYS; | 389 | return -ENOSYS; |
366 | *props = algo->desc.sadb_alg_id; | 390 | x->props.ealgo = algo->desc.sadb_alg_id; |
367 | 391 | ||
368 | p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); | 392 | p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); |
369 | if (!p) | 393 | if (!p) |
370 | return -ENOMEM; | 394 | return -ENOMEM; |
371 | 395 | ||
372 | strcpy(p->alg_name, algo->name); | 396 | strcpy(p->alg_name, algo->name); |
373 | *algpp = p; | 397 | x->aead = p; |
398 | x->geniv = algo->uinfo.aead.geniv; | ||
374 | return 0; | 399 | return 0; |
375 | } | 400 | } |
376 | 401 | ||
@@ -515,8 +540,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, | |||
515 | if (attrs[XFRMA_SA_EXTRA_FLAGS]) | 540 | if (attrs[XFRMA_SA_EXTRA_FLAGS]) |
516 | x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); | 541 | x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); |
517 | 542 | ||
518 | if ((err = attach_aead(&x->aead, &x->props.ealgo, | 543 | if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD]))) |
519 | attrs[XFRMA_ALG_AEAD]))) | ||
520 | goto error; | 544 | goto error; |
521 | if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, | 545 | if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, |
522 | attrs[XFRMA_ALG_AUTH_TRUNC]))) | 546 | attrs[XFRMA_ALG_AUTH_TRUNC]))) |
@@ -526,9 +550,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, | |||
526 | attrs[XFRMA_ALG_AUTH]))) | 550 | attrs[XFRMA_ALG_AUTH]))) |
527 | goto error; | 551 | goto error; |
528 | } | 552 | } |
529 | if ((err = attach_one_algo(&x->ealg, &x->props.ealgo, | 553 | if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT]))) |
530 | xfrm_ealg_get_byname, | ||
531 | attrs[XFRMA_ALG_CRYPT]))) | ||
532 | goto error; | 554 | goto error; |
533 | if ((err = attach_one_algo(&x->calg, &x->props.calgo, | 555 | if ((err = attach_one_algo(&x->calg, &x->props.calgo, |
534 | xfrm_calg_get_byname, | 556 | xfrm_calg_get_byname, |