diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-08-31 20:38:39 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-08-31 20:38:39 -0400 |
| commit | d4c90396ed7ef9b4e4d221e008e54be8bea8307f (patch) | |
| tree | 5611f1f27eec16edfeb6a3fd73a8ef7dbfd037b4 | |
| parent | f36fc04e4cdda9e4c72ee504e7dc638f9a168863 (diff) | |
| parent | bf433416e67597ba105ece55b3136557874945db (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.3:
API:
- the AEAD interface transition is now complete.
- add top-level skcipher interface.
Drivers:
- x86-64 acceleration for chacha20/poly1305.
- add sunxi-ss Allwinner Security System crypto accelerator.
- add RSA algorithm to qat driver.
- add SRIOV support to qat driver.
- add LS1021A support to caam.
- add i.MX6 support to caam"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (163 commits)
crypto: algif_aead - fix for multiple operations on AF_ALG sockets
crypto: qat - enable legacy VFs
MPI: Fix mpi_read_buffer
crypto: qat - silence a static checker warning
crypto: vmx - Fixing opcode issue
crypto: caam - Use the preferred style for memory allocations
crypto: caam - Propagate the real error code in caam_probe
crypto: caam - Fix the error handling in caam_probe
crypto: caam - fix writing to JQCR_MS when using service interface
crypto: hash - Add AHASH_REQUEST_ON_STACK
crypto: testmgr - Use new skcipher interface
crypto: skcipher - Add top-level skcipher interface
crypto: cmac - allow usage in FIPS mode
crypto: sahara - Use dmam_alloc_coherent
crypto: caam - Add support for LS1021A
crypto: qat - Don't move data inside output buffer
crypto: vmx - Fixing GHASH Key issue on little endian
crypto: vmx - Fixing AES-CTR counter bug
crypto: null - Add missing Kconfig tristate for NULL2
crypto: nx - Add forward declaration for struct crypto_aead
...
154 files changed, 15624 insertions, 7328 deletions
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl index 0992531ffefb..07df23ea06e4 100644 --- a/Documentation/DocBook/crypto-API.tmpl +++ b/Documentation/DocBook/crypto-API.tmpl | |||
| @@ -585,7 +585,7 @@ kernel crypto API | IPSEC Layer | |||
| 585 | +-----------+ | | 585 | +-----------+ | |
| 586 | | | (1) | 586 | | | (1) |
| 587 | | aead | <----------------------------------- esp_output | 587 | | aead | <----------------------------------- esp_output |
| 588 | | (seqniv) | ---+ | 588 | | (seqiv) | ---+ |
| 589 | +-----------+ | | 589 | +-----------+ | |
| 590 | | (2) | 590 | | (2) |
| 591 | +-----------+ | | 591 | +-----------+ | |
| @@ -1101,7 +1101,7 @@ kernel crypto API | Caller | |||
| 1101 | </para> | 1101 | </para> |
| 1102 | 1102 | ||
| 1103 | <para> | 1103 | <para> |
| 1104 | [1] http://www.chronox.de/libkcapi.html | 1104 | [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink> |
| 1105 | </para> | 1105 | </para> |
| 1106 | 1106 | ||
| 1107 | </sect1> | 1107 | </sect1> |
| @@ -1661,7 +1661,7 @@ read(opfd, out, outlen); | |||
| 1661 | </para> | 1661 | </para> |
| 1662 | 1662 | ||
| 1663 | <para> | 1663 | <para> |
| 1664 | [1] http://www.chronox.de/libkcapi.html | 1664 | [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink> |
| 1665 | </para> | 1665 | </para> |
| 1666 | 1666 | ||
| 1667 | </sect1> | 1667 | </sect1> |
| @@ -1687,7 +1687,7 @@ read(opfd, out, outlen); | |||
| 1687 | !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions | 1687 | !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions |
| 1688 | !Finclude/linux/crypto.h crypto_alg | 1688 | !Finclude/linux/crypto.h crypto_alg |
| 1689 | !Finclude/linux/crypto.h ablkcipher_alg | 1689 | !Finclude/linux/crypto.h ablkcipher_alg |
| 1690 | !Finclude/linux/crypto.h aead_alg | 1690 | !Finclude/crypto/aead.h aead_alg |
| 1691 | !Finclude/linux/crypto.h blkcipher_alg | 1691 | !Finclude/linux/crypto.h blkcipher_alg |
| 1692 | !Finclude/linux/crypto.h cipher_alg | 1692 | !Finclude/linux/crypto.h cipher_alg |
| 1693 | !Finclude/crypto/rng.h rng_alg | 1693 | !Finclude/crypto/rng.h rng_alg |
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt index e4022776ac6e..100307304766 100644 --- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt +++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt | |||
| @@ -106,6 +106,18 @@ PROPERTIES | |||
| 106 | to the interrupt parent to which the child domain | 106 | to the interrupt parent to which the child domain |
| 107 | is being mapped. | 107 | is being mapped. |
| 108 | 108 | ||
| 109 | - clocks | ||
| 110 | Usage: required if SEC 4.0 requires explicit enablement of clocks | ||
| 111 | Value type: <prop_encoded-array> | ||
| 112 | Definition: A list of phandle and clock specifier pairs describing | ||
| 113 | the clocks required for enabling and disabling SEC 4.0. | ||
| 114 | |||
| 115 | - clock-names | ||
| 116 | Usage: required if SEC 4.0 requires explicit enablement of clocks | ||
| 117 | Value type: <string> | ||
| 118 | Definition: A list of clock name strings in the same order as the | ||
| 119 | clocks property. | ||
| 120 | |||
| 109 | Note: All other standard properties (see the ePAPR) are allowed | 121 | Note: All other standard properties (see the ePAPR) are allowed |
| 110 | but are optional. | 122 | but are optional. |
| 111 | 123 | ||
| @@ -120,6 +132,11 @@ EXAMPLE | |||
| 120 | ranges = <0 0x300000 0x10000>; | 132 | ranges = <0 0x300000 0x10000>; |
| 121 | interrupt-parent = <&mpic>; | 133 | interrupt-parent = <&mpic>; |
| 122 | interrupts = <92 2>; | 134 | interrupts = <92 2>; |
| 135 | clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, | ||
| 136 | <&clks IMX6QDL_CLK_CAAM_ACLK>, | ||
| 137 | <&clks IMX6QDL_CLK_CAAM_IPG>, | ||
| 138 | <&clks IMX6QDL_CLK_EIM_SLOW>; | ||
| 139 | clock-names = "mem", "aclk", "ipg", "emi_slow"; | ||
| 123 | }; | 140 | }; |
| 124 | 141 | ||
| 125 | ===================================================================== | 142 | ===================================================================== |
diff --git a/Documentation/devicetree/bindings/crypto/sun4i-ss.txt b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt new file mode 100644 index 000000000000..5d38e9b7033f --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | * Allwinner Security System found on A20 SoC | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible : Should be "allwinner,sun4i-a10-crypto". | ||
| 5 | - reg: Should contain the Security System register location and length. | ||
| 6 | - interrupts: Should contain the IRQ line for the Security System. | ||
| 7 | - clocks : List of clock specifiers, corresponding to ahb and ss. | ||
| 8 | - clock-names : Name of the functional clock, should be | ||
| 9 | * "ahb" : AHB gating clock | ||
| 10 | * "mod" : SS controller clock | ||
| 11 | |||
| 12 | Optional properties: | ||
| 13 | - resets : phandle + reset specifier pair | ||
| 14 | - reset-names : must contain "ahb" | ||
| 15 | |||
| 16 | Example: | ||
| 17 | crypto: crypto-engine@01c15000 { | ||
| 18 | compatible = "allwinner,sun4i-a10-crypto"; | ||
| 19 | reg = <0x01c15000 0x1000>; | ||
| 20 | interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; | ||
| 21 | clocks = <&ahb_gates 5>, <&ss_clk>; | ||
| 22 | clock-names = "ahb", "mod"; | ||
| 23 | }; | ||
diff --git a/MAINTAINERS b/MAINTAINERS index cdcd9c5d3c0e..e16584a5091a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -556,6 +556,12 @@ S: Maintained | |||
| 556 | F: Documentation/i2c/busses/i2c-ali1563 | 556 | F: Documentation/i2c/busses/i2c-ali1563 |
| 557 | F: drivers/i2c/busses/i2c-ali1563.c | 557 | F: drivers/i2c/busses/i2c-ali1563.c |
| 558 | 558 | ||
| 559 | ALLWINNER SECURITY SYSTEM | ||
| 560 | M: Corentin Labbe <clabbe.montjoie@gmail.com> | ||
| 561 | L: linux-crypto@vger.kernel.org | ||
| 562 | S: Maintained | ||
| 563 | F: drivers/crypto/sunxi-ss/ | ||
| 564 | |||
| 559 | ALPHA PORT | 565 | ALPHA PORT |
| 560 | M: Richard Henderson <rth@twiddle.net> | 566 | M: Richard Henderson <rth@twiddle.net> |
| 561 | M: Ivan Kokshaysky <ink@jurassic.park.msu.ru> | 567 | M: Ivan Kokshaysky <ink@jurassic.park.msu.ru> |
| @@ -5078,9 +5084,21 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git | |||
| 5078 | S: Maintained | 5084 | S: Maintained |
| 5079 | F: arch/ia64/ | 5085 | F: arch/ia64/ |
| 5080 | 5086 | ||
| 5087 | IBM Power VMX Cryptographic instructions | ||
| 5088 | M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> | ||
| 5089 | M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com> | ||
| 5090 | L: linux-crypto@vger.kernel.org | ||
| 5091 | S: Supported | ||
| 5092 | F: drivers/crypto/vmx/Makefile | ||
| 5093 | F: drivers/crypto/vmx/Kconfig | ||
| 5094 | F: drivers/crypto/vmx/vmx.c | ||
| 5095 | F: drivers/crypto/vmx/aes* | ||
| 5096 | F: drivers/crypto/vmx/ghash* | ||
| 5097 | F: drivers/crypto/vmx/ppc-xlate.pl | ||
| 5098 | |||
| 5081 | IBM Power in-Nest Crypto Acceleration | 5099 | IBM Power in-Nest Crypto Acceleration |
| 5082 | M: Marcelo Henrique Cerri <mhcerri@linux.vnet.ibm.com> | 5100 | M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> |
| 5083 | M: Fionnuala Gunter <fin@linux.vnet.ibm.com> | 5101 | M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com> |
| 5084 | L: linux-crypto@vger.kernel.org | 5102 | L: linux-crypto@vger.kernel.org |
| 5085 | S: Supported | 5103 | S: Supported |
| 5086 | F: drivers/crypto/nx/Makefile | 5104 | F: drivers/crypto/nx/Makefile |
| @@ -5092,7 +5110,7 @@ F: drivers/crypto/nx/nx_csbcpb.h | |||
| 5092 | F: drivers/crypto/nx/nx_debugfs.h | 5110 | F: drivers/crypto/nx/nx_debugfs.h |
| 5093 | 5111 | ||
| 5094 | IBM Power 842 compression accelerator | 5112 | IBM Power 842 compression accelerator |
| 5095 | M: Dan Streetman <ddstreet@us.ibm.com> | 5113 | M: Dan Streetman <ddstreet@ieee.org> |
| 5096 | S: Supported | 5114 | S: Supported |
| 5097 | F: drivers/crypto/nx/Makefile | 5115 | F: drivers/crypto/nx/Makefile |
| 5098 | F: drivers/crypto/nx/Kconfig | 5116 | F: drivers/crypto/nx/Kconfig |
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index b57033e8c633..10d0b26c93f1 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi | |||
| @@ -836,10 +836,31 @@ | |||
| 836 | reg = <0x02100000 0x100000>; | 836 | reg = <0x02100000 0x100000>; |
| 837 | ranges; | 837 | ranges; |
| 838 | 838 | ||
| 839 | caam@02100000 { | 839 | crypto: caam@2100000 { |
| 840 | reg = <0x02100000 0x40000>; | 840 | compatible = "fsl,sec-v4.0"; |
| 841 | interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>, | 841 | fsl,sec-era = <4>; |
| 842 | <0 106 IRQ_TYPE_LEVEL_HIGH>; | 842 | #address-cells = <1>; |
| 843 | #size-cells = <1>; | ||
| 844 | reg = <0x2100000 0x10000>; | ||
| 845 | ranges = <0 0x2100000 0x10000>; | ||
| 846 | interrupt-parent = <&intc>; | ||
| 847 | clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, | ||
| 848 | <&clks IMX6QDL_CLK_CAAM_ACLK>, | ||
| 849 | <&clks IMX6QDL_CLK_CAAM_IPG>, | ||
| 850 | <&clks IMX6QDL_CLK_EIM_SLOW>; | ||
| 851 | clock-names = "mem", "aclk", "ipg", "emi_slow"; | ||
| 852 | |||
| 853 | sec_jr0: jr0@1000 { | ||
| 854 | compatible = "fsl,sec-v4.0-job-ring"; | ||
| 855 | reg = <0x1000 0x1000>; | ||
| 856 | interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; | ||
| 857 | }; | ||
| 858 | |||
| 859 | sec_jr1: jr1@2000 { | ||
| 860 | compatible = "fsl,sec-v4.0-job-ring"; | ||
| 861 | reg = <0x2000 0x1000>; | ||
| 862 | interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; | ||
| 863 | }; | ||
| 843 | }; | 864 | }; |
| 844 | 865 | ||
| 845 | aipstz@0217c000 { /* AIPSTZ2 */ | 866 | aipstz@0217c000 { /* AIPSTZ2 */ |
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 708175d59b9c..e6223d8e79af 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi | |||
| @@ -738,6 +738,33 @@ | |||
| 738 | reg = <0x02100000 0x100000>; | 738 | reg = <0x02100000 0x100000>; |
| 739 | ranges; | 739 | ranges; |
| 740 | 740 | ||
| 741 | crypto: caam@2100000 { | ||
| 742 | compatible = "fsl,sec-v4.0"; | ||
| 743 | fsl,sec-era = <4>; | ||
| 744 | #address-cells = <1>; | ||
| 745 | #size-cells = <1>; | ||
| 746 | reg = <0x2100000 0x10000>; | ||
| 747 | ranges = <0 0x2100000 0x10000>; | ||
| 748 | interrupt-parent = <&intc>; | ||
| 749 | clocks = <&clks IMX6SX_CLK_CAAM_MEM>, | ||
| 750 | <&clks IMX6SX_CLK_CAAM_ACLK>, | ||
| 751 | <&clks IMX6SX_CLK_CAAM_IPG>, | ||
| 752 | <&clks IMX6SX_CLK_EIM_SLOW>; | ||
| 753 | clock-names = "mem", "aclk", "ipg", "emi_slow"; | ||
| 754 | |||
| 755 | sec_jr0: jr0@1000 { | ||
| 756 | compatible = "fsl,sec-v4.0-job-ring"; | ||
| 757 | reg = <0x1000 0x1000>; | ||
| 758 | interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; | ||
| 759 | }; | ||
| 760 | |||
| 761 | sec_jr1: jr1@2000 { | ||
| 762 | compatible = "fsl,sec-v4.0-job-ring"; | ||
| 763 | reg = <0x2000 0x1000>; | ||
| 764 | interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; | ||
| 765 | }; | ||
| 766 | }; | ||
| 767 | |||
| 741 | usbotg1: usb@02184000 { | 768 | usbotg1: usb@02184000 { |
| 742 | compatible = "fsl,imx6sx-usb", "fsl,imx27-usb"; | 769 | compatible = "fsl,imx6sx-usb", "fsl,imx27-usb"; |
| 743 | reg = <0x02184000 0x200>; | 770 | reg = <0x02184000 0x200>; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index ab0e131587bb..adaa57b7a943 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
| @@ -678,6 +678,14 @@ | |||
| 678 | status = "disabled"; | 678 | status = "disabled"; |
| 679 | }; | 679 | }; |
| 680 | 680 | ||
| 681 | crypto: crypto-engine@01c15000 { | ||
| 682 | compatible = "allwinner,sun4i-a10-crypto"; | ||
| 683 | reg = <0x01c15000 0x1000>; | ||
| 684 | interrupts = <86>; | ||
| 685 | clocks = <&ahb_gates 5>, <&ss_clk>; | ||
| 686 | clock-names = "ahb", "mod"; | ||
| 687 | }; | ||
| 688 | |||
| 681 | spi2: spi@01c17000 { | 689 | spi2: spi@01c17000 { |
| 682 | compatible = "allwinner,sun4i-a10-spi"; | 690 | compatible = "allwinner,sun4i-a10-spi"; |
| 683 | reg = <0x01c17000 0x1000>; | 691 | reg = <0x01c17000 0x1000>; |
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 3ec456fa03a4..e4d3484d97bd 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
| @@ -367,6 +367,14 @@ | |||
| 367 | "mmc3_sample"; | 367 | "mmc3_sample"; |
| 368 | }; | 368 | }; |
| 369 | 369 | ||
| 370 | ss_clk: clk@01c2009c { | ||
| 371 | #clock-cells = <0>; | ||
| 372 | compatible = "allwinner,sun4i-a10-mod0-clk"; | ||
| 373 | reg = <0x01c2009c 0x4>; | ||
| 374 | clocks = <&osc24M>, <&pll6 0>; | ||
| 375 | clock-output-names = "ss"; | ||
| 376 | }; | ||
| 377 | |||
| 370 | spi0_clk: clk@01c200a0 { | 378 | spi0_clk: clk@01c200a0 { |
| 371 | #clock-cells = <0>; | 379 | #clock-cells = <0>; |
| 372 | compatible = "allwinner,sun4i-a10-mod0-clk"; | 380 | compatible = "allwinner,sun4i-a10-mod0-clk"; |
| @@ -894,6 +902,16 @@ | |||
| 894 | #size-cells = <0>; | 902 | #size-cells = <0>; |
| 895 | }; | 903 | }; |
| 896 | 904 | ||
| 905 | crypto: crypto-engine@01c15000 { | ||
| 906 | compatible = "allwinner,sun4i-a10-crypto"; | ||
| 907 | reg = <0x01c15000 0x1000>; | ||
| 908 | interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; | ||
| 909 | clocks = <&ahb1_gates 5>, <&ss_clk>; | ||
| 910 | clock-names = "ahb", "mod"; | ||
| 911 | resets = <&ahb1_rst 5>; | ||
| 912 | reset-names = "ahb"; | ||
| 913 | }; | ||
| 914 | |||
| 897 | timer@01c60000 { | 915 | timer@01c60000 { |
| 898 | compatible = "allwinner,sun6i-a31-hstimer", | 916 | compatible = "allwinner,sun6i-a31-hstimer", |
| 899 | "allwinner,sun7i-a20-hstimer"; | 917 | "allwinner,sun7i-a20-hstimer"; |
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index ca0b01a96c52..d3b2f26417aa 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
| @@ -754,6 +754,14 @@ | |||
| 754 | status = "disabled"; | 754 | status = "disabled"; |
| 755 | }; | 755 | }; |
| 756 | 756 | ||
| 757 | crypto: crypto-engine@01c15000 { | ||
| 758 | compatible = "allwinner,sun4i-a10-crypto"; | ||
| 759 | reg = <0x01c15000 0x1000>; | ||
| 760 | interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; | ||
| 761 | clocks = <&ahb_gates 5>, <&ss_clk>; | ||
| 762 | clock-names = "ahb", "mod"; | ||
| 763 | }; | ||
| 764 | |||
| 757 | spi2: spi@01c17000 { | 765 | spi2: spi@01c17000 { |
| 758 | compatible = "allwinner,sun4i-a10-spi"; | 766 | compatible = "allwinner,sun4i-a10-spi"; |
| 759 | reg = <0x01c17000 0x1000>; | 767 | reg = <0x01c17000 0x1000>; |
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index b47863d49ac6..7569b391704e 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig | |||
| @@ -354,8 +354,7 @@ CONFIG_PROVE_LOCKING=y | |||
| 354 | # CONFIG_FTRACE is not set | 354 | # CONFIG_FTRACE is not set |
| 355 | # CONFIG_ARM_UNWIND is not set | 355 | # CONFIG_ARM_UNWIND is not set |
| 356 | CONFIG_SECURITYFS=y | 356 | CONFIG_SECURITYFS=y |
| 357 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 357 | CONFIG_CRYPTO_DEV_FSL_CAAM=y |
| 358 | # CONFIG_CRYPTO_HW is not set | ||
| 359 | CONFIG_CRC_CCITT=m | 358 | CONFIG_CRC_CCITT=m |
| 360 | CONFIG_CRC_T10DIF=y | 359 | CONFIG_CRC_T10DIF=y |
| 361 | CONFIG_CRC7=m | 360 | CONFIG_CRC7=m |
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore index 6231d36b3635..31e1f538df7d 100644 --- a/arch/arm/crypto/.gitignore +++ b/arch/arm/crypto/.gitignore | |||
| @@ -1 +1,3 @@ | |||
| 1 | aesbs-core.S | 1 | aesbs-core.S |
| 2 | sha256-core.S | ||
| 3 | sha512-core.S | ||
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 3303e8a7b837..f4bf2f2a014c 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c | |||
| @@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) | |||
| 124 | 124 | ||
| 125 | ce_aes_ccm_auth_data(mac, (u8 *)<ag, ltag.len, &macp, ctx->key_enc, | 125 | ce_aes_ccm_auth_data(mac, (u8 *)<ag, ltag.len, &macp, ctx->key_enc, |
| 126 | num_rounds(ctx)); | 126 | num_rounds(ctx)); |
| 127 | scatterwalk_start(&walk, req->assoc); | 127 | scatterwalk_start(&walk, req->src); |
| 128 | 128 | ||
| 129 | do { | 129 | do { |
| 130 | u32 n = scatterwalk_clamp(&walk, len); | 130 | u32 n = scatterwalk_clamp(&walk, len); |
| @@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req) | |||
| 151 | struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); | 151 | struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); |
| 152 | struct blkcipher_desc desc = { .info = req->iv }; | 152 | struct blkcipher_desc desc = { .info = req->iv }; |
| 153 | struct blkcipher_walk walk; | 153 | struct blkcipher_walk walk; |
| 154 | struct scatterlist srcbuf[2]; | ||
| 155 | struct scatterlist dstbuf[2]; | ||
| 156 | struct scatterlist *src; | ||
| 157 | struct scatterlist *dst; | ||
| 154 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; | 158 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; |
| 155 | u8 buf[AES_BLOCK_SIZE]; | 159 | u8 buf[AES_BLOCK_SIZE]; |
| 156 | u32 len = req->cryptlen; | 160 | u32 len = req->cryptlen; |
| @@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req) | |||
| 168 | /* preserve the original iv for the final round */ | 172 | /* preserve the original iv for the final round */ |
| 169 | memcpy(buf, req->iv, AES_BLOCK_SIZE); | 173 | memcpy(buf, req->iv, AES_BLOCK_SIZE); |
| 170 | 174 | ||
| 171 | blkcipher_walk_init(&walk, req->dst, req->src, len); | 175 | src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); |
| 176 | dst = src; | ||
| 177 | if (req->src != req->dst) | ||
| 178 | dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); | ||
| 179 | |||
| 180 | blkcipher_walk_init(&walk, dst, src, len); | ||
| 172 | err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, | 181 | err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, |
| 173 | AES_BLOCK_SIZE); | 182 | AES_BLOCK_SIZE); |
| 174 | 183 | ||
| @@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req) | |||
| 194 | return err; | 203 | return err; |
| 195 | 204 | ||
| 196 | /* copy authtag to end of dst */ | 205 | /* copy authtag to end of dst */ |
| 197 | scatterwalk_map_and_copy(mac, req->dst, req->cryptlen, | 206 | scatterwalk_map_and_copy(mac, dst, req->cryptlen, |
| 198 | crypto_aead_authsize(aead), 1); | 207 | crypto_aead_authsize(aead), 1); |
| 199 | 208 | ||
| 200 | return 0; | 209 | return 0; |
| @@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req) | |||
| 207 | unsigned int authsize = crypto_aead_authsize(aead); | 216 | unsigned int authsize = crypto_aead_authsize(aead); |
| 208 | struct blkcipher_desc desc = { .info = req->iv }; | 217 | struct blkcipher_desc desc = { .info = req->iv }; |
| 209 | struct blkcipher_walk walk; | 218 | struct blkcipher_walk walk; |
| 219 | struct scatterlist srcbuf[2]; | ||
| 220 | struct scatterlist dstbuf[2]; | ||
| 221 | struct scatterlist *src; | ||
| 222 | struct scatterlist *dst; | ||
| 210 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; | 223 | u8 __aligned(8) mac[AES_BLOCK_SIZE]; |
| 211 | u8 buf[AES_BLOCK_SIZE]; | 224 | u8 buf[AES_BLOCK_SIZE]; |
| 212 | u32 len = req->cryptlen - authsize; | 225 | u32 len = req->cryptlen - authsize; |
| @@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req) | |||
| 224 | /* preserve the original iv for the final round */ | 237 | /* preserve the original iv for the final round */ |
| 225 | memcpy(buf, req->iv, AES_BLOCK_SIZE); | 238 | memcpy(buf, req->iv, AES_BLOCK_SIZE); |
| 226 | 239 | ||
| 227 | blkcipher_walk_init(&walk, req->dst, req->src, len); | 240 | src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen); |
| 241 | dst = src; | ||
| 242 | if (req->src != req->dst) | ||
| 243 | dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen); | ||
| 244 | |||
| 245 | blkcipher_walk_init(&walk, dst, src, len); | ||
| 228 | err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, | 246 | err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, |
| 229 | AES_BLOCK_SIZE); | 247 | AES_BLOCK_SIZE); |
| 230 | 248 | ||
| @@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req) | |||
| 250 | return err; | 268 | return err; |
| 251 | 269 | ||
| 252 | /* compare calculated auth tag with the stored one */ | 270 | /* compare calculated auth tag with the stored one */ |
| 253 | scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize, | 271 | scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize, |
| 254 | authsize, 0); | 272 | authsize, 0); |
| 255 | 273 | ||
| 256 | if (memcmp(mac, buf, authsize)) | 274 | if (crypto_memneq(mac, buf, authsize)) |
| 257 | return -EBADMSG; | 275 | return -EBADMSG; |
| 258 | return 0; | 276 | return 0; |
| 259 | } | 277 | } |
| 260 | 278 | ||
| 261 | static struct crypto_alg ccm_aes_alg = { | 279 | static struct aead_alg ccm_aes_alg = { |
| 262 | .cra_name = "ccm(aes)", | 280 | .base = { |
| 263 | .cra_driver_name = "ccm-aes-ce", | 281 | .cra_name = "ccm(aes)", |
| 264 | .cra_priority = 300, | 282 | .cra_driver_name = "ccm-aes-ce", |
| 265 | .cra_flags = CRYPTO_ALG_TYPE_AEAD, | 283 | .cra_priority = 300, |
| 266 | .cra_blocksize = 1, | 284 | .cra_blocksize = 1, |
| 267 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), | 285 | .cra_ctxsize = sizeof(struct crypto_aes_ctx), |
| 268 | .cra_alignmask = 7, | 286 | .cra_alignmask = 7, |
| 269 | .cra_type = &crypto_aead_type, | 287 | .cra_module = THIS_MODULE, |
| 270 | .cra_module = THIS_MODULE, | 288 | }, |
| 271 | .cra_aead = { | 289 | .ivsize = AES_BLOCK_SIZE, |
| 272 | .ivsize = AES_BLOCK_SIZE, | 290 | .maxauthsize = AES_BLOCK_SIZE, |
| 273 | .maxauthsize = AES_BLOCK_SIZE, | 291 | .setkey = ccm_setkey, |
| 274 | .setkey = ccm_setkey, | 292 | .setauthsize = ccm_setauthsize, |
| 275 | .setauthsize = ccm_setauthsize, | 293 | .encrypt = ccm_encrypt, |
| 276 | .encrypt = ccm_encrypt, | 294 | .decrypt = ccm_decrypt, |
| 277 | .decrypt = ccm_decrypt, | ||
| 278 | } | ||
| 279 | }; | 295 | }; |
| 280 | 296 | ||
| 281 | static int __init aes_mod_init(void) | 297 | static int __init aes_mod_init(void) |
| 282 | { | 298 | { |
| 283 | if (!(elf_hwcap & HWCAP_AES)) | 299 | if (!(elf_hwcap & HWCAP_AES)) |
| 284 | return -ENODEV; | 300 | return -ENODEV; |
| 285 | return crypto_register_alg(&ccm_aes_alg); | 301 | return crypto_register_aead(&ccm_aes_alg); |
| 286 | } | 302 | } |
| 287 | 303 | ||
| 288 | static void __exit aes_mod_exit(void) | 304 | static void __exit aes_mod_exit(void) |
| 289 | { | 305 | { |
| 290 | crypto_unregister_alg(&ccm_aes_alg); | 306 | crypto_unregister_aead(&ccm_aes_alg); |
| 291 | } | 307 | } |
| 292 | 308 | ||
| 293 | module_init(aes_mod_init); | 309 | module_init(aes_mod_init); |
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 58abeda64cb7..15cca17cba4b 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
| @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {} | |||
| 29 | 29 | ||
| 30 | extern void enable_kernel_fp(void); | 30 | extern void enable_kernel_fp(void); |
| 31 | extern void enable_kernel_altivec(void); | 31 | extern void enable_kernel_altivec(void); |
| 32 | extern void enable_kernel_vsx(void); | ||
| 32 | extern int emulate_altivec(struct pt_regs *); | 33 | extern int emulate_altivec(struct pt_regs *); |
| 33 | extern void __giveup_vsx(struct task_struct *); | 34 | extern void __giveup_vsx(struct task_struct *); |
| 34 | extern void giveup_vsx(struct task_struct *); | 35 | extern void giveup_vsx(struct task_struct *); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8005e18d1b40..64e6e9d9e656 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); | |||
| 204 | #endif /* CONFIG_ALTIVEC */ | 204 | #endif /* CONFIG_ALTIVEC */ |
| 205 | 205 | ||
| 206 | #ifdef CONFIG_VSX | 206 | #ifdef CONFIG_VSX |
| 207 | #if 0 | ||
| 208 | /* not currently used, but some crazy RAID module might want to later */ | ||
| 209 | void enable_kernel_vsx(void) | 207 | void enable_kernel_vsx(void) |
| 210 | { | 208 | { |
| 211 | WARN_ON(preemptible()); | 209 | WARN_ON(preemptible()); |
| @@ -220,7 +218,6 @@ void enable_kernel_vsx(void) | |||
| 220 | #endif /* CONFIG_SMP */ | 218 | #endif /* CONFIG_SMP */ |
| 221 | } | 219 | } |
| 222 | EXPORT_SYMBOL(enable_kernel_vsx); | 220 | EXPORT_SYMBOL(enable_kernel_vsx); |
| 223 | #endif | ||
| 224 | 221 | ||
| 225 | void giveup_vsx(struct task_struct *tsk) | 222 | void giveup_vsx(struct task_struct *tsk) |
| 226 | { | 223 | { |
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 5a4a089e8b1f..9a2838cf0591 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
| @@ -20,6 +20,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o | |||
| 20 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o | 20 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o |
| 21 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o | 21 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o |
| 22 | obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o | 22 | obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o |
| 23 | obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o | ||
| 23 | obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o | 24 | obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o |
| 24 | obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o | 25 | obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o |
| 25 | obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o | 26 | obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o |
| @@ -30,6 +31,7 @@ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o | |||
| 30 | obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o | 31 | obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o |
| 31 | obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o | 32 | obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o |
| 32 | obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o | 33 | obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o |
| 34 | obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o | ||
| 33 | 35 | ||
| 34 | # These modules require assembler to support AVX. | 36 | # These modules require assembler to support AVX. |
| 35 | ifeq ($(avx_supported),yes) | 37 | ifeq ($(avx_supported),yes) |
| @@ -60,6 +62,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o | |||
| 60 | twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o | 62 | twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o |
| 61 | twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o | 63 | twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o |
| 62 | salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o | 64 | salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o |
| 65 | chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o | ||
| 63 | serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o | 66 | serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o |
| 64 | 67 | ||
| 65 | ifeq ($(avx_supported),yes) | 68 | ifeq ($(avx_supported),yes) |
| @@ -75,6 +78,7 @@ endif | |||
| 75 | 78 | ||
| 76 | ifeq ($(avx2_supported),yes) | 79 | ifeq ($(avx2_supported),yes) |
| 77 | camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o | 80 | camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o |
| 81 | chacha20-x86_64-y += chacha20-avx2-x86_64.o | ||
| 78 | serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o | 82 | serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o |
| 79 | endif | 83 | endif |
| 80 | 84 | ||
| @@ -82,8 +86,10 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o | |||
| 82 | aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o | 86 | aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o |
| 83 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o | 87 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o |
| 84 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o | 88 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o |
| 89 | poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o | ||
| 85 | ifeq ($(avx2_supported),yes) | 90 | ifeq ($(avx2_supported),yes) |
| 86 | sha1-ssse3-y += sha1_avx2_x86_64_asm.o | 91 | sha1-ssse3-y += sha1_avx2_x86_64_asm.o |
| 92 | poly1305-x86_64-y += poly1305-avx2-x86_64.o | ||
| 87 | endif | 93 | endif |
| 88 | crc32c-intel-y := crc32c-intel_glue.o | 94 | crc32c-intel-y := crc32c-intel_glue.o |
| 89 | crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o | 95 | crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index dccad38b59a8..3633ad6145c5 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
| @@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead) | |||
| 803 | return PTR_ERR(cryptd_tfm); | 803 | return PTR_ERR(cryptd_tfm); |
| 804 | 804 | ||
| 805 | *ctx = cryptd_tfm; | 805 | *ctx = cryptd_tfm; |
| 806 | crypto_aead_set_reqsize( | 806 | crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); |
| 807 | aead, | ||
| 808 | sizeof(struct aead_request) + | ||
| 809 | crypto_aead_reqsize(&cryptd_tfm->base)); | ||
| 810 | return 0; | 807 | return 0; |
| 811 | } | 808 | } |
| 812 | 809 | ||
| @@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req) | |||
| 955 | 952 | ||
| 956 | /* Assuming we are supporting rfc4106 64-bit extended */ | 953 | /* Assuming we are supporting rfc4106 64-bit extended */ |
| 957 | /* sequence numbers We need to have the AAD length equal */ | 954 | /* sequence numbers We need to have the AAD length equal */ |
| 958 | /* to 8 or 12 bytes */ | 955 | /* to 16 or 20 bytes */ |
| 959 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) | 956 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) |
| 960 | return -EINVAL; | 957 | return -EINVAL; |
| 961 | 958 | ||
| 962 | /* IV below built */ | 959 | /* IV below built */ |
| @@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req) | |||
| 992 | } | 989 | } |
| 993 | 990 | ||
| 994 | kernel_fpu_begin(); | 991 | kernel_fpu_begin(); |
| 995 | aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv, | 992 | aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, |
| 996 | ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst | 993 | ctx->hash_subkey, assoc, req->assoclen - 8, |
| 997 | + ((unsigned long)req->cryptlen), auth_tag_len); | 994 | dst + req->cryptlen, auth_tag_len); |
| 998 | kernel_fpu_end(); | 995 | kernel_fpu_end(); |
| 999 | 996 | ||
| 1000 | /* The authTag (aka the Integrity Check Value) needs to be written | 997 | /* The authTag (aka the Integrity Check Value) needs to be written |
| @@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
| 1033 | struct scatter_walk dst_sg_walk; | 1030 | struct scatter_walk dst_sg_walk; |
| 1034 | unsigned int i; | 1031 | unsigned int i; |
| 1035 | 1032 | ||
| 1036 | if (unlikely(req->assoclen != 8 && req->assoclen != 12)) | 1033 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) |
| 1037 | return -EINVAL; | 1034 | return -EINVAL; |
| 1038 | 1035 | ||
| 1039 | /* Assuming we are supporting rfc4106 64-bit extended */ | 1036 | /* Assuming we are supporting rfc4106 64-bit extended */ |
| 1040 | /* sequence numbers We need to have the AAD length */ | 1037 | /* sequence numbers We need to have the AAD length */ |
| 1041 | /* equal to 8 or 12 bytes */ | 1038 | /* equal to 16 or 20 bytes */ |
| 1042 | 1039 | ||
| 1043 | tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); | 1040 | tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); |
| 1044 | /* IV below built */ | 1041 | /* IV below built */ |
| @@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
| 1075 | 1072 | ||
| 1076 | kernel_fpu_begin(); | 1073 | kernel_fpu_begin(); |
| 1077 | aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, | 1074 | aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, |
| 1078 | ctx->hash_subkey, assoc, (unsigned long)req->assoclen, | 1075 | ctx->hash_subkey, assoc, req->assoclen - 8, |
| 1079 | authTag, auth_tag_len); | 1076 | authTag, auth_tag_len); |
| 1080 | kernel_fpu_end(); | 1077 | kernel_fpu_end(); |
| 1081 | 1078 | ||
| 1082 | /* Compare generated tag with passed in tag. */ | 1079 | /* Compare generated tag with passed in tag. */ |
| @@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req) | |||
| 1105 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1102 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 1106 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); | 1103 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
| 1107 | struct cryptd_aead *cryptd_tfm = *ctx; | 1104 | struct cryptd_aead *cryptd_tfm = *ctx; |
| 1108 | struct aead_request *subreq = aead_request_ctx(req); | ||
| 1109 | 1105 | ||
| 1110 | aead_request_set_tfm(subreq, irq_fpu_usable() ? | 1106 | aead_request_set_tfm(req, irq_fpu_usable() ? |
| 1111 | cryptd_aead_child(cryptd_tfm) : | 1107 | cryptd_aead_child(cryptd_tfm) : |
| 1112 | &cryptd_tfm->base); | 1108 | &cryptd_tfm->base); |
| 1113 | 1109 | ||
| 1114 | aead_request_set_callback(subreq, req->base.flags, | 1110 | return crypto_aead_encrypt(req); |
| 1115 | req->base.complete, req->base.data); | ||
| 1116 | aead_request_set_crypt(subreq, req->src, req->dst, | ||
| 1117 | req->cryptlen, req->iv); | ||
| 1118 | aead_request_set_ad(subreq, req->assoclen); | ||
| 1119 | |||
| 1120 | return crypto_aead_encrypt(subreq); | ||
| 1121 | } | 1111 | } |
| 1122 | 1112 | ||
| 1123 | static int rfc4106_decrypt(struct aead_request *req) | 1113 | static int rfc4106_decrypt(struct aead_request *req) |
| @@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req) | |||
| 1125 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 1115 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 1126 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); | 1116 | struct cryptd_aead **ctx = crypto_aead_ctx(tfm); |
| 1127 | struct cryptd_aead *cryptd_tfm = *ctx; | 1117 | struct cryptd_aead *cryptd_tfm = *ctx; |
| 1128 | struct aead_request *subreq = aead_request_ctx(req); | ||
| 1129 | |||
| 1130 | aead_request_set_tfm(subreq, irq_fpu_usable() ? | ||
| 1131 | cryptd_aead_child(cryptd_tfm) : | ||
| 1132 | &cryptd_tfm->base); | ||
| 1133 | 1118 | ||
| 1134 | aead_request_set_callback(subreq, req->base.flags, | 1119 | aead_request_set_tfm(req, irq_fpu_usable() ? |
| 1135 | req->base.complete, req->base.data); | 1120 | cryptd_aead_child(cryptd_tfm) : |
| 1136 | aead_request_set_crypt(subreq, req->src, req->dst, | 1121 | &cryptd_tfm->base); |
| 1137 | req->cryptlen, req->iv); | ||
| 1138 | aead_request_set_ad(subreq, req->assoclen); | ||
| 1139 | 1122 | ||
| 1140 | return crypto_aead_decrypt(subreq); | 1123 | return crypto_aead_decrypt(req); |
| 1141 | } | 1124 | } |
| 1142 | #endif | 1125 | #endif |
| 1143 | 1126 | ||
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S new file mode 100644 index 000000000000..16694e625f77 --- /dev/null +++ b/arch/x86/crypto/chacha20-avx2-x86_64.S | |||
| @@ -0,0 +1,443 @@ | |||
| 1 | /* | ||
| 2 | * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Martin Willi | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | |||
| 14 | .data | ||
| 15 | .align 32 | ||
| 16 | |||
| 17 | ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 | ||
| 18 | .octa 0x0e0d0c0f0a09080b0605040702010003 | ||
| 19 | ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 | ||
| 20 | .octa 0x0d0c0f0e09080b0a0504070601000302 | ||
| 21 | CTRINC: .octa 0x00000003000000020000000100000000 | ||
| 22 | .octa 0x00000007000000060000000500000004 | ||
| 23 | |||
| 24 | .text | ||
| 25 | |||
| 26 | ENTRY(chacha20_8block_xor_avx2) | ||
| 27 | # %rdi: Input state matrix, s | ||
| 28 | # %rsi: 8 data blocks output, o | ||
| 29 | # %rdx: 8 data blocks input, i | ||
| 30 | |||
| 31 | # This function encrypts eight consecutive ChaCha20 blocks by loading | ||
| 32 | # the state matrix in AVX registers eight times. As we need some | ||
| 33 | # scratch registers, we save the first four registers on the stack. The | ||
| 34 | # algorithm performs each operation on the corresponding word of each | ||
| 35 | # state matrix, hence requires no word shuffling. For final XORing step | ||
| 36 | # we transpose the matrix by interleaving 32-, 64- and then 128-bit | ||
| 37 | # words, which allows us to do XOR in AVX registers. 8/16-bit word | ||
| 38 | # rotation is done with the slightly better performing byte shuffling, | ||
| 39 | # 7/12-bit word rotation uses traditional shift+OR. | ||
| 40 | |||
| 41 | vzeroupper | ||
| 42 | # 4 * 32 byte stack, 32-byte aligned | ||
| 43 | mov %rsp, %r8 | ||
| 44 | and $~31, %rsp | ||
| 45 | sub $0x80, %rsp | ||
| 46 | |||
| 47 | # x0..15[0-7] = s[0..15] | ||
| 48 | vpbroadcastd 0x00(%rdi),%ymm0 | ||
| 49 | vpbroadcastd 0x04(%rdi),%ymm1 | ||
| 50 | vpbroadcastd 0x08(%rdi),%ymm2 | ||
| 51 | vpbroadcastd 0x0c(%rdi),%ymm3 | ||
| 52 | vpbroadcastd 0x10(%rdi),%ymm4 | ||
| 53 | vpbroadcastd 0x14(%rdi),%ymm5 | ||
| 54 | vpbroadcastd 0x18(%rdi),%ymm6 | ||
| 55 | vpbroadcastd 0x1c(%rdi),%ymm7 | ||
| 56 | vpbroadcastd 0x20(%rdi),%ymm8 | ||
| 57 | vpbroadcastd 0x24(%rdi),%ymm9 | ||
| 58 | vpbroadcastd 0x28(%rdi),%ymm10 | ||
| 59 | vpbroadcastd 0x2c(%rdi),%ymm11 | ||
| 60 | vpbroadcastd 0x30(%rdi),%ymm12 | ||
| 61 | vpbroadcastd 0x34(%rdi),%ymm13 | ||
| 62 | vpbroadcastd 0x38(%rdi),%ymm14 | ||
| 63 | vpbroadcastd 0x3c(%rdi),%ymm15 | ||
| 64 | # x0..3 on stack | ||
| 65 | vmovdqa %ymm0,0x00(%rsp) | ||
| 66 | vmovdqa %ymm1,0x20(%rsp) | ||
| 67 | vmovdqa %ymm2,0x40(%rsp) | ||
| 68 | vmovdqa %ymm3,0x60(%rsp) | ||
| 69 | |||
| 70 | vmovdqa CTRINC(%rip),%ymm1 | ||
| 71 | vmovdqa ROT8(%rip),%ymm2 | ||
| 72 | vmovdqa ROT16(%rip),%ymm3 | ||
| 73 | |||
| 74 | # x12 += counter values 0-3 | ||
| 75 | vpaddd %ymm1,%ymm12,%ymm12 | ||
| 76 | |||
| 77 | mov $10,%ecx | ||
| 78 | |||
| 79 | .Ldoubleround8: | ||
| 80 | # x0 += x4, x12 = rotl32(x12 ^ x0, 16) | ||
| 81 | vpaddd 0x00(%rsp),%ymm4,%ymm0 | ||
| 82 | vmovdqa %ymm0,0x00(%rsp) | ||
| 83 | vpxor %ymm0,%ymm12,%ymm12 | ||
| 84 | vpshufb %ymm3,%ymm12,%ymm12 | ||
| 85 | # x1 += x5, x13 = rotl32(x13 ^ x1, 16) | ||
| 86 | vpaddd 0x20(%rsp),%ymm5,%ymm0 | ||
| 87 | vmovdqa %ymm0,0x20(%rsp) | ||
| 88 | vpxor %ymm0,%ymm13,%ymm13 | ||
| 89 | vpshufb %ymm3,%ymm13,%ymm13 | ||
| 90 | # x2 += x6, x14 = rotl32(x14 ^ x2, 16) | ||
| 91 | vpaddd 0x40(%rsp),%ymm6,%ymm0 | ||
| 92 | vmovdqa %ymm0,0x40(%rsp) | ||
| 93 | vpxor %ymm0,%ymm14,%ymm14 | ||
| 94 | vpshufb %ymm3,%ymm14,%ymm14 | ||
| 95 | # x3 += x7, x15 = rotl32(x15 ^ x3, 16) | ||
| 96 | vpaddd 0x60(%rsp),%ymm7,%ymm0 | ||
| 97 | vmovdqa %ymm0,0x60(%rsp) | ||
| 98 | vpxor %ymm0,%ymm15,%ymm15 | ||
| 99 | vpshufb %ymm3,%ymm15,%ymm15 | ||
| 100 | |||
| 101 | # x8 += x12, x4 = rotl32(x4 ^ x8, 12) | ||
| 102 | vpaddd %ymm12,%ymm8,%ymm8 | ||
| 103 | vpxor %ymm8,%ymm4,%ymm4 | ||
| 104 | vpslld $12,%ymm4,%ymm0 | ||
| 105 | vpsrld $20,%ymm4,%ymm4 | ||
| 106 | vpor %ymm0,%ymm4,%ymm4 | ||
| 107 | # x9 += x13, x5 = rotl32(x5 ^ x9, 12) | ||
| 108 | vpaddd %ymm13,%ymm9,%ymm9 | ||
| 109 | vpxor %ymm9,%ymm5,%ymm5 | ||
| 110 | vpslld $12,%ymm5,%ymm0 | ||
| 111 | vpsrld $20,%ymm5,%ymm5 | ||
| 112 | vpor %ymm0,%ymm5,%ymm5 | ||
| 113 | # x10 += x14, x6 = rotl32(x6 ^ x10, 12) | ||
| 114 | vpaddd %ymm14,%ymm10,%ymm10 | ||
| 115 | vpxor %ymm10,%ymm6,%ymm6 | ||
| 116 | vpslld $12,%ymm6,%ymm0 | ||
| 117 | vpsrld $20,%ymm6,%ymm6 | ||
| 118 | vpor %ymm0,%ymm6,%ymm6 | ||
| 119 | # x11 += x15, x7 = rotl32(x7 ^ x11, 12) | ||
| 120 | vpaddd %ymm15,%ymm11,%ymm11 | ||
| 121 | vpxor %ymm11,%ymm7,%ymm7 | ||
| 122 | vpslld $12,%ymm7,%ymm0 | ||
| 123 | vpsrld $20,%ymm7,%ymm7 | ||
| 124 | vpor %ymm0,%ymm7,%ymm7 | ||
| 125 | |||
| 126 | # x0 += x4, x12 = rotl32(x12 ^ x0, 8) | ||
| 127 | vpaddd 0x00(%rsp),%ymm4,%ymm0 | ||
| 128 | vmovdqa %ymm0,0x00(%rsp) | ||
| 129 | vpxor %ymm0,%ymm12,%ymm12 | ||
| 130 | vpshufb %ymm2,%ymm12,%ymm12 | ||
| 131 | # x1 += x5, x13 = rotl32(x13 ^ x1, 8) | ||
| 132 | vpaddd 0x20(%rsp),%ymm5,%ymm0 | ||
| 133 | vmovdqa %ymm0,0x20(%rsp) | ||
| 134 | vpxor %ymm0,%ymm13,%ymm13 | ||
| 135 | vpshufb %ymm2,%ymm13,%ymm13 | ||
| 136 | # x2 += x6, x14 = rotl32(x14 ^ x2, 8) | ||
| 137 | vpaddd 0x40(%rsp),%ymm6,%ymm0 | ||
| 138 | vmovdqa %ymm0,0x40(%rsp) | ||
| 139 | vpxor %ymm0,%ymm14,%ymm14 | ||
| 140 | vpshufb %ymm2,%ymm14,%ymm14 | ||
| 141 | # x3 += x7, x15 = rotl32(x15 ^ x3, 8) | ||
| 142 | vpaddd 0x60(%rsp),%ymm7,%ymm0 | ||
| 143 | vmovdqa %ymm0,0x60(%rsp) | ||
| 144 | vpxor %ymm0,%ymm15,%ymm15 | ||
| 145 | vpshufb %ymm2,%ymm15,%ymm15 | ||
| 146 | |||
| 147 | # x8 += x12, x4 = rotl32(x4 ^ x8, 7) | ||
| 148 | vpaddd %ymm12,%ymm8,%ymm8 | ||
| 149 | vpxor %ymm8,%ymm4,%ymm4 | ||
| 150 | vpslld $7,%ymm4,%ymm0 | ||
| 151 | vpsrld $25,%ymm4,%ymm4 | ||
| 152 | vpor %ymm0,%ymm4,%ymm4 | ||
| 153 | # x9 += x13, x5 = rotl32(x5 ^ x9, 7) | ||
| 154 | vpaddd %ymm13,%ymm9,%ymm9 | ||
| 155 | vpxor %ymm9,%ymm5,%ymm5 | ||
| 156 | vpslld $7,%ymm5,%ymm0 | ||
| 157 | vpsrld $25,%ymm5,%ymm5 | ||
| 158 | vpor %ymm0,%ymm5,%ymm5 | ||
| 159 | # x10 += x14, x6 = rotl32(x6 ^ x10, 7) | ||
| 160 | vpaddd %ymm14,%ymm10,%ymm10 | ||
| 161 | vpxor %ymm10,%ymm6,%ymm6 | ||
| 162 | vpslld $7,%ymm6,%ymm0 | ||
| 163 | vpsrld $25,%ymm6,%ymm6 | ||
| 164 | vpor %ymm0,%ymm6,%ymm6 | ||
| 165 | # x11 += x15, x7 = rotl32(x7 ^ x11, 7) | ||
| 166 | vpaddd %ymm15,%ymm11,%ymm11 | ||
| 167 | vpxor %ymm11,%ymm7,%ymm7 | ||
| 168 | vpslld $7,%ymm7,%ymm0 | ||
| 169 | vpsrld $25,%ymm7,%ymm7 | ||
| 170 | vpor %ymm0,%ymm7,%ymm7 | ||
| 171 | |||
| 172 | # x0 += x5, x15 = rotl32(x15 ^ x0, 16) | ||
| 173 | vpaddd 0x00(%rsp),%ymm5,%ymm0 | ||
| 174 | vmovdqa %ymm0,0x00(%rsp) | ||
| 175 | vpxor %ymm0,%ymm15,%ymm15 | ||
| 176 | vpshufb %ymm3,%ymm15,%ymm15 | ||
| 177 | # x1 += x6, x12 = rotl32(x12 ^ x1, 16)%ymm0 | ||
| 178 | vpaddd 0x20(%rsp),%ymm6,%ymm0 | ||
| 179 | vmovdqa %ymm0,0x20(%rsp) | ||
| 180 | vpxor %ymm0,%ymm12,%ymm12 | ||
| 181 | vpshufb %ymm3,%ymm12,%ymm12 | ||
| 182 | # x2 += x7, x13 = rotl32(x13 ^ x2, 16) | ||
| 183 | vpaddd 0x40(%rsp),%ymm7,%ymm0 | ||
| 184 | vmovdqa %ymm0,0x40(%rsp) | ||
| 185 | vpxor %ymm0,%ymm13,%ymm13 | ||
| 186 | vpshufb %ymm3,%ymm13,%ymm13 | ||
| 187 | # x3 += x4, x14 = rotl32(x14 ^ x3, 16) | ||
| 188 | vpaddd 0x60(%rsp),%ymm4,%ymm0 | ||
| 189 | vmovdqa %ymm0,0x60(%rsp) | ||
| 190 | vpxor %ymm0,%ymm14,%ymm14 | ||
| 191 | vpshufb %ymm3,%ymm14,%ymm14 | ||
| 192 | |||
| 193 | # x10 += x15, x5 = rotl32(x5 ^ x10, 12) | ||
| 194 | vpaddd %ymm15,%ymm10,%ymm10 | ||
| 195 | vpxor %ymm10,%ymm5,%ymm5 | ||
| 196 | vpslld $12,%ymm5,%ymm0 | ||
| 197 | vpsrld $20,%ymm5,%ymm5 | ||
| 198 | vpor %ymm0,%ymm5,%ymm5 | ||
| 199 | # x11 += x12, x6 = rotl32(x6 ^ x11, 12) | ||
| 200 | vpaddd %ymm12,%ymm11,%ymm11 | ||
| 201 | vpxor %ymm11,%ymm6,%ymm6 | ||
| 202 | vpslld $12,%ymm6,%ymm0 | ||
| 203 | vpsrld $20,%ymm6,%ymm6 | ||
| 204 | vpor %ymm0,%ymm6,%ymm6 | ||
| 205 | # x8 += x13, x7 = rotl32(x7 ^ x8, 12) | ||
| 206 | vpaddd %ymm13,%ymm8,%ymm8 | ||
| 207 | vpxor %ymm8,%ymm7,%ymm7 | ||
| 208 | vpslld $12,%ymm7,%ymm0 | ||
| 209 | vpsrld $20,%ymm7,%ymm7 | ||
| 210 | vpor %ymm0,%ymm7,%ymm7 | ||
| 211 | # x9 += x14, x4 = rotl32(x4 ^ x9, 12) | ||
| 212 | vpaddd %ymm14,%ymm9,%ymm9 | ||
| 213 | vpxor %ymm9,%ymm4,%ymm4 | ||
| 214 | vpslld $12,%ymm4,%ymm0 | ||
| 215 | vpsrld $20,%ymm4,%ymm4 | ||
| 216 | vpor %ymm0,%ymm4,%ymm4 | ||
| 217 | |||
| 218 | # x0 += x5, x15 = rotl32(x15 ^ x0, 8) | ||
| 219 | vpaddd 0x00(%rsp),%ymm5,%ymm0 | ||
| 220 | vmovdqa %ymm0,0x00(%rsp) | ||
| 221 | vpxor %ymm0,%ymm15,%ymm15 | ||
| 222 | vpshufb %ymm2,%ymm15,%ymm15 | ||
| 223 | # x1 += x6, x12 = rotl32(x12 ^ x1, 8) | ||
| 224 | vpaddd 0x20(%rsp),%ymm6,%ymm0 | ||
| 225 | vmovdqa %ymm0,0x20(%rsp) | ||
| 226 | vpxor %ymm0,%ymm12,%ymm12 | ||
| 227 | vpshufb %ymm2,%ymm12,%ymm12 | ||
| 228 | # x2 += x7, x13 = rotl32(x13 ^ x2, 8) | ||
| 229 | vpaddd 0x40(%rsp),%ymm7,%ymm0 | ||
| 230 | vmovdqa %ymm0,0x40(%rsp) | ||
| 231 | vpxor %ymm0,%ymm13,%ymm13 | ||
| 232 | vpshufb %ymm2,%ymm13,%ymm13 | ||
| 233 | # x3 += x4, x14 = rotl32(x14 ^ x3, 8) | ||
| 234 | vpaddd 0x60(%rsp),%ymm4,%ymm0 | ||
| 235 | vmovdqa %ymm0,0x60(%rsp) | ||
| 236 | vpxor %ymm0,%ymm14,%ymm14 | ||
| 237 | vpshufb %ymm2,%ymm14,%ymm14 | ||
| 238 | |||
| 239 | # x10 += x15, x5 = rotl32(x5 ^ x10, 7) | ||
| 240 | vpaddd %ymm15,%ymm10,%ymm10 | ||
| 241 | vpxor %ymm10,%ymm5,%ymm5 | ||
| 242 | vpslld $7,%ymm5,%ymm0 | ||
| 243 | vpsrld $25,%ymm5,%ymm5 | ||
| 244 | vpor %ymm0,%ymm5,%ymm5 | ||
| 245 | # x11 += x12, x6 = rotl32(x6 ^ x11, 7) | ||
| 246 | vpaddd %ymm12,%ymm11,%ymm11 | ||
| 247 | vpxor %ymm11,%ymm6,%ymm6 | ||
| 248 | vpslld $7,%ymm6,%ymm0 | ||
| 249 | vpsrld $25,%ymm6,%ymm6 | ||
| 250 | vpor %ymm0,%ymm6,%ymm6 | ||
| 251 | # x8 += x13, x7 = rotl32(x7 ^ x8, 7) | ||
| 252 | vpaddd %ymm13,%ymm8,%ymm8 | ||
| 253 | vpxor %ymm8,%ymm7,%ymm7 | ||
| 254 | vpslld $7,%ymm7,%ymm0 | ||
| 255 | vpsrld $25,%ymm7,%ymm7 | ||
| 256 | vpor %ymm0,%ymm7,%ymm7 | ||
| 257 | # x9 += x14, x4 = rotl32(x4 ^ x9, 7) | ||
| 258 | vpaddd %ymm14,%ymm9,%ymm9 | ||
| 259 | vpxor %ymm9,%ymm4,%ymm4 | ||
| 260 | vpslld $7,%ymm4,%ymm0 | ||
| 261 | vpsrld $25,%ymm4,%ymm4 | ||
| 262 | vpor %ymm0,%ymm4,%ymm4 | ||
| 263 | |||
| 264 | dec %ecx | ||
| 265 | jnz .Ldoubleround8 | ||
| 266 | |||
| 267 | # x0..15[0-3] += s[0..15] | ||
| 268 | vpbroadcastd 0x00(%rdi),%ymm0 | ||
| 269 | vpaddd 0x00(%rsp),%ymm0,%ymm0 | ||
| 270 | vmovdqa %ymm0,0x00(%rsp) | ||
| 271 | vpbroadcastd 0x04(%rdi),%ymm0 | ||
| 272 | vpaddd 0x20(%rsp),%ymm0,%ymm0 | ||
| 273 | vmovdqa %ymm0,0x20(%rsp) | ||
| 274 | vpbroadcastd 0x08(%rdi),%ymm0 | ||
| 275 | vpaddd 0x40(%rsp),%ymm0,%ymm0 | ||
| 276 | vmovdqa %ymm0,0x40(%rsp) | ||
| 277 | vpbroadcastd 0x0c(%rdi),%ymm0 | ||
| 278 | vpaddd 0x60(%rsp),%ymm0,%ymm0 | ||
| 279 | vmovdqa %ymm0,0x60(%rsp) | ||
| 280 | vpbroadcastd 0x10(%rdi),%ymm0 | ||
| 281 | vpaddd %ymm0,%ymm4,%ymm4 | ||
| 282 | vpbroadcastd 0x14(%rdi),%ymm0 | ||
| 283 | vpaddd %ymm0,%ymm5,%ymm5 | ||
| 284 | vpbroadcastd 0x18(%rdi),%ymm0 | ||
| 285 | vpaddd %ymm0,%ymm6,%ymm6 | ||
| 286 | vpbroadcastd 0x1c(%rdi),%ymm0 | ||
| 287 | vpaddd %ymm0,%ymm7,%ymm7 | ||
| 288 | vpbroadcastd 0x20(%rdi),%ymm0 | ||
| 289 | vpaddd %ymm0,%ymm8,%ymm8 | ||
| 290 | vpbroadcastd 0x24(%rdi),%ymm0 | ||
| 291 | vpaddd %ymm0,%ymm9,%ymm9 | ||
| 292 | vpbroadcastd 0x28(%rdi),%ymm0 | ||
| 293 | vpaddd %ymm0,%ymm10,%ymm10 | ||
| 294 | vpbroadcastd 0x2c(%rdi),%ymm0 | ||
| 295 | vpaddd %ymm0,%ymm11,%ymm11 | ||
| 296 | vpbroadcastd 0x30(%rdi),%ymm0 | ||
| 297 | vpaddd %ymm0,%ymm12,%ymm12 | ||
| 298 | vpbroadcastd 0x34(%rdi),%ymm0 | ||
| 299 | vpaddd %ymm0,%ymm13,%ymm13 | ||
| 300 | vpbroadcastd 0x38(%rdi),%ymm0 | ||
| 301 | vpaddd %ymm0,%ymm14,%ymm14 | ||
| 302 | vpbroadcastd 0x3c(%rdi),%ymm0 | ||
| 303 | vpaddd %ymm0,%ymm15,%ymm15 | ||
| 304 | |||
| 305 | # x12 += counter values 0-3 | ||
| 306 | vpaddd %ymm1,%ymm12,%ymm12 | ||
| 307 | |||
| 308 | # interleave 32-bit words in state n, n+1 | ||
| 309 | vmovdqa 0x00(%rsp),%ymm0 | ||
| 310 | vmovdqa 0x20(%rsp),%ymm1 | ||
| 311 | vpunpckldq %ymm1,%ymm0,%ymm2 | ||
| 312 | vpunpckhdq %ymm1,%ymm0,%ymm1 | ||
| 313 | vmovdqa %ymm2,0x00(%rsp) | ||
| 314 | vmovdqa %ymm1,0x20(%rsp) | ||
| 315 | vmovdqa 0x40(%rsp),%ymm0 | ||
| 316 | vmovdqa 0x60(%rsp),%ymm1 | ||
| 317 | vpunpckldq %ymm1,%ymm0,%ymm2 | ||
| 318 | vpunpckhdq %ymm1,%ymm0,%ymm1 | ||
| 319 | vmovdqa %ymm2,0x40(%rsp) | ||
| 320 | vmovdqa %ymm1,0x60(%rsp) | ||
| 321 | vmovdqa %ymm4,%ymm0 | ||
| 322 | vpunpckldq %ymm5,%ymm0,%ymm4 | ||
| 323 | vpunpckhdq %ymm5,%ymm0,%ymm5 | ||
| 324 | vmovdqa %ymm6,%ymm0 | ||
| 325 | vpunpckldq %ymm7,%ymm0,%ymm6 | ||
| 326 | vpunpckhdq %ymm7,%ymm0,%ymm7 | ||
| 327 | vmovdqa %ymm8,%ymm0 | ||
| 328 | vpunpckldq %ymm9,%ymm0,%ymm8 | ||
| 329 | vpunpckhdq %ymm9,%ymm0,%ymm9 | ||
| 330 | vmovdqa %ymm10,%ymm0 | ||
| 331 | vpunpckldq %ymm11,%ymm0,%ymm10 | ||
| 332 | vpunpckhdq %ymm11,%ymm0,%ymm11 | ||
| 333 | vmovdqa %ymm12,%ymm0 | ||
| 334 | vpunpckldq %ymm13,%ymm0,%ymm12 | ||
| 335 | vpunpckhdq %ymm13,%ymm0,%ymm13 | ||
| 336 | vmovdqa %ymm14,%ymm0 | ||
| 337 | vpunpckldq %ymm15,%ymm0,%ymm14 | ||
| 338 | vpunpckhdq %ymm15,%ymm0,%ymm15 | ||
| 339 | |||
| 340 | # interleave 64-bit words in state n, n+2 | ||
| 341 | vmovdqa 0x00(%rsp),%ymm0 | ||
| 342 | vmovdqa 0x40(%rsp),%ymm2 | ||
| 343 | vpunpcklqdq %ymm2,%ymm0,%ymm1 | ||
| 344 | vpunpckhqdq %ymm2,%ymm0,%ymm2 | ||
| 345 | vmovdqa %ymm1,0x00(%rsp) | ||
| 346 | vmovdqa %ymm2,0x40(%rsp) | ||
| 347 | vmovdqa 0x20(%rsp),%ymm0 | ||
| 348 | vmovdqa 0x60(%rsp),%ymm2 | ||
| 349 | vpunpcklqdq %ymm2,%ymm0,%ymm1 | ||
| 350 | vpunpckhqdq %ymm2,%ymm0,%ymm2 | ||
| 351 | vmovdqa %ymm1,0x20(%rsp) | ||
| 352 | vmovdqa %ymm2,0x60(%rsp) | ||
| 353 | vmovdqa %ymm4,%ymm0 | ||
| 354 | vpunpcklqdq %ymm6,%ymm0,%ymm4 | ||
| 355 | vpunpckhqdq %ymm6,%ymm0,%ymm6 | ||
| 356 | vmovdqa %ymm5,%ymm0 | ||
| 357 | vpunpcklqdq %ymm7,%ymm0,%ymm5 | ||
| 358 | vpunpckhqdq %ymm7,%ymm0,%ymm7 | ||
| 359 | vmovdqa %ymm8,%ymm0 | ||
| 360 | vpunpcklqdq %ymm10,%ymm0,%ymm8 | ||
| 361 | vpunpckhqdq %ymm10,%ymm0,%ymm10 | ||
| 362 | vmovdqa %ymm9,%ymm0 | ||
| 363 | vpunpcklqdq %ymm11,%ymm0,%ymm9 | ||
| 364 | vpunpckhqdq %ymm11,%ymm0,%ymm11 | ||
| 365 | vmovdqa %ymm12,%ymm0 | ||
| 366 | vpunpcklqdq %ymm14,%ymm0,%ymm12 | ||
| 367 | vpunpckhqdq %ymm14,%ymm0,%ymm14 | ||
| 368 | vmovdqa %ymm13,%ymm0 | ||
| 369 | vpunpcklqdq %ymm15,%ymm0,%ymm13 | ||
| 370 | vpunpckhqdq %ymm15,%ymm0,%ymm15 | ||
| 371 | |||
| 372 | # interleave 128-bit words in state n, n+4 | ||
| 373 | vmovdqa 0x00(%rsp),%ymm0 | ||
| 374 | vperm2i128 $0x20,%ymm4,%ymm0,%ymm1 | ||
| 375 | vperm2i128 $0x31,%ymm4,%ymm0,%ymm4 | ||
| 376 | vmovdqa %ymm1,0x00(%rsp) | ||
| 377 | vmovdqa 0x20(%rsp),%ymm0 | ||
| 378 | vperm2i128 $0x20,%ymm5,%ymm0,%ymm1 | ||
| 379 | vperm2i128 $0x31,%ymm5,%ymm0,%ymm5 | ||
| 380 | vmovdqa %ymm1,0x20(%rsp) | ||
| 381 | vmovdqa 0x40(%rsp),%ymm0 | ||
| 382 | vperm2i128 $0x20,%ymm6,%ymm0,%ymm1 | ||
| 383 | vperm2i128 $0x31,%ymm6,%ymm0,%ymm6 | ||
| 384 | vmovdqa %ymm1,0x40(%rsp) | ||
| 385 | vmovdqa 0x60(%rsp),%ymm0 | ||
| 386 | vperm2i128 $0x20,%ymm7,%ymm0,%ymm1 | ||
| 387 | vperm2i128 $0x31,%ymm7,%ymm0,%ymm7 | ||
| 388 | vmovdqa %ymm1,0x60(%rsp) | ||
| 389 | vperm2i128 $0x20,%ymm12,%ymm8,%ymm0 | ||
| 390 | vperm2i128 $0x31,%ymm12,%ymm8,%ymm12 | ||
| 391 | vmovdqa %ymm0,%ymm8 | ||
| 392 | vperm2i128 $0x20,%ymm13,%ymm9,%ymm0 | ||
| 393 | vperm2i128 $0x31,%ymm13,%ymm9,%ymm13 | ||
| 394 | vmovdqa %ymm0,%ymm9 | ||
| 395 | vperm2i128 $0x20,%ymm14,%ymm10,%ymm0 | ||
| 396 | vperm2i128 $0x31,%ymm14,%ymm10,%ymm14 | ||
| 397 | vmovdqa %ymm0,%ymm10 | ||
| 398 | vperm2i128 $0x20,%ymm15,%ymm11,%ymm0 | ||
| 399 | vperm2i128 $0x31,%ymm15,%ymm11,%ymm15 | ||
| 400 | vmovdqa %ymm0,%ymm11 | ||
| 401 | |||
| 402 | # xor with corresponding input, write to output | ||
| 403 | vmovdqa 0x00(%rsp),%ymm0 | ||
| 404 | vpxor 0x0000(%rdx),%ymm0,%ymm0 | ||
| 405 | vmovdqu %ymm0,0x0000(%rsi) | ||
| 406 | vmovdqa 0x20(%rsp),%ymm0 | ||
| 407 | vpxor 0x0080(%rdx),%ymm0,%ymm0 | ||
| 408 | vmovdqu %ymm0,0x0080(%rsi) | ||
| 409 | vmovdqa 0x40(%rsp),%ymm0 | ||
| 410 | vpxor 0x0040(%rdx),%ymm0,%ymm0 | ||
| 411 | vmovdqu %ymm0,0x0040(%rsi) | ||
| 412 | vmovdqa 0x60(%rsp),%ymm0 | ||
| 413 | vpxor 0x00c0(%rdx),%ymm0,%ymm0 | ||
| 414 | vmovdqu %ymm0,0x00c0(%rsi) | ||
| 415 | vpxor 0x0100(%rdx),%ymm4,%ymm4 | ||
| 416 | vmovdqu %ymm4,0x0100(%rsi) | ||
| 417 | vpxor 0x0180(%rdx),%ymm5,%ymm5 | ||
| 418 | vmovdqu %ymm5,0x00180(%rsi) | ||
| 419 | vpxor 0x0140(%rdx),%ymm6,%ymm6 | ||
| 420 | vmovdqu %ymm6,0x0140(%rsi) | ||
| 421 | vpxor 0x01c0(%rdx),%ymm7,%ymm7 | ||
| 422 | vmovdqu %ymm7,0x01c0(%rsi) | ||
| 423 | vpxor 0x0020(%rdx),%ymm8,%ymm8 | ||
| 424 | vmovdqu %ymm8,0x0020(%rsi) | ||
| 425 | vpxor 0x00a0(%rdx),%ymm9,%ymm9 | ||
| 426 | vmovdqu %ymm9,0x00a0(%rsi) | ||
| 427 | vpxor 0x0060(%rdx),%ymm10,%ymm10 | ||
| 428 | vmovdqu %ymm10,0x0060(%rsi) | ||
| 429 | vpxor 0x00e0(%rdx),%ymm11,%ymm11 | ||
| 430 | vmovdqu %ymm11,0x00e0(%rsi) | ||
| 431 | vpxor 0x0120(%rdx),%ymm12,%ymm12 | ||
| 432 | vmovdqu %ymm12,0x0120(%rsi) | ||
| 433 | vpxor 0x01a0(%rdx),%ymm13,%ymm13 | ||
| 434 | vmovdqu %ymm13,0x01a0(%rsi) | ||
| 435 | vpxor 0x0160(%rdx),%ymm14,%ymm14 | ||
| 436 | vmovdqu %ymm14,0x0160(%rsi) | ||
| 437 | vpxor 0x01e0(%rdx),%ymm15,%ymm15 | ||
| 438 | vmovdqu %ymm15,0x01e0(%rsi) | ||
| 439 | |||
| 440 | vzeroupper | ||
| 441 | mov %r8,%rsp | ||
| 442 | ret | ||
| 443 | ENDPROC(chacha20_8block_xor_avx2) | ||
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S new file mode 100644 index 000000000000..712b13047b41 --- /dev/null +++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S | |||
| @@ -0,0 +1,625 @@ | |||
| 1 | /* | ||
| 2 | * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Martin Willi | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | |||
| 14 | .data | ||
| 15 | .align 16 | ||
| 16 | |||
| 17 | ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003 | ||
| 18 | ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302 | ||
| 19 | CTRINC: .octa 0x00000003000000020000000100000000 | ||
| 20 | |||
| 21 | .text | ||
| 22 | |||
| 23 | ENTRY(chacha20_block_xor_ssse3) | ||
| 24 | # %rdi: Input state matrix, s | ||
| 25 | # %rsi: 1 data block output, o | ||
| 26 | # %rdx: 1 data block input, i | ||
| 27 | |||
| 28 | # This function encrypts one ChaCha20 block by loading the state matrix | ||
| 29 | # in four SSE registers. It performs matrix operation on four words in | ||
| 30 | # parallel, but requireds shuffling to rearrange the words after each | ||
| 31 | # round. 8/16-bit word rotation is done with the slightly better | ||
| 32 | # performing SSSE3 byte shuffling, 7/12-bit word rotation uses | ||
| 33 | # traditional shift+OR. | ||
| 34 | |||
| 35 | # x0..3 = s0..3 | ||
| 36 | movdqa 0x00(%rdi),%xmm0 | ||
| 37 | movdqa 0x10(%rdi),%xmm1 | ||
| 38 | movdqa 0x20(%rdi),%xmm2 | ||
| 39 | movdqa 0x30(%rdi),%xmm3 | ||
| 40 | movdqa %xmm0,%xmm8 | ||
| 41 | movdqa %xmm1,%xmm9 | ||
| 42 | movdqa %xmm2,%xmm10 | ||
| 43 | movdqa %xmm3,%xmm11 | ||
| 44 | |||
| 45 | movdqa ROT8(%rip),%xmm4 | ||
| 46 | movdqa ROT16(%rip),%xmm5 | ||
| 47 | |||
| 48 | mov $10,%ecx | ||
| 49 | |||
| 50 | .Ldoubleround: | ||
| 51 | |||
| 52 | # x0 += x1, x3 = rotl32(x3 ^ x0, 16) | ||
| 53 | paddd %xmm1,%xmm0 | ||
| 54 | pxor %xmm0,%xmm3 | ||
| 55 | pshufb %xmm5,%xmm3 | ||
| 56 | |||
| 57 | # x2 += x3, x1 = rotl32(x1 ^ x2, 12) | ||
| 58 | paddd %xmm3,%xmm2 | ||
| 59 | pxor %xmm2,%xmm1 | ||
| 60 | movdqa %xmm1,%xmm6 | ||
| 61 | pslld $12,%xmm6 | ||
| 62 | psrld $20,%xmm1 | ||
| 63 | por %xmm6,%xmm1 | ||
| 64 | |||
| 65 | # x0 += x1, x3 = rotl32(x3 ^ x0, 8) | ||
| 66 | paddd %xmm1,%xmm0 | ||
| 67 | pxor %xmm0,%xmm3 | ||
| 68 | pshufb %xmm4,%xmm3 | ||
| 69 | |||
| 70 | # x2 += x3, x1 = rotl32(x1 ^ x2, 7) | ||
| 71 | paddd %xmm3,%xmm2 | ||
| 72 | pxor %xmm2,%xmm1 | ||
| 73 | movdqa %xmm1,%xmm7 | ||
| 74 | pslld $7,%xmm7 | ||
| 75 | psrld $25,%xmm1 | ||
| 76 | por %xmm7,%xmm1 | ||
| 77 | |||
| 78 | # x1 = shuffle32(x1, MASK(0, 3, 2, 1)) | ||
| 79 | pshufd $0x39,%xmm1,%xmm1 | ||
| 80 | # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) | ||
| 81 | pshufd $0x4e,%xmm2,%xmm2 | ||
| 82 | # x3 = shuffle32(x3, MASK(2, 1, 0, 3)) | ||
| 83 | pshufd $0x93,%xmm3,%xmm3 | ||
| 84 | |||
| 85 | # x0 += x1, x3 = rotl32(x3 ^ x0, 16) | ||
| 86 | paddd %xmm1,%xmm0 | ||
| 87 | pxor %xmm0,%xmm3 | ||
| 88 | pshufb %xmm5,%xmm3 | ||
| 89 | |||
| 90 | # x2 += x3, x1 = rotl32(x1 ^ x2, 12) | ||
| 91 | paddd %xmm3,%xmm2 | ||
| 92 | pxor %xmm2,%xmm1 | ||
| 93 | movdqa %xmm1,%xmm6 | ||
| 94 | pslld $12,%xmm6 | ||
| 95 | psrld $20,%xmm1 | ||
| 96 | por %xmm6,%xmm1 | ||
| 97 | |||
| 98 | # x0 += x1, x3 = rotl32(x3 ^ x0, 8) | ||
| 99 | paddd %xmm1,%xmm0 | ||
| 100 | pxor %xmm0,%xmm3 | ||
| 101 | pshufb %xmm4,%xmm3 | ||
| 102 | |||
| 103 | # x2 += x3, x1 = rotl32(x1 ^ x2, 7) | ||
| 104 | paddd %xmm3,%xmm2 | ||
| 105 | pxor %xmm2,%xmm1 | ||
| 106 | movdqa %xmm1,%xmm7 | ||
| 107 | pslld $7,%xmm7 | ||
| 108 | psrld $25,%xmm1 | ||
| 109 | por %xmm7,%xmm1 | ||
| 110 | |||
| 111 | # x1 = shuffle32(x1, MASK(2, 1, 0, 3)) | ||
| 112 | pshufd $0x93,%xmm1,%xmm1 | ||
| 113 | # x2 = shuffle32(x2, MASK(1, 0, 3, 2)) | ||
| 114 | pshufd $0x4e,%xmm2,%xmm2 | ||
| 115 | # x3 = shuffle32(x3, MASK(0, 3, 2, 1)) | ||
| 116 | pshufd $0x39,%xmm3,%xmm3 | ||
| 117 | |||
| 118 | dec %ecx | ||
| 119 | jnz .Ldoubleround | ||
| 120 | |||
| 121 | # o0 = i0 ^ (x0 + s0) | ||
| 122 | movdqu 0x00(%rdx),%xmm4 | ||
| 123 | paddd %xmm8,%xmm0 | ||
| 124 | pxor %xmm4,%xmm0 | ||
| 125 | movdqu %xmm0,0x00(%rsi) | ||
| 126 | # o1 = i1 ^ (x1 + s1) | ||
| 127 | movdqu 0x10(%rdx),%xmm5 | ||
| 128 | paddd %xmm9,%xmm1 | ||
| 129 | pxor %xmm5,%xmm1 | ||
| 130 | movdqu %xmm1,0x10(%rsi) | ||
| 131 | # o2 = i2 ^ (x2 + s2) | ||
| 132 | movdqu 0x20(%rdx),%xmm6 | ||
| 133 | paddd %xmm10,%xmm2 | ||
| 134 | pxor %xmm6,%xmm2 | ||
| 135 | movdqu %xmm2,0x20(%rsi) | ||
| 136 | # o3 = i3 ^ (x3 + s3) | ||
| 137 | movdqu 0x30(%rdx),%xmm7 | ||
| 138 | paddd %xmm11,%xmm3 | ||
| 139 | pxor %xmm7,%xmm3 | ||
| 140 | movdqu %xmm3,0x30(%rsi) | ||
| 141 | |||
| 142 | ret | ||
| 143 | ENDPROC(chacha20_block_xor_ssse3) | ||
| 144 | |||
| 145 | ENTRY(chacha20_4block_xor_ssse3) | ||
| 146 | # %rdi: Input state matrix, s | ||
| 147 | # %rsi: 4 data blocks output, o | ||
| 148 | # %rdx: 4 data blocks input, i | ||
| 149 | |||
| 150 | # This function encrypts four consecutive ChaCha20 blocks by loading the | ||
| 151 | # the state matrix in SSE registers four times. As we need some scratch | ||
| 152 | # registers, we save the first four registers on the stack. The | ||
| 153 | # algorithm performs each operation on the corresponding word of each | ||
| 154 | # state matrix, hence requires no word shuffling. For final XORing step | ||
| 155 | # we transpose the matrix by interleaving 32- and then 64-bit words, | ||
| 156 | # which allows us to do XOR in SSE registers. 8/16-bit word rotation is | ||
| 157 | # done with the slightly better performing SSSE3 byte shuffling, | ||
| 158 | # 7/12-bit word rotation uses traditional shift+OR. | ||
| 159 | |||
| 160 | sub $0x40,%rsp | ||
| 161 | |||
| 162 | # x0..15[0-3] = s0..3[0..3] | ||
| 163 | movq 0x00(%rdi),%xmm1 | ||
| 164 | pshufd $0x00,%xmm1,%xmm0 | ||
| 165 | pshufd $0x55,%xmm1,%xmm1 | ||
| 166 | movq 0x08(%rdi),%xmm3 | ||
| 167 | pshufd $0x00,%xmm3,%xmm2 | ||
| 168 | pshufd $0x55,%xmm3,%xmm3 | ||
| 169 | movq 0x10(%rdi),%xmm5 | ||
| 170 | pshufd $0x00,%xmm5,%xmm4 | ||
| 171 | pshufd $0x55,%xmm5,%xmm5 | ||
| 172 | movq 0x18(%rdi),%xmm7 | ||
| 173 | pshufd $0x00,%xmm7,%xmm6 | ||
| 174 | pshufd $0x55,%xmm7,%xmm7 | ||
| 175 | movq 0x20(%rdi),%xmm9 | ||
| 176 | pshufd $0x00,%xmm9,%xmm8 | ||
| 177 | pshufd $0x55,%xmm9,%xmm9 | ||
| 178 | movq 0x28(%rdi),%xmm11 | ||
| 179 | pshufd $0x00,%xmm11,%xmm10 | ||
| 180 | pshufd $0x55,%xmm11,%xmm11 | ||
| 181 | movq 0x30(%rdi),%xmm13 | ||
| 182 | pshufd $0x00,%xmm13,%xmm12 | ||
| 183 | pshufd $0x55,%xmm13,%xmm13 | ||
| 184 | movq 0x38(%rdi),%xmm15 | ||
| 185 | pshufd $0x00,%xmm15,%xmm14 | ||
| 186 | pshufd $0x55,%xmm15,%xmm15 | ||
| 187 | # x0..3 on stack | ||
| 188 | movdqa %xmm0,0x00(%rsp) | ||
| 189 | movdqa %xmm1,0x10(%rsp) | ||
| 190 | movdqa %xmm2,0x20(%rsp) | ||
| 191 | movdqa %xmm3,0x30(%rsp) | ||
| 192 | |||
| 193 | movdqa CTRINC(%rip),%xmm1 | ||
| 194 | movdqa ROT8(%rip),%xmm2 | ||
| 195 | movdqa ROT16(%rip),%xmm3 | ||
| 196 | |||
| 197 | # x12 += counter values 0-3 | ||
| 198 | paddd %xmm1,%xmm12 | ||
| 199 | |||
| 200 | mov $10,%ecx | ||
| 201 | |||
| 202 | .Ldoubleround4: | ||
| 203 | # x0 += x4, x12 = rotl32(x12 ^ x0, 16) | ||
| 204 | movdqa 0x00(%rsp),%xmm0 | ||
| 205 | paddd %xmm4,%xmm0 | ||
| 206 | movdqa %xmm0,0x00(%rsp) | ||
| 207 | pxor %xmm0,%xmm12 | ||
| 208 | pshufb %xmm3,%xmm12 | ||
| 209 | # x1 += x5, x13 = rotl32(x13 ^ x1, 16) | ||
| 210 | movdqa 0x10(%rsp),%xmm0 | ||
| 211 | paddd %xmm5,%xmm0 | ||
| 212 | movdqa %xmm0,0x10(%rsp) | ||
| 213 | pxor %xmm0,%xmm13 | ||
| 214 | pshufb %xmm3,%xmm13 | ||
| 215 | # x2 += x6, x14 = rotl32(x14 ^ x2, 16) | ||
| 216 | movdqa 0x20(%rsp),%xmm0 | ||
| 217 | paddd %xmm6,%xmm0 | ||
| 218 | movdqa %xmm0,0x20(%rsp) | ||
| 219 | pxor %xmm0,%xmm14 | ||
| 220 | pshufb %xmm3,%xmm14 | ||
| 221 | # x3 += x7, x15 = rotl32(x15 ^ x3, 16) | ||
| 222 | movdqa 0x30(%rsp),%xmm0 | ||
| 223 | paddd %xmm7,%xmm0 | ||
| 224 | movdqa %xmm0,0x30(%rsp) | ||
| 225 | pxor %xmm0,%xmm15 | ||
| 226 | pshufb %xmm3,%xmm15 | ||
| 227 | |||
| 228 | # x8 += x12, x4 = rotl32(x4 ^ x8, 12) | ||
| 229 | paddd %xmm12,%xmm8 | ||
| 230 | pxor %xmm8,%xmm4 | ||
| 231 | movdqa %xmm4,%xmm0 | ||
| 232 | pslld $12,%xmm0 | ||
| 233 | psrld $20,%xmm4 | ||
| 234 | por %xmm0,%xmm4 | ||
| 235 | # x9 += x13, x5 = rotl32(x5 ^ x9, 12) | ||
| 236 | paddd %xmm13,%xmm9 | ||
| 237 | pxor %xmm9,%xmm5 | ||
| 238 | movdqa %xmm5,%xmm0 | ||
| 239 | pslld $12,%xmm0 | ||
| 240 | psrld $20,%xmm5 | ||
| 241 | por %xmm0,%xmm5 | ||
| 242 | # x10 += x14, x6 = rotl32(x6 ^ x10, 12) | ||
| 243 | paddd %xmm14,%xmm10 | ||
| 244 | pxor %xmm10,%xmm6 | ||
| 245 | movdqa %xmm6,%xmm0 | ||
| 246 | pslld $12,%xmm0 | ||
| 247 | psrld $20,%xmm6 | ||
| 248 | por %xmm0,%xmm6 | ||
| 249 | # x11 += x15, x7 = rotl32(x7 ^ x11, 12) | ||
| 250 | paddd %xmm15,%xmm11 | ||
| 251 | pxor %xmm11,%xmm7 | ||
| 252 | movdqa %xmm7,%xmm0 | ||
| 253 | pslld $12,%xmm0 | ||
| 254 | psrld $20,%xmm7 | ||
| 255 | por %xmm0,%xmm7 | ||
| 256 | |||
| 257 | # x0 += x4, x12 = rotl32(x12 ^ x0, 8) | ||
| 258 | movdqa 0x00(%rsp),%xmm0 | ||
| 259 | paddd %xmm4,%xmm0 | ||
| 260 | movdqa %xmm0,0x00(%rsp) | ||
| 261 | pxor %xmm0,%xmm12 | ||
| 262 | pshufb %xmm2,%xmm12 | ||
| 263 | # x1 += x5, x13 = rotl32(x13 ^ x1, 8) | ||
| 264 | movdqa 0x10(%rsp),%xmm0 | ||
| 265 | paddd %xmm5,%xmm0 | ||
| 266 | movdqa %xmm0,0x10(%rsp) | ||
| 267 | pxor %xmm0,%xmm13 | ||
| 268 | pshufb %xmm2,%xmm13 | ||
| 269 | # x2 += x6, x14 = rotl32(x14 ^ x2, 8) | ||
| 270 | movdqa 0x20(%rsp),%xmm0 | ||
| 271 | paddd %xmm6,%xmm0 | ||
| 272 | movdqa %xmm0,0x20(%rsp) | ||
| 273 | pxor %xmm0,%xmm14 | ||
| 274 | pshufb %xmm2,%xmm14 | ||
| 275 | # x3 += x7, x15 = rotl32(x15 ^ x3, 8) | ||
| 276 | movdqa 0x30(%rsp),%xmm0 | ||
| 277 | paddd %xmm7,%xmm0 | ||
| 278 | movdqa %xmm0,0x30(%rsp) | ||
| 279 | pxor %xmm0,%xmm15 | ||
| 280 | pshufb %xmm2,%xmm15 | ||
| 281 | |||
| 282 | # x8 += x12, x4 = rotl32(x4 ^ x8, 7) | ||
| 283 | paddd %xmm12,%xmm8 | ||
| 284 | pxor %xmm8,%xmm4 | ||
| 285 | movdqa %xmm4,%xmm0 | ||
| 286 | pslld $7,%xmm0 | ||
| 287 | psrld $25,%xmm4 | ||
| 288 | por %xmm0,%xmm4 | ||
| 289 | # x9 += x13, x5 = rotl32(x5 ^ x9, 7) | ||
| 290 | paddd %xmm13,%xmm9 | ||
| 291 | pxor %xmm9,%xmm5 | ||
| 292 | movdqa %xmm5,%xmm0 | ||
| 293 | pslld $7,%xmm0 | ||
| 294 | psrld $25,%xmm5 | ||
| 295 | por %xmm0,%xmm5 | ||
| 296 | # x10 += x14, x6 = rotl32(x6 ^ x10, 7) | ||
| 297 | paddd %xmm14,%xmm10 | ||
| 298 | pxor %xmm10,%xmm6 | ||
| 299 | movdqa %xmm6,%xmm0 | ||
| 300 | pslld $7,%xmm0 | ||
| 301 | psrld $25,%xmm6 | ||
| 302 | por %xmm0,%xmm6 | ||
| 303 | # x11 += x15, x7 = rotl32(x7 ^ x11, 7) | ||
| 304 | paddd %xmm15,%xmm11 | ||
| 305 | pxor %xmm11,%xmm7 | ||
| 306 | movdqa %xmm7,%xmm0 | ||
| 307 | pslld $7,%xmm0 | ||
| 308 | psrld $25,%xmm7 | ||
| 309 | por %xmm0,%xmm7 | ||
| 310 | |||
| 311 | # x0 += x5, x15 = rotl32(x15 ^ x0, 16) | ||
| 312 | movdqa 0x00(%rsp),%xmm0 | ||
| 313 | paddd %xmm5,%xmm0 | ||
| 314 | movdqa %xmm0,0x00(%rsp) | ||
| 315 | pxor %xmm0,%xmm15 | ||
| 316 | pshufb %xmm3,%xmm15 | ||
| 317 | # x1 += x6, x12 = rotl32(x12 ^ x1, 16) | ||
| 318 | movdqa 0x10(%rsp),%xmm0 | ||
| 319 | paddd %xmm6,%xmm0 | ||
| 320 | movdqa %xmm0,0x10(%rsp) | ||
| 321 | pxor %xmm0,%xmm12 | ||
| 322 | pshufb %xmm3,%xmm12 | ||
| 323 | # x2 += x7, x13 = rotl32(x13 ^ x2, 16) | ||
| 324 | movdqa 0x20(%rsp),%xmm0 | ||
| 325 | paddd %xmm7,%xmm0 | ||
| 326 | movdqa %xmm0,0x20(%rsp) | ||
| 327 | pxor %xmm0,%xmm13 | ||
| 328 | pshufb %xmm3,%xmm13 | ||
| 329 | # x3 += x4, x14 = rotl32(x14 ^ x3, 16) | ||
| 330 | movdqa 0x30(%rsp),%xmm0 | ||
| 331 | paddd %xmm4,%xmm0 | ||
| 332 | movdqa %xmm0,0x30(%rsp) | ||
| 333 | pxor %xmm0,%xmm14 | ||
| 334 | pshufb %xmm3,%xmm14 | ||
| 335 | |||
| 336 | # x10 += x15, x5 = rotl32(x5 ^ x10, 12) | ||
| 337 | paddd %xmm15,%xmm10 | ||
| 338 | pxor %xmm10,%xmm5 | ||
| 339 | movdqa %xmm5,%xmm0 | ||
| 340 | pslld $12,%xmm0 | ||
| 341 | psrld $20,%xmm5 | ||
| 342 | por %xmm0,%xmm5 | ||
| 343 | # x11 += x12, x6 = rotl32(x6 ^ x11, 12) | ||
| 344 | paddd %xmm12,%xmm11 | ||
| 345 | pxor %xmm11,%xmm6 | ||
| 346 | movdqa %xmm6,%xmm0 | ||
| 347 | pslld $12,%xmm0 | ||
| 348 | psrld $20,%xmm6 | ||
| 349 | por %xmm0,%xmm6 | ||
| 350 | # x8 += x13, x7 = rotl32(x7 ^ x8, 12) | ||
| 351 | paddd %xmm13,%xmm8 | ||
| 352 | pxor %xmm8,%xmm7 | ||
| 353 | movdqa %xmm7,%xmm0 | ||
| 354 | pslld $12,%xmm0 | ||
| 355 | psrld $20,%xmm7 | ||
| 356 | por %xmm0,%xmm7 | ||
| 357 | # x9 += x14, x4 = rotl32(x4 ^ x9, 12) | ||
| 358 | paddd %xmm14,%xmm9 | ||
| 359 | pxor %xmm9,%xmm4 | ||
| 360 | movdqa %xmm4,%xmm0 | ||
| 361 | pslld $12,%xmm0 | ||
| 362 | psrld $20,%xmm4 | ||
| 363 | por %xmm0,%xmm4 | ||
| 364 | |||
| 365 | # x0 += x5, x15 = rotl32(x15 ^ x0, 8) | ||
| 366 | movdqa 0x00(%rsp),%xmm0 | ||
| 367 | paddd %xmm5,%xmm0 | ||
| 368 | movdqa %xmm0,0x00(%rsp) | ||
| 369 | pxor %xmm0,%xmm15 | ||
| 370 | pshufb %xmm2,%xmm15 | ||
| 371 | # x1 += x6, x12 = rotl32(x12 ^ x1, 8) | ||
| 372 | movdqa 0x10(%rsp),%xmm0 | ||
| 373 | paddd %xmm6,%xmm0 | ||
| 374 | movdqa %xmm0,0x10(%rsp) | ||
| 375 | pxor %xmm0,%xmm12 | ||
| 376 | pshufb %xmm2,%xmm12 | ||
| 377 | # x2 += x7, x13 = rotl32(x13 ^ x2, 8) | ||
| 378 | movdqa 0x20(%rsp),%xmm0 | ||
| 379 | paddd %xmm7,%xmm0 | ||
| 380 | movdqa %xmm0,0x20(%rsp) | ||
| 381 | pxor %xmm0,%xmm13 | ||
| 382 | pshufb %xmm2,%xmm13 | ||
| 383 | # x3 += x4, x14 = rotl32(x14 ^ x3, 8) | ||
| 384 | movdqa 0x30(%rsp),%xmm0 | ||
| 385 | paddd %xmm4,%xmm0 | ||
| 386 | movdqa %xmm0,0x30(%rsp) | ||
| 387 | pxor %xmm0,%xmm14 | ||
| 388 | pshufb %xmm2,%xmm14 | ||
| 389 | |||
| 390 | # x10 += x15, x5 = rotl32(x5 ^ x10, 7) | ||
| 391 | paddd %xmm15,%xmm10 | ||
| 392 | pxor %xmm10,%xmm5 | ||
| 393 | movdqa %xmm5,%xmm0 | ||
| 394 | pslld $7,%xmm0 | ||
| 395 | psrld $25,%xmm5 | ||
| 396 | por %xmm0,%xmm5 | ||
| 397 | # x11 += x12, x6 = rotl32(x6 ^ x11, 7) | ||
| 398 | paddd %xmm12,%xmm11 | ||
| 399 | pxor %xmm11,%xmm6 | ||
| 400 | movdqa %xmm6,%xmm0 | ||
| 401 | pslld $7,%xmm0 | ||
| 402 | psrld $25,%xmm6 | ||
| 403 | por %xmm0,%xmm6 | ||
| 404 | # x8 += x13, x7 = rotl32(x7 ^ x8, 7) | ||
| 405 | paddd %xmm13,%xmm8 | ||
| 406 | pxor %xmm8,%xmm7 | ||
| 407 | movdqa %xmm7,%xmm0 | ||
| 408 | pslld $7,%xmm0 | ||
| 409 | psrld $25,%xmm7 | ||
| 410 | por %xmm0,%xmm7 | ||
| 411 | # x9 += x14, x4 = rotl32(x4 ^ x9, 7) | ||
| 412 | paddd %xmm14,%xmm9 | ||
| 413 | pxor %xmm9,%xmm4 | ||
| 414 | movdqa %xmm4,%xmm0 | ||
| 415 | pslld $7,%xmm0 | ||
| 416 | psrld $25,%xmm4 | ||
| 417 | por %xmm0,%xmm4 | ||
| 418 | |||
| 419 | dec %ecx | ||
| 420 | jnz .Ldoubleround4 | ||
| 421 | |||
| 422 | # x0[0-3] += s0[0] | ||
| 423 | # x1[0-3] += s0[1] | ||
| 424 | movq 0x00(%rdi),%xmm3 | ||
| 425 | pshufd $0x00,%xmm3,%xmm2 | ||
| 426 | pshufd $0x55,%xmm3,%xmm3 | ||
| 427 | paddd 0x00(%rsp),%xmm2 | ||
| 428 | movdqa %xmm2,0x00(%rsp) | ||
| 429 | paddd 0x10(%rsp),%xmm3 | ||
| 430 | movdqa %xmm3,0x10(%rsp) | ||
| 431 | # x2[0-3] += s0[2] | ||
| 432 | # x3[0-3] += s0[3] | ||
| 433 | movq 0x08(%rdi),%xmm3 | ||
| 434 | pshufd $0x00,%xmm3,%xmm2 | ||
| 435 | pshufd $0x55,%xmm3,%xmm3 | ||
| 436 | paddd 0x20(%rsp),%xmm2 | ||
| 437 | movdqa %xmm2,0x20(%rsp) | ||
| 438 | paddd 0x30(%rsp),%xmm3 | ||
| 439 | movdqa %xmm3,0x30(%rsp) | ||
| 440 | |||
| 441 | # x4[0-3] += s1[0] | ||
| 442 | # x5[0-3] += s1[1] | ||
| 443 | movq 0x10(%rdi),%xmm3 | ||
| 444 | pshufd $0x00,%xmm3,%xmm2 | ||
| 445 | pshufd $0x55,%xmm3,%xmm3 | ||
| 446 | paddd %xmm2,%xmm4 | ||
| 447 | paddd %xmm3,%xmm5 | ||
| 448 | # x6[0-3] += s1[2] | ||
| 449 | # x7[0-3] += s1[3] | ||
| 450 | movq 0x18(%rdi),%xmm3 | ||
| 451 | pshufd $0x00,%xmm3,%xmm2 | ||
| 452 | pshufd $0x55,%xmm3,%xmm3 | ||
| 453 | paddd %xmm2,%xmm6 | ||
| 454 | paddd %xmm3,%xmm7 | ||
| 455 | |||
| 456 | # x8[0-3] += s2[0] | ||
| 457 | # x9[0-3] += s2[1] | ||
| 458 | movq 0x20(%rdi),%xmm3 | ||
| 459 | pshufd $0x00,%xmm3,%xmm2 | ||
| 460 | pshufd $0x55,%xmm3,%xmm3 | ||
| 461 | paddd %xmm2,%xmm8 | ||
| 462 | paddd %xmm3,%xmm9 | ||
| 463 | # x10[0-3] += s2[2] | ||
| 464 | # x11[0-3] += s2[3] | ||
| 465 | movq 0x28(%rdi),%xmm3 | ||
| 466 | pshufd $0x00,%xmm3,%xmm2 | ||
| 467 | pshufd $0x55,%xmm3,%xmm3 | ||
| 468 | paddd %xmm2,%xmm10 | ||
| 469 | paddd %xmm3,%xmm11 | ||
| 470 | |||
| 471 | # x12[0-3] += s3[0] | ||
| 472 | # x13[0-3] += s3[1] | ||
| 473 | movq 0x30(%rdi),%xmm3 | ||
| 474 | pshufd $0x00,%xmm3,%xmm2 | ||
| 475 | pshufd $0x55,%xmm3,%xmm3 | ||
| 476 | paddd %xmm2,%xmm12 | ||
| 477 | paddd %xmm3,%xmm13 | ||
| 478 | # x14[0-3] += s3[2] | ||
| 479 | # x15[0-3] += s3[3] | ||
| 480 | movq 0x38(%rdi),%xmm3 | ||
| 481 | pshufd $0x00,%xmm3,%xmm2 | ||
| 482 | pshufd $0x55,%xmm3,%xmm3 | ||
| 483 | paddd %xmm2,%xmm14 | ||
| 484 | paddd %xmm3,%xmm15 | ||
| 485 | |||
| 486 | # x12 += counter values 0-3 | ||
| 487 | paddd %xmm1,%xmm12 | ||
| 488 | |||
| 489 | # interleave 32-bit words in state n, n+1 | ||
| 490 | movdqa 0x00(%rsp),%xmm0 | ||
| 491 | movdqa 0x10(%rsp),%xmm1 | ||
| 492 | movdqa %xmm0,%xmm2 | ||
| 493 | punpckldq %xmm1,%xmm2 | ||
| 494 | punpckhdq %xmm1,%xmm0 | ||
| 495 | movdqa %xmm2,0x00(%rsp) | ||
| 496 | movdqa %xmm0,0x10(%rsp) | ||
| 497 | movdqa 0x20(%rsp),%xmm0 | ||
| 498 | movdqa 0x30(%rsp),%xmm1 | ||
| 499 | movdqa %xmm0,%xmm2 | ||
| 500 | punpckldq %xmm1,%xmm2 | ||
| 501 | punpckhdq %xmm1,%xmm0 | ||
| 502 | movdqa %xmm2,0x20(%rsp) | ||
| 503 | movdqa %xmm0,0x30(%rsp) | ||
| 504 | movdqa %xmm4,%xmm0 | ||
| 505 | punpckldq %xmm5,%xmm4 | ||
| 506 | punpckhdq %xmm5,%xmm0 | ||
| 507 | movdqa %xmm0,%xmm5 | ||
| 508 | movdqa %xmm6,%xmm0 | ||
| 509 | punpckldq %xmm7,%xmm6 | ||
| 510 | punpckhdq %xmm7,%xmm0 | ||
| 511 | movdqa %xmm0,%xmm7 | ||
| 512 | movdqa %xmm8,%xmm0 | ||
| 513 | punpckldq %xmm9,%xmm8 | ||
| 514 | punpckhdq %xmm9,%xmm0 | ||
| 515 | movdqa %xmm0,%xmm9 | ||
| 516 | movdqa %xmm10,%xmm0 | ||
| 517 | punpckldq %xmm11,%xmm10 | ||
| 518 | punpckhdq %xmm11,%xmm0 | ||
| 519 | movdqa %xmm0,%xmm11 | ||
| 520 | movdqa %xmm12,%xmm0 | ||
| 521 | punpckldq %xmm13,%xmm12 | ||
| 522 | punpckhdq %xmm13,%xmm0 | ||
| 523 | movdqa %xmm0,%xmm13 | ||
| 524 | movdqa %xmm14,%xmm0 | ||
| 525 | punpckldq %xmm15,%xmm14 | ||
| 526 | punpckhdq %xmm15,%xmm0 | ||
| 527 | movdqa %xmm0,%xmm15 | ||
| 528 | |||
| 529 | # interleave 64-bit words in state n, n+2 | ||
| 530 | movdqa 0x00(%rsp),%xmm0 | ||
| 531 | movdqa 0x20(%rsp),%xmm1 | ||
| 532 | movdqa %xmm0,%xmm2 | ||
| 533 | punpcklqdq %xmm1,%xmm2 | ||
| 534 | punpckhqdq %xmm1,%xmm0 | ||
| 535 | movdqa %xmm2,0x00(%rsp) | ||
| 536 | movdqa %xmm0,0x20(%rsp) | ||
| 537 | movdqa 0x10(%rsp),%xmm0 | ||
| 538 | movdqa 0x30(%rsp),%xmm1 | ||
| 539 | movdqa %xmm0,%xmm2 | ||
| 540 | punpcklqdq %xmm1,%xmm2 | ||
| 541 | punpckhqdq %xmm1,%xmm0 | ||
| 542 | movdqa %xmm2,0x10(%rsp) | ||
| 543 | movdqa %xmm0,0x30(%rsp) | ||
| 544 | movdqa %xmm4,%xmm0 | ||
| 545 | punpcklqdq %xmm6,%xmm4 | ||
| 546 | punpckhqdq %xmm6,%xmm0 | ||
| 547 | movdqa %xmm0,%xmm6 | ||
| 548 | movdqa %xmm5,%xmm0 | ||
| 549 | punpcklqdq %xmm7,%xmm5 | ||
| 550 | punpckhqdq %xmm7,%xmm0 | ||
| 551 | movdqa %xmm0,%xmm7 | ||
| 552 | movdqa %xmm8,%xmm0 | ||
| 553 | punpcklqdq %xmm10,%xmm8 | ||
| 554 | punpckhqdq %xmm10,%xmm0 | ||
| 555 | movdqa %xmm0,%xmm10 | ||
| 556 | movdqa %xmm9,%xmm0 | ||
| 557 | punpcklqdq %xmm11,%xmm9 | ||
| 558 | punpckhqdq %xmm11,%xmm0 | ||
| 559 | movdqa %xmm0,%xmm11 | ||
| 560 | movdqa %xmm12,%xmm0 | ||
| 561 | punpcklqdq %xmm14,%xmm12 | ||
| 562 | punpckhqdq %xmm14,%xmm0 | ||
| 563 | movdqa %xmm0,%xmm14 | ||
| 564 | movdqa %xmm13,%xmm0 | ||
| 565 | punpcklqdq %xmm15,%xmm13 | ||
| 566 | punpckhqdq %xmm15,%xmm0 | ||
| 567 | movdqa %xmm0,%xmm15 | ||
| 568 | |||
| 569 | # xor with corresponding input, write to output | ||
| 570 | movdqa 0x00(%rsp),%xmm0 | ||
| 571 | movdqu 0x00(%rdx),%xmm1 | ||
| 572 | pxor %xmm1,%xmm0 | ||
| 573 | movdqu %xmm0,0x00(%rsi) | ||
| 574 | movdqa 0x10(%rsp),%xmm0 | ||
| 575 | movdqu 0x80(%rdx),%xmm1 | ||
| 576 | pxor %xmm1,%xmm0 | ||
| 577 | movdqu %xmm0,0x80(%rsi) | ||
| 578 | movdqa 0x20(%rsp),%xmm0 | ||
| 579 | movdqu 0x40(%rdx),%xmm1 | ||
| 580 | pxor %xmm1,%xmm0 | ||
| 581 | movdqu %xmm0,0x40(%rsi) | ||
| 582 | movdqa 0x30(%rsp),%xmm0 | ||
| 583 | movdqu 0xc0(%rdx),%xmm1 | ||
| 584 | pxor %xmm1,%xmm0 | ||
| 585 | movdqu %xmm0,0xc0(%rsi) | ||
| 586 | movdqu 0x10(%rdx),%xmm1 | ||
| 587 | pxor %xmm1,%xmm4 | ||
| 588 | movdqu %xmm4,0x10(%rsi) | ||
| 589 | movdqu 0x90(%rdx),%xmm1 | ||
| 590 | pxor %xmm1,%xmm5 | ||
| 591 | movdqu %xmm5,0x90(%rsi) | ||
| 592 | movdqu 0x50(%rdx),%xmm1 | ||
| 593 | pxor %xmm1,%xmm6 | ||
| 594 | movdqu %xmm6,0x50(%rsi) | ||
| 595 | movdqu 0xd0(%rdx),%xmm1 | ||
| 596 | pxor %xmm1,%xmm7 | ||
| 597 | movdqu %xmm7,0xd0(%rsi) | ||
| 598 | movdqu 0x20(%rdx),%xmm1 | ||
| 599 | pxor %xmm1,%xmm8 | ||
| 600 | movdqu %xmm8,0x20(%rsi) | ||
| 601 | movdqu 0xa0(%rdx),%xmm1 | ||
| 602 | pxor %xmm1,%xmm9 | ||
| 603 | movdqu %xmm9,0xa0(%rsi) | ||
| 604 | movdqu 0x60(%rdx),%xmm1 | ||
| 605 | pxor %xmm1,%xmm10 | ||
| 606 | movdqu %xmm10,0x60(%rsi) | ||
| 607 | movdqu 0xe0(%rdx),%xmm1 | ||
| 608 | pxor %xmm1,%xmm11 | ||
| 609 | movdqu %xmm11,0xe0(%rsi) | ||
| 610 | movdqu 0x30(%rdx),%xmm1 | ||
| 611 | pxor %xmm1,%xmm12 | ||
| 612 | movdqu %xmm12,0x30(%rsi) | ||
| 613 | movdqu 0xb0(%rdx),%xmm1 | ||
| 614 | pxor %xmm1,%xmm13 | ||
| 615 | movdqu %xmm13,0xb0(%rsi) | ||
| 616 | movdqu 0x70(%rdx),%xmm1 | ||
| 617 | pxor %xmm1,%xmm14 | ||
| 618 | movdqu %xmm14,0x70(%rsi) | ||
| 619 | movdqu 0xf0(%rdx),%xmm1 | ||
| 620 | pxor %xmm1,%xmm15 | ||
| 621 | movdqu %xmm15,0xf0(%rsi) | ||
| 622 | |||
| 623 | add $0x40,%rsp | ||
| 624 | ret | ||
| 625 | ENDPROC(chacha20_4block_xor_ssse3) | ||
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c new file mode 100644 index 000000000000..effe2160b7c5 --- /dev/null +++ b/arch/x86/crypto/chacha20_glue.c | |||
| @@ -0,0 +1,150 @@ | |||
| 1 | /* | ||
| 2 | * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Martin Willi | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
#include <asm/simd.h>

/* The asm routines expect the state words at 16-byte alignment. */
#define CHACHA20_STATE_ALIGN 16

/* 1- and 4-block SSSE3 keystream-XOR routines (implemented in .S file). */
asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
#ifdef CONFIG_AS_AVX2
/* 8-block AVX2 routine; used only when chacha20_use_avx2 is set at init. */
asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
static bool chacha20_use_avx2;
#endif
| 28 | |||
| 29 | static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src, | ||
| 30 | unsigned int bytes) | ||
| 31 | { | ||
| 32 | u8 buf[CHACHA20_BLOCK_SIZE]; | ||
| 33 | |||
| 34 | #ifdef CONFIG_AS_AVX2 | ||
| 35 | if (chacha20_use_avx2) { | ||
| 36 | while (bytes >= CHACHA20_BLOCK_SIZE * 8) { | ||
| 37 | chacha20_8block_xor_avx2(state, dst, src); | ||
| 38 | bytes -= CHACHA20_BLOCK_SIZE * 8; | ||
| 39 | src += CHACHA20_BLOCK_SIZE * 8; | ||
| 40 | dst += CHACHA20_BLOCK_SIZE * 8; | ||
| 41 | state[12] += 8; | ||
| 42 | } | ||
| 43 | } | ||
| 44 | #endif | ||
| 45 | while (bytes >= CHACHA20_BLOCK_SIZE * 4) { | ||
| 46 | chacha20_4block_xor_ssse3(state, dst, src); | ||
| 47 | bytes -= CHACHA20_BLOCK_SIZE * 4; | ||
| 48 | src += CHACHA20_BLOCK_SIZE * 4; | ||
| 49 | dst += CHACHA20_BLOCK_SIZE * 4; | ||
| 50 | state[12] += 4; | ||
| 51 | } | ||
| 52 | while (bytes >= CHACHA20_BLOCK_SIZE) { | ||
| 53 | chacha20_block_xor_ssse3(state, dst, src); | ||
| 54 | bytes -= CHACHA20_BLOCK_SIZE; | ||
| 55 | src += CHACHA20_BLOCK_SIZE; | ||
| 56 | dst += CHACHA20_BLOCK_SIZE; | ||
| 57 | state[12]++; | ||
| 58 | } | ||
| 59 | if (bytes) { | ||
| 60 | memcpy(buf, src, bytes); | ||
| 61 | chacha20_block_xor_ssse3(state, buf, buf); | ||
| 62 | memcpy(dst, buf, bytes); | ||
| 63 | } | ||
| 64 | } | ||
| 65 | |||
/*
 * blkcipher .encrypt/.decrypt handler.  Runs the SIMD keystream routines
 * when the FPU may be used in this context, otherwise falls back to the
 * generic C implementation.
 */
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
			 struct scatterlist *src, unsigned int nbytes)
{
	/* Over-allocate so the state can be bumped up to 16-byte alignment. */
	u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
	struct blkcipher_walk walk;
	int err;

	/* FPU not usable (e.g. irq context): take the scalar fallback. */
	if (!may_use_simd())
		return crypto_chacha20_crypt(desc, dst, src, nbytes);

	state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);

	/* state = constants | key | counter | nonce (from walk.iv). */
	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);

	kernel_fpu_begin();

	/* Process the whole blocks of each walk step; carry the remainder. */
	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % CHACHA20_BLOCK_SIZE);
	}

	/* Trailing partial block, if any. */
	if (walk.nbytes) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	kernel_fpu_end();

	return err;
}
| 102 | |||
/*
 * blkcipher registration for "chacha20".  cra_priority 300 lets this SIMD
 * driver win over lower-priority implementations of the same cra_name.
 * ChaCha20 is a stream cipher, hence cra_blocksize = 1.
 */
static struct crypto_alg alg = {
	.cra_name		= "chacha20",
	.cra_driver_name	= "chacha20-simd",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_ctxsize		= sizeof(struct chacha20_ctx),
	.cra_alignmask		= sizeof(u32) - 1,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= CHACHA20_KEY_SIZE,
			.max_keysize	= CHACHA20_KEY_SIZE,
			.ivsize		= CHACHA20_IV_SIZE,
			.geniv		= "seqiv",
			.setkey		= crypto_chacha20_setkey,
			/* Stream cipher: encrypt and decrypt are the same op. */
			.encrypt	= chacha20_simd,
			.decrypt	= chacha20_simd,
		},
	},
};
| 125 | |||
/* Register the SIMD driver; SSSE3 is the minimum CPU requirement. */
static int __init chacha20_simd_mod_init(void)
{
	if (!cpu_has_ssse3)
		return -ENODEV;

#ifdef CONFIG_AS_AVX2
	/* The AVX2 path also needs the OS to context-switch SSE/YMM state. */
	chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
			    cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
#endif
	return crypto_register_alg(&alg);
}
| 137 | |||
/* Module unload: deregister the algorithm from the crypto API. */
static void __exit chacha20_simd_mod_fini(void)
{
	crypto_unregister_alg(&alg);
}
| 142 | |||
module_init(chacha20_simd_mod_init);
module_exit(chacha20_simd_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated");
/* Allow autoloading both by algorithm name and by driver name. */
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-simd");
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S new file mode 100644 index 000000000000..eff2f414e22b --- /dev/null +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S | |||
| @@ -0,0 +1,386 @@ | |||
| 1 | /* | ||
| 2 | * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Martin Willi | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | |||
.data
.align 32

/* AND mask: keep the low 26 bits of each 32-bit lane (base-2^26 limbs). */
ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
	.octa 0x0000000003ffffff0000000003ffffff
/* OR mask: set bit 24 in each lane -- the "1" pad bit of a full 16B block. */
ORMASK:	.octa 0x00000000010000000000000001000000
	.octa 0x00000000010000000000000001000000

.text

/* Accumulator limbs h[0..4] (base 2^26), in memory via %rdi. */
#define h0 0x00(%rdi)
#define h1 0x04(%rdi)
#define h2 0x08(%rdi)
#define h3 0x0c(%rdi)
#define h4 0x10(%rdi)
/* Key r limbs, via %rdx. */
#define r0 0x00(%rdx)
#define r1 0x04(%rdx)
#define r2 0x08(%rdx)
#define r3 0x0c(%rdx)
#define r4 0x10(%rdx)
/* Derived keys via %r8: u = r^2, w = r^3, y = r^4 (five limbs each). */
#define u0 0x00(%r8)
#define u1 0x04(%r8)
#define u2 0x08(%r8)
#define u3 0x0c(%r8)
#define u4 0x10(%r8)
#define w0 0x14(%r8)
#define w1 0x18(%r8)
#define w2 0x1c(%r8)
#define w3 0x20(%r8)
#define w4 0x24(%r8)
#define y0 0x28(%r8)
#define y1 0x2c(%r8)
#define y2 0x30(%r8)
#define y3 0x34(%r8)
#define y4 0x38(%r8)
/* Message pointer. */
#define m %rsi
/* hc<i>: limb i of (h + m) for the four blocks, one block per lane. */
#define hc0 %ymm0
#define hc1 %ymm1
#define hc2 %ymm2
#define hc3 %ymm3
#define hc4 %ymm4
/* Low-half (xmm) views of the same registers. */
#define hc0x %xmm0
#define hc1x %xmm1
#define hc2x %xmm2
#define hc3x %xmm3
#define hc4x %xmm4
/* Scratch vectors. */
#define t1 %ymm5
#define t2 %ymm6
#define t1x %xmm5
#define t2x %xmm6
/* ruwy<i>: limb i of [r, u, w, y] packed across lanes. */
#define ruwy0 %ymm7
#define ruwy1 %ymm8
#define ruwy2 %ymm9
#define ruwy3 %ymm10
#define ruwy4 %ymm11
#define ruwy0x %xmm7
#define ruwy1x %xmm8
#define ruwy2x %xmm9
#define ruwy3x %xmm10
#define ruwy4x %xmm11
/* svxz<i> = 5 * ruwy<i>, pre-multiplied for the mod 2^130-5 reduction. */
#define svxz1 %ymm12
#define svxz2 %ymm13
#define svxz3 %ymm14
#define svxz4 %ymm15
/* 64-bit scalar accumulators for the per-limb products. */
#define d0 %r9
#define d1 %r10
#define d2 %r11
#define d3 %r12
#define d4 %r13
| 83 | |||
ENTRY(poly1305_4block_avx2)
	# %rdi: Accumulator h[5]
	# %rsi: 64 byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Quadblock count
	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5],

	# This four-block variant uses loop unrolled block processing. It
	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
	# h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
	#
	# Fix vs. original: the partial reduction at the bottom of the loop
	# must use 64-bit arithmetic -- (d4 >> 26) * 5 and the resulting h0
	# do not fit in 32 bits for all reachable accumulator values, and
	# 32-bit lea/add silently truncated, corrupting the tag for some
	# inputs.

	vzeroupper
	push		%rbx
	push		%r12
	push		%r13

	# combine r0,u0,w0,y0
	vmovd		y0,ruwy0x
	vmovd		w0,t1x
	vpunpcklqdq	t1,ruwy0,ruwy0
	vmovd		u0,t1x
	vmovd		r0,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy0,ruwy0

	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
	vmovd		y1,ruwy1x
	vmovd		w1,t1x
	vpunpcklqdq	t1,ruwy1,ruwy1
	vmovd		u1,t1x
	vmovd		r1,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy1,ruwy1
	vpslld		$2,ruwy1,svxz1
	vpaddd		ruwy1,svxz1,svxz1

	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
	vmovd		y2,ruwy2x
	vmovd		w2,t1x
	vpunpcklqdq	t1,ruwy2,ruwy2
	vmovd		u2,t1x
	vmovd		r2,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy2,ruwy2
	vpslld		$2,ruwy2,svxz2
	vpaddd		ruwy2,svxz2,svxz2

	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
	vmovd		y3,ruwy3x
	vmovd		w3,t1x
	vpunpcklqdq	t1,ruwy3,ruwy3
	vmovd		u3,t1x
	vmovd		r3,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy3,ruwy3
	vpslld		$2,ruwy3,svxz3
	vpaddd		ruwy3,svxz3,svxz3

	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
	vmovd		y4,ruwy4x
	vmovd		w4,t1x
	vpunpcklqdq	t1,ruwy4,ruwy4
	vmovd		u4,t1x
	vmovd		r4,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy4,ruwy4
	vpslld		$2,ruwy4,svxz4
	vpaddd		ruwy4,svxz4,svxz4

.Ldoblock4:
	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
	vmovd		0x00(m),hc0x
	vmovd		0x10(m),t1x
	vpunpcklqdq	t1,hc0,hc0
	vmovd		0x20(m),t1x
	vmovd		0x30(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc0,hc0
	vpand		ANMASK(%rip),hc0,hc0
	vmovd		h0,t1x
	vpaddd		t1,hc0,hc0
	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
	vmovd		0x03(m),hc1x
	vmovd		0x13(m),t1x
	vpunpcklqdq	t1,hc1,hc1
	vmovd		0x23(m),t1x
	vmovd		0x33(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc1,hc1
	vpsrld		$2,hc1,hc1
	vpand		ANMASK(%rip),hc1,hc1
	vmovd		h1,t1x
	vpaddd		t1,hc1,hc1
	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
	vmovd		0x06(m),hc2x
	vmovd		0x16(m),t1x
	vpunpcklqdq	t1,hc2,hc2
	vmovd		0x26(m),t1x
	vmovd		0x36(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc2,hc2
	vpsrld		$4,hc2,hc2
	vpand		ANMASK(%rip),hc2,hc2
	vmovd		h2,t1x
	vpaddd		t1,hc2,hc2
	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
	vmovd		0x09(m),hc3x
	vmovd		0x19(m),t1x
	vpunpcklqdq	t1,hc3,hc3
	vmovd		0x29(m),t1x
	vmovd		0x39(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc3,hc3
	vpsrld		$6,hc3,hc3
	vpand		ANMASK(%rip),hc3,hc3
	vmovd		h3,t1x
	vpaddd		t1,hc3,hc3
	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
	vmovd		0x0c(m),hc4x
	vmovd		0x1c(m),t1x
	vpunpcklqdq	t1,hc4,hc4
	vmovd		0x2c(m),t1x
	vmovd		0x3c(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc4,hc4
	vpsrld		$8,hc4,hc4
	vpor		ORMASK(%rip),hc4,hc4
	vmovd		h4,t1x
	vpaddd		t1,hc4,hc4

	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
	vpmuludq	hc0,ruwy0,t1
	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
	vpmuludq	hc1,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
	vpmuludq	hc2,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
	vpmuludq	hc3,svxz2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
	vpmuludq	hc4,svxz1,t2
	vpaddq		t2,t1,t1
	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d0

	# t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ]
	vpmuludq	hc0,ruwy1,t1
	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
	vpmuludq	hc1,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
	vpmuludq	hc2,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
	vpmuludq	hc3,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
	vpmuludq	hc4,svxz2,t2
	vpaddq		t2,t1,t1
	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d1

	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
	vpmuludq	hc0,ruwy2,t1
	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
	vpmuludq	hc1,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
	vpmuludq	hc2,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
	vpmuludq	hc3,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
	vpmuludq	hc4,svxz3,t2
	vpaddq		t2,t1,t1
	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d2

	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
	vpmuludq	hc0,ruwy3,t1
	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
	vpmuludq	hc1,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
	vpmuludq	hc2,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
	vpmuludq	hc3,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
	vpmuludq	hc4,svxz4,t2
	vpaddq		t2,t1,t1
	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d3

	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
	vpmuludq	hc0,ruwy4,t1
	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
	vpmuludq	hc1,ruwy3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
	vpmuludq	hc2,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
	vpmuludq	hc3,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
	vpmuludq	hc4,ruwy0,t2
	vpaddq		t2,t1,t1
	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d4

	# Partial reduction back to base-2^26 limbs.  d0..d4 are up to
	# ~2^63, so the carry chain runs in 64-bit registers.

	# d1 += d0 >> 26
	mov		d0,%rax
	shr		$26,%rax
	add		%rax,d1
	# h0 = d0 & 0x3ffffff (kept in %rbx until the final carry below)
	mov		d0,%rbx
	and		$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov		d1,%rax
	shr		$26,%rax
	add		%rax,d2
	# h1 = d1 & 0x3ffffff
	mov		d1,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h1

	# d3 += d2 >> 26
	mov		d2,%rax
	shr		$26,%rax
	add		%rax,d3
	# h2 = d2 & 0x3ffffff
	mov		d2,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h2

	# d4 += d3 >> 26
	mov		d3,%rax
	shr		$26,%rax
	add		%rax,d4
	# h3 = d3 & 0x3ffffff
	mov		d3,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h3

	# h0 += (d4 >> 26) * 5
	# Must stay 64-bit: d4 >> 26 can exceed 32 bits, so 32-bit
	# lea/add here would truncate the product and the sum.
	mov		d4,%rax
	shr		$26,%rax
	lea		(%rax,%rax,4),%rax
	add		%rax,%rbx
	# h4 = d4 & 0x3ffffff
	mov		d4,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h4

	# h1 += h0 >> 26
	# h0 in %rbx may exceed 32 bits after the 64-bit add above, so
	# shift the full 64-bit register.
	mov		%rbx,%rax
	shr		$26,%rax
	add		%eax,h1
	# h0 = h0 & 0x3ffffff
	andl		$0x3ffffff,%ebx
	mov		%ebx,h0

	add		$0x40,m
	dec		%rcx
	jnz		.Ldoblock4

	vzeroupper
	pop		%r13
	pop		%r12
	pop		%rbx
	ret
ENDPROC(poly1305_4block_avx2)
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S new file mode 100644 index 000000000000..338c748054ed --- /dev/null +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S | |||
| @@ -0,0 +1,582 @@ | |||
| 1 | /* | ||
| 2 | * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Martin Willi | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | |||
| 14 | .data | ||
| 15 | .align 16 | ||
| 16 | |||
| 17 | ANMASK: .octa 0x0000000003ffffff0000000003ffffff | ||
| 18 | ORMASK: .octa 0x00000000010000000000000001000000 | ||
| 19 | |||
| 20 | .text | ||
| 21 | |||
| 22 | #define h0 0x00(%rdi) | ||
| 23 | #define h1 0x04(%rdi) | ||
| 24 | #define h2 0x08(%rdi) | ||
| 25 | #define h3 0x0c(%rdi) | ||
| 26 | #define h4 0x10(%rdi) | ||
| 27 | #define r0 0x00(%rdx) | ||
| 28 | #define r1 0x04(%rdx) | ||
| 29 | #define r2 0x08(%rdx) | ||
| 30 | #define r3 0x0c(%rdx) | ||
| 31 | #define r4 0x10(%rdx) | ||
| 32 | #define s1 0x00(%rsp) | ||
| 33 | #define s2 0x04(%rsp) | ||
| 34 | #define s3 0x08(%rsp) | ||
| 35 | #define s4 0x0c(%rsp) | ||
| 36 | #define m %rsi | ||
| 37 | #define h01 %xmm0 | ||
| 38 | #define h23 %xmm1 | ||
| 39 | #define h44 %xmm2 | ||
| 40 | #define t1 %xmm3 | ||
| 41 | #define t2 %xmm4 | ||
| 42 | #define t3 %xmm5 | ||
| 43 | #define t4 %xmm6 | ||
| 44 | #define mask %xmm7 | ||
| 45 | #define d0 %r8 | ||
| 46 | #define d1 %r9 | ||
| 47 | #define d2 %r10 | ||
| 48 | #define d3 %r11 | ||
| 49 | #define d4 %r12 | ||
| 50 | |||
| 51 | ENTRY(poly1305_block_sse2) | ||
| 52 | # %rdi: Accumulator h[5] | ||
| 53 | # %rsi: 16 byte input block m | ||
| 54 | # %rdx: Poly1305 key r[5] | ||
| 55 | # %rcx: Block count | ||
| 56 | |||
| 57 | # This single block variant tries to improve performance by doing two | ||
| 58 | # multiplications in parallel using SSE instructions. There is quite | ||
| 59 | # some quardword packing involved, hence the speedup is marginal. | ||
| 60 | |||
| 61 | push %rbx | ||
| 62 | push %r12 | ||
| 63 | sub $0x10,%rsp | ||
| 64 | |||
| 65 | # s1..s4 = r1..r4 * 5 | ||
| 66 | mov r1,%eax | ||
| 67 | lea (%eax,%eax,4),%eax | ||
| 68 | mov %eax,s1 | ||
| 69 | mov r2,%eax | ||
| 70 | lea (%eax,%eax,4),%eax | ||
| 71 | mov %eax,s2 | ||
| 72 | mov r3,%eax | ||
| 73 | lea (%eax,%eax,4),%eax | ||
| 74 | mov %eax,s3 | ||
| 75 | mov r4,%eax | ||
| 76 | lea (%eax,%eax,4),%eax | ||
| 77 | mov %eax,s4 | ||
| 78 | |||
| 79 | movdqa ANMASK(%rip),mask | ||
| 80 | |||
| 81 | .Ldoblock: | ||
| 82 | # h01 = [0, h1, 0, h0] | ||
| 83 | # h23 = [0, h3, 0, h2] | ||
| 84 | # h44 = [0, h4, 0, h4] | ||
| 85 | movd h0,h01 | ||
| 86 | movd h1,t1 | ||
| 87 | movd h2,h23 | ||
| 88 | movd h3,t2 | ||
| 89 | movd h4,h44 | ||
| 90 | punpcklqdq t1,h01 | ||
| 91 | punpcklqdq t2,h23 | ||
| 92 | punpcklqdq h44,h44 | ||
| 93 | |||
| 94 | # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ] | ||
| 95 | movd 0x00(m),t1 | ||
| 96 | movd 0x03(m),t2 | ||
| 97 | psrld $2,t2 | ||
| 98 | punpcklqdq t2,t1 | ||
| 99 | pand mask,t1 | ||
| 100 | paddd t1,h01 | ||
| 101 | # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ] | ||
| 102 | movd 0x06(m),t1 | ||
| 103 | movd 0x09(m),t2 | ||
| 104 | psrld $4,t1 | ||
| 105 | psrld $6,t2 | ||
| 106 | punpcklqdq t2,t1 | ||
| 107 | pand mask,t1 | ||
| 108 | paddd t1,h23 | ||
| 109 | # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ] | ||
| 110 | mov 0x0c(m),%eax | ||
| 111 | shr $8,%eax | ||
| 112 | or $0x01000000,%eax | ||
| 113 | movd %eax,t1 | ||
| 114 | pshufd $0xc4,t1,t1 | ||
| 115 | paddd t1,h44 | ||
| 116 | |||
| 117 | # t1[0] = h0 * r0 + h2 * s3 | ||
| 118 | # t1[1] = h1 * s4 + h3 * s2 | ||
| 119 | movd r0,t1 | ||
| 120 | movd s4,t2 | ||
| 121 | punpcklqdq t2,t1 | ||
| 122 | pmuludq h01,t1 | ||
| 123 | movd s3,t2 | ||
| 124 | movd s2,t3 | ||
| 125 | punpcklqdq t3,t2 | ||
| 126 | pmuludq h23,t2 | ||
| 127 | paddq t2,t1 | ||
| 128 | # t2[0] = h0 * r1 + h2 * s4 | ||
| 129 | # t2[1] = h1 * r0 + h3 * s3 | ||
| 130 | movd r1,t2 | ||
| 131 | movd r0,t3 | ||
| 132 | punpcklqdq t3,t2 | ||
| 133 | pmuludq h01,t2 | ||
| 134 | movd s4,t3 | ||
| 135 | movd s3,t4 | ||
| 136 | punpcklqdq t4,t3 | ||
| 137 | pmuludq h23,t3 | ||
| 138 | paddq t3,t2 | ||
| 139 | # t3[0] = h4 * s1 | ||
| 140 | # t3[1] = h4 * s2 | ||
| 141 | movd s1,t3 | ||
| 142 | movd s2,t4 | ||
| 143 | punpcklqdq t4,t3 | ||
| 144 | pmuludq h44,t3 | ||
| 145 | # d0 = t1[0] + t1[1] + t3[0] | ||
| 146 | # d1 = t2[0] + t2[1] + t3[1] | ||
| 147 | movdqa t1,t4 | ||
| 148 | punpcklqdq t2,t4 | ||
| 149 | punpckhqdq t2,t1 | ||
| 150 | paddq t4,t1 | ||
| 151 | paddq t3,t1 | ||
| 152 | movq t1,d0 | ||
| 153 | psrldq $8,t1 | ||
| 154 | movq t1,d1 | ||
| 155 | |||
| 156 | # t1[0] = h0 * r2 + h2 * r0 | ||
| 157 | # t1[1] = h1 * r1 + h3 * s4 | ||
| 158 | movd r2,t1 | ||
| 159 | movd r1,t2 | ||
| 160 | punpcklqdq t2,t1 | ||
| 161 | pmuludq h01,t1 | ||
| 162 | movd r0,t2 | ||
| 163 | movd s4,t3 | ||
| 164 | punpcklqdq t3,t2 | ||
| 165 | pmuludq h23,t2 | ||
| 166 | paddq t2,t1 | ||
| 167 | # t2[0] = h0 * r3 + h2 * r1 | ||
| 168 | # t2[1] = h1 * r2 + h3 * r0 | ||
| 169 | movd r3,t2 | ||
| 170 | movd r2,t3 | ||
| 171 | punpcklqdq t3,t2 | ||
| 172 | pmuludq h01,t2 | ||
| 173 | movd r1,t3 | ||
| 174 | movd r0,t4 | ||
| 175 | punpcklqdq t4,t3 | ||
| 176 | pmuludq h23,t3 | ||
| 177 | paddq t3,t2 | ||
| 178 | # t3[0] = h4 * s3 | ||
| 179 | # t3[1] = h4 * s4 | ||
| 180 | movd s3,t3 | ||
| 181 | movd s4,t4 | ||
| 182 | punpcklqdq t4,t3 | ||
| 183 | pmuludq h44,t3 | ||
| 184 | # d2 = t1[0] + t1[1] + t3[0] | ||
| 185 | # d3 = t2[0] + t2[1] + t3[1] | ||
| 186 | movdqa t1,t4 | ||
| 187 | punpcklqdq t2,t4 | ||
| 188 | punpckhqdq t2,t1 | ||
| 189 | paddq t4,t1 | ||
| 190 | paddq t3,t1 | ||
| 191 | movq t1,d2 | ||
| 192 | psrldq $8,t1 | ||
| 193 | movq t1,d3 | ||
| 194 | |||
| 195 | # t1[0] = h0 * r4 + h2 * r2 | ||
| 196 | # t1[1] = h1 * r3 + h3 * r1 | ||
| 197 | movd r4,t1 | ||
| 198 | movd r3,t2 | ||
| 199 | punpcklqdq t2,t1 | ||
| 200 | pmuludq h01,t1 | ||
| 201 | movd r2,t2 | ||
| 202 | movd r1,t3 | ||
| 203 | punpcklqdq t3,t2 | ||
| 204 | pmuludq h23,t2 | ||
| 205 | paddq t2,t1 | ||
| 206 | # t3[0] = h4 * r0 | ||
| 207 | movd r0,t3 | ||
| 208 | pmuludq h44,t3 | ||
| 209 | # d4 = t1[0] + t1[1] + t3[0] | ||
| 210 | movdqa t1,t4 | ||
| 211 | psrldq $8,t4 | ||
| 212 | paddq t4,t1 | ||
| 213 | paddq t3,t1 | ||
| 214 | movq t1,d4 | ||
| 215 | |||
| 216 | # d1 += d0 >> 26 | ||
| 217 | mov d0,%rax | ||
| 218 | shr $26,%rax | ||
| 219 | add %rax,d1 | ||
| 220 | # h0 = d0 & 0x3ffffff | ||
| 221 | mov d0,%rbx | ||
| 222 | and $0x3ffffff,%ebx | ||
| 223 | |||
| 224 | # d2 += d1 >> 26 | ||
| 225 | mov d1,%rax | ||
| 226 | shr $26,%rax | ||
| 227 | add %rax,d2 | ||
| 228 | # h1 = d1 & 0x3ffffff | ||
| 229 | mov d1,%rax | ||
| 230 | and $0x3ffffff,%eax | ||
| 231 | mov %eax,h1 | ||
| 232 | |||
| 233 | # d3 += d2 >> 26 | ||
| 234 | mov d2,%rax | ||
| 235 | shr $26,%rax | ||
| 236 | add %rax,d3 | ||
| 237 | # h2 = d2 & 0x3ffffff | ||
| 238 | mov d2,%rax | ||
| 239 | and $0x3ffffff,%eax | ||
| 240 | mov %eax,h2 | ||
| 241 | |||
| 242 | # d4 += d3 >> 26 | ||
| 243 | mov d3,%rax | ||
| 244 | shr $26,%rax | ||
| 245 | add %rax,d4 | ||
| 246 | # h3 = d3 & 0x3ffffff | ||
| 247 | mov d3,%rax | ||
| 248 | and $0x3ffffff,%eax | ||
| 249 | mov %eax,h3 | ||
| 250 | |||
| 251 | # h0 += (d4 >> 26) * 5 | ||
| 252 | mov d4,%rax | ||
| 253 | shr $26,%rax | ||
| 254 | lea (%eax,%eax,4),%eax | ||
| 255 | add %eax,%ebx | ||
| 256 | # h4 = d4 & 0x3ffffff | ||
| 257 | mov d4,%rax | ||
| 258 | and $0x3ffffff,%eax | ||
| 259 | mov %eax,h4 | ||
| 260 | |||
| 261 | # h1 += h0 >> 26 | ||
| 262 | mov %ebx,%eax | ||
| 263 | shr $26,%eax | ||
| 264 | add %eax,h1 | ||
| 265 | # h0 = h0 & 0x3ffffff | ||
| 266 | andl $0x3ffffff,%ebx | ||
| 267 | mov %ebx,h0 | ||
| 268 | |||
| 269 | add $0x10,m | ||
| 270 | dec %rcx | ||
| 271 | jnz .Ldoblock | ||
| 272 | |||
| 273 | add $0x10,%rsp | ||
| 274 | pop %r12 | ||
| 275 | pop %rbx | ||
| 276 | ret | ||
| 277 | ENDPROC(poly1305_block_sse2) | ||
| 278 | |||
| 279 | |||
| 280 | #define u0 0x00(%r8) | ||
| 281 | #define u1 0x04(%r8) | ||
| 282 | #define u2 0x08(%r8) | ||
| 283 | #define u3 0x0c(%r8) | ||
| 284 | #define u4 0x10(%r8) | ||
| 285 | #define hc0 %xmm0 | ||
| 286 | #define hc1 %xmm1 | ||
| 287 | #define hc2 %xmm2 | ||
| 288 | #define hc3 %xmm5 | ||
| 289 | #define hc4 %xmm6 | ||
| 290 | #define ru0 %xmm7 | ||
| 291 | #define ru1 %xmm8 | ||
| 292 | #define ru2 %xmm9 | ||
| 293 | #define ru3 %xmm10 | ||
| 294 | #define ru4 %xmm11 | ||
| 295 | #define sv1 %xmm12 | ||
| 296 | #define sv2 %xmm13 | ||
| 297 | #define sv3 %xmm14 | ||
| 298 | #define sv4 %xmm15 | ||
| 299 | #undef d0 | ||
| 300 | #define d0 %r13 | ||
| 301 | |||
| 302 | ENTRY(poly1305_2block_sse2) | ||
| 303 | # %rdi: Accumulator h[5] | ||
| 304 | # %rsi: 16 byte input block m | ||
| 305 | # %rdx: Poly1305 key r[5] | ||
| 306 | # %rcx: Doubleblock count | ||
| 307 | # %r8: Poly1305 derived key r^2 u[5] | ||
| 308 | |||
| 309 | # This two-block variant further improves performance by using loop | ||
| 310 | # unrolled block processing. This is more straight forward and does | ||
| 311 | # less byte shuffling, but requires a second Poly1305 key r^2: | ||
| 312 | # h = (h + m) * r => h = (h + m1) * r^2 + m2 * r | ||
| 313 | |||
| 314 | push %rbx | ||
| 315 | push %r12 | ||
| 316 | push %r13 | ||
| 317 | |||
| 318 | # combine r0,u0 | ||
| 319 | movd u0,ru0 | ||
| 320 | movd r0,t1 | ||
| 321 | punpcklqdq t1,ru0 | ||
| 322 | |||
| 323 | # combine r1,u1 and s1=r1*5,v1=u1*5 | ||
| 324 | movd u1,ru1 | ||
| 325 | movd r1,t1 | ||
| 326 | punpcklqdq t1,ru1 | ||
| 327 | movdqa ru1,sv1 | ||
| 328 | pslld $2,sv1 | ||
| 329 | paddd ru1,sv1 | ||
| 330 | |||
| 331 | # combine r2,u2 and s2=r2*5,v2=u2*5 | ||
| 332 | movd u2,ru2 | ||
| 333 | movd r2,t1 | ||
| 334 | punpcklqdq t1,ru2 | ||
| 335 | movdqa ru2,sv2 | ||
| 336 | pslld $2,sv2 | ||
| 337 | paddd ru2,sv2 | ||
| 338 | |||
| 339 | # combine r3,u3 and s3=r3*5,v3=u3*5 | ||
| 340 | movd u3,ru3 | ||
| 341 | movd r3,t1 | ||
| 342 | punpcklqdq t1,ru3 | ||
| 343 | movdqa ru3,sv3 | ||
| 344 | pslld $2,sv3 | ||
| 345 | paddd ru3,sv3 | ||
| 346 | |||
| 347 | # combine r4,u4 and s4=r4*5,v4=u4*5 | ||
| 348 | movd u4,ru4 | ||
| 349 | movd r4,t1 | ||
| 350 | punpcklqdq t1,ru4 | ||
| 351 | movdqa ru4,sv4 | ||
| 352 | pslld $2,sv4 | ||
| 353 | paddd ru4,sv4 | ||
| 354 | |||
| 355 | .Ldoblock2: | ||
| 356 | # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ] | ||
| 357 | movd 0x00(m),hc0 | ||
| 358 | movd 0x10(m),t1 | ||
| 359 | punpcklqdq t1,hc0 | ||
| 360 | pand ANMASK(%rip),hc0 | ||
| 361 | movd h0,t1 | ||
| 362 | paddd t1,hc0 | ||
| 363 | # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ] | ||
| 364 | movd 0x03(m),hc1 | ||
| 365 | movd 0x13(m),t1 | ||
| 366 | punpcklqdq t1,hc1 | ||
| 367 | psrld $2,hc1 | ||
| 368 | pand ANMASK(%rip),hc1 | ||
| 369 | movd h1,t1 | ||
| 370 | paddd t1,hc1 | ||
| 371 | # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ] | ||
| 372 | movd 0x06(m),hc2 | ||
| 373 | movd 0x16(m),t1 | ||
| 374 | punpcklqdq t1,hc2 | ||
| 375 | psrld $4,hc2 | ||
| 376 | pand ANMASK(%rip),hc2 | ||
| 377 | movd h2,t1 | ||
| 378 | paddd t1,hc2 | ||
| 379 | # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ] | ||
| 380 | movd 0x09(m),hc3 | ||
| 381 | movd 0x19(m),t1 | ||
| 382 | punpcklqdq t1,hc3 | ||
| 383 | psrld $6,hc3 | ||
| 384 | pand ANMASK(%rip),hc3 | ||
| 385 | movd h3,t1 | ||
| 386 | paddd t1,hc3 | ||
| 387 | # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ] | ||
| 388 | movd 0x0c(m),hc4 | ||
| 389 | movd 0x1c(m),t1 | ||
| 390 | punpcklqdq t1,hc4 | ||
| 391 | psrld $8,hc4 | ||
| 392 | por ORMASK(%rip),hc4 | ||
| 393 | movd h4,t1 | ||
| 394 | paddd t1,hc4 | ||
| 395 | |||
| 396 | # t1 = [ hc0[1] * r0, hc0[0] * u0 ] | ||
| 397 | movdqa ru0,t1 | ||
| 398 | pmuludq hc0,t1 | ||
| 399 | # t1 += [ hc1[1] * s4, hc1[0] * v4 ] | ||
| 400 | movdqa sv4,t2 | ||
| 401 | pmuludq hc1,t2 | ||
| 402 | paddq t2,t1 | ||
| 403 | # t1 += [ hc2[1] * s3, hc2[0] * v3 ] | ||
| 404 | movdqa sv3,t2 | ||
| 405 | pmuludq hc2,t2 | ||
| 406 | paddq t2,t1 | ||
| 407 | # t1 += [ hc3[1] * s2, hc3[0] * v2 ] | ||
| 408 | movdqa sv2,t2 | ||
| 409 | pmuludq hc3,t2 | ||
| 410 | paddq t2,t1 | ||
| 411 | # t1 += [ hc4[1] * s1, hc4[0] * v1 ] | ||
| 412 | movdqa sv1,t2 | ||
| 413 | pmuludq hc4,t2 | ||
| 414 | paddq t2,t1 | ||
| 415 | # d0 = t1[0] + t1[1] | ||
| 416 | movdqa t1,t2 | ||
| 417 | psrldq $8,t2 | ||
| 418 | paddq t2,t1 | ||
| 419 | movq t1,d0 | ||
| 420 | |||
| 421 | # t1 = [ hc0[1] * r1, hc0[0] * u1 ] | ||
| 422 | movdqa ru1,t1 | ||
| 423 | pmuludq hc0,t1 | ||
| 424 | # t1 += [ hc1[1] * r0, hc1[0] * u0 ] | ||
| 425 | movdqa ru0,t2 | ||
| 426 | pmuludq hc1,t2 | ||
| 427 | paddq t2,t1 | ||
| 428 | # t1 += [ hc2[1] * s4, hc2[0] * v4 ] | ||
| 429 | movdqa sv4,t2 | ||
| 430 | pmuludq hc2,t2 | ||
| 431 | paddq t2,t1 | ||
| 432 | # t1 += [ hc3[1] * s3, hc3[0] * v3 ] | ||
| 433 | movdqa sv3,t2 | ||
| 434 | pmuludq hc3,t2 | ||
| 435 | paddq t2,t1 | ||
| 436 | # t1 += [ hc4[1] * s2, hc4[0] * v2 ] | ||
| 437 | movdqa sv2,t2 | ||
| 438 | pmuludq hc4,t2 | ||
| 439 | paddq t2,t1 | ||
| 440 | # d1 = t1[0] + t1[1] | ||
| 441 | movdqa t1,t2 | ||
| 442 | psrldq $8,t2 | ||
| 443 | paddq t2,t1 | ||
| 444 | movq t1,d1 | ||
| 445 | |||
| 446 | # t1 = [ hc0[1] * r2, hc0[0] * u2 ] | ||
| 447 | movdqa ru2,t1 | ||
| 448 | pmuludq hc0,t1 | ||
| 449 | # t1 += [ hc1[1] * r1, hc1[0] * u1 ] | ||
| 450 | movdqa ru1,t2 | ||
| 451 | pmuludq hc1,t2 | ||
| 452 | paddq t2,t1 | ||
| 453 | # t1 += [ hc2[1] * r0, hc2[0] * u0 ] | ||
| 454 | movdqa ru0,t2 | ||
| 455 | pmuludq hc2,t2 | ||
| 456 | paddq t2,t1 | ||
| 457 | # t1 += [ hc3[1] * s4, hc3[0] * v4 ] | ||
| 458 | movdqa sv4,t2 | ||
| 459 | pmuludq hc3,t2 | ||
| 460 | paddq t2,t1 | ||
| 461 | # t1 += [ hc4[1] * s3, hc4[0] * v3 ] | ||
| 462 | movdqa sv3,t2 | ||
| 463 | pmuludq hc4,t2 | ||
| 464 | paddq t2,t1 | ||
| 465 | # d2 = t1[0] + t1[1] | ||
| 466 | movdqa t1,t2 | ||
| 467 | psrldq $8,t2 | ||
| 468 | paddq t2,t1 | ||
| 469 | movq t1,d2 | ||
| 470 | |||
| 471 | # t1 = [ hc0[1] * r3, hc0[0] * u3 ] | ||
| 472 | movdqa ru3,t1 | ||
| 473 | pmuludq hc0,t1 | ||
| 474 | # t1 += [ hc1[1] * r2, hc1[0] * u2 ] | ||
| 475 | movdqa ru2,t2 | ||
| 476 | pmuludq hc1,t2 | ||
| 477 | paddq t2,t1 | ||
| 478 | # t1 += [ hc2[1] * r1, hc2[0] * u1 ] | ||
| 479 | movdqa ru1,t2 | ||
| 480 | pmuludq hc2,t2 | ||
| 481 | paddq t2,t1 | ||
| 482 | # t1 += [ hc3[1] * r0, hc3[0] * u0 ] | ||
| 483 | movdqa ru0,t2 | ||
| 484 | pmuludq hc3,t2 | ||
| 485 | paddq t2,t1 | ||
| 486 | # t1 += [ hc4[1] * s4, hc4[0] * v4 ] | ||
| 487 | movdqa sv4,t2 | ||
| 488 | pmuludq hc4,t2 | ||
| 489 | paddq t2,t1 | ||
| 490 | # d3 = t1[0] + t1[1] | ||
| 491 | movdqa t1,t2 | ||
| 492 | psrldq $8,t2 | ||
| 493 | paddq t2,t1 | ||
| 494 | movq t1,d3 | ||
| 495 | |||
| 496 | # t1 = [ hc0[1] * r4, hc0[0] * u4 ] | ||
| 497 | movdqa ru4,t1 | ||
| 498 | pmuludq hc0,t1 | ||
| 499 | # t1 += [ hc1[1] * r3, hc1[0] * u3 ] | ||
| 500 | movdqa ru3,t2 | ||
| 501 | pmuludq hc1,t2 | ||
| 502 | paddq t2,t1 | ||
| 503 | # t1 += [ hc2[1] * r2, hc2[0] * u2 ] | ||
| 504 | movdqa ru2,t2 | ||
| 505 | pmuludq hc2,t2 | ||
| 506 | paddq t2,t1 | ||
| 507 | # t1 += [ hc3[1] * r1, hc3[0] * u1 ] | ||
| 508 | movdqa ru1,t2 | ||
| 509 | pmuludq hc3,t2 | ||
| 510 | paddq t2,t1 | ||
| 511 | # t1 += [ hc4[1] * r0, hc4[0] * u0 ] | ||
| 512 | movdqa ru0,t2 | ||
| 513 | pmuludq hc4,t2 | ||
| 514 | paddq t2,t1 | ||
| 515 | # d4 = t1[0] + t1[1] | ||
| 516 | movdqa t1,t2 | ||
| 517 | psrldq $8,t2 | ||
| 518 | paddq t2,t1 | ||
| 519 | movq t1,d4 | ||
| 520 | |||
| 521 | # d1 += d0 >> 26 | ||
| 522 | mov d0,%rax | ||
| 523 | shr $26,%rax | ||
| 524 | add %rax,d1 | ||
| 525 | # h0 = d0 & 0x3ffffff | ||
| 526 | mov d0,%rbx | ||
| 527 | and $0x3ffffff,%ebx | ||
| 528 | |||
| 529 | # d2 += d1 >> 26 | ||
| 530 | mov d1,%rax | ||
| 531 | shr $26,%rax | ||
| 532 | add %rax,d2 | ||
| 533 | # h1 = d1 & 0x3ffffff | ||
| 534 | mov d1,%rax | ||
| 535 | and $0x3ffffff,%eax | ||
| 536 | mov %eax,h1 | ||
| 537 | |||
| 538 | # d3 += d2 >> 26 | ||
| 539 | mov d2,%rax | ||
| 540 | shr $26,%rax | ||
| 541 | add %rax,d3 | ||
| 542 | # h2 = d2 & 0x3ffffff | ||
| 543 | mov d2,%rax | ||
| 544 | and $0x3ffffff,%eax | ||
| 545 | mov %eax,h2 | ||
| 546 | |||
| 547 | # d4 += d3 >> 26 | ||
| 548 | mov d3,%rax | ||
| 549 | shr $26,%rax | ||
| 550 | add %rax,d4 | ||
| 551 | # h3 = d3 & 0x3ffffff | ||
| 552 | mov d3,%rax | ||
| 553 | and $0x3ffffff,%eax | ||
| 554 | mov %eax,h3 | ||
| 555 | |||
| 556 | # h0 += (d4 >> 26) * 5 | ||
| 557 | mov d4,%rax | ||
| 558 | shr $26,%rax | ||
| 559 | lea (%eax,%eax,4),%eax | ||
| 560 | add %eax,%ebx | ||
| 561 | # h4 = d4 & 0x3ffffff | ||
| 562 | mov d4,%rax | ||
| 563 | and $0x3ffffff,%eax | ||
| 564 | mov %eax,h4 | ||
| 565 | |||
| 566 | # h1 += h0 >> 26 | ||
| 567 | mov %ebx,%eax | ||
| 568 | shr $26,%eax | ||
| 569 | add %eax,h1 | ||
| 570 | # h0 = h0 & 0x3ffffff | ||
| 571 | andl $0x3ffffff,%ebx | ||
| 572 | mov %ebx,h0 | ||
| 573 | |||
| 574 | add $0x20,m | ||
| 575 | dec %rcx | ||
| 576 | jnz .Ldoblock2 | ||
| 577 | |||
| 578 | pop %r13 | ||
| 579 | pop %r12 | ||
| 580 | pop %rbx | ||
| 581 | ret | ||
| 582 | ENDPROC(poly1305_2block_sse2) | ||
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c new file mode 100644 index 000000000000..f7170d764f32 --- /dev/null +++ b/arch/x86/crypto/poly1305_glue.c | |||
| @@ -0,0 +1,207 @@ | |||
| 1 | /* | ||
| 2 | * Poly1305 authenticator algorithm, RFC7539, SIMD glue code | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Martin Willi | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <crypto/algapi.h> | ||
| 13 | #include <crypto/internal/hash.h> | ||
| 14 | #include <crypto/poly1305.h> | ||
| 15 | #include <linux/crypto.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <asm/fpu/api.h> | ||
| 19 | #include <asm/simd.h> | ||
| 20 | |||
| 21 | struct poly1305_simd_desc_ctx { | ||
| 22 | struct poly1305_desc_ctx base; | ||
| 23 | /* derived key u set? */ | ||
| 24 | bool uset; | ||
| 25 | #ifdef CONFIG_AS_AVX2 | ||
| 26 | /* derived keys r^3, r^4 set? */ | ||
| 27 | bool wset; | ||
| 28 | #endif | ||
| 29 | /* derived Poly1305 key r^2 */ | ||
| 30 | u32 u[5]; | ||
| 31 | /* ... silently appended r^3 and r^4 when using AVX2 */ | ||
| 32 | }; | ||
| 33 | |||
| 34 | asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, | ||
| 35 | const u32 *r, unsigned int blocks); | ||
| 36 | asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, | ||
| 37 | unsigned int blocks, const u32 *u); | ||
| 38 | #ifdef CONFIG_AS_AVX2 | ||
| 39 | asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, | ||
| 40 | unsigned int blocks, const u32 *u); | ||
| 41 | static bool poly1305_use_avx2; | ||
| 42 | #endif | ||
| 43 | |||
| 44 | static int poly1305_simd_init(struct shash_desc *desc) | ||
| 45 | { | ||
| 46 | struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc); | ||
| 47 | |||
| 48 | sctx->uset = false; | ||
| 49 | #ifdef CONFIG_AS_AVX2 | ||
| 50 | sctx->wset = false; | ||
| 51 | #endif | ||
| 52 | |||
| 53 | return crypto_poly1305_init(desc); | ||
| 54 | } | ||
| 55 | |||
| 56 | static void poly1305_simd_mult(u32 *a, const u32 *b) | ||
| 57 | { | ||
| 58 | u8 m[POLY1305_BLOCK_SIZE]; | ||
| 59 | |||
| 60 | memset(m, 0, sizeof(m)); | ||
| 61 | /* The poly1305 block function adds a hi-bit to the accumulator which | ||
| 62 | * we don't need for key multiplication; compensate for it. */ | ||
| 63 | a[4] -= 1 << 24; | ||
| 64 | poly1305_block_sse2(a, m, b, 1); | ||
| 65 | } | ||
| 66 | |||
| 67 | static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, | ||
| 68 | const u8 *src, unsigned int srclen) | ||
| 69 | { | ||
| 70 | struct poly1305_simd_desc_ctx *sctx; | ||
| 71 | unsigned int blocks, datalen; | ||
| 72 | |||
| 73 | BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base)); | ||
| 74 | sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base); | ||
| 75 | |||
| 76 | if (unlikely(!dctx->sset)) { | ||
| 77 | datalen = crypto_poly1305_setdesckey(dctx, src, srclen); | ||
| 78 | src += srclen - datalen; | ||
| 79 | srclen = datalen; | ||
| 80 | } | ||
| 81 | |||
| 82 | #ifdef CONFIG_AS_AVX2 | ||
| 83 | if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) { | ||
| 84 | if (unlikely(!sctx->wset)) { | ||
| 85 | if (!sctx->uset) { | ||
| 86 | memcpy(sctx->u, dctx->r, sizeof(sctx->u)); | ||
| 87 | poly1305_simd_mult(sctx->u, dctx->r); | ||
| 88 | sctx->uset = true; | ||
| 89 | } | ||
| 90 | memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u)); | ||
| 91 | poly1305_simd_mult(sctx->u + 5, dctx->r); | ||
| 92 | memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u)); | ||
| 93 | poly1305_simd_mult(sctx->u + 10, dctx->r); | ||
| 94 | sctx->wset = true; | ||
| 95 | } | ||
| 96 | blocks = srclen / (POLY1305_BLOCK_SIZE * 4); | ||
| 97 | poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u); | ||
| 98 | src += POLY1305_BLOCK_SIZE * 4 * blocks; | ||
| 99 | srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; | ||
| 100 | } | ||
| 101 | #endif | ||
| 102 | if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { | ||
| 103 | if (unlikely(!sctx->uset)) { | ||
| 104 | memcpy(sctx->u, dctx->r, sizeof(sctx->u)); | ||
| 105 | poly1305_simd_mult(sctx->u, dctx->r); | ||
| 106 | sctx->uset = true; | ||
| 107 | } | ||
| 108 | blocks = srclen / (POLY1305_BLOCK_SIZE * 2); | ||
| 109 | poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u); | ||
| 110 | src += POLY1305_BLOCK_SIZE * 2 * blocks; | ||
| 111 | srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; | ||
| 112 | } | ||
| 113 | if (srclen >= POLY1305_BLOCK_SIZE) { | ||
| 114 | poly1305_block_sse2(dctx->h, src, dctx->r, 1); | ||
| 115 | srclen -= POLY1305_BLOCK_SIZE; | ||
| 116 | } | ||
| 117 | return srclen; | ||
| 118 | } | ||
| 119 | |||
| 120 | static int poly1305_simd_update(struct shash_desc *desc, | ||
| 121 | const u8 *src, unsigned int srclen) | ||
| 122 | { | ||
| 123 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | ||
| 124 | unsigned int bytes; | ||
| 125 | |||
| 126 | /* kernel_fpu_begin/end is costly, use fallback for small updates */ | ||
| 127 | if (srclen <= 288 || !may_use_simd()) | ||
| 128 | return crypto_poly1305_update(desc, src, srclen); | ||
| 129 | |||
| 130 | kernel_fpu_begin(); | ||
| 131 | |||
| 132 | if (unlikely(dctx->buflen)) { | ||
| 133 | bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); | ||
| 134 | memcpy(dctx->buf + dctx->buflen, src, bytes); | ||
| 135 | src += bytes; | ||
| 136 | srclen -= bytes; | ||
| 137 | dctx->buflen += bytes; | ||
| 138 | |||
| 139 | if (dctx->buflen == POLY1305_BLOCK_SIZE) { | ||
| 140 | poly1305_simd_blocks(dctx, dctx->buf, | ||
| 141 | POLY1305_BLOCK_SIZE); | ||
| 142 | dctx->buflen = 0; | ||
| 143 | } | ||
| 144 | } | ||
| 145 | |||
| 146 | if (likely(srclen >= POLY1305_BLOCK_SIZE)) { | ||
| 147 | bytes = poly1305_simd_blocks(dctx, src, srclen); | ||
| 148 | src += srclen - bytes; | ||
| 149 | srclen = bytes; | ||
| 150 | } | ||
| 151 | |||
| 152 | kernel_fpu_end(); | ||
| 153 | |||
| 154 | if (unlikely(srclen)) { | ||
| 155 | dctx->buflen = srclen; | ||
| 156 | memcpy(dctx->buf, src, srclen); | ||
| 157 | } | ||
| 158 | |||
| 159 | return 0; | ||
| 160 | } | ||
| 161 | |||
| 162 | static struct shash_alg alg = { | ||
| 163 | .digestsize = POLY1305_DIGEST_SIZE, | ||
| 164 | .init = poly1305_simd_init, | ||
| 165 | .update = poly1305_simd_update, | ||
| 166 | .final = crypto_poly1305_final, | ||
| 167 | .setkey = crypto_poly1305_setkey, | ||
| 168 | .descsize = sizeof(struct poly1305_simd_desc_ctx), | ||
| 169 | .base = { | ||
| 170 | .cra_name = "poly1305", | ||
| 171 | .cra_driver_name = "poly1305-simd", | ||
| 172 | .cra_priority = 300, | ||
| 173 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
| 174 | .cra_alignmask = sizeof(u32) - 1, | ||
| 175 | .cra_blocksize = POLY1305_BLOCK_SIZE, | ||
| 176 | .cra_module = THIS_MODULE, | ||
| 177 | }, | ||
| 178 | }; | ||
| 179 | |||
| 180 | static int __init poly1305_simd_mod_init(void) | ||
| 181 | { | ||
| 182 | if (!cpu_has_xmm2) | ||
| 183 | return -ENODEV; | ||
| 184 | |||
| 185 | #ifdef CONFIG_AS_AVX2 | ||
| 186 | poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 && | ||
| 187 | cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL); | ||
| 188 | alg.descsize = sizeof(struct poly1305_simd_desc_ctx); | ||
| 189 | if (poly1305_use_avx2) | ||
| 190 | alg.descsize += 10 * sizeof(u32); | ||
| 191 | #endif | ||
| 192 | return crypto_register_shash(&alg); | ||
| 193 | } | ||
| 194 | |||
| 195 | static void __exit poly1305_simd_mod_exit(void) | ||
| 196 | { | ||
| 197 | crypto_unregister_shash(&alg); | ||
| 198 | } | ||
| 199 | |||
| 200 | module_init(poly1305_simd_mod_init); | ||
| 201 | module_exit(poly1305_simd_mod_exit); | ||
| 202 | |||
| 203 | MODULE_LICENSE("GPL"); | ||
| 204 | MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); | ||
| 205 | MODULE_DESCRIPTION("Poly1305 authenticator"); | ||
| 206 | MODULE_ALIAS_CRYPTO("poly1305"); | ||
| 207 | MODULE_ALIAS_CRYPTO("poly1305-simd"); | ||
diff --git a/crypto/Kconfig b/crypto/Kconfig index b4cfc5754033..b582ea7f78d3 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -48,6 +48,8 @@ config CRYPTO_AEAD | |||
| 48 | config CRYPTO_AEAD2 | 48 | config CRYPTO_AEAD2 |
| 49 | tristate | 49 | tristate |
| 50 | select CRYPTO_ALGAPI2 | 50 | select CRYPTO_ALGAPI2 |
| 51 | select CRYPTO_NULL2 | ||
| 52 | select CRYPTO_RNG2 | ||
| 51 | 53 | ||
| 52 | config CRYPTO_BLKCIPHER | 54 | config CRYPTO_BLKCIPHER |
| 53 | tristate | 55 | tristate |
| @@ -150,12 +152,16 @@ config CRYPTO_GF128MUL | |||
| 150 | 152 | ||
| 151 | config CRYPTO_NULL | 153 | config CRYPTO_NULL |
| 152 | tristate "Null algorithms" | 154 | tristate "Null algorithms" |
| 153 | select CRYPTO_ALGAPI | 155 | select CRYPTO_NULL2 |
| 154 | select CRYPTO_BLKCIPHER | ||
| 155 | select CRYPTO_HASH | ||
| 156 | help | 156 | help |
| 157 | These are 'Null' algorithms, used by IPsec, which do nothing. | 157 | These are 'Null' algorithms, used by IPsec, which do nothing. |
| 158 | 158 | ||
| 159 | config CRYPTO_NULL2 | ||
| 160 | tristate | ||
| 161 | select CRYPTO_ALGAPI2 | ||
| 162 | select CRYPTO_BLKCIPHER2 | ||
| 163 | select CRYPTO_HASH2 | ||
| 164 | |||
| 159 | config CRYPTO_PCRYPT | 165 | config CRYPTO_PCRYPT |
| 160 | tristate "Parallel crypto engine" | 166 | tristate "Parallel crypto engine" |
| 161 | depends on SMP | 167 | depends on SMP |
| @@ -200,6 +206,7 @@ config CRYPTO_AUTHENC | |||
| 200 | select CRYPTO_BLKCIPHER | 206 | select CRYPTO_BLKCIPHER |
| 201 | select CRYPTO_MANAGER | 207 | select CRYPTO_MANAGER |
| 202 | select CRYPTO_HASH | 208 | select CRYPTO_HASH |
| 209 | select CRYPTO_NULL | ||
| 203 | help | 210 | help |
| 204 | Authenc: Combined mode wrapper for IPsec. | 211 | Authenc: Combined mode wrapper for IPsec. |
| 205 | This is required for IPSec. | 212 | This is required for IPSec. |
| @@ -470,6 +477,18 @@ config CRYPTO_POLY1305 | |||
| 470 | It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use | 477 | It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use |
| 471 | in IETF protocols. This is the portable C implementation of Poly1305. | 478 | in IETF protocols. This is the portable C implementation of Poly1305. |
| 472 | 479 | ||
| 480 | config CRYPTO_POLY1305_X86_64 | ||
| 481 | tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)" | ||
| 482 | depends on X86 && 64BIT | ||
| 483 | select CRYPTO_POLY1305 | ||
| 484 | help | ||
| 485 | Poly1305 authenticator algorithm, RFC7539. | ||
| 486 | |||
| 487 | Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. | ||
| 488 | It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use | ||
| 489 | in IETF protocols. This is the x86_64 assembler implementation using SIMD | ||
| 490 | instructions. | ||
| 491 | |||
| 473 | config CRYPTO_MD4 | 492 | config CRYPTO_MD4 |
| 474 | tristate "MD4 digest algorithm" | 493 | tristate "MD4 digest algorithm" |
| 475 | select CRYPTO_HASH | 494 | select CRYPTO_HASH |
| @@ -1213,6 +1232,21 @@ config CRYPTO_CHACHA20 | |||
| 1213 | See also: | 1232 | See also: |
| 1214 | <http://cr.yp.to/chacha/chacha-20080128.pdf> | 1233 | <http://cr.yp.to/chacha/chacha-20080128.pdf> |
| 1215 | 1234 | ||
| 1235 | config CRYPTO_CHACHA20_X86_64 | ||
| 1236 | tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)" | ||
| 1237 | depends on X86 && 64BIT | ||
| 1238 | select CRYPTO_BLKCIPHER | ||
| 1239 | select CRYPTO_CHACHA20 | ||
| 1240 | help | ||
| 1241 | ChaCha20 cipher algorithm, RFC7539. | ||
| 1242 | |||
| 1243 | ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. | ||
| 1244 | Bernstein and further specified in RFC7539 for use in IETF protocols. | ||
| 1245 | This is the x86_64 assembler implementation using SIMD instructions. | ||
| 1246 | |||
| 1247 | See also: | ||
| 1248 | <http://cr.yp.to/chacha/chacha-20080128.pdf> | ||
| 1249 | |||
| 1216 | config CRYPTO_SEED | 1250 | config CRYPTO_SEED |
| 1217 | tristate "SEED cipher algorithm" | 1251 | tristate "SEED cipher algorithm" |
| 1218 | select CRYPTO_ALGAPI | 1252 | select CRYPTO_ALGAPI |
diff --git a/crypto/Makefile b/crypto/Makefile index a16a7e7f2d60..e2c59819b236 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
| @@ -17,6 +17,7 @@ obj-$(CONFIG_CRYPTO_AEAD2) += aead.o | |||
| 17 | 17 | ||
| 18 | crypto_blkcipher-y := ablkcipher.o | 18 | crypto_blkcipher-y := ablkcipher.o |
| 19 | crypto_blkcipher-y += blkcipher.o | 19 | crypto_blkcipher-y += blkcipher.o |
| 20 | crypto_blkcipher-y += skcipher.o | ||
| 20 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o | 21 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o |
| 21 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o | 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o |
| 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o | 23 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o |
| @@ -46,7 +47,7 @@ obj-$(CONFIG_CRYPTO_CMAC) += cmac.o | |||
| 46 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o | 47 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o |
| 47 | obj-$(CONFIG_CRYPTO_VMAC) += vmac.o | 48 | obj-$(CONFIG_CRYPTO_VMAC) += vmac.o |
| 48 | obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o | 49 | obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o |
| 49 | obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o | 50 | obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o |
| 50 | obj-$(CONFIG_CRYPTO_MD4) += md4.o | 51 | obj-$(CONFIG_CRYPTO_MD4) += md4.o |
| 51 | obj-$(CONFIG_CRYPTO_MD5) += md5.o | 52 | obj-$(CONFIG_CRYPTO_MD5) += md5.o |
| 52 | obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o | 53 | obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o |
diff --git a/crypto/aead.c b/crypto/aead.c index 07bf99773548..9b18a1e40d6a 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * This file provides API support for AEAD algorithms. | 4 | * This file provides API support for AEAD algorithms. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 6 | * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms of the GNU General Public License as published by the Free | 9 | * under the terms of the GNU General Public License as published by the Free |
| @@ -13,13 +13,14 @@ | |||
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <crypto/internal/geniv.h> | 15 | #include <crypto/internal/geniv.h> |
| 16 | #include <crypto/internal/rng.h> | ||
| 17 | #include <crypto/null.h> | ||
| 16 | #include <crypto/scatterwalk.h> | 18 | #include <crypto/scatterwalk.h> |
| 17 | #include <linux/err.h> | 19 | #include <linux/err.h> |
| 18 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| 19 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 20 | #include <linux/module.h> | 22 | #include <linux/module.h> |
| 21 | #include <linux/rtnetlink.h> | 23 | #include <linux/rtnetlink.h> |
| 22 | #include <linux/sched.h> | ||
| 23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 24 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
| 25 | #include <linux/cryptouser.h> | 26 | #include <linux/cryptouser.h> |
| @@ -27,17 +28,6 @@ | |||
| 27 | 28 | ||
| 28 | #include "internal.h" | 29 | #include "internal.h" |
| 29 | 30 | ||
| 30 | struct compat_request_ctx { | ||
| 31 | struct scatterlist src[2]; | ||
| 32 | struct scatterlist dst[2]; | ||
| 33 | struct scatterlist ivbuf[2]; | ||
| 34 | struct scatterlist *ivsg; | ||
| 35 | struct aead_givcrypt_request subreq; | ||
| 36 | }; | ||
| 37 | |||
| 38 | static int aead_null_givencrypt(struct aead_givcrypt_request *req); | ||
| 39 | static int aead_null_givdecrypt(struct aead_givcrypt_request *req); | ||
| 40 | |||
| 41 | static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, | 31 | static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, |
| 42 | unsigned int keylen) | 32 | unsigned int keylen) |
| 43 | { | 33 | { |
| @@ -53,7 +43,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, | |||
| 53 | 43 | ||
| 54 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 44 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
| 55 | memcpy(alignbuffer, key, keylen); | 45 | memcpy(alignbuffer, key, keylen); |
| 56 | ret = tfm->setkey(tfm, alignbuffer, keylen); | 46 | ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen); |
| 57 | memset(alignbuffer, 0, keylen); | 47 | memset(alignbuffer, 0, keylen); |
| 58 | kfree(buffer); | 48 | kfree(buffer); |
| 59 | return ret; | 49 | return ret; |
| @@ -64,12 +54,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm, | |||
| 64 | { | 54 | { |
| 65 | unsigned long alignmask = crypto_aead_alignmask(tfm); | 55 | unsigned long alignmask = crypto_aead_alignmask(tfm); |
| 66 | 56 | ||
| 67 | tfm = tfm->child; | ||
| 68 | |||
| 69 | if ((unsigned long)key & alignmask) | 57 | if ((unsigned long)key & alignmask) |
| 70 | return setkey_unaligned(tfm, key, keylen); | 58 | return setkey_unaligned(tfm, key, keylen); |
| 71 | 59 | ||
| 72 | return tfm->setkey(tfm, key, keylen); | 60 | return crypto_aead_alg(tfm)->setkey(tfm, key, keylen); |
| 73 | } | 61 | } |
| 74 | EXPORT_SYMBOL_GPL(crypto_aead_setkey); | 62 | EXPORT_SYMBOL_GPL(crypto_aead_setkey); |
| 75 | 63 | ||
| @@ -80,100 +68,17 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | |||
| 80 | if (authsize > crypto_aead_maxauthsize(tfm)) | 68 | if (authsize > crypto_aead_maxauthsize(tfm)) |
| 81 | return -EINVAL; | 69 | return -EINVAL; |
| 82 | 70 | ||
| 83 | if (tfm->setauthsize) { | 71 | if (crypto_aead_alg(tfm)->setauthsize) { |
| 84 | err = tfm->setauthsize(tfm->child, authsize); | 72 | err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize); |
| 85 | if (err) | 73 | if (err) |
| 86 | return err; | 74 | return err; |
| 87 | } | 75 | } |
| 88 | 76 | ||
| 89 | tfm->child->authsize = authsize; | ||
| 90 | tfm->authsize = authsize; | 77 | tfm->authsize = authsize; |
| 91 | return 0; | 78 | return 0; |
| 92 | } | 79 | } |
| 93 | EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); | 80 | EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); |
| 94 | 81 | ||
| 95 | struct aead_old_request { | ||
| 96 | struct scatterlist srcbuf[2]; | ||
| 97 | struct scatterlist dstbuf[2]; | ||
| 98 | struct aead_request subreq; | ||
| 99 | }; | ||
| 100 | |||
| 101 | unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) | ||
| 102 | { | ||
| 103 | return tfm->reqsize + sizeof(struct aead_old_request); | ||
| 104 | } | ||
| 105 | EXPORT_SYMBOL_GPL(crypto_aead_reqsize); | ||
| 106 | |||
| 107 | static int old_crypt(struct aead_request *req, | ||
| 108 | int (*crypt)(struct aead_request *req)) | ||
| 109 | { | ||
| 110 | struct aead_old_request *nreq = aead_request_ctx(req); | ||
| 111 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 112 | struct scatterlist *src, *dst; | ||
| 113 | |||
| 114 | if (req->old) | ||
| 115 | return crypt(req); | ||
| 116 | |||
| 117 | src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen); | ||
| 118 | dst = req->src == req->dst ? | ||
| 119 | src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen); | ||
| 120 | |||
| 121 | aead_request_set_tfm(&nreq->subreq, aead); | ||
| 122 | aead_request_set_callback(&nreq->subreq, aead_request_flags(req), | ||
| 123 | req->base.complete, req->base.data); | ||
| 124 | aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen, | ||
| 125 | req->iv); | ||
| 126 | aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen); | ||
| 127 | |||
| 128 | return crypt(&nreq->subreq); | ||
| 129 | } | ||
| 130 | |||
| 131 | static int old_encrypt(struct aead_request *req) | ||
| 132 | { | ||
| 133 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 134 | struct old_aead_alg *alg = crypto_old_aead_alg(aead); | ||
| 135 | |||
| 136 | return old_crypt(req, alg->encrypt); | ||
| 137 | } | ||
| 138 | |||
| 139 | static int old_decrypt(struct aead_request *req) | ||
| 140 | { | ||
| 141 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 142 | struct old_aead_alg *alg = crypto_old_aead_alg(aead); | ||
| 143 | |||
| 144 | return old_crypt(req, alg->decrypt); | ||
| 145 | } | ||
| 146 | |||
| 147 | static int no_givcrypt(struct aead_givcrypt_request *req) | ||
| 148 | { | ||
| 149 | return -ENOSYS; | ||
| 150 | } | ||
| 151 | |||
| 152 | static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm) | ||
| 153 | { | ||
| 154 | struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead; | ||
| 155 | struct crypto_aead *crt = __crypto_aead_cast(tfm); | ||
| 156 | |||
| 157 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) | ||
| 158 | return -EINVAL; | ||
| 159 | |||
| 160 | crt->setkey = alg->setkey; | ||
| 161 | crt->setauthsize = alg->setauthsize; | ||
| 162 | crt->encrypt = old_encrypt; | ||
| 163 | crt->decrypt = old_decrypt; | ||
| 164 | if (alg->ivsize) { | ||
| 165 | crt->givencrypt = alg->givencrypt ?: no_givcrypt; | ||
| 166 | crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; | ||
| 167 | } else { | ||
| 168 | crt->givencrypt = aead_null_givencrypt; | ||
| 169 | crt->givdecrypt = aead_null_givdecrypt; | ||
| 170 | } | ||
| 171 | crt->child = __crypto_aead_cast(tfm); | ||
| 172 | crt->authsize = alg->maxauthsize; | ||
| 173 | |||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | static void crypto_aead_exit_tfm(struct crypto_tfm *tfm) | 82 | static void crypto_aead_exit_tfm(struct crypto_tfm *tfm) |
| 178 | { | 83 | { |
| 179 | struct crypto_aead *aead = __crypto_aead_cast(tfm); | 84 | struct crypto_aead *aead = __crypto_aead_cast(tfm); |
| @@ -187,14 +92,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm) | |||
| 187 | struct crypto_aead *aead = __crypto_aead_cast(tfm); | 92 | struct crypto_aead *aead = __crypto_aead_cast(tfm); |
| 188 | struct aead_alg *alg = crypto_aead_alg(aead); | 93 | struct aead_alg *alg = crypto_aead_alg(aead); |
| 189 | 94 | ||
| 190 | if (crypto_old_aead_alg(aead)->encrypt) | ||
| 191 | return crypto_old_aead_init_tfm(tfm); | ||
| 192 | |||
| 193 | aead->setkey = alg->setkey; | ||
| 194 | aead->setauthsize = alg->setauthsize; | ||
| 195 | aead->encrypt = alg->encrypt; | ||
| 196 | aead->decrypt = alg->decrypt; | ||
| 197 | aead->child = __crypto_aead_cast(tfm); | ||
| 198 | aead->authsize = alg->maxauthsize; | 95 | aead->authsize = alg->maxauthsize; |
| 199 | 96 | ||
| 200 | if (alg->exit) | 97 | if (alg->exit) |
| @@ -207,64 +104,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm) | |||
| 207 | } | 104 | } |
| 208 | 105 | ||
| 209 | #ifdef CONFIG_NET | 106 | #ifdef CONFIG_NET |
| 210 | static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 211 | { | ||
| 212 | struct crypto_report_aead raead; | ||
| 213 | struct old_aead_alg *aead = &alg->cra_aead; | ||
| 214 | |||
| 215 | strncpy(raead.type, "aead", sizeof(raead.type)); | ||
| 216 | strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv)); | ||
| 217 | |||
| 218 | raead.blocksize = alg->cra_blocksize; | ||
| 219 | raead.maxauthsize = aead->maxauthsize; | ||
| 220 | raead.ivsize = aead->ivsize; | ||
| 221 | |||
| 222 | if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, | ||
| 223 | sizeof(struct crypto_report_aead), &raead)) | ||
| 224 | goto nla_put_failure; | ||
| 225 | return 0; | ||
| 226 | |||
| 227 | nla_put_failure: | ||
| 228 | return -EMSGSIZE; | ||
| 229 | } | ||
| 230 | #else | ||
| 231 | static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 232 | { | ||
| 233 | return -ENOSYS; | ||
| 234 | } | ||
| 235 | #endif | ||
| 236 | |||
| 237 | static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 238 | __attribute__ ((unused)); | ||
| 239 | static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg) | ||
| 240 | { | ||
| 241 | struct old_aead_alg *aead = &alg->cra_aead; | ||
| 242 | |||
| 243 | seq_printf(m, "type : aead\n"); | ||
| 244 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | ||
| 245 | "yes" : "no"); | ||
| 246 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
| 247 | seq_printf(m, "ivsize : %u\n", aead->ivsize); | ||
| 248 | seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); | ||
| 249 | seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>"); | ||
| 250 | } | ||
| 251 | |||
| 252 | const struct crypto_type crypto_aead_type = { | ||
| 253 | .extsize = crypto_alg_extsize, | ||
| 254 | .init_tfm = crypto_aead_init_tfm, | ||
| 255 | #ifdef CONFIG_PROC_FS | ||
| 256 | .show = crypto_old_aead_show, | ||
| 257 | #endif | ||
| 258 | .report = crypto_old_aead_report, | ||
| 259 | .lookup = crypto_lookup_aead, | ||
| 260 | .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV), | ||
| 261 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
| 262 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 263 | .tfmsize = offsetof(struct crypto_aead, base), | ||
| 264 | }; | ||
| 265 | EXPORT_SYMBOL_GPL(crypto_aead_type); | ||
| 266 | |||
| 267 | #ifdef CONFIG_NET | ||
| 268 | static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) | 107 | static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) |
| 269 | { | 108 | { |
| 270 | struct crypto_report_aead raead; | 109 | struct crypto_report_aead raead; |
| @@ -307,93 +146,31 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) | |||
| 307 | seq_printf(m, "geniv : <none>\n"); | 146 | seq_printf(m, "geniv : <none>\n"); |
| 308 | } | 147 | } |
| 309 | 148 | ||
| 310 | static const struct crypto_type crypto_new_aead_type = { | 149 | static void crypto_aead_free_instance(struct crypto_instance *inst) |
| 311 | .extsize = crypto_alg_extsize, | ||
| 312 | .init_tfm = crypto_aead_init_tfm, | ||
| 313 | #ifdef CONFIG_PROC_FS | ||
| 314 | .show = crypto_aead_show, | ||
| 315 | #endif | ||
| 316 | .report = crypto_aead_report, | ||
| 317 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
| 318 | .maskset = CRYPTO_ALG_TYPE_MASK, | ||
| 319 | .type = CRYPTO_ALG_TYPE_AEAD, | ||
| 320 | .tfmsize = offsetof(struct crypto_aead, base), | ||
| 321 | }; | ||
| 322 | |||
| 323 | static int aead_null_givencrypt(struct aead_givcrypt_request *req) | ||
| 324 | { | ||
| 325 | return crypto_aead_encrypt(&req->areq); | ||
| 326 | } | ||
| 327 | |||
| 328 | static int aead_null_givdecrypt(struct aead_givcrypt_request *req) | ||
| 329 | { | ||
| 330 | return crypto_aead_decrypt(&req->areq); | ||
| 331 | } | ||
| 332 | |||
| 333 | #ifdef CONFIG_NET | ||
| 334 | static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 335 | { | ||
| 336 | struct crypto_report_aead raead; | ||
| 337 | struct old_aead_alg *aead = &alg->cra_aead; | ||
| 338 | |||
| 339 | strncpy(raead.type, "nivaead", sizeof(raead.type)); | ||
| 340 | strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv)); | ||
| 341 | |||
| 342 | raead.blocksize = alg->cra_blocksize; | ||
| 343 | raead.maxauthsize = aead->maxauthsize; | ||
| 344 | raead.ivsize = aead->ivsize; | ||
| 345 | |||
| 346 | if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, | ||
| 347 | sizeof(struct crypto_report_aead), &raead)) | ||
| 348 | goto nla_put_failure; | ||
| 349 | return 0; | ||
| 350 | |||
| 351 | nla_put_failure: | ||
| 352 | return -EMSGSIZE; | ||
| 353 | } | ||
| 354 | #else | ||
| 355 | static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) | ||
| 356 | { | 150 | { |
| 357 | return -ENOSYS; | 151 | struct aead_instance *aead = aead_instance(inst); |
| 358 | } | ||
| 359 | #endif | ||
| 360 | |||
| 361 | 152 | ||
| 362 | static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) | 153 | if (!aead->free) { |
| 363 | __attribute__ ((unused)); | 154 | inst->tmpl->free(inst); |
| 364 | static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) | 155 | return; |
| 365 | { | 156 | } |
| 366 | struct old_aead_alg *aead = &alg->cra_aead; | ||
| 367 | 157 | ||
| 368 | seq_printf(m, "type : nivaead\n"); | 158 | aead->free(aead); |
| 369 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | ||
| 370 | "yes" : "no"); | ||
| 371 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
| 372 | seq_printf(m, "ivsize : %u\n", aead->ivsize); | ||
| 373 | seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); | ||
| 374 | seq_printf(m, "geniv : %s\n", aead->geniv); | ||
| 375 | } | 159 | } |
| 376 | 160 | ||
| 377 | const struct crypto_type crypto_nivaead_type = { | 161 | static const struct crypto_type crypto_aead_type = { |
| 378 | .extsize = crypto_alg_extsize, | 162 | .extsize = crypto_alg_extsize, |
| 379 | .init_tfm = crypto_aead_init_tfm, | 163 | .init_tfm = crypto_aead_init_tfm, |
| 164 | .free = crypto_aead_free_instance, | ||
| 380 | #ifdef CONFIG_PROC_FS | 165 | #ifdef CONFIG_PROC_FS |
| 381 | .show = crypto_nivaead_show, | 166 | .show = crypto_aead_show, |
| 382 | #endif | 167 | #endif |
| 383 | .report = crypto_nivaead_report, | 168 | .report = crypto_aead_report, |
| 384 | .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV), | 169 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, |
| 385 | .maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV, | 170 | .maskset = CRYPTO_ALG_TYPE_MASK, |
| 386 | .type = CRYPTO_ALG_TYPE_AEAD, | 171 | .type = CRYPTO_ALG_TYPE_AEAD, |
| 387 | .tfmsize = offsetof(struct crypto_aead, base), | 172 | .tfmsize = offsetof(struct crypto_aead, base), |
| 388 | }; | 173 | }; |
| 389 | EXPORT_SYMBOL_GPL(crypto_nivaead_type); | ||
| 390 | |||
| 391 | static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn, | ||
| 392 | const char *name, u32 type, u32 mask) | ||
| 393 | { | ||
| 394 | spawn->base.frontend = &crypto_nivaead_type; | ||
| 395 | return crypto_grab_spawn(&spawn->base, name, type, mask); | ||
| 396 | } | ||
| 397 | 174 | ||
| 398 | static int aead_geniv_setkey(struct crypto_aead *tfm, | 175 | static int aead_geniv_setkey(struct crypto_aead *tfm, |
| 399 | const u8 *key, unsigned int keylen) | 176 | const u8 *key, unsigned int keylen) |
| @@ -411,169 +188,6 @@ static int aead_geniv_setauthsize(struct crypto_aead *tfm, | |||
| 411 | return crypto_aead_setauthsize(ctx->child, authsize); | 188 | return crypto_aead_setauthsize(ctx->child, authsize); |
| 412 | } | 189 | } |
| 413 | 190 | ||
| 414 | static void compat_encrypt_complete2(struct aead_request *req, int err) | ||
| 415 | { | ||
| 416 | struct compat_request_ctx *rctx = aead_request_ctx(req); | ||
| 417 | struct aead_givcrypt_request *subreq = &rctx->subreq; | ||
| 418 | struct crypto_aead *geniv; | ||
| 419 | |||
| 420 | if (err == -EINPROGRESS) | ||
| 421 | return; | ||
| 422 | |||
| 423 | if (err) | ||
| 424 | goto out; | ||
| 425 | |||
| 426 | geniv = crypto_aead_reqtfm(req); | ||
| 427 | scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0, | ||
| 428 | crypto_aead_ivsize(geniv), 1); | ||
| 429 | |||
| 430 | out: | ||
| 431 | kzfree(subreq->giv); | ||
| 432 | } | ||
| 433 | |||
| 434 | static void compat_encrypt_complete(struct crypto_async_request *base, int err) | ||
| 435 | { | ||
| 436 | struct aead_request *req = base->data; | ||
| 437 | |||
| 438 | compat_encrypt_complete2(req, err); | ||
| 439 | aead_request_complete(req, err); | ||
| 440 | } | ||
| 441 | |||
| 442 | static int compat_encrypt(struct aead_request *req) | ||
| 443 | { | ||
| 444 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
| 445 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 446 | struct compat_request_ctx *rctx = aead_request_ctx(req); | ||
| 447 | struct aead_givcrypt_request *subreq = &rctx->subreq; | ||
| 448 | unsigned int ivsize = crypto_aead_ivsize(geniv); | ||
| 449 | struct scatterlist *src, *dst; | ||
| 450 | crypto_completion_t compl; | ||
| 451 | void *data; | ||
| 452 | u8 *info; | ||
| 453 | __be64 seq; | ||
| 454 | int err; | ||
| 455 | |||
| 456 | if (req->cryptlen < ivsize) | ||
| 457 | return -EINVAL; | ||
| 458 | |||
| 459 | compl = req->base.complete; | ||
| 460 | data = req->base.data; | ||
| 461 | |||
| 462 | rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen); | ||
| 463 | info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg); | ||
| 464 | |||
| 465 | if (!info) { | ||
| 466 | info = kmalloc(ivsize, req->base.flags & | ||
| 467 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | ||
| 468 | GFP_ATOMIC); | ||
| 469 | if (!info) | ||
| 470 | return -ENOMEM; | ||
| 471 | |||
| 472 | compl = compat_encrypt_complete; | ||
| 473 | data = req; | ||
| 474 | } | ||
| 475 | |||
| 476 | memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq)); | ||
| 477 | |||
| 478 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize); | ||
| 479 | dst = req->src == req->dst ? | ||
| 480 | src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize); | ||
| 481 | |||
| 482 | aead_givcrypt_set_tfm(subreq, ctx->child); | ||
| 483 | aead_givcrypt_set_callback(subreq, req->base.flags, | ||
| 484 | req->base.complete, req->base.data); | ||
| 485 | aead_givcrypt_set_crypt(subreq, src, dst, | ||
| 486 | req->cryptlen - ivsize, req->iv); | ||
| 487 | aead_givcrypt_set_assoc(subreq, req->src, req->assoclen); | ||
| 488 | aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq)); | ||
| 489 | |||
| 490 | err = crypto_aead_givencrypt(subreq); | ||
| 491 | if (unlikely(PageHighMem(sg_page(rctx->ivsg)))) | ||
| 492 | compat_encrypt_complete2(req, err); | ||
| 493 | return err; | ||
| 494 | } | ||
| 495 | |||
| 496 | static int compat_decrypt(struct aead_request *req) | ||
| 497 | { | ||
| 498 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
| 499 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 500 | struct compat_request_ctx *rctx = aead_request_ctx(req); | ||
| 501 | struct aead_request *subreq = &rctx->subreq.areq; | ||
| 502 | unsigned int ivsize = crypto_aead_ivsize(geniv); | ||
| 503 | struct scatterlist *src, *dst; | ||
| 504 | crypto_completion_t compl; | ||
| 505 | void *data; | ||
| 506 | |||
| 507 | if (req->cryptlen < ivsize) | ||
| 508 | return -EINVAL; | ||
| 509 | |||
| 510 | aead_request_set_tfm(subreq, ctx->child); | ||
| 511 | |||
| 512 | compl = req->base.complete; | ||
| 513 | data = req->base.data; | ||
| 514 | |||
| 515 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize); | ||
| 516 | dst = req->src == req->dst ? | ||
| 517 | src : scatterwalk_ffwd(rctx->dst, req->dst, | ||
| 518 | req->assoclen + ivsize); | ||
| 519 | |||
| 520 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
| 521 | aead_request_set_crypt(subreq, src, dst, | ||
| 522 | req->cryptlen - ivsize, req->iv); | ||
| 523 | aead_request_set_assoc(subreq, req->src, req->assoclen); | ||
| 524 | |||
| 525 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); | ||
| 526 | |||
| 527 | return crypto_aead_decrypt(subreq); | ||
| 528 | } | ||
| 529 | |||
| 530 | static int compat_encrypt_first(struct aead_request *req) | ||
| 531 | { | ||
| 532 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
| 533 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 534 | int err = 0; | ||
| 535 | |||
| 536 | spin_lock_bh(&ctx->lock); | ||
| 537 | if (geniv->encrypt != compat_encrypt_first) | ||
| 538 | goto unlock; | ||
| 539 | |||
| 540 | geniv->encrypt = compat_encrypt; | ||
| 541 | |||
| 542 | unlock: | ||
| 543 | spin_unlock_bh(&ctx->lock); | ||
| 544 | |||
| 545 | if (err) | ||
| 546 | return err; | ||
| 547 | |||
| 548 | return compat_encrypt(req); | ||
| 549 | } | ||
| 550 | |||
| 551 | static int aead_geniv_init_compat(struct crypto_tfm *tfm) | ||
| 552 | { | ||
| 553 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
| 554 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 555 | int err; | ||
| 556 | |||
| 557 | spin_lock_init(&ctx->lock); | ||
| 558 | |||
| 559 | crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx)); | ||
| 560 | |||
| 561 | err = aead_geniv_init(tfm); | ||
| 562 | |||
| 563 | ctx->child = geniv->child; | ||
| 564 | geniv->child = geniv; | ||
| 565 | |||
| 566 | return err; | ||
| 567 | } | ||
| 568 | |||
| 569 | static void aead_geniv_exit_compat(struct crypto_tfm *tfm) | ||
| 570 | { | ||
| 571 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
| 572 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 573 | |||
| 574 | crypto_free_aead(ctx->child); | ||
| 575 | } | ||
| 576 | |||
| 577 | struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | 191 | struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, |
| 578 | struct rtattr **tb, u32 type, u32 mask) | 192 | struct rtattr **tb, u32 type, u32 mask) |
| 579 | { | 193 | { |
| @@ -590,8 +204,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | |||
| 590 | if (IS_ERR(algt)) | 204 | if (IS_ERR(algt)) |
| 591 | return ERR_CAST(algt); | 205 | return ERR_CAST(algt); |
| 592 | 206 | ||
| 593 | if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) & | 207 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 594 | algt->mask) | ||
| 595 | return ERR_PTR(-EINVAL); | 208 | return ERR_PTR(-EINVAL); |
| 596 | 209 | ||
| 597 | name = crypto_attr_alg_name(tb[1]); | 210 | name = crypto_attr_alg_name(tb[1]); |
| @@ -608,9 +221,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | |||
| 608 | mask |= crypto_requires_sync(algt->type, algt->mask); | 221 | mask |= crypto_requires_sync(algt->type, algt->mask); |
| 609 | 222 | ||
| 610 | crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); | 223 | crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); |
| 611 | err = (algt->mask & CRYPTO_ALG_GENIV) ? | 224 | err = crypto_grab_aead(spawn, name, type, mask); |
| 612 | crypto_grab_nivaead(spawn, name, type, mask) : | ||
| 613 | crypto_grab_aead(spawn, name, type, mask); | ||
| 614 | if (err) | 225 | if (err) |
| 615 | goto err_free_inst; | 226 | goto err_free_inst; |
| 616 | 227 | ||
| @@ -623,43 +234,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | |||
| 623 | if (ivsize < sizeof(u64)) | 234 | if (ivsize < sizeof(u64)) |
| 624 | goto err_drop_alg; | 235 | goto err_drop_alg; |
| 625 | 236 | ||
| 626 | /* | ||
| 627 | * This is only true if we're constructing an algorithm with its | ||
| 628 | * default IV generator. For the default generator we elide the | ||
| 629 | * template name and double-check the IV generator. | ||
| 630 | */ | ||
| 631 | if (algt->mask & CRYPTO_ALG_GENIV) { | ||
| 632 | if (!alg->base.cra_aead.encrypt) | ||
| 633 | goto err_drop_alg; | ||
| 634 | if (strcmp(tmpl->name, alg->base.cra_aead.geniv)) | ||
| 635 | goto err_drop_alg; | ||
| 636 | |||
| 637 | memcpy(inst->alg.base.cra_name, alg->base.cra_name, | ||
| 638 | CRYPTO_MAX_ALG_NAME); | ||
| 639 | memcpy(inst->alg.base.cra_driver_name, | ||
| 640 | alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME); | ||
| 641 | |||
| 642 | inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD | | ||
| 643 | CRYPTO_ALG_GENIV; | ||
| 644 | inst->alg.base.cra_flags |= alg->base.cra_flags & | ||
| 645 | CRYPTO_ALG_ASYNC; | ||
| 646 | inst->alg.base.cra_priority = alg->base.cra_priority; | ||
| 647 | inst->alg.base.cra_blocksize = alg->base.cra_blocksize; | ||
| 648 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; | ||
| 649 | inst->alg.base.cra_type = &crypto_aead_type; | ||
| 650 | |||
| 651 | inst->alg.base.cra_aead.ivsize = ivsize; | ||
| 652 | inst->alg.base.cra_aead.maxauthsize = maxauthsize; | ||
| 653 | |||
| 654 | inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey; | ||
| 655 | inst->alg.base.cra_aead.setauthsize = | ||
| 656 | alg->base.cra_aead.setauthsize; | ||
| 657 | inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt; | ||
| 658 | inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt; | ||
| 659 | |||
| 660 | goto out; | ||
| 661 | } | ||
| 662 | |||
| 663 | err = -ENAMETOOLONG; | 237 | err = -ENAMETOOLONG; |
| 664 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, | 238 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 665 | "%s(%s)", tmpl->name, alg->base.cra_name) >= | 239 | "%s(%s)", tmpl->name, alg->base.cra_name) >= |
| @@ -682,12 +256,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | |||
| 682 | inst->alg.ivsize = ivsize; | 256 | inst->alg.ivsize = ivsize; |
| 683 | inst->alg.maxauthsize = maxauthsize; | 257 | inst->alg.maxauthsize = maxauthsize; |
| 684 | 258 | ||
| 685 | inst->alg.encrypt = compat_encrypt_first; | ||
| 686 | inst->alg.decrypt = compat_decrypt; | ||
| 687 | |||
| 688 | inst->alg.base.cra_init = aead_geniv_init_compat; | ||
| 689 | inst->alg.base.cra_exit = aead_geniv_exit_compat; | ||
| 690 | |||
| 691 | out: | 259 | out: |
| 692 | return inst; | 260 | return inst; |
| 693 | 261 | ||
| @@ -707,147 +275,58 @@ void aead_geniv_free(struct aead_instance *inst) | |||
| 707 | } | 275 | } |
| 708 | EXPORT_SYMBOL_GPL(aead_geniv_free); | 276 | EXPORT_SYMBOL_GPL(aead_geniv_free); |
| 709 | 277 | ||
| 710 | int aead_geniv_init(struct crypto_tfm *tfm) | 278 | int aead_init_geniv(struct crypto_aead *aead) |
| 711 | { | 279 | { |
| 712 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 280 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead); |
| 281 | struct aead_instance *inst = aead_alg_instance(aead); | ||
| 713 | struct crypto_aead *child; | 282 | struct crypto_aead *child; |
| 714 | struct crypto_aead *aead; | ||
| 715 | |||
| 716 | aead = __crypto_aead_cast(tfm); | ||
| 717 | |||
| 718 | child = crypto_spawn_aead(crypto_instance_ctx(inst)); | ||
| 719 | if (IS_ERR(child)) | ||
| 720 | return PTR_ERR(child); | ||
| 721 | |||
| 722 | aead->child = child; | ||
| 723 | aead->reqsize += crypto_aead_reqsize(child); | ||
| 724 | |||
| 725 | return 0; | ||
| 726 | } | ||
| 727 | EXPORT_SYMBOL_GPL(aead_geniv_init); | ||
| 728 | |||
| 729 | void aead_geniv_exit(struct crypto_tfm *tfm) | ||
| 730 | { | ||
| 731 | crypto_free_aead(__crypto_aead_cast(tfm)->child); | ||
| 732 | } | ||
| 733 | EXPORT_SYMBOL_GPL(aead_geniv_exit); | ||
| 734 | |||
| 735 | static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) | ||
| 736 | { | ||
| 737 | struct rtattr *tb[3]; | ||
| 738 | struct { | ||
| 739 | struct rtattr attr; | ||
| 740 | struct crypto_attr_type data; | ||
| 741 | } ptype; | ||
| 742 | struct { | ||
| 743 | struct rtattr attr; | ||
| 744 | struct crypto_attr_alg data; | ||
| 745 | } palg; | ||
| 746 | struct crypto_template *tmpl; | ||
| 747 | struct crypto_instance *inst; | ||
| 748 | struct crypto_alg *larval; | ||
| 749 | const char *geniv; | ||
| 750 | int err; | 283 | int err; |
| 751 | 284 | ||
| 752 | larval = crypto_larval_lookup(alg->cra_driver_name, | 285 | spin_lock_init(&ctx->lock); |
| 753 | CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV, | ||
| 754 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | ||
| 755 | err = PTR_ERR(larval); | ||
| 756 | if (IS_ERR(larval)) | ||
| 757 | goto out; | ||
| 758 | |||
| 759 | err = -EAGAIN; | ||
| 760 | if (!crypto_is_larval(larval)) | ||
| 761 | goto drop_larval; | ||
| 762 | |||
| 763 | ptype.attr.rta_len = sizeof(ptype); | ||
| 764 | ptype.attr.rta_type = CRYPTOA_TYPE; | ||
| 765 | ptype.data.type = type | CRYPTO_ALG_GENIV; | ||
| 766 | /* GENIV tells the template that we're making a default geniv. */ | ||
| 767 | ptype.data.mask = mask | CRYPTO_ALG_GENIV; | ||
| 768 | tb[0] = &ptype.attr; | ||
| 769 | |||
| 770 | palg.attr.rta_len = sizeof(palg); | ||
| 771 | palg.attr.rta_type = CRYPTOA_ALG; | ||
| 772 | /* Must use the exact name to locate ourselves. */ | ||
| 773 | memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME); | ||
| 774 | tb[1] = &palg.attr; | ||
| 775 | |||
| 776 | tb[2] = NULL; | ||
| 777 | 286 | ||
| 778 | geniv = alg->cra_aead.geniv; | 287 | err = crypto_get_default_rng(); |
| 288 | if (err) | ||
| 289 | goto out; | ||
| 779 | 290 | ||
| 780 | tmpl = crypto_lookup_template(geniv); | 291 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, |
| 781 | err = -ENOENT; | 292 | crypto_aead_ivsize(aead)); |
| 782 | if (!tmpl) | 293 | crypto_put_default_rng(); |
| 783 | goto kill_larval; | 294 | if (err) |
| 295 | goto out; | ||
| 784 | 296 | ||
| 785 | if (tmpl->create) { | 297 | ctx->null = crypto_get_default_null_skcipher(); |
| 786 | err = tmpl->create(tmpl, tb); | 298 | err = PTR_ERR(ctx->null); |
| 787 | if (err) | 299 | if (IS_ERR(ctx->null)) |
| 788 | goto put_tmpl; | 300 | goto out; |
| 789 | goto ok; | ||
| 790 | } | ||
| 791 | 301 | ||
| 792 | inst = tmpl->alloc(tb); | 302 | child = crypto_spawn_aead(aead_instance_ctx(inst)); |
| 793 | err = PTR_ERR(inst); | 303 | err = PTR_ERR(child); |
| 794 | if (IS_ERR(inst)) | 304 | if (IS_ERR(child)) |
| 795 | goto put_tmpl; | 305 | goto drop_null; |
| 796 | 306 | ||
| 797 | err = crypto_register_instance(tmpl, inst); | 307 | ctx->child = child; |
| 798 | if (err) { | 308 | crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) + |
| 799 | tmpl->free(inst); | 309 | sizeof(struct aead_request)); |
| 800 | goto put_tmpl; | ||
| 801 | } | ||
| 802 | 310 | ||
| 803 | ok: | 311 | err = 0; |
| 804 | /* Redo the lookup to use the instance we just registered. */ | ||
| 805 | err = -EAGAIN; | ||
| 806 | 312 | ||
| 807 | put_tmpl: | ||
| 808 | crypto_tmpl_put(tmpl); | ||
| 809 | kill_larval: | ||
| 810 | crypto_larval_kill(larval); | ||
| 811 | drop_larval: | ||
| 812 | crypto_mod_put(larval); | ||
| 813 | out: | 313 | out: |
| 814 | crypto_mod_put(alg); | ||
| 815 | return err; | 314 | return err; |
| 315 | |||
| 316 | drop_null: | ||
| 317 | crypto_put_default_null_skcipher(); | ||
| 318 | goto out; | ||
| 816 | } | 319 | } |
| 320 | EXPORT_SYMBOL_GPL(aead_init_geniv); | ||
| 817 | 321 | ||
| 818 | struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask) | 322 | void aead_exit_geniv(struct crypto_aead *tfm) |
| 819 | { | 323 | { |
| 820 | struct crypto_alg *alg; | 324 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); |
| 821 | |||
| 822 | alg = crypto_alg_mod_lookup(name, type, mask); | ||
| 823 | if (IS_ERR(alg)) | ||
| 824 | return alg; | ||
| 825 | |||
| 826 | if (alg->cra_type == &crypto_aead_type) | ||
| 827 | return alg; | ||
| 828 | |||
| 829 | if (!alg->cra_aead.ivsize) | ||
| 830 | return alg; | ||
| 831 | |||
| 832 | crypto_mod_put(alg); | ||
| 833 | alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED, | ||
| 834 | mask & ~CRYPTO_ALG_TESTED); | ||
| 835 | if (IS_ERR(alg)) | ||
| 836 | return alg; | ||
| 837 | |||
| 838 | if (alg->cra_type == &crypto_aead_type) { | ||
| 839 | if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { | ||
| 840 | crypto_mod_put(alg); | ||
| 841 | alg = ERR_PTR(-ENOENT); | ||
| 842 | } | ||
| 843 | return alg; | ||
| 844 | } | ||
| 845 | |||
| 846 | BUG_ON(!alg->cra_aead.ivsize); | ||
| 847 | 325 | ||
| 848 | return ERR_PTR(crypto_nivaead_default(alg, type, mask)); | 326 | crypto_free_aead(ctx->child); |
| 327 | crypto_put_default_null_skcipher(); | ||
| 849 | } | 328 | } |
| 850 | EXPORT_SYMBOL_GPL(crypto_lookup_aead); | 329 | EXPORT_SYMBOL_GPL(aead_exit_geniv); |
| 851 | 330 | ||
| 852 | int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, | 331 | int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, |
| 853 | u32 type, u32 mask) | 332 | u32 type, u32 mask) |
| @@ -870,7 +349,7 @@ static int aead_prepare_alg(struct aead_alg *alg) | |||
| 870 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) | 349 | if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) |
| 871 | return -EINVAL; | 350 | return -EINVAL; |
| 872 | 351 | ||
| 873 | base->cra_type = &crypto_new_aead_type; | 352 | base->cra_type = &crypto_aead_type; |
| 874 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | 353 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; |
| 875 | base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; | 354 | base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; |
| 876 | 355 | ||
diff --git a/crypto/algapi.c b/crypto/algapi.c index 3c079b7f23f6..d130b41dbaea 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
| @@ -67,12 +67,22 @@ static int crypto_check_alg(struct crypto_alg *alg) | |||
| 67 | return crypto_set_driver_name(alg); | 67 | return crypto_set_driver_name(alg); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static void crypto_free_instance(struct crypto_instance *inst) | ||
| 71 | { | ||
| 72 | if (!inst->alg.cra_type->free) { | ||
| 73 | inst->tmpl->free(inst); | ||
| 74 | return; | ||
| 75 | } | ||
| 76 | |||
| 77 | inst->alg.cra_type->free(inst); | ||
| 78 | } | ||
| 79 | |||
| 70 | static void crypto_destroy_instance(struct crypto_alg *alg) | 80 | static void crypto_destroy_instance(struct crypto_alg *alg) |
| 71 | { | 81 | { |
| 72 | struct crypto_instance *inst = (void *)alg; | 82 | struct crypto_instance *inst = (void *)alg; |
| 73 | struct crypto_template *tmpl = inst->tmpl; | 83 | struct crypto_template *tmpl = inst->tmpl; |
| 74 | 84 | ||
| 75 | tmpl->free(inst); | 85 | crypto_free_instance(inst); |
| 76 | crypto_tmpl_put(tmpl); | 86 | crypto_tmpl_put(tmpl); |
| 77 | } | 87 | } |
| 78 | 88 | ||
| @@ -481,7 +491,7 @@ void crypto_unregister_template(struct crypto_template *tmpl) | |||
| 481 | 491 | ||
| 482 | hlist_for_each_entry_safe(inst, n, list, list) { | 492 | hlist_for_each_entry_safe(inst, n, list, list) { |
| 483 | BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); | 493 | BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); |
| 484 | tmpl->free(inst); | 494 | crypto_free_instance(inst); |
| 485 | } | 495 | } |
| 486 | crypto_remove_final(&users); | 496 | crypto_remove_final(&users); |
| 487 | } | 497 | } |
| @@ -892,7 +902,7 @@ out: | |||
| 892 | } | 902 | } |
| 893 | EXPORT_SYMBOL_GPL(crypto_enqueue_request); | 903 | EXPORT_SYMBOL_GPL(crypto_enqueue_request); |
| 894 | 904 | ||
| 895 | void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset) | 905 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) |
| 896 | { | 906 | { |
| 897 | struct list_head *request; | 907 | struct list_head *request; |
| 898 | 908 | ||
| @@ -907,14 +917,7 @@ void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset) | |||
| 907 | request = queue->list.next; | 917 | request = queue->list.next; |
| 908 | list_del(request); | 918 | list_del(request); |
| 909 | 919 | ||
| 910 | return (char *)list_entry(request, struct crypto_async_request, list) - | 920 | return list_entry(request, struct crypto_async_request, list); |
| 911 | offset; | ||
| 912 | } | ||
| 913 | EXPORT_SYMBOL_GPL(__crypto_dequeue_request); | ||
| 914 | |||
| 915 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) | ||
| 916 | { | ||
| 917 | return __crypto_dequeue_request(queue, 0); | ||
| 918 | } | 921 | } |
| 919 | EXPORT_SYMBOL_GPL(crypto_dequeue_request); | 922 | EXPORT_SYMBOL_GPL(crypto_dequeue_request); |
| 920 | 923 | ||
diff --git a/crypto/algboss.c b/crypto/algboss.c index 76fc0b23fc6c..6e39d9c05b98 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
| @@ -248,13 +248,11 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) | |||
| 248 | type = alg->cra_flags; | 248 | type = alg->cra_flags; |
| 249 | 249 | ||
| 250 | /* This piece of crap needs to disappear into per-type test hooks. */ | 250 | /* This piece of crap needs to disappear into per-type test hooks. */ |
| 251 | if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & | 251 | if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & |
| 252 | CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) && | 252 | CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) && |
| 253 | ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == | 253 | ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == |
| 254 | CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : | 254 | CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : |
| 255 | alg->cra_ablkcipher.ivsize)) || | 255 | alg->cra_ablkcipher.ivsize)) |
| 256 | (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) && | ||
| 257 | alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize)) | ||
| 258 | type |= CRYPTO_ALG_TESTED; | 256 | type |= CRYPTO_ALG_TESTED; |
| 259 | 257 | ||
| 260 | param->type = type; | 258 | param->type = type; |
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index e0408a480d2f..0aa6fdfb448a 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
| @@ -90,6 +90,7 @@ static void aead_put_sgl(struct sock *sk) | |||
| 90 | put_page(sg_page(sg + i)); | 90 | put_page(sg_page(sg + i)); |
| 91 | sg_assign_page(sg + i, NULL); | 91 | sg_assign_page(sg + i, NULL); |
| 92 | } | 92 | } |
| 93 | sg_init_table(sg, ALG_MAX_PAGES); | ||
| 93 | sgl->cur = 0; | 94 | sgl->cur = 0; |
| 94 | ctx->used = 0; | 95 | ctx->used = 0; |
| 95 | ctx->more = 0; | 96 | ctx->more = 0; |
| @@ -514,8 +515,7 @@ static struct proto_ops algif_aead_ops = { | |||
| 514 | 515 | ||
| 515 | static void *aead_bind(const char *name, u32 type, u32 mask) | 516 | static void *aead_bind(const char *name, u32 type, u32 mask) |
| 516 | { | 517 | { |
| 517 | return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW, | 518 | return crypto_alloc_aead(name, type, mask); |
| 518 | mask | CRYPTO_ALG_AEAD_NEW); | ||
| 519 | } | 519 | } |
| 520 | 520 | ||
| 521 | static void aead_release(void *private) | 521 | static void aead_release(void *private) |
diff --git a/crypto/authenc.c b/crypto/authenc.c index 3e852299afb4..55a354d57251 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Authenc: Simple AEAD wrapper for IPsec | 2 | * Authenc: Simple AEAD wrapper for IPsec |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <crypto/internal/hash.h> | 14 | #include <crypto/internal/hash.h> |
| 15 | #include <crypto/internal/skcipher.h> | 15 | #include <crypto/internal/skcipher.h> |
| 16 | #include <crypto/authenc.h> | 16 | #include <crypto/authenc.h> |
| 17 | #include <crypto/null.h> | ||
| 17 | #include <crypto/scatterwalk.h> | 18 | #include <crypto/scatterwalk.h> |
| 18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
| 19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| @@ -23,26 +24,21 @@ | |||
| 23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 24 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
| 25 | 26 | ||
| 26 | typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags); | ||
| 27 | |||
| 28 | struct authenc_instance_ctx { | 27 | struct authenc_instance_ctx { |
| 29 | struct crypto_ahash_spawn auth; | 28 | struct crypto_ahash_spawn auth; |
| 30 | struct crypto_skcipher_spawn enc; | 29 | struct crypto_skcipher_spawn enc; |
| 30 | unsigned int reqoff; | ||
| 31 | }; | 31 | }; |
| 32 | 32 | ||
| 33 | struct crypto_authenc_ctx { | 33 | struct crypto_authenc_ctx { |
| 34 | unsigned int reqoff; | ||
| 35 | struct crypto_ahash *auth; | 34 | struct crypto_ahash *auth; |
| 36 | struct crypto_ablkcipher *enc; | 35 | struct crypto_ablkcipher *enc; |
| 36 | struct crypto_blkcipher *null; | ||
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | struct authenc_request_ctx { | 39 | struct authenc_request_ctx { |
| 40 | unsigned int cryptlen; | 40 | struct scatterlist src[2]; |
| 41 | struct scatterlist *sg; | 41 | struct scatterlist dst[2]; |
| 42 | struct scatterlist asg[2]; | ||
| 43 | struct scatterlist cipher[2]; | ||
| 44 | crypto_completion_t complete; | ||
| 45 | crypto_completion_t update_complete; | ||
| 46 | char tail[]; | 42 | char tail[]; |
| 47 | }; | 43 | }; |
| 48 | 44 | ||
| @@ -119,189 +115,35 @@ badkey: | |||
| 119 | goto out; | 115 | goto out; |
| 120 | } | 116 | } |
| 121 | 117 | ||
| 122 | static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, | ||
| 123 | int err) | ||
| 124 | { | ||
| 125 | struct aead_request *req = areq->data; | ||
| 126 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 127 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 128 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 129 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 130 | |||
| 131 | if (err) | ||
| 132 | goto out; | ||
| 133 | |||
| 134 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
| 135 | areq_ctx->cryptlen); | ||
| 136 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 137 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 138 | areq_ctx->complete, req); | ||
| 139 | |||
| 140 | err = crypto_ahash_finup(ahreq); | ||
| 141 | if (err) | ||
| 142 | goto out; | ||
| 143 | |||
| 144 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
| 145 | areq_ctx->cryptlen, | ||
| 146 | crypto_aead_authsize(authenc), 1); | ||
| 147 | |||
| 148 | out: | ||
| 149 | authenc_request_complete(req, err); | ||
| 150 | } | ||
| 151 | |||
| 152 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) | 118 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) |
| 153 | { | 119 | { |
| 154 | struct aead_request *req = areq->data; | 120 | struct aead_request *req = areq->data; |
| 155 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 121 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 156 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 122 | struct aead_instance *inst = aead_alg_instance(authenc); |
| 123 | struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); | ||
| 157 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 124 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
| 158 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 125 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); |
| 159 | 126 | ||
| 160 | if (err) | 127 | if (err) |
| 161 | goto out; | 128 | goto out; |
| 162 | 129 | ||
| 163 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | 130 | scatterwalk_map_and_copy(ahreq->result, req->dst, |
| 164 | areq_ctx->cryptlen, | 131 | req->assoclen + req->cryptlen, |
| 165 | crypto_aead_authsize(authenc), 1); | 132 | crypto_aead_authsize(authenc), 1); |
| 166 | 133 | ||
| 167 | out: | 134 | out: |
| 168 | aead_request_complete(req, err); | 135 | aead_request_complete(req, err); |
| 169 | } | 136 | } |
| 170 | 137 | ||
| 171 | static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | 138 | static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) |
| 172 | int err) | ||
| 173 | { | ||
| 174 | u8 *ihash; | ||
| 175 | unsigned int authsize; | ||
| 176 | struct ablkcipher_request *abreq; | ||
| 177 | struct aead_request *req = areq->data; | ||
| 178 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 179 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 180 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 181 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 182 | unsigned int cryptlen = req->cryptlen; | ||
| 183 | |||
| 184 | if (err) | ||
| 185 | goto out; | ||
| 186 | |||
| 187 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
| 188 | areq_ctx->cryptlen); | ||
| 189 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 190 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 191 | areq_ctx->complete, req); | ||
| 192 | |||
| 193 | err = crypto_ahash_finup(ahreq); | ||
| 194 | if (err) | ||
| 195 | goto out; | ||
| 196 | |||
| 197 | authsize = crypto_aead_authsize(authenc); | ||
| 198 | cryptlen -= authsize; | ||
| 199 | ihash = ahreq->result + authsize; | ||
| 200 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 201 | authsize, 0); | ||
| 202 | |||
| 203 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
| 204 | if (err) | ||
| 205 | goto out; | ||
| 206 | |||
| 207 | abreq = aead_request_ctx(req); | ||
| 208 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 209 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 210 | req->base.complete, req->base.data); | ||
| 211 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 212 | cryptlen, req->iv); | ||
| 213 | |||
| 214 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 215 | |||
| 216 | out: | ||
| 217 | authenc_request_complete(req, err); | ||
| 218 | } | ||
| 219 | |||
| 220 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, | ||
| 221 | int err) | ||
| 222 | { | ||
| 223 | u8 *ihash; | ||
| 224 | unsigned int authsize; | ||
| 225 | struct ablkcipher_request *abreq; | ||
| 226 | struct aead_request *req = areq->data; | ||
| 227 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 228 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 229 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 230 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 231 | unsigned int cryptlen = req->cryptlen; | ||
| 232 | |||
| 233 | if (err) | ||
| 234 | goto out; | ||
| 235 | |||
| 236 | authsize = crypto_aead_authsize(authenc); | ||
| 237 | cryptlen -= authsize; | ||
| 238 | ihash = ahreq->result + authsize; | ||
| 239 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 240 | authsize, 0); | ||
| 241 | |||
| 242 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
| 243 | if (err) | ||
| 244 | goto out; | ||
| 245 | |||
| 246 | abreq = aead_request_ctx(req); | ||
| 247 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 248 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 249 | req->base.complete, req->base.data); | ||
| 250 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 251 | cryptlen, req->iv); | ||
| 252 | |||
| 253 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 254 | |||
| 255 | out: | ||
| 256 | authenc_request_complete(req, err); | ||
| 257 | } | ||
| 258 | |||
| 259 | static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) | ||
| 260 | { | ||
| 261 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 262 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 263 | struct crypto_ahash *auth = ctx->auth; | ||
| 264 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 265 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 266 | u8 *hash = areq_ctx->tail; | ||
| 267 | int err; | ||
| 268 | |||
| 269 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), | ||
| 270 | crypto_ahash_alignmask(auth) + 1); | ||
| 271 | |||
| 272 | ahash_request_set_tfm(ahreq, auth); | ||
| 273 | |||
| 274 | err = crypto_ahash_init(ahreq); | ||
| 275 | if (err) | ||
| 276 | return ERR_PTR(err); | ||
| 277 | |||
| 278 | ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); | ||
| 279 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 280 | areq_ctx->update_complete, req); | ||
| 281 | |||
| 282 | err = crypto_ahash_update(ahreq); | ||
| 283 | if (err) | ||
| 284 | return ERR_PTR(err); | ||
| 285 | |||
| 286 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | ||
| 287 | areq_ctx->cryptlen); | ||
| 288 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 289 | areq_ctx->complete, req); | ||
| 290 | |||
| 291 | err = crypto_ahash_finup(ahreq); | ||
| 292 | if (err) | ||
| 293 | return ERR_PTR(err); | ||
| 294 | |||
| 295 | return hash; | ||
| 296 | } | ||
| 297 | |||
| 298 | static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) | ||
| 299 | { | 139 | { |
| 300 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 140 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 141 | struct aead_instance *inst = aead_alg_instance(authenc); | ||
| 301 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 142 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
| 143 | struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); | ||
| 302 | struct crypto_ahash *auth = ctx->auth; | 144 | struct crypto_ahash *auth = ctx->auth; |
| 303 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 145 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
| 304 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 146 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); |
| 305 | u8 *hash = areq_ctx->tail; | 147 | u8 *hash = areq_ctx->tail; |
| 306 | int err; | 148 | int err; |
| 307 | 149 | ||
| @@ -309,66 +151,18 @@ static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) | |||
| 309 | crypto_ahash_alignmask(auth) + 1); | 151 | crypto_ahash_alignmask(auth) + 1); |
| 310 | 152 | ||
| 311 | ahash_request_set_tfm(ahreq, auth); | 153 | ahash_request_set_tfm(ahreq, auth); |
| 312 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | 154 | ahash_request_set_crypt(ahreq, req->dst, hash, |
| 313 | areq_ctx->cryptlen); | 155 | req->assoclen + req->cryptlen); |
| 314 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | 156 | ahash_request_set_callback(ahreq, flags, |
| 315 | areq_ctx->complete, req); | 157 | authenc_geniv_ahash_done, req); |
| 316 | 158 | ||
| 317 | err = crypto_ahash_digest(ahreq); | 159 | err = crypto_ahash_digest(ahreq); |
| 318 | if (err) | 160 | if (err) |
| 319 | return ERR_PTR(err); | 161 | return err; |
| 320 | |||
| 321 | return hash; | ||
| 322 | } | ||
| 323 | |||
| 324 | static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | ||
| 325 | unsigned int flags) | ||
| 326 | { | ||
| 327 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 328 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 329 | struct scatterlist *dst = req->dst; | ||
| 330 | struct scatterlist *assoc = req->assoc; | ||
| 331 | struct scatterlist *cipher = areq_ctx->cipher; | ||
| 332 | struct scatterlist *asg = areq_ctx->asg; | ||
| 333 | unsigned int ivsize = crypto_aead_ivsize(authenc); | ||
| 334 | unsigned int cryptlen = req->cryptlen; | ||
| 335 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
| 336 | struct page *dstp; | ||
| 337 | u8 *vdst; | ||
| 338 | u8 *hash; | ||
| 339 | |||
| 340 | dstp = sg_page(dst); | ||
| 341 | vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; | ||
| 342 | |||
| 343 | if (ivsize) { | ||
| 344 | sg_init_table(cipher, 2); | ||
| 345 | sg_set_buf(cipher, iv, ivsize); | ||
| 346 | scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2); | ||
| 347 | dst = cipher; | ||
| 348 | cryptlen += ivsize; | ||
| 349 | } | ||
| 350 | |||
| 351 | if (req->assoclen && sg_is_last(assoc)) { | ||
| 352 | authenc_ahash_fn = crypto_authenc_ahash; | ||
| 353 | sg_init_table(asg, 2); | ||
| 354 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
| 355 | scatterwalk_crypto_chain(asg, dst, 0, 2); | ||
| 356 | dst = asg; | ||
| 357 | cryptlen += req->assoclen; | ||
| 358 | } | ||
| 359 | |||
| 360 | areq_ctx->cryptlen = cryptlen; | ||
| 361 | areq_ctx->sg = dst; | ||
| 362 | |||
| 363 | areq_ctx->complete = authenc_geniv_ahash_done; | ||
| 364 | areq_ctx->update_complete = authenc_geniv_ahash_update_done; | ||
| 365 | |||
| 366 | hash = authenc_ahash_fn(req, flags); | ||
| 367 | if (IS_ERR(hash)) | ||
| 368 | return PTR_ERR(hash); | ||
| 369 | 162 | ||
| 370 | scatterwalk_map_and_copy(hash, dst, cryptlen, | 163 | scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen, |
| 371 | crypto_aead_authsize(authenc), 1); | 164 | crypto_aead_authsize(authenc), 1); |
| 165 | |||
| 372 | return 0; | 166 | return 0; |
| 373 | } | 167 | } |
| 374 | 168 | ||
| @@ -377,180 +171,155 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req, | |||
| 377 | { | 171 | { |
| 378 | struct aead_request *areq = req->data; | 172 | struct aead_request *areq = req->data; |
| 379 | 173 | ||
| 380 | if (!err) { | 174 | if (err) |
| 381 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 175 | goto out; |
| 382 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 383 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq); | ||
| 384 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail | ||
| 385 | + ctx->reqoff); | ||
| 386 | u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc); | ||
| 387 | 176 | ||
| 388 | err = crypto_authenc_genicv(areq, iv, 0); | 177 | err = crypto_authenc_genicv(areq, 0); |
| 389 | } | ||
| 390 | 178 | ||
| 179 | out: | ||
| 391 | authenc_request_complete(areq, err); | 180 | authenc_request_complete(areq, err); |
| 392 | } | 181 | } |
| 393 | 182 | ||
| 183 | static int crypto_authenc_copy_assoc(struct aead_request *req) | ||
| 184 | { | ||
| 185 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 186 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 187 | struct blkcipher_desc desc = { | ||
| 188 | .tfm = ctx->null, | ||
| 189 | }; | ||
| 190 | |||
| 191 | return crypto_blkcipher_encrypt(&desc, req->dst, req->src, | ||
| 192 | req->assoclen); | ||
| 193 | } | ||
| 194 | |||
| 394 | static int crypto_authenc_encrypt(struct aead_request *req) | 195 | static int crypto_authenc_encrypt(struct aead_request *req) |
| 395 | { | 196 | { |
| 396 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 197 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 198 | struct aead_instance *inst = aead_alg_instance(authenc); | ||
| 397 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 199 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
| 200 | struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); | ||
| 398 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | 201 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
| 399 | struct crypto_ablkcipher *enc = ctx->enc; | 202 | struct crypto_ablkcipher *enc = ctx->enc; |
| 400 | struct scatterlist *dst = req->dst; | ||
| 401 | unsigned int cryptlen = req->cryptlen; | 203 | unsigned int cryptlen = req->cryptlen; |
| 402 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail | 204 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + |
| 403 | + ctx->reqoff); | 205 | ictx->reqoff); |
| 404 | u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc); | 206 | struct scatterlist *src, *dst; |
| 405 | int err; | 207 | int err; |
| 406 | 208 | ||
| 209 | sg_init_table(areq_ctx->src, 2); | ||
| 210 | src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); | ||
| 211 | dst = src; | ||
| 212 | |||
| 213 | if (req->src != req->dst) { | ||
| 214 | err = crypto_authenc_copy_assoc(req); | ||
| 215 | if (err) | ||
| 216 | return err; | ||
| 217 | |||
| 218 | sg_init_table(areq_ctx->dst, 2); | ||
| 219 | dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); | ||
| 220 | } | ||
| 221 | |||
| 407 | ablkcipher_request_set_tfm(abreq, enc); | 222 | ablkcipher_request_set_tfm(abreq, enc); |
| 408 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 223 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
| 409 | crypto_authenc_encrypt_done, req); | 224 | crypto_authenc_encrypt_done, req); |
| 410 | ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); | 225 | ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); |
| 411 | |||
| 412 | memcpy(iv, req->iv, crypto_aead_ivsize(authenc)); | ||
| 413 | 226 | ||
| 414 | err = crypto_ablkcipher_encrypt(abreq); | 227 | err = crypto_ablkcipher_encrypt(abreq); |
| 415 | if (err) | 228 | if (err) |
| 416 | return err; | 229 | return err; |
| 417 | 230 | ||
| 418 | return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | 231 | return crypto_authenc_genicv(req, aead_request_flags(req)); |
| 419 | } | 232 | } |
| 420 | 233 | ||
| 421 | static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, | 234 | static int crypto_authenc_decrypt_tail(struct aead_request *req, |
| 422 | int err) | 235 | unsigned int flags) |
| 423 | { | 236 | { |
| 424 | struct aead_request *areq = req->data; | 237 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 425 | 238 | struct aead_instance *inst = aead_alg_instance(authenc); | |
| 426 | if (!err) { | 239 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
| 427 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | 240 | struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); |
| 428 | 241 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | |
| 429 | err = crypto_authenc_genicv(areq, greq->giv, 0); | 242 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); |
| 430 | } | 243 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + |
| 244 | ictx->reqoff); | ||
| 245 | unsigned int authsize = crypto_aead_authsize(authenc); | ||
| 246 | u8 *ihash = ahreq->result + authsize; | ||
| 247 | struct scatterlist *src, *dst; | ||
| 431 | 248 | ||
| 432 | authenc_request_complete(areq, err); | 249 | scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0); |
| 433 | } | ||
| 434 | 250 | ||
| 435 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | 251 | if (crypto_memneq(ihash, ahreq->result, authsize)) |
| 436 | { | 252 | return -EBADMSG; |
| 437 | struct crypto_aead *authenc = aead_givcrypt_reqtfm(req); | ||
| 438 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 439 | struct aead_request *areq = &req->areq; | ||
| 440 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | ||
| 441 | u8 *iv = req->giv; | ||
| 442 | int err; | ||
| 443 | 253 | ||
| 444 | skcipher_givcrypt_set_tfm(greq, ctx->enc); | 254 | sg_init_table(areq_ctx->src, 2); |
| 445 | skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), | 255 | src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); |
| 446 | crypto_authenc_givencrypt_done, areq); | 256 | dst = src; |
| 447 | skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, | ||
| 448 | areq->iv); | ||
| 449 | skcipher_givcrypt_set_giv(greq, iv, req->seq); | ||
| 450 | 257 | ||
| 451 | err = crypto_skcipher_givencrypt(greq); | 258 | if (req->src != req->dst) { |
| 452 | if (err) | 259 | sg_init_table(areq_ctx->dst, 2); |
| 453 | return err; | 260 | dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); |
| 261 | } | ||
| 454 | 262 | ||
| 455 | return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | 263 | ablkcipher_request_set_tfm(abreq, ctx->enc); |
| 456 | } | 264 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
| 265 | req->base.complete, req->base.data); | ||
| 266 | ablkcipher_request_set_crypt(abreq, src, dst, | ||
| 267 | req->cryptlen - authsize, req->iv); | ||
| 457 | 268 | ||
| 458 | static int crypto_authenc_verify(struct aead_request *req, | 269 | return crypto_ablkcipher_decrypt(abreq); |
| 459 | authenc_ahash_t authenc_ahash_fn) | ||
| 460 | { | ||
| 461 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
| 462 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 463 | u8 *ohash; | ||
| 464 | u8 *ihash; | ||
| 465 | unsigned int authsize; | ||
| 466 | |||
| 467 | areq_ctx->complete = authenc_verify_ahash_done; | ||
| 468 | areq_ctx->update_complete = authenc_verify_ahash_update_done; | ||
| 469 | |||
| 470 | ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
| 471 | if (IS_ERR(ohash)) | ||
| 472 | return PTR_ERR(ohash); | ||
| 473 | |||
| 474 | authsize = crypto_aead_authsize(authenc); | ||
| 475 | ihash = ohash + authsize; | ||
| 476 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 477 | authsize, 0); | ||
| 478 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; | ||
| 479 | } | 270 | } |
| 480 | 271 | ||
| 481 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | 272 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, |
| 482 | unsigned int cryptlen) | 273 | int err) |
| 483 | { | 274 | { |
| 484 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 275 | struct aead_request *req = areq->data; |
| 485 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 486 | struct scatterlist *src = req->src; | ||
| 487 | struct scatterlist *assoc = req->assoc; | ||
| 488 | struct scatterlist *cipher = areq_ctx->cipher; | ||
| 489 | struct scatterlist *asg = areq_ctx->asg; | ||
| 490 | unsigned int ivsize = crypto_aead_ivsize(authenc); | ||
| 491 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
| 492 | struct page *srcp; | ||
| 493 | u8 *vsrc; | ||
| 494 | |||
| 495 | srcp = sg_page(src); | ||
| 496 | vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset; | ||
| 497 | |||
| 498 | if (ivsize) { | ||
| 499 | sg_init_table(cipher, 2); | ||
| 500 | sg_set_buf(cipher, iv, ivsize); | ||
| 501 | scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); | ||
| 502 | src = cipher; | ||
| 503 | cryptlen += ivsize; | ||
| 504 | } | ||
| 505 | 276 | ||
| 506 | if (req->assoclen && sg_is_last(assoc)) { | 277 | if (err) |
| 507 | authenc_ahash_fn = crypto_authenc_ahash; | 278 | goto out; |
| 508 | sg_init_table(asg, 2); | ||
| 509 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
| 510 | scatterwalk_crypto_chain(asg, src, 0, 2); | ||
| 511 | src = asg; | ||
| 512 | cryptlen += req->assoclen; | ||
| 513 | } | ||
| 514 | 279 | ||
| 515 | areq_ctx->cryptlen = cryptlen; | 280 | err = crypto_authenc_decrypt_tail(req, 0); |
| 516 | areq_ctx->sg = src; | ||
| 517 | 281 | ||
| 518 | return crypto_authenc_verify(req, authenc_ahash_fn); | 282 | out: |
| 283 | authenc_request_complete(req, err); | ||
| 519 | } | 284 | } |
| 520 | 285 | ||
| 521 | static int crypto_authenc_decrypt(struct aead_request *req) | 286 | static int crypto_authenc_decrypt(struct aead_request *req) |
| 522 | { | 287 | { |
| 523 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 288 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 524 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 525 | struct ablkcipher_request *abreq = aead_request_ctx(req); | ||
| 526 | unsigned int cryptlen = req->cryptlen; | ||
| 527 | unsigned int authsize = crypto_aead_authsize(authenc); | 289 | unsigned int authsize = crypto_aead_authsize(authenc); |
| 528 | u8 *iv = req->iv; | 290 | struct aead_instance *inst = aead_alg_instance(authenc); |
| 291 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 292 | struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); | ||
| 293 | struct crypto_ahash *auth = ctx->auth; | ||
| 294 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 295 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); | ||
| 296 | u8 *hash = areq_ctx->tail; | ||
| 529 | int err; | 297 | int err; |
| 530 | 298 | ||
| 531 | if (cryptlen < authsize) | 299 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), |
| 532 | return -EINVAL; | 300 | crypto_ahash_alignmask(auth) + 1); |
| 533 | cryptlen -= authsize; | ||
| 534 | 301 | ||
| 535 | err = crypto_authenc_iverify(req, iv, cryptlen); | 302 | ahash_request_set_tfm(ahreq, auth); |
| 303 | ahash_request_set_crypt(ahreq, req->src, hash, | ||
| 304 | req->assoclen + req->cryptlen - authsize); | ||
| 305 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
| 306 | authenc_verify_ahash_done, req); | ||
| 307 | |||
| 308 | err = crypto_ahash_digest(ahreq); | ||
| 536 | if (err) | 309 | if (err) |
| 537 | return err; | 310 | return err; |
| 538 | 311 | ||
| 539 | ablkcipher_request_set_tfm(abreq, ctx->enc); | 312 | return crypto_authenc_decrypt_tail(req, aead_request_flags(req)); |
| 540 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 541 | req->base.complete, req->base.data); | ||
| 542 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); | ||
| 543 | |||
| 544 | return crypto_ablkcipher_decrypt(abreq); | ||
| 545 | } | 313 | } |
| 546 | 314 | ||
| 547 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | 315 | static int crypto_authenc_init_tfm(struct crypto_aead *tfm) |
| 548 | { | 316 | { |
| 549 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 317 | struct aead_instance *inst = aead_alg_instance(tfm); |
| 550 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); | 318 | struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); |
| 551 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 319 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); |
| 552 | struct crypto_ahash *auth; | 320 | struct crypto_ahash *auth; |
| 553 | struct crypto_ablkcipher *enc; | 321 | struct crypto_ablkcipher *enc; |
| 322 | struct crypto_blkcipher *null; | ||
| 554 | int err; | 323 | int err; |
| 555 | 324 | ||
| 556 | auth = crypto_spawn_ahash(&ictx->auth); | 325 | auth = crypto_spawn_ahash(&ictx->auth); |
| @@ -562,42 +331,57 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | |||
| 562 | if (IS_ERR(enc)) | 331 | if (IS_ERR(enc)) |
| 563 | goto err_free_ahash; | 332 | goto err_free_ahash; |
| 564 | 333 | ||
| 334 | null = crypto_get_default_null_skcipher(); | ||
| 335 | err = PTR_ERR(null); | ||
| 336 | if (IS_ERR(null)) | ||
| 337 | goto err_free_skcipher; | ||
| 338 | |||
| 565 | ctx->auth = auth; | 339 | ctx->auth = auth; |
| 566 | ctx->enc = enc; | 340 | ctx->enc = enc; |
| 341 | ctx->null = null; | ||
| 567 | 342 | ||
| 568 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | 343 | crypto_aead_set_reqsize( |
| 569 | crypto_ahash_alignmask(auth), | 344 | tfm, |
| 570 | crypto_ahash_alignmask(auth) + 1) + | ||
| 571 | crypto_ablkcipher_ivsize(enc); | ||
| 572 | |||
| 573 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | ||
| 574 | sizeof(struct authenc_request_ctx) + | 345 | sizeof(struct authenc_request_ctx) + |
| 575 | ctx->reqoff + | 346 | ictx->reqoff + |
| 576 | max_t(unsigned int, | 347 | max_t(unsigned int, |
| 577 | crypto_ahash_reqsize(auth) + | 348 | crypto_ahash_reqsize(auth) + |
| 578 | sizeof(struct ahash_request), | 349 | sizeof(struct ahash_request), |
| 579 | sizeof(struct skcipher_givcrypt_request) + | 350 | sizeof(struct ablkcipher_request) + |
| 580 | crypto_ablkcipher_reqsize(enc))); | 351 | crypto_ablkcipher_reqsize(enc))); |
| 581 | 352 | ||
| 582 | return 0; | 353 | return 0; |
| 583 | 354 | ||
| 355 | err_free_skcipher: | ||
| 356 | crypto_free_ablkcipher(enc); | ||
| 584 | err_free_ahash: | 357 | err_free_ahash: |
| 585 | crypto_free_ahash(auth); | 358 | crypto_free_ahash(auth); |
| 586 | return err; | 359 | return err; |
| 587 | } | 360 | } |
| 588 | 361 | ||
| 589 | static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) | 362 | static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) |
| 590 | { | 363 | { |
| 591 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 364 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); |
| 592 | 365 | ||
| 593 | crypto_free_ahash(ctx->auth); | 366 | crypto_free_ahash(ctx->auth); |
| 594 | crypto_free_ablkcipher(ctx->enc); | 367 | crypto_free_ablkcipher(ctx->enc); |
| 368 | crypto_put_default_null_skcipher(); | ||
| 595 | } | 369 | } |
| 596 | 370 | ||
| 597 | static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | 371 | static void crypto_authenc_free(struct aead_instance *inst) |
| 372 | { | ||
| 373 | struct authenc_instance_ctx *ctx = aead_instance_ctx(inst); | ||
| 374 | |||
| 375 | crypto_drop_skcipher(&ctx->enc); | ||
| 376 | crypto_drop_ahash(&ctx->auth); | ||
| 377 | kfree(inst); | ||
| 378 | } | ||
| 379 | |||
| 380 | static int crypto_authenc_create(struct crypto_template *tmpl, | ||
| 381 | struct rtattr **tb) | ||
| 598 | { | 382 | { |
| 599 | struct crypto_attr_type *algt; | 383 | struct crypto_attr_type *algt; |
| 600 | struct crypto_instance *inst; | 384 | struct aead_instance *inst; |
| 601 | struct hash_alg_common *auth; | 385 | struct hash_alg_common *auth; |
| 602 | struct crypto_alg *auth_base; | 386 | struct crypto_alg *auth_base; |
| 603 | struct crypto_alg *enc; | 387 | struct crypto_alg *enc; |
| @@ -607,15 +391,15 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 607 | 391 | ||
| 608 | algt = crypto_get_attr_type(tb); | 392 | algt = crypto_get_attr_type(tb); |
| 609 | if (IS_ERR(algt)) | 393 | if (IS_ERR(algt)) |
| 610 | return ERR_CAST(algt); | 394 | return PTR_ERR(algt); |
| 611 | 395 | ||
| 612 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 396 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 613 | return ERR_PTR(-EINVAL); | 397 | return -EINVAL; |
| 614 | 398 | ||
| 615 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 399 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
| 616 | CRYPTO_ALG_TYPE_AHASH_MASK); | 400 | CRYPTO_ALG_TYPE_AHASH_MASK); |
| 617 | if (IS_ERR(auth)) | 401 | if (IS_ERR(auth)) |
| 618 | return ERR_CAST(auth); | 402 | return PTR_ERR(auth); |
| 619 | 403 | ||
| 620 | auth_base = &auth->base; | 404 | auth_base = &auth->base; |
| 621 | 405 | ||
| @@ -629,13 +413,14 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 629 | if (!inst) | 413 | if (!inst) |
| 630 | goto out_put_auth; | 414 | goto out_put_auth; |
| 631 | 415 | ||
| 632 | ctx = crypto_instance_ctx(inst); | 416 | ctx = aead_instance_ctx(inst); |
| 633 | 417 | ||
| 634 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); | 418 | err = crypto_init_ahash_spawn(&ctx->auth, auth, |
| 419 | aead_crypto_instance(inst)); | ||
| 635 | if (err) | 420 | if (err) |
| 636 | goto err_free_inst; | 421 | goto err_free_inst; |
| 637 | 422 | ||
| 638 | crypto_set_skcipher_spawn(&ctx->enc, inst); | 423 | crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); |
| 639 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, | 424 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, |
| 640 | crypto_requires_sync(algt->type, | 425 | crypto_requires_sync(algt->type, |
| 641 | algt->mask)); | 426 | algt->mask)); |
| @@ -644,41 +429,47 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
| 644 | 429 | ||
| 645 | enc = crypto_skcipher_spawn_alg(&ctx->enc); | 430 | enc = crypto_skcipher_spawn_alg(&ctx->enc); |
| 646 | 431 | ||
| 432 | ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, | ||
| 433 | auth_base->cra_alignmask + 1); | ||
| 434 | |||
| 647 | err = -ENAMETOOLONG; | 435 | err = -ENAMETOOLONG; |
| 648 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 436 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 649 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= | 437 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= |
| 650 | CRYPTO_MAX_ALG_NAME) | 438 | CRYPTO_MAX_ALG_NAME) |
| 651 | goto err_drop_enc; | 439 | goto err_drop_enc; |
| 652 | 440 | ||
| 653 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 441 | if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 654 | "authenc(%s,%s)", auth_base->cra_driver_name, | 442 | "authenc(%s,%s)", auth_base->cra_driver_name, |
| 655 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 443 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 656 | goto err_drop_enc; | 444 | goto err_drop_enc; |
| 657 | 445 | ||
| 658 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 446 | inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; |
| 659 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 447 | inst->alg.base.cra_priority = enc->cra_priority * 10 + |
| 660 | inst->alg.cra_priority = enc->cra_priority * | 448 | auth_base->cra_priority; |
| 661 | 10 + auth_base->cra_priority; | 449 | inst->alg.base.cra_blocksize = enc->cra_blocksize; |
| 662 | inst->alg.cra_blocksize = enc->cra_blocksize; | 450 | inst->alg.base.cra_alignmask = auth_base->cra_alignmask | |
| 663 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; | 451 | enc->cra_alignmask; |
| 664 | inst->alg.cra_type = &crypto_aead_type; | 452 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); |
| 453 | |||
| 454 | inst->alg.ivsize = enc->cra_ablkcipher.ivsize; | ||
| 455 | inst->alg.maxauthsize = auth->digestsize; | ||
| 665 | 456 | ||
| 666 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 457 | inst->alg.init = crypto_authenc_init_tfm; |
| 667 | inst->alg.cra_aead.maxauthsize = auth->digestsize; | 458 | inst->alg.exit = crypto_authenc_exit_tfm; |
| 668 | 459 | ||
| 669 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); | 460 | inst->alg.setkey = crypto_authenc_setkey; |
| 461 | inst->alg.encrypt = crypto_authenc_encrypt; | ||
| 462 | inst->alg.decrypt = crypto_authenc_decrypt; | ||
| 670 | 463 | ||
| 671 | inst->alg.cra_init = crypto_authenc_init_tfm; | 464 | inst->free = crypto_authenc_free; |
| 672 | inst->alg.cra_exit = crypto_authenc_exit_tfm; | ||
| 673 | 465 | ||
| 674 | inst->alg.cra_aead.setkey = crypto_authenc_setkey; | 466 | err = aead_register_instance(tmpl, inst); |
| 675 | inst->alg.cra_aead.encrypt = crypto_authenc_encrypt; | 467 | if (err) |
| 676 | inst->alg.cra_aead.decrypt = crypto_authenc_decrypt; | 468 | goto err_drop_enc; |
| 677 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; | ||
| 678 | 469 | ||
| 679 | out: | 470 | out: |
| 680 | crypto_mod_put(auth_base); | 471 | crypto_mod_put(auth_base); |
| 681 | return inst; | 472 | return err; |
| 682 | 473 | ||
| 683 | err_drop_enc: | 474 | err_drop_enc: |
| 684 | crypto_drop_skcipher(&ctx->enc); | 475 | crypto_drop_skcipher(&ctx->enc); |
| @@ -687,23 +478,12 @@ err_drop_auth: | |||
| 687 | err_free_inst: | 478 | err_free_inst: |
| 688 | kfree(inst); | 479 | kfree(inst); |
| 689 | out_put_auth: | 480 | out_put_auth: |
| 690 | inst = ERR_PTR(err); | ||
| 691 | goto out; | 481 | goto out; |
| 692 | } | 482 | } |
| 693 | 483 | ||
| 694 | static void crypto_authenc_free(struct crypto_instance *inst) | ||
| 695 | { | ||
| 696 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
| 697 | |||
| 698 | crypto_drop_skcipher(&ctx->enc); | ||
| 699 | crypto_drop_ahash(&ctx->auth); | ||
| 700 | kfree(inst); | ||
| 701 | } | ||
| 702 | |||
| 703 | static struct crypto_template crypto_authenc_tmpl = { | 484 | static struct crypto_template crypto_authenc_tmpl = { |
| 704 | .name = "authenc", | 485 | .name = "authenc", |
| 705 | .alloc = crypto_authenc_alloc, | 486 | .create = crypto_authenc_create, |
| 706 | .free = crypto_authenc_free, | ||
| 707 | .module = THIS_MODULE, | 487 | .module = THIS_MODULE, |
| 708 | }; | 488 | }; |
| 709 | 489 | ||
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index b8efe36ce114..0c0468869e25 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2010 secunet Security Networks AG | 5 | * Copyright (C) 2010 secunet Security Networks AG |
| 6 | * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> | 6 | * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> |
| 7 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
| @@ -16,6 +17,7 @@ | |||
| 16 | #include <crypto/internal/hash.h> | 17 | #include <crypto/internal/hash.h> |
| 17 | #include <crypto/internal/skcipher.h> | 18 | #include <crypto/internal/skcipher.h> |
| 18 | #include <crypto/authenc.h> | 19 | #include <crypto/authenc.h> |
| 20 | #include <crypto/null.h> | ||
| 19 | #include <crypto/scatterwalk.h> | 21 | #include <crypto/scatterwalk.h> |
| 20 | #include <linux/err.h> | 22 | #include <linux/err.h> |
| 21 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| @@ -34,19 +36,12 @@ struct crypto_authenc_esn_ctx { | |||
| 34 | unsigned int reqoff; | 36 | unsigned int reqoff; |
| 35 | struct crypto_ahash *auth; | 37 | struct crypto_ahash *auth; |
| 36 | struct crypto_ablkcipher *enc; | 38 | struct crypto_ablkcipher *enc; |
| 39 | struct crypto_blkcipher *null; | ||
| 37 | }; | 40 | }; |
| 38 | 41 | ||
| 39 | struct authenc_esn_request_ctx { | 42 | struct authenc_esn_request_ctx { |
| 40 | unsigned int cryptlen; | 43 | struct scatterlist src[2]; |
| 41 | unsigned int headlen; | 44 | struct scatterlist dst[2]; |
| 42 | unsigned int trailen; | ||
| 43 | struct scatterlist *sg; | ||
| 44 | struct scatterlist hsg[2]; | ||
| 45 | struct scatterlist tsg[1]; | ||
| 46 | struct scatterlist cipher[2]; | ||
| 47 | crypto_completion_t complete; | ||
| 48 | crypto_completion_t update_complete; | ||
| 49 | crypto_completion_t update_complete2; | ||
| 50 | char tail[]; | 45 | char tail[]; |
| 51 | }; | 46 | }; |
| 52 | 47 | ||
| @@ -56,6 +51,15 @@ static void authenc_esn_request_complete(struct aead_request *req, int err) | |||
| 56 | aead_request_complete(req, err); | 51 | aead_request_complete(req, err); |
| 57 | } | 52 | } |
| 58 | 53 | ||
| 54 | static int crypto_authenc_esn_setauthsize(struct crypto_aead *authenc_esn, | ||
| 55 | unsigned int authsize) | ||
| 56 | { | ||
| 57 | if (authsize > 0 && authsize < 4) | ||
| 58 | return -EINVAL; | ||
| 59 | |||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 59 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, | 63 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, |
| 60 | unsigned int keylen) | 64 | unsigned int keylen) |
| 61 | { | 65 | { |
| @@ -93,536 +97,242 @@ badkey: | |||
| 93 | goto out; | 97 | goto out; |
| 94 | } | 98 | } |
| 95 | 99 | ||
| 96 | static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq, | 100 | static int crypto_authenc_esn_genicv_tail(struct aead_request *req, |
| 97 | int err) | 101 | unsigned int flags) |
| 98 | { | 102 | { |
| 99 | struct aead_request *req = areq->data; | ||
| 100 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 103 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
| 101 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 104 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 102 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 105 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
| 103 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 106 | struct crypto_ahash *auth = ctx->auth; |
| 104 | 107 | u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, | |
| 105 | if (err) | 108 | crypto_ahash_alignmask(auth) + 1); |
| 106 | goto out; | 109 | unsigned int authsize = crypto_aead_authsize(authenc_esn); |
| 107 | 110 | unsigned int assoclen = req->assoclen; | |
| 108 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | 111 | unsigned int cryptlen = req->cryptlen; |
| 109 | areq_ctx->cryptlen); | 112 | struct scatterlist *dst = req->dst; |
| 110 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | 113 | u32 tmp[2]; |
| 111 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 112 | areq_ctx->update_complete2, req); | ||
| 113 | |||
| 114 | err = crypto_ahash_update(ahreq); | ||
| 115 | if (err) | ||
| 116 | goto out; | ||
| 117 | |||
| 118 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
| 119 | areq_ctx->trailen); | ||
| 120 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 121 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 122 | areq_ctx->complete, req); | ||
| 123 | |||
| 124 | err = crypto_ahash_finup(ahreq); | ||
| 125 | if (err) | ||
| 126 | goto out; | ||
| 127 | |||
| 128 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
| 129 | areq_ctx->cryptlen, | ||
| 130 | crypto_aead_authsize(authenc_esn), 1); | ||
| 131 | |||
| 132 | out: | ||
| 133 | authenc_esn_request_complete(req, err); | ||
| 134 | } | ||
| 135 | |||
| 136 | static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq, | ||
| 137 | int err) | ||
| 138 | { | ||
| 139 | struct aead_request *req = areq->data; | ||
| 140 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
| 141 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 142 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 143 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 144 | |||
| 145 | if (err) | ||
| 146 | goto out; | ||
| 147 | |||
| 148 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
| 149 | areq_ctx->trailen); | ||
| 150 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 151 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 152 | areq_ctx->complete, req); | ||
| 153 | |||
| 154 | err = crypto_ahash_finup(ahreq); | ||
| 155 | if (err) | ||
| 156 | goto out; | ||
| 157 | 114 | ||
| 158 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | 115 | /* Move high-order bits of sequence number back. */ |
| 159 | areq_ctx->cryptlen, | 116 | scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); |
| 160 | crypto_aead_authsize(authenc_esn), 1); | 117 | scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); |
| 118 | scatterwalk_map_and_copy(tmp, dst, 0, 8, 1); | ||
| 161 | 119 | ||
| 162 | out: | 120 | scatterwalk_map_and_copy(hash, dst, assoclen + cryptlen, authsize, 1); |
| 163 | authenc_esn_request_complete(req, err); | 121 | return 0; |
| 164 | } | 122 | } |
| 165 | 123 | ||
| 166 | |||
| 167 | static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq, | 124 | static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq, |
| 168 | int err) | 125 | int err) |
| 169 | { | 126 | { |
| 170 | struct aead_request *req = areq->data; | 127 | struct aead_request *req = areq->data; |
| 171 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
| 172 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 173 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 174 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 175 | 128 | ||
| 176 | if (err) | 129 | err = err ?: crypto_authenc_esn_genicv_tail(req, 0); |
| 177 | goto out; | ||
| 178 | |||
| 179 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
| 180 | areq_ctx->cryptlen, | ||
| 181 | crypto_aead_authsize(authenc_esn), 1); | ||
| 182 | |||
| 183 | out: | ||
| 184 | aead_request_complete(req, err); | 130 | aead_request_complete(req, err); |
| 185 | } | 131 | } |
| 186 | 132 | ||
| 187 | 133 | static int crypto_authenc_esn_genicv(struct aead_request *req, | |
| 188 | static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq, | 134 | unsigned int flags) |
| 189 | int err) | ||
| 190 | { | 135 | { |
| 191 | u8 *ihash; | ||
| 192 | unsigned int authsize; | ||
| 193 | struct ablkcipher_request *abreq; | ||
| 194 | struct aead_request *req = areq->data; | ||
| 195 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 136 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
| 196 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 197 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 137 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
| 198 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 199 | unsigned int cryptlen = req->cryptlen; | ||
| 200 | |||
| 201 | if (err) | ||
| 202 | goto out; | ||
| 203 | |||
| 204 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
| 205 | areq_ctx->cryptlen); | ||
| 206 | |||
| 207 | ahash_request_set_callback(ahreq, | ||
| 208 | aead_request_flags(req) & | ||
| 209 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 210 | areq_ctx->update_complete2, req); | ||
| 211 | |||
| 212 | err = crypto_ahash_update(ahreq); | ||
| 213 | if (err) | ||
| 214 | goto out; | ||
| 215 | |||
| 216 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | ||
| 217 | areq_ctx->trailen); | ||
| 218 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
| 219 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
| 220 | areq_ctx->complete, req); | ||
| 221 | |||
| 222 | err = crypto_ahash_finup(ahreq); | ||
| 223 | if (err) | ||
| 224 | goto out; | ||
| 225 | |||
| 226 | authsize = crypto_aead_authsize(authenc_esn); | ||
| 227 | cryptlen -= authsize; | ||
| 228 | ihash = ahreq->result + authsize; | ||
| 229 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 230 | authsize, 0); | ||
| 231 | |||
| 232 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
| 233 | if (err) | ||
| 234 | goto out; | ||
| 235 | |||
| 236 | abreq = aead_request_ctx(req); | ||
| 237 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 238 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 239 | req->base.complete, req->base.data); | ||
| 240 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 241 | cryptlen, req->iv); | ||
| 242 | |||
| 243 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 244 | |||
| 245 | out: | ||
| 246 | authenc_esn_request_complete(req, err); | ||
| 247 | } | ||
| 248 | |||
| 249 | static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq, | ||
| 250 | int err) | ||
| 251 | { | ||
| 252 | u8 *ihash; | ||
| 253 | unsigned int authsize; | ||
| 254 | struct ablkcipher_request *abreq; | ||
| 255 | struct aead_request *req = areq->data; | ||
| 256 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
| 257 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 138 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 258 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 139 | struct crypto_ahash *auth = ctx->auth; |
| 140 | u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, | ||
| 141 | crypto_ahash_alignmask(auth) + 1); | ||
| 259 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 142 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
| 143 | unsigned int authsize = crypto_aead_authsize(authenc_esn); | ||
| 144 | unsigned int assoclen = req->assoclen; | ||
| 260 | unsigned int cryptlen = req->cryptlen; | 145 | unsigned int cryptlen = req->cryptlen; |
| 146 | struct scatterlist *dst = req->dst; | ||
| 147 | u32 tmp[2]; | ||
| 261 | 148 | ||
| 262 | if (err) | 149 | if (!authsize) |
| 263 | goto out; | 150 | return 0; |
| 264 | 151 | ||
| 265 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result, | 152 | /* Move high-order bits of sequence number to the end. */ |
| 266 | areq_ctx->trailen); | 153 | scatterwalk_map_and_copy(tmp, dst, 0, 8, 0); |
| 267 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | 154 | scatterwalk_map_and_copy(tmp, dst, 4, 4, 1); |
| 268 | CRYPTO_TFM_REQ_MAY_SLEEP, | 155 | scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1); |
| 269 | areq_ctx->complete, req); | ||
| 270 | 156 | ||
| 271 | err = crypto_ahash_finup(ahreq); | 157 | sg_init_table(areq_ctx->dst, 2); |
| 272 | if (err) | 158 | dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4); |
| 273 | goto out; | ||
| 274 | 159 | ||
| 275 | authsize = crypto_aead_authsize(authenc_esn); | 160 | ahash_request_set_tfm(ahreq, auth); |
| 276 | cryptlen -= authsize; | 161 | ahash_request_set_crypt(ahreq, dst, hash, assoclen + cryptlen); |
| 277 | ihash = ahreq->result + authsize; | 162 | ahash_request_set_callback(ahreq, flags, |
| 278 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 163 | authenc_esn_geniv_ahash_done, req); |
| 279 | authsize, 0); | ||
| 280 | |||
| 281 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
| 282 | if (err) | ||
| 283 | goto out; | ||
| 284 | |||
| 285 | abreq = aead_request_ctx(req); | ||
| 286 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 287 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 288 | req->base.complete, req->base.data); | ||
| 289 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 290 | cryptlen, req->iv); | ||
| 291 | |||
| 292 | err = crypto_ablkcipher_decrypt(abreq); | ||
| 293 | 164 | ||
| 294 | out: | 165 | return crypto_ahash_digest(ahreq) ?: |
| 295 | authenc_esn_request_complete(req, err); | 166 | crypto_authenc_esn_genicv_tail(req, aead_request_flags(req)); |
| 296 | } | 167 | } |
| 297 | 168 | ||
| 298 | 169 | ||
| 299 | static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, | 170 | static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, |
| 300 | int err) | 171 | int err) |
| 301 | { | 172 | { |
| 302 | u8 *ihash; | 173 | struct aead_request *areq = req->data; |
| 303 | unsigned int authsize; | ||
| 304 | struct ablkcipher_request *abreq; | ||
| 305 | struct aead_request *req = areq->data; | ||
| 306 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
| 307 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 308 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 309 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
| 310 | unsigned int cryptlen = req->cryptlen; | ||
| 311 | |||
| 312 | if (err) | ||
| 313 | goto out; | ||
| 314 | |||
| 315 | authsize = crypto_aead_authsize(authenc_esn); | ||
| 316 | cryptlen -= authsize; | ||
| 317 | ihash = ahreq->result + authsize; | ||
| 318 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 319 | authsize, 0); | ||
| 320 | |||
| 321 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
| 322 | if (err) | ||
| 323 | goto out; | ||
| 324 | |||
| 325 | abreq = aead_request_ctx(req); | ||
| 326 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
| 327 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
| 328 | req->base.complete, req->base.data); | ||
| 329 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
| 330 | cryptlen, req->iv); | ||
| 331 | 174 | ||
| 332 | err = crypto_ablkcipher_decrypt(abreq); | 175 | if (!err) |
| 176 | err = crypto_authenc_esn_genicv(areq, 0); | ||
| 333 | 177 | ||
| 334 | out: | 178 | authenc_esn_request_complete(areq, err); |
| 335 | authenc_esn_request_complete(req, err); | ||
| 336 | } | 179 | } |
| 337 | 180 | ||
| 338 | static u8 *crypto_authenc_esn_ahash(struct aead_request *req, | 181 | static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len) |
| 339 | unsigned int flags) | ||
| 340 | { | 182 | { |
| 341 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 183 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
| 342 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 184 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 343 | struct crypto_ahash *auth = ctx->auth; | 185 | struct blkcipher_desc desc = { |
| 344 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 186 | .tfm = ctx->null, |
| 345 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | 187 | }; |
| 346 | u8 *hash = areq_ctx->tail; | ||
| 347 | int err; | ||
| 348 | 188 | ||
| 349 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), | 189 | return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len); |
| 350 | crypto_ahash_alignmask(auth) + 1); | ||
| 351 | |||
| 352 | ahash_request_set_tfm(ahreq, auth); | ||
| 353 | |||
| 354 | err = crypto_ahash_init(ahreq); | ||
| 355 | if (err) | ||
| 356 | return ERR_PTR(err); | ||
| 357 | |||
| 358 | ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen); | ||
| 359 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 360 | areq_ctx->update_complete, req); | ||
| 361 | |||
| 362 | err = crypto_ahash_update(ahreq); | ||
| 363 | if (err) | ||
| 364 | return ERR_PTR(err); | ||
| 365 | |||
| 366 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen); | ||
| 367 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 368 | areq_ctx->update_complete2, req); | ||
| 369 | |||
| 370 | err = crypto_ahash_update(ahreq); | ||
| 371 | if (err) | ||
| 372 | return ERR_PTR(err); | ||
| 373 | |||
| 374 | ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash, | ||
| 375 | areq_ctx->trailen); | ||
| 376 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
| 377 | areq_ctx->complete, req); | ||
| 378 | |||
| 379 | err = crypto_ahash_finup(ahreq); | ||
| 380 | if (err) | ||
| 381 | return ERR_PTR(err); | ||
| 382 | |||
| 383 | return hash; | ||
| 384 | } | 190 | } |
| 385 | 191 | ||
| 386 | static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv, | 192 | static int crypto_authenc_esn_encrypt(struct aead_request *req) |
| 387 | unsigned int flags) | ||
| 388 | { | 193 | { |
| 389 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 194 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
| 390 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 195 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
| 391 | struct scatterlist *dst = req->dst; | 196 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 392 | struct scatterlist *assoc = req->assoc; | 197 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail |
| 393 | struct scatterlist *cipher = areq_ctx->cipher; | 198 | + ctx->reqoff); |
| 394 | struct scatterlist *hsg = areq_ctx->hsg; | 199 | struct crypto_ablkcipher *enc = ctx->enc; |
| 395 | struct scatterlist *tsg = areq_ctx->tsg; | 200 | unsigned int assoclen = req->assoclen; |
| 396 | unsigned int ivsize = crypto_aead_ivsize(authenc_esn); | ||
| 397 | unsigned int cryptlen = req->cryptlen; | 201 | unsigned int cryptlen = req->cryptlen; |
| 398 | struct page *dstp; | 202 | struct scatterlist *src, *dst; |
| 399 | u8 *vdst; | 203 | int err; |
| 400 | u8 *hash; | ||
| 401 | |||
| 402 | dstp = sg_page(dst); | ||
| 403 | vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; | ||
| 404 | |||
| 405 | if (ivsize) { | ||
| 406 | sg_init_table(cipher, 2); | ||
| 407 | sg_set_buf(cipher, iv, ivsize); | ||
| 408 | scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2); | ||
| 409 | dst = cipher; | ||
| 410 | cryptlen += ivsize; | ||
| 411 | } | ||
| 412 | |||
| 413 | if (assoc->length < 12) | ||
| 414 | return -EINVAL; | ||
| 415 | |||
| 416 | sg_init_table(hsg, 2); | ||
| 417 | sg_set_page(hsg, sg_page(assoc), 4, assoc->offset); | ||
| 418 | sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8); | ||
| 419 | |||
| 420 | sg_init_table(tsg, 1); | ||
| 421 | sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4); | ||
| 422 | |||
| 423 | areq_ctx->cryptlen = cryptlen; | ||
| 424 | areq_ctx->headlen = 8; | ||
| 425 | areq_ctx->trailen = 4; | ||
| 426 | areq_ctx->sg = dst; | ||
| 427 | |||
| 428 | areq_ctx->complete = authenc_esn_geniv_ahash_done; | ||
| 429 | areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done; | ||
| 430 | areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2; | ||
| 431 | |||
| 432 | hash = crypto_authenc_esn_ahash(req, flags); | ||
| 433 | if (IS_ERR(hash)) | ||
| 434 | return PTR_ERR(hash); | ||
| 435 | 204 | ||
| 436 | scatterwalk_map_and_copy(hash, dst, cryptlen, | 205 | sg_init_table(areq_ctx->src, 2); |
| 437 | crypto_aead_authsize(authenc_esn), 1); | 206 | src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen); |
| 438 | return 0; | 207 | dst = src; |
| 439 | } | ||
| 440 | 208 | ||
| 209 | if (req->src != req->dst) { | ||
| 210 | err = crypto_authenc_esn_copy(req, assoclen); | ||
| 211 | if (err) | ||
| 212 | return err; | ||
| 441 | 213 | ||
| 442 | static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, | 214 | sg_init_table(areq_ctx->dst, 2); |
| 443 | int err) | 215 | dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); |
| 444 | { | ||
| 445 | struct aead_request *areq = req->data; | ||
| 446 | |||
| 447 | if (!err) { | ||
| 448 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq); | ||
| 449 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 450 | struct ablkcipher_request *abreq = aead_request_ctx(areq); | ||
| 451 | u8 *iv = (u8 *)(abreq + 1) + | ||
| 452 | crypto_ablkcipher_reqsize(ctx->enc); | ||
| 453 | |||
| 454 | err = crypto_authenc_esn_genicv(areq, iv, 0); | ||
| 455 | } | 216 | } |
| 456 | 217 | ||
| 457 | authenc_esn_request_complete(areq, err); | ||
| 458 | } | ||
| 459 | |||
| 460 | static int crypto_authenc_esn_encrypt(struct aead_request *req) | ||
| 461 | { | ||
| 462 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | ||
| 463 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 464 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 465 | struct crypto_ablkcipher *enc = ctx->enc; | ||
| 466 | struct scatterlist *dst = req->dst; | ||
| 467 | unsigned int cryptlen = req->cryptlen; | ||
| 468 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail | ||
| 469 | + ctx->reqoff); | ||
| 470 | u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc); | ||
| 471 | int err; | ||
| 472 | |||
| 473 | ablkcipher_request_set_tfm(abreq, enc); | 218 | ablkcipher_request_set_tfm(abreq, enc); |
| 474 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 219 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
| 475 | crypto_authenc_esn_encrypt_done, req); | 220 | crypto_authenc_esn_encrypt_done, req); |
| 476 | ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); | 221 | ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); |
| 477 | |||
| 478 | memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn)); | ||
| 479 | 222 | ||
| 480 | err = crypto_ablkcipher_encrypt(abreq); | 223 | err = crypto_ablkcipher_encrypt(abreq); |
| 481 | if (err) | 224 | if (err) |
| 482 | return err; | 225 | return err; |
| 483 | 226 | ||
| 484 | return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | 227 | return crypto_authenc_esn_genicv(req, aead_request_flags(req)); |
| 485 | } | 228 | } |
| 486 | 229 | ||
| 487 | static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req, | 230 | static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, |
| 488 | int err) | 231 | unsigned int flags) |
| 489 | { | 232 | { |
| 490 | struct aead_request *areq = req->data; | 233 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
| 491 | 234 | unsigned int authsize = crypto_aead_authsize(authenc_esn); | |
| 492 | if (!err) { | 235 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
| 493 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | 236 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 494 | 237 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail | |
| 495 | err = crypto_authenc_esn_genicv(areq, greq->giv, 0); | 238 | + ctx->reqoff); |
| 496 | } | 239 | struct crypto_ahash *auth = ctx->auth; |
| 240 | u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, | ||
| 241 | crypto_ahash_alignmask(auth) + 1); | ||
| 242 | unsigned int cryptlen = req->cryptlen - authsize; | ||
| 243 | unsigned int assoclen = req->assoclen; | ||
| 244 | struct scatterlist *dst = req->dst; | ||
| 245 | u8 *ihash = ohash + crypto_ahash_digestsize(auth); | ||
| 246 | u32 tmp[2]; | ||
| 497 | 247 | ||
| 498 | authenc_esn_request_complete(areq, err); | 248 | /* Move high-order bits of sequence number back. */ |
| 499 | } | 249 | scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); |
| 250 | scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); | ||
| 251 | scatterwalk_map_and_copy(tmp, dst, 0, 8, 1); | ||
| 500 | 252 | ||
| 501 | static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req) | 253 | if (crypto_memneq(ihash, ohash, authsize)) |
| 502 | { | 254 | return -EBADMSG; |
| 503 | struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req); | ||
| 504 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | ||
| 505 | struct aead_request *areq = &req->areq; | ||
| 506 | struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); | ||
| 507 | u8 *iv = req->giv; | ||
| 508 | int err; | ||
| 509 | 255 | ||
| 510 | skcipher_givcrypt_set_tfm(greq, ctx->enc); | 256 | sg_init_table(areq_ctx->dst, 2); |
| 511 | skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), | 257 | dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); |
| 512 | crypto_authenc_esn_givencrypt_done, areq); | ||
| 513 | skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, | ||
| 514 | areq->iv); | ||
| 515 | skcipher_givcrypt_set_giv(greq, iv, req->seq); | ||
| 516 | 258 | ||
| 517 | err = crypto_skcipher_givencrypt(greq); | 259 | ablkcipher_request_set_tfm(abreq, ctx->enc); |
| 518 | if (err) | 260 | ablkcipher_request_set_callback(abreq, flags, |
| 519 | return err; | 261 | req->base.complete, req->base.data); |
| 262 | ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv); | ||
| 520 | 263 | ||
| 521 | return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); | 264 | return crypto_ablkcipher_decrypt(abreq); |
| 522 | } | 265 | } |
| 523 | 266 | ||
| 524 | static int crypto_authenc_esn_verify(struct aead_request *req) | 267 | static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, |
| 268 | int err) | ||
| 525 | { | 269 | { |
| 526 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 270 | struct aead_request *req = areq->data; |
| 527 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | ||
| 528 | u8 *ohash; | ||
| 529 | u8 *ihash; | ||
| 530 | unsigned int authsize; | ||
| 531 | |||
| 532 | areq_ctx->complete = authenc_esn_verify_ahash_done; | ||
| 533 | areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; | ||
| 534 | |||
| 535 | ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
| 536 | if (IS_ERR(ohash)) | ||
| 537 | return PTR_ERR(ohash); | ||
| 538 | 271 | ||
| 539 | authsize = crypto_aead_authsize(authenc_esn); | 272 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); |
| 540 | ihash = ohash + authsize; | 273 | aead_request_complete(req, err); |
| 541 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
| 542 | authsize, 0); | ||
| 543 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; | ||
| 544 | } | 274 | } |
| 545 | 275 | ||
| 546 | static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, | 276 | static int crypto_authenc_esn_decrypt(struct aead_request *req) |
| 547 | unsigned int cryptlen) | ||
| 548 | { | 277 | { |
| 549 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 278 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); |
| 550 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); | 279 | struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); |
| 551 | struct scatterlist *src = req->src; | 280 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 552 | struct scatterlist *assoc = req->assoc; | 281 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
| 553 | struct scatterlist *cipher = areq_ctx->cipher; | 282 | unsigned int authsize = crypto_aead_authsize(authenc_esn); |
| 554 | struct scatterlist *hsg = areq_ctx->hsg; | 283 | struct crypto_ahash *auth = ctx->auth; |
| 555 | struct scatterlist *tsg = areq_ctx->tsg; | 284 | u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, |
| 556 | unsigned int ivsize = crypto_aead_ivsize(authenc_esn); | 285 | crypto_ahash_alignmask(auth) + 1); |
| 557 | struct page *srcp; | 286 | unsigned int assoclen = req->assoclen; |
| 558 | u8 *vsrc; | 287 | unsigned int cryptlen = req->cryptlen; |
| 559 | 288 | u8 *ihash = ohash + crypto_ahash_digestsize(auth); | |
| 560 | srcp = sg_page(src); | 289 | struct scatterlist *dst = req->dst; |
| 561 | vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset; | 290 | u32 tmp[2]; |
| 562 | 291 | int err; | |
| 563 | if (ivsize) { | ||
| 564 | sg_init_table(cipher, 2); | ||
| 565 | sg_set_buf(cipher, iv, ivsize); | ||
| 566 | scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2); | ||
| 567 | src = cipher; | ||
| 568 | cryptlen += ivsize; | ||
| 569 | } | ||
| 570 | 292 | ||
| 571 | if (assoc->length < 12) | 293 | cryptlen -= authsize; |
| 572 | return -EINVAL; | ||
| 573 | 294 | ||
| 574 | sg_init_table(hsg, 2); | 295 | if (req->src != dst) { |
| 575 | sg_set_page(hsg, sg_page(assoc), 4, assoc->offset); | 296 | err = crypto_authenc_esn_copy(req, assoclen + cryptlen); |
| 576 | sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8); | 297 | if (err) |
| 298 | return err; | ||
| 299 | } | ||
| 577 | 300 | ||
| 578 | sg_init_table(tsg, 1); | 301 | scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen, |
| 579 | sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4); | 302 | authsize, 0); |
| 580 | 303 | ||
| 581 | areq_ctx->cryptlen = cryptlen; | 304 | if (!authsize) |
| 582 | areq_ctx->headlen = 8; | 305 | goto tail; |
| 583 | areq_ctx->trailen = 4; | ||
| 584 | areq_ctx->sg = src; | ||
| 585 | 306 | ||
| 586 | areq_ctx->complete = authenc_esn_verify_ahash_done; | 307 | /* Move high-order bits of sequence number to the end. */ |
| 587 | areq_ctx->update_complete = authenc_esn_verify_ahash_update_done; | 308 | scatterwalk_map_and_copy(tmp, dst, 0, 8, 0); |
| 588 | areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2; | 309 | scatterwalk_map_and_copy(tmp, dst, 4, 4, 1); |
| 310 | scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1); | ||
| 589 | 311 | ||
| 590 | return crypto_authenc_esn_verify(req); | 312 | sg_init_table(areq_ctx->dst, 2); |
| 591 | } | 313 | dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4); |
| 592 | 314 | ||
| 593 | static int crypto_authenc_esn_decrypt(struct aead_request *req) | 315 | ahash_request_set_tfm(ahreq, auth); |
| 594 | { | 316 | ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen); |
| 595 | struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); | 317 | ahash_request_set_callback(ahreq, aead_request_flags(req), |
| 596 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 318 | authenc_esn_verify_ahash_done, req); |
| 597 | struct ablkcipher_request *abreq = aead_request_ctx(req); | ||
| 598 | unsigned int cryptlen = req->cryptlen; | ||
| 599 | unsigned int authsize = crypto_aead_authsize(authenc_esn); | ||
| 600 | u8 *iv = req->iv; | ||
| 601 | int err; | ||
| 602 | |||
| 603 | if (cryptlen < authsize) | ||
| 604 | return -EINVAL; | ||
| 605 | cryptlen -= authsize; | ||
| 606 | 319 | ||
| 607 | err = crypto_authenc_esn_iverify(req, iv, cryptlen); | 320 | err = crypto_ahash_digest(ahreq); |
| 608 | if (err) | 321 | if (err) |
| 609 | return err; | 322 | return err; |
| 610 | 323 | ||
| 611 | ablkcipher_request_set_tfm(abreq, ctx->enc); | 324 | tail: |
| 612 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 325 | return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req)); |
| 613 | req->base.complete, req->base.data); | ||
| 614 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); | ||
| 615 | |||
| 616 | return crypto_ablkcipher_decrypt(abreq); | ||
| 617 | } | 326 | } |
| 618 | 327 | ||
| 619 | static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) | 328 | static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) |
| 620 | { | 329 | { |
| 621 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 330 | struct aead_instance *inst = aead_alg_instance(tfm); |
| 622 | struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst); | 331 | struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst); |
| 623 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); | 332 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); |
| 624 | struct crypto_ahash *auth; | 333 | struct crypto_ahash *auth; |
| 625 | struct crypto_ablkcipher *enc; | 334 | struct crypto_ablkcipher *enc; |
| 335 | struct crypto_blkcipher *null; | ||
| 626 | int err; | 336 | int err; |
| 627 | 337 | ||
| 628 | auth = crypto_spawn_ahash(&ictx->auth); | 338 | auth = crypto_spawn_ahash(&ictx->auth); |
| @@ -634,15 +344,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) | |||
| 634 | if (IS_ERR(enc)) | 344 | if (IS_ERR(enc)) |
| 635 | goto err_free_ahash; | 345 | goto err_free_ahash; |
| 636 | 346 | ||
| 347 | null = crypto_get_default_null_skcipher(); | ||
| 348 | err = PTR_ERR(null); | ||
| 349 | if (IS_ERR(null)) | ||
| 350 | goto err_free_skcipher; | ||
| 351 | |||
| 637 | ctx->auth = auth; | 352 | ctx->auth = auth; |
| 638 | ctx->enc = enc; | 353 | ctx->enc = enc; |
| 354 | ctx->null = null; | ||
| 639 | 355 | ||
| 640 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | 356 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth), |
| 641 | crypto_ahash_alignmask(auth), | 357 | crypto_ahash_alignmask(auth) + 1); |
| 642 | crypto_ahash_alignmask(auth) + 1) + | ||
| 643 | crypto_ablkcipher_ivsize(enc); | ||
| 644 | 358 | ||
| 645 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 359 | crypto_aead_set_reqsize( |
| 360 | tfm, | ||
| 646 | sizeof(struct authenc_esn_request_ctx) + | 361 | sizeof(struct authenc_esn_request_ctx) + |
| 647 | ctx->reqoff + | 362 | ctx->reqoff + |
| 648 | max_t(unsigned int, | 363 | max_t(unsigned int, |
| @@ -653,23 +368,36 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) | |||
| 653 | 368 | ||
| 654 | return 0; | 369 | return 0; |
| 655 | 370 | ||
| 371 | err_free_skcipher: | ||
| 372 | crypto_free_ablkcipher(enc); | ||
| 656 | err_free_ahash: | 373 | err_free_ahash: |
| 657 | crypto_free_ahash(auth); | 374 | crypto_free_ahash(auth); |
| 658 | return err; | 375 | return err; |
| 659 | } | 376 | } |
| 660 | 377 | ||
| 661 | static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm) | 378 | static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) |
| 662 | { | 379 | { |
| 663 | struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm); | 380 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); |
| 664 | 381 | ||
| 665 | crypto_free_ahash(ctx->auth); | 382 | crypto_free_ahash(ctx->auth); |
| 666 | crypto_free_ablkcipher(ctx->enc); | 383 | crypto_free_ablkcipher(ctx->enc); |
| 384 | crypto_put_default_null_skcipher(); | ||
| 385 | } | ||
| 386 | |||
| 387 | static void crypto_authenc_esn_free(struct aead_instance *inst) | ||
| 388 | { | ||
| 389 | struct authenc_esn_instance_ctx *ctx = aead_instance_ctx(inst); | ||
| 390 | |||
| 391 | crypto_drop_skcipher(&ctx->enc); | ||
| 392 | crypto_drop_ahash(&ctx->auth); | ||
| 393 | kfree(inst); | ||
| 667 | } | 394 | } |
| 668 | 395 | ||
| 669 | static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) | 396 | static int crypto_authenc_esn_create(struct crypto_template *tmpl, |
| 397 | struct rtattr **tb) | ||
| 670 | { | 398 | { |
| 671 | struct crypto_attr_type *algt; | 399 | struct crypto_attr_type *algt; |
| 672 | struct crypto_instance *inst; | 400 | struct aead_instance *inst; |
| 673 | struct hash_alg_common *auth; | 401 | struct hash_alg_common *auth; |
| 674 | struct crypto_alg *auth_base; | 402 | struct crypto_alg *auth_base; |
| 675 | struct crypto_alg *enc; | 403 | struct crypto_alg *enc; |
| @@ -679,15 +407,15 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) | |||
| 679 | 407 | ||
| 680 | algt = crypto_get_attr_type(tb); | 408 | algt = crypto_get_attr_type(tb); |
| 681 | if (IS_ERR(algt)) | 409 | if (IS_ERR(algt)) |
| 682 | return ERR_CAST(algt); | 410 | return PTR_ERR(algt); |
| 683 | 411 | ||
| 684 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 412 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 685 | return ERR_PTR(-EINVAL); | 413 | return -EINVAL; |
| 686 | 414 | ||
| 687 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 415 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
| 688 | CRYPTO_ALG_TYPE_AHASH_MASK); | 416 | CRYPTO_ALG_TYPE_AHASH_MASK); |
| 689 | if (IS_ERR(auth)) | 417 | if (IS_ERR(auth)) |
| 690 | return ERR_CAST(auth); | 418 | return PTR_ERR(auth); |
| 691 | 419 | ||
| 692 | auth_base = &auth->base; | 420 | auth_base = &auth->base; |
| 693 | 421 | ||
| @@ -701,13 +429,14 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) | |||
| 701 | if (!inst) | 429 | if (!inst) |
| 702 | goto out_put_auth; | 430 | goto out_put_auth; |
| 703 | 431 | ||
| 704 | ctx = crypto_instance_ctx(inst); | 432 | ctx = aead_instance_ctx(inst); |
| 705 | 433 | ||
| 706 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); | 434 | err = crypto_init_ahash_spawn(&ctx->auth, auth, |
| 435 | aead_crypto_instance(inst)); | ||
| 707 | if (err) | 436 | if (err) |
| 708 | goto err_free_inst; | 437 | goto err_free_inst; |
| 709 | 438 | ||
| 710 | crypto_set_skcipher_spawn(&ctx->enc, inst); | 439 | crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); |
| 711 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, | 440 | err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, |
| 712 | crypto_requires_sync(algt->type, | 441 | crypto_requires_sync(algt->type, |
| 713 | algt->mask)); | 442 | algt->mask)); |
| @@ -717,40 +446,44 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) | |||
| 717 | enc = crypto_skcipher_spawn_alg(&ctx->enc); | 446 | enc = crypto_skcipher_spawn_alg(&ctx->enc); |
| 718 | 447 | ||
| 719 | err = -ENAMETOOLONG; | 448 | err = -ENAMETOOLONG; |
| 720 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 449 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 721 | "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >= | 450 | "authencesn(%s,%s)", auth_base->cra_name, |
| 722 | CRYPTO_MAX_ALG_NAME) | 451 | enc->cra_name) >= CRYPTO_MAX_ALG_NAME) |
| 723 | goto err_drop_enc; | 452 | goto err_drop_enc; |
| 724 | 453 | ||
| 725 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 454 | if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 726 | "authencesn(%s,%s)", auth_base->cra_driver_name, | 455 | "authencesn(%s,%s)", auth_base->cra_driver_name, |
| 727 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 456 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 728 | goto err_drop_enc; | 457 | goto err_drop_enc; |
| 729 | 458 | ||
| 730 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 459 | inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; |
| 731 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 460 | inst->alg.base.cra_priority = enc->cra_priority * 10 + |
| 732 | inst->alg.cra_priority = enc->cra_priority * | 461 | auth_base->cra_priority; |
| 733 | 10 + auth_base->cra_priority; | 462 | inst->alg.base.cra_blocksize = enc->cra_blocksize; |
| 734 | inst->alg.cra_blocksize = enc->cra_blocksize; | 463 | inst->alg.base.cra_alignmask = auth_base->cra_alignmask | |
| 735 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; | 464 | enc->cra_alignmask; |
| 736 | inst->alg.cra_type = &crypto_aead_type; | 465 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); |
| 466 | |||
| 467 | inst->alg.ivsize = enc->cra_ablkcipher.ivsize; | ||
| 468 | inst->alg.maxauthsize = auth->digestsize; | ||
| 737 | 469 | ||
| 738 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 470 | inst->alg.init = crypto_authenc_esn_init_tfm; |
| 739 | inst->alg.cra_aead.maxauthsize = auth->digestsize; | 471 | inst->alg.exit = crypto_authenc_esn_exit_tfm; |
| 740 | 472 | ||
| 741 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); | 473 | inst->alg.setkey = crypto_authenc_esn_setkey; |
| 474 | inst->alg.setauthsize = crypto_authenc_esn_setauthsize; | ||
| 475 | inst->alg.encrypt = crypto_authenc_esn_encrypt; | ||
| 476 | inst->alg.decrypt = crypto_authenc_esn_decrypt; | ||
| 742 | 477 | ||
| 743 | inst->alg.cra_init = crypto_authenc_esn_init_tfm; | 478 | inst->free = crypto_authenc_esn_free, |
| 744 | inst->alg.cra_exit = crypto_authenc_esn_exit_tfm; | ||
| 745 | 479 | ||
| 746 | inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey; | 480 | err = aead_register_instance(tmpl, inst); |
| 747 | inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt; | 481 | if (err) |
| 748 | inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt; | 482 | goto err_drop_enc; |
| 749 | inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt; | ||
| 750 | 483 | ||
| 751 | out: | 484 | out: |
| 752 | crypto_mod_put(auth_base); | 485 | crypto_mod_put(auth_base); |
| 753 | return inst; | 486 | return err; |
| 754 | 487 | ||
| 755 | err_drop_enc: | 488 | err_drop_enc: |
| 756 | crypto_drop_skcipher(&ctx->enc); | 489 | crypto_drop_skcipher(&ctx->enc); |
| @@ -759,23 +492,12 @@ err_drop_auth: | |||
| 759 | err_free_inst: | 492 | err_free_inst: |
| 760 | kfree(inst); | 493 | kfree(inst); |
| 761 | out_put_auth: | 494 | out_put_auth: |
| 762 | inst = ERR_PTR(err); | ||
| 763 | goto out; | 495 | goto out; |
| 764 | } | 496 | } |
| 765 | 497 | ||
| 766 | static void crypto_authenc_esn_free(struct crypto_instance *inst) | ||
| 767 | { | ||
| 768 | struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
| 769 | |||
| 770 | crypto_drop_skcipher(&ctx->enc); | ||
| 771 | crypto_drop_ahash(&ctx->auth); | ||
| 772 | kfree(inst); | ||
| 773 | } | ||
| 774 | |||
| 775 | static struct crypto_template crypto_authenc_esn_tmpl = { | 498 | static struct crypto_template crypto_authenc_esn_tmpl = { |
| 776 | .name = "authencesn", | 499 | .name = "authencesn", |
| 777 | .alloc = crypto_authenc_esn_alloc, | 500 | .create = crypto_authenc_esn_create, |
| 778 | .free = crypto_authenc_esn_free, | ||
| 779 | .module = THIS_MODULE, | 501 | .module = THIS_MODULE, |
| 780 | }; | 502 | }; |
| 781 | 503 | ||
diff --git a/crypto/ccm.c b/crypto/ccm.c index a4d1a5eda18b..cc31ea4335bf 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c | |||
| @@ -36,14 +36,20 @@ struct crypto_rfc4309_ctx { | |||
| 36 | u8 nonce[3]; | 36 | u8 nonce[3]; |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | struct crypto_rfc4309_req_ctx { | ||
| 40 | struct scatterlist src[3]; | ||
| 41 | struct scatterlist dst[3]; | ||
| 42 | struct aead_request subreq; | ||
| 43 | }; | ||
| 44 | |||
| 39 | struct crypto_ccm_req_priv_ctx { | 45 | struct crypto_ccm_req_priv_ctx { |
| 40 | u8 odata[16]; | 46 | u8 odata[16]; |
| 41 | u8 idata[16]; | 47 | u8 idata[16]; |
| 42 | u8 auth_tag[16]; | 48 | u8 auth_tag[16]; |
| 43 | u32 ilen; | 49 | u32 ilen; |
| 44 | u32 flags; | 50 | u32 flags; |
| 45 | struct scatterlist src[2]; | 51 | struct scatterlist src[3]; |
| 46 | struct scatterlist dst[2]; | 52 | struct scatterlist dst[3]; |
| 47 | struct ablkcipher_request abreq; | 53 | struct ablkcipher_request abreq; |
| 48 | }; | 54 | }; |
| 49 | 55 | ||
| @@ -265,7 +271,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, | |||
| 265 | /* format associated data and compute into mac */ | 271 | /* format associated data and compute into mac */ |
| 266 | if (assoclen) { | 272 | if (assoclen) { |
| 267 | pctx->ilen = format_adata(idata, assoclen); | 273 | pctx->ilen = format_adata(idata, assoclen); |
| 268 | get_data_to_compute(cipher, pctx, req->assoc, req->assoclen); | 274 | get_data_to_compute(cipher, pctx, req->src, req->assoclen); |
| 269 | } else { | 275 | } else { |
| 270 | pctx->ilen = 0; | 276 | pctx->ilen = 0; |
| 271 | } | 277 | } |
| @@ -286,7 +292,8 @@ static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) | |||
| 286 | u8 *odata = pctx->odata; | 292 | u8 *odata = pctx->odata; |
| 287 | 293 | ||
| 288 | if (!err) | 294 | if (!err) |
| 289 | scatterwalk_map_and_copy(odata, req->dst, req->cryptlen, | 295 | scatterwalk_map_and_copy(odata, req->dst, |
| 296 | req->assoclen + req->cryptlen, | ||
| 290 | crypto_aead_authsize(aead), 1); | 297 | crypto_aead_authsize(aead), 1); |
| 291 | aead_request_complete(req, err); | 298 | aead_request_complete(req, err); |
| 292 | } | 299 | } |
| @@ -300,6 +307,41 @@ static inline int crypto_ccm_check_iv(const u8 *iv) | |||
| 300 | return 0; | 307 | return 0; |
| 301 | } | 308 | } |
| 302 | 309 | ||
| 310 | static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag) | ||
| 311 | { | ||
| 312 | struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); | ||
| 313 | struct scatterlist *sg; | ||
| 314 | u8 *iv = req->iv; | ||
| 315 | int err; | ||
| 316 | |||
| 317 | err = crypto_ccm_check_iv(iv); | ||
| 318 | if (err) | ||
| 319 | return err; | ||
| 320 | |||
| 321 | pctx->flags = aead_request_flags(req); | ||
| 322 | |||
| 323 | /* Note: rfc 3610 and NIST 800-38C require counter of | ||
| 324 | * zero to encrypt auth tag. | ||
| 325 | */ | ||
| 326 | memset(iv + 15 - iv[0], 0, iv[0] + 1); | ||
| 327 | |||
| 328 | sg_init_table(pctx->src, 3); | ||
| 329 | sg_set_buf(pctx->src, tag, 16); | ||
| 330 | sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen); | ||
| 331 | if (sg != pctx->src + 1) | ||
| 332 | sg_chain(pctx->src, 2, sg); | ||
| 333 | |||
| 334 | if (req->src != req->dst) { | ||
| 335 | sg_init_table(pctx->dst, 3); | ||
| 336 | sg_set_buf(pctx->dst, tag, 16); | ||
| 337 | sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen); | ||
| 338 | if (sg != pctx->dst + 1) | ||
| 339 | sg_chain(pctx->dst, 2, sg); | ||
| 340 | } | ||
| 341 | |||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 303 | static int crypto_ccm_encrypt(struct aead_request *req) | 345 | static int crypto_ccm_encrypt(struct aead_request *req) |
| 304 | { | 346 | { |
| 305 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 347 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| @@ -312,32 +354,17 @@ static int crypto_ccm_encrypt(struct aead_request *req) | |||
| 312 | u8 *iv = req->iv; | 354 | u8 *iv = req->iv; |
| 313 | int err; | 355 | int err; |
| 314 | 356 | ||
| 315 | err = crypto_ccm_check_iv(iv); | 357 | err = crypto_ccm_init_crypt(req, odata); |
| 316 | if (err) | 358 | if (err) |
| 317 | return err; | 359 | return err; |
| 318 | 360 | ||
| 319 | pctx->flags = aead_request_flags(req); | 361 | err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen); |
| 320 | |||
| 321 | err = crypto_ccm_auth(req, req->src, cryptlen); | ||
| 322 | if (err) | 362 | if (err) |
| 323 | return err; | 363 | return err; |
| 324 | 364 | ||
| 325 | /* Note: rfc 3610 and NIST 800-38C require counter of | ||
| 326 | * zero to encrypt auth tag. | ||
| 327 | */ | ||
| 328 | memset(iv + 15 - iv[0], 0, iv[0] + 1); | ||
| 329 | |||
| 330 | sg_init_table(pctx->src, 2); | ||
| 331 | sg_set_buf(pctx->src, odata, 16); | ||
| 332 | scatterwalk_sg_chain(pctx->src, 2, req->src); | ||
| 333 | |||
| 334 | dst = pctx->src; | 365 | dst = pctx->src; |
| 335 | if (req->src != req->dst) { | 366 | if (req->src != req->dst) |
| 336 | sg_init_table(pctx->dst, 2); | ||
| 337 | sg_set_buf(pctx->dst, odata, 16); | ||
| 338 | scatterwalk_sg_chain(pctx->dst, 2, req->dst); | ||
| 339 | dst = pctx->dst; | 367 | dst = pctx->dst; |
| 340 | } | ||
| 341 | 368 | ||
| 342 | ablkcipher_request_set_tfm(abreq, ctx->ctr); | 369 | ablkcipher_request_set_tfm(abreq, ctx->ctr); |
| 343 | ablkcipher_request_set_callback(abreq, pctx->flags, | 370 | ablkcipher_request_set_callback(abreq, pctx->flags, |
| @@ -348,7 +375,7 @@ static int crypto_ccm_encrypt(struct aead_request *req) | |||
| 348 | return err; | 375 | return err; |
| 349 | 376 | ||
| 350 | /* copy authtag to end of dst */ | 377 | /* copy authtag to end of dst */ |
| 351 | scatterwalk_map_and_copy(odata, req->dst, cryptlen, | 378 | scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen, |
| 352 | crypto_aead_authsize(aead), 1); | 379 | crypto_aead_authsize(aead), 1); |
| 353 | return err; | 380 | return err; |
| 354 | } | 381 | } |
| @@ -361,9 +388,14 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, | |||
| 361 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 388 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 362 | unsigned int authsize = crypto_aead_authsize(aead); | 389 | unsigned int authsize = crypto_aead_authsize(aead); |
| 363 | unsigned int cryptlen = req->cryptlen - authsize; | 390 | unsigned int cryptlen = req->cryptlen - authsize; |
| 391 | struct scatterlist *dst; | ||
| 392 | |||
| 393 | pctx->flags = 0; | ||
| 394 | |||
| 395 | dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst); | ||
| 364 | 396 | ||
| 365 | if (!err) { | 397 | if (!err) { |
| 366 | err = crypto_ccm_auth(req, req->dst, cryptlen); | 398 | err = crypto_ccm_auth(req, dst, cryptlen); |
| 367 | if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) | 399 | if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) |
| 368 | err = -EBADMSG; | 400 | err = -EBADMSG; |
| 369 | } | 401 | } |
| @@ -384,31 +416,18 @@ static int crypto_ccm_decrypt(struct aead_request *req) | |||
| 384 | u8 *iv = req->iv; | 416 | u8 *iv = req->iv; |
| 385 | int err; | 417 | int err; |
| 386 | 418 | ||
| 387 | if (cryptlen < authsize) | ||
| 388 | return -EINVAL; | ||
| 389 | cryptlen -= authsize; | 419 | cryptlen -= authsize; |
| 390 | 420 | ||
| 391 | err = crypto_ccm_check_iv(iv); | 421 | err = crypto_ccm_init_crypt(req, authtag); |
| 392 | if (err) | 422 | if (err) |
| 393 | return err; | 423 | return err; |
| 394 | 424 | ||
| 395 | pctx->flags = aead_request_flags(req); | 425 | scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen, |
| 396 | 426 | authsize, 0); | |
| 397 | scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0); | ||
| 398 | |||
| 399 | memset(iv + 15 - iv[0], 0, iv[0] + 1); | ||
| 400 | |||
| 401 | sg_init_table(pctx->src, 2); | ||
| 402 | sg_set_buf(pctx->src, authtag, 16); | ||
| 403 | scatterwalk_sg_chain(pctx->src, 2, req->src); | ||
| 404 | 427 | ||
| 405 | dst = pctx->src; | 428 | dst = pctx->src; |
| 406 | if (req->src != req->dst) { | 429 | if (req->src != req->dst) |
| 407 | sg_init_table(pctx->dst, 2); | ||
| 408 | sg_set_buf(pctx->dst, authtag, 16); | ||
| 409 | scatterwalk_sg_chain(pctx->dst, 2, req->dst); | ||
| 410 | dst = pctx->dst; | 430 | dst = pctx->dst; |
| 411 | } | ||
| 412 | 431 | ||
| 413 | ablkcipher_request_set_tfm(abreq, ctx->ctr); | 432 | ablkcipher_request_set_tfm(abreq, ctx->ctr); |
| 414 | ablkcipher_request_set_callback(abreq, pctx->flags, | 433 | ablkcipher_request_set_callback(abreq, pctx->flags, |
| @@ -418,7 +437,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) | |||
| 418 | if (err) | 437 | if (err) |
| 419 | return err; | 438 | return err; |
| 420 | 439 | ||
| 421 | err = crypto_ccm_auth(req, req->dst, cryptlen); | 440 | err = crypto_ccm_auth(req, sg_next(dst), cryptlen); |
| 422 | if (err) | 441 | if (err) |
| 423 | return err; | 442 | return err; |
| 424 | 443 | ||
| @@ -429,11 +448,11 @@ static int crypto_ccm_decrypt(struct aead_request *req) | |||
| 429 | return err; | 448 | return err; |
| 430 | } | 449 | } |
| 431 | 450 | ||
| 432 | static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) | 451 | static int crypto_ccm_init_tfm(struct crypto_aead *tfm) |
| 433 | { | 452 | { |
| 434 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 453 | struct aead_instance *inst = aead_alg_instance(tfm); |
| 435 | struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst); | 454 | struct ccm_instance_ctx *ictx = aead_instance_ctx(inst); |
| 436 | struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); | 455 | struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); |
| 437 | struct crypto_cipher *cipher; | 456 | struct crypto_cipher *cipher; |
| 438 | struct crypto_ablkcipher *ctr; | 457 | struct crypto_ablkcipher *ctr; |
| 439 | unsigned long align; | 458 | unsigned long align; |
| @@ -451,9 +470,10 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) | |||
| 451 | ctx->cipher = cipher; | 470 | ctx->cipher = cipher; |
| 452 | ctx->ctr = ctr; | 471 | ctx->ctr = ctr; |
| 453 | 472 | ||
| 454 | align = crypto_tfm_alg_alignmask(tfm); | 473 | align = crypto_aead_alignmask(tfm); |
| 455 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 474 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
| 456 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 475 | crypto_aead_set_reqsize( |
| 476 | tfm, | ||
| 457 | align + sizeof(struct crypto_ccm_req_priv_ctx) + | 477 | align + sizeof(struct crypto_ccm_req_priv_ctx) + |
| 458 | crypto_ablkcipher_reqsize(ctr)); | 478 | crypto_ablkcipher_reqsize(ctr)); |
| 459 | 479 | ||
| @@ -464,21 +484,31 @@ err_free_cipher: | |||
| 464 | return err; | 484 | return err; |
| 465 | } | 485 | } |
| 466 | 486 | ||
| 467 | static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm) | 487 | static void crypto_ccm_exit_tfm(struct crypto_aead *tfm) |
| 468 | { | 488 | { |
| 469 | struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); | 489 | struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); |
| 470 | 490 | ||
| 471 | crypto_free_cipher(ctx->cipher); | 491 | crypto_free_cipher(ctx->cipher); |
| 472 | crypto_free_ablkcipher(ctx->ctr); | 492 | crypto_free_ablkcipher(ctx->ctr); |
| 473 | } | 493 | } |
| 474 | 494 | ||
| 475 | static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, | 495 | static void crypto_ccm_free(struct aead_instance *inst) |
| 476 | const char *full_name, | 496 | { |
| 477 | const char *ctr_name, | 497 | struct ccm_instance_ctx *ctx = aead_instance_ctx(inst); |
| 478 | const char *cipher_name) | 498 | |
| 499 | crypto_drop_spawn(&ctx->cipher); | ||
| 500 | crypto_drop_skcipher(&ctx->ctr); | ||
| 501 | kfree(inst); | ||
| 502 | } | ||
| 503 | |||
| 504 | static int crypto_ccm_create_common(struct crypto_template *tmpl, | ||
| 505 | struct rtattr **tb, | ||
| 506 | const char *full_name, | ||
| 507 | const char *ctr_name, | ||
| 508 | const char *cipher_name) | ||
| 479 | { | 509 | { |
| 480 | struct crypto_attr_type *algt; | 510 | struct crypto_attr_type *algt; |
| 481 | struct crypto_instance *inst; | 511 | struct aead_instance *inst; |
| 482 | struct crypto_alg *ctr; | 512 | struct crypto_alg *ctr; |
| 483 | struct crypto_alg *cipher; | 513 | struct crypto_alg *cipher; |
| 484 | struct ccm_instance_ctx *ictx; | 514 | struct ccm_instance_ctx *ictx; |
| @@ -486,15 +516,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, | |||
| 486 | 516 | ||
| 487 | algt = crypto_get_attr_type(tb); | 517 | algt = crypto_get_attr_type(tb); |
| 488 | if (IS_ERR(algt)) | 518 | if (IS_ERR(algt)) |
| 489 | return ERR_CAST(algt); | 519 | return PTR_ERR(algt); |
| 490 | 520 | ||
| 491 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 521 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 492 | return ERR_PTR(-EINVAL); | 522 | return -EINVAL; |
| 493 | 523 | ||
| 494 | cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, | 524 | cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, |
| 495 | CRYPTO_ALG_TYPE_MASK); | 525 | CRYPTO_ALG_TYPE_MASK); |
| 496 | if (IS_ERR(cipher)) | 526 | if (IS_ERR(cipher)) |
| 497 | return ERR_CAST(cipher); | 527 | return PTR_ERR(cipher); |
| 498 | 528 | ||
| 499 | err = -EINVAL; | 529 | err = -EINVAL; |
| 500 | if (cipher->cra_blocksize != 16) | 530 | if (cipher->cra_blocksize != 16) |
| @@ -505,14 +535,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, | |||
| 505 | if (!inst) | 535 | if (!inst) |
| 506 | goto out_put_cipher; | 536 | goto out_put_cipher; |
| 507 | 537 | ||
| 508 | ictx = crypto_instance_ctx(inst); | 538 | ictx = aead_instance_ctx(inst); |
| 509 | 539 | ||
| 510 | err = crypto_init_spawn(&ictx->cipher, cipher, inst, | 540 | err = crypto_init_spawn(&ictx->cipher, cipher, |
| 541 | aead_crypto_instance(inst), | ||
| 511 | CRYPTO_ALG_TYPE_MASK); | 542 | CRYPTO_ALG_TYPE_MASK); |
| 512 | if (err) | 543 | if (err) |
| 513 | goto err_free_inst; | 544 | goto err_free_inst; |
| 514 | 545 | ||
| 515 | crypto_set_skcipher_spawn(&ictx->ctr, inst); | 546 | crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); |
| 516 | err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, | 547 | err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, |
| 517 | crypto_requires_sync(algt->type, | 548 | crypto_requires_sync(algt->type, |
| 518 | algt->mask)); | 549 | algt->mask)); |
| @@ -531,33 +562,39 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, | |||
| 531 | goto err_drop_ctr; | 562 | goto err_drop_ctr; |
| 532 | 563 | ||
| 533 | err = -ENAMETOOLONG; | 564 | err = -ENAMETOOLONG; |
| 534 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 565 | if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 535 | "ccm_base(%s,%s)", ctr->cra_driver_name, | 566 | "ccm_base(%s,%s)", ctr->cra_driver_name, |
| 536 | cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 567 | cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 537 | goto err_drop_ctr; | 568 | goto err_drop_ctr; |
| 538 | 569 | ||
| 539 | memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); | 570 | memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); |
| 540 | 571 | ||
| 541 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 572 | inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC; |
| 542 | inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; | 573 | inst->alg.base.cra_priority = (cipher->cra_priority + |
| 543 | inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority; | 574 | ctr->cra_priority) / 2; |
| 544 | inst->alg.cra_blocksize = 1; | 575 | inst->alg.base.cra_blocksize = 1; |
| 545 | inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask | | 576 | inst->alg.base.cra_alignmask = cipher->cra_alignmask | |
| 546 | (__alignof__(u32) - 1); | 577 | ctr->cra_alignmask | |
| 547 | inst->alg.cra_type = &crypto_aead_type; | 578 | (__alignof__(u32) - 1); |
| 548 | inst->alg.cra_aead.ivsize = 16; | 579 | inst->alg.ivsize = 16; |
| 549 | inst->alg.cra_aead.maxauthsize = 16; | 580 | inst->alg.maxauthsize = 16; |
| 550 | inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx); | 581 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); |
| 551 | inst->alg.cra_init = crypto_ccm_init_tfm; | 582 | inst->alg.init = crypto_ccm_init_tfm; |
| 552 | inst->alg.cra_exit = crypto_ccm_exit_tfm; | 583 | inst->alg.exit = crypto_ccm_exit_tfm; |
| 553 | inst->alg.cra_aead.setkey = crypto_ccm_setkey; | 584 | inst->alg.setkey = crypto_ccm_setkey; |
| 554 | inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize; | 585 | inst->alg.setauthsize = crypto_ccm_setauthsize; |
| 555 | inst->alg.cra_aead.encrypt = crypto_ccm_encrypt; | 586 | inst->alg.encrypt = crypto_ccm_encrypt; |
| 556 | inst->alg.cra_aead.decrypt = crypto_ccm_decrypt; | 587 | inst->alg.decrypt = crypto_ccm_decrypt; |
| 588 | |||
| 589 | inst->free = crypto_ccm_free; | ||
| 590 | |||
| 591 | err = aead_register_instance(tmpl, inst); | ||
| 592 | if (err) | ||
| 593 | goto err_drop_ctr; | ||
| 557 | 594 | ||
| 558 | out: | 595 | out_put_cipher: |
| 559 | crypto_mod_put(cipher); | 596 | crypto_mod_put(cipher); |
| 560 | return inst; | 597 | return err; |
| 561 | 598 | ||
| 562 | err_drop_ctr: | 599 | err_drop_ctr: |
| 563 | crypto_drop_skcipher(&ictx->ctr); | 600 | crypto_drop_skcipher(&ictx->ctr); |
| @@ -565,12 +602,10 @@ err_drop_cipher: | |||
| 565 | crypto_drop_spawn(&ictx->cipher); | 602 | crypto_drop_spawn(&ictx->cipher); |
| 566 | err_free_inst: | 603 | err_free_inst: |
| 567 | kfree(inst); | 604 | kfree(inst); |
| 568 | out_put_cipher: | 605 | goto out_put_cipher; |
| 569 | inst = ERR_PTR(err); | ||
| 570 | goto out; | ||
| 571 | } | 606 | } |
| 572 | 607 | ||
| 573 | static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb) | 608 | static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 574 | { | 609 | { |
| 575 | const char *cipher_name; | 610 | const char *cipher_name; |
| 576 | char ctr_name[CRYPTO_MAX_ALG_NAME]; | 611 | char ctr_name[CRYPTO_MAX_ALG_NAME]; |
| @@ -578,36 +613,28 @@ static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb) | |||
| 578 | 613 | ||
| 579 | cipher_name = crypto_attr_alg_name(tb[1]); | 614 | cipher_name = crypto_attr_alg_name(tb[1]); |
| 580 | if (IS_ERR(cipher_name)) | 615 | if (IS_ERR(cipher_name)) |
| 581 | return ERR_CAST(cipher_name); | 616 | return PTR_ERR(cipher_name); |
| 582 | 617 | ||
| 583 | if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", | 618 | if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", |
| 584 | cipher_name) >= CRYPTO_MAX_ALG_NAME) | 619 | cipher_name) >= CRYPTO_MAX_ALG_NAME) |
| 585 | return ERR_PTR(-ENAMETOOLONG); | 620 | return -ENAMETOOLONG; |
| 586 | 621 | ||
| 587 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= | 622 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= |
| 588 | CRYPTO_MAX_ALG_NAME) | 623 | CRYPTO_MAX_ALG_NAME) |
| 589 | return ERR_PTR(-ENAMETOOLONG); | 624 | return -ENAMETOOLONG; |
| 590 | 625 | ||
| 591 | return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); | 626 | return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, |
| 592 | } | 627 | cipher_name); |
| 593 | |||
| 594 | static void crypto_ccm_free(struct crypto_instance *inst) | ||
| 595 | { | ||
| 596 | struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
| 597 | |||
| 598 | crypto_drop_spawn(&ctx->cipher); | ||
| 599 | crypto_drop_skcipher(&ctx->ctr); | ||
| 600 | kfree(inst); | ||
| 601 | } | 628 | } |
| 602 | 629 | ||
| 603 | static struct crypto_template crypto_ccm_tmpl = { | 630 | static struct crypto_template crypto_ccm_tmpl = { |
| 604 | .name = "ccm", | 631 | .name = "ccm", |
| 605 | .alloc = crypto_ccm_alloc, | 632 | .create = crypto_ccm_create, |
| 606 | .free = crypto_ccm_free, | ||
| 607 | .module = THIS_MODULE, | 633 | .module = THIS_MODULE, |
| 608 | }; | 634 | }; |
| 609 | 635 | ||
| 610 | static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb) | 636 | static int crypto_ccm_base_create(struct crypto_template *tmpl, |
| 637 | struct rtattr **tb) | ||
| 611 | { | 638 | { |
| 612 | const char *ctr_name; | 639 | const char *ctr_name; |
| 613 | const char *cipher_name; | 640 | const char *cipher_name; |
| @@ -615,23 +642,23 @@ static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb) | |||
| 615 | 642 | ||
| 616 | ctr_name = crypto_attr_alg_name(tb[1]); | 643 | ctr_name = crypto_attr_alg_name(tb[1]); |
| 617 | if (IS_ERR(ctr_name)) | 644 | if (IS_ERR(ctr_name)) |
| 618 | return ERR_CAST(ctr_name); | 645 | return PTR_ERR(ctr_name); |
| 619 | 646 | ||
| 620 | cipher_name = crypto_attr_alg_name(tb[2]); | 647 | cipher_name = crypto_attr_alg_name(tb[2]); |
| 621 | if (IS_ERR(cipher_name)) | 648 | if (IS_ERR(cipher_name)) |
| 622 | return ERR_CAST(cipher_name); | 649 | return PTR_ERR(cipher_name); |
| 623 | 650 | ||
| 624 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", | 651 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", |
| 625 | ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) | 652 | ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) |
| 626 | return ERR_PTR(-ENAMETOOLONG); | 653 | return -ENAMETOOLONG; |
| 627 | 654 | ||
| 628 | return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); | 655 | return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, |
| 656 | cipher_name); | ||
| 629 | } | 657 | } |
| 630 | 658 | ||
| 631 | static struct crypto_template crypto_ccm_base_tmpl = { | 659 | static struct crypto_template crypto_ccm_base_tmpl = { |
| 632 | .name = "ccm_base", | 660 | .name = "ccm_base", |
| 633 | .alloc = crypto_ccm_base_alloc, | 661 | .create = crypto_ccm_base_create, |
| 634 | .free = crypto_ccm_free, | ||
| 635 | .module = THIS_MODULE, | 662 | .module = THIS_MODULE, |
| 636 | }; | 663 | }; |
| 637 | 664 | ||
| @@ -677,10 +704,12 @@ static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, | |||
| 677 | 704 | ||
| 678 | static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) | 705 | static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) |
| 679 | { | 706 | { |
| 680 | struct aead_request *subreq = aead_request_ctx(req); | 707 | struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req); |
| 708 | struct aead_request *subreq = &rctx->subreq; | ||
| 681 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 709 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 682 | struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead); | 710 | struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead); |
| 683 | struct crypto_aead *child = ctx->child; | 711 | struct crypto_aead *child = ctx->child; |
| 712 | struct scatterlist *sg; | ||
| 684 | u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), | 713 | u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), |
| 685 | crypto_aead_alignmask(child) + 1); | 714 | crypto_aead_alignmask(child) + 1); |
| 686 | 715 | ||
| @@ -690,17 +719,38 @@ static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) | |||
| 690 | memcpy(iv + 1, ctx->nonce, 3); | 719 | memcpy(iv + 1, ctx->nonce, 3); |
| 691 | memcpy(iv + 4, req->iv, 8); | 720 | memcpy(iv + 4, req->iv, 8); |
| 692 | 721 | ||
| 722 | scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0); | ||
| 723 | |||
| 724 | sg_init_table(rctx->src, 3); | ||
| 725 | sg_set_buf(rctx->src, iv + 16, req->assoclen - 8); | ||
| 726 | sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen); | ||
| 727 | if (sg != rctx->src + 1) | ||
| 728 | sg_chain(rctx->src, 2, sg); | ||
| 729 | |||
| 730 | if (req->src != req->dst) { | ||
| 731 | sg_init_table(rctx->dst, 3); | ||
| 732 | sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8); | ||
| 733 | sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen); | ||
| 734 | if (sg != rctx->dst + 1) | ||
| 735 | sg_chain(rctx->dst, 2, sg); | ||
| 736 | } | ||
| 737 | |||
| 693 | aead_request_set_tfm(subreq, child); | 738 | aead_request_set_tfm(subreq, child); |
| 694 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, | 739 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, |
| 695 | req->base.data); | 740 | req->base.data); |
| 696 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); | 741 | aead_request_set_crypt(subreq, rctx->src, |
| 697 | aead_request_set_assoc(subreq, req->assoc, req->assoclen); | 742 | req->src == req->dst ? rctx->src : rctx->dst, |
| 743 | req->cryptlen, iv); | ||
| 744 | aead_request_set_ad(subreq, req->assoclen - 8); | ||
| 698 | 745 | ||
| 699 | return subreq; | 746 | return subreq; |
| 700 | } | 747 | } |
| 701 | 748 | ||
| 702 | static int crypto_rfc4309_encrypt(struct aead_request *req) | 749 | static int crypto_rfc4309_encrypt(struct aead_request *req) |
| 703 | { | 750 | { |
| 751 | if (req->assoclen != 16 && req->assoclen != 20) | ||
| 752 | return -EINVAL; | ||
| 753 | |||
| 704 | req = crypto_rfc4309_crypt(req); | 754 | req = crypto_rfc4309_crypt(req); |
| 705 | 755 | ||
| 706 | return crypto_aead_encrypt(req); | 756 | return crypto_aead_encrypt(req); |
| @@ -708,16 +758,19 @@ static int crypto_rfc4309_encrypt(struct aead_request *req) | |||
| 708 | 758 | ||
| 709 | static int crypto_rfc4309_decrypt(struct aead_request *req) | 759 | static int crypto_rfc4309_decrypt(struct aead_request *req) |
| 710 | { | 760 | { |
| 761 | if (req->assoclen != 16 && req->assoclen != 20) | ||
| 762 | return -EINVAL; | ||
| 763 | |||
| 711 | req = crypto_rfc4309_crypt(req); | 764 | req = crypto_rfc4309_crypt(req); |
| 712 | 765 | ||
| 713 | return crypto_aead_decrypt(req); | 766 | return crypto_aead_decrypt(req); |
| 714 | } | 767 | } |
| 715 | 768 | ||
| 716 | static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm) | 769 | static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm) |
| 717 | { | 770 | { |
| 718 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 771 | struct aead_instance *inst = aead_alg_instance(tfm); |
| 719 | struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst); | 772 | struct crypto_aead_spawn *spawn = aead_instance_ctx(inst); |
| 720 | struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); | 773 | struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm); |
| 721 | struct crypto_aead *aead; | 774 | struct crypto_aead *aead; |
| 722 | unsigned long align; | 775 | unsigned long align; |
| 723 | 776 | ||
| @@ -729,115 +782,118 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm) | |||
| 729 | 782 | ||
| 730 | align = crypto_aead_alignmask(aead); | 783 | align = crypto_aead_alignmask(aead); |
| 731 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 784 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
| 732 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 785 | crypto_aead_set_reqsize( |
| 733 | sizeof(struct aead_request) + | 786 | tfm, |
| 787 | sizeof(struct crypto_rfc4309_req_ctx) + | ||
| 734 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + | 788 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + |
| 735 | align + 16); | 789 | align + 32); |
| 736 | 790 | ||
| 737 | return 0; | 791 | return 0; |
| 738 | } | 792 | } |
| 739 | 793 | ||
| 740 | static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm) | 794 | static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm) |
| 741 | { | 795 | { |
| 742 | struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); | 796 | struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm); |
| 743 | 797 | ||
| 744 | crypto_free_aead(ctx->child); | 798 | crypto_free_aead(ctx->child); |
| 745 | } | 799 | } |
| 746 | 800 | ||
| 747 | static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb) | 801 | static void crypto_rfc4309_free(struct aead_instance *inst) |
| 802 | { | ||
| 803 | crypto_drop_aead(aead_instance_ctx(inst)); | ||
| 804 | kfree(inst); | ||
| 805 | } | ||
| 806 | |||
| 807 | static int crypto_rfc4309_create(struct crypto_template *tmpl, | ||
| 808 | struct rtattr **tb) | ||
| 748 | { | 809 | { |
| 749 | struct crypto_attr_type *algt; | 810 | struct crypto_attr_type *algt; |
| 750 | struct crypto_instance *inst; | 811 | struct aead_instance *inst; |
| 751 | struct crypto_aead_spawn *spawn; | 812 | struct crypto_aead_spawn *spawn; |
| 752 | struct crypto_alg *alg; | 813 | struct aead_alg *alg; |
| 753 | const char *ccm_name; | 814 | const char *ccm_name; |
| 754 | int err; | 815 | int err; |
| 755 | 816 | ||
| 756 | algt = crypto_get_attr_type(tb); | 817 | algt = crypto_get_attr_type(tb); |
| 757 | if (IS_ERR(algt)) | 818 | if (IS_ERR(algt)) |
| 758 | return ERR_CAST(algt); | 819 | return PTR_ERR(algt); |
| 759 | 820 | ||
| 760 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 821 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 761 | return ERR_PTR(-EINVAL); | 822 | return -EINVAL; |
| 762 | 823 | ||
| 763 | ccm_name = crypto_attr_alg_name(tb[1]); | 824 | ccm_name = crypto_attr_alg_name(tb[1]); |
| 764 | if (IS_ERR(ccm_name)) | 825 | if (IS_ERR(ccm_name)) |
| 765 | return ERR_CAST(ccm_name); | 826 | return PTR_ERR(ccm_name); |
| 766 | 827 | ||
| 767 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | 828 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); |
| 768 | if (!inst) | 829 | if (!inst) |
| 769 | return ERR_PTR(-ENOMEM); | 830 | return -ENOMEM; |
| 770 | 831 | ||
| 771 | spawn = crypto_instance_ctx(inst); | 832 | spawn = aead_instance_ctx(inst); |
| 772 | crypto_set_aead_spawn(spawn, inst); | 833 | crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); |
| 773 | err = crypto_grab_aead(spawn, ccm_name, 0, | 834 | err = crypto_grab_aead(spawn, ccm_name, 0, |
| 774 | crypto_requires_sync(algt->type, algt->mask)); | 835 | crypto_requires_sync(algt->type, algt->mask)); |
| 775 | if (err) | 836 | if (err) |
| 776 | goto out_free_inst; | 837 | goto out_free_inst; |
| 777 | 838 | ||
| 778 | alg = crypto_aead_spawn_alg(spawn); | 839 | alg = crypto_spawn_aead_alg(spawn); |
| 779 | 840 | ||
| 780 | err = -EINVAL; | 841 | err = -EINVAL; |
| 781 | 842 | ||
| 782 | /* We only support 16-byte blocks. */ | 843 | /* We only support 16-byte blocks. */ |
| 783 | if (alg->cra_aead.ivsize != 16) | 844 | if (crypto_aead_alg_ivsize(alg) != 16) |
| 784 | goto out_drop_alg; | 845 | goto out_drop_alg; |
| 785 | 846 | ||
| 786 | /* Not a stream cipher? */ | 847 | /* Not a stream cipher? */ |
| 787 | if (alg->cra_blocksize != 1) | 848 | if (alg->base.cra_blocksize != 1) |
| 788 | goto out_drop_alg; | 849 | goto out_drop_alg; |
| 789 | 850 | ||
| 790 | err = -ENAMETOOLONG; | 851 | err = -ENAMETOOLONG; |
| 791 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 852 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 792 | "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || | 853 | "rfc4309(%s)", alg->base.cra_name) >= |
| 793 | snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 854 | CRYPTO_MAX_ALG_NAME || |
| 794 | "rfc4309(%s)", alg->cra_driver_name) >= | 855 | snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 856 | "rfc4309(%s)", alg->base.cra_driver_name) >= | ||
| 795 | CRYPTO_MAX_ALG_NAME) | 857 | CRYPTO_MAX_ALG_NAME) |
| 796 | goto out_drop_alg; | 858 | goto out_drop_alg; |
| 797 | 859 | ||
| 798 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 860 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
| 799 | inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; | 861 | inst->alg.base.cra_priority = alg->base.cra_priority; |
| 800 | inst->alg.cra_priority = alg->cra_priority; | 862 | inst->alg.base.cra_blocksize = 1; |
| 801 | inst->alg.cra_blocksize = 1; | 863 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; |
| 802 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
| 803 | inst->alg.cra_type = &crypto_nivaead_type; | ||
| 804 | 864 | ||
| 805 | inst->alg.cra_aead.ivsize = 8; | 865 | inst->alg.ivsize = 8; |
| 806 | inst->alg.cra_aead.maxauthsize = 16; | 866 | inst->alg.maxauthsize = 16; |
| 807 | 867 | ||
| 808 | inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); | 868 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); |
| 809 | 869 | ||
| 810 | inst->alg.cra_init = crypto_rfc4309_init_tfm; | 870 | inst->alg.init = crypto_rfc4309_init_tfm; |
| 811 | inst->alg.cra_exit = crypto_rfc4309_exit_tfm; | 871 | inst->alg.exit = crypto_rfc4309_exit_tfm; |
| 812 | 872 | ||
| 813 | inst->alg.cra_aead.setkey = crypto_rfc4309_setkey; | 873 | inst->alg.setkey = crypto_rfc4309_setkey; |
| 814 | inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize; | 874 | inst->alg.setauthsize = crypto_rfc4309_setauthsize; |
| 815 | inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt; | 875 | inst->alg.encrypt = crypto_rfc4309_encrypt; |
| 816 | inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt; | 876 | inst->alg.decrypt = crypto_rfc4309_decrypt; |
| 817 | 877 | ||
| 818 | inst->alg.cra_aead.geniv = "seqiv"; | 878 | inst->free = crypto_rfc4309_free; |
| 879 | |||
| 880 | err = aead_register_instance(tmpl, inst); | ||
| 881 | if (err) | ||
| 882 | goto out_drop_alg; | ||
| 819 | 883 | ||
| 820 | out: | 884 | out: |
| 821 | return inst; | 885 | return err; |
| 822 | 886 | ||
| 823 | out_drop_alg: | 887 | out_drop_alg: |
| 824 | crypto_drop_aead(spawn); | 888 | crypto_drop_aead(spawn); |
| 825 | out_free_inst: | 889 | out_free_inst: |
| 826 | kfree(inst); | 890 | kfree(inst); |
| 827 | inst = ERR_PTR(err); | ||
| 828 | goto out; | 891 | goto out; |
| 829 | } | 892 | } |
| 830 | 893 | ||
| 831 | static void crypto_rfc4309_free(struct crypto_instance *inst) | ||
| 832 | { | ||
| 833 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
| 834 | kfree(inst); | ||
| 835 | } | ||
| 836 | |||
| 837 | static struct crypto_template crypto_rfc4309_tmpl = { | 894 | static struct crypto_template crypto_rfc4309_tmpl = { |
| 838 | .name = "rfc4309", | 895 | .name = "rfc4309", |
| 839 | .alloc = crypto_rfc4309_alloc, | 896 | .create = crypto_rfc4309_create, |
| 840 | .free = crypto_rfc4309_free, | ||
| 841 | .module = THIS_MODULE, | 897 | .module = THIS_MODULE, |
| 842 | }; | 898 | }; |
| 843 | 899 | ||
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index fa42e708aa96..da9c89968223 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c | |||
| @@ -13,14 +13,7 @@ | |||
| 13 | #include <linux/crypto.h> | 13 | #include <linux/crypto.h> |
| 14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | 16 | #include <crypto/chacha20.h> | |
| 17 | #define CHACHA20_NONCE_SIZE 16 | ||
| 18 | #define CHACHA20_KEY_SIZE 32 | ||
| 19 | #define CHACHA20_BLOCK_SIZE 64 | ||
| 20 | |||
| 21 | struct chacha20_ctx { | ||
| 22 | u32 key[8]; | ||
| 23 | }; | ||
| 24 | 17 | ||
| 25 | static inline u32 rotl32(u32 v, u8 n) | 18 | static inline u32 rotl32(u32 v, u8 n) |
| 26 | { | 19 | { |
| @@ -108,7 +101,7 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, | |||
| 108 | } | 101 | } |
| 109 | } | 102 | } |
| 110 | 103 | ||
| 111 | static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) | 104 | void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) |
| 112 | { | 105 | { |
| 113 | static const char constant[16] = "expand 32-byte k"; | 106 | static const char constant[16] = "expand 32-byte k"; |
| 114 | 107 | ||
| @@ -129,8 +122,9 @@ static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) | |||
| 129 | state[14] = le32_to_cpuvp(iv + 8); | 122 | state[14] = le32_to_cpuvp(iv + 8); |
| 130 | state[15] = le32_to_cpuvp(iv + 12); | 123 | state[15] = le32_to_cpuvp(iv + 12); |
| 131 | } | 124 | } |
| 125 | EXPORT_SYMBOL_GPL(crypto_chacha20_init); | ||
| 132 | 126 | ||
| 133 | static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, | 127 | int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, |
| 134 | unsigned int keysize) | 128 | unsigned int keysize) |
| 135 | { | 129 | { |
| 136 | struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm); | 130 | struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm); |
| @@ -144,8 +138,9 @@ static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
| 144 | 138 | ||
| 145 | return 0; | 139 | return 0; |
| 146 | } | 140 | } |
| 141 | EXPORT_SYMBOL_GPL(crypto_chacha20_setkey); | ||
| 147 | 142 | ||
| 148 | static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | 143 | int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, |
| 149 | struct scatterlist *src, unsigned int nbytes) | 144 | struct scatterlist *src, unsigned int nbytes) |
| 150 | { | 145 | { |
| 151 | struct blkcipher_walk walk; | 146 | struct blkcipher_walk walk; |
| @@ -155,7 +150,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
| 155 | blkcipher_walk_init(&walk, dst, src, nbytes); | 150 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 156 | err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); | 151 | err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); |
| 157 | 152 | ||
| 158 | chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); | 153 | crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); |
| 159 | 154 | ||
| 160 | while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { | 155 | while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { |
| 161 | chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, | 156 | chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, |
| @@ -172,6 +167,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
| 172 | 167 | ||
| 173 | return err; | 168 | return err; |
| 174 | } | 169 | } |
| 170 | EXPORT_SYMBOL_GPL(crypto_chacha20_crypt); | ||
| 175 | 171 | ||
| 176 | static struct crypto_alg alg = { | 172 | static struct crypto_alg alg = { |
| 177 | .cra_name = "chacha20", | 173 | .cra_name = "chacha20", |
| @@ -187,11 +183,11 @@ static struct crypto_alg alg = { | |||
| 187 | .blkcipher = { | 183 | .blkcipher = { |
| 188 | .min_keysize = CHACHA20_KEY_SIZE, | 184 | .min_keysize = CHACHA20_KEY_SIZE, |
| 189 | .max_keysize = CHACHA20_KEY_SIZE, | 185 | .max_keysize = CHACHA20_KEY_SIZE, |
| 190 | .ivsize = CHACHA20_NONCE_SIZE, | 186 | .ivsize = CHACHA20_IV_SIZE, |
| 191 | .geniv = "seqiv", | 187 | .geniv = "seqiv", |
| 192 | .setkey = chacha20_setkey, | 188 | .setkey = crypto_chacha20_setkey, |
| 193 | .encrypt = chacha20_crypt, | 189 | .encrypt = crypto_chacha20_crypt, |
| 194 | .decrypt = chacha20_crypt, | 190 | .decrypt = crypto_chacha20_crypt, |
| 195 | }, | 191 | }, |
| 196 | }, | 192 | }, |
| 197 | }; | 193 | }; |
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 7b46ed799a64..99c3cce01290 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | #include <crypto/internal/hash.h> | 13 | #include <crypto/internal/hash.h> |
| 14 | #include <crypto/internal/skcipher.h> | 14 | #include <crypto/internal/skcipher.h> |
| 15 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
| 16 | #include <crypto/chacha20.h> | ||
| 17 | #include <crypto/poly1305.h> | ||
| 16 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 17 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 18 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| @@ -20,11 +22,6 @@ | |||
| 20 | 22 | ||
| 21 | #include "internal.h" | 23 | #include "internal.h" |
| 22 | 24 | ||
| 23 | #define POLY1305_BLOCK_SIZE 16 | ||
| 24 | #define POLY1305_DIGEST_SIZE 16 | ||
| 25 | #define POLY1305_KEY_SIZE 32 | ||
| 26 | #define CHACHA20_KEY_SIZE 32 | ||
| 27 | #define CHACHA20_IV_SIZE 16 | ||
| 28 | #define CHACHAPOLY_IV_SIZE 12 | 25 | #define CHACHAPOLY_IV_SIZE 12 |
| 29 | 26 | ||
| 30 | struct chachapoly_instance_ctx { | 27 | struct chachapoly_instance_ctx { |
| @@ -60,12 +57,16 @@ struct chacha_req { | |||
| 60 | }; | 57 | }; |
| 61 | 58 | ||
| 62 | struct chachapoly_req_ctx { | 59 | struct chachapoly_req_ctx { |
| 60 | struct scatterlist src[2]; | ||
| 61 | struct scatterlist dst[2]; | ||
| 63 | /* the key we generate for Poly1305 using Chacha20 */ | 62 | /* the key we generate for Poly1305 using Chacha20 */ |
| 64 | u8 key[POLY1305_KEY_SIZE]; | 63 | u8 key[POLY1305_KEY_SIZE]; |
| 65 | /* calculated Poly1305 tag */ | 64 | /* calculated Poly1305 tag */ |
| 66 | u8 tag[POLY1305_DIGEST_SIZE]; | 65 | u8 tag[POLY1305_DIGEST_SIZE]; |
| 67 | /* length of data to en/decrypt, without ICV */ | 66 | /* length of data to en/decrypt, without ICV */ |
| 68 | unsigned int cryptlen; | 67 | unsigned int cryptlen; |
| 68 | /* Actual AD, excluding IV */ | ||
| 69 | unsigned int assoclen; | ||
| 69 | union { | 70 | union { |
| 70 | struct poly_req poly; | 71 | struct poly_req poly; |
| 71 | struct chacha_req chacha; | 72 | struct chacha_req chacha; |
| @@ -98,7 +99,9 @@ static int poly_verify_tag(struct aead_request *req) | |||
| 98 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 99 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 99 | u8 tag[sizeof(rctx->tag)]; | 100 | u8 tag[sizeof(rctx->tag)]; |
| 100 | 101 | ||
| 101 | scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0); | 102 | scatterwalk_map_and_copy(tag, req->src, |
| 103 | req->assoclen + rctx->cryptlen, | ||
| 104 | sizeof(tag), 0); | ||
| 102 | if (crypto_memneq(tag, rctx->tag, sizeof(tag))) | 105 | if (crypto_memneq(tag, rctx->tag, sizeof(tag))) |
| 103 | return -EBADMSG; | 106 | return -EBADMSG; |
| 104 | return 0; | 107 | return 0; |
| @@ -108,7 +111,8 @@ static int poly_copy_tag(struct aead_request *req) | |||
| 108 | { | 111 | { |
| 109 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 112 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 110 | 113 | ||
| 111 | scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen, | 114 | scatterwalk_map_and_copy(rctx->tag, req->dst, |
| 115 | req->assoclen + rctx->cryptlen, | ||
| 112 | sizeof(rctx->tag), 1); | 116 | sizeof(rctx->tag), 1); |
| 113 | return 0; | 117 | return 0; |
| 114 | } | 118 | } |
| @@ -123,14 +127,24 @@ static int chacha_decrypt(struct aead_request *req) | |||
| 123 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 127 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 124 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 128 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 125 | struct chacha_req *creq = &rctx->u.chacha; | 129 | struct chacha_req *creq = &rctx->u.chacha; |
| 130 | struct scatterlist *src, *dst; | ||
| 126 | int err; | 131 | int err; |
| 127 | 132 | ||
| 128 | chacha_iv(creq->iv, req, 1); | 133 | chacha_iv(creq->iv, req, 1); |
| 129 | 134 | ||
| 135 | sg_init_table(rctx->src, 2); | ||
| 136 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); | ||
| 137 | dst = src; | ||
| 138 | |||
| 139 | if (req->src != req->dst) { | ||
| 140 | sg_init_table(rctx->dst, 2); | ||
| 141 | dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); | ||
| 142 | } | ||
| 143 | |||
| 130 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), | 144 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), |
| 131 | chacha_decrypt_done, req); | 145 | chacha_decrypt_done, req); |
| 132 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); | 146 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); |
| 133 | ablkcipher_request_set_crypt(&creq->req, req->src, req->dst, | 147 | ablkcipher_request_set_crypt(&creq->req, src, dst, |
| 134 | rctx->cryptlen, creq->iv); | 148 | rctx->cryptlen, creq->iv); |
| 135 | err = crypto_ablkcipher_decrypt(&creq->req); | 149 | err = crypto_ablkcipher_decrypt(&creq->req); |
| 136 | if (err) | 150 | if (err) |
| @@ -156,14 +170,15 @@ static void poly_tail_done(struct crypto_async_request *areq, int err) | |||
| 156 | 170 | ||
| 157 | static int poly_tail(struct aead_request *req) | 171 | static int poly_tail(struct aead_request *req) |
| 158 | { | 172 | { |
| 159 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 173 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 174 | struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); | ||
| 160 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 175 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 161 | struct poly_req *preq = &rctx->u.poly; | 176 | struct poly_req *preq = &rctx->u.poly; |
| 162 | __le64 len; | 177 | __le64 len; |
| 163 | int err; | 178 | int err; |
| 164 | 179 | ||
| 165 | sg_init_table(preq->src, 1); | 180 | sg_init_table(preq->src, 1); |
| 166 | len = cpu_to_le64(req->assoclen); | 181 | len = cpu_to_le64(rctx->assoclen); |
| 167 | memcpy(&preq->tail.assoclen, &len, sizeof(len)); | 182 | memcpy(&preq->tail.assoclen, &len, sizeof(len)); |
| 168 | len = cpu_to_le64(rctx->cryptlen); | 183 | len = cpu_to_le64(rctx->cryptlen); |
| 169 | memcpy(&preq->tail.cryptlen, &len, sizeof(len)); | 184 | memcpy(&preq->tail.cryptlen, &len, sizeof(len)); |
| @@ -228,6 +243,9 @@ static int poly_cipher(struct aead_request *req) | |||
| 228 | if (rctx->cryptlen == req->cryptlen) /* encrypting */ | 243 | if (rctx->cryptlen == req->cryptlen) /* encrypting */ |
| 229 | crypt = req->dst; | 244 | crypt = req->dst; |
| 230 | 245 | ||
| 246 | sg_init_table(rctx->src, 2); | ||
| 247 | crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); | ||
| 248 | |||
| 231 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 249 | ahash_request_set_callback(&preq->req, aead_request_flags(req), |
| 232 | poly_cipher_done, req); | 250 | poly_cipher_done, req); |
| 233 | ahash_request_set_tfm(&preq->req, ctx->poly); | 251 | ahash_request_set_tfm(&preq->req, ctx->poly); |
| @@ -253,7 +271,7 @@ static int poly_adpad(struct aead_request *req) | |||
| 253 | unsigned int padlen, bs = POLY1305_BLOCK_SIZE; | 271 | unsigned int padlen, bs = POLY1305_BLOCK_SIZE; |
| 254 | int err; | 272 | int err; |
| 255 | 273 | ||
| 256 | padlen = (bs - (req->assoclen % bs)) % bs; | 274 | padlen = (bs - (rctx->assoclen % bs)) % bs; |
| 257 | memset(preq->pad, 0, sizeof(preq->pad)); | 275 | memset(preq->pad, 0, sizeof(preq->pad)); |
| 258 | sg_init_table(preq->src, 1); | 276 | sg_init_table(preq->src, 1); |
| 259 | sg_set_buf(preq->src, preq->pad, padlen); | 277 | sg_set_buf(preq->src, preq->pad, padlen); |
| @@ -285,7 +303,7 @@ static int poly_ad(struct aead_request *req) | |||
| 285 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 303 | ahash_request_set_callback(&preq->req, aead_request_flags(req), |
| 286 | poly_ad_done, req); | 304 | poly_ad_done, req); |
| 287 | ahash_request_set_tfm(&preq->req, ctx->poly); | 305 | ahash_request_set_tfm(&preq->req, ctx->poly); |
| 288 | ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen); | 306 | ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); |
| 289 | 307 | ||
| 290 | err = crypto_ahash_update(&preq->req); | 308 | err = crypto_ahash_update(&preq->req); |
| 291 | if (err) | 309 | if (err) |
| @@ -351,11 +369,20 @@ static void poly_genkey_done(struct crypto_async_request *areq, int err) | |||
| 351 | 369 | ||
| 352 | static int poly_genkey(struct aead_request *req) | 370 | static int poly_genkey(struct aead_request *req) |
| 353 | { | 371 | { |
| 354 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 372 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 373 | struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); | ||
| 355 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 374 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 356 | struct chacha_req *creq = &rctx->u.chacha; | 375 | struct chacha_req *creq = &rctx->u.chacha; |
| 357 | int err; | 376 | int err; |
| 358 | 377 | ||
| 378 | rctx->assoclen = req->assoclen; | ||
| 379 | |||
| 380 | if (crypto_aead_ivsize(tfm) == 8) { | ||
| 381 | if (rctx->assoclen < 8) | ||
| 382 | return -EINVAL; | ||
| 383 | rctx->assoclen -= 8; | ||
| 384 | } | ||
| 385 | |||
| 359 | sg_init_table(creq->src, 1); | 386 | sg_init_table(creq->src, 1); |
| 360 | memset(rctx->key, 0, sizeof(rctx->key)); | 387 | memset(rctx->key, 0, sizeof(rctx->key)); |
| 361 | sg_set_buf(creq->src, rctx->key, sizeof(rctx->key)); | 388 | sg_set_buf(creq->src, rctx->key, sizeof(rctx->key)); |
| @@ -385,14 +412,24 @@ static int chacha_encrypt(struct aead_request *req) | |||
| 385 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 412 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
| 386 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 413 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 387 | struct chacha_req *creq = &rctx->u.chacha; | 414 | struct chacha_req *creq = &rctx->u.chacha; |
| 415 | struct scatterlist *src, *dst; | ||
| 388 | int err; | 416 | int err; |
| 389 | 417 | ||
| 390 | chacha_iv(creq->iv, req, 1); | 418 | chacha_iv(creq->iv, req, 1); |
| 391 | 419 | ||
| 420 | sg_init_table(rctx->src, 2); | ||
| 421 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); | ||
| 422 | dst = src; | ||
| 423 | |||
| 424 | if (req->src != req->dst) { | ||
| 425 | sg_init_table(rctx->dst, 2); | ||
| 426 | dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); | ||
| 427 | } | ||
| 428 | |||
| 392 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), | 429 | ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), |
| 393 | chacha_encrypt_done, req); | 430 | chacha_encrypt_done, req); |
| 394 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); | 431 | ablkcipher_request_set_tfm(&creq->req, ctx->chacha); |
| 395 | ablkcipher_request_set_crypt(&creq->req, req->src, req->dst, | 432 | ablkcipher_request_set_crypt(&creq->req, src, dst, |
| 396 | req->cryptlen, creq->iv); | 433 | req->cryptlen, creq->iv); |
| 397 | err = crypto_ablkcipher_encrypt(&creq->req); | 434 | err = crypto_ablkcipher_encrypt(&creq->req); |
| 398 | if (err) | 435 | if (err) |
| @@ -426,8 +463,6 @@ static int chachapoly_decrypt(struct aead_request *req) | |||
| 426 | { | 463 | { |
| 427 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 464 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
| 428 | 465 | ||
| 429 | if (req->cryptlen < POLY1305_DIGEST_SIZE) | ||
| 430 | return -EINVAL; | ||
| 431 | rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; | 466 | rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; |
| 432 | 467 | ||
| 433 | /* decrypt call chain: | 468 | /* decrypt call chain: |
| @@ -476,11 +511,11 @@ static int chachapoly_setauthsize(struct crypto_aead *tfm, | |||
| 476 | return 0; | 511 | return 0; |
| 477 | } | 512 | } |
| 478 | 513 | ||
| 479 | static int chachapoly_init(struct crypto_tfm *tfm) | 514 | static int chachapoly_init(struct crypto_aead *tfm) |
| 480 | { | 515 | { |
| 481 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 516 | struct aead_instance *inst = aead_alg_instance(tfm); |
| 482 | struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst); | 517 | struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); |
| 483 | struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm); | 518 | struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); |
| 484 | struct crypto_ablkcipher *chacha; | 519 | struct crypto_ablkcipher *chacha; |
| 485 | struct crypto_ahash *poly; | 520 | struct crypto_ahash *poly; |
| 486 | unsigned long align; | 521 | unsigned long align; |
| @@ -499,77 +534,87 @@ static int chachapoly_init(struct crypto_tfm *tfm) | |||
| 499 | ctx->poly = poly; | 534 | ctx->poly = poly; |
| 500 | ctx->saltlen = ictx->saltlen; | 535 | ctx->saltlen = ictx->saltlen; |
| 501 | 536 | ||
| 502 | align = crypto_tfm_alg_alignmask(tfm); | 537 | align = crypto_aead_alignmask(tfm); |
| 503 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 538 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
| 504 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 539 | crypto_aead_set_reqsize( |
| 505 | align + offsetof(struct chachapoly_req_ctx, u) + | 540 | tfm, |
| 506 | max(offsetof(struct chacha_req, req) + | 541 | align + offsetof(struct chachapoly_req_ctx, u) + |
| 507 | sizeof(struct ablkcipher_request) + | 542 | max(offsetof(struct chacha_req, req) + |
| 508 | crypto_ablkcipher_reqsize(chacha), | 543 | sizeof(struct ablkcipher_request) + |
| 509 | offsetof(struct poly_req, req) + | 544 | crypto_ablkcipher_reqsize(chacha), |
| 510 | sizeof(struct ahash_request) + | 545 | offsetof(struct poly_req, req) + |
| 511 | crypto_ahash_reqsize(poly))); | 546 | sizeof(struct ahash_request) + |
| 547 | crypto_ahash_reqsize(poly))); | ||
| 512 | 548 | ||
| 513 | return 0; | 549 | return 0; |
| 514 | } | 550 | } |
| 515 | 551 | ||
| 516 | static void chachapoly_exit(struct crypto_tfm *tfm) | 552 | static void chachapoly_exit(struct crypto_aead *tfm) |
| 517 | { | 553 | { |
| 518 | struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm); | 554 | struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); |
| 519 | 555 | ||
| 520 | crypto_free_ahash(ctx->poly); | 556 | crypto_free_ahash(ctx->poly); |
| 521 | crypto_free_ablkcipher(ctx->chacha); | 557 | crypto_free_ablkcipher(ctx->chacha); |
| 522 | } | 558 | } |
| 523 | 559 | ||
| 524 | static struct crypto_instance *chachapoly_alloc(struct rtattr **tb, | 560 | static void chachapoly_free(struct aead_instance *inst) |
| 525 | const char *name, | 561 | { |
| 526 | unsigned int ivsize) | 562 | struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst); |
| 563 | |||
| 564 | crypto_drop_skcipher(&ctx->chacha); | ||
| 565 | crypto_drop_ahash(&ctx->poly); | ||
| 566 | kfree(inst); | ||
| 567 | } | ||
| 568 | |||
| 569 | static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, | ||
| 570 | const char *name, unsigned int ivsize) | ||
| 527 | { | 571 | { |
| 528 | struct crypto_attr_type *algt; | 572 | struct crypto_attr_type *algt; |
| 529 | struct crypto_instance *inst; | 573 | struct aead_instance *inst; |
| 530 | struct crypto_alg *chacha; | 574 | struct crypto_alg *chacha; |
| 531 | struct crypto_alg *poly; | 575 | struct crypto_alg *poly; |
| 532 | struct ahash_alg *poly_ahash; | 576 | struct hash_alg_common *poly_hash; |
| 533 | struct chachapoly_instance_ctx *ctx; | 577 | struct chachapoly_instance_ctx *ctx; |
| 534 | const char *chacha_name, *poly_name; | 578 | const char *chacha_name, *poly_name; |
| 535 | int err; | 579 | int err; |
| 536 | 580 | ||
| 537 | if (ivsize > CHACHAPOLY_IV_SIZE) | 581 | if (ivsize > CHACHAPOLY_IV_SIZE) |
| 538 | return ERR_PTR(-EINVAL); | 582 | return -EINVAL; |
| 539 | 583 | ||
| 540 | algt = crypto_get_attr_type(tb); | 584 | algt = crypto_get_attr_type(tb); |
| 541 | if (IS_ERR(algt)) | 585 | if (IS_ERR(algt)) |
| 542 | return ERR_CAST(algt); | 586 | return PTR_ERR(algt); |
| 543 | 587 | ||
| 544 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 588 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
| 545 | return ERR_PTR(-EINVAL); | 589 | return -EINVAL; |
| 546 | 590 | ||
| 547 | chacha_name = crypto_attr_alg_name(tb[1]); | 591 | chacha_name = crypto_attr_alg_name(tb[1]); |
| 548 | if (IS_ERR(chacha_name)) | 592 | if (IS_ERR(chacha_name)) |
| 549 | return ERR_CAST(chacha_name); | 593 | return PTR_ERR(chacha_name); |
| 550 | poly_name = crypto_attr_alg_name(tb[2]); | 594 | poly_name = crypto_attr_alg_name(tb[2]); |
| 551 | if (IS_ERR(poly_name)) | 595 | if (IS_ERR(poly_name)) |
| 552 | return ERR_CAST(poly_name); | 596 | return PTR_ERR(poly_name); |
| 553 | 597 | ||
| 554 | poly = crypto_find_alg(poly_name, &crypto_ahash_type, | 598 | poly = crypto_find_alg(poly_name, &crypto_ahash_type, |
| 555 | CRYPTO_ALG_TYPE_HASH, | 599 | CRYPTO_ALG_TYPE_HASH, |
| 556 | CRYPTO_ALG_TYPE_AHASH_MASK); | 600 | CRYPTO_ALG_TYPE_AHASH_MASK); |
| 557 | if (IS_ERR(poly)) | 601 | if (IS_ERR(poly)) |
| 558 | return ERR_CAST(poly); | 602 | return PTR_ERR(poly); |
| 559 | 603 | ||
| 560 | err = -ENOMEM; | 604 | err = -ENOMEM; |
| 561 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 605 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
| 562 | if (!inst) | 606 | if (!inst) |
| 563 | goto out_put_poly; | 607 | goto out_put_poly; |
| 564 | 608 | ||
| 565 | ctx = crypto_instance_ctx(inst); | 609 | ctx = aead_instance_ctx(inst); |
| 566 | ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; | 610 | ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; |
| 567 | poly_ahash = container_of(poly, struct ahash_alg, halg.base); | 611 | poly_hash = __crypto_hash_alg_common(poly); |
| 568 | err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst); | 612 | err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, |
| 613 | aead_crypto_instance(inst)); | ||
| 569 | if (err) | 614 | if (err) |
| 570 | goto err_free_inst; | 615 | goto err_free_inst; |
| 571 | 616 | ||
| 572 | crypto_set_skcipher_spawn(&ctx->chacha, inst); | 617 | crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); |
| 573 | err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, | 618 | err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, |
| 574 | crypto_requires_sync(algt->type, | 619 | crypto_requires_sync(algt->type, |
| 575 | algt->mask)); | 620 | algt->mask)); |
| @@ -587,37 +632,42 @@ static struct crypto_instance *chachapoly_alloc(struct rtattr **tb, | |||
| 587 | goto out_drop_chacha; | 632 | goto out_drop_chacha; |
| 588 | 633 | ||
| 589 | err = -ENAMETOOLONG; | 634 | err = -ENAMETOOLONG; |
| 590 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 635 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 591 | "%s(%s,%s)", name, chacha_name, | 636 | "%s(%s,%s)", name, chacha_name, |
| 592 | poly_name) >= CRYPTO_MAX_ALG_NAME) | 637 | poly_name) >= CRYPTO_MAX_ALG_NAME) |
| 593 | goto out_drop_chacha; | 638 | goto out_drop_chacha; |
| 594 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 639 | if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
| 595 | "%s(%s,%s)", name, chacha->cra_driver_name, | 640 | "%s(%s,%s)", name, chacha->cra_driver_name, |
| 596 | poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 641 | poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
| 597 | goto out_drop_chacha; | 642 | goto out_drop_chacha; |
| 598 | 643 | ||
| 599 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 644 | inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) & |
| 600 | inst->alg.cra_flags |= (chacha->cra_flags | | 645 | CRYPTO_ALG_ASYNC; |
| 601 | poly->cra_flags) & CRYPTO_ALG_ASYNC; | 646 | inst->alg.base.cra_priority = (chacha->cra_priority + |
| 602 | inst->alg.cra_priority = (chacha->cra_priority + | 647 | poly->cra_priority) / 2; |
| 603 | poly->cra_priority) / 2; | 648 | inst->alg.base.cra_blocksize = 1; |
| 604 | inst->alg.cra_blocksize = 1; | 649 | inst->alg.base.cra_alignmask = chacha->cra_alignmask | |
| 605 | inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask; | 650 | poly->cra_alignmask; |
| 606 | inst->alg.cra_type = &crypto_nivaead_type; | 651 | inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + |
| 607 | inst->alg.cra_aead.ivsize = ivsize; | 652 | ctx->saltlen; |
| 608 | inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE; | 653 | inst->alg.ivsize = ivsize; |
| 609 | inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; | 654 | inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; |
| 610 | inst->alg.cra_init = chachapoly_init; | 655 | inst->alg.init = chachapoly_init; |
| 611 | inst->alg.cra_exit = chachapoly_exit; | 656 | inst->alg.exit = chachapoly_exit; |
| 612 | inst->alg.cra_aead.encrypt = chachapoly_encrypt; | 657 | inst->alg.encrypt = chachapoly_encrypt; |
| 613 | inst->alg.cra_aead.decrypt = chachapoly_decrypt; | 658 | inst->alg.decrypt = chachapoly_decrypt; |
| 614 | inst->alg.cra_aead.setkey = chachapoly_setkey; | 659 | inst->alg.setkey = chachapoly_setkey; |
| 615 | inst->alg.cra_aead.setauthsize = chachapoly_setauthsize; | 660 | inst->alg.setauthsize = chachapoly_setauthsize; |
| 616 | inst->alg.cra_aead.geniv = "seqiv"; | 661 | |
| 617 | 662 | inst->free = chachapoly_free; | |
| 618 | out: | 663 | |
| 664 | err = aead_register_instance(tmpl, inst); | ||
| 665 | if (err) | ||
| 666 | goto out_drop_chacha; | ||
| 667 | |||
| 668 | out_put_poly: | ||
| 619 | crypto_mod_put(poly); | 669 | crypto_mod_put(poly); |
| 620 | return inst; | 670 | return err; |
| 621 | 671 | ||
| 622 | out_drop_chacha: | 672 | out_drop_chacha: |
| 623 | crypto_drop_skcipher(&ctx->chacha); | 673 | crypto_drop_skcipher(&ctx->chacha); |
| @@ -625,41 +675,28 @@ err_drop_poly: | |||
| 625 | crypto_drop_ahash(&ctx->poly); | 675 | crypto_drop_ahash(&ctx->poly); |
| 626 | err_free_inst: | 676 | err_free_inst: |
| 627 | kfree(inst); | 677 | kfree(inst); |
| 628 | out_put_poly: | 678 | goto out_put_poly; |
| 629 | inst = ERR_PTR(err); | ||
| 630 | goto out; | ||
| 631 | } | ||
| 632 | |||
| 633 | static struct crypto_instance *rfc7539_alloc(struct rtattr **tb) | ||
| 634 | { | ||
| 635 | return chachapoly_alloc(tb, "rfc7539", 12); | ||
| 636 | } | 679 | } |
| 637 | 680 | ||
| 638 | static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb) | 681 | static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 639 | { | 682 | { |
| 640 | return chachapoly_alloc(tb, "rfc7539esp", 8); | 683 | return chachapoly_create(tmpl, tb, "rfc7539", 12); |
| 641 | } | 684 | } |
| 642 | 685 | ||
| 643 | static void chachapoly_free(struct crypto_instance *inst) | 686 | static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 644 | { | 687 | { |
| 645 | struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst); | 688 | return chachapoly_create(tmpl, tb, "rfc7539esp", 8); |
| 646 | |||
| 647 | crypto_drop_skcipher(&ctx->chacha); | ||
| 648 | crypto_drop_ahash(&ctx->poly); | ||
| 649 | kfree(inst); | ||
| 650 | } | 689 | } |
| 651 | 690 | ||
| 652 | static struct crypto_template rfc7539_tmpl = { | 691 | static struct crypto_template rfc7539_tmpl = { |
| 653 | .name = "rfc7539", | 692 | .name = "rfc7539", |
| 654 | .alloc = rfc7539_alloc, | 693 | .create = rfc7539_create, |
| 655 | .free = chachapoly_free, | ||
| 656 | .module = THIS_MODULE, | 694 | .module = THIS_MODULE, |
| 657 | }; | 695 | }; |
| 658 | 696 | ||
| 659 | static struct crypto_template rfc7539esp_tmpl = { | 697 | static struct crypto_template rfc7539esp_tmpl = { |
| 660 | .name = "rfc7539esp", | 698 | .name = "rfc7539esp", |
| 661 | .alloc = rfc7539esp_alloc, | 699 | .create = rfc7539esp_create, |
| 662 | .free = chachapoly_free, | ||
| 663 | .module = THIS_MODULE, | 700 | .module = THIS_MODULE, |
| 664 | }; | 701 | }; |
| 665 | 702 | ||
| @@ -690,6 +727,5 @@ module_exit(chacha20poly1305_module_exit); | |||
| 690 | MODULE_LICENSE("GPL"); | 727 | MODULE_LICENSE("GPL"); |
| 691 | MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); | 728 | MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); |
| 692 | MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD"); | 729 | MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD"); |
| 693 | MODULE_ALIAS_CRYPTO("chacha20poly1305"); | ||
| 694 | MODULE_ALIAS_CRYPTO("rfc7539"); | 730 | MODULE_ALIAS_CRYPTO("rfc7539"); |
| 695 | MODULE_ALIAS_CRYPTO("rfc7539esp"); | 731 | MODULE_ALIAS_CRYPTO("rfc7539esp"); |
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 22ba81f76764..c81861b1350b 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
| @@ -176,10 +176,9 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, | |||
| 176 | algt = crypto_get_attr_type(tb); | 176 | algt = crypto_get_attr_type(tb); |
| 177 | if (IS_ERR(algt)) | 177 | if (IS_ERR(algt)) |
| 178 | return; | 178 | return; |
| 179 | if ((algt->type & CRYPTO_ALG_INTERNAL)) | 179 | |
| 180 | *type |= CRYPTO_ALG_INTERNAL; | 180 | *type |= algt->type & CRYPTO_ALG_INTERNAL; |
| 181 | if ((algt->mask & CRYPTO_ALG_INTERNAL)) | 181 | *mask |= algt->mask & CRYPTO_ALG_INTERNAL; |
| 182 | *mask |= CRYPTO_ALG_INTERNAL; | ||
| 183 | } | 182 | } |
| 184 | 183 | ||
| 185 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, | 184 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, |
| @@ -688,16 +687,18 @@ static void cryptd_aead_crypt(struct aead_request *req, | |||
| 688 | int (*crypt)(struct aead_request *req)) | 687 | int (*crypt)(struct aead_request *req)) |
| 689 | { | 688 | { |
| 690 | struct cryptd_aead_request_ctx *rctx; | 689 | struct cryptd_aead_request_ctx *rctx; |
| 690 | crypto_completion_t compl; | ||
| 691 | |||
| 691 | rctx = aead_request_ctx(req); | 692 | rctx = aead_request_ctx(req); |
| 693 | compl = rctx->complete; | ||
| 692 | 694 | ||
| 693 | if (unlikely(err == -EINPROGRESS)) | 695 | if (unlikely(err == -EINPROGRESS)) |
| 694 | goto out; | 696 | goto out; |
| 695 | aead_request_set_tfm(req, child); | 697 | aead_request_set_tfm(req, child); |
| 696 | err = crypt( req ); | 698 | err = crypt( req ); |
| 697 | req->base.complete = rctx->complete; | ||
| 698 | out: | 699 | out: |
| 699 | local_bh_disable(); | 700 | local_bh_disable(); |
| 700 | rctx->complete(&req->base, err); | 701 | compl(&req->base, err); |
| 701 | local_bh_enable(); | 702 | local_bh_enable(); |
| 702 | } | 703 | } |
| 703 | 704 | ||
| @@ -708,7 +709,7 @@ static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) | |||
| 708 | struct aead_request *req; | 709 | struct aead_request *req; |
| 709 | 710 | ||
| 710 | req = container_of(areq, struct aead_request, base); | 711 | req = container_of(areq, struct aead_request, base); |
| 711 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt); | 712 | cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt); |
| 712 | } | 713 | } |
| 713 | 714 | ||
| 714 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | 715 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) |
| @@ -718,7 +719,7 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | |||
| 718 | struct aead_request *req; | 719 | struct aead_request *req; |
| 719 | 720 | ||
| 720 | req = container_of(areq, struct aead_request, base); | 721 | req = container_of(areq, struct aead_request, base); |
| 721 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt); | 722 | cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt); |
| 722 | } | 723 | } |
| 723 | 724 | ||
| 724 | static int cryptd_aead_enqueue(struct aead_request *req, | 725 | static int cryptd_aead_enqueue(struct aead_request *req, |
| @@ -756,7 +757,9 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm) | |||
| 756 | return PTR_ERR(cipher); | 757 | return PTR_ERR(cipher); |
| 757 | 758 | ||
| 758 | ctx->child = cipher; | 759 | ctx->child = cipher; |
| 759 | crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx)); | 760 | crypto_aead_set_reqsize( |
| 761 | tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx), | ||
| 762 | crypto_aead_reqsize(cipher))); | ||
| 760 | return 0; | 763 | return 0; |
| 761 | } | 764 | } |
| 762 | 765 | ||
| @@ -775,7 +778,7 @@ static int cryptd_create_aead(struct crypto_template *tmpl, | |||
| 775 | struct aead_alg *alg; | 778 | struct aead_alg *alg; |
| 776 | const char *name; | 779 | const char *name; |
| 777 | u32 type = 0; | 780 | u32 type = 0; |
| 778 | u32 mask = 0; | 781 | u32 mask = CRYPTO_ALG_ASYNC; |
| 779 | int err; | 782 | int err; |
| 780 | 783 | ||
| 781 | cryptd_check_internal(tb, &type, &mask); | 784 | cryptd_check_internal(tb, &type, &mask); |
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 08ea2867fc8a..d94d99ffe8b9 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <net/netlink.h> | 25 | #include <net/netlink.h> |
| 26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
| 27 | #include <net/net_namespace.h> | 27 | #include <net/net_namespace.h> |
| 28 | #include <crypto/internal/aead.h> | ||
| 29 | #include <crypto/internal/skcipher.h> | 28 | #include <crypto/internal/skcipher.h> |
| 30 | #include <crypto/internal/rng.h> | 29 | #include <crypto/internal/rng.h> |
| 31 | #include <crypto/akcipher.h> | 30 | #include <crypto/akcipher.h> |
| @@ -385,34 +384,6 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type, | |||
| 385 | return ERR_PTR(err); | 384 | return ERR_PTR(err); |
| 386 | } | 385 | } |
| 387 | 386 | ||
| 388 | static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type, | ||
| 389 | u32 mask) | ||
| 390 | { | ||
| 391 | int err; | ||
| 392 | struct crypto_alg *alg; | ||
| 393 | |||
| 394 | type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | ||
| 395 | type |= CRYPTO_ALG_TYPE_AEAD; | ||
| 396 | mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); | ||
| 397 | mask |= CRYPTO_ALG_TYPE_MASK; | ||
| 398 | |||
| 399 | for (;;) { | ||
| 400 | alg = crypto_lookup_aead(name, type, mask); | ||
| 401 | if (!IS_ERR(alg)) | ||
| 402 | return alg; | ||
| 403 | |||
| 404 | err = PTR_ERR(alg); | ||
| 405 | if (err != -EAGAIN) | ||
| 406 | break; | ||
| 407 | if (signal_pending(current)) { | ||
| 408 | err = -EINTR; | ||
| 409 | break; | ||
| 410 | } | ||
| 411 | } | ||
| 412 | |||
| 413 | return ERR_PTR(err); | ||
| 414 | } | ||
| 415 | |||
| 416 | static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, | 387 | static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, |
| 417 | struct nlattr **attrs) | 388 | struct nlattr **attrs) |
| 418 | { | 389 | { |
| @@ -446,9 +417,6 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 446 | name = p->cru_name; | 417 | name = p->cru_name; |
| 447 | 418 | ||
| 448 | switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) { | 419 | switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) { |
| 449 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 450 | alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask); | ||
| 451 | break; | ||
| 452 | case CRYPTO_ALG_TYPE_GIVCIPHER: | 420 | case CRYPTO_ALG_TYPE_GIVCIPHER: |
| 453 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 421 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
| 454 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 422 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
diff --git a/crypto/echainiv.c b/crypto/echainiv.c index b6e43dc61356..b96a84560b67 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c | |||
| @@ -19,8 +19,6 @@ | |||
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | #include <crypto/internal/geniv.h> | 21 | #include <crypto/internal/geniv.h> |
| 22 | #include <crypto/null.h> | ||
| 23 | #include <crypto/rng.h> | ||
| 24 | #include <crypto/scatterwalk.h> | 22 | #include <crypto/scatterwalk.h> |
| 25 | #include <linux/err.h> | 23 | #include <linux/err.h> |
| 26 | #include <linux/init.h> | 24 | #include <linux/init.h> |
| @@ -33,13 +31,6 @@ | |||
| 33 | 31 | ||
| 34 | #define MAX_IV_SIZE 16 | 32 | #define MAX_IV_SIZE 16 |
| 35 | 33 | ||
| 36 | struct echainiv_ctx { | ||
| 37 | /* aead_geniv_ctx must be first the element */ | ||
| 38 | struct aead_geniv_ctx geniv; | ||
| 39 | struct crypto_blkcipher *null; | ||
| 40 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | ||
| 41 | }; | ||
| 42 | |||
| 43 | static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); | 34 | static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); |
| 44 | 35 | ||
| 45 | /* We don't care if we get preempted and read/write IVs from the next CPU. */ | 36 | /* We don't care if we get preempted and read/write IVs from the next CPU. */ |
| @@ -103,7 +94,7 @@ static void echainiv_encrypt_complete(struct crypto_async_request *base, | |||
| 103 | static int echainiv_encrypt(struct aead_request *req) | 94 | static int echainiv_encrypt(struct aead_request *req) |
| 104 | { | 95 | { |
| 105 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | 96 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
| 106 | struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); | 97 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); |
| 107 | struct aead_request *subreq = aead_request_ctx(req); | 98 | struct aead_request *subreq = aead_request_ctx(req); |
| 108 | crypto_completion_t compl; | 99 | crypto_completion_t compl; |
| 109 | void *data; | 100 | void *data; |
| @@ -114,7 +105,7 @@ static int echainiv_encrypt(struct aead_request *req) | |||
| 114 | if (req->cryptlen < ivsize) | 105 | if (req->cryptlen < ivsize) |
| 115 | return -EINVAL; | 106 | return -EINVAL; |
| 116 | 107 | ||
| 117 | aead_request_set_tfm(subreq, ctx->geniv.child); | 108 | aead_request_set_tfm(subreq, ctx->child); |
| 118 | 109 | ||
| 119 | compl = echainiv_encrypt_complete; | 110 | compl = echainiv_encrypt_complete; |
| 120 | data = req; | 111 | data = req; |
| @@ -145,8 +136,8 @@ static int echainiv_encrypt(struct aead_request *req) | |||
| 145 | 136 | ||
| 146 | aead_request_set_callback(subreq, req->base.flags, compl, data); | 137 | aead_request_set_callback(subreq, req->base.flags, compl, data); |
| 147 | aead_request_set_crypt(subreq, req->dst, req->dst, | 138 | aead_request_set_crypt(subreq, req->dst, req->dst, |
| 148 | req->cryptlen - ivsize, info); | 139 | req->cryptlen, info); |
| 149 | aead_request_set_ad(subreq, req->assoclen + ivsize); | 140 | aead_request_set_ad(subreq, req->assoclen); |
| 150 | 141 | ||
| 151 | crypto_xor(info, ctx->salt, ivsize); | 142 | crypto_xor(info, ctx->salt, ivsize); |
| 152 | scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); | 143 | scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); |
| @@ -160,16 +151,16 @@ static int echainiv_encrypt(struct aead_request *req) | |||
| 160 | static int echainiv_decrypt(struct aead_request *req) | 151 | static int echainiv_decrypt(struct aead_request *req) |
| 161 | { | 152 | { |
| 162 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | 153 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
| 163 | struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); | 154 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); |
| 164 | struct aead_request *subreq = aead_request_ctx(req); | 155 | struct aead_request *subreq = aead_request_ctx(req); |
| 165 | crypto_completion_t compl; | 156 | crypto_completion_t compl; |
| 166 | void *data; | 157 | void *data; |
| 167 | unsigned int ivsize = crypto_aead_ivsize(geniv); | 158 | unsigned int ivsize = crypto_aead_ivsize(geniv); |
| 168 | 159 | ||
| 169 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) | 160 | if (req->cryptlen < ivsize) |
| 170 | return -EINVAL; | 161 | return -EINVAL; |
| 171 | 162 | ||
| 172 | aead_request_set_tfm(subreq, ctx->geniv.child); | 163 | aead_request_set_tfm(subreq, ctx->child); |
| 173 | 164 | ||
| 174 | compl = req->base.complete; | 165 | compl = req->base.complete; |
| 175 | data = req->base.data; | 166 | data = req->base.data; |
| @@ -180,61 +171,10 @@ static int echainiv_decrypt(struct aead_request *req) | |||
| 180 | aead_request_set_ad(subreq, req->assoclen + ivsize); | 171 | aead_request_set_ad(subreq, req->assoclen + ivsize); |
| 181 | 172 | ||
| 182 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); | 173 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); |
| 183 | if (req->src != req->dst) | ||
| 184 | scatterwalk_map_and_copy(req->iv, req->dst, | ||
| 185 | req->assoclen, ivsize, 1); | ||
| 186 | 174 | ||
| 187 | return crypto_aead_decrypt(subreq); | 175 | return crypto_aead_decrypt(subreq); |
| 188 | } | 176 | } |
| 189 | 177 | ||
| 190 | static int echainiv_init(struct crypto_tfm *tfm) | ||
| 191 | { | ||
| 192 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
| 193 | struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 194 | int err; | ||
| 195 | |||
| 196 | spin_lock_init(&ctx->geniv.lock); | ||
| 197 | |||
| 198 | crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); | ||
| 199 | |||
| 200 | err = crypto_get_default_rng(); | ||
| 201 | if (err) | ||
| 202 | goto out; | ||
| 203 | |||
| 204 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
| 205 | crypto_aead_ivsize(geniv)); | ||
| 206 | crypto_put_default_rng(); | ||
| 207 | if (err) | ||
| 208 | goto out; | ||
| 209 | |||
| 210 | ctx->null = crypto_get_default_null_skcipher(); | ||
| 211 | err = PTR_ERR(ctx->null); | ||
| 212 | if (IS_ERR(ctx->null)) | ||
| 213 | goto out; | ||
| 214 | |||
| 215 | err = aead_geniv_init(tfm); | ||
| 216 | if (err) | ||
| 217 | goto drop_null; | ||
| 218 | |||
| 219 | ctx->geniv.child = geniv->child; | ||
| 220 | geniv->child = geniv; | ||
| 221 | |||
| 222 | out: | ||
| 223 | return err; | ||
| 224 | |||
| 225 | drop_null: | ||
| 226 | crypto_put_default_null_skcipher(); | ||
| 227 | goto out; | ||
| 228 | } | ||
| 229 | |||
| 230 | static void echainiv_exit(struct crypto_tfm *tfm) | ||
| 231 | { | ||
| 232 | struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 233 | |||
| 234 | crypto_free_aead(ctx->geniv.child); | ||
| 235 | crypto_put_default_null_skcipher(); | ||
| 236 | } | ||
| 237 | |||
| 238 | static int echainiv_aead_create(struct crypto_template *tmpl, | 178 | static int echainiv_aead_create(struct crypto_template *tmpl, |
| 239 | struct rtattr **tb) | 179 | struct rtattr **tb) |
| 240 | { | 180 | { |
| @@ -251,9 +191,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, | |||
| 251 | spawn = aead_instance_ctx(inst); | 191 | spawn = aead_instance_ctx(inst); |
| 252 | alg = crypto_spawn_aead_alg(spawn); | 192 | alg = crypto_spawn_aead_alg(spawn); |
| 253 | 193 | ||
| 254 | if (alg->base.cra_aead.encrypt) | ||
| 255 | goto done; | ||
| 256 | |||
| 257 | err = -EINVAL; | 194 | err = -EINVAL; |
| 258 | if (inst->alg.ivsize & (sizeof(u32) - 1) || | 195 | if (inst->alg.ivsize & (sizeof(u32) - 1) || |
| 259 | inst->alg.ivsize > MAX_IV_SIZE) | 196 | inst->alg.ivsize > MAX_IV_SIZE) |
| @@ -262,14 +199,15 @@ static int echainiv_aead_create(struct crypto_template *tmpl, | |||
| 262 | inst->alg.encrypt = echainiv_encrypt; | 199 | inst->alg.encrypt = echainiv_encrypt; |
| 263 | inst->alg.decrypt = echainiv_decrypt; | 200 | inst->alg.decrypt = echainiv_decrypt; |
| 264 | 201 | ||
| 265 | inst->alg.base.cra_init = echainiv_init; | 202 | inst->alg.init = aead_init_geniv; |
| 266 | inst->alg.base.cra_exit = echainiv_exit; | 203 | inst->alg.exit = aead_exit_geniv; |
| 267 | 204 | ||
| 268 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | 205 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; |
| 269 | inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx); | 206 | inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); |
| 270 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; | 207 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; |
| 271 | 208 | ||
| 272 | done: | 209 | inst->free = aead_geniv_free; |
| 210 | |||
| 273 | err = aead_register_instance(tmpl, inst); | 211 | err = aead_register_instance(tmpl, inst); |
| 274 | if (err) | 212 | if (err) |
| 275 | goto free_inst; | 213 | goto free_inst; |
diff --git a/crypto/gcm.c b/crypto/gcm.c index 7d32d4720564..ddb4f29b2fe6 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
| @@ -38,6 +38,12 @@ struct crypto_rfc4106_ctx { | |||
| 38 | u8 nonce[4]; | 38 | u8 nonce[4]; |
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | struct crypto_rfc4106_req_ctx { | ||
| 42 | struct scatterlist src[3]; | ||
| 43 | struct scatterlist dst[3]; | ||
| 44 | struct aead_request subreq; | ||
| 45 | }; | ||
| 46 | |||
| 41 | struct crypto_rfc4543_instance_ctx { | 47 | struct crypto_rfc4543_instance_ctx { |
| 42 | struct crypto_aead_spawn aead; | 48 | struct crypto_aead_spawn aead; |
| 43 | }; | 49 | }; |
| @@ -601,6 +607,15 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm) | |||
| 601 | crypto_free_ablkcipher(ctx->ctr); | 607 | crypto_free_ablkcipher(ctx->ctr); |
| 602 | } | 608 | } |
| 603 | 609 | ||
| 610 | static void crypto_gcm_free(struct aead_instance *inst) | ||
| 611 | { | ||
| 612 | struct gcm_instance_ctx *ctx = aead_instance_ctx(inst); | ||
| 613 | |||
| 614 | crypto_drop_skcipher(&ctx->ctr); | ||
| 615 | crypto_drop_ahash(&ctx->ghash); | ||
| 616 | kfree(inst); | ||
| 617 | } | ||
| 618 | |||
| 604 | static int crypto_gcm_create_common(struct crypto_template *tmpl, | 619 | static int crypto_gcm_create_common(struct crypto_template *tmpl, |
| 605 | struct rtattr **tb, | 620 | struct rtattr **tb, |
| 606 | const char *full_name, | 621 | const char *full_name, |
| @@ -689,6 +704,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, | |||
| 689 | inst->alg.encrypt = crypto_gcm_encrypt; | 704 | inst->alg.encrypt = crypto_gcm_encrypt; |
| 690 | inst->alg.decrypt = crypto_gcm_decrypt; | 705 | inst->alg.decrypt = crypto_gcm_decrypt; |
| 691 | 706 | ||
| 707 | inst->free = crypto_gcm_free; | ||
| 708 | |||
| 692 | err = aead_register_instance(tmpl, inst); | 709 | err = aead_register_instance(tmpl, inst); |
| 693 | if (err) | 710 | if (err) |
| 694 | goto out_put_ctr; | 711 | goto out_put_ctr; |
| @@ -728,19 +745,9 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 728 | ctr_name, "ghash"); | 745 | ctr_name, "ghash"); |
| 729 | } | 746 | } |
| 730 | 747 | ||
| 731 | static void crypto_gcm_free(struct crypto_instance *inst) | ||
| 732 | { | ||
| 733 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
| 734 | |||
| 735 | crypto_drop_skcipher(&ctx->ctr); | ||
| 736 | crypto_drop_ahash(&ctx->ghash); | ||
| 737 | kfree(aead_instance(inst)); | ||
| 738 | } | ||
| 739 | |||
| 740 | static struct crypto_template crypto_gcm_tmpl = { | 748 | static struct crypto_template crypto_gcm_tmpl = { |
| 741 | .name = "gcm", | 749 | .name = "gcm", |
| 742 | .create = crypto_gcm_create, | 750 | .create = crypto_gcm_create, |
| 743 | .free = crypto_gcm_free, | ||
| 744 | .module = THIS_MODULE, | 751 | .module = THIS_MODULE, |
| 745 | }; | 752 | }; |
| 746 | 753 | ||
| @@ -770,7 +777,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl, | |||
| 770 | static struct crypto_template crypto_gcm_base_tmpl = { | 777 | static struct crypto_template crypto_gcm_base_tmpl = { |
| 771 | .name = "gcm_base", | 778 | .name = "gcm_base", |
| 772 | .create = crypto_gcm_base_create, | 779 | .create = crypto_gcm_base_create, |
| 773 | .free = crypto_gcm_free, | ||
| 774 | .module = THIS_MODULE, | 780 | .module = THIS_MODULE, |
| 775 | }; | 781 | }; |
| 776 | 782 | ||
| @@ -816,27 +822,50 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, | |||
| 816 | 822 | ||
| 817 | static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) | 823 | static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) |
| 818 | { | 824 | { |
| 819 | struct aead_request *subreq = aead_request_ctx(req); | 825 | struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req); |
| 820 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 826 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 821 | struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead); | 827 | struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead); |
| 828 | struct aead_request *subreq = &rctx->subreq; | ||
| 822 | struct crypto_aead *child = ctx->child; | 829 | struct crypto_aead *child = ctx->child; |
| 830 | struct scatterlist *sg; | ||
| 823 | u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), | 831 | u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), |
| 824 | crypto_aead_alignmask(child) + 1); | 832 | crypto_aead_alignmask(child) + 1); |
| 825 | 833 | ||
| 834 | scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0); | ||
| 835 | |||
| 826 | memcpy(iv, ctx->nonce, 4); | 836 | memcpy(iv, ctx->nonce, 4); |
| 827 | memcpy(iv + 4, req->iv, 8); | 837 | memcpy(iv + 4, req->iv, 8); |
| 828 | 838 | ||
| 839 | sg_init_table(rctx->src, 3); | ||
| 840 | sg_set_buf(rctx->src, iv + 12, req->assoclen - 8); | ||
| 841 | sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen); | ||
| 842 | if (sg != rctx->src + 1) | ||
| 843 | sg_chain(rctx->src, 2, sg); | ||
| 844 | |||
| 845 | if (req->src != req->dst) { | ||
| 846 | sg_init_table(rctx->dst, 3); | ||
| 847 | sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8); | ||
| 848 | sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen); | ||
| 849 | if (sg != rctx->dst + 1) | ||
| 850 | sg_chain(rctx->dst, 2, sg); | ||
| 851 | } | ||
| 852 | |||
| 829 | aead_request_set_tfm(subreq, child); | 853 | aead_request_set_tfm(subreq, child); |
| 830 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, | 854 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, |
| 831 | req->base.data); | 855 | req->base.data); |
| 832 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); | 856 | aead_request_set_crypt(subreq, rctx->src, |
| 833 | aead_request_set_ad(subreq, req->assoclen); | 857 | req->src == req->dst ? rctx->src : rctx->dst, |
| 858 | req->cryptlen, iv); | ||
| 859 | aead_request_set_ad(subreq, req->assoclen - 8); | ||
| 834 | 860 | ||
| 835 | return subreq; | 861 | return subreq; |
| 836 | } | 862 | } |
| 837 | 863 | ||
| 838 | static int crypto_rfc4106_encrypt(struct aead_request *req) | 864 | static int crypto_rfc4106_encrypt(struct aead_request *req) |
| 839 | { | 865 | { |
| 866 | if (req->assoclen != 16 && req->assoclen != 20) | ||
| 867 | return -EINVAL; | ||
| 868 | |||
| 840 | req = crypto_rfc4106_crypt(req); | 869 | req = crypto_rfc4106_crypt(req); |
| 841 | 870 | ||
| 842 | return crypto_aead_encrypt(req); | 871 | return crypto_aead_encrypt(req); |
| @@ -844,6 +873,9 @@ static int crypto_rfc4106_encrypt(struct aead_request *req) | |||
| 844 | 873 | ||
| 845 | static int crypto_rfc4106_decrypt(struct aead_request *req) | 874 | static int crypto_rfc4106_decrypt(struct aead_request *req) |
| 846 | { | 875 | { |
| 876 | if (req->assoclen != 16 && req->assoclen != 20) | ||
| 877 | return -EINVAL; | ||
| 878 | |||
| 847 | req = crypto_rfc4106_crypt(req); | 879 | req = crypto_rfc4106_crypt(req); |
| 848 | 880 | ||
| 849 | return crypto_aead_decrypt(req); | 881 | return crypto_aead_decrypt(req); |
| @@ -867,9 +899,9 @@ static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm) | |||
| 867 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 899 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
| 868 | crypto_aead_set_reqsize( | 900 | crypto_aead_set_reqsize( |
| 869 | tfm, | 901 | tfm, |
| 870 | sizeof(struct aead_request) + | 902 | sizeof(struct crypto_rfc4106_req_ctx) + |
| 871 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + | 903 | ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + |
| 872 | align + 12); | 904 | align + 24); |
| 873 | 905 | ||
| 874 | return 0; | 906 | return 0; |
| 875 | } | 907 | } |
| @@ -881,6 +913,12 @@ static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm) | |||
| 881 | crypto_free_aead(ctx->child); | 913 | crypto_free_aead(ctx->child); |
| 882 | } | 914 | } |
| 883 | 915 | ||
| 916 | static void crypto_rfc4106_free(struct aead_instance *inst) | ||
| 917 | { | ||
| 918 | crypto_drop_aead(aead_instance_ctx(inst)); | ||
| 919 | kfree(inst); | ||
| 920 | } | ||
| 921 | |||
| 884 | static int crypto_rfc4106_create(struct crypto_template *tmpl, | 922 | static int crypto_rfc4106_create(struct crypto_template *tmpl, |
| 885 | struct rtattr **tb) | 923 | struct rtattr **tb) |
| 886 | { | 924 | { |
| @@ -934,7 +972,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, | |||
| 934 | CRYPTO_MAX_ALG_NAME) | 972 | CRYPTO_MAX_ALG_NAME) |
| 935 | goto out_drop_alg; | 973 | goto out_drop_alg; |
| 936 | 974 | ||
| 937 | inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC; | 975 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
| 938 | inst->alg.base.cra_priority = alg->base.cra_priority; | 976 | inst->alg.base.cra_priority = alg->base.cra_priority; |
| 939 | inst->alg.base.cra_blocksize = 1; | 977 | inst->alg.base.cra_blocksize = 1; |
| 940 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; | 978 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask; |
| @@ -952,6 +990,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, | |||
| 952 | inst->alg.encrypt = crypto_rfc4106_encrypt; | 990 | inst->alg.encrypt = crypto_rfc4106_encrypt; |
| 953 | inst->alg.decrypt = crypto_rfc4106_decrypt; | 991 | inst->alg.decrypt = crypto_rfc4106_decrypt; |
| 954 | 992 | ||
| 993 | inst->free = crypto_rfc4106_free; | ||
| 994 | |||
| 955 | err = aead_register_instance(tmpl, inst); | 995 | err = aead_register_instance(tmpl, inst); |
| 956 | if (err) | 996 | if (err) |
| 957 | goto out_drop_alg; | 997 | goto out_drop_alg; |
| @@ -966,16 +1006,9 @@ out_free_inst: | |||
| 966 | goto out; | 1006 | goto out; |
| 967 | } | 1007 | } |
| 968 | 1008 | ||
| 969 | static void crypto_rfc4106_free(struct crypto_instance *inst) | ||
| 970 | { | ||
| 971 | crypto_drop_aead(crypto_instance_ctx(inst)); | ||
| 972 | kfree(aead_instance(inst)); | ||
| 973 | } | ||
| 974 | |||
| 975 | static struct crypto_template crypto_rfc4106_tmpl = { | 1009 | static struct crypto_template crypto_rfc4106_tmpl = { |
| 976 | .name = "rfc4106", | 1010 | .name = "rfc4106", |
| 977 | .create = crypto_rfc4106_create, | 1011 | .create = crypto_rfc4106_create, |
| 978 | .free = crypto_rfc4106_free, | ||
| 979 | .module = THIS_MODULE, | 1012 | .module = THIS_MODULE, |
| 980 | }; | 1013 | }; |
| 981 | 1014 | ||
| @@ -1114,6 +1147,15 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) | |||
| 1114 | crypto_put_default_null_skcipher(); | 1147 | crypto_put_default_null_skcipher(); |
| 1115 | } | 1148 | } |
| 1116 | 1149 | ||
| 1150 | static void crypto_rfc4543_free(struct aead_instance *inst) | ||
| 1151 | { | ||
| 1152 | struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst); | ||
| 1153 | |||
| 1154 | crypto_drop_aead(&ctx->aead); | ||
| 1155 | |||
| 1156 | kfree(inst); | ||
| 1157 | } | ||
| 1158 | |||
| 1117 | static int crypto_rfc4543_create(struct crypto_template *tmpl, | 1159 | static int crypto_rfc4543_create(struct crypto_template *tmpl, |
| 1118 | struct rtattr **tb) | 1160 | struct rtattr **tb) |
| 1119 | { | 1161 | { |
| @@ -1187,6 +1229,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, | |||
| 1187 | inst->alg.encrypt = crypto_rfc4543_encrypt; | 1229 | inst->alg.encrypt = crypto_rfc4543_encrypt; |
| 1188 | inst->alg.decrypt = crypto_rfc4543_decrypt; | 1230 | inst->alg.decrypt = crypto_rfc4543_decrypt; |
| 1189 | 1231 | ||
| 1232 | inst->free = crypto_rfc4543_free, | ||
| 1233 | |||
| 1190 | err = aead_register_instance(tmpl, inst); | 1234 | err = aead_register_instance(tmpl, inst); |
| 1191 | if (err) | 1235 | if (err) |
| 1192 | goto out_drop_alg; | 1236 | goto out_drop_alg; |
| @@ -1201,19 +1245,9 @@ out_free_inst: | |||
| 1201 | goto out; | 1245 | goto out; |
| 1202 | } | 1246 | } |
| 1203 | 1247 | ||
| 1204 | static void crypto_rfc4543_free(struct crypto_instance *inst) | ||
| 1205 | { | ||
| 1206 | struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
| 1207 | |||
| 1208 | crypto_drop_aead(&ctx->aead); | ||
| 1209 | |||
| 1210 | kfree(aead_instance(inst)); | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | static struct crypto_template crypto_rfc4543_tmpl = { | 1248 | static struct crypto_template crypto_rfc4543_tmpl = { |
| 1214 | .name = "rfc4543", | 1249 | .name = "rfc4543", |
| 1215 | .create = crypto_rfc4543_create, | 1250 | .create = crypto_rfc4543_create, |
| 1216 | .free = crypto_rfc4543_free, | ||
| 1217 | .module = THIS_MODULE, | 1251 | .module = THIS_MODULE, |
| 1218 | }; | 1252 | }; |
| 1219 | 1253 | ||
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index b32d834144cd..ceea83d13168 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c | |||
| @@ -79,7 +79,7 @@ int jent_fips_enabled(void) | |||
| 79 | 79 | ||
| 80 | void jent_panic(char *s) | 80 | void jent_panic(char *s) |
| 81 | { | 81 | { |
| 82 | panic(s); | 82 | panic("%s", s); |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | void jent_memcpy(void *dest, const void *src, unsigned int n) | 85 | void jent_memcpy(void *dest, const void *src, unsigned int n) |
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 45e7d5155672..ee9cfb99fe25 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c | |||
| @@ -274,11 +274,16 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, | |||
| 274 | u32 type, u32 mask) | 274 | u32 type, u32 mask) |
| 275 | { | 275 | { |
| 276 | struct pcrypt_instance_ctx *ctx; | 276 | struct pcrypt_instance_ctx *ctx; |
| 277 | struct crypto_attr_type *algt; | ||
| 277 | struct aead_instance *inst; | 278 | struct aead_instance *inst; |
| 278 | struct aead_alg *alg; | 279 | struct aead_alg *alg; |
| 279 | const char *name; | 280 | const char *name; |
| 280 | int err; | 281 | int err; |
| 281 | 282 | ||
| 283 | algt = crypto_get_attr_type(tb); | ||
| 284 | if (IS_ERR(algt)) | ||
| 285 | return PTR_ERR(algt); | ||
| 286 | |||
| 282 | name = crypto_attr_alg_name(tb[1]); | 287 | name = crypto_attr_alg_name(tb[1]); |
| 283 | if (IS_ERR(name)) | 288 | if (IS_ERR(name)) |
| 284 | return PTR_ERR(name); | 289 | return PTR_ERR(name); |
| @@ -299,6 +304,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, | |||
| 299 | if (err) | 304 | if (err) |
| 300 | goto out_drop_aead; | 305 | goto out_drop_aead; |
| 301 | 306 | ||
| 307 | inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; | ||
| 308 | |||
| 302 | inst->alg.ivsize = crypto_aead_alg_ivsize(alg); | 309 | inst->alg.ivsize = crypto_aead_alg_ivsize(alg); |
| 303 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); | 310 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); |
| 304 | 311 | ||
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index 387b5c887a80..2df9835dfbc0 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c | |||
| @@ -13,31 +13,11 @@ | |||
| 13 | 13 | ||
| 14 | #include <crypto/algapi.h> | 14 | #include <crypto/algapi.h> |
| 15 | #include <crypto/internal/hash.h> | 15 | #include <crypto/internal/hash.h> |
| 16 | #include <crypto/poly1305.h> | ||
| 16 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
| 17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 19 | 20 | ||
| 20 | #define POLY1305_BLOCK_SIZE 16 | ||
| 21 | #define POLY1305_KEY_SIZE 32 | ||
| 22 | #define POLY1305_DIGEST_SIZE 16 | ||
| 23 | |||
| 24 | struct poly1305_desc_ctx { | ||
| 25 | /* key */ | ||
| 26 | u32 r[5]; | ||
| 27 | /* finalize key */ | ||
| 28 | u32 s[4]; | ||
| 29 | /* accumulator */ | ||
| 30 | u32 h[5]; | ||
| 31 | /* partial buffer */ | ||
| 32 | u8 buf[POLY1305_BLOCK_SIZE]; | ||
| 33 | /* bytes used in partial buffer */ | ||
| 34 | unsigned int buflen; | ||
| 35 | /* r key has been set */ | ||
| 36 | bool rset; | ||
| 37 | /* s key has been set */ | ||
| 38 | bool sset; | ||
| 39 | }; | ||
| 40 | |||
| 41 | static inline u64 mlt(u64 a, u64 b) | 21 | static inline u64 mlt(u64 a, u64 b) |
| 42 | { | 22 | { |
| 43 | return a * b; | 23 | return a * b; |
| @@ -58,7 +38,7 @@ static inline u32 le32_to_cpuvp(const void *p) | |||
| 58 | return le32_to_cpup(p); | 38 | return le32_to_cpup(p); |
| 59 | } | 39 | } |
| 60 | 40 | ||
| 61 | static int poly1305_init(struct shash_desc *desc) | 41 | int crypto_poly1305_init(struct shash_desc *desc) |
| 62 | { | 42 | { |
| 63 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | 43 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); |
| 64 | 44 | ||
| @@ -69,8 +49,9 @@ static int poly1305_init(struct shash_desc *desc) | |||
| 69 | 49 | ||
| 70 | return 0; | 50 | return 0; |
| 71 | } | 51 | } |
| 52 | EXPORT_SYMBOL_GPL(crypto_poly1305_init); | ||
| 72 | 53 | ||
| 73 | static int poly1305_setkey(struct crypto_shash *tfm, | 54 | int crypto_poly1305_setkey(struct crypto_shash *tfm, |
| 74 | const u8 *key, unsigned int keylen) | 55 | const u8 *key, unsigned int keylen) |
| 75 | { | 56 | { |
| 76 | /* Poly1305 requires a unique key for each tag, which implies that | 57 | /* Poly1305 requires a unique key for each tag, which implies that |
| @@ -79,6 +60,7 @@ static int poly1305_setkey(struct crypto_shash *tfm, | |||
| 79 | * the update() call. */ | 60 | * the update() call. */ |
| 80 | return -ENOTSUPP; | 61 | return -ENOTSUPP; |
| 81 | } | 62 | } |
| 63 | EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); | ||
| 82 | 64 | ||
| 83 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) | 65 | static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) |
| 84 | { | 66 | { |
| @@ -98,16 +80,10 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) | |||
| 98 | dctx->s[3] = le32_to_cpuvp(key + 12); | 80 | dctx->s[3] = le32_to_cpuvp(key + 12); |
| 99 | } | 81 | } |
| 100 | 82 | ||
| 101 | static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | 83 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
| 102 | const u8 *src, unsigned int srclen, | 84 | const u8 *src, unsigned int srclen) |
| 103 | u32 hibit) | ||
| 104 | { | 85 | { |
| 105 | u32 r0, r1, r2, r3, r4; | 86 | if (!dctx->sset) { |
| 106 | u32 s1, s2, s3, s4; | ||
| 107 | u32 h0, h1, h2, h3, h4; | ||
| 108 | u64 d0, d1, d2, d3, d4; | ||
| 109 | |||
| 110 | if (unlikely(!dctx->sset)) { | ||
| 111 | if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { | 87 | if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { |
| 112 | poly1305_setrkey(dctx, src); | 88 | poly1305_setrkey(dctx, src); |
| 113 | src += POLY1305_BLOCK_SIZE; | 89 | src += POLY1305_BLOCK_SIZE; |
| @@ -121,6 +97,25 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | |||
| 121 | dctx->sset = true; | 97 | dctx->sset = true; |
| 122 | } | 98 | } |
| 123 | } | 99 | } |
| 100 | return srclen; | ||
| 101 | } | ||
| 102 | EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey); | ||
| 103 | |||
| 104 | static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | ||
| 105 | const u8 *src, unsigned int srclen, | ||
| 106 | u32 hibit) | ||
| 107 | { | ||
| 108 | u32 r0, r1, r2, r3, r4; | ||
| 109 | u32 s1, s2, s3, s4; | ||
| 110 | u32 h0, h1, h2, h3, h4; | ||
| 111 | u64 d0, d1, d2, d3, d4; | ||
| 112 | unsigned int datalen; | ||
| 113 | |||
| 114 | if (unlikely(!dctx->sset)) { | ||
| 115 | datalen = crypto_poly1305_setdesckey(dctx, src, srclen); | ||
| 116 | src += srclen - datalen; | ||
| 117 | srclen = datalen; | ||
| 118 | } | ||
| 124 | 119 | ||
| 125 | r0 = dctx->r[0]; | 120 | r0 = dctx->r[0]; |
| 126 | r1 = dctx->r[1]; | 121 | r1 = dctx->r[1]; |
| @@ -181,7 +176,7 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, | |||
| 181 | return srclen; | 176 | return srclen; |
| 182 | } | 177 | } |
| 183 | 178 | ||
| 184 | static int poly1305_update(struct shash_desc *desc, | 179 | int crypto_poly1305_update(struct shash_desc *desc, |
| 185 | const u8 *src, unsigned int srclen) | 180 | const u8 *src, unsigned int srclen) |
| 186 | { | 181 | { |
| 187 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | 182 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); |
| @@ -214,8 +209,9 @@ static int poly1305_update(struct shash_desc *desc, | |||
| 214 | 209 | ||
| 215 | return 0; | 210 | return 0; |
| 216 | } | 211 | } |
| 212 | EXPORT_SYMBOL_GPL(crypto_poly1305_update); | ||
| 217 | 213 | ||
| 218 | static int poly1305_final(struct shash_desc *desc, u8 *dst) | 214 | int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) |
| 219 | { | 215 | { |
| 220 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); | 216 | struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); |
| 221 | __le32 *mac = (__le32 *)dst; | 217 | __le32 *mac = (__le32 *)dst; |
| @@ -282,13 +278,14 @@ static int poly1305_final(struct shash_desc *desc, u8 *dst) | |||
| 282 | 278 | ||
| 283 | return 0; | 279 | return 0; |
| 284 | } | 280 | } |
| 281 | EXPORT_SYMBOL_GPL(crypto_poly1305_final); | ||
| 285 | 282 | ||
| 286 | static struct shash_alg poly1305_alg = { | 283 | static struct shash_alg poly1305_alg = { |
| 287 | .digestsize = POLY1305_DIGEST_SIZE, | 284 | .digestsize = POLY1305_DIGEST_SIZE, |
| 288 | .init = poly1305_init, | 285 | .init = crypto_poly1305_init, |
| 289 | .update = poly1305_update, | 286 | .update = crypto_poly1305_update, |
| 290 | .final = poly1305_final, | 287 | .final = crypto_poly1305_final, |
| 291 | .setkey = poly1305_setkey, | 288 | .setkey = crypto_poly1305_setkey, |
| 292 | .descsize = sizeof(struct poly1305_desc_ctx), | 289 | .descsize = sizeof(struct poly1305_desc_ctx), |
| 293 | .base = { | 290 | .base = { |
| 294 | .cra_name = "poly1305", | 291 | .cra_name = "poly1305", |
diff --git a/crypto/rsa.c b/crypto/rsa.c index 752af0656f2e..466003e1a8cf 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c | |||
| @@ -267,12 +267,36 @@ err_free_m: | |||
| 267 | return ret; | 267 | return ret; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | static int rsa_check_key_length(unsigned int len) | ||
| 271 | { | ||
| 272 | switch (len) { | ||
| 273 | case 512: | ||
| 274 | case 1024: | ||
| 275 | case 1536: | ||
| 276 | case 2048: | ||
| 277 | case 3072: | ||
| 278 | case 4096: | ||
| 279 | return 0; | ||
| 280 | } | ||
| 281 | |||
| 282 | return -EINVAL; | ||
| 283 | } | ||
| 284 | |||
| 270 | static int rsa_setkey(struct crypto_akcipher *tfm, const void *key, | 285 | static int rsa_setkey(struct crypto_akcipher *tfm, const void *key, |
| 271 | unsigned int keylen) | 286 | unsigned int keylen) |
| 272 | { | 287 | { |
| 273 | struct rsa_key *pkey = akcipher_tfm_ctx(tfm); | 288 | struct rsa_key *pkey = akcipher_tfm_ctx(tfm); |
| 289 | int ret; | ||
| 274 | 290 | ||
| 275 | return rsa_parse_key(pkey, key, keylen); | 291 | ret = rsa_parse_key(pkey, key, keylen); |
| 292 | if (ret) | ||
| 293 | return ret; | ||
| 294 | |||
| 295 | if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { | ||
| 296 | rsa_free_key(pkey); | ||
| 297 | ret = -EINVAL; | ||
| 298 | } | ||
| 299 | return ret; | ||
| 276 | } | 300 | } |
| 277 | 301 | ||
| 278 | static void rsa_exit_tfm(struct crypto_akcipher *tfm) | 302 | static void rsa_exit_tfm(struct crypto_akcipher *tfm) |
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index 3e8e0a9e5a8e..8d96ce969b44 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c | |||
| @@ -28,7 +28,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, | |||
| 28 | return -ENOMEM; | 28 | return -ENOMEM; |
| 29 | 29 | ||
| 30 | /* In FIPS mode only allow key size 2K & 3K */ | 30 | /* In FIPS mode only allow key size 2K & 3K */ |
| 31 | if (fips_enabled && (mpi_get_size(key->n) != 256 || | 31 | if (fips_enabled && (mpi_get_size(key->n) != 256 && |
| 32 | mpi_get_size(key->n) != 384)) { | 32 | mpi_get_size(key->n) != 384)) { |
| 33 | pr_err("RSA: key size not allowed in FIPS mode\n"); | 33 | pr_err("RSA: key size not allowed in FIPS mode\n"); |
| 34 | mpi_free(key->n); | 34 | mpi_free(key->n); |
| @@ -62,7 +62,7 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag, | |||
| 62 | return -ENOMEM; | 62 | return -ENOMEM; |
| 63 | 63 | ||
| 64 | /* In FIPS mode only allow key size 2K & 3K */ | 64 | /* In FIPS mode only allow key size 2K & 3K */ |
| 65 | if (fips_enabled && (mpi_get_size(key->d) != 256 || | 65 | if (fips_enabled && (mpi_get_size(key->d) != 256 && |
| 66 | mpi_get_size(key->d) != 384)) { | 66 | mpi_get_size(key->d) != 384)) { |
| 67 | pr_err("RSA: key size not allowed in FIPS mode\n"); | 67 | pr_err("RSA: key size not allowed in FIPS mode\n"); |
| 68 | mpi_free(key->d); | 68 | mpi_free(key->d); |
diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 122c56e3491b..15a749a5cab7 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | #include <crypto/internal/geniv.h> | 16 | #include <crypto/internal/geniv.h> |
| 17 | #include <crypto/internal/skcipher.h> | 17 | #include <crypto/internal/skcipher.h> |
| 18 | #include <crypto/null.h> | ||
| 19 | #include <crypto/rng.h> | 18 | #include <crypto/rng.h> |
| 20 | #include <crypto/scatterwalk.h> | 19 | #include <crypto/scatterwalk.h> |
| 21 | #include <linux/err.h> | 20 | #include <linux/err.h> |
| @@ -26,23 +25,11 @@ | |||
| 26 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
| 27 | #include <linux/string.h> | 26 | #include <linux/string.h> |
| 28 | 27 | ||
| 29 | struct seqniv_request_ctx { | ||
| 30 | struct scatterlist dst[2]; | ||
| 31 | struct aead_request subreq; | ||
| 32 | }; | ||
| 33 | |||
| 34 | struct seqiv_ctx { | 28 | struct seqiv_ctx { |
| 35 | spinlock_t lock; | 29 | spinlock_t lock; |
| 36 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | 30 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); |
| 37 | }; | 31 | }; |
| 38 | 32 | ||
| 39 | struct seqiv_aead_ctx { | ||
| 40 | /* aead_geniv_ctx must be first the element */ | ||
| 41 | struct aead_geniv_ctx geniv; | ||
| 42 | struct crypto_blkcipher *null; | ||
| 43 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | ||
| 44 | }; | ||
| 45 | |||
| 46 | static void seqiv_free(struct crypto_instance *inst); | 33 | static void seqiv_free(struct crypto_instance *inst); |
| 47 | 34 | ||
| 48 | static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) | 35 | static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) |
| @@ -71,32 +58,6 @@ static void seqiv_complete(struct crypto_async_request *base, int err) | |||
| 71 | skcipher_givcrypt_complete(req, err); | 58 | skcipher_givcrypt_complete(req, err); |
| 72 | } | 59 | } |
| 73 | 60 | ||
| 74 | static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err) | ||
| 75 | { | ||
| 76 | struct aead_request *subreq = aead_givcrypt_reqctx(req); | ||
| 77 | struct crypto_aead *geniv; | ||
| 78 | |||
| 79 | if (err == -EINPROGRESS) | ||
| 80 | return; | ||
| 81 | |||
| 82 | if (err) | ||
| 83 | goto out; | ||
| 84 | |||
| 85 | geniv = aead_givcrypt_reqtfm(req); | ||
| 86 | memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv)); | ||
| 87 | |||
| 88 | out: | ||
| 89 | kfree(subreq->iv); | ||
| 90 | } | ||
| 91 | |||
| 92 | static void seqiv_aead_complete(struct crypto_async_request *base, int err) | ||
| 93 | { | ||
| 94 | struct aead_givcrypt_request *req = base->data; | ||
| 95 | |||
| 96 | seqiv_aead_complete2(req, err); | ||
| 97 | aead_givcrypt_complete(req, err); | ||
| 98 | } | ||
| 99 | |||
| 100 | static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) | 61 | static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) |
| 101 | { | 62 | { |
| 102 | struct aead_request *subreq = aead_request_ctx(req); | 63 | struct aead_request *subreq = aead_request_ctx(req); |
| @@ -124,50 +85,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, | |||
| 124 | aead_request_complete(req, err); | 85 | aead_request_complete(req, err); |
| 125 | } | 86 | } |
| 126 | 87 | ||
| 127 | static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err) | ||
| 128 | { | ||
| 129 | unsigned int ivsize = 8; | ||
| 130 | u8 data[20]; | ||
| 131 | |||
| 132 | if (err == -EINPROGRESS) | ||
| 133 | return; | ||
| 134 | |||
| 135 | /* Swap IV and ESP header back to correct order. */ | ||
| 136 | scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0); | ||
| 137 | scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1); | ||
| 138 | scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1); | ||
| 139 | } | ||
| 140 | |||
| 141 | static void seqniv_aead_encrypt_complete(struct crypto_async_request *base, | ||
| 142 | int err) | ||
| 143 | { | ||
| 144 | struct aead_request *req = base->data; | ||
| 145 | |||
| 146 | seqniv_aead_encrypt_complete2(req, err); | ||
| 147 | aead_request_complete(req, err); | ||
| 148 | } | ||
| 149 | |||
| 150 | static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err) | ||
| 151 | { | ||
| 152 | u8 data[4]; | ||
| 153 | |||
| 154 | if (err == -EINPROGRESS) | ||
| 155 | return; | ||
| 156 | |||
| 157 | /* Move ESP header back to correct location. */ | ||
| 158 | scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0); | ||
| 159 | scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1); | ||
| 160 | } | ||
| 161 | |||
| 162 | static void seqniv_aead_decrypt_complete(struct crypto_async_request *base, | ||
| 163 | int err) | ||
| 164 | { | ||
| 165 | struct aead_request *req = base->data; | ||
| 166 | |||
| 167 | seqniv_aead_decrypt_complete2(req, err); | ||
| 168 | aead_request_complete(req, err); | ||
| 169 | } | ||
| 170 | |||
| 171 | static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, | 88 | static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, |
| 172 | unsigned int ivsize) | 89 | unsigned int ivsize) |
| 173 | { | 90 | { |
| @@ -227,112 +144,10 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) | |||
| 227 | return err; | 144 | return err; |
| 228 | } | 145 | } |
| 229 | 146 | ||
| 230 | static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) | ||
| 231 | { | ||
| 232 | struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); | ||
| 233 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 234 | struct aead_request *areq = &req->areq; | ||
| 235 | struct aead_request *subreq = aead_givcrypt_reqctx(req); | ||
| 236 | crypto_completion_t compl; | ||
| 237 | void *data; | ||
| 238 | u8 *info; | ||
| 239 | unsigned int ivsize; | ||
| 240 | int err; | ||
| 241 | |||
| 242 | aead_request_set_tfm(subreq, aead_geniv_base(geniv)); | ||
| 243 | |||
| 244 | compl = areq->base.complete; | ||
| 245 | data = areq->base.data; | ||
| 246 | info = areq->iv; | ||
| 247 | |||
| 248 | ivsize = crypto_aead_ivsize(geniv); | ||
| 249 | |||
| 250 | if (unlikely(!IS_ALIGNED((unsigned long)info, | ||
| 251 | crypto_aead_alignmask(geniv) + 1))) { | ||
| 252 | info = kmalloc(ivsize, areq->base.flags & | ||
| 253 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | ||
| 254 | GFP_ATOMIC); | ||
| 255 | if (!info) | ||
| 256 | return -ENOMEM; | ||
| 257 | |||
| 258 | compl = seqiv_aead_complete; | ||
| 259 | data = req; | ||
| 260 | } | ||
| 261 | |||
| 262 | aead_request_set_callback(subreq, areq->base.flags, compl, data); | ||
| 263 | aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen, | ||
| 264 | info); | ||
| 265 | aead_request_set_assoc(subreq, areq->assoc, areq->assoclen); | ||
| 266 | |||
| 267 | seqiv_geniv(ctx, info, req->seq, ivsize); | ||
| 268 | memcpy(req->giv, info, ivsize); | ||
| 269 | |||
| 270 | err = crypto_aead_encrypt(subreq); | ||
| 271 | if (unlikely(info != areq->iv)) | ||
| 272 | seqiv_aead_complete2(req, err); | ||
| 273 | return err; | ||
| 274 | } | ||
| 275 | |||
| 276 | static int seqniv_aead_encrypt(struct aead_request *req) | ||
| 277 | { | ||
| 278 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
| 279 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 280 | struct seqniv_request_ctx *rctx = aead_request_ctx(req); | ||
| 281 | struct aead_request *subreq = &rctx->subreq; | ||
| 282 | struct scatterlist *dst; | ||
| 283 | crypto_completion_t compl; | ||
| 284 | void *data; | ||
| 285 | unsigned int ivsize = 8; | ||
| 286 | u8 buf[20] __attribute__ ((aligned(__alignof__(u32)))); | ||
| 287 | int err; | ||
| 288 | |||
| 289 | if (req->cryptlen < ivsize) | ||
| 290 | return -EINVAL; | ||
| 291 | |||
| 292 | /* ESP AD is at most 12 bytes (ESN). */ | ||
| 293 | if (req->assoclen > 12) | ||
| 294 | return -EINVAL; | ||
| 295 | |||
| 296 | aead_request_set_tfm(subreq, ctx->geniv.child); | ||
| 297 | |||
| 298 | compl = seqniv_aead_encrypt_complete; | ||
| 299 | data = req; | ||
| 300 | |||
| 301 | if (req->src != req->dst) { | ||
| 302 | struct blkcipher_desc desc = { | ||
| 303 | .tfm = ctx->null, | ||
| 304 | }; | ||
| 305 | |||
| 306 | err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, | ||
| 307 | req->assoclen + req->cryptlen); | ||
| 308 | if (err) | ||
| 309 | return err; | ||
| 310 | } | ||
| 311 | |||
| 312 | dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize); | ||
| 313 | |||
| 314 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
| 315 | aead_request_set_crypt(subreq, dst, dst, | ||
| 316 | req->cryptlen - ivsize, req->iv); | ||
| 317 | aead_request_set_ad(subreq, req->assoclen); | ||
| 318 | |||
| 319 | memcpy(buf, req->iv, ivsize); | ||
| 320 | crypto_xor(buf, ctx->salt, ivsize); | ||
| 321 | memcpy(req->iv, buf, ivsize); | ||
| 322 | |||
| 323 | /* Swap order of IV and ESP AD for ICV generation. */ | ||
| 324 | scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0); | ||
| 325 | scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1); | ||
| 326 | |||
| 327 | err = crypto_aead_encrypt(subreq); | ||
| 328 | seqniv_aead_encrypt_complete2(req, err); | ||
| 329 | return err; | ||
| 330 | } | ||
| 331 | |||
| 332 | static int seqiv_aead_encrypt(struct aead_request *req) | 147 | static int seqiv_aead_encrypt(struct aead_request *req) |
| 333 | { | 148 | { |
| 334 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | 149 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
| 335 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | 150 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); |
| 336 | struct aead_request *subreq = aead_request_ctx(req); | 151 | struct aead_request *subreq = aead_request_ctx(req); |
| 337 | crypto_completion_t compl; | 152 | crypto_completion_t compl; |
| 338 | void *data; | 153 | void *data; |
| @@ -343,7 +158,7 @@ static int seqiv_aead_encrypt(struct aead_request *req) | |||
| 343 | if (req->cryptlen < ivsize) | 158 | if (req->cryptlen < ivsize) |
| 344 | return -EINVAL; | 159 | return -EINVAL; |
| 345 | 160 | ||
| 346 | aead_request_set_tfm(subreq, ctx->geniv.child); | 161 | aead_request_set_tfm(subreq, ctx->child); |
| 347 | 162 | ||
| 348 | compl = req->base.complete; | 163 | compl = req->base.complete; |
| 349 | data = req->base.data; | 164 | data = req->base.data; |
| @@ -387,67 +202,10 @@ static int seqiv_aead_encrypt(struct aead_request *req) | |||
| 387 | return err; | 202 | return err; |
| 388 | } | 203 | } |
| 389 | 204 | ||
| 390 | static int seqniv_aead_decrypt(struct aead_request *req) | ||
| 391 | { | ||
| 392 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | ||
| 393 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 394 | struct seqniv_request_ctx *rctx = aead_request_ctx(req); | ||
| 395 | struct aead_request *subreq = &rctx->subreq; | ||
| 396 | struct scatterlist *dst; | ||
| 397 | crypto_completion_t compl; | ||
| 398 | void *data; | ||
| 399 | unsigned int ivsize = 8; | ||
| 400 | u8 buf[20]; | ||
| 401 | int err; | ||
| 402 | |||
| 403 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) | ||
| 404 | return -EINVAL; | ||
| 405 | |||
| 406 | aead_request_set_tfm(subreq, ctx->geniv.child); | ||
| 407 | |||
| 408 | compl = req->base.complete; | ||
| 409 | data = req->base.data; | ||
| 410 | |||
| 411 | if (req->assoclen > 12) | ||
| 412 | return -EINVAL; | ||
| 413 | else if (req->assoclen > 8) { | ||
| 414 | compl = seqniv_aead_decrypt_complete; | ||
| 415 | data = req; | ||
| 416 | } | ||
| 417 | |||
| 418 | if (req->src != req->dst) { | ||
| 419 | struct blkcipher_desc desc = { | ||
| 420 | .tfm = ctx->null, | ||
| 421 | }; | ||
| 422 | |||
| 423 | err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, | ||
| 424 | req->assoclen + req->cryptlen); | ||
| 425 | if (err) | ||
| 426 | return err; | ||
| 427 | } | ||
| 428 | |||
| 429 | /* Move ESP AD forward for ICV generation. */ | ||
| 430 | scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0); | ||
| 431 | memcpy(req->iv, buf + req->assoclen, ivsize); | ||
| 432 | scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1); | ||
| 433 | |||
| 434 | dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize); | ||
| 435 | |||
| 436 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
| 437 | aead_request_set_crypt(subreq, dst, dst, | ||
| 438 | req->cryptlen - ivsize, req->iv); | ||
| 439 | aead_request_set_ad(subreq, req->assoclen); | ||
| 440 | |||
| 441 | err = crypto_aead_decrypt(subreq); | ||
| 442 | if (req->assoclen > 8) | ||
| 443 | seqniv_aead_decrypt_complete2(req, err); | ||
| 444 | return err; | ||
| 445 | } | ||
| 446 | |||
| 447 | static int seqiv_aead_decrypt(struct aead_request *req) | 205 | static int seqiv_aead_decrypt(struct aead_request *req) |
| 448 | { | 206 | { |
| 449 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | 207 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
| 450 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | 208 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); |
| 451 | struct aead_request *subreq = aead_request_ctx(req); | 209 | struct aead_request *subreq = aead_request_ctx(req); |
| 452 | crypto_completion_t compl; | 210 | crypto_completion_t compl; |
| 453 | void *data; | 211 | void *data; |
| @@ -456,7 +214,7 @@ static int seqiv_aead_decrypt(struct aead_request *req) | |||
| 456 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) | 214 | if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) |
| 457 | return -EINVAL; | 215 | return -EINVAL; |
| 458 | 216 | ||
| 459 | aead_request_set_tfm(subreq, ctx->geniv.child); | 217 | aead_request_set_tfm(subreq, ctx->child); |
| 460 | 218 | ||
| 461 | compl = req->base.complete; | 219 | compl = req->base.complete; |
| 462 | data = req->base.data; | 220 | data = req->base.data; |
| @@ -467,9 +225,6 @@ static int seqiv_aead_decrypt(struct aead_request *req) | |||
| 467 | aead_request_set_ad(subreq, req->assoclen + ivsize); | 225 | aead_request_set_ad(subreq, req->assoclen + ivsize); |
| 468 | 226 | ||
| 469 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); | 227 | scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); |
| 470 | if (req->src != req->dst) | ||
| 471 | scatterwalk_map_and_copy(req->iv, req->dst, | ||
| 472 | req->assoclen, ivsize, 1); | ||
| 473 | 228 | ||
| 474 | return crypto_aead_decrypt(subreq); | 229 | return crypto_aead_decrypt(subreq); |
| 475 | } | 230 | } |
| @@ -495,85 +250,6 @@ static int seqiv_init(struct crypto_tfm *tfm) | |||
| 495 | return err ?: skcipher_geniv_init(tfm); | 250 | return err ?: skcipher_geniv_init(tfm); |
| 496 | } | 251 | } |
| 497 | 252 | ||
| 498 | static int seqiv_old_aead_init(struct crypto_tfm *tfm) | ||
| 499 | { | ||
| 500 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
| 501 | struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 502 | int err; | ||
| 503 | |||
| 504 | spin_lock_init(&ctx->lock); | ||
| 505 | |||
| 506 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | ||
| 507 | sizeof(struct aead_request)); | ||
| 508 | err = 0; | ||
| 509 | if (!crypto_get_default_rng()) { | ||
| 510 | geniv->givencrypt = seqiv_aead_givencrypt; | ||
| 511 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
| 512 | crypto_aead_ivsize(geniv)); | ||
| 513 | crypto_put_default_rng(); | ||
| 514 | } | ||
| 515 | |||
| 516 | return err ?: aead_geniv_init(tfm); | ||
| 517 | } | ||
| 518 | |||
| 519 | static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize) | ||
| 520 | { | ||
| 521 | struct crypto_aead *geniv = __crypto_aead_cast(tfm); | ||
| 522 | struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); | ||
| 523 | int err; | ||
| 524 | |||
| 525 | spin_lock_init(&ctx->geniv.lock); | ||
| 526 | |||
| 527 | crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); | ||
| 528 | |||
| 529 | err = crypto_get_default_rng(); | ||
| 530 | if (err) | ||
| 531 | goto out; | ||
| 532 | |||
| 533 | err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, | ||
| 534 | crypto_aead_ivsize(geniv)); | ||
| 535 | crypto_put_default_rng(); | ||
| 536 | if (err) | ||
| 537 | goto out; | ||
| 538 | |||
| 539 | ctx->null = crypto_get_default_null_skcipher(); | ||
| 540 | err = PTR_ERR(ctx->null); | ||
| 541 | if (IS_ERR(ctx->null)) | ||
| 542 | goto out; | ||
| 543 | |||
| 544 | err = aead_geniv_init(tfm); | ||
| 545 | if (err) | ||
| 546 | goto drop_null; | ||
| 547 | |||
| 548 | ctx->geniv.child = geniv->child; | ||
| 549 | geniv->child = geniv; | ||
| 550 | |||
| 551 | out: | ||
| 552 | return err; | ||
| 553 | |||
| 554 | drop_null: | ||
| 555 | crypto_put_default_null_skcipher(); | ||
| 556 | goto out; | ||
| 557 | } | ||
| 558 | |||
| 559 | static int seqiv_aead_init(struct crypto_tfm *tfm) | ||
| 560 | { | ||
| 561 | return seqiv_aead_init_common(tfm, sizeof(struct aead_request)); | ||
| 562 | } | ||
| 563 | |||
| 564 | static int seqniv_aead_init(struct crypto_tfm *tfm) | ||
| 565 | { | ||
| 566 | return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx)); | ||
| 567 | } | ||
| 568 | |||
| 569 | static void seqiv_aead_exit(struct crypto_tfm *tfm) | ||
| 570 | { | ||
| 571 | struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 572 | |||
| 573 | crypto_free_aead(ctx->geniv.child); | ||
| 574 | crypto_put_default_null_skcipher(); | ||
| 575 | } | ||
| 576 | |||
| 577 | static int seqiv_ablkcipher_create(struct crypto_template *tmpl, | 253 | static int seqiv_ablkcipher_create(struct crypto_template *tmpl, |
| 578 | struct rtattr **tb) | 254 | struct rtattr **tb) |
| 579 | { | 255 | { |
| @@ -609,33 +285,6 @@ free_inst: | |||
| 609 | goto out; | 285 | goto out; |
| 610 | } | 286 | } |
| 611 | 287 | ||
| 612 | static int seqiv_old_aead_create(struct crypto_template *tmpl, | ||
| 613 | struct aead_instance *aead) | ||
| 614 | { | ||
| 615 | struct crypto_instance *inst = aead_crypto_instance(aead); | ||
| 616 | int err = -EINVAL; | ||
| 617 | |||
| 618 | if (inst->alg.cra_aead.ivsize < sizeof(u64)) | ||
| 619 | goto free_inst; | ||
| 620 | |||
| 621 | inst->alg.cra_init = seqiv_old_aead_init; | ||
| 622 | inst->alg.cra_exit = aead_geniv_exit; | ||
| 623 | |||
| 624 | inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; | ||
| 625 | inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); | ||
| 626 | |||
| 627 | err = crypto_register_instance(tmpl, inst); | ||
| 628 | if (err) | ||
| 629 | goto free_inst; | ||
| 630 | |||
| 631 | out: | ||
| 632 | return err; | ||
| 633 | |||
| 634 | free_inst: | ||
| 635 | aead_geniv_free(aead); | ||
| 636 | goto out; | ||
| 637 | } | ||
| 638 | |||
| 639 | static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) | 288 | static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) |
| 640 | { | 289 | { |
| 641 | struct aead_instance *inst; | 290 | struct aead_instance *inst; |
| @@ -650,15 +299,9 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 650 | 299 | ||
| 651 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | 300 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; |
| 652 | 301 | ||
| 653 | if (inst->alg.base.cra_aead.encrypt) | ||
| 654 | return seqiv_old_aead_create(tmpl, inst); | ||
| 655 | |||
| 656 | spawn = aead_instance_ctx(inst); | 302 | spawn = aead_instance_ctx(inst); |
| 657 | alg = crypto_spawn_aead_alg(spawn); | 303 | alg = crypto_spawn_aead_alg(spawn); |
| 658 | 304 | ||
| 659 | if (alg->base.cra_aead.encrypt) | ||
| 660 | goto done; | ||
| 661 | |||
| 662 | err = -EINVAL; | 305 | err = -EINVAL; |
| 663 | if (inst->alg.ivsize != sizeof(u64)) | 306 | if (inst->alg.ivsize != sizeof(u64)) |
| 664 | goto free_inst; | 307 | goto free_inst; |
| @@ -666,13 +309,12 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 666 | inst->alg.encrypt = seqiv_aead_encrypt; | 309 | inst->alg.encrypt = seqiv_aead_encrypt; |
| 667 | inst->alg.decrypt = seqiv_aead_decrypt; | 310 | inst->alg.decrypt = seqiv_aead_decrypt; |
| 668 | 311 | ||
| 669 | inst->alg.base.cra_init = seqiv_aead_init; | 312 | inst->alg.init = aead_init_geniv; |
| 670 | inst->alg.base.cra_exit = seqiv_aead_exit; | 313 | inst->alg.exit = aead_exit_geniv; |
| 671 | 314 | ||
| 672 | inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); | 315 | inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); |
| 673 | inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; | 316 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; |
| 674 | 317 | ||
| 675 | done: | ||
| 676 | err = aead_register_instance(tmpl, inst); | 318 | err = aead_register_instance(tmpl, inst); |
| 677 | if (err) | 319 | if (err) |
| 678 | goto free_inst; | 320 | goto free_inst; |
| @@ -702,51 +344,6 @@ static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 702 | return err; | 344 | return err; |
| 703 | } | 345 | } |
| 704 | 346 | ||
| 705 | static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
| 706 | { | ||
| 707 | struct aead_instance *inst; | ||
| 708 | struct crypto_aead_spawn *spawn; | ||
| 709 | struct aead_alg *alg; | ||
| 710 | int err; | ||
| 711 | |||
| 712 | inst = aead_geniv_alloc(tmpl, tb, 0, 0); | ||
| 713 | err = PTR_ERR(inst); | ||
| 714 | if (IS_ERR(inst)) | ||
| 715 | goto out; | ||
| 716 | |||
| 717 | spawn = aead_instance_ctx(inst); | ||
| 718 | alg = crypto_spawn_aead_alg(spawn); | ||
| 719 | |||
| 720 | if (alg->base.cra_aead.encrypt) | ||
| 721 | goto done; | ||
| 722 | |||
| 723 | err = -EINVAL; | ||
| 724 | if (inst->alg.ivsize != sizeof(u64)) | ||
| 725 | goto free_inst; | ||
| 726 | |||
| 727 | inst->alg.encrypt = seqniv_aead_encrypt; | ||
| 728 | inst->alg.decrypt = seqniv_aead_decrypt; | ||
| 729 | |||
| 730 | inst->alg.base.cra_init = seqniv_aead_init; | ||
| 731 | inst->alg.base.cra_exit = seqiv_aead_exit; | ||
| 732 | |||
| 733 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | ||
| 734 | inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); | ||
| 735 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; | ||
| 736 | |||
| 737 | done: | ||
| 738 | err = aead_register_instance(tmpl, inst); | ||
| 739 | if (err) | ||
| 740 | goto free_inst; | ||
| 741 | |||
| 742 | out: | ||
| 743 | return err; | ||
| 744 | |||
| 745 | free_inst: | ||
| 746 | aead_geniv_free(inst); | ||
| 747 | goto out; | ||
| 748 | } | ||
| 749 | |||
| 750 | static void seqiv_free(struct crypto_instance *inst) | 347 | static void seqiv_free(struct crypto_instance *inst) |
| 751 | { | 348 | { |
| 752 | if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) | 349 | if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) |
| @@ -762,36 +359,13 @@ static struct crypto_template seqiv_tmpl = { | |||
| 762 | .module = THIS_MODULE, | 359 | .module = THIS_MODULE, |
| 763 | }; | 360 | }; |
| 764 | 361 | ||
| 765 | static struct crypto_template seqniv_tmpl = { | ||
| 766 | .name = "seqniv", | ||
| 767 | .create = seqniv_create, | ||
| 768 | .free = seqiv_free, | ||
| 769 | .module = THIS_MODULE, | ||
| 770 | }; | ||
| 771 | |||
| 772 | static int __init seqiv_module_init(void) | 362 | static int __init seqiv_module_init(void) |
| 773 | { | 363 | { |
| 774 | int err; | 364 | return crypto_register_template(&seqiv_tmpl); |
| 775 | |||
| 776 | err = crypto_register_template(&seqiv_tmpl); | ||
| 777 | if (err) | ||
| 778 | goto out; | ||
| 779 | |||
| 780 | err = crypto_register_template(&seqniv_tmpl); | ||
| 781 | if (err) | ||
| 782 | goto out_undo_niv; | ||
| 783 | |||
| 784 | out: | ||
| 785 | return err; | ||
| 786 | |||
| 787 | out_undo_niv: | ||
| 788 | crypto_unregister_template(&seqiv_tmpl); | ||
| 789 | goto out; | ||
| 790 | } | 365 | } |
| 791 | 366 | ||
| 792 | static void __exit seqiv_module_exit(void) | 367 | static void __exit seqiv_module_exit(void) |
| 793 | { | 368 | { |
| 794 | crypto_unregister_template(&seqniv_tmpl); | ||
| 795 | crypto_unregister_template(&seqiv_tmpl); | 369 | crypto_unregister_template(&seqiv_tmpl); |
| 796 | } | 370 | } |
| 797 | 371 | ||
| @@ -801,4 +375,3 @@ module_exit(seqiv_module_exit); | |||
| 801 | MODULE_LICENSE("GPL"); | 375 | MODULE_LICENSE("GPL"); |
| 802 | MODULE_DESCRIPTION("Sequence Number IV Generator"); | 376 | MODULE_DESCRIPTION("Sequence Number IV Generator"); |
| 803 | MODULE_ALIAS_CRYPTO("seqiv"); | 377 | MODULE_ALIAS_CRYPTO("seqiv"); |
| 804 | MODULE_ALIAS_CRYPTO("seqniv"); | ||
diff --git a/crypto/skcipher.c b/crypto/skcipher.c new file mode 100644 index 000000000000..dd5fc1bf6447 --- /dev/null +++ b/crypto/skcipher.c | |||
| @@ -0,0 +1,245 @@ | |||
| 1 | /* | ||
| 2 | * Symmetric key cipher operations. | ||
| 3 | * | ||
| 4 | * Generic encrypt/decrypt wrapper for ciphers, handles operations across | ||
| 5 | * multiple page boundaries by using temporary blocks. In user context, | ||
| 6 | * the kernel is given a chance to schedule us once per page. | ||
| 7 | * | ||
| 8 | * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify it | ||
| 11 | * under the terms of the GNU General Public License as published by the Free | ||
| 12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 13 | * any later version. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <crypto/internal/skcipher.h> | ||
| 18 | #include <linux/bug.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | |||
| 21 | #include "internal.h" | ||
| 22 | |||
| 23 | static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) | ||
| 24 | { | ||
| 25 | if (alg->cra_type == &crypto_blkcipher_type) | ||
| 26 | return sizeof(struct crypto_blkcipher *); | ||
| 27 | |||
| 28 | BUG_ON(alg->cra_type != &crypto_ablkcipher_type && | ||
| 29 | alg->cra_type != &crypto_givcipher_type); | ||
| 30 | |||
| 31 | return sizeof(struct crypto_ablkcipher *); | ||
| 32 | } | ||
| 33 | |||
| 34 | static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, | ||
| 35 | const u8 *key, unsigned int keylen) | ||
| 36 | { | ||
| 37 | struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); | ||
| 38 | struct crypto_blkcipher *blkcipher = *ctx; | ||
| 39 | int err; | ||
| 40 | |||
| 41 | crypto_blkcipher_clear_flags(blkcipher, ~0); | ||
| 42 | crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) & | ||
| 43 | CRYPTO_TFM_REQ_MASK); | ||
| 44 | err = crypto_blkcipher_setkey(blkcipher, key, keylen); | ||
| 45 | crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & | ||
| 46 | CRYPTO_TFM_RES_MASK); | ||
| 47 | |||
| 48 | return err; | ||
| 49 | } | ||
| 50 | |||
| 51 | static int skcipher_crypt_blkcipher(struct skcipher_request *req, | ||
| 52 | int (*crypt)(struct blkcipher_desc *, | ||
| 53 | struct scatterlist *, | ||
| 54 | struct scatterlist *, | ||
| 55 | unsigned int)) | ||
| 56 | { | ||
| 57 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 58 | struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); | ||
| 59 | struct blkcipher_desc desc = { | ||
| 60 | .tfm = *ctx, | ||
| 61 | .info = req->iv, | ||
| 62 | .flags = req->base.flags, | ||
| 63 | }; | ||
| 64 | |||
| 65 | |||
| 66 | return crypt(&desc, req->dst, req->src, req->cryptlen); | ||
| 67 | } | ||
| 68 | |||
| 69 | static int skcipher_encrypt_blkcipher(struct skcipher_request *req) | ||
| 70 | { | ||
| 71 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | ||
| 72 | struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); | ||
| 73 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | ||
| 74 | |||
| 75 | return skcipher_crypt_blkcipher(req, alg->encrypt); | ||
| 76 | } | ||
| 77 | |||
| 78 | static int skcipher_decrypt_blkcipher(struct skcipher_request *req) | ||
| 79 | { | ||
| 80 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | ||
| 81 | struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); | ||
| 82 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | ||
| 83 | |||
| 84 | return skcipher_crypt_blkcipher(req, alg->decrypt); | ||
| 85 | } | ||
| 86 | |||
| 87 | static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm) | ||
| 88 | { | ||
| 89 | struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); | ||
| 90 | |||
| 91 | crypto_free_blkcipher(*ctx); | ||
| 92 | } | ||
| 93 | |||
| 94 | int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) | ||
| 95 | { | ||
| 96 | struct crypto_alg *calg = tfm->__crt_alg; | ||
| 97 | struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); | ||
| 98 | struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); | ||
| 99 | struct crypto_blkcipher *blkcipher; | ||
| 100 | struct crypto_tfm *btfm; | ||
| 101 | |||
| 102 | if (!crypto_mod_get(calg)) | ||
| 103 | return -EAGAIN; | ||
| 104 | |||
| 105 | btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 106 | CRYPTO_ALG_TYPE_MASK); | ||
| 107 | if (IS_ERR(btfm)) { | ||
| 108 | crypto_mod_put(calg); | ||
| 109 | return PTR_ERR(btfm); | ||
| 110 | } | ||
| 111 | |||
| 112 | blkcipher = __crypto_blkcipher_cast(btfm); | ||
| 113 | *ctx = blkcipher; | ||
| 114 | tfm->exit = crypto_exit_skcipher_ops_blkcipher; | ||
| 115 | |||
| 116 | skcipher->setkey = skcipher_setkey_blkcipher; | ||
| 117 | skcipher->encrypt = skcipher_encrypt_blkcipher; | ||
| 118 | skcipher->decrypt = skcipher_decrypt_blkcipher; | ||
| 119 | |||
| 120 | skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); | ||
| 121 | |||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, | ||
| 126 | const u8 *key, unsigned int keylen) | ||
| 127 | { | ||
| 128 | struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); | ||
| 129 | struct crypto_ablkcipher *ablkcipher = *ctx; | ||
| 130 | int err; | ||
| 131 | |||
| 132 | crypto_ablkcipher_clear_flags(ablkcipher, ~0); | ||
| 133 | crypto_ablkcipher_set_flags(ablkcipher, | ||
| 134 | crypto_skcipher_get_flags(tfm) & | ||
| 135 | CRYPTO_TFM_REQ_MASK); | ||
| 136 | err = crypto_ablkcipher_setkey(ablkcipher, key, keylen); | ||
| 137 | crypto_skcipher_set_flags(tfm, | ||
| 138 | crypto_ablkcipher_get_flags(ablkcipher) & | ||
| 139 | CRYPTO_TFM_RES_MASK); | ||
| 140 | |||
| 141 | return err; | ||
| 142 | } | ||
| 143 | |||
| 144 | static int skcipher_crypt_ablkcipher(struct skcipher_request *req, | ||
| 145 | int (*crypt)(struct ablkcipher_request *)) | ||
| 146 | { | ||
| 147 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 148 | struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); | ||
| 149 | struct ablkcipher_request *subreq = skcipher_request_ctx(req); | ||
| 150 | |||
| 151 | ablkcipher_request_set_tfm(subreq, *ctx); | ||
| 152 | ablkcipher_request_set_callback(subreq, skcipher_request_flags(req), | ||
| 153 | req->base.complete, req->base.data); | ||
| 154 | ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | ||
| 155 | req->iv); | ||
| 156 | |||
| 157 | return crypt(subreq); | ||
| 158 | } | ||
| 159 | |||
| 160 | static int skcipher_encrypt_ablkcipher(struct skcipher_request *req) | ||
| 161 | { | ||
| 162 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | ||
| 163 | struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); | ||
| 164 | struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; | ||
| 165 | |||
| 166 | return skcipher_crypt_ablkcipher(req, alg->encrypt); | ||
| 167 | } | ||
| 168 | |||
| 169 | static int skcipher_decrypt_ablkcipher(struct skcipher_request *req) | ||
| 170 | { | ||
| 171 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | ||
| 172 | struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); | ||
| 173 | struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; | ||
| 174 | |||
| 175 | return skcipher_crypt_ablkcipher(req, alg->decrypt); | ||
| 176 | } | ||
| 177 | |||
| 178 | static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) | ||
| 179 | { | ||
| 180 | struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); | ||
| 181 | |||
| 182 | crypto_free_ablkcipher(*ctx); | ||
| 183 | } | ||
| 184 | |||
| 185 | int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) | ||
| 186 | { | ||
| 187 | struct crypto_alg *calg = tfm->__crt_alg; | ||
| 188 | struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); | ||
| 189 | struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); | ||
| 190 | struct crypto_ablkcipher *ablkcipher; | ||
| 191 | struct crypto_tfm *abtfm; | ||
| 192 | |||
| 193 | if (!crypto_mod_get(calg)) | ||
| 194 | return -EAGAIN; | ||
| 195 | |||
| 196 | abtfm = __crypto_alloc_tfm(calg, 0, 0); | ||
| 197 | if (IS_ERR(abtfm)) { | ||
| 198 | crypto_mod_put(calg); | ||
| 199 | return PTR_ERR(abtfm); | ||
| 200 | } | ||
| 201 | |||
| 202 | ablkcipher = __crypto_ablkcipher_cast(abtfm); | ||
| 203 | *ctx = ablkcipher; | ||
| 204 | tfm->exit = crypto_exit_skcipher_ops_ablkcipher; | ||
| 205 | |||
| 206 | skcipher->setkey = skcipher_setkey_ablkcipher; | ||
| 207 | skcipher->encrypt = skcipher_encrypt_ablkcipher; | ||
| 208 | skcipher->decrypt = skcipher_decrypt_ablkcipher; | ||
| 209 | |||
| 210 | skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
| 211 | skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + | ||
| 212 | sizeof(struct ablkcipher_request); | ||
| 213 | |||
| 214 | return 0; | ||
| 215 | } | ||
| 216 | |||
| 217 | static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) | ||
| 218 | { | ||
| 219 | if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) | ||
| 220 | return crypto_init_skcipher_ops_blkcipher(tfm); | ||
| 221 | |||
| 222 | BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type && | ||
| 223 | tfm->__crt_alg->cra_type != &crypto_givcipher_type); | ||
| 224 | |||
| 225 | return crypto_init_skcipher_ops_ablkcipher(tfm); | ||
| 226 | } | ||
| 227 | |||
| 228 | static const struct crypto_type crypto_skcipher_type2 = { | ||
| 229 | .extsize = crypto_skcipher_extsize, | ||
| 230 | .init_tfm = crypto_skcipher_init_tfm, | ||
| 231 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
| 232 | .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, | ||
| 233 | .type = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 234 | .tfmsize = offsetof(struct crypto_skcipher, base), | ||
| 235 | }; | ||
| 236 | |||
| 237 | struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, | ||
| 238 | u32 type, u32 mask) | ||
| 239 | { | ||
| 240 | return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask); | ||
| 241 | } | ||
| 242 | EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); | ||
| 243 | |||
| 244 | MODULE_LICENSE("GPL"); | ||
| 245 | MODULE_DESCRIPTION("Symmetric key cipher type"); | ||
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 9f6f10b498ba..2b00b617daab 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
| @@ -73,6 +73,22 @@ static char *check[] = { | |||
| 73 | "lzo", "cts", "zlib", NULL | 73 | "lzo", "cts", "zlib", NULL |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | struct tcrypt_result { | ||
| 77 | struct completion completion; | ||
| 78 | int err; | ||
| 79 | }; | ||
| 80 | |||
| 81 | static void tcrypt_complete(struct crypto_async_request *req, int err) | ||
| 82 | { | ||
| 83 | struct tcrypt_result *res = req->data; | ||
| 84 | |||
| 85 | if (err == -EINPROGRESS) | ||
| 86 | return; | ||
| 87 | |||
| 88 | res->err = err; | ||
| 89 | complete(&res->completion); | ||
| 90 | } | ||
| 91 | |||
| 76 | static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, | 92 | static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, |
| 77 | struct scatterlist *sg, int blen, int secs) | 93 | struct scatterlist *sg, int blen, int secs) |
| 78 | { | 94 | { |
| @@ -143,6 +159,20 @@ out: | |||
| 143 | return ret; | 159 | return ret; |
| 144 | } | 160 | } |
| 145 | 161 | ||
| 162 | static inline int do_one_aead_op(struct aead_request *req, int ret) | ||
| 163 | { | ||
| 164 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
| 165 | struct tcrypt_result *tr = req->base.data; | ||
| 166 | |||
| 167 | ret = wait_for_completion_interruptible(&tr->completion); | ||
| 168 | if (!ret) | ||
| 169 | ret = tr->err; | ||
| 170 | reinit_completion(&tr->completion); | ||
| 171 | } | ||
| 172 | |||
| 173 | return ret; | ||
| 174 | } | ||
| 175 | |||
| 146 | static int test_aead_jiffies(struct aead_request *req, int enc, | 176 | static int test_aead_jiffies(struct aead_request *req, int enc, |
| 147 | int blen, int secs) | 177 | int blen, int secs) |
| 148 | { | 178 | { |
| @@ -153,9 +183,9 @@ static int test_aead_jiffies(struct aead_request *req, int enc, | |||
| 153 | for (start = jiffies, end = start + secs * HZ, bcount = 0; | 183 | for (start = jiffies, end = start + secs * HZ, bcount = 0; |
| 154 | time_before(jiffies, end); bcount++) { | 184 | time_before(jiffies, end); bcount++) { |
| 155 | if (enc) | 185 | if (enc) |
| 156 | ret = crypto_aead_encrypt(req); | 186 | ret = do_one_aead_op(req, crypto_aead_encrypt(req)); |
| 157 | else | 187 | else |
| 158 | ret = crypto_aead_decrypt(req); | 188 | ret = do_one_aead_op(req, crypto_aead_decrypt(req)); |
| 159 | 189 | ||
| 160 | if (ret) | 190 | if (ret) |
| 161 | return ret; | 191 | return ret; |
| @@ -177,9 +207,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) | |||
| 177 | /* Warm-up run. */ | 207 | /* Warm-up run. */ |
| 178 | for (i = 0; i < 4; i++) { | 208 | for (i = 0; i < 4; i++) { |
| 179 | if (enc) | 209 | if (enc) |
| 180 | ret = crypto_aead_encrypt(req); | 210 | ret = do_one_aead_op(req, crypto_aead_encrypt(req)); |
| 181 | else | 211 | else |
| 182 | ret = crypto_aead_decrypt(req); | 212 | ret = do_one_aead_op(req, crypto_aead_decrypt(req)); |
| 183 | 213 | ||
| 184 | if (ret) | 214 | if (ret) |
| 185 | goto out; | 215 | goto out; |
| @@ -191,9 +221,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) | |||
| 191 | 221 | ||
| 192 | start = get_cycles(); | 222 | start = get_cycles(); |
| 193 | if (enc) | 223 | if (enc) |
| 194 | ret = crypto_aead_encrypt(req); | 224 | ret = do_one_aead_op(req, crypto_aead_encrypt(req)); |
| 195 | else | 225 | else |
| 196 | ret = crypto_aead_decrypt(req); | 226 | ret = do_one_aead_op(req, crypto_aead_decrypt(req)); |
| 197 | end = get_cycles(); | 227 | end = get_cycles(); |
| 198 | 228 | ||
| 199 | if (ret) | 229 | if (ret) |
| @@ -286,6 +316,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
| 286 | char *axbuf[XBUFSIZE]; | 316 | char *axbuf[XBUFSIZE]; |
| 287 | unsigned int *b_size; | 317 | unsigned int *b_size; |
| 288 | unsigned int iv_len; | 318 | unsigned int iv_len; |
| 319 | struct tcrypt_result result; | ||
| 289 | 320 | ||
| 290 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); | 321 | iv = kzalloc(MAX_IVLEN, GFP_KERNEL); |
| 291 | if (!iv) | 322 | if (!iv) |
| @@ -321,6 +352,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
| 321 | goto out_notfm; | 352 | goto out_notfm; |
| 322 | } | 353 | } |
| 323 | 354 | ||
| 355 | init_completion(&result.completion); | ||
| 324 | printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, | 356 | printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, |
| 325 | get_driver_name(crypto_aead, tfm), e); | 357 | get_driver_name(crypto_aead, tfm), e); |
| 326 | 358 | ||
| @@ -331,6 +363,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, | |||
| 331 | goto out_noreq; | 363 | goto out_noreq; |
| 332 | } | 364 | } |
| 333 | 365 | ||
| 366 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
| 367 | tcrypt_complete, &result); | ||
| 368 | |||
| 334 | i = 0; | 369 | i = 0; |
| 335 | do { | 370 | do { |
| 336 | b_size = aead_sizes; | 371 | b_size = aead_sizes; |
| @@ -749,22 +784,6 @@ out: | |||
| 749 | crypto_free_hash(tfm); | 784 | crypto_free_hash(tfm); |
| 750 | } | 785 | } |
| 751 | 786 | ||
| 752 | struct tcrypt_result { | ||
| 753 | struct completion completion; | ||
| 754 | int err; | ||
| 755 | }; | ||
| 756 | |||
| 757 | static void tcrypt_complete(struct crypto_async_request *req, int err) | ||
| 758 | { | ||
| 759 | struct tcrypt_result *res = req->data; | ||
| 760 | |||
| 761 | if (err == -EINPROGRESS) | ||
| 762 | return; | ||
| 763 | |||
| 764 | res->err = err; | ||
| 765 | complete(&res->completion); | ||
| 766 | } | ||
| 767 | |||
| 768 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) | 787 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) |
| 769 | { | 788 | { |
| 770 | if (ret == -EINPROGRESS || ret == -EBUSY) { | 789 | if (ret == -EINPROGRESS || ret == -EBUSY) { |
| @@ -1759,14 +1778,27 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
| 1759 | 1778 | ||
| 1760 | case 211: | 1779 | case 211: |
| 1761 | test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, | 1780 | test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, |
| 1781 | NULL, 0, 16, 16, aead_speed_template_20); | ||
| 1782 | test_aead_speed("gcm(aes)", ENCRYPT, sec, | ||
| 1762 | NULL, 0, 16, 8, aead_speed_template_20); | 1783 | NULL, 0, 16, 8, aead_speed_template_20); |
| 1763 | break; | 1784 | break; |
| 1764 | 1785 | ||
| 1765 | case 212: | 1786 | case 212: |
| 1766 | test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, | 1787 | test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, |
| 1767 | NULL, 0, 16, 8, aead_speed_template_19); | 1788 | NULL, 0, 16, 16, aead_speed_template_19); |
| 1789 | break; | ||
| 1790 | |||
| 1791 | case 213: | ||
| 1792 | test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec, | ||
| 1793 | NULL, 0, 16, 8, aead_speed_template_36); | ||
| 1794 | break; | ||
| 1795 | |||
| 1796 | case 214: | ||
| 1797 | test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0, | ||
| 1798 | speed_template_32); | ||
| 1768 | break; | 1799 | break; |
| 1769 | 1800 | ||
| 1801 | |||
| 1770 | case 300: | 1802 | case 300: |
| 1771 | if (alg) { | 1803 | if (alg) { |
| 1772 | test_hash_speed(alg, sec, generic_hash_speed_template); | 1804 | test_hash_speed(alg, sec, generic_hash_speed_template); |
| @@ -1855,6 +1887,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) | |||
| 1855 | test_hash_speed("crct10dif", sec, generic_hash_speed_template); | 1887 | test_hash_speed("crct10dif", sec, generic_hash_speed_template); |
| 1856 | if (mode > 300 && mode < 400) break; | 1888 | if (mode > 300 && mode < 400) break; |
| 1857 | 1889 | ||
| 1890 | case 321: | ||
| 1891 | test_hash_speed("poly1305", sec, poly1305_speed_template); | ||
| 1892 | if (mode > 300 && mode < 400) break; | ||
| 1893 | |||
| 1858 | case 399: | 1894 | case 399: |
| 1859 | break; | 1895 | break; |
| 1860 | 1896 | ||
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 6cc1b856871b..f0bfee1bb293 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h | |||
| @@ -61,12 +61,14 @@ static u8 speed_template_32_40_48[] = {32, 40, 48, 0}; | |||
| 61 | static u8 speed_template_32_48[] = {32, 48, 0}; | 61 | static u8 speed_template_32_48[] = {32, 48, 0}; |
| 62 | static u8 speed_template_32_48_64[] = {32, 48, 64, 0}; | 62 | static u8 speed_template_32_48_64[] = {32, 48, 64, 0}; |
| 63 | static u8 speed_template_32_64[] = {32, 64, 0}; | 63 | static u8 speed_template_32_64[] = {32, 64, 0}; |
| 64 | static u8 speed_template_32[] = {32, 0}; | ||
| 64 | 65 | ||
| 65 | /* | 66 | /* |
| 66 | * AEAD speed tests | 67 | * AEAD speed tests |
| 67 | */ | 68 | */ |
| 68 | static u8 aead_speed_template_19[] = {19, 0}; | 69 | static u8 aead_speed_template_19[] = {19, 0}; |
| 69 | static u8 aead_speed_template_20[] = {20, 0}; | 70 | static u8 aead_speed_template_20[] = {20, 0}; |
| 71 | static u8 aead_speed_template_36[] = {36, 0}; | ||
| 70 | 72 | ||
| 71 | /* | 73 | /* |
| 72 | * Digest speed tests | 74 | * Digest speed tests |
| @@ -127,4 +129,22 @@ static struct hash_speed hash_speed_template_16[] = { | |||
| 127 | { .blen = 0, .plen = 0, .klen = 0, } | 129 | { .blen = 0, .plen = 0, .klen = 0, } |
| 128 | }; | 130 | }; |
| 129 | 131 | ||
| 132 | static struct hash_speed poly1305_speed_template[] = { | ||
| 133 | { .blen = 96, .plen = 16, }, | ||
| 134 | { .blen = 96, .plen = 32, }, | ||
| 135 | { .blen = 96, .plen = 96, }, | ||
| 136 | { .blen = 288, .plen = 16, }, | ||
| 137 | { .blen = 288, .plen = 32, }, | ||
| 138 | { .blen = 288, .plen = 288, }, | ||
| 139 | { .blen = 1056, .plen = 32, }, | ||
| 140 | { .blen = 1056, .plen = 1056, }, | ||
| 141 | { .blen = 2080, .plen = 32, }, | ||
| 142 | { .blen = 2080, .plen = 2080, }, | ||
| 143 | { .blen = 4128, .plen = 4128, }, | ||
| 144 | { .blen = 8224, .plen = 8224, }, | ||
| 145 | |||
| 146 | /* End marker */ | ||
| 147 | { .blen = 0, .plen = 0, } | ||
| 148 | }; | ||
| 149 | |||
| 130 | #endif /* _CRYPTO_TCRYPT_H */ | 150 | #endif /* _CRYPTO_TCRYPT_H */ |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index d0a42bd3aae9..35c2de136971 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #include <crypto/aead.h> | 23 | #include <crypto/aead.h> |
| 24 | #include <crypto/hash.h> | 24 | #include <crypto/hash.h> |
| 25 | #include <crypto/skcipher.h> | ||
| 25 | #include <linux/err.h> | 26 | #include <linux/err.h> |
| 26 | #include <linux/fips.h> | 27 | #include <linux/fips.h> |
| 27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| @@ -921,15 +922,15 @@ out_nobuf: | |||
| 921 | return ret; | 922 | return ret; |
| 922 | } | 923 | } |
| 923 | 924 | ||
| 924 | static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | 925 | static int __test_skcipher(struct crypto_skcipher *tfm, int enc, |
| 925 | struct cipher_testvec *template, unsigned int tcount, | 926 | struct cipher_testvec *template, unsigned int tcount, |
| 926 | const bool diff_dst, const int align_offset) | 927 | const bool diff_dst, const int align_offset) |
| 927 | { | 928 | { |
| 928 | const char *algo = | 929 | const char *algo = |
| 929 | crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm)); | 930 | crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); |
| 930 | unsigned int i, j, k, n, temp; | 931 | unsigned int i, j, k, n, temp; |
| 931 | char *q; | 932 | char *q; |
| 932 | struct ablkcipher_request *req; | 933 | struct skcipher_request *req; |
| 933 | struct scatterlist sg[8]; | 934 | struct scatterlist sg[8]; |
| 934 | struct scatterlist sgout[8]; | 935 | struct scatterlist sgout[8]; |
| 935 | const char *e, *d; | 936 | const char *e, *d; |
| @@ -958,15 +959,15 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 958 | 959 | ||
| 959 | init_completion(&result.completion); | 960 | init_completion(&result.completion); |
| 960 | 961 | ||
| 961 | req = ablkcipher_request_alloc(tfm, GFP_KERNEL); | 962 | req = skcipher_request_alloc(tfm, GFP_KERNEL); |
| 962 | if (!req) { | 963 | if (!req) { |
| 963 | pr_err("alg: skcipher%s: Failed to allocate request for %s\n", | 964 | pr_err("alg: skcipher%s: Failed to allocate request for %s\n", |
| 964 | d, algo); | 965 | d, algo); |
| 965 | goto out; | 966 | goto out; |
| 966 | } | 967 | } |
| 967 | 968 | ||
| 968 | ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 969 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
| 969 | tcrypt_complete, &result); | 970 | tcrypt_complete, &result); |
| 970 | 971 | ||
| 971 | j = 0; | 972 | j = 0; |
| 972 | for (i = 0; i < tcount; i++) { | 973 | for (i = 0; i < tcount; i++) { |
| @@ -987,15 +988,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 987 | data += align_offset; | 988 | data += align_offset; |
| 988 | memcpy(data, template[i].input, template[i].ilen); | 989 | memcpy(data, template[i].input, template[i].ilen); |
| 989 | 990 | ||
| 990 | crypto_ablkcipher_clear_flags(tfm, ~0); | 991 | crypto_skcipher_clear_flags(tfm, ~0); |
| 991 | if (template[i].wk) | 992 | if (template[i].wk) |
| 992 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); | 993 | crypto_skcipher_set_flags(tfm, |
| 994 | CRYPTO_TFM_REQ_WEAK_KEY); | ||
| 993 | 995 | ||
| 994 | ret = crypto_ablkcipher_setkey(tfm, template[i].key, | 996 | ret = crypto_skcipher_setkey(tfm, template[i].key, |
| 995 | template[i].klen); | 997 | template[i].klen); |
| 996 | if (!ret == template[i].fail) { | 998 | if (!ret == template[i].fail) { |
| 997 | pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n", | 999 | pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n", |
| 998 | d, j, algo, crypto_ablkcipher_get_flags(tfm)); | 1000 | d, j, algo, crypto_skcipher_get_flags(tfm)); |
| 999 | goto out; | 1001 | goto out; |
| 1000 | } else if (ret) | 1002 | } else if (ret) |
| 1001 | continue; | 1003 | continue; |
| @@ -1007,10 +1009,10 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 1007 | sg_init_one(&sgout[0], data, template[i].ilen); | 1009 | sg_init_one(&sgout[0], data, template[i].ilen); |
| 1008 | } | 1010 | } |
| 1009 | 1011 | ||
| 1010 | ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 1012 | skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
| 1011 | template[i].ilen, iv); | 1013 | template[i].ilen, iv); |
| 1012 | ret = enc ? crypto_ablkcipher_encrypt(req) : | 1014 | ret = enc ? crypto_skcipher_encrypt(req) : |
| 1013 | crypto_ablkcipher_decrypt(req); | 1015 | crypto_skcipher_decrypt(req); |
| 1014 | 1016 | ||
| 1015 | switch (ret) { | 1017 | switch (ret) { |
| 1016 | case 0: | 1018 | case 0: |
| @@ -1054,15 +1056,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 1054 | memset(iv, 0, MAX_IVLEN); | 1056 | memset(iv, 0, MAX_IVLEN); |
| 1055 | 1057 | ||
| 1056 | j++; | 1058 | j++; |
| 1057 | crypto_ablkcipher_clear_flags(tfm, ~0); | 1059 | crypto_skcipher_clear_flags(tfm, ~0); |
| 1058 | if (template[i].wk) | 1060 | if (template[i].wk) |
| 1059 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); | 1061 | crypto_skcipher_set_flags(tfm, |
| 1062 | CRYPTO_TFM_REQ_WEAK_KEY); | ||
| 1060 | 1063 | ||
| 1061 | ret = crypto_ablkcipher_setkey(tfm, template[i].key, | 1064 | ret = crypto_skcipher_setkey(tfm, template[i].key, |
| 1062 | template[i].klen); | 1065 | template[i].klen); |
| 1063 | if (!ret == template[i].fail) { | 1066 | if (!ret == template[i].fail) { |
| 1064 | pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n", | 1067 | pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n", |
| 1065 | d, j, algo, crypto_ablkcipher_get_flags(tfm)); | 1068 | d, j, algo, crypto_skcipher_get_flags(tfm)); |
| 1066 | goto out; | 1069 | goto out; |
| 1067 | } else if (ret) | 1070 | } else if (ret) |
| 1068 | continue; | 1071 | continue; |
| @@ -1100,11 +1103,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 1100 | temp += template[i].tap[k]; | 1103 | temp += template[i].tap[k]; |
| 1101 | } | 1104 | } |
| 1102 | 1105 | ||
| 1103 | ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, | 1106 | skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, |
| 1104 | template[i].ilen, iv); | 1107 | template[i].ilen, iv); |
| 1105 | 1108 | ||
| 1106 | ret = enc ? crypto_ablkcipher_encrypt(req) : | 1109 | ret = enc ? crypto_skcipher_encrypt(req) : |
| 1107 | crypto_ablkcipher_decrypt(req); | 1110 | crypto_skcipher_decrypt(req); |
| 1108 | 1111 | ||
| 1109 | switch (ret) { | 1112 | switch (ret) { |
| 1110 | case 0: | 1113 | case 0: |
| @@ -1157,7 +1160,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc, | |||
| 1157 | ret = 0; | 1160 | ret = 0; |
| 1158 | 1161 | ||
| 1159 | out: | 1162 | out: |
| 1160 | ablkcipher_request_free(req); | 1163 | skcipher_request_free(req); |
| 1161 | if (diff_dst) | 1164 | if (diff_dst) |
| 1162 | testmgr_free_buf(xoutbuf); | 1165 | testmgr_free_buf(xoutbuf); |
| 1163 | out_nooutbuf: | 1166 | out_nooutbuf: |
| @@ -1166,7 +1169,7 @@ out_nobuf: | |||
| 1166 | return ret; | 1169 | return ret; |
| 1167 | } | 1170 | } |
| 1168 | 1171 | ||
| 1169 | static int test_skcipher(struct crypto_ablkcipher *tfm, int enc, | 1172 | static int test_skcipher(struct crypto_skcipher *tfm, int enc, |
| 1170 | struct cipher_testvec *template, unsigned int tcount) | 1173 | struct cipher_testvec *template, unsigned int tcount) |
| 1171 | { | 1174 | { |
| 1172 | unsigned int alignmask; | 1175 | unsigned int alignmask; |
| @@ -1578,10 +1581,10 @@ out: | |||
| 1578 | static int alg_test_skcipher(const struct alg_test_desc *desc, | 1581 | static int alg_test_skcipher(const struct alg_test_desc *desc, |
| 1579 | const char *driver, u32 type, u32 mask) | 1582 | const char *driver, u32 type, u32 mask) |
| 1580 | { | 1583 | { |
| 1581 | struct crypto_ablkcipher *tfm; | 1584 | struct crypto_skcipher *tfm; |
| 1582 | int err = 0; | 1585 | int err = 0; |
| 1583 | 1586 | ||
| 1584 | tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); | 1587 | tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); |
| 1585 | if (IS_ERR(tfm)) { | 1588 | if (IS_ERR(tfm)) { |
| 1586 | printk(KERN_ERR "alg: skcipher: Failed to load transform for " | 1589 | printk(KERN_ERR "alg: skcipher: Failed to load transform for " |
| 1587 | "%s: %ld\n", driver, PTR_ERR(tfm)); | 1590 | "%s: %ld\n", driver, PTR_ERR(tfm)); |
| @@ -1600,7 +1603,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc, | |||
| 1600 | desc->suite.cipher.dec.count); | 1603 | desc->suite.cipher.dec.count); |
| 1601 | 1604 | ||
| 1602 | out: | 1605 | out: |
| 1603 | crypto_free_ablkcipher(tfm); | 1606 | crypto_free_skcipher(tfm); |
| 1604 | return err; | 1607 | return err; |
| 1605 | } | 1608 | } |
| 1606 | 1609 | ||
| @@ -2476,6 +2479,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 2476 | } | 2479 | } |
| 2477 | }, { | 2480 | }, { |
| 2478 | .alg = "cmac(aes)", | 2481 | .alg = "cmac(aes)", |
| 2482 | .fips_allowed = 1, | ||
| 2479 | .test = alg_test_hash, | 2483 | .test = alg_test_hash, |
| 2480 | .suite = { | 2484 | .suite = { |
| 2481 | .hash = { | 2485 | .hash = { |
| @@ -2485,6 +2489,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
| 2485 | } | 2489 | } |
| 2486 | }, { | 2490 | }, { |
| 2487 | .alg = "cmac(des3_ede)", | 2491 | .alg = "cmac(des3_ede)", |
| 2492 | .fips_allowed = 1, | ||
| 2488 | .test = alg_test_hash, | 2493 | .test = alg_test_hash, |
| 2489 | .suite = { | 2494 | .suite = { |
| 2490 | .hash = { | 2495 | .hash = { |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 868edf117041..64b8a8082645 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
| @@ -14504,6 +14504,9 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = { | |||
| 14504 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | 14504 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" |
| 14505 | "\x27\x08\x94\x2d\xbe\x77\x18\x1a", | 14505 | "\x27\x08\x94\x2d\xbe\x77\x18\x1a", |
| 14506 | .rlen = 16, | 14506 | .rlen = 16, |
| 14507 | .also_non_np = 1, | ||
| 14508 | .np = 8, | ||
| 14509 | .tap = { 3, 2, 3, 2, 3, 1, 1, 1 }, | ||
| 14507 | }, { | 14510 | }, { |
| 14508 | .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" | 14511 | .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" |
| 14509 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", | 14512 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", |
| @@ -14723,6 +14726,9 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = { | |||
| 14723 | .ilen = 16, | 14726 | .ilen = 16, |
| 14724 | .result = "Single block msg", | 14727 | .result = "Single block msg", |
| 14725 | .rlen = 16, | 14728 | .rlen = 16, |
| 14729 | .also_non_np = 1, | ||
| 14730 | .np = 8, | ||
| 14731 | .tap = { 3, 2, 3, 2, 3, 1, 1, 1 }, | ||
| 14726 | }, { | 14732 | }, { |
| 14727 | .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" | 14733 | .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" |
| 14728 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", | 14734 | "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", |
| @@ -15032,6 +15038,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15032 | .klen = 8 + 20 + 16, | 15038 | .klen = 8 + 20 + 16, |
| 15033 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | 15039 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" |
| 15034 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | 15040 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", |
| 15041 | .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | ||
| 15042 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | ||
| 15043 | .alen = 16, | ||
| 15035 | .input = "Single block msg", | 15044 | .input = "Single block msg", |
| 15036 | .ilen = 16, | 15045 | .ilen = 16, |
| 15037 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | 15046 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" |
| @@ -15057,6 +15066,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15057 | .klen = 8 + 20 + 16, | 15066 | .klen = 8 + 20 + 16, |
| 15058 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | 15067 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" |
| 15059 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | 15068 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", |
| 15069 | .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | ||
| 15070 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | ||
| 15071 | .alen = 16, | ||
| 15060 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15072 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15061 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 15073 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| 15062 | "\x10\x11\x12\x13\x14\x15\x16\x17" | 15074 | "\x10\x11\x12\x13\x14\x15\x16\x17" |
| @@ -15087,6 +15099,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15087 | .klen = 8 + 20 + 16, | 15099 | .klen = 8 + 20 + 16, |
| 15088 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | 15100 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" |
| 15089 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | 15101 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", |
| 15102 | .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | ||
| 15103 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | ||
| 15104 | .alen = 16, | ||
| 15090 | .input = "This is a 48-byte message (exactly 3 AES blocks)", | 15105 | .input = "This is a 48-byte message (exactly 3 AES blocks)", |
| 15091 | .ilen = 48, | 15106 | .ilen = 48, |
| 15092 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" | 15107 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" |
| @@ -15116,6 +15131,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15116 | .klen = 8 + 20 + 16, | 15131 | .klen = 8 + 20 + 16, |
| 15117 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | 15132 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" |
| 15118 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | 15133 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", |
| 15134 | .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | ||
| 15135 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | ||
| 15136 | .alen = 16, | ||
| 15119 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" | 15137 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" |
| 15120 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" | 15138 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" |
| 15121 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" | 15139 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" |
| @@ -15154,8 +15172,10 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15154 | .klen = 8 + 20 + 16, | 15172 | .klen = 8 + 20 + 16, |
| 15155 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" | 15173 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" |
| 15156 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | 15174 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", |
| 15157 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 15175 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 15158 | .alen = 8, | 15176 | "\xe9\x6e\x8c\x08\xab\x46\x57\x63" |
| 15177 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | ||
| 15178 | .alen = 24, | ||
| 15159 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" | 15179 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" |
| 15160 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" | 15180 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" |
| 15161 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 15181 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| @@ -15199,6 +15219,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15199 | .klen = 8 + 20 + 24, | 15219 | .klen = 8 + 20 + 24, |
| 15200 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15220 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15201 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 15221 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
| 15222 | .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 15223 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 15224 | .alen = 16, | ||
| 15202 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | 15225 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" |
| 15203 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | 15226 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" |
| 15204 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | 15227 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" |
| @@ -15239,6 +15262,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { | |||
| 15239 | .klen = 8 + 20 + 32, | 15262 | .klen = 8 + 20 + 32, |
| 15240 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15263 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15241 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 15264 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
| 15265 | .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 15266 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 15267 | .alen = 16, | ||
| 15242 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | 15268 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" |
| 15243 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | 15269 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" |
| 15244 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | 15270 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" |
| @@ -15374,6 +15400,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15374 | .klen = 8 + 32 + 16, | 15400 | .klen = 8 + 32 + 16, |
| 15375 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | 15401 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" |
| 15376 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | 15402 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", |
| 15403 | .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | ||
| 15404 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | ||
| 15405 | .alen = 16, | ||
| 15377 | .input = "Single block msg", | 15406 | .input = "Single block msg", |
| 15378 | .ilen = 16, | 15407 | .ilen = 16, |
| 15379 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | 15408 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" |
| @@ -15401,6 +15430,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15401 | .klen = 8 + 32 + 16, | 15430 | .klen = 8 + 32 + 16, |
| 15402 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | 15431 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" |
| 15403 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | 15432 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", |
| 15433 | .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | ||
| 15434 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | ||
| 15435 | .alen = 16, | ||
| 15404 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15436 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15405 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 15437 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| 15406 | "\x10\x11\x12\x13\x14\x15\x16\x17" | 15438 | "\x10\x11\x12\x13\x14\x15\x16\x17" |
| @@ -15433,6 +15465,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15433 | .klen = 8 + 32 + 16, | 15465 | .klen = 8 + 32 + 16, |
| 15434 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | 15466 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" |
| 15435 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | 15467 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", |
| 15468 | .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | ||
| 15469 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | ||
| 15470 | .alen = 16, | ||
| 15436 | .input = "This is a 48-byte message (exactly 3 AES blocks)", | 15471 | .input = "This is a 48-byte message (exactly 3 AES blocks)", |
| 15437 | .ilen = 48, | 15472 | .ilen = 48, |
| 15438 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" | 15473 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" |
| @@ -15464,6 +15499,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15464 | .klen = 8 + 32 + 16, | 15499 | .klen = 8 + 32 + 16, |
| 15465 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | 15500 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" |
| 15466 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | 15501 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", |
| 15502 | .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | ||
| 15503 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | ||
| 15504 | .alen = 16, | ||
| 15467 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" | 15505 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" |
| 15468 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" | 15506 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" |
| 15469 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" | 15507 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" |
| @@ -15504,8 +15542,10 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15504 | .klen = 8 + 32 + 16, | 15542 | .klen = 8 + 32 + 16, |
| 15505 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" | 15543 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" |
| 15506 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | 15544 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", |
| 15507 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 15545 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 15508 | .alen = 8, | 15546 | "\xe9\x6e\x8c\x08\xab\x46\x57\x63" |
| 15547 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | ||
| 15548 | .alen = 24, | ||
| 15509 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" | 15549 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" |
| 15510 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" | 15550 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" |
| 15511 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 15551 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| @@ -15551,6 +15591,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15551 | .klen = 8 + 32 + 24, | 15591 | .klen = 8 + 32 + 24, |
| 15552 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15592 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15553 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 15593 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
| 15594 | .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 15595 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 15596 | .alen = 16, | ||
| 15554 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | 15597 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" |
| 15555 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | 15598 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" |
| 15556 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | 15599 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" |
| @@ -15593,6 +15636,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { | |||
| 15593 | .klen = 8 + 32 + 32, | 15636 | .klen = 8 + 32 + 32, |
| 15594 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15637 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15595 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 15638 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
| 15639 | .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 15640 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 15641 | .alen = 16, | ||
| 15596 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | 15642 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" |
| 15597 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | 15643 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" |
| 15598 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | 15644 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" |
| @@ -15641,6 +15687,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15641 | .klen = 8 + 64 + 16, | 15687 | .klen = 8 + 64 + 16, |
| 15642 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | 15688 | .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" |
| 15643 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | 15689 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", |
| 15690 | .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" | ||
| 15691 | "\xb4\x22\xda\x80\x2c\x9f\xac\x41", | ||
| 15692 | .alen = 16, | ||
| 15644 | .input = "Single block msg", | 15693 | .input = "Single block msg", |
| 15645 | .ilen = 16, | 15694 | .ilen = 16, |
| 15646 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" | 15695 | .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" |
| @@ -15676,6 +15725,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15676 | .klen = 8 + 64 + 16, | 15725 | .klen = 8 + 64 + 16, |
| 15677 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | 15726 | .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" |
| 15678 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | 15727 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", |
| 15728 | .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" | ||
| 15729 | "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", | ||
| 15730 | .alen = 16, | ||
| 15679 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15731 | .input = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15680 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 15732 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| 15681 | "\x10\x11\x12\x13\x14\x15\x16\x17" | 15733 | "\x10\x11\x12\x13\x14\x15\x16\x17" |
| @@ -15716,6 +15768,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15716 | .klen = 8 + 64 + 16, | 15768 | .klen = 8 + 64 + 16, |
| 15717 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | 15769 | .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" |
| 15718 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | 15770 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", |
| 15771 | .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" | ||
| 15772 | "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", | ||
| 15773 | .alen = 16, | ||
| 15719 | .input = "This is a 48-byte message (exactly 3 AES blocks)", | 15774 | .input = "This is a 48-byte message (exactly 3 AES blocks)", |
| 15720 | .ilen = 48, | 15775 | .ilen = 48, |
| 15721 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" | 15776 | .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" |
| @@ -15755,6 +15810,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15755 | .klen = 8 + 64 + 16, | 15810 | .klen = 8 + 64 + 16, |
| 15756 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | 15811 | .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" |
| 15757 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | 15812 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", |
| 15813 | .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" | ||
| 15814 | "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", | ||
| 15815 | .alen = 16, | ||
| 15758 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" | 15816 | .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" |
| 15759 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" | 15817 | "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" |
| 15760 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" | 15818 | "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" |
| @@ -15803,8 +15861,10 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15803 | .klen = 8 + 64 + 16, | 15861 | .klen = 8 + 64 + 16, |
| 15804 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" | 15862 | .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63" |
| 15805 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | 15863 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", |
| 15806 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 15864 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 15807 | .alen = 8, | 15865 | "\xe9\x6e\x8c\x08\xab\x46\x57\x63" |
| 15866 | "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", | ||
| 15867 | .alen = 24, | ||
| 15808 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" | 15868 | .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" |
| 15809 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" | 15869 | "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" |
| 15810 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 15870 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| @@ -15858,6 +15918,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15858 | .klen = 8 + 64 + 24, | 15918 | .klen = 8 + 64 + 24, |
| 15859 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15919 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15860 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 15920 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
| 15921 | .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 15922 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 15923 | .alen = 16, | ||
| 15861 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | 15924 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" |
| 15862 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | 15925 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" |
| 15863 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | 15926 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" |
| @@ -15908,6 +15971,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { | |||
| 15908 | .klen = 8 + 64 + 32, | 15971 | .klen = 8 + 64 + 32, |
| 15909 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" | 15972 | .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 15910 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | 15973 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", |
| 15974 | .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 15975 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
| 15976 | .alen = 16, | ||
| 15911 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" | 15977 | .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" |
| 15912 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" | 15978 | "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" |
| 15913 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" | 15979 | "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" |
| @@ -15955,8 +16021,9 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { | |||
| 15955 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", | 16021 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", |
| 15956 | .klen = 8 + 20 + 8, | 16022 | .klen = 8 + 20 + 8, |
| 15957 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16023 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 15958 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16024 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 15959 | .alen = 8, | 16025 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16026 | .alen = 16, | ||
| 15960 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16027 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 15961 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16028 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 15962 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16029 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16015,8 +16082,9 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { | |||
| 16015 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", | 16082 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", |
| 16016 | .klen = 8 + 24 + 8, | 16083 | .klen = 8 + 24 + 8, |
| 16017 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16084 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16018 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16085 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16019 | .alen = 8, | 16086 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16087 | .alen = 16, | ||
| 16020 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16088 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16021 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16089 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16022 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16090 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16076,8 +16144,9 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { | |||
| 16076 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", | 16144 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", |
| 16077 | .klen = 8 + 32 + 8, | 16145 | .klen = 8 + 32 + 8, |
| 16078 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16146 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16079 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16147 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16080 | .alen = 8, | 16148 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16149 | .alen = 16, | ||
| 16081 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16150 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16082 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16151 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16083 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16152 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16140,8 +16209,9 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { | |||
| 16140 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", | 16209 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", |
| 16141 | .klen = 8 + 48 + 8, | 16210 | .klen = 8 + 48 + 8, |
| 16142 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16211 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16143 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16212 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16144 | .alen = 8, | 16213 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16214 | .alen = 16, | ||
| 16145 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16215 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16146 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16216 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16147 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16217 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16208,8 +16278,9 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { | |||
| 16208 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", | 16278 | "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24", |
| 16209 | .klen = 8 + 64 + 8, | 16279 | .klen = 8 + 64 + 8, |
| 16210 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16280 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16211 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16281 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16212 | .alen = 8, | 16282 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16283 | .alen = 16, | ||
| 16213 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16284 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16214 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16285 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16215 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16286 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16275,8 +16346,9 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { | |||
| 16275 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", | 16346 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", |
| 16276 | .klen = 8 + 20 + 24, | 16347 | .klen = 8 + 20 + 24, |
| 16277 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16348 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16278 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16349 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16279 | .alen = 8, | 16350 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16351 | .alen = 16, | ||
| 16280 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16352 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16281 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16353 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16282 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16354 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16337,8 +16409,9 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { | |||
| 16337 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", | 16409 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", |
| 16338 | .klen = 8 + 24 + 24, | 16410 | .klen = 8 + 24 + 24, |
| 16339 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16411 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16340 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16412 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16341 | .alen = 8, | 16413 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16414 | .alen = 16, | ||
| 16342 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16415 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16343 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16416 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16344 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16417 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16400,8 +16473,9 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { | |||
| 16400 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", | 16473 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", |
| 16401 | .klen = 8 + 32 + 24, | 16474 | .klen = 8 + 32 + 24, |
| 16402 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16475 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16403 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16476 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16404 | .alen = 8, | 16477 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16478 | .alen = 16, | ||
| 16405 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16479 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16406 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16480 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16407 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16481 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16466,8 +16540,9 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { | |||
| 16466 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", | 16540 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", |
| 16467 | .klen = 8 + 48 + 24, | 16541 | .klen = 8 + 48 + 24, |
| 16468 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16542 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16469 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16543 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16470 | .alen = 8, | 16544 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16545 | .alen = 16, | ||
| 16471 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16546 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16472 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16547 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16473 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16548 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -16536,8 +16611,9 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { | |||
| 16536 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", | 16611 | "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", |
| 16537 | .klen = 8 + 64 + 24, | 16612 | .klen = 8 + 64 + 24, |
| 16538 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", | 16613 | .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16539 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01", | 16614 | .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" |
| 16540 | .alen = 8, | 16615 | "\x7D\x33\x88\x93\x0F\x93\xB2\x42", |
| 16616 | .alen = 16, | ||
| 16541 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" | 16617 | .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" |
| 16542 | "\x53\x20\x63\x65\x65\x72\x73\x74" | 16618 | "\x53\x20\x63\x65\x65\x72\x73\x74" |
| 16543 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" | 16619 | "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" |
| @@ -20129,149 +20205,150 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { | |||
| 20129 | }; | 20205 | }; |
| 20130 | 20206 | ||
| 20131 | static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | 20207 | static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { |
| 20132 | { /* Generated using Crypto++ */ | 20208 | { /* Generated using Crypto++ */ |
| 20133 | .key = zeroed_string, | 20209 | .key = zeroed_string, |
| 20134 | .klen = 20, | 20210 | .klen = 20, |
| 20135 | .iv = zeroed_string, | 20211 | .iv = zeroed_string, |
| 20136 | .input = zeroed_string, | 20212 | .input = zeroed_string, |
| 20137 | .ilen = 16, | 20213 | .ilen = 16, |
| 20138 | .assoc = zeroed_string, | 20214 | .assoc = zeroed_string, |
| 20139 | .alen = 8, | 20215 | .alen = 16, |
| 20140 | .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" | 20216 | .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" |
| 20141 | "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" | 20217 | "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" |
| 20142 | "\x97\xFE\x4C\x23\x37\x42\x01\xE0" | 20218 | "\x97\xFE\x4C\x23\x37\x42\x01\xE0" |
| 20143 | "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", | 20219 | "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", |
| 20144 | .rlen = 32, | 20220 | .rlen = 32, |
| 20145 | },{ | 20221 | },{ |
| 20146 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20222 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20147 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20223 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20148 | "\x00\x00\x00\x00", | 20224 | "\x00\x00\x00\x00", |
| 20149 | .klen = 20, | 20225 | .klen = 20, |
| 20150 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | 20226 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20151 | "\x00\x00\x00\x00", | 20227 | .input = zeroed_string, |
| 20152 | .input = zeroed_string, | 20228 | .ilen = 16, |
| 20153 | .ilen = 16, | 20229 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" |
| 20154 | .assoc = zeroed_string, | 20230 | "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20155 | .alen = 8, | 20231 | .alen = 16, |
| 20156 | .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" | 20232 | .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" |
| 20157 | "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" | 20233 | "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" |
| 20158 | "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" | 20234 | "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" |
| 20159 | "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", | 20235 | "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", |
| 20160 | .rlen = 32, | 20236 | .rlen = 32, |
| 20161 | 20237 | ||
| 20162 | }, { | 20238 | }, { |
| 20163 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20239 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20164 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20240 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20165 | "\x00\x00\x00\x00", | 20241 | "\x00\x00\x00\x00", |
| 20166 | .klen = 20, | 20242 | .klen = 20, |
| 20167 | .iv = zeroed_string, | 20243 | .iv = zeroed_string, |
| 20168 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20244 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20169 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20245 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20170 | .ilen = 16, | 20246 | .ilen = 16, |
| 20171 | .assoc = zeroed_string, | 20247 | .assoc = zeroed_string, |
| 20172 | .alen = 8, | 20248 | .alen = 16, |
| 20173 | .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | 20249 | .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" |
| 20174 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | 20250 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" |
| 20175 | "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" | 20251 | "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" |
| 20176 | "\xB1\x68\xFD\x14\x52\x64\x61\xB2", | 20252 | "\xB1\x68\xFD\x14\x52\x64\x61\xB2", |
| 20177 | .rlen = 32, | 20253 | .rlen = 32, |
| 20178 | }, { | 20254 | }, { |
| 20179 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20255 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20180 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20256 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20181 | "\x00\x00\x00\x00", | 20257 | "\x00\x00\x00\x00", |
| 20182 | .klen = 20, | 20258 | .klen = 20, |
| 20183 | .iv = zeroed_string, | 20259 | .iv = zeroed_string, |
| 20184 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20260 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20185 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20261 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20186 | .ilen = 16, | 20262 | .ilen = 16, |
| 20187 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | 20263 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20188 | .alen = 8, | 20264 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
| 20265 | .alen = 16, | ||
| 20189 | .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | 20266 | .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" |
| 20190 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | 20267 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" |
| 20191 | "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" | 20268 | "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" |
| 20192 | "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", | 20269 | "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", |
| 20193 | .rlen = 32, | 20270 | .rlen = 32, |
| 20194 | }, { | 20271 | }, { |
| 20195 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20272 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20196 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20273 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20197 | "\x00\x00\x00\x00", | 20274 | "\x00\x00\x00\x00", |
| 20198 | .klen = 20, | 20275 | .klen = 20, |
| 20199 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | 20276 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20200 | "\x00\x00\x00\x00", | 20277 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20201 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20278 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20202 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20279 | .ilen = 16, |
| 20203 | .ilen = 16, | 20280 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20204 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | 20281 | "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20205 | .alen = 8, | 20282 | .alen = 16, |
| 20206 | .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | 20283 | .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" |
| 20207 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | 20284 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" |
| 20208 | "\x64\x50\xF9\x32\x13\xFB\x74\x61" | 20285 | "\x64\x50\xF9\x32\x13\xFB\x74\x61" |
| 20209 | "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", | 20286 | "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", |
| 20210 | .rlen = 32, | 20287 | .rlen = 32, |
| 20211 | }, { | 20288 | }, { |
| 20212 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20289 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20213 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20290 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20214 | "\x00\x00\x00\x00", | 20291 | "\x00\x00\x00\x00", |
| 20215 | .klen = 20, | 20292 | .klen = 20, |
| 20216 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | 20293 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20217 | "\x00\x00\x00\x00", | 20294 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20218 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20295 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20219 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20296 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20220 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20297 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20221 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20298 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20222 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20299 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20223 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20300 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20224 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20301 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20225 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20302 | .ilen = 64, |
| 20226 | .ilen = 64, | 20303 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20227 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | 20304 | "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20228 | .alen = 8, | 20305 | .alen = 16, |
| 20229 | .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | 20306 | .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" |
| 20230 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | 20307 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" |
| 20231 | "\x98\x14\xA1\x42\x37\x80\xFD\x90" | 20308 | "\x98\x14\xA1\x42\x37\x80\xFD\x90" |
| 20232 | "\x68\x12\x01\xA8\x91\x89\xB9\x83" | 20309 | "\x68\x12\x01\xA8\x91\x89\xB9\x83" |
| 20233 | "\x5B\x11\x77\x12\x9B\xFF\x24\x89" | 20310 | "\x5B\x11\x77\x12\x9B\xFF\x24\x89" |
| 20234 | "\x94\x5F\x18\x12\xBA\x27\x09\x39" | 20311 | "\x94\x5F\x18\x12\xBA\x27\x09\x39" |
| 20235 | "\x99\x96\x76\x42\x15\x1C\xCD\xCB" | 20312 | "\x99\x96\x76\x42\x15\x1C\xCD\xCB" |
| 20236 | "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" | 20313 | "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" |
| 20237 | "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" | 20314 | "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" |
| 20238 | "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", | 20315 | "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", |
| 20239 | .rlen = 80, | 20316 | .rlen = 80, |
| 20240 | }, { | 20317 | }, { |
| 20241 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | 20318 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 20242 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 20319 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| 20243 | "\x00\x00\x00\x00", | 20320 | "\x00\x00\x00\x00", |
| 20244 | .klen = 20, | 20321 | .klen = 20, |
| 20245 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef" | 20322 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", |
| 20246 | "\x00\x00\x00\x00", | 20323 | .input = "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20247 | .input = "\xff\xff\xff\xff\xff\xff\xff\xff" | 20324 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20248 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20325 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20249 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20326 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20250 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20327 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20251 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20328 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20252 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20329 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20253 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20330 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20254 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20331 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20255 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20332 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20256 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20333 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20257 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20334 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20258 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20335 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20259 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20336 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20260 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20337 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20261 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20338 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20262 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20339 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20263 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20340 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20264 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20341 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20265 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20342 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20266 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20343 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20267 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20344 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20268 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20345 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20269 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20346 | "\xff\xff\xff\xff\xff\xff\xff\xff", |
| 20270 | "\xff\xff\xff\xff\xff\xff\xff\xff", | 20347 | .ilen = 192, |
| 20271 | .ilen = 192, | 20348 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" |
| 20272 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | 20349 | "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" |
| 20273 | "\xaa\xaa\xaa\xaa", | 20350 | "\x89\xab\xcd\xef", |
| 20274 | .alen = 12, | 20351 | .alen = 20, |
| 20275 | .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" | 20352 | .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" |
| 20276 | "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" | 20353 | "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" |
| 20277 | "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" | 20354 | "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" |
| @@ -20316,8 +20393,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20316 | "\x00\x21\x00\x01\x01\x02\x02\x01", | 20393 | "\x00\x21\x00\x01\x01\x02\x02\x01", |
| 20317 | .ilen = 72, | 20394 | .ilen = 72, |
| 20318 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | 20395 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" |
| 20319 | "\x00\x00\x00\x00", | 20396 | "\x00\x00\x00\x00\x49\x56\xED\x7E" |
| 20320 | .alen = 12, | 20397 | "\x3B\x24\x4C\xFE", |
| 20398 | .alen = 20, | ||
| 20321 | .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" | 20399 | .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" |
| 20322 | "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" | 20400 | "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" |
| 20323 | "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" | 20401 | "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" |
| @@ -20345,8 +20423,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20345 | "\x65\x72\x63\x69\x74\x79\x02\x64" | 20423 | "\x65\x72\x63\x69\x74\x79\x02\x64" |
| 20346 | "\x6B\x00\x00\x01\x00\x01\x00\x01", | 20424 | "\x6B\x00\x00\x01\x00\x01\x00\x01", |
| 20347 | .ilen = 64, | 20425 | .ilen = 64, |
| 20348 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | 20426 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" |
| 20349 | .alen = 8, | 20427 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", |
| 20428 | .alen = 16, | ||
| 20350 | .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" | 20429 | .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" |
| 20351 | "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" | 20430 | "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" |
| 20352 | "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" | 20431 | "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" |
| @@ -20374,8 +20453,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20374 | "\x02\x04\x05\xB4\x01\x01\x04\x02" | 20453 | "\x02\x04\x05\xB4\x01\x01\x04\x02" |
| 20375 | "\x01\x02\x02\x01", | 20454 | "\x01\x02\x02\x01", |
| 20376 | .ilen = 52, | 20455 | .ilen = 52, |
| 20377 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02", | 20456 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" |
| 20378 | .alen = 8, | 20457 | "\x01\x02\x03\x04\x05\x06\x07\x08", |
| 20458 | .alen = 16, | ||
| 20379 | .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" | 20459 | .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" |
| 20380 | "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" | 20460 | "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" |
| 20381 | "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" | 20461 | "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" |
| @@ -20401,8 +20481,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20401 | "\x75\x76\x77\x61\x62\x63\x64\x65" | 20481 | "\x75\x76\x77\x61\x62\x63\x64\x65" |
| 20402 | "\x66\x67\x68\x69\x01\x02\x02\x01", | 20482 | "\x66\x67\x68\x69\x01\x02\x02\x01", |
| 20403 | .ilen = 64, | 20483 | .ilen = 64, |
| 20404 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01", | 20484 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" |
| 20405 | .alen = 8, | 20485 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
| 20486 | .alen = 16, | ||
| 20406 | .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" | 20487 | .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" |
| 20407 | "\x73\x29\x09\xC3\x31\xD5\x6D\x60" | 20488 | "\x73\x29\x09\xC3\x31\xD5\x6D\x60" |
| 20408 | "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" | 20489 | "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" |
| @@ -20430,8 +20511,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20430 | "\x66\x67\x68\x69\x01\x02\x02\x01", | 20511 | "\x66\x67\x68\x69\x01\x02\x02\x01", |
| 20431 | .ilen = 64, | 20512 | .ilen = 64, |
| 20432 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | 20513 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" |
| 20433 | "\x10\x10\x10\x10", | 20514 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 20434 | .alen = 12, | 20515 | "\xA2\xFC\xA1\xA3", |
| 20516 | .alen = 20, | ||
| 20435 | .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" | 20517 | .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" |
| 20436 | "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" | 20518 | "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" |
| 20437 | "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" | 20519 | "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" |
| @@ -20455,8 +20537,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20455 | "\x01\x02\x02\x01", | 20537 | "\x01\x02\x02\x01", |
| 20456 | .ilen = 28, | 20538 | .ilen = 28, |
| 20457 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | 20539 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" |
| 20458 | "\x10\x10\x10\x10", | 20540 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 20459 | .alen = 12, | 20541 | "\xA2\xFC\xA1\xA3", |
| 20542 | .alen = 20, | ||
| 20460 | .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" | 20543 | .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" |
| 20461 | "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" | 20544 | "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" |
| 20462 | "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" | 20545 | "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" |
| @@ -20477,8 +20560,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20477 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" | 20560 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" |
| 20478 | "\x50\x10\x16\xD0\x75\x68\x00\x01", | 20561 | "\x50\x10\x16\xD0\x75\x68\x00\x01", |
| 20479 | .ilen = 40, | 20562 | .ilen = 40, |
| 20480 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | 20563 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" |
| 20481 | .alen = 8, | 20564 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", |
| 20565 | .alen = 16, | ||
| 20482 | .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" | 20566 | .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" |
| 20483 | "\x0E\x59\x8B\x81\x22\xDE\x02\x42" | 20567 | "\x0E\x59\x8B\x81\x22\xDE\x02\x42" |
| 20484 | "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" | 20568 | "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" |
| @@ -20505,8 +20589,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20505 | "\x23\x01\x01\x01", | 20589 | "\x23\x01\x01\x01", |
| 20506 | .ilen = 76, | 20590 | .ilen = 76, |
| 20507 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | 20591 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" |
| 20508 | "\x00\x00\x00\x01", | 20592 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" |
| 20509 | .alen = 12, | 20593 | "\xCE\xFA\xCE\x74", |
| 20594 | .alen = 20, | ||
| 20510 | .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" | 20595 | .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" |
| 20511 | "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" | 20596 | "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" |
| 20512 | "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" | 20597 | "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" |
| @@ -20535,8 +20620,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20535 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", | 20620 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", |
| 20536 | .ilen = 40, | 20621 | .ilen = 40, |
| 20537 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | 20622 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" |
| 20538 | "\xDD\x0D\xB9\x9B", | 20623 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" |
| 20539 | .alen = 12, | 20624 | "\x69\x76\x65\x63", |
| 20625 | .alen = 20, | ||
| 20540 | .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" | 20626 | .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" |
| 20541 | "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" | 20627 | "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" |
| 20542 | "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" | 20628 | "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" |
| @@ -20563,8 +20649,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20563 | "\x15\x01\x01\x01", | 20649 | "\x15\x01\x01\x01", |
| 20564 | .ilen = 76, | 20650 | .ilen = 76, |
| 20565 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | 20651 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" |
| 20566 | "\x10\x10\x10\x10", | 20652 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 20567 | .alen = 12, | 20653 | "\xA2\xFC\xA1\xA3", |
| 20654 | .alen = 20, | ||
| 20568 | .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" | 20655 | .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" |
| 20569 | "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" | 20656 | "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" |
| 20570 | "\x3D\x77\x84\xB6\x07\x32\x3D\x22" | 20657 | "\x3D\x77\x84\xB6\x07\x32\x3D\x22" |
| @@ -20597,8 +20684,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20597 | "\x72\x72\x6F\x77\x01\x02\x02\x01", | 20684 | "\x72\x72\x6F\x77\x01\x02\x02\x01", |
| 20598 | .ilen = 72, | 20685 | .ilen = 72, |
| 20599 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | 20686 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" |
| 20600 | "\xDD\x0D\xB9\x9B", | 20687 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" |
| 20601 | .alen = 12, | 20688 | "\x69\x76\x65\x63", |
| 20689 | .alen = 20, | ||
| 20602 | .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" | 20690 | .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" |
| 20603 | "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" | 20691 | "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" |
| 20604 | "\x7B\x43\xF8\x26\xFB\x56\x83\x12" | 20692 | "\x7B\x43\xF8\x26\xFB\x56\x83\x12" |
| @@ -20619,8 +20707,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20619 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | 20707 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", |
| 20620 | .input = "\x01\x02\x02\x01", | 20708 | .input = "\x01\x02\x02\x01", |
| 20621 | .ilen = 4, | 20709 | .ilen = 4, |
| 20622 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF", | 20710 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" |
| 20623 | .alen = 8, | 20711 | "\x43\x45\x7E\x91\x82\x44\x3B\xC6", |
| 20712 | .alen = 16, | ||
| 20624 | .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" | 20713 | .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" |
| 20625 | "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" | 20714 | "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" |
| 20626 | "\x04\xBE\xF2\x70", | 20715 | "\x04\xBE\xF2\x70", |
| @@ -20636,8 +20725,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20636 | "\x62\x65\x00\x01", | 20725 | "\x62\x65\x00\x01", |
| 20637 | .ilen = 20, | 20726 | .ilen = 20, |
| 20638 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | 20727 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" |
| 20639 | "\x00\x00\x00\x01", | 20728 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" |
| 20640 | .alen = 12, | 20729 | "\xCE\xFA\xCE\x74", |
| 20730 | .alen = 20, | ||
| 20641 | .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" | 20731 | .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" |
| 20642 | "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" | 20732 | "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" |
| 20643 | "\x43\x33\x21\x64\x41\x25\x03\x52" | 20733 | "\x43\x33\x21\x64\x41\x25\x03\x52" |
| @@ -20661,8 +20751,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20661 | "\x01\x02\x02\x01", | 20751 | "\x01\x02\x02\x01", |
| 20662 | .ilen = 52, | 20752 | .ilen = 52, |
| 20663 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" | 20753 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" |
| 20664 | "\xFF\xFF\xFF\xFF", | 20754 | "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" |
| 20665 | .alen = 12, | 20755 | "\x67\x65\x74\x6D", |
| 20756 | .alen = 20, | ||
| 20666 | .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" | 20757 | .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" |
| 20667 | "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" | 20758 | "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" |
| 20668 | "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" | 20759 | "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" |
| @@ -20688,8 +20779,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20688 | "\x01\x02\x02\x01", | 20779 | "\x01\x02\x02\x01", |
| 20689 | .ilen = 52, | 20780 | .ilen = 52, |
| 20690 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" | 20781 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" |
| 20691 | "\x10\x10\x10\x10", | 20782 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 20692 | .alen = 12, | 20783 | "\xA2\xFC\xA1\xA3", |
| 20784 | .alen = 20, | ||
| 20693 | .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" | 20785 | .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" |
| 20694 | "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" | 20786 | "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" |
| 20695 | "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" | 20787 | "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" |
| @@ -20712,8 +20804,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20712 | "\x71\x72\x73\x74\x01\x02\x02\x01", | 20804 | "\x71\x72\x73\x74\x01\x02\x02\x01", |
| 20713 | .ilen = 32, | 20805 | .ilen = 32, |
| 20714 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | 20806 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" |
| 20715 | "\x00\x00\x00\x07", | 20807 | "\x00\x00\x00\x07\x48\x55\xEC\x7D" |
| 20716 | .alen = 12, | 20808 | "\x3A\x23\x4B\xFD", |
| 20809 | .alen = 20, | ||
| 20717 | .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" | 20810 | .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" |
| 20718 | "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" | 20811 | "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" |
| 20719 | "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" | 20812 | "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" |
| @@ -20725,122 +20818,122 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { | |||
| 20725 | }; | 20818 | }; |
| 20726 | 20819 | ||
| 20727 | static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | 20820 | static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { |
| 20728 | { /* Generated using Crypto++ */ | 20821 | { /* Generated using Crypto++ */ |
| 20729 | .key = zeroed_string, | 20822 | .key = zeroed_string, |
| 20730 | .klen = 20, | 20823 | .klen = 20, |
| 20731 | .iv = zeroed_string, | 20824 | .iv = zeroed_string, |
| 20732 | .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" | 20825 | .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" |
| 20733 | "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" | 20826 | "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" |
| 20734 | "\x97\xFE\x4C\x23\x37\x42\x01\xE0" | 20827 | "\x97\xFE\x4C\x23\x37\x42\x01\xE0" |
| 20735 | "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", | 20828 | "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", |
| 20736 | .ilen = 32, | 20829 | .ilen = 32, |
| 20737 | .assoc = zeroed_string, | 20830 | .assoc = zeroed_string, |
| 20738 | .alen = 8, | 20831 | .alen = 16, |
| 20739 | .result = zeroed_string, | 20832 | .result = zeroed_string, |
| 20740 | .rlen = 16, | 20833 | .rlen = 16, |
| 20741 | 20834 | ||
| 20742 | },{ | 20835 | },{ |
| 20743 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20836 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20744 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20837 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20745 | "\x00\x00\x00\x00", | 20838 | "\x00\x00\x00\x00", |
| 20746 | .klen = 20, | 20839 | .klen = 20, |
| 20747 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | 20840 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20748 | "\x00\x00\x00\x00", | ||
| 20749 | .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" | 20841 | .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" |
| 20750 | "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" | 20842 | "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" |
| 20751 | "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" | 20843 | "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" |
| 20752 | "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", | 20844 | "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", |
| 20753 | .ilen = 32, | 20845 | .ilen = 32, |
| 20754 | .assoc = zeroed_string, | 20846 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" |
| 20755 | .alen = 8, | 20847 | "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20756 | .result = zeroed_string, | 20848 | .alen = 16, |
| 20757 | .rlen = 16, | 20849 | .result = zeroed_string, |
| 20758 | }, { | 20850 | .rlen = 16, |
| 20851 | }, { | ||
| 20759 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20852 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20760 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20853 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20761 | "\x00\x00\x00\x00", | 20854 | "\x00\x00\x00\x00", |
| 20762 | .klen = 20, | 20855 | .klen = 20, |
| 20763 | .iv = zeroed_string, | 20856 | .iv = zeroed_string, |
| 20764 | .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | 20857 | .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" |
| 20765 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | 20858 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" |
| 20766 | "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" | 20859 | "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" |
| 20767 | "\xB1\x68\xFD\x14\x52\x64\x61\xB2", | 20860 | "\xB1\x68\xFD\x14\x52\x64\x61\xB2", |
| 20768 | .ilen = 32, | 20861 | .ilen = 32, |
| 20769 | .assoc = zeroed_string, | 20862 | .assoc = zeroed_string, |
| 20770 | .alen = 8, | 20863 | .alen = 16, |
| 20771 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20864 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20772 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20865 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20773 | .rlen = 16, | 20866 | .rlen = 16, |
| 20774 | }, { | 20867 | }, { |
| 20775 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20868 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20776 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20869 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20777 | "\x00\x00\x00\x00", | 20870 | "\x00\x00\x00\x00", |
| 20778 | .klen = 20, | 20871 | .klen = 20, |
| 20779 | .iv = zeroed_string, | 20872 | .iv = zeroed_string, |
| 20780 | .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" | 20873 | .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" |
| 20781 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" | 20874 | "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" |
| 20782 | "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" | 20875 | "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" |
| 20783 | "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", | 20876 | "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", |
| 20784 | .ilen = 32, | 20877 | .ilen = 32, |
| 20785 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | 20878 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20786 | .alen = 8, | 20879 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
| 20787 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20880 | .alen = 16, |
| 20788 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20881 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20789 | .rlen = 16, | 20882 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20883 | .rlen = 16, | ||
| 20790 | 20884 | ||
| 20791 | }, { | 20885 | }, { |
| 20792 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20886 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20793 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20887 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20794 | "\x00\x00\x00\x00", | 20888 | "\x00\x00\x00\x00", |
| 20795 | .klen = 20, | 20889 | .klen = 20, |
| 20796 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | 20890 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20797 | "\x00\x00\x00\x00", | ||
| 20798 | .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | 20891 | .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" |
| 20799 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | 20892 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" |
| 20800 | "\x64\x50\xF9\x32\x13\xFB\x74\x61" | 20893 | "\x64\x50\xF9\x32\x13\xFB\x74\x61" |
| 20801 | "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", | 20894 | "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", |
| 20802 | .ilen = 32, | 20895 | .ilen = 32, |
| 20803 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | 20896 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20804 | .alen = 8, | 20897 | "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20805 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20898 | .alen = 16, |
| 20806 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20899 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20807 | .rlen = 16, | 20900 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20808 | }, { | 20901 | .rlen = 16, |
| 20902 | }, { | ||
| 20809 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | 20903 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" |
| 20810 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | 20904 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" |
| 20811 | "\x00\x00\x00\x00", | 20905 | "\x00\x00\x00\x00", |
| 20812 | .klen = 20, | 20906 | .klen = 20, |
| 20813 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01" | 20907 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20814 | "\x00\x00\x00\x00", | ||
| 20815 | .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" | 20908 | .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" |
| 20816 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" | 20909 | "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" |
| 20817 | "\x98\x14\xA1\x42\x37\x80\xFD\x90" | 20910 | "\x98\x14\xA1\x42\x37\x80\xFD\x90" |
| 20818 | "\x68\x12\x01\xA8\x91\x89\xB9\x83" | 20911 | "\x68\x12\x01\xA8\x91\x89\xB9\x83" |
| 20819 | "\x5B\x11\x77\x12\x9B\xFF\x24\x89" | 20912 | "\x5B\x11\x77\x12\x9B\xFF\x24\x89" |
| 20820 | "\x94\x5F\x18\x12\xBA\x27\x09\x39" | 20913 | "\x94\x5F\x18\x12\xBA\x27\x09\x39" |
| 20821 | "\x99\x96\x76\x42\x15\x1C\xCD\xCB" | 20914 | "\x99\x96\x76\x42\x15\x1C\xCD\xCB" |
| 20822 | "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" | 20915 | "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" |
| 20823 | "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" | 20916 | "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" |
| 20824 | "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", | 20917 | "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", |
| 20825 | .ilen = 80, | 20918 | .ilen = 80, |
| 20826 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01", | 20919 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20827 | .alen = 8, | 20920 | "\x00\x00\x00\x00\x00\x00\x00\x01", |
| 20828 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | 20921 | .alen = 16, |
| 20829 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20922 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20830 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20923 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20831 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20924 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20832 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20925 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20833 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20926 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20834 | "\x01\x01\x01\x01\x01\x01\x01\x01" | 20927 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20835 | "\x01\x01\x01\x01\x01\x01\x01\x01", | 20928 | "\x01\x01\x01\x01\x01\x01\x01\x01" |
| 20836 | .rlen = 64, | 20929 | "\x01\x01\x01\x01\x01\x01\x01\x01", |
| 20837 | }, { | 20930 | .rlen = 64, |
| 20931 | }, { | ||
| 20838 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | 20932 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" |
| 20839 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | 20933 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
| 20840 | "\x00\x00\x00\x00", | 20934 | "\x00\x00\x00\x00", |
| 20841 | .klen = 20, | 20935 | .klen = 20, |
| 20842 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef" | 20936 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", |
| 20843 | "\x00\x00\x00\x00", | ||
| 20844 | .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" | 20937 | .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" |
| 20845 | "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" | 20938 | "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" |
| 20846 | "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" | 20939 | "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" |
| @@ -20868,34 +20961,35 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 20868 | "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" | 20961 | "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" |
| 20869 | "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", | 20962 | "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", |
| 20870 | .ilen = 208, | 20963 | .ilen = 208, |
| 20871 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | 20964 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" |
| 20872 | "\xaa\xaa\xaa\xaa", | 20965 | "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" |
| 20873 | .alen = 12, | 20966 | "\x89\xab\xcd\xef", |
| 20874 | .result = "\xff\xff\xff\xff\xff\xff\xff\xff" | 20967 | .alen = 20, |
| 20875 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20968 | .result = "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20876 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20969 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20877 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20970 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20878 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20971 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20879 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20972 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20880 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20973 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20881 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20974 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20882 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20975 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20883 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20976 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20884 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20977 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20885 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20978 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20886 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20979 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20887 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20980 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20888 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20981 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20889 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20982 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20890 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20983 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20891 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20984 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20892 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20985 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20893 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20986 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20894 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20987 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20895 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20988 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20896 | "\xff\xff\xff\xff\xff\xff\xff\xff" | 20989 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20897 | "\xff\xff\xff\xff\xff\xff\xff\xff", | 20990 | "\xff\xff\xff\xff\xff\xff\xff\xff" |
| 20898 | .rlen = 192, | 20991 | "\xff\xff\xff\xff\xff\xff\xff\xff", |
| 20992 | .rlen = 192, | ||
| 20899 | }, { | 20993 | }, { |
| 20900 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | 20994 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" |
| 20901 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | 20995 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" |
| @@ -20913,8 +21007,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 20913 | "\x00\x21\x00\x01\x01\x02\x02\x01", | 21007 | "\x00\x21\x00\x01\x01\x02\x02\x01", |
| 20914 | .rlen = 72, | 21008 | .rlen = 72, |
| 20915 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | 21009 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" |
| 20916 | "\x00\x00\x00\x00", | 21010 | "\x00\x00\x00\x00\x49\x56\xED\x7E" |
| 20917 | .alen = 12, | 21011 | "\x3B\x24\x4C\xFE", |
| 21012 | .alen = 20, | ||
| 20918 | .input = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" | 21013 | .input = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" |
| 20919 | "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" | 21014 | "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" |
| 20920 | "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" | 21015 | "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" |
| @@ -20942,8 +21037,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 20942 | "\x65\x72\x63\x69\x74\x79\x02\x64" | 21037 | "\x65\x72\x63\x69\x74\x79\x02\x64" |
| 20943 | "\x6B\x00\x00\x01\x00\x01\x00\x01", | 21038 | "\x6B\x00\x00\x01\x00\x01\x00\x01", |
| 20944 | .rlen = 64, | 21039 | .rlen = 64, |
| 20945 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | 21040 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" |
| 20946 | .alen = 8, | 21041 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", |
| 21042 | .alen = 16, | ||
| 20947 | .input = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" | 21043 | .input = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" |
| 20948 | "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" | 21044 | "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" |
| 20949 | "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" | 21045 | "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" |
| @@ -20971,8 +21067,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 20971 | "\x02\x04\x05\xB4\x01\x01\x04\x02" | 21067 | "\x02\x04\x05\xB4\x01\x01\x04\x02" |
| 20972 | "\x01\x02\x02\x01", | 21068 | "\x01\x02\x02\x01", |
| 20973 | .rlen = 52, | 21069 | .rlen = 52, |
| 20974 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02", | 21070 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" |
| 20975 | .alen = 8, | 21071 | "\x01\x02\x03\x04\x05\x06\x07\x08", |
| 21072 | .alen = 16, | ||
| 20976 | .input = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" | 21073 | .input = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" |
| 20977 | "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" | 21074 | "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" |
| 20978 | "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" | 21075 | "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" |
| @@ -20998,8 +21095,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 20998 | "\x75\x76\x77\x61\x62\x63\x64\x65" | 21095 | "\x75\x76\x77\x61\x62\x63\x64\x65" |
| 20999 | "\x66\x67\x68\x69\x01\x02\x02\x01", | 21096 | "\x66\x67\x68\x69\x01\x02\x02\x01", |
| 21000 | .rlen = 64, | 21097 | .rlen = 64, |
| 21001 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01", | 21098 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" |
| 21002 | .alen = 8, | 21099 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
| 21100 | .alen = 16, | ||
| 21003 | .input = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" | 21101 | .input = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" |
| 21004 | "\x73\x29\x09\xC3\x31\xD5\x6D\x60" | 21102 | "\x73\x29\x09\xC3\x31\xD5\x6D\x60" |
| 21005 | "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" | 21103 | "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" |
| @@ -21027,8 +21125,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21027 | "\x66\x67\x68\x69\x01\x02\x02\x01", | 21125 | "\x66\x67\x68\x69\x01\x02\x02\x01", |
| 21028 | .rlen = 64, | 21126 | .rlen = 64, |
| 21029 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | 21127 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" |
| 21030 | "\x10\x10\x10\x10", | 21128 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 21031 | .alen = 12, | 21129 | "\xA2\xFC\xA1\xA3", |
| 21130 | .alen = 20, | ||
| 21032 | .input = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" | 21131 | .input = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" |
| 21033 | "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" | 21132 | "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" |
| 21034 | "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" | 21133 | "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" |
| @@ -21052,8 +21151,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21052 | "\x01\x02\x02\x01", | 21151 | "\x01\x02\x02\x01", |
| 21053 | .rlen = 28, | 21152 | .rlen = 28, |
| 21054 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | 21153 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" |
| 21055 | "\x10\x10\x10\x10", | 21154 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 21056 | .alen = 12, | 21155 | "\xA2\xFC\xA1\xA3", |
| 21156 | .alen = 20, | ||
| 21057 | .input = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" | 21157 | .input = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" |
| 21058 | "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" | 21158 | "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" |
| 21059 | "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" | 21159 | "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" |
| @@ -21074,8 +21174,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21074 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" | 21174 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" |
| 21075 | "\x50\x10\x16\xD0\x75\x68\x00\x01", | 21175 | "\x50\x10\x16\xD0\x75\x68\x00\x01", |
| 21076 | .rlen = 40, | 21176 | .rlen = 40, |
| 21077 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A", | 21177 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" |
| 21078 | .alen = 8, | 21178 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", |
| 21179 | .alen = 16, | ||
| 21079 | .input = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" | 21180 | .input = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" |
| 21080 | "\x0E\x59\x8B\x81\x22\xDE\x02\x42" | 21181 | "\x0E\x59\x8B\x81\x22\xDE\x02\x42" |
| 21081 | "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" | 21182 | "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" |
| @@ -21102,8 +21203,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21102 | "\x23\x01\x01\x01", | 21203 | "\x23\x01\x01\x01", |
| 21103 | .rlen = 76, | 21204 | .rlen = 76, |
| 21104 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | 21205 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" |
| 21105 | "\x00\x00\x00\x01", | 21206 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" |
| 21106 | .alen = 12, | 21207 | "\xCE\xFA\xCE\x74", |
| 21208 | .alen = 20, | ||
| 21107 | .input = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" | 21209 | .input = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" |
| 21108 | "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" | 21210 | "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" |
| 21109 | "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" | 21211 | "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" |
| @@ -21132,8 +21234,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21132 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", | 21234 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", |
| 21133 | .rlen = 40, | 21235 | .rlen = 40, |
| 21134 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | 21236 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" |
| 21135 | "\xDD\x0D\xB9\x9B", | 21237 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" |
| 21136 | .alen = 12, | 21238 | "\x69\x76\x65\x63", |
| 21239 | .alen = 20, | ||
| 21137 | .input = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" | 21240 | .input = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" |
| 21138 | "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" | 21241 | "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" |
| 21139 | "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" | 21242 | "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" |
| @@ -21160,8 +21263,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21160 | "\x15\x01\x01\x01", | 21263 | "\x15\x01\x01\x01", |
| 21161 | .rlen = 76, | 21264 | .rlen = 76, |
| 21162 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | 21265 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" |
| 21163 | "\x10\x10\x10\x10", | 21266 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 21164 | .alen = 12, | 21267 | "\xA2\xFC\xA1\xA3", |
| 21268 | .alen = 20, | ||
| 21165 | .input = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" | 21269 | .input = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" |
| 21166 | "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" | 21270 | "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" |
| 21167 | "\x3D\x77\x84\xB6\x07\x32\x3D\x22" | 21271 | "\x3D\x77\x84\xB6\x07\x32\x3D\x22" |
| @@ -21194,8 +21298,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21194 | "\x72\x72\x6F\x77\x01\x02\x02\x01", | 21298 | "\x72\x72\x6F\x77\x01\x02\x02\x01", |
| 21195 | .rlen = 72, | 21299 | .rlen = 72, |
| 21196 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | 21300 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" |
| 21197 | "\xDD\x0D\xB9\x9B", | 21301 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" |
| 21198 | .alen = 12, | 21302 | "\x69\x76\x65\x63", |
| 21303 | .alen = 20, | ||
| 21199 | .input = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" | 21304 | .input = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" |
| 21200 | "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" | 21305 | "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" |
| 21201 | "\x7B\x43\xF8\x26\xFB\x56\x83\x12" | 21306 | "\x7B\x43\xF8\x26\xFB\x56\x83\x12" |
| @@ -21216,8 +21321,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21216 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | 21321 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", |
| 21217 | .result = "\x01\x02\x02\x01", | 21322 | .result = "\x01\x02\x02\x01", |
| 21218 | .rlen = 4, | 21323 | .rlen = 4, |
| 21219 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF", | 21324 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" |
| 21220 | .alen = 8, | 21325 | "\x43\x45\x7E\x91\x82\x44\x3B\xC6", |
| 21326 | .alen = 16, | ||
| 21221 | .input = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" | 21327 | .input = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" |
| 21222 | "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" | 21328 | "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" |
| 21223 | "\x04\xBE\xF2\x70", | 21329 | "\x04\xBE\xF2\x70", |
| @@ -21233,8 +21339,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21233 | "\x62\x65\x00\x01", | 21339 | "\x62\x65\x00\x01", |
| 21234 | .rlen = 20, | 21340 | .rlen = 20, |
| 21235 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | 21341 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" |
| 21236 | "\x00\x00\x00\x01", | 21342 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" |
| 21237 | .alen = 12, | 21343 | "\xCE\xFA\xCE\x74", |
| 21344 | .alen = 20, | ||
| 21238 | .input = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" | 21345 | .input = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" |
| 21239 | "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" | 21346 | "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" |
| 21240 | "\x43\x33\x21\x64\x41\x25\x03\x52" | 21347 | "\x43\x33\x21\x64\x41\x25\x03\x52" |
| @@ -21258,8 +21365,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21258 | "\x01\x02\x02\x01", | 21365 | "\x01\x02\x02\x01", |
| 21259 | .rlen = 52, | 21366 | .rlen = 52, |
| 21260 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" | 21367 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" |
| 21261 | "\xFF\xFF\xFF\xFF", | 21368 | "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" |
| 21262 | .alen = 12, | 21369 | "\x67\x65\x74\x6D", |
| 21370 | .alen = 20, | ||
| 21263 | .input = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" | 21371 | .input = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" |
| 21264 | "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" | 21372 | "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" |
| 21265 | "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" | 21373 | "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" |
| @@ -21285,8 +21393,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21285 | "\x01\x02\x02\x01", | 21393 | "\x01\x02\x02\x01", |
| 21286 | .rlen = 52, | 21394 | .rlen = 52, |
| 21287 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" | 21395 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" |
| 21288 | "\x10\x10\x10\x10", | 21396 | "\x10\x10\x10\x10\x4E\x28\x00\x00" |
| 21289 | .alen = 12, | 21397 | "\xA2\xFC\xA1\xA3", |
| 21398 | .alen = 20, | ||
| 21290 | .input = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" | 21399 | .input = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" |
| 21291 | "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" | 21400 | "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" |
| 21292 | "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" | 21401 | "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" |
| @@ -21309,8 +21418,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { | |||
| 21309 | "\x71\x72\x73\x74\x01\x02\x02\x01", | 21418 | "\x71\x72\x73\x74\x01\x02\x02\x01", |
| 21310 | .rlen = 32, | 21419 | .rlen = 32, |
| 21311 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | 21420 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" |
| 21312 | "\x00\x00\x00\x07", | 21421 | "\x00\x00\x00\x07\x48\x55\xEC\x7D" |
| 21313 | .alen = 12, | 21422 | "\x3A\x23\x4B\xFD", |
| 21423 | .alen = 20, | ||
| 21314 | .input = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" | 21424 | .input = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" |
| 21315 | "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" | 21425 | "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" |
| 21316 | "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" | 21426 | "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" |
| @@ -21538,10 +21648,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = { | |||
| 21538 | "\xba", | 21648 | "\xba", |
| 21539 | .rlen = 33, | 21649 | .rlen = 33, |
| 21540 | }, { | 21650 | }, { |
| 21541 | /* | 21651 | /* This is taken from FIPS CAVS. */ |
| 21542 | * This is the same vector as aes_ccm_rfc4309_enc_tv_template[0] | ||
| 21543 | * below but rewritten to use the ccm algorithm directly. | ||
| 21544 | */ | ||
| 21545 | .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" | 21652 | .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" |
| 21546 | "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e", | 21653 | "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e", |
| 21547 | .klen = 16, | 21654 | .klen = 16, |
| @@ -21559,6 +21666,142 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = { | |||
| 21559 | "\xda\x24\xea\xd9\xa1\x39\x98\xfd" | 21666 | "\xda\x24\xea\xd9\xa1\x39\x98\xfd" |
| 21560 | "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", | 21667 | "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", |
| 21561 | .rlen = 48, | 21668 | .rlen = 48, |
| 21669 | }, { | ||
| 21670 | .key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0" | ||
| 21671 | "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3", | ||
| 21672 | .klen = 16, | ||
| 21673 | .iv = "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8" | ||
| 21674 | "\x30\x60\x15\x56\x00\x00\x00\x00", | ||
| 21675 | .assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63" | ||
| 21676 | "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7" | ||
| 21677 | "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1" | ||
| 21678 | "\x0a\x63\x09\x78\xbc\x2c\x55\xde", | ||
| 21679 | .alen = 32, | ||
| 21680 | .input = "\x87\xa3\x36\xfd\x96\xb3\x93\x78" | ||
| 21681 | "\xa9\x28\x63\xba\x12\xa3\x14\x85" | ||
| 21682 | "\x57\x1e\x06\xc9\x7b\x21\xef\x76" | ||
| 21683 | "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e", | ||
| 21684 | .ilen = 32, | ||
| 21685 | .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19" | ||
| 21686 | "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab" | ||
| 21687 | "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff" | ||
| 21688 | "\x3b\xb5\xce\x53\xef\xde\xbb\x02" | ||
| 21689 | "\xa9\x86\x15\x6c\x13\xfe\xda\x0a" | ||
| 21690 | "\x22\xb8\x29\x3d\xd8\x39\x9a\x23", | ||
| 21691 | .rlen = 48, | ||
| 21692 | }, { | ||
| 21693 | .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" | ||
| 21694 | "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75" | ||
| 21695 | "\x53\x14\x73\x66\x8d\x88\xf6\x80", | ||
| 21696 | .klen = 24, | ||
| 21697 | .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d" | ||
| 21698 | "\x50\x20\xda\xe2\x00\x00\x00\x00", | ||
| 21699 | .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" | ||
| 21700 | "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" | ||
| 21701 | "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" | ||
| 21702 | "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", | ||
| 21703 | .alen = 32, | ||
| 21704 | .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc" | ||
| 21705 | "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b", | ||
| 21706 | .rlen = 16, | ||
| 21707 | }, { | ||
| 21708 | .key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42" | ||
| 21709 | "\xef\x7a\xd3\xce\xfc\x84\x60\x62" | ||
| 21710 | "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01", | ||
| 21711 | .klen = 24, | ||
| 21712 | .iv = "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd" | ||
| 21713 | "\xef\x09\x2e\x94\x00\x00\x00\x00", | ||
| 21714 | .assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91" | ||
| 21715 | "\xb1\xb9\xda\x76\x9a\x78\x6d\x95" | ||
| 21716 | "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c" | ||
| 21717 | "\xe3\x00\x73\x69\x84\x69\x87\x79", | ||
| 21718 | .alen = 32, | ||
| 21719 | .input = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c" | ||
| 21720 | "\x43\x69\x3a\x2d\x8e\x70\xad\x7e" | ||
| 21721 | "\xe0\xe5\x46\x09\x80\x89\x13\xb2" | ||
| 21722 | "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b", | ||
| 21723 | .ilen = 32, | ||
| 21724 | .result = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62" | ||
| 21725 | "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f" | ||
| 21726 | "\x9b\x6a\x09\x70\xc1\x51\x83\xc2" | ||
| 21727 | "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e" | ||
| 21728 | "\xc7\x79\x11\x58\xe5\x6b\x20\x40" | ||
| 21729 | "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1", | ||
| 21730 | .rlen = 48, | ||
| 21731 | }, { | ||
| 21732 | .key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a" | ||
| 21733 | "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a" | ||
| 21734 | "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e" | ||
| 21735 | "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b", | ||
| 21736 | .klen = 32, | ||
| 21737 | .iv = "\x03\x1e\x29\x91\xad\x8e\xc1\x53" | ||
| 21738 | "\x0a\xcf\x2d\xbe\x00\x00\x00\x00", | ||
| 21739 | .assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b" | ||
| 21740 | "\x78\x2b\x94\x02\x29\x0f\x42\x27" | ||
| 21741 | "\x6b\x75\xcb\x98\x34\x08\x7e\x79" | ||
| 21742 | "\xe4\x3e\x49\x0d\x84\x8b\x22\x87", | ||
| 21743 | .alen = 32, | ||
| 21744 | .input = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f" | ||
| 21745 | "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66" | ||
| 21746 | "\xbf\x17\x99\x62\x4a\x39\x27\x1f" | ||
| 21747 | "\x1d\xdc\x24\xae\x19\x2f\x98\x4c", | ||
| 21748 | .ilen = 32, | ||
| 21749 | .result = "\x19\xb8\x61\x33\x45\x2b\x43\x96" | ||
| 21750 | "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6" | ||
| 21751 | "\x26\x3d\xf8\xc9\x65\x16\xa8\x9f" | ||
| 21752 | "\xf0\x62\x17\x34\xf2\x1e\x8d\x75" | ||
| 21753 | "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d", | ||
| 21754 | .rlen = 40, | ||
| 21755 | }, { | ||
| 21756 | .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c" | ||
| 21757 | "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32" | ||
| 21758 | "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c" | ||
| 21759 | "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", | ||
| 21760 | .klen = 32, | ||
| 21761 | .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" | ||
| 21762 | "\x43\xf6\x1e\x50", | ||
| 21763 | .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" | ||
| 21764 | "\x13\x02\x01\x0c\x83\x4c\x96\x35" | ||
| 21765 | "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" | ||
| 21766 | "\xb0\x39\x36\xe6\x8f\x57\xe0\x13", | ||
| 21767 | .alen = 32, | ||
| 21768 | .input = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6" | ||
| 21769 | "\x83\x72\x07\x4f\xcf\xfa\x66\x89" | ||
| 21770 | "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27" | ||
| 21771 | "\x30\xdb\x75\x09\x93\xd4\x65\xe4", | ||
| 21772 | .ilen = 32, | ||
| 21773 | .result = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d" | ||
| 21774 | "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d" | ||
| 21775 | "\xcc\x63\x44\x25\x07\x78\x4f\x9e" | ||
| 21776 | "\x96\xb8\x88\xeb\xbc\x48\x1f\x06" | ||
| 21777 | "\x39\xaf\x39\xac\xd8\x4a\x80\x39" | ||
| 21778 | "\x7b\x72\x8a\xf7", | ||
| 21779 | .rlen = 44, | ||
| 21780 | }, { | ||
| 21781 | .key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83" | ||
| 21782 | "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46" | ||
| 21783 | "\xf9\x8f\xad\xe3\x02\x13\x83\x77" | ||
| 21784 | "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b", | ||
| 21785 | .klen = 32, | ||
| 21786 | .iv = "\x03\x24\xa7\x8b\x07\xcb\xcc\x0e" | ||
| 21787 | "\xe6\x33\xbf\xf5\x00\x00\x00\x00", | ||
| 21788 | .assoc = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f" | ||
| 21789 | "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d" | ||
| 21790 | "\xab\x90\x65\x8d\x8e\xca\x4d\x4f" | ||
| 21791 | "\x16\x0c\x40\x90\x4b\xc7\x36\x73", | ||
| 21792 | .alen = 32, | ||
| 21793 | .input = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92" | ||
| 21794 | "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d" | ||
| 21795 | "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2" | ||
| 21796 | "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a", | ||
| 21797 | .ilen = 32, | ||
| 21798 | .result = "\x83\x6f\x40\x87\x72\xcf\xc1\x13" | ||
| 21799 | "\xef\xbb\x80\x21\x04\x6c\x58\x09" | ||
| 21800 | "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7" | ||
| 21801 | "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf" | ||
| 21802 | "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d" | ||
| 21803 | "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8", | ||
| 21804 | .rlen = 48, | ||
| 21562 | } | 21805 | } |
| 21563 | }; | 21806 | }; |
| 21564 | 21807 | ||
| @@ -21688,186 +21931,13 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = { | |||
| 21688 | "\x8e\x5e\x67\x01\xc9\x17\x87\x65" | 21931 | "\x8e\x5e\x67\x01\xc9\x17\x87\x65" |
| 21689 | "\x98\x09\xd6\x7d\xbe\xdd\x18", | 21932 | "\x98\x09\xd6\x7d\xbe\xdd\x18", |
| 21690 | .rlen = 23, | 21933 | .rlen = 23, |
| 21691 | }, | ||
| 21692 | }; | ||
| 21693 | |||
| 21694 | /* | ||
| 21695 | * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all | ||
| 21696 | * use a 13-byte nonce, we only support an 11-byte nonce. Similarly, all of | ||
| 21697 | * Special Publication 800-38C's test vectors also use nonce lengths our | ||
| 21698 | * implementation doesn't support. The following are taken from fips cavs | ||
| 21699 | * fax files on hand at Red Hat. | ||
| 21700 | * | ||
| 21701 | * nb: actual key lengths are (klen - 3), the last 3 bytes are actually | ||
| 21702 | * part of the nonce which combine w/the iv, but need to be input this way. | ||
| 21703 | */ | ||
| 21704 | static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = { | ||
| 21705 | { | ||
| 21706 | .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" | ||
| 21707 | "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e" | ||
| 21708 | "\x96\xac\x59", | ||
| 21709 | .klen = 19, | ||
| 21710 | .iv = "\x30\x07\xa1\xe2\xa2\xc7\x55\x24", | ||
| 21711 | .alen = 0, | ||
| 21712 | .input = "\x19\xc8\x81\xf6\xe9\x86\xff\x93" | ||
| 21713 | "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e" | ||
| 21714 | "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c" | ||
| 21715 | "\x35\x2e\xad\xe0\x62\xf9\x91\xa1", | ||
| 21716 | .ilen = 32, | ||
| 21717 | .result = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8" | ||
| 21718 | "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1" | ||
| 21719 | "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a" | ||
| 21720 | "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32" | ||
| 21721 | "\xda\x24\xea\xd9\xa1\x39\x98\xfd" | ||
| 21722 | "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", | ||
| 21723 | .rlen = 48, | ||
| 21724 | }, { | ||
| 21725 | .key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0" | ||
| 21726 | "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3" | ||
| 21727 | "\x4f\xa3\x19", | ||
| 21728 | .klen = 19, | ||
| 21729 | .iv = "\xd3\x01\x5a\xd8\x30\x60\x15\x56", | ||
| 21730 | .assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63" | ||
| 21731 | "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7" | ||
| 21732 | "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1" | ||
| 21733 | "\x0a\x63\x09\x78\xbc\x2c\x55\xde", | ||
| 21734 | .alen = 32, | ||
| 21735 | .input = "\x87\xa3\x36\xfd\x96\xb3\x93\x78" | ||
| 21736 | "\xa9\x28\x63\xba\x12\xa3\x14\x85" | ||
| 21737 | "\x57\x1e\x06\xc9\x7b\x21\xef\x76" | ||
| 21738 | "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e", | ||
| 21739 | .ilen = 32, | ||
| 21740 | .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19" | ||
| 21741 | "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab" | ||
| 21742 | "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff" | ||
| 21743 | "\x3b\xb5\xce\x53\xef\xde\xbb\x02" | ||
| 21744 | "\xa9\x86\x15\x6c\x13\xfe\xda\x0a" | ||
| 21745 | "\x22\xb8\x29\x3d\xd8\x39\x9a\x23", | ||
| 21746 | .rlen = 48, | ||
| 21747 | }, { | ||
| 21748 | .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" | ||
| 21749 | "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75" | ||
| 21750 | "\x53\x14\x73\x66\x8d\x88\xf6\x80" | ||
| 21751 | "\xa0\x20\x35", | ||
| 21752 | .klen = 27, | ||
| 21753 | .iv = "\x26\xf2\x21\x8d\x50\x20\xda\xe2", | ||
| 21754 | .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" | ||
| 21755 | "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" | ||
| 21756 | "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" | ||
| 21757 | "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", | ||
| 21758 | .alen = 32, | ||
| 21759 | .ilen = 0, | ||
| 21760 | .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc" | ||
| 21761 | "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b", | ||
| 21762 | .rlen = 16, | ||
| 21763 | }, { | ||
| 21764 | .key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42" | ||
| 21765 | "\xef\x7a\xd3\xce\xfc\x84\x60\x62" | ||
| 21766 | "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01" | ||
| 21767 | "\xd6\x3c\x8c", | ||
| 21768 | .klen = 27, | ||
| 21769 | .iv = "\x86\x84\xb6\xcd\xef\x09\x2e\x94", | ||
| 21770 | .assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91" | ||
| 21771 | "\xb1\xb9\xda\x76\x9a\x78\x6d\x95" | ||
| 21772 | "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c" | ||
| 21773 | "\xe3\x00\x73\x69\x84\x69\x87\x79", | ||
| 21774 | .alen = 32, | ||
| 21775 | .input = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c" | ||
| 21776 | "\x43\x69\x3a\x2d\x8e\x70\xad\x7e" | ||
| 21777 | "\xe0\xe5\x46\x09\x80\x89\x13\xb2" | ||
| 21778 | "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b", | ||
| 21779 | .ilen = 32, | ||
| 21780 | .result = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62" | ||
| 21781 | "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f" | ||
| 21782 | "\x9b\x6a\x09\x70\xc1\x51\x83\xc2" | ||
| 21783 | "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e" | ||
| 21784 | "\xc7\x79\x11\x58\xe5\x6b\x20\x40" | ||
| 21785 | "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1", | ||
| 21786 | .rlen = 48, | ||
| 21787 | }, { | ||
| 21788 | .key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a" | ||
| 21789 | "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a" | ||
| 21790 | "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e" | ||
| 21791 | "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b" | ||
| 21792 | "\x1e\x29\x91", | ||
| 21793 | .klen = 35, | ||
| 21794 | .iv = "\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe", | ||
| 21795 | .assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b" | ||
| 21796 | "\x78\x2b\x94\x02\x29\x0f\x42\x27" | ||
| 21797 | "\x6b\x75\xcb\x98\x34\x08\x7e\x79" | ||
| 21798 | "\xe4\x3e\x49\x0d\x84\x8b\x22\x87", | ||
| 21799 | .alen = 32, | ||
| 21800 | .input = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f" | ||
| 21801 | "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66" | ||
| 21802 | "\xbf\x17\x99\x62\x4a\x39\x27\x1f" | ||
| 21803 | "\x1d\xdc\x24\xae\x19\x2f\x98\x4c", | ||
| 21804 | .ilen = 32, | ||
| 21805 | .result = "\x19\xb8\x61\x33\x45\x2b\x43\x96" | ||
| 21806 | "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6" | ||
| 21807 | "\x26\x3d\xf8\xc9\x65\x16\xa8\x9f" | ||
| 21808 | "\xf0\x62\x17\x34\xf2\x1e\x8d\x75" | ||
| 21809 | "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d", | ||
| 21810 | .rlen = 40, | ||
| 21811 | }, { | ||
| 21812 | .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c" | ||
| 21813 | "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32" | ||
| 21814 | "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c" | ||
| 21815 | "\x09\x75\x9a\x9b\x3c\x9b\x27\x39" | ||
| 21816 | "\xf9\xd9\x4e", | ||
| 21817 | .klen = 35, | ||
| 21818 | .iv = "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50", | ||
| 21819 | .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" | ||
| 21820 | "\x13\x02\x01\x0c\x83\x4c\x96\x35" | ||
| 21821 | "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" | ||
| 21822 | "\xb0\x39\x36\xe6\x8f\x57\xe0\x13", | ||
| 21823 | .alen = 32, | ||
| 21824 | .input = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6" | ||
| 21825 | "\x83\x72\x07\x4f\xcf\xfa\x66\x89" | ||
| 21826 | "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27" | ||
| 21827 | "\x30\xdb\x75\x09\x93\xd4\x65\xe4", | ||
| 21828 | .ilen = 32, | ||
| 21829 | .result = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d" | ||
| 21830 | "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d" | ||
| 21831 | "\xcc\x63\x44\x25\x07\x78\x4f\x9e" | ||
| 21832 | "\x96\xb8\x88\xeb\xbc\x48\x1f\x06" | ||
| 21833 | "\x39\xaf\x39\xac\xd8\x4a\x80\x39" | ||
| 21834 | "\x7b\x72\x8a\xf7", | ||
| 21835 | .rlen = 44, | ||
| 21836 | }, { | 21934 | }, { |
| 21837 | .key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83" | 21935 | /* This is taken from FIPS CAVS. */ |
| 21838 | "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46" | ||
| 21839 | "\xf9\x8f\xad\xe3\x02\x13\x83\x77" | ||
| 21840 | "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b" | ||
| 21841 | "\x24\xa7\x8b", | ||
| 21842 | .klen = 35, | ||
| 21843 | .iv = "\x07\xcb\xcc\x0e\xe6\x33\xbf\xf5", | ||
| 21844 | .assoc = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f" | ||
| 21845 | "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d" | ||
| 21846 | "\xab\x90\x65\x8d\x8e\xca\x4d\x4f" | ||
| 21847 | "\x16\x0c\x40\x90\x4b\xc7\x36\x73", | ||
| 21848 | .alen = 32, | ||
| 21849 | .input = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92" | ||
| 21850 | "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d" | ||
| 21851 | "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2" | ||
| 21852 | "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a", | ||
| 21853 | .ilen = 32, | ||
| 21854 | .result = "\x83\x6f\x40\x87\x72\xcf\xc1\x13" | ||
| 21855 | "\xef\xbb\x80\x21\x04\x6c\x58\x09" | ||
| 21856 | "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7" | ||
| 21857 | "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf" | ||
| 21858 | "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d" | ||
| 21859 | "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8", | ||
| 21860 | .rlen = 48, | ||
| 21861 | }, | ||
| 21862 | }; | ||
| 21863 | |||
| 21864 | static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | ||
| 21865 | { | ||
| 21866 | .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" | 21936 | .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" |
| 21867 | "\xff\x80\x2e\x48\x7d\x82\xf8\xb9" | 21937 | "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", |
| 21868 | "\xc6\xfb\x7d", | 21938 | .klen = 16, |
| 21869 | .klen = 19, | 21939 | .iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab" |
| 21870 | .iv = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8", | 21940 | "\xd8\xa6\xb2\xd8\x00\x00\x00\x00", |
| 21871 | .alen = 0, | 21941 | .alen = 0, |
| 21872 | .input = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b", | 21942 | .input = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b", |
| 21873 | .ilen = 8, | 21943 | .ilen = 8, |
| @@ -21876,10 +21946,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 21876 | .novrfy = 1, | 21946 | .novrfy = 1, |
| 21877 | }, { | 21947 | }, { |
| 21878 | .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" | 21948 | .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" |
| 21879 | "\xff\x80\x2e\x48\x7d\x82\xf8\xb9" | 21949 | "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", |
| 21880 | "\xaf\x94\x87", | 21950 | .klen = 16, |
| 21881 | .klen = 19, | 21951 | .iv = "\x03\xaf\x94\x87\x78\x35\x82\x81" |
| 21882 | .iv = "\x78\x35\x82\x81\x7f\x88\x94\x68", | 21952 | "\x7f\x88\x94\x68\x00\x00\x00\x00", |
| 21883 | .alen = 0, | 21953 | .alen = 0, |
| 21884 | .input = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3", | 21954 | .input = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3", |
| 21885 | .ilen = 8, | 21955 | .ilen = 8, |
| @@ -21887,10 +21957,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 21887 | .rlen = 0, | 21957 | .rlen = 0, |
| 21888 | }, { | 21958 | }, { |
| 21889 | .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" | 21959 | .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" |
| 21890 | "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8" | 21960 | "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8", |
| 21891 | "\xc6\xfb\x7d", | 21961 | .klen = 16, |
| 21892 | .klen = 19, | 21962 | .iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab" |
| 21893 | .iv = "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8", | 21963 | "\xd8\xa6\xb2\xd8\x00\x00\x00\x00", |
| 21894 | .assoc = "\xf3\x94\x87\x78\x35\x82\x81\x7f" | 21964 | .assoc = "\xf3\x94\x87\x78\x35\x82\x81\x7f" |
| 21895 | "\x88\x94\x68\xb1\x78\x6b\x2b\xd6" | 21965 | "\x88\x94\x68\xb1\x78\x6b\x2b\xd6" |
| 21896 | "\x04\x1f\x4e\xed\x78\xd5\x33\x66" | 21966 | "\x04\x1f\x4e\xed\x78\xd5\x33\x66" |
| @@ -21911,10 +21981,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 21911 | .novrfy = 1, | 21981 | .novrfy = 1, |
| 21912 | }, { | 21982 | }, { |
| 21913 | .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" | 21983 | .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" |
| 21914 | "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8" | 21984 | "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8", |
| 21915 | "\x05\xe0\xc9", | 21985 | .klen = 16, |
| 21916 | .klen = 19, | 21986 | .iv = "\x03\x05\xe0\xc9\x0f\xed\x34\xea" |
| 21917 | .iv = "\x0f\xed\x34\xea\x97\xd4\x3b\xdf", | 21987 | "\x97\xd4\x3b\xdf\x00\x00\x00\x00", |
| 21918 | .assoc = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81" | 21988 | .assoc = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81" |
| 21919 | "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1" | 21989 | "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1" |
| 21920 | "\xd8\x5c\x42\x68\xe0\x6c\xda\x89" | 21990 | "\xd8\x5c\x42\x68\xe0\x6c\xda\x89" |
| @@ -21935,10 +22005,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 21935 | }, { | 22005 | }, { |
| 21936 | .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" | 22006 | .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" |
| 21937 | "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" | 22007 | "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" |
| 21938 | "\xa4\x48\x93\x39\x26\x71\x4a\xc6" | 22008 | "\xa4\x48\x93\x39\x26\x71\x4a\xc6", |
| 21939 | "\xee\x49\x83", | 22009 | .klen = 24, |
| 21940 | .klen = 27, | 22010 | .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" |
| 21941 | .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e", | 22011 | "\x57\xba\xfd\x9e\x00\x00\x00\x00", |
| 21942 | .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" | 22012 | .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" |
| 21943 | "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" | 22013 | "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" |
| 21944 | "\xa4\xf0\x13\x05\xd1\x77\x99\x67" | 22014 | "\xa4\xf0\x13\x05\xd1\x77\x99\x67" |
| @@ -21951,10 +22021,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 21951 | }, { | 22021 | }, { |
| 21952 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" | 22022 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" |
| 21953 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" | 22023 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" |
| 21954 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" | 22024 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba", |
| 21955 | "\xee\x49\x83", | 22025 | .klen = 24, |
| 21956 | .klen = 27, | 22026 | .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" |
| 21957 | .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e", | 22027 | "\x57\xba\xfd\x9e\x00\x00\x00\x00", |
| 21958 | .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" | 22028 | .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" |
| 21959 | "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" | 22029 | "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" |
| 21960 | "\xa4\xf0\x13\x05\xd1\x77\x99\x67" | 22030 | "\xa4\xf0\x13\x05\xd1\x77\x99\x67" |
| @@ -21974,10 +22044,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 21974 | }, { | 22044 | }, { |
| 21975 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" | 22045 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" |
| 21976 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" | 22046 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" |
| 21977 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" | 22047 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba", |
| 21978 | "\xd1\xfc\x57", | 22048 | .klen = 24, |
| 21979 | .klen = 27, | 22049 | .iv = "\x03\xd1\xfc\x57\x9c\xfe\xb8\x9c" |
| 21980 | .iv = "\x9c\xfe\xb8\x9c\xad\x71\xaa\x1f", | 22050 | "\xad\x71\xaa\x1f\x00\x00\x00\x00", |
| 21981 | .assoc = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6" | 22051 | .assoc = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6" |
| 21982 | "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3" | 22052 | "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3" |
| 21983 | "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7" | 22053 | "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7" |
| @@ -22000,10 +22070,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 22000 | .key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01" | 22070 | .key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01" |
| 22001 | "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c" | 22071 | "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c" |
| 22002 | "\x20\x2c\xad\x30\xc2\x2b\x41\xfb" | 22072 | "\x20\x2c\xad\x30\xc2\x2b\x41\xfb" |
| 22003 | "\x0e\x85\xbc\x33\xad\x0f\x2b\xff" | 22073 | "\x0e\x85\xbc\x33\xad\x0f\x2b\xff", |
| 22004 | "\xee\x49\x83", | 22074 | .klen = 32, |
| 22005 | .klen = 35, | 22075 | .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" |
| 22006 | .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e", | 22076 | "\x57\xba\xfd\x9e\x00\x00\x00\x00", |
| 22007 | .alen = 0, | 22077 | .alen = 0, |
| 22008 | .input = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2", | 22078 | .input = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2", |
| 22009 | .ilen = 8, | 22079 | .ilen = 8, |
| @@ -22013,10 +22083,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 22013 | .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" | 22083 | .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" |
| 22014 | "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" | 22084 | "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" |
| 22015 | "\xa4\x48\x93\x39\x26\x71\x4a\xc6" | 22085 | "\xa4\x48\x93\x39\x26\x71\x4a\xc6" |
| 22016 | "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb" | 22086 | "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb", |
| 22017 | "\x85\x34\x66", | 22087 | .klen = 32, |
| 22018 | .klen = 35, | 22088 | .iv = "\x03\x85\x34\x66\x42\xc8\x92\x0f" |
| 22019 | .iv = "\x42\xc8\x92\x0f\x36\x58\xe0\x6b", | 22089 | "\x36\x58\xe0\x6b\x00\x00\x00\x00", |
| 22020 | .alen = 0, | 22090 | .alen = 0, |
| 22021 | .input = "\x48\x01\x5e\x02\x24\x04\x66\x47" | 22091 | .input = "\x48\x01\x5e\x02\x24\x04\x66\x47" |
| 22022 | "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd" | 22092 | "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd" |
| @@ -22035,10 +22105,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 22035 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" | 22105 | .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" |
| 22036 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" | 22106 | "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" |
| 22037 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" | 22107 | "\x29\xa0\xba\x9e\x48\x78\xd1\xba" |
| 22038 | "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b" | 22108 | "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b", |
| 22039 | "\xcf\x76\x3f", | 22109 | .klen = 32, |
| 22040 | .klen = 35, | 22110 | .iv = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f" |
| 22041 | .iv = "\xd9\x95\x75\x8f\x44\x89\x40\x7b", | 22111 | "\x44\x89\x40\x7b\x00\x00\x00\x00", |
| 22042 | .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" | 22112 | .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" |
| 22043 | "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" | 22113 | "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" |
| 22044 | "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" | 22114 | "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" |
| @@ -22060,6 +22130,1240 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | |||
| 22060 | }; | 22130 | }; |
| 22061 | 22131 | ||
| 22062 | /* | 22132 | /* |
| 22133 | * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all | ||
| 22134 | * use a 13-byte nonce, we only support an 11-byte nonce. Worse, | ||
| 22135 | * they use AD lengths which are not valid ESP header lengths. | ||
| 22136 | * | ||
| 22137 | * These vectors are copied/generated from the ones for rfc4106 with | ||
| 22138 | * the key truncated by one byte.. | ||
| 22139 | */ | ||
| 22140 | static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = { | ||
| 22141 | { /* Generated using Crypto++ */ | ||
| 22142 | .key = zeroed_string, | ||
| 22143 | .klen = 19, | ||
| 22144 | .iv = zeroed_string, | ||
| 22145 | .input = zeroed_string, | ||
| 22146 | .ilen = 16, | ||
| 22147 | .assoc = zeroed_string, | ||
| 22148 | .alen = 16, | ||
| 22149 | .result = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F" | ||
| 22150 | "\x12\x50\xE8\xDE\x81\x3C\x63\x08" | ||
| 22151 | "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5" | ||
| 22152 | "\x27\x50\x01\xAC\x03\x33\x39\xFB", | ||
| 22153 | .rlen = 32, | ||
| 22154 | },{ | ||
| 22155 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22156 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22157 | "\x00\x00\x00", | ||
| 22158 | .klen = 19, | ||
| 22159 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22160 | .input = zeroed_string, | ||
| 22161 | .ilen = 16, | ||
| 22162 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 22163 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22164 | .alen = 16, | ||
| 22165 | .result = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F" | ||
| 22166 | "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17" | ||
| 22167 | "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA" | ||
| 22168 | "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0", | ||
| 22169 | .rlen = 32, | ||
| 22170 | |||
| 22171 | }, { | ||
| 22172 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22173 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22174 | "\x00\x00\x00", | ||
| 22175 | .klen = 19, | ||
| 22176 | .iv = zeroed_string, | ||
| 22177 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22178 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22179 | .ilen = 16, | ||
| 22180 | .assoc = zeroed_string, | ||
| 22181 | .alen = 16, | ||
| 22182 | .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" | ||
| 22183 | "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" | ||
| 22184 | "\xA1\xE2\xC2\x42\x2B\x81\x70\x40" | ||
| 22185 | "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C", | ||
| 22186 | .rlen = 32, | ||
| 22187 | }, { | ||
| 22188 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22189 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22190 | "\x00\x00\x00", | ||
| 22191 | .klen = 19, | ||
| 22192 | .iv = zeroed_string, | ||
| 22193 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22194 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22195 | .ilen = 16, | ||
| 22196 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22197 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
| 22198 | .alen = 16, | ||
| 22199 | .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" | ||
| 22200 | "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" | ||
| 22201 | "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9" | ||
| 22202 | "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E", | ||
| 22203 | .rlen = 32, | ||
| 22204 | }, { | ||
| 22205 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22206 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22207 | "\x00\x00\x00", | ||
| 22208 | .klen = 19, | ||
| 22209 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22210 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22211 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22212 | .ilen = 16, | ||
| 22213 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22214 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22215 | .alen = 16, | ||
| 22216 | .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" | ||
| 22217 | "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" | ||
| 22218 | "\x43\x8E\x76\x57\x3B\xB4\x05\xE8" | ||
| 22219 | "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED", | ||
| 22220 | .rlen = 32, | ||
| 22221 | }, { | ||
| 22222 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22223 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22224 | "\x00\x00\x00", | ||
| 22225 | .klen = 19, | ||
| 22226 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22227 | .input = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22228 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22229 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22230 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22231 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22232 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22233 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22234 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22235 | .ilen = 64, | ||
| 22236 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22237 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22238 | .alen = 16, | ||
| 22239 | .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" | ||
| 22240 | "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" | ||
| 22241 | "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4" | ||
| 22242 | "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02" | ||
| 22243 | "\x9B\xAB\x39\x18\xEB\x94\x34\x36" | ||
| 22244 | "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49" | ||
| 22245 | "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E" | ||
| 22246 | "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB" | ||
| 22247 | "\x02\x73\xDD\xE7\x30\x4A\x30\x54" | ||
| 22248 | "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F", | ||
| 22249 | .rlen = 80, | ||
| 22250 | }, { | ||
| 22251 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 22252 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 22253 | "\x00\x00\x00", | ||
| 22254 | .klen = 19, | ||
| 22255 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", | ||
| 22256 | .input = "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22257 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22258 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22259 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22260 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22261 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22262 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22263 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22264 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22265 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22266 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22267 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22268 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22269 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22270 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22271 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22272 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22273 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22274 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22275 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22276 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22277 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22278 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22279 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
| 22280 | .ilen = 192, | ||
| 22281 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | ||
| 22282 | "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" | ||
| 22283 | "\x89\xab\xcd\xef", | ||
| 22284 | .alen = 20, | ||
| 22285 | .result = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E" | ||
| 22286 | "\x7C\x64\x6D\x33\x46\x77\xAC\xB1" | ||
| 22287 | "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95" | ||
| 22288 | "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C" | ||
| 22289 | "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB" | ||
| 22290 | "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01" | ||
| 22291 | "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04" | ||
| 22292 | "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B" | ||
| 22293 | "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09" | ||
| 22294 | "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79" | ||
| 22295 | "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D" | ||
| 22296 | "\x22\xEB\xD1\x09\x80\x3F\x5A\x70" | ||
| 22297 | "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B" | ||
| 22298 | "\x36\x12\x00\x89\xAA\x5D\x55\xDA" | ||
| 22299 | "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6" | ||
| 22300 | "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F" | ||
| 22301 | "\x2B\x50\x44\x52\xC2\x10\x7D\x38" | ||
| 22302 | "\x82\x64\x83\x0C\xAE\x49\xD0\xE5" | ||
| 22303 | "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43" | ||
| 22304 | "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC" | ||
| 22305 | "\x19\x6D\x60\x66\x61\xF9\x9A\x3F" | ||
| 22306 | "\x75\xFC\x38\x53\x5B\xB5\xCD\x52" | ||
| 22307 | "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98" | ||
| 22308 | "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E" | ||
| 22309 | "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC" | ||
| 22310 | "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A", | ||
| 22311 | .rlen = 208, | ||
| 22312 | }, { /* From draft-mcgrew-gcm-test-01 */ | ||
| 22313 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
| 22314 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
| 22315 | "\x2E\x44\x3B", | ||
| 22316 | .klen = 19, | ||
| 22317 | .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", | ||
| 22318 | .input = "\x45\x00\x00\x48\x69\x9A\x00\x00" | ||
| 22319 | "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" | ||
| 22320 | "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" | ||
| 22321 | "\x38\xD3\x01\x00\x00\x01\x00\x00" | ||
| 22322 | "\x00\x00\x00\x00\x04\x5F\x73\x69" | ||
| 22323 | "\x70\x04\x5F\x75\x64\x70\x03\x73" | ||
| 22324 | "\x69\x70\x09\x63\x79\x62\x65\x72" | ||
| 22325 | "\x63\x69\x74\x79\x02\x64\x6B\x00" | ||
| 22326 | "\x00\x21\x00\x01\x01\x02\x02\x01", | ||
| 22327 | .ilen = 72, | ||
| 22328 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
| 22329 | "\x00\x00\x00\x00\x49\x56\xED\x7E" | ||
| 22330 | "\x3B\x24\x4C\xFE", | ||
| 22331 | .alen = 20, | ||
| 22332 | .result = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB" | ||
| 22333 | "\x83\x60\xF5\xBA\x3A\x56\x79\xE6" | ||
| 22334 | "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E" | ||
| 22335 | "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF" | ||
| 22336 | "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0" | ||
| 22337 | "\x39\xF8\x53\x6D\x31\x22\x2B\xBF" | ||
| 22338 | "\x98\x81\xFC\x34\xEE\x85\x36\xCD" | ||
| 22339 | "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35" | ||
| 22340 | "\x18\x85\x54\xB2\xBC\xDD\x3F\x43" | ||
| 22341 | "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC" | ||
| 22342 | "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF", | ||
| 22343 | .rlen = 88, | ||
| 22344 | }, { | ||
| 22345 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
| 22346 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
| 22347 | "\xCA\xFE\xBA", | ||
| 22348 | .klen = 19, | ||
| 22349 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 22350 | .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00" | ||
| 22351 | "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" | ||
| 22352 | "\xC0\xA8\x01\x01\x0A\x98\x00\x35" | ||
| 22353 | "\x00\x2A\x23\x43\xB2\xD0\x01\x00" | ||
| 22354 | "\x00\x01\x00\x00\x00\x00\x00\x00" | ||
| 22355 | "\x03\x73\x69\x70\x09\x63\x79\x62" | ||
| 22356 | "\x65\x72\x63\x69\x74\x79\x02\x64" | ||
| 22357 | "\x6B\x00\x00\x01\x00\x01\x00\x01", | ||
| 22358 | .ilen = 64, | ||
| 22359 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" | ||
| 22360 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 22361 | .alen = 16, | ||
| 22362 | .result = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8" | ||
| 22363 | "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16" | ||
| 22364 | "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF" | ||
| 22365 | "\xF5\x71\xB6\x37\x84\xA7\xB1\x99" | ||
| 22366 | "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D" | ||
| 22367 | "\xEF\x25\x88\x1F\x1D\x77\x11\xFF" | ||
| 22368 | "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B" | ||
| 22369 | "\x00\x62\x1F\x68\x4E\x7C\xA0\x97" | ||
| 22370 | "\x10\x72\x7E\x53\x13\x3B\x68\xE4" | ||
| 22371 | "\x30\x99\x91\x79\x09\xEA\xFF\x6A", | ||
| 22372 | .rlen = 80, | ||
| 22373 | }, { | ||
| 22374 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22375 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22376 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22377 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22378 | "\x11\x22\x33", | ||
| 22379 | .klen = 35, | ||
| 22380 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
| 22381 | .input = "\x45\x00\x00\x30\x69\xA6\x40\x00" | ||
| 22382 | "\x80\x06\x26\x90\xC0\xA8\x01\x02" | ||
| 22383 | "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" | ||
| 22384 | "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" | ||
| 22385 | "\x70\x02\x40\x00\x20\xBF\x00\x00" | ||
| 22386 | "\x02\x04\x05\xB4\x01\x01\x04\x02" | ||
| 22387 | "\x01\x02\x02\x01", | ||
| 22388 | .ilen = 52, | ||
| 22389 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" | ||
| 22390 | "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
| 22391 | .alen = 16, | ||
| 22392 | .result = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F" | ||
| 22393 | "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4" | ||
| 22394 | "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD" | ||
| 22395 | "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55" | ||
| 22396 | "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10" | ||
| 22397 | "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B" | ||
| 22398 | "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92" | ||
| 22399 | "\x23\xA6\x10\xB0\x26\xD6\xD9\x26" | ||
| 22400 | "\x5A\x48\x6A\x3E", | ||
| 22401 | .rlen = 68, | ||
| 22402 | }, { | ||
| 22403 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 22404 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 22405 | "\x00\x00\x00", | ||
| 22406 | .klen = 19, | ||
| 22407 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
| 22408 | .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00" | ||
| 22409 | "\x80\x01\xCB\x7A\x40\x67\x93\x18" | ||
| 22410 | "\x01\x01\x01\x01\x08\x00\x07\x5C" | ||
| 22411 | "\x02\x00\x44\x00\x61\x62\x63\x64" | ||
| 22412 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 22413 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 22414 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
| 22415 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
| 22416 | .ilen = 64, | ||
| 22417 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
| 22418 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
| 22419 | .alen = 16, | ||
| 22420 | .result = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F" | ||
| 22421 | "\x92\x51\x23\xA4\xC1\x5B\xF0\x10" | ||
| 22422 | "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC" | ||
| 22423 | "\x89\xC8\xF8\x42\x62\x95\xB7\xCB" | ||
| 22424 | "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7" | ||
| 22425 | "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C" | ||
| 22426 | "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9" | ||
| 22427 | "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE" | ||
| 22428 | "\x36\x25\xC1\x10\x12\x1C\xCA\x82" | ||
| 22429 | "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A", | ||
| 22430 | .rlen = 80, | ||
| 22431 | }, { | ||
| 22432 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 22433 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 22434 | "\x57\x69\x0E", | ||
| 22435 | .klen = 19, | ||
| 22436 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 22437 | .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00" | ||
| 22438 | "\x80\x01\xCB\x7C\x40\x67\x93\x18" | ||
| 22439 | "\x01\x01\x01\x01\x08\x00\x08\x5C" | ||
| 22440 | "\x02\x00\x43\x00\x61\x62\x63\x64" | ||
| 22441 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 22442 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 22443 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
| 22444 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
| 22445 | .ilen = 64, | ||
| 22446 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
| 22447 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 22448 | "\xA2\xFC\xA1\xA3", | ||
| 22449 | .alen = 20, | ||
| 22450 | .result = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6" | ||
| 22451 | "\x10\x60\x40\x62\x6B\x4F\x97\x8E" | ||
| 22452 | "\x0B\xB2\x22\x97\xCB\x21\xE0\x90" | ||
| 22453 | "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B" | ||
| 22454 | "\x79\x01\x58\x50\x01\x06\xE1\xE0" | ||
| 22455 | "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" | ||
| 22456 | "\x30\xB8\xE5\xDF\xD7\x12\x56\x75" | ||
| 22457 | "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD" | ||
| 22458 | "\x97\x57\xCA\xC1\x20\xD0\x86\xB9" | ||
| 22459 | "\x66\x9D\xB4\x2B\x96\x22\xAC\x67", | ||
| 22460 | .rlen = 80, | ||
| 22461 | }, { | ||
| 22462 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 22463 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 22464 | "\x57\x69\x0E", | ||
| 22465 | .klen = 19, | ||
| 22466 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 22467 | .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00" | ||
| 22468 | "\x80\x01\x44\x1F\x40\x67\x93\xB6" | ||
| 22469 | "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" | ||
| 22470 | "\x01\x02\x02\x01", | ||
| 22471 | .ilen = 28, | ||
| 22472 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
| 22473 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 22474 | "\xA2\xFC\xA1\xA3", | ||
| 22475 | .alen = 20, | ||
| 22476 | .result = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6" | ||
| 22477 | "\x10\x60\xCF\x01\x6B\x4F\x97\x20" | ||
| 22478 | "\xEA\xB3\x23\x94\xC9\x21\x1D\x33" | ||
| 22479 | "\xA1\xE5\x90\x40\x05\x37\x45\x70" | ||
| 22480 | "\xB5\xD6\x09\x0A\x23\x73\x33\xF9" | ||
| 22481 | "\x08\xB4\x22\xE4", | ||
| 22482 | .rlen = 44, | ||
| 22483 | }, { | ||
| 22484 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
| 22485 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
| 22486 | "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
| 22487 | "\xCA\xFE\xBA", | ||
| 22488 | .klen = 27, | ||
| 22489 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 22490 | .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00" | ||
| 22491 | "\x40\x06\x78\x80\x0A\x01\x03\x8F" | ||
| 22492 | "\x0A\x01\x06\x12\x80\x23\x06\xB8" | ||
| 22493 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" | ||
| 22494 | "\x50\x10\x16\xD0\x75\x68\x00\x01", | ||
| 22495 | .ilen = 40, | ||
| 22496 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" | ||
| 22497 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 22498 | .alen = 16, | ||
| 22499 | .result = "\x05\x22\x15\xD1\x52\x56\x85\x04" | ||
| 22500 | "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA" | ||
| 22501 | "\xEA\x16\x37\x50\xF3\xDF\x84\x3B" | ||
| 22502 | "\x2F\x32\x18\x57\x34\x2A\x8C\x23" | ||
| 22503 | "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB" | ||
| 22504 | "\x34\xA5\x9F\x6C\x48\x30\x1E\x22" | ||
| 22505 | "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B", | ||
| 22506 | .rlen = 56, | ||
| 22507 | }, { | ||
| 22508 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22509 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22510 | "\xDE\xCA\xF8", | ||
| 22511 | .klen = 19, | ||
| 22512 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
| 22513 | .input = "\x45\x00\x00\x49\x33\xBA\x00\x00" | ||
| 22514 | "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" | ||
| 22515 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
| 22516 | "\x00\x35\xDD\x7B\x80\x03\x02\xD5" | ||
| 22517 | "\x00\x00\x4E\x20\x00\x1E\x8C\x18" | ||
| 22518 | "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" | ||
| 22519 | "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" | ||
| 22520 | "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" | ||
| 22521 | "\x9B\x62\x66\xC0\x47\x22\xB1\x49" | ||
| 22522 | "\x23\x01\x01\x01", | ||
| 22523 | .ilen = 76, | ||
| 22524 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
| 22525 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" | ||
| 22526 | "\xCE\xFA\xCE\x74", | ||
| 22527 | .alen = 20, | ||
| 22528 | .result = "\x92\xD0\x53\x79\x33\x38\xD5\xF3" | ||
| 22529 | "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90" | ||
| 22530 | "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76" | ||
| 22531 | "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00" | ||
| 22532 | "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA" | ||
| 22533 | "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7" | ||
| 22534 | "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6" | ||
| 22535 | "\x63\xC9\xDB\x05\x69\x51\x12\xAD" | ||
| 22536 | "\x3E\x00\x32\x73\x86\xF2\xEE\xF5" | ||
| 22537 | "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D" | ||
| 22538 | "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7" | ||
| 22539 | "\x12\x25\x0B\xF9", | ||
| 22540 | .rlen = 92, | ||
| 22541 | }, { | ||
| 22542 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22543 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22544 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22545 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22546 | "\x73\x61\x6C", | ||
| 22547 | .klen = 35, | ||
| 22548 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
| 22549 | .input = "\x45\x08\x00\x28\x73\x2C\x00\x00" | ||
| 22550 | "\x40\x06\xE9\xF9\x0A\x01\x06\x12" | ||
| 22551 | "\x0A\x01\x03\x8F\x06\xB8\x80\x23" | ||
| 22552 | "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" | ||
| 22553 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", | ||
| 22554 | .ilen = 40, | ||
| 22555 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
| 22556 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" | ||
| 22557 | "\x69\x76\x65\x63", | ||
| 22558 | .alen = 20, | ||
| 22559 | .result = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42" | ||
| 22560 | "\x2C\x64\x87\x46\x1E\x34\x10\x05" | ||
| 22561 | "\x29\x6B\xBB\x36\xE9\x69\xAD\x92" | ||
| 22562 | "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D" | ||
| 22563 | "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA" | ||
| 22564 | "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7" | ||
| 22565 | "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D", | ||
| 22566 | .rlen = 56, | ||
| 22567 | }, { | ||
| 22568 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 22569 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 22570 | "\x57\x69\x0E", | ||
| 22571 | .klen = 19, | ||
| 22572 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 22573 | .input = "\x45\x00\x00\x49\x33\x3E\x00\x00" | ||
| 22574 | "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" | ||
| 22575 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
| 22576 | "\x00\x35\xCB\x45\x80\x03\x02\x5B" | ||
| 22577 | "\x00\x00\x01\xE0\x00\x1E\x8C\x18" | ||
| 22578 | "\xD6\x57\x59\xD5\x22\x84\xA0\x35" | ||
| 22579 | "\x2C\x71\x47\x5C\x88\x80\x39\x1C" | ||
| 22580 | "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" | ||
| 22581 | "\x5A\xE2\x70\xC0\x38\x99\x49\x39" | ||
| 22582 | "\x15\x01\x01\x01", | ||
| 22583 | .ilen = 76, | ||
| 22584 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
| 22585 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 22586 | "\xA2\xFC\xA1\xA3", | ||
| 22587 | .alen = 20, | ||
| 22588 | .result = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6" | ||
| 22589 | "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86" | ||
| 22590 | "\xC8\x02\xF0\xB0\x03\x09\xD9\x02" | ||
| 22591 | "\xA0\xD2\x59\x04\xD1\x85\x2A\x24" | ||
| 22592 | "\x1C\x67\x3E\xD8\x68\x72\x06\x94" | ||
| 22593 | "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B" | ||
| 22594 | "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C" | ||
| 22595 | "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE" | ||
| 22596 | "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5" | ||
| 22597 | "\x23\xF4\x84\x40\x74\x14\x8A\x6B" | ||
| 22598 | "\xDB\xD7\x67\xED\xA4\x93\xF3\x47" | ||
| 22599 | "\xCC\xF7\x46\x6F", | ||
| 22600 | .rlen = 92, | ||
| 22601 | }, { | ||
| 22602 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22603 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22604 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22605 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22606 | "\x73\x61\x6C", | ||
| 22607 | .klen = 35, | ||
| 22608 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
| 22609 | .input = "\x63\x69\x73\x63\x6F\x01\x72\x75" | ||
| 22610 | "\x6C\x65\x73\x01\x74\x68\x65\x01" | ||
| 22611 | "\x6E\x65\x74\x77\x65\x01\x64\x65" | ||
| 22612 | "\x66\x69\x6E\x65\x01\x74\x68\x65" | ||
| 22613 | "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" | ||
| 22614 | "\x67\x69\x65\x73\x01\x74\x68\x61" | ||
| 22615 | "\x74\x77\x69\x6C\x6C\x01\x64\x65" | ||
| 22616 | "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" | ||
| 22617 | "\x72\x72\x6F\x77\x01\x02\x02\x01", | ||
| 22618 | .ilen = 72, | ||
| 22619 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
| 22620 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" | ||
| 22621 | "\x69\x76\x65\x63", | ||
| 22622 | .alen = 20, | ||
| 22623 | .result = "\xEA\x15\xC4\x98\xAC\x15\x22\x37" | ||
| 22624 | "\x00\x07\x1D\xBE\x60\x5D\x73\x16" | ||
| 22625 | "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4" | ||
| 22626 | "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A" | ||
| 22627 | "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4" | ||
| 22628 | "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5" | ||
| 22629 | "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6" | ||
| 22630 | "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C" | ||
| 22631 | "\x73\xF4\xE7\x12\x84\x4C\x37\x0A" | ||
| 22632 | "\x4A\x8F\x06\x37\x48\xF9\xF9\x05" | ||
| 22633 | "\x55\x13\x40\xC3\xD5\x55\x3A\x3D", | ||
| 22634 | .rlen = 88, | ||
| 22635 | }, { | ||
| 22636 | .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" | ||
| 22637 | "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" | ||
| 22638 | "\xD9\x66\x42", | ||
| 22639 | .klen = 19, | ||
| 22640 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | ||
| 22641 | .input = "\x01\x02\x02\x01", | ||
| 22642 | .ilen = 4, | ||
| 22643 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" | ||
| 22644 | "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | ||
| 22645 | .alen = 16, | ||
| 22646 | .result = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD" | ||
| 22647 | "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90" | ||
| 22648 | "\xF7\x61\x24\x62", | ||
| 22649 | .rlen = 20, | ||
| 22650 | }, { | ||
| 22651 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22652 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22653 | "\xDE\xCA\xF8", | ||
| 22654 | .klen = 19, | ||
| 22655 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
| 22656 | .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72" | ||
| 22657 | "\x01\x6E\x6F\x74\x01\x74\x6F\x01" | ||
| 22658 | "\x62\x65\x00\x01", | ||
| 22659 | .ilen = 20, | ||
| 22660 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
| 22661 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" | ||
| 22662 | "\xCE\xFA\xCE\x74", | ||
| 22663 | .alen = 20, | ||
| 22664 | .result = "\xA3\xBF\x52\x52\x65\x83\xBA\x81" | ||
| 22665 | "\x03\x9B\x84\xFC\x44\x8C\xBB\x81" | ||
| 22666 | "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0" | ||
| 22667 | "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC" | ||
| 22668 | "\x17\x17\x65\xAD", | ||
| 22669 | .rlen = 36, | ||
| 22670 | }, { | ||
| 22671 | .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" | ||
| 22672 | "\x6D\x61\x72\x69\x6A\x75\x61\x6E" | ||
| 22673 | "\x61\x61\x6E\x64\x64\x6F\x69\x74" | ||
| 22674 | "\x62\x65\x66\x6F\x72\x65\x69\x61" | ||
| 22675 | "\x74\x75\x72", | ||
| 22676 | .klen = 35, | ||
| 22677 | .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", | ||
| 22678 | .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
| 22679 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
| 22680 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
| 22681 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
| 22682 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 22683 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 22684 | "\x01\x02\x02\x01", | ||
| 22685 | .ilen = 52, | ||
| 22686 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" | ||
| 22687 | "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" | ||
| 22688 | "\x67\x65\x74\x6D", | ||
| 22689 | .alen = 20, | ||
| 22690 | .result = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10" | ||
| 22691 | "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A" | ||
| 22692 | "\x48\x59\x80\x18\x1A\x18\x1A\x04" | ||
| 22693 | "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75" | ||
| 22694 | "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF" | ||
| 22695 | "\x16\xC3\x35\xA8\xE7\xCE\x84\x04" | ||
| 22696 | "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42" | ||
| 22697 | "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B" | ||
| 22698 | "\x39\xDB\xC8\xDC", | ||
| 22699 | .rlen = 68, | ||
| 22700 | }, { | ||
| 22701 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 22702 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 22703 | "\x57\x69\x0E", | ||
| 22704 | .klen = 19, | ||
| 22705 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 22706 | .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
| 22707 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
| 22708 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
| 22709 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
| 22710 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 22711 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 22712 | "\x01\x02\x02\x01", | ||
| 22713 | .ilen = 52, | ||
| 22714 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" | ||
| 22715 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 22716 | "\xA2\xFC\xA1\xA3", | ||
| 22717 | .alen = 20, | ||
| 22718 | .result = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6" | ||
| 22719 | "\x10\x60\x54\x25\xEB\x80\x04\x93" | ||
| 22720 | "\xCA\x1B\x23\x97\xCB\x21\x2E\x01" | ||
| 22721 | "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B" | ||
| 22722 | "\x79\x01\x58\x50\x01\x06\xE1\xE0" | ||
| 22723 | "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" | ||
| 22724 | "\x44\xCC\x90\xBF\x00\x94\x94\x92" | ||
| 22725 | "\x20\x17\x0C\x1B\x55\xDE\x7E\x68" | ||
| 22726 | "\xF4\x95\x5D\x4F", | ||
| 22727 | .rlen = 68, | ||
| 22728 | }, { | ||
| 22729 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
| 22730 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
| 22731 | "\x22\x43\x3C", | ||
| 22732 | .klen = 19, | ||
| 22733 | .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", | ||
| 22734 | .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00" | ||
| 22735 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
| 22736 | "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" | ||
| 22737 | "\x71\x72\x73\x74\x01\x02\x02\x01", | ||
| 22738 | .ilen = 32, | ||
| 22739 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
| 22740 | "\x00\x00\x00\x07\x48\x55\xEC\x7D" | ||
| 22741 | "\x3A\x23\x4B\xFD", | ||
| 22742 | .alen = 20, | ||
| 22743 | .result = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02" | ||
| 22744 | "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F" | ||
| 22745 | "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF" | ||
| 22746 | "\x58\x7D\xCF\x29\xA3\x41\x68\x6B" | ||
| 22747 | "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F" | ||
| 22748 | "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6", | ||
| 22749 | .rlen = 48, | ||
| 22750 | } | ||
| 22751 | }; | ||
| 22752 | |||
| 22753 | static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { | ||
| 22754 | { /* Generated using Crypto++ */ | ||
| 22755 | .key = zeroed_string, | ||
| 22756 | .klen = 19, | ||
| 22757 | .iv = zeroed_string, | ||
| 22758 | .result = zeroed_string, | ||
| 22759 | .rlen = 16, | ||
| 22760 | .assoc = zeroed_string, | ||
| 22761 | .alen = 16, | ||
| 22762 | .input = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F" | ||
| 22763 | "\x12\x50\xE8\xDE\x81\x3C\x63\x08" | ||
| 22764 | "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5" | ||
| 22765 | "\x27\x50\x01\xAC\x03\x33\x39\xFB", | ||
| 22766 | .ilen = 32, | ||
| 22767 | },{ | ||
| 22768 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22769 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22770 | "\x00\x00\x00", | ||
| 22771 | .klen = 19, | ||
| 22772 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22773 | .result = zeroed_string, | ||
| 22774 | .rlen = 16, | ||
| 22775 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 22776 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22777 | .alen = 16, | ||
| 22778 | .input = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F" | ||
| 22779 | "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17" | ||
| 22780 | "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA" | ||
| 22781 | "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0", | ||
| 22782 | .ilen = 32, | ||
| 22783 | |||
| 22784 | }, { | ||
| 22785 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22786 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22787 | "\x00\x00\x00", | ||
| 22788 | .klen = 19, | ||
| 22789 | .iv = zeroed_string, | ||
| 22790 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22791 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22792 | .rlen = 16, | ||
| 22793 | .assoc = zeroed_string, | ||
| 22794 | .alen = 16, | ||
| 22795 | .input = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" | ||
| 22796 | "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" | ||
| 22797 | "\xA1\xE2\xC2\x42\x2B\x81\x70\x40" | ||
| 22798 | "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C", | ||
| 22799 | .ilen = 32, | ||
| 22800 | }, { | ||
| 22801 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22802 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22803 | "\x00\x00\x00", | ||
| 22804 | .klen = 19, | ||
| 22805 | .iv = zeroed_string, | ||
| 22806 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22807 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22808 | .rlen = 16, | ||
| 22809 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22810 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
| 22811 | .alen = 16, | ||
| 22812 | .input = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" | ||
| 22813 | "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" | ||
| 22814 | "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9" | ||
| 22815 | "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E", | ||
| 22816 | .ilen = 32, | ||
| 22817 | }, { | ||
| 22818 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22819 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22820 | "\x00\x00\x00", | ||
| 22821 | .klen = 19, | ||
| 22822 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22823 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22824 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22825 | .rlen = 16, | ||
| 22826 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22827 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22828 | .alen = 16, | ||
| 22829 | .input = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" | ||
| 22830 | "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" | ||
| 22831 | "\x43\x8E\x76\x57\x3B\xB4\x05\xE8" | ||
| 22832 | "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED", | ||
| 22833 | .ilen = 32, | ||
| 22834 | }, { | ||
| 22835 | .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" | ||
| 22836 | "\x6d\x6a\x8f\x94\x67\x30\x83\x08" | ||
| 22837 | "\x00\x00\x00", | ||
| 22838 | .klen = 19, | ||
| 22839 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22840 | .result = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22841 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22842 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22843 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22844 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22845 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22846 | "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22847 | "\x01\x01\x01\x01\x01\x01\x01\x01", | ||
| 22848 | .rlen = 64, | ||
| 22849 | .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" | ||
| 22850 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 22851 | .alen = 16, | ||
| 22852 | .input = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" | ||
| 22853 | "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" | ||
| 22854 | "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4" | ||
| 22855 | "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02" | ||
| 22856 | "\x9B\xAB\x39\x18\xEB\x94\x34\x36" | ||
| 22857 | "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49" | ||
| 22858 | "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E" | ||
| 22859 | "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB" | ||
| 22860 | "\x02\x73\xDD\xE7\x30\x4A\x30\x54" | ||
| 22861 | "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F", | ||
| 22862 | .ilen = 80, | ||
| 22863 | }, { | ||
| 22864 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
| 22865 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" | ||
| 22866 | "\x00\x00\x00", | ||
| 22867 | .klen = 19, | ||
| 22868 | .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", | ||
| 22869 | .result = "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22870 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22871 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22872 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22873 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22874 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22875 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22876 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22877 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22878 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22879 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22880 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22881 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22882 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22883 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22884 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22885 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22886 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22887 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22888 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22889 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22890 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22891 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 22892 | "\xff\xff\xff\xff\xff\xff\xff\xff", | ||
| 22893 | .rlen = 192, | ||
| 22894 | .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" | ||
| 22895 | "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" | ||
| 22896 | "\x89\xab\xcd\xef", | ||
| 22897 | .alen = 20, | ||
| 22898 | .input = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E" | ||
| 22899 | "\x7C\x64\x6D\x33\x46\x77\xAC\xB1" | ||
| 22900 | "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95" | ||
| 22901 | "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C" | ||
| 22902 | "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB" | ||
| 22903 | "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01" | ||
| 22904 | "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04" | ||
| 22905 | "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B" | ||
| 22906 | "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09" | ||
| 22907 | "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79" | ||
| 22908 | "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D" | ||
| 22909 | "\x22\xEB\xD1\x09\x80\x3F\x5A\x70" | ||
| 22910 | "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B" | ||
| 22911 | "\x36\x12\x00\x89\xAA\x5D\x55\xDA" | ||
| 22912 | "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6" | ||
| 22913 | "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F" | ||
| 22914 | "\x2B\x50\x44\x52\xC2\x10\x7D\x38" | ||
| 22915 | "\x82\x64\x83\x0C\xAE\x49\xD0\xE5" | ||
| 22916 | "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43" | ||
| 22917 | "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC" | ||
| 22918 | "\x19\x6D\x60\x66\x61\xF9\x9A\x3F" | ||
| 22919 | "\x75\xFC\x38\x53\x5B\xB5\xCD\x52" | ||
| 22920 | "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98" | ||
| 22921 | "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E" | ||
| 22922 | "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC" | ||
| 22923 | "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A", | ||
| 22924 | .ilen = 208, | ||
| 22925 | }, { /* From draft-mcgrew-gcm-test-01 */ | ||
| 22926 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
| 22927 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
| 22928 | "\x2E\x44\x3B", | ||
| 22929 | .klen = 19, | ||
| 22930 | .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", | ||
| 22931 | .result = "\x45\x00\x00\x48\x69\x9A\x00\x00" | ||
| 22932 | "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" | ||
| 22933 | "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" | ||
| 22934 | "\x38\xD3\x01\x00\x00\x01\x00\x00" | ||
| 22935 | "\x00\x00\x00\x00\x04\x5F\x73\x69" | ||
| 22936 | "\x70\x04\x5F\x75\x64\x70\x03\x73" | ||
| 22937 | "\x69\x70\x09\x63\x79\x62\x65\x72" | ||
| 22938 | "\x63\x69\x74\x79\x02\x64\x6B\x00" | ||
| 22939 | "\x00\x21\x00\x01\x01\x02\x02\x01", | ||
| 22940 | .rlen = 72, | ||
| 22941 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
| 22942 | "\x00\x00\x00\x00\x49\x56\xED\x7E" | ||
| 22943 | "\x3B\x24\x4C\xFE", | ||
| 22944 | .alen = 20, | ||
| 22945 | .input = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB" | ||
| 22946 | "\x83\x60\xF5\xBA\x3A\x56\x79\xE6" | ||
| 22947 | "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E" | ||
| 22948 | "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF" | ||
| 22949 | "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0" | ||
| 22950 | "\x39\xF8\x53\x6D\x31\x22\x2B\xBF" | ||
| 22951 | "\x98\x81\xFC\x34\xEE\x85\x36\xCD" | ||
| 22952 | "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35" | ||
| 22953 | "\x18\x85\x54\xB2\xBC\xDD\x3F\x43" | ||
| 22954 | "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC" | ||
| 22955 | "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF", | ||
| 22956 | .ilen = 88, | ||
| 22957 | }, { | ||
| 22958 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
| 22959 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
| 22960 | "\xCA\xFE\xBA", | ||
| 22961 | .klen = 19, | ||
| 22962 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 22963 | .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00" | ||
| 22964 | "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" | ||
| 22965 | "\xC0\xA8\x01\x01\x0A\x98\x00\x35" | ||
| 22966 | "\x00\x2A\x23\x43\xB2\xD0\x01\x00" | ||
| 22967 | "\x00\x01\x00\x00\x00\x00\x00\x00" | ||
| 22968 | "\x03\x73\x69\x70\x09\x63\x79\x62" | ||
| 22969 | "\x65\x72\x63\x69\x74\x79\x02\x64" | ||
| 22970 | "\x6B\x00\x00\x01\x00\x01\x00\x01", | ||
| 22971 | .rlen = 64, | ||
| 22972 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" | ||
| 22973 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 22974 | .alen = 16, | ||
| 22975 | .input = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8" | ||
| 22976 | "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16" | ||
| 22977 | "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF" | ||
| 22978 | "\xF5\x71\xB6\x37\x84\xA7\xB1\x99" | ||
| 22979 | "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D" | ||
| 22980 | "\xEF\x25\x88\x1F\x1D\x77\x11\xFF" | ||
| 22981 | "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B" | ||
| 22982 | "\x00\x62\x1F\x68\x4E\x7C\xA0\x97" | ||
| 22983 | "\x10\x72\x7E\x53\x13\x3B\x68\xE4" | ||
| 22984 | "\x30\x99\x91\x79\x09\xEA\xFF\x6A", | ||
| 22985 | .ilen = 80, | ||
| 22986 | }, { | ||
| 22987 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22988 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22989 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 22990 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 22991 | "\x11\x22\x33", | ||
| 22992 | .klen = 35, | ||
| 22993 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
| 22994 | .result = "\x45\x00\x00\x30\x69\xA6\x40\x00" | ||
| 22995 | "\x80\x06\x26\x90\xC0\xA8\x01\x02" | ||
| 22996 | "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" | ||
| 22997 | "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" | ||
| 22998 | "\x70\x02\x40\x00\x20\xBF\x00\x00" | ||
| 22999 | "\x02\x04\x05\xB4\x01\x01\x04\x02" | ||
| 23000 | "\x01\x02\x02\x01", | ||
| 23001 | .rlen = 52, | ||
| 23002 | .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" | ||
| 23003 | "\x01\x02\x03\x04\x05\x06\x07\x08", | ||
| 23004 | .alen = 16, | ||
| 23005 | .input = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F" | ||
| 23006 | "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4" | ||
| 23007 | "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD" | ||
| 23008 | "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55" | ||
| 23009 | "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10" | ||
| 23010 | "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B" | ||
| 23011 | "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92" | ||
| 23012 | "\x23\xA6\x10\xB0\x26\xD6\xD9\x26" | ||
| 23013 | "\x5A\x48\x6A\x3E", | ||
| 23014 | .ilen = 68, | ||
| 23015 | }, { | ||
| 23016 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 23017 | "\x00\x00\x00\x00\x00\x00\x00\x00" | ||
| 23018 | "\x00\x00\x00", | ||
| 23019 | .klen = 19, | ||
| 23020 | .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
| 23021 | .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00" | ||
| 23022 | "\x80\x01\xCB\x7A\x40\x67\x93\x18" | ||
| 23023 | "\x01\x01\x01\x01\x08\x00\x07\x5C" | ||
| 23024 | "\x02\x00\x44\x00\x61\x62\x63\x64" | ||
| 23025 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 23026 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 23027 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
| 23028 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
| 23029 | .rlen = 64, | ||
| 23030 | .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" | ||
| 23031 | "\x00\x00\x00\x00\x00\x00\x00\x00", | ||
| 23032 | .alen = 16, | ||
| 23033 | .input = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F" | ||
| 23034 | "\x92\x51\x23\xA4\xC1\x5B\xF0\x10" | ||
| 23035 | "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC" | ||
| 23036 | "\x89\xC8\xF8\x42\x62\x95\xB7\xCB" | ||
| 23037 | "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7" | ||
| 23038 | "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C" | ||
| 23039 | "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9" | ||
| 23040 | "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE" | ||
| 23041 | "\x36\x25\xC1\x10\x12\x1C\xCA\x82" | ||
| 23042 | "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A", | ||
| 23043 | .ilen = 80, | ||
| 23044 | }, { | ||
| 23045 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 23046 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 23047 | "\x57\x69\x0E", | ||
| 23048 | .klen = 19, | ||
| 23049 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 23050 | .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00" | ||
| 23051 | "\x80\x01\xCB\x7C\x40\x67\x93\x18" | ||
| 23052 | "\x01\x01\x01\x01\x08\x00\x08\x5C" | ||
| 23053 | "\x02\x00\x43\x00\x61\x62\x63\x64" | ||
| 23054 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 23055 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 23056 | "\x75\x76\x77\x61\x62\x63\x64\x65" | ||
| 23057 | "\x66\x67\x68\x69\x01\x02\x02\x01", | ||
| 23058 | .rlen = 64, | ||
| 23059 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
| 23060 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 23061 | "\xA2\xFC\xA1\xA3", | ||
| 23062 | .alen = 20, | ||
| 23063 | .input = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6" | ||
| 23064 | "\x10\x60\x40\x62\x6B\x4F\x97\x8E" | ||
| 23065 | "\x0B\xB2\x22\x97\xCB\x21\xE0\x90" | ||
| 23066 | "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B" | ||
| 23067 | "\x79\x01\x58\x50\x01\x06\xE1\xE0" | ||
| 23068 | "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" | ||
| 23069 | "\x30\xB8\xE5\xDF\xD7\x12\x56\x75" | ||
| 23070 | "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD" | ||
| 23071 | "\x97\x57\xCA\xC1\x20\xD0\x86\xB9" | ||
| 23072 | "\x66\x9D\xB4\x2B\x96\x22\xAC\x67", | ||
| 23073 | .ilen = 80, | ||
| 23074 | }, { | ||
| 23075 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 23076 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 23077 | "\x57\x69\x0E", | ||
| 23078 | .klen = 19, | ||
| 23079 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 23080 | .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00" | ||
| 23081 | "\x80\x01\x44\x1F\x40\x67\x93\xB6" | ||
| 23082 | "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" | ||
| 23083 | "\x01\x02\x02\x01", | ||
| 23084 | .rlen = 28, | ||
| 23085 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
| 23086 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 23087 | "\xA2\xFC\xA1\xA3", | ||
| 23088 | .alen = 20, | ||
| 23089 | .input = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6" | ||
| 23090 | "\x10\x60\xCF\x01\x6B\x4F\x97\x20" | ||
| 23091 | "\xEA\xB3\x23\x94\xC9\x21\x1D\x33" | ||
| 23092 | "\xA1\xE5\x90\x40\x05\x37\x45\x70" | ||
| 23093 | "\xB5\xD6\x09\x0A\x23\x73\x33\xF9" | ||
| 23094 | "\x08\xB4\x22\xE4", | ||
| 23095 | .ilen = 44, | ||
| 23096 | }, { | ||
| 23097 | .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
| 23098 | "\x6D\x6A\x8F\x94\x67\x30\x83\x08" | ||
| 23099 | "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" | ||
| 23100 | "\xCA\xFE\xBA", | ||
| 23101 | .klen = 27, | ||
| 23102 | .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 23103 | .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00" | ||
| 23104 | "\x40\x06\x78\x80\x0A\x01\x03\x8F" | ||
| 23105 | "\x0A\x01\x06\x12\x80\x23\x06\xB8" | ||
| 23106 | "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" | ||
| 23107 | "\x50\x10\x16\xD0\x75\x68\x00\x01", | ||
| 23108 | .rlen = 40, | ||
| 23109 | .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" | ||
| 23110 | "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", | ||
| 23111 | .alen = 16, | ||
| 23112 | .input = "\x05\x22\x15\xD1\x52\x56\x85\x04" | ||
| 23113 | "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA" | ||
| 23114 | "\xEA\x16\x37\x50\xF3\xDF\x84\x3B" | ||
| 23115 | "\x2F\x32\x18\x57\x34\x2A\x8C\x23" | ||
| 23116 | "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB" | ||
| 23117 | "\x34\xA5\x9F\x6C\x48\x30\x1E\x22" | ||
| 23118 | "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B", | ||
| 23119 | .ilen = 56, | ||
| 23120 | }, { | ||
| 23121 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 23122 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 23123 | "\xDE\xCA\xF8", | ||
| 23124 | .klen = 19, | ||
| 23125 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
| 23126 | .result = "\x45\x00\x00\x49\x33\xBA\x00\x00" | ||
| 23127 | "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" | ||
| 23128 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
| 23129 | "\x00\x35\xDD\x7B\x80\x03\x02\xD5" | ||
| 23130 | "\x00\x00\x4E\x20\x00\x1E\x8C\x18" | ||
| 23131 | "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" | ||
| 23132 | "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" | ||
| 23133 | "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" | ||
| 23134 | "\x9B\x62\x66\xC0\x47\x22\xB1\x49" | ||
| 23135 | "\x23\x01\x01\x01", | ||
| 23136 | .rlen = 76, | ||
| 23137 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
| 23138 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" | ||
| 23139 | "\xCE\xFA\xCE\x74", | ||
| 23140 | .alen = 20, | ||
| 23141 | .input = "\x92\xD0\x53\x79\x33\x38\xD5\xF3" | ||
| 23142 | "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90" | ||
| 23143 | "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76" | ||
| 23144 | "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00" | ||
| 23145 | "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA" | ||
| 23146 | "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7" | ||
| 23147 | "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6" | ||
| 23148 | "\x63\xC9\xDB\x05\x69\x51\x12\xAD" | ||
| 23149 | "\x3E\x00\x32\x73\x86\xF2\xEE\xF5" | ||
| 23150 | "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D" | ||
| 23151 | "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7" | ||
| 23152 | "\x12\x25\x0B\xF9", | ||
| 23153 | .ilen = 92, | ||
| 23154 | }, { | ||
| 23155 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 23156 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 23157 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 23158 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 23159 | "\x73\x61\x6C", | ||
| 23160 | .klen = 35, | ||
| 23161 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
| 23162 | .result = "\x45\x08\x00\x28\x73\x2C\x00\x00" | ||
| 23163 | "\x40\x06\xE9\xF9\x0A\x01\x06\x12" | ||
| 23164 | "\x0A\x01\x03\x8F\x06\xB8\x80\x23" | ||
| 23165 | "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" | ||
| 23166 | "\x50\x10\x1F\x64\x6D\x54\x00\x01", | ||
| 23167 | .rlen = 40, | ||
| 23168 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
| 23169 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" | ||
| 23170 | "\x69\x76\x65\x63", | ||
| 23171 | .alen = 20, | ||
| 23172 | .input = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42" | ||
| 23173 | "\x2C\x64\x87\x46\x1E\x34\x10\x05" | ||
| 23174 | "\x29\x6B\xBB\x36\xE9\x69\xAD\x92" | ||
| 23175 | "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D" | ||
| 23176 | "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA" | ||
| 23177 | "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7" | ||
| 23178 | "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D", | ||
| 23179 | .ilen = 56, | ||
| 23180 | }, { | ||
| 23181 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 23182 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 23183 | "\x57\x69\x0E", | ||
| 23184 | .klen = 19, | ||
| 23185 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 23186 | .result = "\x45\x00\x00\x49\x33\x3E\x00\x00" | ||
| 23187 | "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" | ||
| 23188 | "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" | ||
| 23189 | "\x00\x35\xCB\x45\x80\x03\x02\x5B" | ||
| 23190 | "\x00\x00\x01\xE0\x00\x1E\x8C\x18" | ||
| 23191 | "\xD6\x57\x59\xD5\x22\x84\xA0\x35" | ||
| 23192 | "\x2C\x71\x47\x5C\x88\x80\x39\x1C" | ||
| 23193 | "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" | ||
| 23194 | "\x5A\xE2\x70\xC0\x38\x99\x49\x39" | ||
| 23195 | "\x15\x01\x01\x01", | ||
| 23196 | .rlen = 76, | ||
| 23197 | .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" | ||
| 23198 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 23199 | "\xA2\xFC\xA1\xA3", | ||
| 23200 | .alen = 20, | ||
| 23201 | .input = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6" | ||
| 23202 | "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86" | ||
| 23203 | "\xC8\x02\xF0\xB0\x03\x09\xD9\x02" | ||
| 23204 | "\xA0\xD2\x59\x04\xD1\x85\x2A\x24" | ||
| 23205 | "\x1C\x67\x3E\xD8\x68\x72\x06\x94" | ||
| 23206 | "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B" | ||
| 23207 | "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C" | ||
| 23208 | "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE" | ||
| 23209 | "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5" | ||
| 23210 | "\x23\xF4\x84\x40\x74\x14\x8A\x6B" | ||
| 23211 | "\xDB\xD7\x67\xED\xA4\x93\xF3\x47" | ||
| 23212 | "\xCC\xF7\x46\x6F", | ||
| 23213 | .ilen = 92, | ||
| 23214 | }, { | ||
| 23215 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 23216 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 23217 | "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 23218 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 23219 | "\x73\x61\x6C", | ||
| 23220 | .klen = 35, | ||
| 23221 | .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", | ||
| 23222 | .result = "\x63\x69\x73\x63\x6F\x01\x72\x75" | ||
| 23223 | "\x6C\x65\x73\x01\x74\x68\x65\x01" | ||
| 23224 | "\x6E\x65\x74\x77\x65\x01\x64\x65" | ||
| 23225 | "\x66\x69\x6E\x65\x01\x74\x68\x65" | ||
| 23226 | "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" | ||
| 23227 | "\x67\x69\x65\x73\x01\x74\x68\x61" | ||
| 23228 | "\x74\x77\x69\x6C\x6C\x01\x64\x65" | ||
| 23229 | "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" | ||
| 23230 | "\x72\x72\x6F\x77\x01\x02\x02\x01", | ||
| 23231 | .rlen = 72, | ||
| 23232 | .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" | ||
| 23233 | "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" | ||
| 23234 | "\x69\x76\x65\x63", | ||
| 23235 | .alen = 20, | ||
| 23236 | .input = "\xEA\x15\xC4\x98\xAC\x15\x22\x37" | ||
| 23237 | "\x00\x07\x1D\xBE\x60\x5D\x73\x16" | ||
| 23238 | "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4" | ||
| 23239 | "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A" | ||
| 23240 | "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4" | ||
| 23241 | "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5" | ||
| 23242 | "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6" | ||
| 23243 | "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C" | ||
| 23244 | "\x73\xF4\xE7\x12\x84\x4C\x37\x0A" | ||
| 23245 | "\x4A\x8F\x06\x37\x48\xF9\xF9\x05" | ||
| 23246 | "\x55\x13\x40\xC3\xD5\x55\x3A\x3D", | ||
| 23247 | .ilen = 88, | ||
| 23248 | }, { | ||
| 23249 | .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" | ||
| 23250 | "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" | ||
| 23251 | "\xD9\x66\x42", | ||
| 23252 | .klen = 19, | ||
| 23253 | .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | ||
| 23254 | .result = "\x01\x02\x02\x01", | ||
| 23255 | .rlen = 4, | ||
| 23256 | .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" | ||
| 23257 | "\x43\x45\x7E\x91\x82\x44\x3B\xC6", | ||
| 23258 | .alen = 16, | ||
| 23259 | .input = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD" | ||
| 23260 | "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90" | ||
| 23261 | "\xF7\x61\x24\x62", | ||
| 23262 | .ilen = 20, | ||
| 23263 | }, { | ||
| 23264 | .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" | ||
| 23265 | "\x34\x45\x56\x67\x78\x89\x9A\xAB" | ||
| 23266 | "\xDE\xCA\xF8", | ||
| 23267 | .klen = 19, | ||
| 23268 | .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", | ||
| 23269 | .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72" | ||
| 23270 | "\x01\x6E\x6F\x74\x01\x74\x6F\x01" | ||
| 23271 | "\x62\x65\x00\x01", | ||
| 23272 | .rlen = 20, | ||
| 23273 | .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" | ||
| 23274 | "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" | ||
| 23275 | "\xCE\xFA\xCE\x74", | ||
| 23276 | .alen = 20, | ||
| 23277 | .input = "\xA3\xBF\x52\x52\x65\x83\xBA\x81" | ||
| 23278 | "\x03\x9B\x84\xFC\x44\x8C\xBB\x81" | ||
| 23279 | "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0" | ||
| 23280 | "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC" | ||
| 23281 | "\x17\x17\x65\xAD", | ||
| 23282 | .ilen = 36, | ||
| 23283 | }, { | ||
| 23284 | .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" | ||
| 23285 | "\x6D\x61\x72\x69\x6A\x75\x61\x6E" | ||
| 23286 | "\x61\x61\x6E\x64\x64\x6F\x69\x74" | ||
| 23287 | "\x62\x65\x66\x6F\x72\x65\x69\x61" | ||
| 23288 | "\x74\x75\x72", | ||
| 23289 | .klen = 35, | ||
| 23290 | .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", | ||
| 23291 | .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
| 23292 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
| 23293 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
| 23294 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
| 23295 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 23296 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 23297 | "\x01\x02\x02\x01", | ||
| 23298 | .rlen = 52, | ||
| 23299 | .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" | ||
| 23300 | "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" | ||
| 23301 | "\x67\x65\x74\x6D", | ||
| 23302 | .alen = 20, | ||
| 23303 | .input = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10" | ||
| 23304 | "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A" | ||
| 23305 | "\x48\x59\x80\x18\x1A\x18\x1A\x04" | ||
| 23306 | "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75" | ||
| 23307 | "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF" | ||
| 23308 | "\x16\xC3\x35\xA8\xE7\xCE\x84\x04" | ||
| 23309 | "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42" | ||
| 23310 | "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B" | ||
| 23311 | "\x39\xDB\xC8\xDC", | ||
| 23312 | .ilen = 68, | ||
| 23313 | }, { | ||
| 23314 | .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" | ||
| 23315 | "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" | ||
| 23316 | "\x57\x69\x0E", | ||
| 23317 | .klen = 19, | ||
| 23318 | .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", | ||
| 23319 | .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" | ||
| 23320 | "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" | ||
| 23321 | "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" | ||
| 23322 | "\x02\x00\x07\x00\x61\x62\x63\x64" | ||
| 23323 | "\x65\x66\x67\x68\x69\x6A\x6B\x6C" | ||
| 23324 | "\x6D\x6E\x6F\x70\x71\x72\x73\x74" | ||
| 23325 | "\x01\x02\x02\x01", | ||
| 23326 | .rlen = 52, | ||
| 23327 | .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" | ||
| 23328 | "\x10\x10\x10\x10\x4E\x28\x00\x00" | ||
| 23329 | "\xA2\xFC\xA1\xA3", | ||
| 23330 | .alen = 20, | ||
| 23331 | .input = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6" | ||
| 23332 | "\x10\x60\x54\x25\xEB\x80\x04\x93" | ||
| 23333 | "\xCA\x1B\x23\x97\xCB\x21\x2E\x01" | ||
| 23334 | "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B" | ||
| 23335 | "\x79\x01\x58\x50\x01\x06\xE1\xE0" | ||
| 23336 | "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" | ||
| 23337 | "\x44\xCC\x90\xBF\x00\x94\x94\x92" | ||
| 23338 | "\x20\x17\x0C\x1B\x55\xDE\x7E\x68" | ||
| 23339 | "\xF4\x95\x5D\x4F", | ||
| 23340 | .ilen = 68, | ||
| 23341 | }, { | ||
| 23342 | .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" | ||
| 23343 | "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" | ||
| 23344 | "\x22\x43\x3C", | ||
| 23345 | .klen = 19, | ||
| 23346 | .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", | ||
| 23347 | .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00" | ||
| 23348 | "\x61\x62\x63\x64\x65\x66\x67\x68" | ||
| 23349 | "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" | ||
| 23350 | "\x71\x72\x73\x74\x01\x02\x02\x01", | ||
| 23351 | .rlen = 32, | ||
| 23352 | .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" | ||
| 23353 | "\x00\x00\x00\x07\x48\x55\xEC\x7D" | ||
| 23354 | "\x3A\x23\x4B\xFD", | ||
| 23355 | .alen = 20, | ||
| 23356 | .input = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02" | ||
| 23357 | "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F" | ||
| 23358 | "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF" | ||
| 23359 | "\x58\x7D\xCF\x29\xA3\x41\x68\x6B" | ||
| 23360 | "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F" | ||
| 23361 | "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6", | ||
| 23362 | .ilen = 48, | ||
| 23363 | } | ||
| 23364 | }; | ||
| 23365 | |||
| 23366 | /* | ||
| 22063 | * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5. | 23367 | * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5. |
| 22064 | */ | 23368 | */ |
| 22065 | #define RFC7539_ENC_TEST_VECTORS 2 | 23369 | #define RFC7539_ENC_TEST_VECTORS 2 |
| @@ -22343,8 +23647,9 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = { | |||
| 22343 | .klen = 36, | 23647 | .klen = 36, |
| 22344 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | 23648 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", |
| 22345 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" | 23649 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" |
| 22346 | "\x00\x00\x4e\x91", | 23650 | "\x00\x00\x4e\x91\x01\x02\x03\x04" |
| 22347 | .alen = 12, | 23651 | "\x05\x06\x07\x08", |
| 23652 | .alen = 20, | ||
| 22348 | .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74" | 23653 | .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74" |
| 22349 | "\x2d\x44\x72\x61\x66\x74\x73\x20" | 23654 | "\x2d\x44\x72\x61\x66\x74\x73\x20" |
| 22350 | "\x61\x72\x65\x20\x64\x72\x61\x66" | 23655 | "\x61\x72\x65\x20\x64\x72\x61\x66" |
| @@ -22430,8 +23735,9 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = { | |||
| 22430 | .klen = 36, | 23735 | .klen = 36, |
| 22431 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", | 23736 | .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", |
| 22432 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" | 23737 | .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" |
| 22433 | "\x00\x00\x4e\x91", | 23738 | "\x00\x00\x4e\x91\x01\x02\x03\x04" |
| 22434 | .alen = 12, | 23739 | "\x05\x06\x07\x08", |
| 23740 | .alen = 20, | ||
| 22435 | .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" | 23741 | .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" |
| 22436 | "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" | 23742 | "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" |
| 22437 | "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" | 23743 | "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" |
| @@ -30174,7 +31480,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = { | |||
| 30174 | }, | 31480 | }, |
| 30175 | }; | 31481 | }; |
| 30176 | 31482 | ||
| 30177 | #define CHACHA20_ENC_TEST_VECTORS 3 | 31483 | #define CHACHA20_ENC_TEST_VECTORS 4 |
| 30178 | static struct cipher_testvec chacha20_enc_tv_template[] = { | 31484 | static struct cipher_testvec chacha20_enc_tv_template[] = { |
| 30179 | { /* RFC7539 A.2. Test Vector #1 */ | 31485 | { /* RFC7539 A.2. Test Vector #1 */ |
| 30180 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" | 31486 | .key = "\x00\x00\x00\x00\x00\x00\x00\x00" |
| @@ -30348,6 +31654,338 @@ static struct cipher_testvec chacha20_enc_tv_template[] = { | |||
| 30348 | "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36" | 31654 | "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36" |
| 30349 | "\x75\x7a\x79\x7a\xc1\x88\xd1", | 31655 | "\x75\x7a\x79\x7a\xc1\x88\xd1", |
| 30350 | .rlen = 127, | 31656 | .rlen = 127, |
| 31657 | }, { /* Self-made test vector for long data */ | ||
| 31658 | .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" | ||
| 31659 | "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" | ||
| 31660 | "\x47\x39\x17\xc1\x40\x2b\x80\x09" | ||
| 31661 | "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", | ||
| 31662 | .klen = 32, | ||
| 31663 | .iv = "\x1c\x00\x00\x00\x00\x00\x00\x00" | ||
| 31664 | "\x00\x00\x00\x00\x00\x00\x00\x01", | ||
| 31665 | .input = "\x49\xee\xe0\xdc\x24\x90\x40\xcd" | ||
| 31666 | "\xc5\x40\x8f\x47\x05\xbc\xdd\x81" | ||
| 31667 | "\x47\xc6\x8d\xe6\xb1\x8f\xd7\xcb" | ||
| 31668 | "\x09\x0e\x6e\x22\x48\x1f\xbf\xb8" | ||
| 31669 | "\x5c\xf7\x1e\x8a\xc1\x23\xf2\xd4" | ||
| 31670 | "\x19\x4b\x01\x0f\x4e\xa4\x43\xce" | ||
| 31671 | "\x01\xc6\x67\xda\x03\x91\x18\x90" | ||
| 31672 | "\xa5\xa4\x8e\x45\x03\xb3\x2d\xac" | ||
| 31673 | "\x74\x92\xd3\x53\x47\xc8\xdd\x25" | ||
| 31674 | "\x53\x6c\x02\x03\x87\x0d\x11\x0c" | ||
| 31675 | "\x58\xe3\x12\x18\xfd\x2a\x5b\x40" | ||
| 31676 | "\x0c\x30\xf0\xb8\x3f\x43\xce\xae" | ||
| 31677 | "\x65\x3a\x7d\x7c\xf4\x54\xaa\xcc" | ||
| 31678 | "\x33\x97\xc3\x77\xba\xc5\x70\xde" | ||
| 31679 | "\xd7\xd5\x13\xa5\x65\xc4\x5f\x0f" | ||
| 31680 | "\x46\x1a\x0d\x97\xb5\xf3\xbb\x3c" | ||
| 31681 | "\x84\x0f\x2b\xc5\xaa\xea\xf2\x6c" | ||
| 31682 | "\xc9\xb5\x0c\xee\x15\xf3\x7d\xbe" | ||
| 31683 | "\x9f\x7b\x5a\xa6\xae\x4f\x83\xb6" | ||
| 31684 | "\x79\x49\x41\xf4\x58\x18\xcb\x86" | ||
| 31685 | "\x7f\x30\x0e\xf8\x7d\x44\x36\xea" | ||
| 31686 | "\x75\xeb\x88\x84\x40\x3c\xad\x4f" | ||
| 31687 | "\x6f\x31\x6b\xaa\x5d\xe5\xa5\xc5" | ||
| 31688 | "\x21\x66\xe9\xa7\xe3\xb2\x15\x88" | ||
| 31689 | "\x78\xf6\x79\xa1\x59\x47\x12\x4e" | ||
| 31690 | "\x9f\x9f\x64\x1a\xa0\x22\x5b\x08" | ||
| 31691 | "\xbe\x7c\x36\xc2\x2b\x66\x33\x1b" | ||
| 31692 | "\xdd\x60\x71\xf7\x47\x8c\x61\xc3" | ||
| 31693 | "\xda\x8a\x78\x1e\x16\xfa\x1e\x86" | ||
| 31694 | "\x81\xa6\x17\x2a\xa7\xb5\xc2\xe7" | ||
| 31695 | "\xa4\xc7\x42\xf1\xcf\x6a\xca\xb4" | ||
| 31696 | "\x45\xcf\xf3\x93\xf0\xe7\xea\xf6" | ||
| 31697 | "\xf4\xe6\x33\x43\x84\x93\xa5\x67" | ||
| 31698 | "\x9b\x16\x58\x58\x80\x0f\x2b\x5c" | ||
| 31699 | "\x24\x74\x75\x7f\x95\x81\xb7\x30" | ||
| 31700 | "\x7a\x33\xa7\xf7\x94\x87\x32\x27" | ||
| 31701 | "\x10\x5d\x14\x4c\x43\x29\xdd\x26" | ||
| 31702 | "\xbd\x3e\x3c\x0e\xfe\x0e\xa5\x10" | ||
| 31703 | "\xea\x6b\x64\xfd\x73\xc6\xed\xec" | ||
| 31704 | "\xa8\xc9\xbf\xb3\xba\x0b\x4d\x07" | ||
| 31705 | "\x70\xfc\x16\xfd\x79\x1e\xd7\xc5" | ||
| 31706 | "\x49\x4e\x1c\x8b\x8d\x79\x1b\xb1" | ||
| 31707 | "\xec\xca\x60\x09\x4c\x6a\xd5\x09" | ||
| 31708 | "\x49\x46\x00\x88\x22\x8d\xce\xea" | ||
| 31709 | "\xb1\x17\x11\xde\x42\xd2\x23\xc1" | ||
| 31710 | "\x72\x11\xf5\x50\x73\x04\x40\x47" | ||
| 31711 | "\xf9\x5d\xe7\xa7\x26\xb1\x7e\xb0" | ||
| 31712 | "\x3f\x58\xc1\x52\xab\x12\x67\x9d" | ||
| 31713 | "\x3f\x43\x4b\x68\xd4\x9c\x68\x38" | ||
| 31714 | "\x07\x8a\x2d\x3e\xf3\xaf\x6a\x4b" | ||
| 31715 | "\xf9\xe5\x31\x69\x22\xf9\xa6\x69" | ||
| 31716 | "\xc6\x9c\x96\x9a\x12\x35\x95\x1d" | ||
| 31717 | "\x95\xd5\xdd\xbe\xbf\x93\x53\x24" | ||
| 31718 | "\xfd\xeb\xc2\x0a\x64\xb0\x77\x00" | ||
| 31719 | "\x6f\x88\xc4\x37\x18\x69\x7c\xd7" | ||
| 31720 | "\x41\x92\x55\x4c\x03\xa1\x9a\x4b" | ||
| 31721 | "\x15\xe5\xdf\x7f\x37\x33\x72\xc1" | ||
| 31722 | "\x8b\x10\x67\xa3\x01\x57\x94\x25" | ||
| 31723 | "\x7b\x38\x71\x7e\xdd\x1e\xcc\x73" | ||
| 31724 | "\x55\xd2\x8e\xeb\x07\xdd\xf1\xda" | ||
| 31725 | "\x58\xb1\x47\x90\xfe\x42\x21\x72" | ||
| 31726 | "\xa3\x54\x7a\xa0\x40\xec\x9f\xdd" | ||
| 31727 | "\xc6\x84\x6e\xca\xae\xe3\x68\xb4" | ||
| 31728 | "\x9d\xe4\x78\xff\x57\xf2\xf8\x1b" | ||
| 31729 | "\x03\xa1\x31\xd9\xde\x8d\xf5\x22" | ||
| 31730 | "\x9c\xdd\x20\xa4\x1e\x27\xb1\x76" | ||
| 31731 | "\x4f\x44\x55\xe2\x9b\xa1\x9c\xfe" | ||
| 31732 | "\x54\xf7\x27\x1b\xf4\xde\x02\xf5" | ||
| 31733 | "\x1b\x55\x48\x5c\xdc\x21\x4b\x9e" | ||
| 31734 | "\x4b\x6e\xed\x46\x23\xdc\x65\xb2" | ||
| 31735 | "\xcf\x79\x5f\x28\xe0\x9e\x8b\xe7" | ||
| 31736 | "\x4c\x9d\x8a\xff\xc1\xa6\x28\xb8" | ||
| 31737 | "\x65\x69\x8a\x45\x29\xef\x74\x85" | ||
| 31738 | "\xde\x79\xc7\x08\xae\x30\xb0\xf4" | ||
| 31739 | "\xa3\x1d\x51\x41\xab\xce\xcb\xf6" | ||
| 31740 | "\xb5\xd8\x6d\xe0\x85\xe1\x98\xb3" | ||
| 31741 | "\x43\xbb\x86\x83\x0a\xa0\xf5\xb7" | ||
| 31742 | "\x04\x0b\xfa\x71\x1f\xb0\xf6\xd9" | ||
| 31743 | "\x13\x00\x15\xf0\xc7\xeb\x0d\x5a" | ||
| 31744 | "\x9f\xd7\xb9\x6c\x65\x14\x22\x45" | ||
| 31745 | "\x6e\x45\x32\x3e\x7e\x60\x1a\x12" | ||
| 31746 | "\x97\x82\x14\xfb\xaa\x04\x22\xfa" | ||
| 31747 | "\xa0\xe5\x7e\x8c\x78\x02\x48\x5d" | ||
| 31748 | "\x78\x33\x5a\x7c\xad\xdb\x29\xce" | ||
| 31749 | "\xbb\x8b\x61\xa4\xb7\x42\xe2\xac" | ||
| 31750 | "\x8b\x1a\xd9\x2f\x0b\x8b\x62\x21" | ||
| 31751 | "\x83\x35\x7e\xad\x73\xc2\xb5\x6c" | ||
| 31752 | "\x10\x26\x38\x07\xe5\xc7\x36\x80" | ||
| 31753 | "\xe2\x23\x12\x61\xf5\x48\x4b\x2b" | ||
| 31754 | "\xc5\xdf\x15\xd9\x87\x01\xaa\xac" | ||
| 31755 | "\x1e\x7c\xad\x73\x78\x18\x63\xe0" | ||
| 31756 | "\x8b\x9f\x81\xd8\x12\x6a\x28\x10" | ||
| 31757 | "\xbe\x04\x68\x8a\x09\x7c\x1b\x1c" | ||
| 31758 | "\x83\x66\x80\x47\x80\xe8\xfd\x35" | ||
| 31759 | "\x1c\x97\x6f\xae\x49\x10\x66\xcc" | ||
| 31760 | "\xc6\xd8\xcc\x3a\x84\x91\x20\x77" | ||
| 31761 | "\x72\xe4\x24\xd2\x37\x9f\xc5\xc9" | ||
| 31762 | "\x25\x94\x10\x5f\x40\x00\x64\x99" | ||
| 31763 | "\xdc\xae\xd7\x21\x09\x78\x50\x15" | ||
| 31764 | "\xac\x5f\xc6\x2c\xa2\x0b\xa9\x39" | ||
| 31765 | "\x87\x6e\x6d\xab\xde\x08\x51\x16" | ||
| 31766 | "\xc7\x13\xe9\xea\xed\x06\x8e\x2c" | ||
| 31767 | "\xf8\x37\x8c\xf0\xa6\x96\x8d\x43" | ||
| 31768 | "\xb6\x98\x37\xb2\x43\xed\xde\xdf" | ||
| 31769 | "\x89\x1a\xe7\xeb\x9d\xa1\x7b\x0b" | ||
| 31770 | "\x77\xb0\xe2\x75\xc0\xf1\x98\xd9" | ||
| 31771 | "\x80\x55\xc9\x34\x91\xd1\x59\xe8" | ||
| 31772 | "\x4b\x0f\xc1\xa9\x4b\x7a\x84\x06" | ||
| 31773 | "\x20\xa8\x5d\xfa\xd1\xde\x70\x56" | ||
| 31774 | "\x2f\x9e\x91\x9c\x20\xb3\x24\xd8" | ||
| 31775 | "\x84\x3d\xe1\x8c\x7e\x62\x52\xe5" | ||
| 31776 | "\x44\x4b\x9f\xc2\x93\x03\xea\x2b" | ||
| 31777 | "\x59\xc5\xfa\x3f\x91\x2b\xbb\x23" | ||
| 31778 | "\xf5\xb2\x7b\xf5\x38\xaf\xb3\xee" | ||
| 31779 | "\x63\xdc\x7b\xd1\xff\xaa\x8b\xab" | ||
| 31780 | "\x82\x6b\x37\x04\xeb\x74\xbe\x79" | ||
| 31781 | "\xb9\x83\x90\xef\x20\x59\x46\xff" | ||
| 31782 | "\xe9\x97\x3e\x2f\xee\xb6\x64\x18" | ||
| 31783 | "\x38\x4c\x7a\x4a\xf9\x61\xe8\x9a" | ||
| 31784 | "\xa1\xb5\x01\xa6\x47\xd3\x11\xd4" | ||
| 31785 | "\xce\xd3\x91\x49\x88\xc7\xb8\x4d" | ||
| 31786 | "\xb1\xb9\x07\x6d\x16\x72\xae\x46" | ||
| 31787 | "\x5e\x03\xa1\x4b\xb6\x02\x30\xa8" | ||
| 31788 | "\x3d\xa9\x07\x2a\x7c\x19\xe7\x62" | ||
| 31789 | "\x87\xe3\x82\x2f\x6f\xe1\x09\xd9" | ||
| 31790 | "\x94\x97\xea\xdd\x58\x9e\xae\x76" | ||
| 31791 | "\x7e\x35\xe5\xb4\xda\x7e\xf4\xde" | ||
| 31792 | "\xf7\x32\x87\xcd\x93\xbf\x11\x56" | ||
| 31793 | "\x11\xbe\x08\x74\xe1\x69\xad\xe2" | ||
| 31794 | "\xd7\xf8\x86\x75\x8a\x3c\xa4\xbe" | ||
| 31795 | "\x70\xa7\x1b\xfc\x0b\x44\x2a\x76" | ||
| 31796 | "\x35\xea\x5d\x85\x81\xaf\x85\xeb" | ||
| 31797 | "\xa0\x1c\x61\xc2\xf7\x4f\xa5\xdc" | ||
| 31798 | "\x02\x7f\xf6\x95\x40\x6e\x8a\x9a" | ||
| 31799 | "\xf3\x5d\x25\x6e\x14\x3a\x22\xc9" | ||
| 31800 | "\x37\x1c\xeb\x46\x54\x3f\xa5\x91" | ||
| 31801 | "\xc2\xb5\x8c\xfe\x53\x08\x97\x32" | ||
| 31802 | "\x1b\xb2\x30\x27\xfe\x25\x5d\xdc" | ||
| 31803 | "\x08\x87\xd0\xe5\x94\x1a\xd4\xf1" | ||
| 31804 | "\xfe\xd6\xb4\xa3\xe6\x74\x81\x3c" | ||
| 31805 | "\x1b\xb7\x31\xa7\x22\xfd\xd4\xdd" | ||
| 31806 | "\x20\x4e\x7c\x51\xb0\x60\x73\xb8" | ||
| 31807 | "\x9c\xac\x91\x90\x7e\x01\xb0\xe1" | ||
| 31808 | "\x8a\x2f\x75\x1c\x53\x2a\x98\x2a" | ||
| 31809 | "\x06\x52\x95\x52\xb2\xe9\x25\x2e" | ||
| 31810 | "\x4c\xe2\x5a\x00\xb2\x13\x81\x03" | ||
| 31811 | "\x77\x66\x0d\xa5\x99\xda\x4e\x8c" | ||
| 31812 | "\xac\xf3\x13\x53\x27\x45\xaf\x64" | ||
| 31813 | "\x46\xdc\xea\x23\xda\x97\xd1\xab" | ||
| 31814 | "\x7d\x6c\x30\x96\x1f\xbc\x06\x34" | ||
| 31815 | "\x18\x0b\x5e\x21\x35\x11\x8d\x4c" | ||
| 31816 | "\xe0\x2d\xe9\x50\x16\x74\x81\xa8" | ||
| 31817 | "\xb4\x34\xb9\x72\x42\xa6\xcc\xbc" | ||
| 31818 | "\xca\x34\x83\x27\x10\x5b\x68\x45" | ||
| 31819 | "\x8f\x52\x22\x0c\x55\x3d\x29\x7c" | ||
| 31820 | "\xe3\xc0\x66\x05\x42\x91\x5f\x58" | ||
| 31821 | "\xfe\x4a\x62\xd9\x8c\xa9\x04\x19" | ||
| 31822 | "\x04\xa9\x08\x4b\x57\xfc\x67\x53" | ||
| 31823 | "\x08\x7c\xbc\x66\x8a\xb0\xb6\x9f" | ||
| 31824 | "\x92\xd6\x41\x7c\x5b\x2a\x00\x79" | ||
| 31825 | "\x72", | ||
| 31826 | .ilen = 1281, | ||
| 31827 | .result = "\x45\xe8\xe0\xb6\x9c\xca\xfd\x87" | ||
| 31828 | "\xe8\x1d\x37\x96\x8a\xe3\x40\x35" | ||
| 31829 | "\xcf\x5e\x3a\x46\x3d\xfb\xd0\x69" | ||
| 31830 | "\xde\xaf\x7a\xd5\x0d\xe9\x52\xec" | ||
| 31831 | "\xc2\x82\xe5\x3e\x7d\xb2\x4a\xd9" | ||
| 31832 | "\xbb\xc3\x9f\xc0\x5d\xac\x93\x8d" | ||
| 31833 | "\x0e\x6f\xd3\xd7\xfb\x6a\x0d\xce" | ||
| 31834 | "\x92\x2c\xf7\xbb\x93\x57\xcc\xee" | ||
| 31835 | "\x42\x72\x6f\xc8\x4b\xd2\x76\xbf" | ||
| 31836 | "\xa0\xe3\x7a\x39\xf9\x5c\x8e\xfd" | ||
| 31837 | "\xa1\x1d\x41\xe5\x08\xc1\x1c\x11" | ||
| 31838 | "\x92\xfd\x39\x5c\x51\xd0\x2f\x66" | ||
| 31839 | "\x33\x4a\x71\x15\xfe\xee\x12\x54" | ||
| 31840 | "\x8c\x8f\x34\xd8\x50\x3c\x18\xa6" | ||
| 31841 | "\xc5\xe1\x46\x8a\xfb\x5f\x7e\x25" | ||
| 31842 | "\x9b\xe2\xc3\x66\x41\x2b\xb3\xa5" | ||
| 31843 | "\x57\x0e\x94\x17\x26\x39\xbb\x54" | ||
| 31844 | "\xae\x2e\x6f\x42\xfb\x4d\x89\x6f" | ||
| 31845 | "\x9d\xf1\x16\x2e\xe3\xe7\xfc\xe3" | ||
| 31846 | "\xb2\x4b\x2b\xa6\x7c\x04\x69\x3a" | ||
| 31847 | "\x70\x5a\xa7\xf1\x31\x64\x19\xca" | ||
| 31848 | "\x45\x79\xd8\x58\x23\x61\xaf\xc2" | ||
| 31849 | "\x52\x05\xc3\x0b\xc1\x64\x7c\x81" | ||
| 31850 | "\xd9\x11\xcf\xff\x02\x3d\x51\x84" | ||
| 31851 | "\x01\xac\xc6\x2e\x34\x2b\x09\x3a" | ||
| 31852 | "\xa8\x5d\x98\x0e\x89\xd9\xef\x8f" | ||
| 31853 | "\xd9\xd7\x7d\xdd\x63\x47\x46\x7d" | ||
| 31854 | "\xa1\xda\x0b\x53\x7d\x79\xcd\xc9" | ||
| 31855 | "\x86\xdd\x6b\x13\xa1\x9a\x70\xdd" | ||
| 31856 | "\x5c\xa1\x69\x3c\xe4\x5d\xe3\x8c" | ||
| 31857 | "\xe5\xf4\x87\x9c\x10\xcf\x0f\x0b" | ||
| 31858 | "\xc8\x43\xdc\xf8\x1d\x62\x5e\x5b" | ||
| 31859 | "\xe2\x03\x06\xc5\x71\xb6\x48\xa5" | ||
| 31860 | "\xf0\x0f\x2d\xd5\xa2\x73\x55\x8f" | ||
| 31861 | "\x01\xa7\x59\x80\x5f\x11\x6c\x40" | ||
| 31862 | "\xff\xb1\xf2\xc6\x7e\x01\xbb\x1c" | ||
| 31863 | "\x69\x9c\xc9\x3f\x71\x5f\x07\x7e" | ||
| 31864 | "\xdf\x6f\x99\xca\x9c\xfd\xf9\xb9" | ||
| 31865 | "\x49\xe7\xcc\x91\xd5\x9b\x8f\x03" | ||
| 31866 | "\xae\xe7\x61\x32\xef\x41\x6c\x75" | ||
| 31867 | "\x84\x9b\x8c\xce\x1d\x6b\x93\x21" | ||
| 31868 | "\x41\xec\xc6\xad\x8e\x0c\x48\xa8" | ||
| 31869 | "\xe2\xf5\x57\xde\xf7\x38\xfd\x4a" | ||
| 31870 | "\x6f\xa7\x4a\xf9\xac\x7d\xb1\x85" | ||
| 31871 | "\x7d\x6c\x95\x0a\x5a\xcf\x68\xd2" | ||
| 31872 | "\xe0\x7a\x26\xd9\xc1\x6d\x3e\xc6" | ||
| 31873 | "\x37\xbd\xbe\x24\x36\x77\x9f\x1b" | ||
| 31874 | "\xc1\x22\xf3\x79\xae\x95\x78\x66" | ||
| 31875 | "\x97\x11\xc0\x1a\xf1\xe8\x0d\x38" | ||
| 31876 | "\x09\xc2\xee\xb7\xd3\x46\x7b\x59" | ||
| 31877 | "\x77\x23\xe8\xb4\x92\x3d\x78\xbe" | ||
| 31878 | "\xe2\x25\x63\xa5\x2a\x06\x70\x92" | ||
| 31879 | "\x32\x63\xf9\x19\x21\x68\xe1\x0b" | ||
| 31880 | "\x9a\xd0\xee\x21\xdb\x1f\xe0\xde" | ||
| 31881 | "\x3e\x64\x02\x4d\x0e\xe0\x0a\xa9" | ||
| 31882 | "\xed\x19\x8c\xa8\xbf\xe3\x2e\x75" | ||
| 31883 | "\x24\x2b\xb0\xe5\x82\x6a\x1e\x6f" | ||
| 31884 | "\x71\x2a\x3a\x60\xed\x06\x0d\x17" | ||
| 31885 | "\xa2\xdb\x29\x1d\xae\xb2\xc4\xfb" | ||
| 31886 | "\x94\x04\xd8\x58\xfc\xc4\x04\x4e" | ||
| 31887 | "\xee\xc7\xc1\x0f\xe9\x9b\x63\x2d" | ||
| 31888 | "\x02\x3e\x02\x67\xe5\xd8\xbb\x79" | ||
| 31889 | "\xdf\xd2\xeb\x50\xe9\x0a\x02\x46" | ||
| 31890 | "\xdf\x68\xcf\xe7\x2b\x0a\x56\xd6" | ||
| 31891 | "\xf7\xbc\x44\xad\xb8\xb5\x5f\xeb" | ||
| 31892 | "\xbc\x74\x6b\xe8\x7e\xb0\x60\xc6" | ||
| 31893 | "\x0d\x96\x09\xbb\x19\xba\xe0\x3c" | ||
| 31894 | "\xc4\x6c\xbf\x0f\x58\xc0\x55\x62" | ||
| 31895 | "\x23\xa0\xff\xb5\x1c\xfd\x18\xe1" | ||
| 31896 | "\xcf\x6d\xd3\x52\xb4\xce\xa6\xfa" | ||
| 31897 | "\xaa\xfb\x1b\x0b\x42\x6d\x79\x42" | ||
| 31898 | "\x48\x70\x5b\x0e\xdd\x3a\xc9\x69" | ||
| 31899 | "\x8b\x73\x67\xf6\x95\xdb\x8c\xfb" | ||
| 31900 | "\xfd\xb5\x08\x47\x42\x84\x9a\xfa" | ||
| 31901 | "\xcc\x67\xb2\x3c\xb6\xfd\xd8\x32" | ||
| 31902 | "\xd6\x04\xb6\x4a\xea\x53\x4b\xf5" | ||
| 31903 | "\x94\x16\xad\xf0\x10\x2e\x2d\xb4" | ||
| 31904 | "\x8b\xab\xe5\x89\xc7\x39\x12\xf3" | ||
| 31905 | "\x8d\xb5\x96\x0b\x87\x5d\xa7\x7c" | ||
| 31906 | "\xb0\xc2\xf6\x2e\x57\x97\x2c\xdc" | ||
| 31907 | "\x54\x1c\x34\x72\xde\x0c\x68\x39" | ||
| 31908 | "\x9d\x32\xa5\x75\x92\x13\x32\xea" | ||
| 31909 | "\x90\x27\xbd\x5b\x1d\xb9\x21\x02" | ||
| 31910 | "\x1c\xcc\xba\x97\x5e\x49\x58\xe8" | ||
| 31911 | "\xac\x8b\xf3\xce\x3c\xf0\x00\xe9" | ||
| 31912 | "\x6c\xae\xe9\x77\xdf\xf4\x02\xcd" | ||
| 31913 | "\x55\x25\x89\x9e\x90\xf3\x6b\x8f" | ||
| 31914 | "\xb7\xd6\x47\x98\x26\x2f\x31\x2f" | ||
| 31915 | "\x8d\xbf\x54\xcd\x99\xeb\x80\xd7" | ||
| 31916 | "\xac\xc3\x08\xc2\xa6\x32\xf1\x24" | ||
| 31917 | "\x76\x7c\x4f\x78\x53\x55\xfb\x00" | ||
| 31918 | "\x8a\xd6\x52\x53\x25\x45\xfb\x0a" | ||
| 31919 | "\x6b\xb9\xbe\x3c\x5e\x11\xcc\x6a" | ||
| 31920 | "\xdd\xfc\xa7\xc4\x79\x4d\xbd\xfb" | ||
| 31921 | "\xce\x3a\xf1\x7a\xda\xeb\xfe\x64" | ||
| 31922 | "\x28\x3d\x0f\xee\x80\xba\x0c\xf8" | ||
| 31923 | "\xe9\x5b\x3a\xd4\xae\xc9\xf3\x0e" | ||
| 31924 | "\xe8\x5d\xc5\x5c\x0b\x20\x20\xee" | ||
| 31925 | "\x40\x0d\xde\x07\xa7\x14\xb4\x90" | ||
| 31926 | "\xb6\xbd\x3b\xae\x7d\x2b\xa7\xc7" | ||
| 31927 | "\xdc\x0b\x4c\x5d\x65\xb0\xd2\xc5" | ||
| 31928 | "\x79\x61\x23\xe0\xa2\x99\x73\x55" | ||
| 31929 | "\xad\xc6\xfb\xc7\x54\xb5\x98\x1f" | ||
| 31930 | "\x8c\x86\xc2\x3f\xbe\x5e\xea\x64" | ||
| 31931 | "\xa3\x60\x18\x9f\x80\xaf\x52\x74" | ||
| 31932 | "\x1a\xfe\x22\xc2\x92\x67\x40\x02" | ||
| 31933 | "\x08\xee\x67\x5b\x67\xe0\x3d\xde" | ||
| 31934 | "\x7a\xaf\x8e\x28\xf3\x5e\x0e\xf4" | ||
| 31935 | "\x48\x56\xaa\x85\x22\xd8\x36\xed" | ||
| 31936 | "\x3b\x3d\x68\x69\x30\xbc\x71\x23" | ||
| 31937 | "\xb1\x6e\x61\x03\x89\x44\x03\xf4" | ||
| 31938 | "\x32\xaa\x4c\x40\x9f\x69\xfb\x70" | ||
| 31939 | "\x91\xcc\x1f\x11\xbd\x76\x67\xe6" | ||
| 31940 | "\x10\x8b\x29\x39\x68\xea\x4e\x6d" | ||
| 31941 | "\xae\xfb\x40\xcf\xe2\xd0\x0d\x8d" | ||
| 31942 | "\x6f\xed\x9b\x8d\x64\x7a\x94\x8e" | ||
| 31943 | "\x32\x38\x78\xeb\x7d\x5f\xf9\x4d" | ||
| 31944 | "\x13\xbe\x21\xea\x16\xe7\x5c\xee" | ||
| 31945 | "\xcd\xf6\x5f\xc6\x45\xb2\x8f\x2b" | ||
| 31946 | "\xb5\x93\x3e\x45\xdb\xfd\xa2\x6a" | ||
| 31947 | "\xec\x83\x92\x99\x87\x47\xe0\x7c" | ||
| 31948 | "\xa2\x7b\xc4\x2a\xcd\xc0\x81\x03" | ||
| 31949 | "\x98\xb0\x87\xb6\x86\x13\x64\x33" | ||
| 31950 | "\x4c\xd7\x99\xbf\xdb\x7b\x6e\xaa" | ||
| 31951 | "\x76\xcc\xa0\x74\x1b\xa3\x6e\x83" | ||
| 31952 | "\xd4\xba\x7a\x84\x9d\x91\x71\xcd" | ||
| 31953 | "\x60\x2d\x56\xfd\x26\x35\xcb\xeb" | ||
| 31954 | "\xac\xe9\xee\xa4\xfc\x18\x5b\x91" | ||
| 31955 | "\xd5\xfe\x84\x45\xe0\xc7\xfd\x11" | ||
| 31956 | "\xe9\x00\xb6\x54\xdf\xe1\x94\xde" | ||
| 31957 | "\x2b\x70\x9f\x94\x7f\x15\x0e\x83" | ||
| 31958 | "\x63\x10\xb3\xf5\xea\xd3\xe8\xd1" | ||
| 31959 | "\xa5\xfc\x17\x19\x68\x9a\xbc\x17" | ||
| 31960 | "\x30\x43\x0a\x1a\x33\x92\xd4\x2a" | ||
| 31961 | "\x2e\x68\x99\xbc\x49\xf0\x68\xe3" | ||
| 31962 | "\xf0\x1f\xcb\xcc\xfa\xbb\x05\x56" | ||
| 31963 | "\x46\x84\x8b\x69\x83\x64\xc5\xe0" | ||
| 31964 | "\xc5\x52\x99\x07\x3c\xa6\x5c\xaf" | ||
| 31965 | "\xa3\xde\xd7\xdb\x43\xe6\xb7\x76" | ||
| 31966 | "\x4e\x4d\xd6\x71\x60\x63\x4a\x0c" | ||
| 31967 | "\x5f\xae\x25\x84\x22\x90\x5f\x26" | ||
| 31968 | "\x61\x4d\x8f\xaf\xc9\x22\xf2\x05" | ||
| 31969 | "\xcf\xc1\xdc\x68\xe5\x57\x8e\x24" | ||
| 31970 | "\x1b\x30\x59\xca\xd7\x0d\xc3\xd3" | ||
| 31971 | "\x52\x9e\x09\x3e\x0e\xaf\xdb\x5f" | ||
| 31972 | "\xc7\x2b\xde\x3a\xfd\xad\x93\x04" | ||
| 31973 | "\x74\x06\x89\x0e\x90\xeb\x85\xff" | ||
| 31974 | "\xe6\x3c\x12\x42\xf4\xfa\x80\x75" | ||
| 31975 | "\x5e\x4e\xd7\x2f\x93\x0b\x34\x41" | ||
| 31976 | "\x02\x85\x68\xd0\x03\x12\xde\x92" | ||
| 31977 | "\x54\x7a\x7e\xfb\x55\xe7\x88\xfb" | ||
| 31978 | "\xa4\xa9\xf2\xd1\xc6\x70\x06\x37" | ||
| 31979 | "\x25\xee\xa7\x6e\xd9\x89\x86\x50" | ||
| 31980 | "\x2e\x07\xdb\xfb\x2a\x86\x45\x0e" | ||
| 31981 | "\x91\xf4\x7c\xbb\x12\x60\xe8\x3f" | ||
| 31982 | "\x71\xbe\x8f\x9d\x26\xef\xd9\x89" | ||
| 31983 | "\xc4\x8f\xd8\xc5\x73\xd8\x84\xaa" | ||
| 31984 | "\x2f\xad\x22\x1e\x7e\xcf\xa2\x08" | ||
| 31985 | "\x23\x45\x89\x42\xa0\x30\xeb\xbf" | ||
| 31986 | "\xa1\xed\xad\xd5\x76\xfa\x24\x8f" | ||
| 31987 | "\x98", | ||
| 31988 | .rlen = 1281, | ||
| 30351 | }, | 31989 | }, |
| 30352 | }; | 31990 | }; |
| 30353 | 31991 | ||
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index c507bcad2c37..b2c1c047dc94 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c | |||
| @@ -381,6 +381,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) | |||
| 381 | clk[IMX6QDL_CLK_ASRC] = imx_clk_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc); | 381 | clk[IMX6QDL_CLK_ASRC] = imx_clk_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc); |
| 382 | clk[IMX6QDL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); | 382 | clk[IMX6QDL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); |
| 383 | clk[IMX6QDL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); | 383 | clk[IMX6QDL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); |
| 384 | clk[IMX6QDL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); | ||
| 385 | clk[IMX6QDL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10); | ||
| 386 | clk[IMX6QDL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); | ||
| 384 | clk[IMX6QDL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); | 387 | clk[IMX6QDL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); |
| 385 | clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16); | 388 | clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16); |
| 386 | clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); | 389 | clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 4044125fb5d5..07bc7aa6b224 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
| @@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH | |||
| 480 | hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256 | 480 | hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256 |
| 481 | hashing algorithms. | 481 | hashing algorithms. |
| 482 | 482 | ||
| 483 | config CRYPTO_DEV_SUN4I_SS | ||
| 484 | tristate "Support for Allwinner Security System cryptographic accelerator" | ||
| 485 | depends on ARCH_SUNXI | ||
| 486 | select CRYPTO_MD5 | ||
| 487 | select CRYPTO_SHA1 | ||
| 488 | select CRYPTO_AES | ||
| 489 | select CRYPTO_DES | ||
| 490 | select CRYPTO_BLKCIPHER | ||
| 491 | help | ||
| 492 | Some Allwinner SoC have a crypto accelerator named | ||
| 493 | Security System. Select this if you want to use it. | ||
| 494 | The Security System handle AES/DES/3DES ciphers in CBC mode | ||
| 495 | and SHA1 and MD5 hash algorithms. | ||
| 496 | |||
| 497 | To compile this driver as a module, choose M here: the module | ||
| 498 | will be called sun4i-ss. | ||
| 499 | |||
| 483 | endif # CRYPTO_HW | 500 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index e35c07a8da85..c3ced6fbd1b8 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
| @@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ | |||
| 28 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ | 28 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ |
| 29 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ | 29 | obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ |
| 30 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ | 30 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ |
| 31 | obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ | ||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 3b28e8c3de28..192a8fa325c1 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
| @@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) | |||
| 1113 | struct device *dev = (struct device *)data; | 1113 | struct device *dev = (struct device *)data; |
| 1114 | struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); | 1114 | struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); |
| 1115 | 1115 | ||
| 1116 | if (core_dev->dev->ce_base == 0) | 1116 | if (!core_dev->dev->ce_base) |
| 1117 | return 0; | 1117 | return 0; |
| 1118 | 1118 | ||
| 1119 | writel(PPC4XX_INTERRUPT_CLR, | 1119 | writel(PPC4XX_INTERRUPT_CLR, |
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e286e285aa8a..5652a53415dc 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config CRYPTO_DEV_FSL_CAAM | 1 | config CRYPTO_DEV_FSL_CAAM |
| 2 | tristate "Freescale CAAM-Multicore driver backend" | 2 | tristate "Freescale CAAM-Multicore driver backend" |
| 3 | depends on FSL_SOC | 3 | depends on FSL_SOC || ARCH_MXC |
| 4 | help | 4 | help |
| 5 | Enables the driver module for Freescale's Cryptographic Accelerator | 5 | Enables the driver module for Freescale's Cryptographic Accelerator |
| 6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). | 6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). |
| @@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API | |||
| 112 | To compile this as a module, choose M here: the module | 112 | To compile this as a module, choose M here: the module |
| 113 | will be called caamrng. | 113 | will be called caamrng. |
| 114 | 114 | ||
| 115 | config CRYPTO_DEV_FSL_CAAM_IMX | ||
| 116 | def_bool SOC_IMX6 || SOC_IMX7D | ||
| 117 | depends on CRYPTO_DEV_FSL_CAAM | ||
| 118 | |||
| 119 | config CRYPTO_DEV_FSL_CAAM_LE | ||
| 120 | def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A | ||
| 121 | depends on CRYPTO_DEV_FSL_CAAM | ||
| 122 | |||
| 115 | config CRYPTO_DEV_FSL_CAAM_DEBUG | 123 | config CRYPTO_DEV_FSL_CAAM_DEBUG |
| 116 | bool "Enable debug output in CAAM driver" | 124 | bool "Enable debug output in CAAM driver" |
| 117 | depends on CRYPTO_DEV_FSL_CAAM | 125 | depends on CRYPTO_DEV_FSL_CAAM |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index daca933a82ec..ba79d638f782 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -68,27 +68,29 @@ | |||
| 68 | #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) | 68 | #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) |
| 69 | #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ | 69 | #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ |
| 70 | CAAM_CMD_SZ * 4) | 70 | CAAM_CMD_SZ * 4) |
| 71 | #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ | ||
| 72 | CAAM_CMD_SZ * 5) | ||
| 71 | 73 | ||
| 72 | /* length of descriptors text */ | 74 | /* length of descriptors text */ |
| 73 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) | 75 | #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) |
| 74 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) | 76 | #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) |
| 75 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) | 77 | #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) |
| 76 | #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) | 78 | #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ) |
| 77 | 79 | ||
| 78 | /* Note: Nonce is counted in enckeylen */ | 80 | /* Note: Nonce is counted in enckeylen */ |
| 79 | #define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ) | 81 | #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) |
| 80 | 82 | ||
| 81 | #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) | 83 | #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) |
| 82 | #define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ) | 84 | #define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) |
| 83 | #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) | 85 | #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) |
| 84 | 86 | ||
| 85 | #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) | 87 | #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) |
| 86 | #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) | 88 | #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) |
| 87 | #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) | 89 | #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) |
| 88 | 90 | ||
| 89 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) | 91 | #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) |
| 90 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) | 92 | #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) |
| 91 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) | 93 | #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) |
| 92 | 94 | ||
| 93 | #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) | 95 | #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) |
| 94 | #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) | 96 | #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) |
| @@ -111,6 +113,20 @@ | |||
| 111 | #endif | 113 | #endif |
| 112 | static struct list_head alg_list; | 114 | static struct list_head alg_list; |
| 113 | 115 | ||
| 116 | struct caam_alg_entry { | ||
| 117 | int class1_alg_type; | ||
| 118 | int class2_alg_type; | ||
| 119 | int alg_op; | ||
| 120 | bool rfc3686; | ||
| 121 | bool geniv; | ||
| 122 | }; | ||
| 123 | |||
| 124 | struct caam_aead_alg { | ||
| 125 | struct aead_alg aead; | ||
| 126 | struct caam_alg_entry caam; | ||
| 127 | bool registered; | ||
| 128 | }; | ||
| 129 | |||
| 114 | /* Set DK bit in class 1 operation if shared */ | 130 | /* Set DK bit in class 1 operation if shared */ |
| 115 | static inline void append_dec_op1(u32 *desc, u32 type) | 131 | static inline void append_dec_op1(u32 *desc, u32 type) |
| 116 | { | 132 | { |
| @@ -145,18 +161,6 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type) | |||
| 145 | } | 161 | } |
| 146 | 162 | ||
| 147 | /* | 163 | /* |
| 148 | * For aead encrypt and decrypt, read iv for both classes | ||
| 149 | */ | ||
| 150 | static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset) | ||
| 151 | { | ||
| 152 | append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | | ||
| 153 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 154 | (ivoffset << LDST_OFFSET_SHIFT)); | ||
| 155 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | | ||
| 156 | (ivoffset << MOVE_OFFSET_SHIFT) | ivsize); | ||
| 157 | } | ||
| 158 | |||
| 159 | /* | ||
| 160 | * For ablkcipher encrypt and decrypt, read from req->src and | 164 | * For ablkcipher encrypt and decrypt, read from req->src and |
| 161 | * write to req->dst | 165 | * write to req->dst |
| 162 | */ | 166 | */ |
| @@ -170,13 +174,6 @@ static inline void ablkcipher_append_src_dst(u32 *desc) | |||
| 170 | } | 174 | } |
| 171 | 175 | ||
| 172 | /* | 176 | /* |
| 173 | * If all data, including src (with assoc and iv) or dst (with iv only) are | ||
| 174 | * contiguous | ||
| 175 | */ | ||
| 176 | #define GIV_SRC_CONTIG 1 | ||
| 177 | #define GIV_DST_CONTIG (1 << 1) | ||
| 178 | |||
| 179 | /* | ||
| 180 | * per-session context | 177 | * per-session context |
| 181 | */ | 178 | */ |
| 182 | struct caam_ctx { | 179 | struct caam_ctx { |
| @@ -259,7 +256,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, | |||
| 259 | 256 | ||
| 260 | static int aead_null_set_sh_desc(struct crypto_aead *aead) | 257 | static int aead_null_set_sh_desc(struct crypto_aead *aead) |
| 261 | { | 258 | { |
| 262 | unsigned int ivsize = crypto_aead_ivsize(aead); | ||
| 263 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 259 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 264 | struct device *jrdev = ctx->jrdev; | 260 | struct device *jrdev = ctx->jrdev; |
| 265 | bool keys_fit_inline = false; | 261 | bool keys_fit_inline = false; |
| @@ -270,11 +266,11 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 270 | * Job Descriptor and Shared Descriptors | 266 | * Job Descriptor and Shared Descriptors |
| 271 | * must all fit into the 64-word Descriptor h/w Buffer | 267 | * must all fit into the 64-word Descriptor h/w Buffer |
| 272 | */ | 268 | */ |
| 273 | if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN + | 269 | if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN + |
| 274 | ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) | 270 | ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) |
| 275 | keys_fit_inline = true; | 271 | keys_fit_inline = true; |
| 276 | 272 | ||
| 277 | /* old_aead_encrypt shared descriptor */ | 273 | /* aead_encrypt shared descriptor */ |
| 278 | desc = ctx->sh_desc_enc; | 274 | desc = ctx->sh_desc_enc; |
| 279 | 275 | ||
| 280 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 276 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
| @@ -291,20 +287,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 291 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | 287 | KEY_DEST_MDHA_SPLIT | KEY_ENC); |
| 292 | set_jump_tgt_here(desc, key_jump_cmd); | 288 | set_jump_tgt_here(desc, key_jump_cmd); |
| 293 | 289 | ||
| 294 | /* cryptlen = seqoutlen - authsize */ | 290 | /* assoclen + cryptlen = seqinlen */ |
| 295 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | 291 | append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 296 | |||
| 297 | /* | ||
| 298 | * NULL encryption; IV is zero | ||
| 299 | * assoclen = (assoclen + cryptlen) - cryptlen | ||
| 300 | */ | ||
| 301 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | ||
| 302 | |||
| 303 | /* read assoc before reading payload */ | ||
| 304 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 305 | KEY_VLF); | ||
| 306 | 292 | ||
| 307 | /* Prepare to read and write cryptlen bytes */ | 293 | /* Prepare to read and write cryptlen + assoclen bytes */ |
| 308 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 294 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 309 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 295 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 310 | 296 | ||
| @@ -363,7 +349,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 363 | 349 | ||
| 364 | desc = ctx->sh_desc_dec; | 350 | desc = ctx->sh_desc_dec; |
| 365 | 351 | ||
| 366 | /* old_aead_decrypt shared descriptor */ | 352 | /* aead_decrypt shared descriptor */ |
| 367 | init_sh_desc(desc, HDR_SHARE_SERIAL); | 353 | init_sh_desc(desc, HDR_SHARE_SERIAL); |
| 368 | 354 | ||
| 369 | /* Skip if already shared */ | 355 | /* Skip if already shared */ |
| @@ -382,18 +368,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 382 | append_operation(desc, ctx->class2_alg_type | | 368 | append_operation(desc, ctx->class2_alg_type | |
| 383 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | 369 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
| 384 | 370 | ||
| 385 | /* assoclen + cryptlen = seqinlen - ivsize - authsize */ | 371 | /* assoclen + cryptlen = seqoutlen */ |
| 386 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, | ||
| 387 | ctx->authsize + ivsize); | ||
| 388 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | ||
| 389 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | 372 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
| 390 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); | ||
| 391 | 373 | ||
| 392 | /* read assoc before reading payload */ | 374 | /* Prepare to read and write cryptlen + assoclen bytes */ |
| 393 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | ||
| 394 | KEY_VLF); | ||
| 395 | |||
| 396 | /* Prepare to read and write cryptlen bytes */ | ||
| 397 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | 375 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); |
| 398 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); | 376 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); |
| 399 | 377 | ||
| @@ -450,10 +428,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) | |||
| 450 | 428 | ||
| 451 | static int aead_set_sh_desc(struct crypto_aead *aead) | 429 | static int aead_set_sh_desc(struct crypto_aead *aead) |
| 452 | { | 430 | { |
| 431 | struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), | ||
| 432 | struct caam_aead_alg, aead); | ||
| 453 | unsigned int ivsize = crypto_aead_ivsize(aead); | 433 | unsigned int ivsize = crypto_aead_ivsize(aead); |
| 454 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 434 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 455 | struct crypto_tfm *ctfm = crypto_aead_tfm(aead); | ||
| 456 | const char *alg_name = crypto_tfm_alg_name(ctfm); | ||
| 457 | struct device *jrdev = ctx->jrdev; | 435 | struct device *jrdev = ctx->jrdev; |
| 458 | bool keys_fit_inline; | 436 | bool keys_fit_inline; |
| 459 | u32 geniv, moveiv; | 437 | u32 geniv, moveiv; |
| @@ -461,11 +439,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 461 | u32 *desc; | 439 | u32 *desc; |
| 462 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == | 440 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == |
| 463 | OP_ALG_AAI_CTR_MOD128); | 441 | OP_ALG_AAI_CTR_MOD128); |
| 464 | const bool is_rfc3686 = (ctr_mode && | 442 | const bool is_rfc3686 = alg->caam.rfc3686; |
| 465 | (strstr(alg_name, "rfc3686") != NULL)); | ||
| 466 | |||
| 467 | if (!ctx->authsize) | ||
| 468 | return 0; | ||
| 469 | 443 | ||
| 470 | /* NULL encryption / decryption */ | 444 | /* NULL encryption / decryption */ |
| 471 | if (!ctx->enckeylen) | 445 | if (!ctx->enckeylen) |
| @@ -486,18 +460,21 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 486 | if (is_rfc3686) | 460 | if (is_rfc3686) |
| 487 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; | 461 | ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; |
| 488 | 462 | ||
| 463 | if (alg->caam.geniv) | ||
| 464 | goto skip_enc; | ||
| 465 | |||
| 489 | /* | 466 | /* |
| 490 | * Job Descriptor and Shared Descriptors | 467 | * Job Descriptor and Shared Descriptors |
| 491 | * must all fit into the 64-word Descriptor h/w Buffer | 468 | * must all fit into the 64-word Descriptor h/w Buffer |
| 492 | */ | 469 | */ |
| 493 | keys_fit_inline = false; | 470 | keys_fit_inline = false; |
| 494 | if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + | 471 | if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN + |
| 495 | ctx->split_key_pad_len + ctx->enckeylen + | 472 | ctx->split_key_pad_len + ctx->enckeylen + |
| 496 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= | 473 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= |
| 497 | CAAM_DESC_BYTES_MAX) | 474 | CAAM_DESC_BYTES_MAX) |
| 498 | keys_fit_inline = true; | 475 | keys_fit_inline = true; |
| 499 | 476 | ||
| 500 | /* old_aead_encrypt shared descriptor */ | 477 | /* aead_encrypt shared descriptor */ |
| 501 | desc = ctx->sh_desc_enc; | 478 | desc = ctx->sh_desc_enc; |
| 502 | 479 | ||
| 503 | /* Note: Context registers are saved. */ | 480 | /* Note: Context registers are saved. */ |
| @@ -507,19 +484,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 507 | append_operation(desc, ctx->class2_alg_type | | 484 | append_operation(desc, ctx->class2_alg_type | |
| 508 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 485 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
| 509 | 486 | ||
| 510 | /* cryptlen = seqoutlen - authsize */ | 487 | /* Read and write assoclen bytes */ |
| 511 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | 488 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 512 | 489 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | |
| 513 | /* assoclen + cryptlen = seqinlen - ivsize */ | ||
| 514 | append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); | ||
| 515 | 490 | ||
| 516 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | 491 | /* Skip assoc data */ |
| 517 | append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); | 492 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
| 518 | 493 | ||
| 519 | /* read assoc before reading payload */ | 494 | /* read assoc before reading payload */ |
| 520 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 495 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
| 521 | KEY_VLF); | 496 | FIFOLDST_VLF); |
| 522 | aead_append_ld_iv(desc, ivsize, ctx1_iv_off); | ||
| 523 | 497 | ||
| 524 | /* Load Counter into CONTEXT1 reg */ | 498 | /* Load Counter into CONTEXT1 reg */ |
| 525 | if (is_rfc3686) | 499 | if (is_rfc3686) |
| @@ -534,8 +508,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 534 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 508 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
| 535 | 509 | ||
| 536 | /* Read and write cryptlen bytes */ | 510 | /* Read and write cryptlen bytes */ |
| 537 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 511 | append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 538 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 512 | append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 539 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); | 513 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); |
| 540 | 514 | ||
| 541 | /* Write ICV */ | 515 | /* Write ICV */ |
| @@ -555,18 +529,19 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 555 | desc_bytes(desc), 1); | 529 | desc_bytes(desc), 1); |
| 556 | #endif | 530 | #endif |
| 557 | 531 | ||
| 532 | skip_enc: | ||
| 558 | /* | 533 | /* |
| 559 | * Job Descriptor and Shared Descriptors | 534 | * Job Descriptor and Shared Descriptors |
| 560 | * must all fit into the 64-word Descriptor h/w Buffer | 535 | * must all fit into the 64-word Descriptor h/w Buffer |
| 561 | */ | 536 | */ |
| 562 | keys_fit_inline = false; | 537 | keys_fit_inline = false; |
| 563 | if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + | 538 | if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN + |
| 564 | ctx->split_key_pad_len + ctx->enckeylen + | 539 | ctx->split_key_pad_len + ctx->enckeylen + |
| 565 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= | 540 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= |
| 566 | CAAM_DESC_BYTES_MAX) | 541 | CAAM_DESC_BYTES_MAX) |
| 567 | keys_fit_inline = true; | 542 | keys_fit_inline = true; |
| 568 | 543 | ||
| 569 | /* old_aead_decrypt shared descriptor */ | 544 | /* aead_decrypt shared descriptor */ |
| 570 | desc = ctx->sh_desc_dec; | 545 | desc = ctx->sh_desc_dec; |
| 571 | 546 | ||
| 572 | /* Note: Context registers are saved. */ | 547 | /* Note: Context registers are saved. */ |
| @@ -576,19 +551,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 576 | append_operation(desc, ctx->class2_alg_type | | 551 | append_operation(desc, ctx->class2_alg_type | |
| 577 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | 552 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
| 578 | 553 | ||
| 579 | /* assoclen + cryptlen = seqinlen - ivsize - authsize */ | 554 | /* Read and write assoclen bytes */ |
| 580 | append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, | 555 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 581 | ctx->authsize + ivsize); | 556 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 582 | /* assoclen = (assoclen + cryptlen) - cryptlen */ | 557 | |
| 583 | append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); | 558 | /* Skip assoc data */ |
| 584 | append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); | 559 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
| 585 | 560 | ||
| 586 | /* read assoc before reading payload */ | 561 | /* read assoc before reading payload */ |
| 587 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 562 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
| 588 | KEY_VLF); | 563 | KEY_VLF); |
| 589 | 564 | ||
| 590 | aead_append_ld_iv(desc, ivsize, ctx1_iv_off); | ||
| 591 | |||
| 592 | /* Load Counter into CONTEXT1 reg */ | 565 | /* Load Counter into CONTEXT1 reg */ |
| 593 | if (is_rfc3686) | 566 | if (is_rfc3686) |
| 594 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 567 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | |
| @@ -605,8 +578,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 605 | append_dec_op1(desc, ctx->class1_alg_type); | 578 | append_dec_op1(desc, ctx->class1_alg_type); |
| 606 | 579 | ||
| 607 | /* Read and write cryptlen bytes */ | 580 | /* Read and write cryptlen bytes */ |
| 608 | append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); | 581 | append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
| 609 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); | 582 | append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); |
| 610 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG); | 583 | aead_append_src_dst(desc, FIFOLD_TYPE_MSG); |
| 611 | 584 | ||
| 612 | /* Load ICV */ | 585 | /* Load ICV */ |
| @@ -626,12 +599,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 626 | desc_bytes(desc), 1); | 599 | desc_bytes(desc), 1); |
| 627 | #endif | 600 | #endif |
| 628 | 601 | ||
| 602 | if (!alg->caam.geniv) | ||
| 603 | goto skip_givenc; | ||
| 604 | |||
| 629 | /* | 605 | /* |
| 630 | * Job Descriptor and Shared Descriptors | 606 | * Job Descriptor and Shared Descriptors |
| 631 | * must all fit into the 64-word Descriptor h/w Buffer | 607 | * must all fit into the 64-word Descriptor h/w Buffer |
| 632 | */ | 608 | */ |
| 633 | keys_fit_inline = false; | 609 | keys_fit_inline = false; |
| 634 | if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + | 610 | if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN + |
| 635 | ctx->split_key_pad_len + ctx->enckeylen + | 611 | ctx->split_key_pad_len + ctx->enckeylen + |
| 636 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= | 612 | (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= |
| 637 | CAAM_DESC_BYTES_MAX) | 613 | CAAM_DESC_BYTES_MAX) |
| @@ -643,6 +619,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 643 | /* Note: Context registers are saved. */ | 619 | /* Note: Context registers are saved. */ |
| 644 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | 620 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); |
| 645 | 621 | ||
| 622 | if (is_rfc3686) | ||
| 623 | goto copy_iv; | ||
| 624 | |||
| 646 | /* Generate IV */ | 625 | /* Generate IV */ |
| 647 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | | 626 | geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | |
| 648 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | | 627 | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | |
| @@ -656,6 +635,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 656 | (ivsize << MOVE_LEN_SHIFT)); | 635 | (ivsize << MOVE_LEN_SHIFT)); |
| 657 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | 636 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); |
| 658 | 637 | ||
| 638 | copy_iv: | ||
| 659 | /* Copy IV to class 1 context */ | 639 | /* Copy IV to class 1 context */ |
| 660 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | | 640 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | |
| 661 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | | 641 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | |
| @@ -668,8 +648,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 668 | /* ivsize + cryptlen = seqoutlen - authsize */ | 648 | /* ivsize + cryptlen = seqoutlen - authsize */ |
| 669 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | 649 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); |
| 670 | 650 | ||
| 671 | /* assoclen = seqinlen - (ivsize + cryptlen) */ | 651 | /* Read and write assoclen bytes */ |
| 672 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); | 652 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 653 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
| 654 | |||
| 655 | /* Skip assoc data */ | ||
| 656 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 673 | 657 | ||
| 674 | /* read assoc before reading payload */ | 658 | /* read assoc before reading payload */ |
| 675 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 659 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
| @@ -710,9 +694,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 710 | append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | | 694 | append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | |
| 711 | LDST_SRCDST_BYTE_CONTEXT); | 695 | LDST_SRCDST_BYTE_CONTEXT); |
| 712 | 696 | ||
| 713 | ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, | 697 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
| 714 | desc_bytes(desc), | 698 | desc_bytes(desc), |
| 715 | DMA_TO_DEVICE); | 699 | DMA_TO_DEVICE); |
| 716 | if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { | 700 | if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { |
| 717 | dev_err(jrdev, "unable to map shared descriptor\n"); | 701 | dev_err(jrdev, "unable to map shared descriptor\n"); |
| 718 | return -ENOMEM; | 702 | return -ENOMEM; |
| @@ -723,6 +707,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
| 723 | desc_bytes(desc), 1); | 707 | desc_bytes(desc), 1); |
| 724 | #endif | 708 | #endif |
| 725 | 709 | ||
| 710 | skip_givenc: | ||
| 726 | return 0; | 711 | return 0; |
| 727 | } | 712 | } |
| 728 | 713 | ||
| @@ -976,22 +961,28 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
| 976 | append_operation(desc, ctx->class1_alg_type | | 961 | append_operation(desc, ctx->class1_alg_type | |
| 977 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 962 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
| 978 | 963 | ||
| 979 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 964 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); |
| 980 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 965 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 981 | 966 | ||
| 982 | /* Skip assoc data */ | ||
| 983 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 984 | |||
| 985 | /* Read assoc data */ | 967 | /* Read assoc data */ |
| 986 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 968 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
| 987 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | 969 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); |
| 988 | 970 | ||
| 989 | /* cryptlen = seqoutlen - assoclen */ | 971 | /* Skip IV */ |
| 990 | append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 972 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); |
| 991 | 973 | ||
| 992 | /* Will read cryptlen bytes */ | 974 | /* Will read cryptlen bytes */ |
| 993 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); | 975 | append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
| 994 | 976 | ||
| 977 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | ||
| 978 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | ||
| 979 | |||
| 980 | /* Skip assoc data */ | ||
| 981 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 982 | |||
| 983 | /* cryptlen = seqoutlen - assoclen */ | ||
| 984 | append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); | ||
| 985 | |||
| 995 | /* Write encrypted data */ | 986 | /* Write encrypted data */ |
| 996 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 987 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
| 997 | 988 | ||
| @@ -1044,21 +1035,27 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) | |||
| 1044 | append_operation(desc, ctx->class1_alg_type | | 1035 | append_operation(desc, ctx->class1_alg_type | |
| 1045 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); | 1036 | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
| 1046 | 1037 | ||
| 1047 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 1038 | append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); |
| 1048 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 1039 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
| 1049 | 1040 | ||
| 1050 | /* Skip assoc data */ | ||
| 1051 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 1052 | |||
| 1053 | /* Read assoc data */ | 1041 | /* Read assoc data */ |
| 1054 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | | 1042 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | |
| 1055 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); | 1043 | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); |
| 1056 | 1044 | ||
| 1057 | /* Will write cryptlen bytes */ | 1045 | /* Skip IV */ |
| 1058 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | 1046 | append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); |
| 1059 | 1047 | ||
| 1060 | /* Will read cryptlen bytes */ | 1048 | /* Will read cryptlen bytes */ |
| 1061 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | 1049 | append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); |
| 1050 | |||
| 1051 | /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ | ||
| 1052 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); | ||
| 1053 | |||
| 1054 | /* Skip assoc data */ | ||
| 1055 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | ||
| 1056 | |||
| 1057 | /* Will write cryptlen bytes */ | ||
| 1058 | append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); | ||
| 1062 | 1059 | ||
| 1063 | /* Store payload data */ | 1060 | /* Store payload data */ |
| 1064 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); | 1061 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); |
| @@ -1793,22 +1790,6 @@ static void aead_unmap(struct device *dev, | |||
| 1793 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); | 1790 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); |
| 1794 | } | 1791 | } |
| 1795 | 1792 | ||
| 1796 | static void old_aead_unmap(struct device *dev, | ||
| 1797 | struct aead_edesc *edesc, | ||
| 1798 | struct aead_request *req) | ||
| 1799 | { | ||
| 1800 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 1801 | int ivsize = crypto_aead_ivsize(aead); | ||
| 1802 | |||
| 1803 | dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, | ||
| 1804 | DMA_TO_DEVICE, edesc->assoc_chained); | ||
| 1805 | |||
| 1806 | caam_unmap(dev, req->src, req->dst, | ||
| 1807 | edesc->src_nents, edesc->src_chained, edesc->dst_nents, | ||
| 1808 | edesc->dst_chained, edesc->iv_dma, ivsize, | ||
| 1809 | edesc->sec4_sg_dma, edesc->sec4_sg_bytes); | ||
| 1810 | } | ||
| 1811 | |||
| 1812 | static void ablkcipher_unmap(struct device *dev, | 1793 | static void ablkcipher_unmap(struct device *dev, |
| 1813 | struct ablkcipher_edesc *edesc, | 1794 | struct ablkcipher_edesc *edesc, |
| 1814 | struct ablkcipher_request *req) | 1795 | struct ablkcipher_request *req) |
| @@ -1844,45 +1825,6 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 1844 | aead_request_complete(req, err); | 1825 | aead_request_complete(req, err); |
| 1845 | } | 1826 | } |
| 1846 | 1827 | ||
| 1847 | static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
| 1848 | void *context) | ||
| 1849 | { | ||
| 1850 | struct aead_request *req = context; | ||
| 1851 | struct aead_edesc *edesc; | ||
| 1852 | #ifdef DEBUG | ||
| 1853 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 1854 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
| 1855 | int ivsize = crypto_aead_ivsize(aead); | ||
| 1856 | |||
| 1857 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 1858 | #endif | ||
| 1859 | |||
| 1860 | edesc = (struct aead_edesc *)((char *)desc - | ||
| 1861 | offsetof(struct aead_edesc, hw_desc)); | ||
| 1862 | |||
| 1863 | if (err) | ||
| 1864 | caam_jr_strstatus(jrdev, err); | ||
| 1865 | |||
| 1866 | old_aead_unmap(jrdev, edesc, req); | ||
| 1867 | |||
| 1868 | #ifdef DEBUG | ||
| 1869 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", | ||
| 1870 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | ||
| 1871 | req->assoclen , 1); | ||
| 1872 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | ||
| 1873 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, | ||
| 1874 | edesc->src_nents ? 100 : ivsize, 1); | ||
| 1875 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", | ||
| 1876 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
| 1877 | edesc->src_nents ? 100 : req->cryptlen + | ||
| 1878 | ctx->authsize + 4, 1); | ||
| 1879 | #endif | ||
| 1880 | |||
| 1881 | kfree(edesc); | ||
| 1882 | |||
| 1883 | aead_request_complete(req, err); | ||
| 1884 | } | ||
| 1885 | |||
| 1886 | static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | 1828 | static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, |
| 1887 | void *context) | 1829 | void *context) |
| 1888 | { | 1830 | { |
| @@ -1911,62 +1853,6 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 1911 | aead_request_complete(req, err); | 1853 | aead_request_complete(req, err); |
| 1912 | } | 1854 | } |
| 1913 | 1855 | ||
| 1914 | static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
| 1915 | void *context) | ||
| 1916 | { | ||
| 1917 | struct aead_request *req = context; | ||
| 1918 | struct aead_edesc *edesc; | ||
| 1919 | #ifdef DEBUG | ||
| 1920 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 1921 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
| 1922 | int ivsize = crypto_aead_ivsize(aead); | ||
| 1923 | |||
| 1924 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
| 1925 | #endif | ||
| 1926 | |||
| 1927 | edesc = (struct aead_edesc *)((char *)desc - | ||
| 1928 | offsetof(struct aead_edesc, hw_desc)); | ||
| 1929 | |||
| 1930 | #ifdef DEBUG | ||
| 1931 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | ||
| 1932 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
| 1933 | ivsize, 1); | ||
| 1934 | print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", | ||
| 1935 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), | ||
| 1936 | req->cryptlen - ctx->authsize, 1); | ||
| 1937 | #endif | ||
| 1938 | |||
| 1939 | if (err) | ||
| 1940 | caam_jr_strstatus(jrdev, err); | ||
| 1941 | |||
| 1942 | old_aead_unmap(jrdev, edesc, req); | ||
| 1943 | |||
| 1944 | /* | ||
| 1945 | * verify hw auth check passed else return -EBADMSG | ||
| 1946 | */ | ||
| 1947 | if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) | ||
| 1948 | err = -EBADMSG; | ||
| 1949 | |||
| 1950 | #ifdef DEBUG | ||
| 1951 | print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ", | ||
| 1952 | DUMP_PREFIX_ADDRESS, 16, 4, | ||
| 1953 | ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), | ||
| 1954 | sizeof(struct iphdr) + req->assoclen + | ||
| 1955 | ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + | ||
| 1956 | ctx->authsize + 36, 1); | ||
| 1957 | if (!err && edesc->sec4_sg_bytes) { | ||
| 1958 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); | ||
| 1959 | print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ", | ||
| 1960 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | ||
| 1961 | sg->length + ctx->authsize + 16, 1); | ||
| 1962 | } | ||
| 1963 | #endif | ||
| 1964 | |||
| 1965 | kfree(edesc); | ||
| 1966 | |||
| 1967 | aead_request_complete(req, err); | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | 1856 | static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, |
| 1971 | void *context) | 1857 | void *context) |
| 1972 | { | 1858 | { |
| @@ -2035,91 +1921,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
| 2035 | /* | 1921 | /* |
| 2036 | * Fill in aead job descriptor | 1922 | * Fill in aead job descriptor |
| 2037 | */ | 1923 | */ |
| 2038 | static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr, | ||
| 2039 | struct aead_edesc *edesc, | ||
| 2040 | struct aead_request *req, | ||
| 2041 | bool all_contig, bool encrypt) | ||
| 2042 | { | ||
| 2043 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 2044 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
| 2045 | int ivsize = crypto_aead_ivsize(aead); | ||
| 2046 | int authsize = ctx->authsize; | ||
| 2047 | u32 *desc = edesc->hw_desc; | ||
| 2048 | u32 out_options = 0, in_options; | ||
| 2049 | dma_addr_t dst_dma, src_dma; | ||
| 2050 | int len, sec4_sg_index = 0; | ||
| 2051 | bool is_gcm = false; | ||
| 2052 | |||
| 2053 | #ifdef DEBUG | ||
| 2054 | debug("assoclen %d cryptlen %d authsize %d\n", | ||
| 2055 | req->assoclen, req->cryptlen, authsize); | ||
| 2056 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", | ||
| 2057 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | ||
| 2058 | req->assoclen , 1); | ||
| 2059 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", | ||
| 2060 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
| 2061 | edesc->src_nents ? 100 : ivsize, 1); | ||
| 2062 | print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", | ||
| 2063 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
| 2064 | edesc->src_nents ? 100 : req->cryptlen, 1); | ||
| 2065 | print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", | ||
| 2066 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | ||
| 2067 | desc_bytes(sh_desc), 1); | ||
| 2068 | #endif | ||
| 2069 | |||
| 2070 | if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == | ||
| 2071 | OP_ALG_ALGSEL_AES) && | ||
| 2072 | ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) | ||
| 2073 | is_gcm = true; | ||
| 2074 | |||
| 2075 | len = desc_len(sh_desc); | ||
| 2076 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); | ||
| 2077 | |||
| 2078 | if (all_contig) { | ||
| 2079 | if (is_gcm) | ||
| 2080 | src_dma = edesc->iv_dma; | ||
| 2081 | else | ||
| 2082 | src_dma = sg_dma_address(req->assoc); | ||
| 2083 | in_options = 0; | ||
| 2084 | } else { | ||
| 2085 | src_dma = edesc->sec4_sg_dma; | ||
| 2086 | sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + | ||
| 2087 | (edesc->src_nents ? : 1); | ||
| 2088 | in_options = LDST_SGF; | ||
| 2089 | } | ||
| 2090 | |||
| 2091 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, | ||
| 2092 | in_options); | ||
| 2093 | |||
| 2094 | if (likely(req->src == req->dst)) { | ||
| 2095 | if (all_contig) { | ||
| 2096 | dst_dma = sg_dma_address(req->src); | ||
| 2097 | } else { | ||
| 2098 | dst_dma = src_dma + sizeof(struct sec4_sg_entry) * | ||
| 2099 | ((edesc->assoc_nents ? : 1) + 1); | ||
| 2100 | out_options = LDST_SGF; | ||
| 2101 | } | ||
| 2102 | } else { | ||
| 2103 | if (!edesc->dst_nents) { | ||
| 2104 | dst_dma = sg_dma_address(req->dst); | ||
| 2105 | } else { | ||
| 2106 | dst_dma = edesc->sec4_sg_dma + | ||
| 2107 | sec4_sg_index * | ||
| 2108 | sizeof(struct sec4_sg_entry); | ||
| 2109 | out_options = LDST_SGF; | ||
| 2110 | } | ||
| 2111 | } | ||
| 2112 | if (encrypt) | ||
| 2113 | append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, | ||
| 2114 | out_options); | ||
| 2115 | else | ||
| 2116 | append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, | ||
| 2117 | out_options); | ||
| 2118 | } | ||
| 2119 | |||
| 2120 | /* | ||
| 2121 | * Fill in aead job descriptor | ||
| 2122 | */ | ||
| 2123 | static void init_aead_job(struct aead_request *req, | 1924 | static void init_aead_job(struct aead_request *req, |
| 2124 | struct aead_edesc *edesc, | 1925 | struct aead_edesc *edesc, |
| 2125 | bool all_contig, bool encrypt) | 1926 | bool all_contig, bool encrypt) |
| @@ -2208,80 +2009,43 @@ static void init_gcm_job(struct aead_request *req, | |||
| 2208 | /* End of blank commands */ | 2009 | /* End of blank commands */ |
| 2209 | } | 2010 | } |
| 2210 | 2011 | ||
| 2211 | /* | 2012 | static void init_authenc_job(struct aead_request *req, |
| 2212 | * Fill in aead givencrypt job descriptor | 2013 | struct aead_edesc *edesc, |
| 2213 | */ | 2014 | bool all_contig, bool encrypt) |
| 2214 | static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | ||
| 2215 | struct aead_edesc *edesc, | ||
| 2216 | struct aead_request *req, | ||
| 2217 | int contig) | ||
| 2218 | { | 2015 | { |
| 2219 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2016 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 2017 | struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), | ||
| 2018 | struct caam_aead_alg, aead); | ||
| 2019 | unsigned int ivsize = crypto_aead_ivsize(aead); | ||
| 2220 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 2020 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
| 2221 | int ivsize = crypto_aead_ivsize(aead); | 2021 | const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == |
| 2222 | int authsize = ctx->authsize; | 2022 | OP_ALG_AAI_CTR_MOD128); |
| 2023 | const bool is_rfc3686 = alg->caam.rfc3686; | ||
| 2223 | u32 *desc = edesc->hw_desc; | 2024 | u32 *desc = edesc->hw_desc; |
| 2224 | u32 out_options = 0, in_options; | 2025 | u32 ivoffset = 0; |
| 2225 | dma_addr_t dst_dma, src_dma; | ||
| 2226 | int len, sec4_sg_index = 0; | ||
| 2227 | bool is_gcm = false; | ||
| 2228 | |||
| 2229 | #ifdef DEBUG | ||
| 2230 | debug("assoclen %d cryptlen %d authsize %d\n", | ||
| 2231 | req->assoclen, req->cryptlen, authsize); | ||
| 2232 | print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", | ||
| 2233 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | ||
| 2234 | req->assoclen , 1); | ||
| 2235 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", | ||
| 2236 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | ||
| 2237 | print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", | ||
| 2238 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
| 2239 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); | ||
| 2240 | print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", | ||
| 2241 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | ||
| 2242 | desc_bytes(sh_desc), 1); | ||
| 2243 | #endif | ||
| 2244 | 2026 | ||
| 2245 | if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == | 2027 | /* |
| 2246 | OP_ALG_ALGSEL_AES) && | 2028 | * AES-CTR needs to load IV in CONTEXT1 reg |
| 2247 | ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) | 2029 | * at an offset of 128bits (16bytes) |
| 2248 | is_gcm = true; | 2030 | * CONTEXT1[255:128] = IV |
| 2249 | 2031 | */ | |
| 2250 | len = desc_len(sh_desc); | 2032 | if (ctr_mode) |
| 2251 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); | 2033 | ivoffset = 16; |
| 2252 | 2034 | ||
| 2253 | if (contig & GIV_SRC_CONTIG) { | 2035 | /* |
| 2254 | if (is_gcm) | 2036 | * RFC3686 specific: |
| 2255 | src_dma = edesc->iv_dma; | 2037 | * CONTEXT1[255:128] = {NONCE, IV, COUNTER} |
| 2256 | else | 2038 | */ |
| 2257 | src_dma = sg_dma_address(req->assoc); | 2039 | if (is_rfc3686) |
| 2258 | in_options = 0; | 2040 | ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; |
| 2259 | } else { | ||
| 2260 | src_dma = edesc->sec4_sg_dma; | ||
| 2261 | sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; | ||
| 2262 | in_options = LDST_SGF; | ||
| 2263 | } | ||
| 2264 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, | ||
| 2265 | in_options); | ||
| 2266 | 2041 | ||
| 2267 | if (contig & GIV_DST_CONTIG) { | 2042 | init_aead_job(req, edesc, all_contig, encrypt); |
| 2268 | dst_dma = edesc->iv_dma; | ||
| 2269 | } else { | ||
| 2270 | if (likely(req->src == req->dst)) { | ||
| 2271 | dst_dma = src_dma + sizeof(struct sec4_sg_entry) * | ||
| 2272 | (edesc->assoc_nents + | ||
| 2273 | (is_gcm ? 1 + edesc->src_nents : 0)); | ||
| 2274 | out_options = LDST_SGF; | ||
| 2275 | } else { | ||
| 2276 | dst_dma = edesc->sec4_sg_dma + | ||
| 2277 | sec4_sg_index * | ||
| 2278 | sizeof(struct sec4_sg_entry); | ||
| 2279 | out_options = LDST_SGF; | ||
| 2280 | } | ||
| 2281 | } | ||
| 2282 | 2043 | ||
| 2283 | append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, | 2044 | if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt))) |
| 2284 | out_options); | 2045 | append_load_as_imm(desc, req->iv, ivsize, |
| 2046 | LDST_CLASS_1_CCB | | ||
| 2047 | LDST_SRCDST_BYTE_CONTEXT | | ||
| 2048 | (ivoffset << LDST_OFFSET_SHIFT)); | ||
| 2285 | } | 2049 | } |
| 2286 | 2050 | ||
| 2287 | /* | 2051 | /* |
| @@ -2392,150 +2156,6 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, | |||
| 2392 | /* | 2156 | /* |
| 2393 | * allocate and map the aead extended descriptor | 2157 | * allocate and map the aead extended descriptor |
| 2394 | */ | 2158 | */ |
| 2395 | static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req, | ||
| 2396 | int desc_bytes, | ||
| 2397 | bool *all_contig_ptr, | ||
| 2398 | bool encrypt) | ||
| 2399 | { | ||
| 2400 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 2401 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
| 2402 | struct device *jrdev = ctx->jrdev; | ||
| 2403 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2404 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 2405 | int assoc_nents, src_nents, dst_nents = 0; | ||
| 2406 | struct aead_edesc *edesc; | ||
| 2407 | dma_addr_t iv_dma = 0; | ||
| 2408 | int sgc; | ||
| 2409 | bool all_contig = true; | ||
| 2410 | bool assoc_chained = false, src_chained = false, dst_chained = false; | ||
| 2411 | int ivsize = crypto_aead_ivsize(aead); | ||
| 2412 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | ||
| 2413 | unsigned int authsize = ctx->authsize; | ||
| 2414 | bool is_gcm = false; | ||
| 2415 | |||
| 2416 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); | ||
| 2417 | |||
| 2418 | if (unlikely(req->dst != req->src)) { | ||
| 2419 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); | ||
| 2420 | dst_nents = sg_count(req->dst, | ||
| 2421 | req->cryptlen + | ||
| 2422 | (encrypt ? authsize : (-authsize)), | ||
| 2423 | &dst_chained); | ||
| 2424 | } else { | ||
| 2425 | src_nents = sg_count(req->src, | ||
| 2426 | req->cryptlen + | ||
| 2427 | (encrypt ? authsize : 0), | ||
| 2428 | &src_chained); | ||
| 2429 | } | ||
| 2430 | |||
| 2431 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, | ||
| 2432 | DMA_TO_DEVICE, assoc_chained); | ||
| 2433 | if (likely(req->src == req->dst)) { | ||
| 2434 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
| 2435 | DMA_BIDIRECTIONAL, src_chained); | ||
| 2436 | } else { | ||
| 2437 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
| 2438 | DMA_TO_DEVICE, src_chained); | ||
| 2439 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, | ||
| 2440 | DMA_FROM_DEVICE, dst_chained); | ||
| 2441 | } | ||
| 2442 | |||
| 2443 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | ||
| 2444 | if (dma_mapping_error(jrdev, iv_dma)) { | ||
| 2445 | dev_err(jrdev, "unable to map IV\n"); | ||
| 2446 | return ERR_PTR(-ENOMEM); | ||
| 2447 | } | ||
| 2448 | |||
| 2449 | if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == | ||
| 2450 | OP_ALG_ALGSEL_AES) && | ||
| 2451 | ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) | ||
| 2452 | is_gcm = true; | ||
| 2453 | |||
| 2454 | /* | ||
| 2455 | * Check if data are contiguous. | ||
| 2456 | * GCM expected input sequence: IV, AAD, text | ||
| 2457 | * All other - expected input sequence: AAD, IV, text | ||
| 2458 | */ | ||
| 2459 | if (is_gcm) | ||
| 2460 | all_contig = (!assoc_nents && | ||
| 2461 | iv_dma + ivsize == sg_dma_address(req->assoc) && | ||
| 2462 | !src_nents && sg_dma_address(req->assoc) + | ||
| 2463 | req->assoclen == sg_dma_address(req->src)); | ||
| 2464 | else | ||
| 2465 | all_contig = (!assoc_nents && sg_dma_address(req->assoc) + | ||
| 2466 | req->assoclen == iv_dma && !src_nents && | ||
| 2467 | iv_dma + ivsize == sg_dma_address(req->src)); | ||
| 2468 | if (!all_contig) { | ||
| 2469 | assoc_nents = assoc_nents ? : 1; | ||
| 2470 | src_nents = src_nents ? : 1; | ||
| 2471 | sec4_sg_len = assoc_nents + 1 + src_nents; | ||
| 2472 | } | ||
| 2473 | |||
| 2474 | sec4_sg_len += dst_nents; | ||
| 2475 | |||
| 2476 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | ||
| 2477 | |||
| 2478 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 2479 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | ||
| 2480 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 2481 | if (!edesc) { | ||
| 2482 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 2483 | return ERR_PTR(-ENOMEM); | ||
| 2484 | } | ||
| 2485 | |||
| 2486 | edesc->assoc_nents = assoc_nents; | ||
| 2487 | edesc->assoc_chained = assoc_chained; | ||
| 2488 | edesc->src_nents = src_nents; | ||
| 2489 | edesc->src_chained = src_chained; | ||
| 2490 | edesc->dst_nents = dst_nents; | ||
| 2491 | edesc->dst_chained = dst_chained; | ||
| 2492 | edesc->iv_dma = iv_dma; | ||
| 2493 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 2494 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + | ||
| 2495 | desc_bytes; | ||
| 2496 | *all_contig_ptr = all_contig; | ||
| 2497 | |||
| 2498 | sec4_sg_index = 0; | ||
| 2499 | if (!all_contig) { | ||
| 2500 | if (!is_gcm) { | ||
| 2501 | sg_to_sec4_sg_len(req->assoc, req->assoclen, | ||
| 2502 | edesc->sec4_sg + sec4_sg_index); | ||
| 2503 | sec4_sg_index += assoc_nents; | ||
| 2504 | } | ||
| 2505 | |||
| 2506 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, | ||
| 2507 | iv_dma, ivsize, 0); | ||
| 2508 | sec4_sg_index += 1; | ||
| 2509 | |||
| 2510 | if (is_gcm) { | ||
| 2511 | sg_to_sec4_sg_len(req->assoc, req->assoclen, | ||
| 2512 | edesc->sec4_sg + sec4_sg_index); | ||
| 2513 | sec4_sg_index += assoc_nents; | ||
| 2514 | } | ||
| 2515 | |||
| 2516 | sg_to_sec4_sg_last(req->src, | ||
| 2517 | src_nents, | ||
| 2518 | edesc->sec4_sg + | ||
| 2519 | sec4_sg_index, 0); | ||
| 2520 | sec4_sg_index += src_nents; | ||
| 2521 | } | ||
| 2522 | if (dst_nents) { | ||
| 2523 | sg_to_sec4_sg_last(req->dst, dst_nents, | ||
| 2524 | edesc->sec4_sg + sec4_sg_index, 0); | ||
| 2525 | } | ||
| 2526 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 2527 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 2528 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
| 2529 | dev_err(jrdev, "unable to map S/G table\n"); | ||
| 2530 | return ERR_PTR(-ENOMEM); | ||
| 2531 | } | ||
| 2532 | |||
| 2533 | return edesc; | ||
| 2534 | } | ||
| 2535 | |||
| 2536 | /* | ||
| 2537 | * allocate and map the aead extended descriptor | ||
| 2538 | */ | ||
| 2539 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | 2159 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
| 2540 | int desc_bytes, bool *all_contig_ptr, | 2160 | int desc_bytes, bool *all_contig_ptr, |
| 2541 | bool encrypt) | 2161 | bool encrypt) |
| @@ -2579,8 +2199,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
| 2579 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | 2199 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
| 2580 | 2200 | ||
| 2581 | /* allocate space for base edesc and hw desc commands, link tables */ | 2201 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 2582 | edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes + | 2202 | edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, |
| 2583 | sec4_sg_bytes, GFP_DMA | flags); | 2203 | GFP_DMA | flags); |
| 2584 | if (!edesc) { | 2204 | if (!edesc) { |
| 2585 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 2205 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 2586 | return ERR_PTR(-ENOMEM); | 2206 | return ERR_PTR(-ENOMEM); |
| @@ -2685,7 +2305,15 @@ static int gcm_encrypt(struct aead_request *req) | |||
| 2685 | return ret; | 2305 | return ret; |
| 2686 | } | 2306 | } |
| 2687 | 2307 | ||
| 2688 | static int old_aead_encrypt(struct aead_request *req) | 2308 | static int ipsec_gcm_encrypt(struct aead_request *req) |
| 2309 | { | ||
| 2310 | if (req->assoclen < 8) | ||
| 2311 | return -EINVAL; | ||
| 2312 | |||
| 2313 | return gcm_encrypt(req); | ||
| 2314 | } | ||
| 2315 | |||
| 2316 | static int aead_encrypt(struct aead_request *req) | ||
| 2689 | { | 2317 | { |
| 2690 | struct aead_edesc *edesc; | 2318 | struct aead_edesc *edesc; |
| 2691 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2319 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| @@ -2696,14 +2324,13 @@ static int old_aead_encrypt(struct aead_request *req) | |||
| 2696 | int ret = 0; | 2324 | int ret = 0; |
| 2697 | 2325 | ||
| 2698 | /* allocate extended descriptor */ | 2326 | /* allocate extended descriptor */ |
| 2699 | edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * | 2327 | edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, |
| 2700 | CAAM_CMD_SZ, &all_contig, true); | 2328 | &all_contig, true); |
| 2701 | if (IS_ERR(edesc)) | 2329 | if (IS_ERR(edesc)) |
| 2702 | return PTR_ERR(edesc); | 2330 | return PTR_ERR(edesc); |
| 2703 | 2331 | ||
| 2704 | /* Create and submit job descriptor */ | 2332 | /* Create and submit job descriptor */ |
| 2705 | old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, | 2333 | init_authenc_job(req, edesc, all_contig, true); |
| 2706 | all_contig, true); | ||
| 2707 | #ifdef DEBUG | 2334 | #ifdef DEBUG |
| 2708 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 2335 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
| 2709 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 2336 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| @@ -2711,11 +2338,11 @@ static int old_aead_encrypt(struct aead_request *req) | |||
| 2711 | #endif | 2338 | #endif |
| 2712 | 2339 | ||
| 2713 | desc = edesc->hw_desc; | 2340 | desc = edesc->hw_desc; |
| 2714 | ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); | 2341 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
| 2715 | if (!ret) { | 2342 | if (!ret) { |
| 2716 | ret = -EINPROGRESS; | 2343 | ret = -EINPROGRESS; |
| 2717 | } else { | 2344 | } else { |
| 2718 | old_aead_unmap(jrdev, edesc, req); | 2345 | aead_unmap(jrdev, edesc, req); |
| 2719 | kfree(edesc); | 2346 | kfree(edesc); |
| 2720 | } | 2347 | } |
| 2721 | 2348 | ||
| @@ -2757,7 +2384,15 @@ static int gcm_decrypt(struct aead_request *req) | |||
| 2757 | return ret; | 2384 | return ret; |
| 2758 | } | 2385 | } |
| 2759 | 2386 | ||
| 2760 | static int old_aead_decrypt(struct aead_request *req) | 2387 | static int ipsec_gcm_decrypt(struct aead_request *req) |
| 2388 | { | ||
| 2389 | if (req->assoclen < 8) | ||
| 2390 | return -EINVAL; | ||
| 2391 | |||
| 2392 | return gcm_decrypt(req); | ||
| 2393 | } | ||
| 2394 | |||
| 2395 | static int aead_decrypt(struct aead_request *req) | ||
| 2761 | { | 2396 | { |
| 2762 | struct aead_edesc *edesc; | 2397 | struct aead_edesc *edesc; |
| 2763 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2398 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| @@ -2768,20 +2403,19 @@ static int old_aead_decrypt(struct aead_request *req) | |||
| 2768 | int ret = 0; | 2403 | int ret = 0; |
| 2769 | 2404 | ||
| 2770 | /* allocate extended descriptor */ | 2405 | /* allocate extended descriptor */ |
| 2771 | edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * | 2406 | edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, |
| 2772 | CAAM_CMD_SZ, &all_contig, false); | 2407 | &all_contig, false); |
| 2773 | if (IS_ERR(edesc)) | 2408 | if (IS_ERR(edesc)) |
| 2774 | return PTR_ERR(edesc); | 2409 | return PTR_ERR(edesc); |
| 2775 | 2410 | ||
| 2776 | #ifdef DEBUG | 2411 | #ifdef DEBUG |
| 2777 | print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", | 2412 | print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", |
| 2778 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | 2413 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
| 2779 | req->cryptlen, 1); | 2414 | req->assoclen + req->cryptlen, 1); |
| 2780 | #endif | 2415 | #endif |
| 2781 | 2416 | ||
| 2782 | /* Create and submit job descriptor*/ | 2417 | /* Create and submit job descriptor*/ |
| 2783 | old_init_aead_job(ctx->sh_desc_dec, | 2418 | init_authenc_job(req, edesc, all_contig, false); |
| 2784 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); | ||
| 2785 | #ifdef DEBUG | 2419 | #ifdef DEBUG |
| 2786 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 2420 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", |
| 2787 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 2421 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
| @@ -2789,232 +2423,29 @@ static int old_aead_decrypt(struct aead_request *req) | |||
| 2789 | #endif | 2423 | #endif |
| 2790 | 2424 | ||
| 2791 | desc = edesc->hw_desc; | 2425 | desc = edesc->hw_desc; |
| 2792 | ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req); | 2426 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); |
| 2793 | if (!ret) { | 2427 | if (!ret) { |
| 2794 | ret = -EINPROGRESS; | 2428 | ret = -EINPROGRESS; |
| 2795 | } else { | 2429 | } else { |
| 2796 | old_aead_unmap(jrdev, edesc, req); | 2430 | aead_unmap(jrdev, edesc, req); |
| 2797 | kfree(edesc); | 2431 | kfree(edesc); |
| 2798 | } | 2432 | } |
| 2799 | 2433 | ||
| 2800 | return ret; | 2434 | return ret; |
| 2801 | } | 2435 | } |
| 2802 | 2436 | ||
| 2803 | /* | 2437 | static int aead_givdecrypt(struct aead_request *req) |
| 2804 | * allocate and map the aead extended descriptor for aead givencrypt | ||
| 2805 | */ | ||
| 2806 | static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | ||
| 2807 | *greq, int desc_bytes, | ||
| 2808 | u32 *contig_ptr) | ||
| 2809 | { | ||
| 2810 | struct aead_request *req = &greq->areq; | ||
| 2811 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
| 2812 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
| 2813 | struct device *jrdev = ctx->jrdev; | ||
| 2814 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
| 2815 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
| 2816 | int assoc_nents, src_nents, dst_nents = 0; | ||
| 2817 | struct aead_edesc *edesc; | ||
| 2818 | dma_addr_t iv_dma = 0; | ||
| 2819 | int sgc; | ||
| 2820 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; | ||
| 2821 | int ivsize = crypto_aead_ivsize(aead); | ||
| 2822 | bool assoc_chained = false, src_chained = false, dst_chained = false; | ||
| 2823 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | ||
| 2824 | bool is_gcm = false; | ||
| 2825 | |||
| 2826 | assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); | ||
| 2827 | src_nents = sg_count(req->src, req->cryptlen, &src_chained); | ||
| 2828 | |||
| 2829 | if (unlikely(req->dst != req->src)) | ||
| 2830 | dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, | ||
| 2831 | &dst_chained); | ||
| 2832 | |||
| 2833 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, | ||
| 2834 | DMA_TO_DEVICE, assoc_chained); | ||
| 2835 | if (likely(req->src == req->dst)) { | ||
| 2836 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
| 2837 | DMA_BIDIRECTIONAL, src_chained); | ||
| 2838 | } else { | ||
| 2839 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, | ||
| 2840 | DMA_TO_DEVICE, src_chained); | ||
| 2841 | sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, | ||
| 2842 | DMA_FROM_DEVICE, dst_chained); | ||
| 2843 | } | ||
| 2844 | |||
| 2845 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); | ||
| 2846 | if (dma_mapping_error(jrdev, iv_dma)) { | ||
| 2847 | dev_err(jrdev, "unable to map IV\n"); | ||
| 2848 | return ERR_PTR(-ENOMEM); | ||
| 2849 | } | ||
| 2850 | |||
| 2851 | if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == | ||
| 2852 | OP_ALG_ALGSEL_AES) && | ||
| 2853 | ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) | ||
| 2854 | is_gcm = true; | ||
| 2855 | |||
| 2856 | /* | ||
| 2857 | * Check if data are contiguous. | ||
| 2858 | * GCM expected input sequence: IV, AAD, text | ||
| 2859 | * All other - expected input sequence: AAD, IV, text | ||
| 2860 | */ | ||
| 2861 | |||
| 2862 | if (is_gcm) { | ||
| 2863 | if (assoc_nents || iv_dma + ivsize != | ||
| 2864 | sg_dma_address(req->assoc) || src_nents || | ||
| 2865 | sg_dma_address(req->assoc) + req->assoclen != | ||
| 2866 | sg_dma_address(req->src)) | ||
| 2867 | contig &= ~GIV_SRC_CONTIG; | ||
| 2868 | } else { | ||
| 2869 | if (assoc_nents || | ||
| 2870 | sg_dma_address(req->assoc) + req->assoclen != iv_dma || | ||
| 2871 | src_nents || iv_dma + ivsize != sg_dma_address(req->src)) | ||
| 2872 | contig &= ~GIV_SRC_CONTIG; | ||
| 2873 | } | ||
| 2874 | |||
| 2875 | if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) | ||
| 2876 | contig &= ~GIV_DST_CONTIG; | ||
| 2877 | |||
| 2878 | if (!(contig & GIV_SRC_CONTIG)) { | ||
| 2879 | assoc_nents = assoc_nents ? : 1; | ||
| 2880 | src_nents = src_nents ? : 1; | ||
| 2881 | sec4_sg_len += assoc_nents + 1 + src_nents; | ||
| 2882 | if (req->src == req->dst && | ||
| 2883 | (src_nents || iv_dma + ivsize != sg_dma_address(req->src))) | ||
| 2884 | contig &= ~GIV_DST_CONTIG; | ||
| 2885 | } | ||
| 2886 | |||
| 2887 | /* | ||
| 2888 | * Add new sg entries for GCM output sequence. | ||
| 2889 | * Expected output sequence: IV, encrypted text. | ||
| 2890 | */ | ||
| 2891 | if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) | ||
| 2892 | sec4_sg_len += 1 + src_nents; | ||
| 2893 | |||
| 2894 | if (unlikely(req->src != req->dst)) { | ||
| 2895 | dst_nents = dst_nents ? : 1; | ||
| 2896 | sec4_sg_len += 1 + dst_nents; | ||
| 2897 | } | ||
| 2898 | |||
| 2899 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | ||
| 2900 | |||
| 2901 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
| 2902 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | ||
| 2903 | sec4_sg_bytes, GFP_DMA | flags); | ||
| 2904 | if (!edesc) { | ||
| 2905 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
| 2906 | return ERR_PTR(-ENOMEM); | ||
| 2907 | } | ||
| 2908 | |||
| 2909 | edesc->assoc_nents = assoc_nents; | ||
| 2910 | edesc->assoc_chained = assoc_chained; | ||
| 2911 | edesc->src_nents = src_nents; | ||
| 2912 | edesc->src_chained = src_chained; | ||
| 2913 | edesc->dst_nents = dst_nents; | ||
| 2914 | edesc->dst_chained = dst_chained; | ||
| 2915 | edesc->iv_dma = iv_dma; | ||
| 2916 | edesc->sec4_sg_bytes = sec4_sg_bytes; | ||
| 2917 | edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + | ||
| 2918 | desc_bytes; | ||
| 2919 | *contig_ptr = contig; | ||
| 2920 | |||
| 2921 | sec4_sg_index = 0; | ||
| 2922 | if (!(contig & GIV_SRC_CONTIG)) { | ||
| 2923 | if (!is_gcm) { | ||
| 2924 | sg_to_sec4_sg_len(req->assoc, req->assoclen, | ||
| 2925 | edesc->sec4_sg + sec4_sg_index); | ||
| 2926 | sec4_sg_index += assoc_nents; | ||
| 2927 | } | ||
| 2928 | |||
| 2929 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, | ||
| 2930 | iv_dma, ivsize, 0); | ||
| 2931 | sec4_sg_index += 1; | ||
| 2932 | |||
| 2933 | if (is_gcm) { | ||
| 2934 | sg_to_sec4_sg_len(req->assoc, req->assoclen, | ||
| 2935 | edesc->sec4_sg + sec4_sg_index); | ||
| 2936 | sec4_sg_index += assoc_nents; | ||
| 2937 | } | ||
| 2938 | |||
| 2939 | sg_to_sec4_sg_last(req->src, src_nents, | ||
| 2940 | edesc->sec4_sg + | ||
| 2941 | sec4_sg_index, 0); | ||
| 2942 | sec4_sg_index += src_nents; | ||
| 2943 | } | ||
| 2944 | |||
| 2945 | if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) { | ||
| 2946 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, | ||
| 2947 | iv_dma, ivsize, 0); | ||
| 2948 | sec4_sg_index += 1; | ||
| 2949 | sg_to_sec4_sg_last(req->src, src_nents, | ||
| 2950 | edesc->sec4_sg + sec4_sg_index, 0); | ||
| 2951 | } | ||
| 2952 | |||
| 2953 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { | ||
| 2954 | dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, | ||
| 2955 | iv_dma, ivsize, 0); | ||
| 2956 | sec4_sg_index += 1; | ||
| 2957 | sg_to_sec4_sg_last(req->dst, dst_nents, | ||
| 2958 | edesc->sec4_sg + sec4_sg_index, 0); | ||
| 2959 | } | ||
| 2960 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | ||
| 2961 | sec4_sg_bytes, DMA_TO_DEVICE); | ||
| 2962 | if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { | ||
| 2963 | dev_err(jrdev, "unable to map S/G table\n"); | ||
| 2964 | return ERR_PTR(-ENOMEM); | ||
| 2965 | } | ||
| 2966 | |||
| 2967 | return edesc; | ||
| 2968 | } | ||
| 2969 | |||
| 2970 | static int old_aead_givencrypt(struct aead_givcrypt_request *areq) | ||
| 2971 | { | 2438 | { |
| 2972 | struct aead_request *req = &areq->areq; | ||
| 2973 | struct aead_edesc *edesc; | ||
| 2974 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 2439 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 2975 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 2440 | unsigned int ivsize = crypto_aead_ivsize(aead); |
| 2976 | struct device *jrdev = ctx->jrdev; | ||
| 2977 | u32 contig; | ||
| 2978 | u32 *desc; | ||
| 2979 | int ret = 0; | ||
| 2980 | |||
| 2981 | /* allocate extended descriptor */ | ||
| 2982 | edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * | ||
| 2983 | CAAM_CMD_SZ, &contig); | ||
| 2984 | |||
| 2985 | if (IS_ERR(edesc)) | ||
| 2986 | return PTR_ERR(edesc); | ||
| 2987 | |||
| 2988 | #ifdef DEBUG | ||
| 2989 | print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ", | ||
| 2990 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
| 2991 | req->cryptlen, 1); | ||
| 2992 | #endif | ||
| 2993 | 2441 | ||
| 2994 | /* Create and submit job descriptor*/ | 2442 | if (req->cryptlen < ivsize) |
| 2995 | init_aead_giv_job(ctx->sh_desc_givenc, | 2443 | return -EINVAL; |
| 2996 | ctx->sh_desc_givenc_dma, edesc, req, contig); | ||
| 2997 | #ifdef DEBUG | ||
| 2998 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | ||
| 2999 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
| 3000 | desc_bytes(edesc->hw_desc), 1); | ||
| 3001 | #endif | ||
| 3002 | 2444 | ||
| 3003 | desc = edesc->hw_desc; | 2445 | req->cryptlen -= ivsize; |
| 3004 | ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); | 2446 | req->assoclen += ivsize; |
| 3005 | if (!ret) { | ||
| 3006 | ret = -EINPROGRESS; | ||
| 3007 | } else { | ||
| 3008 | old_aead_unmap(jrdev, edesc, req); | ||
| 3009 | kfree(edesc); | ||
| 3010 | } | ||
| 3011 | 2447 | ||
| 3012 | return ret; | 2448 | return aead_decrypt(req); |
| 3013 | } | ||
| 3014 | |||
| 3015 | static int aead_null_givencrypt(struct aead_givcrypt_request *areq) | ||
| 3016 | { | ||
| 3017 | return old_aead_encrypt(&areq->areq); | ||
| 3018 | } | 2449 | } |
| 3019 | 2450 | ||
| 3020 | /* | 2451 | /* |
| @@ -3072,8 +2503,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
| 3072 | sizeof(struct sec4_sg_entry); | 2503 | sizeof(struct sec4_sg_entry); |
| 3073 | 2504 | ||
| 3074 | /* allocate space for base edesc and hw desc commands, link tables */ | 2505 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 3075 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + | 2506 | edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, |
| 3076 | sec4_sg_bytes, GFP_DMA | flags); | 2507 | GFP_DMA | flags); |
| 3077 | if (!edesc) { | 2508 | if (!edesc) { |
| 3078 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 2509 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 3079 | return ERR_PTR(-ENOMEM); | 2510 | return ERR_PTR(-ENOMEM); |
| @@ -3251,8 +2682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
| 3251 | sizeof(struct sec4_sg_entry); | 2682 | sizeof(struct sec4_sg_entry); |
| 3252 | 2683 | ||
| 3253 | /* allocate space for base edesc and hw desc commands, link tables */ | 2684 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 3254 | edesc = kmalloc(sizeof(*edesc) + desc_bytes + | 2685 | edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, |
| 3255 | sec4_sg_bytes, GFP_DMA | flags); | 2686 | GFP_DMA | flags); |
| 3256 | if (!edesc) { | 2687 | if (!edesc) { |
| 3257 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 2688 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 3258 | return ERR_PTR(-ENOMEM); | 2689 | return ERR_PTR(-ENOMEM); |
| @@ -3347,7 +2778,6 @@ struct caam_alg_template { | |||
| 3347 | u32 type; | 2778 | u32 type; |
| 3348 | union { | 2779 | union { |
| 3349 | struct ablkcipher_alg ablkcipher; | 2780 | struct ablkcipher_alg ablkcipher; |
| 3350 | struct old_aead_alg aead; | ||
| 3351 | } template_u; | 2781 | } template_u; |
| 3352 | u32 class1_alg_type; | 2782 | u32 class1_alg_type; |
| 3353 | u32 class2_alg_type; | 2783 | u32 class2_alg_type; |
| @@ -3355,753 +2785,1426 @@ struct caam_alg_template { | |||
| 3355 | }; | 2785 | }; |
| 3356 | 2786 | ||
| 3357 | static struct caam_alg_template driver_algs[] = { | 2787 | static struct caam_alg_template driver_algs[] = { |
| 2788 | /* ablkcipher descriptor */ | ||
| 2789 | { | ||
| 2790 | .name = "cbc(aes)", | ||
| 2791 | .driver_name = "cbc-aes-caam", | ||
| 2792 | .blocksize = AES_BLOCK_SIZE, | ||
| 2793 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | ||
| 2794 | .template_ablkcipher = { | ||
| 2795 | .setkey = ablkcipher_setkey, | ||
| 2796 | .encrypt = ablkcipher_encrypt, | ||
| 2797 | .decrypt = ablkcipher_decrypt, | ||
| 2798 | .givencrypt = ablkcipher_givencrypt, | ||
| 2799 | .geniv = "<built-in>", | ||
| 2800 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 2801 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 2802 | .ivsize = AES_BLOCK_SIZE, | ||
| 2803 | }, | ||
| 2804 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 2805 | }, | ||
| 2806 | { | ||
| 2807 | .name = "cbc(des3_ede)", | ||
| 2808 | .driver_name = "cbc-3des-caam", | ||
| 2809 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 2810 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | ||
| 2811 | .template_ablkcipher = { | ||
| 2812 | .setkey = ablkcipher_setkey, | ||
| 2813 | .encrypt = ablkcipher_encrypt, | ||
| 2814 | .decrypt = ablkcipher_decrypt, | ||
| 2815 | .givencrypt = ablkcipher_givencrypt, | ||
| 2816 | .geniv = "<built-in>", | ||
| 2817 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
| 2818 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
| 2819 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2820 | }, | ||
| 2821 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 2822 | }, | ||
| 2823 | { | ||
| 2824 | .name = "cbc(des)", | ||
| 2825 | .driver_name = "cbc-des-caam", | ||
| 2826 | .blocksize = DES_BLOCK_SIZE, | ||
| 2827 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | ||
| 2828 | .template_ablkcipher = { | ||
| 2829 | .setkey = ablkcipher_setkey, | ||
| 2830 | .encrypt = ablkcipher_encrypt, | ||
| 2831 | .decrypt = ablkcipher_decrypt, | ||
| 2832 | .givencrypt = ablkcipher_givencrypt, | ||
| 2833 | .geniv = "<built-in>", | ||
| 2834 | .min_keysize = DES_KEY_SIZE, | ||
| 2835 | .max_keysize = DES_KEY_SIZE, | ||
| 2836 | .ivsize = DES_BLOCK_SIZE, | ||
| 2837 | }, | ||
| 2838 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 2839 | }, | ||
| 2840 | { | ||
| 2841 | .name = "ctr(aes)", | ||
| 2842 | .driver_name = "ctr-aes-caam", | ||
| 2843 | .blocksize = 1, | ||
| 2844 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 2845 | .template_ablkcipher = { | ||
| 2846 | .setkey = ablkcipher_setkey, | ||
| 2847 | .encrypt = ablkcipher_encrypt, | ||
| 2848 | .decrypt = ablkcipher_decrypt, | ||
| 2849 | .geniv = "chainiv", | ||
| 2850 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 2851 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 2852 | .ivsize = AES_BLOCK_SIZE, | ||
| 2853 | }, | ||
| 2854 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | ||
| 2855 | }, | ||
| 2856 | { | ||
| 2857 | .name = "rfc3686(ctr(aes))", | ||
| 2858 | .driver_name = "rfc3686-ctr-aes-caam", | ||
| 2859 | .blocksize = 1, | ||
| 2860 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | ||
| 2861 | .template_ablkcipher = { | ||
| 2862 | .setkey = ablkcipher_setkey, | ||
| 2863 | .encrypt = ablkcipher_encrypt, | ||
| 2864 | .decrypt = ablkcipher_decrypt, | ||
| 2865 | .givencrypt = ablkcipher_givencrypt, | ||
| 2866 | .geniv = "<built-in>", | ||
| 2867 | .min_keysize = AES_MIN_KEY_SIZE + | ||
| 2868 | CTR_RFC3686_NONCE_SIZE, | ||
| 2869 | .max_keysize = AES_MAX_KEY_SIZE + | ||
| 2870 | CTR_RFC3686_NONCE_SIZE, | ||
| 2871 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
| 2872 | }, | ||
| 2873 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | ||
| 2874 | } | ||
| 2875 | }; | ||
| 2876 | |||
| 2877 | static struct caam_aead_alg driver_aeads[] = { | ||
| 2878 | { | ||
| 2879 | .aead = { | ||
| 2880 | .base = { | ||
| 2881 | .cra_name = "rfc4106(gcm(aes))", | ||
| 2882 | .cra_driver_name = "rfc4106-gcm-aes-caam", | ||
| 2883 | .cra_blocksize = 1, | ||
| 2884 | }, | ||
| 2885 | .setkey = rfc4106_setkey, | ||
| 2886 | .setauthsize = rfc4106_setauthsize, | ||
| 2887 | .encrypt = ipsec_gcm_encrypt, | ||
| 2888 | .decrypt = ipsec_gcm_decrypt, | ||
| 2889 | .ivsize = 8, | ||
| 2890 | .maxauthsize = AES_BLOCK_SIZE, | ||
| 2891 | }, | ||
| 2892 | .caam = { | ||
| 2893 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
| 2894 | }, | ||
| 2895 | }, | ||
| 2896 | { | ||
| 2897 | .aead = { | ||
| 2898 | .base = { | ||
| 2899 | .cra_name = "rfc4543(gcm(aes))", | ||
| 2900 | .cra_driver_name = "rfc4543-gcm-aes-caam", | ||
| 2901 | .cra_blocksize = 1, | ||
| 2902 | }, | ||
| 2903 | .setkey = rfc4543_setkey, | ||
| 2904 | .setauthsize = rfc4543_setauthsize, | ||
| 2905 | .encrypt = ipsec_gcm_encrypt, | ||
| 2906 | .decrypt = ipsec_gcm_decrypt, | ||
| 2907 | .ivsize = 8, | ||
| 2908 | .maxauthsize = AES_BLOCK_SIZE, | ||
| 2909 | }, | ||
| 2910 | .caam = { | ||
| 2911 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
| 2912 | }, | ||
| 2913 | }, | ||
| 2914 | /* Galois Counter Mode */ | ||
| 2915 | { | ||
| 2916 | .aead = { | ||
| 2917 | .base = { | ||
| 2918 | .cra_name = "gcm(aes)", | ||
| 2919 | .cra_driver_name = "gcm-aes-caam", | ||
| 2920 | .cra_blocksize = 1, | ||
| 2921 | }, | ||
| 2922 | .setkey = gcm_setkey, | ||
| 2923 | .setauthsize = gcm_setauthsize, | ||
| 2924 | .encrypt = gcm_encrypt, | ||
| 2925 | .decrypt = gcm_decrypt, | ||
| 2926 | .ivsize = 12, | ||
| 2927 | .maxauthsize = AES_BLOCK_SIZE, | ||
| 2928 | }, | ||
| 2929 | .caam = { | ||
| 2930 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | ||
| 2931 | }, | ||
| 2932 | }, | ||
| 3358 | /* single-pass ipsec_esp descriptor */ | 2933 | /* single-pass ipsec_esp descriptor */ |
| 3359 | { | 2934 | { |
| 3360 | .name = "authenc(hmac(md5),ecb(cipher_null))", | 2935 | .aead = { |
| 3361 | .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam", | 2936 | .base = { |
| 3362 | .blocksize = NULL_BLOCK_SIZE, | 2937 | .cra_name = "authenc(hmac(md5)," |
| 3363 | .type = CRYPTO_ALG_TYPE_AEAD, | 2938 | "ecb(cipher_null))", |
| 3364 | .template_aead = { | 2939 | .cra_driver_name = "authenc-hmac-md5-" |
| 2940 | "ecb-cipher_null-caam", | ||
| 2941 | .cra_blocksize = NULL_BLOCK_SIZE, | ||
| 2942 | }, | ||
| 3365 | .setkey = aead_setkey, | 2943 | .setkey = aead_setkey, |
| 3366 | .setauthsize = aead_setauthsize, | 2944 | .setauthsize = aead_setauthsize, |
| 3367 | .encrypt = old_aead_encrypt, | 2945 | .encrypt = aead_encrypt, |
| 3368 | .decrypt = old_aead_decrypt, | 2946 | .decrypt = aead_decrypt, |
| 3369 | .givencrypt = aead_null_givencrypt, | ||
| 3370 | .geniv = "<built-in>", | ||
| 3371 | .ivsize = NULL_IV_SIZE, | 2947 | .ivsize = NULL_IV_SIZE, |
| 3372 | .maxauthsize = MD5_DIGEST_SIZE, | 2948 | .maxauthsize = MD5_DIGEST_SIZE, |
| 3373 | }, | 2949 | }, |
| 3374 | .class1_alg_type = 0, | 2950 | .caam = { |
| 3375 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, | 2951 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3376 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | 2952 | OP_ALG_AAI_HMAC_PRECOMP, |
| 2953 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 2954 | }, | ||
| 3377 | }, | 2955 | }, |
| 3378 | { | 2956 | { |
| 3379 | .name = "authenc(hmac(sha1),ecb(cipher_null))", | 2957 | .aead = { |
| 3380 | .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam", | 2958 | .base = { |
| 3381 | .blocksize = NULL_BLOCK_SIZE, | 2959 | .cra_name = "authenc(hmac(sha1)," |
| 3382 | .type = CRYPTO_ALG_TYPE_AEAD, | 2960 | "ecb(cipher_null))", |
| 3383 | .template_aead = { | 2961 | .cra_driver_name = "authenc-hmac-sha1-" |
| 2962 | "ecb-cipher_null-caam", | ||
| 2963 | .cra_blocksize = NULL_BLOCK_SIZE, | ||
| 2964 | }, | ||
| 3384 | .setkey = aead_setkey, | 2965 | .setkey = aead_setkey, |
| 3385 | .setauthsize = aead_setauthsize, | 2966 | .setauthsize = aead_setauthsize, |
| 3386 | .encrypt = old_aead_encrypt, | 2967 | .encrypt = aead_encrypt, |
| 3387 | .decrypt = old_aead_decrypt, | 2968 | .decrypt = aead_decrypt, |
| 3388 | .givencrypt = aead_null_givencrypt, | ||
| 3389 | .geniv = "<built-in>", | ||
| 3390 | .ivsize = NULL_IV_SIZE, | 2969 | .ivsize = NULL_IV_SIZE, |
| 3391 | .maxauthsize = SHA1_DIGEST_SIZE, | 2970 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 3392 | }, | 2971 | }, |
| 3393 | .class1_alg_type = 0, | 2972 | .caam = { |
| 3394 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | 2973 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3395 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | 2974 | OP_ALG_AAI_HMAC_PRECOMP, |
| 2975 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 2976 | }, | ||
| 3396 | }, | 2977 | }, |
| 3397 | { | 2978 | { |
| 3398 | .name = "authenc(hmac(sha224),ecb(cipher_null))", | 2979 | .aead = { |
| 3399 | .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam", | 2980 | .base = { |
| 3400 | .blocksize = NULL_BLOCK_SIZE, | 2981 | .cra_name = "authenc(hmac(sha224)," |
| 3401 | .type = CRYPTO_ALG_TYPE_AEAD, | 2982 | "ecb(cipher_null))", |
| 3402 | .template_aead = { | 2983 | .cra_driver_name = "authenc-hmac-sha224-" |
| 2984 | "ecb-cipher_null-caam", | ||
| 2985 | .cra_blocksize = NULL_BLOCK_SIZE, | ||
| 2986 | }, | ||
| 3403 | .setkey = aead_setkey, | 2987 | .setkey = aead_setkey, |
| 3404 | .setauthsize = aead_setauthsize, | 2988 | .setauthsize = aead_setauthsize, |
| 3405 | .encrypt = old_aead_encrypt, | 2989 | .encrypt = aead_encrypt, |
| 3406 | .decrypt = old_aead_decrypt, | 2990 | .decrypt = aead_decrypt, |
| 3407 | .givencrypt = aead_null_givencrypt, | ||
| 3408 | .geniv = "<built-in>", | ||
| 3409 | .ivsize = NULL_IV_SIZE, | 2991 | .ivsize = NULL_IV_SIZE, |
| 3410 | .maxauthsize = SHA224_DIGEST_SIZE, | 2992 | .maxauthsize = SHA224_DIGEST_SIZE, |
| 3411 | }, | 2993 | }, |
| 3412 | .class1_alg_type = 0, | 2994 | .caam = { |
| 3413 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 2995 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 3414 | OP_ALG_AAI_HMAC_PRECOMP, | 2996 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3415 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | 2997 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, |
| 2998 | }, | ||
| 3416 | }, | 2999 | }, |
| 3417 | { | 3000 | { |
| 3418 | .name = "authenc(hmac(sha256),ecb(cipher_null))", | 3001 | .aead = { |
| 3419 | .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam", | 3002 | .base = { |
| 3420 | .blocksize = NULL_BLOCK_SIZE, | 3003 | .cra_name = "authenc(hmac(sha256)," |
| 3421 | .type = CRYPTO_ALG_TYPE_AEAD, | 3004 | "ecb(cipher_null))", |
| 3422 | .template_aead = { | 3005 | .cra_driver_name = "authenc-hmac-sha256-" |
| 3006 | "ecb-cipher_null-caam", | ||
| 3007 | .cra_blocksize = NULL_BLOCK_SIZE, | ||
| 3008 | }, | ||
| 3423 | .setkey = aead_setkey, | 3009 | .setkey = aead_setkey, |
| 3424 | .setauthsize = aead_setauthsize, | 3010 | .setauthsize = aead_setauthsize, |
| 3425 | .encrypt = old_aead_encrypt, | 3011 | .encrypt = aead_encrypt, |
| 3426 | .decrypt = old_aead_decrypt, | 3012 | .decrypt = aead_decrypt, |
| 3427 | .givencrypt = aead_null_givencrypt, | ||
| 3428 | .geniv = "<built-in>", | ||
| 3429 | .ivsize = NULL_IV_SIZE, | 3013 | .ivsize = NULL_IV_SIZE, |
| 3430 | .maxauthsize = SHA256_DIGEST_SIZE, | 3014 | .maxauthsize = SHA256_DIGEST_SIZE, |
| 3431 | }, | 3015 | }, |
| 3432 | .class1_alg_type = 0, | 3016 | .caam = { |
| 3433 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3017 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
| 3434 | OP_ALG_AAI_HMAC_PRECOMP, | 3018 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3435 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | 3019 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, |
| 3020 | }, | ||
| 3436 | }, | 3021 | }, |
| 3437 | { | 3022 | { |
| 3438 | .name = "authenc(hmac(sha384),ecb(cipher_null))", | 3023 | .aead = { |
| 3439 | .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam", | 3024 | .base = { |
| 3440 | .blocksize = NULL_BLOCK_SIZE, | 3025 | .cra_name = "authenc(hmac(sha384)," |
| 3441 | .type = CRYPTO_ALG_TYPE_AEAD, | 3026 | "ecb(cipher_null))", |
| 3442 | .template_aead = { | 3027 | .cra_driver_name = "authenc-hmac-sha384-" |
| 3028 | "ecb-cipher_null-caam", | ||
| 3029 | .cra_blocksize = NULL_BLOCK_SIZE, | ||
| 3030 | }, | ||
| 3443 | .setkey = aead_setkey, | 3031 | .setkey = aead_setkey, |
| 3444 | .setauthsize = aead_setauthsize, | 3032 | .setauthsize = aead_setauthsize, |
| 3445 | .encrypt = old_aead_encrypt, | 3033 | .encrypt = aead_encrypt, |
| 3446 | .decrypt = old_aead_decrypt, | 3034 | .decrypt = aead_decrypt, |
| 3447 | .givencrypt = aead_null_givencrypt, | ||
| 3448 | .geniv = "<built-in>", | ||
| 3449 | .ivsize = NULL_IV_SIZE, | 3035 | .ivsize = NULL_IV_SIZE, |
| 3450 | .maxauthsize = SHA384_DIGEST_SIZE, | 3036 | .maxauthsize = SHA384_DIGEST_SIZE, |
| 3451 | }, | 3037 | }, |
| 3452 | .class1_alg_type = 0, | 3038 | .caam = { |
| 3453 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3039 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 3454 | OP_ALG_AAI_HMAC_PRECOMP, | 3040 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3455 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | 3041 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, |
| 3042 | }, | ||
| 3456 | }, | 3043 | }, |
| 3457 | { | 3044 | { |
| 3458 | .name = "authenc(hmac(sha512),ecb(cipher_null))", | 3045 | .aead = { |
| 3459 | .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam", | 3046 | .base = { |
| 3460 | .blocksize = NULL_BLOCK_SIZE, | 3047 | .cra_name = "authenc(hmac(sha512)," |
| 3461 | .type = CRYPTO_ALG_TYPE_AEAD, | 3048 | "ecb(cipher_null))", |
| 3462 | .template_aead = { | 3049 | .cra_driver_name = "authenc-hmac-sha512-" |
| 3050 | "ecb-cipher_null-caam", | ||
| 3051 | .cra_blocksize = NULL_BLOCK_SIZE, | ||
| 3052 | }, | ||
| 3463 | .setkey = aead_setkey, | 3053 | .setkey = aead_setkey, |
| 3464 | .setauthsize = aead_setauthsize, | 3054 | .setauthsize = aead_setauthsize, |
| 3465 | .encrypt = old_aead_encrypt, | 3055 | .encrypt = aead_encrypt, |
| 3466 | .decrypt = old_aead_decrypt, | 3056 | .decrypt = aead_decrypt, |
| 3467 | .givencrypt = aead_null_givencrypt, | ||
| 3468 | .geniv = "<built-in>", | ||
| 3469 | .ivsize = NULL_IV_SIZE, | 3057 | .ivsize = NULL_IV_SIZE, |
| 3470 | .maxauthsize = SHA512_DIGEST_SIZE, | 3058 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 3471 | }, | 3059 | }, |
| 3472 | .class1_alg_type = 0, | 3060 | .caam = { |
| 3473 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3061 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3474 | OP_ALG_AAI_HMAC_PRECOMP, | 3062 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3475 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 3063 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, |
| 3064 | }, | ||
| 3476 | }, | 3065 | }, |
| 3477 | { | 3066 | { |
| 3478 | .name = "authenc(hmac(md5),cbc(aes))", | 3067 | .aead = { |
| 3479 | .driver_name = "authenc-hmac-md5-cbc-aes-caam", | 3068 | .base = { |
| 3480 | .blocksize = AES_BLOCK_SIZE, | 3069 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
| 3481 | .type = CRYPTO_ALG_TYPE_AEAD, | 3070 | .cra_driver_name = "authenc-hmac-md5-" |
| 3482 | .template_aead = { | 3071 | "cbc-aes-caam", |
| 3072 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3073 | }, | ||
| 3483 | .setkey = aead_setkey, | 3074 | .setkey = aead_setkey, |
| 3484 | .setauthsize = aead_setauthsize, | 3075 | .setauthsize = aead_setauthsize, |
| 3485 | .encrypt = old_aead_encrypt, | 3076 | .encrypt = aead_encrypt, |
| 3486 | .decrypt = old_aead_decrypt, | 3077 | .decrypt = aead_decrypt, |
| 3487 | .givencrypt = old_aead_givencrypt, | ||
| 3488 | .geniv = "<built-in>", | ||
| 3489 | .ivsize = AES_BLOCK_SIZE, | 3078 | .ivsize = AES_BLOCK_SIZE, |
| 3490 | .maxauthsize = MD5_DIGEST_SIZE, | 3079 | .maxauthsize = MD5_DIGEST_SIZE, |
| 3080 | }, | ||
| 3081 | .caam = { | ||
| 3082 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3083 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3084 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3085 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3086 | }, | ||
| 3087 | }, | ||
| 3088 | { | ||
| 3089 | .aead = { | ||
| 3090 | .base = { | ||
| 3091 | .cra_name = "echainiv(authenc(hmac(md5)," | ||
| 3092 | "cbc(aes)))", | ||
| 3093 | .cra_driver_name = "echainiv-authenc-hmac-md5-" | ||
| 3094 | "cbc-aes-caam", | ||
| 3095 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3491 | }, | 3096 | }, |
| 3492 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 3097 | .setkey = aead_setkey, |
| 3493 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, | 3098 | .setauthsize = aead_setauthsize, |
| 3494 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | 3099 | .encrypt = aead_encrypt, |
| 3100 | .decrypt = aead_givdecrypt, | ||
| 3101 | .ivsize = AES_BLOCK_SIZE, | ||
| 3102 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 3103 | }, | ||
| 3104 | .caam = { | ||
| 3105 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3106 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3107 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3108 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3109 | .geniv = true, | ||
| 3110 | }, | ||
| 3495 | }, | 3111 | }, |
| 3496 | { | 3112 | { |
| 3497 | .name = "authenc(hmac(sha1),cbc(aes))", | 3113 | .aead = { |
| 3498 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", | 3114 | .base = { |
| 3499 | .blocksize = AES_BLOCK_SIZE, | 3115 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
| 3500 | .type = CRYPTO_ALG_TYPE_AEAD, | 3116 | .cra_driver_name = "authenc-hmac-sha1-" |
| 3501 | .template_aead = { | 3117 | "cbc-aes-caam", |
| 3118 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3119 | }, | ||
| 3502 | .setkey = aead_setkey, | 3120 | .setkey = aead_setkey, |
| 3503 | .setauthsize = aead_setauthsize, | 3121 | .setauthsize = aead_setauthsize, |
| 3504 | .encrypt = old_aead_encrypt, | 3122 | .encrypt = aead_encrypt, |
| 3505 | .decrypt = old_aead_decrypt, | 3123 | .decrypt = aead_decrypt, |
| 3506 | .givencrypt = old_aead_givencrypt, | ||
| 3507 | .geniv = "<built-in>", | ||
| 3508 | .ivsize = AES_BLOCK_SIZE, | 3124 | .ivsize = AES_BLOCK_SIZE, |
| 3509 | .maxauthsize = SHA1_DIGEST_SIZE, | 3125 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 3126 | }, | ||
| 3127 | .caam = { | ||
| 3128 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3129 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | ||
| 3130 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3131 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3132 | }, | ||
| 3133 | }, | ||
| 3134 | { | ||
| 3135 | .aead = { | ||
| 3136 | .base = { | ||
| 3137 | .cra_name = "echainiv(authenc(hmac(sha1)," | ||
| 3138 | "cbc(aes)))", | ||
| 3139 | .cra_driver_name = "echainiv-authenc-" | ||
| 3140 | "hmac-sha1-cbc-aes-caam", | ||
| 3141 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3510 | }, | 3142 | }, |
| 3511 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 3143 | .setkey = aead_setkey, |
| 3512 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | 3144 | .setauthsize = aead_setauthsize, |
| 3513 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | 3145 | .encrypt = aead_encrypt, |
| 3146 | .decrypt = aead_givdecrypt, | ||
| 3147 | .ivsize = AES_BLOCK_SIZE, | ||
| 3148 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 3149 | }, | ||
| 3150 | .caam = { | ||
| 3151 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3152 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | ||
| 3153 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3154 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3155 | .geniv = true, | ||
| 3156 | }, | ||
| 3514 | }, | 3157 | }, |
| 3515 | { | 3158 | { |
| 3516 | .name = "authenc(hmac(sha224),cbc(aes))", | 3159 | .aead = { |
| 3517 | .driver_name = "authenc-hmac-sha224-cbc-aes-caam", | 3160 | .base = { |
| 3518 | .blocksize = AES_BLOCK_SIZE, | 3161 | .cra_name = "authenc(hmac(sha224),cbc(aes))", |
| 3519 | .type = CRYPTO_ALG_TYPE_AEAD, | 3162 | .cra_driver_name = "authenc-hmac-sha224-" |
| 3520 | .template_aead = { | 3163 | "cbc-aes-caam", |
| 3164 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3165 | }, | ||
| 3521 | .setkey = aead_setkey, | 3166 | .setkey = aead_setkey, |
| 3522 | .setauthsize = aead_setauthsize, | 3167 | .setauthsize = aead_setauthsize, |
| 3523 | .encrypt = old_aead_encrypt, | 3168 | .encrypt = aead_encrypt, |
| 3524 | .decrypt = old_aead_decrypt, | 3169 | .decrypt = aead_decrypt, |
| 3525 | .givencrypt = old_aead_givencrypt, | ||
| 3526 | .geniv = "<built-in>", | ||
| 3527 | .ivsize = AES_BLOCK_SIZE, | 3170 | .ivsize = AES_BLOCK_SIZE, |
| 3528 | .maxauthsize = SHA224_DIGEST_SIZE, | 3171 | .maxauthsize = SHA224_DIGEST_SIZE, |
| 3172 | }, | ||
| 3173 | .caam = { | ||
| 3174 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3175 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 3176 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3177 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3178 | }, | ||
| 3179 | }, | ||
| 3180 | { | ||
| 3181 | .aead = { | ||
| 3182 | .base = { | ||
| 3183 | .cra_name = "echainiv(authenc(hmac(sha224)," | ||
| 3184 | "cbc(aes)))", | ||
| 3185 | .cra_driver_name = "echainiv-authenc-" | ||
| 3186 | "hmac-sha224-cbc-aes-caam", | ||
| 3187 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3529 | }, | 3188 | }, |
| 3530 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 3189 | .setkey = aead_setkey, |
| 3531 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 3190 | .setauthsize = aead_setauthsize, |
| 3532 | OP_ALG_AAI_HMAC_PRECOMP, | 3191 | .encrypt = aead_encrypt, |
| 3533 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | 3192 | .decrypt = aead_givdecrypt, |
| 3193 | .ivsize = AES_BLOCK_SIZE, | ||
| 3194 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 3195 | }, | ||
| 3196 | .caam = { | ||
| 3197 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3198 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 3199 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3200 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3201 | .geniv = true, | ||
| 3202 | }, | ||
| 3534 | }, | 3203 | }, |
| 3535 | { | 3204 | { |
| 3536 | .name = "authenc(hmac(sha256),cbc(aes))", | 3205 | .aead = { |
| 3537 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", | 3206 | .base = { |
| 3538 | .blocksize = AES_BLOCK_SIZE, | 3207 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
| 3539 | .type = CRYPTO_ALG_TYPE_AEAD, | 3208 | .cra_driver_name = "authenc-hmac-sha256-" |
| 3540 | .template_aead = { | 3209 | "cbc-aes-caam", |
| 3210 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3211 | }, | ||
| 3541 | .setkey = aead_setkey, | 3212 | .setkey = aead_setkey, |
| 3542 | .setauthsize = aead_setauthsize, | 3213 | .setauthsize = aead_setauthsize, |
| 3543 | .encrypt = old_aead_encrypt, | 3214 | .encrypt = aead_encrypt, |
| 3544 | .decrypt = old_aead_decrypt, | 3215 | .decrypt = aead_decrypt, |
| 3545 | .givencrypt = old_aead_givencrypt, | ||
| 3546 | .geniv = "<built-in>", | ||
| 3547 | .ivsize = AES_BLOCK_SIZE, | 3216 | .ivsize = AES_BLOCK_SIZE, |
| 3548 | .maxauthsize = SHA256_DIGEST_SIZE, | 3217 | .maxauthsize = SHA256_DIGEST_SIZE, |
| 3218 | }, | ||
| 3219 | .caam = { | ||
| 3220 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3221 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 3222 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3223 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3224 | }, | ||
| 3225 | }, | ||
| 3226 | { | ||
| 3227 | .aead = { | ||
| 3228 | .base = { | ||
| 3229 | .cra_name = "echainiv(authenc(hmac(sha256)," | ||
| 3230 | "cbc(aes)))", | ||
| 3231 | .cra_driver_name = "echainiv-authenc-" | ||
| 3232 | "hmac-sha256-cbc-aes-caam", | ||
| 3233 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3549 | }, | 3234 | }, |
| 3550 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 3235 | .setkey = aead_setkey, |
| 3551 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3236 | .setauthsize = aead_setauthsize, |
| 3552 | OP_ALG_AAI_HMAC_PRECOMP, | 3237 | .encrypt = aead_encrypt, |
| 3553 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | 3238 | .decrypt = aead_givdecrypt, |
| 3239 | .ivsize = AES_BLOCK_SIZE, | ||
| 3240 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 3241 | }, | ||
| 3242 | .caam = { | ||
| 3243 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3244 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 3245 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3246 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3247 | .geniv = true, | ||
| 3248 | }, | ||
| 3554 | }, | 3249 | }, |
| 3555 | { | 3250 | { |
| 3556 | .name = "authenc(hmac(sha384),cbc(aes))", | 3251 | .aead = { |
| 3557 | .driver_name = "authenc-hmac-sha384-cbc-aes-caam", | 3252 | .base = { |
| 3558 | .blocksize = AES_BLOCK_SIZE, | 3253 | .cra_name = "authenc(hmac(sha384),cbc(aes))", |
| 3559 | .type = CRYPTO_ALG_TYPE_AEAD, | 3254 | .cra_driver_name = "authenc-hmac-sha384-" |
| 3560 | .template_aead = { | 3255 | "cbc-aes-caam", |
| 3256 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3257 | }, | ||
| 3561 | .setkey = aead_setkey, | 3258 | .setkey = aead_setkey, |
| 3562 | .setauthsize = aead_setauthsize, | 3259 | .setauthsize = aead_setauthsize, |
| 3563 | .encrypt = old_aead_encrypt, | 3260 | .encrypt = aead_encrypt, |
| 3564 | .decrypt = old_aead_decrypt, | 3261 | .decrypt = aead_decrypt, |
| 3565 | .givencrypt = old_aead_givencrypt, | ||
| 3566 | .geniv = "<built-in>", | ||
| 3567 | .ivsize = AES_BLOCK_SIZE, | 3262 | .ivsize = AES_BLOCK_SIZE, |
| 3568 | .maxauthsize = SHA384_DIGEST_SIZE, | 3263 | .maxauthsize = SHA384_DIGEST_SIZE, |
| 3264 | }, | ||
| 3265 | .caam = { | ||
| 3266 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3267 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 3268 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3269 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3270 | }, | ||
| 3271 | }, | ||
| 3272 | { | ||
| 3273 | .aead = { | ||
| 3274 | .base = { | ||
| 3275 | .cra_name = "echainiv(authenc(hmac(sha384)," | ||
| 3276 | "cbc(aes)))", | ||
| 3277 | .cra_driver_name = "echainiv-authenc-" | ||
| 3278 | "hmac-sha384-cbc-aes-caam", | ||
| 3279 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3569 | }, | 3280 | }, |
| 3570 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 3281 | .setkey = aead_setkey, |
| 3571 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3282 | .setauthsize = aead_setauthsize, |
| 3572 | OP_ALG_AAI_HMAC_PRECOMP, | 3283 | .encrypt = aead_encrypt, |
| 3573 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | 3284 | .decrypt = aead_givdecrypt, |
| 3285 | .ivsize = AES_BLOCK_SIZE, | ||
| 3286 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 3287 | }, | ||
| 3288 | .caam = { | ||
| 3289 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3290 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 3291 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3292 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3293 | .geniv = true, | ||
| 3294 | }, | ||
| 3574 | }, | 3295 | }, |
| 3575 | |||
| 3576 | { | 3296 | { |
| 3577 | .name = "authenc(hmac(sha512),cbc(aes))", | 3297 | .aead = { |
| 3578 | .driver_name = "authenc-hmac-sha512-cbc-aes-caam", | 3298 | .base = { |
| 3579 | .blocksize = AES_BLOCK_SIZE, | 3299 | .cra_name = "authenc(hmac(sha512),cbc(aes))", |
| 3580 | .type = CRYPTO_ALG_TYPE_AEAD, | 3300 | .cra_driver_name = "authenc-hmac-sha512-" |
| 3581 | .template_aead = { | 3301 | "cbc-aes-caam", |
| 3302 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3303 | }, | ||
| 3582 | .setkey = aead_setkey, | 3304 | .setkey = aead_setkey, |
| 3583 | .setauthsize = aead_setauthsize, | 3305 | .setauthsize = aead_setauthsize, |
| 3584 | .encrypt = old_aead_encrypt, | 3306 | .encrypt = aead_encrypt, |
| 3585 | .decrypt = old_aead_decrypt, | 3307 | .decrypt = aead_decrypt, |
| 3586 | .givencrypt = old_aead_givencrypt, | ||
| 3587 | .geniv = "<built-in>", | ||
| 3588 | .ivsize = AES_BLOCK_SIZE, | 3308 | .ivsize = AES_BLOCK_SIZE, |
| 3589 | .maxauthsize = SHA512_DIGEST_SIZE, | 3309 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 3310 | }, | ||
| 3311 | .caam = { | ||
| 3312 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3313 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
| 3314 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3315 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3316 | }, | ||
| 3317 | }, | ||
| 3318 | { | ||
| 3319 | .aead = { | ||
| 3320 | .base = { | ||
| 3321 | .cra_name = "echainiv(authenc(hmac(sha512)," | ||
| 3322 | "cbc(aes)))", | ||
| 3323 | .cra_driver_name = "echainiv-authenc-" | ||
| 3324 | "hmac-sha512-cbc-aes-caam", | ||
| 3325 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 3590 | }, | 3326 | }, |
| 3591 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 3327 | .setkey = aead_setkey, |
| 3592 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3328 | .setauthsize = aead_setauthsize, |
| 3593 | OP_ALG_AAI_HMAC_PRECOMP, | 3329 | .encrypt = aead_encrypt, |
| 3594 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 3330 | .decrypt = aead_givdecrypt, |
| 3331 | .ivsize = AES_BLOCK_SIZE, | ||
| 3332 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 3333 | }, | ||
| 3334 | .caam = { | ||
| 3335 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
| 3336 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
| 3337 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3338 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3339 | .geniv = true, | ||
| 3340 | }, | ||
| 3595 | }, | 3341 | }, |
| 3596 | { | 3342 | { |
| 3597 | .name = "authenc(hmac(md5),cbc(des3_ede))", | 3343 | .aead = { |
| 3598 | .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam", | 3344 | .base = { |
| 3599 | .blocksize = DES3_EDE_BLOCK_SIZE, | 3345 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
| 3600 | .type = CRYPTO_ALG_TYPE_AEAD, | 3346 | .cra_driver_name = "authenc-hmac-md5-" |
| 3601 | .template_aead = { | 3347 | "cbc-des3_ede-caam", |
| 3348 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3349 | }, | ||
| 3602 | .setkey = aead_setkey, | 3350 | .setkey = aead_setkey, |
| 3603 | .setauthsize = aead_setauthsize, | 3351 | .setauthsize = aead_setauthsize, |
| 3604 | .encrypt = old_aead_encrypt, | 3352 | .encrypt = aead_encrypt, |
| 3605 | .decrypt = old_aead_decrypt, | 3353 | .decrypt = aead_decrypt, |
| 3606 | .givencrypt = old_aead_givencrypt, | ||
| 3607 | .geniv = "<built-in>", | ||
| 3608 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3354 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 3609 | .maxauthsize = MD5_DIGEST_SIZE, | 3355 | .maxauthsize = MD5_DIGEST_SIZE, |
| 3356 | }, | ||
| 3357 | .caam = { | ||
| 3358 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3359 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3360 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3361 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3362 | } | ||
| 3363 | }, | ||
| 3364 | { | ||
| 3365 | .aead = { | ||
| 3366 | .base = { | ||
| 3367 | .cra_name = "echainiv(authenc(hmac(md5)," | ||
| 3368 | "cbc(des3_ede)))", | ||
| 3369 | .cra_driver_name = "echainiv-authenc-hmac-md5-" | ||
| 3370 | "cbc-des3_ede-caam", | ||
| 3371 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3610 | }, | 3372 | }, |
| 3611 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 3373 | .setkey = aead_setkey, |
| 3612 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, | 3374 | .setauthsize = aead_setauthsize, |
| 3613 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | 3375 | .encrypt = aead_encrypt, |
| 3376 | .decrypt = aead_givdecrypt, | ||
| 3377 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3378 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 3379 | }, | ||
| 3380 | .caam = { | ||
| 3381 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3382 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3383 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3384 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3385 | .geniv = true, | ||
| 3386 | } | ||
| 3614 | }, | 3387 | }, |
| 3615 | { | 3388 | { |
| 3616 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | 3389 | .aead = { |
| 3617 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", | 3390 | .base = { |
| 3618 | .blocksize = DES3_EDE_BLOCK_SIZE, | 3391 | .cra_name = "authenc(hmac(sha1)," |
| 3619 | .type = CRYPTO_ALG_TYPE_AEAD, | 3392 | "cbc(des3_ede))", |
| 3620 | .template_aead = { | 3393 | .cra_driver_name = "authenc-hmac-sha1-" |
| 3394 | "cbc-des3_ede-caam", | ||
| 3395 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3396 | }, | ||
| 3621 | .setkey = aead_setkey, | 3397 | .setkey = aead_setkey, |
| 3622 | .setauthsize = aead_setauthsize, | 3398 | .setauthsize = aead_setauthsize, |
| 3623 | .encrypt = old_aead_encrypt, | 3399 | .encrypt = aead_encrypt, |
| 3624 | .decrypt = old_aead_decrypt, | 3400 | .decrypt = aead_decrypt, |
| 3625 | .givencrypt = old_aead_givencrypt, | ||
| 3626 | .geniv = "<built-in>", | ||
| 3627 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3401 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 3628 | .maxauthsize = SHA1_DIGEST_SIZE, | 3402 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 3403 | }, | ||
| 3404 | .caam = { | ||
| 3405 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3406 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | ||
| 3407 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3408 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3409 | }, | ||
| 3410 | }, | ||
| 3411 | { | ||
| 3412 | .aead = { | ||
| 3413 | .base = { | ||
| 3414 | .cra_name = "echainiv(authenc(hmac(sha1)," | ||
| 3415 | "cbc(des3_ede)))", | ||
| 3416 | .cra_driver_name = "echainiv-authenc-" | ||
| 3417 | "hmac-sha1-" | ||
| 3418 | "cbc-des3_ede-caam", | ||
| 3419 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3629 | }, | 3420 | }, |
| 3630 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 3421 | .setkey = aead_setkey, |
| 3631 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | 3422 | .setauthsize = aead_setauthsize, |
| 3632 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | 3423 | .encrypt = aead_encrypt, |
| 3424 | .decrypt = aead_givdecrypt, | ||
| 3425 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3426 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 3427 | }, | ||
| 3428 | .caam = { | ||
| 3429 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3430 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | ||
| 3431 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3432 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3433 | .geniv = true, | ||
| 3434 | }, | ||
| 3633 | }, | 3435 | }, |
| 3634 | { | 3436 | { |
| 3635 | .name = "authenc(hmac(sha224),cbc(des3_ede))", | 3437 | .aead = { |
| 3636 | .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam", | 3438 | .base = { |
| 3637 | .blocksize = DES3_EDE_BLOCK_SIZE, | 3439 | .cra_name = "authenc(hmac(sha224)," |
| 3638 | .type = CRYPTO_ALG_TYPE_AEAD, | 3440 | "cbc(des3_ede))", |
| 3639 | .template_aead = { | 3441 | .cra_driver_name = "authenc-hmac-sha224-" |
| 3442 | "cbc-des3_ede-caam", | ||
| 3443 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3444 | }, | ||
| 3640 | .setkey = aead_setkey, | 3445 | .setkey = aead_setkey, |
| 3641 | .setauthsize = aead_setauthsize, | 3446 | .setauthsize = aead_setauthsize, |
| 3642 | .encrypt = old_aead_encrypt, | 3447 | .encrypt = aead_encrypt, |
| 3643 | .decrypt = old_aead_decrypt, | 3448 | .decrypt = aead_decrypt, |
| 3644 | .givencrypt = old_aead_givencrypt, | ||
| 3645 | .geniv = "<built-in>", | ||
| 3646 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3449 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 3647 | .maxauthsize = SHA224_DIGEST_SIZE, | 3450 | .maxauthsize = SHA224_DIGEST_SIZE, |
| 3451 | }, | ||
| 3452 | .caam = { | ||
| 3453 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3454 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 3455 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3456 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3457 | }, | ||
| 3458 | }, | ||
| 3459 | { | ||
| 3460 | .aead = { | ||
| 3461 | .base = { | ||
| 3462 | .cra_name = "echainiv(authenc(hmac(sha224)," | ||
| 3463 | "cbc(des3_ede)))", | ||
| 3464 | .cra_driver_name = "echainiv-authenc-" | ||
| 3465 | "hmac-sha224-" | ||
| 3466 | "cbc-des3_ede-caam", | ||
| 3467 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3648 | }, | 3468 | }, |
| 3649 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 3469 | .setkey = aead_setkey, |
| 3650 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 3470 | .setauthsize = aead_setauthsize, |
| 3651 | OP_ALG_AAI_HMAC_PRECOMP, | 3471 | .encrypt = aead_encrypt, |
| 3652 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | 3472 | .decrypt = aead_givdecrypt, |
| 3473 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3474 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 3475 | }, | ||
| 3476 | .caam = { | ||
| 3477 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3478 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 3479 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3480 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3481 | .geniv = true, | ||
| 3482 | }, | ||
| 3653 | }, | 3483 | }, |
| 3654 | { | 3484 | { |
| 3655 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | 3485 | .aead = { |
| 3656 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", | 3486 | .base = { |
| 3657 | .blocksize = DES3_EDE_BLOCK_SIZE, | 3487 | .cra_name = "authenc(hmac(sha256)," |
| 3658 | .type = CRYPTO_ALG_TYPE_AEAD, | 3488 | "cbc(des3_ede))", |
| 3659 | .template_aead = { | 3489 | .cra_driver_name = "authenc-hmac-sha256-" |
| 3490 | "cbc-des3_ede-caam", | ||
| 3491 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3492 | }, | ||
| 3660 | .setkey = aead_setkey, | 3493 | .setkey = aead_setkey, |
| 3661 | .setauthsize = aead_setauthsize, | 3494 | .setauthsize = aead_setauthsize, |
| 3662 | .encrypt = old_aead_encrypt, | 3495 | .encrypt = aead_encrypt, |
| 3663 | .decrypt = old_aead_decrypt, | 3496 | .decrypt = aead_decrypt, |
| 3664 | .givencrypt = old_aead_givencrypt, | ||
| 3665 | .geniv = "<built-in>", | ||
| 3666 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3497 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 3667 | .maxauthsize = SHA256_DIGEST_SIZE, | 3498 | .maxauthsize = SHA256_DIGEST_SIZE, |
| 3499 | }, | ||
| 3500 | .caam = { | ||
| 3501 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3502 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 3503 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3504 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3505 | }, | ||
| 3506 | }, | ||
| 3507 | { | ||
| 3508 | .aead = { | ||
| 3509 | .base = { | ||
| 3510 | .cra_name = "echainiv(authenc(hmac(sha256)," | ||
| 3511 | "cbc(des3_ede)))", | ||
| 3512 | .cra_driver_name = "echainiv-authenc-" | ||
| 3513 | "hmac-sha256-" | ||
| 3514 | "cbc-des3_ede-caam", | ||
| 3515 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3668 | }, | 3516 | }, |
| 3669 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 3517 | .setkey = aead_setkey, |
| 3670 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3518 | .setauthsize = aead_setauthsize, |
| 3671 | OP_ALG_AAI_HMAC_PRECOMP, | 3519 | .encrypt = aead_encrypt, |
| 3672 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | 3520 | .decrypt = aead_givdecrypt, |
| 3521 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3522 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 3523 | }, | ||
| 3524 | .caam = { | ||
| 3525 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3526 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 3527 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3528 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3529 | .geniv = true, | ||
| 3530 | }, | ||
| 3673 | }, | 3531 | }, |
| 3674 | { | 3532 | { |
| 3675 | .name = "authenc(hmac(sha384),cbc(des3_ede))", | 3533 | .aead = { |
| 3676 | .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam", | 3534 | .base = { |
| 3677 | .blocksize = DES3_EDE_BLOCK_SIZE, | 3535 | .cra_name = "authenc(hmac(sha384)," |
| 3678 | .type = CRYPTO_ALG_TYPE_AEAD, | 3536 | "cbc(des3_ede))", |
| 3679 | .template_aead = { | 3537 | .cra_driver_name = "authenc-hmac-sha384-" |
| 3538 | "cbc-des3_ede-caam", | ||
| 3539 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3540 | }, | ||
| 3680 | .setkey = aead_setkey, | 3541 | .setkey = aead_setkey, |
| 3681 | .setauthsize = aead_setauthsize, | 3542 | .setauthsize = aead_setauthsize, |
| 3682 | .encrypt = old_aead_encrypt, | 3543 | .encrypt = aead_encrypt, |
| 3683 | .decrypt = old_aead_decrypt, | 3544 | .decrypt = aead_decrypt, |
| 3684 | .givencrypt = old_aead_givencrypt, | ||
| 3685 | .geniv = "<built-in>", | ||
| 3686 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3545 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 3687 | .maxauthsize = SHA384_DIGEST_SIZE, | 3546 | .maxauthsize = SHA384_DIGEST_SIZE, |
| 3547 | }, | ||
| 3548 | .caam = { | ||
| 3549 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3550 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 3551 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3552 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3553 | }, | ||
| 3554 | }, | ||
| 3555 | { | ||
| 3556 | .aead = { | ||
| 3557 | .base = { | ||
| 3558 | .cra_name = "echainiv(authenc(hmac(sha384)," | ||
| 3559 | "cbc(des3_ede)))", | ||
| 3560 | .cra_driver_name = "echainiv-authenc-" | ||
| 3561 | "hmac-sha384-" | ||
| 3562 | "cbc-des3_ede-caam", | ||
| 3563 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3688 | }, | 3564 | }, |
| 3689 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 3565 | .setkey = aead_setkey, |
| 3690 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3566 | .setauthsize = aead_setauthsize, |
| 3691 | OP_ALG_AAI_HMAC_PRECOMP, | 3567 | .encrypt = aead_encrypt, |
| 3692 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | 3568 | .decrypt = aead_givdecrypt, |
| 3569 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3570 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 3571 | }, | ||
| 3572 | .caam = { | ||
| 3573 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3574 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 3575 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3576 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3577 | .geniv = true, | ||
| 3578 | }, | ||
| 3693 | }, | 3579 | }, |
| 3694 | { | 3580 | { |
| 3695 | .name = "authenc(hmac(sha512),cbc(des3_ede))", | 3581 | .aead = { |
| 3696 | .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", | 3582 | .base = { |
| 3697 | .blocksize = DES3_EDE_BLOCK_SIZE, | 3583 | .cra_name = "authenc(hmac(sha512)," |
| 3698 | .type = CRYPTO_ALG_TYPE_AEAD, | 3584 | "cbc(des3_ede))", |
| 3699 | .template_aead = { | 3585 | .cra_driver_name = "authenc-hmac-sha512-" |
| 3586 | "cbc-des3_ede-caam", | ||
| 3587 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3588 | }, | ||
| 3700 | .setkey = aead_setkey, | 3589 | .setkey = aead_setkey, |
| 3701 | .setauthsize = aead_setauthsize, | 3590 | .setauthsize = aead_setauthsize, |
| 3702 | .encrypt = old_aead_encrypt, | 3591 | .encrypt = aead_encrypt, |
| 3703 | .decrypt = old_aead_decrypt, | 3592 | .decrypt = aead_decrypt, |
| 3704 | .givencrypt = old_aead_givencrypt, | ||
| 3705 | .geniv = "<built-in>", | ||
| 3706 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3593 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 3707 | .maxauthsize = SHA512_DIGEST_SIZE, | 3594 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 3595 | }, | ||
| 3596 | .caam = { | ||
| 3597 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3598 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
| 3599 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3600 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3601 | }, | ||
| 3602 | }, | ||
| 3603 | { | ||
| 3604 | .aead = { | ||
| 3605 | .base = { | ||
| 3606 | .cra_name = "echainiv(authenc(hmac(sha512)," | ||
| 3607 | "cbc(des3_ede)))", | ||
| 3608 | .cra_driver_name = "echainiv-authenc-" | ||
| 3609 | "hmac-sha512-" | ||
| 3610 | "cbc-des3_ede-caam", | ||
| 3611 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 3708 | }, | 3612 | }, |
| 3709 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 3613 | .setkey = aead_setkey, |
| 3710 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3614 | .setauthsize = aead_setauthsize, |
| 3711 | OP_ALG_AAI_HMAC_PRECOMP, | 3615 | .encrypt = aead_encrypt, |
| 3712 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 3616 | .decrypt = aead_givdecrypt, |
| 3617 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3618 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 3619 | }, | ||
| 3620 | .caam = { | ||
| 3621 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
| 3622 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
| 3623 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3624 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3625 | .geniv = true, | ||
| 3626 | }, | ||
| 3713 | }, | 3627 | }, |
| 3714 | { | 3628 | { |
| 3715 | .name = "authenc(hmac(md5),cbc(des))", | 3629 | .aead = { |
| 3716 | .driver_name = "authenc-hmac-md5-cbc-des-caam", | 3630 | .base = { |
| 3717 | .blocksize = DES_BLOCK_SIZE, | 3631 | .cra_name = "authenc(hmac(md5),cbc(des))", |
| 3718 | .type = CRYPTO_ALG_TYPE_AEAD, | 3632 | .cra_driver_name = "authenc-hmac-md5-" |
| 3719 | .template_aead = { | 3633 | "cbc-des-caam", |
| 3634 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3635 | }, | ||
| 3720 | .setkey = aead_setkey, | 3636 | .setkey = aead_setkey, |
| 3721 | .setauthsize = aead_setauthsize, | 3637 | .setauthsize = aead_setauthsize, |
| 3722 | .encrypt = old_aead_encrypt, | 3638 | .encrypt = aead_encrypt, |
| 3723 | .decrypt = old_aead_decrypt, | 3639 | .decrypt = aead_decrypt, |
| 3724 | .givencrypt = old_aead_givencrypt, | ||
| 3725 | .geniv = "<built-in>", | ||
| 3726 | .ivsize = DES_BLOCK_SIZE, | 3640 | .ivsize = DES_BLOCK_SIZE, |
| 3727 | .maxauthsize = MD5_DIGEST_SIZE, | 3641 | .maxauthsize = MD5_DIGEST_SIZE, |
| 3642 | }, | ||
| 3643 | .caam = { | ||
| 3644 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3645 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3646 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3647 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3648 | }, | ||
| 3649 | }, | ||
| 3650 | { | ||
| 3651 | .aead = { | ||
| 3652 | .base = { | ||
| 3653 | .cra_name = "echainiv(authenc(hmac(md5)," | ||
| 3654 | "cbc(des)))", | ||
| 3655 | .cra_driver_name = "echainiv-authenc-hmac-md5-" | ||
| 3656 | "cbc-des-caam", | ||
| 3657 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3728 | }, | 3658 | }, |
| 3729 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3659 | .setkey = aead_setkey, |
| 3730 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, | 3660 | .setauthsize = aead_setauthsize, |
| 3731 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | 3661 | .encrypt = aead_encrypt, |
| 3662 | .decrypt = aead_givdecrypt, | ||
| 3663 | .ivsize = DES_BLOCK_SIZE, | ||
| 3664 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 3665 | }, | ||
| 3666 | .caam = { | ||
| 3667 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3668 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3669 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3670 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3671 | .geniv = true, | ||
| 3672 | }, | ||
| 3732 | }, | 3673 | }, |
| 3733 | { | 3674 | { |
| 3734 | .name = "authenc(hmac(sha1),cbc(des))", | 3675 | .aead = { |
| 3735 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", | 3676 | .base = { |
| 3736 | .blocksize = DES_BLOCK_SIZE, | 3677 | .cra_name = "authenc(hmac(sha1),cbc(des))", |
| 3737 | .type = CRYPTO_ALG_TYPE_AEAD, | 3678 | .cra_driver_name = "authenc-hmac-sha1-" |
| 3738 | .template_aead = { | 3679 | "cbc-des-caam", |
| 3680 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3681 | }, | ||
| 3739 | .setkey = aead_setkey, | 3682 | .setkey = aead_setkey, |
| 3740 | .setauthsize = aead_setauthsize, | 3683 | .setauthsize = aead_setauthsize, |
| 3741 | .encrypt = old_aead_encrypt, | 3684 | .encrypt = aead_encrypt, |
| 3742 | .decrypt = old_aead_decrypt, | 3685 | .decrypt = aead_decrypt, |
| 3743 | .givencrypt = old_aead_givencrypt, | ||
| 3744 | .geniv = "<built-in>", | ||
| 3745 | .ivsize = DES_BLOCK_SIZE, | 3686 | .ivsize = DES_BLOCK_SIZE, |
| 3746 | .maxauthsize = SHA1_DIGEST_SIZE, | 3687 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 3688 | }, | ||
| 3689 | .caam = { | ||
| 3690 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3691 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | ||
| 3692 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3693 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3694 | }, | ||
| 3695 | }, | ||
| 3696 | { | ||
| 3697 | .aead = { | ||
| 3698 | .base = { | ||
| 3699 | .cra_name = "echainiv(authenc(hmac(sha1)," | ||
| 3700 | "cbc(des)))", | ||
| 3701 | .cra_driver_name = "echainiv-authenc-" | ||
| 3702 | "hmac-sha1-cbc-des-caam", | ||
| 3703 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3747 | }, | 3704 | }, |
| 3748 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3705 | .setkey = aead_setkey, |
| 3749 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | 3706 | .setauthsize = aead_setauthsize, |
| 3750 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | 3707 | .encrypt = aead_encrypt, |
| 3708 | .decrypt = aead_givdecrypt, | ||
| 3709 | .ivsize = DES_BLOCK_SIZE, | ||
| 3710 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 3711 | }, | ||
| 3712 | .caam = { | ||
| 3713 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3714 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | | ||
| 3715 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3716 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3717 | .geniv = true, | ||
| 3718 | }, | ||
| 3751 | }, | 3719 | }, |
| 3752 | { | 3720 | { |
| 3753 | .name = "authenc(hmac(sha224),cbc(des))", | 3721 | .aead = { |
| 3754 | .driver_name = "authenc-hmac-sha224-cbc-des-caam", | 3722 | .base = { |
| 3755 | .blocksize = DES_BLOCK_SIZE, | 3723 | .cra_name = "authenc(hmac(sha224),cbc(des))", |
| 3756 | .type = CRYPTO_ALG_TYPE_AEAD, | 3724 | .cra_driver_name = "authenc-hmac-sha224-" |
| 3757 | .template_aead = { | 3725 | "cbc-des-caam", |
| 3726 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3727 | }, | ||
| 3758 | .setkey = aead_setkey, | 3728 | .setkey = aead_setkey, |
| 3759 | .setauthsize = aead_setauthsize, | 3729 | .setauthsize = aead_setauthsize, |
| 3760 | .encrypt = old_aead_encrypt, | 3730 | .encrypt = aead_encrypt, |
| 3761 | .decrypt = old_aead_decrypt, | 3731 | .decrypt = aead_decrypt, |
| 3762 | .givencrypt = old_aead_givencrypt, | ||
| 3763 | .geniv = "<built-in>", | ||
| 3764 | .ivsize = DES_BLOCK_SIZE, | 3732 | .ivsize = DES_BLOCK_SIZE, |
| 3765 | .maxauthsize = SHA224_DIGEST_SIZE, | 3733 | .maxauthsize = SHA224_DIGEST_SIZE, |
| 3734 | }, | ||
| 3735 | .caam = { | ||
| 3736 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3737 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 3738 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3739 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3740 | }, | ||
| 3741 | }, | ||
| 3742 | { | ||
| 3743 | .aead = { | ||
| 3744 | .base = { | ||
| 3745 | .cra_name = "echainiv(authenc(hmac(sha224)," | ||
| 3746 | "cbc(des)))", | ||
| 3747 | .cra_driver_name = "echainiv-authenc-" | ||
| 3748 | "hmac-sha224-cbc-des-caam", | ||
| 3749 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3766 | }, | 3750 | }, |
| 3767 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3751 | .setkey = aead_setkey, |
| 3768 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 3752 | .setauthsize = aead_setauthsize, |
| 3769 | OP_ALG_AAI_HMAC_PRECOMP, | 3753 | .encrypt = aead_encrypt, |
| 3770 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | 3754 | .decrypt = aead_givdecrypt, |
| 3755 | .ivsize = DES_BLOCK_SIZE, | ||
| 3756 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 3757 | }, | ||
| 3758 | .caam = { | ||
| 3759 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3760 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 3761 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3762 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 3763 | .geniv = true, | ||
| 3764 | }, | ||
| 3771 | }, | 3765 | }, |
| 3772 | { | 3766 | { |
| 3773 | .name = "authenc(hmac(sha256),cbc(des))", | 3767 | .aead = { |
| 3774 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", | 3768 | .base = { |
| 3775 | .blocksize = DES_BLOCK_SIZE, | 3769 | .cra_name = "authenc(hmac(sha256),cbc(des))", |
| 3776 | .type = CRYPTO_ALG_TYPE_AEAD, | 3770 | .cra_driver_name = "authenc-hmac-sha256-" |
| 3777 | .template_aead = { | 3771 | "cbc-des-caam", |
| 3772 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3773 | }, | ||
| 3778 | .setkey = aead_setkey, | 3774 | .setkey = aead_setkey, |
| 3779 | .setauthsize = aead_setauthsize, | 3775 | .setauthsize = aead_setauthsize, |
| 3780 | .encrypt = old_aead_encrypt, | 3776 | .encrypt = aead_encrypt, |
| 3781 | .decrypt = old_aead_decrypt, | 3777 | .decrypt = aead_decrypt, |
| 3782 | .givencrypt = old_aead_givencrypt, | ||
| 3783 | .geniv = "<built-in>", | ||
| 3784 | .ivsize = DES_BLOCK_SIZE, | 3778 | .ivsize = DES_BLOCK_SIZE, |
| 3785 | .maxauthsize = SHA256_DIGEST_SIZE, | 3779 | .maxauthsize = SHA256_DIGEST_SIZE, |
| 3780 | }, | ||
| 3781 | .caam = { | ||
| 3782 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3783 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 3784 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3785 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3786 | }, | ||
| 3787 | }, | ||
| 3788 | { | ||
| 3789 | .aead = { | ||
| 3790 | .base = { | ||
| 3791 | .cra_name = "echainiv(authenc(hmac(sha256)," | ||
| 3792 | "cbc(des)))", | ||
| 3793 | .cra_driver_name = "echainiv-authenc-" | ||
| 3794 | "hmac-sha256-cbc-des-caam", | ||
| 3795 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3786 | }, | 3796 | }, |
| 3787 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3797 | .setkey = aead_setkey, |
| 3788 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3798 | .setauthsize = aead_setauthsize, |
| 3789 | OP_ALG_AAI_HMAC_PRECOMP, | 3799 | .encrypt = aead_encrypt, |
| 3790 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | 3800 | .decrypt = aead_givdecrypt, |
| 3801 | .ivsize = DES_BLOCK_SIZE, | ||
| 3802 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 3803 | }, | ||
| 3804 | .caam = { | ||
| 3805 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3806 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 3807 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3808 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 3809 | .geniv = true, | ||
| 3810 | }, | ||
| 3791 | }, | 3811 | }, |
| 3792 | { | 3812 | { |
| 3793 | .name = "authenc(hmac(sha384),cbc(des))", | 3813 | .aead = { |
| 3794 | .driver_name = "authenc-hmac-sha384-cbc-des-caam", | 3814 | .base = { |
| 3795 | .blocksize = DES_BLOCK_SIZE, | 3815 | .cra_name = "authenc(hmac(sha384),cbc(des))", |
| 3796 | .type = CRYPTO_ALG_TYPE_AEAD, | 3816 | .cra_driver_name = "authenc-hmac-sha384-" |
| 3797 | .template_aead = { | 3817 | "cbc-des-caam", |
| 3818 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3819 | }, | ||
| 3798 | .setkey = aead_setkey, | 3820 | .setkey = aead_setkey, |
| 3799 | .setauthsize = aead_setauthsize, | 3821 | .setauthsize = aead_setauthsize, |
| 3800 | .encrypt = old_aead_encrypt, | 3822 | .encrypt = aead_encrypt, |
| 3801 | .decrypt = old_aead_decrypt, | 3823 | .decrypt = aead_decrypt, |
| 3802 | .givencrypt = old_aead_givencrypt, | ||
| 3803 | .geniv = "<built-in>", | ||
| 3804 | .ivsize = DES_BLOCK_SIZE, | 3824 | .ivsize = DES_BLOCK_SIZE, |
| 3805 | .maxauthsize = SHA384_DIGEST_SIZE, | 3825 | .maxauthsize = SHA384_DIGEST_SIZE, |
| 3826 | }, | ||
| 3827 | .caam = { | ||
| 3828 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3829 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 3830 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3831 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3832 | }, | ||
| 3833 | }, | ||
| 3834 | { | ||
| 3835 | .aead = { | ||
| 3836 | .base = { | ||
| 3837 | .cra_name = "echainiv(authenc(hmac(sha384)," | ||
| 3838 | "cbc(des)))", | ||
| 3839 | .cra_driver_name = "echainiv-authenc-" | ||
| 3840 | "hmac-sha384-cbc-des-caam", | ||
| 3841 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3806 | }, | 3842 | }, |
| 3807 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3843 | .setkey = aead_setkey, |
| 3808 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3844 | .setauthsize = aead_setauthsize, |
| 3809 | OP_ALG_AAI_HMAC_PRECOMP, | 3845 | .encrypt = aead_encrypt, |
| 3810 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | 3846 | .decrypt = aead_givdecrypt, |
| 3847 | .ivsize = DES_BLOCK_SIZE, | ||
| 3848 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 3849 | }, | ||
| 3850 | .caam = { | ||
| 3851 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
| 3852 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 3853 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3854 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 3855 | .geniv = true, | ||
| 3856 | }, | ||
| 3811 | }, | 3857 | }, |
| 3812 | { | 3858 | { |
| 3813 | .name = "authenc(hmac(sha512),cbc(des))", | 3859 | .aead = { |
| 3814 | .driver_name = "authenc-hmac-sha512-cbc-des-caam", | 3860 | .base = { |
| 3815 | .blocksize = DES_BLOCK_SIZE, | 3861 | .cra_name = "authenc(hmac(sha512),cbc(des))", |
| 3816 | .type = CRYPTO_ALG_TYPE_AEAD, | 3862 | .cra_driver_name = "authenc-hmac-sha512-" |
| 3817 | .template_aead = { | 3863 | "cbc-des-caam", |
| 3864 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3865 | }, | ||
| 3818 | .setkey = aead_setkey, | 3866 | .setkey = aead_setkey, |
| 3819 | .setauthsize = aead_setauthsize, | 3867 | .setauthsize = aead_setauthsize, |
| 3820 | .encrypt = old_aead_encrypt, | 3868 | .encrypt = aead_encrypt, |
| 3821 | .decrypt = old_aead_decrypt, | 3869 | .decrypt = aead_decrypt, |
| 3822 | .givencrypt = old_aead_givencrypt, | ||
| 3823 | .geniv = "<built-in>", | ||
| 3824 | .ivsize = DES_BLOCK_SIZE, | 3870 | .ivsize = DES_BLOCK_SIZE, |
| 3825 | .maxauthsize = SHA512_DIGEST_SIZE, | 3871 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 3826 | }, | 3872 | }, |
| 3827 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 3873 | .caam = { |
| 3828 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 3874 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3829 | OP_ALG_AAI_HMAC_PRECOMP, | 3875 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3830 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 3876 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3877 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 3878 | }, | ||
| 3831 | }, | 3879 | }, |
| 3832 | { | 3880 | { |
| 3833 | .name = "authenc(hmac(md5),rfc3686(ctr(aes)))", | 3881 | .aead = { |
| 3834 | .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam", | 3882 | .base = { |
| 3835 | .blocksize = 1, | 3883 | .cra_name = "echainiv(authenc(hmac(sha512)," |
| 3836 | .type = CRYPTO_ALG_TYPE_AEAD, | 3884 | "cbc(des)))", |
| 3837 | .template_aead = { | 3885 | .cra_driver_name = "echainiv-authenc-" |
| 3886 | "hmac-sha512-cbc-des-caam", | ||
| 3887 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 3888 | }, | ||
| 3838 | .setkey = aead_setkey, | 3889 | .setkey = aead_setkey, |
| 3839 | .setauthsize = aead_setauthsize, | 3890 | .setauthsize = aead_setauthsize, |
| 3840 | .encrypt = old_aead_encrypt, | 3891 | .encrypt = aead_encrypt, |
| 3841 | .decrypt = old_aead_decrypt, | 3892 | .decrypt = aead_givdecrypt, |
| 3842 | .givencrypt = old_aead_givencrypt, | 3893 | .ivsize = DES_BLOCK_SIZE, |
| 3843 | .geniv = "<built-in>", | 3894 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 3844 | .ivsize = CTR_RFC3686_IV_SIZE, | 3895 | }, |
| 3845 | .maxauthsize = MD5_DIGEST_SIZE, | 3896 | .caam = { |
| 3846 | }, | 3897 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
| 3847 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 3898 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
| 3848 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, | 3899 | OP_ALG_AAI_HMAC_PRECOMP, |
| 3849 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | 3900 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, |
| 3901 | .geniv = true, | ||
| 3902 | }, | ||
| 3850 | }, | 3903 | }, |
| 3851 | { | 3904 | { |
| 3852 | .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", | 3905 | .aead = { |
| 3853 | .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam", | 3906 | .base = { |
| 3854 | .blocksize = 1, | 3907 | .cra_name = "authenc(hmac(md5)," |
| 3855 | .type = CRYPTO_ALG_TYPE_AEAD, | 3908 | "rfc3686(ctr(aes)))", |
| 3856 | .template_aead = { | 3909 | .cra_driver_name = "authenc-hmac-md5-" |
| 3910 | "rfc3686-ctr-aes-caam", | ||
| 3911 | .cra_blocksize = 1, | ||
| 3912 | }, | ||
| 3857 | .setkey = aead_setkey, | 3913 | .setkey = aead_setkey, |
| 3858 | .setauthsize = aead_setauthsize, | 3914 | .setauthsize = aead_setauthsize, |
| 3859 | .encrypt = old_aead_encrypt, | 3915 | .encrypt = aead_encrypt, |
| 3860 | .decrypt = old_aead_decrypt, | 3916 | .decrypt = aead_decrypt, |
| 3861 | .givencrypt = old_aead_givencrypt, | ||
| 3862 | .geniv = "<built-in>", | ||
| 3863 | .ivsize = CTR_RFC3686_IV_SIZE, | 3917 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 3864 | .maxauthsize = SHA1_DIGEST_SIZE, | 3918 | .maxauthsize = MD5_DIGEST_SIZE, |
| 3865 | }, | 3919 | }, |
| 3866 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 3920 | .caam = { |
| 3867 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | 3921 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 3868 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | 3922 | OP_ALG_AAI_CTR_MOD128, |
| 3923 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | | ||
| 3924 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3925 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3926 | .rfc3686 = true, | ||
| 3927 | }, | ||
| 3869 | }, | 3928 | }, |
| 3870 | { | 3929 | { |
| 3871 | .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", | 3930 | .aead = { |
| 3872 | .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam", | 3931 | .base = { |
| 3873 | .blocksize = 1, | 3932 | .cra_name = "seqiv(authenc(" |
| 3874 | .type = CRYPTO_ALG_TYPE_AEAD, | 3933 | "hmac(md5),rfc3686(ctr(aes))))", |
| 3875 | .template_aead = { | 3934 | .cra_driver_name = "seqiv-authenc-hmac-md5-" |
| 3935 | "rfc3686-ctr-aes-caam", | ||
| 3936 | .cra_blocksize = 1, | ||
| 3937 | }, | ||
| 3876 | .setkey = aead_setkey, | 3938 | .setkey = aead_setkey, |
| 3877 | .setauthsize = aead_setauthsize, | 3939 | .setauthsize = aead_setauthsize, |
| 3878 | .encrypt = old_aead_encrypt, | 3940 | .encrypt = aead_encrypt, |
| 3879 | .decrypt = old_aead_decrypt, | 3941 | .decrypt = aead_givdecrypt, |
| 3880 | .givencrypt = old_aead_givencrypt, | ||
| 3881 | .geniv = "<built-in>", | ||
| 3882 | .ivsize = CTR_RFC3686_IV_SIZE, | 3942 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 3883 | .maxauthsize = SHA224_DIGEST_SIZE, | 3943 | .maxauthsize = MD5_DIGEST_SIZE, |
| 3884 | }, | 3944 | }, |
| 3885 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 3945 | .caam = { |
| 3886 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | 3946 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 3887 | OP_ALG_AAI_HMAC_PRECOMP, | 3947 | OP_ALG_AAI_CTR_MOD128, |
| 3888 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | 3948 | .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
| 3949 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3950 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | ||
| 3951 | .rfc3686 = true, | ||
| 3952 | .geniv = true, | ||
| 3953 | }, | ||
| 3889 | }, | 3954 | }, |
| 3890 | { | 3955 | { |
| 3891 | .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", | 3956 | .aead = { |
| 3892 | .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam", | 3957 | .base = { |
| 3893 | .blocksize = 1, | 3958 | .cra_name = "authenc(hmac(sha1)," |
| 3894 | .type = CRYPTO_ALG_TYPE_AEAD, | 3959 | "rfc3686(ctr(aes)))", |
| 3895 | .template_aead = { | 3960 | .cra_driver_name = "authenc-hmac-sha1-" |
| 3961 | "rfc3686-ctr-aes-caam", | ||
| 3962 | .cra_blocksize = 1, | ||
| 3963 | }, | ||
| 3896 | .setkey = aead_setkey, | 3964 | .setkey = aead_setkey, |
| 3897 | .setauthsize = aead_setauthsize, | 3965 | .setauthsize = aead_setauthsize, |
| 3898 | .encrypt = old_aead_encrypt, | 3966 | .encrypt = aead_encrypt, |
| 3899 | .decrypt = old_aead_decrypt, | 3967 | .decrypt = aead_decrypt, |
| 3900 | .givencrypt = old_aead_givencrypt, | ||
| 3901 | .geniv = "<built-in>", | ||
| 3902 | .ivsize = CTR_RFC3686_IV_SIZE, | 3968 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 3903 | .maxauthsize = SHA256_DIGEST_SIZE, | 3969 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 3904 | }, | 3970 | }, |
| 3905 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 3971 | .caam = { |
| 3906 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | 3972 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 3907 | OP_ALG_AAI_HMAC_PRECOMP, | 3973 | OP_ALG_AAI_CTR_MOD128, |
| 3908 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | 3974 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 3975 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 3976 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 3977 | .rfc3686 = true, | ||
| 3978 | }, | ||
| 3909 | }, | 3979 | }, |
| 3910 | { | 3980 | { |
| 3911 | .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", | 3981 | .aead = { |
| 3912 | .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam", | 3982 | .base = { |
| 3913 | .blocksize = 1, | 3983 | .cra_name = "seqiv(authenc(" |
| 3914 | .type = CRYPTO_ALG_TYPE_AEAD, | 3984 | "hmac(sha1),rfc3686(ctr(aes))))", |
| 3915 | .template_aead = { | 3985 | .cra_driver_name = "seqiv-authenc-hmac-sha1-" |
| 3986 | "rfc3686-ctr-aes-caam", | ||
| 3987 | .cra_blocksize = 1, | ||
| 3988 | }, | ||
| 3916 | .setkey = aead_setkey, | 3989 | .setkey = aead_setkey, |
| 3917 | .setauthsize = aead_setauthsize, | 3990 | .setauthsize = aead_setauthsize, |
| 3918 | .encrypt = old_aead_encrypt, | 3991 | .encrypt = aead_encrypt, |
| 3919 | .decrypt = old_aead_decrypt, | 3992 | .decrypt = aead_givdecrypt, |
| 3920 | .givencrypt = old_aead_givencrypt, | ||
| 3921 | .geniv = "<built-in>", | ||
| 3922 | .ivsize = CTR_RFC3686_IV_SIZE, | 3993 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 3923 | .maxauthsize = SHA384_DIGEST_SIZE, | 3994 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 3924 | }, | 3995 | }, |
| 3925 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 3996 | .caam = { |
| 3926 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | 3997 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 3927 | OP_ALG_AAI_HMAC_PRECOMP, | 3998 | OP_ALG_AAI_CTR_MOD128, |
| 3928 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | 3999 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
| 4000 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4001 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | ||
| 4002 | .rfc3686 = true, | ||
| 4003 | .geniv = true, | ||
| 4004 | }, | ||
| 3929 | }, | 4005 | }, |
| 3930 | { | 4006 | { |
| 3931 | .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", | 4007 | .aead = { |
| 3932 | .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam", | 4008 | .base = { |
| 3933 | .blocksize = 1, | 4009 | .cra_name = "authenc(hmac(sha224)," |
| 3934 | .type = CRYPTO_ALG_TYPE_AEAD, | 4010 | "rfc3686(ctr(aes)))", |
| 3935 | .template_aead = { | 4011 | .cra_driver_name = "authenc-hmac-sha224-" |
| 4012 | "rfc3686-ctr-aes-caam", | ||
| 4013 | .cra_blocksize = 1, | ||
| 4014 | }, | ||
| 3936 | .setkey = aead_setkey, | 4015 | .setkey = aead_setkey, |
| 3937 | .setauthsize = aead_setauthsize, | 4016 | .setauthsize = aead_setauthsize, |
| 3938 | .encrypt = old_aead_encrypt, | 4017 | .encrypt = aead_encrypt, |
| 3939 | .decrypt = old_aead_decrypt, | 4018 | .decrypt = aead_decrypt, |
| 3940 | .givencrypt = old_aead_givencrypt, | ||
| 3941 | .geniv = "<built-in>", | ||
| 3942 | .ivsize = CTR_RFC3686_IV_SIZE, | 4019 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 3943 | .maxauthsize = SHA512_DIGEST_SIZE, | 4020 | .maxauthsize = SHA224_DIGEST_SIZE, |
| 3944 | }, | 4021 | }, |
| 3945 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 4022 | .caam = { |
| 3946 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | 4023 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 3947 | OP_ALG_AAI_HMAC_PRECOMP, | 4024 | OP_ALG_AAI_CTR_MOD128, |
| 3948 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 4025 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
| 4026 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4027 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 4028 | .rfc3686 = true, | ||
| 4029 | }, | ||
| 3949 | }, | 4030 | }, |
| 3950 | /* ablkcipher descriptor */ | ||
| 3951 | { | 4031 | { |
| 3952 | .name = "cbc(aes)", | 4032 | .aead = { |
| 3953 | .driver_name = "cbc-aes-caam", | 4033 | .base = { |
| 3954 | .blocksize = AES_BLOCK_SIZE, | 4034 | .cra_name = "seqiv(authenc(" |
| 3955 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | 4035 | "hmac(sha224),rfc3686(ctr(aes))))", |
| 3956 | .template_ablkcipher = { | 4036 | .cra_driver_name = "seqiv-authenc-hmac-sha224-" |
| 3957 | .setkey = ablkcipher_setkey, | 4037 | "rfc3686-ctr-aes-caam", |
| 3958 | .encrypt = ablkcipher_encrypt, | 4038 | .cra_blocksize = 1, |
| 3959 | .decrypt = ablkcipher_decrypt, | ||
| 3960 | .givencrypt = ablkcipher_givencrypt, | ||
| 3961 | .geniv = "<built-in>", | ||
| 3962 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 3963 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 3964 | .ivsize = AES_BLOCK_SIZE, | ||
| 3965 | }, | 4039 | }, |
| 3966 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | 4040 | .setkey = aead_setkey, |
| 4041 | .setauthsize = aead_setauthsize, | ||
| 4042 | .encrypt = aead_encrypt, | ||
| 4043 | .decrypt = aead_givdecrypt, | ||
| 4044 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
| 4045 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 4046 | }, | ||
| 4047 | .caam = { | ||
| 4048 | .class1_alg_type = OP_ALG_ALGSEL_AES | | ||
| 4049 | OP_ALG_AAI_CTR_MOD128, | ||
| 4050 | .class2_alg_type = OP_ALG_ALGSEL_SHA224 | | ||
| 4051 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4052 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | ||
| 4053 | .rfc3686 = true, | ||
| 4054 | .geniv = true, | ||
| 4055 | }, | ||
| 3967 | }, | 4056 | }, |
| 3968 | { | 4057 | { |
| 3969 | .name = "cbc(des3_ede)", | 4058 | .aead = { |
| 3970 | .driver_name = "cbc-3des-caam", | 4059 | .base = { |
| 3971 | .blocksize = DES3_EDE_BLOCK_SIZE, | 4060 | .cra_name = "authenc(hmac(sha256)," |
| 3972 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | 4061 | "rfc3686(ctr(aes)))", |
| 3973 | .template_ablkcipher = { | 4062 | .cra_driver_name = "authenc-hmac-sha256-" |
| 3974 | .setkey = ablkcipher_setkey, | 4063 | "rfc3686-ctr-aes-caam", |
| 3975 | .encrypt = ablkcipher_encrypt, | 4064 | .cra_blocksize = 1, |
| 3976 | .decrypt = ablkcipher_decrypt, | ||
| 3977 | .givencrypt = ablkcipher_givencrypt, | ||
| 3978 | .geniv = "<built-in>", | ||
| 3979 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
| 3980 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
| 3981 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 3982 | }, | 4065 | }, |
| 3983 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | 4066 | .setkey = aead_setkey, |
| 4067 | .setauthsize = aead_setauthsize, | ||
| 4068 | .encrypt = aead_encrypt, | ||
| 4069 | .decrypt = aead_decrypt, | ||
| 4070 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
| 4071 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 4072 | }, | ||
| 4073 | .caam = { | ||
| 4074 | .class1_alg_type = OP_ALG_ALGSEL_AES | | ||
| 4075 | OP_ALG_AAI_CTR_MOD128, | ||
| 4076 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 4077 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4078 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 4079 | .rfc3686 = true, | ||
| 4080 | }, | ||
| 3984 | }, | 4081 | }, |
| 3985 | { | 4082 | { |
| 3986 | .name = "cbc(des)", | 4083 | .aead = { |
| 3987 | .driver_name = "cbc-des-caam", | 4084 | .base = { |
| 3988 | .blocksize = DES_BLOCK_SIZE, | 4085 | .cra_name = "seqiv(authenc(hmac(sha256)," |
| 3989 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | 4086 | "rfc3686(ctr(aes))))", |
| 3990 | .template_ablkcipher = { | 4087 | .cra_driver_name = "seqiv-authenc-hmac-sha256-" |
| 3991 | .setkey = ablkcipher_setkey, | 4088 | "rfc3686-ctr-aes-caam", |
| 3992 | .encrypt = ablkcipher_encrypt, | 4089 | .cra_blocksize = 1, |
| 3993 | .decrypt = ablkcipher_decrypt, | ||
| 3994 | .givencrypt = ablkcipher_givencrypt, | ||
| 3995 | .geniv = "<built-in>", | ||
| 3996 | .min_keysize = DES_KEY_SIZE, | ||
| 3997 | .max_keysize = DES_KEY_SIZE, | ||
| 3998 | .ivsize = DES_BLOCK_SIZE, | ||
| 3999 | }, | 4090 | }, |
| 4000 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | 4091 | .setkey = aead_setkey, |
| 4092 | .setauthsize = aead_setauthsize, | ||
| 4093 | .encrypt = aead_encrypt, | ||
| 4094 | .decrypt = aead_givdecrypt, | ||
| 4095 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
| 4096 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 4097 | }, | ||
| 4098 | .caam = { | ||
| 4099 | .class1_alg_type = OP_ALG_ALGSEL_AES | | ||
| 4100 | OP_ALG_AAI_CTR_MOD128, | ||
| 4101 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | ||
| 4102 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4103 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | ||
| 4104 | .rfc3686 = true, | ||
| 4105 | .geniv = true, | ||
| 4106 | }, | ||
| 4001 | }, | 4107 | }, |
| 4002 | { | 4108 | { |
| 4003 | .name = "ctr(aes)", | 4109 | .aead = { |
| 4004 | .driver_name = "ctr-aes-caam", | 4110 | .base = { |
| 4005 | .blocksize = 1, | 4111 | .cra_name = "authenc(hmac(sha384)," |
| 4006 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 4112 | "rfc3686(ctr(aes)))", |
| 4007 | .template_ablkcipher = { | 4113 | .cra_driver_name = "authenc-hmac-sha384-" |
| 4008 | .setkey = ablkcipher_setkey, | 4114 | "rfc3686-ctr-aes-caam", |
| 4009 | .encrypt = ablkcipher_encrypt, | 4115 | .cra_blocksize = 1, |
| 4010 | .decrypt = ablkcipher_decrypt, | ||
| 4011 | .geniv = "chainiv", | ||
| 4012 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 4013 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 4014 | .ivsize = AES_BLOCK_SIZE, | ||
| 4015 | }, | 4116 | }, |
| 4016 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 4117 | .setkey = aead_setkey, |
| 4017 | }, | 4118 | .setauthsize = aead_setauthsize, |
| 4018 | { | 4119 | .encrypt = aead_encrypt, |
| 4019 | .name = "rfc3686(ctr(aes))", | 4120 | .decrypt = aead_decrypt, |
| 4020 | .driver_name = "rfc3686-ctr-aes-caam", | ||
| 4021 | .blocksize = 1, | ||
| 4022 | .type = CRYPTO_ALG_TYPE_GIVCIPHER, | ||
| 4023 | .template_ablkcipher = { | ||
| 4024 | .setkey = ablkcipher_setkey, | ||
| 4025 | .encrypt = ablkcipher_encrypt, | ||
| 4026 | .decrypt = ablkcipher_decrypt, | ||
| 4027 | .givencrypt = ablkcipher_givencrypt, | ||
| 4028 | .geniv = "<built-in>", | ||
| 4029 | .min_keysize = AES_MIN_KEY_SIZE + | ||
| 4030 | CTR_RFC3686_NONCE_SIZE, | ||
| 4031 | .max_keysize = AES_MAX_KEY_SIZE + | ||
| 4032 | CTR_RFC3686_NONCE_SIZE, | ||
| 4033 | .ivsize = CTR_RFC3686_IV_SIZE, | 4121 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 4034 | }, | 4122 | .maxauthsize = SHA384_DIGEST_SIZE, |
| 4035 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, | 4123 | }, |
| 4036 | } | 4124 | .caam = { |
| 4037 | }; | 4125 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 4038 | 4126 | OP_ALG_AAI_CTR_MOD128, | |
| 4039 | struct caam_alg_entry { | 4127 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
| 4040 | int class1_alg_type; | 4128 | OP_ALG_AAI_HMAC_PRECOMP, |
| 4041 | int class2_alg_type; | 4129 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, |
| 4042 | int alg_op; | 4130 | .rfc3686 = true, |
| 4043 | }; | 4131 | }, |
| 4044 | 4132 | }, | |
| 4045 | struct caam_aead_alg { | ||
| 4046 | struct aead_alg aead; | ||
| 4047 | struct caam_alg_entry caam; | ||
| 4048 | bool registered; | ||
| 4049 | }; | ||
| 4050 | |||
| 4051 | static struct caam_aead_alg driver_aeads[] = { | ||
| 4052 | { | 4133 | { |
| 4053 | .aead = { | 4134 | .aead = { |
| 4054 | .base = { | 4135 | .base = { |
| 4055 | .cra_name = "rfc4106(gcm(aes))", | 4136 | .cra_name = "seqiv(authenc(hmac(sha384)," |
| 4056 | .cra_driver_name = "rfc4106-gcm-aes-caam", | 4137 | "rfc3686(ctr(aes))))", |
| 4138 | .cra_driver_name = "seqiv-authenc-hmac-sha384-" | ||
| 4139 | "rfc3686-ctr-aes-caam", | ||
| 4057 | .cra_blocksize = 1, | 4140 | .cra_blocksize = 1, |
| 4058 | }, | 4141 | }, |
| 4059 | .setkey = rfc4106_setkey, | 4142 | .setkey = aead_setkey, |
| 4060 | .setauthsize = rfc4106_setauthsize, | 4143 | .setauthsize = aead_setauthsize, |
| 4061 | .encrypt = gcm_encrypt, | 4144 | .encrypt = aead_encrypt, |
| 4062 | .decrypt = gcm_decrypt, | 4145 | .decrypt = aead_givdecrypt, |
| 4063 | .ivsize = 8, | 4146 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 4064 | .maxauthsize = AES_BLOCK_SIZE, | 4147 | .maxauthsize = SHA384_DIGEST_SIZE, |
| 4065 | }, | 4148 | }, |
| 4066 | .caam = { | 4149 | .caam = { |
| 4067 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | 4150 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 4151 | OP_ALG_AAI_CTR_MOD128, | ||
| 4152 | .class2_alg_type = OP_ALG_ALGSEL_SHA384 | | ||
| 4153 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4154 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | ||
| 4155 | .rfc3686 = true, | ||
| 4156 | .geniv = true, | ||
| 4068 | }, | 4157 | }, |
| 4069 | }, | 4158 | }, |
| 4070 | { | 4159 | { |
| 4071 | .aead = { | 4160 | .aead = { |
| 4072 | .base = { | 4161 | .base = { |
| 4073 | .cra_name = "rfc4543(gcm(aes))", | 4162 | .cra_name = "authenc(hmac(sha512)," |
| 4074 | .cra_driver_name = "rfc4543-gcm-aes-caam", | 4163 | "rfc3686(ctr(aes)))", |
| 4164 | .cra_driver_name = "authenc-hmac-sha512-" | ||
| 4165 | "rfc3686-ctr-aes-caam", | ||
| 4075 | .cra_blocksize = 1, | 4166 | .cra_blocksize = 1, |
| 4076 | }, | 4167 | }, |
| 4077 | .setkey = rfc4543_setkey, | 4168 | .setkey = aead_setkey, |
| 4078 | .setauthsize = rfc4543_setauthsize, | 4169 | .setauthsize = aead_setauthsize, |
| 4079 | .encrypt = gcm_encrypt, | 4170 | .encrypt = aead_encrypt, |
| 4080 | .decrypt = gcm_decrypt, | 4171 | .decrypt = aead_decrypt, |
| 4081 | .ivsize = 8, | 4172 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 4082 | .maxauthsize = AES_BLOCK_SIZE, | 4173 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 4083 | }, | 4174 | }, |
| 4084 | .caam = { | 4175 | .caam = { |
| 4085 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | 4176 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 4177 | OP_ALG_AAI_CTR_MOD128, | ||
| 4178 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
| 4179 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4180 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 4181 | .rfc3686 = true, | ||
| 4086 | }, | 4182 | }, |
| 4087 | }, | 4183 | }, |
| 4088 | /* Galois Counter Mode */ | ||
| 4089 | { | 4184 | { |
| 4090 | .aead = { | 4185 | .aead = { |
| 4091 | .base = { | 4186 | .base = { |
| 4092 | .cra_name = "gcm(aes)", | 4187 | .cra_name = "seqiv(authenc(hmac(sha512)," |
| 4093 | .cra_driver_name = "gcm-aes-caam", | 4188 | "rfc3686(ctr(aes))))", |
| 4189 | .cra_driver_name = "seqiv-authenc-hmac-sha512-" | ||
| 4190 | "rfc3686-ctr-aes-caam", | ||
| 4094 | .cra_blocksize = 1, | 4191 | .cra_blocksize = 1, |
| 4095 | }, | 4192 | }, |
| 4096 | .setkey = gcm_setkey, | 4193 | .setkey = aead_setkey, |
| 4097 | .setauthsize = gcm_setauthsize, | 4194 | .setauthsize = aead_setauthsize, |
| 4098 | .encrypt = gcm_encrypt, | 4195 | .encrypt = aead_encrypt, |
| 4099 | .decrypt = gcm_decrypt, | 4196 | .decrypt = aead_givdecrypt, |
| 4100 | .ivsize = 12, | 4197 | .ivsize = CTR_RFC3686_IV_SIZE, |
| 4101 | .maxauthsize = AES_BLOCK_SIZE, | 4198 | .maxauthsize = SHA512_DIGEST_SIZE, |
| 4102 | }, | 4199 | }, |
| 4103 | .caam = { | 4200 | .caam = { |
| 4104 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, | 4201 | .class1_alg_type = OP_ALG_ALGSEL_AES | |
| 4202 | OP_ALG_AAI_CTR_MOD128, | ||
| 4203 | .class2_alg_type = OP_ALG_ALGSEL_SHA512 | | ||
| 4204 | OP_ALG_AAI_HMAC_PRECOMP, | ||
| 4205 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | ||
| 4206 | .rfc3686 = true, | ||
| 4207 | .geniv = true, | ||
| 4105 | }, | 4208 | }, |
| 4106 | }, | 4209 | }, |
| 4107 | }; | 4210 | }; |
| @@ -4211,7 +4314,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template | |||
| 4211 | struct caam_crypto_alg *t_alg; | 4314 | struct caam_crypto_alg *t_alg; |
| 4212 | struct crypto_alg *alg; | 4315 | struct crypto_alg *alg; |
| 4213 | 4316 | ||
| 4214 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); | 4317 | t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); |
| 4215 | if (!t_alg) { | 4318 | if (!t_alg) { |
| 4216 | pr_err("failed to allocate t_alg\n"); | 4319 | pr_err("failed to allocate t_alg\n"); |
| 4217 | return ERR_PTR(-ENOMEM); | 4320 | return ERR_PTR(-ENOMEM); |
| @@ -4240,10 +4343,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template | |||
| 4240 | alg->cra_type = &crypto_ablkcipher_type; | 4343 | alg->cra_type = &crypto_ablkcipher_type; |
| 4241 | alg->cra_ablkcipher = template->template_ablkcipher; | 4344 | alg->cra_ablkcipher = template->template_ablkcipher; |
| 4242 | break; | 4345 | break; |
| 4243 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 4244 | alg->cra_type = &crypto_aead_type; | ||
| 4245 | alg->cra_aead = template->template_aead; | ||
| 4246 | break; | ||
| 4247 | } | 4346 | } |
| 4248 | 4347 | ||
| 4249 | t_alg->caam.class1_alg_type = template->class1_alg_type; | 4348 | t_alg->caam.class1_alg_type = template->class1_alg_type; |
| @@ -4271,8 +4370,10 @@ static int __init caam_algapi_init(void) | |||
| 4271 | struct device_node *dev_node; | 4370 | struct device_node *dev_node; |
| 4272 | struct platform_device *pdev; | 4371 | struct platform_device *pdev; |
| 4273 | struct device *ctrldev; | 4372 | struct device *ctrldev; |
| 4274 | void *priv; | 4373 | struct caam_drv_private *priv; |
| 4275 | int i = 0, err = 0; | 4374 | int i = 0, err = 0; |
| 4375 | u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; | ||
| 4376 | unsigned int md_limit = SHA512_DIGEST_SIZE; | ||
| 4276 | bool registered = false; | 4377 | bool registered = false; |
| 4277 | 4378 | ||
| 4278 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 4379 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
| @@ -4302,16 +4403,39 @@ static int __init caam_algapi_init(void) | |||
| 4302 | 4403 | ||
| 4303 | INIT_LIST_HEAD(&alg_list); | 4404 | INIT_LIST_HEAD(&alg_list); |
| 4304 | 4405 | ||
| 4305 | /* register crypto algorithms the device supports */ | 4406 | /* |
| 4407 | * Register crypto algorithms the device supports. | ||
| 4408 | * First, detect presence and attributes of DES, AES, and MD blocks. | ||
| 4409 | */ | ||
| 4410 | cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); | ||
| 4411 | cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); | ||
| 4412 | des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; | ||
| 4413 | aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; | ||
| 4414 | md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; | ||
| 4415 | |||
| 4416 | /* If MD is present, limit digest size based on LP256 */ | ||
| 4417 | if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) | ||
| 4418 | md_limit = SHA256_DIGEST_SIZE; | ||
| 4419 | |||
| 4306 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 4420 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
| 4307 | /* TODO: check if h/w supports alg */ | ||
| 4308 | struct caam_crypto_alg *t_alg; | 4421 | struct caam_crypto_alg *t_alg; |
| 4422 | struct caam_alg_template *alg = driver_algs + i; | ||
| 4423 | u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; | ||
| 4424 | |||
| 4425 | /* Skip DES algorithms if not supported by device */ | ||
| 4426 | if (!des_inst && | ||
| 4427 | ((alg_sel == OP_ALG_ALGSEL_3DES) || | ||
| 4428 | (alg_sel == OP_ALG_ALGSEL_DES))) | ||
| 4429 | continue; | ||
| 4430 | |||
| 4431 | /* Skip AES algorithms if not supported by device */ | ||
| 4432 | if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) | ||
| 4433 | continue; | ||
| 4309 | 4434 | ||
| 4310 | t_alg = caam_alg_alloc(&driver_algs[i]); | 4435 | t_alg = caam_alg_alloc(alg); |
| 4311 | if (IS_ERR(t_alg)) { | 4436 | if (IS_ERR(t_alg)) { |
| 4312 | err = PTR_ERR(t_alg); | 4437 | err = PTR_ERR(t_alg); |
| 4313 | pr_warn("%s alg allocation failed\n", | 4438 | pr_warn("%s alg allocation failed\n", alg->driver_name); |
| 4314 | driver_algs[i].driver_name); | ||
| 4315 | continue; | 4439 | continue; |
| 4316 | } | 4440 | } |
| 4317 | 4441 | ||
| @@ -4329,6 +4453,37 @@ static int __init caam_algapi_init(void) | |||
| 4329 | 4453 | ||
| 4330 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { | 4454 | for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { |
| 4331 | struct caam_aead_alg *t_alg = driver_aeads + i; | 4455 | struct caam_aead_alg *t_alg = driver_aeads + i; |
| 4456 | u32 c1_alg_sel = t_alg->caam.class1_alg_type & | ||
| 4457 | OP_ALG_ALGSEL_MASK; | ||
| 4458 | u32 c2_alg_sel = t_alg->caam.class2_alg_type & | ||
| 4459 | OP_ALG_ALGSEL_MASK; | ||
| 4460 | u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; | ||
| 4461 | |||
| 4462 | /* Skip DES algorithms if not supported by device */ | ||
| 4463 | if (!des_inst && | ||
| 4464 | ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || | ||
| 4465 | (c1_alg_sel == OP_ALG_ALGSEL_DES))) | ||
| 4466 | continue; | ||
| 4467 | |||
| 4468 | /* Skip AES algorithms if not supported by device */ | ||
| 4469 | if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) | ||
| 4470 | continue; | ||
| 4471 | |||
| 4472 | /* | ||
| 4473 | * Check support for AES algorithms not available | ||
| 4474 | * on LP devices. | ||
| 4475 | */ | ||
| 4476 | if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) | ||
| 4477 | if (alg_aai == OP_ALG_AAI_GCM) | ||
| 4478 | continue; | ||
| 4479 | |||
| 4480 | /* | ||
| 4481 | * Skip algorithms requiring message digests | ||
| 4482 | * if MD or MD size is not supported by device. | ||
| 4483 | */ | ||
| 4484 | if (c2_alg_sel && | ||
| 4485 | (!md_inst || (t_alg->aead.maxauthsize > md_limit))) | ||
| 4486 | continue; | ||
| 4332 | 4487 | ||
| 4333 | caam_aead_alg_init(t_alg); | 4488 | caam_aead_alg_init(t_alg); |
| 4334 | 4489 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index f9c78751989e..94433b9fc200 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -127,7 +127,7 @@ struct caam_hash_state { | |||
| 127 | int buflen_0; | 127 | int buflen_0; |
| 128 | u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; | 128 | u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; |
| 129 | int buflen_1; | 129 | int buflen_1; |
| 130 | u8 caam_ctx[MAX_CTX_LEN]; | 130 | u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; |
| 131 | int (*update)(struct ahash_request *req); | 131 | int (*update)(struct ahash_request *req); |
| 132 | int (*final)(struct ahash_request *req); | 132 | int (*final)(struct ahash_request *req); |
| 133 | int (*finup)(struct ahash_request *req); | 133 | int (*finup)(struct ahash_request *req); |
| @@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 807 | * allocate space for base edesc and hw desc commands, | 807 | * allocate space for base edesc and hw desc commands, |
| 808 | * link tables | 808 | * link tables |
| 809 | */ | 809 | */ |
| 810 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | 810 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + |
| 811 | sec4_sg_bytes, GFP_DMA | flags); | 811 | sec4_sg_bytes, GFP_DMA | flags); |
| 812 | if (!edesc) { | 812 | if (!edesc) { |
| 813 | dev_err(jrdev, | 813 | dev_err(jrdev, |
| @@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 829 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, | 829 | state->buf_dma = try_buf_map_to_sec4_sg(jrdev, |
| 830 | edesc->sec4_sg + 1, | 830 | edesc->sec4_sg + 1, |
| 831 | buf, state->buf_dma, | 831 | buf, state->buf_dma, |
| 832 | *buflen, last_buflen); | 832 | *next_buflen, *buflen); |
| 833 | 833 | ||
| 834 | if (src_nents) { | 834 | if (src_nents) { |
| 835 | src_map_to_sec4_sg(jrdev, req->src, src_nents, | 835 | src_map_to_sec4_sg(jrdev, req->src, src_nents, |
| @@ -919,8 +919,8 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
| 919 | sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); | 919 | sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); |
| 920 | 920 | ||
| 921 | /* allocate space for base edesc and hw desc commands, link tables */ | 921 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 922 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | 922 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, |
| 923 | sec4_sg_bytes, GFP_DMA | flags); | 923 | GFP_DMA | flags); |
| 924 | if (!edesc) { | 924 | if (!edesc) { |
| 925 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 925 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 926 | return -ENOMEM; | 926 | return -ENOMEM; |
| @@ -1006,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
| 1006 | sizeof(struct sec4_sg_entry); | 1006 | sizeof(struct sec4_sg_entry); |
| 1007 | 1007 | ||
| 1008 | /* allocate space for base edesc and hw desc commands, link tables */ | 1008 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1009 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | 1009 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, |
| 1010 | sec4_sg_bytes, GFP_DMA | flags); | 1010 | GFP_DMA | flags); |
| 1011 | if (!edesc) { | 1011 | if (!edesc) { |
| 1012 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1012 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1013 | return -ENOMEM; | 1013 | return -ENOMEM; |
| @@ -1092,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req) | |||
| 1092 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); | 1092 | sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); |
| 1093 | 1093 | ||
| 1094 | /* allocate space for base edesc and hw desc commands, link tables */ | 1094 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1095 | edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + | 1095 | edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN, |
| 1096 | DESC_JOB_IO_LEN, GFP_DMA | flags); | 1096 | GFP_DMA | flags); |
| 1097 | if (!edesc) { | 1097 | if (!edesc) { |
| 1098 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1098 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1099 | return -ENOMEM; | 1099 | return -ENOMEM; |
| @@ -1166,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1166 | int sh_len; | 1166 | int sh_len; |
| 1167 | 1167 | ||
| 1168 | /* allocate space for base edesc and hw desc commands, link tables */ | 1168 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1169 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, | 1169 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags); |
| 1170 | GFP_DMA | flags); | ||
| 1171 | if (!edesc) { | 1170 | if (!edesc) { |
| 1172 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1171 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1173 | return -ENOMEM; | 1172 | return -ENOMEM; |
| @@ -1246,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
| 1246 | * allocate space for base edesc and hw desc commands, | 1245 | * allocate space for base edesc and hw desc commands, |
| 1247 | * link tables | 1246 | * link tables |
| 1248 | */ | 1247 | */ |
| 1249 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | 1248 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + |
| 1250 | sec4_sg_bytes, GFP_DMA | flags); | 1249 | sec4_sg_bytes, GFP_DMA | flags); |
| 1251 | if (!edesc) { | 1250 | if (!edesc) { |
| 1252 | dev_err(jrdev, | 1251 | dev_err(jrdev, |
| @@ -1354,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
| 1354 | sizeof(struct sec4_sg_entry); | 1353 | sizeof(struct sec4_sg_entry); |
| 1355 | 1354 | ||
| 1356 | /* allocate space for base edesc and hw desc commands, link tables */ | 1355 | /* allocate space for base edesc and hw desc commands, link tables */ |
| 1357 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | 1356 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, |
| 1358 | sec4_sg_bytes, GFP_DMA | flags); | 1357 | GFP_DMA | flags); |
| 1359 | if (!edesc) { | 1358 | if (!edesc) { |
| 1360 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1359 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
| 1361 | return -ENOMEM; | 1360 | return -ENOMEM; |
| @@ -1449,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req) | |||
| 1449 | * allocate space for base edesc and hw desc commands, | 1448 | * allocate space for base edesc and hw desc commands, |
| 1450 | * link tables | 1449 | * link tables |
| 1451 | */ | 1450 | */ |
| 1452 | edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + | 1451 | edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + |
| 1453 | sec4_sg_bytes, GFP_DMA | flags); | 1452 | sec4_sg_bytes, GFP_DMA | flags); |
| 1454 | if (!edesc) { | 1453 | if (!edesc) { |
| 1455 | dev_err(jrdev, | 1454 | dev_err(jrdev, |
| @@ -1843,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
| 1843 | struct ahash_alg *halg; | 1842 | struct ahash_alg *halg; |
| 1844 | struct crypto_alg *alg; | 1843 | struct crypto_alg *alg; |
| 1845 | 1844 | ||
| 1846 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); | 1845 | t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); |
| 1847 | if (!t_alg) { | 1846 | if (!t_alg) { |
| 1848 | pr_err("failed to allocate t_alg\n"); | 1847 | pr_err("failed to allocate t_alg\n"); |
| 1849 | return ERR_PTR(-ENOMEM); | 1848 | return ERR_PTR(-ENOMEM); |
| @@ -1885,8 +1884,10 @@ static int __init caam_algapi_hash_init(void) | |||
| 1885 | struct device_node *dev_node; | 1884 | struct device_node *dev_node; |
| 1886 | struct platform_device *pdev; | 1885 | struct platform_device *pdev; |
| 1887 | struct device *ctrldev; | 1886 | struct device *ctrldev; |
| 1888 | void *priv; | ||
| 1889 | int i = 0, err = 0; | 1887 | int i = 0, err = 0; |
| 1888 | struct caam_drv_private *priv; | ||
| 1889 | unsigned int md_limit = SHA512_DIGEST_SIZE; | ||
| 1890 | u32 cha_inst, cha_vid; | ||
| 1890 | 1891 | ||
| 1891 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1892 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
| 1892 | if (!dev_node) { | 1893 | if (!dev_node) { |
| @@ -1912,19 +1913,40 @@ static int __init caam_algapi_hash_init(void) | |||
| 1912 | if (!priv) | 1913 | if (!priv) |
| 1913 | return -ENODEV; | 1914 | return -ENODEV; |
| 1914 | 1915 | ||
| 1916 | /* | ||
| 1917 | * Register crypto algorithms the device supports. First, identify | ||
| 1918 | * presence and attributes of MD block. | ||
| 1919 | */ | ||
| 1920 | cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); | ||
| 1921 | cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); | ||
| 1922 | |||
| 1923 | /* | ||
| 1924 | * Skip registration of any hashing algorithms if MD block | ||
| 1925 | * is not present. | ||
| 1926 | */ | ||
| 1927 | if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) | ||
| 1928 | return -ENODEV; | ||
| 1929 | |||
| 1930 | /* Limit digest size based on LP256 */ | ||
| 1931 | if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) | ||
| 1932 | md_limit = SHA256_DIGEST_SIZE; | ||
| 1933 | |||
| 1915 | INIT_LIST_HEAD(&hash_list); | 1934 | INIT_LIST_HEAD(&hash_list); |
| 1916 | 1935 | ||
| 1917 | /* register crypto algorithms the device supports */ | 1936 | /* register crypto algorithms the device supports */ |
| 1918 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | 1937 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { |
| 1919 | /* TODO: check if h/w supports alg */ | ||
| 1920 | struct caam_hash_alg *t_alg; | 1938 | struct caam_hash_alg *t_alg; |
| 1939 | struct caam_hash_template *alg = driver_hash + i; | ||
| 1940 | |||
| 1941 | /* If MD size is not supported by device, skip registration */ | ||
| 1942 | if (alg->template_ahash.halg.digestsize > md_limit) | ||
| 1943 | continue; | ||
| 1921 | 1944 | ||
| 1922 | /* register hmac version */ | 1945 | /* register hmac version */ |
| 1923 | t_alg = caam_hash_alloc(&driver_hash[i], true); | 1946 | t_alg = caam_hash_alloc(alg, true); |
| 1924 | if (IS_ERR(t_alg)) { | 1947 | if (IS_ERR(t_alg)) { |
| 1925 | err = PTR_ERR(t_alg); | 1948 | err = PTR_ERR(t_alg); |
| 1926 | pr_warn("%s alg allocation failed\n", | 1949 | pr_warn("%s alg allocation failed\n", alg->driver_name); |
| 1927 | driver_hash[i].driver_name); | ||
| 1928 | continue; | 1950 | continue; |
| 1929 | } | 1951 | } |
| 1930 | 1952 | ||
| @@ -1937,11 +1959,10 @@ static int __init caam_algapi_hash_init(void) | |||
| 1937 | list_add_tail(&t_alg->entry, &hash_list); | 1959 | list_add_tail(&t_alg->entry, &hash_list); |
| 1938 | 1960 | ||
| 1939 | /* register unkeyed version */ | 1961 | /* register unkeyed version */ |
| 1940 | t_alg = caam_hash_alloc(&driver_hash[i], false); | 1962 | t_alg = caam_hash_alloc(alg, false); |
| 1941 | if (IS_ERR(t_alg)) { | 1963 | if (IS_ERR(t_alg)) { |
| 1942 | err = PTR_ERR(t_alg); | 1964 | err = PTR_ERR(t_alg); |
| 1943 | pr_warn("%s alg allocation failed\n", | 1965 | pr_warn("%s alg allocation failed\n", alg->driver_name); |
| 1944 | driver_hash[i].driver_name); | ||
| 1945 | continue; | 1966 | continue; |
| 1946 | } | 1967 | } |
| 1947 | 1968 | ||
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 5095337205b8..9b92af2c7241 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
| @@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) | |||
| 108 | 108 | ||
| 109 | atomic_set(&bd->empty, BUF_NOT_EMPTY); | 109 | atomic_set(&bd->empty, BUF_NOT_EMPTY); |
| 110 | complete(&bd->filled); | 110 | complete(&bd->filled); |
| 111 | |||
| 112 | /* Buffer refilled, invalidate cache */ | ||
| 113 | dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); | ||
| 114 | |||
| 111 | #ifdef DEBUG | 115 | #ifdef DEBUG |
| 112 | print_hex_dump(KERN_ERR, "rng refreshed buf@: ", | 116 | print_hex_dump(KERN_ERR, "rng refreshed buf@: ", |
| 113 | DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); | 117 | DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); |
| @@ -311,7 +315,7 @@ static int __init caam_rng_init(void) | |||
| 311 | struct device_node *dev_node; | 315 | struct device_node *dev_node; |
| 312 | struct platform_device *pdev; | 316 | struct platform_device *pdev; |
| 313 | struct device *ctrldev; | 317 | struct device *ctrldev; |
| 314 | void *priv; | 318 | struct caam_drv_private *priv; |
| 315 | int err; | 319 | int err; |
| 316 | 320 | ||
| 317 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 321 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
| @@ -338,20 +342,32 @@ static int __init caam_rng_init(void) | |||
| 338 | if (!priv) | 342 | if (!priv) |
| 339 | return -ENODEV; | 343 | return -ENODEV; |
| 340 | 344 | ||
| 345 | /* Check for an instantiated RNG before registration */ | ||
| 346 | if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK)) | ||
| 347 | return -ENODEV; | ||
| 348 | |||
| 341 | dev = caam_jr_alloc(); | 349 | dev = caam_jr_alloc(); |
| 342 | if (IS_ERR(dev)) { | 350 | if (IS_ERR(dev)) { |
| 343 | pr_err("Job Ring Device allocation for transform failed\n"); | 351 | pr_err("Job Ring Device allocation for transform failed\n"); |
| 344 | return PTR_ERR(dev); | 352 | return PTR_ERR(dev); |
| 345 | } | 353 | } |
| 346 | rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA); | 354 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA); |
| 347 | if (!rng_ctx) | 355 | if (!rng_ctx) { |
| 348 | return -ENOMEM; | 356 | err = -ENOMEM; |
| 357 | goto free_caam_alloc; | ||
| 358 | } | ||
| 349 | err = caam_init_rng(rng_ctx, dev); | 359 | err = caam_init_rng(rng_ctx, dev); |
| 350 | if (err) | 360 | if (err) |
| 351 | return err; | 361 | goto free_rng_ctx; |
| 352 | 362 | ||
| 353 | dev_info(dev, "registering rng-caam\n"); | 363 | dev_info(dev, "registering rng-caam\n"); |
| 354 | return hwrng_register(&caam_rng); | 364 | return hwrng_register(&caam_rng); |
| 365 | |||
| 366 | free_rng_ctx: | ||
| 367 | kfree(rng_ctx); | ||
| 368 | free_caam_alloc: | ||
| 369 | caam_jr_free(dev); | ||
| 370 | return err; | ||
| 355 | } | 371 | } |
| 356 | 372 | ||
| 357 | module_init(caam_rng_init); | 373 | module_init(caam_rng_init); |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index f57f395db33f..b6955ecdfb3f 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
| 24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
| 25 | #include <linux/circ_buf.h> | 25 | #include <linux/circ_buf.h> |
| 26 | #include <linux/clk.h> | ||
| 26 | #include <net/xfrm.h> | 27 | #include <net/xfrm.h> |
| 27 | 28 | ||
| 28 | #include <crypto/algapi.h> | 29 | #include <crypto/algapi.h> |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index efacab7539ef..8abb4bc548cc 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -16,6 +16,24 @@ | |||
| 16 | #include "error.h" | 16 | #include "error.h" |
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| 19 | * i.MX targets tend to have clock control subsystems that can | ||
| 20 | * enable/disable clocking to our device. | ||
| 21 | */ | ||
| 22 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | ||
| 23 | static inline struct clk *caam_drv_identify_clk(struct device *dev, | ||
| 24 | char *clk_name) | ||
| 25 | { | ||
| 26 | return devm_clk_get(dev, clk_name); | ||
| 27 | } | ||
| 28 | #else | ||
| 29 | static inline struct clk *caam_drv_identify_clk(struct device *dev, | ||
| 30 | char *clk_name) | ||
| 31 | { | ||
| 32 | return NULL; | ||
| 33 | } | ||
| 34 | #endif | ||
| 35 | |||
| 36 | /* | ||
| 19 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | 37 | * Descriptor to instantiate RNG State Handle 0 in normal mode and |
| 20 | * load the JDKEK, TDKEK and TDSK registers | 38 | * load the JDKEK, TDKEK and TDSK registers |
| 21 | */ | 39 | */ |
| @@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, | |||
| 121 | flags |= DECO_JQCR_FOUR; | 139 | flags |= DECO_JQCR_FOUR; |
| 122 | 140 | ||
| 123 | /* Instruct the DECO to execute it */ | 141 | /* Instruct the DECO to execute it */ |
| 124 | wr_reg32(&deco->jr_ctl_hi, flags); | 142 | setbits32(&deco->jr_ctl_hi, flags); |
| 125 | 143 | ||
| 126 | timeout = 10000000; | 144 | timeout = 10000000; |
| 127 | do { | 145 | do { |
| @@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | |||
| 175 | { | 193 | { |
| 176 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 194 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
| 177 | struct caam_ctrl __iomem *ctrl; | 195 | struct caam_ctrl __iomem *ctrl; |
| 178 | u32 *desc, status, rdsta_val; | 196 | u32 *desc, status = 0, rdsta_val; |
| 179 | int ret = 0, sh_idx; | 197 | int ret = 0, sh_idx; |
| 180 | 198 | ||
| 181 | ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; | 199 | ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; |
| @@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | |||
| 207 | * CAAM eras), then try again. | 225 | * CAAM eras), then try again. |
| 208 | */ | 226 | */ |
| 209 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; | 227 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; |
| 210 | if (status || !(rdsta_val & (1 << sh_idx))) | 228 | if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || |
| 229 | !(rdsta_val & (1 << sh_idx))) | ||
| 211 | ret = -EAGAIN; | 230 | ret = -EAGAIN; |
| 212 | if (ret) | 231 | if (ret) |
| 213 | break; | 232 | break; |
| @@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev) | |||
| 279 | struct device *ctrldev; | 298 | struct device *ctrldev; |
| 280 | struct caam_drv_private *ctrlpriv; | 299 | struct caam_drv_private *ctrlpriv; |
| 281 | struct caam_ctrl __iomem *ctrl; | 300 | struct caam_ctrl __iomem *ctrl; |
| 282 | int ring, ret = 0; | 301 | int ring; |
| 283 | 302 | ||
| 284 | ctrldev = &pdev->dev; | 303 | ctrldev = &pdev->dev; |
| 285 | ctrlpriv = dev_get_drvdata(ctrldev); | 304 | ctrlpriv = dev_get_drvdata(ctrldev); |
| @@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev) | |||
| 303 | /* Unmap controller region */ | 322 | /* Unmap controller region */ |
| 304 | iounmap(ctrl); | 323 | iounmap(ctrl); |
| 305 | 324 | ||
| 306 | return ret; | 325 | /* shut clocks off before finalizing shutdown */ |
| 326 | clk_disable_unprepare(ctrlpriv->caam_ipg); | ||
| 327 | clk_disable_unprepare(ctrlpriv->caam_mem); | ||
| 328 | clk_disable_unprepare(ctrlpriv->caam_aclk); | ||
| 329 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | ||
| 330 | |||
| 331 | return 0; | ||
| 307 | } | 332 | } |
| 308 | 333 | ||
| 309 | /* | 334 | /* |
| @@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) | |||
| 370 | int caam_get_era(void) | 395 | int caam_get_era(void) |
| 371 | { | 396 | { |
| 372 | struct device_node *caam_node; | 397 | struct device_node *caam_node; |
| 373 | for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") { | 398 | int ret; |
| 374 | const uint32_t *prop = (uint32_t *)of_get_property(caam_node, | 399 | u32 prop; |
| 375 | "fsl,sec-era", | ||
| 376 | NULL); | ||
| 377 | return prop ? *prop : -ENOTSUPP; | ||
| 378 | } | ||
| 379 | 400 | ||
| 380 | return -ENOTSUPP; | 401 | caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
| 402 | ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); | ||
| 403 | of_node_put(caam_node); | ||
| 404 | |||
| 405 | return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop; | ||
| 381 | } | 406 | } |
| 382 | EXPORT_SYMBOL(caam_get_era); | 407 | EXPORT_SYMBOL(caam_get_era); |
| 383 | 408 | ||
| @@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 390 | struct device_node *nprop, *np; | 415 | struct device_node *nprop, *np; |
| 391 | struct caam_ctrl __iomem *ctrl; | 416 | struct caam_ctrl __iomem *ctrl; |
| 392 | struct caam_drv_private *ctrlpriv; | 417 | struct caam_drv_private *ctrlpriv; |
| 418 | struct clk *clk; | ||
| 393 | #ifdef CONFIG_DEBUG_FS | 419 | #ifdef CONFIG_DEBUG_FS |
| 394 | struct caam_perfmon *perfmon; | 420 | struct caam_perfmon *perfmon; |
| 395 | #endif | 421 | #endif |
| @@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 398 | int pg_size; | 424 | int pg_size; |
| 399 | int BLOCK_OFFSET = 0; | 425 | int BLOCK_OFFSET = 0; |
| 400 | 426 | ||
| 401 | ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private), | 427 | ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL); |
| 402 | GFP_KERNEL); | ||
| 403 | if (!ctrlpriv) | 428 | if (!ctrlpriv) |
| 404 | return -ENOMEM; | 429 | return -ENOMEM; |
| 405 | 430 | ||
| @@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev) | |||
| 408 | ctrlpriv->pdev = pdev; | 433 | ctrlpriv->pdev = pdev; |
| 409 | nprop = pdev->dev.of_node; | 434 | nprop = pdev->dev.of_node; |
| 410 | 435 | ||
| 436 | /* Enable clocking */ | ||
| 437 | clk = caam_drv_identify_clk(&pdev->dev, "ipg"); | ||
| 438 | if (IS_ERR(clk)) { | ||
| 439 | ret = PTR_ERR(clk); | ||
| 440 | dev_err(&pdev->dev, | ||
| 441 | "can't identify CAAM ipg clk: %d\n", ret); | ||
| 442 | return ret; | ||
| 443 | } | ||
| 444 | ctrlpriv->caam_ipg = clk; | ||
| 445 | |||
| 446 | clk = caam_drv_identify_clk(&pdev->dev, "mem"); | ||
| 447 | if (IS_ERR(clk)) { | ||
| 448 | ret = PTR_ERR(clk); | ||
| 449 | dev_err(&pdev->dev, | ||
| 450 | "can't identify CAAM mem clk: %d\n", ret); | ||
| 451 | return ret; | ||
| 452 | } | ||
| 453 | ctrlpriv->caam_mem = clk; | ||
| 454 | |||
| 455 | clk = caam_drv_identify_clk(&pdev->dev, "aclk"); | ||
| 456 | if (IS_ERR(clk)) { | ||
| 457 | ret = PTR_ERR(clk); | ||
| 458 | dev_err(&pdev->dev, | ||
| 459 | "can't identify CAAM aclk clk: %d\n", ret); | ||
| 460 | return ret; | ||
| 461 | } | ||
| 462 | ctrlpriv->caam_aclk = clk; | ||
| 463 | |||
| 464 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); | ||
| 465 | if (IS_ERR(clk)) { | ||
| 466 | ret = PTR_ERR(clk); | ||
| 467 | dev_err(&pdev->dev, | ||
| 468 | "can't identify CAAM emi_slow clk: %d\n", ret); | ||
| 469 | return ret; | ||
| 470 | } | ||
| 471 | ctrlpriv->caam_emi_slow = clk; | ||
| 472 | |||
| 473 | ret = clk_prepare_enable(ctrlpriv->caam_ipg); | ||
| 474 | if (ret < 0) { | ||
| 475 | dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret); | ||
| 476 | return ret; | ||
| 477 | } | ||
| 478 | |||
| 479 | ret = clk_prepare_enable(ctrlpriv->caam_mem); | ||
| 480 | if (ret < 0) { | ||
| 481 | dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", | ||
| 482 | ret); | ||
| 483 | goto disable_caam_ipg; | ||
| 484 | } | ||
| 485 | |||
| 486 | ret = clk_prepare_enable(ctrlpriv->caam_aclk); | ||
| 487 | if (ret < 0) { | ||
| 488 | dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret); | ||
| 489 | goto disable_caam_mem; | ||
| 490 | } | ||
| 491 | |||
| 492 | ret = clk_prepare_enable(ctrlpriv->caam_emi_slow); | ||
| 493 | if (ret < 0) { | ||
| 494 | dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n", | ||
| 495 | ret); | ||
| 496 | goto disable_caam_aclk; | ||
| 497 | } | ||
| 498 | |||
| 411 | /* Get configuration properties from device tree */ | 499 | /* Get configuration properties from device tree */ |
| 412 | /* First, get register page */ | 500 | /* First, get register page */ |
| 413 | ctrl = of_iomap(nprop, 0); | 501 | ctrl = of_iomap(nprop, 0); |
| 414 | if (ctrl == NULL) { | 502 | if (ctrl == NULL) { |
| 415 | dev_err(dev, "caam: of_iomap() failed\n"); | 503 | dev_err(dev, "caam: of_iomap() failed\n"); |
| 416 | return -ENOMEM; | 504 | ret = -ENOMEM; |
| 505 | goto disable_caam_emi_slow; | ||
| 417 | } | 506 | } |
| 418 | /* Finding the page size for using the CTPR_MS register */ | 507 | /* Finding the page size for using the CTPR_MS register */ |
| 419 | comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); | 508 | comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); |
| @@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev) | |||
| 444 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, | 533 | * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, |
| 445 | * long pointers in master configuration register | 534 | * long pointers in master configuration register |
| 446 | */ | 535 | */ |
| 447 | setbits32(&ctrl->mcr, MCFGR_WDENABLE | | 536 | clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | |
| 448 | (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0)); | 537 | MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ? |
| 538 | MCFGR_LONG_PTR : 0)); | ||
| 449 | 539 | ||
| 450 | /* | 540 | /* |
| 451 | * Read the Compile Time paramters and SCFGR to determine | 541 | * Read the Compile Time paramters and SCFGR to determine |
| @@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev) | |||
| 492 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) | 582 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) |
| 493 | rspec++; | 583 | rspec++; |
| 494 | 584 | ||
| 495 | ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev, | 585 | ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec, |
| 496 | sizeof(struct platform_device *) * rspec, | 586 | sizeof(*ctrlpriv->jrpdev), GFP_KERNEL); |
| 497 | GFP_KERNEL); | ||
| 498 | if (ctrlpriv->jrpdev == NULL) { | 587 | if (ctrlpriv->jrpdev == NULL) { |
| 499 | iounmap(ctrl); | 588 | ret = -ENOMEM; |
| 500 | return -ENOMEM; | 589 | goto iounmap_ctrl; |
| 501 | } | 590 | } |
| 502 | 591 | ||
| 503 | ring = 0; | 592 | ring = 0; |
| @@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev) | |||
| 537 | /* If no QI and no rings specified, quit and go home */ | 626 | /* If no QI and no rings specified, quit and go home */ |
| 538 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { | 627 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { |
| 539 | dev_err(dev, "no queues configured, terminating\n"); | 628 | dev_err(dev, "no queues configured, terminating\n"); |
| 540 | caam_remove(pdev); | 629 | ret = -ENOMEM; |
| 541 | return -ENOMEM; | 630 | goto caam_remove; |
| 542 | } | 631 | } |
| 543 | 632 | ||
| 544 | cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls); | 633 | cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls); |
| @@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev) | |||
| 595 | } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); | 684 | } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); |
| 596 | if (ret) { | 685 | if (ret) { |
| 597 | dev_err(dev, "failed to instantiate RNG"); | 686 | dev_err(dev, "failed to instantiate RNG"); |
| 598 | caam_remove(pdev); | 687 | goto caam_remove; |
| 599 | return ret; | ||
| 600 | } | 688 | } |
| 601 | /* | 689 | /* |
| 602 | * Set handles init'ed by this module as the complement of the | 690 | * Set handles init'ed by this module as the complement of the |
| @@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev) | |||
| 700 | &ctrlpriv->ctl_tdsk_wrap); | 788 | &ctrlpriv->ctl_tdsk_wrap); |
| 701 | #endif | 789 | #endif |
| 702 | return 0; | 790 | return 0; |
| 791 | |||
| 792 | caam_remove: | ||
| 793 | caam_remove(pdev); | ||
| 794 | iounmap_ctrl: | ||
| 795 | iounmap(ctrl); | ||
| 796 | disable_caam_emi_slow: | ||
| 797 | clk_disable_unprepare(ctrlpriv->caam_emi_slow); | ||
| 798 | disable_caam_aclk: | ||
| 799 | clk_disable_unprepare(ctrlpriv->caam_aclk); | ||
| 800 | disable_caam_mem: | ||
| 801 | clk_disable_unprepare(ctrlpriv->caam_mem); | ||
| 802 | disable_caam_ipg: | ||
| 803 | clk_disable_unprepare(ctrlpriv->caam_ipg); | ||
| 804 | return ret; | ||
| 703 | } | 805 | } |
| 704 | 806 | ||
| 705 | static struct of_device_id caam_match[] = { | 807 | static struct of_device_id caam_match[] = { |
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index d397ff9d56fd..983d663ef671 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -8,12 +8,29 @@ | |||
| 8 | #ifndef DESC_H | 8 | #ifndef DESC_H |
| 9 | #define DESC_H | 9 | #define DESC_H |
| 10 | 10 | ||
| 11 | /* | ||
| 12 | * 16-byte hardware scatter/gather table | ||
| 13 | * An 8-byte table exists in the hardware spec, but has never been | ||
| 14 | * implemented to date. The 8/16 option is selected at RTL-compile-time. | ||
| 15 | * and this selection is visible in the Compile Time Parameters Register | ||
| 16 | */ | ||
| 17 | |||
| 18 | #define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */ | ||
| 19 | #define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */ | ||
| 20 | #define SEC4_SG_BPID_MASK 0x000000ff | ||
| 21 | #define SEC4_SG_BPID_SHIFT 16 | ||
| 22 | #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ | ||
| 23 | #define SEC4_SG_OFFS_MASK 0x00001fff | ||
| 24 | |||
| 11 | struct sec4_sg_entry { | 25 | struct sec4_sg_entry { |
| 26 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | ||
| 27 | u32 rsvd1; | ||
| 28 | dma_addr_t ptr; | ||
| 29 | #else | ||
| 12 | u64 ptr; | 30 | u64 ptr; |
| 13 | #define SEC4_SG_LEN_FIN 0x40000000 | 31 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */ |
| 14 | #define SEC4_SG_LEN_EXT 0x80000000 | ||
| 15 | u32 len; | 32 | u32 len; |
| 16 | u8 reserved; | 33 | u8 rsvd2; |
| 17 | u8 buf_pool_id; | 34 | u8 buf_pool_id; |
| 18 | u16 offset; | 35 | u16 offset; |
| 19 | }; | 36 | }; |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 9f79fd7bd4d7..98d07de24fc4 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
| @@ -367,7 +367,7 @@ do { \ | |||
| 367 | if (upper) \ | 367 | if (upper) \ |
| 368 | append_u64(desc, data); \ | 368 | append_u64(desc, data); \ |
| 369 | else \ | 369 | else \ |
| 370 | append_u32(desc, data); \ | 370 | append_u32(desc, lower_32_bits(data)); \ |
| 371 | } while (0) | 371 | } while (0) |
| 372 | 372 | ||
| 373 | #define append_math_add_imm_u64(desc, dest, src0, src1, data) \ | 373 | #define append_math_add_imm_u64(desc, dest, src0, src1, data) \ |
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 89b94cc9e7a2..e2bcacc1a921 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
| @@ -91,6 +91,11 @@ struct caam_drv_private { | |||
| 91 | Handles of the RNG4 block are initialized | 91 | Handles of the RNG4 block are initialized |
| 92 | by this driver */ | 92 | by this driver */ |
| 93 | 93 | ||
| 94 | struct clk *caam_ipg; | ||
| 95 | struct clk *caam_mem; | ||
| 96 | struct clk *caam_aclk; | ||
| 97 | struct clk *caam_emi_slow; | ||
| 98 | |||
| 94 | /* | 99 | /* |
| 95 | * debugfs entries for developer view into driver/device | 100 | * debugfs entries for developer view into driver/device |
| 96 | * variables at runtime. | 101 | * variables at runtime. |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index b8b5d47acd7a..f7e0d8d4c3da 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 202 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; | 202 | userdesc = jrp->entinfo[sw_idx].desc_addr_virt; |
| 203 | userstatus = jrp->outring[hw_idx].jrstatus; | 203 | userstatus = jrp->outring[hw_idx].jrstatus; |
| 204 | 204 | ||
| 205 | /* | ||
| 206 | * Make sure all information from the job has been obtained | ||
| 207 | * before telling CAAM that the job has been removed from the | ||
| 208 | * output ring. | ||
| 209 | */ | ||
| 210 | mb(); | ||
| 211 | |||
| 205 | /* set done */ | 212 | /* set done */ |
| 206 | wr_reg32(&jrp->rregs->outring_rmvd, 1); | 213 | wr_reg32(&jrp->rregs->outring_rmvd, 1); |
| 207 | 214 | ||
| @@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
| 351 | 358 | ||
| 352 | jrp->inpring[jrp->inp_ring_write_index] = desc_dma; | 359 | jrp->inpring[jrp->inp_ring_write_index] = desc_dma; |
| 353 | 360 | ||
| 361 | /* | ||
| 362 | * Guarantee that the descriptor's DMA address has been written to | ||
| 363 | * the next slot in the ring before the write index is updated, since | ||
| 364 | * other cores may update this index independently. | ||
| 365 | */ | ||
| 354 | smp_wmb(); | 366 | smp_wmb(); |
| 355 | 367 | ||
| 356 | jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & | 368 | jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & |
| 357 | (JOBR_DEPTH - 1); | 369 | (JOBR_DEPTH - 1); |
| 358 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); | 370 | jrp->head = (head + 1) & (JOBR_DEPTH - 1); |
| 359 | 371 | ||
| 372 | /* | ||
| 373 | * Ensure that all job information has been written before | ||
| 374 | * notifying CAAM that a new job was added to the input ring. | ||
| 375 | */ | ||
| 376 | wmb(); | ||
| 377 | |||
| 360 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); | 378 | wr_reg32(&jrp->rregs->inpring_jobadd, 1); |
| 361 | 379 | ||
| 362 | spin_unlock_bh(&jrp->inplock); | 380 | spin_unlock_bh(&jrp->inplock); |
| @@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev) | |||
| 392 | goto out_free_irq; | 410 | goto out_free_irq; |
| 393 | 411 | ||
| 394 | error = -ENOMEM; | 412 | error = -ENOMEM; |
| 395 | jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | 413 | jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) * |
| 396 | &inpbusaddr, GFP_KERNEL); | 414 | JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); |
| 397 | if (!jrp->inpring) | 415 | if (!jrp->inpring) |
| 398 | goto out_free_irq; | 416 | goto out_free_irq; |
| 399 | 417 | ||
| 400 | jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) * | 418 | jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) * |
| 401 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); | 419 | JOBR_DEPTH, &outbusaddr, GFP_KERNEL); |
| 402 | if (!jrp->outring) | 420 | if (!jrp->outring) |
| 403 | goto out_free_inpring; | 421 | goto out_free_inpring; |
| 404 | 422 | ||
| 405 | jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, | 423 | jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); |
| 406 | GFP_KERNEL); | ||
| 407 | if (!jrp->entinfo) | 424 | if (!jrp->entinfo) |
| 408 | goto out_free_outring; | 425 | goto out_free_outring; |
| 409 | 426 | ||
| @@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
| 461 | int error; | 478 | int error; |
| 462 | 479 | ||
| 463 | jrdev = &pdev->dev; | 480 | jrdev = &pdev->dev; |
| 464 | jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), | 481 | jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL); |
| 465 | GFP_KERNEL); | ||
| 466 | if (!jrpriv) | 482 | if (!jrpriv) |
| 467 | return -ENOMEM; | 483 | return -ENOMEM; |
| 468 | 484 | ||
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 672c97489505..a8a79975682f 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
| @@ -65,9 +65,31 @@ | |||
| 65 | * | 65 | * |
| 66 | */ | 66 | */ |
| 67 | 67 | ||
| 68 | #ifdef CONFIG_ARM | ||
| 69 | /* These are common macros for Power, put here for ARM */ | ||
| 70 | #define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr)) | ||
| 71 | #define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr)) | ||
| 72 | |||
| 73 | #define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a) | ||
| 74 | #define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a)) | ||
| 75 | |||
| 76 | #define out_le32(a, v) out_arch(l, le32, a, v) | ||
| 77 | #define in_le32(a) in_arch(l, le32, a) | ||
| 78 | |||
| 79 | #define out_be32(a, v) out_arch(l, be32, a, v) | ||
| 80 | #define in_be32(a) in_arch(l, be32, a) | ||
| 81 | |||
| 82 | #define clrsetbits(type, addr, clear, set) \ | ||
| 83 | out_##type((addr), (in_##type(addr) & ~(clear)) | (set)) | ||
| 84 | |||
| 85 | #define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set) | ||
| 86 | #define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) | ||
| 87 | #endif | ||
| 88 | |||
| 68 | #ifdef __BIG_ENDIAN | 89 | #ifdef __BIG_ENDIAN |
| 69 | #define wr_reg32(reg, data) out_be32(reg, data) | 90 | #define wr_reg32(reg, data) out_be32(reg, data) |
| 70 | #define rd_reg32(reg) in_be32(reg) | 91 | #define rd_reg32(reg) in_be32(reg) |
| 92 | #define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set) | ||
| 71 | #ifdef CONFIG_64BIT | 93 | #ifdef CONFIG_64BIT |
| 72 | #define wr_reg64(reg, data) out_be64(reg, data) | 94 | #define wr_reg64(reg, data) out_be64(reg, data) |
| 73 | #define rd_reg64(reg) in_be64(reg) | 95 | #define rd_reg64(reg) in_be64(reg) |
| @@ -76,6 +98,7 @@ | |||
| 76 | #ifdef __LITTLE_ENDIAN | 98 | #ifdef __LITTLE_ENDIAN |
| 77 | #define wr_reg32(reg, data) __raw_writel(data, reg) | 99 | #define wr_reg32(reg, data) __raw_writel(data, reg) |
| 78 | #define rd_reg32(reg) __raw_readl(reg) | 100 | #define rd_reg32(reg) __raw_readl(reg) |
| 101 | #define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set) | ||
| 79 | #ifdef CONFIG_64BIT | 102 | #ifdef CONFIG_64BIT |
| 80 | #define wr_reg64(reg, data) __raw_writeq(data, reg) | 103 | #define wr_reg64(reg, data) __raw_writeq(data, reg) |
| 81 | #define rd_reg64(reg) __raw_readq(reg) | 104 | #define rd_reg64(reg) __raw_readq(reg) |
| @@ -85,20 +108,31 @@ | |||
| 85 | 108 | ||
| 86 | /* | 109 | /* |
| 87 | * The only users of these wr/rd_reg64 functions is the Job Ring (JR). | 110 | * The only users of these wr/rd_reg64 functions is the Job Ring (JR). |
| 88 | * The DMA address registers in the JR are a pair of 32-bit registers. | 111 | * The DMA address registers in the JR are handled differently depending on |
| 89 | * The layout is: | 112 | * platform: |
| 113 | * | ||
| 114 | * 1. All BE CAAM platforms and i.MX platforms (LE CAAM): | ||
| 90 | * | 115 | * |
| 91 | * base + 0x0000 : most-significant 32 bits | 116 | * base + 0x0000 : most-significant 32 bits |
| 92 | * base + 0x0004 : least-significant 32 bits | 117 | * base + 0x0004 : least-significant 32 bits |
| 93 | * | 118 | * |
| 94 | * The 32-bit version of this core therefore has to write to base + 0x0004 | 119 | * The 32-bit version of this core therefore has to write to base + 0x0004 |
| 95 | * to set the 32-bit wide DMA address. This seems to be independent of the | 120 | * to set the 32-bit wide DMA address. |
| 96 | * endianness of the written/read data. | 121 | * |
| 122 | * 2. All other LE CAAM platforms (LS1021A etc.) | ||
| 123 | * base + 0x0000 : least-significant 32 bits | ||
| 124 | * base + 0x0004 : most-significant 32 bits | ||
| 97 | */ | 125 | */ |
| 98 | 126 | ||
| 99 | #ifndef CONFIG_64BIT | 127 | #ifndef CONFIG_64BIT |
| 128 | #if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \ | ||
| 129 | defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX) | ||
| 100 | #define REG64_MS32(reg) ((u32 __iomem *)(reg)) | 130 | #define REG64_MS32(reg) ((u32 __iomem *)(reg)) |
| 101 | #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1) | 131 | #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1) |
| 132 | #else | ||
| 133 | #define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1) | ||
| 134 | #define REG64_LS32(reg) ((u32 __iomem *)(reg)) | ||
| 135 | #endif | ||
| 102 | 136 | ||
| 103 | static inline void wr_reg64(u64 __iomem *reg, u64 data) | 137 | static inline void wr_reg64(u64 __iomem *reg, u64 data) |
| 104 | { | 138 | { |
| @@ -133,18 +167,28 @@ struct jr_outentry { | |||
| 133 | #define CHA_NUM_MS_DECONUM_SHIFT 24 | 167 | #define CHA_NUM_MS_DECONUM_SHIFT 24 |
| 134 | #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) | 168 | #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) |
| 135 | 169 | ||
| 136 | /* CHA Version IDs */ | 170 | /* |
| 171 | * CHA version IDs / instantiation bitfields | ||
| 172 | * Defined for use with the cha_id fields in perfmon, but the same shift/mask | ||
| 173 | * selectors can be used to pull out the number of instantiated blocks within | ||
| 174 | * cha_num fields in perfmon because the locations are the same. | ||
| 175 | */ | ||
| 137 | #define CHA_ID_LS_AES_SHIFT 0 | 176 | #define CHA_ID_LS_AES_SHIFT 0 |
| 138 | #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) | 177 | #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) |
| 178 | #define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT) | ||
| 179 | #define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT) | ||
| 139 | 180 | ||
| 140 | #define CHA_ID_LS_DES_SHIFT 4 | 181 | #define CHA_ID_LS_DES_SHIFT 4 |
| 141 | #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) | 182 | #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) |
| 142 | 183 | ||
| 143 | #define CHA_ID_LS_ARC4_SHIFT 8 | 184 | #define CHA_ID_LS_ARC4_SHIFT 8 |
| 144 | #define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) | 185 | #define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) |
| 145 | 186 | ||
| 146 | #define CHA_ID_LS_MD_SHIFT 12 | 187 | #define CHA_ID_LS_MD_SHIFT 12 |
| 147 | #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) | 188 | #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) |
| 189 | #define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT) | ||
| 190 | #define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT) | ||
| 191 | #define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT) | ||
| 148 | 192 | ||
| 149 | #define CHA_ID_LS_RNG_SHIFT 16 | 193 | #define CHA_ID_LS_RNG_SHIFT 16 |
| 150 | #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) | 194 | #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) |
| @@ -395,10 +439,16 @@ struct caam_ctrl { | |||
| 395 | /* AXI read cache control */ | 439 | /* AXI read cache control */ |
| 396 | #define MCFGR_ARCACHE_SHIFT 12 | 440 | #define MCFGR_ARCACHE_SHIFT 12 |
| 397 | #define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT) | 441 | #define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT) |
| 442 | #define MCFGR_ARCACHE_BUFF (0x1 << MCFGR_ARCACHE_SHIFT) | ||
| 443 | #define MCFGR_ARCACHE_CACH (0x2 << MCFGR_ARCACHE_SHIFT) | ||
| 444 | #define MCFGR_ARCACHE_RALL (0x4 << MCFGR_ARCACHE_SHIFT) | ||
| 398 | 445 | ||
| 399 | /* AXI write cache control */ | 446 | /* AXI write cache control */ |
| 400 | #define MCFGR_AWCACHE_SHIFT 8 | 447 | #define MCFGR_AWCACHE_SHIFT 8 |
| 401 | #define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT) | 448 | #define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT) |
| 449 | #define MCFGR_AWCACHE_BUFF (0x1 << MCFGR_AWCACHE_SHIFT) | ||
| 450 | #define MCFGR_AWCACHE_CACH (0x2 << MCFGR_AWCACHE_SHIFT) | ||
| 451 | #define MCFGR_AWCACHE_WALL (0x8 << MCFGR_AWCACHE_SHIFT) | ||
| 402 | 452 | ||
| 403 | /* AXI pipeline depth */ | 453 | /* AXI pipeline depth */ |
| 404 | #define MCFGR_AXIPIPE_SHIFT 4 | 454 | #define MCFGR_AXIPIPE_SHIFT 4 |
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index b68b74cc7b77..18cd6d1f5870 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | |||
| 15 | { | 15 | { |
| 16 | sec4_sg_ptr->ptr = dma; | 16 | sec4_sg_ptr->ptr = dma; |
| 17 | sec4_sg_ptr->len = len; | 17 | sec4_sg_ptr->len = len; |
| 18 | sec4_sg_ptr->reserved = 0; | ||
| 19 | sec4_sg_ptr->buf_pool_id = 0; | 18 | sec4_sg_ptr->buf_pool_id = 0; |
| 20 | sec4_sg_ptr->offset = offset; | 19 | sec4_sg_ptr->offset = offset; |
| 21 | #ifdef DEBUG | 20 | #ifdef DEBUG |
| @@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained( | |||
| 106 | { | 105 | { |
| 107 | if (unlikely(chained)) { | 106 | if (unlikely(chained)) { |
| 108 | int i; | 107 | int i; |
| 108 | struct scatterlist *tsg = sg; | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Use a local copy of the sg pointer to avoid moving the | ||
| 112 | * head of the list pointed to by sg as we walk the list. | ||
| 113 | */ | ||
| 109 | for (i = 0; i < nents; i++) { | 114 | for (i = 0; i < nents; i++) { |
| 110 | dma_unmap_sg(dev, sg, 1, dir); | 115 | dma_unmap_sg(dev, tsg, 1, dir); |
| 111 | sg = sg_next(sg); | 116 | tsg = sg_next(tsg); |
| 112 | } | 117 | } |
| 113 | } else if (nents) { | 118 | } else if (nents) { |
| 114 | dma_unmap_sg(dev, sg, nents, dir); | 119 | dma_unmap_sg(dev, sg, nents, dir); |
| @@ -119,19 +124,23 @@ static inline int dma_map_sg_chained( | |||
| 119 | struct device *dev, struct scatterlist *sg, unsigned int nents, | 124 | struct device *dev, struct scatterlist *sg, unsigned int nents, |
| 120 | enum dma_data_direction dir, bool chained) | 125 | enum dma_data_direction dir, bool chained) |
| 121 | { | 126 | { |
| 122 | struct scatterlist *first = sg; | ||
| 123 | |||
| 124 | if (unlikely(chained)) { | 127 | if (unlikely(chained)) { |
| 125 | int i; | 128 | int i; |
| 129 | struct scatterlist *tsg = sg; | ||
| 130 | |||
| 131 | /* | ||
| 132 | * Use a local copy of the sg pointer to avoid moving the | ||
| 133 | * head of the list pointed to by sg as we walk the list. | ||
| 134 | */ | ||
| 126 | for (i = 0; i < nents; i++) { | 135 | for (i = 0; i < nents; i++) { |
| 127 | if (!dma_map_sg(dev, sg, 1, dir)) { | 136 | if (!dma_map_sg(dev, tsg, 1, dir)) { |
| 128 | dma_unmap_sg_chained(dev, first, i, dir, | 137 | dma_unmap_sg_chained(dev, sg, i, dir, |
| 129 | chained); | 138 | chained); |
| 130 | nents = 0; | 139 | nents = 0; |
| 131 | break; | 140 | break; |
| 132 | } | 141 | } |
| 133 | 142 | ||
| 134 | sg = sg_next(sg); | 143 | tsg = sg_next(tsg); |
| 135 | } | 144 | } |
| 136 | } else | 145 | } else |
| 137 | nents = dma_map_sg(dev, sg, nents, dir); | 146 | nents = dma_map_sg(dev, sg, nents, dir); |
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index f2e6de361fd1..bb241c3ab6b9 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c | |||
| @@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = { | |||
| 216 | { "AMDI0C00", 0 }, | 216 | { "AMDI0C00", 0 }, |
| 217 | { }, | 217 | { }, |
| 218 | }; | 218 | }; |
| 219 | MODULE_DEVICE_TABLE(acpi, ccp_acpi_match); | ||
| 219 | #endif | 220 | #endif |
| 220 | 221 | ||
| 221 | #ifdef CONFIG_OF | 222 | #ifdef CONFIG_OF |
| @@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = { | |||
| 223 | { .compatible = "amd,ccp-seattle-v1a" }, | 224 | { .compatible = "amd,ccp-seattle-v1a" }, |
| 224 | { }, | 225 | { }, |
| 225 | }; | 226 | }; |
| 227 | MODULE_DEVICE_TABLE(of, ccp_of_match); | ||
| 226 | #endif | 228 | #endif |
| 227 | 229 | ||
| 228 | static struct platform_driver ccp_platform_driver = { | 230 | static struct platform_driver ccp_platform_driver = { |
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index ad47d0d61098..68e8aa90fe01 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c | |||
| @@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) | |||
| 334 | 334 | ||
| 335 | hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx"); | 335 | hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx"); |
| 336 | if (!hdev->dma_lch) { | 336 | if (!hdev->dma_lch) { |
| 337 | dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n"); | 337 | dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); |
| 338 | return -EBUSY; | 338 | return -EBUSY; |
| 339 | } | 339 | } |
| 340 | dma_conf.direction = DMA_MEM_TO_DEV; | 340 | dma_conf.direction = DMA_MEM_TO_DEV; |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 402631a19a11..8f2790353281 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
| @@ -156,7 +156,8 @@ struct ablk_ctx { | |||
| 156 | }; | 156 | }; |
| 157 | 157 | ||
| 158 | struct aead_ctx { | 158 | struct aead_ctx { |
| 159 | struct buffer_desc *buffer; | 159 | struct buffer_desc *src; |
| 160 | struct buffer_desc *dst; | ||
| 160 | struct scatterlist ivlist; | 161 | struct scatterlist ivlist; |
| 161 | /* used when the hmac is not on one sg entry */ | 162 | /* used when the hmac is not on one sg entry */ |
| 162 | u8 *hmac_virt; | 163 | u8 *hmac_virt; |
| @@ -198,6 +199,15 @@ struct ixp_alg { | |||
| 198 | int registered; | 199 | int registered; |
| 199 | }; | 200 | }; |
| 200 | 201 | ||
| 202 | struct ixp_aead_alg { | ||
| 203 | struct aead_alg crypto; | ||
| 204 | const struct ix_hash_algo *hash; | ||
| 205 | u32 cfg_enc; | ||
| 206 | u32 cfg_dec; | ||
| 207 | |||
| 208 | int registered; | ||
| 209 | }; | ||
| 210 | |||
| 201 | static const struct ix_hash_algo hash_alg_md5 = { | 211 | static const struct ix_hash_algo hash_alg_md5 = { |
| 202 | .cfgword = 0xAA010004, | 212 | .cfgword = 0xAA010004, |
| 203 | .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" | 213 | .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" |
| @@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt) | |||
| 339 | struct aead_ctx *req_ctx = aead_request_ctx(req); | 349 | struct aead_ctx *req_ctx = aead_request_ctx(req); |
| 340 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 350 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 341 | int authsize = crypto_aead_authsize(tfm); | 351 | int authsize = crypto_aead_authsize(tfm); |
| 342 | int decryptlen = req->cryptlen - authsize; | 352 | int decryptlen = req->assoclen + req->cryptlen - authsize; |
| 343 | 353 | ||
| 344 | if (req_ctx->encrypt) { | 354 | if (req_ctx->encrypt) { |
| 345 | scatterwalk_map_and_copy(req_ctx->hmac_virt, | 355 | scatterwalk_map_and_copy(req_ctx->hmac_virt, |
| 346 | req->src, decryptlen, authsize, 1); | 356 | req->dst, decryptlen, authsize, 1); |
| 347 | } | 357 | } |
| 348 | dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes); | 358 | dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes); |
| 349 | } | 359 | } |
| @@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys) | |||
| 364 | struct aead_request *req = crypt->data.aead_req; | 374 | struct aead_request *req = crypt->data.aead_req; |
| 365 | struct aead_ctx *req_ctx = aead_request_ctx(req); | 375 | struct aead_ctx *req_ctx = aead_request_ctx(req); |
| 366 | 376 | ||
| 367 | free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); | 377 | free_buf_chain(dev, req_ctx->src, crypt->src_buf); |
| 378 | free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | ||
| 368 | if (req_ctx->hmac_virt) { | 379 | if (req_ctx->hmac_virt) { |
| 369 | finish_scattered_hmac(crypt); | 380 | finish_scattered_hmac(crypt); |
| 370 | } | 381 | } |
| @@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm) | |||
| 573 | return init_tfm(tfm); | 584 | return init_tfm(tfm); |
| 574 | } | 585 | } |
| 575 | 586 | ||
| 576 | static int init_tfm_aead(struct crypto_tfm *tfm) | 587 | static int init_tfm_aead(struct crypto_aead *tfm) |
| 577 | { | 588 | { |
| 578 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 589 | crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx)); |
| 579 | sizeof(struct aead_ctx)); | 590 | return init_tfm(crypto_aead_tfm(tfm)); |
| 580 | return init_tfm(tfm); | ||
| 581 | } | 591 | } |
| 582 | 592 | ||
| 583 | static void exit_tfm(struct crypto_tfm *tfm) | 593 | static void exit_tfm(struct crypto_tfm *tfm) |
| @@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm) | |||
| 587 | free_sa_dir(&ctx->decrypt); | 597 | free_sa_dir(&ctx->decrypt); |
| 588 | } | 598 | } |
| 589 | 599 | ||
| 600 | static void exit_tfm_aead(struct crypto_aead *tfm) | ||
| 601 | { | ||
| 602 | exit_tfm(crypto_aead_tfm(tfm)); | ||
| 603 | } | ||
| 604 | |||
| 590 | static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target, | 605 | static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target, |
| 591 | int init_len, u32 ctx_addr, const u8 *key, int key_len) | 606 | int init_len, u32 ctx_addr, const u8 *key, int key_len) |
| 592 | { | 607 | { |
| @@ -969,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req) | |||
| 969 | return ret; | 984 | return ret; |
| 970 | } | 985 | } |
| 971 | 986 | ||
| 972 | static int hmac_inconsistent(struct scatterlist *sg, unsigned start, | ||
| 973 | unsigned int nbytes) | ||
| 974 | { | ||
| 975 | int offset = 0; | ||
| 976 | |||
| 977 | if (!nbytes) | ||
| 978 | return 0; | ||
| 979 | |||
| 980 | for (;;) { | ||
| 981 | if (start < offset + sg->length) | ||
| 982 | break; | ||
| 983 | |||
| 984 | offset += sg->length; | ||
| 985 | sg = sg_next(sg); | ||
| 986 | } | ||
| 987 | return (start + nbytes > offset + sg->length); | ||
| 988 | } | ||
| 989 | |||
| 990 | static int aead_perform(struct aead_request *req, int encrypt, | 987 | static int aead_perform(struct aead_request *req, int encrypt, |
| 991 | int cryptoffset, int eff_cryptlen, u8 *iv) | 988 | int cryptoffset, int eff_cryptlen, u8 *iv) |
| 992 | { | 989 | { |
| @@ -1002,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1002 | struct device *dev = &pdev->dev; | 999 | struct device *dev = &pdev->dev; |
| 1003 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | 1000 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
| 1004 | GFP_KERNEL : GFP_ATOMIC; | 1001 | GFP_KERNEL : GFP_ATOMIC; |
| 1002 | enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; | ||
| 1003 | unsigned int lastlen; | ||
| 1005 | 1004 | ||
| 1006 | if (qmgr_stat_full(SEND_QID)) | 1005 | if (qmgr_stat_full(SEND_QID)) |
| 1007 | return -EAGAIN; | 1006 | return -EAGAIN; |
| @@ -1030,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1030 | crypt->crypt_len = eff_cryptlen; | 1029 | crypt->crypt_len = eff_cryptlen; |
| 1031 | 1030 | ||
| 1032 | crypt->auth_offs = 0; | 1031 | crypt->auth_offs = 0; |
| 1033 | crypt->auth_len = req->assoclen + ivsize + cryptlen; | 1032 | crypt->auth_len = req->assoclen + cryptlen; |
| 1034 | BUG_ON(ivsize && !req->iv); | 1033 | BUG_ON(ivsize && !req->iv); |
| 1035 | memcpy(crypt->iv, req->iv, ivsize); | 1034 | memcpy(crypt->iv, req->iv, ivsize); |
| 1036 | 1035 | ||
| 1036 | req_ctx->dst = NULL; | ||
| 1037 | |||
| 1037 | if (req->src != req->dst) { | 1038 | if (req->src != req->dst) { |
| 1038 | BUG(); /* -ENOTSUP because of my laziness */ | 1039 | struct buffer_desc dst_hook; |
| 1040 | |||
| 1041 | crypt->mode |= NPE_OP_NOT_IN_PLACE; | ||
| 1042 | src_direction = DMA_TO_DEVICE; | ||
| 1043 | |||
| 1044 | buf = chainup_buffers(dev, req->dst, crypt->auth_len, | ||
| 1045 | &dst_hook, flags, DMA_FROM_DEVICE); | ||
| 1046 | req_ctx->dst = dst_hook.next; | ||
| 1047 | crypt->dst_buf = dst_hook.phys_next; | ||
| 1048 | |||
| 1049 | if (!buf) | ||
| 1050 | goto free_buf_dst; | ||
| 1051 | |||
| 1052 | if (encrypt) { | ||
| 1053 | lastlen = buf->buf_len; | ||
| 1054 | if (lastlen >= authsize) | ||
| 1055 | crypt->icv_rev_aes = buf->phys_addr + | ||
| 1056 | buf->buf_len - authsize; | ||
| 1057 | } | ||
| 1039 | } | 1058 | } |
| 1040 | 1059 | ||
| 1041 | /* ASSOC data */ | 1060 | buf = chainup_buffers(dev, req->src, crypt->auth_len, |
| 1042 | buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook, | 1061 | &src_hook, flags, src_direction); |
| 1043 | flags, DMA_TO_DEVICE); | 1062 | req_ctx->src = src_hook.next; |
| 1044 | req_ctx->buffer = src_hook.next; | ||
| 1045 | crypt->src_buf = src_hook.phys_next; | 1063 | crypt->src_buf = src_hook.phys_next; |
| 1046 | if (!buf) | 1064 | if (!buf) |
| 1047 | goto out; | 1065 | goto free_buf_src; |
| 1048 | /* IV */ | 1066 | |
| 1049 | sg_init_table(&req_ctx->ivlist, 1); | 1067 | if (!encrypt || !req_ctx->dst) { |
| 1050 | sg_set_buf(&req_ctx->ivlist, iv, ivsize); | 1068 | lastlen = buf->buf_len; |
| 1051 | buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags, | 1069 | if (lastlen >= authsize) |
| 1052 | DMA_BIDIRECTIONAL); | 1070 | crypt->icv_rev_aes = buf->phys_addr + |
| 1053 | if (!buf) | 1071 | buf->buf_len - authsize; |
| 1054 | goto free_chain; | 1072 | } |
| 1055 | if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) { | 1073 | |
| 1074 | if (unlikely(lastlen < authsize)) { | ||
| 1056 | /* The 12 hmac bytes are scattered, | 1075 | /* The 12 hmac bytes are scattered, |
| 1057 | * we need to copy them into a safe buffer */ | 1076 | * we need to copy them into a safe buffer */ |
| 1058 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, | 1077 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, |
| 1059 | &crypt->icv_rev_aes); | 1078 | &crypt->icv_rev_aes); |
| 1060 | if (unlikely(!req_ctx->hmac_virt)) | 1079 | if (unlikely(!req_ctx->hmac_virt)) |
| 1061 | goto free_chain; | 1080 | goto free_buf_src; |
| 1062 | if (!encrypt) { | 1081 | if (!encrypt) { |
| 1063 | scatterwalk_map_and_copy(req_ctx->hmac_virt, | 1082 | scatterwalk_map_and_copy(req_ctx->hmac_virt, |
| 1064 | req->src, cryptlen, authsize, 0); | 1083 | req->src, cryptlen, authsize, 0); |
| @@ -1067,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1067 | } else { | 1086 | } else { |
| 1068 | req_ctx->hmac_virt = NULL; | 1087 | req_ctx->hmac_virt = NULL; |
| 1069 | } | 1088 | } |
| 1070 | /* Crypt */ | ||
| 1071 | buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags, | ||
| 1072 | DMA_BIDIRECTIONAL); | ||
| 1073 | if (!buf) | ||
| 1074 | goto free_hmac_virt; | ||
| 1075 | if (!req_ctx->hmac_virt) { | ||
| 1076 | crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize; | ||
| 1077 | } | ||
| 1078 | 1089 | ||
| 1079 | crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD; | 1090 | crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD; |
| 1080 | qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); | 1091 | qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); |
| 1081 | BUG_ON(qmgr_stat_overflow(SEND_QID)); | 1092 | BUG_ON(qmgr_stat_overflow(SEND_QID)); |
| 1082 | return -EINPROGRESS; | 1093 | return -EINPROGRESS; |
| 1083 | free_hmac_virt: | 1094 | |
| 1084 | if (req_ctx->hmac_virt) { | 1095 | free_buf_src: |
| 1085 | dma_pool_free(buffer_pool, req_ctx->hmac_virt, | 1096 | free_buf_chain(dev, req_ctx->src, crypt->src_buf); |
| 1086 | crypt->icv_rev_aes); | 1097 | free_buf_dst: |
| 1087 | } | 1098 | free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); |
| 1088 | free_chain: | ||
| 1089 | free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); | ||
| 1090 | out: | ||
| 1091 | crypt->ctl_flags = CTL_FLAG_UNUSED; | 1099 | crypt->ctl_flags = CTL_FLAG_UNUSED; |
| 1092 | return -ENOMEM; | 1100 | return -ENOMEM; |
| 1093 | } | 1101 | } |
| @@ -1173,40 +1181,12 @@ badkey: | |||
| 1173 | 1181 | ||
| 1174 | static int aead_encrypt(struct aead_request *req) | 1182 | static int aead_encrypt(struct aead_request *req) |
| 1175 | { | 1183 | { |
| 1176 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); | 1184 | return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv); |
| 1177 | return aead_perform(req, 1, req->assoclen + ivsize, | ||
| 1178 | req->cryptlen, req->iv); | ||
| 1179 | } | 1185 | } |
| 1180 | 1186 | ||
| 1181 | static int aead_decrypt(struct aead_request *req) | 1187 | static int aead_decrypt(struct aead_request *req) |
| 1182 | { | 1188 | { |
| 1183 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); | 1189 | return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv); |
| 1184 | return aead_perform(req, 0, req->assoclen + ivsize, | ||
| 1185 | req->cryptlen, req->iv); | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | static int aead_givencrypt(struct aead_givcrypt_request *req) | ||
| 1189 | { | ||
| 1190 | struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); | ||
| 1191 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); | ||
| 1192 | unsigned len, ivsize = crypto_aead_ivsize(tfm); | ||
| 1193 | __be64 seq; | ||
| 1194 | |||
| 1195 | /* copied from eseqiv.c */ | ||
| 1196 | if (!ctx->salted) { | ||
| 1197 | get_random_bytes(ctx->salt, ivsize); | ||
| 1198 | ctx->salted = 1; | ||
| 1199 | } | ||
| 1200 | memcpy(req->areq.iv, ctx->salt, ivsize); | ||
| 1201 | len = ivsize; | ||
| 1202 | if (ivsize > sizeof(u64)) { | ||
| 1203 | memset(req->giv, 0, ivsize - sizeof(u64)); | ||
| 1204 | len = sizeof(u64); | ||
| 1205 | } | ||
| 1206 | seq = cpu_to_be64(req->seq); | ||
| 1207 | memcpy(req->giv + ivsize - len, &seq, len); | ||
| 1208 | return aead_perform(&req->areq, 1, req->areq.assoclen, | ||
| 1209 | req->areq.cryptlen +ivsize, req->giv); | ||
| 1210 | } | 1190 | } |
| 1211 | 1191 | ||
| 1212 | static struct ixp_alg ixp4xx_algos[] = { | 1192 | static struct ixp_alg ixp4xx_algos[] = { |
| @@ -1319,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = { | |||
| 1319 | }, | 1299 | }, |
| 1320 | .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR, | 1300 | .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR, |
| 1321 | .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR, | 1301 | .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR, |
| 1322 | }, { | 1302 | } }; |
| 1303 | |||
| 1304 | static struct ixp_aead_alg ixp4xx_aeads[] = { | ||
| 1305 | { | ||
| 1323 | .crypto = { | 1306 | .crypto = { |
| 1324 | .cra_name = "authenc(hmac(md5),cbc(des))", | 1307 | .base = { |
| 1325 | .cra_blocksize = DES_BLOCK_SIZE, | 1308 | .cra_name = "authenc(hmac(md5),cbc(des))", |
| 1326 | .cra_u = { .aead = { | 1309 | .cra_blocksize = DES_BLOCK_SIZE, |
| 1327 | .ivsize = DES_BLOCK_SIZE, | 1310 | }, |
| 1328 | .maxauthsize = MD5_DIGEST_SIZE, | 1311 | .ivsize = DES_BLOCK_SIZE, |
| 1329 | } | 1312 | .maxauthsize = MD5_DIGEST_SIZE, |
| 1330 | } | ||
| 1331 | }, | 1313 | }, |
| 1332 | .hash = &hash_alg_md5, | 1314 | .hash = &hash_alg_md5, |
| 1333 | .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, | 1315 | .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, |
| 1334 | .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, | 1316 | .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, |
| 1335 | }, { | 1317 | }, { |
| 1336 | .crypto = { | 1318 | .crypto = { |
| 1337 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 1319 | .base = { |
| 1338 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1320 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
| 1339 | .cra_u = { .aead = { | 1321 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 1340 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1322 | }, |
| 1341 | .maxauthsize = MD5_DIGEST_SIZE, | 1323 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 1342 | } | 1324 | .maxauthsize = MD5_DIGEST_SIZE, |
| 1343 | } | ||
| 1344 | }, | 1325 | }, |
| 1345 | .hash = &hash_alg_md5, | 1326 | .hash = &hash_alg_md5, |
| 1346 | .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, | 1327 | .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, |
| 1347 | .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, | 1328 | .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, |
| 1348 | }, { | 1329 | }, { |
| 1349 | .crypto = { | 1330 | .crypto = { |
| 1350 | .cra_name = "authenc(hmac(sha1),cbc(des))", | 1331 | .base = { |
| 1351 | .cra_blocksize = DES_BLOCK_SIZE, | 1332 | .cra_name = "authenc(hmac(sha1),cbc(des))", |
| 1352 | .cra_u = { .aead = { | 1333 | .cra_blocksize = DES_BLOCK_SIZE, |
| 1334 | }, | ||
| 1353 | .ivsize = DES_BLOCK_SIZE, | 1335 | .ivsize = DES_BLOCK_SIZE, |
| 1354 | .maxauthsize = SHA1_DIGEST_SIZE, | 1336 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 1355 | } | ||
| 1356 | } | ||
| 1357 | }, | 1337 | }, |
| 1358 | .hash = &hash_alg_sha1, | 1338 | .hash = &hash_alg_sha1, |
| 1359 | .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, | 1339 | .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, |
| 1360 | .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, | 1340 | .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, |
| 1361 | }, { | 1341 | }, { |
| 1362 | .crypto = { | 1342 | .crypto = { |
| 1363 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | 1343 | .base = { |
| 1364 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1344 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", |
| 1365 | .cra_u = { .aead = { | 1345 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 1366 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1346 | }, |
| 1367 | .maxauthsize = SHA1_DIGEST_SIZE, | 1347 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 1368 | } | 1348 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 1369 | } | ||
| 1370 | }, | 1349 | }, |
| 1371 | .hash = &hash_alg_sha1, | 1350 | .hash = &hash_alg_sha1, |
| 1372 | .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, | 1351 | .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, |
| 1373 | .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, | 1352 | .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, |
| 1374 | }, { | 1353 | }, { |
| 1375 | .crypto = { | 1354 | .crypto = { |
| 1376 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 1355 | .base = { |
| 1377 | .cra_blocksize = AES_BLOCK_SIZE, | 1356 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
| 1378 | .cra_u = { .aead = { | 1357 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1379 | .ivsize = AES_BLOCK_SIZE, | 1358 | }, |
| 1380 | .maxauthsize = MD5_DIGEST_SIZE, | 1359 | .ivsize = AES_BLOCK_SIZE, |
| 1381 | } | 1360 | .maxauthsize = MD5_DIGEST_SIZE, |
| 1382 | } | ||
| 1383 | }, | 1361 | }, |
| 1384 | .hash = &hash_alg_md5, | 1362 | .hash = &hash_alg_md5, |
| 1385 | .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, | 1363 | .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, |
| 1386 | .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, | 1364 | .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, |
| 1387 | }, { | 1365 | }, { |
| 1388 | .crypto = { | 1366 | .crypto = { |
| 1389 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1367 | .base = { |
| 1390 | .cra_blocksize = AES_BLOCK_SIZE, | 1368 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
| 1391 | .cra_u = { .aead = { | 1369 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1392 | .ivsize = AES_BLOCK_SIZE, | 1370 | }, |
| 1393 | .maxauthsize = SHA1_DIGEST_SIZE, | 1371 | .ivsize = AES_BLOCK_SIZE, |
| 1394 | } | 1372 | .maxauthsize = SHA1_DIGEST_SIZE, |
| 1395 | } | ||
| 1396 | }, | 1373 | }, |
| 1397 | .hash = &hash_alg_sha1, | 1374 | .hash = &hash_alg_sha1, |
| 1398 | .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, | 1375 | .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, |
| @@ -1436,32 +1413,20 @@ static int __init ixp_module_init(void) | |||
| 1436 | if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) { | 1413 | if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) { |
| 1437 | continue; | 1414 | continue; |
| 1438 | } | 1415 | } |
| 1439 | if (!ixp4xx_algos[i].hash) { | 1416 | |
| 1440 | /* block ciphers */ | 1417 | /* block ciphers */ |
| 1441 | cra->cra_type = &crypto_ablkcipher_type; | 1418 | cra->cra_type = &crypto_ablkcipher_type; |
| 1442 | cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 1419 | cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 1443 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 1420 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 1444 | CRYPTO_ALG_ASYNC; | 1421 | CRYPTO_ALG_ASYNC; |
| 1445 | if (!cra->cra_ablkcipher.setkey) | 1422 | if (!cra->cra_ablkcipher.setkey) |
| 1446 | cra->cra_ablkcipher.setkey = ablk_setkey; | 1423 | cra->cra_ablkcipher.setkey = ablk_setkey; |
| 1447 | if (!cra->cra_ablkcipher.encrypt) | 1424 | if (!cra->cra_ablkcipher.encrypt) |
| 1448 | cra->cra_ablkcipher.encrypt = ablk_encrypt; | 1425 | cra->cra_ablkcipher.encrypt = ablk_encrypt; |
| 1449 | if (!cra->cra_ablkcipher.decrypt) | 1426 | if (!cra->cra_ablkcipher.decrypt) |
| 1450 | cra->cra_ablkcipher.decrypt = ablk_decrypt; | 1427 | cra->cra_ablkcipher.decrypt = ablk_decrypt; |
| 1451 | cra->cra_init = init_tfm_ablk; | 1428 | cra->cra_init = init_tfm_ablk; |
| 1452 | } else { | 1429 | |
| 1453 | /* authenc */ | ||
| 1454 | cra->cra_type = &crypto_aead_type; | ||
| 1455 | cra->cra_flags = CRYPTO_ALG_TYPE_AEAD | | ||
| 1456 | CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
| 1457 | CRYPTO_ALG_ASYNC; | ||
| 1458 | cra->cra_aead.setkey = aead_setkey; | ||
| 1459 | cra->cra_aead.setauthsize = aead_setauthsize; | ||
| 1460 | cra->cra_aead.encrypt = aead_encrypt; | ||
| 1461 | cra->cra_aead.decrypt = aead_decrypt; | ||
| 1462 | cra->cra_aead.givencrypt = aead_givencrypt; | ||
| 1463 | cra->cra_init = init_tfm_aead; | ||
| 1464 | } | ||
| 1465 | cra->cra_ctxsize = sizeof(struct ixp_ctx); | 1430 | cra->cra_ctxsize = sizeof(struct ixp_ctx); |
| 1466 | cra->cra_module = THIS_MODULE; | 1431 | cra->cra_module = THIS_MODULE; |
| 1467 | cra->cra_alignmask = 3; | 1432 | cra->cra_alignmask = 3; |
| @@ -1473,6 +1438,38 @@ static int __init ixp_module_init(void) | |||
| 1473 | else | 1438 | else |
| 1474 | ixp4xx_algos[i].registered = 1; | 1439 | ixp4xx_algos[i].registered = 1; |
| 1475 | } | 1440 | } |
| 1441 | |||
| 1442 | for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) { | ||
| 1443 | struct aead_alg *cra = &ixp4xx_aeads[i].crypto; | ||
| 1444 | |||
| 1445 | if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
| 1446 | "%s"IXP_POSTFIX, cra->base.cra_name) >= | ||
| 1447 | CRYPTO_MAX_ALG_NAME) | ||
| 1448 | continue; | ||
| 1449 | if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) | ||
| 1450 | continue; | ||
| 1451 | |||
| 1452 | /* authenc */ | ||
| 1453 | cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | | ||
| 1454 | CRYPTO_ALG_ASYNC; | ||
| 1455 | cra->setkey = aead_setkey; | ||
| 1456 | cra->setauthsize = aead_setauthsize; | ||
| 1457 | cra->encrypt = aead_encrypt; | ||
| 1458 | cra->decrypt = aead_decrypt; | ||
| 1459 | cra->init = init_tfm_aead; | ||
| 1460 | cra->exit = exit_tfm_aead; | ||
| 1461 | |||
| 1462 | cra->base.cra_ctxsize = sizeof(struct ixp_ctx); | ||
| 1463 | cra->base.cra_module = THIS_MODULE; | ||
| 1464 | cra->base.cra_alignmask = 3; | ||
| 1465 | cra->base.cra_priority = 300; | ||
| 1466 | |||
| 1467 | if (crypto_register_aead(cra)) | ||
| 1468 | printk(KERN_ERR "Failed to register '%s'\n", | ||
| 1469 | cra->base.cra_driver_name); | ||
| 1470 | else | ||
| 1471 | ixp4xx_aeads[i].registered = 1; | ||
| 1472 | } | ||
| 1476 | return 0; | 1473 | return 0; |
| 1477 | } | 1474 | } |
| 1478 | 1475 | ||
| @@ -1481,6 +1478,11 @@ static void __exit ixp_module_exit(void) | |||
| 1481 | int num = ARRAY_SIZE(ixp4xx_algos); | 1478 | int num = ARRAY_SIZE(ixp4xx_algos); |
| 1482 | int i; | 1479 | int i; |
| 1483 | 1480 | ||
| 1481 | for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) { | ||
| 1482 | if (ixp4xx_aeads[i].registered) | ||
| 1483 | crypto_unregister_aead(&ixp4xx_aeads[i].crypto); | ||
| 1484 | } | ||
| 1485 | |||
| 1484 | for (i=0; i< num; i++) { | 1486 | for (i=0; i< num; i++) { |
| 1485 | if (ixp4xx_algos[i].registered) | 1487 | if (ixp4xx_algos[i].registered) |
| 1486 | crypto_unregister_alg(&ixp4xx_algos[i].crypto); | 1488 | crypto_unregister_alg(&ixp4xx_algos[i].crypto); |
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 1c6f98dd88f4..0643e3366e33 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c | |||
| @@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = { | |||
| 533 | .probe = mv_cesa_probe, | 533 | .probe = mv_cesa_probe, |
| 534 | .remove = mv_cesa_remove, | 534 | .remove = mv_cesa_remove, |
| 535 | .driver = { | 535 | .driver = { |
| 536 | .owner = THIS_MODULE, | ||
| 537 | .name = "marvell-cesa", | 536 | .name = "marvell-cesa", |
| 538 | .of_match_table = mv_cesa_of_match_table, | 537 | .of_match_table = mv_cesa_of_match_table, |
| 539 | }, | 538 | }, |
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index e421c96c763a..ad7552a6998c 100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig | |||
| @@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT | |||
| 14 | config CRYPTO_DEV_NX_COMPRESS | 14 | config CRYPTO_DEV_NX_COMPRESS |
| 15 | tristate "Compression acceleration support" | 15 | tristate "Compression acceleration support" |
| 16 | default y | 16 | default y |
| 17 | select CRYPTO_ALGAPI | ||
| 18 | select 842_DECOMPRESS | ||
| 17 | help | 19 | help |
| 18 | Support for PowerPC Nest (NX) compression acceleration. This | 20 | Support for PowerPC Nest (NX) compression acceleration. This |
| 19 | module supports acceleration for compressing memory with the 842 | 21 | module supports acceleration for compressing memory with the 842 |
| 20 | algorithm. One of the platform drivers must be selected also. | 22 | algorithm using the cryptographic API. One of the platform |
| 21 | If you choose 'M' here, this module will be called nx_compress. | 23 | drivers must be selected also. If you choose 'M' here, this |
| 24 | module will be called nx_compress. | ||
| 22 | 25 | ||
| 23 | if CRYPTO_DEV_NX_COMPRESS | 26 | if CRYPTO_DEV_NX_COMPRESS |
| 24 | 27 | ||
| @@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV | |||
| 42 | algorithm. This supports NX hardware on the PowerNV platform. | 45 | algorithm. This supports NX hardware on the PowerNV platform. |
| 43 | If you choose 'M' here, this module will be called nx_compress_powernv. | 46 | If you choose 'M' here, this module will be called nx_compress_powernv. |
| 44 | 47 | ||
| 45 | config CRYPTO_DEV_NX_COMPRESS_CRYPTO | ||
| 46 | tristate "Compression acceleration cryptographic interface" | ||
| 47 | select CRYPTO_ALGAPI | ||
| 48 | select 842_DECOMPRESS | ||
| 49 | default y | ||
| 50 | help | ||
| 51 | Support for PowerPC Nest (NX) accelerators using the cryptographic | ||
| 52 | API. If you choose 'M' here, this module will be called | ||
| 53 | nx_compress_crypto. | ||
| 54 | |||
| 55 | endif | 48 | endif |
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile index e1684f5adb11..b727821c8ed4 100644 --- a/drivers/crypto/nx/Makefile +++ b/drivers/crypto/nx/Makefile | |||
| @@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \ | |||
| 10 | nx-sha256.o \ | 10 | nx-sha256.o \ |
| 11 | nx-sha512.o | 11 | nx-sha512.o |
| 12 | 12 | ||
| 13 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o | 13 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o |
| 14 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o | 14 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o |
| 15 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o | ||
| 16 | obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o | ||
| 17 | nx-compress-objs := nx-842.o | 15 | nx-compress-objs := nx-842.o |
| 18 | nx-compress-platform-objs := nx-842-platform.o | ||
| 19 | nx-compress-pseries-objs := nx-842-pseries.o | 16 | nx-compress-pseries-objs := nx-842-pseries.o |
| 20 | nx-compress-powernv-objs := nx-842-powernv.o | 17 | nx-compress-powernv-objs := nx-842-powernv.o |
| 21 | nx-compress-crypto-objs := nx-842-crypto.o | ||
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c deleted file mode 100644 index d53a1dcd7b4e..000000000000 --- a/drivers/crypto/nx/nx-842-crypto.c +++ /dev/null | |||
| @@ -1,580 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Cryptographic API for the NX-842 hardware compression. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * Copyright (C) IBM Corporation, 2011-2015 | ||
| 15 | * | ||
| 16 | * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | ||
| 17 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
| 18 | * | ||
| 19 | * Rewrite: Dan Streetman <ddstreet@ieee.org> | ||
| 20 | * | ||
| 21 | * This is an interface to the NX-842 compression hardware in PowerPC | ||
| 22 | * processors. Most of the complexity of this drvier is due to the fact that | ||
| 23 | * the NX-842 compression hardware requires the input and output data buffers | ||
| 24 | * to be specifically aligned, to be a specific multiple in length, and within | ||
| 25 | * specific minimum and maximum lengths. Those restrictions, provided by the | ||
| 26 | * nx-842 driver via nx842_constraints, mean this driver must use bounce | ||
| 27 | * buffers and headers to correct misaligned in or out buffers, and to split | ||
| 28 | * input buffers that are too large. | ||
| 29 | * | ||
| 30 | * This driver will fall back to software decompression if the hardware | ||
| 31 | * decompression fails, so this driver's decompression should never fail as | ||
| 32 | * long as the provided compressed buffer is valid. Any compressed buffer | ||
| 33 | * created by this driver will have a header (except ones where the input | ||
| 34 | * perfectly matches the constraints); so users of this driver cannot simply | ||
| 35 | * pass a compressed buffer created by this driver over to the 842 software | ||
| 36 | * decompression library. Instead, users must use this driver to decompress; | ||
| 37 | * if the hardware fails or is unavailable, the compressed buffer will be | ||
| 38 | * parsed and the header removed, and the raw 842 buffer(s) passed to the 842 | ||
| 39 | * software decompression library. | ||
| 40 | * | ||
| 41 | * This does not fall back to software compression, however, since the caller | ||
| 42 | * of this function is specifically requesting hardware compression; if the | ||
| 43 | * hardware compression fails, the caller can fall back to software | ||
| 44 | * compression, and the raw 842 compressed buffer that the software compressor | ||
| 45 | * creates can be passed to this driver for hardware decompression; any | ||
| 46 | * buffer without our specific header magic is assumed to be a raw 842 buffer | ||
| 47 | * and passed directly to the hardware. Note that the software compression | ||
| 48 | * library will produce a compressed buffer that is incompatible with the | ||
| 49 | * hardware decompressor if the original input buffer length is not a multiple | ||
| 50 | * of 8; if such a compressed buffer is passed to this driver for | ||
| 51 | * decompression, the hardware will reject it and this driver will then pass | ||
| 52 | * it over to the software library for decompression. | ||
| 53 | */ | ||
| 54 | |||
| 55 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 56 | |||
| 57 | #include <linux/init.h> | ||
| 58 | #include <linux/module.h> | ||
| 59 | #include <linux/crypto.h> | ||
| 60 | #include <linux/vmalloc.h> | ||
| 61 | #include <linux/sw842.h> | ||
| 62 | #include <linux/ratelimit.h> | ||
| 63 | |||
| 64 | #include "nx-842.h" | ||
| 65 | |||
| 66 | /* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit | ||
| 67 | * template (see lib/842/842.h), so this magic number will never appear at | ||
| 68 | * the start of a raw 842 compressed buffer. That is important, as any buffer | ||
| 69 | * passed to us without this magic is assumed to be a raw 842 compressed | ||
| 70 | * buffer, and passed directly to the hardware to decompress. | ||
| 71 | */ | ||
| 72 | #define NX842_CRYPTO_MAGIC (0xf842) | ||
| 73 | #define NX842_CRYPTO_GROUP_MAX (0x20) | ||
| 74 | #define NX842_CRYPTO_HEADER_SIZE(g) \ | ||
| 75 | (sizeof(struct nx842_crypto_header) + \ | ||
| 76 | sizeof(struct nx842_crypto_header_group) * (g)) | ||
| 77 | #define NX842_CRYPTO_HEADER_MAX_SIZE \ | ||
| 78 | NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX) | ||
| 79 | |||
| 80 | /* bounce buffer size */ | ||
| 81 | #define BOUNCE_BUFFER_ORDER (2) | ||
| 82 | #define BOUNCE_BUFFER_SIZE \ | ||
| 83 | ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER)) | ||
| 84 | |||
| 85 | /* try longer on comp because we can fallback to sw decomp if hw is busy */ | ||
| 86 | #define COMP_BUSY_TIMEOUT (250) /* ms */ | ||
| 87 | #define DECOMP_BUSY_TIMEOUT (50) /* ms */ | ||
| 88 | |||
| 89 | struct nx842_crypto_header_group { | ||
| 90 | __be16 padding; /* unused bytes at start of group */ | ||
| 91 | __be32 compressed_length; /* compressed bytes in group */ | ||
| 92 | __be32 uncompressed_length; /* bytes after decompression */ | ||
| 93 | } __packed; | ||
| 94 | |||
| 95 | struct nx842_crypto_header { | ||
| 96 | __be16 magic; /* NX842_CRYPTO_MAGIC */ | ||
| 97 | __be16 ignore; /* decompressed end bytes to ignore */ | ||
| 98 | u8 groups; /* total groups in this header */ | ||
| 99 | struct nx842_crypto_header_group group[]; | ||
| 100 | } __packed; | ||
| 101 | |||
| 102 | struct nx842_crypto_param { | ||
| 103 | u8 *in; | ||
| 104 | unsigned int iremain; | ||
| 105 | u8 *out; | ||
| 106 | unsigned int oremain; | ||
| 107 | unsigned int ototal; | ||
| 108 | }; | ||
| 109 | |||
| 110 | static int update_param(struct nx842_crypto_param *p, | ||
| 111 | unsigned int slen, unsigned int dlen) | ||
| 112 | { | ||
| 113 | if (p->iremain < slen) | ||
| 114 | return -EOVERFLOW; | ||
| 115 | if (p->oremain < dlen) | ||
| 116 | return -ENOSPC; | ||
| 117 | |||
| 118 | p->in += slen; | ||
| 119 | p->iremain -= slen; | ||
| 120 | p->out += dlen; | ||
| 121 | p->oremain -= dlen; | ||
| 122 | p->ototal += dlen; | ||
| 123 | |||
| 124 | return 0; | ||
| 125 | } | ||
| 126 | |||
| 127 | struct nx842_crypto_ctx { | ||
| 128 | u8 *wmem; | ||
| 129 | u8 *sbounce, *dbounce; | ||
| 130 | |||
| 131 | struct nx842_crypto_header header; | ||
| 132 | struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; | ||
| 133 | }; | ||
| 134 | |||
| 135 | static int nx842_crypto_init(struct crypto_tfm *tfm) | ||
| 136 | { | ||
| 137 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 138 | |||
| 139 | ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL); | ||
| 140 | ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); | ||
| 141 | ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); | ||
| 142 | if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { | ||
| 143 | kfree(ctx->wmem); | ||
| 144 | free_page((unsigned long)ctx->sbounce); | ||
| 145 | free_page((unsigned long)ctx->dbounce); | ||
| 146 | return -ENOMEM; | ||
| 147 | } | ||
| 148 | |||
| 149 | return 0; | ||
| 150 | } | ||
| 151 | |||
| 152 | static void nx842_crypto_exit(struct crypto_tfm *tfm) | ||
| 153 | { | ||
| 154 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 155 | |||
| 156 | kfree(ctx->wmem); | ||
| 157 | free_page((unsigned long)ctx->sbounce); | ||
| 158 | free_page((unsigned long)ctx->dbounce); | ||
| 159 | } | ||
| 160 | |||
| 161 | static int read_constraints(struct nx842_constraints *c) | ||
| 162 | { | ||
| 163 | int ret; | ||
| 164 | |||
| 165 | ret = nx842_constraints(c); | ||
| 166 | if (ret) { | ||
| 167 | pr_err_ratelimited("could not get nx842 constraints : %d\n", | ||
| 168 | ret); | ||
| 169 | return ret; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* limit maximum, to always have enough bounce buffer to decompress */ | ||
| 173 | if (c->maximum > BOUNCE_BUFFER_SIZE) { | ||
| 174 | c->maximum = BOUNCE_BUFFER_SIZE; | ||
| 175 | pr_info_once("limiting nx842 maximum to %x\n", c->maximum); | ||
| 176 | } | ||
| 177 | |||
| 178 | return 0; | ||
| 179 | } | ||
| 180 | |||
| 181 | static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) | ||
| 182 | { | ||
| 183 | int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); | ||
| 184 | |||
| 185 | /* compress should have added space for header */ | ||
| 186 | if (s > be16_to_cpu(hdr->group[0].padding)) { | ||
| 187 | pr_err("Internal error: no space for header\n"); | ||
| 188 | return -EINVAL; | ||
| 189 | } | ||
| 190 | |||
| 191 | memcpy(buf, hdr, s); | ||
| 192 | |||
| 193 | print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); | ||
| 194 | |||
| 195 | return 0; | ||
| 196 | } | ||
| 197 | |||
| 198 | static int compress(struct nx842_crypto_ctx *ctx, | ||
| 199 | struct nx842_crypto_param *p, | ||
| 200 | struct nx842_crypto_header_group *g, | ||
| 201 | struct nx842_constraints *c, | ||
| 202 | u16 *ignore, | ||
| 203 | unsigned int hdrsize) | ||
| 204 | { | ||
| 205 | unsigned int slen = p->iremain, dlen = p->oremain, tmplen; | ||
| 206 | unsigned int adj_slen = slen; | ||
| 207 | u8 *src = p->in, *dst = p->out; | ||
| 208 | int ret, dskip = 0; | ||
| 209 | ktime_t timeout; | ||
| 210 | |||
| 211 | if (p->iremain == 0) | ||
| 212 | return -EOVERFLOW; | ||
| 213 | |||
| 214 | if (p->oremain == 0 || hdrsize + c->minimum > dlen) | ||
| 215 | return -ENOSPC; | ||
| 216 | |||
| 217 | if (slen % c->multiple) | ||
| 218 | adj_slen = round_up(slen, c->multiple); | ||
| 219 | if (slen < c->minimum) | ||
| 220 | adj_slen = c->minimum; | ||
| 221 | if (slen > c->maximum) | ||
| 222 | adj_slen = slen = c->maximum; | ||
| 223 | if (adj_slen > slen || (u64)src % c->alignment) { | ||
| 224 | adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); | ||
| 225 | slen = min(slen, BOUNCE_BUFFER_SIZE); | ||
| 226 | if (adj_slen > slen) | ||
| 227 | memset(ctx->sbounce + slen, 0, adj_slen - slen); | ||
| 228 | memcpy(ctx->sbounce, src, slen); | ||
| 229 | src = ctx->sbounce; | ||
| 230 | slen = adj_slen; | ||
| 231 | pr_debug("using comp sbounce buffer, len %x\n", slen); | ||
| 232 | } | ||
| 233 | |||
| 234 | dst += hdrsize; | ||
| 235 | dlen -= hdrsize; | ||
| 236 | |||
| 237 | if ((u64)dst % c->alignment) { | ||
| 238 | dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); | ||
| 239 | dst += dskip; | ||
| 240 | dlen -= dskip; | ||
| 241 | } | ||
| 242 | if (dlen % c->multiple) | ||
| 243 | dlen = round_down(dlen, c->multiple); | ||
| 244 | if (dlen < c->minimum) { | ||
| 245 | nospc: | ||
| 246 | dst = ctx->dbounce; | ||
| 247 | dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); | ||
| 248 | dlen = round_down(dlen, c->multiple); | ||
| 249 | dskip = 0; | ||
| 250 | pr_debug("using comp dbounce buffer, len %x\n", dlen); | ||
| 251 | } | ||
| 252 | if (dlen > c->maximum) | ||
| 253 | dlen = c->maximum; | ||
| 254 | |||
| 255 | tmplen = dlen; | ||
| 256 | timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); | ||
| 257 | do { | ||
| 258 | dlen = tmplen; /* reset dlen, if we're retrying */ | ||
| 259 | ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem); | ||
| 260 | /* possibly we should reduce the slen here, instead of | ||
| 261 | * retrying with the dbounce buffer? | ||
| 262 | */ | ||
| 263 | if (ret == -ENOSPC && dst != ctx->dbounce) | ||
| 264 | goto nospc; | ||
| 265 | } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); | ||
| 266 | if (ret) | ||
| 267 | return ret; | ||
| 268 | |||
| 269 | dskip += hdrsize; | ||
| 270 | |||
| 271 | if (dst == ctx->dbounce) | ||
| 272 | memcpy(p->out + dskip, dst, dlen); | ||
| 273 | |||
| 274 | g->padding = cpu_to_be16(dskip); | ||
| 275 | g->compressed_length = cpu_to_be32(dlen); | ||
| 276 | g->uncompressed_length = cpu_to_be32(slen); | ||
| 277 | |||
| 278 | if (p->iremain < slen) { | ||
| 279 | *ignore = slen - p->iremain; | ||
| 280 | slen = p->iremain; | ||
| 281 | } | ||
| 282 | |||
| 283 | pr_debug("compress slen %x ignore %x dlen %x padding %x\n", | ||
| 284 | slen, *ignore, dlen, dskip); | ||
| 285 | |||
| 286 | return update_param(p, slen, dskip + dlen); | ||
| 287 | } | ||
| 288 | |||
| 289 | static int nx842_crypto_compress(struct crypto_tfm *tfm, | ||
| 290 | const u8 *src, unsigned int slen, | ||
| 291 | u8 *dst, unsigned int *dlen) | ||
| 292 | { | ||
| 293 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 294 | struct nx842_crypto_header *hdr = &ctx->header; | ||
| 295 | struct nx842_crypto_param p; | ||
| 296 | struct nx842_constraints c; | ||
| 297 | unsigned int groups, hdrsize, h; | ||
| 298 | int ret, n; | ||
| 299 | bool add_header; | ||
| 300 | u16 ignore = 0; | ||
| 301 | |||
| 302 | p.in = (u8 *)src; | ||
| 303 | p.iremain = slen; | ||
| 304 | p.out = dst; | ||
| 305 | p.oremain = *dlen; | ||
| 306 | p.ototal = 0; | ||
| 307 | |||
| 308 | *dlen = 0; | ||
| 309 | |||
| 310 | ret = read_constraints(&c); | ||
| 311 | if (ret) | ||
| 312 | return ret; | ||
| 313 | |||
| 314 | groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, | ||
| 315 | DIV_ROUND_UP(p.iremain, c.maximum)); | ||
| 316 | hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); | ||
| 317 | |||
| 318 | /* skip adding header if the buffers meet all constraints */ | ||
| 319 | add_header = (p.iremain % c.multiple || | ||
| 320 | p.iremain < c.minimum || | ||
| 321 | p.iremain > c.maximum || | ||
| 322 | (u64)p.in % c.alignment || | ||
| 323 | p.oremain % c.multiple || | ||
| 324 | p.oremain < c.minimum || | ||
| 325 | p.oremain > c.maximum || | ||
| 326 | (u64)p.out % c.alignment); | ||
| 327 | |||
| 328 | hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); | ||
| 329 | hdr->groups = 0; | ||
| 330 | hdr->ignore = 0; | ||
| 331 | |||
| 332 | while (p.iremain > 0) { | ||
| 333 | n = hdr->groups++; | ||
| 334 | if (hdr->groups > NX842_CRYPTO_GROUP_MAX) | ||
| 335 | return -ENOSPC; | ||
| 336 | |||
| 337 | /* header goes before first group */ | ||
| 338 | h = !n && add_header ? hdrsize : 0; | ||
| 339 | |||
| 340 | if (ignore) | ||
| 341 | pr_warn("interal error, ignore is set %x\n", ignore); | ||
| 342 | |||
| 343 | ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); | ||
| 344 | if (ret) | ||
| 345 | return ret; | ||
| 346 | } | ||
| 347 | |||
| 348 | if (!add_header && hdr->groups > 1) { | ||
| 349 | pr_err("Internal error: No header but multiple groups\n"); | ||
| 350 | return -EINVAL; | ||
| 351 | } | ||
| 352 | |||
| 353 | /* ignore indicates the input stream needed to be padded */ | ||
| 354 | hdr->ignore = cpu_to_be16(ignore); | ||
| 355 | if (ignore) | ||
| 356 | pr_debug("marked %d bytes as ignore\n", ignore); | ||
| 357 | |||
| 358 | if (add_header) | ||
| 359 | ret = nx842_crypto_add_header(hdr, dst); | ||
| 360 | if (ret) | ||
| 361 | return ret; | ||
| 362 | |||
| 363 | *dlen = p.ototal; | ||
| 364 | |||
| 365 | pr_debug("compress total slen %x dlen %x\n", slen, *dlen); | ||
| 366 | |||
| 367 | return 0; | ||
| 368 | } | ||
| 369 | |||
| 370 | static int decompress(struct nx842_crypto_ctx *ctx, | ||
| 371 | struct nx842_crypto_param *p, | ||
| 372 | struct nx842_crypto_header_group *g, | ||
| 373 | struct nx842_constraints *c, | ||
| 374 | u16 ignore, | ||
| 375 | bool usehw) | ||
| 376 | { | ||
| 377 | unsigned int slen = be32_to_cpu(g->compressed_length); | ||
| 378 | unsigned int required_len = be32_to_cpu(g->uncompressed_length); | ||
| 379 | unsigned int dlen = p->oremain, tmplen; | ||
| 380 | unsigned int adj_slen = slen; | ||
| 381 | u8 *src = p->in, *dst = p->out; | ||
| 382 | u16 padding = be16_to_cpu(g->padding); | ||
| 383 | int ret, spadding = 0, dpadding = 0; | ||
| 384 | ktime_t timeout; | ||
| 385 | |||
| 386 | if (!slen || !required_len) | ||
| 387 | return -EINVAL; | ||
| 388 | |||
| 389 | if (p->iremain <= 0 || padding + slen > p->iremain) | ||
| 390 | return -EOVERFLOW; | ||
| 391 | |||
| 392 | if (p->oremain <= 0 || required_len - ignore > p->oremain) | ||
| 393 | return -ENOSPC; | ||
| 394 | |||
| 395 | src += padding; | ||
| 396 | |||
| 397 | if (!usehw) | ||
| 398 | goto usesw; | ||
| 399 | |||
| 400 | if (slen % c->multiple) | ||
| 401 | adj_slen = round_up(slen, c->multiple); | ||
| 402 | if (slen < c->minimum) | ||
| 403 | adj_slen = c->minimum; | ||
| 404 | if (slen > c->maximum) | ||
| 405 | goto usesw; | ||
| 406 | if (slen < adj_slen || (u64)src % c->alignment) { | ||
| 407 | /* we can append padding bytes because the 842 format defines | ||
| 408 | * an "end" template (see lib/842/842_decompress.c) and will | ||
| 409 | * ignore any bytes following it. | ||
| 410 | */ | ||
| 411 | if (slen < adj_slen) | ||
| 412 | memset(ctx->sbounce + slen, 0, adj_slen - slen); | ||
| 413 | memcpy(ctx->sbounce, src, slen); | ||
| 414 | src = ctx->sbounce; | ||
| 415 | spadding = adj_slen - slen; | ||
| 416 | slen = adj_slen; | ||
| 417 | pr_debug("using decomp sbounce buffer, len %x\n", slen); | ||
| 418 | } | ||
| 419 | |||
| 420 | if (dlen % c->multiple) | ||
| 421 | dlen = round_down(dlen, c->multiple); | ||
| 422 | if (dlen < required_len || (u64)dst % c->alignment) { | ||
| 423 | dst = ctx->dbounce; | ||
| 424 | dlen = min(required_len, BOUNCE_BUFFER_SIZE); | ||
| 425 | pr_debug("using decomp dbounce buffer, len %x\n", dlen); | ||
| 426 | } | ||
| 427 | if (dlen < c->minimum) | ||
| 428 | goto usesw; | ||
| 429 | if (dlen > c->maximum) | ||
| 430 | dlen = c->maximum; | ||
| 431 | |||
| 432 | tmplen = dlen; | ||
| 433 | timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); | ||
| 434 | do { | ||
| 435 | dlen = tmplen; /* reset dlen, if we're retrying */ | ||
| 436 | ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem); | ||
| 437 | } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); | ||
| 438 | if (ret) { | ||
| 439 | usesw: | ||
| 440 | /* reset everything, sw doesn't have constraints */ | ||
| 441 | src = p->in + padding; | ||
| 442 | slen = be32_to_cpu(g->compressed_length); | ||
| 443 | spadding = 0; | ||
| 444 | dst = p->out; | ||
| 445 | dlen = p->oremain; | ||
| 446 | dpadding = 0; | ||
| 447 | if (dlen < required_len) { /* have ignore bytes */ | ||
| 448 | dst = ctx->dbounce; | ||
| 449 | dlen = BOUNCE_BUFFER_SIZE; | ||
| 450 | } | ||
| 451 | pr_info_ratelimited("using software 842 decompression\n"); | ||
| 452 | ret = sw842_decompress(src, slen, dst, &dlen); | ||
| 453 | } | ||
| 454 | if (ret) | ||
| 455 | return ret; | ||
| 456 | |||
| 457 | slen -= spadding; | ||
| 458 | |||
| 459 | dlen -= ignore; | ||
| 460 | if (ignore) | ||
| 461 | pr_debug("ignoring last %x bytes\n", ignore); | ||
| 462 | |||
| 463 | if (dst == ctx->dbounce) | ||
| 464 | memcpy(p->out, dst, dlen); | ||
| 465 | |||
| 466 | pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", | ||
| 467 | slen, padding, dlen, ignore); | ||
| 468 | |||
| 469 | return update_param(p, slen + padding, dlen); | ||
| 470 | } | ||
| 471 | |||
| 472 | static int nx842_crypto_decompress(struct crypto_tfm *tfm, | ||
| 473 | const u8 *src, unsigned int slen, | ||
| 474 | u8 *dst, unsigned int *dlen) | ||
| 475 | { | ||
| 476 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 477 | struct nx842_crypto_header *hdr; | ||
| 478 | struct nx842_crypto_param p; | ||
| 479 | struct nx842_constraints c; | ||
| 480 | int n, ret, hdr_len; | ||
| 481 | u16 ignore = 0; | ||
| 482 | bool usehw = true; | ||
| 483 | |||
| 484 | p.in = (u8 *)src; | ||
| 485 | p.iremain = slen; | ||
| 486 | p.out = dst; | ||
| 487 | p.oremain = *dlen; | ||
| 488 | p.ototal = 0; | ||
| 489 | |||
| 490 | *dlen = 0; | ||
| 491 | |||
| 492 | if (read_constraints(&c)) | ||
| 493 | usehw = false; | ||
| 494 | |||
| 495 | hdr = (struct nx842_crypto_header *)src; | ||
| 496 | |||
| 497 | /* If it doesn't start with our header magic number, assume it's a raw | ||
| 498 | * 842 compressed buffer and pass it directly to the hardware driver | ||
| 499 | */ | ||
| 500 | if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { | ||
| 501 | struct nx842_crypto_header_group g = { | ||
| 502 | .padding = 0, | ||
| 503 | .compressed_length = cpu_to_be32(p.iremain), | ||
| 504 | .uncompressed_length = cpu_to_be32(p.oremain), | ||
| 505 | }; | ||
| 506 | |||
| 507 | ret = decompress(ctx, &p, &g, &c, 0, usehw); | ||
| 508 | if (ret) | ||
| 509 | return ret; | ||
| 510 | |||
| 511 | *dlen = p.ototal; | ||
| 512 | |||
| 513 | return 0; | ||
| 514 | } | ||
| 515 | |||
| 516 | if (!hdr->groups) { | ||
| 517 | pr_err("header has no groups\n"); | ||
| 518 | return -EINVAL; | ||
| 519 | } | ||
| 520 | if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { | ||
| 521 | pr_err("header has too many groups %x, max %x\n", | ||
| 522 | hdr->groups, NX842_CRYPTO_GROUP_MAX); | ||
| 523 | return -EINVAL; | ||
| 524 | } | ||
| 525 | |||
| 526 | hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); | ||
| 527 | if (hdr_len > slen) | ||
| 528 | return -EOVERFLOW; | ||
| 529 | |||
| 530 | memcpy(&ctx->header, src, hdr_len); | ||
| 531 | hdr = &ctx->header; | ||
| 532 | |||
| 533 | for (n = 0; n < hdr->groups; n++) { | ||
| 534 | /* ignore applies to last group */ | ||
| 535 | if (n + 1 == hdr->groups) | ||
| 536 | ignore = be16_to_cpu(hdr->ignore); | ||
| 537 | |||
| 538 | ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw); | ||
| 539 | if (ret) | ||
| 540 | return ret; | ||
| 541 | } | ||
| 542 | |||
| 543 | *dlen = p.ototal; | ||
| 544 | |||
| 545 | pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); | ||
| 546 | |||
| 547 | return 0; | ||
| 548 | } | ||
| 549 | |||
| 550 | static struct crypto_alg alg = { | ||
| 551 | .cra_name = "842", | ||
| 552 | .cra_driver_name = "842-nx", | ||
| 553 | .cra_priority = 300, | ||
| 554 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | ||
| 555 | .cra_ctxsize = sizeof(struct nx842_crypto_ctx), | ||
| 556 | .cra_module = THIS_MODULE, | ||
| 557 | .cra_init = nx842_crypto_init, | ||
| 558 | .cra_exit = nx842_crypto_exit, | ||
| 559 | .cra_u = { .compress = { | ||
| 560 | .coa_compress = nx842_crypto_compress, | ||
| 561 | .coa_decompress = nx842_crypto_decompress } } | ||
| 562 | }; | ||
| 563 | |||
| 564 | static int __init nx842_crypto_mod_init(void) | ||
| 565 | { | ||
| 566 | return crypto_register_alg(&alg); | ||
| 567 | } | ||
| 568 | module_init(nx842_crypto_mod_init); | ||
| 569 | |||
| 570 | static void __exit nx842_crypto_mod_exit(void) | ||
| 571 | { | ||
| 572 | crypto_unregister_alg(&alg); | ||
| 573 | } | ||
| 574 | module_exit(nx842_crypto_mod_exit); | ||
| 575 | |||
| 576 | MODULE_LICENSE("GPL"); | ||
| 577 | MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface"); | ||
| 578 | MODULE_ALIAS_CRYPTO("842"); | ||
| 579 | MODULE_ALIAS_CRYPTO("842-nx"); | ||
| 580 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c deleted file mode 100644 index 664f13dd06ed..000000000000 --- a/drivers/crypto/nx/nx-842-platform.c +++ /dev/null | |||
| @@ -1,84 +0,0 @@ | |||
| 1 | |||
| 2 | #include "nx-842.h" | ||
| 3 | |||
| 4 | /* this is needed, separate from the main nx-842.c driver, because that main | ||
| 5 | * driver loads the platform drivers during its init(), and it expects one | ||
| 6 | * (or none) of the platform drivers to set this pointer to its driver. | ||
| 7 | * That means this pointer can't be in the main nx-842 driver, because it | ||
| 8 | * wouldn't be accessible until after the main driver loaded, which wouldn't | ||
| 9 | * be possible as it's waiting for the platform driver to load. So place it | ||
| 10 | * here. | ||
| 11 | */ | ||
| 12 | static struct nx842_driver *driver; | ||
| 13 | static DEFINE_SPINLOCK(driver_lock); | ||
| 14 | |||
| 15 | struct nx842_driver *nx842_platform_driver(void) | ||
| 16 | { | ||
| 17 | return driver; | ||
| 18 | } | ||
| 19 | EXPORT_SYMBOL_GPL(nx842_platform_driver); | ||
| 20 | |||
| 21 | bool nx842_platform_driver_set(struct nx842_driver *_driver) | ||
| 22 | { | ||
| 23 | bool ret = false; | ||
| 24 | |||
| 25 | spin_lock(&driver_lock); | ||
| 26 | |||
| 27 | if (!driver) { | ||
| 28 | driver = _driver; | ||
| 29 | ret = true; | ||
| 30 | } else | ||
| 31 | WARN(1, "can't set platform driver, already set to %s\n", | ||
| 32 | driver->name); | ||
| 33 | |||
| 34 | spin_unlock(&driver_lock); | ||
| 35 | return ret; | ||
| 36 | } | ||
| 37 | EXPORT_SYMBOL_GPL(nx842_platform_driver_set); | ||
| 38 | |||
| 39 | /* only call this from the platform driver exit function */ | ||
| 40 | void nx842_platform_driver_unset(struct nx842_driver *_driver) | ||
| 41 | { | ||
| 42 | spin_lock(&driver_lock); | ||
| 43 | |||
| 44 | if (driver == _driver) | ||
| 45 | driver = NULL; | ||
| 46 | else if (driver) | ||
| 47 | WARN(1, "can't unset platform driver %s, currently set to %s\n", | ||
| 48 | _driver->name, driver->name); | ||
| 49 | else | ||
| 50 | WARN(1, "can't unset platform driver, already unset\n"); | ||
| 51 | |||
| 52 | spin_unlock(&driver_lock); | ||
| 53 | } | ||
| 54 | EXPORT_SYMBOL_GPL(nx842_platform_driver_unset); | ||
| 55 | |||
| 56 | bool nx842_platform_driver_get(void) | ||
| 57 | { | ||
| 58 | bool ret = false; | ||
| 59 | |||
| 60 | spin_lock(&driver_lock); | ||
| 61 | |||
| 62 | if (driver) | ||
| 63 | ret = try_module_get(driver->owner); | ||
| 64 | |||
| 65 | spin_unlock(&driver_lock); | ||
| 66 | |||
| 67 | return ret; | ||
| 68 | } | ||
| 69 | EXPORT_SYMBOL_GPL(nx842_platform_driver_get); | ||
| 70 | |||
| 71 | void nx842_platform_driver_put(void) | ||
| 72 | { | ||
| 73 | spin_lock(&driver_lock); | ||
| 74 | |||
| 75 | if (driver) | ||
| 76 | module_put(driver->owner); | ||
| 77 | |||
| 78 | spin_unlock(&driver_lock); | ||
| 79 | } | ||
| 80 | EXPORT_SYMBOL_GPL(nx842_platform_driver_put); | ||
| 81 | |||
| 82 | MODULE_LICENSE("GPL"); | ||
| 83 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
| 84 | MODULE_DESCRIPTION("842 H/W Compression platform driver"); | ||
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 33b3b0abf4ae..3750e13d8721 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
| @@ -26,6 +26,8 @@ | |||
| 26 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
| 27 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | 27 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); |
| 28 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors"); | 28 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors"); |
| 29 | MODULE_ALIAS_CRYPTO("842"); | ||
| 30 | MODULE_ALIAS_CRYPTO("842-nx"); | ||
| 29 | 31 | ||
| 30 | #define WORKMEM_ALIGN (CRB_ALIGN) | 32 | #define WORKMEM_ALIGN (CRB_ALIGN) |
| 31 | #define CSB_WAIT_MAX (5000) /* ms */ | 33 | #define CSB_WAIT_MAX (5000) /* ms */ |
| @@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem, | |||
| 344 | } | 346 | } |
| 345 | 347 | ||
| 346 | /* successful completion */ | 348 | /* successful completion */ |
| 347 | pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count, | 349 | pr_debug_ratelimited("Processed %u bytes in %lu us\n", |
| 350 | be32_to_cpu(csb->count), | ||
| 348 | (unsigned long)ktime_us_delta(now, start)); | 351 | (unsigned long)ktime_us_delta(now, start)); |
| 349 | 352 | ||
| 350 | return 0; | 353 | return 0; |
| @@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = { | |||
| 581 | .decompress = nx842_powernv_decompress, | 584 | .decompress = nx842_powernv_decompress, |
| 582 | }; | 585 | }; |
| 583 | 586 | ||
| 587 | static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) | ||
| 588 | { | ||
| 589 | return nx842_crypto_init(tfm, &nx842_powernv_driver); | ||
| 590 | } | ||
| 591 | |||
| 592 | static struct crypto_alg nx842_powernv_alg = { | ||
| 593 | .cra_name = "842", | ||
| 594 | .cra_driver_name = "842-nx", | ||
| 595 | .cra_priority = 300, | ||
| 596 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | ||
| 597 | .cra_ctxsize = sizeof(struct nx842_crypto_ctx), | ||
| 598 | .cra_module = THIS_MODULE, | ||
| 599 | .cra_init = nx842_powernv_crypto_init, | ||
| 600 | .cra_exit = nx842_crypto_exit, | ||
| 601 | .cra_u = { .compress = { | ||
| 602 | .coa_compress = nx842_crypto_compress, | ||
| 603 | .coa_decompress = nx842_crypto_decompress } } | ||
| 604 | }; | ||
| 605 | |||
| 584 | static __init int nx842_powernv_init(void) | 606 | static __init int nx842_powernv_init(void) |
| 585 | { | 607 | { |
| 586 | struct device_node *dn; | 608 | struct device_node *dn; |
| 609 | int ret; | ||
| 587 | 610 | ||
| 588 | /* verify workmem size/align restrictions */ | 611 | /* verify workmem size/align restrictions */ |
| 589 | BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); | 612 | BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); |
| @@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void) | |||
| 594 | BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); | 617 | BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); |
| 595 | BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); | 618 | BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); |
| 596 | 619 | ||
| 597 | pr_info("loading\n"); | ||
| 598 | |||
| 599 | for_each_compatible_node(dn, NULL, "ibm,power-nx") | 620 | for_each_compatible_node(dn, NULL, "ibm,power-nx") |
| 600 | nx842_powernv_probe(dn); | 621 | nx842_powernv_probe(dn); |
| 601 | 622 | ||
| 602 | if (!nx842_ct) { | 623 | if (!nx842_ct) |
| 603 | pr_err("no coprocessors found\n"); | ||
| 604 | return -ENODEV; | 624 | return -ENODEV; |
| 605 | } | ||
| 606 | 625 | ||
| 607 | if (!nx842_platform_driver_set(&nx842_powernv_driver)) { | 626 | ret = crypto_register_alg(&nx842_powernv_alg); |
| 627 | if (ret) { | ||
| 608 | struct nx842_coproc *coproc, *n; | 628 | struct nx842_coproc *coproc, *n; |
| 609 | 629 | ||
| 610 | list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { | 630 | list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { |
| @@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void) | |||
| 612 | kfree(coproc); | 632 | kfree(coproc); |
| 613 | } | 633 | } |
| 614 | 634 | ||
| 615 | return -EEXIST; | 635 | return ret; |
| 616 | } | 636 | } |
| 617 | 637 | ||
| 618 | pr_info("loaded\n"); | ||
| 619 | |||
| 620 | return 0; | 638 | return 0; |
| 621 | } | 639 | } |
| 622 | module_init(nx842_powernv_init); | 640 | module_init(nx842_powernv_init); |
| @@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void) | |||
| 625 | { | 643 | { |
| 626 | struct nx842_coproc *coproc, *n; | 644 | struct nx842_coproc *coproc, *n; |
| 627 | 645 | ||
| 628 | nx842_platform_driver_unset(&nx842_powernv_driver); | 646 | crypto_unregister_alg(&nx842_powernv_alg); |
| 629 | 647 | ||
| 630 | list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { | 648 | list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { |
| 631 | list_del(&coproc->list); | 649 | list_del(&coproc->list); |
| 632 | kfree(coproc); | 650 | kfree(coproc); |
| 633 | } | 651 | } |
| 634 | |||
| 635 | pr_info("unloaded\n"); | ||
| 636 | } | 652 | } |
| 637 | module_exit(nx842_powernv_exit); | 653 | module_exit(nx842_powernv_exit); |
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 3040a6091bf2..f4cbde03c6ad 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c | |||
| @@ -29,6 +29,8 @@ | |||
| 29 | MODULE_LICENSE("GPL"); | 29 | MODULE_LICENSE("GPL"); |
| 30 | MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); | 30 | MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); |
| 31 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); | 31 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); |
| 32 | MODULE_ALIAS_CRYPTO("842"); | ||
| 33 | MODULE_ALIAS_CRYPTO("842-nx"); | ||
| 32 | 34 | ||
| 33 | static struct nx842_constraints nx842_pseries_constraints = { | 35 | static struct nx842_constraints nx842_pseries_constraints = { |
| 34 | .alignment = DDE_BUFFER_ALIGN, | 36 | .alignment = DDE_BUFFER_ALIGN, |
| @@ -99,11 +101,6 @@ struct nx842_workmem { | |||
| 99 | #define NX842_HW_PAGE_SIZE (4096) | 101 | #define NX842_HW_PAGE_SIZE (4096) |
| 100 | #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) | 102 | #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) |
| 101 | 103 | ||
| 102 | enum nx842_status { | ||
| 103 | UNAVAILABLE, | ||
| 104 | AVAILABLE | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct ibm_nx842_counters { | 104 | struct ibm_nx842_counters { |
| 108 | atomic64_t comp_complete; | 105 | atomic64_t comp_complete; |
| 109 | atomic64_t comp_failed; | 106 | atomic64_t comp_failed; |
| @@ -121,7 +118,6 @@ static struct nx842_devdata { | |||
| 121 | unsigned int max_sg_len; | 118 | unsigned int max_sg_len; |
| 122 | unsigned int max_sync_size; | 119 | unsigned int max_sync_size; |
| 123 | unsigned int max_sync_sg; | 120 | unsigned int max_sync_sg; |
| 124 | enum nx842_status status; | ||
| 125 | } __rcu *devdata; | 121 | } __rcu *devdata; |
| 126 | static DEFINE_SPINLOCK(devdata_mutex); | 122 | static DEFINE_SPINLOCK(devdata_mutex); |
| 127 | 123 | ||
| @@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev, | |||
| 230 | switch (csb->completion_code) { | 226 | switch (csb->completion_code) { |
| 231 | case 0: /* Completed without error */ | 227 | case 0: /* Completed without error */ |
| 232 | break; | 228 | break; |
| 233 | case 64: /* Target bytes > Source bytes during compression */ | 229 | case 64: /* Compression ok, but output larger than input */ |
| 230 | dev_dbg(dev, "%s: output size larger than input size\n", | ||
| 231 | __func__); | ||
| 232 | break; | ||
| 234 | case 13: /* Output buffer too small */ | 233 | case 13: /* Output buffer too small */ |
| 235 | dev_dbg(dev, "%s: Compression output larger than input\n", | 234 | dev_dbg(dev, "%s: Out of space in output buffer\n", |
| 236 | __func__); | 235 | __func__); |
| 237 | return -ENOSPC; | 236 | return -ENOSPC; |
| 238 | case 66: /* Input data contains an illegal template field */ | 237 | case 66: /* Input data contains an illegal template field */ |
| @@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata) | |||
| 537 | devdata->max_sync_size = 0; | 536 | devdata->max_sync_size = 0; |
| 538 | devdata->max_sync_sg = 0; | 537 | devdata->max_sync_sg = 0; |
| 539 | devdata->max_sg_len = 0; | 538 | devdata->max_sg_len = 0; |
| 540 | devdata->status = UNAVAILABLE; | ||
| 541 | return 0; | 539 | return 0; |
| 542 | } else | 540 | } else |
| 543 | return -ENOENT; | 541 | return -ENOENT; |
| 544 | } | 542 | } |
| 545 | 543 | ||
| 546 | /** | 544 | /** |
| 547 | * nx842_OF_upd_status -- Update the device info from OF status prop | 545 | * nx842_OF_upd_status -- Check the device info from OF status prop |
| 548 | * | 546 | * |
| 549 | * The status property indicates if the accelerator is enabled. If the | 547 | * The status property indicates if the accelerator is enabled. If the |
| 550 | * device is in the OF tree it indicates that the hardware is present. | 548 | * device is in the OF tree it indicates that the hardware is present. |
| 551 | * The status field indicates if the device is enabled when the status | 549 | * The status field indicates if the device is enabled when the status |
| 552 | * is 'okay'. Otherwise the device driver will be disabled. | 550 | * is 'okay'. Otherwise the device driver will be disabled. |
| 553 | * | 551 | * |
| 554 | * @devdata - struct nx842_devdata to update | ||
| 555 | * @prop - struct property point containing the maxsyncop for the update | 552 | * @prop - struct property point containing the maxsyncop for the update |
| 556 | * | 553 | * |
| 557 | * Returns: | 554 | * Returns: |
| 558 | * 0 - Device is available | 555 | * 0 - Device is available |
| 559 | * -EINVAL - Device is not available | 556 | * -ENODEV - Device is not available |
| 560 | */ | 557 | */ |
| 561 | static int nx842_OF_upd_status(struct nx842_devdata *devdata, | 558 | static int nx842_OF_upd_status(struct property *prop) |
| 562 | struct property *prop) { | 559 | { |
| 563 | int ret = 0; | ||
| 564 | const char *status = (const char *)prop->value; | 560 | const char *status = (const char *)prop->value; |
| 565 | 561 | ||
| 566 | if (!strncmp(status, "okay", (size_t)prop->length)) { | 562 | if (!strncmp(status, "okay", (size_t)prop->length)) |
| 567 | devdata->status = AVAILABLE; | 563 | return 0; |
| 568 | } else { | 564 | if (!strncmp(status, "disabled", (size_t)prop->length)) |
| 569 | dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", | 565 | return -ENODEV; |
| 570 | __func__, status); | 566 | dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status); |
| 571 | devdata->status = UNAVAILABLE; | ||
| 572 | } | ||
| 573 | 567 | ||
| 574 | return ret; | 568 | return -EINVAL; |
| 575 | } | 569 | } |
| 576 | 570 | ||
| 577 | /** | 571 | /** |
| @@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop) | |||
| 735 | int ret = 0; | 729 | int ret = 0; |
| 736 | unsigned long flags; | 730 | unsigned long flags; |
| 737 | 731 | ||
| 732 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
| 733 | if (!new_devdata) | ||
| 734 | return -ENOMEM; | ||
| 735 | |||
| 738 | spin_lock_irqsave(&devdata_mutex, flags); | 736 | spin_lock_irqsave(&devdata_mutex, flags); |
| 739 | old_devdata = rcu_dereference_check(devdata, | 737 | old_devdata = rcu_dereference_check(devdata, |
| 740 | lockdep_is_held(&devdata_mutex)); | 738 | lockdep_is_held(&devdata_mutex)); |
| @@ -744,16 +742,10 @@ static int nx842_OF_upd(struct property *new_prop) | |||
| 744 | if (!old_devdata || !of_node) { | 742 | if (!old_devdata || !of_node) { |
| 745 | pr_err("%s: device is not available\n", __func__); | 743 | pr_err("%s: device is not available\n", __func__); |
| 746 | spin_unlock_irqrestore(&devdata_mutex, flags); | 744 | spin_unlock_irqrestore(&devdata_mutex, flags); |
| 745 | kfree(new_devdata); | ||
| 747 | return -ENODEV; | 746 | return -ENODEV; |
| 748 | } | 747 | } |
| 749 | 748 | ||
| 750 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
| 751 | if (!new_devdata) { | ||
| 752 | dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
| 753 | ret = -ENOMEM; | ||
| 754 | goto error_out; | ||
| 755 | } | ||
| 756 | |||
| 757 | memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); | 749 | memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); |
| 758 | new_devdata->counters = old_devdata->counters; | 750 | new_devdata->counters = old_devdata->counters; |
| 759 | 751 | ||
| @@ -777,7 +769,7 @@ static int nx842_OF_upd(struct property *new_prop) | |||
| 777 | goto out; | 769 | goto out; |
| 778 | 770 | ||
| 779 | /* Perform property updates */ | 771 | /* Perform property updates */ |
| 780 | ret = nx842_OF_upd_status(new_devdata, status); | 772 | ret = nx842_OF_upd_status(status); |
| 781 | if (ret) | 773 | if (ret) |
| 782 | goto error_out; | 774 | goto error_out; |
| 783 | 775 | ||
| @@ -970,13 +962,43 @@ static struct nx842_driver nx842_pseries_driver = { | |||
| 970 | .decompress = nx842_pseries_decompress, | 962 | .decompress = nx842_pseries_decompress, |
| 971 | }; | 963 | }; |
| 972 | 964 | ||
| 973 | static int __init nx842_probe(struct vio_dev *viodev, | 965 | static int nx842_pseries_crypto_init(struct crypto_tfm *tfm) |
| 974 | const struct vio_device_id *id) | 966 | { |
| 967 | return nx842_crypto_init(tfm, &nx842_pseries_driver); | ||
| 968 | } | ||
| 969 | |||
| 970 | static struct crypto_alg nx842_pseries_alg = { | ||
| 971 | .cra_name = "842", | ||
| 972 | .cra_driver_name = "842-nx", | ||
| 973 | .cra_priority = 300, | ||
| 974 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | ||
| 975 | .cra_ctxsize = sizeof(struct nx842_crypto_ctx), | ||
| 976 | .cra_module = THIS_MODULE, | ||
| 977 | .cra_init = nx842_pseries_crypto_init, | ||
| 978 | .cra_exit = nx842_crypto_exit, | ||
| 979 | .cra_u = { .compress = { | ||
| 980 | .coa_compress = nx842_crypto_compress, | ||
| 981 | .coa_decompress = nx842_crypto_decompress } } | ||
| 982 | }; | ||
| 983 | |||
| 984 | static int nx842_probe(struct vio_dev *viodev, | ||
| 985 | const struct vio_device_id *id) | ||
| 975 | { | 986 | { |
| 976 | struct nx842_devdata *old_devdata, *new_devdata = NULL; | 987 | struct nx842_devdata *old_devdata, *new_devdata = NULL; |
| 977 | unsigned long flags; | 988 | unsigned long flags; |
| 978 | int ret = 0; | 989 | int ret = 0; |
| 979 | 990 | ||
| 991 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
| 992 | if (!new_devdata) | ||
| 993 | return -ENOMEM; | ||
| 994 | |||
| 995 | new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), | ||
| 996 | GFP_NOFS); | ||
| 997 | if (!new_devdata->counters) { | ||
| 998 | kfree(new_devdata); | ||
| 999 | return -ENOMEM; | ||
| 1000 | } | ||
| 1001 | |||
| 980 | spin_lock_irqsave(&devdata_mutex, flags); | 1002 | spin_lock_irqsave(&devdata_mutex, flags); |
| 981 | old_devdata = rcu_dereference_check(devdata, | 1003 | old_devdata = rcu_dereference_check(devdata, |
| 982 | lockdep_is_held(&devdata_mutex)); | 1004 | lockdep_is_held(&devdata_mutex)); |
| @@ -989,21 +1011,6 @@ static int __init nx842_probe(struct vio_dev *viodev, | |||
| 989 | 1011 | ||
| 990 | dev_set_drvdata(&viodev->dev, NULL); | 1012 | dev_set_drvdata(&viodev->dev, NULL); |
| 991 | 1013 | ||
| 992 | new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); | ||
| 993 | if (!new_devdata) { | ||
| 994 | dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); | ||
| 995 | ret = -ENOMEM; | ||
| 996 | goto error_unlock; | ||
| 997 | } | ||
| 998 | |||
| 999 | new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), | ||
| 1000 | GFP_NOFS); | ||
| 1001 | if (!new_devdata->counters) { | ||
| 1002 | dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); | ||
| 1003 | ret = -ENOMEM; | ||
| 1004 | goto error_unlock; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | new_devdata->vdev = viodev; | 1014 | new_devdata->vdev = viodev; |
| 1008 | new_devdata->dev = &viodev->dev; | 1015 | new_devdata->dev = &viodev->dev; |
| 1009 | nx842_OF_set_defaults(new_devdata); | 1016 | nx842_OF_set_defaults(new_devdata); |
| @@ -1016,9 +1023,12 @@ static int __init nx842_probe(struct vio_dev *viodev, | |||
| 1016 | of_reconfig_notifier_register(&nx842_of_nb); | 1023 | of_reconfig_notifier_register(&nx842_of_nb); |
| 1017 | 1024 | ||
| 1018 | ret = nx842_OF_upd(NULL); | 1025 | ret = nx842_OF_upd(NULL); |
| 1019 | if (ret && ret != -ENODEV) { | 1026 | if (ret) |
| 1020 | dev_err(&viodev->dev, "could not parse device tree. %d\n", ret); | 1027 | goto error; |
| 1021 | ret = -1; | 1028 | |
| 1029 | ret = crypto_register_alg(&nx842_pseries_alg); | ||
| 1030 | if (ret) { | ||
| 1031 | dev_err(&viodev->dev, "could not register comp alg: %d\n", ret); | ||
| 1022 | goto error; | 1032 | goto error; |
| 1023 | } | 1033 | } |
| 1024 | 1034 | ||
| @@ -1043,7 +1053,7 @@ error: | |||
| 1043 | return ret; | 1053 | return ret; |
| 1044 | } | 1054 | } |
| 1045 | 1055 | ||
| 1046 | static int __exit nx842_remove(struct vio_dev *viodev) | 1056 | static int nx842_remove(struct vio_dev *viodev) |
| 1047 | { | 1057 | { |
| 1048 | struct nx842_devdata *old_devdata; | 1058 | struct nx842_devdata *old_devdata; |
| 1049 | unsigned long flags; | 1059 | unsigned long flags; |
| @@ -1051,6 +1061,8 @@ static int __exit nx842_remove(struct vio_dev *viodev) | |||
| 1051 | pr_info("Removing IBM Power 842 compression device\n"); | 1061 | pr_info("Removing IBM Power 842 compression device\n"); |
| 1052 | sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); | 1062 | sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); |
| 1053 | 1063 | ||
| 1064 | crypto_unregister_alg(&nx842_pseries_alg); | ||
| 1065 | |||
| 1054 | spin_lock_irqsave(&devdata_mutex, flags); | 1066 | spin_lock_irqsave(&devdata_mutex, flags); |
| 1055 | old_devdata = rcu_dereference_check(devdata, | 1067 | old_devdata = rcu_dereference_check(devdata, |
| 1056 | lockdep_is_held(&devdata_mutex)); | 1068 | lockdep_is_held(&devdata_mutex)); |
| @@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = { | |||
| 1074 | static struct vio_driver nx842_vio_driver = { | 1086 | static struct vio_driver nx842_vio_driver = { |
| 1075 | .name = KBUILD_MODNAME, | 1087 | .name = KBUILD_MODNAME, |
| 1076 | .probe = nx842_probe, | 1088 | .probe = nx842_probe, |
| 1077 | .remove = __exit_p(nx842_remove), | 1089 | .remove = nx842_remove, |
| 1078 | .get_desired_dma = nx842_get_desired_dma, | 1090 | .get_desired_dma = nx842_get_desired_dma, |
| 1079 | .id_table = nx842_vio_driver_ids, | 1091 | .id_table = nx842_vio_driver_ids, |
| 1080 | }; | 1092 | }; |
| 1081 | 1093 | ||
| 1082 | static int __init nx842_init(void) | 1094 | static int __init nx842_pseries_init(void) |
| 1083 | { | 1095 | { |
| 1084 | struct nx842_devdata *new_devdata; | 1096 | struct nx842_devdata *new_devdata; |
| 1085 | int ret; | 1097 | int ret; |
| 1086 | 1098 | ||
| 1087 | pr_info("Registering IBM Power 842 compression driver\n"); | ||
| 1088 | |||
| 1089 | if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) | 1099 | if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) |
| 1090 | return -ENODEV; | 1100 | return -ENODEV; |
| 1091 | 1101 | ||
| @@ -1095,7 +1105,6 @@ static int __init nx842_init(void) | |||
| 1095 | pr_err("Could not allocate memory for device data\n"); | 1105 | pr_err("Could not allocate memory for device data\n"); |
| 1096 | return -ENOMEM; | 1106 | return -ENOMEM; |
| 1097 | } | 1107 | } |
| 1098 | new_devdata->status = UNAVAILABLE; | ||
| 1099 | RCU_INIT_POINTER(devdata, new_devdata); | 1108 | RCU_INIT_POINTER(devdata, new_devdata); |
| 1100 | 1109 | ||
| 1101 | ret = vio_register_driver(&nx842_vio_driver); | 1110 | ret = vio_register_driver(&nx842_vio_driver); |
| @@ -1106,24 +1115,18 @@ static int __init nx842_init(void) | |||
| 1106 | return ret; | 1115 | return ret; |
| 1107 | } | 1116 | } |
| 1108 | 1117 | ||
| 1109 | if (!nx842_platform_driver_set(&nx842_pseries_driver)) { | ||
| 1110 | vio_unregister_driver(&nx842_vio_driver); | ||
| 1111 | kfree(new_devdata); | ||
| 1112 | return -EEXIST; | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | return 0; | 1118 | return 0; |
| 1116 | } | 1119 | } |
| 1117 | 1120 | ||
| 1118 | module_init(nx842_init); | 1121 | module_init(nx842_pseries_init); |
| 1119 | 1122 | ||
| 1120 | static void __exit nx842_exit(void) | 1123 | static void __exit nx842_pseries_exit(void) |
| 1121 | { | 1124 | { |
| 1122 | struct nx842_devdata *old_devdata; | 1125 | struct nx842_devdata *old_devdata; |
| 1123 | unsigned long flags; | 1126 | unsigned long flags; |
| 1124 | 1127 | ||
| 1125 | pr_info("Exiting IBM Power 842 compression driver\n"); | 1128 | crypto_unregister_alg(&nx842_pseries_alg); |
| 1126 | nx842_platform_driver_unset(&nx842_pseries_driver); | 1129 | |
| 1127 | spin_lock_irqsave(&devdata_mutex, flags); | 1130 | spin_lock_irqsave(&devdata_mutex, flags); |
| 1128 | old_devdata = rcu_dereference_check(devdata, | 1131 | old_devdata = rcu_dereference_check(devdata, |
| 1129 | lockdep_is_held(&devdata_mutex)); | 1132 | lockdep_is_held(&devdata_mutex)); |
| @@ -1136,5 +1139,5 @@ static void __exit nx842_exit(void) | |||
| 1136 | vio_unregister_driver(&nx842_vio_driver); | 1139 | vio_unregister_driver(&nx842_vio_driver); |
| 1137 | } | 1140 | } |
| 1138 | 1141 | ||
| 1139 | module_exit(nx842_exit); | 1142 | module_exit(nx842_pseries_exit); |
| 1140 | 1143 | ||
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 6e5e0d60d0c8..046c1c45411b 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c | |||
| @@ -1,10 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Driver frontend for IBM Power 842 compression accelerator | 2 | * Cryptographic API for the NX-842 hardware compression. |
| 3 | * | ||
| 4 | * Copyright (C) 2015 Dan Streetman, IBM Corp | ||
| 5 | * | ||
| 6 | * Designer of the Power data compression engine: | ||
| 7 | * Bulent Abali <abali@us.ibm.com> | ||
| 8 | * | 3 | * |
| 9 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
| @@ -15,89 +10,522 @@ | |||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 17 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 13 | * | ||
| 14 | * Copyright (C) IBM Corporation, 2011-2015 | ||
| 15 | * | ||
| 16 | * Designer of the Power data compression engine: | ||
| 17 | * Bulent Abali <abali@us.ibm.com> | ||
| 18 | * | ||
| 19 | * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com> | ||
| 20 | * Seth Jennings <sjenning@linux.vnet.ibm.com> | ||
| 21 | * | ||
| 22 | * Rewrite: Dan Streetman <ddstreet@ieee.org> | ||
| 23 | * | ||
| 24 | * This is an interface to the NX-842 compression hardware in PowerPC | ||
| 25 | * processors. Most of the complexity of this drvier is due to the fact that | ||
| 26 | * the NX-842 compression hardware requires the input and output data buffers | ||
| 27 | * to be specifically aligned, to be a specific multiple in length, and within | ||
| 28 | * specific minimum and maximum lengths. Those restrictions, provided by the | ||
| 29 | * nx-842 driver via nx842_constraints, mean this driver must use bounce | ||
| 30 | * buffers and headers to correct misaligned in or out buffers, and to split | ||
| 31 | * input buffers that are too large. | ||
| 32 | * | ||
| 33 | * This driver will fall back to software decompression if the hardware | ||
| 34 | * decompression fails, so this driver's decompression should never fail as | ||
| 35 | * long as the provided compressed buffer is valid. Any compressed buffer | ||
| 36 | * created by this driver will have a header (except ones where the input | ||
| 37 | * perfectly matches the constraints); so users of this driver cannot simply | ||
| 38 | * pass a compressed buffer created by this driver over to the 842 software | ||
| 39 | * decompression library. Instead, users must use this driver to decompress; | ||
| 40 | * if the hardware fails or is unavailable, the compressed buffer will be | ||
| 41 | * parsed and the header removed, and the raw 842 buffer(s) passed to the 842 | ||
| 42 | * software decompression library. | ||
| 43 | * | ||
| 44 | * This does not fall back to software compression, however, since the caller | ||
| 45 | * of this function is specifically requesting hardware compression; if the | ||
| 46 | * hardware compression fails, the caller can fall back to software | ||
| 47 | * compression, and the raw 842 compressed buffer that the software compressor | ||
| 48 | * creates can be passed to this driver for hardware decompression; any | ||
| 49 | * buffer without our specific header magic is assumed to be a raw 842 buffer | ||
| 50 | * and passed directly to the hardware. Note that the software compression | ||
| 51 | * library will produce a compressed buffer that is incompatible with the | ||
| 52 | * hardware decompressor if the original input buffer length is not a multiple | ||
| 53 | * of 8; if such a compressed buffer is passed to this driver for | ||
| 54 | * decompression, the hardware will reject it and this driver will then pass | ||
| 55 | * it over to the software library for decompression. | ||
| 18 | */ | 56 | */ |
| 19 | 57 | ||
| 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 58 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 21 | 59 | ||
| 22 | #include "nx-842.h" | 60 | #include <linux/vmalloc.h> |
| 61 | #include <linux/sw842.h> | ||
| 62 | #include <linux/spinlock.h> | ||
| 23 | 63 | ||
| 24 | MODULE_LICENSE("GPL"); | 64 | #include "nx-842.h" |
| 25 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
| 26 | MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); | ||
| 27 | 65 | ||
| 28 | /** | 66 | /* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit |
| 29 | * nx842_constraints | 67 | * template (see lib/842/842.h), so this magic number will never appear at |
| 30 | * | 68 | * the start of a raw 842 compressed buffer. That is important, as any buffer |
| 31 | * This provides the driver's constraints. Different nx842 implementations | 69 | * passed to us without this magic is assumed to be a raw 842 compressed |
| 32 | * may have varying requirements. The constraints are: | 70 | * buffer, and passed directly to the hardware to decompress. |
| 33 | * @alignment: All buffers should be aligned to this | ||
| 34 | * @multiple: All buffer lengths should be a multiple of this | ||
| 35 | * @minimum: Buffer lengths must not be less than this amount | ||
| 36 | * @maximum: Buffer lengths must not be more than this amount | ||
| 37 | * | ||
| 38 | * The constraints apply to all buffers and lengths, both input and output, | ||
| 39 | * for both compression and decompression, except for the minimum which | ||
| 40 | * only applies to compression input and decompression output; the | ||
| 41 | * compressed data can be less than the minimum constraint. It can be | ||
| 42 | * assumed that compressed data will always adhere to the multiple | ||
| 43 | * constraint. | ||
| 44 | * | ||
| 45 | * The driver may succeed even if these constraints are violated; | ||
| 46 | * however the driver can return failure or suffer reduced performance | ||
| 47 | * if any constraint is not met. | ||
| 48 | */ | 71 | */ |
| 49 | int nx842_constraints(struct nx842_constraints *c) | 72 | #define NX842_CRYPTO_MAGIC (0xf842) |
| 73 | #define NX842_CRYPTO_HEADER_SIZE(g) \ | ||
| 74 | (sizeof(struct nx842_crypto_header) + \ | ||
| 75 | sizeof(struct nx842_crypto_header_group) * (g)) | ||
| 76 | #define NX842_CRYPTO_HEADER_MAX_SIZE \ | ||
| 77 | NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX) | ||
| 78 | |||
| 79 | /* bounce buffer size */ | ||
| 80 | #define BOUNCE_BUFFER_ORDER (2) | ||
| 81 | #define BOUNCE_BUFFER_SIZE \ | ||
| 82 | ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER)) | ||
| 83 | |||
| 84 | /* try longer on comp because we can fallback to sw decomp if hw is busy */ | ||
| 85 | #define COMP_BUSY_TIMEOUT (250) /* ms */ | ||
| 86 | #define DECOMP_BUSY_TIMEOUT (50) /* ms */ | ||
| 87 | |||
| 88 | struct nx842_crypto_param { | ||
| 89 | u8 *in; | ||
| 90 | unsigned int iremain; | ||
| 91 | u8 *out; | ||
| 92 | unsigned int oremain; | ||
| 93 | unsigned int ototal; | ||
| 94 | }; | ||
| 95 | |||
| 96 | static int update_param(struct nx842_crypto_param *p, | ||
| 97 | unsigned int slen, unsigned int dlen) | ||
| 50 | { | 98 | { |
| 51 | memcpy(c, nx842_platform_driver()->constraints, sizeof(*c)); | 99 | if (p->iremain < slen) |
| 100 | return -EOVERFLOW; | ||
| 101 | if (p->oremain < dlen) | ||
| 102 | return -ENOSPC; | ||
| 103 | |||
| 104 | p->in += slen; | ||
| 105 | p->iremain -= slen; | ||
| 106 | p->out += dlen; | ||
| 107 | p->oremain -= dlen; | ||
| 108 | p->ototal += dlen; | ||
| 109 | |||
| 52 | return 0; | 110 | return 0; |
| 53 | } | 111 | } |
| 54 | EXPORT_SYMBOL_GPL(nx842_constraints); | ||
| 55 | 112 | ||
| 56 | /** | 113 | int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) |
| 57 | * nx842_workmem_size | ||
| 58 | * | ||
| 59 | * Get the amount of working memory the driver requires. | ||
| 60 | */ | ||
| 61 | size_t nx842_workmem_size(void) | ||
| 62 | { | 114 | { |
| 63 | return nx842_platform_driver()->workmem_size; | 115 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); |
| 116 | |||
| 117 | spin_lock_init(&ctx->lock); | ||
| 118 | ctx->driver = driver; | ||
| 119 | ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL); | ||
| 120 | ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); | ||
| 121 | ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); | ||
| 122 | if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { | ||
| 123 | kfree(ctx->wmem); | ||
| 124 | free_page((unsigned long)ctx->sbounce); | ||
| 125 | free_page((unsigned long)ctx->dbounce); | ||
| 126 | return -ENOMEM; | ||
| 127 | } | ||
| 128 | |||
| 129 | return 0; | ||
| 64 | } | 130 | } |
| 65 | EXPORT_SYMBOL_GPL(nx842_workmem_size); | 131 | EXPORT_SYMBOL_GPL(nx842_crypto_init); |
| 66 | 132 | ||
| 67 | int nx842_compress(const unsigned char *in, unsigned int ilen, | 133 | void nx842_crypto_exit(struct crypto_tfm *tfm) |
| 68 | unsigned char *out, unsigned int *olen, void *wmem) | ||
| 69 | { | 134 | { |
| 70 | return nx842_platform_driver()->compress(in, ilen, out, olen, wmem); | 135 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); |
| 136 | |||
| 137 | kfree(ctx->wmem); | ||
| 138 | free_page((unsigned long)ctx->sbounce); | ||
| 139 | free_page((unsigned long)ctx->dbounce); | ||
| 71 | } | 140 | } |
| 72 | EXPORT_SYMBOL_GPL(nx842_compress); | 141 | EXPORT_SYMBOL_GPL(nx842_crypto_exit); |
| 73 | 142 | ||
| 74 | int nx842_decompress(const unsigned char *in, unsigned int ilen, | 143 | static void check_constraints(struct nx842_constraints *c) |
| 75 | unsigned char *out, unsigned int *olen, void *wmem) | ||
| 76 | { | 144 | { |
| 77 | return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem); | 145 | /* limit maximum, to always have enough bounce buffer to decompress */ |
| 146 | if (c->maximum > BOUNCE_BUFFER_SIZE) | ||
| 147 | c->maximum = BOUNCE_BUFFER_SIZE; | ||
| 78 | } | 148 | } |
| 79 | EXPORT_SYMBOL_GPL(nx842_decompress); | ||
| 80 | 149 | ||
| 81 | static __init int nx842_init(void) | 150 | static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) |
| 82 | { | 151 | { |
| 83 | request_module("nx-compress-powernv"); | 152 | int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); |
| 84 | request_module("nx-compress-pseries"); | ||
| 85 | 153 | ||
| 86 | /* we prevent loading if there's no platform driver, and we get the | 154 | /* compress should have added space for header */ |
| 87 | * module that set it so it won't unload, so we don't need to check | 155 | if (s > be16_to_cpu(hdr->group[0].padding)) { |
| 88 | * if it's set in any of the above functions | 156 | pr_err("Internal error: no space for header\n"); |
| 89 | */ | 157 | return -EINVAL; |
| 90 | if (!nx842_platform_driver_get()) { | ||
| 91 | pr_err("no nx842 driver found.\n"); | ||
| 92 | return -ENODEV; | ||
| 93 | } | 158 | } |
| 94 | 159 | ||
| 160 | memcpy(buf, hdr, s); | ||
| 161 | |||
| 162 | print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); | ||
| 163 | |||
| 95 | return 0; | 164 | return 0; |
| 96 | } | 165 | } |
| 97 | module_init(nx842_init); | ||
| 98 | 166 | ||
| 99 | static void __exit nx842_exit(void) | 167 | static int compress(struct nx842_crypto_ctx *ctx, |
| 168 | struct nx842_crypto_param *p, | ||
| 169 | struct nx842_crypto_header_group *g, | ||
| 170 | struct nx842_constraints *c, | ||
| 171 | u16 *ignore, | ||
| 172 | unsigned int hdrsize) | ||
| 173 | { | ||
| 174 | unsigned int slen = p->iremain, dlen = p->oremain, tmplen; | ||
| 175 | unsigned int adj_slen = slen; | ||
| 176 | u8 *src = p->in, *dst = p->out; | ||
| 177 | int ret, dskip = 0; | ||
| 178 | ktime_t timeout; | ||
| 179 | |||
| 180 | if (p->iremain == 0) | ||
| 181 | return -EOVERFLOW; | ||
| 182 | |||
| 183 | if (p->oremain == 0 || hdrsize + c->minimum > dlen) | ||
| 184 | return -ENOSPC; | ||
| 185 | |||
| 186 | if (slen % c->multiple) | ||
| 187 | adj_slen = round_up(slen, c->multiple); | ||
| 188 | if (slen < c->minimum) | ||
| 189 | adj_slen = c->minimum; | ||
| 190 | if (slen > c->maximum) | ||
| 191 | adj_slen = slen = c->maximum; | ||
| 192 | if (adj_slen > slen || (u64)src % c->alignment) { | ||
| 193 | adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); | ||
| 194 | slen = min(slen, BOUNCE_BUFFER_SIZE); | ||
| 195 | if (adj_slen > slen) | ||
| 196 | memset(ctx->sbounce + slen, 0, adj_slen - slen); | ||
| 197 | memcpy(ctx->sbounce, src, slen); | ||
| 198 | src = ctx->sbounce; | ||
| 199 | slen = adj_slen; | ||
| 200 | pr_debug("using comp sbounce buffer, len %x\n", slen); | ||
| 201 | } | ||
| 202 | |||
| 203 | dst += hdrsize; | ||
| 204 | dlen -= hdrsize; | ||
| 205 | |||
| 206 | if ((u64)dst % c->alignment) { | ||
| 207 | dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); | ||
| 208 | dst += dskip; | ||
| 209 | dlen -= dskip; | ||
| 210 | } | ||
| 211 | if (dlen % c->multiple) | ||
| 212 | dlen = round_down(dlen, c->multiple); | ||
| 213 | if (dlen < c->minimum) { | ||
| 214 | nospc: | ||
| 215 | dst = ctx->dbounce; | ||
| 216 | dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); | ||
| 217 | dlen = round_down(dlen, c->multiple); | ||
| 218 | dskip = 0; | ||
| 219 | pr_debug("using comp dbounce buffer, len %x\n", dlen); | ||
| 220 | } | ||
| 221 | if (dlen > c->maximum) | ||
| 222 | dlen = c->maximum; | ||
| 223 | |||
| 224 | tmplen = dlen; | ||
| 225 | timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); | ||
| 226 | do { | ||
| 227 | dlen = tmplen; /* reset dlen, if we're retrying */ | ||
| 228 | ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem); | ||
| 229 | /* possibly we should reduce the slen here, instead of | ||
| 230 | * retrying with the dbounce buffer? | ||
| 231 | */ | ||
| 232 | if (ret == -ENOSPC && dst != ctx->dbounce) | ||
| 233 | goto nospc; | ||
| 234 | } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); | ||
| 235 | if (ret) | ||
| 236 | return ret; | ||
| 237 | |||
| 238 | dskip += hdrsize; | ||
| 239 | |||
| 240 | if (dst == ctx->dbounce) | ||
| 241 | memcpy(p->out + dskip, dst, dlen); | ||
| 242 | |||
| 243 | g->padding = cpu_to_be16(dskip); | ||
| 244 | g->compressed_length = cpu_to_be32(dlen); | ||
| 245 | g->uncompressed_length = cpu_to_be32(slen); | ||
| 246 | |||
| 247 | if (p->iremain < slen) { | ||
| 248 | *ignore = slen - p->iremain; | ||
| 249 | slen = p->iremain; | ||
| 250 | } | ||
| 251 | |||
| 252 | pr_debug("compress slen %x ignore %x dlen %x padding %x\n", | ||
| 253 | slen, *ignore, dlen, dskip); | ||
| 254 | |||
| 255 | return update_param(p, slen, dskip + dlen); | ||
| 256 | } | ||
| 257 | |||
| 258 | int nx842_crypto_compress(struct crypto_tfm *tfm, | ||
| 259 | const u8 *src, unsigned int slen, | ||
| 260 | u8 *dst, unsigned int *dlen) | ||
| 261 | { | ||
| 262 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 263 | struct nx842_crypto_header *hdr = &ctx->header; | ||
| 264 | struct nx842_crypto_param p; | ||
| 265 | struct nx842_constraints c = *ctx->driver->constraints; | ||
| 266 | unsigned int groups, hdrsize, h; | ||
| 267 | int ret, n; | ||
| 268 | bool add_header; | ||
| 269 | u16 ignore = 0; | ||
| 270 | |||
| 271 | check_constraints(&c); | ||
| 272 | |||
| 273 | p.in = (u8 *)src; | ||
| 274 | p.iremain = slen; | ||
| 275 | p.out = dst; | ||
| 276 | p.oremain = *dlen; | ||
| 277 | p.ototal = 0; | ||
| 278 | |||
| 279 | *dlen = 0; | ||
| 280 | |||
| 281 | groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, | ||
| 282 | DIV_ROUND_UP(p.iremain, c.maximum)); | ||
| 283 | hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); | ||
| 284 | |||
| 285 | spin_lock_bh(&ctx->lock); | ||
| 286 | |||
| 287 | /* skip adding header if the buffers meet all constraints */ | ||
| 288 | add_header = (p.iremain % c.multiple || | ||
| 289 | p.iremain < c.minimum || | ||
| 290 | p.iremain > c.maximum || | ||
| 291 | (u64)p.in % c.alignment || | ||
| 292 | p.oremain % c.multiple || | ||
| 293 | p.oremain < c.minimum || | ||
| 294 | p.oremain > c.maximum || | ||
| 295 | (u64)p.out % c.alignment); | ||
| 296 | |||
| 297 | hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); | ||
| 298 | hdr->groups = 0; | ||
| 299 | hdr->ignore = 0; | ||
| 300 | |||
| 301 | while (p.iremain > 0) { | ||
| 302 | n = hdr->groups++; | ||
| 303 | ret = -ENOSPC; | ||
| 304 | if (hdr->groups > NX842_CRYPTO_GROUP_MAX) | ||
| 305 | goto unlock; | ||
| 306 | |||
| 307 | /* header goes before first group */ | ||
| 308 | h = !n && add_header ? hdrsize : 0; | ||
| 309 | |||
| 310 | if (ignore) | ||
| 311 | pr_warn("interal error, ignore is set %x\n", ignore); | ||
| 312 | |||
| 313 | ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); | ||
| 314 | if (ret) | ||
| 315 | goto unlock; | ||
| 316 | } | ||
| 317 | |||
| 318 | if (!add_header && hdr->groups > 1) { | ||
| 319 | pr_err("Internal error: No header but multiple groups\n"); | ||
| 320 | ret = -EINVAL; | ||
| 321 | goto unlock; | ||
| 322 | } | ||
| 323 | |||
| 324 | /* ignore indicates the input stream needed to be padded */ | ||
| 325 | hdr->ignore = cpu_to_be16(ignore); | ||
| 326 | if (ignore) | ||
| 327 | pr_debug("marked %d bytes as ignore\n", ignore); | ||
| 328 | |||
| 329 | if (add_header) | ||
| 330 | ret = nx842_crypto_add_header(hdr, dst); | ||
| 331 | if (ret) | ||
| 332 | goto unlock; | ||
| 333 | |||
| 334 | *dlen = p.ototal; | ||
| 335 | |||
| 336 | pr_debug("compress total slen %x dlen %x\n", slen, *dlen); | ||
| 337 | |||
| 338 | unlock: | ||
| 339 | spin_unlock_bh(&ctx->lock); | ||
| 340 | return ret; | ||
| 341 | } | ||
| 342 | EXPORT_SYMBOL_GPL(nx842_crypto_compress); | ||
| 343 | |||
| 344 | static int decompress(struct nx842_crypto_ctx *ctx, | ||
| 345 | struct nx842_crypto_param *p, | ||
| 346 | struct nx842_crypto_header_group *g, | ||
| 347 | struct nx842_constraints *c, | ||
| 348 | u16 ignore) | ||
| 100 | { | 349 | { |
| 101 | nx842_platform_driver_put(); | 350 | unsigned int slen = be32_to_cpu(g->compressed_length); |
| 351 | unsigned int required_len = be32_to_cpu(g->uncompressed_length); | ||
| 352 | unsigned int dlen = p->oremain, tmplen; | ||
| 353 | unsigned int adj_slen = slen; | ||
| 354 | u8 *src = p->in, *dst = p->out; | ||
| 355 | u16 padding = be16_to_cpu(g->padding); | ||
| 356 | int ret, spadding = 0, dpadding = 0; | ||
| 357 | ktime_t timeout; | ||
| 358 | |||
| 359 | if (!slen || !required_len) | ||
| 360 | return -EINVAL; | ||
| 361 | |||
| 362 | if (p->iremain <= 0 || padding + slen > p->iremain) | ||
| 363 | return -EOVERFLOW; | ||
| 364 | |||
| 365 | if (p->oremain <= 0 || required_len - ignore > p->oremain) | ||
| 366 | return -ENOSPC; | ||
| 367 | |||
| 368 | src += padding; | ||
| 369 | |||
| 370 | if (slen % c->multiple) | ||
| 371 | adj_slen = round_up(slen, c->multiple); | ||
| 372 | if (slen < c->minimum) | ||
| 373 | adj_slen = c->minimum; | ||
| 374 | if (slen > c->maximum) | ||
| 375 | goto usesw; | ||
| 376 | if (slen < adj_slen || (u64)src % c->alignment) { | ||
| 377 | /* we can append padding bytes because the 842 format defines | ||
| 378 | * an "end" template (see lib/842/842_decompress.c) and will | ||
| 379 | * ignore any bytes following it. | ||
| 380 | */ | ||
| 381 | if (slen < adj_slen) | ||
| 382 | memset(ctx->sbounce + slen, 0, adj_slen - slen); | ||
| 383 | memcpy(ctx->sbounce, src, slen); | ||
| 384 | src = ctx->sbounce; | ||
| 385 | spadding = adj_slen - slen; | ||
| 386 | slen = adj_slen; | ||
| 387 | pr_debug("using decomp sbounce buffer, len %x\n", slen); | ||
| 388 | } | ||
| 389 | |||
| 390 | if (dlen % c->multiple) | ||
| 391 | dlen = round_down(dlen, c->multiple); | ||
| 392 | if (dlen < required_len || (u64)dst % c->alignment) { | ||
| 393 | dst = ctx->dbounce; | ||
| 394 | dlen = min(required_len, BOUNCE_BUFFER_SIZE); | ||
| 395 | pr_debug("using decomp dbounce buffer, len %x\n", dlen); | ||
| 396 | } | ||
| 397 | if (dlen < c->minimum) | ||
| 398 | goto usesw; | ||
| 399 | if (dlen > c->maximum) | ||
| 400 | dlen = c->maximum; | ||
| 401 | |||
| 402 | tmplen = dlen; | ||
| 403 | timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); | ||
| 404 | do { | ||
| 405 | dlen = tmplen; /* reset dlen, if we're retrying */ | ||
| 406 | ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem); | ||
| 407 | } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); | ||
| 408 | if (ret) { | ||
| 409 | usesw: | ||
| 410 | /* reset everything, sw doesn't have constraints */ | ||
| 411 | src = p->in + padding; | ||
| 412 | slen = be32_to_cpu(g->compressed_length); | ||
| 413 | spadding = 0; | ||
| 414 | dst = p->out; | ||
| 415 | dlen = p->oremain; | ||
| 416 | dpadding = 0; | ||
| 417 | if (dlen < required_len) { /* have ignore bytes */ | ||
| 418 | dst = ctx->dbounce; | ||
| 419 | dlen = BOUNCE_BUFFER_SIZE; | ||
| 420 | } | ||
| 421 | pr_info_ratelimited("using software 842 decompression\n"); | ||
| 422 | ret = sw842_decompress(src, slen, dst, &dlen); | ||
| 423 | } | ||
| 424 | if (ret) | ||
| 425 | return ret; | ||
| 426 | |||
| 427 | slen -= spadding; | ||
| 428 | |||
| 429 | dlen -= ignore; | ||
| 430 | if (ignore) | ||
| 431 | pr_debug("ignoring last %x bytes\n", ignore); | ||
| 432 | |||
| 433 | if (dst == ctx->dbounce) | ||
| 434 | memcpy(p->out, dst, dlen); | ||
| 435 | |||
| 436 | pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", | ||
| 437 | slen, padding, dlen, ignore); | ||
| 438 | |||
| 439 | return update_param(p, slen + padding, dlen); | ||
| 102 | } | 440 | } |
| 103 | module_exit(nx842_exit); | 441 | |
| 442 | int nx842_crypto_decompress(struct crypto_tfm *tfm, | ||
| 443 | const u8 *src, unsigned int slen, | ||
| 444 | u8 *dst, unsigned int *dlen) | ||
| 445 | { | ||
| 446 | struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 447 | struct nx842_crypto_header *hdr; | ||
| 448 | struct nx842_crypto_param p; | ||
| 449 | struct nx842_constraints c = *ctx->driver->constraints; | ||
| 450 | int n, ret, hdr_len; | ||
| 451 | u16 ignore = 0; | ||
| 452 | |||
| 453 | check_constraints(&c); | ||
| 454 | |||
| 455 | p.in = (u8 *)src; | ||
| 456 | p.iremain = slen; | ||
| 457 | p.out = dst; | ||
| 458 | p.oremain = *dlen; | ||
| 459 | p.ototal = 0; | ||
| 460 | |||
| 461 | *dlen = 0; | ||
| 462 | |||
| 463 | hdr = (struct nx842_crypto_header *)src; | ||
| 464 | |||
| 465 | spin_lock_bh(&ctx->lock); | ||
| 466 | |||
| 467 | /* If it doesn't start with our header magic number, assume it's a raw | ||
| 468 | * 842 compressed buffer and pass it directly to the hardware driver | ||
| 469 | */ | ||
| 470 | if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { | ||
| 471 | struct nx842_crypto_header_group g = { | ||
| 472 | .padding = 0, | ||
| 473 | .compressed_length = cpu_to_be32(p.iremain), | ||
| 474 | .uncompressed_length = cpu_to_be32(p.oremain), | ||
| 475 | }; | ||
| 476 | |||
| 477 | ret = decompress(ctx, &p, &g, &c, 0); | ||
| 478 | if (ret) | ||
| 479 | goto unlock; | ||
| 480 | |||
| 481 | goto success; | ||
| 482 | } | ||
| 483 | |||
| 484 | if (!hdr->groups) { | ||
| 485 | pr_err("header has no groups\n"); | ||
| 486 | ret = -EINVAL; | ||
| 487 | goto unlock; | ||
| 488 | } | ||
| 489 | if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { | ||
| 490 | pr_err("header has too many groups %x, max %x\n", | ||
| 491 | hdr->groups, NX842_CRYPTO_GROUP_MAX); | ||
| 492 | ret = -EINVAL; | ||
| 493 | goto unlock; | ||
| 494 | } | ||
| 495 | |||
| 496 | hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); | ||
| 497 | if (hdr_len > slen) { | ||
| 498 | ret = -EOVERFLOW; | ||
| 499 | goto unlock; | ||
| 500 | } | ||
| 501 | |||
| 502 | memcpy(&ctx->header, src, hdr_len); | ||
| 503 | hdr = &ctx->header; | ||
| 504 | |||
| 505 | for (n = 0; n < hdr->groups; n++) { | ||
| 506 | /* ignore applies to last group */ | ||
| 507 | if (n + 1 == hdr->groups) | ||
| 508 | ignore = be16_to_cpu(hdr->ignore); | ||
| 509 | |||
| 510 | ret = decompress(ctx, &p, &hdr->group[n], &c, ignore); | ||
| 511 | if (ret) | ||
| 512 | goto unlock; | ||
| 513 | } | ||
| 514 | |||
| 515 | success: | ||
| 516 | *dlen = p.ototal; | ||
| 517 | |||
| 518 | pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); | ||
| 519 | |||
| 520 | ret = 0; | ||
| 521 | |||
| 522 | unlock: | ||
| 523 | spin_unlock_bh(&ctx->lock); | ||
| 524 | |||
| 525 | return ret; | ||
| 526 | } | ||
| 527 | EXPORT_SYMBOL_GPL(nx842_crypto_decompress); | ||
| 528 | |||
| 529 | MODULE_LICENSE("GPL"); | ||
| 530 | MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver"); | ||
| 531 | MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); | ||
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index ac0ea79d0f8b..a4eee3bba937 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h | |||
| @@ -3,8 +3,9 @@ | |||
| 3 | #define __NX_842_H__ | 3 | #define __NX_842_H__ |
| 4 | 4 | ||
| 5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
| 6 | #include <linux/init.h> | ||
| 6 | #include <linux/module.h> | 7 | #include <linux/module.h> |
| 7 | #include <linux/sw842.h> | 8 | #include <linux/crypto.h> |
| 8 | #include <linux/of.h> | 9 | #include <linux/of.h> |
| 9 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 10 | #include <linux/io.h> | 11 | #include <linux/io.h> |
| @@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr) | |||
| 104 | #define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) | 105 | #define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) |
| 105 | #define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) | 106 | #define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) |
| 106 | 107 | ||
| 108 | /** | ||
| 109 | * This provides the driver's constraints. Different nx842 implementations | ||
| 110 | * may have varying requirements. The constraints are: | ||
| 111 | * @alignment: All buffers should be aligned to this | ||
| 112 | * @multiple: All buffer lengths should be a multiple of this | ||
| 113 | * @minimum: Buffer lengths must not be less than this amount | ||
| 114 | * @maximum: Buffer lengths must not be more than this amount | ||
| 115 | * | ||
| 116 | * The constraints apply to all buffers and lengths, both input and output, | ||
| 117 | * for both compression and decompression, except for the minimum which | ||
| 118 | * only applies to compression input and decompression output; the | ||
| 119 | * compressed data can be less than the minimum constraint. It can be | ||
| 120 | * assumed that compressed data will always adhere to the multiple | ||
| 121 | * constraint. | ||
| 122 | * | ||
| 123 | * The driver may succeed even if these constraints are violated; | ||
| 124 | * however the driver can return failure or suffer reduced performance | ||
| 125 | * if any constraint is not met. | ||
| 126 | */ | ||
| 107 | struct nx842_constraints { | 127 | struct nx842_constraints { |
| 108 | int alignment; | 128 | int alignment; |
| 109 | int multiple; | 129 | int multiple; |
| @@ -126,19 +146,40 @@ struct nx842_driver { | |||
| 126 | void *wrkmem); | 146 | void *wrkmem); |
| 127 | }; | 147 | }; |
| 128 | 148 | ||
| 129 | struct nx842_driver *nx842_platform_driver(void); | 149 | struct nx842_crypto_header_group { |
| 130 | bool nx842_platform_driver_set(struct nx842_driver *driver); | 150 | __be16 padding; /* unused bytes at start of group */ |
| 131 | void nx842_platform_driver_unset(struct nx842_driver *driver); | 151 | __be32 compressed_length; /* compressed bytes in group */ |
| 132 | bool nx842_platform_driver_get(void); | 152 | __be32 uncompressed_length; /* bytes after decompression */ |
| 133 | void nx842_platform_driver_put(void); | 153 | } __packed; |
| 154 | |||
| 155 | struct nx842_crypto_header { | ||
| 156 | __be16 magic; /* NX842_CRYPTO_MAGIC */ | ||
| 157 | __be16 ignore; /* decompressed end bytes to ignore */ | ||
| 158 | u8 groups; /* total groups in this header */ | ||
| 159 | struct nx842_crypto_header_group group[]; | ||
| 160 | } __packed; | ||
| 134 | 161 | ||
| 135 | size_t nx842_workmem_size(void); | 162 | #define NX842_CRYPTO_GROUP_MAX (0x20) |
| 136 | 163 | ||
| 137 | int nx842_constraints(struct nx842_constraints *constraints); | 164 | struct nx842_crypto_ctx { |
| 165 | spinlock_t lock; | ||
| 166 | |||
| 167 | u8 *wmem; | ||
| 168 | u8 *sbounce, *dbounce; | ||
| 169 | |||
| 170 | struct nx842_crypto_header header; | ||
| 171 | struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; | ||
| 172 | |||
| 173 | struct nx842_driver *driver; | ||
| 174 | }; | ||
| 138 | 175 | ||
| 139 | int nx842_compress(const unsigned char *in, unsigned int in_len, | 176 | int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver); |
| 140 | unsigned char *out, unsigned int *out_len, void *wrkmem); | 177 | void nx842_crypto_exit(struct crypto_tfm *tfm); |
| 141 | int nx842_decompress(const unsigned char *in, unsigned int in_len, | 178 | int nx842_crypto_compress(struct crypto_tfm *tfm, |
| 142 | unsigned char *out, unsigned int *out_len, void *wrkmem); | 179 | const u8 *src, unsigned int slen, |
| 180 | u8 *dst, unsigned int *dlen); | ||
| 181 | int nx842_crypto_decompress(struct crypto_tfm *tfm, | ||
| 182 | const u8 *src, unsigned int slen, | ||
| 183 | u8 *dst, unsigned int *dlen); | ||
| 143 | 184 | ||
| 144 | #endif /* __NX_842_H__ */ | 185 | #endif /* __NX_842_H__ */ |
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index e4311ce0cd78..73ef49922788 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c | |||
| @@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm, | |||
| 94 | return -EINVAL; | 94 | return -EINVAL; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | crypto_aead_crt(tfm)->authsize = authsize; | ||
| 98 | |||
| 99 | return 0; | 97 | return 0; |
| 100 | } | 98 | } |
| 101 | 99 | ||
| @@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm, | |||
| 111 | return -EINVAL; | 109 | return -EINVAL; |
| 112 | } | 110 | } |
| 113 | 111 | ||
| 114 | crypto_aead_crt(tfm)->authsize = authsize; | ||
| 115 | |||
| 116 | return 0; | 112 | return 0; |
| 117 | } | 113 | } |
| 118 | 114 | ||
| @@ -174,6 +170,7 @@ static int generate_pat(u8 *iv, | |||
| 174 | struct nx_crypto_ctx *nx_ctx, | 170 | struct nx_crypto_ctx *nx_ctx, |
| 175 | unsigned int authsize, | 171 | unsigned int authsize, |
| 176 | unsigned int nbytes, | 172 | unsigned int nbytes, |
| 173 | unsigned int assoclen, | ||
| 177 | u8 *out) | 174 | u8 *out) |
| 178 | { | 175 | { |
| 179 | struct nx_sg *nx_insg = nx_ctx->in_sg; | 176 | struct nx_sg *nx_insg = nx_ctx->in_sg; |
| @@ -200,16 +197,16 @@ static int generate_pat(u8 *iv, | |||
| 200 | * greater than 2^32. | 197 | * greater than 2^32. |
| 201 | */ | 198 | */ |
| 202 | 199 | ||
| 203 | if (!req->assoclen) { | 200 | if (!assoclen) { |
| 204 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; | 201 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; |
| 205 | } else if (req->assoclen <= 14) { | 202 | } else if (assoclen <= 14) { |
| 206 | /* if associated data is 14 bytes or less, we do 1 GCM | 203 | /* if associated data is 14 bytes or less, we do 1 GCM |
| 207 | * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1, | 204 | * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1, |
| 208 | * which is fed in through the source buffers here */ | 205 | * which is fed in through the source buffers here */ |
| 209 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; | 206 | b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; |
| 210 | b1 = nx_ctx->priv.ccm.iauth_tag; | 207 | b1 = nx_ctx->priv.ccm.iauth_tag; |
| 211 | iauth_len = req->assoclen; | 208 | iauth_len = assoclen; |
| 212 | } else if (req->assoclen <= 65280) { | 209 | } else if (assoclen <= 65280) { |
| 213 | /* if associated data is less than (2^16 - 2^8), we construct | 210 | /* if associated data is less than (2^16 - 2^8), we construct |
| 214 | * B1 differently and feed in the associated data to a CCA | 211 | * B1 differently and feed in the associated data to a CCA |
| 215 | * operation */ | 212 | * operation */ |
| @@ -223,7 +220,7 @@ static int generate_pat(u8 *iv, | |||
| 223 | } | 220 | } |
| 224 | 221 | ||
| 225 | /* generate B0 */ | 222 | /* generate B0 */ |
| 226 | rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); | 223 | rc = generate_b0(iv, assoclen, authsize, nbytes, b0); |
| 227 | if (rc) | 224 | if (rc) |
| 228 | return rc; | 225 | return rc; |
| 229 | 226 | ||
| @@ -233,22 +230,22 @@ static int generate_pat(u8 *iv, | |||
| 233 | */ | 230 | */ |
| 234 | if (b1) { | 231 | if (b1) { |
| 235 | memset(b1, 0, 16); | 232 | memset(b1, 0, 16); |
| 236 | if (req->assoclen <= 65280) { | 233 | if (assoclen <= 65280) { |
| 237 | *(u16 *)b1 = (u16)req->assoclen; | 234 | *(u16 *)b1 = assoclen; |
| 238 | scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, | 235 | scatterwalk_map_and_copy(b1 + 2, req->src, 0, |
| 239 | iauth_len, SCATTERWALK_FROM_SG); | 236 | iauth_len, SCATTERWALK_FROM_SG); |
| 240 | } else { | 237 | } else { |
| 241 | *(u16 *)b1 = (u16)(0xfffe); | 238 | *(u16 *)b1 = (u16)(0xfffe); |
| 242 | *(u32 *)&b1[2] = (u32)req->assoclen; | 239 | *(u32 *)&b1[2] = assoclen; |
| 243 | scatterwalk_map_and_copy(b1 + 6, req->assoc, 0, | 240 | scatterwalk_map_and_copy(b1 + 6, req->src, 0, |
| 244 | iauth_len, SCATTERWALK_FROM_SG); | 241 | iauth_len, SCATTERWALK_FROM_SG); |
| 245 | } | 242 | } |
| 246 | } | 243 | } |
| 247 | 244 | ||
| 248 | /* now copy any remaining AAD to scatterlist and call nx... */ | 245 | /* now copy any remaining AAD to scatterlist and call nx... */ |
| 249 | if (!req->assoclen) { | 246 | if (!assoclen) { |
| 250 | return rc; | 247 | return rc; |
| 251 | } else if (req->assoclen <= 14) { | 248 | } else if (assoclen <= 14) { |
| 252 | unsigned int len = 16; | 249 | unsigned int len = 16; |
| 253 | 250 | ||
| 254 | nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); | 251 | nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); |
| @@ -280,7 +277,7 @@ static int generate_pat(u8 *iv, | |||
| 280 | return rc; | 277 | return rc; |
| 281 | 278 | ||
| 282 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 279 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
| 283 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | 280 | atomic64_add(assoclen, &nx_ctx->stats->aes_bytes); |
| 284 | 281 | ||
| 285 | } else { | 282 | } else { |
| 286 | unsigned int processed = 0, to_process; | 283 | unsigned int processed = 0, to_process; |
| @@ -294,15 +291,15 @@ static int generate_pat(u8 *iv, | |||
| 294 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); | 291 | nx_ctx->ap->databytelen/NX_PAGE_SIZE); |
| 295 | 292 | ||
| 296 | do { | 293 | do { |
| 297 | to_process = min_t(u32, req->assoclen - processed, | 294 | to_process = min_t(u32, assoclen - processed, |
| 298 | nx_ctx->ap->databytelen); | 295 | nx_ctx->ap->databytelen); |
| 299 | 296 | ||
| 300 | nx_insg = nx_walk_and_build(nx_ctx->in_sg, | 297 | nx_insg = nx_walk_and_build(nx_ctx->in_sg, |
| 301 | nx_ctx->ap->sglen, | 298 | nx_ctx->ap->sglen, |
| 302 | req->assoc, processed, | 299 | req->src, processed, |
| 303 | &to_process); | 300 | &to_process); |
| 304 | 301 | ||
| 305 | if ((to_process + processed) < req->assoclen) { | 302 | if ((to_process + processed) < assoclen) { |
| 306 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= | 303 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= |
| 307 | NX_FDM_INTERMEDIATE; | 304 | NX_FDM_INTERMEDIATE; |
| 308 | } else { | 305 | } else { |
| @@ -328,11 +325,10 @@ static int generate_pat(u8 *iv, | |||
| 328 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; | 325 | NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; |
| 329 | 326 | ||
| 330 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 327 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
| 331 | atomic64_add(req->assoclen, | 328 | atomic64_add(assoclen, &nx_ctx->stats->aes_bytes); |
| 332 | &(nx_ctx->stats->aes_bytes)); | ||
| 333 | 329 | ||
| 334 | processed += to_process; | 330 | processed += to_process; |
| 335 | } while (processed < req->assoclen); | 331 | } while (processed < assoclen); |
| 336 | 332 | ||
| 337 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; | 333 | result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; |
| 338 | } | 334 | } |
| @@ -343,7 +339,8 @@ static int generate_pat(u8 *iv, | |||
| 343 | } | 339 | } |
| 344 | 340 | ||
| 345 | static int ccm_nx_decrypt(struct aead_request *req, | 341 | static int ccm_nx_decrypt(struct aead_request *req, |
| 346 | struct blkcipher_desc *desc) | 342 | struct blkcipher_desc *desc, |
| 343 | unsigned int assoclen) | ||
| 347 | { | 344 | { |
| 348 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 345 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); |
| 349 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 346 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
| @@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request *req, | |||
| 360 | 357 | ||
| 361 | /* copy out the auth tag to compare with later */ | 358 | /* copy out the auth tag to compare with later */ |
| 362 | scatterwalk_map_and_copy(priv->oauth_tag, | 359 | scatterwalk_map_and_copy(priv->oauth_tag, |
| 363 | req->src, nbytes, authsize, | 360 | req->src, nbytes + req->assoclen, authsize, |
| 364 | SCATTERWALK_FROM_SG); | 361 | SCATTERWALK_FROM_SG); |
| 365 | 362 | ||
| 366 | rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, | 363 | rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen, |
| 367 | csbcpb->cpb.aes_ccm.in_pat_or_b0); | 364 | csbcpb->cpb.aes_ccm.in_pat_or_b0); |
| 368 | if (rc) | 365 | if (rc) |
| 369 | goto out; | 366 | goto out; |
| @@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request *req, | |||
| 383 | NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; | 380 | NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; |
| 384 | 381 | ||
| 385 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, | 382 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, |
| 386 | &to_process, processed, | 383 | &to_process, processed + req->assoclen, |
| 387 | csbcpb->cpb.aes_ccm.iv_or_ctr); | 384 | csbcpb->cpb.aes_ccm.iv_or_ctr); |
| 388 | if (rc) | 385 | if (rc) |
| 389 | goto out; | 386 | goto out; |
| 390 | 387 | ||
| @@ -420,7 +417,8 @@ out: | |||
| 420 | } | 417 | } |
| 421 | 418 | ||
| 422 | static int ccm_nx_encrypt(struct aead_request *req, | 419 | static int ccm_nx_encrypt(struct aead_request *req, |
| 423 | struct blkcipher_desc *desc) | 420 | struct blkcipher_desc *desc, |
| 421 | unsigned int assoclen) | ||
| 424 | { | 422 | { |
| 425 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 423 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); |
| 426 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 424 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
| @@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request *req, | |||
| 432 | 430 | ||
| 433 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); | 431 | spin_lock_irqsave(&nx_ctx->lock, irq_flags); |
| 434 | 432 | ||
| 435 | rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, | 433 | rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen, |
| 436 | csbcpb->cpb.aes_ccm.in_pat_or_b0); | 434 | csbcpb->cpb.aes_ccm.in_pat_or_b0); |
| 437 | if (rc) | 435 | if (rc) |
| 438 | goto out; | 436 | goto out; |
| @@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request *req, | |||
| 451 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; | 449 | NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; |
| 452 | 450 | ||
| 453 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, | 451 | rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, |
| 454 | &to_process, processed, | 452 | &to_process, processed + req->assoclen, |
| 455 | csbcpb->cpb.aes_ccm.iv_or_ctr); | 453 | csbcpb->cpb.aes_ccm.iv_or_ctr); |
| 456 | if (rc) | 454 | if (rc) |
| 457 | goto out; | 455 | goto out; |
| @@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request *req, | |||
| 483 | 481 | ||
| 484 | /* copy out the auth tag */ | 482 | /* copy out the auth tag */ |
| 485 | scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, | 483 | scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, |
| 486 | req->dst, nbytes, authsize, | 484 | req->dst, nbytes + req->assoclen, authsize, |
| 487 | SCATTERWALK_TO_SG); | 485 | SCATTERWALK_TO_SG); |
| 488 | 486 | ||
| 489 | out: | 487 | out: |
| @@ -503,9 +501,8 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req) | |||
| 503 | memcpy(iv + 4, req->iv, 8); | 501 | memcpy(iv + 4, req->iv, 8); |
| 504 | 502 | ||
| 505 | desc.info = iv; | 503 | desc.info = iv; |
| 506 | desc.tfm = (struct crypto_blkcipher *)req->base.tfm; | ||
| 507 | 504 | ||
| 508 | return ccm_nx_encrypt(req, &desc); | 505 | return ccm_nx_encrypt(req, &desc, req->assoclen - 8); |
| 509 | } | 506 | } |
| 510 | 507 | ||
| 511 | static int ccm_aes_nx_encrypt(struct aead_request *req) | 508 | static int ccm_aes_nx_encrypt(struct aead_request *req) |
| @@ -514,13 +511,12 @@ static int ccm_aes_nx_encrypt(struct aead_request *req) | |||
| 514 | int rc; | 511 | int rc; |
| 515 | 512 | ||
| 516 | desc.info = req->iv; | 513 | desc.info = req->iv; |
| 517 | desc.tfm = (struct crypto_blkcipher *)req->base.tfm; | ||
| 518 | 514 | ||
| 519 | rc = crypto_ccm_check_iv(desc.info); | 515 | rc = crypto_ccm_check_iv(desc.info); |
| 520 | if (rc) | 516 | if (rc) |
| 521 | return rc; | 517 | return rc; |
| 522 | 518 | ||
| 523 | return ccm_nx_encrypt(req, &desc); | 519 | return ccm_nx_encrypt(req, &desc, req->assoclen); |
| 524 | } | 520 | } |
| 525 | 521 | ||
| 526 | static int ccm4309_aes_nx_decrypt(struct aead_request *req) | 522 | static int ccm4309_aes_nx_decrypt(struct aead_request *req) |
| @@ -535,9 +531,8 @@ static int ccm4309_aes_nx_decrypt(struct aead_request *req) | |||
| 535 | memcpy(iv + 4, req->iv, 8); | 531 | memcpy(iv + 4, req->iv, 8); |
| 536 | 532 | ||
| 537 | desc.info = iv; | 533 | desc.info = iv; |
| 538 | desc.tfm = (struct crypto_blkcipher *)req->base.tfm; | ||
| 539 | 534 | ||
| 540 | return ccm_nx_decrypt(req, &desc); | 535 | return ccm_nx_decrypt(req, &desc, req->assoclen - 8); |
| 541 | } | 536 | } |
| 542 | 537 | ||
| 543 | static int ccm_aes_nx_decrypt(struct aead_request *req) | 538 | static int ccm_aes_nx_decrypt(struct aead_request *req) |
| @@ -546,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req) | |||
| 546 | int rc; | 541 | int rc; |
| 547 | 542 | ||
| 548 | desc.info = req->iv; | 543 | desc.info = req->iv; |
| 549 | desc.tfm = (struct crypto_blkcipher *)req->base.tfm; | ||
| 550 | 544 | ||
| 551 | rc = crypto_ccm_check_iv(desc.info); | 545 | rc = crypto_ccm_check_iv(desc.info); |
| 552 | if (rc) | 546 | if (rc) |
| 553 | return rc; | 547 | return rc; |
| 554 | 548 | ||
| 555 | return ccm_nx_decrypt(req, &desc); | 549 | return ccm_nx_decrypt(req, &desc, req->assoclen); |
| 556 | } | 550 | } |
| 557 | 551 | ||
| 558 | /* tell the block cipher walk routines that this is a stream cipher by | 552 | /* tell the block cipher walk routines that this is a stream cipher by |
| @@ -560,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req) | |||
| 560 | * during encrypt/decrypt doesn't solve this problem, because it calls | 554 | * during encrypt/decrypt doesn't solve this problem, because it calls |
| 561 | * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, | 555 | * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, |
| 562 | * but instead uses this tfm->blocksize. */ | 556 | * but instead uses this tfm->blocksize. */ |
| 563 | struct crypto_alg nx_ccm_aes_alg = { | 557 | struct aead_alg nx_ccm_aes_alg = { |
| 564 | .cra_name = "ccm(aes)", | 558 | .base = { |
| 565 | .cra_driver_name = "ccm-aes-nx", | 559 | .cra_name = "ccm(aes)", |
| 566 | .cra_priority = 300, | 560 | .cra_driver_name = "ccm-aes-nx", |
| 567 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 561 | .cra_priority = 300, |
| 568 | CRYPTO_ALG_NEED_FALLBACK, | 562 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
| 569 | .cra_blocksize = 1, | 563 | .cra_blocksize = 1, |
| 570 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 564 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
| 571 | .cra_type = &crypto_aead_type, | 565 | .cra_module = THIS_MODULE, |
| 572 | .cra_module = THIS_MODULE, | 566 | }, |
| 573 | .cra_init = nx_crypto_ctx_aes_ccm_init, | 567 | .init = nx_crypto_ctx_aes_ccm_init, |
| 574 | .cra_exit = nx_crypto_ctx_exit, | 568 | .exit = nx_crypto_ctx_aead_exit, |
| 575 | .cra_aead = { | 569 | .ivsize = AES_BLOCK_SIZE, |
| 576 | .ivsize = AES_BLOCK_SIZE, | 570 | .maxauthsize = AES_BLOCK_SIZE, |
| 577 | .maxauthsize = AES_BLOCK_SIZE, | 571 | .setkey = ccm_aes_nx_set_key, |
| 578 | .setkey = ccm_aes_nx_set_key, | 572 | .setauthsize = ccm_aes_nx_setauthsize, |
| 579 | .setauthsize = ccm_aes_nx_setauthsize, | 573 | .encrypt = ccm_aes_nx_encrypt, |
| 580 | .encrypt = ccm_aes_nx_encrypt, | 574 | .decrypt = ccm_aes_nx_decrypt, |
| 581 | .decrypt = ccm_aes_nx_decrypt, | ||
| 582 | } | ||
| 583 | }; | 575 | }; |
| 584 | 576 | ||
| 585 | struct crypto_alg nx_ccm4309_aes_alg = { | 577 | struct aead_alg nx_ccm4309_aes_alg = { |
| 586 | .cra_name = "rfc4309(ccm(aes))", | 578 | .base = { |
| 587 | .cra_driver_name = "rfc4309-ccm-aes-nx", | 579 | .cra_name = "rfc4309(ccm(aes))", |
| 588 | .cra_priority = 300, | 580 | .cra_driver_name = "rfc4309-ccm-aes-nx", |
| 589 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 581 | .cra_priority = 300, |
| 590 | CRYPTO_ALG_NEED_FALLBACK, | 582 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
| 591 | .cra_blocksize = 1, | 583 | .cra_blocksize = 1, |
| 592 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | 584 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), |
| 593 | .cra_type = &crypto_nivaead_type, | 585 | .cra_module = THIS_MODULE, |
| 594 | .cra_module = THIS_MODULE, | 586 | }, |
| 595 | .cra_init = nx_crypto_ctx_aes_ccm_init, | 587 | .init = nx_crypto_ctx_aes_ccm_init, |
| 596 | .cra_exit = nx_crypto_ctx_exit, | 588 | .exit = nx_crypto_ctx_aead_exit, |
| 597 | .cra_aead = { | 589 | .ivsize = 8, |
| 598 | .ivsize = 8, | 590 | .maxauthsize = AES_BLOCK_SIZE, |
| 599 | .maxauthsize = AES_BLOCK_SIZE, | 591 | .setkey = ccm4309_aes_nx_set_key, |
| 600 | .setkey = ccm4309_aes_nx_set_key, | 592 | .setauthsize = ccm4309_aes_nx_setauthsize, |
| 601 | .setauthsize = ccm4309_aes_nx_setauthsize, | 593 | .encrypt = ccm4309_aes_nx_encrypt, |
| 602 | .encrypt = ccm4309_aes_nx_encrypt, | 594 | .decrypt = ccm4309_aes_nx_decrypt, |
| 603 | .decrypt = ccm4309_aes_nx_decrypt, | ||
| 604 | .geniv = "seqiv", | ||
| 605 | } | ||
| 606 | }; | 595 | }; |
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index dd7e9f3f5b6b..898c0a280511 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c | |||
| @@ -144,27 +144,6 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc, | |||
| 144 | return ctr_aes_nx_crypt(desc, dst, src, nbytes); | 144 | return ctr_aes_nx_crypt(desc, dst, src, nbytes); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | struct crypto_alg nx_ctr_aes_alg = { | ||
| 148 | .cra_name = "ctr(aes)", | ||
| 149 | .cra_driver_name = "ctr-aes-nx", | ||
| 150 | .cra_priority = 300, | ||
| 151 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
| 152 | .cra_blocksize = 1, | ||
| 153 | .cra_ctxsize = sizeof(struct nx_crypto_ctx), | ||
| 154 | .cra_type = &crypto_blkcipher_type, | ||
| 155 | .cra_module = THIS_MODULE, | ||
| 156 | .cra_init = nx_crypto_ctx_aes_ctr_init, | ||
| 157 | .cra_exit = nx_crypto_ctx_exit, | ||
| 158 | .cra_blkcipher = { | ||
| 159 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 160 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 161 | .ivsize = AES_BLOCK_SIZE, | ||
| 162 | .setkey = ctr_aes_nx_set_key, | ||
| 163 | .encrypt = ctr_aes_nx_crypt, | ||
| 164 | .decrypt = ctr_aes_nx_crypt, | ||
| 165 | } | ||
| 166 | }; | ||
| 167 | |||
| 168 | struct crypto_alg nx_ctr3686_aes_alg = { | 147 | struct crypto_alg nx_ctr3686_aes_alg = { |
| 169 | .cra_name = "rfc3686(ctr(aes))", | 148 | .cra_name = "rfc3686(ctr(aes))", |
| 170 | .cra_driver_name = "rfc3686-ctr-aes-nx", | 149 | .cra_driver_name = "rfc3686-ctr-aes-nx", |
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 92c993f08213..eee624f589b6 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c | |||
| @@ -21,11 +21,9 @@ | |||
| 21 | 21 | ||
| 22 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
| 23 | #include <crypto/aes.h> | 23 | #include <crypto/aes.h> |
| 24 | #include <crypto/algapi.h> | ||
| 25 | #include <crypto/scatterwalk.h> | 24 | #include <crypto/scatterwalk.h> |
| 26 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 27 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| 28 | #include <linux/crypto.h> | ||
| 29 | #include <asm/vio.h> | 27 | #include <asm/vio.h> |
| 30 | 28 | ||
| 31 | #include "nx_csbcpb.h" | 29 | #include "nx_csbcpb.h" |
| @@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm, | |||
| 36 | const u8 *in_key, | 34 | const u8 *in_key, |
| 37 | unsigned int key_len) | 35 | unsigned int key_len) |
| 38 | { | 36 | { |
| 39 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); | 37 | struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); |
| 40 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 38 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
| 41 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; | 39 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; |
| 42 | 40 | ||
| @@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, | |||
| 75 | const u8 *in_key, | 73 | const u8 *in_key, |
| 76 | unsigned int key_len) | 74 | unsigned int key_len) |
| 77 | { | 75 | { |
| 78 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); | 76 | struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); |
| 79 | char *nonce = nx_ctx->priv.gcm.nonce; | 77 | char *nonce = nx_ctx->priv.gcm.nonce; |
| 80 | int rc; | 78 | int rc; |
| 81 | 79 | ||
| @@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, | |||
| 110 | 108 | ||
| 111 | static int nx_gca(struct nx_crypto_ctx *nx_ctx, | 109 | static int nx_gca(struct nx_crypto_ctx *nx_ctx, |
| 112 | struct aead_request *req, | 110 | struct aead_request *req, |
| 113 | u8 *out) | 111 | u8 *out, |
| 112 | unsigned int assoclen) | ||
| 114 | { | 113 | { |
| 115 | int rc; | 114 | int rc; |
| 116 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; | 115 | struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; |
| 117 | struct scatter_walk walk; | 116 | struct scatter_walk walk; |
| 118 | struct nx_sg *nx_sg = nx_ctx->in_sg; | 117 | struct nx_sg *nx_sg = nx_ctx->in_sg; |
| 119 | unsigned int nbytes = req->assoclen; | 118 | unsigned int nbytes = assoclen; |
| 120 | unsigned int processed = 0, to_process; | 119 | unsigned int processed = 0, to_process; |
| 121 | unsigned int max_sg_len; | 120 | unsigned int max_sg_len; |
| 122 | 121 | ||
| @@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, | |||
| 167 | NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; | 166 | NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; |
| 168 | 167 | ||
| 169 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 168 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
| 170 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | 169 | atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); |
| 171 | 170 | ||
| 172 | processed += to_process; | 171 | processed += to_process; |
| 173 | } while (processed < nbytes); | 172 | } while (processed < nbytes); |
| @@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, | |||
| 177 | return rc; | 176 | return rc; |
| 178 | } | 177 | } |
| 179 | 178 | ||
| 180 | static int gmac(struct aead_request *req, struct blkcipher_desc *desc) | 179 | static int gmac(struct aead_request *req, struct blkcipher_desc *desc, |
| 180 | unsigned int assoclen) | ||
| 181 | { | 181 | { |
| 182 | int rc; | 182 | int rc; |
| 183 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 183 | struct nx_crypto_ctx *nx_ctx = |
| 184 | crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
| 184 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 185 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
| 185 | struct nx_sg *nx_sg; | 186 | struct nx_sg *nx_sg; |
| 186 | unsigned int nbytes = req->assoclen; | 187 | unsigned int nbytes = assoclen; |
| 187 | unsigned int processed = 0, to_process; | 188 | unsigned int processed = 0, to_process; |
| 188 | unsigned int max_sg_len; | 189 | unsigned int max_sg_len; |
| 189 | 190 | ||
| @@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc) | |||
| 238 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; | 239 | NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; |
| 239 | 240 | ||
| 240 | atomic_inc(&(nx_ctx->stats->aes_ops)); | 241 | atomic_inc(&(nx_ctx->stats->aes_ops)); |
| 241 | atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); | 242 | atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); |
| 242 | 243 | ||
| 243 | processed += to_process; | 244 | processed += to_process; |
| 244 | } while (processed < nbytes); | 245 | } while (processed < nbytes); |
| @@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, | |||
| 253 | int enc) | 254 | int enc) |
| 254 | { | 255 | { |
| 255 | int rc; | 256 | int rc; |
| 256 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 257 | struct nx_crypto_ctx *nx_ctx = |
| 258 | crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
| 257 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 259 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
| 258 | char out[AES_BLOCK_SIZE]; | 260 | char out[AES_BLOCK_SIZE]; |
| 259 | struct nx_sg *in_sg, *out_sg; | 261 | struct nx_sg *in_sg, *out_sg; |
| @@ -314,9 +316,11 @@ out: | |||
| 314 | return rc; | 316 | return rc; |
| 315 | } | 317 | } |
| 316 | 318 | ||
| 317 | static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | 319 | static int gcm_aes_nx_crypt(struct aead_request *req, int enc, |
| 320 | unsigned int assoclen) | ||
| 318 | { | 321 | { |
| 319 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 322 | struct nx_crypto_ctx *nx_ctx = |
| 323 | crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
| 320 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); | 324 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
| 321 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; | 325 | struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; |
| 322 | struct blkcipher_desc desc; | 326 | struct blkcipher_desc desc; |
| @@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
| 332 | *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; | 336 | *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; |
| 333 | 337 | ||
| 334 | if (nbytes == 0) { | 338 | if (nbytes == 0) { |
| 335 | if (req->assoclen == 0) | 339 | if (assoclen == 0) |
| 336 | rc = gcm_empty(req, &desc, enc); | 340 | rc = gcm_empty(req, &desc, enc); |
| 337 | else | 341 | else |
| 338 | rc = gmac(req, &desc); | 342 | rc = gmac(req, &desc, assoclen); |
| 339 | if (rc) | 343 | if (rc) |
| 340 | goto out; | 344 | goto out; |
| 341 | else | 345 | else |
| @@ -343,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
| 343 | } | 347 | } |
| 344 | 348 | ||
| 345 | /* Process associated data */ | 349 | /* Process associated data */ |
| 346 | csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; | 350 | csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8; |
| 347 | if (req->assoclen) { | 351 | if (assoclen) { |
| 348 | rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); | 352 | rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad, |
| 353 | assoclen); | ||
| 349 | if (rc) | 354 | if (rc) |
| 350 | goto out; | 355 | goto out; |
| 351 | } | 356 | } |
| @@ -363,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) | |||
| 363 | to_process = nbytes - processed; | 368 | to_process = nbytes - processed; |
| 364 | 369 | ||
| 365 | csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; | 370 | csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; |
| 366 | desc.tfm = (struct crypto_blkcipher *) req->base.tfm; | ||
| 367 | rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, | 371 | rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, |
| 368 | req->src, &to_process, | 372 | req->src, &to_process, |
| 369 | processed + req->assoclen, | 373 | processed + req->assoclen, |
| @@ -430,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req) | |||
| 430 | 434 | ||
| 431 | memcpy(iv, req->iv, 12); | 435 | memcpy(iv, req->iv, 12); |
| 432 | 436 | ||
| 433 | return gcm_aes_nx_crypt(req, 1); | 437 | return gcm_aes_nx_crypt(req, 1, req->assoclen); |
| 434 | } | 438 | } |
| 435 | 439 | ||
| 436 | static int gcm_aes_nx_decrypt(struct aead_request *req) | 440 | static int gcm_aes_nx_decrypt(struct aead_request *req) |
| @@ -440,12 +444,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req) | |||
| 440 | 444 | ||
| 441 | memcpy(iv, req->iv, 12); | 445 | memcpy(iv, req->iv, 12); |
| 442 | 446 | ||
| 443 | return gcm_aes_nx_crypt(req, 0); | 447 | return gcm_aes_nx_crypt(req, 0, req->assoclen); |
| 444 | } | 448 | } |
| 445 | 449 | ||
| 446 | static int gcm4106_aes_nx_encrypt(struct aead_request *req) | 450 | static int gcm4106_aes_nx_encrypt(struct aead_request *req) |
| 447 | { | 451 | { |
| 448 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 452 | struct nx_crypto_ctx *nx_ctx = |
| 453 | crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
| 449 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); | 454 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
| 450 | char *iv = rctx->iv; | 455 | char *iv = rctx->iv; |
| 451 | char *nonce = nx_ctx->priv.gcm.nonce; | 456 | char *nonce = nx_ctx->priv.gcm.nonce; |
| @@ -453,12 +458,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req) | |||
| 453 | memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); | 458 | memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); |
| 454 | memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); | 459 | memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); |
| 455 | 460 | ||
| 456 | return gcm_aes_nx_crypt(req, 1); | 461 | if (req->assoclen < 8) |
| 462 | return -EINVAL; | ||
| 463 | |||
| 464 | return gcm_aes_nx_crypt(req, 1, req->assoclen - 8); | ||
| 457 | } | 465 | } |
| 458 | 466 | ||
| 459 | static int gcm4106_aes_nx_decrypt(struct aead_request *req) | 467 | static int gcm4106_aes_nx_decrypt(struct aead_request *req) |
| 460 | { | 468 | { |
| 461 | struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); | 469 | struct nx_crypto_ctx *nx_ctx = |
| 470 | crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
| 462 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); | 471 | struct nx_gcm_rctx *rctx = aead_request_ctx(req); |
| 463 | char *iv = rctx->iv; | 472 | char *iv = rctx->iv; |
| 464 | char *nonce = nx_ctx->priv.gcm.nonce; | 473 | char *nonce = nx_ctx->priv.gcm.nonce; |
| @@ -466,7 +475,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req) | |||
| 466 | memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); | 475 | memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); |
| 467 | memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); | 476 | memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); |
| 468 | 477 | ||
| 469 | return gcm_aes_nx_crypt(req, 0); | 478 | if (req->assoclen < 8) |
| 479 | return -EINVAL; | ||
| 480 | |||
| 481 | return gcm_aes_nx_crypt(req, 0, req->assoclen - 8); | ||
| 470 | } | 482 | } |
| 471 | 483 | ||
| 472 | /* tell the block cipher walk routines that this is a stream cipher by | 484 | /* tell the block cipher walk routines that this is a stream cipher by |
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 436971343ff7..0794f1cc0018 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
| @@ -596,13 +596,9 @@ static int nx_register_algs(void) | |||
| 596 | if (rc) | 596 | if (rc) |
| 597 | goto out_unreg_ecb; | 597 | goto out_unreg_ecb; |
| 598 | 598 | ||
| 599 | rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); | ||
| 600 | if (rc) | ||
| 601 | goto out_unreg_cbc; | ||
| 602 | |||
| 603 | rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); | 599 | rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); |
| 604 | if (rc) | 600 | if (rc) |
| 605 | goto out_unreg_ctr; | 601 | goto out_unreg_cbc; |
| 606 | 602 | ||
| 607 | rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); | 603 | rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
| 608 | if (rc) | 604 | if (rc) |
| @@ -612,11 +608,11 @@ static int nx_register_algs(void) | |||
| 612 | if (rc) | 608 | if (rc) |
| 613 | goto out_unreg_gcm; | 609 | goto out_unreg_gcm; |
| 614 | 610 | ||
| 615 | rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); | 611 | rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
| 616 | if (rc) | 612 | if (rc) |
| 617 | goto out_unreg_gcm4106; | 613 | goto out_unreg_gcm4106; |
| 618 | 614 | ||
| 619 | rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); | 615 | rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
| 620 | if (rc) | 616 | if (rc) |
| 621 | goto out_unreg_ccm; | 617 | goto out_unreg_ccm; |
| 622 | 618 | ||
| @@ -644,17 +640,15 @@ out_unreg_s256: | |||
| 644 | nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, | 640 | nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, |
| 645 | NX_PROPS_SHA256); | 641 | NX_PROPS_SHA256); |
| 646 | out_unreg_ccm4309: | 642 | out_unreg_ccm4309: |
| 647 | nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); | 643 | nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
| 648 | out_unreg_ccm: | 644 | out_unreg_ccm: |
| 649 | nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); | 645 | nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
| 650 | out_unreg_gcm4106: | 646 | out_unreg_gcm4106: |
| 651 | nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); | 647 | nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
| 652 | out_unreg_gcm: | 648 | out_unreg_gcm: |
| 653 | nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); | 649 | nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); |
| 654 | out_unreg_ctr3686: | 650 | out_unreg_ctr3686: |
| 655 | nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); | 651 | nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); |
| 656 | out_unreg_ctr: | ||
| 657 | nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); | ||
| 658 | out_unreg_cbc: | 652 | out_unreg_cbc: |
| 659 | nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); | 653 | nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); |
| 660 | out_unreg_ecb: | 654 | out_unreg_ecb: |
| @@ -711,11 +705,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) | |||
| 711 | } | 705 | } |
| 712 | 706 | ||
| 713 | /* entry points from the crypto tfm initializers */ | 707 | /* entry points from the crypto tfm initializers */ |
| 714 | int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) | 708 | int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm) |
| 715 | { | 709 | { |
| 716 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 710 | crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx)); |
| 717 | sizeof(struct nx_ccm_rctx)); | 711 | return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, |
| 718 | return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, | ||
| 719 | NX_MODE_AES_CCM); | 712 | NX_MODE_AES_CCM); |
| 720 | } | 713 | } |
| 721 | 714 | ||
| @@ -813,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev) | |||
| 813 | NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); | 806 | NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); |
| 814 | nx_unregister_shash(&nx_shash_sha256_alg, | 807 | nx_unregister_shash(&nx_shash_sha256_alg, |
| 815 | NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); | 808 | NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); |
| 816 | nx_unregister_alg(&nx_ccm4309_aes_alg, | 809 | nx_unregister_aead(&nx_ccm4309_aes_alg, |
| 817 | NX_FC_AES, NX_MODE_AES_CCM); | 810 | NX_FC_AES, NX_MODE_AES_CCM); |
| 818 | nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); | 811 | nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); |
| 819 | nx_unregister_aead(&nx_gcm4106_aes_alg, | 812 | nx_unregister_aead(&nx_gcm4106_aes_alg, |
| 820 | NX_FC_AES, NX_MODE_AES_GCM); | 813 | NX_FC_AES, NX_MODE_AES_GCM); |
| 821 | nx_unregister_aead(&nx_gcm_aes_alg, | 814 | nx_unregister_aead(&nx_gcm_aes_alg, |
| 822 | NX_FC_AES, NX_MODE_AES_GCM); | 815 | NX_FC_AES, NX_MODE_AES_GCM); |
| 823 | nx_unregister_alg(&nx_ctr3686_aes_alg, | 816 | nx_unregister_alg(&nx_ctr3686_aes_alg, |
| 824 | NX_FC_AES, NX_MODE_AES_CTR); | 817 | NX_FC_AES, NX_MODE_AES_CTR); |
| 825 | nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); | ||
| 826 | nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); | 818 | nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); |
| 827 | nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); | 819 | nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); |
| 828 | } | 820 | } |
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index cdff03a42ae7..9347878d4f30 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h | |||
| @@ -149,8 +149,10 @@ struct nx_crypto_ctx { | |||
| 149 | } priv; | 149 | } priv; |
| 150 | }; | 150 | }; |
| 151 | 151 | ||
| 152 | struct crypto_aead; | ||
| 153 | |||
| 152 | /* prototypes */ | 154 | /* prototypes */ |
| 153 | int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); | 155 | int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm); |
| 154 | int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); | 156 | int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); |
| 155 | int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); | 157 | int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); |
| 156 | int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); | 158 | int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); |
| @@ -187,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg; | |||
| 187 | extern struct crypto_alg nx_ecb_aes_alg; | 189 | extern struct crypto_alg nx_ecb_aes_alg; |
| 188 | extern struct aead_alg nx_gcm_aes_alg; | 190 | extern struct aead_alg nx_gcm_aes_alg; |
| 189 | extern struct aead_alg nx_gcm4106_aes_alg; | 191 | extern struct aead_alg nx_gcm4106_aes_alg; |
| 190 | extern struct crypto_alg nx_ctr_aes_alg; | ||
| 191 | extern struct crypto_alg nx_ctr3686_aes_alg; | 192 | extern struct crypto_alg nx_ctr3686_aes_alg; |
| 192 | extern struct crypto_alg nx_ccm_aes_alg; | 193 | extern struct aead_alg nx_ccm_aes_alg; |
| 193 | extern struct crypto_alg nx_ccm4309_aes_alg; | 194 | extern struct aead_alg nx_ccm4309_aes_alg; |
| 194 | extern struct shash_alg nx_shash_aes_xcbc_alg; | 195 | extern struct shash_alg nx_shash_aes_xcbc_alg; |
| 195 | extern struct shash_alg nx_shash_sha512_alg; | 196 | extern struct shash_alg nx_shash_sha512_alg; |
| 196 | extern struct shash_alg nx_shash_sha256_alg; | 197 | extern struct shash_alg nx_shash_sha256_alg; |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 9a28b7e07c71..eba23147c0ee 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
| @@ -52,29 +52,30 @@ | |||
| 52 | #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) | 52 | #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) |
| 53 | 53 | ||
| 54 | #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) | 54 | #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) |
| 55 | #define AES_REG_CTRL_CTR_WIDTH_MASK (3 << 7) | 55 | #define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7) |
| 56 | #define AES_REG_CTRL_CTR_WIDTH_32 (0 << 7) | 56 | #define AES_REG_CTRL_CTR_WIDTH_32 0 |
| 57 | #define AES_REG_CTRL_CTR_WIDTH_64 (1 << 7) | 57 | #define AES_REG_CTRL_CTR_WIDTH_64 BIT(7) |
| 58 | #define AES_REG_CTRL_CTR_WIDTH_96 (2 << 7) | 58 | #define AES_REG_CTRL_CTR_WIDTH_96 BIT(8) |
| 59 | #define AES_REG_CTRL_CTR_WIDTH_128 (3 << 7) | 59 | #define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7) |
| 60 | #define AES_REG_CTRL_CTR (1 << 6) | 60 | #define AES_REG_CTRL_CTR BIT(6) |
| 61 | #define AES_REG_CTRL_CBC (1 << 5) | 61 | #define AES_REG_CTRL_CBC BIT(5) |
| 62 | #define AES_REG_CTRL_KEY_SIZE (3 << 3) | 62 | #define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3) |
| 63 | #define AES_REG_CTRL_DIRECTION (1 << 2) | 63 | #define AES_REG_CTRL_DIRECTION BIT(2) |
| 64 | #define AES_REG_CTRL_INPUT_READY (1 << 1) | 64 | #define AES_REG_CTRL_INPUT_READY BIT(1) |
| 65 | #define AES_REG_CTRL_OUTPUT_READY (1 << 0) | 65 | #define AES_REG_CTRL_OUTPUT_READY BIT(0) |
| 66 | #define AES_REG_CTRL_MASK GENMASK(24, 2) | ||
| 66 | 67 | ||
| 67 | #define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) | 68 | #define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) |
| 68 | 69 | ||
| 69 | #define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) | 70 | #define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) |
| 70 | 71 | ||
| 71 | #define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) | 72 | #define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) |
| 72 | #define AES_REG_MASK_SIDLE (1 << 6) | 73 | #define AES_REG_MASK_SIDLE BIT(6) |
| 73 | #define AES_REG_MASK_START (1 << 5) | 74 | #define AES_REG_MASK_START BIT(5) |
| 74 | #define AES_REG_MASK_DMA_OUT_EN (1 << 3) | 75 | #define AES_REG_MASK_DMA_OUT_EN BIT(3) |
| 75 | #define AES_REG_MASK_DMA_IN_EN (1 << 2) | 76 | #define AES_REG_MASK_DMA_IN_EN BIT(2) |
| 76 | #define AES_REG_MASK_SOFTRESET (1 << 1) | 77 | #define AES_REG_MASK_SOFTRESET BIT(1) |
| 77 | #define AES_REG_AUTOIDLE (1 << 0) | 78 | #define AES_REG_AUTOIDLE BIT(0) |
| 78 | 79 | ||
| 79 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) | 80 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) |
| 80 | 81 | ||
| @@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
| 254 | { | 255 | { |
| 255 | unsigned int key32; | 256 | unsigned int key32; |
| 256 | int i, err; | 257 | int i, err; |
| 257 | u32 val, mask = 0; | 258 | u32 val; |
| 258 | 259 | ||
| 259 | err = omap_aes_hw_init(dd); | 260 | err = omap_aes_hw_init(dd); |
| 260 | if (err) | 261 | if (err) |
| @@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
| 274 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); | 275 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); |
| 275 | if (dd->flags & FLAGS_CBC) | 276 | if (dd->flags & FLAGS_CBC) |
| 276 | val |= AES_REG_CTRL_CBC; | 277 | val |= AES_REG_CTRL_CBC; |
| 277 | if (dd->flags & FLAGS_CTR) { | 278 | if (dd->flags & FLAGS_CTR) |
| 278 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; | 279 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; |
| 279 | mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; | 280 | |
| 280 | } | ||
| 281 | if (dd->flags & FLAGS_ENCRYPT) | 281 | if (dd->flags & FLAGS_ENCRYPT) |
| 282 | val |= AES_REG_CTRL_DIRECTION; | 282 | val |= AES_REG_CTRL_DIRECTION; |
| 283 | 283 | ||
| 284 | mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | | 284 | omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK); |
| 285 | AES_REG_CTRL_KEY_SIZE; | ||
| 286 | |||
| 287 | omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask); | ||
| 288 | 285 | ||
| 289 | return 0; | 286 | return 0; |
| 290 | } | 287 | } |
| @@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total) | |||
| 558 | { | 555 | { |
| 559 | int len = 0; | 556 | int len = 0; |
| 560 | 557 | ||
| 558 | if (!IS_ALIGNED(total, AES_BLOCK_SIZE)) | ||
| 559 | return -EINVAL; | ||
| 560 | |||
| 561 | while (sg) { | 561 | while (sg) { |
| 562 | if (!IS_ALIGNED(sg->offset, 4)) | 562 | if (!IS_ALIGNED(sg->offset, 4)) |
| 563 | return -1; | 563 | return -1; |
| @@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total) | |||
| 577 | static int omap_aes_copy_sgs(struct omap_aes_dev *dd) | 577 | static int omap_aes_copy_sgs(struct omap_aes_dev *dd) |
| 578 | { | 578 | { |
| 579 | void *buf_in, *buf_out; | 579 | void *buf_in, *buf_out; |
| 580 | int pages; | 580 | int pages, total; |
| 581 | 581 | ||
| 582 | pages = get_order(dd->total); | 582 | total = ALIGN(dd->total, AES_BLOCK_SIZE); |
| 583 | pages = get_order(total); | ||
| 583 | 584 | ||
| 584 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); | 585 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); |
| 585 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); | 586 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); |
| @@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd) | |||
| 594 | sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); | 595 | sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); |
| 595 | 596 | ||
| 596 | sg_init_table(&dd->in_sgl, 1); | 597 | sg_init_table(&dd->in_sgl, 1); |
| 597 | sg_set_buf(&dd->in_sgl, buf_in, dd->total); | 598 | sg_set_buf(&dd->in_sgl, buf_in, total); |
| 598 | dd->in_sg = &dd->in_sgl; | 599 | dd->in_sg = &dd->in_sgl; |
| 599 | 600 | ||
| 600 | sg_init_table(&dd->out_sgl, 1); | 601 | sg_init_table(&dd->out_sgl, 1); |
| 601 | sg_set_buf(&dd->out_sgl, buf_out, dd->total); | 602 | sg_set_buf(&dd->out_sgl, buf_out, total); |
| 602 | dd->out_sg = &dd->out_sgl; | 603 | dd->out_sg = &dd->out_sgl; |
| 603 | 604 | ||
| 604 | return 0; | 605 | return 0; |
| @@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, | |||
| 611 | struct omap_aes_ctx *ctx; | 612 | struct omap_aes_ctx *ctx; |
| 612 | struct omap_aes_reqctx *rctx; | 613 | struct omap_aes_reqctx *rctx; |
| 613 | unsigned long flags; | 614 | unsigned long flags; |
| 614 | int err, ret = 0; | 615 | int err, ret = 0, len; |
| 615 | 616 | ||
| 616 | spin_lock_irqsave(&dd->lock, flags); | 617 | spin_lock_irqsave(&dd->lock, flags); |
| 617 | if (req) | 618 | if (req) |
| @@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, | |||
| 650 | dd->sgs_copied = 0; | 651 | dd->sgs_copied = 0; |
| 651 | } | 652 | } |
| 652 | 653 | ||
| 653 | dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); | 654 | len = ALIGN(dd->total, AES_BLOCK_SIZE); |
| 654 | dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); | 655 | dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len); |
| 656 | dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len); | ||
| 655 | BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); | 657 | BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); |
| 656 | 658 | ||
| 657 | rctx = ablkcipher_request_ctx(req); | 659 | rctx = ablkcipher_request_ctx(req); |
| @@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data) | |||
| 678 | { | 680 | { |
| 679 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; | 681 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; |
| 680 | void *buf_in, *buf_out; | 682 | void *buf_in, *buf_out; |
| 681 | int pages; | 683 | int pages, len; |
| 682 | 684 | ||
| 683 | pr_debug("enter done_task\n"); | 685 | pr_debug("enter done_task\n"); |
| 684 | 686 | ||
| @@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data) | |||
| 697 | 699 | ||
| 698 | sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); | 700 | sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); |
| 699 | 701 | ||
| 700 | pages = get_order(dd->total_save); | 702 | len = ALIGN(dd->total_save, AES_BLOCK_SIZE); |
| 703 | pages = get_order(len); | ||
| 701 | free_pages((unsigned long)buf_in, pages); | 704 | free_pages((unsigned long)buf_in, pages); |
| 702 | free_pages((unsigned long)buf_out, pages); | 705 | free_pages((unsigned long)buf_out, pages); |
| 703 | } | 706 | } |
| @@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
| 726 | !!(mode & FLAGS_ENCRYPT), | 729 | !!(mode & FLAGS_ENCRYPT), |
| 727 | !!(mode & FLAGS_CBC)); | 730 | !!(mode & FLAGS_CBC)); |
| 728 | 731 | ||
| 729 | if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { | ||
| 730 | pr_err("request size is not exact amount of AES blocks\n"); | ||
| 731 | return -EINVAL; | ||
| 732 | } | ||
| 733 | |||
| 734 | dd = omap_aes_find_dev(ctx); | 732 | dd = omap_aes_find_dev(ctx); |
| 735 | if (!dd) | 733 | if (!dd) |
| 736 | return -ENODEV; | 734 | return -ENODEV; |
| @@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = { | |||
| 833 | { | 831 | { |
| 834 | .cra_name = "ecb(aes)", | 832 | .cra_name = "ecb(aes)", |
| 835 | .cra_driver_name = "ecb-aes-omap", | 833 | .cra_driver_name = "ecb-aes-omap", |
| 836 | .cra_priority = 100, | 834 | .cra_priority = 300, |
| 837 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 835 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 838 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 836 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 839 | CRYPTO_ALG_ASYNC, | 837 | CRYPTO_ALG_ASYNC, |
| @@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = { | |||
| 855 | { | 853 | { |
| 856 | .cra_name = "cbc(aes)", | 854 | .cra_name = "cbc(aes)", |
| 857 | .cra_driver_name = "cbc-aes-omap", | 855 | .cra_driver_name = "cbc-aes-omap", |
| 858 | .cra_priority = 100, | 856 | .cra_priority = 300, |
| 859 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 857 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 860 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 858 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 861 | CRYPTO_ALG_ASYNC, | 859 | CRYPTO_ALG_ASYNC, |
| @@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = { | |||
| 881 | { | 879 | { |
| 882 | .cra_name = "ctr(aes)", | 880 | .cra_name = "ctr(aes)", |
| 883 | .cra_driver_name = "ctr-aes-omap", | 881 | .cra_driver_name = "ctr-aes-omap", |
| 884 | .cra_priority = 100, | 882 | .cra_priority = 300, |
| 885 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 883 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
| 886 | CRYPTO_ALG_KERN_DRIVER_ONLY | | 884 | CRYPTO_ALG_KERN_DRIVER_ONLY | |
| 887 | CRYPTO_ALG_ASYNC, | 885 | CRYPTO_ALG_ASYNC, |
| @@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id) | |||
| 1046 | } | 1044 | } |
| 1047 | } | 1045 | } |
| 1048 | 1046 | ||
| 1049 | dd->total -= AES_BLOCK_SIZE; | 1047 | dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total); |
| 1050 | |||
| 1051 | BUG_ON(dd->total < 0); | ||
| 1052 | 1048 | ||
| 1053 | /* Clear IRQ status */ | 1049 | /* Clear IRQ status */ |
| 1054 | status &= ~AES_REG_IRQ_DATA_OUT; | 1050 | status &= ~AES_REG_IRQ_DATA_OUT; |
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 4f56f3681abd..da36de26a4dc 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
| @@ -99,11 +99,16 @@ struct spacc_req { | |||
| 99 | dma_addr_t src_addr, dst_addr; | 99 | dma_addr_t src_addr, dst_addr; |
| 100 | struct spacc_ddt *src_ddt, *dst_ddt; | 100 | struct spacc_ddt *src_ddt, *dst_ddt; |
| 101 | void (*complete)(struct spacc_req *req); | 101 | void (*complete)(struct spacc_req *req); |
| 102 | }; | ||
| 102 | 103 | ||
| 103 | /* AEAD specific bits. */ | 104 | struct spacc_aead { |
| 104 | u8 *giv; | 105 | unsigned long ctrl_default; |
| 105 | size_t giv_len; | 106 | unsigned long type; |
| 106 | dma_addr_t giv_pa; | 107 | struct aead_alg alg; |
| 108 | struct spacc_engine *engine; | ||
| 109 | struct list_head entry; | ||
| 110 | int key_offs; | ||
| 111 | int iv_offs; | ||
| 107 | }; | 112 | }; |
| 108 | 113 | ||
| 109 | struct spacc_engine { | 114 | struct spacc_engine { |
| @@ -121,6 +126,9 @@ struct spacc_engine { | |||
| 121 | struct spacc_alg *algs; | 126 | struct spacc_alg *algs; |
| 122 | unsigned num_algs; | 127 | unsigned num_algs; |
| 123 | struct list_head registered_algs; | 128 | struct list_head registered_algs; |
| 129 | struct spacc_aead *aeads; | ||
| 130 | unsigned num_aeads; | ||
| 131 | struct list_head registered_aeads; | ||
| 124 | size_t cipher_pg_sz; | 132 | size_t cipher_pg_sz; |
| 125 | size_t hash_pg_sz; | 133 | size_t hash_pg_sz; |
| 126 | const char *name; | 134 | const char *name; |
| @@ -174,8 +182,6 @@ struct spacc_aead_ctx { | |||
| 174 | u8 cipher_key_len; | 182 | u8 cipher_key_len; |
| 175 | u8 hash_key_len; | 183 | u8 hash_key_len; |
| 176 | struct crypto_aead *sw_cipher; | 184 | struct crypto_aead *sw_cipher; |
| 177 | size_t auth_size; | ||
| 178 | u8 salt[AES_BLOCK_SIZE]; | ||
| 179 | }; | 185 | }; |
| 180 | 186 | ||
| 181 | static int spacc_ablk_submit(struct spacc_req *req); | 187 | static int spacc_ablk_submit(struct spacc_req *req); |
| @@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) | |||
| 185 | return alg ? container_of(alg, struct spacc_alg, alg) : NULL; | 191 | return alg ? container_of(alg, struct spacc_alg, alg) : NULL; |
| 186 | } | 192 | } |
| 187 | 193 | ||
| 194 | static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg) | ||
| 195 | { | ||
| 196 | return container_of(alg, struct spacc_aead, alg); | ||
| 197 | } | ||
| 198 | |||
| 188 | static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) | 199 | static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) |
| 189 | { | 200 | { |
| 190 | u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); | 201 | u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); |
| @@ -310,120 +321,117 @@ out: | |||
| 310 | return NULL; | 321 | return NULL; |
| 311 | } | 322 | } |
| 312 | 323 | ||
| 313 | static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) | 324 | static int spacc_aead_make_ddts(struct aead_request *areq) |
| 314 | { | 325 | { |
| 315 | struct aead_request *areq = container_of(req->req, struct aead_request, | 326 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); |
| 316 | base); | 327 | struct spacc_req *req = aead_request_ctx(areq); |
| 317 | struct spacc_engine *engine = req->engine; | 328 | struct spacc_engine *engine = req->engine; |
| 318 | struct spacc_ddt *src_ddt, *dst_ddt; | 329 | struct spacc_ddt *src_ddt, *dst_ddt; |
| 319 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); | ||
| 320 | unsigned nents = sg_count(areq->src, areq->cryptlen); | ||
| 321 | unsigned total; | 330 | unsigned total; |
| 322 | dma_addr_t iv_addr; | 331 | unsigned int src_nents, dst_nents; |
| 323 | struct scatterlist *cur; | 332 | struct scatterlist *cur; |
| 324 | int i, dst_ents, src_ents, assoc_ents; | 333 | int i, dst_ents, src_ents; |
| 325 | u8 *iv = giv ? giv : areq->iv; | 334 | |
| 335 | total = areq->assoclen + areq->cryptlen; | ||
| 336 | if (req->is_encrypt) | ||
| 337 | total += crypto_aead_authsize(aead); | ||
| 338 | |||
| 339 | src_nents = sg_count(areq->src, total); | ||
| 340 | if (src_nents + 1 > MAX_DDT_LEN) | ||
| 341 | return -E2BIG; | ||
| 342 | |||
| 343 | dst_nents = 0; | ||
| 344 | if (areq->src != areq->dst) { | ||
| 345 | dst_nents = sg_count(areq->dst, total); | ||
| 346 | if (src_nents + 1 > MAX_DDT_LEN) | ||
| 347 | return -E2BIG; | ||
| 348 | } | ||
| 326 | 349 | ||
| 327 | src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); | 350 | src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); |
| 328 | if (!src_ddt) | 351 | if (!src_ddt) |
| 329 | return -ENOMEM; | 352 | goto err; |
| 330 | 353 | ||
| 331 | dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); | 354 | dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); |
| 332 | if (!dst_ddt) { | 355 | if (!dst_ddt) |
| 333 | dma_pool_free(engine->req_pool, src_ddt, req->src_addr); | 356 | goto err_free_src; |
| 334 | return -ENOMEM; | ||
| 335 | } | ||
| 336 | 357 | ||
| 337 | req->src_ddt = src_ddt; | 358 | req->src_ddt = src_ddt; |
| 338 | req->dst_ddt = dst_ddt; | 359 | req->dst_ddt = dst_ddt; |
| 339 | 360 | ||
| 340 | assoc_ents = dma_map_sg(engine->dev, areq->assoc, | 361 | if (dst_nents) { |
| 341 | sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); | 362 | src_ents = dma_map_sg(engine->dev, areq->src, src_nents, |
| 342 | if (areq->src != areq->dst) { | ||
| 343 | src_ents = dma_map_sg(engine->dev, areq->src, nents, | ||
| 344 | DMA_TO_DEVICE); | 363 | DMA_TO_DEVICE); |
| 345 | dst_ents = dma_map_sg(engine->dev, areq->dst, nents, | 364 | if (!src_ents) |
| 365 | goto err_free_dst; | ||
| 366 | |||
| 367 | dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents, | ||
| 346 | DMA_FROM_DEVICE); | 368 | DMA_FROM_DEVICE); |
| 369 | |||
| 370 | if (!dst_ents) { | ||
| 371 | dma_unmap_sg(engine->dev, areq->src, src_nents, | ||
| 372 | DMA_TO_DEVICE); | ||
| 373 | goto err_free_dst; | ||
| 374 | } | ||
| 347 | } else { | 375 | } else { |
| 348 | src_ents = dma_map_sg(engine->dev, areq->src, nents, | 376 | src_ents = dma_map_sg(engine->dev, areq->src, src_nents, |
| 349 | DMA_BIDIRECTIONAL); | 377 | DMA_BIDIRECTIONAL); |
| 350 | dst_ents = 0; | 378 | if (!src_ents) |
| 379 | goto err_free_dst; | ||
| 380 | dst_ents = src_ents; | ||
| 351 | } | 381 | } |
| 352 | 382 | ||
| 353 | /* | 383 | /* |
| 354 | * Map the IV/GIV. For the GIV it needs to be bidirectional as it is | 384 | * Now map in the payload for the source and destination and terminate |
| 355 | * formed by the crypto block and sent as the ESP IV for IPSEC. | 385 | * with the NULL pointers. |
| 356 | */ | 386 | */ |
| 357 | iv_addr = dma_map_single(engine->dev, iv, ivsize, | 387 | for_each_sg(areq->src, cur, src_ents, i) |
| 358 | giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); | 388 | ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); |
| 359 | req->giv_pa = iv_addr; | ||
| 360 | 389 | ||
| 361 | /* | 390 | /* For decryption we need to skip the associated data. */ |
| 362 | * Map the associated data. For decryption we don't copy the | 391 | total = req->is_encrypt ? 0 : areq->assoclen; |
| 363 | * associated data. | 392 | for_each_sg(areq->dst, cur, dst_ents, i) { |
| 364 | */ | ||
| 365 | total = areq->assoclen; | ||
| 366 | for_each_sg(areq->assoc, cur, assoc_ents, i) { | ||
| 367 | unsigned len = sg_dma_len(cur); | 393 | unsigned len = sg_dma_len(cur); |
| 368 | 394 | ||
| 369 | if (len > total) | 395 | if (len <= total) { |
| 370 | len = total; | 396 | total -= len; |
| 371 | 397 | continue; | |
| 372 | total -= len; | 398 | } |
| 373 | 399 | ||
| 374 | ddt_set(src_ddt++, sg_dma_address(cur), len); | 400 | ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total); |
| 375 | if (req->is_encrypt) | ||
| 376 | ddt_set(dst_ddt++, sg_dma_address(cur), len); | ||
| 377 | } | 401 | } |
| 378 | ddt_set(src_ddt++, iv_addr, ivsize); | ||
| 379 | |||
| 380 | if (giv || req->is_encrypt) | ||
| 381 | ddt_set(dst_ddt++, iv_addr, ivsize); | ||
| 382 | |||
| 383 | /* | ||
| 384 | * Now map in the payload for the source and destination and terminate | ||
| 385 | * with the NULL pointers. | ||
| 386 | */ | ||
| 387 | for_each_sg(areq->src, cur, src_ents, i) { | ||
| 388 | ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); | ||
| 389 | if (areq->src == areq->dst) | ||
| 390 | ddt_set(dst_ddt++, sg_dma_address(cur), | ||
| 391 | sg_dma_len(cur)); | ||
| 392 | } | ||
| 393 | |||
| 394 | for_each_sg(areq->dst, cur, dst_ents, i) | ||
| 395 | ddt_set(dst_ddt++, sg_dma_address(cur), | ||
| 396 | sg_dma_len(cur)); | ||
| 397 | 402 | ||
| 398 | ddt_set(src_ddt, 0, 0); | 403 | ddt_set(src_ddt, 0, 0); |
| 399 | ddt_set(dst_ddt, 0, 0); | 404 | ddt_set(dst_ddt, 0, 0); |
| 400 | 405 | ||
| 401 | return 0; | 406 | return 0; |
| 407 | |||
| 408 | err_free_dst: | ||
| 409 | dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr); | ||
| 410 | err_free_src: | ||
| 411 | dma_pool_free(engine->req_pool, src_ddt, req->src_addr); | ||
| 412 | err: | ||
| 413 | return -ENOMEM; | ||
| 402 | } | 414 | } |
| 403 | 415 | ||
| 404 | static void spacc_aead_free_ddts(struct spacc_req *req) | 416 | static void spacc_aead_free_ddts(struct spacc_req *req) |
| 405 | { | 417 | { |
| 406 | struct aead_request *areq = container_of(req->req, struct aead_request, | 418 | struct aead_request *areq = container_of(req->req, struct aead_request, |
| 407 | base); | 419 | base); |
| 408 | struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg); | 420 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); |
| 409 | struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm); | 421 | unsigned total = areq->assoclen + areq->cryptlen + |
| 422 | (req->is_encrypt ? crypto_aead_authsize(aead) : 0); | ||
| 423 | struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead); | ||
| 410 | struct spacc_engine *engine = aead_ctx->generic.engine; | 424 | struct spacc_engine *engine = aead_ctx->generic.engine; |
| 411 | unsigned ivsize = alg->alg.cra_aead.ivsize; | 425 | unsigned nents = sg_count(areq->src, total); |
| 412 | unsigned nents = sg_count(areq->src, areq->cryptlen); | ||
| 413 | 426 | ||
| 414 | if (areq->src != areq->dst) { | 427 | if (areq->src != areq->dst) { |
| 415 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); | 428 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); |
| 416 | dma_unmap_sg(engine->dev, areq->dst, | 429 | dma_unmap_sg(engine->dev, areq->dst, |
| 417 | sg_count(areq->dst, areq->cryptlen), | 430 | sg_count(areq->dst, total), |
| 418 | DMA_FROM_DEVICE); | 431 | DMA_FROM_DEVICE); |
| 419 | } else | 432 | } else |
| 420 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); | 433 | dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); |
| 421 | 434 | ||
| 422 | dma_unmap_sg(engine->dev, areq->assoc, | ||
| 423 | sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); | ||
| 424 | |||
| 425 | dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL); | ||
| 426 | |||
| 427 | dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); | 435 | dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); |
| 428 | dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); | 436 | dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); |
| 429 | } | 437 | } |
| @@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, | |||
| 438 | dma_pool_free(req->engine->req_pool, ddt, ddt_addr); | 446 | dma_pool_free(req->engine->req_pool, ddt, ddt_addr); |
| 439 | } | 447 | } |
| 440 | 448 | ||
| 441 | /* | ||
| 442 | * Set key for a DES operation in an AEAD cipher. This also performs weak key | ||
| 443 | * checking if required. | ||
| 444 | */ | ||
| 445 | static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key, | ||
| 446 | unsigned int len) | ||
| 447 | { | ||
| 448 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
| 449 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 450 | u32 tmp[DES_EXPKEY_WORDS]; | ||
| 451 | |||
| 452 | if (unlikely(!des_ekey(tmp, key)) && | ||
| 453 | (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) { | ||
| 454 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
| 455 | return -EINVAL; | ||
| 456 | } | ||
| 457 | |||
| 458 | memcpy(ctx->cipher_key, key, len); | ||
| 459 | ctx->cipher_key_len = len; | ||
| 460 | |||
| 461 | return 0; | ||
| 462 | } | ||
| 463 | |||
| 464 | /* Set the key for the AES block cipher component of the AEAD transform. */ | ||
| 465 | static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key, | ||
| 466 | unsigned int len) | ||
| 467 | { | ||
| 468 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | ||
| 469 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 470 | |||
| 471 | /* | ||
| 472 | * IPSec engine only supports 128 and 256 bit AES keys. If we get a | ||
| 473 | * request for any other size (192 bits) then we need to do a software | ||
| 474 | * fallback. | ||
| 475 | */ | ||
| 476 | if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) { | ||
| 477 | /* | ||
| 478 | * Set the fallback transform to use the same request flags as | ||
| 479 | * the hardware transform. | ||
| 480 | */ | ||
| 481 | ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | ||
| 482 | ctx->sw_cipher->base.crt_flags |= | ||
| 483 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; | ||
| 484 | return crypto_aead_setkey(ctx->sw_cipher, key, len); | ||
| 485 | } | ||
| 486 | |||
| 487 | memcpy(ctx->cipher_key, key, len); | ||
| 488 | ctx->cipher_key_len = len; | ||
| 489 | |||
| 490 | return 0; | ||
| 491 | } | ||
| 492 | |||
| 493 | static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | 449 | static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
| 494 | unsigned int keylen) | 450 | unsigned int keylen) |
| 495 | { | 451 | { |
| 496 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); | 452 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 497 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); | ||
| 498 | struct crypto_authenc_keys keys; | 453 | struct crypto_authenc_keys keys; |
| 499 | int err = -EINVAL; | 454 | int err; |
| 455 | |||
| 456 | crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | ||
| 457 | crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) & | ||
| 458 | CRYPTO_TFM_REQ_MASK); | ||
| 459 | err = crypto_aead_setkey(ctx->sw_cipher, key, keylen); | ||
| 460 | crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK); | ||
| 461 | crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) & | ||
| 462 | CRYPTO_TFM_RES_MASK); | ||
| 463 | if (err) | ||
| 464 | return err; | ||
| 500 | 465 | ||
| 501 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 466 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 502 | goto badkey; | 467 | goto badkey; |
| @@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 507 | if (keys.authkeylen > sizeof(ctx->hash_ctx)) | 472 | if (keys.authkeylen > sizeof(ctx->hash_ctx)) |
| 508 | goto badkey; | 473 | goto badkey; |
| 509 | 474 | ||
| 510 | if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == | 475 | memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen); |
| 511 | SPA_CTRL_CIPH_ALG_AES) | 476 | ctx->cipher_key_len = keys.enckeylen; |
| 512 | err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); | ||
| 513 | else | ||
| 514 | err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); | ||
| 515 | |||
| 516 | if (err) | ||
| 517 | goto badkey; | ||
| 518 | 477 | ||
| 519 | memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); | 478 | memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); |
| 520 | ctx->hash_key_len = keys.authkeylen; | 479 | ctx->hash_key_len = keys.authkeylen; |
| @@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm, | |||
| 531 | { | 490 | { |
| 532 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); | 491 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); |
| 533 | 492 | ||
| 534 | ctx->auth_size = authsize; | 493 | return crypto_aead_setauthsize(ctx->sw_cipher, authsize); |
| 535 | |||
| 536 | return 0; | ||
| 537 | } | 494 | } |
| 538 | 495 | ||
| 539 | /* | 496 | /* |
| @@ -541,15 +498,13 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm, | |||
| 541 | * be completed in hardware because the hardware may not support certain key | 498 | * be completed in hardware because the hardware may not support certain key |
| 542 | * sizes. In these cases we need to complete the request in software. | 499 | * sizes. In these cases we need to complete the request in software. |
| 543 | */ | 500 | */ |
| 544 | static int spacc_aead_need_fallback(struct spacc_req *req) | 501 | static int spacc_aead_need_fallback(struct aead_request *aead_req) |
| 545 | { | 502 | { |
| 546 | struct aead_request *aead_req; | 503 | struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); |
| 547 | struct crypto_tfm *tfm = req->req->tfm; | 504 | struct aead_alg *alg = crypto_aead_alg(aead); |
| 548 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | 505 | struct spacc_aead *spacc_alg = to_spacc_aead(alg); |
| 549 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | 506 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead); |
| 550 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 551 | 507 | ||
| 552 | aead_req = container_of(req->req, struct aead_request, base); | ||
| 553 | /* | 508 | /* |
| 554 | * If we have a non-supported key-length, then we need to do a | 509 | * If we have a non-supported key-length, then we need to do a |
| 555 | * software fallback. | 510 | * software fallback. |
| @@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type, | |||
| 568 | { | 523 | { |
| 569 | struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); | 524 | struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); |
| 570 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm); | 525 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm); |
| 571 | int err; | 526 | struct aead_request *subreq = aead_request_ctx(req); |
| 572 | 527 | ||
| 573 | if (ctx->sw_cipher) { | 528 | aead_request_set_tfm(subreq, ctx->sw_cipher); |
| 574 | /* | 529 | aead_request_set_callback(subreq, req->base.flags, |
| 575 | * Change the request to use the software fallback transform, | 530 | req->base.complete, req->base.data); |
| 576 | * and once the ciphering has completed, put the old transform | 531 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, |
| 577 | * back into the request. | 532 | req->iv); |
| 578 | */ | 533 | aead_request_set_ad(subreq, req->assoclen); |
| 579 | aead_request_set_tfm(req, ctx->sw_cipher); | ||
| 580 | err = is_encrypt ? crypto_aead_encrypt(req) : | ||
| 581 | crypto_aead_decrypt(req); | ||
| 582 | aead_request_set_tfm(req, __crypto_aead_cast(old_tfm)); | ||
| 583 | } else | ||
| 584 | err = -EINVAL; | ||
| 585 | 534 | ||
| 586 | return err; | 535 | return is_encrypt ? crypto_aead_encrypt(subreq) : |
| 536 | crypto_aead_decrypt(subreq); | ||
| 587 | } | 537 | } |
| 588 | 538 | ||
| 589 | static void spacc_aead_complete(struct spacc_req *req) | 539 | static void spacc_aead_complete(struct spacc_req *req) |
| @@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req) | |||
| 594 | 544 | ||
| 595 | static int spacc_aead_submit(struct spacc_req *req) | 545 | static int spacc_aead_submit(struct spacc_req *req) |
| 596 | { | 546 | { |
| 597 | struct crypto_tfm *tfm = req->req->tfm; | ||
| 598 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 599 | struct crypto_alg *alg = req->req->tfm->__crt_alg; | ||
| 600 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | ||
| 601 | struct spacc_engine *engine = ctx->generic.engine; | ||
| 602 | u32 ctrl, proc_len, assoc_len; | ||
| 603 | struct aead_request *aead_req = | 547 | struct aead_request *aead_req = |
| 604 | container_of(req->req, struct aead_request, base); | 548 | container_of(req->req, struct aead_request, base); |
| 549 | struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); | ||
| 550 | unsigned int authsize = crypto_aead_authsize(aead); | ||
| 551 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead); | ||
| 552 | struct aead_alg *alg = crypto_aead_alg(aead); | ||
| 553 | struct spacc_aead *spacc_alg = to_spacc_aead(alg); | ||
| 554 | struct spacc_engine *engine = ctx->generic.engine; | ||
| 555 | u32 ctrl, proc_len, assoc_len; | ||
| 605 | 556 | ||
| 606 | req->result = -EINPROGRESS; | 557 | req->result = -EINPROGRESS; |
| 607 | req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, | 558 | req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, |
| 608 | ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize, | 559 | ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead), |
| 609 | ctx->hash_ctx, ctx->hash_key_len); | 560 | ctx->hash_ctx, ctx->hash_key_len); |
| 610 | 561 | ||
| 611 | /* Set the source and destination DDT pointers. */ | 562 | /* Set the source and destination DDT pointers. */ |
| @@ -617,25 +568,15 @@ static int spacc_aead_submit(struct spacc_req *req) | |||
| 617 | proc_len = aead_req->cryptlen + assoc_len; | 568 | proc_len = aead_req->cryptlen + assoc_len; |
| 618 | 569 | ||
| 619 | /* | 570 | /* |
| 620 | * If we aren't generating an IV, then we need to include the IV in the | ||
| 621 | * associated data so that it is included in the hash. | ||
| 622 | */ | ||
| 623 | if (!req->giv) { | ||
| 624 | assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); | ||
| 625 | proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req)); | ||
| 626 | } else | ||
| 627 | proc_len += req->giv_len; | ||
| 628 | |||
| 629 | /* | ||
| 630 | * If we are decrypting, we need to take the length of the ICV out of | 571 | * If we are decrypting, we need to take the length of the ICV out of |
| 631 | * the processing length. | 572 | * the processing length. |
| 632 | */ | 573 | */ |
| 633 | if (!req->is_encrypt) | 574 | if (!req->is_encrypt) |
| 634 | proc_len -= ctx->auth_size; | 575 | proc_len -= authsize; |
| 635 | 576 | ||
| 636 | writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET); | 577 | writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET); |
| 637 | writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET); | 578 | writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET); |
| 638 | writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET); | 579 | writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET); |
| 639 | writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); | 580 | writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET); |
| 640 | writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); | 581 | writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET); |
| 641 | 582 | ||
| @@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine) | |||
| 674 | /* | 615 | /* |
| 675 | * Setup an AEAD request for processing. This will configure the engine, load | 616 | * Setup an AEAD request for processing. This will configure the engine, load |
| 676 | * the context and then start the packet processing. | 617 | * the context and then start the packet processing. |
| 677 | * | ||
| 678 | * @giv Pointer to destination address for a generated IV. If the | ||
| 679 | * request does not need to generate an IV then this should be set to NULL. | ||
| 680 | */ | 618 | */ |
| 681 | static int spacc_aead_setup(struct aead_request *req, u8 *giv, | 619 | static int spacc_aead_setup(struct aead_request *req, |
| 682 | unsigned alg_type, bool is_encrypt) | 620 | unsigned alg_type, bool is_encrypt) |
| 683 | { | 621 | { |
| 684 | struct crypto_alg *alg = req->base.tfm->__crt_alg; | 622 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 685 | struct spacc_engine *engine = to_spacc_alg(alg)->engine; | 623 | struct aead_alg *alg = crypto_aead_alg(aead); |
| 624 | struct spacc_engine *engine = to_spacc_aead(alg)->engine; | ||
| 686 | struct spacc_req *dev_req = aead_request_ctx(req); | 625 | struct spacc_req *dev_req = aead_request_ctx(req); |
| 687 | int err = -EINPROGRESS; | 626 | int err; |
| 688 | unsigned long flags; | 627 | unsigned long flags; |
| 689 | unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); | ||
| 690 | 628 | ||
| 691 | dev_req->giv = giv; | ||
| 692 | dev_req->giv_len = ivsize; | ||
| 693 | dev_req->req = &req->base; | 629 | dev_req->req = &req->base; |
| 694 | dev_req->is_encrypt = is_encrypt; | 630 | dev_req->is_encrypt = is_encrypt; |
| 695 | dev_req->result = -EBUSY; | 631 | dev_req->result = -EBUSY; |
| 696 | dev_req->engine = engine; | 632 | dev_req->engine = engine; |
| 697 | dev_req->complete = spacc_aead_complete; | 633 | dev_req->complete = spacc_aead_complete; |
| 698 | 634 | ||
| 699 | if (unlikely(spacc_aead_need_fallback(dev_req))) | 635 | if (unlikely(spacc_aead_need_fallback(req) || |
| 636 | ((err = spacc_aead_make_ddts(req)) == -E2BIG))) | ||
| 700 | return spacc_aead_do_fallback(req, alg_type, is_encrypt); | 637 | return spacc_aead_do_fallback(req, alg_type, is_encrypt); |
| 701 | 638 | ||
| 702 | spacc_aead_make_ddts(dev_req, dev_req->giv); | 639 | if (err) |
| 640 | goto out; | ||
| 703 | 641 | ||
| 704 | err = -EINPROGRESS; | 642 | err = -EINPROGRESS; |
| 705 | spin_lock_irqsave(&engine->hw_lock, flags); | 643 | spin_lock_irqsave(&engine->hw_lock, flags); |
| @@ -728,70 +666,44 @@ out: | |||
| 728 | static int spacc_aead_encrypt(struct aead_request *req) | 666 | static int spacc_aead_encrypt(struct aead_request *req) |
| 729 | { | 667 | { |
| 730 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 668 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 731 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | 669 | struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead)); |
| 732 | struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); | ||
| 733 | 670 | ||
| 734 | return spacc_aead_setup(req, NULL, alg->type, 1); | 671 | return spacc_aead_setup(req, alg->type, 1); |
| 735 | } | ||
| 736 | |||
| 737 | static int spacc_aead_givencrypt(struct aead_givcrypt_request *req) | ||
| 738 | { | ||
| 739 | struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); | ||
| 740 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
| 741 | size_t ivsize = crypto_aead_ivsize(tfm); | ||
| 742 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); | ||
| 743 | unsigned len; | ||
| 744 | __be64 seq; | ||
| 745 | |||
| 746 | memcpy(req->areq.iv, ctx->salt, ivsize); | ||
| 747 | len = ivsize; | ||
| 748 | if (ivsize > sizeof(u64)) { | ||
| 749 | memset(req->giv, 0, ivsize - sizeof(u64)); | ||
| 750 | len = sizeof(u64); | ||
| 751 | } | ||
| 752 | seq = cpu_to_be64(req->seq); | ||
| 753 | memcpy(req->giv + ivsize - len, &seq, len); | ||
| 754 | |||
| 755 | return spacc_aead_setup(&req->areq, req->giv, alg->type, 1); | ||
| 756 | } | 672 | } |
| 757 | 673 | ||
| 758 | static int spacc_aead_decrypt(struct aead_request *req) | 674 | static int spacc_aead_decrypt(struct aead_request *req) |
| 759 | { | 675 | { |
| 760 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 676 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 761 | struct crypto_tfm *tfm = crypto_aead_tfm(aead); | 677 | struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead)); |
| 762 | struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg); | ||
| 763 | 678 | ||
| 764 | return spacc_aead_setup(req, NULL, alg->type, 0); | 679 | return spacc_aead_setup(req, alg->type, 0); |
| 765 | } | 680 | } |
| 766 | 681 | ||
| 767 | /* | 682 | /* |
| 768 | * Initialise a new AEAD context. This is responsible for allocating the | 683 | * Initialise a new AEAD context. This is responsible for allocating the |
| 769 | * fallback cipher and initialising the context. | 684 | * fallback cipher and initialising the context. |
| 770 | */ | 685 | */ |
| 771 | static int spacc_aead_cra_init(struct crypto_tfm *tfm) | 686 | static int spacc_aead_cra_init(struct crypto_aead *tfm) |
| 772 | { | 687 | { |
| 773 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 688 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 774 | struct crypto_alg *alg = tfm->__crt_alg; | 689 | struct aead_alg *alg = crypto_aead_alg(tfm); |
| 775 | struct spacc_alg *spacc_alg = to_spacc_alg(alg); | 690 | struct spacc_aead *spacc_alg = to_spacc_aead(alg); |
| 776 | struct spacc_engine *engine = spacc_alg->engine; | 691 | struct spacc_engine *engine = spacc_alg->engine; |
| 777 | 692 | ||
| 778 | ctx->generic.flags = spacc_alg->type; | 693 | ctx->generic.flags = spacc_alg->type; |
| 779 | ctx->generic.engine = engine; | 694 | ctx->generic.engine = engine; |
| 780 | ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0, | 695 | ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, |
| 781 | CRYPTO_ALG_ASYNC | | ||
| 782 | CRYPTO_ALG_NEED_FALLBACK); | 696 | CRYPTO_ALG_NEED_FALLBACK); |
| 783 | if (IS_ERR(ctx->sw_cipher)) { | 697 | if (IS_ERR(ctx->sw_cipher)) |
| 784 | dev_warn(engine->dev, "failed to allocate fallback for %s\n", | 698 | return PTR_ERR(ctx->sw_cipher); |
| 785 | alg->cra_name); | ||
| 786 | ctx->sw_cipher = NULL; | ||
| 787 | } | ||
| 788 | ctx->generic.key_offs = spacc_alg->key_offs; | 699 | ctx->generic.key_offs = spacc_alg->key_offs; |
| 789 | ctx->generic.iv_offs = spacc_alg->iv_offs; | 700 | ctx->generic.iv_offs = spacc_alg->iv_offs; |
| 790 | 701 | ||
| 791 | get_random_bytes(ctx->salt, sizeof(ctx->salt)); | 702 | crypto_aead_set_reqsize( |
| 792 | 703 | tfm, | |
| 793 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 704 | max(sizeof(struct spacc_req), |
| 794 | sizeof(struct spacc_req)); | 705 | sizeof(struct aead_request) + |
| 706 | crypto_aead_reqsize(ctx->sw_cipher))); | ||
| 795 | 707 | ||
| 796 | return 0; | 708 | return 0; |
| 797 | } | 709 | } |
| @@ -800,13 +712,11 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm) | |||
| 800 | * Destructor for an AEAD context. This is called when the transform is freed | 712 | * Destructor for an AEAD context. This is called when the transform is freed |
| 801 | * and must free the fallback cipher. | 713 | * and must free the fallback cipher. |
| 802 | */ | 714 | */ |
| 803 | static void spacc_aead_cra_exit(struct crypto_tfm *tfm) | 715 | static void spacc_aead_cra_exit(struct crypto_aead *tfm) |
| 804 | { | 716 | { |
| 805 | struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 717 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 806 | 718 | ||
| 807 | if (ctx->sw_cipher) | 719 | crypto_free_aead(ctx->sw_cipher); |
| 808 | crypto_free_aead(ctx->sw_cipher); | ||
| 809 | ctx->sw_cipher = NULL; | ||
| 810 | } | 720 | } |
| 811 | 721 | ||
| 812 | /* | 722 | /* |
| @@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = { | |||
| 1458 | .cra_exit = spacc_ablk_cra_exit, | 1368 | .cra_exit = spacc_ablk_cra_exit, |
| 1459 | }, | 1369 | }, |
| 1460 | }, | 1370 | }, |
| 1371 | }; | ||
| 1372 | |||
| 1373 | static struct spacc_aead ipsec_engine_aeads[] = { | ||
| 1461 | { | 1374 | { |
| 1462 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 1375 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | |
| 1463 | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | 1376 | SPA_CTRL_CIPH_MODE_CBC | |
| 1377 | SPA_CTRL_HASH_ALG_SHA | | ||
| 1378 | SPA_CTRL_HASH_MODE_HMAC, | ||
| 1464 | .key_offs = 0, | 1379 | .key_offs = 0, |
| 1465 | .iv_offs = AES_MAX_KEY_SIZE, | 1380 | .iv_offs = AES_MAX_KEY_SIZE, |
| 1466 | .alg = { | 1381 | .alg = { |
| 1467 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1382 | .base = { |
| 1468 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", | 1383 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
| 1469 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 1384 | .cra_driver_name = "authenc-hmac-sha1-" |
| 1470 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1385 | "cbc-aes-picoxcell", |
| 1471 | CRYPTO_ALG_ASYNC | | 1386 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, |
| 1472 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1387 | .cra_flags = CRYPTO_ALG_ASYNC | |
| 1473 | .cra_blocksize = AES_BLOCK_SIZE, | 1388 | CRYPTO_ALG_NEED_FALLBACK | |
| 1474 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | 1389 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 1475 | .cra_type = &crypto_aead_type, | 1390 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1476 | .cra_module = THIS_MODULE, | 1391 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), |
| 1477 | .cra_aead = { | 1392 | .cra_module = THIS_MODULE, |
| 1478 | .setkey = spacc_aead_setkey, | ||
| 1479 | .setauthsize = spacc_aead_setauthsize, | ||
| 1480 | .encrypt = spacc_aead_encrypt, | ||
| 1481 | .decrypt = spacc_aead_decrypt, | ||
| 1482 | .givencrypt = spacc_aead_givencrypt, | ||
| 1483 | .ivsize = AES_BLOCK_SIZE, | ||
| 1484 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 1485 | }, | 1393 | }, |
| 1486 | .cra_init = spacc_aead_cra_init, | 1394 | .setkey = spacc_aead_setkey, |
| 1487 | .cra_exit = spacc_aead_cra_exit, | 1395 | .setauthsize = spacc_aead_setauthsize, |
| 1396 | .encrypt = spacc_aead_encrypt, | ||
| 1397 | .decrypt = spacc_aead_decrypt, | ||
| 1398 | .ivsize = AES_BLOCK_SIZE, | ||
| 1399 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 1400 | .init = spacc_aead_cra_init, | ||
| 1401 | .exit = spacc_aead_cra_exit, | ||
| 1488 | }, | 1402 | }, |
| 1489 | }, | 1403 | }, |
| 1490 | { | 1404 | { |
| 1491 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 1405 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | |
| 1406 | SPA_CTRL_CIPH_MODE_CBC | | ||
| 1492 | SPA_CTRL_HASH_ALG_SHA256 | | 1407 | SPA_CTRL_HASH_ALG_SHA256 | |
| 1493 | SPA_CTRL_HASH_MODE_HMAC, | 1408 | SPA_CTRL_HASH_MODE_HMAC, |
| 1494 | .key_offs = 0, | 1409 | .key_offs = 0, |
| 1495 | .iv_offs = AES_MAX_KEY_SIZE, | 1410 | .iv_offs = AES_MAX_KEY_SIZE, |
| 1496 | .alg = { | 1411 | .alg = { |
| 1497 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 1412 | .base = { |
| 1498 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", | 1413 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
| 1499 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 1414 | .cra_driver_name = "authenc-hmac-sha256-" |
| 1500 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1415 | "cbc-aes-picoxcell", |
| 1501 | CRYPTO_ALG_ASYNC | | 1416 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, |
| 1502 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1417 | .cra_flags = CRYPTO_ALG_ASYNC | |
| 1503 | .cra_blocksize = AES_BLOCK_SIZE, | 1418 | CRYPTO_ALG_NEED_FALLBACK | |
| 1504 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | 1419 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 1505 | .cra_type = &crypto_aead_type, | 1420 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1506 | .cra_module = THIS_MODULE, | 1421 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), |
| 1507 | .cra_aead = { | 1422 | .cra_module = THIS_MODULE, |
| 1508 | .setkey = spacc_aead_setkey, | ||
| 1509 | .setauthsize = spacc_aead_setauthsize, | ||
| 1510 | .encrypt = spacc_aead_encrypt, | ||
| 1511 | .decrypt = spacc_aead_decrypt, | ||
| 1512 | .givencrypt = spacc_aead_givencrypt, | ||
| 1513 | .ivsize = AES_BLOCK_SIZE, | ||
| 1514 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 1515 | }, | 1423 | }, |
| 1516 | .cra_init = spacc_aead_cra_init, | 1424 | .setkey = spacc_aead_setkey, |
| 1517 | .cra_exit = spacc_aead_cra_exit, | 1425 | .setauthsize = spacc_aead_setauthsize, |
| 1426 | .encrypt = spacc_aead_encrypt, | ||
| 1427 | .decrypt = spacc_aead_decrypt, | ||
| 1428 | .ivsize = AES_BLOCK_SIZE, | ||
| 1429 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 1430 | .init = spacc_aead_cra_init, | ||
| 1431 | .exit = spacc_aead_cra_exit, | ||
| 1518 | }, | 1432 | }, |
| 1519 | }, | 1433 | }, |
| 1520 | { | 1434 | { |
| 1521 | .key_offs = 0, | 1435 | .key_offs = 0, |
| 1522 | .iv_offs = AES_MAX_KEY_SIZE, | 1436 | .iv_offs = AES_MAX_KEY_SIZE, |
| 1523 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 1437 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | |
| 1524 | SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | 1438 | SPA_CTRL_CIPH_MODE_CBC | |
| 1439 | SPA_CTRL_HASH_ALG_MD5 | | ||
| 1440 | SPA_CTRL_HASH_MODE_HMAC, | ||
| 1525 | .alg = { | 1441 | .alg = { |
| 1526 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 1442 | .base = { |
| 1527 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", | 1443 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
| 1528 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 1444 | .cra_driver_name = "authenc-hmac-md5-" |
| 1529 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1445 | "cbc-aes-picoxcell", |
| 1530 | CRYPTO_ALG_ASYNC | | 1446 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, |
| 1531 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1447 | .cra_flags = CRYPTO_ALG_ASYNC | |
| 1532 | .cra_blocksize = AES_BLOCK_SIZE, | 1448 | CRYPTO_ALG_NEED_FALLBACK | |
| 1533 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | 1449 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 1534 | .cra_type = &crypto_aead_type, | 1450 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1535 | .cra_module = THIS_MODULE, | 1451 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), |
| 1536 | .cra_aead = { | 1452 | .cra_module = THIS_MODULE, |
| 1537 | .setkey = spacc_aead_setkey, | ||
| 1538 | .setauthsize = spacc_aead_setauthsize, | ||
| 1539 | .encrypt = spacc_aead_encrypt, | ||
| 1540 | .decrypt = spacc_aead_decrypt, | ||
| 1541 | .givencrypt = spacc_aead_givencrypt, | ||
| 1542 | .ivsize = AES_BLOCK_SIZE, | ||
| 1543 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 1544 | }, | 1453 | }, |
| 1545 | .cra_init = spacc_aead_cra_init, | 1454 | .setkey = spacc_aead_setkey, |
| 1546 | .cra_exit = spacc_aead_cra_exit, | 1455 | .setauthsize = spacc_aead_setauthsize, |
| 1456 | .encrypt = spacc_aead_encrypt, | ||
| 1457 | .decrypt = spacc_aead_decrypt, | ||
| 1458 | .ivsize = AES_BLOCK_SIZE, | ||
| 1459 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 1460 | .init = spacc_aead_cra_init, | ||
| 1461 | .exit = spacc_aead_cra_exit, | ||
| 1547 | }, | 1462 | }, |
| 1548 | }, | 1463 | }, |
| 1549 | { | 1464 | { |
| 1550 | .key_offs = DES_BLOCK_SIZE, | 1465 | .key_offs = DES_BLOCK_SIZE, |
| 1551 | .iv_offs = 0, | 1466 | .iv_offs = 0, |
| 1552 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | 1467 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | |
| 1553 | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | 1468 | SPA_CTRL_CIPH_MODE_CBC | |
| 1469 | SPA_CTRL_HASH_ALG_SHA | | ||
| 1470 | SPA_CTRL_HASH_MODE_HMAC, | ||
| 1554 | .alg = { | 1471 | .alg = { |
| 1555 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | 1472 | .base = { |
| 1556 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", | 1473 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", |
| 1557 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 1474 | .cra_driver_name = "authenc-hmac-sha1-" |
| 1558 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1475 | "cbc-3des-picoxcell", |
| 1559 | CRYPTO_ALG_ASYNC | | 1476 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, |
| 1560 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1477 | .cra_flags = CRYPTO_ALG_ASYNC | |
| 1561 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1478 | CRYPTO_ALG_NEED_FALLBACK | |
| 1562 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | 1479 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 1563 | .cra_type = &crypto_aead_type, | 1480 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 1564 | .cra_module = THIS_MODULE, | 1481 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), |
| 1565 | .cra_aead = { | 1482 | .cra_module = THIS_MODULE, |
| 1566 | .setkey = spacc_aead_setkey, | ||
| 1567 | .setauthsize = spacc_aead_setauthsize, | ||
| 1568 | .encrypt = spacc_aead_encrypt, | ||
| 1569 | .decrypt = spacc_aead_decrypt, | ||
| 1570 | .givencrypt = spacc_aead_givencrypt, | ||
| 1571 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 1572 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 1573 | }, | 1483 | }, |
| 1574 | .cra_init = spacc_aead_cra_init, | 1484 | .setkey = spacc_aead_setkey, |
| 1575 | .cra_exit = spacc_aead_cra_exit, | 1485 | .setauthsize = spacc_aead_setauthsize, |
| 1486 | .encrypt = spacc_aead_encrypt, | ||
| 1487 | .decrypt = spacc_aead_decrypt, | ||
| 1488 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 1489 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 1490 | .init = spacc_aead_cra_init, | ||
| 1491 | .exit = spacc_aead_cra_exit, | ||
| 1576 | }, | 1492 | }, |
| 1577 | }, | 1493 | }, |
| 1578 | { | 1494 | { |
| 1579 | .key_offs = DES_BLOCK_SIZE, | 1495 | .key_offs = DES_BLOCK_SIZE, |
| 1580 | .iv_offs = 0, | 1496 | .iv_offs = 0, |
| 1581 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 1497 | .ctrl_default = SPA_CTRL_CIPH_ALG_AES | |
| 1498 | SPA_CTRL_CIPH_MODE_CBC | | ||
| 1582 | SPA_CTRL_HASH_ALG_SHA256 | | 1499 | SPA_CTRL_HASH_ALG_SHA256 | |
| 1583 | SPA_CTRL_HASH_MODE_HMAC, | 1500 | SPA_CTRL_HASH_MODE_HMAC, |
| 1584 | .alg = { | 1501 | .alg = { |
| 1585 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | 1502 | .base = { |
| 1586 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", | 1503 | .cra_name = "authenc(hmac(sha256)," |
| 1587 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 1504 | "cbc(des3_ede))", |
| 1588 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1505 | .cra_driver_name = "authenc-hmac-sha256-" |
| 1589 | CRYPTO_ALG_ASYNC | | 1506 | "cbc-3des-picoxcell", |
| 1590 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1507 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, |
| 1591 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1508 | .cra_flags = CRYPTO_ALG_ASYNC | |
| 1592 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | 1509 | CRYPTO_ALG_NEED_FALLBACK | |
| 1593 | .cra_type = &crypto_aead_type, | 1510 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 1594 | .cra_module = THIS_MODULE, | 1511 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 1595 | .cra_aead = { | 1512 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), |
| 1596 | .setkey = spacc_aead_setkey, | 1513 | .cra_module = THIS_MODULE, |
| 1597 | .setauthsize = spacc_aead_setauthsize, | ||
| 1598 | .encrypt = spacc_aead_encrypt, | ||
| 1599 | .decrypt = spacc_aead_decrypt, | ||
| 1600 | .givencrypt = spacc_aead_givencrypt, | ||
| 1601 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 1602 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 1603 | }, | 1514 | }, |
| 1604 | .cra_init = spacc_aead_cra_init, | 1515 | .setkey = spacc_aead_setkey, |
| 1605 | .cra_exit = spacc_aead_cra_exit, | 1516 | .setauthsize = spacc_aead_setauthsize, |
| 1517 | .encrypt = spacc_aead_encrypt, | ||
| 1518 | .decrypt = spacc_aead_decrypt, | ||
| 1519 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 1520 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 1521 | .init = spacc_aead_cra_init, | ||
| 1522 | .exit = spacc_aead_cra_exit, | ||
| 1606 | }, | 1523 | }, |
| 1607 | }, | 1524 | }, |
| 1608 | { | 1525 | { |
| 1609 | .key_offs = DES_BLOCK_SIZE, | 1526 | .key_offs = DES_BLOCK_SIZE, |
| 1610 | .iv_offs = 0, | 1527 | .iv_offs = 0, |
| 1611 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | 1528 | .ctrl_default = SPA_CTRL_CIPH_ALG_DES | |
| 1612 | SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | 1529 | SPA_CTRL_CIPH_MODE_CBC | |
| 1530 | SPA_CTRL_HASH_ALG_MD5 | | ||
| 1531 | SPA_CTRL_HASH_MODE_HMAC, | ||
| 1613 | .alg = { | 1532 | .alg = { |
| 1614 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 1533 | .base = { |
| 1615 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", | 1534 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
| 1616 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 1535 | .cra_driver_name = "authenc-hmac-md5-" |
| 1617 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | | 1536 | "cbc-3des-picoxcell", |
| 1618 | CRYPTO_ALG_ASYNC | | 1537 | .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, |
| 1619 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1538 | .cra_flags = CRYPTO_ALG_ASYNC | |
| 1620 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1539 | CRYPTO_ALG_NEED_FALLBACK | |
| 1621 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), | 1540 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
| 1622 | .cra_type = &crypto_aead_type, | 1541 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 1623 | .cra_module = THIS_MODULE, | 1542 | .cra_ctxsize = sizeof(struct spacc_aead_ctx), |
| 1624 | .cra_aead = { | 1543 | .cra_module = THIS_MODULE, |
| 1625 | .setkey = spacc_aead_setkey, | ||
| 1626 | .setauthsize = spacc_aead_setauthsize, | ||
| 1627 | .encrypt = spacc_aead_encrypt, | ||
| 1628 | .decrypt = spacc_aead_decrypt, | ||
| 1629 | .givencrypt = spacc_aead_givencrypt, | ||
| 1630 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 1631 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 1632 | }, | 1544 | }, |
| 1633 | .cra_init = spacc_aead_cra_init, | 1545 | .setkey = spacc_aead_setkey, |
| 1634 | .cra_exit = spacc_aead_cra_exit, | 1546 | .setauthsize = spacc_aead_setauthsize, |
| 1547 | .encrypt = spacc_aead_encrypt, | ||
| 1548 | .decrypt = spacc_aead_decrypt, | ||
| 1549 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 1550 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 1551 | .init = spacc_aead_cra_init, | ||
| 1552 | .exit = spacc_aead_cra_exit, | ||
| 1635 | }, | 1553 | }, |
| 1636 | }, | 1554 | }, |
| 1637 | }; | 1555 | }; |
| @@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev) | |||
| 1707 | engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; | 1625 | engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; |
| 1708 | engine->algs = ipsec_engine_algs; | 1626 | engine->algs = ipsec_engine_algs; |
| 1709 | engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); | 1627 | engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); |
| 1628 | engine->aeads = ipsec_engine_aeads; | ||
| 1629 | engine->num_aeads = ARRAY_SIZE(ipsec_engine_aeads); | ||
| 1710 | } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { | 1630 | } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { |
| 1711 | engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; | 1631 | engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; |
| 1712 | engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; | 1632 | engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; |
| @@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev) | |||
| 1815 | engine->algs[i].alg.cra_name); | 1735 | engine->algs[i].alg.cra_name); |
| 1816 | } | 1736 | } |
| 1817 | 1737 | ||
| 1738 | INIT_LIST_HEAD(&engine->registered_aeads); | ||
| 1739 | for (i = 0; i < engine->num_aeads; ++i) { | ||
| 1740 | engine->aeads[i].engine = engine; | ||
| 1741 | err = crypto_register_aead(&engine->aeads[i].alg); | ||
| 1742 | if (!err) { | ||
| 1743 | list_add_tail(&engine->aeads[i].entry, | ||
| 1744 | &engine->registered_aeads); | ||
| 1745 | ret = 0; | ||
| 1746 | } | ||
| 1747 | if (err) | ||
| 1748 | dev_err(engine->dev, "failed to register alg \"%s\"\n", | ||
| 1749 | engine->aeads[i].alg.base.cra_name); | ||
| 1750 | else | ||
| 1751 | dev_dbg(engine->dev, "registered alg \"%s\"\n", | ||
| 1752 | engine->aeads[i].alg.base.cra_name); | ||
| 1753 | } | ||
| 1754 | |||
| 1818 | return ret; | 1755 | return ret; |
| 1819 | } | 1756 | } |
| 1820 | 1757 | ||
| 1821 | static int spacc_remove(struct platform_device *pdev) | 1758 | static int spacc_remove(struct platform_device *pdev) |
| 1822 | { | 1759 | { |
| 1760 | struct spacc_aead *aead, *an; | ||
| 1823 | struct spacc_alg *alg, *next; | 1761 | struct spacc_alg *alg, *next; |
| 1824 | struct spacc_engine *engine = platform_get_drvdata(pdev); | 1762 | struct spacc_engine *engine = platform_get_drvdata(pdev); |
| 1825 | 1763 | ||
| 1826 | del_timer_sync(&engine->packet_timeout); | 1764 | del_timer_sync(&engine->packet_timeout); |
| 1827 | device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); | 1765 | device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); |
| 1828 | 1766 | ||
| 1767 | list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) { | ||
| 1768 | list_del(&aead->entry); | ||
| 1769 | crypto_unregister_aead(&aead->alg); | ||
| 1770 | } | ||
| 1771 | |||
| 1829 | list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { | 1772 | list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { |
| 1830 | list_del(&alg->entry); | 1773 | list_del(&alg->entry); |
| 1831 | crypto_unregister_alg(&alg->alg); | 1774 | crypto_unregister_alg(&alg->alg); |
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 6fdb9e8b22a7..eefccf7b8be7 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig | |||
| @@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT | |||
| 3 | select CRYPTO_AEAD | 3 | select CRYPTO_AEAD |
| 4 | select CRYPTO_AUTHENC | 4 | select CRYPTO_AUTHENC |
| 5 | select CRYPTO_BLKCIPHER | 5 | select CRYPTO_BLKCIPHER |
| 6 | select CRYPTO_AKCIPHER | ||
| 6 | select CRYPTO_HMAC | 7 | select CRYPTO_HMAC |
| 7 | select CRYPTO_SHA1 | 8 | select CRYPTO_SHA1 |
| 8 | select CRYPTO_SHA256 | 9 | select CRYPTO_SHA256 |
| 9 | select CRYPTO_SHA512 | 10 | select CRYPTO_SHA512 |
| 10 | select FW_LOADER | 11 | select FW_LOADER |
| 12 | select ASN1 | ||
| 11 | 13 | ||
| 12 | config CRYPTO_DEV_QAT_DH895xCC | 14 | config CRYPTO_DEV_QAT_DH895xCC |
| 13 | tristate "Support for Intel(R) DH895xCC" | 15 | tristate "Support for Intel(R) DH895xCC" |
| @@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC | |||
| 19 | 21 | ||
| 20 | To compile this as a module, choose M here: the module | 22 | To compile this as a module, choose M here: the module |
| 21 | will be called qat_dh895xcc. | 23 | will be called qat_dh895xcc. |
| 24 | |||
| 25 | config CRYPTO_DEV_QAT_DH895xCCVF | ||
| 26 | tristate "Support for Intel(R) DH895xCC Virtual Function" | ||
| 27 | depends on X86 && PCI | ||
| 28 | select PCI_IOV | ||
| 29 | select CRYPTO_DEV_QAT | ||
| 30 | |||
| 31 | help | ||
| 32 | Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology | ||
| 33 | Virtual Function for accelerating crypto and compression workloads. | ||
| 34 | |||
| 35 | To compile this as a module, choose M here: the module | ||
| 36 | will be called qat_dh895xccvf. | ||
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index d11481be225e..a3ce0b70e32f 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile | |||
| @@ -1,2 +1,3 @@ | |||
| 1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ | 1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ |
| 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ | 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ |
| 3 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ | ||
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore new file mode 100644 index 000000000000..ee328374dba8 --- /dev/null +++ b/drivers/crypto/qat/qat_common/.gitignore | |||
| @@ -0,0 +1 @@ | |||
| *-asn1.[ch] | |||
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index e0424dc382fe..df20a9de1c58 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile | |||
| @@ -1,3 +1,6 @@ | |||
| 1 | $(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h | ||
| 2 | clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h | ||
| 3 | |||
| 1 | obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o | 4 | obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o |
| 2 | intel_qat-objs := adf_cfg.o \ | 5 | intel_qat-objs := adf_cfg.o \ |
| 3 | adf_ctl_drv.o \ | 6 | adf_ctl_drv.o \ |
| @@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \ | |||
| 6 | adf_accel_engine.o \ | 9 | adf_accel_engine.o \ |
| 7 | adf_aer.o \ | 10 | adf_aer.o \ |
| 8 | adf_transport.o \ | 11 | adf_transport.o \ |
| 12 | adf_admin.o \ | ||
| 13 | adf_hw_arbiter.o \ | ||
| 9 | qat_crypto.o \ | 14 | qat_crypto.o \ |
| 10 | qat_algs.o \ | 15 | qat_algs.o \ |
| 16 | qat_rsakey-asn1.o \ | ||
| 17 | qat_asym_algs.o \ | ||
| 11 | qat_uclo.o \ | 18 | qat_uclo.o \ |
| 12 | qat_hal.o | 19 | qat_hal.o |
| 13 | 20 | ||
| 14 | intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o | 21 | intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o |
| 22 | intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o | ||
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 5fe902967620..ca853d50b4b7 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h | |||
| @@ -46,13 +46,17 @@ | |||
| 46 | */ | 46 | */ |
| 47 | #ifndef ADF_ACCEL_DEVICES_H_ | 47 | #ifndef ADF_ACCEL_DEVICES_H_ |
| 48 | #define ADF_ACCEL_DEVICES_H_ | 48 | #define ADF_ACCEL_DEVICES_H_ |
| 49 | #include <linux/interrupt.h> | ||
| 49 | #include <linux/module.h> | 50 | #include <linux/module.h> |
| 50 | #include <linux/list.h> | 51 | #include <linux/list.h> |
| 51 | #include <linux/io.h> | 52 | #include <linux/io.h> |
| 53 | #include <linux/ratelimit.h> | ||
| 52 | #include "adf_cfg_common.h" | 54 | #include "adf_cfg_common.h" |
| 53 | 55 | ||
| 54 | #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" | 56 | #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" |
| 57 | #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" | ||
| 55 | #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 | 58 | #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 |
| 59 | #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 | ||
| 56 | #define ADF_PCI_MAX_BARS 3 | 60 | #define ADF_PCI_MAX_BARS 3 |
| 57 | #define ADF_DEVICE_NAME_LENGTH 32 | 61 | #define ADF_DEVICE_NAME_LENGTH 32 |
| 58 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 | 62 | #define ADF_ETR_MAX_RINGS_PER_BANK 16 |
| @@ -79,6 +83,7 @@ struct adf_bar { | |||
| 79 | struct adf_accel_msix { | 83 | struct adf_accel_msix { |
| 80 | struct msix_entry *entries; | 84 | struct msix_entry *entries; |
| 81 | char **names; | 85 | char **names; |
| 86 | u32 num_entries; | ||
| 82 | } __packed; | 87 | } __packed; |
| 83 | 88 | ||
| 84 | struct adf_accel_pci { | 89 | struct adf_accel_pci { |
| @@ -99,6 +104,7 @@ enum dev_sku_info { | |||
| 99 | DEV_SKU_2, | 104 | DEV_SKU_2, |
| 100 | DEV_SKU_3, | 105 | DEV_SKU_3, |
| 101 | DEV_SKU_4, | 106 | DEV_SKU_4, |
| 107 | DEV_SKU_VF, | ||
| 102 | DEV_SKU_UNKNOWN, | 108 | DEV_SKU_UNKNOWN, |
| 103 | }; | 109 | }; |
| 104 | 110 | ||
| @@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info) | |||
| 113 | return "SKU3"; | 119 | return "SKU3"; |
| 114 | case DEV_SKU_4: | 120 | case DEV_SKU_4: |
| 115 | return "SKU4"; | 121 | return "SKU4"; |
| 122 | case DEV_SKU_VF: | ||
| 123 | return "SKUVF"; | ||
| 116 | case DEV_SKU_UNKNOWN: | 124 | case DEV_SKU_UNKNOWN: |
| 117 | default: | 125 | default: |
| 118 | break; | 126 | break; |
| @@ -135,23 +143,29 @@ struct adf_hw_device_data { | |||
| 135 | struct adf_hw_device_class *dev_class; | 143 | struct adf_hw_device_class *dev_class; |
| 136 | uint32_t (*get_accel_mask)(uint32_t fuse); | 144 | uint32_t (*get_accel_mask)(uint32_t fuse); |
| 137 | uint32_t (*get_ae_mask)(uint32_t fuse); | 145 | uint32_t (*get_ae_mask)(uint32_t fuse); |
| 146 | uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self); | ||
| 138 | uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); | 147 | uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self); |
| 139 | uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); | 148 | uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); |
| 140 | uint32_t (*get_num_aes)(struct adf_hw_device_data *self); | 149 | uint32_t (*get_num_aes)(struct adf_hw_device_data *self); |
| 141 | uint32_t (*get_num_accels)(struct adf_hw_device_data *self); | 150 | uint32_t (*get_num_accels)(struct adf_hw_device_data *self); |
| 151 | uint32_t (*get_pf2vf_offset)(uint32_t i); | ||
| 152 | uint32_t (*get_vintmsk_offset)(uint32_t i); | ||
| 142 | enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); | 153 | enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); |
| 143 | void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring); | ||
| 144 | void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring); | ||
| 145 | int (*alloc_irq)(struct adf_accel_dev *accel_dev); | 154 | int (*alloc_irq)(struct adf_accel_dev *accel_dev); |
| 146 | void (*free_irq)(struct adf_accel_dev *accel_dev); | 155 | void (*free_irq)(struct adf_accel_dev *accel_dev); |
| 147 | void (*enable_error_correction)(struct adf_accel_dev *accel_dev); | 156 | void (*enable_error_correction)(struct adf_accel_dev *accel_dev); |
| 148 | int (*init_admin_comms)(struct adf_accel_dev *accel_dev); | 157 | int (*init_admin_comms)(struct adf_accel_dev *accel_dev); |
| 149 | void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); | 158 | void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); |
| 159 | int (*send_admin_init)(struct adf_accel_dev *accel_dev); | ||
| 150 | int (*init_arb)(struct adf_accel_dev *accel_dev); | 160 | int (*init_arb)(struct adf_accel_dev *accel_dev); |
| 151 | void (*exit_arb)(struct adf_accel_dev *accel_dev); | 161 | void (*exit_arb)(struct adf_accel_dev *accel_dev); |
| 162 | void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, | ||
| 163 | const uint32_t **cfg); | ||
| 164 | void (*disable_iov)(struct adf_accel_dev *accel_dev); | ||
| 152 | void (*enable_ints)(struct adf_accel_dev *accel_dev); | 165 | void (*enable_ints)(struct adf_accel_dev *accel_dev); |
| 166 | int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); | ||
| 153 | const char *fw_name; | 167 | const char *fw_name; |
| 154 | uint32_t pci_dev_id; | 168 | const char *fw_mmp_name; |
| 155 | uint32_t fuses; | 169 | uint32_t fuses; |
| 156 | uint32_t accel_capabilities_mask; | 170 | uint32_t accel_capabilities_mask; |
| 157 | uint16_t accel_mask; | 171 | uint16_t accel_mask; |
| @@ -163,6 +177,7 @@ struct adf_hw_device_data { | |||
| 163 | uint8_t num_accel; | 177 | uint8_t num_accel; |
| 164 | uint8_t num_logical_accel; | 178 | uint8_t num_logical_accel; |
| 165 | uint8_t num_engines; | 179 | uint8_t num_engines; |
| 180 | uint8_t min_iov_compat_ver; | ||
| 166 | } __packed; | 181 | } __packed; |
| 167 | 182 | ||
| 168 | /* CSR write macro */ | 183 | /* CSR write macro */ |
| @@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle; | |||
| 184 | struct adf_fw_loader_data { | 199 | struct adf_fw_loader_data { |
| 185 | struct icp_qat_fw_loader_handle *fw_loader; | 200 | struct icp_qat_fw_loader_handle *fw_loader; |
| 186 | const struct firmware *uof_fw; | 201 | const struct firmware *uof_fw; |
| 202 | const struct firmware *mmp_fw; | ||
| 203 | }; | ||
| 204 | |||
| 205 | struct adf_accel_vf_info { | ||
| 206 | struct adf_accel_dev *accel_dev; | ||
| 207 | struct tasklet_struct vf2pf_bh_tasklet; | ||
| 208 | struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ | ||
| 209 | struct ratelimit_state vf2pf_ratelimit; | ||
| 210 | u32 vf_nr; | ||
| 211 | bool init; | ||
| 187 | }; | 212 | }; |
| 188 | 213 | ||
| 189 | struct adf_accel_dev { | 214 | struct adf_accel_dev { |
| @@ -199,6 +224,21 @@ struct adf_accel_dev { | |||
| 199 | struct list_head list; | 224 | struct list_head list; |
| 200 | struct module *owner; | 225 | struct module *owner; |
| 201 | struct adf_accel_pci accel_pci_dev; | 226 | struct adf_accel_pci accel_pci_dev; |
| 227 | union { | ||
| 228 | struct { | ||
| 229 | /* vf_info is non-zero when SR-IOV is init'ed */ | ||
| 230 | struct adf_accel_vf_info *vf_info; | ||
| 231 | } pf; | ||
| 232 | struct { | ||
| 233 | char *irq_name; | ||
| 234 | struct tasklet_struct pf2vf_bh_tasklet; | ||
| 235 | struct mutex vf2pf_lock; /* protect CSR access */ | ||
| 236 | struct completion iov_msg_completion; | ||
| 237 | uint8_t compatible; | ||
| 238 | uint8_t pf_version; | ||
| 239 | } vf; | ||
| 240 | }; | ||
| 241 | bool is_vf; | ||
| 202 | uint8_t accel_id; | 242 | uint8_t accel_id; |
| 203 | } __packed; | 243 | } __packed; |
| 204 | #endif | 244 | #endif |
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index fdda8e7ae302..20b08bdcb146 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c | |||
| @@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev) | |||
| 55 | { | 55 | { |
| 56 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | 56 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; |
| 57 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | 57 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; |
| 58 | void *uof_addr; | 58 | void *uof_addr, *mmp_addr; |
| 59 | uint32_t uof_size; | 59 | u32 uof_size, mmp_size; |
| 60 | 60 | ||
| 61 | if (!hw_device->fw_name) | ||
| 62 | return 0; | ||
| 63 | |||
| 64 | if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name, | ||
| 65 | &accel_dev->accel_pci_dev.pci_dev->dev)) { | ||
| 66 | dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n", | ||
| 67 | hw_device->fw_mmp_name); | ||
| 68 | return -EFAULT; | ||
| 69 | } | ||
| 61 | if (request_firmware(&loader_data->uof_fw, hw_device->fw_name, | 70 | if (request_firmware(&loader_data->uof_fw, hw_device->fw_name, |
| 62 | &accel_dev->accel_pci_dev.pci_dev->dev)) { | 71 | &accel_dev->accel_pci_dev.pci_dev->dev)) { |
| 63 | dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n", | 72 | dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n", |
| 64 | hw_device->fw_name); | 73 | hw_device->fw_name); |
| 65 | return -EFAULT; | 74 | goto out_err; |
| 66 | } | 75 | } |
| 67 | 76 | ||
| 68 | uof_size = loader_data->uof_fw->size; | 77 | uof_size = loader_data->uof_fw->size; |
| 69 | uof_addr = (void *)loader_data->uof_fw->data; | 78 | uof_addr = (void *)loader_data->uof_fw->data; |
| 79 | mmp_size = loader_data->mmp_fw->size; | ||
| 80 | mmp_addr = (void *)loader_data->mmp_fw->data; | ||
| 81 | qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size); | ||
| 70 | if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { | 82 | if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { |
| 71 | dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); | 83 | dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); |
| 72 | goto out_err; | 84 | goto out_err; |
| 73 | } | 85 | } |
| 74 | if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { | 86 | if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { |
| 75 | dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); | 87 | dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n"); |
| 76 | goto out_err; | 88 | goto out_err; |
| 77 | } | 89 | } |
| 78 | return 0; | 90 | return 0; |
| @@ -85,11 +97,17 @@ out_err: | |||
| 85 | void adf_ae_fw_release(struct adf_accel_dev *accel_dev) | 97 | void adf_ae_fw_release(struct adf_accel_dev *accel_dev) |
| 86 | { | 98 | { |
| 87 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | 99 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; |
| 100 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
| 101 | |||
| 102 | if (!hw_device->fw_name) | ||
| 103 | return; | ||
| 88 | 104 | ||
| 89 | qat_uclo_del_uof_obj(loader_data->fw_loader); | 105 | qat_uclo_del_uof_obj(loader_data->fw_loader); |
| 90 | qat_hal_deinit(loader_data->fw_loader); | 106 | qat_hal_deinit(loader_data->fw_loader); |
| 91 | release_firmware(loader_data->uof_fw); | 107 | release_firmware(loader_data->uof_fw); |
| 108 | release_firmware(loader_data->mmp_fw); | ||
| 92 | loader_data->uof_fw = NULL; | 109 | loader_data->uof_fw = NULL; |
| 110 | loader_data->mmp_fw = NULL; | ||
| 93 | loader_data->fw_loader = NULL; | 111 | loader_data->fw_loader = NULL; |
| 94 | } | 112 | } |
| 95 | 113 | ||
| @@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev) | |||
| 99 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 117 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 100 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); | 118 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); |
| 101 | 119 | ||
| 120 | if (!hw_data->fw_name) | ||
| 121 | return 0; | ||
| 122 | |||
| 102 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { | 123 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { |
| 103 | if (hw_data->ae_mask & (1 << ae)) { | 124 | if (hw_data->ae_mask & (1 << ae)) { |
| 104 | qat_hal_start(loader_data->fw_loader, ae, 0xFF); | 125 | qat_hal_start(loader_data->fw_loader, ae, 0xFF); |
| @@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev) | |||
| 117 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 138 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 118 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); | 139 | uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev); |
| 119 | 140 | ||
| 141 | if (!hw_data->fw_name) | ||
| 142 | return 0; | ||
| 143 | |||
| 120 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { | 144 | for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) { |
| 121 | if (hw_data->ae_mask & (1 << ae)) { | 145 | if (hw_data->ae_mask & (1 << ae)) { |
| 122 | qat_hal_stop(loader_data->fw_loader, ae, 0xFF); | 146 | qat_hal_stop(loader_data->fw_loader, ae, 0xFF); |
| @@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae) | |||
| 143 | int adf_ae_init(struct adf_accel_dev *accel_dev) | 167 | int adf_ae_init(struct adf_accel_dev *accel_dev) |
| 144 | { | 168 | { |
| 145 | struct adf_fw_loader_data *loader_data; | 169 | struct adf_fw_loader_data *loader_data; |
| 170 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
| 171 | |||
| 172 | if (!hw_device->fw_name) | ||
| 173 | return 0; | ||
| 146 | 174 | ||
| 147 | loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); | 175 | loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL); |
| 148 | if (!loader_data) | 176 | if (!loader_data) |
| @@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev) | |||
| 166 | int adf_ae_shutdown(struct adf_accel_dev *accel_dev) | 194 | int adf_ae_shutdown(struct adf_accel_dev *accel_dev) |
| 167 | { | 195 | { |
| 168 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; | 196 | struct adf_fw_loader_data *loader_data = accel_dev->fw_loader; |
| 197 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
| 198 | |||
| 199 | if (!hw_device->fw_name) | ||
| 200 | return 0; | ||
| 169 | 201 | ||
| 170 | qat_hal_deinit(loader_data->fw_loader); | 202 | qat_hal_deinit(loader_data->fw_loader); |
| 171 | kfree(accel_dev->fw_loader); | 203 | kfree(accel_dev->fw_loader); |
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c new file mode 100644 index 000000000000..147d755fed97 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_admin.c | |||
| @@ -0,0 +1,290 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2014 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2014 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #include <linux/types.h> | ||
| 48 | #include <linux/mutex.h> | ||
| 49 | #include <linux/slab.h> | ||
| 50 | #include <linux/delay.h> | ||
| 51 | #include <linux/pci.h> | ||
| 52 | #include <linux/dma-mapping.h> | ||
| 53 | #include "adf_accel_devices.h" | ||
| 54 | #include "icp_qat_fw_init_admin.h" | ||
| 55 | |||
| 56 | /* Admin Messages Registers */ | ||
| 57 | #define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) | ||
| 58 | #define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) | ||
| 59 | #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 | ||
| 60 | #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 | ||
| 61 | #define ADF_ADMINMSG_LEN 32 | ||
| 62 | |||
| 63 | static const u8 const_tab[1024] = { | ||
| 64 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 65 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 66 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 67 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 68 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 69 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 70 | 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, | ||
| 71 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 72 | 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, | ||
| 73 | 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, | ||
| 74 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 75 | 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00, | ||
| 76 | 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, | ||
| 77 | 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 78 | 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, | ||
| 79 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 80 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 81 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 82 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 83 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 84 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 85 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 86 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, | ||
| 87 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 88 | 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, | ||
| 89 | 0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 90 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, | ||
| 91 | 0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0, | ||
| 92 | 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 93 | 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 94 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e, | ||
| 95 | 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, | ||
| 96 | 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, | ||
| 97 | 0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 98 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, | ||
| 99 | 0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, | ||
| 100 | 0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05, | ||
| 101 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 102 | 0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, | ||
| 103 | 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, | ||
| 104 | 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff, | ||
| 105 | 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, | ||
| 106 | 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, | ||
| 107 | 0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 108 | 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, | ||
| 109 | 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, | ||
| 110 | 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, | ||
| 111 | 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, | ||
| 112 | 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, | ||
| 113 | 0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 114 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 115 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 116 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 117 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 118 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 119 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 120 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 121 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 122 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 123 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 124 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 125 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 126 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 127 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 128 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 129 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 130 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 131 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 132 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 133 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 134 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 135 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 136 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 137 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 138 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 139 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 140 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 141 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
| 142 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
| 143 | |||
| 144 | struct adf_admin_comms { | ||
| 145 | dma_addr_t phy_addr; | ||
| 146 | dma_addr_t const_tbl_addr; | ||
| 147 | void *virt_addr; | ||
| 148 | void __iomem *mailbox_addr; | ||
| 149 | struct mutex lock; /* protects adf_admin_comms struct */ | ||
| 150 | }; | ||
| 151 | |||
| 152 | static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, | ||
| 153 | void *in, void *out) | ||
| 154 | { | ||
| 155 | struct adf_admin_comms *admin = accel_dev->admin; | ||
| 156 | int offset = ae * ADF_ADMINMSG_LEN * 2; | ||
| 157 | void __iomem *mailbox = admin->mailbox_addr; | ||
| 158 | int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; | ||
| 159 | int times, received; | ||
| 160 | |||
| 161 | mutex_lock(&admin->lock); | ||
| 162 | |||
| 163 | if (ADF_CSR_RD(mailbox, mb_offset) == 1) { | ||
| 164 | mutex_unlock(&admin->lock); | ||
| 165 | return -EAGAIN; | ||
| 166 | } | ||
| 167 | |||
| 168 | memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); | ||
| 169 | ADF_CSR_WR(mailbox, mb_offset, 1); | ||
| 170 | received = 0; | ||
| 171 | for (times = 0; times < 50; times++) { | ||
| 172 | msleep(20); | ||
| 173 | if (ADF_CSR_RD(mailbox, mb_offset) == 0) { | ||
| 174 | received = 1; | ||
| 175 | break; | ||
| 176 | } | ||
| 177 | } | ||
| 178 | if (received) | ||
| 179 | memcpy(out, admin->virt_addr + offset + | ||
| 180 | ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); | ||
| 181 | else | ||
| 182 | dev_err(&GET_DEV(accel_dev), | ||
| 183 | "Failed to send admin msg to accelerator\n"); | ||
| 184 | |||
| 185 | mutex_unlock(&admin->lock); | ||
| 186 | return received ? 0 : -EFAULT; | ||
| 187 | } | ||
| 188 | |||
| 189 | static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) | ||
| 190 | { | ||
| 191 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | ||
| 192 | struct icp_qat_fw_init_admin_req req; | ||
| 193 | struct icp_qat_fw_init_admin_resp resp; | ||
| 194 | int i; | ||
| 195 | |||
| 196 | memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); | ||
| 197 | req.init_admin_cmd_id = cmd; | ||
| 198 | |||
| 199 | if (cmd == ICP_QAT_FW_CONSTANTS_CFG) { | ||
| 200 | req.init_cfg_sz = 1024; | ||
| 201 | req.init_cfg_ptr = accel_dev->admin->const_tbl_addr; | ||
| 202 | } | ||
| 203 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
| 204 | memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); | ||
| 205 | if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || | ||
| 206 | resp.init_resp_hdr.status) | ||
| 207 | return -EFAULT; | ||
| 208 | } | ||
| 209 | return 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | /** | ||
| 213 | * adf_send_admin_init() - Function sends init message to FW | ||
| 214 | * @accel_dev: Pointer to acceleration device. | ||
| 215 | * | ||
| 216 | * Function sends admin init message to the FW | ||
| 217 | * | ||
| 218 | * Return: 0 on success, error code otherwise. | ||
| 219 | */ | ||
| 220 | int adf_send_admin_init(struct adf_accel_dev *accel_dev) | ||
| 221 | { | ||
| 222 | int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); | ||
| 223 | |||
| 224 | if (ret) | ||
| 225 | return ret; | ||
| 226 | return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG); | ||
| 227 | } | ||
| 228 | EXPORT_SYMBOL_GPL(adf_send_admin_init); | ||
| 229 | |||
| 230 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | ||
| 231 | { | ||
| 232 | struct adf_admin_comms *admin; | ||
| 233 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 234 | struct adf_bar *pmisc = | ||
| 235 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | ||
| 236 | void __iomem *csr = pmisc->virt_addr; | ||
| 237 | void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; | ||
| 238 | u64 reg_val; | ||
| 239 | |||
| 240 | admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, | ||
| 241 | dev_to_node(&GET_DEV(accel_dev))); | ||
| 242 | if (!admin) | ||
| 243 | return -ENOMEM; | ||
| 244 | admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
| 245 | &admin->phy_addr, GFP_KERNEL); | ||
| 246 | if (!admin->virt_addr) { | ||
| 247 | dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); | ||
| 248 | kfree(admin); | ||
| 249 | return -ENOMEM; | ||
| 250 | } | ||
| 251 | |||
| 252 | admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev), | ||
| 253 | (void *) const_tab, 1024, | ||
| 254 | DMA_TO_DEVICE); | ||
| 255 | |||
| 256 | if (unlikely(dma_mapping_error(&GET_DEV(accel_dev), | ||
| 257 | admin->const_tbl_addr))) { | ||
| 258 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
| 259 | admin->virt_addr, admin->phy_addr); | ||
| 260 | kfree(admin); | ||
| 261 | return -ENOMEM; | ||
| 262 | } | ||
| 263 | reg_val = (u64)admin->phy_addr; | ||
| 264 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); | ||
| 265 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); | ||
| 266 | mutex_init(&admin->lock); | ||
| 267 | admin->mailbox_addr = mailbox; | ||
| 268 | accel_dev->admin = admin; | ||
| 269 | return 0; | ||
| 270 | } | ||
| 271 | EXPORT_SYMBOL_GPL(adf_init_admin_comms); | ||
| 272 | |||
/*
 * Tear down the admin channel set up by adf_init_admin_comms(): free the
 * shared message page, unmap the constants table and release the state.
 * Safe to call when the channel was never initialised.
 */
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin = accel_dev->admin;

	if (!admin)
		return;

	if (admin->virt_addr)
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);

	/* const_tbl_addr is always mapped when admin is non-NULL, since
	 * init fails (and frees admin) if the mapping failed.
	 */
	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
			 DMA_TO_DEVICE);
	mutex_destroy(&admin->lock);
	kfree(admin);
	accel_dev->admin = NULL;
}
EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 2dbc733b8ab2..a57b4194de28 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
| @@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev) | |||
| 91 | dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", | 91 | dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", |
| 92 | accel_dev->accel_id); | 92 | accel_dev->accel_id); |
| 93 | 93 | ||
| 94 | if (!parent) | ||
| 95 | parent = pdev; | ||
| 96 | |||
| 94 | if (!pci_wait_for_pending_transaction(pdev)) | 97 | if (!pci_wait_for_pending_transaction(pdev)) |
| 95 | dev_info(&GET_DEV(accel_dev), | 98 | dev_info(&GET_DEV(accel_dev), |
| 96 | "Transaction still in progress. Proceeding\n"); | 99 | "Transaction still in progress. Proceeding\n"); |
| @@ -206,7 +209,7 @@ static struct pci_error_handlers adf_err_handler = { | |||
| 206 | * QAT acceleration device accel_dev. | 209 | * QAT acceleration device accel_dev. |
| 207 | * To be used by QAT device specific drivers. | 210 | * To be used by QAT device specific drivers. |
| 208 | * | 211 | * |
| 209 | * Return: 0 on success, error code othewise. | 212 | * Return: 0 on success, error code otherwise. |
| 210 | */ | 213 | */ |
| 211 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf) | 214 | int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf) |
| 212 | { | 215 | { |
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index ab65bc274561..d0879790561f 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c | |||
| @@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = { | |||
| 123 | * The table stores device specific config values. | 123 | * The table stores device specific config values. |
| 124 | * To be used by QAT device specific drivers. | 124 | * To be used by QAT device specific drivers. |
| 125 | * | 125 | * |
| 126 | * Return: 0 on success, error code othewise. | 126 | * Return: 0 on success, error code otherwise. |
| 127 | */ | 127 | */ |
| 128 | int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) | 128 | int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) |
| 129 | { | 129 | { |
| @@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) | |||
| 178 | { | 178 | { |
| 179 | struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; | 179 | struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; |
| 180 | 180 | ||
| 181 | if (!dev_cfg_data) | ||
| 182 | return; | ||
| 183 | |||
| 181 | down_write(&dev_cfg_data->lock); | 184 | down_write(&dev_cfg_data->lock); |
| 182 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); | 185 | adf_cfg_section_del_all(&dev_cfg_data->sec_list); |
| 183 | up_write(&dev_cfg_data->lock); | 186 | up_write(&dev_cfg_data->lock); |
| @@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, | |||
| 276 | * in the given acceleration device | 279 | * in the given acceleration device |
| 277 | * To be used by QAT device specific drivers. | 280 | * To be used by QAT device specific drivers. |
| 278 | * | 281 | * |
| 279 | * Return: 0 on success, error code othewise. | 282 | * Return: 0 on success, error code otherwise. |
| 280 | */ | 283 | */ |
| 281 | int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, | 284 | int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, |
| 282 | const char *section_name, | 285 | const char *section_name, |
| @@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param); | |||
| 327 | * will be stored. | 330 | * will be stored. |
| 328 | * To be used by QAT device specific drivers. | 331 | * To be used by QAT device specific drivers. |
| 329 | * | 332 | * |
| 330 | * Return: 0 on success, error code othewise. | 333 | * Return: 0 on success, error code otherwise. |
| 331 | */ | 334 | */ |
| 332 | int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) | 335 | int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) |
| 333 | { | 336 | { |
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 88b82187ac35..c697fb1cdfb5 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h | |||
| @@ -60,7 +60,7 @@ | |||
| 60 | #define ADF_CFG_NO_DEVICE 0xFF | 60 | #define ADF_CFG_NO_DEVICE 0xFF |
| 61 | #define ADF_CFG_AFFINITY_WHATEVER 0xFF | 61 | #define ADF_CFG_AFFINITY_WHATEVER 0xFF |
| 62 | #define MAX_DEVICE_NAME_SIZE 32 | 62 | #define MAX_DEVICE_NAME_SIZE 32 |
| 63 | #define ADF_MAX_DEVICES 32 | 63 | #define ADF_MAX_DEVICES (32 * 32) |
| 64 | 64 | ||
| 65 | enum adf_cfg_val_type { | 65 | enum adf_cfg_val_type { |
| 66 | ADF_DEC, | 66 | ADF_DEC, |
| @@ -71,6 +71,7 @@ enum adf_cfg_val_type { | |||
| 71 | enum adf_device_type { | 71 | enum adf_device_type { |
| 72 | DEV_UNKNOWN = 0, | 72 | DEV_UNKNOWN = 0, |
| 73 | DEV_DH895XCC, | 73 | DEV_DH895XCC, |
| 74 | DEV_DH895XCCVF, | ||
| 74 | }; | 75 | }; |
| 75 | 76 | ||
| 76 | struct adf_dev_status_info { | 77 | struct adf_dev_status_info { |
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 27e16c09230b..7836dffc3d47 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h | |||
| @@ -54,8 +54,8 @@ | |||
| 54 | #include "icp_qat_hal.h" | 54 | #include "icp_qat_hal.h" |
| 55 | 55 | ||
| 56 | #define ADF_MAJOR_VERSION 0 | 56 | #define ADF_MAJOR_VERSION 0 |
| 57 | #define ADF_MINOR_VERSION 1 | 57 | #define ADF_MINOR_VERSION 2 |
| 58 | #define ADF_BUILD_VERSION 3 | 58 | #define ADF_BUILD_VERSION 0 |
| 59 | #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ | 59 | #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ |
| 60 | __stringify(ADF_MINOR_VERSION) "." \ | 60 | __stringify(ADF_MINOR_VERSION) "." \ |
| 61 | __stringify(ADF_BUILD_VERSION) | 61 | __stringify(ADF_BUILD_VERSION) |
| @@ -91,9 +91,13 @@ struct service_hndl { | |||
| 91 | unsigned long start_status; | 91 | unsigned long start_status; |
| 92 | char *name; | 92 | char *name; |
| 93 | struct list_head list; | 93 | struct list_head list; |
| 94 | int admin; | ||
| 95 | }; | 94 | }; |
| 96 | 95 | ||
| 96 | static inline int get_current_node(void) | ||
| 97 | { | ||
| 98 | return topology_physical_package_id(smp_processor_id()); | ||
| 99 | } | ||
| 100 | |||
| 97 | int adf_service_register(struct service_hndl *service); | 101 | int adf_service_register(struct service_hndl *service); |
| 98 | int adf_service_unregister(struct service_hndl *service); | 102 | int adf_service_unregister(struct service_hndl *service); |
| 99 | 103 | ||
| @@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev); | |||
| 102 | int adf_dev_stop(struct adf_accel_dev *accel_dev); | 106 | int adf_dev_stop(struct adf_accel_dev *accel_dev); |
| 103 | void adf_dev_shutdown(struct adf_accel_dev *accel_dev); | 107 | void adf_dev_shutdown(struct adf_accel_dev *accel_dev); |
| 104 | 108 | ||
| 109 | void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); | ||
| 110 | void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); | ||
| 111 | int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); | ||
| 112 | void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); | ||
| 113 | int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); | ||
| 114 | void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info); | ||
| 115 | void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); | ||
| 116 | void adf_clean_vf_map(bool); | ||
| 117 | |||
| 105 | int adf_ctl_dev_register(void); | 118 | int adf_ctl_dev_register(void); |
| 106 | void adf_ctl_dev_unregister(void); | 119 | void adf_ctl_dev_unregister(void); |
| 107 | int adf_processes_dev_register(void); | 120 | int adf_processes_dev_register(void); |
| 108 | void adf_processes_dev_unregister(void); | 121 | void adf_processes_dev_unregister(void); |
| 109 | 122 | ||
| 110 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); | 123 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, |
| 111 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); | 124 | struct adf_accel_dev *pf); |
| 125 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, | ||
| 126 | struct adf_accel_dev *pf); | ||
| 112 | struct list_head *adf_devmgr_get_head(void); | 127 | struct list_head *adf_devmgr_get_head(void); |
| 113 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); | 128 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); |
| 114 | struct adf_accel_dev *adf_devmgr_get_first(void); | 129 | struct adf_accel_dev *adf_devmgr_get_first(void); |
| @@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); | |||
| 130 | void adf_disable_aer(struct adf_accel_dev *accel_dev); | 145 | void adf_disable_aer(struct adf_accel_dev *accel_dev); |
| 131 | int adf_init_aer(void); | 146 | int adf_init_aer(void); |
| 132 | void adf_exit_aer(void); | 147 | void adf_exit_aer(void); |
| 148 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev); | ||
| 149 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); | ||
| 150 | int adf_send_admin_init(struct adf_accel_dev *accel_dev); | ||
| 151 | int adf_init_arb(struct adf_accel_dev *accel_dev); | ||
| 152 | void adf_exit_arb(struct adf_accel_dev *accel_dev); | ||
| 153 | void adf_update_ring_arb(struct adf_etr_ring_data *ring); | ||
| 133 | 154 | ||
| 134 | int adf_dev_get(struct adf_accel_dev *accel_dev); | 155 | int adf_dev_get(struct adf_accel_dev *accel_dev); |
| 135 | void adf_dev_put(struct adf_accel_dev *accel_dev); | 156 | void adf_dev_put(struct adf_accel_dev *accel_dev); |
| @@ -141,10 +162,13 @@ int qat_crypto_unregister(void); | |||
| 141 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node); | 162 | struct qat_crypto_instance *qat_crypto_get_instance_node(int node); |
| 142 | void qat_crypto_put_instance(struct qat_crypto_instance *inst); | 163 | void qat_crypto_put_instance(struct qat_crypto_instance *inst); |
| 143 | void qat_alg_callback(void *resp); | 164 | void qat_alg_callback(void *resp); |
| 165 | void qat_alg_asym_callback(void *resp); | ||
| 144 | int qat_algs_init(void); | 166 | int qat_algs_init(void); |
| 145 | void qat_algs_exit(void); | 167 | void qat_algs_exit(void); |
| 146 | int qat_algs_register(void); | 168 | int qat_algs_register(void); |
| 147 | int qat_algs_unregister(void); | 169 | int qat_algs_unregister(void); |
| 170 | int qat_asym_algs_register(void); | ||
| 171 | void qat_asym_algs_unregister(void); | ||
| 148 | 172 | ||
| 149 | int qat_hal_init(struct adf_accel_dev *accel_dev); | 173 | int qat_hal_init(struct adf_accel_dev *accel_dev); |
| 150 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); | 174 | void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); |
| @@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); | |||
| 196 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); | 220 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); |
| 197 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | 221 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, |
| 198 | void *addr_ptr, int mem_size); | 222 | void *addr_ptr, int mem_size); |
| 223 | void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, | ||
| 224 | void *addr_ptr, int mem_size); | ||
| 225 | #if defined(CONFIG_PCI_IOV) | ||
| 226 | int adf_sriov_configure(struct pci_dev *pdev, int numvfs); | ||
| 227 | void adf_disable_sriov(struct adf_accel_dev *accel_dev); | ||
| 228 | void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | ||
| 229 | uint32_t vf_mask); | ||
| 230 | void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | ||
| 231 | uint32_t vf_mask); | ||
| 232 | #else | ||
| 233 | static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) | ||
| 234 | { | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) | ||
| 239 | { | ||
| 240 | } | ||
| 241 | #endif | ||
| 199 | #endif | 242 | #endif |
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index e056b9e9bf8a..cd8a12af8ec5 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c | |||
| @@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, | |||
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); | 400 | accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); |
| 401 | if (!accel_dev) { | 401 | if (!accel_dev) |
| 402 | pr_err("QAT: Device %d not found\n", dev_info.accel_id); | ||
| 403 | return -ENODEV; | 402 | return -ENODEV; |
| 404 | } | 403 | |
| 405 | hw_data = accel_dev->hw_device; | 404 | hw_data = accel_dev->hw_device; |
| 406 | dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; | 405 | dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; |
| 407 | dev_info.num_ae = hw_data->get_num_aes(hw_data); | 406 | dev_info.num_ae = hw_data->get_num_aes(hw_data); |
| @@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void) | |||
| 495 | adf_exit_aer(); | 494 | adf_exit_aer(); |
| 496 | qat_crypto_unregister(); | 495 | qat_crypto_unregister(); |
| 497 | qat_algs_exit(); | 496 | qat_algs_exit(); |
| 497 | adf_clean_vf_map(false); | ||
| 498 | mutex_destroy(&adf_ctl_lock); | 498 | mutex_destroy(&adf_ctl_lock); |
| 499 | } | 499 | } |
| 500 | 500 | ||
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 3f0ff9e7d840..8dfdb8f90797 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c | |||
| @@ -50,21 +50,125 @@ | |||
| 50 | #include "adf_common_drv.h" | 50 | #include "adf_common_drv.h" |
| 51 | 51 | ||
| 52 | static LIST_HEAD(accel_table); | 52 | static LIST_HEAD(accel_table); |
| 53 | static LIST_HEAD(vfs_table); | ||
| 53 | static DEFINE_MUTEX(table_lock); | 54 | static DEFINE_MUTEX(table_lock); |
| 54 | static uint32_t num_devices; | 55 | static uint32_t num_devices; |
| 55 | 56 | ||
| 57 | struct vf_id_map { | ||
| 58 | u32 bdf; | ||
| 59 | u32 id; | ||
| 60 | u32 fake_id; | ||
| 61 | bool attached; | ||
| 62 | struct list_head list; | ||
| 63 | }; | ||
| 64 | |||
| 65 | static int adf_get_vf_id(struct adf_accel_dev *vf) | ||
| 66 | { | ||
| 67 | return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) + | ||
| 68 | PCI_FUNC(accel_to_pci_dev(vf)->devfn) + | ||
| 69 | (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)); | ||
| 70 | } | ||
| 71 | |||
| 72 | static int adf_get_vf_num(struct adf_accel_dev *vf) | ||
| 73 | { | ||
| 74 | return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf); | ||
| 75 | } | ||
| 76 | |||
| 77 | static struct vf_id_map *adf_find_vf(u32 bdf) | ||
| 78 | { | ||
| 79 | struct list_head *itr; | ||
| 80 | |||
| 81 | list_for_each(itr, &vfs_table) { | ||
| 82 | struct vf_id_map *ptr = | ||
| 83 | list_entry(itr, struct vf_id_map, list); | ||
| 84 | |||
| 85 | if (ptr->bdf == bdf) | ||
| 86 | return ptr; | ||
| 87 | } | ||
| 88 | return NULL; | ||
| 89 | } | ||
| 90 | |||
| 91 | static int adf_get_vf_real_id(u32 fake) | ||
| 92 | { | ||
| 93 | struct list_head *itr; | ||
| 94 | |||
| 95 | list_for_each(itr, &vfs_table) { | ||
| 96 | struct vf_id_map *ptr = | ||
| 97 | list_entry(itr, struct vf_id_map, list); | ||
| 98 | if (ptr->fake_id == fake) | ||
| 99 | return ptr->id; | ||
| 100 | } | ||
| 101 | return -1; | ||
| 102 | } | ||
| 103 | |||
| 104 | /** | ||
| 105 | * adf_clean_vf_map() - Cleans VF id mappings | ||
| 106 | * | ||
| 107 | * Function cleans internal ids for virtual functions. | ||
| 108 | * @vf: flag indicating whether mappings are cleaned | ||
| 109 | * for vfs only or for vfs and pfs | ||
| 110 | */ | ||
| 111 | void adf_clean_vf_map(bool vf) | ||
| 112 | { | ||
| 113 | struct vf_id_map *map; | ||
| 114 | struct list_head *ptr, *tmp; | ||
| 115 | |||
| 116 | mutex_lock(&table_lock); | ||
| 117 | list_for_each_safe(ptr, tmp, &vfs_table) { | ||
| 118 | map = list_entry(ptr, struct vf_id_map, list); | ||
| 119 | if (map->bdf != -1) | ||
| 120 | num_devices--; | ||
| 121 | |||
| 122 | if (vf && map->bdf == -1) | ||
| 123 | continue; | ||
| 124 | |||
| 125 | list_del(ptr); | ||
| 126 | kfree(map); | ||
| 127 | } | ||
| 128 | mutex_unlock(&table_lock); | ||
| 129 | } | ||
| 130 | EXPORT_SYMBOL_GPL(adf_clean_vf_map); | ||
| 131 | |||
| 132 | /** | ||
| 133 | * adf_devmgr_update_class_index() - Update internal index | ||
| 134 | * @hw_data: Pointer to internal device data. | ||
| 135 | * | ||
| 136 | * Function updates internal dev index for VFs | ||
| 137 | */ | ||
| 138 | void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) | ||
| 139 | { | ||
| 140 | struct adf_hw_device_class *class = hw_data->dev_class; | ||
| 141 | struct list_head *itr; | ||
| 142 | int i = 0; | ||
| 143 | |||
| 144 | list_for_each(itr, &accel_table) { | ||
| 145 | struct adf_accel_dev *ptr = | ||
| 146 | list_entry(itr, struct adf_accel_dev, list); | ||
| 147 | |||
| 148 | if (ptr->hw_device->dev_class == class) | ||
| 149 | ptr->hw_device->instance_id = i++; | ||
| 150 | |||
| 151 | if (i == class->instances) | ||
| 152 | break; | ||
| 153 | } | ||
| 154 | } | ||
| 155 | EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); | ||
| 156 | |||
| 56 | /** | 157 | /** |
| 57 | * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework | 158 | * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework |
| 58 | * @accel_dev: Pointer to acceleration device. | 159 | * @accel_dev: Pointer to acceleration device. |
| 160 | * @pf: Corresponding PF if the accel_dev is a VF | ||
| 59 | * | 161 | * |
| 60 | * Function adds acceleration device to the acceleration framework. | 162 | * Function adds acceleration device to the acceleration framework. |
| 61 | * To be used by QAT device specific drivers. | 163 | * To be used by QAT device specific drivers. |
| 62 | * | 164 | * |
| 63 | * Return: 0 on success, error code othewise. | 165 | * Return: 0 on success, error code otherwise. |
| 64 | */ | 166 | */ |
| 65 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) | 167 | int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, |
| 168 | struct adf_accel_dev *pf) | ||
| 66 | { | 169 | { |
| 67 | struct list_head *itr; | 170 | struct list_head *itr; |
| 171 | int ret = 0; | ||
| 68 | 172 | ||
| 69 | if (num_devices == ADF_MAX_DEVICES) { | 173 | if (num_devices == ADF_MAX_DEVICES) { |
| 70 | dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", | 174 | dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", |
| @@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) | |||
| 73 | } | 177 | } |
| 74 | 178 | ||
| 75 | mutex_lock(&table_lock); | 179 | mutex_lock(&table_lock); |
| 76 | list_for_each(itr, &accel_table) { | 180 | atomic_set(&accel_dev->ref_count, 0); |
| 77 | struct adf_accel_dev *ptr = | 181 | |
| 182 | /* PF on host or VF on guest */ | ||
| 183 | if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { | ||
| 184 | struct vf_id_map *map; | ||
| 185 | |||
| 186 | list_for_each(itr, &accel_table) { | ||
| 187 | struct adf_accel_dev *ptr = | ||
| 78 | list_entry(itr, struct adf_accel_dev, list); | 188 | list_entry(itr, struct adf_accel_dev, list); |
| 79 | 189 | ||
| 80 | if (ptr == accel_dev) { | 190 | if (ptr == accel_dev) { |
| 81 | mutex_unlock(&table_lock); | 191 | ret = -EEXIST; |
| 82 | return -EEXIST; | 192 | goto unlock; |
| 193 | } | ||
| 83 | } | 194 | } |
| 195 | |||
| 196 | list_add_tail(&accel_dev->list, &accel_table); | ||
| 197 | accel_dev->accel_id = num_devices++; | ||
| 198 | |||
| 199 | map = kzalloc(sizeof(*map), GFP_KERNEL); | ||
| 200 | if (!map) { | ||
| 201 | ret = -ENOMEM; | ||
| 202 | goto unlock; | ||
| 203 | } | ||
| 204 | map->bdf = ~0; | ||
| 205 | map->id = accel_dev->accel_id; | ||
| 206 | map->fake_id = map->id; | ||
| 207 | map->attached = true; | ||
| 208 | list_add_tail(&map->list, &vfs_table); | ||
| 209 | } else if (accel_dev->is_vf && pf) { | ||
| 210 | /* VF on host */ | ||
| 211 | struct adf_accel_vf_info *vf_info; | ||
| 212 | struct vf_id_map *map; | ||
| 213 | |||
| 214 | vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev); | ||
| 215 | |||
| 216 | map = adf_find_vf(adf_get_vf_num(accel_dev)); | ||
| 217 | if (map) { | ||
| 218 | struct vf_id_map *next; | ||
| 219 | |||
| 220 | accel_dev->accel_id = map->id; | ||
| 221 | list_add_tail(&accel_dev->list, &accel_table); | ||
| 222 | map->fake_id++; | ||
| 223 | map->attached = true; | ||
| 224 | next = list_next_entry(map, list); | ||
| 225 | while (next && &next->list != &vfs_table) { | ||
| 226 | next->fake_id++; | ||
| 227 | next = list_next_entry(next, list); | ||
| 228 | } | ||
| 229 | |||
| 230 | ret = 0; | ||
| 231 | goto unlock; | ||
| 232 | } | ||
| 233 | |||
| 234 | map = kzalloc(sizeof(*map), GFP_KERNEL); | ||
| 235 | if (!map) { | ||
| 236 | ret = -ENOMEM; | ||
| 237 | goto unlock; | ||
| 238 | } | ||
| 239 | |||
| 240 | accel_dev->accel_id = num_devices++; | ||
| 241 | list_add_tail(&accel_dev->list, &accel_table); | ||
| 242 | map->bdf = adf_get_vf_num(accel_dev); | ||
| 243 | map->id = accel_dev->accel_id; | ||
| 244 | map->fake_id = map->id; | ||
| 245 | map->attached = true; | ||
| 246 | list_add_tail(&map->list, &vfs_table); | ||
| 84 | } | 247 | } |
| 85 | atomic_set(&accel_dev->ref_count, 0); | 248 | unlock: |
| 86 | list_add_tail(&accel_dev->list, &accel_table); | ||
| 87 | accel_dev->accel_id = num_devices++; | ||
| 88 | mutex_unlock(&table_lock); | 249 | mutex_unlock(&table_lock); |
| 89 | return 0; | 250 | return ret; |
| 90 | } | 251 | } |
| 91 | EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); | 252 | EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); |
| 92 | 253 | ||
| @@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void) | |||
| 98 | /** | 259 | /** |
| 99 | * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. | 260 | * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. |
| 100 | * @accel_dev: Pointer to acceleration device. | 261 | * @accel_dev: Pointer to acceleration device. |
| 262 | * @pf: Corresponding PF if the accel_dev is a VF | ||
| 101 | * | 263 | * |
| 102 | * Function removes acceleration device from the acceleration framework. | 264 | * Function removes acceleration device from the acceleration framework. |
| 103 | * To be used by QAT device specific drivers. | 265 | * To be used by QAT device specific drivers. |
| 104 | * | 266 | * |
| 105 | * Return: void | 267 | * Return: void |
| 106 | */ | 268 | */ |
| 107 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) | 269 | void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, |
| 270 | struct adf_accel_dev *pf) | ||
| 108 | { | 271 | { |
| 109 | mutex_lock(&table_lock); | 272 | mutex_lock(&table_lock); |
| 273 | if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { | ||
| 274 | num_devices--; | ||
| 275 | } else if (accel_dev->is_vf && pf) { | ||
| 276 | struct vf_id_map *map, *next; | ||
| 277 | |||
| 278 | map = adf_find_vf(adf_get_vf_num(accel_dev)); | ||
| 279 | if (!map) { | ||
| 280 | dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n"); | ||
| 281 | goto unlock; | ||
| 282 | } | ||
| 283 | map->fake_id--; | ||
| 284 | map->attached = false; | ||
| 285 | next = list_next_entry(map, list); | ||
| 286 | while (next && &next->list != &vfs_table) { | ||
| 287 | next->fake_id--; | ||
| 288 | next = list_next_entry(next, list); | ||
| 289 | } | ||
| 290 | } | ||
| 291 | unlock: | ||
| 110 | list_del(&accel_dev->list); | 292 | list_del(&accel_dev->list); |
| 111 | num_devices--; | ||
| 112 | mutex_unlock(&table_lock); | 293 | mutex_unlock(&table_lock); |
| 113 | } | 294 | } |
| 114 | EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); | 295 | EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); |
| @@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); | |||
| 154 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) | 335 | struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) |
| 155 | { | 336 | { |
| 156 | struct list_head *itr; | 337 | struct list_head *itr; |
| 338 | int real_id; | ||
| 157 | 339 | ||
| 158 | mutex_lock(&table_lock); | 340 | mutex_lock(&table_lock); |
| 341 | real_id = adf_get_vf_real_id(id); | ||
| 342 | if (real_id < 0) | ||
| 343 | goto unlock; | ||
| 344 | |||
| 345 | id = real_id; | ||
| 346 | |||
| 159 | list_for_each(itr, &accel_table) { | 347 | list_for_each(itr, &accel_table) { |
| 160 | struct adf_accel_dev *ptr = | 348 | struct adf_accel_dev *ptr = |
| 161 | list_entry(itr, struct adf_accel_dev, list); | 349 | list_entry(itr, struct adf_accel_dev, list); |
| 162 | |||
| 163 | if (ptr->accel_id == id) { | 350 | if (ptr->accel_id == id) { |
| 164 | mutex_unlock(&table_lock); | 351 | mutex_unlock(&table_lock); |
| 165 | return ptr; | 352 | return ptr; |
| 166 | } | 353 | } |
| 167 | } | 354 | } |
| 355 | unlock: | ||
| 168 | mutex_unlock(&table_lock); | 356 | mutex_unlock(&table_lock); |
| 169 | return NULL; | 357 | return NULL; |
| 170 | } | 358 | } |
| @@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id) | |||
| 180 | return -ENODEV; | 368 | return -ENODEV; |
| 181 | } | 369 | } |
| 182 | 370 | ||
| 183 | void adf_devmgr_get_num_dev(uint32_t *num) | 371 | static int adf_get_num_dettached_vfs(void) |
| 184 | { | 372 | { |
| 185 | struct list_head *itr; | 373 | struct list_head *itr; |
| 374 | int vfs = 0; | ||
| 186 | 375 | ||
| 187 | *num = 0; | 376 | mutex_lock(&table_lock); |
| 188 | list_for_each(itr, &accel_table) { | 377 | list_for_each(itr, &vfs_table) { |
| 189 | (*num)++; | 378 | struct vf_id_map *ptr = |
| 379 | list_entry(itr, struct vf_id_map, list); | ||
| 380 | if (ptr->bdf != ~0 && !ptr->attached) | ||
| 381 | vfs++; | ||
| 190 | } | 382 | } |
| 383 | mutex_unlock(&table_lock); | ||
| 384 | return vfs; | ||
| 385 | } | ||
| 386 | |||
| 387 | void adf_devmgr_get_num_dev(uint32_t *num) | ||
| 388 | { | ||
| 389 | *num = num_devices - adf_get_num_dettached_vfs(); | ||
| 191 | } | 390 | } |
| 192 | 391 | ||
| 392 | /** | ||
| 393 | * adf_dev_in_use() - Check whether accel_dev is currently in use | ||
| 394 | * @accel_dev: Pointer to acceleration device. | ||
| 395 | * | ||
| 396 | * To be used by QAT device specific drivers. | ||
| 397 | * | ||
| 398 | * Return: 1 when device is in use, 0 otherwise. | ||
| 399 | */ | ||
| 193 | int adf_dev_in_use(struct adf_accel_dev *accel_dev) | 400 | int adf_dev_in_use(struct adf_accel_dev *accel_dev) |
| 194 | { | 401 | { |
| 195 | return atomic_read(&accel_dev->ref_count) != 0; | 402 | return atomic_read(&accel_dev->ref_count) != 0; |
| 196 | } | 403 | } |
| 404 | EXPORT_SYMBOL_GPL(adf_dev_in_use); | ||
| 197 | 405 | ||
| 406 | /** | ||
| 407 | * adf_dev_get() - Increment accel_dev reference count | ||
| 408 | * @accel_dev: Pointer to acceleration device. | ||
| 409 | * | ||
| 410 | * Increment the accel_dev refcount and if this is the first time | ||
| 411 | * incrementing it during this period the accel_dev is in use, | ||
| 412 | * increment the module refcount too. | ||
| 413 | * To be used by QAT device specific drivers. | ||
| 414 | * | ||
| 415 | * Return: 0 when successful, EFAULT when fail to bump module refcount | ||
| 416 | */ | ||
| 198 | int adf_dev_get(struct adf_accel_dev *accel_dev) | 417 | int adf_dev_get(struct adf_accel_dev *accel_dev) |
| 199 | { | 418 | { |
| 200 | if (atomic_add_return(1, &accel_dev->ref_count) == 1) | 419 | if (atomic_add_return(1, &accel_dev->ref_count) == 1) |
| @@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev) | |||
| 202 | return -EFAULT; | 421 | return -EFAULT; |
| 203 | return 0; | 422 | return 0; |
| 204 | } | 423 | } |
| 424 | EXPORT_SYMBOL_GPL(adf_dev_get); | ||
| 205 | 425 | ||
| 426 | /** | ||
| 427 | * adf_dev_put() - Decrement accel_dev reference count | ||
| 428 | * @accel_dev: Pointer to acceleration device. | ||
| 429 | * | ||
| 430 | * Decrement the accel_dev refcount and if this is the last time | ||
| 431 | * decrementing it during this period the accel_dev is in use, | ||
| 432 | * decrement the module refcount too. | ||
| 433 | * To be used by QAT device specific drivers. | ||
| 434 | * | ||
| 435 | * Return: void | ||
| 436 | */ | ||
| 206 | void adf_dev_put(struct adf_accel_dev *accel_dev) | 437 | void adf_dev_put(struct adf_accel_dev *accel_dev) |
| 207 | { | 438 | { |
| 208 | if (atomic_sub_return(1, &accel_dev->ref_count) == 0) | 439 | if (atomic_sub_return(1, &accel_dev->ref_count) == 0) |
| 209 | module_put(accel_dev->owner); | 440 | module_put(accel_dev->owner); |
| 210 | } | 441 | } |
| 442 | EXPORT_SYMBOL_GPL(adf_dev_put); | ||
| 211 | 443 | ||
| 444 | /** | ||
| 445 | * adf_devmgr_in_reset() - Check whether device is in reset | ||
| 446 | * @accel_dev: Pointer to acceleration device. | ||
| 447 | * | ||
| 448 | * To be used by QAT device specific drivers. | ||
| 449 | * | ||
| 450 | * Return: 1 when the device is being reset, 0 otherwise. | ||
| 451 | */ | ||
| 212 | int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) | 452 | int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) |
| 213 | { | 453 | { |
| 214 | return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); | 454 | return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); |
| 215 | } | 455 | } |
| 456 | EXPORT_SYMBOL_GPL(adf_devmgr_in_reset); | ||
| 216 | 457 | ||
| 458 | /** | ||
| 459 | * adf_dev_started() - Check whether device has started | ||
| 460 | * @accel_dev: Pointer to acceleration device. | ||
| 461 | * | ||
| 462 | * To be used by QAT device specific drivers. | ||
| 463 | * | ||
| 464 | * Return: 1 when the device has started, 0 otherwise | ||
| 465 | */ | ||
| 217 | int adf_dev_started(struct adf_accel_dev *accel_dev) | 466 | int adf_dev_started(struct adf_accel_dev *accel_dev) |
| 218 | { | 467 | { |
| 219 | return test_bit(ADF_STATUS_STARTED, &accel_dev->status); | 468 | return test_bit(ADF_STATUS_STARTED, &accel_dev->status); |
| 220 | } | 469 | } |
| 470 | EXPORT_SYMBOL_GPL(adf_dev_started); | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index 1864bdb36f8f..6849422e04bb 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c | |||
| @@ -44,9 +44,8 @@ | |||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 46 | */ | 46 | */ |
| 47 | #include <adf_accel_devices.h> | 47 | #include "adf_accel_devices.h" |
| 48 | #include <adf_transport_internal.h> | 48 | #include "adf_transport_internal.h" |
| 49 | #include "adf_drv.h" | ||
| 50 | 49 | ||
| 51 | #define ADF_ARB_NUM 4 | 50 | #define ADF_ARB_NUM 4 |
| 52 | #define ADF_ARB_REQ_RING_NUM 8 | 51 | #define ADF_ARB_REQ_RING_NUM 8 |
| @@ -58,7 +57,6 @@ | |||
| 58 | #define ADF_ARB_RO_EN_OFFSET 0x090 | 57 | #define ADF_ARB_RO_EN_OFFSET 0x090 |
| 59 | #define ADF_ARB_WQCFG_OFFSET 0x100 | 58 | #define ADF_ARB_WQCFG_OFFSET 0x100 |
| 60 | #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 | 59 | #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 |
| 61 | #define ADF_ARB_WRK_2_SER_MAP 10 | ||
| 62 | #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C | 60 | #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C |
| 63 | 61 | ||
| 64 | #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ | 62 | #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ |
| @@ -89,10 +87,11 @@ | |||
| 89 | 87 | ||
| 90 | int adf_init_arb(struct adf_accel_dev *accel_dev) | 88 | int adf_init_arb(struct adf_accel_dev *accel_dev) |
| 91 | { | 89 | { |
| 90 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 92 | void __iomem *csr = accel_dev->transport->banks[0].csr_addr; | 91 | void __iomem *csr = accel_dev->transport->banks[0].csr_addr; |
| 93 | uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; | 92 | u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; |
| 94 | uint32_t arb, i; | 93 | u32 arb, i; |
| 95 | const uint32_t *thd_2_arb_cfg; | 94 | const u32 *thd_2_arb_cfg; |
| 96 | 95 | ||
| 97 | /* Service arb configured for 32 bytes responses and | 96 | /* Service arb configured for 32 bytes responses and |
| 98 | * ring flow control check enabled. */ | 97 | * ring flow control check enabled. */ |
| @@ -109,30 +108,39 @@ int adf_init_arb(struct adf_accel_dev *accel_dev) | |||
| 109 | WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); | 108 | WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF); |
| 110 | 109 | ||
| 111 | /* Setup worker queue registers */ | 110 | /* Setup worker queue registers */ |
| 112 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | 111 | for (i = 0; i < hw_data->num_engines; i++) |
| 113 | WRITE_CSR_ARB_WQCFG(csr, i, i); | 112 | WRITE_CSR_ARB_WQCFG(csr, i, i); |
| 114 | 113 | ||
| 115 | /* Map worker threads to service arbiters */ | 114 | /* Map worker threads to service arbiters */ |
| 116 | adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg); | 115 | hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg); |
| 117 | 116 | ||
| 118 | if (!thd_2_arb_cfg) | 117 | if (!thd_2_arb_cfg) |
| 119 | return -EFAULT; | 118 | return -EFAULT; |
| 120 | 119 | ||
| 121 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | 120 | for (i = 0; i < hw_data->num_engines; i++) |
| 122 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); | 121 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i)); |
| 123 | 122 | ||
| 124 | return 0; | 123 | return 0; |
| 125 | } | 124 | } |
| 126 | 125 | EXPORT_SYMBOL_GPL(adf_init_arb); | |
| 127 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring) | 126 | |
| 127 | /** | ||
| 128 | * adf_update_ring_arb() - update ring arbitration register | ||
| 129 | * @ring: Pointer to ring data. | ||
| 130 | * | ||
| 131 | * Function enables or disables rings for/from arbitration. | ||
| 132 | */ | ||
| 133 | void adf_update_ring_arb(struct adf_etr_ring_data *ring) | ||
| 128 | { | 134 | { |
| 129 | WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, | 135 | WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, |
| 130 | ring->bank->bank_number, | 136 | ring->bank->bank_number, |
| 131 | ring->bank->ring_mask & 0xFF); | 137 | ring->bank->ring_mask & 0xFF); |
| 132 | } | 138 | } |
| 139 | EXPORT_SYMBOL_GPL(adf_update_ring_arb); | ||
| 133 | 140 | ||
| 134 | void adf_exit_arb(struct adf_accel_dev *accel_dev) | 141 | void adf_exit_arb(struct adf_accel_dev *accel_dev) |
| 135 | { | 142 | { |
| 143 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 136 | void __iomem *csr; | 144 | void __iomem *csr; |
| 137 | unsigned int i; | 145 | unsigned int i; |
| 138 | 146 | ||
| @@ -146,14 +154,15 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) | |||
| 146 | WRITE_CSR_ARB_SARCONFIG(csr, i, 0); | 154 | WRITE_CSR_ARB_SARCONFIG(csr, i, 0); |
| 147 | 155 | ||
| 148 | /* Shutdown work queue */ | 156 | /* Shutdown work queue */ |
| 149 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | 157 | for (i = 0; i < hw_data->num_engines; i++) |
| 150 | WRITE_CSR_ARB_WQCFG(csr, i, 0); | 158 | WRITE_CSR_ARB_WQCFG(csr, i, 0); |
| 151 | 159 | ||
| 152 | /* Unmap worker threads to service arbiters */ | 160 | /* Unmap worker threads to service arbiters */ |
| 153 | for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++) | 161 | for (i = 0; i < hw_data->num_engines; i++) |
| 154 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); | 162 | WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0); |
| 155 | 163 | ||
| 156 | /* Disable arbitration on all rings */ | 164 | /* Disable arbitration on all rings */ |
| 157 | for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) | 165 | for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) |
| 158 | WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); | 166 | WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0); |
| 159 | } | 167 | } |
| 168 | EXPORT_SYMBOL_GPL(adf_exit_arb); | ||
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 245f43237a2d..ac37a89965ac 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c | |||
| @@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service) | |||
| 69 | * Function adds the acceleration service to the acceleration framework. | 69 | * Function adds the acceleration service to the acceleration framework. |
| 70 | * To be used by QAT device specific drivers. | 70 | * To be used by QAT device specific drivers. |
| 71 | * | 71 | * |
| 72 | * Return: 0 on success, error code othewise. | 72 | * Return: 0 on success, error code otherwise. |
| 73 | */ | 73 | */ |
| 74 | int adf_service_register(struct service_hndl *service) | 74 | int adf_service_register(struct service_hndl *service) |
| 75 | { | 75 | { |
| @@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service) | |||
| 94 | * Function remove the acceleration service from the acceleration framework. | 94 | * Function remove the acceleration service from the acceleration framework. |
| 95 | * To be used by QAT device specific drivers. | 95 | * To be used by QAT device specific drivers. |
| 96 | * | 96 | * |
| 97 | * Return: 0 on success, error code othewise. | 97 | * Return: 0 on success, error code otherwise. |
| 98 | */ | 98 | */ |
| 99 | int adf_service_unregister(struct service_hndl *service) | 99 | int adf_service_unregister(struct service_hndl *service) |
| 100 | { | 100 | { |
| @@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister); | |||
| 114 | * Initialize the ring data structures and the admin comms and arbitration | 114 | * Initialize the ring data structures and the admin comms and arbitration |
| 115 | * services. | 115 | * services. |
| 116 | * | 116 | * |
| 117 | * Return: 0 on success, error code othewise. | 117 | * Return: 0 on success, error code otherwise. |
| 118 | */ | 118 | */ |
| 119 | int adf_dev_init(struct adf_accel_dev *accel_dev) | 119 | int adf_dev_init(struct adf_accel_dev *accel_dev) |
| 120 | { | 120 | { |
| @@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) | |||
| 177 | */ | 177 | */ |
| 178 | list_for_each(list_itr, &service_table) { | 178 | list_for_each(list_itr, &service_table) { |
| 179 | service = list_entry(list_itr, struct service_hndl, list); | 179 | service = list_entry(list_itr, struct service_hndl, list); |
| 180 | if (!service->admin) | ||
| 181 | continue; | ||
| 182 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { | ||
| 183 | dev_err(&GET_DEV(accel_dev), | ||
| 184 | "Failed to initialise service %s\n", | ||
| 185 | service->name); | ||
| 186 | return -EFAULT; | ||
| 187 | } | ||
| 188 | set_bit(accel_dev->accel_id, &service->init_status); | ||
| 189 | } | ||
| 190 | list_for_each(list_itr, &service_table) { | ||
| 191 | service = list_entry(list_itr, struct service_hndl, list); | ||
| 192 | if (service->admin) | ||
| 193 | continue; | ||
| 194 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { | 180 | if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { |
| 195 | dev_err(&GET_DEV(accel_dev), | 181 | dev_err(&GET_DEV(accel_dev), |
| 196 | "Failed to initialise service %s\n", | 182 | "Failed to initialise service %s\n", |
| @@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) | |||
| 201 | } | 187 | } |
| 202 | 188 | ||
| 203 | hw_data->enable_error_correction(accel_dev); | 189 | hw_data->enable_error_correction(accel_dev); |
| 190 | hw_data->enable_vf2pf_comms(accel_dev); | ||
| 204 | 191 | ||
| 205 | return 0; | 192 | return 0; |
| 206 | } | 193 | } |
| @@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init); | |||
| 214 | * is ready to be used. | 201 | * is ready to be used. |
| 215 | * To be used by QAT device specific drivers. | 202 | * To be used by QAT device specific drivers. |
| 216 | * | 203 | * |
| 217 | * Return: 0 on success, error code othewise. | 204 | * Return: 0 on success, error code otherwise. |
| 218 | */ | 205 | */ |
| 219 | int adf_dev_start(struct adf_accel_dev *accel_dev) | 206 | int adf_dev_start(struct adf_accel_dev *accel_dev) |
| 220 | { | 207 | { |
| 208 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 221 | struct service_hndl *service; | 209 | struct service_hndl *service; |
| 222 | struct list_head *list_itr; | 210 | struct list_head *list_itr; |
| 223 | 211 | ||
| @@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) | |||
| 229 | } | 217 | } |
| 230 | set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); | 218 | set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); |
| 231 | 219 | ||
| 232 | list_for_each(list_itr, &service_table) { | 220 | if (hw_data->send_admin_init(accel_dev)) { |
| 233 | service = list_entry(list_itr, struct service_hndl, list); | 221 | dev_err(&GET_DEV(accel_dev), "Failed to send init message\n"); |
| 234 | if (!service->admin) | 222 | return -EFAULT; |
| 235 | continue; | ||
| 236 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { | ||
| 237 | dev_err(&GET_DEV(accel_dev), | ||
| 238 | "Failed to start service %s\n", | ||
| 239 | service->name); | ||
| 240 | return -EFAULT; | ||
| 241 | } | ||
| 242 | set_bit(accel_dev->accel_id, &service->start_status); | ||
| 243 | } | 223 | } |
| 224 | |||
| 244 | list_for_each(list_itr, &service_table) { | 225 | list_for_each(list_itr, &service_table) { |
| 245 | service = list_entry(list_itr, struct service_hndl, list); | 226 | service = list_entry(list_itr, struct service_hndl, list); |
| 246 | if (service->admin) | ||
| 247 | continue; | ||
| 248 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { | 227 | if (service->event_hld(accel_dev, ADF_EVENT_START)) { |
| 249 | dev_err(&GET_DEV(accel_dev), | 228 | dev_err(&GET_DEV(accel_dev), |
| 250 | "Failed to start service %s\n", | 229 | "Failed to start service %s\n", |
| @@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) | |||
| 257 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | 236 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); |
| 258 | set_bit(ADF_STATUS_STARTED, &accel_dev->status); | 237 | set_bit(ADF_STATUS_STARTED, &accel_dev->status); |
| 259 | 238 | ||
| 260 | if (qat_algs_register()) { | 239 | if (!list_empty(&accel_dev->crypto_list) && |
| 240 | (qat_algs_register() || qat_asym_algs_register())) { | ||
| 261 | dev_err(&GET_DEV(accel_dev), | 241 | dev_err(&GET_DEV(accel_dev), |
| 262 | "Failed to register crypto algs\n"); | 242 | "Failed to register crypto algs\n"); |
| 263 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); | 243 | set_bit(ADF_STATUS_STARTING, &accel_dev->status); |
| @@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start); | |||
| 276 | * is shutting down. | 256 | * is shutting down. |
| 277 | * To be used by QAT device specific drivers. | 257 | * To be used by QAT device specific drivers. |
| 278 | * | 258 | * |
| 279 | * Return: 0 on success, error code othewise. | 259 | * Return: 0 on success, error code otherwise. |
| 280 | */ | 260 | */ |
| 281 | int adf_dev_stop(struct adf_accel_dev *accel_dev) | 261 | int adf_dev_stop(struct adf_accel_dev *accel_dev) |
| 282 | { | 262 | { |
| @@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) | |||
| 292 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); | 272 | clear_bit(ADF_STATUS_STARTING, &accel_dev->status); |
| 293 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); | 273 | clear_bit(ADF_STATUS_STARTED, &accel_dev->status); |
| 294 | 274 | ||
| 295 | if (qat_algs_unregister()) | 275 | if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister()) |
| 296 | dev_err(&GET_DEV(accel_dev), | 276 | dev_err(&GET_DEV(accel_dev), |
| 297 | "Failed to unregister crypto algs\n"); | 277 | "Failed to unregister crypto algs\n"); |
| 298 | 278 | ||
| 279 | if (!list_empty(&accel_dev->crypto_list)) | ||
| 280 | qat_asym_algs_unregister(); | ||
| 281 | |||
| 299 | list_for_each(list_itr, &service_table) { | 282 | list_for_each(list_itr, &service_table) { |
| 300 | service = list_entry(list_itr, struct service_hndl, list); | 283 | service = list_entry(list_itr, struct service_hndl, list); |
| 301 | if (service->admin) | ||
| 302 | continue; | ||
| 303 | if (!test_bit(accel_dev->accel_id, &service->start_status)) | 284 | if (!test_bit(accel_dev->accel_id, &service->start_status)) |
| 304 | continue; | 285 | continue; |
| 305 | ret = service->event_hld(accel_dev, ADF_EVENT_STOP); | 286 | ret = service->event_hld(accel_dev, ADF_EVENT_STOP); |
| @@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) | |||
| 310 | clear_bit(accel_dev->accel_id, &service->start_status); | 291 | clear_bit(accel_dev->accel_id, &service->start_status); |
| 311 | } | 292 | } |
| 312 | } | 293 | } |
| 313 | list_for_each(list_itr, &service_table) { | ||
| 314 | service = list_entry(list_itr, struct service_hndl, list); | ||
| 315 | if (!service->admin) | ||
| 316 | continue; | ||
| 317 | if (!test_bit(accel_dev->accel_id, &service->start_status)) | ||
| 318 | continue; | ||
| 319 | if (service->event_hld(accel_dev, ADF_EVENT_STOP)) | ||
| 320 | dev_err(&GET_DEV(accel_dev), | ||
| 321 | "Failed to shutdown service %s\n", | ||
| 322 | service->name); | ||
| 323 | else | ||
| 324 | clear_bit(accel_dev->accel_id, &service->start_status); | ||
| 325 | } | ||
| 326 | 294 | ||
| 327 | if (wait) | 295 | if (wait) |
| 328 | msleep(100); | 296 | msleep(100); |
| @@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) | |||
| 373 | 341 | ||
| 374 | list_for_each(list_itr, &service_table) { | 342 | list_for_each(list_itr, &service_table) { |
| 375 | service = list_entry(list_itr, struct service_hndl, list); | 343 | service = list_entry(list_itr, struct service_hndl, list); |
| 376 | if (service->admin) | ||
| 377 | continue; | ||
| 378 | if (!test_bit(accel_dev->accel_id, &service->init_status)) | ||
| 379 | continue; | ||
| 380 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) | ||
| 381 | dev_err(&GET_DEV(accel_dev), | ||
| 382 | "Failed to shutdown service %s\n", | ||
| 383 | service->name); | ||
| 384 | else | ||
| 385 | clear_bit(accel_dev->accel_id, &service->init_status); | ||
| 386 | } | ||
| 387 | list_for_each(list_itr, &service_table) { | ||
| 388 | service = list_entry(list_itr, struct service_hndl, list); | ||
| 389 | if (!service->admin) | ||
| 390 | continue; | ||
| 391 | if (!test_bit(accel_dev->accel_id, &service->init_status)) | 344 | if (!test_bit(accel_dev->accel_id, &service->init_status)) |
| 392 | continue; | 345 | continue; |
| 393 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) | 346 | if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) |
| @@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) | |||
| 413 | if (hw_data->exit_admin_comms) | 366 | if (hw_data->exit_admin_comms) |
| 414 | hw_data->exit_admin_comms(accel_dev); | 367 | hw_data->exit_admin_comms(accel_dev); |
| 415 | 368 | ||
| 369 | hw_data->disable_iov(accel_dev); | ||
| 416 | adf_cleanup_etr_data(accel_dev); | 370 | adf_cleanup_etr_data(accel_dev); |
| 417 | } | 371 | } |
| 418 | EXPORT_SYMBOL_GPL(adf_dev_shutdown); | 372 | EXPORT_SYMBOL_GPL(adf_dev_shutdown); |
| @@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) | |||
| 424 | 378 | ||
| 425 | list_for_each(list_itr, &service_table) { | 379 | list_for_each(list_itr, &service_table) { |
| 426 | service = list_entry(list_itr, struct service_hndl, list); | 380 | service = list_entry(list_itr, struct service_hndl, list); |
| 427 | if (service->admin) | ||
| 428 | continue; | ||
| 429 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) | ||
| 430 | dev_err(&GET_DEV(accel_dev), | ||
| 431 | "Failed to restart service %s.\n", | ||
| 432 | service->name); | ||
| 433 | } | ||
| 434 | list_for_each(list_itr, &service_table) { | ||
| 435 | service = list_entry(list_itr, struct service_hndl, list); | ||
| 436 | if (!service->admin) | ||
| 437 | continue; | ||
| 438 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) | 381 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) |
| 439 | dev_err(&GET_DEV(accel_dev), | 382 | dev_err(&GET_DEV(accel_dev), |
| 440 | "Failed to restart service %s.\n", | 383 | "Failed to restart service %s.\n", |
| @@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) | |||
| 450 | 393 | ||
| 451 | list_for_each(list_itr, &service_table) { | 394 | list_for_each(list_itr, &service_table) { |
| 452 | service = list_entry(list_itr, struct service_hndl, list); | 395 | service = list_entry(list_itr, struct service_hndl, list); |
| 453 | if (service->admin) | ||
| 454 | continue; | ||
| 455 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) | ||
| 456 | dev_err(&GET_DEV(accel_dev), | ||
| 457 | "Failed to restart service %s.\n", | ||
| 458 | service->name); | ||
| 459 | } | ||
| 460 | list_for_each(list_itr, &service_table) { | ||
| 461 | service = list_entry(list_itr, struct service_hndl, list); | ||
| 462 | if (!service->admin) | ||
| 463 | continue; | ||
| 464 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) | 396 | if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) |
| 465 | dev_err(&GET_DEV(accel_dev), | 397 | dev_err(&GET_DEV(accel_dev), |
| 466 | "Failed to restart service %s.\n", | 398 | "Failed to restart service %s.\n", |
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c new file mode 100644 index 000000000000..5fdbad809343 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | |||
| @@ -0,0 +1,438 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2015 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2015 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | |||
| 48 | #include <linux/pci.h> | ||
| 49 | #include <linux/mutex.h> | ||
| 50 | #include <linux/delay.h> | ||
| 51 | #include "adf_accel_devices.h" | ||
| 52 | #include "adf_common_drv.h" | ||
| 53 | #include "adf_pf2vf_msg.h" | ||
| 54 | |||
| 55 | #define ADF_DH895XCC_EP_OFFSET 0x3A000 | ||
| 56 | #define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C) | ||
| 57 | #define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9) | ||
| 58 | #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC) | ||
| 59 | #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16) | ||
| 60 | |||
| 61 | /** | ||
| 62 | * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts | ||
| 63 | * @accel_dev: Pointer to acceleration device. | ||
| 64 | * | ||
| 65 | * Function enables PF to VF interrupts | ||
| 66 | */ | ||
| 67 | void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | ||
| 68 | { | ||
| 69 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | ||
| 70 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 71 | void __iomem *pmisc_bar_addr = | ||
| 72 | pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr; | ||
| 73 | |||
| 74 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0); | ||
| 75 | } | ||
| 76 | EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts); | ||
| 77 | |||
| 78 | /** | ||
| 79 | * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts | ||
| 80 | * @accel_dev: Pointer to acceleration device. | ||
| 81 | * | ||
| 82 | * Function disables PF to VF interrupts | ||
| 83 | */ | ||
| 84 | void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) | ||
| 85 | { | ||
| 86 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | ||
| 87 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 88 | void __iomem *pmisc_bar_addr = | ||
| 89 | pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr; | ||
| 90 | |||
| 91 | ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2); | ||
| 92 | } | ||
| 93 | EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts); | ||
| 94 | |||
| 95 | void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, | ||
| 96 | u32 vf_mask) | ||
| 97 | { | ||
| 98 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 99 | struct adf_bar *pmisc = | ||
| 100 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | ||
| 101 | void __iomem *pmisc_addr = pmisc->virt_addr; | ||
| 102 | u32 reg; | ||
| 103 | |||
| 104 | /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */ | ||
| 105 | if (vf_mask & 0xFFFF) { | ||
| 106 | reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3); | ||
| 107 | reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask); | ||
| 108 | ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg); | ||
| 109 | } | ||
| 110 | |||
| 111 | /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */ | ||
| 112 | if (vf_mask >> 16) { | ||
| 113 | reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5); | ||
| 114 | reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask); | ||
| 115 | ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); | ||
| 116 | } | ||
| 117 | } | ||
| 118 | |||
| 119 | /** | ||
| 120 | * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts | ||
| 121 | * @accel_dev: Pointer to acceleration device. | ||
| 122 | * | ||
| 123 | * Function disables VF to PF interrupts | ||
| 124 | */ | ||
| 125 | void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) | ||
| 126 | { | ||
| 127 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 128 | struct adf_bar *pmisc = | ||
| 129 | &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; | ||
| 130 | void __iomem *pmisc_addr = pmisc->virt_addr; | ||
| 131 | u32 reg; | ||
| 132 | |||
| 133 | /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */ | ||
| 134 | if (vf_mask & 0xFFFF) { | ||
| 135 | reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) | | ||
| 136 | ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask); | ||
| 137 | ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg); | ||
| 138 | } | ||
| 139 | |||
| 140 | /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */ | ||
| 141 | if (vf_mask >> 16) { | ||
| 142 | reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) | | ||
| 143 | ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask); | ||
| 144 | ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts); | ||
| 148 | |||
| 149 | static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) | ||
| 150 | { | ||
| 151 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | ||
| 152 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 153 | void __iomem *pmisc_bar_addr = | ||
| 154 | pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr; | ||
| 155 | u32 val, pf2vf_offset, count = 0; | ||
| 156 | u32 local_in_use_mask, local_in_use_pattern; | ||
| 157 | u32 remote_in_use_mask, remote_in_use_pattern; | ||
| 158 | struct mutex *lock; /* lock preventing concurrent access of CSR */ | ||
| 159 | u32 int_bit; | ||
| 160 | int ret = 0; | ||
| 161 | |||
| 162 | if (accel_dev->is_vf) { | ||
| 163 | pf2vf_offset = hw_data->get_pf2vf_offset(0); | ||
| 164 | lock = &accel_dev->vf.vf2pf_lock; | ||
| 165 | local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK; | ||
| 166 | local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF; | ||
| 167 | remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK; | ||
| 168 | remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF; | ||
| 169 | int_bit = ADF_VF2PF_INT; | ||
| 170 | } else { | ||
| 171 | pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr); | ||
| 172 | lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock; | ||
| 173 | local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK; | ||
| 174 | local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF; | ||
| 175 | remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK; | ||
| 176 | remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF; | ||
| 177 | int_bit = ADF_PF2VF_INT; | ||
| 178 | } | ||
| 179 | |||
| 180 | mutex_lock(lock); | ||
| 181 | |||
| 182 | /* Check if PF2VF CSR is in use by remote function */ | ||
| 183 | val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); | ||
| 184 | if ((val & remote_in_use_mask) == remote_in_use_pattern) { | ||
| 185 | dev_dbg(&GET_DEV(accel_dev), | ||
| 186 | "PF2VF CSR in use by remote function\n"); | ||
| 187 | ret = -EBUSY; | ||
| 188 | goto out; | ||
| 189 | } | ||
| 190 | |||
| 191 | /* Attempt to get ownership of PF2VF CSR */ | ||
| 192 | msg &= ~local_in_use_mask; | ||
| 193 | msg |= local_in_use_pattern; | ||
| 194 | ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg); | ||
| 195 | |||
| 196 | /* Wait in case remote func also attempting to get ownership */ | ||
| 197 | msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY); | ||
| 198 | |||
| 199 | val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); | ||
| 200 | if ((val & local_in_use_mask) != local_in_use_pattern) { | ||
| 201 | dev_dbg(&GET_DEV(accel_dev), | ||
| 202 | "PF2VF CSR in use by remote - collision detected\n"); | ||
| 203 | ret = -EBUSY; | ||
| 204 | goto out; | ||
| 205 | } | ||
| 206 | |||
| 207 | /* | ||
| 208 | * This function now owns the PV2VF CSR. The IN_USE_BY pattern must | ||
| 209 | * remain in the PF2VF CSR for all writes including ACK from remote | ||
| 210 | * until this local function relinquishes the CSR. Send the message | ||
| 211 | * by interrupting the remote. | ||
| 212 | */ | ||
| 213 | ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit); | ||
| 214 | |||
| 215 | /* Wait for confirmation from remote func it received the message */ | ||
| 216 | do { | ||
| 217 | msleep(ADF_IOV_MSG_ACK_DELAY); | ||
| 218 | val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); | ||
| 219 | } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY)); | ||
| 220 | |||
| 221 | if (val & int_bit) { | ||
| 222 | dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); | ||
| 223 | val &= ~int_bit; | ||
| 224 | ret = -EIO; | ||
| 225 | } | ||
| 226 | |||
| 227 | /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */ | ||
| 228 | ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask); | ||
| 229 | out: | ||
| 230 | mutex_unlock(lock); | ||
| 231 | return ret; | ||
| 232 | } | ||
| 233 | |||
/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Message to send
 * @vf_nr: VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF (or, on the VF side, from
 * the VF to the PF), retrying a bounded number of times with a delay if
 * the shared CSR is busy or a collision is detected.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	u32 count = 0;

	int ret;

	/* Retry on -EBUSY/-EIO; __adf_iov_putmsg handles the CSR handshake */
	do {
		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
		if (ret)
			msleep(ADF_IOV_MSG_RETRY_DELAY);
	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));

	return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);
| 258 | |||
/*
 * adf_vf2pf_req_hndl() - PF-side handler for a VF2PF request
 * @vf_info: Per-VF state of the VF that raised the interrupt.
 *
 * Reads the pending message from the VF's PF2VF CSR, ACKs it by clearing
 * the VF2PF interrupt bit, dispatches on the message type and, where a
 * response is defined, sends it back via adf_iov_putmsg(). Finally
 * re-enables the VF2PF interrupt for this VF (it was masked by the ISR
 * before this handler was scheduled -- presumably; confirm against the
 * interrupt path).
 */
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));

	/* To ACK, clear the VF2PFINT bit */
	msg &= ~ADF_VF2PF_INT;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		goto err;

	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
	{
		/* VF reports its compat version; reply with ours + verdict */
		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

		dev_dbg(&GET_DEV(accel_dev),
			"Compatibility Version Request from VF%d vers=%u\n",
			vf_nr + 1, vf_compat_ver);

		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
			/* VF too old for this PF */
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) incompatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
			/* VF newer than PF: let the VF decide compatibility */
			dev_err(&GET_DEV(accel_dev),
				"VF (vers %d) compat with PF (vers %d) unkn.\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		} else {
			dev_dbg(&GET_DEV(accel_dev),
				"VF (vers %d) compatible with PF (vers %d)\n",
				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
			resp |= ADF_PF2VF_VF_COMPATIBLE <<
				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		}
	}
	break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		/* Legacy request format: always answer "compatible, v1.1" */
		dev_dbg(&GET_DEV(accel_dev),
			"Legacy VersionRequest received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
			  ADF_PF2VF_MSGTYPE_SHIFT) |
			 (ADF_PFVF_COMPATIBILITY_VERSION <<
			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		resp |= ADF_PF2VF_VF_COMPATIBLE <<
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
			1 << ADF_PF2VF_MINORVERSION_SHIFT;
		break;
	case ADF_VF2PF_MSGTYPE_INIT:
	{
		/* Mark VF as started so it gets restart notifications */
		dev_dbg(&GET_DEV(accel_dev),
			"Init message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = true;
	}
	break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
	{
		dev_dbg(&GET_DEV(accel_dev),
			"Shutdown message received from VF%d 0x%x\n",
			vf_nr + 1, msg);
		vf_info->init = false;
	}
	break;
	default:
		goto err;
	}

	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

	/* re-enable interrupt on PF from this VF */
	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
	return;
err:
	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
		vf_nr + 1, msg);
}
| 360 | |||
| 361 | void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) | ||
| 362 | { | ||
| 363 | struct adf_accel_vf_info *vf; | ||
| 364 | u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM | | ||
| 365 | (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT)); | ||
| 366 | int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); | ||
| 367 | |||
| 368 | for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { | ||
| 369 | if (vf->init && adf_iov_putmsg(accel_dev, msg, i)) | ||
| 370 | dev_err(&GET_DEV(accel_dev), | ||
| 371 | "Failed to send restarting msg to VF%d\n", i); | ||
| 372 | } | ||
| 373 | } | ||
| 374 | |||
/*
 * adf_vf2pf_request_version() - VF-side compatibility handshake with the PF
 * @accel_dev: VF acceleration device.
 *
 * Sends a COMPAT_VER_REQ carrying this driver's compatibility version,
 * waits for the PF's response (delivered asynchronously into
 * accel_dev->vf.compatible / pf_version by the VF interrupt path and
 * signalled via iov_msg_completion), then decides whether the two
 * drivers can interoperate.
 *
 * Return: 0 if compatible, -EIO on send/timeout failure, -EINVAL if the
 * versions are incompatible or the PF response is invalid.
 */
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msg = 0;
	int ret;

	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	/* The version must fit in the 8-bit field above the shift */
	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);

	/* Send request from VF to PF */
	ret = adf_iov_putmsg(accel_dev, msg, 0);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Compatibility Version Request.\n");
		return ret;
	}

	/* Wait for response */
	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
					 timeout)) {
		dev_err(&GET_DEV(accel_dev),
			"IOV request/response message timeout expired\n");
		return -EIO;
	}

	/* Response from PF received, check compatibility */
	switch (accel_dev->vf.compatible) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF and decides whether it is compatible */
		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
			break;
		/* fall through */
	case ADF_PF2VF_VF_INCOMPATIBLE:
		dev_err(&GET_DEV(accel_dev),
			"PF (vers %d) and VF (vers %d) are not compatible\n",
			accel_dev->vf.pf_version,
			ADF_PFVF_COMPATIBILITY_VERSION);
		return -EINVAL;
	default:
		dev_err(&GET_DEV(accel_dev),
			"Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	return ret;
}
| 425 | |||
| 426 | /** | ||
| 427 | * adf_enable_vf2pf_comms() - Function enables communication from vf to pf | ||
| 428 | * | ||
| 429 | * @accel_dev: Pointer to acceleration device virtual function. | ||
| 430 | * | ||
| 431 | * Return: 0 on success, error code otherwise. | ||
| 432 | */ | ||
| 433 | int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) | ||
| 434 | { | ||
| 435 | adf_enable_pf2vf_interrupts(accel_dev); | ||
| 436 | return adf_vf2pf_request_version(accel_dev); | ||
| 437 | } | ||
| 438 | EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms); | ||
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h new file mode 100644 index 000000000000..5acd531a11ff --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | |||
| @@ -0,0 +1,146 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2015 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2015 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
#ifndef ADF_PF2VF_MSG_H
#define ADF_PF2VF_MSG_H

/*
 * PF<->VF Messaging
 * The PF has an array of 32-bit PF2VF registers, one for each VF. The
 * PF can access all these registers; each VF can access only the one
 * register associated with that particular VF.
 *
 * The register functionally is split into two parts:
 * The bottom half is for PF->VF messages. In particular when the first
 * bit of this register (bit 0) gets set an interrupt will be triggered
 * in the respective VF.
 * The top half is for VF->PF messages. In particular when the first bit
 * of this half of register (bit 16) gets set an interrupt will be triggered
 * in the PF.
 *
 * The remaining bits within this register are available to encode messages.
 * and implement a collision control mechanism to prevent concurrent use of
 * the PF2VF register by both the PF and VF.
 *
 *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
 *  _______________________________________________
 * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
 * +-----------------------------------------------+
 *  \___________________________/ \_________/ ^   ^
 *                ^                    ^      |   |
 *                |                    |      |   VF2PF Int
 *                |                    |      Message Origin
 *                |                    Message Type
 *                Message-specific Data/Reserved
 *
 *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
 *  _______________________________________________
 * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
 * +-----------------------------------------------+
 *  \___________________________/ \_________/ ^   ^
 *                ^                    ^      |   |
 *                |                    |      |   PF2VF Int
 *                |                    |      Message Origin
 *                |                    Message Type
 *                Message-specific Data/Reserved
 *
 * Message Origin (Should always be 1)
 * A legacy out-of-tree QAT driver allowed for a set of messages not supported
 * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
 *
 * When a PF or VF attempts to send a message in the lower or upper 16 bits,
 * respectively, the other 16 bits are written to first with a defined
 * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
 */

#define ADF_PFVF_COMPATIBILITY_VERSION		0x1	/* PF<->VF compat */

/* PF->VF messages */
#define ADF_PF2VF_INT				BIT(0)
#define ADF_PF2VF_MSGORIGIN_SYSTEM		BIT(1)
#define ADF_PF2VF_MSGTYPE_MASK			0x0000003C
#define ADF_PF2VF_MSGTYPE_SHIFT			2
#define ADF_PF2VF_MSGTYPE_RESTARTING		0x01
#define ADF_PF2VF_MSGTYPE_VERSION_RESP		0x02
#define ADF_PF2VF_IN_USE_BY_PF			0x6AC20000
#define ADF_PF2VF_IN_USE_BY_PF_MASK		0xFFFE0000

/* PF->VF Version Response */
#define ADF_PF2VF_VERSION_RESP_VERS_MASK	0x00003FC0
#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT	6
#define ADF_PF2VF_VERSION_RESP_RESULT_MASK	0x0000C000
#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT	14
/* Legacy version response fields (major.minor) */
#define ADF_PF2VF_MINORVERSION_SHIFT		6
#define ADF_PF2VF_MAJORVERSION_SHIFT		10
#define ADF_PF2VF_VF_COMPATIBLE			1
#define ADF_PF2VF_VF_INCOMPATIBLE		2
#define ADF_PF2VF_VF_COMPAT_UNKNOWN		3

/* VF->PF messages */
#define ADF_VF2PF_IN_USE_BY_VF			0x00006AC2
#define ADF_VF2PF_IN_USE_BY_VF_MASK		0x0000FFFE
#define ADF_VF2PF_INT				BIT(16)
#define ADF_VF2PF_MSGORIGIN_SYSTEM		BIT(17)
#define ADF_VF2PF_MSGTYPE_MASK			0x003C0000
#define ADF_VF2PF_MSGTYPE_SHIFT			18
#define ADF_VF2PF_MSGTYPE_INIT			0x3
#define ADF_VF2PF_MSGTYPE_SHUTDOWN		0x4
#define ADF_VF2PF_MSGTYPE_VERSION_REQ		0x5
#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ	0x6

/* VF->PF Compatible Version Request */
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT		22

/* Collision detection (delays in ms, see __adf_iov_putmsg) */
#define ADF_IOV_MSG_COLLISION_DETECT_DELAY	10
#define ADF_IOV_MSG_ACK_DELAY			2
#define ADF_IOV_MSG_ACK_MAX_RETRY		100
#define ADF_IOV_MSG_RETRY_DELAY			5
#define ADF_IOV_MSG_MAX_RETRIES			3
#define ADF_IOV_MSG_RESP_TIMEOUT	(ADF_IOV_MSG_ACK_DELAY * \
					 ADF_IOV_MSG_ACK_MAX_RETRY + \
					 ADF_IOV_MSG_COLLISION_DETECT_DELAY)
#endif /* ADF_PF2VF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c new file mode 100644 index 000000000000..2f77a4a8cecb --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_sriov.c | |||
| @@ -0,0 +1,309 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2015 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2015 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #include <linux/workqueue.h> | ||
| 48 | #include <linux/pci.h> | ||
| 49 | #include <linux/device.h> | ||
| 50 | #include <linux/iommu.h> | ||
| 51 | #include "adf_common_drv.h" | ||
| 52 | #include "adf_cfg.h" | ||
| 53 | #include "adf_pf2vf_msg.h" | ||
| 54 | |||
/* Single shared workqueue for deferring VF2PF request handling */
static struct workqueue_struct *pf2vf_resp_wq;

/*
 * ME Thread to PCIe Function Mapping registers: each entry maps an
 * accelerator engine thread to a PCIe function; the VALID bit enables
 * the mapping (set on SR-IOV enable, cleared on disable).
 */
#define ME2FUNCTION_MAP_A_OFFSET	(0x3A400 + 0x190)
#define ME2FUNCTION_MAP_A_NUM_REGS	96

#define ME2FUNCTION_MAP_B_OFFSET	(0x3A400 + 0x310)
#define ME2FUNCTION_MAP_B_NUM_REGS	12

#define ME2FUNCTION_MAP_REG_SIZE	4
#define ME2FUNCTION_MAP_VALID		BIT(7)

#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)	       \
	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +	       \
		   ME2FUNCTION_MAP_REG_SIZE * index)

#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)      \
	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +	       \
		   ME2FUNCTION_MAP_REG_SIZE * index, value)

#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)	       \
	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +	       \
		   ME2FUNCTION_MAP_REG_SIZE * index)

#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)      \
	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +	       \
		   ME2FUNCTION_MAP_REG_SIZE * index, value)

/* Work item carrying one deferred VF2PF request to process context */
struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};
| 86 | |||
| 87 | static void adf_iov_send_resp(struct work_struct *work) | ||
| 88 | { | ||
| 89 | struct adf_pf2vf_resp *pf2vf_resp = | ||
| 90 | container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work); | ||
| 91 | |||
| 92 | adf_vf2pf_req_hndl(pf2vf_resp->vf_info); | ||
| 93 | kfree(pf2vf_resp); | ||
| 94 | } | ||
| 95 | |||
| 96 | static void adf_vf2pf_bh_handler(void *data) | ||
| 97 | { | ||
| 98 | struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data; | ||
| 99 | struct adf_pf2vf_resp *pf2vf_resp; | ||
| 100 | |||
| 101 | pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC); | ||
| 102 | if (!pf2vf_resp) | ||
| 103 | return; | ||
| 104 | |||
| 105 | pf2vf_resp->vf_info = vf_info; | ||
| 106 | INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp); | ||
| 107 | queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work); | ||
| 108 | } | ||
| 109 | |||
/*
 * adf_enable_sriov() - bring up SR-IOV on an already-configured device
 * @accel_dev: PF acceleration device (pf.vf_info must be allocated).
 *
 * Creates the PF2VF response workqueue, initialises per-VF tasklets,
 * locks and ratelimits, programs the ME-to-function mapping, enables
 * VF2PF interrupts and finally enables all VFs supported by hardware.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	struct adf_accel_vf_info *vf_info;
	int i;
	u32 reg;

	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
	if (!pf2vf_resp_wq)
		return -ENOMEM;

	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		/* This ptr will be populated when VFs will be created */
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;

		tasklet_init(&vf_info->vf2pf_bh_tasklet,
			     (void *)adf_vf2pf_bh_handler,
			     (unsigned long)vf_info);
		mutex_init(&vf_info->pf2vf_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     DEFAULT_RATELIMIT_INTERVAL,
				     DEFAULT_RATELIMIT_BURST);
	}

	/* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
		reg |= ME2FUNCTION_MAP_VALID;
		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
	}

	/* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
		reg |= ME2FUNCTION_MAP_VALID;
		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
	}

	/* Enable VF to PF interrupts for all VFs */
	adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));

	/*
	 * Due to the hardware design, when SR-IOV and the ring arbiter
	 * are enabled all the VFs supported in hardware must be enabled in
	 * order for all the hardware resources (i.e. bundles) to be usable.
	 * When SR-IOV is enabled, each of the VFs will own one bundle.
	 */
	return pci_enable_sriov(pdev, totalvfs);
}
| 167 | |||
/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function disables SRIOV for the acceleration device: notifies running
 * VFs that the PF is restarting, disables the VFs and VF2PF interrupts,
 * clears the ME-to-function mapping, tears down the per-VF state and
 * destroys the PF2VF response workqueue. No-op if SR-IOV was never
 * enabled (pf.vf_info is NULL).
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_addr = pmisc->virt_addr;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	u32 reg;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	adf_pf2vf_notify_restarting(accel_dev);

	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);

	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
		reg &= ~ME2FUNCTION_MAP_VALID;
		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
	}

	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
		reg &= ~ME2FUNCTION_MAP_VALID;
		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
	}

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
		tasklet_disable(&vf->vf2pf_bh_tasklet);
		tasklet_kill(&vf->vf2pf_bh_tasklet);
		mutex_destroy(&vf->pf2vf_lock);
	}

	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;

	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
| 226 | |||
/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev: Pointer to pci device.
 * @numvfs: Number of VFs requested via sysfs (hardware constraints force
 *          all supported VFs to be enabled regardless; see below).
 *
 * Function enables SRIOV for the pci device. If the device is already
 * started it is stopped and reconfigured first (crypto instances are
 * disabled on the PF since the bundles are handed to the VFs).
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	/* VFs need DMA isolation; refuse to enable SR-IOV without an IOMMU */
	if (!iommu_present(&pci_bus_type)) {
		dev_err(&pdev->dev,
			"IOMMU must be enabled for SR-IOV to work\n");
		return -EINVAL;
	}

	if (accel_dev->pf.vf_info) {
		dev_info(&pdev->dev, "Already enabled for this device\n");
		return -EINVAL;
	}

	/* Quiesce a running device before reconfiguring it for SR-IOV */
	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) ||
		    adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		if (adf_dev_stop(accel_dev)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to stop qat_dev%d\n",
				accel_dev->accel_id);
			return -EFAULT;
		}

		adf_dev_shutdown(accel_dev);
	}

	/* Configure zero crypto instances on the PF: VFs own the bundles */
	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		return -EFAULT;
	val = 0;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		return -EFAULT;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs,
					sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	if (!accel_dev->pf.vf_info)
		return -ENOMEM;

	/* NOTE(review): vf_info is not freed on the failure paths below;
	 * presumably reclaimed by a later adf_disable_sriov() -- confirm. */
	if (adf_dev_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	if (adf_dev_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		return ret;

	return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index db2926bff8a5..3865ae8d96d9 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c | |||
| @@ -264,6 +264,10 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | |||
| 264 | dev_err(&GET_DEV(accel_dev), "Can't get ring number\n"); | 264 | dev_err(&GET_DEV(accel_dev), "Can't get ring number\n"); |
| 265 | return -EFAULT; | 265 | return -EFAULT; |
| 266 | } | 266 | } |
| 267 | if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) { | ||
| 268 | dev_err(&GET_DEV(accel_dev), "Invalid ring number\n"); | ||
| 269 | return -EFAULT; | ||
| 270 | } | ||
| 267 | 271 | ||
| 268 | bank = &transport_data->banks[bank_num]; | 272 | bank = &transport_data->banks[bank_num]; |
| 269 | if (adf_reserve_ring(bank, ring_num)) { | 273 | if (adf_reserve_ring(bank, ring_num)) { |
| @@ -285,7 +289,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | |||
| 285 | goto err; | 289 | goto err; |
| 286 | 290 | ||
| 287 | /* Enable HW arbitration for the given ring */ | 291 | /* Enable HW arbitration for the given ring */ |
| 288 | accel_dev->hw_device->hw_arb_ring_enable(ring); | 292 | adf_update_ring_arb(ring); |
| 289 | 293 | ||
| 290 | if (adf_ring_debugfs_add(ring, ring_name)) { | 294 | if (adf_ring_debugfs_add(ring, ring_name)) { |
| 291 | dev_err(&GET_DEV(accel_dev), | 295 | dev_err(&GET_DEV(accel_dev), |
| @@ -302,14 +306,13 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, | |||
| 302 | err: | 306 | err: |
| 303 | adf_cleanup_ring(ring); | 307 | adf_cleanup_ring(ring); |
| 304 | adf_unreserve_ring(bank, ring_num); | 308 | adf_unreserve_ring(bank, ring_num); |
| 305 | accel_dev->hw_device->hw_arb_ring_disable(ring); | 309 | adf_update_ring_arb(ring); |
| 306 | return ret; | 310 | return ret; |
| 307 | } | 311 | } |
| 308 | 312 | ||
| 309 | void adf_remove_ring(struct adf_etr_ring_data *ring) | 313 | void adf_remove_ring(struct adf_etr_ring_data *ring) |
| 310 | { | 314 | { |
| 311 | struct adf_etr_bank_data *bank = ring->bank; | 315 | struct adf_etr_bank_data *bank = ring->bank; |
| 312 | struct adf_accel_dev *accel_dev = bank->accel_dev; | ||
| 313 | 316 | ||
| 314 | /* Disable interrupts for the given ring */ | 317 | /* Disable interrupts for the given ring */ |
| 315 | adf_disable_ring_irq(bank, ring->ring_number); | 318 | adf_disable_ring_irq(bank, ring->ring_number); |
| @@ -322,7 +325,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring) | |||
| 322 | adf_ring_debugfs_rm(ring); | 325 | adf_ring_debugfs_rm(ring); |
| 323 | adf_unreserve_ring(bank, ring->ring_number); | 326 | adf_unreserve_ring(bank, ring->ring_number); |
| 324 | /* Disable HW arbitration for the given ring */ | 327 | /* Disable HW arbitration for the given ring */ |
| 325 | accel_dev->hw_device->hw_arb_ring_disable(ring); | 328 | adf_update_ring_arb(ring); |
| 326 | adf_cleanup_ring(ring); | 329 | adf_cleanup_ring(ring); |
| 327 | } | 330 | } |
| 328 | 331 | ||
| @@ -463,7 +466,7 @@ err: | |||
| 463 | * acceleration device accel_dev. | 466 | * acceleration device accel_dev. |
| 464 | * To be used by QAT device specific drivers. | 467 | * To be used by QAT device specific drivers. |
| 465 | * | 468 | * |
| 466 | * Return: 0 on success, error code othewise. | 469 | * Return: 0 on success, error code otherwise. |
| 467 | */ | 470 | */ |
| 468 | int adf_init_etr_data(struct adf_accel_dev *accel_dev) | 471 | int adf_init_etr_data(struct adf_accel_dev *accel_dev) |
| 469 | { | 472 | { |
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 160c9a36c919..6ad7e4e1edca 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h | |||
| @@ -97,8 +97,9 @@ | |||
| 97 | #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) | 97 | #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) |
| 98 | 98 | ||
| 99 | /* Minimum ring bufer size for memory allocation */ | 99 | /* Minimum ring bufer size for memory allocation */ |
| 100 | #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \ | 100 | #define ADF_RING_SIZE_BYTES_MIN(SIZE) \ |
| 101 | ADF_RING_SIZE_4K : SIZE) | 101 | ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \ |
| 102 | ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE) | ||
| 102 | #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) | 103 | #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) |
| 103 | #define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ | 104 | #define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ |
| 104 | SIZE) & ~0x4) | 105 | SIZE) & ~0x4) |
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index f1e30e24a419..46747f01b1d1 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h | |||
| @@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp { | |||
| 249 | 249 | ||
| 250 | #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 | 250 | #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 |
| 251 | #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 | 251 | #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 |
| 252 | #define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 | ||
| 253 | #define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 | ||
| 252 | #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 | 254 | #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 |
| 253 | #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 | 255 | #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 |
| 254 | #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 | 256 | #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 |
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h new file mode 100644 index 000000000000..0d7a9b51ce9f --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2014 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2014 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #ifndef _ICP_QAT_FW_PKE_ | ||
| 48 | #define _ICP_QAT_FW_PKE_ | ||
| 49 | |||
| 50 | #include "icp_qat_fw.h" | ||
| 51 | |||
| 52 | struct icp_qat_fw_req_hdr_pke_cd_pars { | ||
| 53 | u64 content_desc_addr; | ||
| 54 | u32 content_desc_resrvd; | ||
| 55 | u32 func_id; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct icp_qat_fw_req_pke_mid { | ||
| 59 | u64 opaque; | ||
| 60 | u64 src_data_addr; | ||
| 61 | u64 dest_data_addr; | ||
| 62 | }; | ||
| 63 | |||
| 64 | struct icp_qat_fw_req_pke_hdr { | ||
| 65 | u8 resrvd1; | ||
| 66 | u8 resrvd2; | ||
| 67 | u8 service_type; | ||
| 68 | u8 hdr_flags; | ||
| 69 | u16 comn_req_flags; | ||
| 70 | u16 resrvd4; | ||
| 71 | struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct icp_qat_fw_pke_request { | ||
| 75 | struct icp_qat_fw_req_pke_hdr pke_hdr; | ||
| 76 | struct icp_qat_fw_req_pke_mid pke_mid; | ||
| 77 | u8 output_param_count; | ||
| 78 | u8 input_param_count; | ||
| 79 | u16 resrvd1; | ||
| 80 | u32 resrvd2; | ||
| 81 | u64 next_req_adr; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct icp_qat_fw_resp_pke_hdr { | ||
| 85 | u8 resrvd1; | ||
| 86 | u8 resrvd2; | ||
| 87 | u8 response_type; | ||
| 88 | u8 hdr_flags; | ||
| 89 | u16 comn_resp_flags; | ||
| 90 | u16 resrvd4; | ||
| 91 | }; | ||
| 92 | |||
| 93 | struct icp_qat_fw_pke_resp { | ||
| 94 | struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; | ||
| 95 | u64 opaque; | ||
| 96 | u64 src_data_addr; | ||
| 97 | u64 dest_data_addr; | ||
| 98 | }; | ||
| 99 | |||
| 100 | #define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 | ||
| 101 | #define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1 | ||
| 102 | #define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \ | ||
| 103 | QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \ | ||
| 104 | ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \ | ||
| 105 | QAT_COMN_RESP_PKE_STATUS_BITPOS, \ | ||
| 106 | QAT_COMN_RESP_PKE_STATUS_MASK) | ||
| 107 | |||
| 108 | #define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \ | ||
| 109 | QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ | ||
| 110 | ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \ | ||
| 111 | ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK) | ||
| 112 | #endif | ||
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index df427c0e9e7b..2bd913aceaeb 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
| @@ -53,7 +53,6 @@ | |||
| 53 | #include <crypto/hash.h> | 53 | #include <crypto/hash.h> |
| 54 | #include <crypto/algapi.h> | 54 | #include <crypto/algapi.h> |
| 55 | #include <crypto/authenc.h> | 55 | #include <crypto/authenc.h> |
| 56 | #include <crypto/rng.h> | ||
| 57 | #include <linux/dma-mapping.h> | 56 | #include <linux/dma-mapping.h> |
| 58 | #include "adf_accel_devices.h" | 57 | #include "adf_accel_devices.h" |
| 59 | #include "adf_transport.h" | 58 | #include "adf_transport.h" |
| @@ -113,9 +112,6 @@ struct qat_alg_aead_ctx { | |||
| 113 | struct crypto_shash *hash_tfm; | 112 | struct crypto_shash *hash_tfm; |
| 114 | enum icp_qat_hw_auth_algo qat_hash_alg; | 113 | enum icp_qat_hw_auth_algo qat_hash_alg; |
| 115 | struct qat_crypto_instance *inst; | 114 | struct qat_crypto_instance *inst; |
| 116 | struct crypto_tfm *tfm; | ||
| 117 | uint8_t salt[AES_BLOCK_SIZE]; | ||
| 118 | spinlock_t lock; /* protects qat_alg_aead_ctx struct */ | ||
| 119 | }; | 115 | }; |
| 120 | 116 | ||
| 121 | struct qat_alg_ablkcipher_ctx { | 117 | struct qat_alg_ablkcipher_ctx { |
| @@ -130,11 +126,6 @@ struct qat_alg_ablkcipher_ctx { | |||
| 130 | spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ | 126 | spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ |
| 131 | }; | 127 | }; |
| 132 | 128 | ||
| 133 | static int get_current_node(void) | ||
| 134 | { | ||
| 135 | return cpu_data(current_thread_info()->cpu).phys_proc_id; | ||
| 136 | } | ||
| 137 | |||
| 138 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) | 129 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) |
| 139 | { | 130 | { |
| 140 | switch (qat_hash_alg) { | 131 | switch (qat_hash_alg) { |
| @@ -278,12 +269,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | |||
| 278 | ICP_QAT_FW_LA_NO_UPDATE_STATE); | 269 | ICP_QAT_FW_LA_NO_UPDATE_STATE); |
| 279 | } | 270 | } |
| 280 | 271 | ||
| 281 | static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, | 272 | static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, |
| 282 | int alg, | 273 | int alg, |
| 283 | struct crypto_authenc_keys *keys) | 274 | struct crypto_authenc_keys *keys) |
| 284 | { | 275 | { |
| 285 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | 276 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm); |
| 286 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | 277 | unsigned int digestsize = crypto_aead_authsize(aead_tfm); |
| 287 | struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; | 278 | struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; |
| 288 | struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; | 279 | struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher; |
| 289 | struct icp_qat_hw_auth_algo_blk *hash = | 280 | struct icp_qat_hw_auth_algo_blk *hash = |
| @@ -358,12 +349,12 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, | |||
| 358 | return 0; | 349 | return 0; |
| 359 | } | 350 | } |
| 360 | 351 | ||
| 361 | static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx, | 352 | static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, |
| 362 | int alg, | 353 | int alg, |
| 363 | struct crypto_authenc_keys *keys) | 354 | struct crypto_authenc_keys *keys) |
| 364 | { | 355 | { |
| 365 | struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); | 356 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm); |
| 366 | unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize; | 357 | unsigned int digestsize = crypto_aead_authsize(aead_tfm); |
| 367 | struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; | 358 | struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; |
| 368 | struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; | 359 | struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash; |
| 369 | struct icp_qat_hw_cipher_algo_blk *cipher = | 360 | struct icp_qat_hw_cipher_algo_blk *cipher = |
| @@ -515,30 +506,27 @@ static int qat_alg_validate_key(int key_len, int *alg) | |||
| 515 | return 0; | 506 | return 0; |
| 516 | } | 507 | } |
| 517 | 508 | ||
| 518 | static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx, | 509 | static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, |
| 519 | const uint8_t *key, unsigned int keylen) | 510 | const uint8_t *key, unsigned int keylen) |
| 520 | { | 511 | { |
| 521 | struct crypto_authenc_keys keys; | 512 | struct crypto_authenc_keys keys; |
| 522 | int alg; | 513 | int alg; |
| 523 | 514 | ||
| 524 | if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) | ||
| 525 | return -EFAULT; | ||
| 526 | |||
| 527 | if (crypto_authenc_extractkeys(&keys, key, keylen)) | 515 | if (crypto_authenc_extractkeys(&keys, key, keylen)) |
| 528 | goto bad_key; | 516 | goto bad_key; |
| 529 | 517 | ||
| 530 | if (qat_alg_validate_key(keys.enckeylen, &alg)) | 518 | if (qat_alg_validate_key(keys.enckeylen, &alg)) |
| 531 | goto bad_key; | 519 | goto bad_key; |
| 532 | 520 | ||
| 533 | if (qat_alg_aead_init_enc_session(ctx, alg, &keys)) | 521 | if (qat_alg_aead_init_enc_session(tfm, alg, &keys)) |
| 534 | goto error; | 522 | goto error; |
| 535 | 523 | ||
| 536 | if (qat_alg_aead_init_dec_session(ctx, alg, &keys)) | 524 | if (qat_alg_aead_init_dec_session(tfm, alg, &keys)) |
| 537 | goto error; | 525 | goto error; |
| 538 | 526 | ||
| 539 | return 0; | 527 | return 0; |
| 540 | bad_key: | 528 | bad_key: |
| 541 | crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 529 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 542 | return -EINVAL; | 530 | return -EINVAL; |
| 543 | error: | 531 | error: |
| 544 | return -EFAULT; | 532 | return -EFAULT; |
| @@ -567,7 +555,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | |||
| 567 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); | 555 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 568 | struct device *dev; | 556 | struct device *dev; |
| 569 | 557 | ||
| 570 | spin_lock(&ctx->lock); | ||
| 571 | if (ctx->enc_cd) { | 558 | if (ctx->enc_cd) { |
| 572 | /* rekeying */ | 559 | /* rekeying */ |
| 573 | dev = &GET_DEV(ctx->inst->accel_dev); | 560 | dev = &GET_DEV(ctx->inst->accel_dev); |
| @@ -581,7 +568,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | |||
| 581 | struct qat_crypto_instance *inst = | 568 | struct qat_crypto_instance *inst = |
| 582 | qat_crypto_get_instance_node(node); | 569 | qat_crypto_get_instance_node(node); |
| 583 | if (!inst) { | 570 | if (!inst) { |
| 584 | spin_unlock(&ctx->lock); | ||
| 585 | return -EINVAL; | 571 | return -EINVAL; |
| 586 | } | 572 | } |
| 587 | 573 | ||
| @@ -591,19 +577,16 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | |||
| 591 | &ctx->enc_cd_paddr, | 577 | &ctx->enc_cd_paddr, |
| 592 | GFP_ATOMIC); | 578 | GFP_ATOMIC); |
| 593 | if (!ctx->enc_cd) { | 579 | if (!ctx->enc_cd) { |
| 594 | spin_unlock(&ctx->lock); | ||
| 595 | return -ENOMEM; | 580 | return -ENOMEM; |
| 596 | } | 581 | } |
| 597 | ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), | 582 | ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), |
| 598 | &ctx->dec_cd_paddr, | 583 | &ctx->dec_cd_paddr, |
| 599 | GFP_ATOMIC); | 584 | GFP_ATOMIC); |
| 600 | if (!ctx->dec_cd) { | 585 | if (!ctx->dec_cd) { |
| 601 | spin_unlock(&ctx->lock); | ||
| 602 | goto out_free_enc; | 586 | goto out_free_enc; |
| 603 | } | 587 | } |
| 604 | } | 588 | } |
| 605 | spin_unlock(&ctx->lock); | 589 | if (qat_alg_aead_init_sessions(tfm, key, keylen)) |
| 606 | if (qat_alg_aead_init_sessions(ctx, key, keylen)) | ||
| 607 | goto out_free_all; | 590 | goto out_free_all; |
| 608 | 591 | ||
| 609 | return 0; | 592 | return 0; |
| @@ -654,22 +637,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | |||
| 654 | } | 637 | } |
| 655 | 638 | ||
| 656 | static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | 639 | static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, |
| 657 | struct scatterlist *assoc, int assoclen, | ||
| 658 | struct scatterlist *sgl, | 640 | struct scatterlist *sgl, |
| 659 | struct scatterlist *sglout, uint8_t *iv, | 641 | struct scatterlist *sglout, |
| 660 | uint8_t ivlen, | ||
| 661 | struct qat_crypto_request *qat_req) | 642 | struct qat_crypto_request *qat_req) |
| 662 | { | 643 | { |
| 663 | struct device *dev = &GET_DEV(inst->accel_dev); | 644 | struct device *dev = &GET_DEV(inst->accel_dev); |
| 664 | int i, bufs = 0, sg_nctr = 0; | 645 | int i, sg_nctr = 0; |
| 665 | int n = sg_nents(sgl), assoc_n = sg_nents(assoc); | 646 | int n = sg_nents(sgl); |
| 666 | struct qat_alg_buf_list *bufl; | 647 | struct qat_alg_buf_list *bufl; |
| 667 | struct qat_alg_buf_list *buflout = NULL; | 648 | struct qat_alg_buf_list *buflout = NULL; |
| 668 | dma_addr_t blp; | 649 | dma_addr_t blp; |
| 669 | dma_addr_t bloutp = 0; | 650 | dma_addr_t bloutp = 0; |
| 670 | struct scatterlist *sg; | 651 | struct scatterlist *sg; |
| 671 | size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + | 652 | size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + |
| 672 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); | 653 | ((1 + n) * sizeof(struct qat_alg_buf)); |
| 673 | 654 | ||
| 674 | if (unlikely(!n)) | 655 | if (unlikely(!n)) |
| 675 | return -EINVAL; | 656 | return -EINVAL; |
| @@ -683,35 +664,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 683 | if (unlikely(dma_mapping_error(dev, blp))) | 664 | if (unlikely(dma_mapping_error(dev, blp))) |
| 684 | goto err; | 665 | goto err; |
| 685 | 666 | ||
| 686 | for_each_sg(assoc, sg, assoc_n, i) { | ||
| 687 | if (!sg->length) | ||
| 688 | continue; | ||
| 689 | |||
| 690 | if (!(assoclen > 0)) | ||
| 691 | break; | ||
| 692 | |||
| 693 | bufl->bufers[bufs].addr = | ||
| 694 | dma_map_single(dev, sg_virt(sg), | ||
| 695 | min_t(int, assoclen, sg->length), | ||
| 696 | DMA_BIDIRECTIONAL); | ||
| 697 | bufl->bufers[bufs].len = min_t(int, assoclen, sg->length); | ||
| 698 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | ||
| 699 | goto err; | ||
| 700 | bufs++; | ||
| 701 | assoclen -= sg->length; | ||
| 702 | } | ||
| 703 | |||
| 704 | if (ivlen) { | ||
| 705 | bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, | ||
| 706 | DMA_BIDIRECTIONAL); | ||
| 707 | bufl->bufers[bufs].len = ivlen; | ||
| 708 | if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) | ||
| 709 | goto err; | ||
| 710 | bufs++; | ||
| 711 | } | ||
| 712 | |||
| 713 | for_each_sg(sgl, sg, n, i) { | 667 | for_each_sg(sgl, sg, n, i) { |
| 714 | int y = sg_nctr + bufs; | 668 | int y = sg_nctr; |
| 715 | 669 | ||
| 716 | if (!sg->length) | 670 | if (!sg->length) |
| 717 | continue; | 671 | continue; |
| @@ -724,7 +678,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 724 | goto err; | 678 | goto err; |
| 725 | sg_nctr++; | 679 | sg_nctr++; |
| 726 | } | 680 | } |
| 727 | bufl->num_bufs = sg_nctr + bufs; | 681 | bufl->num_bufs = sg_nctr; |
| 728 | qat_req->buf.bl = bufl; | 682 | qat_req->buf.bl = bufl; |
| 729 | qat_req->buf.blp = blp; | 683 | qat_req->buf.blp = blp; |
| 730 | qat_req->buf.sz = sz; | 684 | qat_req->buf.sz = sz; |
| @@ -734,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 734 | 688 | ||
| 735 | n = sg_nents(sglout); | 689 | n = sg_nents(sglout); |
| 736 | sz_out = sizeof(struct qat_alg_buf_list) + | 690 | sz_out = sizeof(struct qat_alg_buf_list) + |
| 737 | ((1 + n + assoc_n) * sizeof(struct qat_alg_buf)); | 691 | ((1 + n) * sizeof(struct qat_alg_buf)); |
| 738 | sg_nctr = 0; | 692 | sg_nctr = 0; |
| 739 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, | 693 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, |
| 740 | dev_to_node(&GET_DEV(inst->accel_dev))); | 694 | dev_to_node(&GET_DEV(inst->accel_dev))); |
| @@ -744,14 +698,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 744 | if (unlikely(dma_mapping_error(dev, bloutp))) | 698 | if (unlikely(dma_mapping_error(dev, bloutp))) |
| 745 | goto err; | 699 | goto err; |
| 746 | bufers = buflout->bufers; | 700 | bufers = buflout->bufers; |
| 747 | /* For out of place operation dma map only data and | ||
| 748 | * reuse assoc mapping and iv */ | ||
| 749 | for (i = 0; i < bufs; i++) { | ||
| 750 | bufers[i].len = bufl->bufers[i].len; | ||
| 751 | bufers[i].addr = bufl->bufers[i].addr; | ||
| 752 | } | ||
| 753 | for_each_sg(sglout, sg, n, i) { | 701 | for_each_sg(sglout, sg, n, i) { |
| 754 | int y = sg_nctr + bufs; | 702 | int y = sg_nctr; |
| 755 | 703 | ||
| 756 | if (!sg->length) | 704 | if (!sg->length) |
| 757 | continue; | 705 | continue; |
| @@ -764,7 +712,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 764 | bufers[y].len = sg->length; | 712 | bufers[y].len = sg->length; |
| 765 | sg_nctr++; | 713 | sg_nctr++; |
| 766 | } | 714 | } |
| 767 | buflout->num_bufs = sg_nctr + bufs; | 715 | buflout->num_bufs = sg_nctr; |
| 768 | buflout->num_mapped_bufs = sg_nctr; | 716 | buflout->num_mapped_bufs = sg_nctr; |
| 769 | qat_req->buf.blout = buflout; | 717 | qat_req->buf.blout = buflout; |
| 770 | qat_req->buf.bloutp = bloutp; | 718 | qat_req->buf.bloutp = bloutp; |
| @@ -778,7 +726,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
| 778 | err: | 726 | err: |
| 779 | dev_err(dev, "Failed to map buf for dma\n"); | 727 | dev_err(dev, "Failed to map buf for dma\n"); |
| 780 | sg_nctr = 0; | 728 | sg_nctr = 0; |
| 781 | for (i = 0; i < n + bufs; i++) | 729 | for (i = 0; i < n; i++) |
| 782 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) | 730 | if (!dma_mapping_error(dev, bufl->bufers[i].addr)) |
| 783 | dma_unmap_single(dev, bufl->bufers[i].addr, | 731 | dma_unmap_single(dev, bufl->bufers[i].addr, |
| 784 | bufl->bufers[i].len, | 732 | bufl->bufers[i].len, |
| @@ -789,7 +737,7 @@ err: | |||
| 789 | kfree(bufl); | 737 | kfree(bufl); |
| 790 | if (sgl != sglout && buflout) { | 738 | if (sgl != sglout && buflout) { |
| 791 | n = sg_nents(sglout); | 739 | n = sg_nents(sglout); |
| 792 | for (i = bufs; i < n + bufs; i++) | 740 | for (i = 0; i < n; i++) |
| 793 | if (!dma_mapping_error(dev, buflout->bufers[i].addr)) | 741 | if (!dma_mapping_error(dev, buflout->bufers[i].addr)) |
| 794 | dma_unmap_single(dev, buflout->bufers[i].addr, | 742 | dma_unmap_single(dev, buflout->bufers[i].addr, |
| 795 | buflout->bufers[i].len, | 743 | buflout->bufers[i].len, |
| @@ -849,12 +797,10 @@ static int qat_alg_aead_dec(struct aead_request *areq) | |||
| 849 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | 797 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
| 850 | struct icp_qat_fw_la_auth_req_params *auth_param; | 798 | struct icp_qat_fw_la_auth_req_params *auth_param; |
| 851 | struct icp_qat_fw_la_bulk_req *msg; | 799 | struct icp_qat_fw_la_bulk_req *msg; |
| 852 | int digst_size = crypto_aead_crt(aead_tfm)->authsize; | 800 | int digst_size = crypto_aead_authsize(aead_tfm); |
| 853 | int ret, ctr = 0; | 801 | int ret, ctr = 0; |
| 854 | 802 | ||
| 855 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, | 803 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); |
| 856 | areq->src, areq->dst, areq->iv, | ||
| 857 | AES_BLOCK_SIZE, qat_req); | ||
| 858 | if (unlikely(ret)) | 804 | if (unlikely(ret)) |
| 859 | return ret; | 805 | return ret; |
| 860 | 806 | ||
| @@ -868,12 +814,11 @@ static int qat_alg_aead_dec(struct aead_request *areq) | |||
| 868 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; | 814 | qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; |
| 869 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | 815 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
| 870 | cipher_param->cipher_length = areq->cryptlen - digst_size; | 816 | cipher_param->cipher_length = areq->cryptlen - digst_size; |
| 871 | cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; | 817 | cipher_param->cipher_offset = areq->assoclen; |
| 872 | memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); | 818 | memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); |
| 873 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); | 819 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); |
| 874 | auth_param->auth_off = 0; | 820 | auth_param->auth_off = 0; |
| 875 | auth_param->auth_len = areq->assoclen + | 821 | auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; |
| 876 | cipher_param->cipher_length + AES_BLOCK_SIZE; | ||
| 877 | do { | 822 | do { |
| 878 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | 823 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
| 879 | } while (ret == -EAGAIN && ctr++ < 10); | 824 | } while (ret == -EAGAIN && ctr++ < 10); |
| @@ -885,8 +830,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) | |||
| 885 | return -EINPROGRESS; | 830 | return -EINPROGRESS; |
| 886 | } | 831 | } |
| 887 | 832 | ||
| 888 | static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, | 833 | static int qat_alg_aead_enc(struct aead_request *areq) |
| 889 | int enc_iv) | ||
| 890 | { | 834 | { |
| 891 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); | 835 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq); |
| 892 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | 836 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); |
| @@ -895,11 +839,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, | |||
| 895 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | 839 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
| 896 | struct icp_qat_fw_la_auth_req_params *auth_param; | 840 | struct icp_qat_fw_la_auth_req_params *auth_param; |
| 897 | struct icp_qat_fw_la_bulk_req *msg; | 841 | struct icp_qat_fw_la_bulk_req *msg; |
| 842 | uint8_t *iv = areq->iv; | ||
| 898 | int ret, ctr = 0; | 843 | int ret, ctr = 0; |
| 899 | 844 | ||
| 900 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, | 845 | ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); |
| 901 | areq->src, areq->dst, iv, AES_BLOCK_SIZE, | ||
| 902 | qat_req); | ||
| 903 | if (unlikely(ret)) | 846 | if (unlikely(ret)) |
| 904 | return ret; | 847 | return ret; |
| 905 | 848 | ||
| @@ -914,16 +857,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, | |||
| 914 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | 857 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
| 915 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); | 858 | auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); |
| 916 | 859 | ||
| 917 | if (enc_iv) { | 860 | memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); |
| 918 | cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE; | 861 | cipher_param->cipher_length = areq->cryptlen; |
| 919 | cipher_param->cipher_offset = areq->assoclen; | 862 | cipher_param->cipher_offset = areq->assoclen; |
| 920 | } else { | 863 | |
| 921 | memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE); | ||
| 922 | cipher_param->cipher_length = areq->cryptlen; | ||
| 923 | cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE; | ||
| 924 | } | ||
| 925 | auth_param->auth_off = 0; | 864 | auth_param->auth_off = 0; |
| 926 | auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE; | 865 | auth_param->auth_len = areq->assoclen + areq->cryptlen; |
| 927 | 866 | ||
| 928 | do { | 867 | do { |
| 929 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | 868 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
| @@ -936,25 +875,6 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, | |||
| 936 | return -EINPROGRESS; | 875 | return -EINPROGRESS; |
| 937 | } | 876 | } |
| 938 | 877 | ||
| 939 | static int qat_alg_aead_enc(struct aead_request *areq) | ||
| 940 | { | ||
| 941 | return qat_alg_aead_enc_internal(areq, areq->iv, 0); | ||
| 942 | } | ||
| 943 | |||
| 944 | static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req) | ||
| 945 | { | ||
| 946 | struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq); | ||
| 947 | struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm); | ||
| 948 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
| 949 | __be64 seq; | ||
| 950 | |||
| 951 | memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); | ||
| 952 | seq = cpu_to_be64(req->seq); | ||
| 953 | memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t), | ||
| 954 | &seq, sizeof(uint64_t)); | ||
| 955 | return qat_alg_aead_enc_internal(&req->areq, req->giv, 1); | ||
| 956 | } | ||
| 957 | |||
| 958 | static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | 878 | static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, |
| 959 | const uint8_t *key, | 879 | const uint8_t *key, |
| 960 | unsigned int keylen) | 880 | unsigned int keylen) |
| @@ -1026,8 +946,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
| 1026 | struct icp_qat_fw_la_bulk_req *msg; | 946 | struct icp_qat_fw_la_bulk_req *msg; |
| 1027 | int ret, ctr = 0; | 947 | int ret, ctr = 0; |
| 1028 | 948 | ||
| 1029 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, | 949 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); |
| 1030 | NULL, 0, qat_req); | ||
| 1031 | if (unlikely(ret)) | 950 | if (unlikely(ret)) |
| 1032 | return ret; | 951 | return ret; |
| 1033 | 952 | ||
| @@ -1064,8 +983,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
| 1064 | struct icp_qat_fw_la_bulk_req *msg; | 983 | struct icp_qat_fw_la_bulk_req *msg; |
| 1065 | int ret, ctr = 0; | 984 | int ret, ctr = 0; |
| 1066 | 985 | ||
| 1067 | ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, | 986 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); |
| 1068 | NULL, 0, qat_req); | ||
| 1069 | if (unlikely(ret)) | 987 | if (unlikely(ret)) |
| 1070 | return ret; | 988 | return ret; |
| 1071 | 989 | ||
| @@ -1092,47 +1010,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
| 1092 | return -EINPROGRESS; | 1010 | return -EINPROGRESS; |
| 1093 | } | 1011 | } |
| 1094 | 1012 | ||
| 1095 | static int qat_alg_aead_init(struct crypto_tfm *tfm, | 1013 | static int qat_alg_aead_init(struct crypto_aead *tfm, |
| 1096 | enum icp_qat_hw_auth_algo hash, | 1014 | enum icp_qat_hw_auth_algo hash, |
| 1097 | const char *hash_name) | 1015 | const char *hash_name) |
| 1098 | { | 1016 | { |
| 1099 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 1017 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 1100 | 1018 | ||
| 1101 | ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); | 1019 | ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); |
| 1102 | if (IS_ERR(ctx->hash_tfm)) | 1020 | if (IS_ERR(ctx->hash_tfm)) |
| 1103 | return -EFAULT; | 1021 | return PTR_ERR(ctx->hash_tfm); |
| 1104 | spin_lock_init(&ctx->lock); | ||
| 1105 | ctx->qat_hash_alg = hash; | 1022 | ctx->qat_hash_alg = hash; |
| 1106 | crypto_aead_set_reqsize(__crypto_aead_cast(tfm), | 1023 | crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + |
| 1107 | sizeof(struct aead_request) + | 1024 | sizeof(struct qat_crypto_request)); |
| 1108 | sizeof(struct qat_crypto_request)); | ||
| 1109 | ctx->tfm = tfm; | ||
| 1110 | return 0; | 1025 | return 0; |
| 1111 | } | 1026 | } |
| 1112 | 1027 | ||
| 1113 | static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm) | 1028 | static int qat_alg_aead_sha1_init(struct crypto_aead *tfm) |
| 1114 | { | 1029 | { |
| 1115 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); | 1030 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1"); |
| 1116 | } | 1031 | } |
| 1117 | 1032 | ||
| 1118 | static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm) | 1033 | static int qat_alg_aead_sha256_init(struct crypto_aead *tfm) |
| 1119 | { | 1034 | { |
| 1120 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); | 1035 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256"); |
| 1121 | } | 1036 | } |
| 1122 | 1037 | ||
| 1123 | static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm) | 1038 | static int qat_alg_aead_sha512_init(struct crypto_aead *tfm) |
| 1124 | { | 1039 | { |
| 1125 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); | 1040 | return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512"); |
| 1126 | } | 1041 | } |
| 1127 | 1042 | ||
| 1128 | static void qat_alg_aead_exit(struct crypto_tfm *tfm) | 1043 | static void qat_alg_aead_exit(struct crypto_aead *tfm) |
| 1129 | { | 1044 | { |
| 1130 | struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); | 1045 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 1131 | struct qat_crypto_instance *inst = ctx->inst; | 1046 | struct qat_crypto_instance *inst = ctx->inst; |
| 1132 | struct device *dev; | 1047 | struct device *dev; |
| 1133 | 1048 | ||
| 1134 | if (!IS_ERR(ctx->hash_tfm)) | 1049 | crypto_free_shash(ctx->hash_tfm); |
| 1135 | crypto_free_shash(ctx->hash_tfm); | ||
| 1136 | 1050 | ||
| 1137 | if (!inst) | 1051 | if (!inst) |
| 1138 | return; | 1052 | return; |
| @@ -1189,73 +1103,61 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm) | |||
| 1189 | qat_crypto_put_instance(inst); | 1103 | qat_crypto_put_instance(inst); |
| 1190 | } | 1104 | } |
| 1191 | 1105 | ||
| 1192 | static struct crypto_alg qat_algs[] = { { | 1106 | |
| 1193 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1107 | static struct aead_alg qat_aeads[] = { { |
| 1194 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", | 1108 | .base = { |
| 1195 | .cra_priority = 4001, | 1109 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
| 1196 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1110 | .cra_driver_name = "qat_aes_cbc_hmac_sha1", |
| 1197 | .cra_blocksize = AES_BLOCK_SIZE, | 1111 | .cra_priority = 4001, |
| 1198 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), | 1112 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1199 | .cra_alignmask = 0, | 1113 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1200 | .cra_type = &crypto_aead_type, | 1114 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
| 1201 | .cra_module = THIS_MODULE, | 1115 | .cra_module = THIS_MODULE, |
| 1202 | .cra_init = qat_alg_aead_sha1_init, | ||
| 1203 | .cra_exit = qat_alg_aead_exit, | ||
| 1204 | .cra_u = { | ||
| 1205 | .aead = { | ||
| 1206 | .setkey = qat_alg_aead_setkey, | ||
| 1207 | .decrypt = qat_alg_aead_dec, | ||
| 1208 | .encrypt = qat_alg_aead_enc, | ||
| 1209 | .givencrypt = qat_alg_aead_genivenc, | ||
| 1210 | .ivsize = AES_BLOCK_SIZE, | ||
| 1211 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 1212 | }, | ||
| 1213 | }, | 1116 | }, |
| 1117 | .init = qat_alg_aead_sha1_init, | ||
| 1118 | .exit = qat_alg_aead_exit, | ||
| 1119 | .setkey = qat_alg_aead_setkey, | ||
| 1120 | .decrypt = qat_alg_aead_dec, | ||
| 1121 | .encrypt = qat_alg_aead_enc, | ||
| 1122 | .ivsize = AES_BLOCK_SIZE, | ||
| 1123 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 1214 | }, { | 1124 | }, { |
| 1215 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 1125 | .base = { |
| 1216 | .cra_driver_name = "qat_aes_cbc_hmac_sha256", | 1126 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
| 1217 | .cra_priority = 4001, | 1127 | .cra_driver_name = "qat_aes_cbc_hmac_sha256", |
| 1218 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1128 | .cra_priority = 4001, |
| 1219 | .cra_blocksize = AES_BLOCK_SIZE, | 1129 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1220 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), | 1130 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1221 | .cra_alignmask = 0, | 1131 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
| 1222 | .cra_type = &crypto_aead_type, | 1132 | .cra_module = THIS_MODULE, |
| 1223 | .cra_module = THIS_MODULE, | ||
| 1224 | .cra_init = qat_alg_aead_sha256_init, | ||
| 1225 | .cra_exit = qat_alg_aead_exit, | ||
| 1226 | .cra_u = { | ||
| 1227 | .aead = { | ||
| 1228 | .setkey = qat_alg_aead_setkey, | ||
| 1229 | .decrypt = qat_alg_aead_dec, | ||
| 1230 | .encrypt = qat_alg_aead_enc, | ||
| 1231 | .givencrypt = qat_alg_aead_genivenc, | ||
| 1232 | .ivsize = AES_BLOCK_SIZE, | ||
| 1233 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 1234 | }, | ||
| 1235 | }, | 1133 | }, |
| 1134 | .init = qat_alg_aead_sha256_init, | ||
| 1135 | .exit = qat_alg_aead_exit, | ||
| 1136 | .setkey = qat_alg_aead_setkey, | ||
| 1137 | .decrypt = qat_alg_aead_dec, | ||
| 1138 | .encrypt = qat_alg_aead_enc, | ||
| 1139 | .ivsize = AES_BLOCK_SIZE, | ||
| 1140 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 1236 | }, { | 1141 | }, { |
| 1237 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | 1142 | .base = { |
| 1238 | .cra_driver_name = "qat_aes_cbc_hmac_sha512", | 1143 | .cra_name = "authenc(hmac(sha512),cbc(aes))", |
| 1239 | .cra_priority = 4001, | 1144 | .cra_driver_name = "qat_aes_cbc_hmac_sha512", |
| 1240 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 1145 | .cra_priority = 4001, |
| 1241 | .cra_blocksize = AES_BLOCK_SIZE, | 1146 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 1242 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), | 1147 | .cra_blocksize = AES_BLOCK_SIZE, |
| 1243 | .cra_alignmask = 0, | 1148 | .cra_ctxsize = sizeof(struct qat_alg_aead_ctx), |
| 1244 | .cra_type = &crypto_aead_type, | 1149 | .cra_module = THIS_MODULE, |
| 1245 | .cra_module = THIS_MODULE, | ||
| 1246 | .cra_init = qat_alg_aead_sha512_init, | ||
| 1247 | .cra_exit = qat_alg_aead_exit, | ||
| 1248 | .cra_u = { | ||
| 1249 | .aead = { | ||
| 1250 | .setkey = qat_alg_aead_setkey, | ||
| 1251 | .decrypt = qat_alg_aead_dec, | ||
| 1252 | .encrypt = qat_alg_aead_enc, | ||
| 1253 | .givencrypt = qat_alg_aead_genivenc, | ||
| 1254 | .ivsize = AES_BLOCK_SIZE, | ||
| 1255 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 1256 | }, | ||
| 1257 | }, | 1150 | }, |
| 1258 | }, { | 1151 | .init = qat_alg_aead_sha512_init, |
| 1152 | .exit = qat_alg_aead_exit, | ||
| 1153 | .setkey = qat_alg_aead_setkey, | ||
| 1154 | .decrypt = qat_alg_aead_dec, | ||
| 1155 | .encrypt = qat_alg_aead_enc, | ||
| 1156 | .ivsize = AES_BLOCK_SIZE, | ||
| 1157 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 1158 | } }; | ||
| 1159 | |||
| 1160 | static struct crypto_alg qat_algs[] = { { | ||
| 1259 | .cra_name = "cbc(aes)", | 1161 | .cra_name = "cbc(aes)", |
| 1260 | .cra_driver_name = "qat_aes_cbc", | 1162 | .cra_driver_name = "qat_aes_cbc", |
| 1261 | .cra_priority = 4001, | 1163 | .cra_priority = 4001, |
| @@ -1281,42 +1183,54 @@ static struct crypto_alg qat_algs[] = { { | |||
| 1281 | 1183 | ||
| 1282 | int qat_algs_register(void) | 1184 | int qat_algs_register(void) |
| 1283 | { | 1185 | { |
| 1284 | int ret = 0; | 1186 | int ret = 0, i; |
| 1285 | 1187 | ||
| 1286 | mutex_lock(&algs_lock); | 1188 | mutex_lock(&algs_lock); |
| 1287 | if (++active_devs == 1) { | 1189 | if (++active_devs != 1) |
| 1288 | int i; | 1190 | goto unlock; |
| 1289 | 1191 | ||
| 1290 | for (i = 0; i < ARRAY_SIZE(qat_algs); i++) | 1192 | for (i = 0; i < ARRAY_SIZE(qat_algs); i++) |
| 1291 | qat_algs[i].cra_flags = | 1193 | qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
| 1292 | (qat_algs[i].cra_type == &crypto_aead_type) ? | ||
| 1293 | CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : | ||
| 1294 | CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | ||
| 1295 | 1194 | ||
| 1296 | ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); | 1195 | ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); |
| 1297 | } | 1196 | if (ret) |
| 1197 | goto unlock; | ||
| 1198 | |||
| 1199 | for (i = 0; i < ARRAY_SIZE(qat_aeads); i++) | ||
| 1200 | qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC; | ||
| 1201 | |||
| 1202 | ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); | ||
| 1203 | if (ret) | ||
| 1204 | goto unreg_algs; | ||
| 1205 | |||
| 1206 | unlock: | ||
| 1298 | mutex_unlock(&algs_lock); | 1207 | mutex_unlock(&algs_lock); |
| 1299 | return ret; | 1208 | return ret; |
| 1209 | |||
| 1210 | unreg_algs: | ||
| 1211 | crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); | ||
| 1212 | goto unlock; | ||
| 1300 | } | 1213 | } |
| 1301 | 1214 | ||
| 1302 | int qat_algs_unregister(void) | 1215 | int qat_algs_unregister(void) |
| 1303 | { | 1216 | { |
| 1304 | int ret = 0; | ||
| 1305 | |||
| 1306 | mutex_lock(&algs_lock); | 1217 | mutex_lock(&algs_lock); |
| 1307 | if (--active_devs == 0) | 1218 | if (--active_devs != 0) |
| 1308 | ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); | 1219 | goto unlock; |
| 1220 | |||
| 1221 | crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads)); | ||
| 1222 | crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); | ||
| 1223 | |||
| 1224 | unlock: | ||
| 1309 | mutex_unlock(&algs_lock); | 1225 | mutex_unlock(&algs_lock); |
| 1310 | return ret; | 1226 | return 0; |
| 1311 | } | 1227 | } |
| 1312 | 1228 | ||
| 1313 | int qat_algs_init(void) | 1229 | int qat_algs_init(void) |
| 1314 | { | 1230 | { |
| 1315 | crypto_get_default_rng(); | ||
| 1316 | return 0; | 1231 | return 0; |
| 1317 | } | 1232 | } |
| 1318 | 1233 | ||
| 1319 | void qat_algs_exit(void) | 1234 | void qat_algs_exit(void) |
| 1320 | { | 1235 | { |
| 1321 | crypto_put_default_rng(); | ||
| 1322 | } | 1236 | } |
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c new file mode 100644 index 000000000000..e87f51023ba4 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c | |||
| @@ -0,0 +1,652 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2014 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2014 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | |||
| 48 | #include <linux/module.h> | ||
| 49 | #include <crypto/internal/rsa.h> | ||
| 50 | #include <crypto/internal/akcipher.h> | ||
| 51 | #include <crypto/akcipher.h> | ||
| 52 | #include <linux/dma-mapping.h> | ||
| 53 | #include <linux/fips.h> | ||
| 54 | #include "qat_rsakey-asn1.h" | ||
| 55 | #include "icp_qat_fw_pke.h" | ||
| 56 | #include "adf_accel_devices.h" | ||
| 57 | #include "adf_transport.h" | ||
| 58 | #include "adf_common_drv.h" | ||
| 59 | #include "qat_crypto.h" | ||
| 60 | |||
| 61 | static DEFINE_MUTEX(algs_lock); | ||
| 62 | static unsigned int active_devs; | ||
| 63 | |||
| 64 | struct qat_rsa_input_params { | ||
| 65 | union { | ||
| 66 | struct { | ||
| 67 | dma_addr_t m; | ||
| 68 | dma_addr_t e; | ||
| 69 | dma_addr_t n; | ||
| 70 | } enc; | ||
| 71 | struct { | ||
| 72 | dma_addr_t c; | ||
| 73 | dma_addr_t d; | ||
| 74 | dma_addr_t n; | ||
| 75 | } dec; | ||
| 76 | u64 in_tab[8]; | ||
| 77 | }; | ||
| 78 | } __packed __aligned(64); | ||
| 79 | |||
| 80 | struct qat_rsa_output_params { | ||
| 81 | union { | ||
| 82 | struct { | ||
| 83 | dma_addr_t c; | ||
| 84 | } enc; | ||
| 85 | struct { | ||
| 86 | dma_addr_t m; | ||
| 87 | } dec; | ||
| 88 | u64 out_tab[8]; | ||
| 89 | }; | ||
| 90 | } __packed __aligned(64); | ||
| 91 | |||
| 92 | struct qat_rsa_ctx { | ||
| 93 | char *n; | ||
| 94 | char *e; | ||
| 95 | char *d; | ||
| 96 | dma_addr_t dma_n; | ||
| 97 | dma_addr_t dma_e; | ||
| 98 | dma_addr_t dma_d; | ||
| 99 | unsigned int key_sz; | ||
| 100 | struct qat_crypto_instance *inst; | ||
| 101 | } __packed __aligned(64); | ||
| 102 | |||
| 103 | struct qat_rsa_request { | ||
| 104 | struct qat_rsa_input_params in; | ||
| 105 | struct qat_rsa_output_params out; | ||
| 106 | dma_addr_t phy_in; | ||
| 107 | dma_addr_t phy_out; | ||
| 108 | char *src_align; | ||
| 109 | struct icp_qat_fw_pke_request req; | ||
| 110 | struct qat_rsa_ctx *ctx; | ||
| 111 | int err; | ||
| 112 | } __aligned(64); | ||
| 113 | |||
| 114 | static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) | ||
| 115 | { | ||
| 116 | struct akcipher_request *areq = (void *)(__force long)resp->opaque; | ||
| 117 | struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); | ||
| 118 | struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); | ||
| 119 | int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( | ||
| 120 | resp->pke_resp_hdr.comn_resp_flags); | ||
| 121 | char *ptr = areq->dst; | ||
| 122 | |||
| 123 | err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; | ||
| 124 | |||
| 125 | if (req->src_align) | ||
| 126 | dma_free_coherent(dev, req->ctx->key_sz, req->src_align, | ||
| 127 | req->in.enc.m); | ||
| 128 | else | ||
| 129 | dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, | ||
| 130 | DMA_TO_DEVICE); | ||
| 131 | |||
| 132 | dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, | ||
| 133 | DMA_FROM_DEVICE); | ||
| 134 | dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), | ||
| 135 | DMA_TO_DEVICE); | ||
| 136 | dma_unmap_single(dev, req->phy_out, | ||
| 137 | sizeof(struct qat_rsa_output_params), | ||
| 138 | DMA_TO_DEVICE); | ||
| 139 | |||
| 140 | areq->dst_len = req->ctx->key_sz; | ||
| 141 | /* Need to set the corect length of the output */ | ||
| 142 | while (!(*ptr) && areq->dst_len) { | ||
| 143 | areq->dst_len--; | ||
| 144 | ptr++; | ||
| 145 | } | ||
| 146 | |||
| 147 | if (areq->dst_len != req->ctx->key_sz) | ||
| 148 | memmove(areq->dst, ptr, areq->dst_len); | ||
| 149 | |||
| 150 | akcipher_request_complete(areq, err); | ||
| 151 | } | ||
| 152 | |||
| 153 | void qat_alg_asym_callback(void *_resp) | ||
| 154 | { | ||
| 155 | struct icp_qat_fw_pke_resp *resp = _resp; | ||
| 156 | |||
| 157 | qat_rsa_cb(resp); | ||
| 158 | } | ||
| 159 | |||
| 160 | #define PKE_RSA_EP_512 0x1c161b21 | ||
| 161 | #define PKE_RSA_EP_1024 0x35111bf7 | ||
| 162 | #define PKE_RSA_EP_1536 0x4d111cdc | ||
| 163 | #define PKE_RSA_EP_2048 0x6e111dba | ||
| 164 | #define PKE_RSA_EP_3072 0x7d111ea3 | ||
| 165 | #define PKE_RSA_EP_4096 0xa5101f7e | ||
| 166 | |||
| 167 | static unsigned long qat_rsa_enc_fn_id(unsigned int len) | ||
| 168 | { | ||
| 169 | unsigned int bitslen = len << 3; | ||
| 170 | |||
| 171 | switch (bitslen) { | ||
| 172 | case 512: | ||
| 173 | return PKE_RSA_EP_512; | ||
| 174 | case 1024: | ||
| 175 | return PKE_RSA_EP_1024; | ||
| 176 | case 1536: | ||
| 177 | return PKE_RSA_EP_1536; | ||
| 178 | case 2048: | ||
| 179 | return PKE_RSA_EP_2048; | ||
| 180 | case 3072: | ||
| 181 | return PKE_RSA_EP_3072; | ||
| 182 | case 4096: | ||
| 183 | return PKE_RSA_EP_4096; | ||
| 184 | default: | ||
| 185 | return 0; | ||
| 186 | }; | ||
| 187 | } | ||
| 188 | |||
| 189 | #define PKE_RSA_DP1_512 0x1c161b3c | ||
| 190 | #define PKE_RSA_DP1_1024 0x35111c12 | ||
| 191 | #define PKE_RSA_DP1_1536 0x4d111cf7 | ||
| 192 | #define PKE_RSA_DP1_2048 0x6e111dda | ||
| 193 | #define PKE_RSA_DP1_3072 0x7d111ebe | ||
| 194 | #define PKE_RSA_DP1_4096 0xa5101f98 | ||
| 195 | |||
| 196 | static unsigned long qat_rsa_dec_fn_id(unsigned int len) | ||
| 197 | { | ||
| 198 | unsigned int bitslen = len << 3; | ||
| 199 | |||
| 200 | switch (bitslen) { | ||
| 201 | case 512: | ||
| 202 | return PKE_RSA_DP1_512; | ||
| 203 | case 1024: | ||
| 204 | return PKE_RSA_DP1_1024; | ||
| 205 | case 1536: | ||
| 206 | return PKE_RSA_DP1_1536; | ||
| 207 | case 2048: | ||
| 208 | return PKE_RSA_DP1_2048; | ||
| 209 | case 3072: | ||
| 210 | return PKE_RSA_DP1_3072; | ||
| 211 | case 4096: | ||
| 212 | return PKE_RSA_DP1_4096; | ||
| 213 | default: | ||
| 214 | return 0; | ||
| 215 | }; | ||
| 216 | } | ||
| 217 | |||
| 218 | static int qat_rsa_enc(struct akcipher_request *req) | ||
| 219 | { | ||
| 220 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
| 221 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
| 222 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 223 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
| 224 | struct qat_rsa_request *qat_req = | ||
| 225 | PTR_ALIGN(akcipher_request_ctx(req), 64); | ||
| 226 | struct icp_qat_fw_pke_request *msg = &qat_req->req; | ||
| 227 | int ret, ctr = 0; | ||
| 228 | |||
| 229 | if (unlikely(!ctx->n || !ctx->e)) | ||
| 230 | return -EINVAL; | ||
| 231 | |||
| 232 | if (req->dst_len < ctx->key_sz) { | ||
| 233 | req->dst_len = ctx->key_sz; | ||
| 234 | return -EOVERFLOW; | ||
| 235 | } | ||
| 236 | memset(msg, '\0', sizeof(*msg)); | ||
| 237 | ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, | ||
| 238 | ICP_QAT_FW_COMN_REQ_FLAG_SET); | ||
| 239 | msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz); | ||
| 240 | if (unlikely(!msg->pke_hdr.cd_pars.func_id)) | ||
| 241 | return -EINVAL; | ||
| 242 | |||
| 243 | qat_req->ctx = ctx; | ||
| 244 | msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; | ||
| 245 | msg->pke_hdr.comn_req_flags = | ||
| 246 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, | ||
| 247 | QAT_COMN_CD_FLD_TYPE_64BIT_ADR); | ||
| 248 | |||
| 249 | qat_req->in.enc.e = ctx->dma_e; | ||
| 250 | qat_req->in.enc.n = ctx->dma_n; | ||
| 251 | ret = -ENOMEM; | ||
| 252 | |||
| 253 | /* | ||
| 254 | * src can be of any size in valid range, but HW expects it to be the | ||
| 255 | * same as modulo n so in case it is different we need to allocate a | ||
| 256 | * new buf and copy src data. | ||
| 257 | * In other case we just need to map the user provided buffer. | ||
| 258 | */ | ||
| 259 | if (req->src_len < ctx->key_sz) { | ||
| 260 | int shift = ctx->key_sz - req->src_len; | ||
| 261 | |||
| 262 | qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, | ||
| 263 | &qat_req->in.enc.m, | ||
| 264 | GFP_KERNEL); | ||
| 265 | if (unlikely(!qat_req->src_align)) | ||
| 266 | return ret; | ||
| 267 | |||
| 268 | memcpy(qat_req->src_align + shift, req->src, req->src_len); | ||
| 269 | } else { | ||
| 270 | qat_req->src_align = NULL; | ||
| 271 | qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len, | ||
| 272 | DMA_TO_DEVICE); | ||
| 273 | } | ||
| 274 | qat_req->in.in_tab[3] = 0; | ||
| 275 | qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len, | ||
| 276 | DMA_FROM_DEVICE); | ||
| 277 | qat_req->out.out_tab[1] = 0; | ||
| 278 | qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, | ||
| 279 | sizeof(struct qat_rsa_input_params), | ||
| 280 | DMA_TO_DEVICE); | ||
| 281 | qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, | ||
| 282 | sizeof(struct qat_rsa_output_params), | ||
| 283 | DMA_TO_DEVICE); | ||
| 284 | |||
| 285 | if (unlikely((!qat_req->src_align && | ||
| 286 | dma_mapping_error(dev, qat_req->in.enc.m)) || | ||
| 287 | dma_mapping_error(dev, qat_req->out.enc.c) || | ||
| 288 | dma_mapping_error(dev, qat_req->phy_in) || | ||
| 289 | dma_mapping_error(dev, qat_req->phy_out))) | ||
| 290 | goto unmap; | ||
| 291 | |||
| 292 | msg->pke_mid.src_data_addr = qat_req->phy_in; | ||
| 293 | msg->pke_mid.dest_data_addr = qat_req->phy_out; | ||
| 294 | msg->pke_mid.opaque = (uint64_t)(__force long)req; | ||
| 295 | msg->input_param_count = 3; | ||
| 296 | msg->output_param_count = 1; | ||
| 297 | do { | ||
| 298 | ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); | ||
| 299 | } while (ret == -EBUSY && ctr++ < 100); | ||
| 300 | |||
| 301 | if (!ret) | ||
| 302 | return -EINPROGRESS; | ||
| 303 | unmap: | ||
| 304 | if (qat_req->src_align) | ||
| 305 | dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, | ||
| 306 | qat_req->in.enc.m); | ||
| 307 | else | ||
| 308 | if (!dma_mapping_error(dev, qat_req->in.enc.m)) | ||
| 309 | dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, | ||
| 310 | DMA_TO_DEVICE); | ||
| 311 | if (!dma_mapping_error(dev, qat_req->out.enc.c)) | ||
| 312 | dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, | ||
| 313 | DMA_FROM_DEVICE); | ||
| 314 | if (!dma_mapping_error(dev, qat_req->phy_in)) | ||
| 315 | dma_unmap_single(dev, qat_req->phy_in, | ||
| 316 | sizeof(struct qat_rsa_input_params), | ||
| 317 | DMA_TO_DEVICE); | ||
| 318 | if (!dma_mapping_error(dev, qat_req->phy_out)) | ||
| 319 | dma_unmap_single(dev, qat_req->phy_out, | ||
| 320 | sizeof(struct qat_rsa_output_params), | ||
| 321 | DMA_TO_DEVICE); | ||
| 322 | return ret; | ||
| 323 | } | ||
| 324 | |||
| 325 | static int qat_rsa_dec(struct akcipher_request *req) | ||
| 326 | { | ||
| 327 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
| 328 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
| 329 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 330 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
| 331 | struct qat_rsa_request *qat_req = | ||
| 332 | PTR_ALIGN(akcipher_request_ctx(req), 64); | ||
| 333 | struct icp_qat_fw_pke_request *msg = &qat_req->req; | ||
| 334 | int ret, ctr = 0; | ||
| 335 | |||
| 336 | if (unlikely(!ctx->n || !ctx->d)) | ||
| 337 | return -EINVAL; | ||
| 338 | |||
| 339 | if (req->dst_len < ctx->key_sz) { | ||
| 340 | req->dst_len = ctx->key_sz; | ||
| 341 | return -EOVERFLOW; | ||
| 342 | } | ||
| 343 | memset(msg, '\0', sizeof(*msg)); | ||
| 344 | ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, | ||
| 345 | ICP_QAT_FW_COMN_REQ_FLAG_SET); | ||
| 346 | msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); | ||
| 347 | if (unlikely(!msg->pke_hdr.cd_pars.func_id)) | ||
| 348 | return -EINVAL; | ||
| 349 | |||
| 350 | qat_req->ctx = ctx; | ||
| 351 | msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; | ||
| 352 | msg->pke_hdr.comn_req_flags = | ||
| 353 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, | ||
| 354 | QAT_COMN_CD_FLD_TYPE_64BIT_ADR); | ||
| 355 | |||
| 356 | qat_req->in.dec.d = ctx->dma_d; | ||
| 357 | qat_req->in.dec.n = ctx->dma_n; | ||
| 358 | ret = -ENOMEM; | ||
| 359 | |||
| 360 | /* | ||
| 361 | * src can be of any size in valid range, but HW expects it to be the | ||
| 362 | * same as modulo n so in case it is different we need to allocate a | ||
| 363 | * new buf and copy src data. | ||
| 364 | * In other case we just need to map the user provided buffer. | ||
| 365 | */ | ||
| 366 | if (req->src_len < ctx->key_sz) { | ||
| 367 | int shift = ctx->key_sz - req->src_len; | ||
| 368 | |||
| 369 | qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, | ||
| 370 | &qat_req->in.dec.c, | ||
| 371 | GFP_KERNEL); | ||
| 372 | if (unlikely(!qat_req->src_align)) | ||
| 373 | return ret; | ||
| 374 | |||
| 375 | memcpy(qat_req->src_align + shift, req->src, req->src_len); | ||
| 376 | } else { | ||
| 377 | qat_req->src_align = NULL; | ||
| 378 | qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len, | ||
| 379 | DMA_TO_DEVICE); | ||
| 380 | } | ||
| 381 | qat_req->in.in_tab[3] = 0; | ||
| 382 | qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len, | ||
| 383 | DMA_FROM_DEVICE); | ||
| 384 | qat_req->out.out_tab[1] = 0; | ||
| 385 | qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, | ||
| 386 | sizeof(struct qat_rsa_input_params), | ||
| 387 | DMA_TO_DEVICE); | ||
| 388 | qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, | ||
| 389 | sizeof(struct qat_rsa_output_params), | ||
| 390 | DMA_TO_DEVICE); | ||
| 391 | |||
| 392 | if (unlikely((!qat_req->src_align && | ||
| 393 | dma_mapping_error(dev, qat_req->in.dec.c)) || | ||
| 394 | dma_mapping_error(dev, qat_req->out.dec.m) || | ||
| 395 | dma_mapping_error(dev, qat_req->phy_in) || | ||
| 396 | dma_mapping_error(dev, qat_req->phy_out))) | ||
| 397 | goto unmap; | ||
| 398 | |||
| 399 | msg->pke_mid.src_data_addr = qat_req->phy_in; | ||
| 400 | msg->pke_mid.dest_data_addr = qat_req->phy_out; | ||
| 401 | msg->pke_mid.opaque = (uint64_t)(__force long)req; | ||
| 402 | msg->input_param_count = 3; | ||
| 403 | msg->output_param_count = 1; | ||
| 404 | do { | ||
| 405 | ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); | ||
| 406 | } while (ret == -EBUSY && ctr++ < 100); | ||
| 407 | |||
| 408 | if (!ret) | ||
| 409 | return -EINPROGRESS; | ||
| 410 | unmap: | ||
| 411 | if (qat_req->src_align) | ||
| 412 | dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, | ||
| 413 | qat_req->in.dec.c); | ||
| 414 | else | ||
| 415 | if (!dma_mapping_error(dev, qat_req->in.dec.c)) | ||
| 416 | dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, | ||
| 417 | DMA_TO_DEVICE); | ||
| 418 | if (!dma_mapping_error(dev, qat_req->out.dec.m)) | ||
| 419 | dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, | ||
| 420 | DMA_FROM_DEVICE); | ||
| 421 | if (!dma_mapping_error(dev, qat_req->phy_in)) | ||
| 422 | dma_unmap_single(dev, qat_req->phy_in, | ||
| 423 | sizeof(struct qat_rsa_input_params), | ||
| 424 | DMA_TO_DEVICE); | ||
| 425 | if (!dma_mapping_error(dev, qat_req->phy_out)) | ||
| 426 | dma_unmap_single(dev, qat_req->phy_out, | ||
| 427 | sizeof(struct qat_rsa_output_params), | ||
| 428 | DMA_TO_DEVICE); | ||
| 429 | return ret; | ||
| 430 | } | ||
| 431 | |||
| 432 | int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, | ||
| 433 | const void *value, size_t vlen) | ||
| 434 | { | ||
| 435 | struct qat_rsa_ctx *ctx = context; | ||
| 436 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 437 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
| 438 | const char *ptr = value; | ||
| 439 | int ret; | ||
| 440 | |||
| 441 | while (!*ptr && vlen) { | ||
| 442 | ptr++; | ||
| 443 | vlen--; | ||
| 444 | } | ||
| 445 | |||
| 446 | ctx->key_sz = vlen; | ||
| 447 | ret = -EINVAL; | ||
| 448 | /* In FIPS mode only allow key size 2K & 3K */ | ||
| 449 | if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) { | ||
| 450 | pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); | ||
| 451 | goto err; | ||
| 452 | } | ||
| 453 | /* invalid key size provided */ | ||
| 454 | if (!qat_rsa_enc_fn_id(ctx->key_sz)) | ||
| 455 | goto err; | ||
| 456 | |||
| 457 | ret = -ENOMEM; | ||
| 458 | ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); | ||
| 459 | if (!ctx->n) | ||
| 460 | goto err; | ||
| 461 | |||
| 462 | memcpy(ctx->n, ptr, ctx->key_sz); | ||
| 463 | return 0; | ||
| 464 | err: | ||
| 465 | ctx->key_sz = 0; | ||
| 466 | ctx->n = NULL; | ||
| 467 | return ret; | ||
| 468 | } | ||
| 469 | |||
| 470 | int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, | ||
| 471 | const void *value, size_t vlen) | ||
| 472 | { | ||
| 473 | struct qat_rsa_ctx *ctx = context; | ||
| 474 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 475 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
| 476 | const char *ptr = value; | ||
| 477 | |||
| 478 | while (!*ptr && vlen) { | ||
| 479 | ptr++; | ||
| 480 | vlen--; | ||
| 481 | } | ||
| 482 | |||
| 483 | if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { | ||
| 484 | ctx->e = NULL; | ||
| 485 | return -EINVAL; | ||
| 486 | } | ||
| 487 | |||
| 488 | ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); | ||
| 489 | if (!ctx->e) { | ||
| 490 | ctx->e = NULL; | ||
| 491 | return -ENOMEM; | ||
| 492 | } | ||
| 493 | memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); | ||
| 494 | return 0; | ||
| 495 | } | ||
| 496 | |||
| 497 | int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, | ||
| 498 | const void *value, size_t vlen) | ||
| 499 | { | ||
| 500 | struct qat_rsa_ctx *ctx = context; | ||
| 501 | struct qat_crypto_instance *inst = ctx->inst; | ||
| 502 | struct device *dev = &GET_DEV(inst->accel_dev); | ||
| 503 | const char *ptr = value; | ||
| 504 | int ret; | ||
| 505 | |||
| 506 | while (!*ptr && vlen) { | ||
| 507 | ptr++; | ||
| 508 | vlen--; | ||
| 509 | } | ||
| 510 | |||
| 511 | ret = -EINVAL; | ||
| 512 | if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) | ||
| 513 | goto err; | ||
| 514 | |||
| 515 | /* In FIPS mode only allow key size 2K & 3K */ | ||
| 516 | if (fips_enabled && (vlen != 256 && vlen != 384)) { | ||
| 517 | pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); | ||
| 518 | goto err; | ||
| 519 | } | ||
| 520 | |||
| 521 | ret = -ENOMEM; | ||
| 522 | ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); | ||
| 523 | if (!ctx->n) | ||
| 524 | goto err; | ||
| 525 | |||
| 526 | memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); | ||
| 527 | return 0; | ||
| 528 | err: | ||
| 529 | ctx->d = NULL; | ||
| 530 | return ret; | ||
| 531 | } | ||
| 532 | |||
| 533 | static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, | ||
| 534 | unsigned int keylen) | ||
| 535 | { | ||
| 536 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
| 537 | struct device *dev = &GET_DEV(ctx->inst->accel_dev); | ||
| 538 | int ret; | ||
| 539 | |||
| 540 | /* Free the old key if any */ | ||
| 541 | if (ctx->n) | ||
| 542 | dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); | ||
| 543 | if (ctx->e) | ||
| 544 | dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); | ||
| 545 | if (ctx->d) { | ||
| 546 | memset(ctx->d, '\0', ctx->key_sz); | ||
| 547 | dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); | ||
| 548 | } | ||
| 549 | |||
| 550 | ctx->n = NULL; | ||
| 551 | ctx->e = NULL; | ||
| 552 | ctx->d = NULL; | ||
| 553 | ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen); | ||
| 554 | if (ret < 0) | ||
| 555 | goto free; | ||
| 556 | |||
| 557 | if (!ctx->n || !ctx->e) { | ||
| 558 | /* invalid key provided */ | ||
| 559 | ret = -EINVAL; | ||
| 560 | goto free; | ||
| 561 | } | ||
| 562 | |||
| 563 | return 0; | ||
| 564 | free: | ||
| 565 | if (ctx->d) { | ||
| 566 | memset(ctx->d, '\0', ctx->key_sz); | ||
| 567 | dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); | ||
| 568 | ctx->d = NULL; | ||
| 569 | } | ||
| 570 | if (ctx->e) { | ||
| 571 | dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); | ||
| 572 | ctx->e = NULL; | ||
| 573 | } | ||
| 574 | if (ctx->n) { | ||
| 575 | dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); | ||
| 576 | ctx->n = NULL; | ||
| 577 | ctx->key_sz = 0; | ||
| 578 | } | ||
| 579 | return ret; | ||
| 580 | } | ||
| 581 | |||
| 582 | static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) | ||
| 583 | { | ||
| 584 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
| 585 | struct qat_crypto_instance *inst = | ||
| 586 | qat_crypto_get_instance_node(get_current_node()); | ||
| 587 | |||
| 588 | if (!inst) | ||
| 589 | return -EINVAL; | ||
| 590 | |||
| 591 | ctx->key_sz = 0; | ||
| 592 | ctx->inst = inst; | ||
| 593 | return 0; | ||
| 594 | } | ||
| 595 | |||
| 596 | static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) | ||
| 597 | { | ||
| 598 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
| 599 | struct device *dev = &GET_DEV(ctx->inst->accel_dev); | ||
| 600 | |||
| 601 | if (ctx->n) | ||
| 602 | dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); | ||
| 603 | if (ctx->e) | ||
| 604 | dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); | ||
| 605 | if (ctx->d) { | ||
| 606 | memset(ctx->d, '\0', ctx->key_sz); | ||
| 607 | dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); | ||
| 608 | } | ||
| 609 | qat_crypto_put_instance(ctx->inst); | ||
| 610 | ctx->n = NULL; | ||
| 611 | ctx->d = NULL; | ||
| 612 | ctx->d = NULL; | ||
| 613 | } | ||
| 614 | |||
| 615 | static struct akcipher_alg rsa = { | ||
| 616 | .encrypt = qat_rsa_enc, | ||
| 617 | .decrypt = qat_rsa_dec, | ||
| 618 | .sign = qat_rsa_dec, | ||
| 619 | .verify = qat_rsa_enc, | ||
| 620 | .setkey = qat_rsa_setkey, | ||
| 621 | .init = qat_rsa_init_tfm, | ||
| 622 | .exit = qat_rsa_exit_tfm, | ||
| 623 | .reqsize = sizeof(struct qat_rsa_request) + 64, | ||
| 624 | .base = { | ||
| 625 | .cra_name = "rsa", | ||
| 626 | .cra_driver_name = "qat-rsa", | ||
| 627 | .cra_priority = 1000, | ||
| 628 | .cra_module = THIS_MODULE, | ||
| 629 | .cra_ctxsize = sizeof(struct qat_rsa_ctx), | ||
| 630 | }, | ||
| 631 | }; | ||
| 632 | |||
| 633 | int qat_asym_algs_register(void) | ||
| 634 | { | ||
| 635 | int ret = 0; | ||
| 636 | |||
| 637 | mutex_lock(&algs_lock); | ||
| 638 | if (++active_devs == 1) { | ||
| 639 | rsa.base.cra_flags = 0; | ||
| 640 | ret = crypto_register_akcipher(&rsa); | ||
| 641 | } | ||
| 642 | mutex_unlock(&algs_lock); | ||
| 643 | return ret; | ||
| 644 | } | ||
| 645 | |||
| 646 | void qat_asym_algs_unregister(void) | ||
| 647 | { | ||
| 648 | mutex_lock(&algs_lock); | ||
| 649 | if (--active_devs == 0) | ||
| 650 | crypto_unregister_akcipher(&rsa); | ||
| 651 | mutex_unlock(&algs_lock); | ||
| 652 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 3bd705ca5973..07c2f9f9d1fc 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c | |||
| @@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) | |||
| 88 | if (inst->pke_rx) | 88 | if (inst->pke_rx) |
| 89 | adf_remove_ring(inst->pke_rx); | 89 | adf_remove_ring(inst->pke_rx); |
| 90 | 90 | ||
| 91 | if (inst->rnd_tx) | ||
| 92 | adf_remove_ring(inst->rnd_tx); | ||
| 93 | |||
| 94 | if (inst->rnd_rx) | ||
| 95 | adf_remove_ring(inst->rnd_rx); | ||
| 96 | |||
| 97 | list_del(list_ptr); | 91 | list_del(list_ptr); |
| 98 | kfree(inst); | 92 | kfree(inst); |
| 99 | } | 93 | } |
| @@ -109,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) | |||
| 109 | 103 | ||
| 110 | list_for_each(itr, adf_devmgr_get_head()) { | 104 | list_for_each(itr, adf_devmgr_get_head()) { |
| 111 | accel_dev = list_entry(itr, struct adf_accel_dev, list); | 105 | accel_dev = list_entry(itr, struct adf_accel_dev, list); |
| 106 | |||
| 112 | if ((node == dev_to_node(&GET_DEV(accel_dev)) || | 107 | if ((node == dev_to_node(&GET_DEV(accel_dev)) || |
| 113 | dev_to_node(&GET_DEV(accel_dev)) < 0) && | 108 | dev_to_node(&GET_DEV(accel_dev)) < 0) && |
| 114 | adf_dev_started(accel_dev)) | 109 | adf_dev_started(accel_dev) && |
| 110 | !list_empty(&accel_dev->crypto_list)) | ||
| 115 | break; | 111 | break; |
| 116 | accel_dev = NULL; | 112 | accel_dev = NULL; |
| 117 | } | 113 | } |
| @@ -158,7 +154,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | |||
| 158 | 154 | ||
| 159 | INIT_LIST_HEAD(&accel_dev->crypto_list); | 155 | INIT_LIST_HEAD(&accel_dev->crypto_list); |
| 160 | strlcpy(key, ADF_NUM_CY, sizeof(key)); | 156 | strlcpy(key, ADF_NUM_CY, sizeof(key)); |
| 161 | |||
| 162 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | 157 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) |
| 163 | return -EFAULT; | 158 | return -EFAULT; |
| 164 | 159 | ||
| @@ -187,7 +182,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | |||
| 187 | 182 | ||
| 188 | if (kstrtoul(val, 10, &num_msg_sym)) | 183 | if (kstrtoul(val, 10, &num_msg_sym)) |
| 189 | goto err; | 184 | goto err; |
| 185 | |||
| 190 | num_msg_sym = num_msg_sym >> 1; | 186 | num_msg_sym = num_msg_sym >> 1; |
| 187 | |||
| 191 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); | 188 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); |
| 192 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) | 189 | if (adf_cfg_get_param_value(accel_dev, SEC, key, val)) |
| 193 | goto err; | 190 | goto err; |
| @@ -202,11 +199,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | |||
| 202 | msg_size, key, NULL, 0, &inst->sym_tx)) | 199 | msg_size, key, NULL, 0, &inst->sym_tx)) |
| 203 | goto err; | 200 | goto err; |
| 204 | 201 | ||
| 205 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); | ||
| 206 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
| 207 | msg_size, key, NULL, 0, &inst->rnd_tx)) | ||
| 208 | goto err; | ||
| 209 | |||
| 210 | msg_size = msg_size >> 1; | 202 | msg_size = msg_size >> 1; |
| 211 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); | 203 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); |
| 212 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | 204 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, |
| @@ -220,15 +212,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) | |||
| 220 | &inst->sym_rx)) | 212 | &inst->sym_rx)) |
| 221 | goto err; | 213 | goto err; |
| 222 | 214 | ||
| 223 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); | ||
| 224 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | ||
| 225 | msg_size, key, qat_alg_callback, 0, | ||
| 226 | &inst->rnd_rx)) | ||
| 227 | goto err; | ||
| 228 | |||
| 229 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | 215 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); |
| 230 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, | 216 | if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, |
| 231 | msg_size, key, qat_alg_callback, 0, | 217 | msg_size, key, qat_alg_asym_callback, 0, |
| 232 | &inst->pke_rx)) | 218 | &inst->pke_rx)) |
| 233 | goto err; | 219 | goto err; |
| 234 | } | 220 | } |
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index d503007b49e6..dc0273fe3620 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h | |||
| @@ -57,8 +57,6 @@ struct qat_crypto_instance { | |||
| 57 | struct adf_etr_ring_data *sym_rx; | 57 | struct adf_etr_ring_data *sym_rx; |
| 58 | struct adf_etr_ring_data *pke_tx; | 58 | struct adf_etr_ring_data *pke_tx; |
| 59 | struct adf_etr_ring_data *pke_rx; | 59 | struct adf_etr_ring_data *pke_rx; |
| 60 | struct adf_etr_ring_data *rnd_tx; | ||
| 61 | struct adf_etr_ring_data *rnd_rx; | ||
| 62 | struct adf_accel_dev *accel_dev; | 60 | struct adf_accel_dev *accel_dev; |
| 63 | struct list_head list; | 61 | struct list_head list; |
| 64 | unsigned long state; | 62 | unsigned long state; |
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 274ff7e9de6e..8e711d1c3084 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c | |||
| @@ -671,7 +671,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) | |||
| 671 | #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) | 671 | #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) |
| 672 | #define LOCAL_TO_XFER_REG_OFFSET 0x800 | 672 | #define LOCAL_TO_XFER_REG_OFFSET 0x800 |
| 673 | #define ICP_DH895XCC_EP_OFFSET 0x3a000 | 673 | #define ICP_DH895XCC_EP_OFFSET 0x3a000 |
| 674 | #define ICP_DH895XCC_PMISC_BAR 1 | ||
| 675 | int qat_hal_init(struct adf_accel_dev *accel_dev) | 674 | int qat_hal_init(struct adf_accel_dev *accel_dev) |
| 676 | { | 675 | { |
| 677 | unsigned char ae; | 676 | unsigned char ae; |
| @@ -679,21 +678,24 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) | |||
| 679 | struct icp_qat_fw_loader_handle *handle; | 678 | struct icp_qat_fw_loader_handle *handle; |
| 680 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; | 679 | struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; |
| 681 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 680 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 682 | struct adf_bar *bar = | 681 | struct adf_bar *misc_bar = |
| 683 | &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)]; | 682 | &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)]; |
| 683 | struct adf_bar *sram_bar = | ||
| 684 | &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; | ||
| 684 | 685 | ||
| 685 | handle = kzalloc(sizeof(*handle), GFP_KERNEL); | 686 | handle = kzalloc(sizeof(*handle), GFP_KERNEL); |
| 686 | if (!handle) | 687 | if (!handle) |
| 687 | return -ENOMEM; | 688 | return -ENOMEM; |
| 688 | 689 | ||
| 689 | handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr + | 690 | handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr + |
| 690 | ICP_DH895XCC_CAP_OFFSET; | 691 | ICP_DH895XCC_CAP_OFFSET; |
| 691 | handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr + | 692 | handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr + |
| 692 | ICP_DH895XCC_AE_OFFSET; | 693 | ICP_DH895XCC_AE_OFFSET; |
| 693 | handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET; | 694 | handle->hal_ep_csr_addr_v = misc_bar->virt_addr + |
| 695 | ICP_DH895XCC_EP_OFFSET; | ||
| 694 | handle->hal_cap_ae_local_csr_addr_v = | 696 | handle->hal_cap_ae_local_csr_addr_v = |
| 695 | handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; | 697 | handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; |
| 696 | 698 | handle->hal_sram_addr_v = sram_bar->virt_addr; | |
| 697 | handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); | 699 | handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); |
| 698 | if (!handle->hal_handle) | 700 | if (!handle->hal_handle) |
| 699 | goto out_hal_handle; | 701 | goto out_hal_handle; |
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1 new file mode 100644 index 000000000000..97b0e02b600a --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_rsakey.asn1 | |||
| @@ -0,0 +1,5 @@ | |||
| 1 | RsaKey ::= SEQUENCE { | ||
| 2 | n INTEGER ({ qat_rsa_get_n }), | ||
| 3 | e INTEGER ({ qat_rsa_get_e }), | ||
| 4 | d INTEGER ({ qat_rsa_get_d }) | ||
| 5 | } | ||
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 1e27f9f7fddf..c48f181e8941 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
| @@ -359,28 +359,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, | |||
| 359 | static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, | 359 | static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, |
| 360 | struct icp_qat_uof_initmem *init_mem) | 360 | struct icp_qat_uof_initmem *init_mem) |
| 361 | { | 361 | { |
| 362 | unsigned int i; | ||
| 363 | struct icp_qat_uof_memvar_attr *mem_val_attr; | ||
| 364 | |||
| 365 | mem_val_attr = | ||
| 366 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | ||
| 367 | sizeof(struct icp_qat_uof_initmem)); | ||
| 368 | |||
| 369 | switch (init_mem->region) { | 362 | switch (init_mem->region) { |
| 370 | case ICP_QAT_UOF_SRAM_REGION: | ||
| 371 | if ((init_mem->addr + init_mem->num_in_bytes) > | ||
| 372 | ICP_DH895XCC_PESRAM_BAR_SIZE) { | ||
| 373 | pr_err("QAT: initmem on SRAM is out of range"); | ||
| 374 | return -EINVAL; | ||
| 375 | } | ||
| 376 | for (i = 0; i < init_mem->val_attr_num; i++) { | ||
| 377 | qat_uclo_wr_sram_by_words(handle, | ||
| 378 | init_mem->addr + | ||
| 379 | mem_val_attr->offset_in_byte, | ||
| 380 | &mem_val_attr->value, 4); | ||
| 381 | mem_val_attr++; | ||
| 382 | } | ||
| 383 | break; | ||
| 384 | case ICP_QAT_UOF_LMEM_REGION: | 363 | case ICP_QAT_UOF_LMEM_REGION: |
| 385 | if (qat_uclo_init_lmem_seg(handle, init_mem)) | 364 | if (qat_uclo_init_lmem_seg(handle, init_mem)) |
| 386 | return -EINVAL; | 365 | return -EINVAL; |
| @@ -990,6 +969,12 @@ out_err: | |||
| 990 | return -EFAULT; | 969 | return -EFAULT; |
| 991 | } | 970 | } |
| 992 | 971 | ||
| 972 | void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, | ||
| 973 | void *addr_ptr, int mem_size) | ||
| 974 | { | ||
| 975 | qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4)); | ||
| 976 | } | ||
| 977 | |||
| 993 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | 978 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, |
| 994 | void *addr_ptr, int mem_size) | 979 | void *addr_ptr, int mem_size) |
| 995 | { | 980 | { |
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 25171c557043..8c79c543740f 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile | |||
| @@ -2,7 +2,4 @@ ccflags-y := -I$(src)/../qat_common | |||
| 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o | 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o |
| 3 | qat_dh895xcc-objs := adf_drv.o \ | 3 | qat_dh895xcc-objs := adf_drv.o \ |
| 4 | adf_isr.o \ | 4 | adf_isr.o \ |
| 5 | adf_dh895xcc_hw_data.o \ | 5 | adf_dh895xcc_hw_data.o |
| 6 | adf_hw_arbiter.o \ | ||
| 7 | qat_admin.o \ | ||
| 8 | adf_admin.o | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c deleted file mode 100644 index e4666065c399..000000000000 --- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c +++ /dev/null | |||
| @@ -1,145 +0,0 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2014 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2014 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #include <linux/types.h> | ||
| 48 | #include <linux/mutex.h> | ||
| 49 | #include <linux/slab.h> | ||
| 50 | #include <linux/delay.h> | ||
| 51 | #include <linux/pci.h> | ||
| 52 | #include <linux/dma-mapping.h> | ||
| 53 | #include <adf_accel_devices.h> | ||
| 54 | #include "adf_drv.h" | ||
| 55 | #include "adf_dh895xcc_hw_data.h" | ||
| 56 | |||
| 57 | #define ADF_ADMINMSG_LEN 32 | ||
| 58 | |||
| 59 | struct adf_admin_comms { | ||
| 60 | dma_addr_t phy_addr; | ||
| 61 | void *virt_addr; | ||
| 62 | void __iomem *mailbox_addr; | ||
| 63 | struct mutex lock; /* protects adf_admin_comms struct */ | ||
| 64 | }; | ||
| 65 | |||
| 66 | int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, | ||
| 67 | uint32_t ae, void *in, void *out) | ||
| 68 | { | ||
| 69 | struct adf_admin_comms *admin = accel_dev->admin; | ||
| 70 | int offset = ae * ADF_ADMINMSG_LEN * 2; | ||
| 71 | void __iomem *mailbox = admin->mailbox_addr; | ||
| 72 | int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; | ||
| 73 | int times, received; | ||
| 74 | |||
| 75 | mutex_lock(&admin->lock); | ||
| 76 | |||
| 77 | if (ADF_CSR_RD(mailbox, mb_offset) == 1) { | ||
| 78 | mutex_unlock(&admin->lock); | ||
| 79 | return -EAGAIN; | ||
| 80 | } | ||
| 81 | |||
| 82 | memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); | ||
| 83 | ADF_CSR_WR(mailbox, mb_offset, 1); | ||
| 84 | received = 0; | ||
| 85 | for (times = 0; times < 50; times++) { | ||
| 86 | msleep(20); | ||
| 87 | if (ADF_CSR_RD(mailbox, mb_offset) == 0) { | ||
| 88 | received = 1; | ||
| 89 | break; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | if (received) | ||
| 93 | memcpy(out, admin->virt_addr + offset + | ||
| 94 | ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN); | ||
| 95 | else | ||
| 96 | dev_err(&GET_DEV(accel_dev), | ||
| 97 | "Failed to send admin msg to accelerator\n"); | ||
| 98 | |||
| 99 | mutex_unlock(&admin->lock); | ||
| 100 | return received ? 0 : -EFAULT; | ||
| 101 | } | ||
| 102 | |||
| 103 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev) | ||
| 104 | { | ||
| 105 | struct adf_admin_comms *admin; | ||
| 106 | struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; | ||
| 107 | void __iomem *csr = pmisc->virt_addr; | ||
| 108 | void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; | ||
| 109 | uint64_t reg_val; | ||
| 110 | |||
| 111 | admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, | ||
| 112 | dev_to_node(&GET_DEV(accel_dev))); | ||
| 113 | if (!admin) | ||
| 114 | return -ENOMEM; | ||
| 115 | admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
| 116 | &admin->phy_addr, GFP_KERNEL); | ||
| 117 | if (!admin->virt_addr) { | ||
| 118 | dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); | ||
| 119 | kfree(admin); | ||
| 120 | return -ENOMEM; | ||
| 121 | } | ||
| 122 | reg_val = (uint64_t)admin->phy_addr; | ||
| 123 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); | ||
| 124 | ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); | ||
| 125 | mutex_init(&admin->lock); | ||
| 126 | admin->mailbox_addr = mailbox; | ||
| 127 | accel_dev->admin = admin; | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) | ||
| 132 | { | ||
| 133 | struct adf_admin_comms *admin = accel_dev->admin; | ||
| 134 | |||
| 135 | if (!admin) | ||
| 136 | return; | ||
| 137 | |||
| 138 | if (admin->virt_addr) | ||
| 139 | dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, | ||
| 140 | admin->virt_addr, admin->phy_addr); | ||
| 141 | |||
| 142 | mutex_destroy(&admin->lock); | ||
| 143 | kfree(admin); | ||
| 144 | accel_dev->admin = NULL; | ||
| 145 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index b1386922d7a2..ff54257eced4 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | |||
| @@ -45,8 +45,9 @@ | |||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 46 | */ | 46 | */ |
| 47 | #include <adf_accel_devices.h> | 47 | #include <adf_accel_devices.h> |
| 48 | #include <adf_pf2vf_msg.h> | ||
| 49 | #include <adf_common_drv.h> | ||
| 48 | #include "adf_dh895xcc_hw_data.h" | 50 | #include "adf_dh895xcc_hw_data.h" |
| 49 | #include "adf_common_drv.h" | ||
| 50 | #include "adf_drv.h" | 51 | #include "adf_drv.h" |
| 51 | 52 | ||
| 52 | /* Worker thread to service arbiter mappings based on dev SKUs */ | 53 | /* Worker thread to service arbiter mappings based on dev SKUs */ |
| @@ -117,6 +118,11 @@ static uint32_t get_etr_bar_id(struct adf_hw_device_data *self) | |||
| 117 | return ADF_DH895XCC_ETR_BAR; | 118 | return ADF_DH895XCC_ETR_BAR; |
| 118 | } | 119 | } |
| 119 | 120 | ||
| 121 | static uint32_t get_sram_bar_id(struct adf_hw_device_data *self) | ||
| 122 | { | ||
| 123 | return ADF_DH895XCC_SRAM_BAR; | ||
| 124 | } | ||
| 125 | |||
| 120 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) | 126 | static enum dev_sku_info get_sku(struct adf_hw_device_data *self) |
| 121 | { | 127 | { |
| 122 | int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) | 128 | int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) |
| @@ -156,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | |||
| 156 | } | 162 | } |
| 157 | } | 163 | } |
| 158 | 164 | ||
| 165 | static uint32_t get_pf2vf_offset(uint32_t i) | ||
| 166 | { | ||
| 167 | return ADF_DH895XCC_PF2VF_OFFSET(i); | ||
| 168 | } | ||
| 169 | |||
| 170 | static uint32_t get_vintmsk_offset(uint32_t i) | ||
| 171 | { | ||
| 172 | return ADF_DH895XCC_VINTMSK_OFFSET(i); | ||
| 173 | } | ||
| 174 | |||
| 159 | static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) | 175 | static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) |
| 160 | { | 176 | { |
| 161 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | 177 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; |
| @@ -192,18 +208,23 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) | |||
| 192 | 208 | ||
| 193 | /* Enable bundle and misc interrupts */ | 209 | /* Enable bundle and misc interrupts */ |
| 194 | ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, | 210 | ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, |
| 195 | ADF_DH895XCC_SMIA0_MASK); | 211 | accel_dev->pf.vf_info ? 0 : |
| 212 | GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0)); | ||
| 196 | ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, | 213 | ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, |
| 197 | ADF_DH895XCC_SMIA1_MASK); | 214 | ADF_DH895XCC_SMIA1_MASK); |
| 198 | } | 215 | } |
| 199 | 216 | ||
| 217 | static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) | ||
| 218 | { | ||
| 219 | return 0; | ||
| 220 | } | ||
| 221 | |||
| 200 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | 222 | void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) |
| 201 | { | 223 | { |
| 202 | hw_data->dev_class = &dh895xcc_class; | 224 | hw_data->dev_class = &dh895xcc_class; |
| 203 | hw_data->instance_id = dh895xcc_class.instances++; | 225 | hw_data->instance_id = dh895xcc_class.instances++; |
| 204 | hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; | 226 | hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS; |
| 205 | hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; | 227 | hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS; |
| 206 | hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID; | ||
| 207 | hw_data->num_logical_accel = 1; | 228 | hw_data->num_logical_accel = 1; |
| 208 | hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; | 229 | hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES; |
| 209 | hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; | 230 | hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET; |
| @@ -211,21 +232,28 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | |||
| 211 | hw_data->alloc_irq = adf_isr_resource_alloc; | 232 | hw_data->alloc_irq = adf_isr_resource_alloc; |
| 212 | hw_data->free_irq = adf_isr_resource_free; | 233 | hw_data->free_irq = adf_isr_resource_free; |
| 213 | hw_data->enable_error_correction = adf_enable_error_correction; | 234 | hw_data->enable_error_correction = adf_enable_error_correction; |
| 214 | hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable; | ||
| 215 | hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable; | ||
| 216 | hw_data->get_accel_mask = get_accel_mask; | 235 | hw_data->get_accel_mask = get_accel_mask; |
| 217 | hw_data->get_ae_mask = get_ae_mask; | 236 | hw_data->get_ae_mask = get_ae_mask; |
| 218 | hw_data->get_num_accels = get_num_accels; | 237 | hw_data->get_num_accels = get_num_accels; |
| 219 | hw_data->get_num_aes = get_num_aes; | 238 | hw_data->get_num_aes = get_num_aes; |
| 220 | hw_data->get_etr_bar_id = get_etr_bar_id; | 239 | hw_data->get_etr_bar_id = get_etr_bar_id; |
| 221 | hw_data->get_misc_bar_id = get_misc_bar_id; | 240 | hw_data->get_misc_bar_id = get_misc_bar_id; |
| 241 | hw_data->get_pf2vf_offset = get_pf2vf_offset; | ||
| 242 | hw_data->get_vintmsk_offset = get_vintmsk_offset; | ||
| 243 | hw_data->get_sram_bar_id = get_sram_bar_id; | ||
| 222 | hw_data->get_sku = get_sku; | 244 | hw_data->get_sku = get_sku; |
| 223 | hw_data->fw_name = ADF_DH895XCC_FW; | 245 | hw_data->fw_name = ADF_DH895XCC_FW; |
| 246 | hw_data->fw_mmp_name = ADF_DH895XCC_MMP; | ||
| 224 | hw_data->init_admin_comms = adf_init_admin_comms; | 247 | hw_data->init_admin_comms = adf_init_admin_comms; |
| 225 | hw_data->exit_admin_comms = adf_exit_admin_comms; | 248 | hw_data->exit_admin_comms = adf_exit_admin_comms; |
| 249 | hw_data->disable_iov = adf_disable_sriov; | ||
| 250 | hw_data->send_admin_init = adf_send_admin_init; | ||
| 226 | hw_data->init_arb = adf_init_arb; | 251 | hw_data->init_arb = adf_init_arb; |
| 227 | hw_data->exit_arb = adf_exit_arb; | 252 | hw_data->exit_arb = adf_exit_arb; |
| 253 | hw_data->get_arb_mapping = adf_get_arbiter_mapping; | ||
| 228 | hw_data->enable_ints = adf_enable_ints; | 254 | hw_data->enable_ints = adf_enable_ints; |
| 255 | hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; | ||
| 256 | hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; | ||
| 229 | } | 257 | } |
| 230 | 258 | ||
| 231 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) | 259 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 25269a9f24a2..88dffb297346 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | #define ADF_DH895x_HW_DATA_H_ | 48 | #define ADF_DH895x_HW_DATA_H_ |
| 49 | 49 | ||
| 50 | /* PCIe configuration space */ | 50 | /* PCIe configuration space */ |
| 51 | #define ADF_DH895XCC_SRAM_BAR 0 | ||
| 51 | #define ADF_DH895XCC_PMISC_BAR 1 | 52 | #define ADF_DH895XCC_PMISC_BAR 1 |
| 52 | #define ADF_DH895XCC_ETR_BAR 2 | 53 | #define ADF_DH895XCC_ETR_BAR 2 |
| 53 | #define ADF_DH895XCC_RX_RINGS_OFFSET 8 | 54 | #define ADF_DH895XCC_RX_RINGS_OFFSET 8 |
| @@ -79,10 +80,11 @@ | |||
| 79 | #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) | 80 | #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) |
| 80 | #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) | 81 | #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) |
| 81 | 82 | ||
| 82 | /* Admin Messages Registers */ | 83 | #define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C) |
| 83 | #define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) | 84 | #define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8) |
| 84 | #define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) | 85 | #define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) |
| 85 | #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 | 86 | #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) |
| 86 | #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 | 87 | /* FW names */ |
| 87 | #define ADF_DH895XCC_FW "qat_895xcc.bin" | 88 | #define ADF_DH895XCC_FW "qat_895xcc.bin" |
| 89 | #define ADF_DH895XCC_MMP "qat_mmp.bin" | ||
| 88 | #endif | 90 | #endif |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 1bde45b7a3c5..f8dd14f232c8 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | |||
| @@ -82,16 +82,21 @@ static struct pci_driver adf_driver = { | |||
| 82 | .id_table = adf_pci_tbl, | 82 | .id_table = adf_pci_tbl, |
| 83 | .name = adf_driver_name, | 83 | .name = adf_driver_name, |
| 84 | .probe = adf_probe, | 84 | .probe = adf_probe, |
| 85 | .remove = adf_remove | 85 | .remove = adf_remove, |
| 86 | .sriov_configure = adf_sriov_configure, | ||
| 86 | }; | 87 | }; |
| 87 | 88 | ||
| 89 | static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) | ||
| 90 | { | ||
| 91 | pci_release_regions(accel_dev->accel_pci_dev.pci_dev); | ||
| 92 | pci_disable_device(accel_dev->accel_pci_dev.pci_dev); | ||
| 93 | } | ||
| 94 | |||
| 88 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | 95 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) |
| 89 | { | 96 | { |
| 90 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; | 97 | struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; |
| 91 | int i; | 98 | int i; |
| 92 | 99 | ||
| 93 | adf_dev_shutdown(accel_dev); | ||
| 94 | |||
| 95 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | 100 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { |
| 96 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | 101 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; |
| 97 | 102 | ||
| @@ -100,7 +105,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | |||
| 100 | } | 105 | } |
| 101 | 106 | ||
| 102 | if (accel_dev->hw_device) { | 107 | if (accel_dev->hw_device) { |
| 103 | switch (accel_dev->hw_device->pci_dev_id) { | 108 | switch (accel_pci_dev->pci_dev->device) { |
| 104 | case ADF_DH895XCC_PCI_DEVICE_ID: | 109 | case ADF_DH895XCC_PCI_DEVICE_ID: |
| 105 | adf_clean_hw_data_dh895xcc(accel_dev->hw_device); | 110 | adf_clean_hw_data_dh895xcc(accel_dev->hw_device); |
| 106 | break; | 111 | break; |
| @@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) | |||
| 108 | break; | 113 | break; |
| 109 | } | 114 | } |
| 110 | kfree(accel_dev->hw_device); | 115 | kfree(accel_dev->hw_device); |
| 116 | accel_dev->hw_device = NULL; | ||
| 111 | } | 117 | } |
| 112 | adf_cfg_dev_remove(accel_dev); | 118 | adf_cfg_dev_remove(accel_dev); |
| 113 | debugfs_remove(accel_dev->debugfs_dir); | 119 | debugfs_remove(accel_dev->debugfs_dir); |
| 114 | adf_devmgr_rm_dev(accel_dev); | 120 | adf_devmgr_rm_dev(accel_dev, NULL); |
| 115 | pci_release_regions(accel_pci_dev->pci_dev); | ||
| 116 | pci_disable_device(accel_pci_dev->pci_dev); | ||
| 117 | kfree(accel_dev); | ||
| 118 | } | 121 | } |
| 119 | 122 | ||
| 120 | static int adf_dev_configure(struct adf_accel_dev *accel_dev) | 123 | static int adf_dev_configure(struct adf_accel_dev *accel_dev) |
| @@ -167,12 +170,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev) | |||
| 167 | key, (void *)&val, ADF_DEC)) | 170 | key, (void *)&val, ADF_DEC)) |
| 168 | goto err; | 171 | goto err; |
| 169 | 172 | ||
| 170 | val = 4; | ||
| 171 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); | ||
| 172 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
| 173 | key, (void *)&val, ADF_DEC)) | ||
| 174 | goto err; | ||
| 175 | |||
| 176 | val = 8; | 173 | val = 8; |
| 177 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); | 174 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); |
| 178 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | 175 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, |
| @@ -185,12 +182,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev) | |||
| 185 | key, (void *)&val, ADF_DEC)) | 182 | key, (void *)&val, ADF_DEC)) |
| 186 | goto err; | 183 | goto err; |
| 187 | 184 | ||
| 188 | val = 12; | ||
| 189 | snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); | ||
| 190 | if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, | ||
| 191 | key, (void *)&val, ADF_DEC)) | ||
| 192 | goto err; | ||
| 193 | |||
| 194 | val = ADF_COALESCING_DEF_TIME; | 185 | val = ADF_COALESCING_DEF_TIME; |
| 195 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); | 186 | snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); |
| 196 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", | 187 | if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", |
| @@ -217,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 217 | struct adf_hw_device_data *hw_data; | 208 | struct adf_hw_device_data *hw_data; |
| 218 | char name[ADF_DEVICE_NAME_LENGTH]; | 209 | char name[ADF_DEVICE_NAME_LENGTH]; |
| 219 | unsigned int i, bar_nr; | 210 | unsigned int i, bar_nr; |
| 220 | int ret; | 211 | int ret, bar_mask; |
| 221 | 212 | ||
| 222 | switch (ent->device) { | 213 | switch (ent->device) { |
| 223 | case ADF_DH895XCC_PCI_DEVICE_ID: | 214 | case ADF_DH895XCC_PCI_DEVICE_ID: |
| @@ -241,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 241 | return -ENOMEM; | 232 | return -ENOMEM; |
| 242 | 233 | ||
| 243 | INIT_LIST_HEAD(&accel_dev->crypto_list); | 234 | INIT_LIST_HEAD(&accel_dev->crypto_list); |
| 235 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
| 236 | accel_pci_dev->pci_dev = pdev; | ||
| 244 | 237 | ||
| 245 | /* Add accel device to accel table. | 238 | /* Add accel device to accel table. |
| 246 | * This should be called before adf_cleanup_accel is called */ | 239 | * This should be called before adf_cleanup_accel is called */ |
| 247 | if (adf_devmgr_add_dev(accel_dev)) { | 240 | if (adf_devmgr_add_dev(accel_dev, NULL)) { |
| 248 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); | 241 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); |
| 249 | kfree(accel_dev); | 242 | kfree(accel_dev); |
| 250 | return -EFAULT; | 243 | return -EFAULT; |
| @@ -267,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 267 | default: | 260 | default: |
| 268 | return -ENODEV; | 261 | return -ENODEV; |
| 269 | } | 262 | } |
| 270 | accel_pci_dev = &accel_dev->accel_pci_dev; | ||
| 271 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); | 263 | pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); |
| 272 | pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, | 264 | pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, |
| 273 | &hw_data->fuses); | 265 | &hw_data->fuses); |
| @@ -276,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 276 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); | 268 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); |
| 277 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); | 269 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); |
| 278 | accel_pci_dev->sku = hw_data->get_sku(hw_data); | 270 | accel_pci_dev->sku = hw_data->get_sku(hw_data); |
| 279 | accel_pci_dev->pci_dev = pdev; | ||
| 280 | /* If the device has no acceleration engines then ignore it. */ | 271 | /* If the device has no acceleration engines then ignore it. */ |
| 281 | if (!hw_data->accel_mask || !hw_data->ae_mask || | 272 | if (!hw_data->accel_mask || !hw_data->ae_mask || |
| 282 | ((~hw_data->ae_mask) & 0x01)) { | 273 | ((~hw_data->ae_mask) & 0x01)) { |
| @@ -286,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 286 | } | 277 | } |
| 287 | 278 | ||
| 288 | /* Create dev top level debugfs entry */ | 279 | /* Create dev top level debugfs entry */ |
| 289 | snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, | 280 | snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", |
| 290 | hw_data->dev_class->name, hw_data->instance_id); | 281 | ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, |
| 282 | pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
| 283 | PCI_FUNC(pdev->devfn)); | ||
| 284 | |||
| 291 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); | 285 | accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); |
| 292 | if (!accel_dev->debugfs_dir) { | 286 | if (!accel_dev->debugfs_dir) { |
| 293 | dev_err(&pdev->dev, "Could not create debugfs dir\n"); | 287 | dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); |
| 294 | ret = -EINVAL; | 288 | ret = -EINVAL; |
| 295 | goto out_err; | 289 | goto out_err; |
| 296 | } | 290 | } |
| @@ -313,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 313 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { | 307 | if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { |
| 314 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | 308 | dev_err(&pdev->dev, "No usable DMA configuration\n"); |
| 315 | ret = -EFAULT; | 309 | ret = -EFAULT; |
| 316 | goto out_err; | 310 | goto out_err_disable; |
| 317 | } else { | 311 | } else { |
| 318 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 312 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 319 | } | 313 | } |
| @@ -324,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 324 | 318 | ||
| 325 | if (pci_request_regions(pdev, adf_driver_name)) { | 319 | if (pci_request_regions(pdev, adf_driver_name)) { |
| 326 | ret = -EFAULT; | 320 | ret = -EFAULT; |
| 327 | goto out_err; | 321 | goto out_err_disable; |
| 328 | } | 322 | } |
| 329 | 323 | ||
| 330 | /* Read accelerator capabilities mask */ | 324 | /* Read accelerator capabilities mask */ |
| @@ -332,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 332 | &hw_data->accel_capabilities_mask); | 326 | &hw_data->accel_capabilities_mask); |
| 333 | 327 | ||
| 334 | /* Find and map all the device's BARS */ | 328 | /* Find and map all the device's BARS */ |
| 335 | for (i = 0; i < ADF_PCI_MAX_BARS; i++) { | 329 | i = 0; |
| 336 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; | 330 | bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); |
| 331 | for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, | ||
| 332 | ADF_PCI_MAX_BARS * 2) { | ||
| 333 | struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; | ||
| 337 | 334 | ||
| 338 | bar_nr = i * 2; | ||
| 339 | bar->base_addr = pci_resource_start(pdev, bar_nr); | 335 | bar->base_addr = pci_resource_start(pdev, bar_nr); |
| 340 | if (!bar->base_addr) | 336 | if (!bar->base_addr) |
| 341 | break; | 337 | break; |
| 342 | bar->size = pci_resource_len(pdev, bar_nr); | 338 | bar->size = pci_resource_len(pdev, bar_nr); |
| 343 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); | 339 | bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); |
| 344 | if (!bar->virt_addr) { | 340 | if (!bar->virt_addr) { |
| 345 | dev_err(&pdev->dev, "Failed to map BAR %d\n", i); | 341 | dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); |
| 346 | ret = -EFAULT; | 342 | ret = -EFAULT; |
| 347 | goto out_err; | 343 | goto out_err_free_reg; |
| 348 | } | 344 | } |
| 349 | } | 345 | } |
| 350 | pci_set_master(pdev); | 346 | pci_set_master(pdev); |
| @@ -352,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 352 | if (adf_enable_aer(accel_dev, &adf_driver)) { | 348 | if (adf_enable_aer(accel_dev, &adf_driver)) { |
| 353 | dev_err(&pdev->dev, "Failed to enable aer\n"); | 349 | dev_err(&pdev->dev, "Failed to enable aer\n"); |
| 354 | ret = -EFAULT; | 350 | ret = -EFAULT; |
| 355 | goto out_err; | 351 | goto out_err_free_reg; |
| 356 | } | 352 | } |
| 357 | 353 | ||
| 358 | if (pci_save_state(pdev)) { | 354 | if (pci_save_state(pdev)) { |
| 359 | dev_err(&pdev->dev, "Failed to save pci state\n"); | 355 | dev_err(&pdev->dev, "Failed to save pci state\n"); |
| 360 | ret = -ENOMEM; | 356 | ret = -ENOMEM; |
| 361 | goto out_err; | 357 | goto out_err_free_reg; |
| 362 | } | 358 | } |
| 363 | 359 | ||
| 364 | ret = adf_dev_configure(accel_dev); | 360 | ret = adf_dev_configure(accel_dev); |
| 365 | if (ret) | 361 | if (ret) |
| 366 | goto out_err; | 362 | goto out_err_free_reg; |
| 367 | 363 | ||
| 368 | ret = adf_dev_init(accel_dev); | 364 | ret = adf_dev_init(accel_dev); |
| 369 | if (ret) | 365 | if (ret) |
| 370 | goto out_err; | 366 | goto out_err_dev_shutdown; |
| 371 | 367 | ||
| 372 | ret = adf_dev_start(accel_dev); | 368 | ret = adf_dev_start(accel_dev); |
| 373 | if (ret) { | 369 | if (ret) |
| 374 | adf_dev_stop(accel_dev); | 370 | goto out_err_dev_stop; |
| 375 | goto out_err; | ||
| 376 | } | ||
| 377 | 371 | ||
| 378 | return 0; | 372 | return ret; |
| 373 | |||
| 374 | out_err_dev_stop: | ||
| 375 | adf_dev_stop(accel_dev); | ||
| 376 | out_err_dev_shutdown: | ||
| 377 | adf_dev_shutdown(accel_dev); | ||
| 378 | out_err_free_reg: | ||
| 379 | pci_release_regions(accel_pci_dev->pci_dev); | ||
| 380 | out_err_disable: | ||
| 381 | pci_disable_device(accel_pci_dev->pci_dev); | ||
| 379 | out_err: | 382 | out_err: |
| 380 | adf_cleanup_accel(accel_dev); | 383 | adf_cleanup_accel(accel_dev); |
| 384 | kfree(accel_dev); | ||
| 381 | return ret; | 385 | return ret; |
| 382 | } | 386 | } |
| 383 | 387 | ||
| @@ -391,15 +395,17 @@ static void adf_remove(struct pci_dev *pdev) | |||
| 391 | } | 395 | } |
| 392 | if (adf_dev_stop(accel_dev)) | 396 | if (adf_dev_stop(accel_dev)) |
| 393 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); | 397 | dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); |
| 398 | |||
| 399 | adf_dev_shutdown(accel_dev); | ||
| 394 | adf_disable_aer(accel_dev); | 400 | adf_disable_aer(accel_dev); |
| 395 | adf_cleanup_accel(accel_dev); | 401 | adf_cleanup_accel(accel_dev); |
| 402 | adf_cleanup_pci_dev(accel_dev); | ||
| 403 | kfree(accel_dev); | ||
| 396 | } | 404 | } |
| 397 | 405 | ||
| 398 | static int __init adfdrv_init(void) | 406 | static int __init adfdrv_init(void) |
| 399 | { | 407 | { |
| 400 | request_module("intel_qat"); | 408 | request_module("intel_qat"); |
| 401 | if (qat_admin_register()) | ||
| 402 | return -EFAULT; | ||
| 403 | 409 | ||
| 404 | if (pci_register_driver(&adf_driver)) { | 410 | if (pci_register_driver(&adf_driver)) { |
| 405 | pr_err("QAT: Driver initialization failed\n"); | 411 | pr_err("QAT: Driver initialization failed\n"); |
| @@ -411,7 +417,6 @@ static int __init adfdrv_init(void) | |||
| 411 | static void __exit adfdrv_release(void) | 417 | static void __exit adfdrv_release(void) |
| 412 | { | 418 | { |
| 413 | pci_unregister_driver(&adf_driver); | 419 | pci_unregister_driver(&adf_driver); |
| 414 | qat_admin_unregister(); | ||
| 415 | } | 420 | } |
| 416 | 421 | ||
| 417 | module_init(adfdrv_init); | 422 | module_init(adfdrv_init); |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h index a2fbb6ce75cd..85ff245bd1d8 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h | |||
| @@ -53,15 +53,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | |||
| 53 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); | 53 | void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); |
| 54 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | 54 | int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); |
| 55 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev); | 55 | void adf_isr_resource_free(struct adf_accel_dev *accel_dev); |
| 56 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); | ||
| 57 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, | 56 | void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, |
| 58 | uint32_t const **arb_map_config); | 57 | uint32_t const **arb_map_config); |
| 59 | int adf_init_admin_comms(struct adf_accel_dev *accel_dev); | ||
| 60 | void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); | ||
| 61 | int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, | ||
| 62 | uint32_t ae, void *in, void *out); | ||
| 63 | int qat_admin_register(void); | ||
| 64 | int qat_admin_unregister(void); | ||
| 65 | int adf_init_arb(struct adf_accel_dev *accel_dev); | ||
| 66 | void adf_exit_arb(struct adf_accel_dev *accel_dev); | ||
| 67 | #endif | 58 | #endif |
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c index 0d03c109c2d3..5570f78795c1 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c | |||
| @@ -59,21 +59,30 @@ | |||
| 59 | #include <adf_transport_access_macros.h> | 59 | #include <adf_transport_access_macros.h> |
| 60 | #include <adf_transport_internal.h> | 60 | #include <adf_transport_internal.h> |
| 61 | #include "adf_drv.h" | 61 | #include "adf_drv.h" |
| 62 | #include "adf_dh895xcc_hw_data.h" | ||
| 62 | 63 | ||
| 63 | static int adf_enable_msix(struct adf_accel_dev *accel_dev) | 64 | static int adf_enable_msix(struct adf_accel_dev *accel_dev) |
| 64 | { | 65 | { |
| 65 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | 66 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; |
| 66 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 67 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 67 | uint32_t msix_num_entries = hw_data->num_banks + 1; | 68 | u32 msix_num_entries = 1; |
| 68 | int i; | 69 | |
| 69 | 70 | /* If SR-IOV is disabled, add entries for each bank */ | |
| 70 | for (i = 0; i < msix_num_entries; i++) | 71 | if (!accel_dev->pf.vf_info) { |
| 71 | pci_dev_info->msix_entries.entries[i].entry = i; | 72 | int i; |
| 73 | |||
| 74 | msix_num_entries += hw_data->num_banks; | ||
| 75 | for (i = 0; i < msix_num_entries; i++) | ||
| 76 | pci_dev_info->msix_entries.entries[i].entry = i; | ||
| 77 | } else { | ||
| 78 | pci_dev_info->msix_entries.entries[0].entry = | ||
| 79 | hw_data->num_banks; | ||
| 80 | } | ||
| 72 | 81 | ||
| 73 | if (pci_enable_msix_exact(pci_dev_info->pci_dev, | 82 | if (pci_enable_msix_exact(pci_dev_info->pci_dev, |
| 74 | pci_dev_info->msix_entries.entries, | 83 | pci_dev_info->msix_entries.entries, |
| 75 | msix_num_entries)) { | 84 | msix_num_entries)) { |
| 76 | dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n"); | 85 | dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n"); |
| 77 | return -EFAULT; | 86 | return -EFAULT; |
| 78 | } | 87 | } |
| 79 | return 0; | 88 | return 0; |
| @@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) | |||
| 97 | { | 106 | { |
| 98 | struct adf_accel_dev *accel_dev = dev_ptr; | 107 | struct adf_accel_dev *accel_dev = dev_ptr; |
| 99 | 108 | ||
| 100 | dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", | 109 | #ifdef CONFIG_PCI_IOV |
| 101 | accel_dev->accel_id); | 110 | /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ |
| 102 | return IRQ_HANDLED; | 111 | if (accel_dev->pf.vf_info) { |
| 112 | void __iomem *pmisc_bar_addr = | ||
| 113 | (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; | ||
| 114 | u32 vf_mask; | ||
| 115 | |||
| 116 | /* Get the interrupt sources triggered by VFs */ | ||
| 117 | vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) & | ||
| 118 | 0x0000FFFF) << 16) | | ||
| 119 | ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) & | ||
| 120 | 0x01FFFE00) >> 9); | ||
| 121 | |||
| 122 | if (vf_mask) { | ||
| 123 | struct adf_accel_vf_info *vf_info; | ||
| 124 | bool irq_handled = false; | ||
| 125 | int i; | ||
| 126 | |||
| 127 | /* Disable VF2PF interrupts for VFs with pending ints */ | ||
| 128 | adf_disable_vf2pf_interrupts(accel_dev, vf_mask); | ||
| 129 | |||
| 130 | /* | ||
| 131 | * Schedule tasklets to handle VF2PF interrupt BHs | ||
| 132 | * unless the VF is malicious and is attempting to | ||
| 133 | * flood the host OS with VF2PF interrupts. | ||
| 134 | */ | ||
| 135 | for_each_set_bit(i, (const unsigned long *)&vf_mask, | ||
| 136 | (sizeof(vf_mask) * BITS_PER_BYTE)) { | ||
| 137 | vf_info = accel_dev->pf.vf_info + i; | ||
| 138 | |||
| 139 | if (!__ratelimit(&vf_info->vf2pf_ratelimit)) { | ||
| 140 | dev_info(&GET_DEV(accel_dev), | ||
| 141 | "Too many ints from VF%d\n", | ||
| 142 | vf_info->vf_nr + 1); | ||
| 143 | continue; | ||
| 144 | } | ||
| 145 | |||
| 146 | /* Tasklet will re-enable ints from this VF */ | ||
| 147 | tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet); | ||
| 148 | irq_handled = true; | ||
| 149 | } | ||
| 150 | |||
| 151 | if (irq_handled) | ||
| 152 | return IRQ_HANDLED; | ||
| 153 | } | ||
| 154 | } | ||
| 155 | #endif /* CONFIG_PCI_IOV */ | ||
| 156 | |||
| 157 | dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", | ||
| 158 | accel_dev->accel_id); | ||
| 159 | |||
| 160 | return IRQ_NONE; | ||
| 103 | } | 161 | } |
| 104 | 162 | ||
| 105 | static int adf_request_irqs(struct adf_accel_dev *accel_dev) | 163 | static int adf_request_irqs(struct adf_accel_dev *accel_dev) |
| @@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev) | |||
| 108 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 166 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 109 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; | 167 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; |
| 110 | struct adf_etr_data *etr_data = accel_dev->transport; | 168 | struct adf_etr_data *etr_data = accel_dev->transport; |
| 111 | int ret, i; | 169 | int ret, i = 0; |
| 112 | char *name; | 170 | char *name; |
| 113 | 171 | ||
| 114 | /* Request msix irq for all banks */ | 172 | /* Request msix irq for all banks unless SR-IOV enabled */ |
| 115 | for (i = 0; i < hw_data->num_banks; i++) { | 173 | if (!accel_dev->pf.vf_info) { |
| 116 | struct adf_etr_bank_data *bank = &etr_data->banks[i]; | 174 | for (i = 0; i < hw_data->num_banks; i++) { |
| 117 | unsigned int cpu, cpus = num_online_cpus(); | 175 | struct adf_etr_bank_data *bank = &etr_data->banks[i]; |
| 118 | 176 | unsigned int cpu, cpus = num_online_cpus(); | |
| 119 | name = *(pci_dev_info->msix_entries.names + i); | 177 | |
| 120 | snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, | 178 | name = *(pci_dev_info->msix_entries.names + i); |
| 121 | "qat%d-bundle%d", accel_dev->accel_id, i); | 179 | snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, |
| 122 | ret = request_irq(msixe[i].vector, | 180 | "qat%d-bundle%d", accel_dev->accel_id, i); |
| 123 | adf_msix_isr_bundle, 0, name, bank); | 181 | ret = request_irq(msixe[i].vector, |
| 124 | if (ret) { | 182 | adf_msix_isr_bundle, 0, name, bank); |
| 125 | dev_err(&GET_DEV(accel_dev), | 183 | if (ret) { |
| 126 | "failed to enable irq %d for %s\n", | 184 | dev_err(&GET_DEV(accel_dev), |
| 127 | msixe[i].vector, name); | 185 | "failed to enable irq %d for %s\n", |
| 128 | return ret; | 186 | msixe[i].vector, name); |
| 187 | return ret; | ||
| 188 | } | ||
| 189 | |||
| 190 | cpu = ((accel_dev->accel_id * hw_data->num_banks) + | ||
| 191 | i) % cpus; | ||
| 192 | irq_set_affinity_hint(msixe[i].vector, | ||
| 193 | get_cpu_mask(cpu)); | ||
| 129 | } | 194 | } |
| 130 | |||
| 131 | cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; | ||
| 132 | irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); | ||
| 133 | } | 195 | } |
| 134 | 196 | ||
| 135 | /* Request msix irq for AE */ | 197 | /* Request msix irq for AE */ |
| @@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev) | |||
| 152 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 214 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 153 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; | 215 | struct msix_entry *msixe = pci_dev_info->msix_entries.entries; |
| 154 | struct adf_etr_data *etr_data = accel_dev->transport; | 216 | struct adf_etr_data *etr_data = accel_dev->transport; |
| 155 | int i; | 217 | int i = 0; |
| 156 | 218 | ||
| 157 | for (i = 0; i < hw_data->num_banks; i++) { | 219 | if (pci_dev_info->msix_entries.num_entries > 1) { |
| 158 | irq_set_affinity_hint(msixe[i].vector, NULL); | 220 | for (i = 0; i < hw_data->num_banks; i++) { |
| 159 | free_irq(msixe[i].vector, &etr_data->banks[i]); | 221 | irq_set_affinity_hint(msixe[i].vector, NULL); |
| 222 | free_irq(msixe[i].vector, &etr_data->banks[i]); | ||
| 223 | } | ||
| 160 | } | 224 | } |
| 161 | irq_set_affinity_hint(msixe[i].vector, NULL); | 225 | irq_set_affinity_hint(msixe[i].vector, NULL); |
| 162 | free_irq(msixe[i].vector, accel_dev); | 226 | free_irq(msixe[i].vector, accel_dev); |
| @@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) | |||
| 168 | char **names; | 232 | char **names; |
| 169 | struct msix_entry *entries; | 233 | struct msix_entry *entries; |
| 170 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | 234 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; |
| 171 | uint32_t msix_num_entries = hw_data->num_banks + 1; | 235 | u32 msix_num_entries = 1; |
| 236 | |||
| 237 | /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ | ||
| 238 | if (!accel_dev->pf.vf_info) | ||
| 239 | msix_num_entries += hw_data->num_banks; | ||
| 172 | 240 | ||
| 173 | entries = kzalloc_node(msix_num_entries * sizeof(*entries), | 241 | entries = kzalloc_node(msix_num_entries * sizeof(*entries), |
| 174 | GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); | 242 | GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); |
| @@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) | |||
| 185 | if (!(*(names + i))) | 253 | if (!(*(names + i))) |
| 186 | goto err; | 254 | goto err; |
| 187 | } | 255 | } |
| 256 | accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries; | ||
| 188 | accel_dev->accel_pci_dev.msix_entries.entries = entries; | 257 | accel_dev->accel_pci_dev.msix_entries.entries = entries; |
| 189 | accel_dev->accel_pci_dev.msix_entries.names = names; | 258 | accel_dev->accel_pci_dev.msix_entries.names = names; |
| 190 | return 0; | 259 | return 0; |
| @@ -198,13 +267,11 @@ err: | |||
| 198 | 267 | ||
| 199 | static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) | 268 | static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) |
| 200 | { | 269 | { |
| 201 | struct adf_hw_device_data *hw_data = accel_dev->hw_device; | ||
| 202 | uint32_t msix_num_entries = hw_data->num_banks + 1; | ||
| 203 | char **names = accel_dev->accel_pci_dev.msix_entries.names; | 270 | char **names = accel_dev->accel_pci_dev.msix_entries.names; |
| 204 | int i; | 271 | int i; |
| 205 | 272 | ||
| 206 | kfree(accel_dev->accel_pci_dev.msix_entries.entries); | 273 | kfree(accel_dev->accel_pci_dev.msix_entries.entries); |
| 207 | for (i = 0; i < msix_num_entries; i++) | 274 | for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++) |
| 208 | kfree(*(names + i)); | 275 | kfree(*(names + i)); |
| 209 | kfree(names); | 276 | kfree(names); |
| 210 | } | 277 | } |
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile new file mode 100644 index 000000000000..85399fcbbad4 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile | |||
| @@ -0,0 +1,5 @@ | |||
| 1 | ccflags-y := -I$(src)/../qat_common | ||
| 2 | obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o | ||
| 3 | qat_dh895xccvf-objs := adf_drv.o \ | ||
| 4 | adf_isr.o \ | ||
| 5 | adf_dh895xccvf_hw_data.o | ||
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c new file mode 100644 index 000000000000..a9a27eff41fb --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | |||
| @@ -0,0 +1,172 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2015 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2015 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #include <adf_accel_devices.h> | ||
| 48 | #include <adf_pf2vf_msg.h> | ||
| 49 | #include <adf_common_drv.h> | ||
| 50 | #include "adf_dh895xccvf_hw_data.h" | ||
| 51 | #include "adf_drv.h" | ||
| 52 | |||
/* Device class descriptor shared by all DH895xCC VF instances; the
 * instances counter is bumped/dropped by init/clean below. */
static struct adf_hw_device_class dh895xcciov_class = {
	.name = ADF_DH895XCCVF_DEVICE_NAME,
	.type = DEV_DH895XCCVF,
	.instances = 0
};
| 58 | |||
/* Accelerator mask is fixed for the VF; the fuse value is ignored. */
static u32 get_accel_mask(u32 fuse)
{
	return ADF_DH895XCCIOV_ACCELERATORS_MASK;
}
| 63 | |||
/* Accel-engine mask is fixed for the VF; the fuse value is ignored. */
static u32 get_ae_mask(u32 fuse)
{
	return ADF_DH895XCCIOV_ACCELENGINES_MASK;
}
| 68 | |||
/* A VF always exposes exactly one accelerator. */
static u32 get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_DH895XCCIOV_MAX_ACCELERATORS;
}
| 73 | |||
/* A VF always exposes exactly one accel engine. */
static u32 get_num_aes(struct adf_hw_device_data *self)
{
	return ADF_DH895XCCIOV_MAX_ACCELENGINES;
}
| 78 | |||
/* BAR index holding the miscellaneous (PMISC) CSRs. */
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCCIOV_PMISC_BAR;
}
| 83 | |||
/* BAR index holding the transport (ETR) ring CSRs. */
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCCIOV_ETR_BAR;
}
| 88 | |||
/* All devices bound by this driver are the VF SKU. */
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_VF;
}
| 93 | |||
/* PF2VF mailbox CSR offset; same for every index i on this device. */
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_DH895XCCIOV_PF2VF_OFFSET;
}
| 98 | |||
/* VF interrupt mask CSR offset; same for every index i on this device. */
static u32 get_vintmsk_offset(u32 i)
{
	return ADF_DH895XCCIOV_VINTMSK_OFFSET;
}
| 103 | |||
/* No-op for hw_data hooks a VF does not implement; always succeeds. */
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
	return 0;
}
| 108 | |||
/* Void no-op counterpart of adf_vf_int_noop() for void hw_data hooks. */
static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
| 112 | |||
| 113 | static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) | ||
| 114 | { | ||
| 115 | u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | | ||
| 116 | (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); | ||
| 117 | |||
| 118 | if (adf_iov_putmsg(accel_dev, msg, 0)) { | ||
| 119 | dev_err(&GET_DEV(accel_dev), | ||
| 120 | "Failed to send Init event to PF\n"); | ||
| 121 | return -EFAULT; | ||
| 122 | } | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) | ||
| 127 | { | ||
| 128 | u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | | ||
| 129 | (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); | ||
| 130 | |||
| 131 | if (adf_iov_putmsg(accel_dev, msg, 0)) | ||
| 132 | dev_err(&GET_DEV(accel_dev), | ||
| 133 | "Failed to send Shutdown event to PF\n"); | ||
| 134 | } | ||
| 135 | |||
/*
 * Populate @hw_data with the DH895xCC VF operations and fixed parameters.
 * Hooks with no VF-side work (error correction, admin comms, arbiter,
 * interrupt enable) are wired to the no-op helpers above; init/shutdown
 * notifications go to the PF over the VF2PF mailbox.
 */
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &dh895xcciov_class;
	hw_data->instance_id = dh895xcciov_class.instances++;
	hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
	hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
	hw_data->free_irq = adf_vf_isr_resource_free;
	hw_data->enable_error_correction = adf_vf_void_noop;
	hw_data->init_admin_comms = adf_vf_int_noop;
	hw_data->exit_admin_comms = adf_vf_void_noop;
	/* "admin init" on a VF is just the Init notification to the PF */
	hw_data->send_admin_init = adf_vf2pf_init;
	hw_data->init_arb = adf_vf_int_noop;
	hw_data->exit_arb = adf_vf_void_noop;
	hw_data->disable_iov = adf_vf2pf_shutdown;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_sku = get_sku;
	hw_data->enable_ints = adf_vf_void_noop;
	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
| 168 | |||
/* Undo adf_init_hw_data_dh895xcciov(): drop this instance from the class
 * counter. The caller owns freeing @hw_data itself. */
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h new file mode 100644 index 000000000000..8f6babfef629 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2015 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2015 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
#ifndef ADF_DH895XVF_HW_DATA_H_
#define ADF_DH895XVF_HW_DATA_H_

/* Fixed resource layout of the DH895xCC virtual function */
#define ADF_DH895XCCIOV_PMISC_BAR 1
#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
#define ADF_DH895XCCIOV_ETR_BAR 0
#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1

/* PF-to-VF mailbox CSR (in the PMISC BAR) and its interrupt bit */
#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0)

/* VF interrupt source CSR: bundle and PF2VF source bits */
#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204
#define ADF_DH895XCC_VINTSOU_BUN BIT(0)
#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1)

/* VF interrupt mask CSR */
#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
#endif /* ADF_DH895XVF_HW_DATA_H_ */
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c new file mode 100644 index 000000000000..789426f21882 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | |||
| @@ -0,0 +1,393 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2014 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2014 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #include <linux/kernel.h> | ||
| 48 | #include <linux/module.h> | ||
| 49 | #include <linux/pci.h> | ||
| 50 | #include <linux/init.h> | ||
| 51 | #include <linux/types.h> | ||
| 52 | #include <linux/fs.h> | ||
| 53 | #include <linux/slab.h> | ||
| 54 | #include <linux/errno.h> | ||
| 55 | #include <linux/device.h> | ||
| 56 | #include <linux/dma-mapping.h> | ||
| 57 | #include <linux/platform_device.h> | ||
| 58 | #include <linux/workqueue.h> | ||
| 59 | #include <linux/io.h> | ||
| 60 | #include <adf_accel_devices.h> | ||
| 61 | #include <adf_common_drv.h> | ||
| 62 | #include <adf_cfg.h> | ||
| 63 | #include <adf_transport_access_macros.h> | ||
| 64 | #include "adf_dh895xccvf_hw_data.h" | ||
| 65 | #include "adf_drv.h" | ||
| 66 | |||
/* Name under which this driver registers with the PCI core. */
static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;

/* Expand to a pci_device_id entry for an Intel device with @device_id. */
#define ADF_SYSTEM_DEVICE(device_id) \
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}

/* This driver binds only to the DH895xCC virtual function. */
static const struct pci_device_id adf_pci_tbl[] = {
	ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
	{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = adf_driver_name,
	.probe = adf_probe,
	.remove = adf_remove,
};
| 87 | |||
| 88 | static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) | ||
| 89 | { | ||
| 90 | pci_release_regions(accel_dev->accel_pci_dev.pci_dev); | ||
| 91 | pci_disable_device(accel_dev->accel_pci_dev.pci_dev); | ||
| 92 | } | ||
| 93 | |||
/*
 * Tear down everything adf_probe() set up on @accel_dev except the PCI
 * regions/device state (adf_cleanup_pci_dev() handles those): unmap BARs,
 * release hw_device, remove the config table and debugfs dir, and remove
 * the device from the accel table.
 */
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_accel_dev *pf;
	int i;

	/* Unmap only the BARs that were actually iomapped in probe */
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		switch (accel_pci_dev->pci_dev->device) {
		case ADF_DH895XCCIOV_PCI_DEVICE_ID:
			adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
			break;
		default:
			break;
		}
		kfree(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	/* Detach from the PF before dropping the device from the table */
	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
	adf_devmgr_rm_dev(accel_dev, pf);
}
| 123 | |||
/*
 * Build the in-memory configuration for this VF: one crypto ("cy")
 * instance on bank 0 with fixed ring sizes and ring numbers, plus the
 * bank's interrupt-coalescing timer, then mark the device configured.
 *
 * Return: 0 on success, -EINVAL if any section or key could not be added.
 */
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	unsigned long val, bank = 0;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;

	/* cy0 lives on bank 0 */
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
					(void *)&bank, ADF_DEC))
		goto err;

	val = bank;
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
					(void *)&val, ADF_DEC))
		goto err;

	/* Ring sizes: 128 asym, 512 sym */
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);

	val = 128;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
					(void *)&val, ADF_DEC))
		goto err;

	val = 512;
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					key, (void *)&val, ADF_DEC))
		goto err;

	/* Ring numbers: asym tx/rx = 0/8, sym tx/rx = 2/10 */
	val = 0;
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					key, (void *)&val, ADF_DEC))
		goto err;

	val = 2;
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					key, (void *)&val, ADF_DEC))
		goto err;

	val = 8;
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					key, (void *)&val, ADF_DEC))
		goto err;

	val = 10;
	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					key, (void *)&val, ADF_DEC))
		goto err;

	/* Per-bank coalescing timer, stored in the Accelerator0 section */
	val = ADF_COALESCING_DEF_TIME;
	snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
		 (int)bank);
	if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
					key, (void *)&val, ADF_DEC))
		goto err;

	/* Exactly one crypto instance on this VF */
	val = 1;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
	return -EINVAL;
}
| 200 | |||
/*
 * Probe a DH895xCC VF: allocate the accel device, hook it to its PF in
 * the accel table, set up hw_data, debugfs, config table, PCI/DMA state
 * and BAR mappings, then configure, init and start the device.
 *
 * Error handling is a goto ladder that unwinds in reverse acquisition
 * order; adf_cleanup_accel() at out_err also unmaps any BARs already
 * iomapped, so the BAR loop can jump straight to out_err_free_reg.
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_dev *pf;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;

	switch (ent->device) {
	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
		break;
	default:
		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
		return -ENODEV;
	}

	/* Allocate on the device's NUMA node */
	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

	accel_dev->is_vf = true;
	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/* Add accel device to accel table */
	if (adf_devmgr_add_dev(accel_dev, pf)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		kfree(accel_dev);
		return -EFAULT;
	}
	INIT_LIST_HEAD(&accel_dev->crypto_list);

	accel_dev->owner = THIS_MODULE;
	/* Allocate and configure device configuration structure */
	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
			       dev_to_node(&pdev->dev));
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}
	accel_dev->hw_device = hw_data;
	switch (ent->device) {
	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
		adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
		break;
	default:
		ret = -ENODEV;
		goto out_err;
	}

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);

	/* Create dev top level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));

	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
	if (!accel_dev->debugfs_dir) {
		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
		ret = -EINVAL;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* enable PCI device */
	if (pci_enable_device(pdev)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* set dma identifier: prefer 64-bit DMA, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
			dev_err(&pdev->dev, "No usable DMA configuration\n");
			ret = -EFAULT;
			goto out_err_disable;
		} else {
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		}

	} else {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}

	if (pci_request_regions(pdev, adf_driver_name)) {
		ret = -EFAULT;
		goto out_err_disable;
	}

	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);
		if (!bar->base_addr)
			break;
		bar->size = pci_resource_len(pdev, bar_nr);
		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
		if (!bar->virt_addr) {
			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
			ret = -EFAULT;
			goto out_err_free_reg;
		}
	}
	pci_set_master(pdev);
	/* Completion for VF2PF request/response message exchange */
	init_completion(&accel_dev->vf.iov_msg_completion);

	ret = adf_dev_configure(accel_dev);
	if (ret)
		goto out_err_free_reg;

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_err_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_stop(accel_dev);
out_err_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err_free_reg:
	pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
	pci_disable_device(accel_pci_dev->pci_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	kfree(accel_dev);
	return ret;
}
| 352 | |||
/*
 * Remove the VF: stop and shut down the device, then undo everything
 * probe did (accel table, hw_data, debugfs, config, BARs, PCI state).
 */
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	/* A stop failure is logged but teardown continues regardless */
	if (adf_dev_stop(accel_dev))
		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");

	adf_dev_shutdown(accel_dev);
	adf_cleanup_accel(accel_dev);
	adf_cleanup_pci_dev(accel_dev);
	kfree(accel_dev);
}
| 369 | |||
| 370 | static int __init adfdrv_init(void) | ||
| 371 | { | ||
| 372 | request_module("intel_qat"); | ||
| 373 | |||
| 374 | if (pci_register_driver(&adf_driver)) { | ||
| 375 | pr_err("QAT: Driver initialization failed\n"); | ||
| 376 | return -EFAULT; | ||
| 377 | } | ||
| 378 | return 0; | ||
| 379 | } | ||
| 380 | |||
/* Module exit: unregister the PCI driver and drop the VF map entries. */
static void __exit adfdrv_release(void)
{
	pci_unregister_driver(&adf_driver);
	adf_clean_vf_map(true);
}
| 386 | |||
module_init(adfdrv_init);
module_exit(adfdrv_release);

/* Module metadata */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h index 55b7a8e48bad..e270e4a63d14 100644 --- a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h | |||
| @@ -44,64 +44,14 @@ | |||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 46 | */ | 46 | */ |
| 47 | #include <icp_qat_fw_init_admin.h> | 47 | #ifndef ADF_DH895xVF_DRV_H_ |
| 48 | #define ADF_DH895xVF_DRV_H_ | ||
| 48 | #include <adf_accel_devices.h> | 49 | #include <adf_accel_devices.h> |
| 49 | #include <adf_common_drv.h> | 50 | #include <adf_transport.h> |
| 50 | #include "adf_drv.h" | 51 | |
| 51 | 52 | void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); | |
| 52 | static struct service_hndl qat_admin; | 53 | void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); |
| 53 | 54 | int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); | |
| 54 | static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd) | 55 | void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); |
| 55 | { | 56 | void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); |
| 56 | struct adf_hw_device_data *hw_device = accel_dev->hw_device; | 57 | #endif |
| 57 | struct icp_qat_fw_init_admin_req req; | ||
| 58 | struct icp_qat_fw_init_admin_resp resp; | ||
| 59 | int i; | ||
| 60 | |||
| 61 | memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req)); | ||
| 62 | req.init_admin_cmd_id = cmd; | ||
| 63 | for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { | ||
| 64 | memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp)); | ||
| 65 | if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) || | ||
| 66 | resp.init_resp_hdr.status) | ||
| 67 | return -EFAULT; | ||
| 68 | } | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int qat_admin_start(struct adf_accel_dev *accel_dev) | ||
| 73 | { | ||
| 74 | return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); | ||
| 75 | } | ||
| 76 | |||
| 77 | static int qat_admin_event_handler(struct adf_accel_dev *accel_dev, | ||
| 78 | enum adf_event event) | ||
| 79 | { | ||
| 80 | int ret; | ||
| 81 | |||
| 82 | switch (event) { | ||
| 83 | case ADF_EVENT_START: | ||
| 84 | ret = qat_admin_start(accel_dev); | ||
| 85 | break; | ||
| 86 | case ADF_EVENT_STOP: | ||
| 87 | case ADF_EVENT_INIT: | ||
| 88 | case ADF_EVENT_SHUTDOWN: | ||
| 89 | default: | ||
| 90 | ret = 0; | ||
| 91 | } | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | int qat_admin_register(void) | ||
| 96 | { | ||
| 97 | memset(&qat_admin, 0, sizeof(struct service_hndl)); | ||
| 98 | qat_admin.event_hld = qat_admin_event_handler; | ||
| 99 | qat_admin.name = "qat_admin"; | ||
| 100 | qat_admin.admin = 1; | ||
| 101 | return adf_service_register(&qat_admin); | ||
| 102 | } | ||
| 103 | |||
| 104 | int qat_admin_unregister(void) | ||
| 105 | { | ||
| 106 | return adf_service_unregister(&qat_admin); | ||
| 107 | } | ||
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c new file mode 100644 index 000000000000..87c5d8adb125 --- /dev/null +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c | |||
| @@ -0,0 +1,258 @@ | |||
| 1 | /* | ||
| 2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | redistributing this file, you may do so under either license. | ||
| 4 | |||
| 5 | GPL LICENSE SUMMARY | ||
| 6 | Copyright(c) 2014 Intel Corporation. | ||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of version 2 of the GNU General Public License as | ||
| 9 | published by the Free Software Foundation. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, but | ||
| 12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | General Public License for more details. | ||
| 15 | |||
| 16 | Contact Information: | ||
| 17 | qat-linux@intel.com | ||
| 18 | |||
| 19 | BSD LICENSE | ||
| 20 | Copyright(c) 2014 Intel Corporation. | ||
| 21 | Redistribution and use in source and binary forms, with or without | ||
| 22 | modification, are permitted provided that the following conditions | ||
| 23 | are met: | ||
| 24 | |||
| 25 | * Redistributions of source code must retain the above copyright | ||
| 26 | notice, this list of conditions and the following disclaimer. | ||
| 27 | * Redistributions in binary form must reproduce the above copyright | ||
| 28 | notice, this list of conditions and the following disclaimer in | ||
| 29 | the documentation and/or other materials provided with the | ||
| 30 | distribution. | ||
| 31 | * Neither the name of Intel Corporation nor the names of its | ||
| 32 | contributors may be used to endorse or promote products derived | ||
| 33 | from this software without specific prior written permission. | ||
| 34 | |||
| 35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 46 | */ | ||
| 47 | #include <linux/kernel.h> | ||
| 48 | #include <linux/init.h> | ||
| 49 | #include <linux/types.h> | ||
| 50 | #include <linux/pci.h> | ||
| 51 | #include <linux/slab.h> | ||
| 52 | #include <linux/errno.h> | ||
| 53 | #include <linux/interrupt.h> | ||
| 54 | #include <adf_accel_devices.h> | ||
| 55 | #include <adf_common_drv.h> | ||
| 56 | #include <adf_cfg.h> | ||
| 57 | #include <adf_cfg_strings.h> | ||
| 58 | #include <adf_cfg_common.h> | ||
| 59 | #include <adf_transport_access_macros.h> | ||
| 60 | #include <adf_transport_internal.h> | ||
| 61 | #include <adf_pf2vf_msg.h> | ||
| 62 | #include "adf_drv.h" | ||
| 63 | #include "adf_dh895xccvf_hw_data.h" | ||
| 64 | |||
| 65 | static int adf_enable_msi(struct adf_accel_dev *accel_dev) | ||
| 66 | { | ||
| 67 | struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; | ||
| 68 | int stat = pci_enable_msi(pci_dev_info->pci_dev); | ||
| 69 | |||
| 70 | if (stat) { | ||
| 71 | dev_err(&GET_DEV(accel_dev), | ||
| 72 | "Failed to enable MSI interrupts\n"); | ||
| 73 | return stat; | ||
| 74 | } | ||
| 75 | |||
| 76 | accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); | ||
| 77 | if (!accel_dev->vf.irq_name) | ||
| 78 | return -ENOMEM; | ||
| 79 | |||
| 80 | return stat; | ||
| 81 | } | ||
| 82 | |||
/* Counterpart of adf_enable_msi(): free the irq name buffer and disable
 * MSI on the device. */
static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	kfree(accel_dev->vf.irq_name);
	pci_disable_msi(pdev);
}
| 90 | |||
/*
 * adf_pf2vf_bh_handler() - tasklet bottom half for PF-to-VF messages.
 * @data: the adf_accel_dev this tasklet was initialized with (passed as
 *        the tasklet argument in adf_setup_pf2vf_bh()).
 *
 * Reads the PF2VF message CSR and dispatches on the message type.  On a
 * recognized message the PF2VFINT bit is cleared to ack, and PF2VF
 * interrupts (disabled by adf_isr() before scheduling this tasklet) are
 * re-enabled.  On an unrecognized message the interrupt is deliberately
 * left disabled, as the error text states.
 */
static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	void __iomem *pmisc_bar_addr =
		(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
	u32 msg;

	/* Read the message from PF */
	msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);

	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
		/* Ignore legacy non-system (non-kernel) PF2VF messages */
		goto err;

	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
	case ADF_PF2VF_MSGTYPE_RESTARTING:
		dev_dbg(&GET_DEV(accel_dev),
			"Restarting msg received from PF 0x%x\n", msg);
		/* PF is going down: stop this VF device */
		adf_dev_stop(accel_dev);
		break;
	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
		dev_dbg(&GET_DEV(accel_dev),
			"Version resp received from PF 0x%x\n", msg);
		/* Record PF version/compatibility, then wake the waiter
		 * blocked on iov_msg_completion */
		accel_dev->vf.pf_version =
			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
		accel_dev->vf.compatible =
			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		complete(&accel_dev->vf.iov_msg_completion);
		break;
	default:
		goto err;
	}

	/* To ack, clear the PF2VFINT bit */
	msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	return;
err:
	dev_err(&GET_DEV(accel_dev),
		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
		msg);
}
| 138 | |||
| 139 | static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev) | ||
| 140 | { | ||
| 141 | tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet, | ||
| 142 | (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev); | ||
| 143 | |||
| 144 | mutex_init(&accel_dev->vf.vf2pf_lock); | ||
| 145 | return 0; | ||
| 146 | } | ||
| 147 | |||
| 148 | static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) | ||
| 149 | { | ||
| 150 | tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet); | ||
| 151 | tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet); | ||
| 152 | mutex_destroy(&accel_dev->vf.vf2pf_lock); | ||
| 153 | } | ||
| 154 | |||
/*
 * adf_isr() - hard IRQ handler for the VF's single MSI vector.
 * @irq:      the interrupt number (unused; the VF has one vector).
 * @privdata: the adf_accel_dev registered via request_irq().
 *
 * Determines the interrupt source from the VINTSOU CSR.  Both PF2VF and
 * ring-bundle interrupts are masked at the source and deferred to their
 * respective tasklets; any other cause is reported as IRQ_NONE.
 */
static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	void __iomem *pmisc_bar_addr =
		(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
	u32 v_int;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);

	/* Check for PF2VF interrupt */
	if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		return IRQ_HANDLED;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
					   0);
		/* Response ring processing happens in the bank tasklet */
		tasklet_hi_schedule(&bank->resp_handler);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
| 189 | |||
| 190 | static int adf_request_msi_irq(struct adf_accel_dev *accel_dev) | ||
| 191 | { | ||
| 192 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | ||
| 193 | unsigned int cpu; | ||
| 194 | int ret; | ||
| 195 | |||
| 196 | snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME, | ||
| 197 | "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn), | ||
| 198 | PCI_FUNC(pdev->devfn)); | ||
| 199 | ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name, | ||
| 200 | (void *)accel_dev); | ||
| 201 | if (ret) { | ||
| 202 | dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n", | ||
| 203 | accel_dev->vf.irq_name); | ||
| 204 | return ret; | ||
| 205 | } | ||
| 206 | cpu = accel_dev->accel_id % num_online_cpus(); | ||
| 207 | irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu)); | ||
| 208 | |||
| 209 | return ret; | ||
| 210 | } | ||
| 211 | |||
| 212 | static int adf_setup_bh(struct adf_accel_dev *accel_dev) | ||
| 213 | { | ||
| 214 | struct adf_etr_data *priv_data = accel_dev->transport; | ||
| 215 | |||
| 216 | tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler, | ||
| 217 | (unsigned long)priv_data->banks); | ||
| 218 | return 0; | ||
| 219 | } | ||
| 220 | |||
| 221 | static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) | ||
| 222 | { | ||
| 223 | struct adf_etr_data *priv_data = accel_dev->transport; | ||
| 224 | |||
| 225 | tasklet_disable(&priv_data->banks[0].resp_handler); | ||
| 226 | tasklet_kill(&priv_data->banks[0].resp_handler); | ||
| 227 | } | ||
| 228 | |||
/*
 * adf_vf_isr_resource_free() - tear down everything set up by
 * adf_vf_isr_resource_alloc(), in reverse order of allocation.
 *
 * NOTE(review): this assumes the IRQ was actually requested; running it on
 * a partially initialized device would free_irq() a vector that was never
 * requested — confirm callers only invoke it after a fully successful
 * adf_vf_isr_resource_alloc().
 */
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	/* Drop the CPU affinity hint before releasing the vector */
	irq_set_affinity_hint(pdev->irq, NULL);
	free_irq(pdev->irq, (void *)accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}
| 239 | |||
| 240 | int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) | ||
| 241 | { | ||
| 242 | if (adf_enable_msi(accel_dev)) | ||
| 243 | goto err_out; | ||
| 244 | |||
| 245 | if (adf_setup_pf2vf_bh(accel_dev)) | ||
| 246 | goto err_out; | ||
| 247 | |||
| 248 | if (adf_setup_bh(accel_dev)) | ||
| 249 | goto err_out; | ||
| 250 | |||
| 251 | if (adf_request_msi_irq(accel_dev)) | ||
| 252 | goto err_out; | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | err_out: | ||
| 256 | adf_vf_isr_resource_free(accel_dev); | ||
| 257 | return -EFAULT; | ||
| 258 | } | ||
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 397a500b3d8a..1c19e44c3146 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
| @@ -1516,7 +1516,7 @@ static int sahara_probe(struct platform_device *pdev) | |||
| 1516 | } | 1516 | } |
| 1517 | 1517 | ||
| 1518 | /* Allocate HW descriptors */ | 1518 | /* Allocate HW descriptors */ |
| 1519 | dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev, | 1519 | dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev, |
| 1520 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), | 1520 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), |
| 1521 | &dev->hw_phys_desc[0], GFP_KERNEL); | 1521 | &dev->hw_phys_desc[0], GFP_KERNEL); |
| 1522 | if (!dev->hw_desc[0]) { | 1522 | if (!dev->hw_desc[0]) { |
| @@ -1528,34 +1528,31 @@ static int sahara_probe(struct platform_device *pdev) | |||
| 1528 | sizeof(struct sahara_hw_desc); | 1528 | sizeof(struct sahara_hw_desc); |
| 1529 | 1529 | ||
| 1530 | /* Allocate space for iv and key */ | 1530 | /* Allocate space for iv and key */ |
| 1531 | dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, | 1531 | dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, |
| 1532 | &dev->key_phys_base, GFP_KERNEL); | 1532 | &dev->key_phys_base, GFP_KERNEL); |
| 1533 | if (!dev->key_base) { | 1533 | if (!dev->key_base) { |
| 1534 | dev_err(&pdev->dev, "Could not allocate memory for key\n"); | 1534 | dev_err(&pdev->dev, "Could not allocate memory for key\n"); |
| 1535 | err = -ENOMEM; | 1535 | return -ENOMEM; |
| 1536 | goto err_key; | ||
| 1537 | } | 1536 | } |
| 1538 | dev->iv_base = dev->key_base + AES_KEYSIZE_128; | 1537 | dev->iv_base = dev->key_base + AES_KEYSIZE_128; |
| 1539 | dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; | 1538 | dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128; |
| 1540 | 1539 | ||
| 1541 | /* Allocate space for context: largest digest + message length field */ | 1540 | /* Allocate space for context: largest digest + message length field */ |
| 1542 | dev->context_base = dma_alloc_coherent(&pdev->dev, | 1541 | dev->context_base = dmam_alloc_coherent(&pdev->dev, |
| 1543 | SHA256_DIGEST_SIZE + 4, | 1542 | SHA256_DIGEST_SIZE + 4, |
| 1544 | &dev->context_phys_base, GFP_KERNEL); | 1543 | &dev->context_phys_base, GFP_KERNEL); |
| 1545 | if (!dev->context_base) { | 1544 | if (!dev->context_base) { |
| 1546 | dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n"); | 1545 | dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n"); |
| 1547 | err = -ENOMEM; | 1546 | return -ENOMEM; |
| 1548 | goto err_key; | ||
| 1549 | } | 1547 | } |
| 1550 | 1548 | ||
| 1551 | /* Allocate space for HW links */ | 1549 | /* Allocate space for HW links */ |
| 1552 | dev->hw_link[0] = dma_alloc_coherent(&pdev->dev, | 1550 | dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev, |
| 1553 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | 1551 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), |
| 1554 | &dev->hw_phys_link[0], GFP_KERNEL); | 1552 | &dev->hw_phys_link[0], GFP_KERNEL); |
| 1555 | if (!dev->hw_link[0]) { | 1553 | if (!dev->hw_link[0]) { |
| 1556 | dev_err(&pdev->dev, "Could not allocate hw links\n"); | 1554 | dev_err(&pdev->dev, "Could not allocate hw links\n"); |
| 1557 | err = -ENOMEM; | 1555 | return -ENOMEM; |
| 1558 | goto err_link; | ||
| 1559 | } | 1556 | } |
| 1560 | for (i = 1; i < SAHARA_MAX_HW_LINK; i++) { | 1557 | for (i = 1; i < SAHARA_MAX_HW_LINK; i++) { |
| 1561 | dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] + | 1558 | dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] + |
| @@ -1572,15 +1569,14 @@ static int sahara_probe(struct platform_device *pdev) | |||
| 1572 | 1569 | ||
| 1573 | dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto"); | 1570 | dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto"); |
| 1574 | if (IS_ERR(dev->kthread)) { | 1571 | if (IS_ERR(dev->kthread)) { |
| 1575 | err = PTR_ERR(dev->kthread); | 1572 | return PTR_ERR(dev->kthread); |
| 1576 | goto err_link; | ||
| 1577 | } | 1573 | } |
| 1578 | 1574 | ||
| 1579 | init_completion(&dev->dma_completion); | 1575 | init_completion(&dev->dma_completion); |
| 1580 | 1576 | ||
| 1581 | err = clk_prepare_enable(dev->clk_ipg); | 1577 | err = clk_prepare_enable(dev->clk_ipg); |
| 1582 | if (err) | 1578 | if (err) |
| 1583 | goto err_link; | 1579 | return err; |
| 1584 | err = clk_prepare_enable(dev->clk_ahb); | 1580 | err = clk_prepare_enable(dev->clk_ahb); |
| 1585 | if (err) | 1581 | if (err) |
| 1586 | goto clk_ipg_disable; | 1582 | goto clk_ipg_disable; |
| @@ -1620,25 +1616,11 @@ static int sahara_probe(struct platform_device *pdev) | |||
| 1620 | return 0; | 1616 | return 0; |
| 1621 | 1617 | ||
| 1622 | err_algs: | 1618 | err_algs: |
| 1623 | dma_free_coherent(&pdev->dev, | ||
| 1624 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | ||
| 1625 | dev->hw_link[0], dev->hw_phys_link[0]); | ||
| 1626 | kthread_stop(dev->kthread); | 1619 | kthread_stop(dev->kthread); |
| 1627 | dev_ptr = NULL; | 1620 | dev_ptr = NULL; |
| 1628 | clk_disable_unprepare(dev->clk_ahb); | 1621 | clk_disable_unprepare(dev->clk_ahb); |
| 1629 | clk_ipg_disable: | 1622 | clk_ipg_disable: |
| 1630 | clk_disable_unprepare(dev->clk_ipg); | 1623 | clk_disable_unprepare(dev->clk_ipg); |
| 1631 | err_link: | ||
| 1632 | dma_free_coherent(&pdev->dev, | ||
| 1633 | 2 * AES_KEYSIZE_128, | ||
| 1634 | dev->key_base, dev->key_phys_base); | ||
| 1635 | dma_free_coherent(&pdev->dev, | ||
| 1636 | SHA256_DIGEST_SIZE, | ||
| 1637 | dev->context_base, dev->context_phys_base); | ||
| 1638 | err_key: | ||
| 1639 | dma_free_coherent(&pdev->dev, | ||
| 1640 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), | ||
| 1641 | dev->hw_desc[0], dev->hw_phys_desc[0]); | ||
| 1642 | 1624 | ||
| 1643 | return err; | 1625 | return err; |
| 1644 | } | 1626 | } |
| @@ -1647,16 +1629,6 @@ static int sahara_remove(struct platform_device *pdev) | |||
| 1647 | { | 1629 | { |
| 1648 | struct sahara_dev *dev = platform_get_drvdata(pdev); | 1630 | struct sahara_dev *dev = platform_get_drvdata(pdev); |
| 1649 | 1631 | ||
| 1650 | dma_free_coherent(&pdev->dev, | ||
| 1651 | SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), | ||
| 1652 | dev->hw_link[0], dev->hw_phys_link[0]); | ||
| 1653 | dma_free_coherent(&pdev->dev, | ||
| 1654 | 2 * AES_KEYSIZE_128, | ||
| 1655 | dev->key_base, dev->key_phys_base); | ||
| 1656 | dma_free_coherent(&pdev->dev, | ||
| 1657 | SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc), | ||
| 1658 | dev->hw_desc[0], dev->hw_phys_desc[0]); | ||
| 1659 | |||
| 1660 | kthread_stop(dev->kthread); | 1632 | kthread_stop(dev->kthread); |
| 1661 | 1633 | ||
| 1662 | sahara_unregister_algs(dev); | 1634 | sahara_unregister_algs(dev); |
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile new file mode 100644 index 000000000000..8f4c7a273141 --- /dev/null +++ b/drivers/crypto/sunxi-ss/Makefile | |||
| @@ -0,0 +1,2 @@ | |||
# Allwinner sun4i/sun7i Security System (SS) crypto accelerator driver
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c new file mode 100644 index 000000000000..e070c316e8b7 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | |||
| @@ -0,0 +1,542 @@ | |||
| 1 | /* | ||
| 2 | * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> | ||
| 5 | * | ||
| 6 | * This file add support for AES cipher with 128,192,256 bits | ||
| 7 | * keysize in CBC and ECB mode. | ||
| 8 | * Add support also for DES and 3DES in CBC and ECB mode. | ||
| 9 | * | ||
| 10 | * You could find the datasheet in Documentation/arm/sunxi/README | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or modify | ||
| 13 | * it under the terms of the GNU General Public License as published by | ||
| 14 | * the Free Software Foundation; either version 2 of the License, or | ||
| 15 | * (at your option) any later version. | ||
| 16 | */ | ||
| 17 | #include "sun4i-ss.h" | ||
| 18 | |||
/*
 * sun4i_ss_opti_poll() - optimized PIO cipher path for word-aligned SGs.
 * @areq: the ablkcipher request; ctx->mode must already hold the SS_CTL
 *        operation bits (algo, direction, keymode, SS_ENABLED).
 *
 * Fast path used by sun4i_ss_cipher_poll() when every src/dst scatterlist
 * entry has a length that is a multiple of 4, so whole 32-bit words can be
 * pushed to / pulled from the device FIFOs without any linearization
 * buffer.  Runs entirely under ss->slock with bottom halves disabled,
 * since the SS key/IV/CTL registers are a single shared resource.
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_bh(&ss->slock);

	/* Load the key words into the SS key registers */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Load the IV (at most 4 words); checked non-NULL above */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* Writing SS_CTL starts the operation */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* From here ileft/oleft count 32-bit words, not bytes */
	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;
	do {
		/* Feed as many words as the RX FIFO and current SG allow */
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		/* Refresh the FIFO occupancy from the status register */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		/* Drain as many result words as the TX FIFO holds */
		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (mo.length > 0);

	/* Write back the updated IV for chaining modes */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Clearing SS_CTL disables the engine again */
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}
| 123 | |||
/* Generic function that support SG with size not multiple of 4 */
/*
 * sun4i_ss_cipher_poll() - generic PIO cipher path.
 * @areq: the ablkcipher request; ctx->mode holds the SS_CTL operation bits.
 *
 * If every src/dst SG entry is word-aligned in length, delegates to the
 * faster sun4i_ss_opti_poll().  Otherwise it linearizes partial words
 * through the stack buffers buf (input) and bufo (output) so the device
 * FIFOs are always accessed in whole 32-bit words.  Runs under ss->slock
 * because the SS key/IV/CTL registers are a shared resource.
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo*/
	unsigned int obl = 0; /* length of data in bufo */

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_bh(&ss->slock);

	/* Load the key words into the SS key registers */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Load the IV (at most 4 words); checked non-NULL above */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* Writing SS_CTL starts the operation */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	/* Here ileft/oleft stay in bytes (unlike the optimized path) */
	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;

	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4byte word that we
			 * can read from current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * not enough consecutive bytes, so we need to
				 * linearize in buf. todo is in bytes
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all buf in one
				 * pass, so it is why we min() with rx_cnt
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		/* Refresh the FIFO occupancy from the status register */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt,
			todo, ob);

		/* No result words available yet: keep feeding input */
		if (tx_cnt == 0)
			continue;
		/* todo in 4bytes word */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * read obl bytes in bufo, we read at maximum for
			 * emptying the device
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * how many bytes we can copy ?
				 * no more than remaining SG size
				 * no more than remaining buffer
				 * no need to test against oleft
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	/* Write back the updated IV for chaining modes */
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Clearing SS_CTL disables the engine again */
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);

	return err;
}
| 315 | |||
| 316 | /* CBC AES */ | ||
| 317 | int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq) | ||
| 318 | { | ||
| 319 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 320 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 321 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 322 | |||
| 323 | rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | | ||
| 324 | op->keymode; | ||
| 325 | return sun4i_ss_cipher_poll(areq); | ||
| 326 | } | ||
| 327 | |||
| 328 | int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq) | ||
| 329 | { | ||
| 330 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 331 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 332 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 333 | |||
| 334 | rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | | ||
| 335 | op->keymode; | ||
| 336 | return sun4i_ss_cipher_poll(areq); | ||
| 337 | } | ||
| 338 | |||
| 339 | /* ECB AES */ | ||
| 340 | int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq) | ||
| 341 | { | ||
| 342 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 343 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 344 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 345 | |||
| 346 | rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | | ||
| 347 | op->keymode; | ||
| 348 | return sun4i_ss_cipher_poll(areq); | ||
| 349 | } | ||
| 350 | |||
| 351 | int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq) | ||
| 352 | { | ||
| 353 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 354 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 355 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 356 | |||
| 357 | rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | | ||
| 358 | op->keymode; | ||
| 359 | return sun4i_ss_cipher_poll(areq); | ||
| 360 | } | ||
| 361 | |||
| 362 | /* CBC DES */ | ||
| 363 | int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq) | ||
| 364 | { | ||
| 365 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 366 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 367 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 368 | |||
| 369 | rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | | ||
| 370 | op->keymode; | ||
| 371 | return sun4i_ss_cipher_poll(areq); | ||
| 372 | } | ||
| 373 | |||
| 374 | int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq) | ||
| 375 | { | ||
| 376 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 377 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 378 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 379 | |||
| 380 | rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | | ||
| 381 | op->keymode; | ||
| 382 | return sun4i_ss_cipher_poll(areq); | ||
| 383 | } | ||
| 384 | |||
| 385 | /* ECB DES */ | ||
| 386 | int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq) | ||
| 387 | { | ||
| 388 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 389 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 390 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 391 | |||
| 392 | rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | | ||
| 393 | op->keymode; | ||
| 394 | return sun4i_ss_cipher_poll(areq); | ||
| 395 | } | ||
| 396 | |||
| 397 | int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq) | ||
| 398 | { | ||
| 399 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 400 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 401 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 402 | |||
| 403 | rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | | ||
| 404 | op->keymode; | ||
| 405 | return sun4i_ss_cipher_poll(areq); | ||
| 406 | } | ||
| 407 | |||
| 408 | /* CBC 3DES */ | ||
| 409 | int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq) | ||
| 410 | { | ||
| 411 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 412 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 413 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 414 | |||
| 415 | rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | | ||
| 416 | op->keymode; | ||
| 417 | return sun4i_ss_cipher_poll(areq); | ||
| 418 | } | ||
| 419 | |||
| 420 | int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq) | ||
| 421 | { | ||
| 422 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 423 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 424 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 425 | |||
| 426 | rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | | ||
| 427 | op->keymode; | ||
| 428 | return sun4i_ss_cipher_poll(areq); | ||
| 429 | } | ||
| 430 | |||
| 431 | /* ECB 3DES */ | ||
| 432 | int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq) | ||
| 433 | { | ||
| 434 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 435 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 436 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 437 | |||
| 438 | rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | | ||
| 439 | op->keymode; | ||
| 440 | return sun4i_ss_cipher_poll(areq); | ||
| 441 | } | ||
| 442 | |||
| 443 | int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq) | ||
| 444 | { | ||
| 445 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | ||
| 446 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 447 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | ||
| 448 | |||
| 449 | rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | | ||
| 450 | op->keymode; | ||
| 451 | return sun4i_ss_cipher_poll(areq); | ||
| 452 | } | ||
| 453 | |||
| 454 | int sun4i_ss_cipher_init(struct crypto_tfm *tfm) | ||
| 455 | { | ||
| 456 | struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); | ||
| 457 | struct crypto_alg *alg = tfm->__crt_alg; | ||
| 458 | struct sun4i_ss_alg_template *algt; | ||
| 459 | |||
| 460 | memset(op, 0, sizeof(struct sun4i_tfm_ctx)); | ||
| 461 | |||
| 462 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); | ||
| 463 | op->ss = algt->ss; | ||
| 464 | |||
| 465 | tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx); | ||
| 466 | |||
| 467 | return 0; | ||
| 468 | } | ||
| 469 | |||
| 470 | /* check and set the AES key, prepare the mode to be used */ | ||
| 471 | int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 472 | unsigned int keylen) | ||
| 473 | { | ||
| 474 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 475 | struct sun4i_ss_ctx *ss = op->ss; | ||
| 476 | |||
| 477 | switch (keylen) { | ||
| 478 | case 128 / 8: | ||
| 479 | op->keymode = SS_AES_128BITS; | ||
| 480 | break; | ||
| 481 | case 192 / 8: | ||
| 482 | op->keymode = SS_AES_192BITS; | ||
| 483 | break; | ||
| 484 | case 256 / 8: | ||
| 485 | op->keymode = SS_AES_256BITS; | ||
| 486 | break; | ||
| 487 | default: | ||
| 488 | dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen); | ||
| 489 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 490 | return -EINVAL; | ||
| 491 | } | ||
| 492 | op->keylen = keylen; | ||
| 493 | memcpy(op->key, key, keylen); | ||
| 494 | return 0; | ||
| 495 | } | ||
| 496 | |||
| 497 | /* check and set the DES key, prepare the mode to be used */ | ||
| 498 | int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 499 | unsigned int keylen) | ||
| 500 | { | ||
| 501 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 502 | struct sun4i_ss_ctx *ss = op->ss; | ||
| 503 | u32 flags; | ||
| 504 | u32 tmp[DES_EXPKEY_WORDS]; | ||
| 505 | int ret; | ||
| 506 | |||
| 507 | if (unlikely(keylen != DES_KEY_SIZE)) { | ||
| 508 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); | ||
| 509 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 510 | return -EINVAL; | ||
| 511 | } | ||
| 512 | |||
| 513 | flags = crypto_ablkcipher_get_flags(tfm); | ||
| 514 | |||
| 515 | ret = des_ekey(tmp, key); | ||
| 516 | if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
| 517 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); | ||
| 518 | dev_dbg(ss->dev, "Weak key %u\n", keylen); | ||
| 519 | return -EINVAL; | ||
| 520 | } | ||
| 521 | |||
| 522 | op->keylen = keylen; | ||
| 523 | memcpy(op->key, key, keylen); | ||
| 524 | return 0; | ||
| 525 | } | ||
| 526 | |||
| 527 | /* check and set the 3DES key, prepare the mode to be used */ | ||
| 528 | int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 529 | unsigned int keylen) | ||
| 530 | { | ||
| 531 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | ||
| 532 | struct sun4i_ss_ctx *ss = op->ss; | ||
| 533 | |||
| 534 | if (unlikely(keylen != 3 * DES_KEY_SIZE)) { | ||
| 535 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); | ||
| 536 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 537 | return -EINVAL; | ||
| 538 | } | ||
| 539 | op->keylen = keylen; | ||
| 540 | memcpy(op->key, key, keylen); | ||
| 541 | return 0; | ||
| 542 | } | ||
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c new file mode 100644 index 000000000000..eab6fe227fa0 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c | |||
| @@ -0,0 +1,425 @@ | |||
| 1 | /* | ||
| 2 | * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> | ||
| 5 | * | ||
| 6 | * Core file which registers crypto algorithms supported by the SS. | ||
| 7 | * | ||
| 8 | * You could find a link for the datasheet in Documentation/arm/sunxi/README | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | */ | ||
| 15 | #include <linux/clk.h> | ||
| 16 | #include <linux/crypto.h> | ||
| 17 | #include <linux/io.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <crypto/scatterwalk.h> | ||
| 22 | #include <linux/scatterlist.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/delay.h> | ||
| 25 | #include <linux/reset.h> | ||
| 26 | |||
| 27 | #include "sun4i-ss.h" | ||
| 28 | |||
| 29 | static struct sun4i_ss_alg_template ss_algs[] = { | ||
| 30 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
| 31 | .mode = SS_OP_MD5, | ||
| 32 | .alg.hash = { | ||
| 33 | .init = sun4i_hash_init, | ||
| 34 | .update = sun4i_hash_update, | ||
| 35 | .final = sun4i_hash_final, | ||
| 36 | .finup = sun4i_hash_finup, | ||
| 37 | .digest = sun4i_hash_digest, | ||
| 38 | .export = sun4i_hash_export_md5, | ||
| 39 | .import = sun4i_hash_import_md5, | ||
| 40 | .halg = { | ||
| 41 | .digestsize = MD5_DIGEST_SIZE, | ||
| 42 | .base = { | ||
| 43 | .cra_name = "md5", | ||
| 44 | .cra_driver_name = "md5-sun4i-ss", | ||
| 45 | .cra_priority = 300, | ||
| 46 | .cra_alignmask = 3, | ||
| 47 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
| 48 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | ||
| 49 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | ||
| 50 | .cra_module = THIS_MODULE, | ||
| 51 | .cra_type = &crypto_ahash_type, | ||
| 52 | .cra_init = sun4i_hash_crainit | ||
| 53 | } | ||
| 54 | } | ||
| 55 | } | ||
| 56 | }, | ||
| 57 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
| 58 | .mode = SS_OP_SHA1, | ||
| 59 | .alg.hash = { | ||
| 60 | .init = sun4i_hash_init, | ||
| 61 | .update = sun4i_hash_update, | ||
| 62 | .final = sun4i_hash_final, | ||
| 63 | .finup = sun4i_hash_finup, | ||
| 64 | .digest = sun4i_hash_digest, | ||
| 65 | .export = sun4i_hash_export_sha1, | ||
| 66 | .import = sun4i_hash_import_sha1, | ||
| 67 | .halg = { | ||
| 68 | .digestsize = SHA1_DIGEST_SIZE, | ||
| 69 | .base = { | ||
| 70 | .cra_name = "sha1", | ||
| 71 | .cra_driver_name = "sha1-sun4i-ss", | ||
| 72 | .cra_priority = 300, | ||
| 73 | .cra_alignmask = 3, | ||
| 74 | .cra_flags = CRYPTO_ALG_TYPE_AHASH, | ||
| 75 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
| 76 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | ||
| 77 | .cra_module = THIS_MODULE, | ||
| 78 | .cra_type = &crypto_ahash_type, | ||
| 79 | .cra_init = sun4i_hash_crainit | ||
| 80 | } | ||
| 81 | } | ||
| 82 | } | ||
| 83 | }, | ||
| 84 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 85 | .alg.crypto = { | ||
| 86 | .cra_name = "cbc(aes)", | ||
| 87 | .cra_driver_name = "cbc-aes-sun4i-ss", | ||
| 88 | .cra_priority = 300, | ||
| 89 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 90 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 91 | .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), | ||
| 92 | .cra_module = THIS_MODULE, | ||
| 93 | .cra_alignmask = 3, | ||
| 94 | .cra_type = &crypto_ablkcipher_type, | ||
| 95 | .cra_init = sun4i_ss_cipher_init, | ||
| 96 | .cra_ablkcipher = { | ||
| 97 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 98 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 99 | .ivsize = AES_BLOCK_SIZE, | ||
| 100 | .setkey = sun4i_ss_aes_setkey, | ||
| 101 | .encrypt = sun4i_ss_cbc_aes_encrypt, | ||
| 102 | .decrypt = sun4i_ss_cbc_aes_decrypt, | ||
| 103 | } | ||
| 104 | } | ||
| 105 | }, | ||
| 106 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 107 | .alg.crypto = { | ||
| 108 | .cra_name = "ecb(aes)", | ||
| 109 | .cra_driver_name = "ecb-aes-sun4i-ss", | ||
| 110 | .cra_priority = 300, | ||
| 111 | .cra_blocksize = AES_BLOCK_SIZE, | ||
| 112 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 113 | .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), | ||
| 114 | .cra_module = THIS_MODULE, | ||
| 115 | .cra_alignmask = 3, | ||
| 116 | .cra_type = &crypto_ablkcipher_type, | ||
| 117 | .cra_init = sun4i_ss_cipher_init, | ||
| 118 | .cra_ablkcipher = { | ||
| 119 | .min_keysize = AES_MIN_KEY_SIZE, | ||
| 120 | .max_keysize = AES_MAX_KEY_SIZE, | ||
| 121 | .ivsize = AES_BLOCK_SIZE, | ||
| 122 | .setkey = sun4i_ss_aes_setkey, | ||
| 123 | .encrypt = sun4i_ss_ecb_aes_encrypt, | ||
| 124 | .decrypt = sun4i_ss_ecb_aes_decrypt, | ||
| 125 | } | ||
| 126 | } | ||
| 127 | }, | ||
| 128 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 129 | .alg.crypto = { | ||
| 130 | .cra_name = "cbc(des)", | ||
| 131 | .cra_driver_name = "cbc-des-sun4i-ss", | ||
| 132 | .cra_priority = 300, | ||
| 133 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 134 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 135 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | ||
| 136 | .cra_module = THIS_MODULE, | ||
| 137 | .cra_alignmask = 3, | ||
| 138 | .cra_type = &crypto_ablkcipher_type, | ||
| 139 | .cra_init = sun4i_ss_cipher_init, | ||
| 140 | .cra_u.ablkcipher = { | ||
| 141 | .min_keysize = DES_KEY_SIZE, | ||
| 142 | .max_keysize = DES_KEY_SIZE, | ||
| 143 | .ivsize = DES_BLOCK_SIZE, | ||
| 144 | .setkey = sun4i_ss_des_setkey, | ||
| 145 | .encrypt = sun4i_ss_cbc_des_encrypt, | ||
| 146 | .decrypt = sun4i_ss_cbc_des_decrypt, | ||
| 147 | } | ||
| 148 | } | ||
| 149 | }, | ||
| 150 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 151 | .alg.crypto = { | ||
| 152 | .cra_name = "ecb(des)", | ||
| 153 | .cra_driver_name = "ecb-des-sun4i-ss", | ||
| 154 | .cra_priority = 300, | ||
| 155 | .cra_blocksize = DES_BLOCK_SIZE, | ||
| 156 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 157 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | ||
| 158 | .cra_module = THIS_MODULE, | ||
| 159 | .cra_alignmask = 3, | ||
| 160 | .cra_type = &crypto_ablkcipher_type, | ||
| 161 | .cra_init = sun4i_ss_cipher_init, | ||
| 162 | .cra_u.ablkcipher = { | ||
| 163 | .min_keysize = DES_KEY_SIZE, | ||
| 164 | .max_keysize = DES_KEY_SIZE, | ||
| 165 | .setkey = sun4i_ss_des_setkey, | ||
| 166 | .encrypt = sun4i_ss_ecb_des_encrypt, | ||
| 167 | .decrypt = sun4i_ss_ecb_des_decrypt, | ||
| 168 | } | ||
| 169 | } | ||
| 170 | }, | ||
| 171 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 172 | .alg.crypto = { | ||
| 173 | .cra_name = "cbc(des3_ede)", | ||
| 174 | .cra_driver_name = "cbc-des3-sun4i-ss", | ||
| 175 | .cra_priority = 300, | ||
| 176 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 177 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 178 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | ||
| 179 | .cra_module = THIS_MODULE, | ||
| 180 | .cra_alignmask = 3, | ||
| 181 | .cra_type = &crypto_ablkcipher_type, | ||
| 182 | .cra_init = sun4i_ss_cipher_init, | ||
| 183 | .cra_u.ablkcipher = { | ||
| 184 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
| 185 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
| 186 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 187 | .setkey = sun4i_ss_des3_setkey, | ||
| 188 | .encrypt = sun4i_ss_cbc_des3_encrypt, | ||
| 189 | .decrypt = sun4i_ss_cbc_des3_decrypt, | ||
| 190 | } | ||
| 191 | } | ||
| 192 | }, | ||
| 193 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 194 | .alg.crypto = { | ||
| 195 | .cra_name = "ecb(des3_ede)", | ||
| 196 | .cra_driver_name = "ecb-des3-sun4i-ss", | ||
| 197 | .cra_priority = 300, | ||
| 198 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
| 199 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
| 200 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | ||
| 201 | .cra_module = THIS_MODULE, | ||
| 202 | .cra_alignmask = 3, | ||
| 203 | .cra_type = &crypto_ablkcipher_type, | ||
| 204 | .cra_init = sun4i_ss_cipher_init, | ||
| 205 | .cra_u.ablkcipher = { | ||
| 206 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
| 207 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
| 208 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 209 | .setkey = sun4i_ss_des3_setkey, | ||
| 210 | .encrypt = sun4i_ss_ecb_des3_encrypt, | ||
| 211 | .decrypt = sun4i_ss_ecb_des3_decrypt, | ||
| 212 | } | ||
| 213 | } | ||
| 214 | }, | ||
| 215 | }; | ||
| 216 | |||
/*
 * Probe the Security System: map the MMIO region, acquire the "mod" and
 * "ahb" clocks and the optional reset line, bring the block up, print
 * the die ID, then register every algorithm in ss_algs[].  On failure
 * the goto chain unwinds the already-acquired resources in reverse
 * order (devm_* resources are released automatically).
 */
static int sun4i_ss_probe(struct platform_device *pdev)
{
	struct resource *res;
	u32 v;
	int err, i;
	unsigned long cr;
	/* clock-rate requirements from the datasheet */
	const unsigned long cr_ahb = 24 * 1000 * 1000;
	const unsigned long cr_mod = 150 * 1000 * 1000;
	struct sun4i_ss_ctx *ss;

	if (!pdev->dev.of_node)
		return -ENODEV;

	ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
	if (!ss)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ss->base)) {
		dev_err(&pdev->dev, "Cannot request MMIO\n");
		return PTR_ERR(ss->base);
	}

	ss->ssclk = devm_clk_get(&pdev->dev, "mod");
	if (IS_ERR(ss->ssclk)) {
		err = PTR_ERR(ss->ssclk);
		dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
		return err;
	}
	dev_dbg(&pdev->dev, "clock ss acquired\n");

	ss->busclk = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(ss->busclk)) {
		err = PTR_ERR(ss->busclk);
		dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
		return err;
	}
	dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");

	/* The reset line is optional: only probe deferral is fatal here */
	ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
	if (IS_ERR(ss->reset)) {
		if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
			return PTR_ERR(ss->reset);
		dev_info(&pdev->dev, "no reset control found\n");
		ss->reset = NULL;
	}

	/* Enable both clocks */
	err = clk_prepare_enable(ss->busclk);
	if (err != 0) {
		dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
		return err;
	}
	err = clk_prepare_enable(ss->ssclk);
	if (err != 0) {
		dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
		goto error_ssclk;
	}

	/*
	 * Check that clock have the correct rates given in the datasheet
	 * Try to set the clock to the maximum allowed
	 */
	err = clk_set_rate(ss->ssclk, cr_mod);
	if (err != 0) {
		dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
		goto error_clk;
	}

	/* Deassert reset if we have a reset control */
	if (ss->reset) {
		err = reset_control_deassert(ss->reset);
		if (err) {
			dev_err(&pdev->dev, "Cannot deassert reset control\n");
			goto error_clk;
		}
	}

	/*
	 * The only impact on clocks below requirement are bad performance,
	 * so do not print "errors"
	 * warn on Overclocked clocks
	 */
	cr = clk_get_rate(ss->busclk);
	if (cr >= cr_ahb)
		dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
			cr, cr / 1000000, cr_ahb);
	else
		dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
			 cr, cr / 1000000, cr_ahb);

	/*
	 * mod clock: exactly cr_mod is the quiet case; below it we still
	 * warn (performance), above it we warn (overclock).
	 */
	cr = clk_get_rate(ss->ssclk);
	if (cr <= cr_mod)
		if (cr < cr_mod)
			dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
				 cr, cr / 1000000, cr_mod);
		else
			dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
				cr, cr / 1000000, cr_mod);
	else
		dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
			 cr, cr / 1000000, cr_mod);

	/*
	 * Datasheet named it "Die Bonding ID"
	 * I expect to be a sort of Security System Revision number.
	 * Since the A80 seems to have an other version of SS
	 * this info could be useful
	 */
	writel(SS_ENABLED, ss->base + SS_CTL);
	v = readl(ss->base + SS_CTL);
	v >>= 16;
	v &= 0x07;
	dev_info(&pdev->dev, "Die ID %d\n", v);
	writel(0, ss->base + SS_CTL);

	ss->dev = &pdev->dev;

	spin_lock_init(&ss->slock);

	/* register every template; on failure unwind what was registered */
	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
		ss_algs[i].ss = ss;
		switch (ss_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = crypto_register_alg(&ss_algs[i].alg.crypto);
			if (err != 0) {
				dev_err(ss->dev, "Fail to register %s\n",
					ss_algs[i].alg.crypto.cra_name);
				goto error_alg;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = crypto_register_ahash(&ss_algs[i].alg.hash);
			if (err != 0) {
				dev_err(ss->dev, "Fail to register %s\n",
					ss_algs[i].alg.hash.halg.base.cra_name);
				goto error_alg;
			}
			break;
		}
	}
	platform_set_drvdata(pdev, ss);
	return 0;
error_alg:
	/* entry i failed to register: unregister entries i-1..0 */
	i--;
	for (; i >= 0; i--) {
		switch (ss_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&ss_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&ss_algs[i].alg.hash);
			break;
		}
	}
	if (ss->reset)
		reset_control_assert(ss->reset);
error_clk:
	clk_disable_unprepare(ss->ssclk);
error_ssclk:
	clk_disable_unprepare(ss->busclk);
	return err;
}
| 381 | |||
| 382 | static int sun4i_ss_remove(struct platform_device *pdev) | ||
| 383 | { | ||
| 384 | int i; | ||
| 385 | struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev); | ||
| 386 | |||
| 387 | for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { | ||
| 388 | switch (ss_algs[i].type) { | ||
| 389 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
| 390 | crypto_unregister_alg(&ss_algs[i].alg.crypto); | ||
| 391 | break; | ||
| 392 | case CRYPTO_ALG_TYPE_AHASH: | ||
| 393 | crypto_unregister_ahash(&ss_algs[i].alg.hash); | ||
| 394 | break; | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | writel(0, ss->base + SS_CTL); | ||
| 399 | if (ss->reset) | ||
| 400 | reset_control_assert(ss->reset); | ||
| 401 | clk_disable_unprepare(ss->busclk); | ||
| 402 | clk_disable_unprepare(ss->ssclk); | ||
| 403 | return 0; | ||
| 404 | } | ||
| 405 | |||
/* Devicetree match: the Allwinner A10/A20 Security System node. */
static const struct of_device_id a20ss_crypto_of_match_table[] = {
	{ .compatible = "allwinner,sun4i-a10-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);

/* Platform driver glue: probe/remove registered under "sun4i-ss". */
static struct platform_driver sun4i_ss_driver = {
	.probe = sun4i_ss_probe,
	.remove = sun4i_ss_remove,
	.driver = {
		.name = "sun4i-ss",
		.of_match_table = a20ss_crypto_of_match_table,
	},
};

module_platform_driver(sun4i_ss_driver);

MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c new file mode 100644 index 000000000000..ff8031498809 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c | |||
| @@ -0,0 +1,492 @@ | |||
| 1 | /* | ||
| 2 | * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> | ||
| 5 | * | ||
| 6 | * This file add support for MD5 and SHA1. | ||
| 7 | * | ||
| 8 | * You could find the datasheet in Documentation/arm/sunxi/README | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | */ | ||
| 15 | #include "sun4i-ss.h" | ||
| 16 | #include <linux/scatterlist.h> | ||
| 17 | |||
| 18 | /* This is a totally arbitrary value */ | ||
| 19 | #define SS_TIMEOUT 100 | ||
| 20 | |||
| 21 | int sun4i_hash_crainit(struct crypto_tfm *tfm) | ||
| 22 | { | ||
| 23 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
| 24 | sizeof(struct sun4i_req_ctx)); | ||
| 25 | return 0; | ||
| 26 | } | ||
| 27 | |||
| 28 | /* sun4i_hash_init: initialize request context */ | ||
| 29 | int sun4i_hash_init(struct ahash_request *areq) | ||
| 30 | { | ||
| 31 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 32 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
| 33 | struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); | ||
| 34 | struct sun4i_ss_alg_template *algt; | ||
| 35 | struct sun4i_ss_ctx *ss; | ||
| 36 | |||
| 37 | memset(op, 0, sizeof(struct sun4i_req_ctx)); | ||
| 38 | |||
| 39 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); | ||
| 40 | ss = algt->ss; | ||
| 41 | op->ss = algt->ss; | ||
| 42 | op->mode = algt->mode; | ||
| 43 | |||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | int sun4i_hash_export_md5(struct ahash_request *areq, void *out) | ||
| 48 | { | ||
| 49 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 50 | struct md5_state *octx = out; | ||
| 51 | int i; | ||
| 52 | |||
| 53 | octx->byte_count = op->byte_count + op->len; | ||
| 54 | |||
| 55 | memcpy(octx->block, op->buf, op->len); | ||
| 56 | |||
| 57 | if (op->byte_count > 0) { | ||
| 58 | for (i = 0; i < 4; i++) | ||
| 59 | octx->hash[i] = op->hash[i]; | ||
| 60 | } else { | ||
| 61 | octx->hash[0] = SHA1_H0; | ||
| 62 | octx->hash[1] = SHA1_H1; | ||
| 63 | octx->hash[2] = SHA1_H2; | ||
| 64 | octx->hash[3] = SHA1_H3; | ||
| 65 | } | ||
| 66 | |||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | int sun4i_hash_import_md5(struct ahash_request *areq, const void *in) | ||
| 71 | { | ||
| 72 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 73 | const struct md5_state *ictx = in; | ||
| 74 | int i; | ||
| 75 | |||
| 76 | sun4i_hash_init(areq); | ||
| 77 | |||
| 78 | op->byte_count = ictx->byte_count & ~0x3F; | ||
| 79 | op->len = ictx->byte_count & 0x3F; | ||
| 80 | |||
| 81 | memcpy(op->buf, ictx->block, op->len); | ||
| 82 | |||
| 83 | for (i = 0; i < 4; i++) | ||
| 84 | op->hash[i] = ictx->hash[i]; | ||
| 85 | |||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | int sun4i_hash_export_sha1(struct ahash_request *areq, void *out) | ||
| 90 | { | ||
| 91 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 92 | struct sha1_state *octx = out; | ||
| 93 | int i; | ||
| 94 | |||
| 95 | octx->count = op->byte_count + op->len; | ||
| 96 | |||
| 97 | memcpy(octx->buffer, op->buf, op->len); | ||
| 98 | |||
| 99 | if (op->byte_count > 0) { | ||
| 100 | for (i = 0; i < 5; i++) | ||
| 101 | octx->state[i] = op->hash[i]; | ||
| 102 | } else { | ||
| 103 | octx->state[0] = SHA1_H0; | ||
| 104 | octx->state[1] = SHA1_H1; | ||
| 105 | octx->state[2] = SHA1_H2; | ||
| 106 | octx->state[3] = SHA1_H3; | ||
| 107 | octx->state[4] = SHA1_H4; | ||
| 108 | } | ||
| 109 | |||
| 110 | return 0; | ||
| 111 | } | ||
| 112 | |||
| 113 | int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) | ||
| 114 | { | ||
| 115 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 116 | const struct sha1_state *ictx = in; | ||
| 117 | int i; | ||
| 118 | |||
| 119 | sun4i_hash_init(areq); | ||
| 120 | |||
| 121 | op->byte_count = ictx->count & ~0x3F; | ||
| 122 | op->len = ictx->count & 0x3F; | ||
| 123 | |||
| 124 | memcpy(op->buf, ictx->buffer, op->len); | ||
| 125 | |||
| 126 | for (i = 0; i < 5; i++) | ||
| 127 | op->hash[i] = ictx->state[i]; | ||
| 128 | |||
| 129 | return 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | /* | ||
| 133 | * sun4i_hash_update: update hash engine | ||
| 134 | * | ||
| 135 | * Could be used for both SHA1 and MD5 | ||
| 136 | * Write data by step of 32bits and put then in the SS. | ||
| 137 | * | ||
| 138 | * Since we cannot leave partial data and hash state in the engine, | ||
| 139 | * we need to get the hash state at the end of this function. | ||
| 140 | * We can get the hash state every 64 bytes | ||
| 141 | * | ||
| 142 | * So the first work is to get the number of bytes to write to SS modulo 64 | ||
| 143 | * The extra bytes will go to a temporary buffer op->buf storing op->len bytes | ||
| 144 | * | ||
| 145 | * So at the begin of update() | ||
| 146 | * if op->len + areq->nbytes < 64 | ||
| 147 | * => all data will be written to wait buffer (op->buf) and end=0 | ||
| 148 | * if not, write all data from op->buf to the device and position end to | ||
| 149 | * complete to 64bytes | ||
| 150 | * | ||
| 151 | * example 1: | ||
| 152 | * update1 60o => op->len=60 | ||
| 153 | * update2 60o => need one more word to have 64 bytes | ||
| 154 | * end=4 | ||
| 155 | * so write all data from op->buf and one word of SGs | ||
| 156 | * write remaining data in op->buf | ||
| 157 | * final state op->len=56 | ||
| 158 | */ | ||
int sun4i_hash_update(struct ahash_request *areq)
{
	u32 v, ivmode = 0;
	unsigned int i = 0;
	/*
	 * i is the total bytes read from SGs, to be compared to areq->nbytes
	 * i is important because we cannot rely on SG length since the sum of
	 * SG->length could be greater than areq->nbytes
	 */

	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct sun4i_ss_ctx *ss = op->ss;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	unsigned int in_i = 0; /* advancement in the current SG */
	unsigned int end;
	/*
	 * end is the position when we need to stop writing to the device,
	 * to be compared to i
	 */
	int in_r, err = 0;
	unsigned int todo;
	u32 spaces, rx_cnt = SS_RX_DEFAULT;
	size_t copied = 0;
	struct sg_mapping_iter mi;

	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
		__func__, crypto_tfm_alg_name(areq->base.tfm),
		op->byte_count, areq->nbytes, op->mode,
		op->len, op->hash[0]);

	if (areq->nbytes == 0)
		return 0;

	/* protect against overflow */
	if (areq->nbytes > UINT_MAX - op->len) {
		dev_err(ss->dev, "Cannot process too large request\n");
		return -EINVAL;
	}

	/* not enough for a full 64-byte block: just buffer and return */
	if (op->len + areq->nbytes < 64) {
		/* linearize data to op->buf */
		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					    op->buf + op->len, areq->nbytes, 0);
		op->len += copied;
		return 0;
	}

	/* number of bytes to push so that buffered+pushed is 64-aligned */
	end = ((areq->nbytes + op->len) / 64) * 64 - op->len;

	if (end > areq->nbytes || areq->nbytes - end > 63) {
		dev_err(ss->dev, "ERROR: Bound error %u %u\n",
			end, areq->nbytes);
		return -EINVAL;
	}

	spin_lock_bh(&ss->slock);

	/*
	 * if some data have been processed before,
	 * we need to restore the partial hash state
	 */
	if (op->byte_count > 0) {
		ivmode = SS_IV_ARBITRARY;
		/*
		 * NOTE(review): always writes 5 IV words even for MD5
		 * (4-word state); presumably the extra word is ignored in
		 * MD5 mode — confirm against the datasheet.
		 */
		for (i = 0; i < 5; i++)
			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
	}
	/* Enable the device */
	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);

	i = 0;
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	in_i = 0;

	do {
		/*
		 * we need to linearize in two case:
		 * - the buffer is already used
		 * - the SG does not have enough byte remaining ( < 4)
		 */
		if (op->len > 0 || (mi.length - in_i) < 4) {
			/*
			 * if we have entered here we have two reason to stop
			 * - the buffer is full
			 * - reach the end
			 */
			while (op->len < 64 && i < end) {
				/* how many bytes we can read from current SG */
				in_r = min3(mi.length - in_i, end - i,
					    64 - op->len);
				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
				op->len += in_r;
				i += in_r;
				in_i += in_r;
				if (in_i == mi.length) {
					sg_miter_next(&mi);
					in_i = 0;
				}
			}
			/* flush the buffer only on a whole-word boundary */
			if (op->len > 3 && (op->len % 4) == 0) {
				/* write buf to the device */
				writesl(ss->base + SS_RXFIFO, op->buf,
					op->len / 4);
				op->byte_count += op->len;
				op->len = 0;
			}
		}
		/* fast path: stream whole words straight from the SG */
		if (mi.length - in_i > 3 && i < end) {
			/* how many bytes we can read from current SG */
			in_r = min3(mi.length - in_i, areq->nbytes - i,
				    ((mi.length - in_i) / 4) * 4);
			/* how many bytes we can write in the device*/
			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
			op->byte_count += todo * 4;
			i += todo * 4;
			in_i += todo * 4;
			rx_cnt -= todo;
			/* FIFO credit exhausted: re-read free space count */
			if (rx_cnt == 0) {
				spaces = readl(ss->base + SS_FCSR);
				rx_cnt = SS_RXFIFO_SPACES(spaces);
			}
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	} while (i < end);
	/* final linear */
	if ((areq->nbytes - i) < 64) {
		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
			/* how many bytes we can read from current SG */
			in_r = min3(mi.length - in_i, areq->nbytes - i,
				    64 - op->len);
			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
			op->len += in_r;
			i += in_r;
			in_i += in_r;
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	}

	sg_miter_stop(&mi);

	/* ask the engine to finish the pending block, then poll completion */
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
	if (i >= SS_TIMEOUT) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/* get the partial hash only if something was written */
	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}
| 330 | |||
| 331 | /* | ||
| 332 | * sun4i_hash_final: finalize hashing operation | ||
| 333 | * | ||
| 334 | * If we have some remaining bytes, we write them. | ||
| 335 | * Then ask the SS for finalizing the hashing operation | ||
| 336 | * | ||
| 337 | * I do not check RX FIFO size in this function since the size is 32 | ||
| 338 | * after each enabling and this function neither write more than 32 words. | ||
| 339 | */ | ||
| 340 | int sun4i_hash_final(struct ahash_request *areq) | ||
| 341 | { | ||
| 342 | u32 v, ivmode = 0; | ||
| 343 | unsigned int i; | ||
| 344 | unsigned int j = 0; | ||
| 345 | int zeros, err = 0; | ||
| 346 | unsigned int index, padlen; | ||
| 347 | __be64 bits; | ||
| 348 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | ||
| 349 | struct sun4i_ss_ctx *ss = op->ss; | ||
| 350 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
| 351 | u32 bf[32]; | ||
| 352 | u32 wb = 0; | ||
| 353 | unsigned int nwait, nbw = 0; | ||
| 354 | |||
| 355 | dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x", | ||
| 356 | __func__, op->byte_count, areq->nbytes, op->mode, | ||
| 357 | op->len, op->hash[0]); | ||
| 358 | |||
| 359 | spin_lock_bh(&ss->slock); | ||
| 360 | |||
| 361 | /* | ||
| 362 | * if we have already written something, | ||
| 363 | * restore the partial hash state | ||
| 364 | */ | ||
| 365 | if (op->byte_count > 0) { | ||
| 366 | ivmode = SS_IV_ARBITRARY; | ||
| 367 | for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) | ||
| 368 | writel(op->hash[i], ss->base + SS_IV0 + i * 4); | ||
| 369 | } | ||
| 370 | writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); | ||
| 371 | |||
| 372 | /* write the remaining words of the wait buffer */ | ||
| 373 | if (op->len > 0) { | ||
| 374 | nwait = op->len / 4; | ||
| 375 | if (nwait > 0) { | ||
| 376 | writesl(ss->base + SS_RXFIFO, op->buf, nwait); | ||
| 377 | op->byte_count += 4 * nwait; | ||
| 378 | } | ||
| 379 | nbw = op->len - 4 * nwait; | ||
| 380 | wb = *(u32 *)(op->buf + nwait * 4); | ||
| 381 | wb &= (0xFFFFFFFF >> (4 - nbw) * 8); | ||
| 382 | } | ||
| 383 | |||
| 384 | /* write the remaining bytes of the nbw buffer */ | ||
| 385 | if (nbw > 0) { | ||
| 386 | wb |= ((1 << 7) << (nbw * 8)); | ||
| 387 | bf[j++] = wb; | ||
| 388 | } else { | ||
| 389 | bf[j++] = 1 << 7; | ||
| 390 | } | ||
| 391 | |||
| 392 | /* | ||
| 393 | * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) | ||
| 394 | * I take the operations from other MD5/SHA1 implementations | ||
| 395 | */ | ||
| 396 | |||
| 397 | /* we have already send 4 more byte of which nbw data */ | ||
| 398 | if (op->mode == SS_OP_MD5) { | ||
| 399 | index = (op->byte_count + 4) & 0x3f; | ||
| 400 | op->byte_count += nbw; | ||
| 401 | if (index > 56) | ||
| 402 | zeros = (120 - index) / 4; | ||
| 403 | else | ||
| 404 | zeros = (56 - index) / 4; | ||
| 405 | } else { | ||
| 406 | op->byte_count += nbw; | ||
| 407 | index = op->byte_count & 0x3f; | ||
| 408 | padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); | ||
| 409 | zeros = (padlen - 1) / 4; | ||
| 410 | } | ||
| 411 | |||
| 412 | memset(bf + j, 0, 4 * zeros); | ||
| 413 | j += zeros; | ||
| 414 | |||
| 415 | /* write the length of data */ | ||
| 416 | if (op->mode == SS_OP_SHA1) { | ||
| 417 | bits = cpu_to_be64(op->byte_count << 3); | ||
| 418 | bf[j++] = bits & 0xffffffff; | ||
| 419 | bf[j++] = (bits >> 32) & 0xffffffff; | ||
| 420 | } else { | ||
| 421 | bf[j++] = (op->byte_count << 3) & 0xffffffff; | ||
| 422 | bf[j++] = (op->byte_count >> 29) & 0xffffffff; | ||
| 423 | } | ||
| 424 | writesl(ss->base + SS_RXFIFO, bf, j); | ||
| 425 | |||
| 426 | /* Tell the SS to stop the hashing */ | ||
| 427 | writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Wait for SS to finish the hash. | ||
| 431 | * The timeout could happen only in case of bad overcloking | ||
| 432 | * or driver bug. | ||
| 433 | */ | ||
| 434 | i = 0; | ||
| 435 | do { | ||
| 436 | v = readl(ss->base + SS_CTL); | ||
| 437 | i++; | ||
| 438 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); | ||
| 439 | if (i >= SS_TIMEOUT) { | ||
| 440 | dev_err_ratelimited(ss->dev, | ||
| 441 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", | ||
| 442 | i, SS_TIMEOUT, v, areq->nbytes); | ||
| 443 | err = -EIO; | ||
| 444 | goto release_ss; | ||
| 445 | } | ||
| 446 | |||
| 447 | /* Get the hash from the device */ | ||
| 448 | if (op->mode == SS_OP_SHA1) { | ||
| 449 | for (i = 0; i < 5; i++) { | ||
| 450 | v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4)); | ||
| 451 | memcpy(areq->result + i * 4, &v, 4); | ||
| 452 | } | ||
| 453 | } else { | ||
| 454 | for (i = 0; i < 4; i++) { | ||
| 455 | v = readl(ss->base + SS_MD0 + i * 4); | ||
| 456 | memcpy(areq->result + i * 4, &v, 4); | ||
| 457 | } | ||
| 458 | } | ||
| 459 | |||
| 460 | release_ss: | ||
| 461 | writel(0, ss->base + SS_CTL); | ||
| 462 | spin_unlock_bh(&ss->slock); | ||
| 463 | return err; | ||
| 464 | } | ||
| 465 | |||
/*
 * sun4i_hash_finup: run a final update pass, then finalize the digest.
 * Any error from the update step is returned as-is without finalizing.
 */
int sun4i_hash_finup(struct ahash_request *areq)
{
	int err = sun4i_hash_update(areq);

	return err ? err : sun4i_hash_final(areq);
}
| 477 | |||
/*
 * sun4i_hash_digest: one-shot hash — chains init, update and final,
 * stopping at the first step that fails and propagating its error.
 */
int sun4i_hash_digest(struct ahash_request *areq)
{
	int err;

	err = sun4i_hash_init(areq);
	if (!err)
		err = sun4i_hash_update(areq);
	if (!err)
		err = sun4i_hash_final(areq);

	return err;
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h new file mode 100644 index 000000000000..8e9c05f6e4d4 --- /dev/null +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h | |||
| @@ -0,0 +1,201 @@ | |||
| 1 | /* | ||
| 2 | * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com> | ||
| 5 | * | ||
| 6 | * Support AES cipher with 128,192,256 bits keysize. | ||
| 7 | * Support MD5 and SHA1 hash algorithms. | ||
| 8 | * Support DES and 3DES | ||
| 9 | * | ||
| 10 | * You could find the datasheet in Documentation/arm/sunxi/README | ||
| 11 | * | ||
| 12 | * Licensed under the GPL-2. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/clk.h> | ||
| 16 | #include <linux/crypto.h> | ||
| 17 | #include <linux/io.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/reset.h> | ||
| 22 | #include <crypto/scatterwalk.h> | ||
| 23 | #include <linux/scatterlist.h> | ||
| 24 | #include <linux/interrupt.h> | ||
| 25 | #include <linux/delay.h> | ||
| 26 | #include <crypto/md5.h> | ||
| 27 | #include <crypto/sha.h> | ||
| 28 | #include <crypto/hash.h> | ||
| 29 | #include <crypto/internal/hash.h> | ||
| 30 | #include <crypto/aes.h> | ||
| 31 | #include <crypto/des.h> | ||
| 32 | #include <crypto/internal/rng.h> | ||
| 33 | |||
| 34 | #define SS_CTL 0x00 | ||
| 35 | #define SS_KEY0 0x04 | ||
| 36 | #define SS_KEY1 0x08 | ||
| 37 | #define SS_KEY2 0x0C | ||
| 38 | #define SS_KEY3 0x10 | ||
| 39 | #define SS_KEY4 0x14 | ||
| 40 | #define SS_KEY5 0x18 | ||
| 41 | #define SS_KEY6 0x1C | ||
| 42 | #define SS_KEY7 0x20 | ||
| 43 | |||
| 44 | #define SS_IV0 0x24 | ||
| 45 | #define SS_IV1 0x28 | ||
| 46 | #define SS_IV2 0x2C | ||
| 47 | #define SS_IV3 0x30 | ||
| 48 | |||
| 49 | #define SS_FCSR 0x44 | ||
| 50 | |||
| 51 | #define SS_MD0 0x4C | ||
| 52 | #define SS_MD1 0x50 | ||
| 53 | #define SS_MD2 0x54 | ||
| 54 | #define SS_MD3 0x58 | ||
| 55 | #define SS_MD4 0x5C | ||
| 56 | |||
| 57 | #define SS_RXFIFO 0x200 | ||
| 58 | #define SS_TXFIFO 0x204 | ||
| 59 | |||
| 60 | /* SS_CTL configuration values */ | ||
| 61 | |||
| 62 | /* PRNG generator mode - bit 15 */ | ||
| 63 | #define SS_PRNG_ONESHOT (0 << 15) | ||
| 64 | #define SS_PRNG_CONTINUE (1 << 15) | ||
| 65 | |||
| 66 | /* IV mode for hash */ | ||
| 67 | #define SS_IV_ARBITRARY (1 << 14) | ||
| 68 | |||
| 69 | /* SS operation mode - bits 12-13 */ | ||
| 70 | #define SS_ECB (0 << 12) | ||
| 71 | #define SS_CBC (1 << 12) | ||
| 72 | #define SS_CTS (3 << 12) | ||
| 73 | |||
| 74 | /* Counter width for CNT mode - bits 10-11 */ | ||
| 75 | #define SS_CNT_16BITS (0 << 10) | ||
| 76 | #define SS_CNT_32BITS (1 << 10) | ||
| 77 | #define SS_CNT_64BITS (2 << 10) | ||
| 78 | |||
| 79 | /* Key size for AES - bits 8-9 */ | ||
| 80 | #define SS_AES_128BITS (0 << 8) | ||
| 81 | #define SS_AES_192BITS (1 << 8) | ||
| 82 | #define SS_AES_256BITS (2 << 8) | ||
| 83 | |||
| 84 | /* Operation direction - bit 7 */ | ||
| 85 | #define SS_ENCRYPTION (0 << 7) | ||
| 86 | #define SS_DECRYPTION (1 << 7) | ||
| 87 | |||
| 88 | /* SS Method - bits 4-6 */ | ||
| 89 | #define SS_OP_AES (0 << 4) | ||
| 90 | #define SS_OP_DES (1 << 4) | ||
| 91 | #define SS_OP_3DES (2 << 4) | ||
| 92 | #define SS_OP_SHA1 (3 << 4) | ||
| 93 | #define SS_OP_MD5 (4 << 4) | ||
| 94 | #define SS_OP_PRNG (5 << 4) | ||
| 95 | |||
| 96 | /* Data end bit - bit 2 */ | ||
| 97 | #define SS_DATA_END (1 << 2) | ||
| 98 | |||
| 99 | /* PRNG start bit - bit 1 */ | ||
| 100 | #define SS_PRNG_START (1 << 1) | ||
| 101 | |||
| 102 | /* SS Enable bit - bit 0 */ | ||
| 103 | #define SS_DISABLED (0 << 0) | ||
| 104 | #define SS_ENABLED (1 << 0) | ||
| 105 | |||
| 106 | /* SS_FCSR configuration values */ | ||
| 107 | /* RX FIFO status - bit 30 */ | ||
| 108 | #define SS_RXFIFO_FREE (1 << 30) | ||
| 109 | |||
| 110 | /* RX FIFO empty spaces - bits 24-29 */ | ||
| 111 | #define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f) | ||
| 112 | |||
| 113 | /* TX FIFO status - bit 22 */ | ||
| 114 | #define SS_TXFIFO_AVAILABLE (1 << 22) | ||
| 115 | |||
| 116 | /* TX FIFO available spaces - bits 16-21 */ | ||
| 117 | #define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f) | ||
| 118 | |||
| 119 | #define SS_RX_MAX 32 | ||
| 120 | #define SS_RX_DEFAULT SS_RX_MAX | ||
| 121 | #define SS_TX_MAX 33 | ||
| 122 | |||
| 123 | #define SS_RXFIFO_EMP_INT_PENDING (1 << 10) | ||
| 124 | #define SS_TXFIFO_AVA_INT_PENDING (1 << 8) | ||
| 125 | #define SS_RXFIFO_EMP_INT_ENABLE (1 << 2) | ||
| 126 | #define SS_TXFIFO_AVA_INT_ENABLE (1 << 0) | ||
| 127 | |||
/*
 * Per-device driver state for one Security System instance, shared by
 * all registered algorithms.
 */
struct sun4i_ss_ctx {
	void __iomem *base;	/* mapped MMIO registers (SS_CTL, FIFOs, ...) */
	int irq;
	struct clk *busclk;	/* bus clock gate */
	struct clk *ssclk;	/* SS module clock */
	struct reset_control *reset;
	struct device *dev;
	struct resource *res;
	spinlock_t slock; /* control the use of the device */
};
| 138 | |||
/*
 * Template describing one algorithm registered by the driver; which
 * union member is valid is selected by @type.
 */
struct sun4i_ss_alg_template {
	u32 type;	/* crypto alg type (presumably CRYPTO_ALG_TYPE_*) — confirm against registration code */
	u32 mode;	/* SS_CTL method bits for this algorithm (SS_OP_*, SS_ECB/SS_CBC, ...) */
	union {
		struct crypto_alg crypto;	/* cipher algorithms */
		struct ahash_alg hash;		/* hash algorithms */
	} alg;
	struct sun4i_ss_ctx *ss;	/* device this template is bound to */
};
| 148 | |||
/* Per-transform (tfm) cipher context: holds the expanded key material. */
struct sun4i_tfm_ctx {
	u32 key[AES_MAX_KEY_SIZE / 4];/* divided by sizeof(u32) */
	u32 keylen;	/* key length — presumably bytes, set by the setkey callbacks; confirm */
	u32 keymode;	/* key-size bits for SS_CTL (e.g. SS_AES_128BITS) */
	struct sun4i_ss_ctx *ss;	/* owning device */
};
| 155 | |||
/* Per-request cipher context. */
struct sun4i_cipher_req_ctx {
	u32 mode;	/* SS_CTL bits for this request (direction / chaining mode) */
};
| 159 | |||
/* Per-request hash state, preserved across update() calls. */
struct sun4i_req_ctx {
	u32 mode;	/* SS_OP_MD5 or SS_OP_SHA1 (tested in sun4i_hash_final) */
	u64 byte_count; /* number of bytes "uploaded" to the device */
	u32 hash[5]; /* for storing SS_IVx register */
	char buf[64];	/* partial block not yet sent to the device */
	unsigned int len;	/* number of valid bytes in buf (< 64) */
	struct sun4i_ss_ctx *ss;	/* owning device */
};
| 168 | |||
| 169 | int sun4i_hash_crainit(struct crypto_tfm *tfm); | ||
| 170 | int sun4i_hash_init(struct ahash_request *areq); | ||
| 171 | int sun4i_hash_update(struct ahash_request *areq); | ||
| 172 | int sun4i_hash_final(struct ahash_request *areq); | ||
| 173 | int sun4i_hash_finup(struct ahash_request *areq); | ||
| 174 | int sun4i_hash_digest(struct ahash_request *areq); | ||
| 175 | int sun4i_hash_export_md5(struct ahash_request *areq, void *out); | ||
| 176 | int sun4i_hash_import_md5(struct ahash_request *areq, const void *in); | ||
| 177 | int sun4i_hash_export_sha1(struct ahash_request *areq, void *out); | ||
| 178 | int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in); | ||
| 179 | |||
| 180 | int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq); | ||
| 181 | int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq); | ||
| 182 | int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq); | ||
| 183 | int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq); | ||
| 184 | |||
| 185 | int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq); | ||
| 186 | int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq); | ||
| 187 | int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq); | ||
| 188 | int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq); | ||
| 189 | |||
| 190 | int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq); | ||
| 191 | int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq); | ||
| 192 | int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq); | ||
| 193 | int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq); | ||
| 194 | |||
| 195 | int sun4i_ss_cipher_init(struct crypto_tfm *tfm); | ||
| 196 | int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 197 | unsigned int keylen); | ||
| 198 | int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 199 | unsigned int keylen); | ||
| 200 | int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | ||
| 201 | unsigned int keylen); | ||
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 83aca95a95bc..cd774534d987 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -766,6 +766,7 @@ static int talitos_rng_init(struct hwrng *rng) | |||
| 766 | static int talitos_register_rng(struct device *dev) | 766 | static int talitos_register_rng(struct device *dev) |
| 767 | { | 767 | { |
| 768 | struct talitos_private *priv = dev_get_drvdata(dev); | 768 | struct talitos_private *priv = dev_get_drvdata(dev); |
| 769 | int err; | ||
| 769 | 770 | ||
| 770 | priv->rng.name = dev_driver_string(dev), | 771 | priv->rng.name = dev_driver_string(dev), |
| 771 | priv->rng.init = talitos_rng_init, | 772 | priv->rng.init = talitos_rng_init, |
| @@ -773,14 +774,22 @@ static int talitos_register_rng(struct device *dev) | |||
| 773 | priv->rng.data_read = talitos_rng_data_read, | 774 | priv->rng.data_read = talitos_rng_data_read, |
| 774 | priv->rng.priv = (unsigned long)dev; | 775 | priv->rng.priv = (unsigned long)dev; |
| 775 | 776 | ||
| 776 | return hwrng_register(&priv->rng); | 777 | err = hwrng_register(&priv->rng); |
| 778 | if (!err) | ||
| 779 | priv->rng_registered = true; | ||
| 780 | |||
| 781 | return err; | ||
| 777 | } | 782 | } |
| 778 | 783 | ||
/*
 * Unregister the hwrng, but only if it was actually registered
 * (rng_registered is set by talitos_register_rng on success), so this
 * is safe to call from error/remove paths regardless of probe state.
 */
static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
| 785 | 794 | ||
| 786 | /* | 795 | /* |
| @@ -799,7 +808,6 @@ struct talitos_ctx { | |||
| 799 | unsigned int keylen; | 808 | unsigned int keylen; |
| 800 | unsigned int enckeylen; | 809 | unsigned int enckeylen; |
| 801 | unsigned int authkeylen; | 810 | unsigned int authkeylen; |
| 802 | unsigned int authsize; | ||
| 803 | }; | 811 | }; |
| 804 | 812 | ||
| 805 | #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE | 813 | #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE |
| @@ -819,16 +827,6 @@ struct talitos_ahash_req_ctx { | |||
| 819 | struct scatterlist *psrc; | 827 | struct scatterlist *psrc; |
| 820 | }; | 828 | }; |
| 821 | 829 | ||
| 822 | static int aead_setauthsize(struct crypto_aead *authenc, | ||
| 823 | unsigned int authsize) | ||
| 824 | { | ||
| 825 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 826 | |||
| 827 | ctx->authsize = authsize; | ||
| 828 | |||
| 829 | return 0; | ||
| 830 | } | ||
| 831 | |||
| 832 | static int aead_setkey(struct crypto_aead *authenc, | 830 | static int aead_setkey(struct crypto_aead *authenc, |
| 833 | const u8 *key, unsigned int keylen) | 831 | const u8 *key, unsigned int keylen) |
| 834 | { | 832 | { |
| @@ -857,12 +855,11 @@ badkey: | |||
| 857 | 855 | ||
| 858 | /* | 856 | /* |
| 859 | * talitos_edesc - s/w-extended descriptor | 857 | * talitos_edesc - s/w-extended descriptor |
| 860 | * @assoc_nents: number of segments in associated data scatterlist | ||
| 861 | * @src_nents: number of segments in input scatterlist | 858 | * @src_nents: number of segments in input scatterlist |
| 862 | * @dst_nents: number of segments in output scatterlist | 859 | * @dst_nents: number of segments in output scatterlist |
| 863 | * @assoc_chained: whether assoc is chained or not | ||
| 864 | * @src_chained: whether src is chained or not | 860 | * @src_chained: whether src is chained or not |
| 865 | * @dst_chained: whether dst is chained or not | 861 | * @dst_chained: whether dst is chained or not |
| 862 | * @icv_ool: whether ICV is out-of-line | ||
| 866 | * @iv_dma: dma address of iv for checking continuity and link table | 863 | * @iv_dma: dma address of iv for checking continuity and link table |
| 867 | * @dma_len: length of dma mapped link_tbl space | 864 | * @dma_len: length of dma mapped link_tbl space |
| 868 | * @dma_link_tbl: bus physical address of link_tbl/buf | 865 | * @dma_link_tbl: bus physical address of link_tbl/buf |
| @@ -875,12 +872,11 @@ badkey: | |||
| 875 | * of link_tbl data | 872 | * of link_tbl data |
| 876 | */ | 873 | */ |
| 877 | struct talitos_edesc { | 874 | struct talitos_edesc { |
| 878 | int assoc_nents; | ||
| 879 | int src_nents; | 875 | int src_nents; |
| 880 | int dst_nents; | 876 | int dst_nents; |
| 881 | bool assoc_chained; | ||
| 882 | bool src_chained; | 877 | bool src_chained; |
| 883 | bool dst_chained; | 878 | bool dst_chained; |
| 879 | bool icv_ool; | ||
| 884 | dma_addr_t iv_dma; | 880 | dma_addr_t iv_dma; |
| 885 | int dma_len; | 881 | int dma_len; |
| 886 | dma_addr_t dma_link_tbl; | 882 | dma_addr_t dma_link_tbl; |
| @@ -952,14 +948,6 @@ static void ipsec_esp_unmap(struct device *dev, | |||
| 952 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); | 948 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); |
| 953 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); | 949 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); |
| 954 | 950 | ||
| 955 | if (edesc->assoc_chained) | ||
| 956 | talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); | ||
| 957 | else if (areq->assoclen) | ||
| 958 | /* assoc_nents counts also for IV in non-contiguous cases */ | ||
| 959 | dma_unmap_sg(dev, areq->assoc, | ||
| 960 | edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, | ||
| 961 | DMA_TO_DEVICE); | ||
| 962 | |||
| 963 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); | 951 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst); |
| 964 | 952 | ||
| 965 | if (edesc->dma_len) | 953 | if (edesc->dma_len) |
| @@ -976,7 +964,7 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
| 976 | { | 964 | { |
| 977 | struct aead_request *areq = context; | 965 | struct aead_request *areq = context; |
| 978 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 966 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
| 979 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 967 | unsigned int authsize = crypto_aead_authsize(authenc); |
| 980 | struct talitos_edesc *edesc; | 968 | struct talitos_edesc *edesc; |
| 981 | struct scatterlist *sg; | 969 | struct scatterlist *sg; |
| 982 | void *icvdata; | 970 | void *icvdata; |
| @@ -986,13 +974,12 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
| 986 | ipsec_esp_unmap(dev, edesc, areq); | 974 | ipsec_esp_unmap(dev, edesc, areq); |
| 987 | 975 | ||
| 988 | /* copy the generated ICV to dst */ | 976 | /* copy the generated ICV to dst */ |
| 989 | if (edesc->dst_nents) { | 977 | if (edesc->icv_ool) { |
| 990 | icvdata = &edesc->link_tbl[edesc->src_nents + | 978 | icvdata = &edesc->link_tbl[edesc->src_nents + |
| 991 | edesc->dst_nents + 2 + | 979 | edesc->dst_nents + 2]; |
| 992 | edesc->assoc_nents]; | ||
| 993 | sg = sg_last(areq->dst, edesc->dst_nents); | 980 | sg = sg_last(areq->dst, edesc->dst_nents); |
| 994 | memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, | 981 | memcpy((char *)sg_virt(sg) + sg->length - authsize, |
| 995 | icvdata, ctx->authsize); | 982 | icvdata, authsize); |
| 996 | } | 983 | } |
| 997 | 984 | ||
| 998 | kfree(edesc); | 985 | kfree(edesc); |
| @@ -1006,10 +993,10 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, | |||
| 1006 | { | 993 | { |
| 1007 | struct aead_request *req = context; | 994 | struct aead_request *req = context; |
| 1008 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 995 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 1009 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 996 | unsigned int authsize = crypto_aead_authsize(authenc); |
| 1010 | struct talitos_edesc *edesc; | 997 | struct talitos_edesc *edesc; |
| 1011 | struct scatterlist *sg; | 998 | struct scatterlist *sg; |
| 1012 | void *icvdata; | 999 | char *oicv, *icv; |
| 1013 | 1000 | ||
| 1014 | edesc = container_of(desc, struct talitos_edesc, desc); | 1001 | edesc = container_of(desc, struct talitos_edesc, desc); |
| 1015 | 1002 | ||
| @@ -1017,16 +1004,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, | |||
| 1017 | 1004 | ||
| 1018 | if (!err) { | 1005 | if (!err) { |
| 1019 | /* auth check */ | 1006 | /* auth check */ |
| 1020 | if (edesc->dma_len) | ||
| 1021 | icvdata = &edesc->link_tbl[edesc->src_nents + | ||
| 1022 | edesc->dst_nents + 2 + | ||
| 1023 | edesc->assoc_nents]; | ||
| 1024 | else | ||
| 1025 | icvdata = &edesc->link_tbl[0]; | ||
| 1026 | |||
| 1027 | sg = sg_last(req->dst, edesc->dst_nents ? : 1); | 1007 | sg = sg_last(req->dst, edesc->dst_nents ? : 1); |
| 1028 | err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length - | 1008 | icv = (char *)sg_virt(sg) + sg->length - authsize; |
| 1029 | ctx->authsize, ctx->authsize) ? -EBADMSG : 0; | 1009 | |
| 1010 | if (edesc->dma_len) { | ||
| 1011 | oicv = (char *)&edesc->link_tbl[edesc->src_nents + | ||
| 1012 | edesc->dst_nents + 2]; | ||
| 1013 | if (edesc->icv_ool) | ||
| 1014 | icv = oicv + authsize; | ||
| 1015 | } else | ||
| 1016 | oicv = (char *)&edesc->link_tbl[0]; | ||
| 1017 | |||
| 1018 | err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0; | ||
| 1030 | } | 1019 | } |
| 1031 | 1020 | ||
| 1032 | kfree(edesc); | 1021 | kfree(edesc); |
| @@ -1059,53 +1048,69 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, | |||
| 1059 | * convert scatterlist to SEC h/w link table format | 1048 | * convert scatterlist to SEC h/w link table format |
| 1060 | * stop at cryptlen bytes | 1049 | * stop at cryptlen bytes |
| 1061 | */ | 1050 | */ |
/*
 * Convert a DMA-mapped scatterlist to SEC h/w link-table format,
 * skipping the first @offset bytes and describing at most @cryptlen
 * bytes. Returns the number of link-table entries written; the last
 * one is tagged with DESC_PTR_LNKTBL_RETURN.
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;	/* upper bound on segments to visit */
	int count = 0;		/* entries actually emitted */

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		/* segment lies entirely inside the skipped prefix */
		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		/* clamp the last entry to the remaining cryptlen */
		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;	/* only the first used segment carries an offset */

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}
| 1089 | |||
/* Convenience wrapper: build a link table starting at offset 0. */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
| 1093 | 1097 | ||
| 1094 | /* | 1098 | /* |
| 1095 | * fill in and submit ipsec_esp descriptor | 1099 | * fill in and submit ipsec_esp descriptor |
| 1096 | */ | 1100 | */ |
| 1097 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | 1101 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, |
| 1098 | u64 seq, void (*callback) (struct device *dev, | 1102 | void (*callback)(struct device *dev, |
| 1099 | struct talitos_desc *desc, | 1103 | struct talitos_desc *desc, |
| 1100 | void *context, int error)) | 1104 | void *context, int error)) |
| 1101 | { | 1105 | { |
| 1102 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1106 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); |
| 1107 | unsigned int authsize = crypto_aead_authsize(aead); | ||
| 1103 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); | 1108 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); |
| 1104 | struct device *dev = ctx->dev; | 1109 | struct device *dev = ctx->dev; |
| 1105 | struct talitos_desc *desc = &edesc->desc; | 1110 | struct talitos_desc *desc = &edesc->desc; |
| 1106 | unsigned int cryptlen = areq->cryptlen; | 1111 | unsigned int cryptlen = areq->cryptlen; |
| 1107 | unsigned int authsize = ctx->authsize; | ||
| 1108 | unsigned int ivsize = crypto_aead_ivsize(aead); | 1112 | unsigned int ivsize = crypto_aead_ivsize(aead); |
| 1113 | int tbl_off = 0; | ||
| 1109 | int sg_count, ret; | 1114 | int sg_count, ret; |
| 1110 | int sg_link_tbl_len; | 1115 | int sg_link_tbl_len; |
| 1111 | 1116 | ||
| @@ -1113,36 +1118,27 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
| 1113 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 1118 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, |
| 1114 | DMA_TO_DEVICE); | 1119 | DMA_TO_DEVICE); |
| 1115 | 1120 | ||
| 1121 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1, | ||
| 1122 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | ||
| 1123 | : DMA_TO_DEVICE, | ||
| 1124 | edesc->src_chained); | ||
| 1125 | |||
| 1116 | /* hmac data */ | 1126 | /* hmac data */ |
| 1117 | desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); | 1127 | desc->ptr[1].len = cpu_to_be16(areq->assoclen); |
| 1118 | if (edesc->assoc_nents) { | 1128 | if (sg_count > 1 && |
| 1119 | int tbl_off = edesc->src_nents + edesc->dst_nents + 2; | 1129 | (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, |
| 1120 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | 1130 | areq->assoclen, |
| 1131 | &edesc->link_tbl[tbl_off])) > 1) { | ||
| 1132 | tbl_off += ret; | ||
| 1121 | 1133 | ||
| 1122 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * | 1134 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * |
| 1123 | sizeof(struct talitos_ptr), 0); | 1135 | sizeof(struct talitos_ptr), 0); |
| 1124 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; | 1136 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; |
| 1125 | 1137 | ||
| 1126 | /* assoc_nents - 1 entries for assoc, 1 for IV */ | ||
| 1127 | sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1, | ||
| 1128 | areq->assoclen, tbl_ptr); | ||
| 1129 | |||
| 1130 | /* add IV to link table */ | ||
| 1131 | tbl_ptr += sg_count - 1; | ||
| 1132 | tbl_ptr->j_extent = 0; | ||
| 1133 | tbl_ptr++; | ||
| 1134 | to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0); | ||
| 1135 | tbl_ptr->len = cpu_to_be16(ivsize); | ||
| 1136 | tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; | ||
| 1137 | |||
| 1138 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1138 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
| 1139 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1139 | edesc->dma_len, DMA_BIDIRECTIONAL); |
| 1140 | } else { | 1140 | } else { |
| 1141 | if (areq->assoclen) | 1141 | to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); |
| 1142 | to_talitos_ptr(&desc->ptr[1], | ||
| 1143 | sg_dma_address(areq->assoc), 0); | ||
| 1144 | else | ||
| 1145 | to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0); | ||
| 1146 | desc->ptr[1].j_extent = 0; | 1142 | desc->ptr[1].j_extent = 0; |
| 1147 | } | 1143 | } |
| 1148 | 1144 | ||
| @@ -1150,8 +1146,6 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
| 1150 | to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); | 1146 | to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); |
| 1151 | desc->ptr[2].len = cpu_to_be16(ivsize); | 1147 | desc->ptr[2].len = cpu_to_be16(ivsize); |
| 1152 | desc->ptr[2].j_extent = 0; | 1148 | desc->ptr[2].j_extent = 0; |
| 1153 | /* Sync needed for the aead_givencrypt case */ | ||
| 1154 | dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); | ||
| 1155 | 1149 | ||
| 1156 | /* cipher key */ | 1150 | /* cipher key */ |
| 1157 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, | 1151 | map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, |
| @@ -1167,33 +1161,24 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
| 1167 | desc->ptr[4].len = cpu_to_be16(cryptlen); | 1161 | desc->ptr[4].len = cpu_to_be16(cryptlen); |
| 1168 | desc->ptr[4].j_extent = authsize; | 1162 | desc->ptr[4].j_extent = authsize; |
| 1169 | 1163 | ||
| 1170 | sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, | 1164 | sg_link_tbl_len = cryptlen; |
| 1171 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | 1165 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) |
| 1172 | : DMA_TO_DEVICE, | 1166 | sg_link_tbl_len += authsize; |
| 1173 | edesc->src_chained); | 1167 | |
| 1174 | 1168 | if (sg_count > 1 && | |
| 1175 | if (sg_count == 1) { | 1169 | (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, |
| 1170 | sg_link_tbl_len, | ||
| 1171 | &edesc->link_tbl[tbl_off])) > 1) { | ||
| 1172 | tbl_off += ret; | ||
| 1173 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
| 1174 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + | ||
| 1175 | tbl_off * | ||
| 1176 | sizeof(struct talitos_ptr), 0); | ||
| 1177 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
| 1178 | edesc->dma_len, | ||
| 1179 | DMA_BIDIRECTIONAL); | ||
| 1180 | } else | ||
| 1176 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); | 1181 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); |
| 1177 | } else { | ||
| 1178 | sg_link_tbl_len = cryptlen; | ||
| 1179 | |||
| 1180 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) | ||
| 1181 | sg_link_tbl_len = cryptlen + authsize; | ||
| 1182 | |||
| 1183 | sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, | ||
| 1184 | &edesc->link_tbl[0]); | ||
| 1185 | if (sg_count > 1) { | ||
| 1186 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
| 1187 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0); | ||
| 1188 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | ||
| 1189 | edesc->dma_len, | ||
| 1190 | DMA_BIDIRECTIONAL); | ||
| 1191 | } else { | ||
| 1192 | /* Only one segment now, so no link tbl needed */ | ||
| 1193 | to_talitos_ptr(&desc->ptr[4], | ||
| 1194 | sg_dma_address(areq->src), 0); | ||
| 1195 | } | ||
| 1196 | } | ||
| 1197 | 1182 | ||
| 1198 | /* cipher out */ | 1183 | /* cipher out */ |
| 1199 | desc->ptr[5].len = cpu_to_be16(cryptlen); | 1184 | desc->ptr[5].len = cpu_to_be16(cryptlen); |
| @@ -1204,16 +1189,17 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
| 1204 | edesc->dst_nents ? : 1, | 1189 | edesc->dst_nents ? : 1, |
| 1205 | DMA_FROM_DEVICE, edesc->dst_chained); | 1190 | DMA_FROM_DEVICE, edesc->dst_chained); |
| 1206 | 1191 | ||
| 1207 | if (sg_count == 1) { | 1192 | edesc->icv_ool = false; |
| 1208 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); | 1193 | |
| 1209 | } else { | 1194 | if (sg_count > 1 && |
| 1210 | int tbl_off = edesc->src_nents + 1; | 1195 | (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, |
| 1196 | areq->assoclen, cryptlen, | ||
| 1197 | &edesc->link_tbl[tbl_off])) > | ||
| 1198 | 1) { | ||
| 1211 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | 1199 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
| 1212 | 1200 | ||
| 1213 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + | 1201 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + |
| 1214 | tbl_off * sizeof(struct talitos_ptr), 0); | 1202 | tbl_off * sizeof(struct talitos_ptr), 0); |
| 1215 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | ||
| 1216 | tbl_ptr); | ||
| 1217 | 1203 | ||
| 1218 | /* Add an entry to the link table for ICV data */ | 1204 | /* Add an entry to the link table for ICV data */ |
| 1219 | tbl_ptr += sg_count - 1; | 1205 | tbl_ptr += sg_count - 1; |
| @@ -1224,13 +1210,16 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
| 1224 | 1210 | ||
| 1225 | /* icv data follows link tables */ | 1211 | /* icv data follows link tables */ |
| 1226 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + | 1212 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + |
| 1227 | (tbl_off + edesc->dst_nents + 1 + | 1213 | (edesc->src_nents + edesc->dst_nents + |
| 1228 | edesc->assoc_nents) * | 1214 | 2) * sizeof(struct talitos_ptr) + |
| 1229 | sizeof(struct talitos_ptr), 0); | 1215 | authsize, 0); |
| 1230 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1216 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; |
| 1231 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1217 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
| 1232 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1218 | edesc->dma_len, DMA_BIDIRECTIONAL); |
| 1233 | } | 1219 | |
| 1220 | edesc->icv_ool = true; | ||
| 1221 | } else | ||
| 1222 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); | ||
| 1234 | 1223 | ||
| 1235 | /* iv out */ | 1224 | /* iv out */ |
| 1236 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, | 1225 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, |
| @@ -1268,7 +1257,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) | |||
| 1268 | * allocate and map the extended descriptor | 1257 | * allocate and map the extended descriptor |
| 1269 | */ | 1258 | */ |
| 1270 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | 1259 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
| 1271 | struct scatterlist *assoc, | ||
| 1272 | struct scatterlist *src, | 1260 | struct scatterlist *src, |
| 1273 | struct scatterlist *dst, | 1261 | struct scatterlist *dst, |
| 1274 | u8 *iv, | 1262 | u8 *iv, |
| @@ -1281,8 +1269,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1281 | bool encrypt) | 1269 | bool encrypt) |
| 1282 | { | 1270 | { |
| 1283 | struct talitos_edesc *edesc; | 1271 | struct talitos_edesc *edesc; |
| 1284 | int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; | 1272 | int src_nents, dst_nents, alloc_len, dma_len; |
| 1285 | bool assoc_chained = false, src_chained = false, dst_chained = false; | 1273 | bool src_chained = false, dst_chained = false; |
| 1286 | dma_addr_t iv_dma = 0; | 1274 | dma_addr_t iv_dma = 0; |
| 1287 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1275 | gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
| 1288 | GFP_ATOMIC; | 1276 | GFP_ATOMIC; |
| @@ -1298,48 +1286,35 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1298 | if (ivsize) | 1286 | if (ivsize) |
| 1299 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | 1287 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
| 1300 | 1288 | ||
| 1301 | if (assoclen) { | ||
| 1302 | /* | ||
| 1303 | * Currently it is assumed that iv is provided whenever assoc | ||
| 1304 | * is. | ||
| 1305 | */ | ||
| 1306 | BUG_ON(!iv); | ||
| 1307 | |||
| 1308 | assoc_nents = sg_count(assoc, assoclen, &assoc_chained); | ||
| 1309 | talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE, | ||
| 1310 | assoc_chained); | ||
| 1311 | assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents; | ||
| 1312 | |||
| 1313 | if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma) | ||
| 1314 | assoc_nents = assoc_nents ? assoc_nents + 1 : 2; | ||
| 1315 | } | ||
| 1316 | |||
| 1317 | if (!dst || dst == src) { | 1289 | if (!dst || dst == src) { |
| 1318 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); | 1290 | src_nents = sg_count(src, assoclen + cryptlen + authsize, |
| 1291 | &src_chained); | ||
| 1319 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1292 | src_nents = (src_nents == 1) ? 0 : src_nents; |
| 1320 | dst_nents = dst ? src_nents : 0; | 1293 | dst_nents = dst ? src_nents : 0; |
| 1321 | } else { /* dst && dst != src*/ | 1294 | } else { /* dst && dst != src*/ |
| 1322 | src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize), | 1295 | src_nents = sg_count(src, assoclen + cryptlen + |
| 1296 | (encrypt ? 0 : authsize), | ||
| 1323 | &src_chained); | 1297 | &src_chained); |
| 1324 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1298 | src_nents = (src_nents == 1) ? 0 : src_nents; |
| 1325 | dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0), | 1299 | dst_nents = sg_count(dst, assoclen + cryptlen + |
| 1300 | (encrypt ? authsize : 0), | ||
| 1326 | &dst_chained); | 1301 | &dst_chained); |
| 1327 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1302 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
| 1328 | } | 1303 | } |
| 1329 | 1304 | ||
| 1330 | /* | 1305 | /* |
| 1331 | * allocate space for base edesc plus the link tables, | 1306 | * allocate space for base edesc plus the link tables, |
| 1332 | * allowing for two separate entries for ICV and generated ICV (+ 2), | 1307 | * allowing for two separate entries for AD and generated ICV (+ 2), |
| 1333 | * and the ICV data itself | 1308 | * and space for two sets of ICVs (stashed and generated) |
| 1334 | */ | 1309 | */ |
| 1335 | alloc_len = sizeof(struct talitos_edesc); | 1310 | alloc_len = sizeof(struct talitos_edesc); |
| 1336 | if (assoc_nents || src_nents || dst_nents) { | 1311 | if (src_nents || dst_nents) { |
| 1337 | if (is_sec1) | 1312 | if (is_sec1) |
| 1338 | dma_len = (src_nents ? cryptlen : 0) + | 1313 | dma_len = (src_nents ? cryptlen : 0) + |
| 1339 | (dst_nents ? cryptlen : 0); | 1314 | (dst_nents ? cryptlen : 0); |
| 1340 | else | 1315 | else |
| 1341 | dma_len = (src_nents + dst_nents + 2 + assoc_nents) * | 1316 | dma_len = (src_nents + dst_nents + 2) * |
| 1342 | sizeof(struct talitos_ptr) + authsize; | 1317 | sizeof(struct talitos_ptr) + authsize * 2; |
| 1343 | alloc_len += dma_len; | 1318 | alloc_len += dma_len; |
| 1344 | } else { | 1319 | } else { |
| 1345 | dma_len = 0; | 1320 | dma_len = 0; |
| @@ -1348,13 +1323,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1348 | 1323 | ||
| 1349 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1324 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
| 1350 | if (!edesc) { | 1325 | if (!edesc) { |
| 1351 | if (assoc_chained) | ||
| 1352 | talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); | ||
| 1353 | else if (assoclen) | ||
| 1354 | dma_unmap_sg(dev, assoc, | ||
| 1355 | assoc_nents ? assoc_nents - 1 : 1, | ||
| 1356 | DMA_TO_DEVICE); | ||
| 1357 | |||
| 1358 | if (iv_dma) | 1326 | if (iv_dma) |
| 1359 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 1327 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
| 1360 | 1328 | ||
| @@ -1362,10 +1330,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1362 | return ERR_PTR(-ENOMEM); | 1330 | return ERR_PTR(-ENOMEM); |
| 1363 | } | 1331 | } |
| 1364 | 1332 | ||
| 1365 | edesc->assoc_nents = assoc_nents; | ||
| 1366 | edesc->src_nents = src_nents; | 1333 | edesc->src_nents = src_nents; |
| 1367 | edesc->dst_nents = dst_nents; | 1334 | edesc->dst_nents = dst_nents; |
| 1368 | edesc->assoc_chained = assoc_chained; | ||
| 1369 | edesc->src_chained = src_chained; | 1335 | edesc->src_chained = src_chained; |
| 1370 | edesc->dst_chained = dst_chained; | 1336 | edesc->dst_chained = dst_chained; |
| 1371 | edesc->iv_dma = iv_dma; | 1337 | edesc->iv_dma = iv_dma; |
| @@ -1382,12 +1348,13 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | |||
| 1382 | int icv_stashing, bool encrypt) | 1348 | int icv_stashing, bool encrypt) |
| 1383 | { | 1349 | { |
| 1384 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1350 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
| 1351 | unsigned int authsize = crypto_aead_authsize(authenc); | ||
| 1385 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1352 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
| 1386 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 1353 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
| 1387 | 1354 | ||
| 1388 | return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, | 1355 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, |
| 1389 | iv, areq->assoclen, areq->cryptlen, | 1356 | iv, areq->assoclen, areq->cryptlen, |
| 1390 | ctx->authsize, ivsize, icv_stashing, | 1357 | authsize, ivsize, icv_stashing, |
| 1391 | areq->base.flags, encrypt); | 1358 | areq->base.flags, encrypt); |
| 1392 | } | 1359 | } |
| 1393 | 1360 | ||
| @@ -1405,14 +1372,14 @@ static int aead_encrypt(struct aead_request *req) | |||
| 1405 | /* set encrypt */ | 1372 | /* set encrypt */ |
| 1406 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | 1373 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; |
| 1407 | 1374 | ||
| 1408 | return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done); | 1375 | return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); |
| 1409 | } | 1376 | } |
| 1410 | 1377 | ||
| 1411 | static int aead_decrypt(struct aead_request *req) | 1378 | static int aead_decrypt(struct aead_request *req) |
| 1412 | { | 1379 | { |
| 1413 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1380 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
| 1381 | unsigned int authsize = crypto_aead_authsize(authenc); | ||
| 1414 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1382 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
| 1415 | unsigned int authsize = ctx->authsize; | ||
| 1416 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); | 1383 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); |
| 1417 | struct talitos_edesc *edesc; | 1384 | struct talitos_edesc *edesc; |
| 1418 | struct scatterlist *sg; | 1385 | struct scatterlist *sg; |
| @@ -1437,7 +1404,7 @@ static int aead_decrypt(struct aead_request *req) | |||
| 1437 | /* reset integrity check result bits */ | 1404 | /* reset integrity check result bits */ |
| 1438 | edesc->desc.hdr_lo = 0; | 1405 | edesc->desc.hdr_lo = 0; |
| 1439 | 1406 | ||
| 1440 | return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done); | 1407 | return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); |
| 1441 | } | 1408 | } |
| 1442 | 1409 | ||
| 1443 | /* Have to check the ICV with software */ | 1410 | /* Have to check the ICV with software */ |
| @@ -1445,40 +1412,16 @@ static int aead_decrypt(struct aead_request *req) | |||
| 1445 | 1412 | ||
| 1446 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1413 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ |
| 1447 | if (edesc->dma_len) | 1414 | if (edesc->dma_len) |
| 1448 | icvdata = &edesc->link_tbl[edesc->src_nents + | 1415 | icvdata = (char *)&edesc->link_tbl[edesc->src_nents + |
| 1449 | edesc->dst_nents + 2 + | 1416 | edesc->dst_nents + 2]; |
| 1450 | edesc->assoc_nents]; | ||
| 1451 | else | 1417 | else |
| 1452 | icvdata = &edesc->link_tbl[0]; | 1418 | icvdata = &edesc->link_tbl[0]; |
| 1453 | 1419 | ||
| 1454 | sg = sg_last(req->src, edesc->src_nents ? : 1); | 1420 | sg = sg_last(req->src, edesc->src_nents ? : 1); |
| 1455 | 1421 | ||
| 1456 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, | 1422 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize); |
| 1457 | ctx->authsize); | ||
| 1458 | 1423 | ||
| 1459 | return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done); | 1424 | return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done); |
| 1460 | } | ||
| 1461 | |||
| 1462 | static int aead_givencrypt(struct aead_givcrypt_request *req) | ||
| 1463 | { | ||
| 1464 | struct aead_request *areq = &req->areq; | ||
| 1465 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | ||
| 1466 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 1467 | struct talitos_edesc *edesc; | ||
| 1468 | |||
| 1469 | /* allocate extended descriptor */ | ||
| 1470 | edesc = aead_edesc_alloc(areq, req->giv, 0, true); | ||
| 1471 | if (IS_ERR(edesc)) | ||
| 1472 | return PTR_ERR(edesc); | ||
| 1473 | |||
| 1474 | /* set encrypt */ | ||
| 1475 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | ||
| 1476 | |||
| 1477 | memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); | ||
| 1478 | /* avoid consecutive packets going out with same IV */ | ||
| 1479 | *(__be64 *)req->giv ^= cpu_to_be64(req->seq); | ||
| 1480 | |||
| 1481 | return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done); | ||
| 1482 | } | 1425 | } |
| 1483 | 1426 | ||
| 1484 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | 1427 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, |
| @@ -1710,7 +1653,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | |||
| 1710 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1653 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
| 1711 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); | 1654 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); |
| 1712 | 1655 | ||
| 1713 | return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, | 1656 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, |
| 1714 | areq->info, 0, areq->nbytes, 0, ivsize, 0, | 1657 | areq->info, 0, areq->nbytes, 0, ivsize, 0, |
| 1715 | areq->base.flags, encrypt); | 1658 | areq->base.flags, encrypt); |
| 1716 | } | 1659 | } |
| @@ -1895,7 +1838,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | |||
| 1895 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | 1838 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); |
| 1896 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1839 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
| 1897 | 1840 | ||
| 1898 | return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, | 1841 | return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0, |
| 1899 | nbytes, 0, 0, 0, areq->base.flags, false); | 1842 | nbytes, 0, 0, 0, areq->base.flags, false); |
| 1900 | } | 1843 | } |
| 1901 | 1844 | ||
| @@ -2161,6 +2104,7 @@ struct talitos_alg_template { | |||
| 2161 | union { | 2104 | union { |
| 2162 | struct crypto_alg crypto; | 2105 | struct crypto_alg crypto; |
| 2163 | struct ahash_alg hash; | 2106 | struct ahash_alg hash; |
| 2107 | struct aead_alg aead; | ||
| 2164 | } alg; | 2108 | } alg; |
| 2165 | __be32 desc_hdr_template; | 2109 | __be32 desc_hdr_template; |
| 2166 | }; | 2110 | }; |
| @@ -2168,15 +2112,16 @@ struct talitos_alg_template { | |||
| 2168 | static struct talitos_alg_template driver_algs[] = { | 2112 | static struct talitos_alg_template driver_algs[] = { |
| 2169 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ | 2113 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ |
| 2170 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2114 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2171 | .alg.crypto = { | 2115 | .alg.aead = { |
| 2172 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 2116 | .base = { |
| 2173 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", | 2117 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
| 2174 | .cra_blocksize = AES_BLOCK_SIZE, | 2118 | .cra_driver_name = "authenc-hmac-sha1-" |
| 2175 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2119 | "cbc-aes-talitos", |
| 2176 | .cra_aead = { | 2120 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2177 | .ivsize = AES_BLOCK_SIZE, | 2121 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2178 | .maxauthsize = SHA1_DIGEST_SIZE, | 2122 | }, |
| 2179 | } | 2123 | .ivsize = AES_BLOCK_SIZE, |
| 2124 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 2180 | }, | 2125 | }, |
| 2181 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2126 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2182 | DESC_HDR_SEL0_AESU | | 2127 | DESC_HDR_SEL0_AESU | |
| @@ -2187,15 +2132,17 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2187 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 2132 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
| 2188 | }, | 2133 | }, |
| 2189 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2134 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2190 | .alg.crypto = { | 2135 | .alg.aead = { |
| 2191 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | 2136 | .base = { |
| 2192 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", | 2137 | .cra_name = "authenc(hmac(sha1)," |
| 2193 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2138 | "cbc(des3_ede))", |
| 2194 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2139 | .cra_driver_name = "authenc-hmac-sha1-" |
| 2195 | .cra_aead = { | 2140 | "cbc-3des-talitos", |
| 2196 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2141 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 2197 | .maxauthsize = SHA1_DIGEST_SIZE, | 2142 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2198 | } | 2143 | }, |
| 2144 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2145 | .maxauthsize = SHA1_DIGEST_SIZE, | ||
| 2199 | }, | 2146 | }, |
| 2200 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2147 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2201 | DESC_HDR_SEL0_DEU | | 2148 | DESC_HDR_SEL0_DEU | |
| @@ -2207,15 +2154,16 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2207 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 2154 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
| 2208 | }, | 2155 | }, |
| 2209 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2156 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2210 | .alg.crypto = { | 2157 | .alg.aead = { |
| 2211 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | 2158 | .base = { |
| 2212 | .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos", | 2159 | .cra_name = "authenc(hmac(sha224),cbc(aes))", |
| 2213 | .cra_blocksize = AES_BLOCK_SIZE, | 2160 | .cra_driver_name = "authenc-hmac-sha224-" |
| 2214 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2161 | "cbc-aes-talitos", |
| 2215 | .cra_aead = { | 2162 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2216 | .ivsize = AES_BLOCK_SIZE, | 2163 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2217 | .maxauthsize = SHA224_DIGEST_SIZE, | 2164 | }, |
| 2218 | } | 2165 | .ivsize = AES_BLOCK_SIZE, |
| 2166 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 2219 | }, | 2167 | }, |
| 2220 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2168 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2221 | DESC_HDR_SEL0_AESU | | 2169 | DESC_HDR_SEL0_AESU | |
| @@ -2226,15 +2174,17 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2226 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, | 2174 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, |
| 2227 | }, | 2175 | }, |
| 2228 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2176 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2229 | .alg.crypto = { | 2177 | .alg.aead = { |
| 2230 | .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", | 2178 | .base = { |
| 2231 | .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos", | 2179 | .cra_name = "authenc(hmac(sha224)," |
| 2232 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2180 | "cbc(des3_ede))", |
| 2233 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2181 | .cra_driver_name = "authenc-hmac-sha224-" |
| 2234 | .cra_aead = { | 2182 | "cbc-3des-talitos", |
| 2235 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2183 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 2236 | .maxauthsize = SHA224_DIGEST_SIZE, | 2184 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2237 | } | 2185 | }, |
| 2186 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2187 | .maxauthsize = SHA224_DIGEST_SIZE, | ||
| 2238 | }, | 2188 | }, |
| 2239 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2189 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2240 | DESC_HDR_SEL0_DEU | | 2190 | DESC_HDR_SEL0_DEU | |
| @@ -2246,15 +2196,16 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2246 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, | 2196 | DESC_HDR_MODE1_MDEU_SHA224_HMAC, |
| 2247 | }, | 2197 | }, |
| 2248 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2198 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2249 | .alg.crypto = { | 2199 | .alg.aead = { |
| 2250 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 2200 | .base = { |
| 2251 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", | 2201 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
| 2252 | .cra_blocksize = AES_BLOCK_SIZE, | 2202 | .cra_driver_name = "authenc-hmac-sha256-" |
| 2253 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2203 | "cbc-aes-talitos", |
| 2254 | .cra_aead = { | 2204 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2255 | .ivsize = AES_BLOCK_SIZE, | 2205 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2256 | .maxauthsize = SHA256_DIGEST_SIZE, | 2206 | }, |
| 2257 | } | 2207 | .ivsize = AES_BLOCK_SIZE, |
| 2208 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 2258 | }, | 2209 | }, |
| 2259 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2210 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2260 | DESC_HDR_SEL0_AESU | | 2211 | DESC_HDR_SEL0_AESU | |
| @@ -2265,15 +2216,17 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2265 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 2216 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
| 2266 | }, | 2217 | }, |
| 2267 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2218 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2268 | .alg.crypto = { | 2219 | .alg.aead = { |
| 2269 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | 2220 | .base = { |
| 2270 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", | 2221 | .cra_name = "authenc(hmac(sha256)," |
| 2271 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2222 | "cbc(des3_ede))", |
| 2272 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2223 | .cra_driver_name = "authenc-hmac-sha256-" |
| 2273 | .cra_aead = { | 2224 | "cbc-3des-talitos", |
| 2274 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2225 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 2275 | .maxauthsize = SHA256_DIGEST_SIZE, | 2226 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2276 | } | 2227 | }, |
| 2228 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2229 | .maxauthsize = SHA256_DIGEST_SIZE, | ||
| 2277 | }, | 2230 | }, |
| 2278 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2231 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2279 | DESC_HDR_SEL0_DEU | | 2232 | DESC_HDR_SEL0_DEU | |
| @@ -2285,15 +2238,16 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2285 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 2238 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
| 2286 | }, | 2239 | }, |
| 2287 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2240 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2288 | .alg.crypto = { | 2241 | .alg.aead = { |
| 2289 | .cra_name = "authenc(hmac(sha384),cbc(aes))", | 2242 | .base = { |
| 2290 | .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos", | 2243 | .cra_name = "authenc(hmac(sha384),cbc(aes))", |
| 2291 | .cra_blocksize = AES_BLOCK_SIZE, | 2244 | .cra_driver_name = "authenc-hmac-sha384-" |
| 2292 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2245 | "cbc-aes-talitos", |
| 2293 | .cra_aead = { | 2246 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2294 | .ivsize = AES_BLOCK_SIZE, | 2247 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2295 | .maxauthsize = SHA384_DIGEST_SIZE, | 2248 | }, |
| 2296 | } | 2249 | .ivsize = AES_BLOCK_SIZE, |
| 2250 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 2297 | }, | 2251 | }, |
| 2298 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2252 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2299 | DESC_HDR_SEL0_AESU | | 2253 | DESC_HDR_SEL0_AESU | |
| @@ -2304,15 +2258,17 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2304 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, | 2258 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, |
| 2305 | }, | 2259 | }, |
| 2306 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2260 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2307 | .alg.crypto = { | 2261 | .alg.aead = { |
| 2308 | .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", | 2262 | .base = { |
| 2309 | .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos", | 2263 | .cra_name = "authenc(hmac(sha384)," |
| 2310 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2264 | "cbc(des3_ede))", |
| 2311 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2265 | .cra_driver_name = "authenc-hmac-sha384-" |
| 2312 | .cra_aead = { | 2266 | "cbc-3des-talitos", |
| 2313 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2267 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 2314 | .maxauthsize = SHA384_DIGEST_SIZE, | 2268 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2315 | } | 2269 | }, |
| 2270 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2271 | .maxauthsize = SHA384_DIGEST_SIZE, | ||
| 2316 | }, | 2272 | }, |
| 2317 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2273 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2318 | DESC_HDR_SEL0_DEU | | 2274 | DESC_HDR_SEL0_DEU | |
| @@ -2324,15 +2280,16 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2324 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, | 2280 | DESC_HDR_MODE1_MDEUB_SHA384_HMAC, |
| 2325 | }, | 2281 | }, |
| 2326 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2282 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2327 | .alg.crypto = { | 2283 | .alg.aead = { |
| 2328 | .cra_name = "authenc(hmac(sha512),cbc(aes))", | 2284 | .base = { |
| 2329 | .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos", | 2285 | .cra_name = "authenc(hmac(sha512),cbc(aes))", |
| 2330 | .cra_blocksize = AES_BLOCK_SIZE, | 2286 | .cra_driver_name = "authenc-hmac-sha512-" |
| 2331 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2287 | "cbc-aes-talitos", |
| 2332 | .cra_aead = { | 2288 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2333 | .ivsize = AES_BLOCK_SIZE, | 2289 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2334 | .maxauthsize = SHA512_DIGEST_SIZE, | 2290 | }, |
| 2335 | } | 2291 | .ivsize = AES_BLOCK_SIZE, |
| 2292 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 2336 | }, | 2293 | }, |
| 2337 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2294 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2338 | DESC_HDR_SEL0_AESU | | 2295 | DESC_HDR_SEL0_AESU | |
| @@ -2343,15 +2300,17 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2343 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, | 2300 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, |
| 2344 | }, | 2301 | }, |
| 2345 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2302 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2346 | .alg.crypto = { | 2303 | .alg.aead = { |
| 2347 | .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", | 2304 | .base = { |
| 2348 | .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos", | 2305 | .cra_name = "authenc(hmac(sha512)," |
| 2349 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2306 | "cbc(des3_ede))", |
| 2350 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2307 | .cra_driver_name = "authenc-hmac-sha512-" |
| 2351 | .cra_aead = { | 2308 | "cbc-3des-talitos", |
| 2352 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2309 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 2353 | .maxauthsize = SHA512_DIGEST_SIZE, | 2310 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2354 | } | 2311 | }, |
| 2312 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
| 2313 | .maxauthsize = SHA512_DIGEST_SIZE, | ||
| 2355 | }, | 2314 | }, |
| 2356 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2315 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2357 | DESC_HDR_SEL0_DEU | | 2316 | DESC_HDR_SEL0_DEU | |
| @@ -2363,15 +2322,16 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2363 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, | 2322 | DESC_HDR_MODE1_MDEUB_SHA512_HMAC, |
| 2364 | }, | 2323 | }, |
| 2365 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2324 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2366 | .alg.crypto = { | 2325 | .alg.aead = { |
| 2367 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 2326 | .base = { |
| 2368 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 2327 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
| 2369 | .cra_blocksize = AES_BLOCK_SIZE, | 2328 | .cra_driver_name = "authenc-hmac-md5-" |
| 2370 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2329 | "cbc-aes-talitos", |
| 2371 | .cra_aead = { | 2330 | .cra_blocksize = AES_BLOCK_SIZE, |
| 2372 | .ivsize = AES_BLOCK_SIZE, | 2331 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2373 | .maxauthsize = MD5_DIGEST_SIZE, | 2332 | }, |
| 2374 | } | 2333 | .ivsize = AES_BLOCK_SIZE, |
| 2334 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 2375 | }, | 2335 | }, |
| 2376 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2336 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2377 | DESC_HDR_SEL0_AESU | | 2337 | DESC_HDR_SEL0_AESU | |
| @@ -2382,15 +2342,16 @@ static struct talitos_alg_template driver_algs[] = { | |||
| 2382 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 2342 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
| 2383 | }, | 2343 | }, |
| 2384 | { .type = CRYPTO_ALG_TYPE_AEAD, | 2344 | { .type = CRYPTO_ALG_TYPE_AEAD, |
| 2385 | .alg.crypto = { | 2345 | .alg.aead = { |
| 2386 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 2346 | .base = { |
| 2387 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", | 2347 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
| 2388 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2348 | .cra_driver_name = "authenc-hmac-md5-" |
| 2389 | .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 2349 | "cbc-3des-talitos", |
| 2390 | .cra_aead = { | 2350 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
| 2391 | .ivsize = DES3_EDE_BLOCK_SIZE, | 2351 | .cra_flags = CRYPTO_ALG_ASYNC, |
| 2392 | .maxauthsize = MD5_DIGEST_SIZE, | 2352 | }, |
| 2393 | } | 2353 | .ivsize = DES3_EDE_BLOCK_SIZE, |
| 2354 | .maxauthsize = MD5_DIGEST_SIZE, | ||
| 2394 | }, | 2355 | }, |
| 2395 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | | 2356 | .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | |
| 2396 | DESC_HDR_SEL0_DEU | | 2357 | DESC_HDR_SEL0_DEU | |
| @@ -2658,15 +2619,9 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
| 2658 | return 0; | 2619 | return 0; |
| 2659 | } | 2620 | } |
| 2660 | 2621 | ||
| 2661 | static int talitos_cra_init_aead(struct crypto_tfm *tfm) | 2622 | static int talitos_cra_init_aead(struct crypto_aead *tfm) |
| 2662 | { | 2623 | { |
| 2663 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 2624 | talitos_cra_init(crypto_aead_tfm(tfm)); |
| 2664 | |||
| 2665 | talitos_cra_init(tfm); | ||
| 2666 | |||
| 2667 | /* random first IV */ | ||
| 2668 | get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); | ||
| 2669 | |||
| 2670 | return 0; | 2625 | return 0; |
| 2671 | } | 2626 | } |
| 2672 | 2627 | ||
| @@ -2713,9 +2668,9 @@ static int talitos_remove(struct platform_device *ofdev) | |||
| 2713 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | 2668 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { |
| 2714 | switch (t_alg->algt.type) { | 2669 | switch (t_alg->algt.type) { |
| 2715 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 2670 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
| 2716 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 2717 | crypto_unregister_alg(&t_alg->algt.alg.crypto); | ||
| 2718 | break; | 2671 | break; |
| 2672 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 2673 | crypto_unregister_aead(&t_alg->algt.alg.aead); | ||
| 2719 | case CRYPTO_ALG_TYPE_AHASH: | 2674 | case CRYPTO_ALG_TYPE_AHASH: |
| 2720 | crypto_unregister_ahash(&t_alg->algt.alg.hash); | 2675 | crypto_unregister_ahash(&t_alg->algt.alg.hash); |
| 2721 | break; | 2676 | break; |
| @@ -2727,7 +2682,7 @@ static int talitos_remove(struct platform_device *ofdev) | |||
| 2727 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) | 2682 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) |
| 2728 | talitos_unregister_rng(dev); | 2683 | talitos_unregister_rng(dev); |
| 2729 | 2684 | ||
| 2730 | for (i = 0; i < priv->num_channels; i++) | 2685 | for (i = 0; priv->chan && i < priv->num_channels; i++) |
| 2731 | kfree(priv->chan[i].fifo); | 2686 | kfree(priv->chan[i].fifo); |
| 2732 | 2687 | ||
| 2733 | kfree(priv->chan); | 2688 | kfree(priv->chan); |
| @@ -2774,15 +2729,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
| 2774 | alg->cra_ablkcipher.geniv = "eseqiv"; | 2729 | alg->cra_ablkcipher.geniv = "eseqiv"; |
| 2775 | break; | 2730 | break; |
| 2776 | case CRYPTO_ALG_TYPE_AEAD: | 2731 | case CRYPTO_ALG_TYPE_AEAD: |
| 2777 | alg = &t_alg->algt.alg.crypto; | 2732 | alg = &t_alg->algt.alg.aead.base; |
| 2778 | alg->cra_init = talitos_cra_init_aead; | 2733 | t_alg->algt.alg.aead.init = talitos_cra_init_aead; |
| 2779 | alg->cra_type = &crypto_aead_type; | 2734 | t_alg->algt.alg.aead.setkey = aead_setkey; |
| 2780 | alg->cra_aead.setkey = aead_setkey; | 2735 | t_alg->algt.alg.aead.encrypt = aead_encrypt; |
| 2781 | alg->cra_aead.setauthsize = aead_setauthsize; | 2736 | t_alg->algt.alg.aead.decrypt = aead_decrypt; |
| 2782 | alg->cra_aead.encrypt = aead_encrypt; | ||
| 2783 | alg->cra_aead.decrypt = aead_decrypt; | ||
| 2784 | alg->cra_aead.givencrypt = aead_givencrypt; | ||
| 2785 | alg->cra_aead.geniv = "<built-in>"; | ||
| 2786 | break; | 2737 | break; |
| 2787 | case CRYPTO_ALG_TYPE_AHASH: | 2738 | case CRYPTO_ALG_TYPE_AHASH: |
| 2788 | alg = &t_alg->algt.alg.hash.halg.base; | 2739 | alg = &t_alg->algt.alg.hash.halg.base; |
| @@ -3041,7 +2992,7 @@ static int talitos_probe(struct platform_device *ofdev) | |||
| 3041 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2992 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
| 3042 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { | 2993 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { |
| 3043 | struct talitos_crypto_alg *t_alg; | 2994 | struct talitos_crypto_alg *t_alg; |
| 3044 | char *name = NULL; | 2995 | struct crypto_alg *alg = NULL; |
| 3045 | 2996 | ||
| 3046 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); | 2997 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); |
| 3047 | if (IS_ERR(t_alg)) { | 2998 | if (IS_ERR(t_alg)) { |
| @@ -3053,21 +3004,26 @@ static int talitos_probe(struct platform_device *ofdev) | |||
| 3053 | 3004 | ||
| 3054 | switch (t_alg->algt.type) { | 3005 | switch (t_alg->algt.type) { |
| 3055 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 3006 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
| 3056 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 3057 | err = crypto_register_alg( | 3007 | err = crypto_register_alg( |
| 3058 | &t_alg->algt.alg.crypto); | 3008 | &t_alg->algt.alg.crypto); |
| 3059 | name = t_alg->algt.alg.crypto.cra_driver_name; | 3009 | alg = &t_alg->algt.alg.crypto; |
| 3060 | break; | 3010 | break; |
| 3011 | |||
| 3012 | case CRYPTO_ALG_TYPE_AEAD: | ||
| 3013 | err = crypto_register_aead( | ||
| 3014 | &t_alg->algt.alg.aead); | ||
| 3015 | alg = &t_alg->algt.alg.aead.base; | ||
| 3016 | break; | ||
| 3017 | |||
| 3061 | case CRYPTO_ALG_TYPE_AHASH: | 3018 | case CRYPTO_ALG_TYPE_AHASH: |
| 3062 | err = crypto_register_ahash( | 3019 | err = crypto_register_ahash( |
| 3063 | &t_alg->algt.alg.hash); | 3020 | &t_alg->algt.alg.hash); |
| 3064 | name = | 3021 | alg = &t_alg->algt.alg.hash.halg.base; |
| 3065 | t_alg->algt.alg.hash.halg.base.cra_driver_name; | ||
| 3066 | break; | 3022 | break; |
| 3067 | } | 3023 | } |
| 3068 | if (err) { | 3024 | if (err) { |
| 3069 | dev_err(dev, "%s alg registration failed\n", | 3025 | dev_err(dev, "%s alg registration failed\n", |
| 3070 | name); | 3026 | alg->cra_driver_name); |
| 3071 | kfree(t_alg); | 3027 | kfree(t_alg); |
| 3072 | } else | 3028 | } else |
| 3073 | list_add_tail(&t_alg->entry, &priv->alg_list); | 3029 | list_add_tail(&t_alg->entry, &priv->alg_list); |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 314daf55e7f7..0090f3211d68 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
| @@ -52,12 +52,7 @@ struct talitos_ptr { | |||
| 52 | __be32 ptr; /* address */ | 52 | __be32 ptr; /* address */ |
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | static const struct talitos_ptr zero_entry = { | 55 | static const struct talitos_ptr zero_entry; |
| 56 | .len = 0, | ||
| 57 | .j_extent = 0, | ||
| 58 | .eptr = 0, | ||
| 59 | .ptr = 0 | ||
| 60 | }; | ||
| 61 | 56 | ||
| 62 | /* descriptor */ | 57 | /* descriptor */ |
| 63 | struct talitos_desc { | 58 | struct talitos_desc { |
| @@ -154,6 +149,7 @@ struct talitos_private { | |||
| 154 | 149 | ||
| 155 | /* hwrng device */ | 150 | /* hwrng device */ |
| 156 | struct hwrng rng; | 151 | struct hwrng rng; |
| 152 | bool rng_registered; | ||
| 157 | }; | 153 | }; |
| 158 | 154 | ||
| 159 | extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | 155 | extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, |
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index e79e567e43aa..263af709e536 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c | |||
| @@ -84,6 +84,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
| 84 | preempt_disable(); | 84 | preempt_disable(); |
| 85 | pagefault_disable(); | 85 | pagefault_disable(); |
| 86 | enable_kernel_altivec(); | 86 | enable_kernel_altivec(); |
| 87 | enable_kernel_vsx(); | ||
| 87 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); | 88 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
| 88 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); | 89 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); |
| 89 | pagefault_enable(); | 90 | pagefault_enable(); |
| @@ -103,6 +104,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
| 103 | preempt_disable(); | 104 | preempt_disable(); |
| 104 | pagefault_disable(); | 105 | pagefault_disable(); |
| 105 | enable_kernel_altivec(); | 106 | enable_kernel_altivec(); |
| 107 | enable_kernel_vsx(); | ||
| 106 | aes_p8_encrypt(src, dst, &ctx->enc_key); | 108 | aes_p8_encrypt(src, dst, &ctx->enc_key); |
| 107 | pagefault_enable(); | 109 | pagefault_enable(); |
| 108 | preempt_enable(); | 110 | preempt_enable(); |
| @@ -119,6 +121,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
| 119 | preempt_disable(); | 121 | preempt_disable(); |
| 120 | pagefault_disable(); | 122 | pagefault_disable(); |
| 121 | enable_kernel_altivec(); | 123 | enable_kernel_altivec(); |
| 124 | enable_kernel_vsx(); | ||
| 122 | aes_p8_decrypt(src, dst, &ctx->dec_key); | 125 | aes_p8_decrypt(src, dst, &ctx->dec_key); |
| 123 | pagefault_enable(); | 126 | pagefault_enable(); |
| 124 | preempt_enable(); | 127 | preempt_enable(); |
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 7299995c78ec..0b8fe2ec5315 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
| @@ -85,6 +85,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
| 85 | preempt_disable(); | 85 | preempt_disable(); |
| 86 | pagefault_disable(); | 86 | pagefault_disable(); |
| 87 | enable_kernel_altivec(); | 87 | enable_kernel_altivec(); |
| 88 | enable_kernel_vsx(); | ||
| 88 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); | 89 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
| 89 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); | 90 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); |
| 90 | pagefault_enable(); | 91 | pagefault_enable(); |
| @@ -115,6 +116,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | |||
| 115 | preempt_disable(); | 116 | preempt_disable(); |
| 116 | pagefault_disable(); | 117 | pagefault_disable(); |
| 117 | enable_kernel_altivec(); | 118 | enable_kernel_altivec(); |
| 119 | enable_kernel_vsx(); | ||
| 118 | 120 | ||
| 119 | blkcipher_walk_init(&walk, dst, src, nbytes); | 121 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 120 | ret = blkcipher_walk_virt(desc, &walk); | 122 | ret = blkcipher_walk_virt(desc, &walk); |
| @@ -155,6 +157,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | |||
| 155 | preempt_disable(); | 157 | preempt_disable(); |
| 156 | pagefault_disable(); | 158 | pagefault_disable(); |
| 157 | enable_kernel_altivec(); | 159 | enable_kernel_altivec(); |
| 160 | enable_kernel_vsx(); | ||
| 158 | 161 | ||
| 159 | blkcipher_walk_init(&walk, dst, src, nbytes); | 162 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 160 | ret = blkcipher_walk_virt(desc, &walk); | 163 | ret = blkcipher_walk_virt(desc, &walk); |
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 7adae42a7b79..ee1306cd8f59 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c | |||
| @@ -82,6 +82,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
| 82 | 82 | ||
| 83 | pagefault_disable(); | 83 | pagefault_disable(); |
| 84 | enable_kernel_altivec(); | 84 | enable_kernel_altivec(); |
| 85 | enable_kernel_vsx(); | ||
| 85 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); | 86 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
| 86 | pagefault_enable(); | 87 | pagefault_enable(); |
| 87 | 88 | ||
| @@ -100,6 +101,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, | |||
| 100 | 101 | ||
| 101 | pagefault_disable(); | 102 | pagefault_disable(); |
| 102 | enable_kernel_altivec(); | 103 | enable_kernel_altivec(); |
| 104 | enable_kernel_vsx(); | ||
| 103 | aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); | 105 | aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); |
| 104 | pagefault_enable(); | 106 | pagefault_enable(); |
| 105 | 107 | ||
| @@ -113,6 +115,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, | |||
| 113 | struct scatterlist *src, unsigned int nbytes) | 115 | struct scatterlist *src, unsigned int nbytes) |
| 114 | { | 116 | { |
| 115 | int ret; | 117 | int ret; |
| 118 | u64 inc; | ||
| 116 | struct blkcipher_walk walk; | 119 | struct blkcipher_walk walk; |
| 117 | struct p8_aes_ctr_ctx *ctx = | 120 | struct p8_aes_ctr_ctx *ctx = |
| 118 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); | 121 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); |
| @@ -131,6 +134,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, | |||
| 131 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | 134 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
| 132 | pagefault_disable(); | 135 | pagefault_disable(); |
| 133 | enable_kernel_altivec(); | 136 | enable_kernel_altivec(); |
| 137 | enable_kernel_vsx(); | ||
| 134 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, | 138 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, |
| 135 | walk.dst.virt.addr, | 139 | walk.dst.virt.addr, |
| 136 | (nbytes & | 140 | (nbytes & |
| @@ -140,7 +144,12 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, | |||
| 140 | walk.iv); | 144 | walk.iv); |
| 141 | pagefault_enable(); | 145 | pagefault_enable(); |
| 142 | 146 | ||
| 143 | crypto_inc(walk.iv, AES_BLOCK_SIZE); | 147 | /* We need to update IV mostly for last bytes/round */ |
| 148 | inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE; | ||
| 149 | if (inc > 0) | ||
| 150 | while (inc--) | ||
| 151 | crypto_inc(walk.iv, AES_BLOCK_SIZE); | ||
| 152 | |||
| 144 | nbytes &= AES_BLOCK_SIZE - 1; | 153 | nbytes &= AES_BLOCK_SIZE - 1; |
| 145 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 154 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 146 | } | 155 | } |
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl index 6c5c20c6108e..228053921b3f 100644 --- a/drivers/crypto/vmx/aesp8-ppc.pl +++ b/drivers/crypto/vmx/aesp8-ppc.pl | |||
| @@ -1437,28 +1437,28 @@ Load_ctr32_enc_key: | |||
| 1437 | ?vperm v31,v31,$out0,$keyperm | 1437 | ?vperm v31,v31,$out0,$keyperm |
| 1438 | lvx v25,$x10,$key_ # pre-load round[2] | 1438 | lvx v25,$x10,$key_ # pre-load round[2] |
| 1439 | 1439 | ||
| 1440 | vadduwm $two,$one,$one | 1440 | vadduqm $two,$one,$one |
| 1441 | subi $inp,$inp,15 # undo "caller" | 1441 | subi $inp,$inp,15 # undo "caller" |
| 1442 | $SHL $len,$len,4 | 1442 | $SHL $len,$len,4 |
| 1443 | 1443 | ||
| 1444 | vadduwm $out1,$ivec,$one # counter values ... | 1444 | vadduqm $out1,$ivec,$one # counter values ... |
| 1445 | vadduwm $out2,$ivec,$two | 1445 | vadduqm $out2,$ivec,$two |
| 1446 | vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] | 1446 | vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] |
| 1447 | le?li $idx,8 | 1447 | le?li $idx,8 |
| 1448 | vadduwm $out3,$out1,$two | 1448 | vadduqm $out3,$out1,$two |
| 1449 | vxor $out1,$out1,$rndkey0 | 1449 | vxor $out1,$out1,$rndkey0 |
| 1450 | le?lvsl $inpperm,0,$idx | 1450 | le?lvsl $inpperm,0,$idx |
| 1451 | vadduwm $out4,$out2,$two | 1451 | vadduqm $out4,$out2,$two |
| 1452 | vxor $out2,$out2,$rndkey0 | 1452 | vxor $out2,$out2,$rndkey0 |
| 1453 | le?vspltisb $tmp,0x0f | 1453 | le?vspltisb $tmp,0x0f |
| 1454 | vadduwm $out5,$out3,$two | 1454 | vadduqm $out5,$out3,$two |
| 1455 | vxor $out3,$out3,$rndkey0 | 1455 | vxor $out3,$out3,$rndkey0 |
| 1456 | le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u | 1456 | le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u |
| 1457 | vadduwm $out6,$out4,$two | 1457 | vadduqm $out6,$out4,$two |
| 1458 | vxor $out4,$out4,$rndkey0 | 1458 | vxor $out4,$out4,$rndkey0 |
| 1459 | vadduwm $out7,$out5,$two | 1459 | vadduqm $out7,$out5,$two |
| 1460 | vxor $out5,$out5,$rndkey0 | 1460 | vxor $out5,$out5,$rndkey0 |
| 1461 | vadduwm $ivec,$out6,$two # next counter value | 1461 | vadduqm $ivec,$out6,$two # next counter value |
| 1462 | vxor $out6,$out6,$rndkey0 | 1462 | vxor $out6,$out6,$rndkey0 |
| 1463 | vxor $out7,$out7,$rndkey0 | 1463 | vxor $out7,$out7,$rndkey0 |
| 1464 | 1464 | ||
| @@ -1594,27 +1594,27 @@ Loop_ctr32_enc8x_middle: | |||
| 1594 | 1594 | ||
| 1595 | vcipherlast $in0,$out0,$in0 | 1595 | vcipherlast $in0,$out0,$in0 |
| 1596 | vcipherlast $in1,$out1,$in1 | 1596 | vcipherlast $in1,$out1,$in1 |
| 1597 | vadduwm $out1,$ivec,$one # counter values ... | 1597 | vadduqm $out1,$ivec,$one # counter values ... |
| 1598 | vcipherlast $in2,$out2,$in2 | 1598 | vcipherlast $in2,$out2,$in2 |
| 1599 | vadduwm $out2,$ivec,$two | 1599 | vadduqm $out2,$ivec,$two |
| 1600 | vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] | 1600 | vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] |
| 1601 | vcipherlast $in3,$out3,$in3 | 1601 | vcipherlast $in3,$out3,$in3 |
| 1602 | vadduwm $out3,$out1,$two | 1602 | vadduqm $out3,$out1,$two |
| 1603 | vxor $out1,$out1,$rndkey0 | 1603 | vxor $out1,$out1,$rndkey0 |
| 1604 | vcipherlast $in4,$out4,$in4 | 1604 | vcipherlast $in4,$out4,$in4 |
| 1605 | vadduwm $out4,$out2,$two | 1605 | vadduqm $out4,$out2,$two |
| 1606 | vxor $out2,$out2,$rndkey0 | 1606 | vxor $out2,$out2,$rndkey0 |
| 1607 | vcipherlast $in5,$out5,$in5 | 1607 | vcipherlast $in5,$out5,$in5 |
| 1608 | vadduwm $out5,$out3,$two | 1608 | vadduqm $out5,$out3,$two |
| 1609 | vxor $out3,$out3,$rndkey0 | 1609 | vxor $out3,$out3,$rndkey0 |
| 1610 | vcipherlast $in6,$out6,$in6 | 1610 | vcipherlast $in6,$out6,$in6 |
| 1611 | vadduwm $out6,$out4,$two | 1611 | vadduqm $out6,$out4,$two |
| 1612 | vxor $out4,$out4,$rndkey0 | 1612 | vxor $out4,$out4,$rndkey0 |
| 1613 | vcipherlast $in7,$out7,$in7 | 1613 | vcipherlast $in7,$out7,$in7 |
| 1614 | vadduwm $out7,$out5,$two | 1614 | vadduqm $out7,$out5,$two |
| 1615 | vxor $out5,$out5,$rndkey0 | 1615 | vxor $out5,$out5,$rndkey0 |
| 1616 | le?vperm $in0,$in0,$in0,$inpperm | 1616 | le?vperm $in0,$in0,$in0,$inpperm |
| 1617 | vadduwm $ivec,$out6,$two # next counter value | 1617 | vadduqm $ivec,$out6,$two # next counter value |
| 1618 | vxor $out6,$out6,$rndkey0 | 1618 | vxor $out6,$out6,$rndkey0 |
| 1619 | le?vperm $in1,$in1,$in1,$inpperm | 1619 | le?vperm $in1,$in1,$in1,$inpperm |
| 1620 | vxor $out7,$out7,$rndkey0 | 1620 | vxor $out7,$out7,$rndkey0 |
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index b5e29002b666..2183a2e77641 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c | |||
| @@ -119,6 +119,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
| 119 | preempt_disable(); | 119 | preempt_disable(); |
| 120 | pagefault_disable(); | 120 | pagefault_disable(); |
| 121 | enable_kernel_altivec(); | 121 | enable_kernel_altivec(); |
| 122 | enable_kernel_vsx(); | ||
| 122 | enable_kernel_fp(); | 123 | enable_kernel_fp(); |
| 123 | gcm_init_p8(ctx->htable, (const u64 *) key); | 124 | gcm_init_p8(ctx->htable, (const u64 *) key); |
| 124 | pagefault_enable(); | 125 | pagefault_enable(); |
| @@ -149,6 +150,7 @@ static int p8_ghash_update(struct shash_desc *desc, | |||
| 149 | preempt_disable(); | 150 | preempt_disable(); |
| 150 | pagefault_disable(); | 151 | pagefault_disable(); |
| 151 | enable_kernel_altivec(); | 152 | enable_kernel_altivec(); |
| 153 | enable_kernel_vsx(); | ||
| 152 | enable_kernel_fp(); | 154 | enable_kernel_fp(); |
| 153 | gcm_ghash_p8(dctx->shash, ctx->htable, | 155 | gcm_ghash_p8(dctx->shash, ctx->htable, |
| 154 | dctx->buffer, GHASH_DIGEST_SIZE); | 156 | dctx->buffer, GHASH_DIGEST_SIZE); |
| @@ -163,6 +165,7 @@ static int p8_ghash_update(struct shash_desc *desc, | |||
| 163 | preempt_disable(); | 165 | preempt_disable(); |
| 164 | pagefault_disable(); | 166 | pagefault_disable(); |
| 165 | enable_kernel_altivec(); | 167 | enable_kernel_altivec(); |
| 168 | enable_kernel_vsx(); | ||
| 166 | enable_kernel_fp(); | 169 | enable_kernel_fp(); |
| 167 | gcm_ghash_p8(dctx->shash, ctx->htable, src, len); | 170 | gcm_ghash_p8(dctx->shash, ctx->htable, src, len); |
| 168 | pagefault_enable(); | 171 | pagefault_enable(); |
| @@ -193,6 +196,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) | |||
| 193 | preempt_disable(); | 196 | preempt_disable(); |
| 194 | pagefault_disable(); | 197 | pagefault_disable(); |
| 195 | enable_kernel_altivec(); | 198 | enable_kernel_altivec(); |
| 199 | enable_kernel_vsx(); | ||
| 196 | enable_kernel_fp(); | 200 | enable_kernel_fp(); |
| 197 | gcm_ghash_p8(dctx->shash, ctx->htable, | 201 | gcm_ghash_p8(dctx->shash, ctx->htable, |
| 198 | dctx->buffer, GHASH_DIGEST_SIZE); | 202 | dctx->buffer, GHASH_DIGEST_SIZE); |
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl index 0a6f899839dd..d8429cb71f02 100644 --- a/drivers/crypto/vmx/ghashp8-ppc.pl +++ b/drivers/crypto/vmx/ghashp8-ppc.pl | |||
| @@ -61,6 +61,12 @@ $code=<<___; | |||
| 61 | mtspr 256,r0 | 61 | mtspr 256,r0 |
| 62 | li r10,0x30 | 62 | li r10,0x30 |
| 63 | lvx_u $H,0,r4 # load H | 63 | lvx_u $H,0,r4 # load H |
| 64 | le?xor r7,r7,r7 | ||
| 65 | le?addi r7,r7,0x8 # need a vperm start with 08 | ||
| 66 | le?lvsr 5,0,r7 | ||
| 67 | le?vspltisb 6,0x0f | ||
| 68 | le?vxor 5,5,6 # set a b-endian mask | ||
| 69 | le?vperm $H,$H,$H,5 | ||
| 64 | 70 | ||
| 65 | vspltisb $xC2,-16 # 0xf0 | 71 | vspltisb $xC2,-16 # 0xf0 |
| 66 | vspltisb $t0,1 # one | 72 | vspltisb $t0,1 # one |
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl index a59188494af8..b9997335f193 100644 --- a/drivers/crypto/vmx/ppc-xlate.pl +++ b/drivers/crypto/vmx/ppc-xlate.pl | |||
| @@ -169,6 +169,7 @@ my $vpmsumd = sub { vcrypto_op(@_, 1224); }; | |||
| 169 | my $vpmsubh = sub { vcrypto_op(@_, 1096); }; | 169 | my $vpmsubh = sub { vcrypto_op(@_, 1096); }; |
| 170 | my $vpmsumw = sub { vcrypto_op(@_, 1160); }; | 170 | my $vpmsumw = sub { vcrypto_op(@_, 1160); }; |
| 171 | my $vaddudm = sub { vcrypto_op(@_, 192); }; | 171 | my $vaddudm = sub { vcrypto_op(@_, 192); }; |
| 172 | my $vadduqm = sub { vcrypto_op(@_, 256); }; | ||
| 172 | 173 | ||
| 173 | my $mtsle = sub { | 174 | my $mtsle = sub { |
| 174 | my ($f, $arg) = @_; | 175 | my ($f, $arg) = @_; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index de5f610e0810..6a30252cd79f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -4048,3 +4048,88 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev) | |||
| 4048 | } | 4048 | } |
| 4049 | } | 4049 | } |
| 4050 | } | 4050 | } |
| 4051 | |||
| 4052 | /* | ||
| 4053 | * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with | ||
| 4054 | * QuickAssist Technology (QAT) is prematurely terminated in hardware. The | ||
| 4055 | * Next Capability pointer in the MSI Capability Structure should point to | ||
| 4056 | * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating | ||
| 4057 | * the list. | ||
| 4058 | */ | ||
| 4059 | static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) | ||
| 4060 | { | ||
| 4061 | int pos, i = 0; | ||
| 4062 | u8 next_cap; | ||
| 4063 | u16 reg16, *cap; | ||
| 4064 | struct pci_cap_saved_state *state; | ||
| 4065 | |||
| 4066 | /* Bail if the hardware bug is fixed */ | ||
| 4067 | if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP)) | ||
| 4068 | return; | ||
| 4069 | |||
| 4070 | /* Bail if MSI Capability Structure is not found for some reason */ | ||
| 4071 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
| 4072 | if (!pos) | ||
| 4073 | return; | ||
| 4074 | |||
| 4075 | /* | ||
| 4076 | * Bail if Next Capability pointer in the MSI Capability Structure | ||
| 4077 | * is not the expected incorrect 0x00. | ||
| 4078 | */ | ||
| 4079 | pci_read_config_byte(pdev, pos + 1, &next_cap); | ||
| 4080 | if (next_cap) | ||
| 4081 | return; | ||
| 4082 | |||
| 4083 | /* | ||
| 4084 | * PCIe Capability Structure is expected to be at 0x50 and should | ||
| 4085 | * terminate the list (Next Capability pointer is 0x00). Verify | ||
| 4086 | * Capability Id and Next Capability pointer is as expected. | ||
| 4087 | * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext() | ||
| 4088 | * to correctly set kernel data structures which have already been | ||
| 4089 | * set incorrectly due to the hardware bug. | ||
| 4090 | */ | ||
| 4091 | pos = 0x50; | ||
| 4092 | pci_read_config_word(pdev, pos, ®16); | ||
| 4093 | if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) { | ||
| 4094 | u32 status; | ||
| 4095 | #ifndef PCI_EXP_SAVE_REGS | ||
| 4096 | #define PCI_EXP_SAVE_REGS 7 | ||
| 4097 | #endif | ||
| 4098 | int size = PCI_EXP_SAVE_REGS * sizeof(u16); | ||
| 4099 | |||
| 4100 | pdev->pcie_cap = pos; | ||
| 4101 | pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); | ||
| 4102 | pdev->pcie_flags_reg = reg16; | ||
| 4103 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); | ||
| 4104 | pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; | ||
| 4105 | |||
| 4106 | pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; | ||
| 4107 | if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) != | ||
| 4108 | PCIBIOS_SUCCESSFUL || (status == 0xffffffff)) | ||
| 4109 | pdev->cfg_size = PCI_CFG_SPACE_SIZE; | ||
| 4110 | |||
| 4111 | if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP)) | ||
| 4112 | return; | ||
| 4113 | |||
| 4114 | /* | ||
| 4115 | * Save PCIE cap | ||
| 4116 | */ | ||
| 4117 | state = kzalloc(sizeof(*state) + size, GFP_KERNEL); | ||
| 4118 | if (!state) | ||
| 4119 | return; | ||
| 4120 | |||
| 4121 | state->cap.cap_nr = PCI_CAP_ID_EXP; | ||
| 4122 | state->cap.cap_extended = 0; | ||
| 4123 | state->cap.size = size; | ||
| 4124 | cap = (u16 *)&state->cap.data[0]; | ||
| 4125 | pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]); | ||
| 4126 | pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]); | ||
| 4127 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]); | ||
| 4128 | pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]); | ||
| 4129 | pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]); | ||
| 4130 | pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]); | ||
| 4131 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]); | ||
| 4132 | hlist_add_head(&state->next, &pdev->saved_cap_space); | ||
| 4133 | } | ||
| 4134 | } | ||
| 4135 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); | ||
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 7169ad04acc0..077cae1e6b51 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * AEAD: Authenticated Encryption with Associated Data | 2 | * AEAD: Authenticated Encryption with Associated Data |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
| @@ -45,16 +45,40 @@ | |||
| 45 | * a breach in the integrity of the message. In essence, that -EBADMSG error | 45 | * a breach in the integrity of the message. In essence, that -EBADMSG error |
| 46 | * code is the key bonus an AEAD cipher has over "standard" block chaining | 46 | * code is the key bonus an AEAD cipher has over "standard" block chaining |
| 47 | * modes. | 47 | * modes. |
| 48 | * | ||
| 49 | * Memory Structure: | ||
| 50 | * | ||
| 51 | * To support the needs of the most prominent user of AEAD ciphers, namely | ||
| 52 | * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere | ||
| 53 | * to. | ||
| 54 | * | ||
| 55 | * The scatter list pointing to the input data must contain: | ||
| 56 | * | ||
| 57 | * * for RFC4106 ciphers, the concatenation of | ||
| 58 | * associated authentication data || IV || plaintext or ciphertext. Note, the | ||
| 59 | * same IV (buffer) is also set with the aead_request_set_crypt call. Note, | ||
| 60 | * the API call of aead_request_set_ad must provide the length of the AAD and | ||
| 61 | * the IV. The API call of aead_request_set_crypt only points to the size of | ||
| 62 | * the input plaintext or ciphertext. | ||
| 63 | * | ||
| 64 | * * for "normal" AEAD ciphers, the concatenation of | ||
| 65 | * associated authentication data || plaintext or ciphertext. | ||
| 66 | * | ||
| 67 | * It is important to note that if multiple scatter gather list entries form | ||
| 68 | * the input data mentioned above, the first entry must not point to a NULL | ||
| 69 | * buffer. If there is any potential where the AAD buffer can be NULL, the | ||
| 70 | * calling code must contain a precaution to ensure that this does not result | ||
| 71 | * in the first scatter gather list entry pointing to a NULL buffer. | ||
| 48 | */ | 72 | */ |
| 49 | 73 | ||
| 74 | struct crypto_aead; | ||
| 75 | |||
| 50 | /** | 76 | /** |
| 51 | * struct aead_request - AEAD request | 77 | * struct aead_request - AEAD request |
| 52 | * @base: Common attributes for async crypto requests | 78 | * @base: Common attributes for async crypto requests |
| 53 | * @old: Boolean whether the old or new AEAD API is used | ||
| 54 | * @assoclen: Length in bytes of associated data for authentication | 79 | * @assoclen: Length in bytes of associated data for authentication |
| 55 | * @cryptlen: Length of data to be encrypted or decrypted | 80 | * @cryptlen: Length of data to be encrypted or decrypted |
| 56 | * @iv: Initialisation vector | 81 | * @iv: Initialisation vector |
| 57 | * @assoc: Associated data | ||
| 58 | * @src: Source data | 82 | * @src: Source data |
| 59 | * @dst: Destination data | 83 | * @dst: Destination data |
| 60 | * @__ctx: Start of private context data | 84 | * @__ctx: Start of private context data |
| @@ -62,14 +86,11 @@ | |||
| 62 | struct aead_request { | 86 | struct aead_request { |
| 63 | struct crypto_async_request base; | 87 | struct crypto_async_request base; |
| 64 | 88 | ||
| 65 | bool old; | ||
| 66 | |||
| 67 | unsigned int assoclen; | 89 | unsigned int assoclen; |
| 68 | unsigned int cryptlen; | 90 | unsigned int cryptlen; |
| 69 | 91 | ||
| 70 | u8 *iv; | 92 | u8 *iv; |
| 71 | 93 | ||
| 72 | struct scatterlist *assoc; | ||
| 73 | struct scatterlist *src; | 94 | struct scatterlist *src; |
| 74 | struct scatterlist *dst; | 95 | struct scatterlist *dst; |
| 75 | 96 | ||
| @@ -77,19 +98,6 @@ struct aead_request { | |||
| 77 | }; | 98 | }; |
| 78 | 99 | ||
| 79 | /** | 100 | /** |
| 80 | * struct aead_givcrypt_request - AEAD request with IV generation | ||
| 81 | * @seq: Sequence number for IV generation | ||
| 82 | * @giv: Space for generated IV | ||
| 83 | * @areq: The AEAD request itself | ||
| 84 | */ | ||
| 85 | struct aead_givcrypt_request { | ||
| 86 | u64 seq; | ||
| 87 | u8 *giv; | ||
| 88 | |||
| 89 | struct aead_request areq; | ||
| 90 | }; | ||
| 91 | |||
| 92 | /** | ||
| 93 | * struct aead_alg - AEAD cipher definition | 101 | * struct aead_alg - AEAD cipher definition |
| 94 | * @maxauthsize: Set the maximum authentication tag size supported by the | 102 | * @maxauthsize: Set the maximum authentication tag size supported by the |
| 95 | * transformation. A transformation may support smaller tag sizes. | 103 | * transformation. A transformation may support smaller tag sizes. |
| @@ -141,16 +149,6 @@ struct aead_alg { | |||
| 141 | }; | 149 | }; |
| 142 | 150 | ||
| 143 | struct crypto_aead { | 151 | struct crypto_aead { |
| 144 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | ||
| 145 | unsigned int keylen); | ||
| 146 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); | ||
| 147 | int (*encrypt)(struct aead_request *req); | ||
| 148 | int (*decrypt)(struct aead_request *req); | ||
| 149 | int (*givencrypt)(struct aead_givcrypt_request *req); | ||
| 150 | int (*givdecrypt)(struct aead_givcrypt_request *req); | ||
| 151 | |||
| 152 | struct crypto_aead *child; | ||
| 153 | |||
| 154 | unsigned int authsize; | 152 | unsigned int authsize; |
| 155 | unsigned int reqsize; | 153 | unsigned int reqsize; |
| 156 | 154 | ||
| @@ -192,16 +190,6 @@ static inline void crypto_free_aead(struct crypto_aead *tfm) | |||
| 192 | crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); | 190 | crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); |
| 193 | } | 191 | } |
| 194 | 192 | ||
| 195 | static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm) | ||
| 196 | { | ||
| 197 | return tfm; | ||
| 198 | } | ||
| 199 | |||
| 200 | static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm) | ||
| 201 | { | ||
| 202 | return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead; | ||
| 203 | } | ||
| 204 | |||
| 205 | static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) | 193 | static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) |
| 206 | { | 194 | { |
| 207 | return container_of(crypto_aead_tfm(tfm)->__crt_alg, | 195 | return container_of(crypto_aead_tfm(tfm)->__crt_alg, |
| @@ -210,8 +198,7 @@ static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) | |||
| 210 | 198 | ||
| 211 | static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) | 199 | static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) |
| 212 | { | 200 | { |
| 213 | return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize : | 201 | return alg->ivsize; |
| 214 | alg->ivsize; | ||
| 215 | } | 202 | } |
| 216 | 203 | ||
| 217 | /** | 204 | /** |
| @@ -337,7 +324,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | |||
| 337 | */ | 324 | */ |
| 338 | static inline int crypto_aead_encrypt(struct aead_request *req) | 325 | static inline int crypto_aead_encrypt(struct aead_request *req) |
| 339 | { | 326 | { |
| 340 | return crypto_aead_reqtfm(req)->encrypt(req); | 327 | return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); |
| 341 | } | 328 | } |
| 342 | 329 | ||
| 343 | /** | 330 | /** |
| @@ -364,10 +351,12 @@ static inline int crypto_aead_encrypt(struct aead_request *req) | |||
| 364 | */ | 351 | */ |
| 365 | static inline int crypto_aead_decrypt(struct aead_request *req) | 352 | static inline int crypto_aead_decrypt(struct aead_request *req) |
| 366 | { | 353 | { |
| 367 | if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req))) | 354 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
| 355 | |||
| 356 | if (req->cryptlen < crypto_aead_authsize(aead)) | ||
| 368 | return -EINVAL; | 357 | return -EINVAL; |
| 369 | 358 | ||
| 370 | return crypto_aead_reqtfm(req)->decrypt(req); | 359 | return crypto_aead_alg(aead)->decrypt(req); |
| 371 | } | 360 | } |
| 372 | 361 | ||
| 373 | /** | 362 | /** |
| @@ -387,7 +376,10 @@ static inline int crypto_aead_decrypt(struct aead_request *req) | |||
| 387 | * | 376 | * |
| 388 | * Return: number of bytes | 377 | * Return: number of bytes |
| 389 | */ | 378 | */ |
| 390 | unsigned int crypto_aead_reqsize(struct crypto_aead *tfm); | 379 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) |
| 380 | { | ||
| 381 | return tfm->reqsize; | ||
| 382 | } | ||
| 391 | 383 | ||
| 392 | /** | 384 | /** |
| 393 | * aead_request_set_tfm() - update cipher handle reference in request | 385 | * aead_request_set_tfm() - update cipher handle reference in request |
| @@ -400,7 +392,7 @@ unsigned int crypto_aead_reqsize(struct crypto_aead *tfm); | |||
| 400 | static inline void aead_request_set_tfm(struct aead_request *req, | 392 | static inline void aead_request_set_tfm(struct aead_request *req, |
| 401 | struct crypto_aead *tfm) | 393 | struct crypto_aead *tfm) |
| 402 | { | 394 | { |
| 403 | req->base.tfm = crypto_aead_tfm(tfm->child); | 395 | req->base.tfm = crypto_aead_tfm(tfm); |
| 404 | } | 396 | } |
| 405 | 397 | ||
| 406 | /** | 398 | /** |
| @@ -526,23 +518,6 @@ static inline void aead_request_set_crypt(struct aead_request *req, | |||
| 526 | } | 518 | } |
| 527 | 519 | ||
| 528 | /** | 520 | /** |
| 529 | * aead_request_set_assoc() - set the associated data scatter / gather list | ||
| 530 | * @req: request handle | ||
| 531 | * @assoc: associated data scatter / gather list | ||
| 532 | * @assoclen: number of bytes to process from @assoc | ||
| 533 | * | ||
| 534 | * Obsolete, do not use. | ||
| 535 | */ | ||
| 536 | static inline void aead_request_set_assoc(struct aead_request *req, | ||
| 537 | struct scatterlist *assoc, | ||
| 538 | unsigned int assoclen) | ||
| 539 | { | ||
| 540 | req->assoc = assoc; | ||
| 541 | req->assoclen = assoclen; | ||
| 542 | req->old = true; | ||
| 543 | } | ||
| 544 | |||
| 545 | /** | ||
| 546 | * aead_request_set_ad - set associated data information | 521 | * aead_request_set_ad - set associated data information |
| 547 | * @req: request handle | 522 | * @req: request handle |
| 548 | * @assoclen: number of bytes in associated data | 523 | * @assoclen: number of bytes in associated data |
| @@ -554,77 +529,6 @@ static inline void aead_request_set_ad(struct aead_request *req, | |||
| 554 | unsigned int assoclen) | 529 | unsigned int assoclen) |
| 555 | { | 530 | { |
| 556 | req->assoclen = assoclen; | 531 | req->assoclen = assoclen; |
| 557 | req->old = false; | ||
| 558 | } | ||
| 559 | |||
| 560 | static inline struct crypto_aead *aead_givcrypt_reqtfm( | ||
| 561 | struct aead_givcrypt_request *req) | ||
| 562 | { | ||
| 563 | return crypto_aead_reqtfm(&req->areq); | ||
| 564 | } | ||
| 565 | |||
| 566 | static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req) | ||
| 567 | { | ||
| 568 | return aead_givcrypt_reqtfm(req)->givencrypt(req); | ||
| 569 | }; | ||
| 570 | |||
| 571 | static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) | ||
| 572 | { | ||
| 573 | return aead_givcrypt_reqtfm(req)->givdecrypt(req); | ||
| 574 | }; | ||
| 575 | |||
| 576 | static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, | ||
| 577 | struct crypto_aead *tfm) | ||
| 578 | { | ||
| 579 | req->areq.base.tfm = crypto_aead_tfm(tfm); | ||
| 580 | } | ||
| 581 | |||
| 582 | static inline struct aead_givcrypt_request *aead_givcrypt_alloc( | ||
| 583 | struct crypto_aead *tfm, gfp_t gfp) | ||
| 584 | { | ||
| 585 | struct aead_givcrypt_request *req; | ||
| 586 | |||
| 587 | req = kmalloc(sizeof(struct aead_givcrypt_request) + | ||
| 588 | crypto_aead_reqsize(tfm), gfp); | ||
| 589 | |||
| 590 | if (likely(req)) | ||
| 591 | aead_givcrypt_set_tfm(req, tfm); | ||
| 592 | |||
| 593 | return req; | ||
| 594 | } | ||
| 595 | |||
| 596 | static inline void aead_givcrypt_free(struct aead_givcrypt_request *req) | ||
| 597 | { | ||
| 598 | kfree(req); | ||
| 599 | } | ||
| 600 | |||
| 601 | static inline void aead_givcrypt_set_callback( | ||
| 602 | struct aead_givcrypt_request *req, u32 flags, | ||
| 603 | crypto_completion_t compl, void *data) | ||
| 604 | { | ||
| 605 | aead_request_set_callback(&req->areq, flags, compl, data); | ||
| 606 | } | ||
| 607 | |||
| 608 | static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, | ||
| 609 | struct scatterlist *src, | ||
| 610 | struct scatterlist *dst, | ||
| 611 | unsigned int nbytes, void *iv) | ||
| 612 | { | ||
| 613 | aead_request_set_crypt(&req->areq, src, dst, nbytes, iv); | ||
| 614 | } | ||
| 615 | |||
| 616 | static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req, | ||
| 617 | struct scatterlist *assoc, | ||
| 618 | unsigned int assoclen) | ||
| 619 | { | ||
| 620 | aead_request_set_assoc(&req->areq, assoc, assoclen); | ||
| 621 | } | ||
| 622 | |||
| 623 | static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req, | ||
| 624 | u8 *giv, u64 seq) | ||
| 625 | { | ||
| 626 | req->giv = giv; | ||
| 627 | req->seq = seq; | ||
| 628 | } | 532 | } |
| 629 | 533 | ||
| 630 | #endif /* _CRYPTO_AEAD_H */ | 534 | #endif /* _CRYPTO_AEAD_H */ |
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index d4ebf6e9af6a..c9fe145f7dd3 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
| 19 | 19 | ||
| 20 | struct crypto_aead; | 20 | struct crypto_aead; |
| 21 | struct crypto_instance; | ||
| 21 | struct module; | 22 | struct module; |
| 22 | struct rtattr; | 23 | struct rtattr; |
| 23 | struct seq_file; | 24 | struct seq_file; |
| @@ -30,6 +31,7 @@ struct crypto_type { | |||
| 30 | void (*show)(struct seq_file *m, struct crypto_alg *alg); | 31 | void (*show)(struct seq_file *m, struct crypto_alg *alg); |
| 31 | int (*report)(struct sk_buff *skb, struct crypto_alg *alg); | 32 | int (*report)(struct sk_buff *skb, struct crypto_alg *alg); |
| 32 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | 33 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); |
| 34 | void (*free)(struct crypto_instance *inst); | ||
| 33 | 35 | ||
| 34 | unsigned int type; | 36 | unsigned int type; |
| 35 | unsigned int maskclear; | 37 | unsigned int maskclear; |
| @@ -180,7 +182,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name, | |||
| 180 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); | 182 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); |
| 181 | int crypto_enqueue_request(struct crypto_queue *queue, | 183 | int crypto_enqueue_request(struct crypto_queue *queue, |
| 182 | struct crypto_async_request *request); | 184 | struct crypto_async_request *request); |
| 183 | void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset); | ||
| 184 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); | 185 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); |
| 185 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); | 186 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); |
| 186 | 187 | ||
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h new file mode 100644 index 000000000000..274bbaeeed0f --- /dev/null +++ b/include/crypto/chacha20.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* | ||
| 2 | * Common values for the ChaCha20 algorithm | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _CRYPTO_CHACHA20_H | ||
| 6 | #define _CRYPTO_CHACHA20_H | ||
| 7 | |||
| 8 | #include <linux/types.h> | ||
| 9 | #include <linux/crypto.h> | ||
| 10 | |||
| 11 | #define CHACHA20_IV_SIZE 16 | ||
| 12 | #define CHACHA20_KEY_SIZE 32 | ||
| 13 | #define CHACHA20_BLOCK_SIZE 64 | ||
| 14 | |||
| 15 | struct chacha20_ctx { | ||
| 16 | u32 key[8]; | ||
| 17 | }; | ||
| 18 | |||
| 19 | void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); | ||
| 20 | int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
| 21 | unsigned int keysize); | ||
| 22 | int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
| 23 | struct scatterlist *src, unsigned int nbytes); | ||
| 24 | |||
| 25 | #endif | ||
diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 57c8a6ee33c2..8e920b44c0ac 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h | |||
| @@ -63,6 +63,11 @@ struct ahash_request { | |||
| 63 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | 63 | void *__ctx[] CRYPTO_MINALIGN_ATTR; |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | #define AHASH_REQUEST_ON_STACK(name, ahash) \ | ||
| 67 | char __##name##_desc[sizeof(struct ahash_request) + \ | ||
| 68 | crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \ | ||
| 69 | struct ahash_request *name = (void *)__##name##_desc | ||
| 70 | |||
| 66 | /** | 71 | /** |
| 67 | * struct ahash_alg - asynchronous message digest definition | 72 | * struct ahash_alg - asynchronous message digest definition |
| 68 | * @init: Initialize the transformation context. Intended only to initialize the | 73 | * @init: Initialize the transformation context. Intended only to initialize the |
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index 4b2547186519..5554cdd8d6c1 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * AEAD: Authenticated Encryption with Associated Data | 2 | * AEAD: Authenticated Encryption with Associated Data |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
| @@ -21,6 +21,7 @@ | |||
| 21 | struct rtattr; | 21 | struct rtattr; |
| 22 | 22 | ||
| 23 | struct aead_instance { | 23 | struct aead_instance { |
| 24 | void (*free)(struct aead_instance *inst); | ||
| 24 | union { | 25 | union { |
| 25 | struct { | 26 | struct { |
| 26 | char head[offsetof(struct aead_alg, base)]; | 27 | char head[offsetof(struct aead_alg, base)]; |
| @@ -34,20 +35,15 @@ struct crypto_aead_spawn { | |||
| 34 | struct crypto_spawn base; | 35 | struct crypto_spawn base; |
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| 37 | extern const struct crypto_type crypto_aead_type; | 38 | struct aead_queue { |
| 38 | extern const struct crypto_type crypto_nivaead_type; | 39 | struct crypto_queue base; |
| 40 | }; | ||
| 39 | 41 | ||
| 40 | static inline void *crypto_aead_ctx(struct crypto_aead *tfm) | 42 | static inline void *crypto_aead_ctx(struct crypto_aead *tfm) |
| 41 | { | 43 | { |
| 42 | return crypto_tfm_ctx(&tfm->base); | 44 | return crypto_tfm_ctx(&tfm->base); |
| 43 | } | 45 | } |
| 44 | 46 | ||
| 45 | static inline struct crypto_instance *crypto_aead_alg_instance( | ||
| 46 | struct crypto_aead *aead) | ||
| 47 | { | ||
| 48 | return crypto_tfm_alg_instance(&aead->base); | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline struct crypto_instance *aead_crypto_instance( | 47 | static inline struct crypto_instance *aead_crypto_instance( |
| 52 | struct aead_instance *inst) | 48 | struct aead_instance *inst) |
| 53 | { | 49 | { |
| @@ -61,7 +57,7 @@ static inline struct aead_instance *aead_instance(struct crypto_instance *inst) | |||
| 61 | 57 | ||
| 62 | static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) | 58 | static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) |
| 63 | { | 59 | { |
| 64 | return aead_instance(crypto_aead_alg_instance(aead)); | 60 | return aead_instance(crypto_tfm_alg_instance(&aead->base)); |
| 65 | } | 61 | } |
| 66 | 62 | ||
| 67 | static inline void *aead_instance_ctx(struct aead_instance *inst) | 63 | static inline void *aead_instance_ctx(struct aead_instance *inst) |
| @@ -90,8 +86,6 @@ static inline void crypto_set_aead_spawn( | |||
| 90 | crypto_set_spawn(&spawn->base, inst); | 86 | crypto_set_spawn(&spawn->base, inst); |
| 91 | } | 87 | } |
| 92 | 88 | ||
| 93 | struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask); | ||
| 94 | |||
| 95 | int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, | 89 | int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, |
| 96 | u32 type, u32 mask); | 90 | u32 type, u32 mask); |
| 97 | 91 | ||
| @@ -100,12 +94,6 @@ static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) | |||
| 100 | crypto_drop_spawn(&spawn->base); | 94 | crypto_drop_spawn(&spawn->base); |
| 101 | } | 95 | } |
| 102 | 96 | ||
| 103 | static inline struct crypto_alg *crypto_aead_spawn_alg( | ||
| 104 | struct crypto_aead_spawn *spawn) | ||
| 105 | { | ||
| 106 | return spawn->base.alg; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline struct aead_alg *crypto_spawn_aead_alg( | 97 | static inline struct aead_alg *crypto_spawn_aead_alg( |
| 110 | struct crypto_aead_spawn *spawn) | 98 | struct crypto_aead_spawn *spawn) |
| 111 | { | 99 | { |
| @@ -118,43 +106,51 @@ static inline struct crypto_aead *crypto_spawn_aead( | |||
| 118 | return crypto_spawn_tfm2(&spawn->base); | 106 | return crypto_spawn_tfm2(&spawn->base); |
| 119 | } | 107 | } |
| 120 | 108 | ||
| 121 | struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | 109 | static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, |
| 122 | struct rtattr **tb, u32 type, u32 mask); | 110 | unsigned int reqsize) |
| 123 | void aead_geniv_free(struct aead_instance *inst); | 111 | { |
| 124 | int aead_geniv_init(struct crypto_tfm *tfm); | 112 | aead->reqsize = reqsize; |
| 125 | void aead_geniv_exit(struct crypto_tfm *tfm); | 113 | } |
| 126 | 114 | ||
| 127 | static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) | 115 | static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) |
| 128 | { | 116 | { |
| 129 | return geniv->child; | 117 | return alg->maxauthsize; |
| 130 | } | 118 | } |
| 131 | 119 | ||
| 132 | static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) | 120 | static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) |
| 133 | { | 121 | { |
| 134 | return aead_request_ctx(&req->areq); | 122 | return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); |
| 135 | } | 123 | } |
| 136 | 124 | ||
| 137 | static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req, | 125 | static inline void aead_init_queue(struct aead_queue *queue, |
| 138 | int err) | 126 | unsigned int max_qlen) |
| 139 | { | 127 | { |
| 140 | aead_request_complete(&req->areq, err); | 128 | crypto_init_queue(&queue->base, max_qlen); |
| 141 | } | 129 | } |
| 142 | 130 | ||
| 143 | static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, | 131 | static inline int aead_enqueue_request(struct aead_queue *queue, |
| 144 | unsigned int reqsize) | 132 | struct aead_request *request) |
| 145 | { | 133 | { |
| 146 | crypto_aead_crt(aead)->reqsize = reqsize; | 134 | return crypto_enqueue_request(&queue->base, &request->base); |
| 147 | } | 135 | } |
| 148 | 136 | ||
| 149 | static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) | 137 | static inline struct aead_request *aead_dequeue_request( |
| 138 | struct aead_queue *queue) | ||
| 150 | { | 139 | { |
| 151 | return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize : | 140 | struct crypto_async_request *req; |
| 152 | alg->maxauthsize; | 141 | |
| 142 | req = crypto_dequeue_request(&queue->base); | ||
| 143 | |||
| 144 | return req ? container_of(req, struct aead_request, base) : NULL; | ||
| 153 | } | 145 | } |
| 154 | 146 | ||
| 155 | static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) | 147 | static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) |
| 156 | { | 148 | { |
| 157 | return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); | 149 | struct crypto_async_request *req; |
| 150 | |||
| 151 | req = crypto_get_backlog(&queue->base); | ||
| 152 | |||
| 153 | return req ? container_of(req, struct aead_request, base) : NULL; | ||
| 158 | } | 154 | } |
| 159 | 155 | ||
| 160 | int crypto_register_aead(struct aead_alg *alg); | 156 | int crypto_register_aead(struct aead_alg *alg); |
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 9ca9b871aba5..59333635e712 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h | |||
| @@ -15,10 +15,19 @@ | |||
| 15 | 15 | ||
| 16 | #include <crypto/internal/aead.h> | 16 | #include <crypto/internal/aead.h> |
| 17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
| 18 | #include <linux/types.h> | ||
| 18 | 19 | ||
| 19 | struct aead_geniv_ctx { | 20 | struct aead_geniv_ctx { |
| 20 | spinlock_t lock; | 21 | spinlock_t lock; |
| 21 | struct crypto_aead *child; | 22 | struct crypto_aead *child; |
| 23 | struct crypto_blkcipher *null; | ||
| 24 | u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); | ||
| 22 | }; | 25 | }; |
| 23 | 26 | ||
| 27 | struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, | ||
| 28 | struct rtattr **tb, u32 type, u32 mask); | ||
| 29 | void aead_geniv_free(struct aead_instance *inst); | ||
| 30 | int aead_init_geniv(struct crypto_aead *tfm); | ||
| 31 | void aead_exit_geniv(struct crypto_aead *tfm); | ||
| 32 | |||
| 24 | #endif /* _CRYPTO_INTERNAL_GENIV_H */ | 33 | #endif /* _CRYPTO_INTERNAL_GENIV_H */ |
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index b3a46c515d1b..2cf7a61ece59 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h | |||
| @@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) | |||
| 107 | return req->base.flags; | 107 | return req->base.flags; |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) | ||
| 111 | { | ||
| 112 | return crypto_tfm_ctx(&tfm->base); | ||
| 113 | } | ||
| 114 | |||
| 115 | static inline void *skcipher_request_ctx(struct skcipher_request *req) | ||
| 116 | { | ||
| 117 | return req->__ctx; | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline u32 skcipher_request_flags(struct skcipher_request *req) | ||
| 121 | { | ||
| 122 | return req->base.flags; | ||
| 123 | } | ||
| 124 | |||
| 110 | #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ | 125 | #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ |
| 111 | 126 | ||
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h new file mode 100644 index 000000000000..894df59b74e4 --- /dev/null +++ b/include/crypto/poly1305.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * Common values for the Poly1305 algorithm | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef _CRYPTO_POLY1305_H | ||
| 6 | #define _CRYPTO_POLY1305_H | ||
| 7 | |||
| 8 | #include <linux/types.h> | ||
| 9 | #include <linux/crypto.h> | ||
| 10 | |||
| 11 | #define POLY1305_BLOCK_SIZE 16 | ||
| 12 | #define POLY1305_KEY_SIZE 32 | ||
| 13 | #define POLY1305_DIGEST_SIZE 16 | ||
| 14 | |||
| 15 | struct poly1305_desc_ctx { | ||
| 16 | /* key */ | ||
| 17 | u32 r[5]; | ||
| 18 | /* finalize key */ | ||
| 19 | u32 s[4]; | ||
| 20 | /* accumulator */ | ||
| 21 | u32 h[5]; | ||
| 22 | /* partial buffer */ | ||
| 23 | u8 buf[POLY1305_BLOCK_SIZE]; | ||
| 24 | /* bytes used in partial buffer */ | ||
| 25 | unsigned int buflen; | ||
| 26 | /* r key has been set */ | ||
| 27 | bool rset; | ||
| 28 | /* s key has been set */ | ||
| 29 | bool sset; | ||
| 30 | }; | ||
| 31 | |||
| 32 | int crypto_poly1305_init(struct shash_desc *desc); | ||
| 33 | int crypto_poly1305_setkey(struct crypto_shash *tfm, | ||
| 34 | const u8 *key, unsigned int keylen); | ||
| 35 | unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, | ||
| 36 | const u8 *src, unsigned int srclen); | ||
| 37 | int crypto_poly1305_update(struct shash_desc *desc, | ||
| 38 | const u8 *src, unsigned int srclen); | ||
| 39 | int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); | ||
| 40 | |||
| 41 | #endif | ||
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 07d245f073d1..d8dd41fb034f 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Symmetric key ciphers. | 2 | * Symmetric key ciphers. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
| @@ -18,6 +18,28 @@ | |||
| 18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| 19 | 19 | ||
| 20 | /** | 20 | /** |
| 21 | * struct skcipher_request - Symmetric key cipher request | ||
| 22 | * @cryptlen: Number of bytes to encrypt or decrypt | ||
| 23 | * @iv: Initialisation Vector | ||
| 24 | * @src: Source SG list | ||
| 25 | * @dst: Destination SG list | ||
| 26 | * @base: Underlying async request request | ||
| 27 | * @__ctx: Start of private context data | ||
| 28 | */ | ||
| 29 | struct skcipher_request { | ||
| 30 | unsigned int cryptlen; | ||
| 31 | |||
| 32 | u8 *iv; | ||
| 33 | |||
| 34 | struct scatterlist *src; | ||
| 35 | struct scatterlist *dst; | ||
| 36 | |||
| 37 | struct crypto_async_request base; | ||
| 38 | |||
| 39 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /** | ||
| 21 | * struct skcipher_givcrypt_request - Crypto request with IV generation | 43 | * struct skcipher_givcrypt_request - Crypto request with IV generation |
| 22 | * @seq: Sequence number for IV generation | 44 | * @seq: Sequence number for IV generation |
| 23 | * @giv: Space for generated IV | 45 | * @giv: Space for generated IV |
| @@ -30,6 +52,23 @@ struct skcipher_givcrypt_request { | |||
| 30 | struct ablkcipher_request creq; | 52 | struct ablkcipher_request creq; |
| 31 | }; | 53 | }; |
| 32 | 54 | ||
| 55 | struct crypto_skcipher { | ||
| 56 | int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, | ||
| 57 | unsigned int keylen); | ||
| 58 | int (*encrypt)(struct skcipher_request *req); | ||
| 59 | int (*decrypt)(struct skcipher_request *req); | ||
| 60 | |||
| 61 | unsigned int ivsize; | ||
| 62 | unsigned int reqsize; | ||
| 63 | |||
| 64 | struct crypto_tfm base; | ||
| 65 | }; | ||
| 66 | |||
| 67 | #define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ | ||
| 68 | char __##name##_desc[sizeof(struct skcipher_request) + \ | ||
| 69 | crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ | ||
| 70 | struct skcipher_request *name = (void *)__##name##_desc | ||
| 71 | |||
| 33 | static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( | 72 | static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( |
| 34 | struct skcipher_givcrypt_request *req) | 73 | struct skcipher_givcrypt_request *req) |
| 35 | { | 74 | { |
| @@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv( | |||
| 106 | req->seq = seq; | 145 | req->seq = seq; |
| 107 | } | 146 | } |
| 108 | 147 | ||
| 148 | /** | ||
| 149 | * DOC: Symmetric Key Cipher API | ||
| 150 | * | ||
| 151 | * Symmetric key cipher API is used with the ciphers of type | ||
| 152 | * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto). | ||
| 153 | * | ||
| 154 | * Asynchronous cipher operations imply that the function invocation for a | ||
| 155 | * cipher request returns immediately before the completion of the operation. | ||
| 156 | * The cipher request is scheduled as a separate kernel thread and therefore | ||
| 157 | * load-balanced on the different CPUs via the process scheduler. To allow | ||
| 158 | * the kernel crypto API to inform the caller about the completion of a cipher | ||
| 159 | * request, the caller must provide a callback function. That function is | ||
| 160 | * invoked with the cipher handle when the request completes. | ||
| 161 | * | ||
| 162 | * To support the asynchronous operation, additional information than just the | ||
| 163 | * cipher handle must be supplied to the kernel crypto API. That additional | ||
| 164 | * information is given by filling in the skcipher_request data structure. | ||
| 165 | * | ||
| 166 | * For the symmetric key cipher API, the state is maintained with the tfm | ||
| 167 | * cipher handle. A single tfm can be used across multiple calls and in | ||
| 168 | * parallel. For asynchronous block cipher calls, context data supplied and | ||
| 169 | * only used by the caller can be referenced the request data structure in | ||
| 170 | * addition to the IV used for the cipher request. The maintenance of such | ||
| 171 | * state information would be important for a crypto driver implementer to | ||
| 172 | * have, because when calling the callback function upon completion of the | ||
| 173 | * cipher operation, that callback function may need some information about | ||
| 174 | * which operation just finished if it invoked multiple in parallel. This | ||
| 175 | * state information is unused by the kernel crypto API. | ||
| 176 | */ | ||
| 177 | |||
| 178 | static inline struct crypto_skcipher *__crypto_skcipher_cast( | ||
| 179 | struct crypto_tfm *tfm) | ||
| 180 | { | ||
| 181 | return container_of(tfm, struct crypto_skcipher, base); | ||
| 182 | } | ||
| 183 | |||
| 184 | /** | ||
| 185 | * crypto_alloc_skcipher() - allocate symmetric key cipher handle | ||
| 186 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 187 | * skcipher cipher | ||
| 188 | * @type: specifies the type of the cipher | ||
| 189 | * @mask: specifies the mask for the cipher | ||
| 190 | * | ||
| 191 | * Allocate a cipher handle for an skcipher. The returned struct | ||
| 192 | * crypto_skcipher is the cipher handle that is required for any subsequent | ||
| 193 | * API invocation for that skcipher. | ||
| 194 | * | ||
| 195 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 196 | * of an error, PTR_ERR() returns the error code. | ||
| 197 | */ | ||
| 198 | struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, | ||
| 199 | u32 type, u32 mask); | ||
| 200 | |||
| 201 | static inline struct crypto_tfm *crypto_skcipher_tfm( | ||
| 202 | struct crypto_skcipher *tfm) | ||
| 203 | { | ||
| 204 | return &tfm->base; | ||
| 205 | } | ||
| 206 | |||
| 207 | /** | ||
| 208 | * crypto_free_skcipher() - zeroize and free cipher handle | ||
| 209 | * @tfm: cipher handle to be freed | ||
| 210 | */ | ||
| 211 | static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) | ||
| 212 | { | ||
| 213 | crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm)); | ||
| 214 | } | ||
| 215 | |||
| 216 | /** | ||
| 217 | * crypto_has_skcipher() - Search for the availability of an skcipher. | ||
| 218 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 219 | * skcipher | ||
| 220 | * @type: specifies the type of the cipher | ||
| 221 | * @mask: specifies the mask for the cipher | ||
| 222 | * | ||
| 223 | * Return: true when the skcipher is known to the kernel crypto API; false | ||
| 224 | * otherwise | ||
| 225 | */ | ||
| 226 | static inline int crypto_has_skcipher(const char *alg_name, u32 type, | ||
| 227 | u32 mask) | ||
| 228 | { | ||
| 229 | return crypto_has_alg(alg_name, crypto_skcipher_type(type), | ||
| 230 | crypto_skcipher_mask(mask)); | ||
| 231 | } | ||
| 232 | |||
| 233 | /** | ||
| 234 | * crypto_skcipher_ivsize() - obtain IV size | ||
| 235 | * @tfm: cipher handle | ||
| 236 | * | ||
| 237 | * The size of the IV for the skcipher referenced by the cipher handle is | ||
| 238 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
| 239 | * | ||
| 240 | * Return: IV size in bytes | ||
| 241 | */ | ||
| 242 | static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) | ||
| 243 | { | ||
| 244 | return tfm->ivsize; | ||
| 245 | } | ||
| 246 | |||
/**
 * crypto_skcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the skcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher in bytes
 */
static inline unsigned int crypto_skcipher_blocksize(
	struct crypto_skcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
| 262 | |||
/**
 * crypto_skcipher_alignmask() - obtain alignment mask of the cipher
 * @tfm: cipher handle
 *
 * Return: the alignment mask of the underlying algorithm, as reported by
 *	   crypto_tfm_alg_alignmask()
 */
static inline unsigned int crypto_skcipher_alignmask(
	struct crypto_skcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
| 268 | |||
/**
 * crypto_skcipher_get_flags() - read the crt_flags of the transform
 * @tfm: cipher handle
 *
 * Return: the current flag word of the underlying crypto_tfm
 */
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
}
| 273 | |||
/**
 * crypto_skcipher_set_flags() - set flags on the transform
 * @tfm: cipher handle
 * @flags: flag bits to OR into the underlying crypto_tfm's flag word
 */
static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
}
| 279 | |||
/**
 * crypto_skcipher_clear_flags() - clear flags on the transform
 * @tfm: cipher handle
 * @flags: flag bits to clear from the underlying crypto_tfm's flag word
 */
static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
| 285 | |||
/**
 * crypto_skcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the skcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	/* dispatch to the setkey operation cached in the tfm */
	return tfm->setkey(tfm, key, keylen);
}
| 307 | |||
/**
 * crypto_skcipher_reqtfm() - obtain cipher handle from request
 * @req: skcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_skcipher handle when furnishing an skcipher_request
 * data structure.
 *
 * Return: crypto_skcipher handle stored in the request
 */
static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
	struct skcipher_request *req)
{
	return __crypto_skcipher_cast(req->base.tfm);
}
| 322 | |||
| 323 | /** | ||
| 324 | * crypto_skcipher_encrypt() - encrypt plaintext | ||
| 325 | * @req: reference to the skcipher_request handle that holds all information | ||
| 326 | * needed to perform the cipher operation | ||
| 327 | * | ||
| 328 | * Encrypt plaintext data using the skcipher_request handle. That data | ||
| 329 | * structure and how it is filled with data is discussed with the | ||
| 330 | * skcipher_request_* functions. | ||
| 331 | * | ||
| 332 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 333 | */ | ||
| 334 | static inline int crypto_skcipher_encrypt(struct skcipher_request *req) | ||
| 335 | { | ||
| 336 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 337 | |||
| 338 | return tfm->encrypt(req); | ||
| 339 | } | ||
| 340 | |||
| 341 | /** | ||
| 342 | * crypto_skcipher_decrypt() - decrypt ciphertext | ||
| 343 | * @req: reference to the skcipher_request handle that holds all information | ||
| 344 | * needed to perform the cipher operation | ||
| 345 | * | ||
| 346 | * Decrypt ciphertext data using the skcipher_request handle. That data | ||
| 347 | * structure and how it is filled with data is discussed with the | ||
| 348 | * skcipher_request_* functions. | ||
| 349 | * | ||
| 350 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 351 | */ | ||
| 352 | static inline int crypto_skcipher_decrypt(struct skcipher_request *req) | ||
| 353 | { | ||
| 354 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
| 355 | |||
| 356 | return tfm->decrypt(req); | ||
| 357 | } | ||
| 358 | |||
/**
 * DOC: Symmetric Key Cipher Request Handle
 *
 * The skcipher_request data structure contains all pointers to data
 * required for the symmetric key cipher operation. This includes the cipher
 * handle (which can be used by multiple skcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the skcipher_request_* API calls in a similar way as
 * skcipher handle to the crypto_skcipher_* API calls.
 */

/**
 * crypto_skcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes of per-request context the transform requires,
 *	   in addition to sizeof(struct skcipher_request)
 */
static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
{
	return tfm->reqsize;
}
| 380 | |||
/**
 * skcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing skcipher handle in the request
 * data structure with a different one.
 */
static inline void skcipher_request_set_tfm(struct skcipher_request *req,
					    struct crypto_skcipher *tfm)
{
	req->base.tfm = crypto_skcipher_tfm(tfm);
}
| 394 | |||
/**
 * skcipher_request_cast() - obtain the skcipher request from an async request
 * @req: asynchronous request handle that is embedded in an skcipher_request
 *
 * Return: the skcipher_request that contains @req as its base member
 */
static inline struct skcipher_request *skcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct skcipher_request, base);
}
| 400 | |||
/**
 * skcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the skcipher
 * encrypt and decrypt API calls. During the allocation, the provided skcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of
 *	   memory
 */
static inline struct skcipher_request *skcipher_request_alloc(
	struct crypto_skcipher *tfm, gfp_t gfp)
{
	struct skcipher_request *req;

	/* per-tfm reqsize bytes of driver context follow the request proper */
	req = kmalloc(sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(tfm), gfp);

	if (likely(req))
		skcipher_request_set_tfm(req, tfm);

	return req;
}
| 426 | |||
/**
 * skcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 *
 * kzfree() clears the memory before freeing it, so any key material or
 * plaintext held in the request context is not left behind.
 */
static inline void skcipher_request_free(struct skcipher_request *req)
{
	kzfree(req);
}
| 435 | |||
/**
 * skcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once the
 * cipher operation completes.
 *
 * The callback function is registered with the skcipher_request handle and
 * must comply with the following template
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void skcipher_request_set_callback(struct skcipher_request *req,
						 u32 flags,
						 crypto_completion_t compl,
						 void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}
| 470 | |||
/**
 * skcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @cryptlen: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_skcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void skcipher_request_set_crypt(
	struct skcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int cryptlen, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->cryptlen = cryptlen;
	req->iv = iv;
}
| 497 | |||
| 109 | #endif /* _CRYPTO_SKCIPHER_H */ | 498 | #endif /* _CRYPTO_SKCIPHER_H */ |
| 110 | 499 | ||
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index 8780868458a0..8de173ff19f3 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h | |||
| @@ -251,6 +251,9 @@ | |||
| 251 | #define IMX6QDL_CLK_VIDEO_27M 238 | 251 | #define IMX6QDL_CLK_VIDEO_27M 238 |
| 252 | #define IMX6QDL_CLK_MIPI_CORE_CFG 239 | 252 | #define IMX6QDL_CLK_MIPI_CORE_CFG 239 |
| 253 | #define IMX6QDL_CLK_MIPI_IPG 240 | 253 | #define IMX6QDL_CLK_MIPI_IPG 240 |
| 254 | #define IMX6QDL_CLK_END 241 | 254 | #define IMX6QDL_CLK_CAAM_MEM 241 |
| 255 | #define IMX6QDL_CLK_CAAM_ACLK 242 | ||
| 256 | #define IMX6QDL_CLK_CAAM_IPG 243 | ||
| 257 | #define IMX6QDL_CLK_END 244 | ||
| 255 | 258 | ||
| 256 | #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ | 259 | #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 81ef938b0a8e..e71cb70a1ac2 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -102,12 +102,6 @@ | |||
| 102 | #define CRYPTO_ALG_INTERNAL 0x00002000 | 102 | #define CRYPTO_ALG_INTERNAL 0x00002000 |
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| 105 | * Temporary flag used to prevent legacy AEAD implementations from | ||
| 106 | * being used by user-space. | ||
| 107 | */ | ||
| 108 | #define CRYPTO_ALG_AEAD_NEW 0x00004000 | ||
| 109 | |||
| 110 | /* | ||
| 111 | * Transform masks and values (for crt_flags). | 105 | * Transform masks and values (for crt_flags). |
| 112 | */ | 106 | */ |
| 113 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 | 107 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 |
| @@ -142,13 +136,10 @@ | |||
| 142 | struct scatterlist; | 136 | struct scatterlist; |
| 143 | struct crypto_ablkcipher; | 137 | struct crypto_ablkcipher; |
| 144 | struct crypto_async_request; | 138 | struct crypto_async_request; |
| 145 | struct crypto_aead; | ||
| 146 | struct crypto_blkcipher; | 139 | struct crypto_blkcipher; |
| 147 | struct crypto_hash; | 140 | struct crypto_hash; |
| 148 | struct crypto_tfm; | 141 | struct crypto_tfm; |
| 149 | struct crypto_type; | 142 | struct crypto_type; |
| 150 | struct aead_request; | ||
| 151 | struct aead_givcrypt_request; | ||
| 152 | struct skcipher_givcrypt_request; | 143 | struct skcipher_givcrypt_request; |
| 153 | 144 | ||
| 154 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); | 145 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); |
| @@ -275,47 +266,6 @@ struct ablkcipher_alg { | |||
| 275 | }; | 266 | }; |
| 276 | 267 | ||
| 277 | /** | 268 | /** |
| 278 | * struct old_aead_alg - AEAD cipher definition | ||
| 279 | * @maxauthsize: Set the maximum authentication tag size supported by the | ||
| 280 | * transformation. A transformation may support smaller tag sizes. | ||
| 281 | * As the authentication tag is a message digest to ensure the | ||
| 282 | * integrity of the encrypted data, a consumer typically wants the | ||
| 283 | * largest authentication tag possible as defined by this | ||
| 284 | * variable. | ||
| 285 | * @setauthsize: Set authentication size for the AEAD transformation. This | ||
| 286 | * function is used to specify the consumer requested size of the | ||
| 287 | * authentication tag to be either generated by the transformation | ||
| 288 | * during encryption or the size of the authentication tag to be | ||
| 289 | * supplied during the decryption operation. This function is also | ||
| 290 | * responsible for checking the authentication tag size for | ||
| 291 | * validity. | ||
| 292 | * @setkey: see struct ablkcipher_alg | ||
| 293 | * @encrypt: see struct ablkcipher_alg | ||
| 294 | * @decrypt: see struct ablkcipher_alg | ||
| 295 | * @givencrypt: see struct ablkcipher_alg | ||
| 296 | * @givdecrypt: see struct ablkcipher_alg | ||
| 297 | * @geniv: see struct ablkcipher_alg | ||
| 298 | * @ivsize: see struct ablkcipher_alg | ||
| 299 | * | ||
| 300 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | ||
| 301 | * mandatory and must be filled. | ||
| 302 | */ | ||
| 303 | struct old_aead_alg { | ||
| 304 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | ||
| 305 | unsigned int keylen); | ||
| 306 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); | ||
| 307 | int (*encrypt)(struct aead_request *req); | ||
| 308 | int (*decrypt)(struct aead_request *req); | ||
| 309 | int (*givencrypt)(struct aead_givcrypt_request *req); | ||
| 310 | int (*givdecrypt)(struct aead_givcrypt_request *req); | ||
| 311 | |||
| 312 | const char *geniv; | ||
| 313 | |||
| 314 | unsigned int ivsize; | ||
| 315 | unsigned int maxauthsize; | ||
| 316 | }; | ||
| 317 | |||
| 318 | /** | ||
| 319 | * struct blkcipher_alg - synchronous block cipher definition | 269 | * struct blkcipher_alg - synchronous block cipher definition |
| 320 | * @min_keysize: see struct ablkcipher_alg | 270 | * @min_keysize: see struct ablkcipher_alg |
| 321 | * @max_keysize: see struct ablkcipher_alg | 271 | * @max_keysize: see struct ablkcipher_alg |
| @@ -409,7 +359,6 @@ struct compress_alg { | |||
| 409 | 359 | ||
| 410 | 360 | ||
| 411 | #define cra_ablkcipher cra_u.ablkcipher | 361 | #define cra_ablkcipher cra_u.ablkcipher |
| 412 | #define cra_aead cra_u.aead | ||
| 413 | #define cra_blkcipher cra_u.blkcipher | 362 | #define cra_blkcipher cra_u.blkcipher |
| 414 | #define cra_cipher cra_u.cipher | 363 | #define cra_cipher cra_u.cipher |
| 415 | #define cra_compress cra_u.compress | 364 | #define cra_compress cra_u.compress |
| @@ -460,7 +409,7 @@ struct compress_alg { | |||
| 460 | * struct crypto_type, which implements callbacks common for all | 409 | * struct crypto_type, which implements callbacks common for all |
| 461 | * transformation types. There are multiple options: | 410 | * transformation types. There are multiple options: |
| 462 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, | 411 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, |
| 463 | * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. | 412 | * &crypto_ahash_type, &crypto_rng_type. |
| 464 | * This field might be empty. In that case, there are no common | 413 | * This field might be empty. In that case, there are no common |
| 465 | * callbacks. This is the case for: cipher, compress, shash. | 414 | * callbacks. This is the case for: cipher, compress, shash. |
| 466 | * @cra_u: Callbacks implementing the transformation. This is a union of | 415 | * @cra_u: Callbacks implementing the transformation. This is a union of |
| @@ -508,7 +457,6 @@ struct crypto_alg { | |||
| 508 | 457 | ||
| 509 | union { | 458 | union { |
| 510 | struct ablkcipher_alg ablkcipher; | 459 | struct ablkcipher_alg ablkcipher; |
| 511 | struct old_aead_alg aead; | ||
| 512 | struct blkcipher_alg blkcipher; | 460 | struct blkcipher_alg blkcipher; |
| 513 | struct cipher_alg cipher; | 461 | struct cipher_alg cipher; |
| 514 | struct compress_alg compress; | 462 | struct compress_alg compress; |
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index bc0a1da8afba..95c52a95259e 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c | |||
| @@ -146,18 +146,25 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
| 146 | uint8_t *p; | 146 | uint8_t *p; |
| 147 | mpi_limb_t alimb; | 147 | mpi_limb_t alimb; |
| 148 | unsigned int n = mpi_get_size(a); | 148 | unsigned int n = mpi_get_size(a); |
| 149 | int i; | 149 | int i, lzeros = 0; |
| 150 | 150 | ||
| 151 | if (buf_len < n || !buf) | 151 | if (buf_len < n || !buf || !nbytes) |
| 152 | return -EINVAL; | 152 | return -EINVAL; |
| 153 | 153 | ||
| 154 | if (sign) | 154 | if (sign) |
| 155 | *sign = a->sign; | 155 | *sign = a->sign; |
| 156 | 156 | ||
| 157 | if (nbytes) | 157 | p = (void *)&a->d[a->nlimbs] - 1; |
| 158 | *nbytes = n; | 158 | |
| 159 | for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) { | ||
| 160 | if (!*p) | ||
| 161 | lzeros++; | ||
| 162 | else | ||
| 163 | break; | ||
| 164 | } | ||
| 159 | 165 | ||
| 160 | p = buf; | 166 | p = buf; |
| 167 | *nbytes = n - lzeros; | ||
| 161 | 168 | ||
| 162 | for (i = a->nlimbs - 1; i >= 0; i--) { | 169 | for (i = a->nlimbs - 1; i >= 0; i--) { |
| 163 | alimb = a->d[i]; | 170 | alimb = a->d[i]; |
| @@ -178,6 +185,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
| 178 | #else | 185 | #else |
| 179 | #error please implement for this limb size. | 186 | #error please implement for this limb size. |
| 180 | #endif | 187 | #endif |
| 188 | |||
| 189 | if (lzeros > 0) { | ||
| 190 | if (lzeros >= sizeof(alimb)) { | ||
| 191 | p -= sizeof(alimb); | ||
| 192 | } else { | ||
| 193 | mpi_limb_t *limb1 = (void *)p - sizeof(alimb); | ||
| 194 | mpi_limb_t *limb2 = (void *)p - sizeof(alimb) | ||
| 195 | + lzeros; | ||
| 196 | *limb1 = *limb2; | ||
| 197 | p -= lzeros; | ||
| 198 | } | ||
| 199 | lzeros -= sizeof(alimb); | ||
| 200 | } | ||
| 181 | } | 201 | } |
| 182 | return 0; | 202 | return 0; |
| 183 | } | 203 | } |
| @@ -197,7 +217,7 @@ EXPORT_SYMBOL_GPL(mpi_read_buffer); | |||
| 197 | */ | 217 | */ |
| 198 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | 218 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) |
| 199 | { | 219 | { |
| 200 | uint8_t *buf, *p; | 220 | uint8_t *buf; |
| 201 | unsigned int n; | 221 | unsigned int n; |
| 202 | int ret; | 222 | int ret; |
| 203 | 223 | ||
| @@ -220,14 +240,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | |||
| 220 | kfree(buf); | 240 | kfree(buf); |
| 221 | return NULL; | 241 | return NULL; |
| 222 | } | 242 | } |
| 223 | |||
| 224 | /* this is sub-optimal but we need to do the shift operation | ||
| 225 | * because the caller has to free the returned buffer */ | ||
| 226 | for (p = buf; !*p && *nbytes; p++, --*nbytes) | ||
| 227 | ; | ||
| 228 | if (p != buf) | ||
| 229 | memmove(buf, p, *nbytes); | ||
| 230 | |||
| 231 | return buf; | 243 | return buf; |
| 232 | } | 244 | } |
| 233 | EXPORT_SYMBOL_GPL(mpi_get_buffer); | 245 | EXPORT_SYMBOL_GPL(mpi_get_buffer); |
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 42f7c76cf853..f07224d8b88f 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
| @@ -31,7 +31,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 31 | 31 | ||
| 32 | .uinfo = { | 32 | .uinfo = { |
| 33 | .aead = { | 33 | .aead = { |
| 34 | .geniv = "seqniv", | 34 | .geniv = "seqiv", |
| 35 | .icv_truncbits = 64, | 35 | .icv_truncbits = 64, |
| 36 | } | 36 | } |
| 37 | }, | 37 | }, |
| @@ -50,7 +50,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 50 | 50 | ||
| 51 | .uinfo = { | 51 | .uinfo = { |
| 52 | .aead = { | 52 | .aead = { |
| 53 | .geniv = "seqniv", | 53 | .geniv = "seqiv", |
| 54 | .icv_truncbits = 96, | 54 | .icv_truncbits = 96, |
| 55 | } | 55 | } |
| 56 | }, | 56 | }, |
| @@ -69,7 +69,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 69 | 69 | ||
| 70 | .uinfo = { | 70 | .uinfo = { |
| 71 | .aead = { | 71 | .aead = { |
| 72 | .geniv = "seqniv", | 72 | .geniv = "seqiv", |
| 73 | .icv_truncbits = 128, | 73 | .icv_truncbits = 128, |
| 74 | } | 74 | } |
| 75 | }, | 75 | }, |
| @@ -88,7 +88,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 88 | 88 | ||
| 89 | .uinfo = { | 89 | .uinfo = { |
| 90 | .aead = { | 90 | .aead = { |
| 91 | .geniv = "seqniv", | 91 | .geniv = "seqiv", |
| 92 | .icv_truncbits = 64, | 92 | .icv_truncbits = 64, |
| 93 | } | 93 | } |
| 94 | }, | 94 | }, |
| @@ -107,7 +107,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 107 | 107 | ||
| 108 | .uinfo = { | 108 | .uinfo = { |
| 109 | .aead = { | 109 | .aead = { |
| 110 | .geniv = "seqniv", | 110 | .geniv = "seqiv", |
| 111 | .icv_truncbits = 96, | 111 | .icv_truncbits = 96, |
| 112 | } | 112 | } |
| 113 | }, | 113 | }, |
| @@ -126,7 +126,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 126 | 126 | ||
| 127 | .uinfo = { | 127 | .uinfo = { |
| 128 | .aead = { | 128 | .aead = { |
| 129 | .geniv = "seqniv", | 129 | .geniv = "seqiv", |
| 130 | .icv_truncbits = 128, | 130 | .icv_truncbits = 128, |
| 131 | } | 131 | } |
| 132 | }, | 132 | }, |
| @@ -164,7 +164,7 @@ static struct xfrm_algo_desc aead_list[] = { | |||
| 164 | 164 | ||
| 165 | .uinfo = { | 165 | .uinfo = { |
| 166 | .aead = { | 166 | .aead = { |
| 167 | .geniv = "seqniv", | 167 | .geniv = "seqiv", |
| 168 | .icv_truncbits = 128, | 168 | .icv_truncbits = 128, |
| 169 | } | 169 | } |
| 170 | }, | 170 | }, |
