author	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 18:53:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 18:53:46 -0400
commit	5a0387a8a8efb90ae7fea1e2e5c62de3efa74691 (patch)
tree	9e5bbbafe7fea01c843d86c7c3d40f29f962c474
parent	204f144c9fcac355843412b6ba1150086488a208 (diff)
parent	929562b144783b9212625305eadcbbd800809643 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.12:

  API:
   - Add batch registration for acomp/scomp
   - Change acomp testing to non-unique compressed result
   - Extend algorithm name limit to 128 bytes
   - Require setkey before accept(2) in algif_aead

  Algorithms:
   - Add support for deflate rfc1950 (zlib)

  Drivers:
   - Add accelerated crct10dif for powerpc
   - Add crc32 in stm32
   - Add sha384/sha512 in ccp
   - Add 3des/gcm(aes) for v5 devices in ccp
   - Add Queue Interface (QI) backend support in caam
   - Add new Exynos RNG driver
   - Add ThunderX ZIP driver
   - Add driver for hardware random generator on MT7623 SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (101 commits)
  crypto: stm32 - Fix OF module alias information
  crypto: algif_aead - Require setkey before accept(2)
  crypto: scomp - add support for deflate rfc1950 (zlib)
  crypto: scomp - allow registration of multiple scomps
  crypto: ccp - Change ISR handler method for a v5 CCP
  crypto: ccp - Change ISR handler method for a v3 CCP
  crypto: crypto4xx - rename ce_ring_contol to ce_ring_control
  crypto: testmgr - Allow ecb(cipher_null) in FIPS mode
  Revert "crypto: arm64/sha - Add constant operand modifier to ASM_EXPORT"
  crypto: ccp - Disable interrupts early on unload
  crypto: ccp - Use only the relevant interrupt bits
  hwrng: mtk - Add driver for hardware random generator on MT7623 SoC
  dt-bindings: hwrng: Add Mediatek hardware random generator bindings
  crypto: crct10dif-vpmsum - Fix missing preempt_disable()
  crypto: testmgr - replace compression known answer test
  crypto: acomp - allow registration of multiple acomps
  hwrng: n2 - Use devm_kcalloc() in n2rng_probe()
  crypto: chcr - Fix error handling related to 'chcr_alloc_shash'
  padata: get_next is never NULL
  crypto: exynos - Add new Exynos RNG driver
  ...
-rw-r--r--  Documentation/crypto/api-samples.rst | 6
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-crc.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/rng/mtk-rng.txt | 18
-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  arch/arm/boot/dts/stm32746g-eval.dts | 4
-rw-r--r--  arch/arm/boot/dts/stm32f746.dtsi | 7
-rw-r--r--  arch/arm/configs/stm32_defconfig | 2
-rw-r--r--  arch/arm/crypto/Kconfig | 2
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c | 60
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 5
-rw-r--r--  arch/metag/kernel/stacktrace.c | 2
-rw-r--r--  arch/powerpc/crypto/Makefile | 3
-rw-r--r--  arch/powerpc/crypto/crc-vpmsum_test.c | 137
-rw-r--r--  arch/powerpc/crypto/crc32-vpmsum_core.S | 755
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_asm.S | 715
-rw-r--r--  arch/powerpc/crypto/crct10dif-vpmsum_asm.S | 850
-rw-r--r--  arch/powerpc/crypto/crct10dif-vpmsum_glue.c | 128
-rw-r--r--  arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 7
-rw-r--r--  arch/x86/crypto/camellia_glue.c | 4
-rw-r--r--  arch/x86/crypto/glue_helper.c | 3
-rw-r--r--  arch/x86/crypto/serpent_sse2_glue.c | 4
-rw-r--r--  arch/x86/crypto/twofish_glue_3way.c | 4
-rw-r--r--  arch/x86/include/asm/crypto/glue_helper.h | 10
-rw-r--r--  crypto/Kconfig | 18
-rw-r--r--  crypto/acompress.c | 29
-rw-r--r--  crypto/af_alg.c | 4
-rw-r--r--  crypto/algapi.c | 4
-rw-r--r--  crypto/algif_aead.c | 157
-rw-r--r--  crypto/cbc.c | 15
-rw-r--r--  crypto/crypto_user.c | 18
-rw-r--r--  crypto/ctr.c | 23
-rw-r--r--  crypto/deflate.c | 61
-rw-r--r--  crypto/dh.c | 3
-rw-r--r--  crypto/drbg.c | 5
-rw-r--r--  crypto/ecdh.c | 3
-rw-r--r--  crypto/gf128mul.c | 111
-rw-r--r--  crypto/lz4.c | 2
-rw-r--r--  crypto/lz4hc.c | 2
-rw-r--r--  crypto/md5.c | 95
-rw-r--r--  crypto/scompress.c | 29
-rw-r--r--  crypto/testmgr.c | 112
-rw-r--r--  crypto/testmgr.h | 587
-rw-r--r--  crypto/xts.c | 38
-rw-r--r--  drivers/char/hw_random/Kconfig | 28
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 231
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 22
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 168
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 22
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 157
-rw-r--r--  drivers/clk/meson/gxbb.h | 2
-rw-r--r--  drivers/crypto/Kconfig | 24
-rw-r--r--  drivers/crypto/Makefile | 3
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 2
-rw-r--r--  drivers/crypto/bcm/util.c | 2
-rw-r--r--  drivers/crypto/caam/Kconfig | 20
-rw-r--r--  drivers/crypto/caam/Makefile | 5
-rw-r--r--  drivers/crypto/caam/caamalg.c | 9
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 77
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 15
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 2387
-rw-r--r--  drivers/crypto/caam/ctrl.c | 59
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 5
-rw-r--r--  drivers/crypto/caam/intern.h | 24
-rw-r--r--  drivers/crypto/caam/qi.c | 805
-rw-r--r--  drivers/crypto/caam/qi.h | 201
-rw-r--r--  drivers/crypto/caam/sg_sw_qm.h | 108
-rw-r--r--  drivers/crypto/cavium/Makefile | 4
-rw-r--r--  drivers/crypto/cavium/zip/Makefile | 11
-rw-r--r--  drivers/crypto/cavium/zip/common.h | 202
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c | 313
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.h | 79
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.c | 200
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.h | 62
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.c | 202
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.h | 108
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.c | 223
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.h | 62
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c | 729
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.h | 121
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.c | 114
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.h | 78
-rw-r--r--  drivers/crypto/cavium/zip/zip_regs.h | 1347
-rw-r--r--  drivers/crypto/ccp/Makefile | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 252
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 254
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 22
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 22
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 44
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 121
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 168
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 30
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 522
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 304
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 4
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 10
-rw-r--r--  drivers/crypto/exynos-rng.c | 389
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 2
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 421
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 15
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.h | 56
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c | 309
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 2
-rw-r--r--  drivers/crypto/s5p-sss.c | 35
-rw-r--r--  drivers/crypto/stm32/Kconfig | 7
-rw-r--r--  drivers/crypto/stm32/Makefile | 2
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 324
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 3
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 4
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 6
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h | 97
-rw-r--r--  include/crypto/gf128mul.h | 87
-rw-r--r--  include/crypto/internal/acompress.h | 3
-rw-r--r--  include/crypto/internal/scompress.h | 3
-rw-r--r--  include/crypto/kpp.h | 6
-rw-r--r--  include/crypto/xts.h | 2
-rw-r--r--  include/dt-bindings/clock/gxbb-clkc.h | 3
-rw-r--r--  include/linux/ccp.h | 68
-rw-r--r--  include/linux/crypto.h | 2
-rw-r--r--  include/linux/cryptohash.h | 5
-rw-r--r--  include/linux/kernel.h | 1
-rw-r--r--  include/soc/fsl/qman.h | 109
-rw-r--r--  include/uapi/linux/cryptouser.h | 10
-rw-r--r--  include/video/udlfb.h | 2
-rw-r--r--  kernel/padata.c | 15
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/md5.c | 95
-rw-r--r--  net/xfrm/xfrm_user.c | 6
137 files changed, 13710 insertions, 2480 deletions
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 0a10819f6107..d021fd96a76d 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -155,9 +155,9 @@ Code Example For Use of Operational State Memory With SHASH
     char ctx[];
 };
 
-static struct sdescinit_sdesc(struct crypto_shash *alg)
+static struct sdesc init_sdesc(struct crypto_shash *alg)
 {
-    struct sdescsdesc;
+    struct sdesc sdesc;
     int size;
 
     size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
@@ -172,7 +172,7 @@ Code Example For Use of Operational State Memory With SHASH
 static int calc_hash(struct crypto_shashalg,
                      const unsigned chardata, unsigned int datalen,
                      unsigned chardigest) {
-    struct sdescsdesc;
+    struct sdesc sdesc;
     int ret;
 
     sdesc = init_sdesc(alg);
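As a side note, the hunk above only shows fragments of the documentation example. A self-contained sketch of the SHASH pattern it describes looks roughly like the following (an illustration assuming the kmalloc-based sdesc wrapper used elsewhere in api-samples.rst, and the 4.12-era shash_desc which still carries a flags field; not a new API):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	struct sdesc {
		struct shash_desc shash;
		char ctx[];
	};

	static struct sdesc *init_sdesc(struct crypto_shash *alg)
	{
		struct sdesc *sdesc;
		int size;

		/* descsize is the algorithm's operational state, appended to the desc */
		size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
		sdesc = kmalloc(size, GFP_KERNEL);
		if (!sdesc)
			return ERR_PTR(-ENOMEM);
		sdesc->shash.tfm = alg;
		sdesc->shash.flags = 0x0;
		return sdesc;
	}

	static int calc_hash(struct crypto_shash *alg, const unsigned char *data,
			     unsigned int datalen, unsigned char *digest)
	{
		struct sdesc *sdesc;
		int ret;

		sdesc = init_sdesc(alg);
		if (IS_ERR(sdesc))
			return PTR_ERR(sdesc);

		/* one-shot digest: init + update + final over the supplied buffer */
		ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
		kfree(sdesc);
		return ret;
	}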
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-crc.txt b/Documentation/devicetree/bindings/crypto/st,stm32-crc.txt
new file mode 100644
index 000000000000..3ba92a5e9b36
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-crc.txt
@@ -0,0 +1,16 @@
+* STMicroelectronics STM32 CRC
+
+Required properties:
+- compatible: Should be "st,stm32f7-crc".
+- reg: The address and length of the peripheral registers space
+- clocks: The input clock of the CRC instance
+
+Optional properties: none
+
+Example:
+
+crc: crc@40023000 {
+	compatible = "st,stm32f7-crc";
+	reg = <0x40023000 0x400>;
+	clocks = <&rcc 0 12>;
+};
diff --git a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt
index 202f2d09a23f..4d403645ac9b 100644
--- a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt
+++ b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt
@@ -6,9 +6,16 @@ Required properties:
 - compatible : should be "amlogic,meson-rng"
 - reg : Specifies base physical address and size of the registers.
 
+Optional properties:
+
+- clocks : phandle to the following named clocks
+- clock-names: Name of core clock, must be "core"
+
 Example:
 
 rng {
 	compatible = "amlogic,meson-rng";
 	reg = <0x0 0xc8834000 0x0 0x4>;
+	clocks = <&clkc CLKID_RNG0>;
+	clock-names = "core";
 };
diff --git a/Documentation/devicetree/bindings/rng/mtk-rng.txt b/Documentation/devicetree/bindings/rng/mtk-rng.txt
new file mode 100644
index 000000000000..a6d62a2abd39
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/mtk-rng.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Mediatek random number generator
+found in Mediatek SoC family
+
+Required properties:
+- compatible : Should be "mediatek,mt7623-rng"
+- clocks : list of clock specifiers, corresponding to
+	   entries in clock-names property;
+- clock-names : Should contain "rng" entries;
+- reg : Specifies base physical address and size of the registers
+
+Example:
+
+rng: rng@1020f000 {
+	compatible = "mediatek,mt7623-rng";
+	reg = <0 0x1020f000 0 0x1000>;
+	clocks = <&infracfg CLK_INFRA_TRNG>;
+	clock-names = "rng";
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index 24f894eb2a7f..756da3f484d1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6242,7 +6242,7 @@ F:	drivers/crypto/nx/nx_csbcpb.h
 F:	drivers/crypto/nx/nx_debugfs.h
 
 IBM Power 842 compression accelerator
-M:	Dan Streetman <ddstreet@ieee.org>
+M:	Haren Myneni <haren@us.ibm.com>
 S:	Supported
 F:	drivers/crypto/nx/Makefile
 F:	drivers/crypto/nx/Kconfig
@@ -10954,6 +10954,14 @@ L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/samsung/
 
+SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+L:	linux-crypto@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/exynos-rng.c
+F:	Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+
 SAMSUNG FRAMEBUFFER DRIVER
 M:	Jingoo Han <jingoohan1@gmail.com>
 L:	linux-fbdev@vger.kernel.org
@@ -10978,6 +10986,14 @@ F: Documentation/devicetree/bindings/regulator/samsung,s2m*.txt
 F:	Documentation/devicetree/bindings/regulator/samsung,s5m*.txt
 F:	Documentation/devicetree/bindings/clock/samsung,s2mps11.txt
 
+SAMSUNG S5P Security SubSystem (SSS) DRIVER
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+M:	Vladimir Zapolskiy <vz@mleia.com>
+L:	linux-crypto@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/s5p-sss.c
+
 SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts
index aa03fac1ec55..0dc18a0f0940 100644
--- a/arch/arm/boot/dts/stm32746g-eval.dts
+++ b/arch/arm/boot/dts/stm32746g-eval.dts
@@ -89,6 +89,10 @@
 	clock-frequency = <25000000>;
 };
 
+&crc {
+	status = "okay";
+};
+
 &usart1 {
 	pinctrl-0 = <&usart1_pins_a>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index f321ffe87144..755fb923c07b 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -289,6 +289,13 @@
 			};
 		};
 
+		crc: crc@40023000 {
+			compatible = "st,stm32f7-crc";
+			reg = <0x40023000 0x400>;
+			clocks = <&rcc 0 12>;
+			status = "disabled";
+		};
+
 		rcc: rcc@40023800 {
 			#clock-cells = <2>;
 			compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index a9d8e3c9b487..03437f8f9ad1 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -75,5 +75,7 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
 # CONFIG_FTRACE is not set
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_DEV_STM32=y
 CONFIG_CRC_ITU_T=y
 CONFIG_CRC7=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index a8fce93137fb..b9adedcc5b2e 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -73,7 +73,7 @@ config CRYPTO_AES_ARM_BS
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_SIMD
-	select CRYPTO_AES_ARM
+	select CRYPTO_AES
 	help
 	  Use a faster and more secure NEON based implementation of AES in CBC,
 	  CTR and XTS modes
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 2920b96dbd36..c76377961444 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -42,9 +42,6 @@ asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
 asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
 				  int rounds, int blocks, u8 iv[]);
 
-asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, const u8 in[],
-				  u8 out[]);
-
 struct aesbs_ctx {
 	int	rounds;
 	u8	rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
@@ -52,12 +49,12 @@ struct aesbs_ctx {
 
 struct aesbs_cbc_ctx {
 	struct aesbs_ctx	key;
-	u32			enc[AES_MAX_KEYLENGTH_U32];
+	struct crypto_cipher	*enc_tfm;
 };
 
 struct aesbs_xts_ctx {
 	struct aesbs_ctx	key;
-	u32			twkey[AES_MAX_KEYLENGTH_U32];
+	struct crypto_cipher	*tweak_tfm;
 };
 
 static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -132,20 +129,18 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 
 	ctx->key.rounds = 6 + key_len / 4;
 
-	memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));
-
 	kernel_neon_begin();
 	aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
 	kernel_neon_end();
 
-	return 0;
+	return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
 }
 
 static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
 {
 	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	__aes_arm_encrypt(ctx->enc, ctx->key.rounds, src, dst);
+	crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
 }
 
 static int cbc_encrypt(struct skcipher_request *req)
@@ -181,6 +176,23 @@ static int cbc_decrypt(struct skcipher_request *req)
 	return err;
 }
 
+static int cbc_init(struct crypto_tfm *tfm)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->enc_tfm))
+		return PTR_ERR(ctx->enc_tfm);
+	return 0;
+}
+
+static void cbc_exit(struct crypto_tfm *tfm)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(ctx->enc_tfm);
+}
+
 static int ctr_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -228,7 +240,6 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
 	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_aes_ctx rk;
 	int err;
 
 	err = xts_verify_key(tfm, in_key, key_len);
@@ -236,15 +247,30 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 		return err;
 
 	key_len /= 2;
-	err = crypto_aes_expand_key(&rk, in_key + key_len, key_len);
+	err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
 	if (err)
 		return err;
 
-	memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey));
-
 	return aesbs_setkey(tfm, in_key, key_len);
 }
 
+static int xts_init(struct crypto_tfm *tfm)
+{
+	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->tweak_tfm))
+		return PTR_ERR(ctx->tweak_tfm);
+	return 0;
+}
+
+static void xts_exit(struct crypto_tfm *tfm)
+{
+	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(ctx->tweak_tfm);
+}
+
 static int __xts_crypt(struct skcipher_request *req,
 		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
 				  int rounds, int blocks, u8 iv[]))
@@ -256,7 +282,7 @@ static int __xts_crypt(struct skcipher_request *req,
 
 	err = skcipher_walk_virt(&walk, req, true);
 
-	__aes_arm_encrypt(ctx->twkey, ctx->key.rounds, walk.iv, walk.iv);
+	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
 
 	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
@@ -309,6 +335,8 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
+	.base.cra_init		= cbc_init,
+	.base.cra_exit		= cbc_exit,
 
 	.min_keysize		= AES_MIN_KEY_SIZE,
 	.max_keysize		= AES_MAX_KEY_SIZE,
@@ -342,6 +370,8 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
+	.base.cra_init		= xts_init,
+	.base.cra_exit		= xts_exit,
 
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
@@ -402,5 +432,5 @@ unregister_simds:
 	return err;
 }
 
-module_init(aes_init);
+late_initcall(aes_init);
 module_exit(aes_exit);
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 5d995f7724af..620495a43363 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -380,7 +380,7 @@
 			#size-cells = <2>;
 			ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
 
-			rng {
+			hwrng: rng {
 				compatible = "amlogic,meson-rng";
 				reg = <0x0 0x0 0x0 0x4>;
 			};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 04b3324bc132..a375cb21cc8b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -524,3 +524,8 @@
 &vpu {
 	compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
 };
+
+&hwrng {
+	clocks = <&clkc CLKID_RNG0>;
+	clock-names = "core";
+};
diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c
index 91ffc4b75c33..09d67b7f51ca 100644
--- a/arch/metag/kernel/stacktrace.c
+++ b/arch/metag/kernel/stacktrace.c
@@ -31,8 +31,6 @@ static void tbi_boing_init(void)
 }
 #endif
 
-#define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
-
 /*
  * Unwind the current stack frame and store the new register values in the
  * structure passed as argument. Unwinding is equivalent to a function return,
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 87f40454bad3..67eca3af9fc7 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
 obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
 obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
 obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
+obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
+obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
 
 aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
 md5-ppc-y := md5-asm.o md5-glue.o
@@ -17,3 +19,4 @@ sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
 sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
+crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
new file mode 100644
index 000000000000..0153a9c6f4af
--- /dev/null
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -0,0 +1,137 @@
+/*
+ * CRC vpmsum tester
+ * Copyright 2017 Daniel Axtens, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc-t10dif.h>
+#include <linux/crc32.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/cpufeature.h>
+#include <asm/switch_to.h>
+
+static unsigned long iterations = 10000;
+
+#define MAX_CRC_LENGTH 65535
+
+
+static int __init crc_test_init(void)
+{
+	u16 crc16 = 0, verify16 = 0;
+	u32 crc32 = 0, verify32 = 0;
+	__le32 verify32le = 0;
+	unsigned char *data;
+	unsigned long i;
+	int ret;
+
+	struct crypto_shash *crct10dif_tfm;
+	struct crypto_shash *crc32c_tfm;
+
+	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+		return -ENODEV;
+
+	data = kmalloc(MAX_CRC_LENGTH, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
+
+	if (IS_ERR(crct10dif_tfm)) {
+		pr_err("Error allocating crc-t10dif\n");
+		goto free_buf;
+	}
+
+	crc32c_tfm = crypto_alloc_shash("crc32c", 0, 0);
+
+	if (IS_ERR(crc32c_tfm)) {
+		pr_err("Error allocating crc32c\n");
+		goto free_16;
+	}
+
+	do {
+		SHASH_DESC_ON_STACK(crct10dif_shash, crct10dif_tfm);
+		SHASH_DESC_ON_STACK(crc32c_shash, crc32c_tfm);
+
+		crct10dif_shash->tfm = crct10dif_tfm;
+		ret = crypto_shash_init(crct10dif_shash);
+
+		if (ret) {
+			pr_err("Error initing crc-t10dif\n");
+			goto free_32;
+		}
+
+
+		crc32c_shash->tfm = crc32c_tfm;
+		ret = crypto_shash_init(crc32c_shash);
+
+		if (ret) {
+			pr_err("Error initing crc32c\n");
+			goto free_32;
+		}
+
+		pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations);
+		for (i=0; i<iterations; i++) {
+			size_t len, offset;
+
+			get_random_bytes(data, MAX_CRC_LENGTH);
+			get_random_bytes(&len, sizeof(len));
+			get_random_bytes(&offset, sizeof(offset));
+
+			len %= MAX_CRC_LENGTH;
+			offset &= 15;
+			if (len <= offset)
+				continue;
+			len -= offset;
+
+			crypto_shash_update(crct10dif_shash, data+offset, len);
+			crypto_shash_final(crct10dif_shash, (u8 *)(&crc16));
+			verify16 = crc_t10dif_generic(verify16, data+offset, len);
+
+
+			if (crc16 != verify16) {
+				pr_err("FAILURE in CRC16: got 0x%04x expected 0x%04x (len %lu)\n",
+				       crc16, verify16, len);
+				break;
+			}
+
+			crypto_shash_update(crc32c_shash, data+offset, len);
+			crypto_shash_final(crc32c_shash, (u8 *)(&crc32));
+			verify32 = le32_to_cpu(verify32le);
+			verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len));
+			if (crc32 != (u32)verify32le) {
+				pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n",
+				       crc32, verify32, len);
+				break;
+			}
+		}
+		pr_info("crc-vpmsum_test done, completed %lu iterations\n", i);
+	} while (0);
+
+free_32:
+	crypto_free_shash(crc32c_tfm);
+
+free_16:
+	crypto_free_shash(crct10dif_tfm);
+
+free_buf:
+	kfree(data);
+
+	return 0;
+}
+
+static void __exit crc_test_exit(void) {}
+
+module_init(crc_test_init);
+module_exit(crc_test_exit);
+module_param(iterations, long, 0400);
+
+MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
+MODULE_DESCRIPTION("Vector polynomial multiply-sum CRC tester");
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/crypto/crc32-vpmsum_core.S
new file mode 100644
index 000000000000..aadb59c96a27
--- /dev/null
+++ b/arch/powerpc/crypto/crc32-vpmsum_core.S
@@ -0,0 +1,755 @@
1/*
2 * Core of the accelerated CRC algorithm.
3 * In your file, define the constants and CRC_FUNCTION_NAME
4 * Then include this file.
5 *
6 * Calculate the checksum of data that is 16 byte aligned and a multiple of
7 * 16 bytes.
8 *
9 * The first step is to reduce it to 1024 bits. We do this in 8 parallel
10 * chunks in order to mask the latency of the vpmsum instructions. If we
11 * have more than 32 kB of data to checksum we repeat this step multiple
12 * times, passing in the previous 1024 bits.
13 *
14 * The next step is to reduce the 1024 bits to 64 bits. This step adds
15 * 32 bits of 0s to the end - this matches what a CRC does. We just
16 * calculate constants that land the data in this 32 bits.
17 *
18 * We then use fixed point Barrett reduction to compute a mod n over GF(2)
19 * for n = CRC using POWER8 instructions. We use x = 32.
20 *
21 * http://en.wikipedia.org/wiki/Barrett_reduction
22 *
23 * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
24 *
25 * This program is free software; you can redistribute it and/or
26 * modify it under the terms of the GNU General Public License
27 * as published by the Free Software Foundation; either version
28 * 2 of the License, or (at your option) any later version.
29*/
30
31#include <asm/ppc_asm.h>
32#include <asm/ppc-opcode.h>
33
34#define MAX_SIZE 32768
35
36 .text
37
38#if defined(__BIG_ENDIAN__) && defined(REFLECT)
39#define BYTESWAP_DATA
40#elif defined(__LITTLE_ENDIAN__) && !defined(REFLECT)
41#define BYTESWAP_DATA
42#else
43#undef BYTESWAP_DATA
44#endif
45
46#define off16 r25
47#define off32 r26
48#define off48 r27
49#define off64 r28
50#define off80 r29
51#define off96 r30
52#define off112 r31
53
54#define const1 v24
55#define const2 v25
56
57#define byteswap v26
58#define mask_32bit v27
59#define mask_64bit v28
60#define zeroes v29
61
62#ifdef BYTESWAP_DATA
63#define VPERM(A, B, C, D) vperm A, B, C, D
64#else
65#define VPERM(A, B, C, D)
66#endif
67
68/* unsigned int CRC_FUNCTION_NAME(unsigned int crc, void *p, unsigned long len) */
69FUNC_START(CRC_FUNCTION_NAME)
70 std r31,-8(r1)
71 std r30,-16(r1)
72 std r29,-24(r1)
73 std r28,-32(r1)
74 std r27,-40(r1)
75 std r26,-48(r1)
76 std r25,-56(r1)
77
78 li off16,16
79 li off32,32
80 li off48,48
81 li off64,64
82 li off80,80
83 li off96,96
84 li off112,112
85 li r0,0
86
87 /* Enough room for saving 10 non volatile VMX registers */
88 subi r6,r1,56+10*16
89 subi r7,r1,56+2*16
90
91 stvx v20,0,r6
92 stvx v21,off16,r6
93 stvx v22,off32,r6
94 stvx v23,off48,r6
95 stvx v24,off64,r6
96 stvx v25,off80,r6
97 stvx v26,off96,r6
98 stvx v27,off112,r6
99 stvx v28,0,r7
100 stvx v29,off16,r7
101
102 mr r10,r3
103
104 vxor zeroes,zeroes,zeroes
105 vspltisw v0,-1
106
107 vsldoi mask_32bit,zeroes,v0,4
108 vsldoi mask_64bit,zeroes,v0,8
109
110 /* Get the initial value into v8 */
111 vxor v8,v8,v8
112 MTVRD(v8, R3)
113#ifdef REFLECT
114 vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
115#else
116 vsldoi v8,v8,zeroes,4 /* shift into top 32 bits */
117#endif
118
119#ifdef BYTESWAP_DATA
120 addis r3,r2,.byteswap_constant@toc@ha
121 addi r3,r3,.byteswap_constant@toc@l
122
123 lvx byteswap,0,r3
124 addi r3,r3,16
125#endif
126
127 cmpdi r5,256
128 blt .Lshort
129
130 rldicr r6,r5,0,56
131
132 /* Checksum in blocks of MAX_SIZE */
1331: lis r7,MAX_SIZE@h
134 ori r7,r7,MAX_SIZE@l
135 mr r9,r7
136 cmpd r6,r7
137 bgt 2f
138 mr r7,r6
1392: subf r6,r7,r6
140
141 /* our main loop does 128 bytes at a time */
142 srdi r7,r7,7
143
144 /*
145 * Work out the offset into the constants table to start at. Each
146 * constant is 16 bytes, and it is used against 128 bytes of input
147 * data - 128 / 16 = 8
148 */
149 sldi r8,r7,4
150 srdi r9,r9,3
151 subf r8,r8,r9
152
153 /* We reduce our final 128 bytes in a separate step */
154 addi r7,r7,-1
155 mtctr r7
156
157 addis r3,r2,.constants@toc@ha
158 addi r3,r3,.constants@toc@l
159
160 /* Find the start of our constants */
161 add r3,r3,r8
162
163 /* zero v0-v7 which will contain our checksums */
164 vxor v0,v0,v0
165 vxor v1,v1,v1
166 vxor v2,v2,v2
167 vxor v3,v3,v3
168 vxor v4,v4,v4
169 vxor v5,v5,v5
170 vxor v6,v6,v6
171 vxor v7,v7,v7
172
173 lvx const1,0,r3
174
175 /*
176 * If we are looping back to consume more data we use the values
177 * already in v16-v23.
178 */
179 cmpdi r0,1
180 beq 2f
181
182 /* First warm up pass */
183 lvx v16,0,r4
184 lvx v17,off16,r4
185 VPERM(v16,v16,v16,byteswap)
186 VPERM(v17,v17,v17,byteswap)
187 lvx v18,off32,r4
188 lvx v19,off48,r4
189 VPERM(v18,v18,v18,byteswap)
190 VPERM(v19,v19,v19,byteswap)
191 lvx v20,off64,r4
192 lvx v21,off80,r4
193 VPERM(v20,v20,v20,byteswap)
194 VPERM(v21,v21,v21,byteswap)
195 lvx v22,off96,r4
196 lvx v23,off112,r4
197 VPERM(v22,v22,v22,byteswap)
198 VPERM(v23,v23,v23,byteswap)
199 addi r4,r4,8*16
200
201 /* xor in initial value */
202 vxor v16,v16,v8
203
2042: bdz .Lfirst_warm_up_done
205
206 addi r3,r3,16
207 lvx const2,0,r3
208
209 /* Second warm up pass */
210 VPMSUMD(v8,v16,const1)
211 lvx v16,0,r4
212 VPERM(v16,v16,v16,byteswap)
213 ori r2,r2,0
214
215 VPMSUMD(v9,v17,const1)
216 lvx v17,off16,r4
217 VPERM(v17,v17,v17,byteswap)
218 ori r2,r2,0
219
220 VPMSUMD(v10,v18,const1)
221 lvx v18,off32,r4
222 VPERM(v18,v18,v18,byteswap)
223 ori r2,r2,0
224
225 VPMSUMD(v11,v19,const1)
226 lvx v19,off48,r4
227 VPERM(v19,v19,v19,byteswap)
228 ori r2,r2,0
229
230 VPMSUMD(v12,v20,const1)
231 lvx v20,off64,r4
232 VPERM(v20,v20,v20,byteswap)
233 ori r2,r2,0
234
235 VPMSUMD(v13,v21,const1)
236 lvx v21,off80,r4
237 VPERM(v21,v21,v21,byteswap)
238 ori r2,r2,0
239
240 VPMSUMD(v14,v22,const1)
241 lvx v22,off96,r4
242 VPERM(v22,v22,v22,byteswap)
243 ori r2,r2,0
244
245 VPMSUMD(v15,v23,const1)
246 lvx v23,off112,r4
247 VPERM(v23,v23,v23,byteswap)
248
249 addi r4,r4,8*16
250
251 bdz .Lfirst_cool_down
252
253 /*
254 * main loop. We modulo schedule it such that it takes three iterations
255 * to complete - first iteration load, second iteration vpmsum, third
256 * iteration xor.
257 */
258 .balign 16
2594: lvx const1,0,r3
260 addi r3,r3,16
261 ori r2,r2,0
262
263 vxor v0,v0,v8
264 VPMSUMD(v8,v16,const2)
265 lvx v16,0,r4
266 VPERM(v16,v16,v16,byteswap)
267 ori r2,r2,0
268
269 vxor v1,v1,v9
270 VPMSUMD(v9,v17,const2)
271 lvx v17,off16,r4
272 VPERM(v17,v17,v17,byteswap)
273 ori r2,r2,0
274
275 vxor v2,v2,v10
276 VPMSUMD(v10,v18,const2)
277 lvx v18,off32,r4
278 VPERM(v18,v18,v18,byteswap)
279 ori r2,r2,0
280
281 vxor v3,v3,v11
282 VPMSUMD(v11,v19,const2)
283 lvx v19,off48,r4
284 VPERM(v19,v19,v19,byteswap)
285 lvx const2,0,r3
286 ori r2,r2,0
287
288 vxor v4,v4,v12
289 VPMSUMD(v12,v20,const1)
290 lvx v20,off64,r4
291 VPERM(v20,v20,v20,byteswap)
292 ori r2,r2,0
293
294 vxor v5,v5,v13
295 VPMSUMD(v13,v21,const1)
296 lvx v21,off80,r4
297 VPERM(v21,v21,v21,byteswap)
298 ori r2,r2,0
299
300 vxor v6,v6,v14
301 VPMSUMD(v14,v22,const1)
302 lvx v22,off96,r4
303 VPERM(v22,v22,v22,byteswap)
304 ori r2,r2,0
305
306 vxor v7,v7,v15
307 VPMSUMD(v15,v23,const1)
308 lvx v23,off112,r4
309 VPERM(v23,v23,v23,byteswap)
310
311 addi r4,r4,8*16
312
313 bdnz 4b
314
315.Lfirst_cool_down:
316 /* First cool down pass */
317 lvx const1,0,r3
318 addi r3,r3,16
319
320 vxor v0,v0,v8
321 VPMSUMD(v8,v16,const1)
322 ori r2,r2,0
323
324 vxor v1,v1,v9
325 VPMSUMD(v9,v17,const1)
326 ori r2,r2,0
327
328 vxor v2,v2,v10
329 VPMSUMD(v10,v18,const1)
330 ori r2,r2,0
331
332 vxor v3,v3,v11
333 VPMSUMD(v11,v19,const1)
334 ori r2,r2,0
335
336 vxor v4,v4,v12
337 VPMSUMD(v12,v20,const1)
338 ori r2,r2,0
339
340 vxor v5,v5,v13
341 VPMSUMD(v13,v21,const1)
342 ori r2,r2,0
343
344 vxor v6,v6,v14
345 VPMSUMD(v14,v22,const1)
346 ori r2,r2,0
347
348 vxor v7,v7,v15
349 VPMSUMD(v15,v23,const1)
350 ori r2,r2,0
351
352.Lsecond_cool_down:
353 /* Second cool down pass */
354 vxor v0,v0,v8
355 vxor v1,v1,v9
356 vxor v2,v2,v10
357 vxor v3,v3,v11
358 vxor v4,v4,v12
359 vxor v5,v5,v13
360 vxor v6,v6,v14
361 vxor v7,v7,v15
362
363#ifdef REFLECT
364 /*
365 * vpmsumd produces a 96 bit result in the least significant bits
366 * of the register. Since we are bit reflected we have to shift it
367 * left 32 bits so it occupies the least significant bits in the
368 * bit reflected domain.
369 */
370 vsldoi v0,v0,zeroes,4
371 vsldoi v1,v1,zeroes,4
372 vsldoi v2,v2,zeroes,4
373 vsldoi v3,v3,zeroes,4
374 vsldoi v4,v4,zeroes,4
375 vsldoi v5,v5,zeroes,4
376 vsldoi v6,v6,zeroes,4
377 vsldoi v7,v7,zeroes,4
378#endif
379
380 /* xor with last 1024 bits */
381 lvx v8,0,r4
382 lvx v9,off16,r4
383 VPERM(v8,v8,v8,byteswap)
384 VPERM(v9,v9,v9,byteswap)
385 lvx v10,off32,r4
386 lvx v11,off48,r4
387 VPERM(v10,v10,v10,byteswap)
388 VPERM(v11,v11,v11,byteswap)
389 lvx v12,off64,r4
390 lvx v13,off80,r4
391 VPERM(v12,v12,v12,byteswap)
392 VPERM(v13,v13,v13,byteswap)
393 lvx v14,off96,r4
394 lvx v15,off112,r4
395 VPERM(v14,v14,v14,byteswap)
396 VPERM(v15,v15,v15,byteswap)
397
398 addi r4,r4,8*16
399
400 vxor v16,v0,v8
401 vxor v17,v1,v9
402 vxor v18,v2,v10
403 vxor v19,v3,v11
404 vxor v20,v4,v12
405 vxor v21,v5,v13
406 vxor v22,v6,v14
407 vxor v23,v7,v15
408
409 li r0,1
410 cmpdi r6,0
411 addi r6,r6,128
412 bne 1b
413
414 /* Work out how many bytes we have left */
415 andi. r5,r5,127
416
417 /* Calculate where in the constant table we need to start */
418 subfic r6,r5,128
419 add r3,r3,r6
420
421 /* How many 16 byte chunks are in the tail */
422 srdi r7,r5,4
423 mtctr r7
424
425 /*
426 * Reduce the previously calculated 1024 bits to 64 bits, shifting
427 * 32 bits to include the trailing 32 bits of zeros
428 */
429 lvx v0,0,r3
430 lvx v1,off16,r3
431 lvx v2,off32,r3
432 lvx v3,off48,r3
433 lvx v4,off64,r3
434 lvx v5,off80,r3
435 lvx v6,off96,r3
436 lvx v7,off112,r3
437 addi r3,r3,8*16
438
439 VPMSUMW(v0,v16,v0)
440 VPMSUMW(v1,v17,v1)
441 VPMSUMW(v2,v18,v2)
442 VPMSUMW(v3,v19,v3)
443 VPMSUMW(v4,v20,v4)
444 VPMSUMW(v5,v21,v5)
445 VPMSUMW(v6,v22,v6)
446 VPMSUMW(v7,v23,v7)
447
448 /* Now reduce the tail (0 - 112 bytes) */
449 cmpdi r7,0
450 beq 1f
451
452 lvx v16,0,r4
453 lvx v17,0,r3
454 VPERM(v16,v16,v16,byteswap)
455 VPMSUMW(v16,v16,v17)
456 vxor v0,v0,v16
457 bdz 1f
458
459 lvx v16,off16,r4
460 lvx v17,off16,r3
461 VPERM(v16,v16,v16,byteswap)
462 VPMSUMW(v16,v16,v17)
463 vxor v0,v0,v16
464 bdz 1f
465
466 lvx v16,off32,r4
467 lvx v17,off32,r3
468 VPERM(v16,v16,v16,byteswap)
469 VPMSUMW(v16,v16,v17)
470 vxor v0,v0,v16
471 bdz 1f
472
473 lvx v16,off48,r4
474 lvx v17,off48,r3
475 VPERM(v16,v16,v16,byteswap)
476 VPMSUMW(v16,v16,v17)
477 vxor v0,v0,v16
478 bdz 1f
479
480 lvx v16,off64,r4
481 lvx v17,off64,r3
482 VPERM(v16,v16,v16,byteswap)
483 VPMSUMW(v16,v16,v17)
484 vxor v0,v0,v16
485 bdz 1f
486
487 lvx v16,off80,r4
488 lvx v17,off80,r3
489 VPERM(v16,v16,v16,byteswap)
490 VPMSUMW(v16,v16,v17)
491 vxor v0,v0,v16
492 bdz 1f
493
494 lvx v16,off96,r4
495 lvx v17,off96,r3
496 VPERM(v16,v16,v16,byteswap)
497 VPMSUMW(v16,v16,v17)
498 vxor v0,v0,v16
499
500 /* Now xor all the parallel chunks together */
5011: vxor v0,v0,v1
502 vxor v2,v2,v3
503 vxor v4,v4,v5
504 vxor v6,v6,v7
505
506 vxor v0,v0,v2
507 vxor v4,v4,v6
508
509 vxor v0,v0,v4
510
511.Lbarrett_reduction:
512 /* Barrett constants */
513 addis r3,r2,.barrett_constants@toc@ha
514 addi r3,r3,.barrett_constants@toc@l
515
516 lvx const1,0,r3
517 lvx const2,off16,r3
518
519 vsldoi v1,v0,v0,8
520 vxor v0,v0,v1 /* xor two 64 bit results together */
521
522#ifdef REFLECT
523 /* shift left one bit */
524 vspltisb v1,1
525 vsl v0,v0,v1
526#endif
527
528 vand v0,v0,mask_64bit
529#ifndef REFLECT
530 /*
531 * Now for the Barrett reduction algorithm. The idea is to calculate q,
532 * the multiple of our polynomial that we need to subtract. By
533 * doing the computation 2x bits higher (ie 64 bits) and shifting the
534 * result back down 2x bits, we round down to the nearest multiple.
535 */
536 VPMSUMD(v1,v0,const1) /* ma */
537 vsldoi v1,zeroes,v1,8 /* q = floor(ma/(2^64)) */
538 VPMSUMD(v1,v1,const2) /* qn */
539 vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
540
541 /*
542 * Get the result into r3. We need to shift it left 8 bytes:
543 * V0 [ 0 1 2 X ]
544 * V0 [ 0 X 2 3 ]
545 */
546 vsldoi v0,v0,zeroes,8 /* shift result into top 64 bits */
547#else
548 /*
549 * The reflected version of Barrett reduction. Instead of bit
550 * reflecting our data (which is expensive to do), we bit reflect our
551 * constants and our algorithm, which means the intermediate data in
552 * our vector registers goes from 0-63 instead of 63-0. We can reflect
553 * the algorithm because we don't carry in mod 2 arithmetic.
554 */
555 vand v1,v0,mask_32bit /* bottom 32 bits of a */
556 VPMSUMD(v1,v1,const1) /* ma */
557 vand v1,v1,mask_32bit /* bottom 32bits of ma */
558 VPMSUMD(v1,v1,const2) /* qn */
559 vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
560
561 /*
562 * Since we are bit reflected, the result (ie the low 32 bits) is in
563 * the high 32 bits. We just need to shift it left 4 bytes
564 * V0 [ 0 1 X 3 ]
565 * V0 [ 0 X 2 3 ]
566 */
567 vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */
568#endif
569
570 /* Get it into r3 */
571 MFVRD(R3, v0)
572
573.Lout:
574 subi r6,r1,56+10*16
575 subi r7,r1,56+2*16
576
577 lvx v20,0,r6
578 lvx v21,off16,r6
579 lvx v22,off32,r6
580 lvx v23,off48,r6
581 lvx v24,off64,r6
582 lvx v25,off80,r6
583 lvx v26,off96,r6
584 lvx v27,off112,r6
585 lvx v28,0,r7
586 lvx v29,off16,r7
587
588 ld r31,-8(r1)
589 ld r30,-16(r1)
590 ld r29,-24(r1)
591 ld r28,-32(r1)
592 ld r27,-40(r1)
593 ld r26,-48(r1)
594 ld r25,-56(r1)
595
596 blr
597
598.Lfirst_warm_up_done:
599 lvx const1,0,r3
600 addi r3,r3,16
601
602 VPMSUMD(v8,v16,const1)
603 VPMSUMD(v9,v17,const1)
604 VPMSUMD(v10,v18,const1)
605 VPMSUMD(v11,v19,const1)
606 VPMSUMD(v12,v20,const1)
607 VPMSUMD(v13,v21,const1)
608 VPMSUMD(v14,v22,const1)
609 VPMSUMD(v15,v23,const1)
610
611 b .Lsecond_cool_down
612
613.Lshort:
614 cmpdi r5,0
615 beq .Lzero
616
617 addis r3,r2,.short_constants@toc@ha
618 addi r3,r3,.short_constants@toc@l
619
620 /* Calculate where in the constant table we need to start */
621 subfic r6,r5,256
622 add r3,r3,r6
623
624 /* How many 16 byte chunks? */
625 srdi r7,r5,4
626 mtctr r7
627
628 vxor v19,v19,v19
629 vxor v20,v20,v20
630
631 lvx v0,0,r4
632 lvx v16,0,r3
633 VPERM(v0,v0,v16,byteswap)
634 vxor v0,v0,v8 /* xor in initial value */
635 VPMSUMW(v0,v0,v16)
636 bdz .Lv0
637
638 lvx v1,off16,r4
639 lvx v17,off16,r3
640 VPERM(v1,v1,v17,byteswap)
641 VPMSUMW(v1,v1,v17)
642 bdz .Lv1
643
644 lvx v2,off32,r4
645 lvx v16,off32,r3
646 VPERM(v2,v2,v16,byteswap)
647 VPMSUMW(v2,v2,v16)
648 bdz .Lv2
649
650 lvx v3,off48,r4
651 lvx v17,off48,r3
652 VPERM(v3,v3,v17,byteswap)
653 VPMSUMW(v3,v3,v17)
654 bdz .Lv3
655
656 lvx v4,off64,r4
657 lvx v16,off64,r3
658 VPERM(v4,v4,v16,byteswap)
659 VPMSUMW(v4,v4,v16)
660 bdz .Lv4
661
662 lvx v5,off80,r4
663 lvx v17,off80,r3
664 VPERM(v5,v5,v17,byteswap)
665 VPMSUMW(v5,v5,v17)
666 bdz .Lv5
667
668 lvx v6,off96,r4
669 lvx v16,off96,r3
670 VPERM(v6,v6,v16,byteswap)
671 VPMSUMW(v6,v6,v16)
672 bdz .Lv6
673
674 lvx v7,off112,r4
675 lvx v17,off112,r3
676 VPERM(v7,v7,v17,byteswap)
677 VPMSUMW(v7,v7,v17)
678 bdz .Lv7
679
680 addi r3,r3,128
681 addi r4,r4,128
682
683 lvx v8,0,r4
684 lvx v16,0,r3
685 VPERM(v8,v8,v16,byteswap)
686 VPMSUMW(v8,v8,v16)
687 bdz .Lv8
688
689 lvx v9,off16,r4
690 lvx v17,off16,r3
691 VPERM(v9,v9,v17,byteswap)
692 VPMSUMW(v9,v9,v17)
693 bdz .Lv9
694
695 lvx v10,off32,r4
696 lvx v16,off32,r3
697 VPERM(v10,v10,v16,byteswap)
698 VPMSUMW(v10,v10,v16)
699 bdz .Lv10
700
701 lvx v11,off48,r4
702 lvx v17,off48,r3
703 VPERM(v11,v11,v17,byteswap)
704 VPMSUMW(v11,v11,v17)
705 bdz .Lv11
706
707 lvx v12,off64,r4
708 lvx v16,off64,r3
709 VPERM(v12,v12,v16,byteswap)
710 VPMSUMW(v12,v12,v16)
711 bdz .Lv12
712
713 lvx v13,off80,r4
714 lvx v17,off80,r3
715 VPERM(v13,v13,v17,byteswap)
716 VPMSUMW(v13,v13,v17)
717 bdz .Lv13
718
719 lvx v14,off96,r4
720 lvx v16,off96,r3
721 VPERM(v14,v14,v16,byteswap)
722 VPMSUMW(v14,v14,v16)
723 bdz .Lv14
724
725 lvx v15,off112,r4
726 lvx v17,off112,r3
727 VPERM(v15,v15,v17,byteswap)
728 VPMSUMW(v15,v15,v17)
729
730.Lv15: vxor v19,v19,v15
731.Lv14: vxor v20,v20,v14
732.Lv13: vxor v19,v19,v13
733.Lv12: vxor v20,v20,v12
734.Lv11: vxor v19,v19,v11
735.Lv10: vxor v20,v20,v10
736.Lv9: vxor v19,v19,v9
737.Lv8: vxor v20,v20,v8
738.Lv7: vxor v19,v19,v7
739.Lv6: vxor v20,v20,v6
740.Lv5: vxor v19,v19,v5
741.Lv4: vxor v20,v20,v4
742.Lv3: vxor v19,v19,v3
743.Lv2: vxor v20,v20,v2
744.Lv1: vxor v19,v19,v1
745.Lv0: vxor v20,v20,v0
746
747 vxor v0,v19,v20
748
749 b .Lbarrett_reduction
750
751.Lzero:
752 mr r3,r10
753 b .Lout
754
755FUNC_END(CRC_FUNCTION_NAME)
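
The header comment at the top of this new file describes the final stage as fixed-point Barrett reduction of the remaining 64 bits mod n over GF(2) with x = 32. A minimal self-checking model of the non-reflected path (the VPMSUMD/vsldoi sequence at .Lbarrett_reduction), assuming GCC/Clang's unsigned __int128, might look like this; the bit-serial clmul() and the helper names are illustrative stand-ins for the vpmsumd instruction, not kernel APIs:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef unsigned __int128 u128;

	/* Carryless multiply: GF(2) product of two polynomials of degree < 64. */
	static u128 clmul(uint64_t a, uint64_t b)
	{
		u128 r = 0;

		for (int i = 0; i < 64; i++)
			if ((b >> i) & 1)
				r ^= (u128)a << i;
		return r;
	}

	/* Schoolbook GF(2) division: quotient of num (degree numdeg) by n (degree deg). */
	static uint64_t gf2_quo(u128 num, uint64_t n, int deg, int numdeg)
	{
		uint64_t q = 0;

		for (int i = numdeg; i >= deg; i--)
			if ((num >> i) & 1) {
				num ^= (u128)n << (i - deg);
				q |= 1ull << (i - deg);
			}
		return q;
	}

	/* Bit-serial remainder of a degree<64 polynomial mod the 33-bit poly n. */
	static uint32_t gf2_mod(uint64_t a, uint64_t n)
	{
		u128 r = a;

		for (int i = 63; i >= 32; i--)
			if ((r >> i) & 1)
				r ^= (u128)n << (i - 32);
		return (uint32_t)r;
	}

	int main(void)
	{
		const uint64_t n  = 0x11EDC6F41ull;	/* CRC32C poly: x^32 + ... + 1 */
		const uint64_t mu = gf2_quo((u128)1 << 64, n, 32, 64); /* floor(x^64/n) */
		const uint64_t a  = 0x123456789abcdef0ull; /* the 64 bits left to reduce */

		uint64_t q = (uint64_t)(clmul(a, mu) >> 64); /* q = floor(a*mu / x^64) */
		uint64_t r = a ^ (uint64_t)clmul(q, n);      /* a - q*n; minus is xor in GF(2) */

		assert(r >> 32 == 0);			/* remainder has degree < 32 */
		assert((uint32_t)r == gf2_mod(a, n));	/* matches long division */
		printf("a mod n = 0x%08x\n", (unsigned)r);
		return 0;
	}

Here mu and n play the roles of const1 and const2 loaded from .barrett_constants; the reflected variant used by crc32c applies the same identity with bit-reversed constants so the data never has to be bit-reflected.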
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/crypto/crc32c-vpmsum_asm.S
index dc640b212299..d2bea48051a0 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_asm.S
+++ b/arch/powerpc/crypto/crc32c-vpmsum_asm.S
@@ -1,20 +1,5 @@
 /*
- * Calculate the checksum of data that is 16 byte aligned and a multiple of
- * 16 bytes.
- *
- * The first step is to reduce it to 1024 bits. We do this in 8 parallel
- * chunks in order to mask the latency of the vpmsum instructions. If we
- * have more than 32 kB of data to checksum we repeat this step multiple
- * times, passing in the previous 1024 bits.
- *
- * The next step is to reduce the 1024 bits to 64 bits. This step adds
- * 32 bits of 0s to the end - this matches what a CRC does. We just
- * calculate constants that land the data in this 32 bits.
- *
- * We then use fixed point Barrett reduction to compute a mod n over GF(2)
- * for n = CRC using POWER8 instructions. We use x = 32.
- *
- * http://en.wikipedia.org/wiki/Barrett_reduction
+ * Calculate a crc32c with vpmsum acceleration
  *
  * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
  *
@@ -23,9 +8,6 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
-#include <asm/ppc_asm.h>
-#include <asm/ppc-opcode.h>
-
 	.section	.rodata
 .balign 16
 
@@ -33,7 +15,6 @@
 	/* byte reverse permute constant */
 	.octa 0x0F0E0D0C0B0A09080706050403020100
 
-#define MAX_SIZE	32768
 .constants:
 
 	/* Reduce 262144 kbits to 1024 bits */
@@ -860,694 +841,6 @@
 	/* 33 bit reflected Barrett constant n */
 	.octa 0x00000000000000000000000105ec76f1
 
+#define CRC_FUNCTION_NAME	__crc32c_vpmsum
+#define REFLECT
+#include "crc32-vpmsum_core.S"
-	.text
-
-#if defined(__BIG_ENDIAN__)
867#else
868#undef BYTESWAP_DATA
869#endif
870
871#define off16 r25
872#define off32 r26
873#define off48 r27
874#define off64 r28
875#define off80 r29
876#define off96 r30
877#define off112 r31
878
879#define const1 v24
880#define const2 v25
881
882#define byteswap v26
883#define mask_32bit v27
884#define mask_64bit v28
885#define zeroes v29
886
887#ifdef BYTESWAP_DATA
888#define VPERM(A, B, C, D) vperm A, B, C, D
889#else
890#define VPERM(A, B, C, D)
891#endif
892
893/* unsigned int __crc32c_vpmsum(unsigned int crc, void *p, unsigned long len) */
894FUNC_START(__crc32c_vpmsum)
895 std r31,-8(r1)
896 std r30,-16(r1)
897 std r29,-24(r1)
898 std r28,-32(r1)
899 std r27,-40(r1)
900 std r26,-48(r1)
901 std r25,-56(r1)
902
903 li off16,16
904 li off32,32
905 li off48,48
906 li off64,64
907 li off80,80
908 li off96,96
909 li off112,112
910 li r0,0
911
912 /* Enough room for saving 10 non volatile VMX registers */
913 subi r6,r1,56+10*16
914 subi r7,r1,56+2*16
915
916 stvx v20,0,r6
917 stvx v21,off16,r6
918 stvx v22,off32,r6
919 stvx v23,off48,r6
920 stvx v24,off64,r6
921 stvx v25,off80,r6
922 stvx v26,off96,r6
923 stvx v27,off112,r6
924 stvx v28,0,r7
925 stvx v29,off16,r7
926
927 mr r10,r3
928
929 vxor zeroes,zeroes,zeroes
930 vspltisw v0,-1
931
932 vsldoi mask_32bit,zeroes,v0,4
933 vsldoi mask_64bit,zeroes,v0,8
934
935 /* Get the initial value into v8 */
936 vxor v8,v8,v8
937 MTVRD(v8, R3)
938 vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
939
940#ifdef BYTESWAP_DATA
941 addis r3,r2,.byteswap_constant@toc@ha
942 addi r3,r3,.byteswap_constant@toc@l
943
944 lvx byteswap,0,r3
945 addi r3,r3,16
946#endif
947
948 cmpdi r5,256
949 blt .Lshort
950
951 rldicr r6,r5,0,56
952
953 /* Checksum in blocks of MAX_SIZE */
9541: lis r7,MAX_SIZE@h
955 ori r7,r7,MAX_SIZE@l
956 mr r9,r7
957 cmpd r6,r7
958 bgt 2f
959 mr r7,r6
9602: subf r6,r7,r6
961
962 /* our main loop does 128 bytes at a time */
963 srdi r7,r7,7
964
965 /*
966 * Work out the offset into the constants table to start at. Each
967 * constant is 16 bytes, and it is used against 128 bytes of input
968 * data - 128 / 16 = 8
969 */
970 sldi r8,r7,4
971 srdi r9,r9,3
972 subf r8,r8,r9
973
974 /* We reduce our final 128 bytes in a separate step */
975 addi r7,r7,-1
976 mtctr r7
977
978 addis r3,r2,.constants@toc@ha
979 addi r3,r3,.constants@toc@l
980
981 /* Find the start of our constants */
982 add r3,r3,r8
983
984 /* zero v0-v7 which will contain our checksums */
985 vxor v0,v0,v0
986 vxor v1,v1,v1
987 vxor v2,v2,v2
988 vxor v3,v3,v3
989 vxor v4,v4,v4
990 vxor v5,v5,v5
991 vxor v6,v6,v6
992 vxor v7,v7,v7
993
994 lvx const1,0,r3
995
996 /*
997 * If we are looping back to consume more data we use the values
998 * already in v16-v23.
999 */
1000 cmpdi r0,1
1001 beq 2f
1002
1003 /* First warm up pass */
1004 lvx v16,0,r4
1005 lvx v17,off16,r4
1006 VPERM(v16,v16,v16,byteswap)
1007 VPERM(v17,v17,v17,byteswap)
1008 lvx v18,off32,r4
1009 lvx v19,off48,r4
1010 VPERM(v18,v18,v18,byteswap)
1011 VPERM(v19,v19,v19,byteswap)
1012 lvx v20,off64,r4
1013 lvx v21,off80,r4
1014 VPERM(v20,v20,v20,byteswap)
1015 VPERM(v21,v21,v21,byteswap)
1016 lvx v22,off96,r4
1017 lvx v23,off112,r4
1018 VPERM(v22,v22,v22,byteswap)
1019 VPERM(v23,v23,v23,byteswap)
1020 addi r4,r4,8*16
1021
1022 /* xor in initial value */
1023 vxor v16,v16,v8
1024
10252: bdz .Lfirst_warm_up_done
1026
1027 addi r3,r3,16
1028 lvx const2,0,r3
1029
1030 /* Second warm up pass */
1031 VPMSUMD(v8,v16,const1)
1032 lvx v16,0,r4
1033 VPERM(v16,v16,v16,byteswap)
1034 ori r2,r2,0
1035
1036 VPMSUMD(v9,v17,const1)
1037 lvx v17,off16,r4
1038 VPERM(v17,v17,v17,byteswap)
1039 ori r2,r2,0
1040
1041 VPMSUMD(v10,v18,const1)
1042 lvx v18,off32,r4
1043 VPERM(v18,v18,v18,byteswap)
1044 ori r2,r2,0
1045
1046 VPMSUMD(v11,v19,const1)
1047 lvx v19,off48,r4
1048 VPERM(v19,v19,v19,byteswap)
1049 ori r2,r2,0
1050
1051 VPMSUMD(v12,v20,const1)
1052 lvx v20,off64,r4
1053 VPERM(v20,v20,v20,byteswap)
1054 ori r2,r2,0
1055
1056 VPMSUMD(v13,v21,const1)
1057 lvx v21,off80,r4
1058 VPERM(v21,v21,v21,byteswap)
1059 ori r2,r2,0
1060
1061 VPMSUMD(v14,v22,const1)
1062 lvx v22,off96,r4
1063 VPERM(v22,v22,v22,byteswap)
1064 ori r2,r2,0
1065
1066 VPMSUMD(v15,v23,const1)
1067 lvx v23,off112,r4
1068 VPERM(v23,v23,v23,byteswap)
1069
1070 addi r4,r4,8*16
1071
1072 bdz .Lfirst_cool_down
1073
1074 /*
1075 * main loop. We modulo schedule it such that it takes three iterations
1076 * to complete - first iteration load, second iteration vpmsum, third
1077 * iteration xor.
1078 */
1079 .balign 16
10804: lvx const1,0,r3
1081 addi r3,r3,16
1082 ori r2,r2,0
1083
1084 vxor v0,v0,v8
1085 VPMSUMD(v8,v16,const2)
1086 lvx v16,0,r4
1087 VPERM(v16,v16,v16,byteswap)
1088 ori r2,r2,0
1089
1090 vxor v1,v1,v9
1091 VPMSUMD(v9,v17,const2)
1092 lvx v17,off16,r4
1093 VPERM(v17,v17,v17,byteswap)
1094 ori r2,r2,0
1095
1096 vxor v2,v2,v10
1097 VPMSUMD(v10,v18,const2)
1098 lvx v18,off32,r4
1099 VPERM(v18,v18,v18,byteswap)
1100 ori r2,r2,0
1101
1102 vxor v3,v3,v11
1103 VPMSUMD(v11,v19,const2)
1104 lvx v19,off48,r4
1105 VPERM(v19,v19,v19,byteswap)
1106 lvx const2,0,r3
1107 ori r2,r2,0
1108
1109 vxor v4,v4,v12
1110 VPMSUMD(v12,v20,const1)
1111 lvx v20,off64,r4
1112 VPERM(v20,v20,v20,byteswap)
1113 ori r2,r2,0
1114
1115 vxor v5,v5,v13
1116 VPMSUMD(v13,v21,const1)
1117 lvx v21,off80,r4
1118 VPERM(v21,v21,v21,byteswap)
1119 ori r2,r2,0
1120
1121 vxor v6,v6,v14
1122 VPMSUMD(v14,v22,const1)
1123 lvx v22,off96,r4
1124 VPERM(v22,v22,v22,byteswap)
1125 ori r2,r2,0
1126
1127 vxor v7,v7,v15
1128 VPMSUMD(v15,v23,const1)
1129 lvx v23,off112,r4
1130 VPERM(v23,v23,v23,byteswap)
1131
1132 addi r4,r4,8*16
1133
1134 bdnz 4b
1135
1136.Lfirst_cool_down:
1137 /* First cool down pass */
1138 lvx const1,0,r3
1139 addi r3,r3,16
1140
1141 vxor v0,v0,v8
1142 VPMSUMD(v8,v16,const1)
1143 ori r2,r2,0
1144
1145 vxor v1,v1,v9
1146 VPMSUMD(v9,v17,const1)
1147 ori r2,r2,0
1148
1149 vxor v2,v2,v10
1150 VPMSUMD(v10,v18,const1)
1151 ori r2,r2,0
1152
1153 vxor v3,v3,v11
1154 VPMSUMD(v11,v19,const1)
1155 ori r2,r2,0
1156
1157 vxor v4,v4,v12
1158 VPMSUMD(v12,v20,const1)
1159 ori r2,r2,0
1160
1161 vxor v5,v5,v13
1162 VPMSUMD(v13,v21,const1)
1163 ori r2,r2,0
1164
1165 vxor v6,v6,v14
1166 VPMSUMD(v14,v22,const1)
1167 ori r2,r2,0
1168
1169 vxor v7,v7,v15
1170 VPMSUMD(v15,v23,const1)
1171 ori r2,r2,0
1172
1173.Lsecond_cool_down:
1174 /* Second cool down pass */
1175 vxor v0,v0,v8
1176 vxor v1,v1,v9
1177 vxor v2,v2,v10
1178 vxor v3,v3,v11
1179 vxor v4,v4,v12
1180 vxor v5,v5,v13
1181 vxor v6,v6,v14
1182 vxor v7,v7,v15
1183
1184 /*
1185 * vpmsumd produces a 96 bit result in the least significant bits
1186 * of the register. Since we are bit reflected we have to shift it
1187 * left 32 bits so it occupies the least significant bits in the
1188 * bit reflected domain.
1189 */
1190 vsldoi v0,v0,zeroes,4
1191 vsldoi v1,v1,zeroes,4
1192 vsldoi v2,v2,zeroes,4
1193 vsldoi v3,v3,zeroes,4
1194 vsldoi v4,v4,zeroes,4
1195 vsldoi v5,v5,zeroes,4
1196 vsldoi v6,v6,zeroes,4
1197 vsldoi v7,v7,zeroes,4
1198
1199 /* xor with last 1024 bits */
1200 lvx v8,0,r4
1201 lvx v9,off16,r4
1202 VPERM(v8,v8,v8,byteswap)
1203 VPERM(v9,v9,v9,byteswap)
1204 lvx v10,off32,r4
1205 lvx v11,off48,r4
1206 VPERM(v10,v10,v10,byteswap)
1207 VPERM(v11,v11,v11,byteswap)
1208 lvx v12,off64,r4
1209 lvx v13,off80,r4
1210 VPERM(v12,v12,v12,byteswap)
1211 VPERM(v13,v13,v13,byteswap)
1212 lvx v14,off96,r4
1213 lvx v15,off112,r4
1214 VPERM(v14,v14,v14,byteswap)
1215 VPERM(v15,v15,v15,byteswap)
1216
1217 addi r4,r4,8*16
1218
1219 vxor v16,v0,v8
1220 vxor v17,v1,v9
1221 vxor v18,v2,v10
1222 vxor v19,v3,v11
1223 vxor v20,v4,v12
1224 vxor v21,v5,v13
1225 vxor v22,v6,v14
1226 vxor v23,v7,v15
1227
1228 li r0,1
1229 cmpdi r6,0
1230 addi r6,r6,128
1231 bne 1b
1232
1233 /* Work out how many bytes we have left */
1234 andi. r5,r5,127
1235
1236 /* Calculate where in the constant table we need to start */
1237 subfic r6,r5,128
1238 add r3,r3,r6
1239
1240 /* How many 16 byte chunks are in the tail */
1241 srdi r7,r5,4
1242 mtctr r7
1243
1244 /*
1245 * Reduce the previously calculated 1024 bits to 64 bits, shifting
1246 * 32 bits to include the trailing 32 bits of zeros
1247 */
1248 lvx v0,0,r3
1249 lvx v1,off16,r3
1250 lvx v2,off32,r3
1251 lvx v3,off48,r3
1252 lvx v4,off64,r3
1253 lvx v5,off80,r3
1254 lvx v6,off96,r3
1255 lvx v7,off112,r3
1256 addi r3,r3,8*16
1257
1258 VPMSUMW(v0,v16,v0)
1259 VPMSUMW(v1,v17,v1)
1260 VPMSUMW(v2,v18,v2)
1261 VPMSUMW(v3,v19,v3)
1262 VPMSUMW(v4,v20,v4)
1263 VPMSUMW(v5,v21,v5)
1264 VPMSUMW(v6,v22,v6)
1265 VPMSUMW(v7,v23,v7)
1266
1267 /* Now reduce the tail (0 - 112 bytes) */
1268 cmpdi r7,0
1269 beq 1f
1270
1271 lvx v16,0,r4
1272 lvx v17,0,r3
1273 VPERM(v16,v16,v16,byteswap)
1274 VPMSUMW(v16,v16,v17)
1275 vxor v0,v0,v16
1276 bdz 1f
1277
1278 lvx v16,off16,r4
1279 lvx v17,off16,r3
1280 VPERM(v16,v16,v16,byteswap)
1281 VPMSUMW(v16,v16,v17)
1282 vxor v0,v0,v16
1283 bdz 1f
1284
1285 lvx v16,off32,r4
1286 lvx v17,off32,r3
1287 VPERM(v16,v16,v16,byteswap)
1288 VPMSUMW(v16,v16,v17)
1289 vxor v0,v0,v16
1290 bdz 1f
1291
1292 lvx v16,off48,r4
1293 lvx v17,off48,r3
1294 VPERM(v16,v16,v16,byteswap)
1295 VPMSUMW(v16,v16,v17)
1296 vxor v0,v0,v16
1297 bdz 1f
1298
1299 lvx v16,off64,r4
1300 lvx v17,off64,r3
1301 VPERM(v16,v16,v16,byteswap)
1302 VPMSUMW(v16,v16,v17)
1303 vxor v0,v0,v16
1304 bdz 1f
1305
1306 lvx v16,off80,r4
1307 lvx v17,off80,r3
1308 VPERM(v16,v16,v16,byteswap)
1309 VPMSUMW(v16,v16,v17)
1310 vxor v0,v0,v16
1311 bdz 1f
1312
1313 lvx v16,off96,r4
1314 lvx v17,off96,r3
1315 VPERM(v16,v16,v16,byteswap)
1316 VPMSUMW(v16,v16,v17)
1317 vxor v0,v0,v16
1318
1319 /* Now xor all the parallel chunks together */
13201: vxor v0,v0,v1
1321 vxor v2,v2,v3
1322 vxor v4,v4,v5
1323 vxor v6,v6,v7
1324
1325 vxor v0,v0,v2
1326 vxor v4,v4,v6
1327
1328 vxor v0,v0,v4
1329
1330.Lbarrett_reduction:
1331 /* Barrett constants */
1332 addis r3,r2,.barrett_constants@toc@ha
1333 addi r3,r3,.barrett_constants@toc@l
1334
1335 lvx const1,0,r3
1336 lvx const2,off16,r3
1337
1338 vsldoi v1,v0,v0,8
1339 vxor v0,v0,v1 /* xor two 64 bit results together */
1340
1341 /* shift left one bit */
1342 vspltisb v1,1
1343 vsl v0,v0,v1
1344
1345 vand v0,v0,mask_64bit
1346
1347 /*
1348 * The reflected version of Barrett reduction. Instead of bit
1349 * reflecting our data (which is expensive to do), we bit reflect our
1350 * constants and our algorithm, which means the intermediate data in
1351 * our vector registers goes from 0-63 instead of 63-0. We can reflect
1352 * the algorithm because we don't carry in mod 2 arithmetic.
1353 */
1354 vand v1,v0,mask_32bit /* bottom 32 bits of a */
1355 VPMSUMD(v1,v1,const1) /* ma */
1356 vand v1,v1,mask_32bit /* bottom 32bits of ma */
1357 VPMSUMD(v1,v1,const2) /* qn */
1358 vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
1359
1360 /*
1361 * Since we are bit reflected, the result (ie the low 32 bits) is in
1362 * the high 32 bits. We just need to shift it left 4 bytes
1363 * V0 [ 0 1 X 3 ]
1364 * V0 [ 0 X 2 3 ]
1365 */
1366	vsldoi	v0,v0,zeroes,4		/* shift result into top 64 bits of the register */
1367
1368 /* Get it into r3 */
1369 MFVRD(R3, v0)
1370
1371.Lout:
1372 subi r6,r1,56+10*16
1373 subi r7,r1,56+2*16
1374
1375 lvx v20,0,r6
1376 lvx v21,off16,r6
1377 lvx v22,off32,r6
1378 lvx v23,off48,r6
1379 lvx v24,off64,r6
1380 lvx v25,off80,r6
1381 lvx v26,off96,r6
1382 lvx v27,off112,r6
1383 lvx v28,0,r7
1384 lvx v29,off16,r7
1385
1386 ld r31,-8(r1)
1387 ld r30,-16(r1)
1388 ld r29,-24(r1)
1389 ld r28,-32(r1)
1390 ld r27,-40(r1)
1391 ld r26,-48(r1)
1392 ld r25,-56(r1)
1393
1394 blr
1395
1396.Lfirst_warm_up_done:
1397 lvx const1,0,r3
1398 addi r3,r3,16
1399
1400 VPMSUMD(v8,v16,const1)
1401 VPMSUMD(v9,v17,const1)
1402 VPMSUMD(v10,v18,const1)
1403 VPMSUMD(v11,v19,const1)
1404 VPMSUMD(v12,v20,const1)
1405 VPMSUMD(v13,v21,const1)
1406 VPMSUMD(v14,v22,const1)
1407 VPMSUMD(v15,v23,const1)
1408
1409 b .Lsecond_cool_down
1410
1411.Lshort:
1412 cmpdi r5,0
1413 beq .Lzero
1414
1415 addis r3,r2,.short_constants@toc@ha
1416 addi r3,r3,.short_constants@toc@l
1417
1418 /* Calculate where in the constant table we need to start */
1419 subfic r6,r5,256
1420 add r3,r3,r6
1421
1422 /* How many 16 byte chunks? */
1423 srdi r7,r5,4
1424 mtctr r7
1425
1426 vxor v19,v19,v19
1427 vxor v20,v20,v20
1428
1429 lvx v0,0,r4
1430 lvx v16,0,r3
1431 VPERM(v0,v0,v16,byteswap)
1432 vxor v0,v0,v8 /* xor in initial value */
1433 VPMSUMW(v0,v0,v16)
1434 bdz .Lv0
1435
1436 lvx v1,off16,r4
1437 lvx v17,off16,r3
1438 VPERM(v1,v1,v17,byteswap)
1439 VPMSUMW(v1,v1,v17)
1440 bdz .Lv1
1441
1442 lvx v2,off32,r4
1443 lvx v16,off32,r3
1444 VPERM(v2,v2,v16,byteswap)
1445 VPMSUMW(v2,v2,v16)
1446 bdz .Lv2
1447
1448 lvx v3,off48,r4
1449 lvx v17,off48,r3
1450 VPERM(v3,v3,v17,byteswap)
1451 VPMSUMW(v3,v3,v17)
1452 bdz .Lv3
1453
1454 lvx v4,off64,r4
1455 lvx v16,off64,r3
1456 VPERM(v4,v4,v16,byteswap)
1457 VPMSUMW(v4,v4,v16)
1458 bdz .Lv4
1459
1460 lvx v5,off80,r4
1461 lvx v17,off80,r3
1462 VPERM(v5,v5,v17,byteswap)
1463 VPMSUMW(v5,v5,v17)
1464 bdz .Lv5
1465
1466 lvx v6,off96,r4
1467 lvx v16,off96,r3
1468 VPERM(v6,v6,v16,byteswap)
1469 VPMSUMW(v6,v6,v16)
1470 bdz .Lv6
1471
1472 lvx v7,off112,r4
1473 lvx v17,off112,r3
1474 VPERM(v7,v7,v17,byteswap)
1475 VPMSUMW(v7,v7,v17)
1476 bdz .Lv7
1477
1478 addi r3,r3,128
1479 addi r4,r4,128
1480
1481 lvx v8,0,r4
1482 lvx v16,0,r3
1483 VPERM(v8,v8,v16,byteswap)
1484 VPMSUMW(v8,v8,v16)
1485 bdz .Lv8
1486
1487 lvx v9,off16,r4
1488 lvx v17,off16,r3
1489 VPERM(v9,v9,v17,byteswap)
1490 VPMSUMW(v9,v9,v17)
1491 bdz .Lv9
1492
1493 lvx v10,off32,r4
1494 lvx v16,off32,r3
1495 VPERM(v10,v10,v16,byteswap)
1496 VPMSUMW(v10,v10,v16)
1497 bdz .Lv10
1498
1499 lvx v11,off48,r4
1500 lvx v17,off48,r3
1501 VPERM(v11,v11,v17,byteswap)
1502 VPMSUMW(v11,v11,v17)
1503 bdz .Lv11
1504
1505 lvx v12,off64,r4
1506 lvx v16,off64,r3
1507 VPERM(v12,v12,v16,byteswap)
1508 VPMSUMW(v12,v12,v16)
1509 bdz .Lv12
1510
1511 lvx v13,off80,r4
1512 lvx v17,off80,r3
1513 VPERM(v13,v13,v17,byteswap)
1514 VPMSUMW(v13,v13,v17)
1515 bdz .Lv13
1516
1517 lvx v14,off96,r4
1518 lvx v16,off96,r3
1519 VPERM(v14,v14,v16,byteswap)
1520 VPMSUMW(v14,v14,v16)
1521 bdz .Lv14
1522
1523 lvx v15,off112,r4
1524 lvx v17,off112,r3
1525 VPERM(v15,v15,v17,byteswap)
1526 VPMSUMW(v15,v15,v17)
1527
1528.Lv15: vxor v19,v19,v15
1529.Lv14: vxor v20,v20,v14
1530.Lv13: vxor v19,v19,v13
1531.Lv12: vxor v20,v20,v12
1532.Lv11: vxor v19,v19,v11
1533.Lv10: vxor v20,v20,v10
1534.Lv9: vxor v19,v19,v9
1535.Lv8: vxor v20,v20,v8
1536.Lv7: vxor v19,v19,v7
1537.Lv6: vxor v20,v20,v6
1538.Lv5: vxor v19,v19,v5
1539.Lv4: vxor v20,v20,v4
1540.Lv3: vxor v19,v19,v3
1541.Lv2: vxor v20,v20,v2
1542.Lv1: vxor v19,v19,v1
1543.Lv0: vxor v20,v20,v0
1544
1545 vxor v0,v19,v20
1546
1547 b .Lbarrett_reduction
1548
1549.Lzero:
1550 mr r3,r10
1551 b .Lout
1552
1553FUNC_END(__crc32_vpmsum)
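
For readers following the schedule described in the main-loop comment above, here is a rough scalar C model of the same three-stage software pipeline. It is a sketch only: fold() is an invented stand-in for VPMSUMD (the real loop also pairs each block with a distance-dependent constant from the table and runs eight lanes in parallel), and pipelined_fold() assumes at least two blocks.

#include <stdint.h>
#include <stddef.h>

/* Invented stand-in for VPMSUMD: any xor-linear mixing will do here. */
static uint64_t fold(uint64_t v, uint64_t k)
{
	return (v << 1) ^ (v >> 63) ^ k;
}

/* Three-stage pipeline: while block i loads, block i-1 is in the
 * multiplier and the product from block i-2 is xored into the
 * accumulator.  Assumes n >= 2. */
static uint64_t pipelined_fold(const uint64_t *blk, size_t n, uint64_t k)
{
	uint64_t acc = 0;
	uint64_t cur = blk[0];		/* warm up: first load */
	uint64_t prod = fold(cur, k);	/* warm up: first multiply */

	cur = blk[1];
	for (size_t i = 2; i < n; i++) {
		acc ^= prod;		/* third stage: xor (block i-2) */
		prod = fold(cur, k);	/* second stage: vpmsum (block i-1) */
		cur = blk[i];		/* first stage: load (block i) */
	}
	acc ^= prod;			/* first cool down pass */
	acc ^= fold(cur, k);		/* second cool down pass */
	return acc;
}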
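The reflected Barrett reduction near the end of the routine also translates directly into scalar C. A sketch under stated assumptions: clmul64_lo() is a slow bitwise carryless multiply standing in for VPMSUMD, m and n correspond to the two .barrett_constants entries, and v is the 64-bit value left after the two halves have been xored together, shifted left one bit and masked, as the code above does just before the reduction.

#include <stdint.h>

/* Carryless (GF(2)) multiply, low 64 bits of the product.  The real
 * code gets this from VPMSUMD; this loop is for illustration only. */
static uint64_t clmul64_lo(uint64_t a, uint64_t b)
{
	uint64_t r = 0;

	for (int i = 0; i < 64; i++)
		if (b >> i & 1)
			r ^= a << i;
	return r;
}

/* Reflected Barrett reduction: m = x^64 div p(x), n = p(x), both in
 * the bit-reflected domain. */
static uint32_t barrett_reflected(uint64_t v, uint64_t m, uint64_t n)
{
	uint64_t t;

	t = clmul64_lo(v & 0xffffffff, m);	/* ma: bottom 32 bits of a times m */
	t = clmul64_lo(t & 0xffffffff, n);	/* qn: bottom 32 bits of ma times n */
	v ^= t;					/* a - qn: subtraction is xor in GF(2) */
	return (uint32_t)(v >> 32);		/* reflected result sits in the high half */
}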
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S b/arch/powerpc/crypto/crct10dif-vpmsum_asm.S
new file mode 100644
index 000000000000..5e3d81a0af1b
--- /dev/null
+++ b/arch/powerpc/crypto/crct10dif-vpmsum_asm.S
@@ -0,0 +1,850 @@
1/*
2 * Calculate a CRC T10DIF with vpmsum acceleration
3 *
4 * Constants generated by crc32-vpmsum, available at
5 * https://github.com/antonblanchard/crc32-vpmsum
6 *
7 * crc32-vpmsum is
8 * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
9 * and is available under the GPL v2 or later.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16 .section .rodata
17.balign 16
18
19.byteswap_constant:
20 /* byte reverse permute constant */
21 .octa 0x0F0E0D0C0B0A09080706050403020100
22
23.constants:
24
 25 /* Reduce 262144 bits to 1024 bits */
26 /* x^261184 mod p(x), x^261120 mod p(x) */
27 .octa 0x0000000056d300000000000052550000
28
29 /* x^260160 mod p(x), x^260096 mod p(x) */
30 .octa 0x00000000ee67000000000000a1e40000
31
32 /* x^259136 mod p(x), x^259072 mod p(x) */
33 .octa 0x0000000060830000000000004ad10000
34
35 /* x^258112 mod p(x), x^258048 mod p(x) */
36 .octa 0x000000008cfe0000000000009ab40000
37
38 /* x^257088 mod p(x), x^257024 mod p(x) */
39 .octa 0x000000003e93000000000000fdb50000
40
41 /* x^256064 mod p(x), x^256000 mod p(x) */
42 .octa 0x000000003c2000000000000045480000
43
44 /* x^255040 mod p(x), x^254976 mod p(x) */
45 .octa 0x00000000b1fc0000000000008d690000
46
47 /* x^254016 mod p(x), x^253952 mod p(x) */
48 .octa 0x00000000f82b00000000000024ad0000
49
50 /* x^252992 mod p(x), x^252928 mod p(x) */
51 .octa 0x0000000044420000000000009f1a0000
52
53 /* x^251968 mod p(x), x^251904 mod p(x) */
54 .octa 0x00000000e88c00000000000066ec0000
55
56 /* x^250944 mod p(x), x^250880 mod p(x) */
57 .octa 0x00000000385c000000000000c87d0000
58
59 /* x^249920 mod p(x), x^249856 mod p(x) */
60 .octa 0x000000003227000000000000c8ff0000
61
62 /* x^248896 mod p(x), x^248832 mod p(x) */
63 .octa 0x00000000a9a900000000000033440000
64
65 /* x^247872 mod p(x), x^247808 mod p(x) */
66 .octa 0x00000000abaa00000000000066eb0000
67
68 /* x^246848 mod p(x), x^246784 mod p(x) */
69 .octa 0x000000001ac3000000000000c4ef0000
70
71 /* x^245824 mod p(x), x^245760 mod p(x) */
72 .octa 0x0000000063f000000000000056f30000
73
74 /* x^244800 mod p(x), x^244736 mod p(x) */
75 .octa 0x0000000032cc00000000000002050000
76
77 /* x^243776 mod p(x), x^243712 mod p(x) */
78 .octa 0x00000000f8b5000000000000568e0000
79
80 /* x^242752 mod p(x), x^242688 mod p(x) */
81 .octa 0x000000008db100000000000064290000
82
83 /* x^241728 mod p(x), x^241664 mod p(x) */
84 .octa 0x0000000059ca0000000000006b660000
85
86 /* x^240704 mod p(x), x^240640 mod p(x) */
87 .octa 0x000000005f5c00000000000018f80000
88
89 /* x^239680 mod p(x), x^239616 mod p(x) */
90 .octa 0x0000000061af000000000000b6090000
91
92 /* x^238656 mod p(x), x^238592 mod p(x) */
93 .octa 0x00000000e29e000000000000099a0000
94
95 /* x^237632 mod p(x), x^237568 mod p(x) */
96 .octa 0x000000000975000000000000a8360000
97
98 /* x^236608 mod p(x), x^236544 mod p(x) */
99 .octa 0x0000000043900000000000004f570000
100
101 /* x^235584 mod p(x), x^235520 mod p(x) */
102 .octa 0x00000000f9cd000000000000134c0000
103
104 /* x^234560 mod p(x), x^234496 mod p(x) */
105 .octa 0x000000007c29000000000000ec380000
106
107 /* x^233536 mod p(x), x^233472 mod p(x) */
108 .octa 0x000000004c6a000000000000b0d10000
109
110 /* x^232512 mod p(x), x^232448 mod p(x) */
111 .octa 0x00000000e7290000000000007d3e0000
112
113 /* x^231488 mod p(x), x^231424 mod p(x) */
114 .octa 0x00000000f1ab000000000000f0b20000
115
116 /* x^230464 mod p(x), x^230400 mod p(x) */
117 .octa 0x0000000039db0000000000009c270000
118
119 /* x^229440 mod p(x), x^229376 mod p(x) */
120 .octa 0x000000005e2800000000000092890000
121
122 /* x^228416 mod p(x), x^228352 mod p(x) */
123 .octa 0x00000000d44e000000000000d5ee0000
124
125 /* x^227392 mod p(x), x^227328 mod p(x) */
126 .octa 0x00000000cd0a00000000000041f50000
127
128 /* x^226368 mod p(x), x^226304 mod p(x) */
129 .octa 0x00000000c5b400000000000010520000
130
131 /* x^225344 mod p(x), x^225280 mod p(x) */
132 .octa 0x00000000fd2100000000000042170000
133
134 /* x^224320 mod p(x), x^224256 mod p(x) */
135 .octa 0x000000002f2500000000000095c20000
136
137 /* x^223296 mod p(x), x^223232 mod p(x) */
138 .octa 0x000000001b0100000000000001ce0000
139
140 /* x^222272 mod p(x), x^222208 mod p(x) */
141 .octa 0x000000000d430000000000002aca0000
142
143 /* x^221248 mod p(x), x^221184 mod p(x) */
144 .octa 0x0000000030a6000000000000385e0000
145
146 /* x^220224 mod p(x), x^220160 mod p(x) */
147 .octa 0x00000000e37b0000000000006f7a0000
148
149 /* x^219200 mod p(x), x^219136 mod p(x) */
150 .octa 0x00000000873600000000000024320000
151
152 /* x^218176 mod p(x), x^218112 mod p(x) */
153 .octa 0x00000000e9fb000000000000bd9c0000
154
155 /* x^217152 mod p(x), x^217088 mod p(x) */
156 .octa 0x000000003b9500000000000054bc0000
157
158 /* x^216128 mod p(x), x^216064 mod p(x) */
159 .octa 0x00000000133e000000000000a4660000
160
161 /* x^215104 mod p(x), x^215040 mod p(x) */
162 .octa 0x00000000784500000000000079930000
163
164 /* x^214080 mod p(x), x^214016 mod p(x) */
165 .octa 0x00000000b9800000000000001bb80000
166
167 /* x^213056 mod p(x), x^212992 mod p(x) */
168 .octa 0x00000000687600000000000024400000
169
170 /* x^212032 mod p(x), x^211968 mod p(x) */
171 .octa 0x00000000aff300000000000029e10000
172
173 /* x^211008 mod p(x), x^210944 mod p(x) */
174 .octa 0x0000000024b50000000000005ded0000
175
176 /* x^209984 mod p(x), x^209920 mod p(x) */
177 .octa 0x0000000017e8000000000000b12e0000
178
179 /* x^208960 mod p(x), x^208896 mod p(x) */
180 .octa 0x00000000128400000000000026d20000
181
182 /* x^207936 mod p(x), x^207872 mod p(x) */
183 .octa 0x000000002115000000000000a32a0000
184
185 /* x^206912 mod p(x), x^206848 mod p(x) */
186 .octa 0x000000009595000000000000a1210000
187
188 /* x^205888 mod p(x), x^205824 mod p(x) */
189 .octa 0x00000000281e000000000000ee8b0000
190
191 /* x^204864 mod p(x), x^204800 mod p(x) */
192 .octa 0x0000000006010000000000003d0d0000
193
194 /* x^203840 mod p(x), x^203776 mod p(x) */
195 .octa 0x00000000e2b600000000000034e90000
196
197 /* x^202816 mod p(x), x^202752 mod p(x) */
198 .octa 0x000000001bd40000000000004cdb0000
199
200 /* x^201792 mod p(x), x^201728 mod p(x) */
201 .octa 0x00000000df2800000000000030e90000
202
203 /* x^200768 mod p(x), x^200704 mod p(x) */
204 .octa 0x0000000049c200000000000042590000
205
206 /* x^199744 mod p(x), x^199680 mod p(x) */
207 .octa 0x000000009b97000000000000df950000
208
209 /* x^198720 mod p(x), x^198656 mod p(x) */
210 .octa 0x000000006184000000000000da7b0000
211
212 /* x^197696 mod p(x), x^197632 mod p(x) */
213 .octa 0x00000000461700000000000012510000
214
215 /* x^196672 mod p(x), x^196608 mod p(x) */
216 .octa 0x000000009b40000000000000f37e0000
217
218 /* x^195648 mod p(x), x^195584 mod p(x) */
219 .octa 0x00000000eeb2000000000000ecf10000
220
221 /* x^194624 mod p(x), x^194560 mod p(x) */
222 .octa 0x00000000b2e800000000000050f20000
223
224 /* x^193600 mod p(x), x^193536 mod p(x) */
225 .octa 0x00000000f59a000000000000e0b30000
226
227 /* x^192576 mod p(x), x^192512 mod p(x) */
228 .octa 0x00000000467f0000000000004d5a0000
229
230 /* x^191552 mod p(x), x^191488 mod p(x) */
231 .octa 0x00000000da92000000000000bb010000
232
233 /* x^190528 mod p(x), x^190464 mod p(x) */
234 .octa 0x000000001e1000000000000022a40000
235
236 /* x^189504 mod p(x), x^189440 mod p(x) */
237 .octa 0x0000000058fe000000000000836f0000
238
239 /* x^188480 mod p(x), x^188416 mod p(x) */
240 .octa 0x00000000b9ce000000000000d78d0000
241
242 /* x^187456 mod p(x), x^187392 mod p(x) */
243 .octa 0x0000000022210000000000004f8d0000
244
245 /* x^186432 mod p(x), x^186368 mod p(x) */
246 .octa 0x00000000744600000000000033760000
247
248 /* x^185408 mod p(x), x^185344 mod p(x) */
249 .octa 0x000000001c2e000000000000a1e50000
250
251 /* x^184384 mod p(x), x^184320 mod p(x) */
252 .octa 0x00000000dcc8000000000000a1a40000
253
254 /* x^183360 mod p(x), x^183296 mod p(x) */
255 .octa 0x00000000910f00000000000019a20000
256
257 /* x^182336 mod p(x), x^182272 mod p(x) */
258 .octa 0x0000000055d5000000000000f6ae0000
259
260 /* x^181312 mod p(x), x^181248 mod p(x) */
261 .octa 0x00000000c8ba000000000000a7ac0000
262
263 /* x^180288 mod p(x), x^180224 mod p(x) */
264 .octa 0x0000000031f8000000000000eea20000
265
266 /* x^179264 mod p(x), x^179200 mod p(x) */
267 .octa 0x000000001966000000000000c4d90000
268
269 /* x^178240 mod p(x), x^178176 mod p(x) */
270 .octa 0x00000000b9810000000000002b470000
271
272 /* x^177216 mod p(x), x^177152 mod p(x) */
273 .octa 0x000000008303000000000000f7cf0000
274
275 /* x^176192 mod p(x), x^176128 mod p(x) */
276 .octa 0x000000002ce500000000000035b30000
277
278 /* x^175168 mod p(x), x^175104 mod p(x) */
279 .octa 0x000000002fae0000000000000c7c0000
280
281 /* x^174144 mod p(x), x^174080 mod p(x) */
282 .octa 0x00000000f50c0000000000009edf0000
283
284 /* x^173120 mod p(x), x^173056 mod p(x) */
285 .octa 0x00000000714f00000000000004cd0000
286
287 /* x^172096 mod p(x), x^172032 mod p(x) */
288 .octa 0x00000000c161000000000000541b0000
289
290 /* x^171072 mod p(x), x^171008 mod p(x) */
291 .octa 0x0000000021c8000000000000e2700000
292
293 /* x^170048 mod p(x), x^169984 mod p(x) */
294 .octa 0x00000000b93d00000000000009a60000
295
296 /* x^169024 mod p(x), x^168960 mod p(x) */
297 .octa 0x00000000fbcf000000000000761c0000
298
299 /* x^168000 mod p(x), x^167936 mod p(x) */
300 .octa 0x0000000026350000000000009db30000
301
302 /* x^166976 mod p(x), x^166912 mod p(x) */
303 .octa 0x00000000b64f0000000000003e9f0000
304
305 /* x^165952 mod p(x), x^165888 mod p(x) */
306 .octa 0x00000000bd0e00000000000078590000
307
308 /* x^164928 mod p(x), x^164864 mod p(x) */
309 .octa 0x00000000d9360000000000008bc80000
310
311 /* x^163904 mod p(x), x^163840 mod p(x) */
312 .octa 0x000000002f140000000000008c9f0000
313
314 /* x^162880 mod p(x), x^162816 mod p(x) */
315 .octa 0x000000006a270000000000006af70000
316
317 /* x^161856 mod p(x), x^161792 mod p(x) */
318 .octa 0x000000006685000000000000e5210000
319
320 /* x^160832 mod p(x), x^160768 mod p(x) */
321 .octa 0x0000000062da00000000000008290000
322
323 /* x^159808 mod p(x), x^159744 mod p(x) */
324 .octa 0x00000000bb4b000000000000e4d00000
325
326 /* x^158784 mod p(x), x^158720 mod p(x) */
327 .octa 0x00000000d2490000000000004ae10000
328
329 /* x^157760 mod p(x), x^157696 mod p(x) */
330 .octa 0x00000000c85b00000000000000e70000
331
332 /* x^156736 mod p(x), x^156672 mod p(x) */
333 .octa 0x00000000c37a00000000000015650000
334
335 /* x^155712 mod p(x), x^155648 mod p(x) */
336 .octa 0x0000000018530000000000001c2f0000
337
338 /* x^154688 mod p(x), x^154624 mod p(x) */
339 .octa 0x00000000b46600000000000037bd0000
340
341 /* x^153664 mod p(x), x^153600 mod p(x) */
342 .octa 0x00000000439b00000000000012190000
343
344 /* x^152640 mod p(x), x^152576 mod p(x) */
345 .octa 0x00000000b1260000000000005ece0000
346
347 /* x^151616 mod p(x), x^151552 mod p(x) */
348 .octa 0x00000000d8110000000000002a5e0000
349
350 /* x^150592 mod p(x), x^150528 mod p(x) */
351 .octa 0x00000000099f00000000000052330000
352
353 /* x^149568 mod p(x), x^149504 mod p(x) */
354 .octa 0x00000000f9f9000000000000f9120000
355
356 /* x^148544 mod p(x), x^148480 mod p(x) */
357 .octa 0x000000005cc00000000000000ddc0000
358
359 /* x^147520 mod p(x), x^147456 mod p(x) */
360 .octa 0x00000000343b00000000000012200000
361
362 /* x^146496 mod p(x), x^146432 mod p(x) */
363 .octa 0x000000009222000000000000d12b0000
364
365 /* x^145472 mod p(x), x^145408 mod p(x) */
366 .octa 0x00000000d781000000000000eb2d0000
367
368 /* x^144448 mod p(x), x^144384 mod p(x) */
369 .octa 0x000000000bf400000000000058970000
370
371 /* x^143424 mod p(x), x^143360 mod p(x) */
372 .octa 0x00000000094200000000000013690000
373
374 /* x^142400 mod p(x), x^142336 mod p(x) */
375 .octa 0x00000000d55100000000000051950000
376
377 /* x^141376 mod p(x), x^141312 mod p(x) */
378 .octa 0x000000008f11000000000000954b0000
379
380 /* x^140352 mod p(x), x^140288 mod p(x) */
381 .octa 0x00000000140f000000000000b29e0000
382
383 /* x^139328 mod p(x), x^139264 mod p(x) */
384 .octa 0x00000000c6db000000000000db5d0000
385
386 /* x^138304 mod p(x), x^138240 mod p(x) */
387 .octa 0x00000000715b000000000000dfaf0000
388
389 /* x^137280 mod p(x), x^137216 mod p(x) */
390 .octa 0x000000000dea000000000000e3b60000
391
392 /* x^136256 mod p(x), x^136192 mod p(x) */
393 .octa 0x000000006f94000000000000ddaf0000
394
395 /* x^135232 mod p(x), x^135168 mod p(x) */
396 .octa 0x0000000024e1000000000000e4f70000
397
398 /* x^134208 mod p(x), x^134144 mod p(x) */
399 .octa 0x000000008810000000000000aa110000
400
401 /* x^133184 mod p(x), x^133120 mod p(x) */
402 .octa 0x0000000030c2000000000000a8e60000
403
404 /* x^132160 mod p(x), x^132096 mod p(x) */
405 .octa 0x00000000e6d0000000000000ccf30000
406
407 /* x^131136 mod p(x), x^131072 mod p(x) */
408 .octa 0x000000004da000000000000079bf0000
409
410 /* x^130112 mod p(x), x^130048 mod p(x) */
411 .octa 0x000000007759000000000000b3a30000
412
413 /* x^129088 mod p(x), x^129024 mod p(x) */
414 .octa 0x00000000597400000000000028790000
415
416 /* x^128064 mod p(x), x^128000 mod p(x) */
417 .octa 0x000000007acd000000000000b5820000
418
419 /* x^127040 mod p(x), x^126976 mod p(x) */
420 .octa 0x00000000e6e400000000000026ad0000
421
422 /* x^126016 mod p(x), x^125952 mod p(x) */
423 .octa 0x000000006d49000000000000985b0000
424
425 /* x^124992 mod p(x), x^124928 mod p(x) */
426 .octa 0x000000000f0800000000000011520000
427
428 /* x^123968 mod p(x), x^123904 mod p(x) */
429 .octa 0x000000002c7f000000000000846c0000
430
431 /* x^122944 mod p(x), x^122880 mod p(x) */
432 .octa 0x000000005ce7000000000000ae1d0000
433
434 /* x^121920 mod p(x), x^121856 mod p(x) */
435 .octa 0x00000000d4cb000000000000e21d0000
436
437 /* x^120896 mod p(x), x^120832 mod p(x) */
438 .octa 0x000000003a2300000000000019bb0000
439
440 /* x^119872 mod p(x), x^119808 mod p(x) */
441 .octa 0x000000000e1700000000000095290000
442
443 /* x^118848 mod p(x), x^118784 mod p(x) */
444 .octa 0x000000006e6400000000000050d20000
445
446 /* x^117824 mod p(x), x^117760 mod p(x) */
447 .octa 0x000000008d5c0000000000000cd10000
448
449 /* x^116800 mod p(x), x^116736 mod p(x) */
450 .octa 0x00000000ef310000000000007b570000
451
452 /* x^115776 mod p(x), x^115712 mod p(x) */
453 .octa 0x00000000645d00000000000053d60000
454
455 /* x^114752 mod p(x), x^114688 mod p(x) */
456 .octa 0x0000000018fc00000000000077510000
457
458 /* x^113728 mod p(x), x^113664 mod p(x) */
459 .octa 0x000000000cb3000000000000a7b70000
460
461 /* x^112704 mod p(x), x^112640 mod p(x) */
462 .octa 0x00000000991b000000000000d0780000
463
464 /* x^111680 mod p(x), x^111616 mod p(x) */
465 .octa 0x00000000845a000000000000be3c0000
466
467 /* x^110656 mod p(x), x^110592 mod p(x) */
468 .octa 0x00000000d3a9000000000000df020000
469
470 /* x^109632 mod p(x), x^109568 mod p(x) */
471 .octa 0x0000000017d7000000000000063e0000
472
473 /* x^108608 mod p(x), x^108544 mod p(x) */
474 .octa 0x000000007a860000000000008ab40000
475
476 /* x^107584 mod p(x), x^107520 mod p(x) */
477 .octa 0x00000000fd7c000000000000c7bd0000
478
479 /* x^106560 mod p(x), x^106496 mod p(x) */
480 .octa 0x00000000a56b000000000000efd60000
481
482 /* x^105536 mod p(x), x^105472 mod p(x) */
483 .octa 0x0000000010e400000000000071380000
484
485 /* x^104512 mod p(x), x^104448 mod p(x) */
486 .octa 0x00000000994500000000000004d30000
487
488 /* x^103488 mod p(x), x^103424 mod p(x) */
489 .octa 0x00000000b83c0000000000003b0e0000
490
491 /* x^102464 mod p(x), x^102400 mod p(x) */
492 .octa 0x00000000d6c10000000000008b020000
493
494 /* x^101440 mod p(x), x^101376 mod p(x) */
495 .octa 0x000000009efc000000000000da940000
496
497 /* x^100416 mod p(x), x^100352 mod p(x) */
498 .octa 0x000000005e87000000000000f9f70000
499
500 /* x^99392 mod p(x), x^99328 mod p(x) */
501 .octa 0x000000006c9b00000000000045e40000
502
503 /* x^98368 mod p(x), x^98304 mod p(x) */
504 .octa 0x00000000178a00000000000083940000
505
506 /* x^97344 mod p(x), x^97280 mod p(x) */
507 .octa 0x00000000f0c8000000000000f0a00000
508
509 /* x^96320 mod p(x), x^96256 mod p(x) */
510 .octa 0x00000000f699000000000000b74b0000
511
512 /* x^95296 mod p(x), x^95232 mod p(x) */
513 .octa 0x00000000316d000000000000c1cf0000
514
515 /* x^94272 mod p(x), x^94208 mod p(x) */
516 .octa 0x00000000987e00000000000072680000
517
518 /* x^93248 mod p(x), x^93184 mod p(x) */
519 .octa 0x00000000acff000000000000e0ab0000
520
521 /* x^92224 mod p(x), x^92160 mod p(x) */
522 .octa 0x00000000a1f6000000000000c5a80000
523
524 /* x^91200 mod p(x), x^91136 mod p(x) */
525 .octa 0x0000000061bd000000000000cf690000
526
527 /* x^90176 mod p(x), x^90112 mod p(x) */
528 .octa 0x00000000c9f2000000000000cbcc0000
529
530 /* x^89152 mod p(x), x^89088 mod p(x) */
531 .octa 0x000000005a33000000000000de050000
532
533 /* x^88128 mod p(x), x^88064 mod p(x) */
534 .octa 0x00000000e416000000000000ccd70000
535
536 /* x^87104 mod p(x), x^87040 mod p(x) */
537 .octa 0x0000000058930000000000002f670000
538
539 /* x^86080 mod p(x), x^86016 mod p(x) */
540 .octa 0x00000000a9d3000000000000152f0000
541
542 /* x^85056 mod p(x), x^84992 mod p(x) */
543 .octa 0x00000000c114000000000000ecc20000
544
545 /* x^84032 mod p(x), x^83968 mod p(x) */
546 .octa 0x00000000b9270000000000007c890000
547
548 /* x^83008 mod p(x), x^82944 mod p(x) */
549 .octa 0x000000002e6000000000000006ee0000
550
551 /* x^81984 mod p(x), x^81920 mod p(x) */
552 .octa 0x00000000dfc600000000000009100000
553
554 /* x^80960 mod p(x), x^80896 mod p(x) */
555 .octa 0x000000004911000000000000ad4e0000
556
557 /* x^79936 mod p(x), x^79872 mod p(x) */
558 .octa 0x00000000ae1b000000000000b04d0000
559
560 /* x^78912 mod p(x), x^78848 mod p(x) */
561 .octa 0x0000000005fa000000000000e9900000
562
563 /* x^77888 mod p(x), x^77824 mod p(x) */
564 .octa 0x0000000004a1000000000000cc6f0000
565
566 /* x^76864 mod p(x), x^76800 mod p(x) */
567 .octa 0x00000000af73000000000000ed110000
568
569 /* x^75840 mod p(x), x^75776 mod p(x) */
570 .octa 0x0000000082530000000000008f7e0000
571
572 /* x^74816 mod p(x), x^74752 mod p(x) */
573 .octa 0x00000000cfdc000000000000594f0000
574
575 /* x^73792 mod p(x), x^73728 mod p(x) */
576 .octa 0x00000000a6b6000000000000a8750000
577
578 /* x^72768 mod p(x), x^72704 mod p(x) */
579 .octa 0x00000000fd76000000000000aa0c0000
580
581 /* x^71744 mod p(x), x^71680 mod p(x) */
582 .octa 0x0000000006f500000000000071db0000
583
584 /* x^70720 mod p(x), x^70656 mod p(x) */
585 .octa 0x0000000037ca000000000000ab0c0000
586
587 /* x^69696 mod p(x), x^69632 mod p(x) */
588 .octa 0x00000000d7ab000000000000b7a00000
589
590 /* x^68672 mod p(x), x^68608 mod p(x) */
591 .octa 0x00000000440800000000000090d30000
592
593 /* x^67648 mod p(x), x^67584 mod p(x) */
594 .octa 0x00000000186100000000000054730000
595
596 /* x^66624 mod p(x), x^66560 mod p(x) */
597 .octa 0x000000007368000000000000a3a20000
598
599 /* x^65600 mod p(x), x^65536 mod p(x) */
600 .octa 0x0000000026d0000000000000f9040000
601
602 /* x^64576 mod p(x), x^64512 mod p(x) */
603 .octa 0x00000000fe770000000000009c0a0000
604
605 /* x^63552 mod p(x), x^63488 mod p(x) */
606 .octa 0x000000002cba000000000000d1e70000
607
608 /* x^62528 mod p(x), x^62464 mod p(x) */
609 .octa 0x00000000f8bd0000000000005ac10000
610
611 /* x^61504 mod p(x), x^61440 mod p(x) */
612 .octa 0x000000007372000000000000d68d0000
613
614 /* x^60480 mod p(x), x^60416 mod p(x) */
615 .octa 0x00000000f37f00000000000089f60000
616
617 /* x^59456 mod p(x), x^59392 mod p(x) */
618 .octa 0x00000000078400000000000008a90000
619
620 /* x^58432 mod p(x), x^58368 mod p(x) */
621 .octa 0x00000000d3e400000000000042360000
622
623 /* x^57408 mod p(x), x^57344 mod p(x) */
624 .octa 0x00000000eba800000000000092d50000
625
626 /* x^56384 mod p(x), x^56320 mod p(x) */
627 .octa 0x00000000afbe000000000000b4d50000
628
629 /* x^55360 mod p(x), x^55296 mod p(x) */
630 .octa 0x00000000d8ca000000000000c9060000
631
632 /* x^54336 mod p(x), x^54272 mod p(x) */
633 .octa 0x00000000c2d00000000000008f4f0000
634
635 /* x^53312 mod p(x), x^53248 mod p(x) */
636 .octa 0x00000000373200000000000028690000
637
638 /* x^52288 mod p(x), x^52224 mod p(x) */
639 .octa 0x0000000046ae000000000000c3b30000
640
641 /* x^51264 mod p(x), x^51200 mod p(x) */
642 .octa 0x00000000b243000000000000f8700000
643
644 /* x^50240 mod p(x), x^50176 mod p(x) */
645 .octa 0x00000000f7f500000000000029eb0000
646
647 /* x^49216 mod p(x), x^49152 mod p(x) */
648 .octa 0x000000000c7e000000000000fe730000
649
650 /* x^48192 mod p(x), x^48128 mod p(x) */
651 .octa 0x00000000c38200000000000096000000
652
653 /* x^47168 mod p(x), x^47104 mod p(x) */
654 .octa 0x000000008956000000000000683c0000
655
656 /* x^46144 mod p(x), x^46080 mod p(x) */
657 .octa 0x00000000422d0000000000005f1e0000
658
659 /* x^45120 mod p(x), x^45056 mod p(x) */
660 .octa 0x00000000ac0f0000000000006f810000
661
662 /* x^44096 mod p(x), x^44032 mod p(x) */
663 .octa 0x00000000ce30000000000000031f0000
664
665 /* x^43072 mod p(x), x^43008 mod p(x) */
666 .octa 0x000000003d43000000000000455a0000
667
668 /* x^42048 mod p(x), x^41984 mod p(x) */
669 .octa 0x000000007ebe000000000000a6050000
670
671 /* x^41024 mod p(x), x^40960 mod p(x) */
672 .octa 0x00000000976e00000000000077eb0000
673
674 /* x^40000 mod p(x), x^39936 mod p(x) */
675 .octa 0x000000000872000000000000389c0000
676
677 /* x^38976 mod p(x), x^38912 mod p(x) */
678 .octa 0x000000008979000000000000c7b20000
679
680 /* x^37952 mod p(x), x^37888 mod p(x) */
681 .octa 0x000000005c1e0000000000001d870000
682
683 /* x^36928 mod p(x), x^36864 mod p(x) */
684 .octa 0x00000000aebb00000000000045810000
685
686 /* x^35904 mod p(x), x^35840 mod p(x) */
687 .octa 0x000000004f7e0000000000006d4a0000
688
689 /* x^34880 mod p(x), x^34816 mod p(x) */
690 .octa 0x00000000ea98000000000000b9200000
691
692 /* x^33856 mod p(x), x^33792 mod p(x) */
693 .octa 0x00000000f39600000000000022f20000
694
695 /* x^32832 mod p(x), x^32768 mod p(x) */
696 .octa 0x000000000bc500000000000041ca0000
697
698 /* x^31808 mod p(x), x^31744 mod p(x) */
699 .octa 0x00000000786400000000000078500000
700
701 /* x^30784 mod p(x), x^30720 mod p(x) */
702 .octa 0x00000000be970000000000009e7e0000
703
704 /* x^29760 mod p(x), x^29696 mod p(x) */
705 .octa 0x00000000dd6d000000000000a53c0000
706
707 /* x^28736 mod p(x), x^28672 mod p(x) */
708 .octa 0x000000004c3f00000000000039340000
709
710 /* x^27712 mod p(x), x^27648 mod p(x) */
711 .octa 0x0000000093a4000000000000b58e0000
712
713 /* x^26688 mod p(x), x^26624 mod p(x) */
714 .octa 0x0000000050fb00000000000062d40000
715
716 /* x^25664 mod p(x), x^25600 mod p(x) */
717 .octa 0x00000000f505000000000000a26f0000
718
719 /* x^24640 mod p(x), x^24576 mod p(x) */
720 .octa 0x0000000064f900000000000065e60000
721
722 /* x^23616 mod p(x), x^23552 mod p(x) */
723 .octa 0x00000000e8c2000000000000aad90000
724
725 /* x^22592 mod p(x), x^22528 mod p(x) */
726 .octa 0x00000000720b000000000000a3b00000
727
728 /* x^21568 mod p(x), x^21504 mod p(x) */
729 .octa 0x00000000e992000000000000d2680000
730
731 /* x^20544 mod p(x), x^20480 mod p(x) */
732 .octa 0x000000009132000000000000cf4c0000
733
734 /* x^19520 mod p(x), x^19456 mod p(x) */
735 .octa 0x00000000608a00000000000076610000
736
737 /* x^18496 mod p(x), x^18432 mod p(x) */
738 .octa 0x000000009948000000000000fb9f0000
739
740 /* x^17472 mod p(x), x^17408 mod p(x) */
741 .octa 0x00000000173000000000000003770000
742
743 /* x^16448 mod p(x), x^16384 mod p(x) */
744 .octa 0x000000006fe300000000000004880000
745
746 /* x^15424 mod p(x), x^15360 mod p(x) */
747 .octa 0x00000000e15300000000000056a70000
748
749 /* x^14400 mod p(x), x^14336 mod p(x) */
750 .octa 0x0000000092d60000000000009dfd0000
751
752 /* x^13376 mod p(x), x^13312 mod p(x) */
753 .octa 0x0000000002fd00000000000074c80000
754
755 /* x^12352 mod p(x), x^12288 mod p(x) */
756 .octa 0x00000000c78b000000000000a3ec0000
757
758 /* x^11328 mod p(x), x^11264 mod p(x) */
759 .octa 0x000000009262000000000000b3530000
760
761 /* x^10304 mod p(x), x^10240 mod p(x) */
762 .octa 0x0000000084f200000000000047bf0000
763
764 /* x^9280 mod p(x), x^9216 mod p(x) */
765 .octa 0x0000000067ee000000000000e97c0000
766
767 /* x^8256 mod p(x), x^8192 mod p(x) */
768 .octa 0x00000000535b00000000000091e10000
769
770 /* x^7232 mod p(x), x^7168 mod p(x) */
771 .octa 0x000000007ebb00000000000055060000
772
773 /* x^6208 mod p(x), x^6144 mod p(x) */
774 .octa 0x00000000c6a1000000000000fd360000
775
776 /* x^5184 mod p(x), x^5120 mod p(x) */
777 .octa 0x000000001be500000000000055860000
778
779 /* x^4160 mod p(x), x^4096 mod p(x) */
780 .octa 0x00000000ae0e0000000000005bd00000
781
782 /* x^3136 mod p(x), x^3072 mod p(x) */
783 .octa 0x0000000022040000000000008db20000
784
785 /* x^2112 mod p(x), x^2048 mod p(x) */
786 .octa 0x00000000c9eb000000000000efe20000
787
788 /* x^1088 mod p(x), x^1024 mod p(x) */
789 .octa 0x0000000039b400000000000051d10000
790
791.short_constants:
792
793 /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */
794 /* x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) */
795 .octa 0xefe20000dccf00009440000033590000
796
797 /* x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) */
798 .octa 0xee6300002f3f000062180000e0ed0000
799
800 /* x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) */
801 .octa 0xcf5f000017ef0000ccbe000023d30000
802
803 /* x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) */
804 .octa 0x6d0c0000a30e00000920000042630000
805
806 /* x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) */
807 .octa 0x21d30000932b0000a7a00000efcc0000
808
809 /* x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) */
810 .octa 0x10be00000b310000666f00000d1c0000
811
812 /* x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) */
813 .octa 0x1f240000ce9e0000caad0000589e0000
814
815 /* x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) */
816 .octa 0x29610000d02b000039b400007cf50000
817
818 /* x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) */
819 .octa 0x51d100009d9d00003c0e0000bfd60000
820
821 /* x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) */
822 .octa 0xda390000ceae000013830000713c0000
823
824 /* x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) */
825 .octa 0xb67800001e16000085c0000080a60000
826
827 /* x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) */
828 .octa 0x0db40000f7f90000371d0000e6580000
829
830 /* x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) */
831 .octa 0x87e70000044c0000aadb0000a4970000
832
833 /* x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) */
834 .octa 0x1f990000ad180000d8b30000e7b50000
835
836 /* x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) */
837 .octa 0xbe6c00006ee300004c1a000006df0000
838
839 /* x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) */
840 .octa 0xfb0b00002d560000136800008bb70000
841
842
843.barrett_constants:
 844 /* Barrett constant m - (x^64)/n */
845 .octa 0x000000000000000000000001f65a57f8 /* x^64 div p(x) */
846 /* Barrett constant n */
847 .octa 0x0000000000000000000000018bb70000
848
849#define CRC_FUNCTION_NAME __crct10dif_vpmsum
850#include "crc32-vpmsum_core.S"
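
Every .octa in the tables above packs two remainders of the form x^N mod p(x) for the T10DIF polynomial; the trailing zero bytes suggest the 16-bit remainders are stored shifted into the upper half of each 32-bit lane for the vector multiplies. As an illustration of where such entries come from, here is a bit-at-a-time generator (the function name is invented and is not part of the kernel):

#include <stdint.h>

/* T10DIF generator polynomial:
 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 */
#define T10DIF_POLY 0x18bb7u

/* x^n mod p(x) over GF(2), one shift at a time. */
static uint32_t xn_mod_p(uint64_t n)
{
	uint32_t r = 1;			/* start from x^0 */

	while (n--) {
		r <<= 1;		/* multiply by x */
		if (r & 0x10000)	/* degree reached 16: subtract p(x) */
			r ^= T10DIF_POLY;
	}
	return r;			/* remainder of degree < 16 */
}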
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
new file mode 100644
index 000000000000..02ea277863d1
--- /dev/null
+++ b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
@@ -0,0 +1,128 @@
1/*
2 * Calculate a CRC T10-DIF with vpmsum acceleration
3 *
4 * Copyright 2017, Daniel Axtens, IBM Corporation.
5 * [based on crc32c-vpmsum_glue.c]
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 */
12
13#include <linux/crc-t10dif.h>
14#include <crypto/internal/hash.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/kernel.h>
19#include <linux/cpufeature.h>
20#include <asm/switch_to.h>
21
22#define VMX_ALIGN 16
23#define VMX_ALIGN_MASK (VMX_ALIGN-1)
24
 25#define VECTOR_BREAKPOINT 64	/* shorter inputs use the generic implementation */
26
27u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len);
28
29static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
30{
31 unsigned int prealign;
32 unsigned int tail;
33 u32 crc = crci;
34
35 if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
36 return crc_t10dif_generic(crc, p, len);
37
38 if ((unsigned long)p & VMX_ALIGN_MASK) {
39 prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
40 crc = crc_t10dif_generic(crc, p, prealign);
41 len -= prealign;
42 p += prealign;
43 }
44
45 if (len & ~VMX_ALIGN_MASK) {
46 crc <<= 16;
47 preempt_disable();
48 pagefault_disable();
49 enable_kernel_altivec();
50 crc = __crct10dif_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
51 disable_kernel_altivec();
52 pagefault_enable();
53 preempt_enable();
54 crc >>= 16;
55 }
56
57 tail = len & VMX_ALIGN_MASK;
58 if (tail) {
59 p += len & ~VMX_ALIGN_MASK;
60 crc = crc_t10dif_generic(crc, p, tail);
61 }
62
63 return crc & 0xffff;
64}
65
66static int crct10dif_vpmsum_init(struct shash_desc *desc)
67{
68 u16 *crc = shash_desc_ctx(desc);
69
70 *crc = 0;
71 return 0;
72}
73
74static int crct10dif_vpmsum_update(struct shash_desc *desc, const u8 *data,
75 unsigned int length)
76{
77 u16 *crc = shash_desc_ctx(desc);
78
79 *crc = crct10dif_vpmsum(*crc, data, length);
80
81 return 0;
82}
83
84
85static int crct10dif_vpmsum_final(struct shash_desc *desc, u8 *out)
86{
87 u16 *crcp = shash_desc_ctx(desc);
88
89 *(u16 *)out = *crcp;
90 return 0;
91}
92
93static struct shash_alg alg = {
94 .init = crct10dif_vpmsum_init,
95 .update = crct10dif_vpmsum_update,
96 .final = crct10dif_vpmsum_final,
97 .descsize = CRC_T10DIF_DIGEST_SIZE,
98 .digestsize = CRC_T10DIF_DIGEST_SIZE,
99 .base = {
100 .cra_name = "crct10dif",
101 .cra_driver_name = "crct10dif-vpmsum",
102 .cra_priority = 200,
103 .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
104 .cra_module = THIS_MODULE,
105 }
106};
107
108static int __init crct10dif_vpmsum_mod_init(void)
109{
110 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
111 return -ENODEV;
112
113 return crypto_register_shash(&alg);
114}
115
116static void __exit crct10dif_vpmsum_mod_fini(void)
117{
118 crypto_unregister_shash(&alg);
119}
120
121module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crct10dif_vpmsum_mod_init);
122module_exit(crct10dif_vpmsum_mod_fini);
123
124MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
125MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions");
126MODULE_LICENSE("GPL");
127MODULE_ALIAS_CRYPTO("crct10dif");
128MODULE_ALIAS_CRYPTO("crct10dif-vpmsum");
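
One non-obvious step in the glue above: __crct10dif_vpmsum is the shared 32-bit core (pulled in through CRC_FUNCTION_NAME in the .S file), so the 16-bit running CRC is parked in the top half of a 32-bit word around the call, which is what the crc <<= 16 / crc >>= 16 pair does. A standalone sketch of that width adaptation; the wrapper name and function pointer are invented for illustration:

#include <stdint.h>
#include <stddef.h>

/* Run a 16-bit CRC through a 32-bit-domain core by shifting the
 * running value into the upper half and back, mirroring
 * crct10dif_vpmsum() above. */
static uint16_t crc16_via_crc32_core(uint16_t crc,
		uint32_t (*core)(uint32_t, const unsigned char *, size_t),
		const unsigned char *p, size_t len)
{
	uint32_t c = (uint32_t)crc << 16;	/* park in the top half */

	c = core(c, p, len);			/* 32-bit vpmsum-style core */
	return (uint16_t)(c >> 16);		/* bring it back down */
}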
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index a916c4a61165..5f6a5af9c489 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -65,7 +65,6 @@
65#include <linux/linkage.h> 65#include <linux/linkage.h>
66#include <asm/inst.h> 66#include <asm/inst.h>
67 67
68#define CONCAT(a,b) a##b
69#define VMOVDQ vmovdqu 68#define VMOVDQ vmovdqu
70 69
71#define xdata0 %xmm0 70#define xdata0 %xmm0
@@ -92,8 +91,6 @@
92#define num_bytes %r8 91#define num_bytes %r8
93 92
94#define tmp %r10 93#define tmp %r10
95#define DDQ(i) CONCAT(ddq_add_,i)
96#define XMM(i) CONCAT(%xmm, i)
97#define DDQ_DATA 0 94#define DDQ_DATA 0
98#define XDATA 1 95#define XDATA 1
99#define KEY_128 1 96#define KEY_128 1
@@ -131,12 +128,12 @@ ddq_add_8:
131/* generate a unique variable for ddq_add_x */ 128/* generate a unique variable for ddq_add_x */
132 129
133.macro setddq n 130.macro setddq n
134 var_ddq_add = DDQ(\n) 131 var_ddq_add = ddq_add_\n
135.endm 132.endm
136 133
137/* generate a unique variable for xmm register */ 134/* generate a unique variable for xmm register */
138.macro setxdata n 135.macro setxdata n
139 var_xdata = XMM(\n) 136 var_xdata = %xmm\n
140.endm 137.endm
141 138
142/* club the numeric 'id' to the symbol 'name' */ 139/* club the numeric 'id' to the symbol 'name' */
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index aa76cad9d262..af4840ab2a3d 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1522,7 +1522,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1522 struct scatterlist *src, unsigned int nbytes) 1522 struct scatterlist *src, unsigned int nbytes)
1523{ 1523{
1524 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1524 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1525 be128 buf[2 * 4]; 1525 le128 buf[2 * 4];
1526 struct xts_crypt_req req = { 1526 struct xts_crypt_req req = {
1527 .tbuf = buf, 1527 .tbuf = buf,
1528 .tbuflen = sizeof(buf), 1528 .tbuflen = sizeof(buf),
@@ -1540,7 +1540,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1540 struct scatterlist *src, unsigned int nbytes) 1540 struct scatterlist *src, unsigned int nbytes)
1541{ 1541{
1542 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1542 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1543 be128 buf[2 * 4]; 1543 le128 buf[2 * 4];
1544 struct xts_crypt_req req = { 1544 struct xts_crypt_req req = {
1545 .tbuf = buf, 1545 .tbuf = buf,
1546 .tbuflen = sizeof(buf), 1546 .tbuflen = sizeof(buf),
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 260a060d7275..24ac9fad832d 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <crypto/b128ops.h> 29#include <crypto/b128ops.h>
30#include <crypto/gf128mul.h>
30#include <crypto/internal/skcipher.h> 31#include <crypto/internal/skcipher.h>
31#include <crypto/lrw.h> 32#include <crypto/lrw.h>
32#include <crypto/xts.h> 33#include <crypto/xts.h>
@@ -457,7 +458,7 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
457 le128 ivblk = *iv; 458 le128 ivblk = *iv;
458 459
459 /* generate next IV */ 460 /* generate next IV */
460 le128_gf128mul_x_ble(iv, &ivblk); 461 gf128mul_x_ble(iv, &ivblk);
461 462
462 /* CC <- T xor C */ 463 /* CC <- T xor C */
463 u128_xor(dst, src, (u128 *)&ivblk); 464 u128_xor(dst, src, (u128 *)&ivblk);
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 644f97ab8cac..ac0e831943f5 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -328,7 +328,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
328 struct scatterlist *src, unsigned int nbytes) 328 struct scatterlist *src, unsigned int nbytes)
329{ 329{
330 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 330 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
331 be128 buf[SERPENT_PARALLEL_BLOCKS]; 331 le128 buf[SERPENT_PARALLEL_BLOCKS];
332 struct crypt_priv crypt_ctx = { 332 struct crypt_priv crypt_ctx = {
333 .ctx = &ctx->crypt_ctx, 333 .ctx = &ctx->crypt_ctx,
334 .fpu_enabled = false, 334 .fpu_enabled = false,
@@ -355,7 +355,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
355 struct scatterlist *src, unsigned int nbytes) 355 struct scatterlist *src, unsigned int nbytes)
356{ 356{
357 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 357 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
358 be128 buf[SERPENT_PARALLEL_BLOCKS]; 358 le128 buf[SERPENT_PARALLEL_BLOCKS];
359 struct crypt_priv crypt_ctx = { 359 struct crypt_priv crypt_ctx = {
360 .ctx = &ctx->crypt_ctx, 360 .ctx = &ctx->crypt_ctx,
361 .fpu_enabled = false, 361 .fpu_enabled = false,
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 2ebb5e9789f3..243e90a4b5d9 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -296,7 +296,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
296 struct scatterlist *src, unsigned int nbytes) 296 struct scatterlist *src, unsigned int nbytes)
297{ 297{
298 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 298 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
299 be128 buf[3]; 299 le128 buf[3];
300 struct xts_crypt_req req = { 300 struct xts_crypt_req req = {
301 .tbuf = buf, 301 .tbuf = buf,
302 .tbuflen = sizeof(buf), 302 .tbuflen = sizeof(buf),
@@ -314,7 +314,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
314 struct scatterlist *src, unsigned int nbytes) 314 struct scatterlist *src, unsigned int nbytes)
315{ 315{
316 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 316 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
317 be128 buf[3]; 317 le128 buf[3];
318 struct xts_crypt_req req = { 318 struct xts_crypt_req req = {
319 .tbuf = buf, 319 .tbuf = buf,
320 .tbuflen = sizeof(buf), 320 .tbuflen = sizeof(buf),
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 29e53ea7d764..ed8b66de541f 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -125,16 +125,6 @@ static inline void le128_inc(le128 *i)
125 i->b = cpu_to_le64(b); 125 i->b = cpu_to_le64(b);
126} 126}
127 127
128static inline void le128_gf128mul_x_ble(le128 *dst, const le128 *src)
129{
130 u64 a = le64_to_cpu(src->a);
131 u64 b = le64_to_cpu(src->b);
132 u64 _tt = ((s64)a >> 63) & 0x87;
133
134 dst->a = cpu_to_le64((a << 1) ^ (b >> 63));
135 dst->b = cpu_to_le64((b << 1) ^ _tt);
136}
137
138extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, 128extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
139 struct blkcipher_desc *desc, 129 struct blkcipher_desc *desc,
140 struct scatterlist *dst, 130 struct scatterlist *dst,
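
The removed inline and the generic gf128mul_x_ble() that replaces it compute the same thing: doubling an XTS tweak in GF(2^128). For reference, the operation on plain integers, lifted from the deleted body above (a sketch, not the header's API):

#include <stdint.h>

/* Double a 128-bit XTS tweak in GF(2^128): multiply by x and fold the
 * carry out of the top bit back in with 0x87 (x^7 + x^2 + x + 1, the
 * low terms of the reduction polynomial).  'a' and 'b' are the two
 * 64-bit halves exactly as in the removed le128_gf128mul_x_ble(). */
static void gf128_double_ble(uint64_t *ra, uint64_t *rb, uint64_t a, uint64_t b)
{
	uint64_t tt = ((int64_t)a >> 63) & 0x87;

	*ra = (a << 1) ^ (b >> 63);
	*rb = (b << 1) ^ tt;
}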
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f37e9cca50e1..aac4bc90a138 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -374,7 +374,6 @@ config CRYPTO_XTS
374 tristate "XTS support" 374 tristate "XTS support"
375 select CRYPTO_BLKCIPHER 375 select CRYPTO_BLKCIPHER
376 select CRYPTO_MANAGER 376 select CRYPTO_MANAGER
377 select CRYPTO_GF128MUL
378 select CRYPTO_ECB 377 select CRYPTO_ECB
379 help 378 help
380 XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, 379 XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
@@ -513,6 +512,23 @@ config CRYPTO_CRCT10DIF_PCLMUL
 513	  'crct10dif-pclmul' module, which is faster when computing the	 512
514 crct10dif checksum as compared with the generic table implementation. 513 crct10dif checksum as compared with the generic table implementation.
515 514
515config CRYPTO_CRCT10DIF_VPMSUM
 516	tristate "CRCT10DIF powerpc64 hardware acceleration"
517 depends on PPC64 && ALTIVEC && CRC_T10DIF
518 select CRYPTO_HASH
519 help
 520	  CRC-T10DIF algorithm implemented using vector polynomial
521 multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on
522 POWER8 and newer processors for improved performance.
523
524config CRYPTO_VPMSUM_TESTER
525 tristate "Powerpc64 vpmsum hardware acceleration tester"
526 depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
527 help
528 Stress test for CRC32c and CRC-T10DIF algorithms implemented with
529 POWER8 vpmsum instructions.
530 Unless you are testing these algorithms, you don't need this.
531
516config CRYPTO_GHASH 532config CRYPTO_GHASH
517 tristate "GHASH digest algorithm" 533 tristate "GHASH digest algorithm"
518 select CRYPTO_GF128MUL 534 select CRYPTO_GF128MUL
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 47d11627cd20..1544b7c057fb 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -166,5 +166,34 @@ int crypto_unregister_acomp(struct acomp_alg *alg)
166} 166}
167EXPORT_SYMBOL_GPL(crypto_unregister_acomp); 167EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
168 168
169int crypto_register_acomps(struct acomp_alg *algs, int count)
170{
171 int i, ret;
172
173 for (i = 0; i < count; i++) {
174 ret = crypto_register_acomp(&algs[i]);
175 if (ret)
176 goto err;
177 }
178
179 return 0;
180
181err:
182 for (--i; i >= 0; --i)
183 crypto_unregister_acomp(&algs[i]);
184
185 return ret;
186}
187EXPORT_SYMBOL_GPL(crypto_register_acomps);
188
189void crypto_unregister_acomps(struct acomp_alg *algs, int count)
190{
191 int i;
192
193 for (i = count - 1; i >= 0; --i)
194 crypto_unregister_acomp(&algs[i]);
195}
196EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
197
169MODULE_LICENSE("GPL"); 198MODULE_LICENSE("GPL");
170MODULE_DESCRIPTION("Asynchronous compression type"); 199MODULE_DESCRIPTION("Asynchronous compression type");
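
The new batch helpers register an array transactionally: if any registration fails, everything registered so far is unwound before the error is returned. A hypothetical driver exposing several algorithms would use them like this (module name and algorithm definitions are placeholders):

#include <linux/module.h>
#include <crypto/internal/acompress.h>

/* Placeholder algorithm array; a real driver fills in ops and names. */
static struct acomp_alg my_acomp_algs[2];

static int __init my_driver_init(void)
{
	return crypto_register_acomps(my_acomp_algs,
				      ARRAY_SIZE(my_acomp_algs));
}

static void __exit my_driver_exit(void)
{
	crypto_unregister_acomps(my_acomp_algs,
				 ARRAY_SIZE(my_acomp_algs));
}

module_init(my_driver_init);
module_exit(my_driver_exit);
MODULE_LICENSE("GPL");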
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 690deca17c35..3556d8eb54a7 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -160,11 +160,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
160 if (sock->state == SS_CONNECTED) 160 if (sock->state == SS_CONNECTED)
161 return -EINVAL; 161 return -EINVAL;
162 162
163 if (addr_len != sizeof(*sa)) 163 if (addr_len < sizeof(*sa))
164 return -EINVAL; 164 return -EINVAL;
165 165
166 sa->salg_type[sizeof(sa->salg_type) - 1] = 0; 166 sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
167 sa->salg_name[sizeof(sa->salg_name) - 1] = 0; 167 sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
168 168
169 type = alg_get_type(sa->salg_type); 169 type = alg_get_type(sa->salg_type);
170 if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) { 170 if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 6b52e8f0b95f..9eed4ef9c971 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -963,11 +963,11 @@ void crypto_inc(u8 *a, unsigned int size)
963 u32 c; 963 u32 c;
964 964
965 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || 965 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
966 !((unsigned long)b & (__alignof__(*b) - 1))) 966 IS_ALIGNED((unsigned long)b, __alignof__(*b)))
967 for (; size >= 4; size -= 4) { 967 for (; size >= 4; size -= 4) {
968 c = be32_to_cpu(*--b) + 1; 968 c = be32_to_cpu(*--b) + 1;
969 *b = cpu_to_be32(c); 969 *b = cpu_to_be32(c);
970 if (c) 970 if (likely(c))
971 return; 971 return;
972 } 972 }
973 973
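
For context: crypto_inc() advances a counter held as one big-endian integer, and the hunk above only tightens the aligned word-at-a-time fast path. Its byte-level semantics, written out as a sketch:

#include <stdint.h>
#include <stddef.h>

/* Byte-at-a-time equivalent of crypto_inc(): add 1 to a big-endian
 * integer of 'size' bytes, carrying from the last byte forward. */
static void counter_inc_be(uint8_t *a, size_t size)
{
	while (size--)
		if (++a[size])
			break;		/* no carry out of this byte: done */
}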
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index ef59d9926ee9..8af664f7d27c 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -45,6 +45,11 @@ struct aead_async_req {
45 char iv[]; 45 char iv[];
46}; 46};
47 47
48struct aead_tfm {
49 struct crypto_aead *aead;
50 bool has_key;
51};
52
48struct aead_ctx { 53struct aead_ctx {
49 struct aead_sg_list tsgl; 54 struct aead_sg_list tsgl;
50 struct aead_async_rsgl first_rsgl; 55 struct aead_async_rsgl first_rsgl;
@@ -723,24 +728,146 @@ static struct proto_ops algif_aead_ops = {
723 .poll = aead_poll, 728 .poll = aead_poll,
724}; 729};
725 730
731static int aead_check_key(struct socket *sock)
732{
733 int err = 0;
734 struct sock *psk;
735 struct alg_sock *pask;
736 struct aead_tfm *tfm;
737 struct sock *sk = sock->sk;
738 struct alg_sock *ask = alg_sk(sk);
739
740 lock_sock(sk);
741 if (ask->refcnt)
742 goto unlock_child;
743
744 psk = ask->parent;
745 pask = alg_sk(ask->parent);
746 tfm = pask->private;
747
748 err = -ENOKEY;
749 lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
750 if (!tfm->has_key)
751 goto unlock;
752
753 if (!pask->refcnt++)
754 sock_hold(psk);
755
756 ask->refcnt = 1;
757 sock_put(psk);
758
759 err = 0;
760
761unlock:
762 release_sock(psk);
763unlock_child:
764 release_sock(sk);
765
766 return err;
767}
768
769static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
770 size_t size)
771{
772 int err;
773
774 err = aead_check_key(sock);
775 if (err)
776 return err;
777
778 return aead_sendmsg(sock, msg, size);
779}
780
781static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
782 int offset, size_t size, int flags)
783{
784 int err;
785
786 err = aead_check_key(sock);
787 if (err)
788 return err;
789
790 return aead_sendpage(sock, page, offset, size, flags);
791}
792
793static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
794 size_t ignored, int flags)
795{
796 int err;
797
798 err = aead_check_key(sock);
799 if (err)
800 return err;
801
802 return aead_recvmsg(sock, msg, ignored, flags);
803}
804
805static struct proto_ops algif_aead_ops_nokey = {
806 .family = PF_ALG,
807
808 .connect = sock_no_connect,
809 .socketpair = sock_no_socketpair,
810 .getname = sock_no_getname,
811 .ioctl = sock_no_ioctl,
812 .listen = sock_no_listen,
813 .shutdown = sock_no_shutdown,
814 .getsockopt = sock_no_getsockopt,
815 .mmap = sock_no_mmap,
816 .bind = sock_no_bind,
817 .accept = sock_no_accept,
818 .setsockopt = sock_no_setsockopt,
819
820 .release = af_alg_release,
821 .sendmsg = aead_sendmsg_nokey,
822 .sendpage = aead_sendpage_nokey,
823 .recvmsg = aead_recvmsg_nokey,
824 .poll = aead_poll,
825};
826
726static void *aead_bind(const char *name, u32 type, u32 mask) 827static void *aead_bind(const char *name, u32 type, u32 mask)
727{ 828{
728 return crypto_alloc_aead(name, type, mask); 829 struct aead_tfm *tfm;
830 struct crypto_aead *aead;
831
832 tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
833 if (!tfm)
834 return ERR_PTR(-ENOMEM);
835
836 aead = crypto_alloc_aead(name, type, mask);
837 if (IS_ERR(aead)) {
838 kfree(tfm);
839 return ERR_CAST(aead);
840 }
841
842 tfm->aead = aead;
843
844 return tfm;
729} 845}
730 846
731static void aead_release(void *private) 847static void aead_release(void *private)
732{ 848{
733 crypto_free_aead(private); 849 struct aead_tfm *tfm = private;
850
851 crypto_free_aead(tfm->aead);
852 kfree(tfm);
734} 853}
735 854
736static int aead_setauthsize(void *private, unsigned int authsize) 855static int aead_setauthsize(void *private, unsigned int authsize)
737{ 856{
738 return crypto_aead_setauthsize(private, authsize); 857 struct aead_tfm *tfm = private;
858
859 return crypto_aead_setauthsize(tfm->aead, authsize);
739} 860}
740 861
741static int aead_setkey(void *private, const u8 *key, unsigned int keylen) 862static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
742{ 863{
743 return crypto_aead_setkey(private, key, keylen); 864 struct aead_tfm *tfm = private;
865 int err;
866
867 err = crypto_aead_setkey(tfm->aead, key, keylen);
868 tfm->has_key = !err;
869
870 return err;
744} 871}
745 872
746static void aead_sock_destruct(struct sock *sk) 873static void aead_sock_destruct(struct sock *sk)
@@ -757,12 +884,14 @@ static void aead_sock_destruct(struct sock *sk)
757 af_alg_release_parent(sk); 884 af_alg_release_parent(sk);
758} 885}
759 886
760static int aead_accept_parent(void *private, struct sock *sk) 887static int aead_accept_parent_nokey(void *private, struct sock *sk)
761{ 888{
762 struct aead_ctx *ctx; 889 struct aead_ctx *ctx;
763 struct alg_sock *ask = alg_sk(sk); 890 struct alg_sock *ask = alg_sk(sk);
764 unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private); 891 struct aead_tfm *tfm = private;
765 unsigned int ivlen = crypto_aead_ivsize(private); 892 struct crypto_aead *aead = tfm->aead;
893 unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
894 unsigned int ivlen = crypto_aead_ivsize(aead);
766 895
767 ctx = sock_kmalloc(sk, len, GFP_KERNEL); 896 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
768 if (!ctx) 897 if (!ctx)
@@ -789,7 +918,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
789 918
790 ask->private = ctx; 919 ask->private = ctx;
791 920
792 aead_request_set_tfm(&ctx->aead_req, private); 921 aead_request_set_tfm(&ctx->aead_req, aead);
793 aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 922 aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
794 af_alg_complete, &ctx->completion); 923 af_alg_complete, &ctx->completion);
795 924
@@ -798,13 +927,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
798 return 0; 927 return 0;
799} 928}
800 929
930static int aead_accept_parent(void *private, struct sock *sk)
931{
932 struct aead_tfm *tfm = private;
933
934 if (!tfm->has_key)
935 return -ENOKEY;
936
937 return aead_accept_parent_nokey(private, sk);
938}
939
801static const struct af_alg_type algif_type_aead = { 940static const struct af_alg_type algif_type_aead = {
802 .bind = aead_bind, 941 .bind = aead_bind,
803 .release = aead_release, 942 .release = aead_release,
804 .setkey = aead_setkey, 943 .setkey = aead_setkey,
805 .setauthsize = aead_setauthsize, 944 .setauthsize = aead_setauthsize,
806 .accept = aead_accept_parent, 945 .accept = aead_accept_parent,
946 .accept_nokey = aead_accept_parent_nokey,
807 .ops = &algif_aead_ops, 947 .ops = &algif_aead_ops,
948 .ops_nokey = &algif_aead_ops_nokey,
808 .name = "aead", 949 .name = "aead",
809 .owner = THIS_MODULE 950 .owner = THIS_MODULE
810}; 951};
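
Seen from userspace, the nokey ops mean accept(2) on an "aead" socket now fails with ENOKEY until a key has been set. A sketch of the resulting call order (algorithm name and key are placeholders; error handling mostly omitted):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

	if (accept(tfm, NULL, NULL) < 0)
		perror("accept before setkey");	/* now fails: ENOKEY */

	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	int op = accept(tfm, NULL, NULL);	/* succeeds once a key is set */

	close(op);
	close(tfm);
	return 0;
}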
diff --git a/crypto/cbc.c b/crypto/cbc.c
index bc160a3186dc..b761b1f9c6ca 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -10,6 +10,7 @@
10 * 10 *
11 */ 11 */
12 12
13#include <crypto/algapi.h>
13#include <crypto/cbc.h> 14#include <crypto/cbc.h>
14#include <crypto/internal/skcipher.h> 15#include <crypto/internal/skcipher.h>
15#include <linux/err.h> 16#include <linux/err.h>
@@ -108,8 +109,10 @@ static void crypto_cbc_free(struct skcipher_instance *inst)
108static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) 109static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
109{ 110{
110 struct skcipher_instance *inst; 111 struct skcipher_instance *inst;
112 struct crypto_attr_type *algt;
111 struct crypto_spawn *spawn; 113 struct crypto_spawn *spawn;
112 struct crypto_alg *alg; 114 struct crypto_alg *alg;
115 u32 mask;
113 int err; 116 int err;
114 117
115 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); 118 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
@@ -120,8 +123,16 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
120 if (!inst) 123 if (!inst)
121 return -ENOMEM; 124 return -ENOMEM;
122 125
123 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, 126 algt = crypto_get_attr_type(tb);
124 CRYPTO_ALG_TYPE_MASK); 127 err = PTR_ERR(algt);
128 if (IS_ERR(algt))
129 goto err_free_inst;
130
131 mask = CRYPTO_ALG_TYPE_MASK |
132 crypto_requires_off(algt->type, algt->mask,
133 CRYPTO_ALG_NEED_FALLBACK);
134
135 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
125 err = PTR_ERR(alg); 136 err = PTR_ERR(alg);
126 if (IS_ERR(alg)) 137 if (IS_ERR(alg))
127 goto err_free_inst; 138 goto err_free_inst;
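
The new lookup mask in crypto_cbc_create() (and the matching change in crypto/ctr.c below) exists to propagate the caller's "no fallback" requirement to the inner cipher. A sketch of the bit logic, assuming crypto_requires_off() keeps the definition this series adds to include/crypto/algapi.h:

	/*
	 * Returns the bits of @off that the user's (type, mask) pair
	 * explicitly requires to be clear; only those constraints are
	 * propagated to the inner algorithm lookup.
	 */
	static inline u32 crypto_requires_off(u32 type, u32 mask, u32 off)
	{
		return (type ^ off) & mask & off;
	}

	/* crypto_requires_sync() is the same test for CRYPTO_ALG_ASYNC. */
	static inline u32 crypto_requires_sync(u32 type, u32 mask)
	{
		return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
	}

So a request for "cbc(aes)" with CRYPTO_ALG_NEED_FALLBACK clear in type and set in mask now refuses inner ciphers that themselves need a fallback.
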
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index a90404a0c5ff..89acaab1d909 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -83,7 +83,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
83{ 83{
84 struct crypto_report_cipher rcipher; 84 struct crypto_report_cipher rcipher;
85 85
86 strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); 86 strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
87 87
88 rcipher.blocksize = alg->cra_blocksize; 88 rcipher.blocksize = alg->cra_blocksize;
89 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 89 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -102,7 +102,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
102{ 102{
103 struct crypto_report_comp rcomp; 103 struct crypto_report_comp rcomp;
104 104
105 strncpy(rcomp.type, "compression", sizeof(rcomp.type)); 105 strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
106 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, 106 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
107 sizeof(struct crypto_report_comp), &rcomp)) 107 sizeof(struct crypto_report_comp), &rcomp))
108 goto nla_put_failure; 108 goto nla_put_failure;
@@ -116,7 +116,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
116{ 116{
117 struct crypto_report_acomp racomp; 117 struct crypto_report_acomp racomp;
118 118
119 strncpy(racomp.type, "acomp", sizeof(racomp.type)); 119 strlcpy(racomp.type, "acomp", sizeof(racomp.type));
120 120
121 if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, 121 if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
122 sizeof(struct crypto_report_acomp), &racomp)) 122 sizeof(struct crypto_report_acomp), &racomp))
@@ -131,7 +131,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
131{ 131{
132 struct crypto_report_akcipher rakcipher; 132 struct crypto_report_akcipher rakcipher;
133 133
134 strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); 134 strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
135 135
136 if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, 136 if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
137 sizeof(struct crypto_report_akcipher), &rakcipher)) 137 sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -146,7 +146,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
146{ 146{
147 struct crypto_report_kpp rkpp; 147 struct crypto_report_kpp rkpp;
148 148
149 strncpy(rkpp.type, "kpp", sizeof(rkpp.type)); 149 strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
150 150
151 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, 151 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
152 sizeof(struct crypto_report_kpp), &rkpp)) 152 sizeof(struct crypto_report_kpp), &rkpp))
@@ -160,10 +160,10 @@ nla_put_failure:
160static int crypto_report_one(struct crypto_alg *alg, 160static int crypto_report_one(struct crypto_alg *alg,
161 struct crypto_user_alg *ualg, struct sk_buff *skb) 161 struct crypto_user_alg *ualg, struct sk_buff *skb)
162{ 162{
163 strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); 163 strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
164 strncpy(ualg->cru_driver_name, alg->cra_driver_name, 164 strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
165 sizeof(ualg->cru_driver_name)); 165 sizeof(ualg->cru_driver_name));
166 strncpy(ualg->cru_module_name, module_name(alg->cra_module), 166 strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
167 sizeof(ualg->cru_module_name)); 167 sizeof(ualg->cru_module_name));
168 168
169 ualg->cru_type = 0; 169 ualg->cru_type = 0;
@@ -176,7 +176,7 @@ static int crypto_report_one(struct crypto_alg *alg,
176 if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 176 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
177 struct crypto_report_larval rl; 177 struct crypto_report_larval rl;
178 178
179 strncpy(rl.type, "larval", sizeof(rl.type)); 179 strlcpy(rl.type, "larval", sizeof(rl.type));
180 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, 180 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
181 sizeof(struct crypto_report_larval), &rl)) 181 sizeof(struct crypto_report_larval), &rl))
182 goto nla_put_failure; 182 goto nla_put_failure;
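
The strncpy() -> strlcpy() conversion matters because these fixed-size .type strings are copied to user space over netlink: strncpy() leaves the buffer unterminated whenever the source fills it, while strlcpy() always writes a terminating NUL. A sketch of the semantics, modelled on the kernel's lib/string.c implementation:

	size_t strlcpy(char *dest, const char *src, size_t size)
	{
		size_t ret = strlen(src);

		if (size) {
			size_t len = (ret >= size) ? size - 1 : ret;

			memcpy(dest, src, len);
			dest[len] = '\0';	/* unlike strncpy(): always terminated */
		}
		return ret;	/* length of src, so truncation is detectable */
	}
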
diff --git a/crypto/ctr.c b/crypto/ctr.c
index a4f4a8983169..477d9226ccaa 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -181,15 +181,24 @@ static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
181static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) 181static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
182{ 182{
183 struct crypto_instance *inst; 183 struct crypto_instance *inst;
184 struct crypto_attr_type *algt;
184 struct crypto_alg *alg; 185 struct crypto_alg *alg;
186 u32 mask;
185 int err; 187 int err;
186 188
187 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); 189 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
188 if (err) 190 if (err)
189 return ERR_PTR(err); 191 return ERR_PTR(err);
190 192
191 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, 193 algt = crypto_get_attr_type(tb);
192 CRYPTO_ALG_TYPE_MASK); 194 if (IS_ERR(algt))
195 return ERR_CAST(algt);
196
197 mask = CRYPTO_ALG_TYPE_MASK |
198 crypto_requires_off(algt->type, algt->mask,
199 CRYPTO_ALG_NEED_FALLBACK);
200
201 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
193 if (IS_ERR(alg)) 202 if (IS_ERR(alg))
194 return ERR_CAST(alg); 203 return ERR_CAST(alg);
195 204
@@ -350,6 +359,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
350 struct skcipher_alg *alg; 359 struct skcipher_alg *alg;
351 struct crypto_skcipher_spawn *spawn; 360 struct crypto_skcipher_spawn *spawn;
352 const char *cipher_name; 361 const char *cipher_name;
362 u32 mask;
363
353 int err; 364 int err;
354 365
355 algt = crypto_get_attr_type(tb); 366 algt = crypto_get_attr_type(tb);
@@ -367,12 +378,14 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
367 if (!inst) 378 if (!inst)
368 return -ENOMEM; 379 return -ENOMEM;
369 380
381 mask = crypto_requires_sync(algt->type, algt->mask) |
382 crypto_requires_off(algt->type, algt->mask,
383 CRYPTO_ALG_NEED_FALLBACK);
384
370 spawn = skcipher_instance_ctx(inst); 385 spawn = skcipher_instance_ctx(inst);
371 386
372 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); 387 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
373 err = crypto_grab_skcipher(spawn, cipher_name, 0, 388 err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);
374 crypto_requires_sync(algt->type,
375 algt->mask));
376 if (err) 389 if (err)
377 goto err_free_inst; 390 goto err_free_inst;
378 391
diff --git a/crypto/deflate.c b/crypto/deflate.c
index f942cb391890..94ec3b36a8e8 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -43,20 +43,24 @@ struct deflate_ctx {
43 struct z_stream_s decomp_stream; 43 struct z_stream_s decomp_stream;
44}; 44};
45 45
46static int deflate_comp_init(struct deflate_ctx *ctx) 46static int deflate_comp_init(struct deflate_ctx *ctx, int format)
47{ 47{
48 int ret = 0; 48 int ret = 0;
49 struct z_stream_s *stream = &ctx->comp_stream; 49 struct z_stream_s *stream = &ctx->comp_stream;
50 50
51 stream->workspace = vzalloc(zlib_deflate_workspacesize( 51 stream->workspace = vzalloc(zlib_deflate_workspacesize(
52 -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL)); 52 MAX_WBITS, MAX_MEM_LEVEL));
53 if (!stream->workspace) { 53 if (!stream->workspace) {
54 ret = -ENOMEM; 54 ret = -ENOMEM;
55 goto out; 55 goto out;
56 } 56 }
57 ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, 57 if (format)
58 -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, 58 ret = zlib_deflateInit(stream, 3);
59 Z_DEFAULT_STRATEGY); 59 else
60 ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
61 -DEFLATE_DEF_WINBITS,
62 DEFLATE_DEF_MEMLEVEL,
63 Z_DEFAULT_STRATEGY);
60 if (ret != Z_OK) { 64 if (ret != Z_OK) {
61 ret = -EINVAL; 65 ret = -EINVAL;
62 goto out_free; 66 goto out_free;
@@ -68,7 +72,7 @@ out_free:
68 goto out; 72 goto out;
69} 73}
70 74
71static int deflate_decomp_init(struct deflate_ctx *ctx) 75static int deflate_decomp_init(struct deflate_ctx *ctx, int format)
72{ 76{
73 int ret = 0; 77 int ret = 0;
74 struct z_stream_s *stream = &ctx->decomp_stream; 78 struct z_stream_s *stream = &ctx->decomp_stream;
@@ -78,7 +82,10 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
78 ret = -ENOMEM; 82 ret = -ENOMEM;
79 goto out; 83 goto out;
80 } 84 }
81 ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); 85 if (format)
86 ret = zlib_inflateInit(stream);
87 else
88 ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
82 if (ret != Z_OK) { 89 if (ret != Z_OK) {
83 ret = -EINVAL; 90 ret = -EINVAL;
84 goto out_free; 91 goto out_free;
@@ -102,21 +109,21 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
102 vfree(ctx->decomp_stream.workspace); 109 vfree(ctx->decomp_stream.workspace);
103} 110}
104 111
105static int __deflate_init(void *ctx) 112static int __deflate_init(void *ctx, int format)
106{ 113{
107 int ret; 114 int ret;
108 115
109 ret = deflate_comp_init(ctx); 116 ret = deflate_comp_init(ctx, format);
110 if (ret) 117 if (ret)
111 goto out; 118 goto out;
112 ret = deflate_decomp_init(ctx); 119 ret = deflate_decomp_init(ctx, format);
113 if (ret) 120 if (ret)
114 deflate_comp_exit(ctx); 121 deflate_comp_exit(ctx);
115out: 122out:
116 return ret; 123 return ret;
117} 124}
118 125
119static void *deflate_alloc_ctx(struct crypto_scomp *tfm) 126static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
120{ 127{
121 struct deflate_ctx *ctx; 128 struct deflate_ctx *ctx;
122 int ret; 129 int ret;
@@ -125,7 +132,7 @@ static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
125 if (!ctx) 132 if (!ctx)
126 return ERR_PTR(-ENOMEM); 133 return ERR_PTR(-ENOMEM);
127 134
128 ret = __deflate_init(ctx); 135 ret = __deflate_init(ctx, format);
129 if (ret) { 136 if (ret) {
130 kfree(ctx); 137 kfree(ctx);
131 return ERR_PTR(ret); 138 return ERR_PTR(ret);
@@ -134,11 +141,21 @@ static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
134 return ctx; 141 return ctx;
135} 142}
136 143
144static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
145{
146 return gen_deflate_alloc_ctx(tfm, 0);
147}
148
149static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm)
150{
151 return gen_deflate_alloc_ctx(tfm, 1);
152}
153
137static int deflate_init(struct crypto_tfm *tfm) 154static int deflate_init(struct crypto_tfm *tfm)
138{ 155{
139 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); 156 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
140 157
141 return __deflate_init(ctx); 158 return __deflate_init(ctx, 0);
142} 159}
143 160
144static void __deflate_exit(void *ctx) 161static void __deflate_exit(void *ctx)
@@ -272,7 +289,7 @@ static struct crypto_alg alg = {
272 .coa_decompress = deflate_decompress } } 289 .coa_decompress = deflate_decompress } }
273}; 290};
274 291
275static struct scomp_alg scomp = { 292static struct scomp_alg scomp[] = { {
276 .alloc_ctx = deflate_alloc_ctx, 293 .alloc_ctx = deflate_alloc_ctx,
277 .free_ctx = deflate_free_ctx, 294 .free_ctx = deflate_free_ctx,
278 .compress = deflate_scompress, 295 .compress = deflate_scompress,
@@ -282,7 +299,17 @@ static struct scomp_alg scomp = {
282 .cra_driver_name = "deflate-scomp", 299 .cra_driver_name = "deflate-scomp",
283 .cra_module = THIS_MODULE, 300 .cra_module = THIS_MODULE,
284 } 301 }
285}; 302}, {
303 .alloc_ctx = zlib_deflate_alloc_ctx,
304 .free_ctx = deflate_free_ctx,
305 .compress = deflate_scompress,
306 .decompress = deflate_sdecompress,
307 .base = {
308 .cra_name = "zlib-deflate",
309 .cra_driver_name = "zlib-deflate-scomp",
310 .cra_module = THIS_MODULE,
311 }
312} };
286 313
287static int __init deflate_mod_init(void) 314static int __init deflate_mod_init(void)
288{ 315{
@@ -292,7 +319,7 @@ static int __init deflate_mod_init(void)
292 if (ret) 319 if (ret)
293 return ret; 320 return ret;
294 321
295 ret = crypto_register_scomp(&scomp); 322 ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp));
296 if (ret) { 323 if (ret) {
297 crypto_unregister_alg(&alg); 324 crypto_unregister_alg(&alg);
298 return ret; 325 return ret;
@@ -304,7 +331,7 @@ static int __init deflate_mod_init(void)
304static void __exit deflate_mod_fini(void) 331static void __exit deflate_mod_fini(void)
305{ 332{
306 crypto_unregister_alg(&alg); 333 crypto_unregister_alg(&alg);
307 crypto_unregister_scomp(&scomp); 334 crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
308} 335}
309 336
310module_init(deflate_mod_init); 337module_init(deflate_mod_init);
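
With the second scomp_alg entry registered, kernel code can reach the rfc1950-framed stream through the acomp interface under the name "zlib-deflate". A hedged in-kernel sketch: the helper name is invented, the buffers must be linear (e.g. kmalloc'ed) memory, and a synchronous scomp-backed transform is assumed:

	#include <crypto/acompress.h>
	#include <linux/scatterlist.h>

	static int zlib_compress_buf(const void *src, unsigned int slen,
				     void *dst, unsigned int *dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		struct scatterlist sg_src, sg_dst;
		int ret;

		tfm = crypto_alloc_acomp("zlib-deflate", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			crypto_free_acomp(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg_src, src, slen);
		sg_init_one(&sg_dst, dst, *dlen);
		acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);

		ret = crypto_acomp_compress(req);
		if (!ret)
			*dlen = req->dlen;	/* bytes actually produced */

		acomp_request_free(req);
		crypto_free_acomp(tfm);
		return ret;
	}
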
diff --git a/crypto/dh.c b/crypto/dh.c
index ddcb528ab2cc..87e3542cf1b8 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -79,7 +79,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
79 return 0; 79 return 0;
80} 80}
81 81
82static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len) 82static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
83 unsigned int len)
83{ 84{
84 struct dh_ctx *ctx = dh_get_ctx(tfm); 85 struct dh_ctx *ctx = dh_get_ctx(tfm);
85 struct dh params; 86 struct dh params;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 8a4d98b4adba..fa749f470135 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1749,17 +1749,16 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1749 u8 *inbuf, u32 inlen, 1749 u8 *inbuf, u32 inlen,
1750 u8 *outbuf, u32 outlen) 1750 u8 *outbuf, u32 outlen)
1751{ 1751{
1752 struct scatterlist sg_in; 1752 struct scatterlist sg_in, sg_out;
1753 int ret; 1753 int ret;
1754 1754
1755 sg_init_one(&sg_in, inbuf, inlen); 1755 sg_init_one(&sg_in, inbuf, inlen);
1756 sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
1756 1757
1757 while (outlen) { 1758 while (outlen) {
1758 u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN); 1759 u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
1759 struct scatterlist sg_out;
1760 1760
1761 /* Output buffer may not be valid for SGL, use scratchpad */ 1761 /* Output buffer may not be valid for SGL, use scratchpad */
1762 sg_init_one(&sg_out, drbg->outscratchpad, cryptlen);
1763 skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, 1762 skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
1764 cryptlen, drbg->V); 1763 cryptlen, drbg->V);
1765 ret = crypto_skcipher_encrypt(drbg->ctr_req); 1764 ret = crypto_skcipher_encrypt(drbg->ctr_req);
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 3de289806d67..63ca33771e4e 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -38,7 +38,8 @@ static unsigned int ecdh_supported_curve(unsigned int curve_id)
38 } 38 }
39} 39}
40 40
41static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len) 41static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
42 unsigned int len)
42{ 43{
43 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); 44 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
44 struct ecdh params; 45 struct ecdh params;
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 72015fee533d..dc012129c063 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -44,7 +44,7 @@
44 --------------------------------------------------------------------------- 44 ---------------------------------------------------------------------------
45 Issue 31/01/2006 45 Issue 31/01/2006
46 46
47 This file provides fast multiplication in GF(128) as required by several 47 This file provides fast multiplication in GF(2^128) as required by several
48 cryptographic authentication modes 48 cryptographic authentication modes
49*/ 49*/
50 50
@@ -88,76 +88,59 @@
88 q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \ 88 q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
89} 89}
90 90
91/* Given the value i in 0..255 as the byte overflow when a field element 91/*
92 in GHASH is multiplied by x^8, this function will return the values that 92 * Given a value i in 0..255 as the byte overflow when a field element
93 are generated in the lo 16-bit word of the field value by applying the 93 * in GF(2^128) is multiplied by x^8, the following macro returns the
94 modular polynomial. The values lo_byte and hi_byte are returned via the 94 * 16-bit value that must be XOR-ed into the low-degree end of the
95 macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into 95 * product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
96 memory as required by a suitable definition of this macro operating on 96 *
97 the table above 97 * There are two versions of the macro, and hence two tables: one for
98*/ 98 * the "be" convention where the highest-order bit is the coefficient of
99 99 * the highest-degree polynomial term, and one for the "le" convention
100#define xx(p, q) 0x##p##q 100 * where the highest-order bit is the coefficient of the lowest-degree
101 * polynomial term. In both cases the values are stored in CPU byte
102 * endianness such that the coefficients are ordered consistently across
103 * bytes, i.e. in the "be" table bits 15..0 of the stored value
104 * correspond to the coefficients of x^15..x^0, and in the "le" table
105 * bits 15..0 correspond to the coefficients of x^0..x^15.
106 *
107 * Therefore, provided that the appropriate byte endianness conversions
108 * are done by the multiplication functions (and these must be in place
109 * anyway to support both little endian and big endian CPUs), the "be"
110 * table can be used for multiplications of both "bbe" and "ble"
111 * elements, and the "le" table can be used for multiplications of both
112 * "lle" and "lbe" elements.
113 */
101 114
102#define xda_bbe(i) ( \ 115#define xda_be(i) ( \
103 (i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \ 116 (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
104 (i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \ 117 (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
105 (i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \ 118 (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
106 (i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \ 119 (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
107) 120)
108 121
109#define xda_lle(i) ( \ 122#define xda_le(i) ( \
110 (i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \ 123 (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
111 (i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \ 124 (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
112 (i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \ 125 (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
113 (i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \ 126 (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
114) 127)
115 128
116static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle); 129static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
117static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe); 130static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
118 131
119/* These functions multiply a field element by x, by x^4 and by x^8 132/*
120 * in the polynomial field representation. It uses 32-bit word operations 133 * The following functions multiply a field element by x^8 in
121 * to gain speed but compensates for machine endianess and hence works 134 * the polynomial field representation. They use 64-bit word operations
135 * to gain speed but compensate for machine endianness and hence work
122 * correctly on both styles of machine. 136 * correctly on both styles of machine.
123 */ 137 */
124 138
125static void gf128mul_x_lle(be128 *r, const be128 *x)
126{
127 u64 a = be64_to_cpu(x->a);
128 u64 b = be64_to_cpu(x->b);
129 u64 _tt = gf128mul_table_lle[(b << 7) & 0xff];
130
131 r->b = cpu_to_be64((b >> 1) | (a << 63));
132 r->a = cpu_to_be64((a >> 1) ^ (_tt << 48));
133}
134
135static void gf128mul_x_bbe(be128 *r, const be128 *x)
136{
137 u64 a = be64_to_cpu(x->a);
138 u64 b = be64_to_cpu(x->b);
139 u64 _tt = gf128mul_table_bbe[a >> 63];
140
141 r->a = cpu_to_be64((a << 1) | (b >> 63));
142 r->b = cpu_to_be64((b << 1) ^ _tt);
143}
144
145void gf128mul_x_ble(be128 *r, const be128 *x)
146{
147 u64 a = le64_to_cpu(x->a);
148 u64 b = le64_to_cpu(x->b);
149 u64 _tt = gf128mul_table_bbe[b >> 63];
150
151 r->a = cpu_to_le64((a << 1) ^ _tt);
152 r->b = cpu_to_le64((b << 1) | (a >> 63));
153}
154EXPORT_SYMBOL(gf128mul_x_ble);
155
156static void gf128mul_x8_lle(be128 *x) 139static void gf128mul_x8_lle(be128 *x)
157{ 140{
158 u64 a = be64_to_cpu(x->a); 141 u64 a = be64_to_cpu(x->a);
159 u64 b = be64_to_cpu(x->b); 142 u64 b = be64_to_cpu(x->b);
160 u64 _tt = gf128mul_table_lle[b & 0xff]; 143 u64 _tt = gf128mul_table_le[b & 0xff];
161 144
162 x->b = cpu_to_be64((b >> 8) | (a << 56)); 145 x->b = cpu_to_be64((b >> 8) | (a << 56));
163 x->a = cpu_to_be64((a >> 8) ^ (_tt << 48)); 146 x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
@@ -167,7 +150,7 @@ static void gf128mul_x8_bbe(be128 *x)
167{ 150{
168 u64 a = be64_to_cpu(x->a); 151 u64 a = be64_to_cpu(x->a);
169 u64 b = be64_to_cpu(x->b); 152 u64 b = be64_to_cpu(x->b);
170 u64 _tt = gf128mul_table_bbe[a >> 56]; 153 u64 _tt = gf128mul_table_be[a >> 56];
171 154
172 x->a = cpu_to_be64((a << 8) | (b >> 56)); 155 x->a = cpu_to_be64((a << 8) | (b >> 56));
173 x->b = cpu_to_be64((b << 8) ^ _tt); 156 x->b = cpu_to_be64((b << 8) ^ _tt);
@@ -251,7 +234,7 @@ EXPORT_SYMBOL(gf128mul_bbe);
251 234
252/* This version uses 64k bytes of table space. 235/* This version uses 64k bytes of table space.
253 A 16 byte buffer has to be multiplied by a 16 byte key 236 A 16 byte buffer has to be multiplied by a 16 byte key
254 value in GF(128). If we consider a GF(128) value in 237 value in GF(2^128). If we consider a GF(2^128) value in
255 the buffer's lowest byte, we can construct a table of 238 the buffer's lowest byte, we can construct a table of
256 the 256 16 byte values that result from the 256 values 239 the 256 16 byte values that result from the 256 values
257 of this byte. This requires 4096 bytes. But we also 240 of this byte. This requires 4096 bytes. But we also
@@ -315,7 +298,7 @@ void gf128mul_free_64k(struct gf128mul_64k *t)
315} 298}
316EXPORT_SYMBOL(gf128mul_free_64k); 299EXPORT_SYMBOL(gf128mul_free_64k);
317 300
318void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t) 301void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
319{ 302{
320 u8 *ap = (u8 *)a; 303 u8 *ap = (u8 *)a;
321 be128 r[1]; 304 be128 r[1];
@@ -330,7 +313,7 @@ EXPORT_SYMBOL(gf128mul_64k_bbe);
330 313
331/* This version uses 4k bytes of table space. 314/* This version uses 4k bytes of table space.
332 A 16 byte buffer has to be multiplied by a 16 byte key 315 A 16 byte buffer has to be multiplied by a 16 byte key
333 value in GF(128). If we consider a GF(128) value in a 316 value in GF(2^128). If we consider a GF(2^128) value in a
334 single byte, we can construct a table of the 256 16 byte 317 single byte, we can construct a table of the 256 16 byte
335 values that result from the 256 values of this byte. 318 values that result from the 256 values of this byte.
336 This requires 4096 bytes. If we take the highest byte in 319 This requires 4096 bytes. If we take the highest byte in
@@ -388,7 +371,7 @@ out:
388} 371}
389EXPORT_SYMBOL(gf128mul_init_4k_bbe); 372EXPORT_SYMBOL(gf128mul_init_4k_bbe);
390 373
391void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t) 374void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
392{ 375{
393 u8 *ap = (u8 *)a; 376 u8 *ap = (u8 *)a;
394 be128 r[1]; 377 be128 r[1];
@@ -403,7 +386,7 @@ void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
403} 386}
404EXPORT_SYMBOL(gf128mul_4k_lle); 387EXPORT_SYMBOL(gf128mul_4k_lle);
405 388
406void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t) 389void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
407{ 390{
408 u8 *ap = (u8 *)a; 391 u8 *ap = (u8 *)a;
409 be128 r[1]; 392 be128 r[1];
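
The rewritten table comment can be checked by hand. In the "be" convention, multiplying by x^8 pushes the top byte past x^127, and each overflowed bit x^(128+k) reduces via the field polynomial:

	x^(128+k) = x^k * (x^7 + x^2 + x + 1)   (mod x^128 + x^7 + x^2 + x + 1)

	k = 0:  x^7 + x^2 + x + 1       -> bits 7,2,1,0  -> 0x0087
	k = 7:  x^14 + x^9 + x^8 + x^7  -> bits 14,9,8,7 -> 0x4380

which are exactly the 0x01 and 0x80 terms of the xda_be() macro above.
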
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 71eff9b01b12..2ce2660d3519 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -97,7 +97,7 @@ static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
97 int out_len = LZ4_decompress_safe(src, dst, slen, *dlen); 97 int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
98 98
99 if (out_len < 0) 99 if (out_len < 0)
100 return out_len; 100 return -EINVAL;
101 101
102 *dlen = out_len; 102 *dlen = out_len;
103 return 0; 103 return 0;
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 03a34a8109c0..2be14f054daf 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -98,7 +98,7 @@ static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
98 int out_len = LZ4_decompress_safe(src, dst, slen, *dlen); 98 int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
99 99
100 if (out_len < 0) 100 if (out_len < 0)
101 return out_len; 101 return -EINVAL;
102 102
103 *dlen = out_len; 103 *dlen = out_len;
104 return 0; 104 return 0;
diff --git a/crypto/md5.c b/crypto/md5.c
index 2355a7c25c45..f7ae1a48225b 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -21,9 +21,11 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/cryptohash.h>
25#include <asm/byteorder.h> 24#include <asm/byteorder.h>
26 25
26#define MD5_DIGEST_WORDS 4
27#define MD5_MESSAGE_BYTES 64
28
27const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { 29const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
28 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 30 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
29 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, 31 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
@@ -47,6 +49,97 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
47 } 49 }
48} 50}
49 51
52#define F1(x, y, z) (z ^ (x & (y ^ z)))
53#define F2(x, y, z) F1(z, x, y)
54#define F3(x, y, z) (x ^ y ^ z)
55#define F4(x, y, z) (y ^ (x | ~z))
56
57#define MD5STEP(f, w, x, y, z, in, s) \
58 (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
59
60static void md5_transform(__u32 *hash, __u32 const *in)
61{
62 u32 a, b, c, d;
63
64 a = hash[0];
65 b = hash[1];
66 c = hash[2];
67 d = hash[3];
68
69 MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
70 MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
71 MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
72 MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
73 MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
74 MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
75 MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
76 MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
77 MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
78 MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
79 MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
80 MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
81 MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
82 MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
83 MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
84 MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
85
86 MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
87 MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
88 MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
89 MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
90 MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
91 MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
92 MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
93 MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
94 MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
95 MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
96 MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
97 MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
98 MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
99 MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
100 MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
101 MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
102
103 MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
104 MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
105 MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
106 MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
107 MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
108 MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
109 MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
110 MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
111 MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
112 MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
113 MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
114 MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
115 MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
116 MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
117 MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
118 MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
119
120 MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
121 MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
122 MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
123 MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
124 MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
125 MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
126 MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
127 MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
128 MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
129 MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
130 MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
131 MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
132 MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
133 MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
134 MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
135 MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
136
137 hash[0] += a;
138 hash[1] += b;
139 hash[2] += c;
140 hash[3] += d;
141}
142
50static inline void md5_transform_helper(struct md5_state *ctx) 143static inline void md5_transform_helper(struct md5_state *ctx)
51{ 144{
52 le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); 145 le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
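
With md5_transform now private to crypto/md5.c, the <linux/cryptohash.h> dependency is gone, but consumers are unaffected: they keep going through the hash API. A hedged sketch of one-shot digesting with the generic md5 shash (the wrapper name is invented; desc->flags still exists in this kernel generation):

	#include <crypto/hash.h>
	#include <crypto/md5.h>
	#include <linux/err.h>

	static int md5_digest_buf(const u8 *data, unsigned int len,
				  u8 out[MD5_DIGEST_SIZE])
	{
		struct crypto_shash *tfm;
		int ret;

		tfm = crypto_alloc_shash("md5", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			ret = crypto_shash_digest(desc, data, len, out);
		}

		crypto_free_shash(tfm);
		return ret;
	}
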
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 6b048b36312d..ae1d3cf209e4 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -353,5 +353,34 @@ int crypto_unregister_scomp(struct scomp_alg *alg)
353} 353}
354EXPORT_SYMBOL_GPL(crypto_unregister_scomp); 354EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
355 355
356int crypto_register_scomps(struct scomp_alg *algs, int count)
357{
358 int i, ret;
359
360 for (i = 0; i < count; i++) {
361 ret = crypto_register_scomp(&algs[i]);
362 if (ret)
363 goto err;
364 }
365
366 return 0;
367
368err:
369 for (--i; i >= 0; --i)
370 crypto_unregister_scomp(&algs[i]);
371
372 return ret;
373}
374EXPORT_SYMBOL_GPL(crypto_register_scomps);
375
376void crypto_unregister_scomps(struct scomp_alg *algs, int count)
377{
378 int i;
379
380 for (i = count - 1; i >= 0; --i)
381 crypto_unregister_scomp(&algs[i]);
382}
383EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
384
356MODULE_LICENSE("GPL"); 385MODULE_LICENSE("GPL");
357MODULE_DESCRIPTION("Synchronous compression type"); 386MODULE_DESCRIPTION("Synchronous compression type");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f9c378af3907..6f5f3ed8376c 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -83,47 +83,47 @@ struct tcrypt_result {
83 83
84struct aead_test_suite { 84struct aead_test_suite {
85 struct { 85 struct {
86 struct aead_testvec *vecs; 86 const struct aead_testvec *vecs;
87 unsigned int count; 87 unsigned int count;
88 } enc, dec; 88 } enc, dec;
89}; 89};
90 90
91struct cipher_test_suite { 91struct cipher_test_suite {
92 struct { 92 struct {
93 struct cipher_testvec *vecs; 93 const struct cipher_testvec *vecs;
94 unsigned int count; 94 unsigned int count;
95 } enc, dec; 95 } enc, dec;
96}; 96};
97 97
98struct comp_test_suite { 98struct comp_test_suite {
99 struct { 99 struct {
100 struct comp_testvec *vecs; 100 const struct comp_testvec *vecs;
101 unsigned int count; 101 unsigned int count;
102 } comp, decomp; 102 } comp, decomp;
103}; 103};
104 104
105struct hash_test_suite { 105struct hash_test_suite {
106 struct hash_testvec *vecs; 106 const struct hash_testvec *vecs;
107 unsigned int count; 107 unsigned int count;
108}; 108};
109 109
110struct cprng_test_suite { 110struct cprng_test_suite {
111 struct cprng_testvec *vecs; 111 const struct cprng_testvec *vecs;
112 unsigned int count; 112 unsigned int count;
113}; 113};
114 114
115struct drbg_test_suite { 115struct drbg_test_suite {
116 struct drbg_testvec *vecs; 116 const struct drbg_testvec *vecs;
117 unsigned int count; 117 unsigned int count;
118}; 118};
119 119
120struct akcipher_test_suite { 120struct akcipher_test_suite {
121 struct akcipher_testvec *vecs; 121 const struct akcipher_testvec *vecs;
122 unsigned int count; 122 unsigned int count;
123}; 123};
124 124
125struct kpp_test_suite { 125struct kpp_test_suite {
126 struct kpp_testvec *vecs; 126 const struct kpp_testvec *vecs;
127 unsigned int count; 127 unsigned int count;
128}; 128};
129 129
@@ -145,7 +145,8 @@ struct alg_test_desc {
145 } suite; 145 } suite;
146}; 146};
147 147
148static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; 148static const unsigned int IDX[8] = {
149 IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
149 150
150static void hexdump(unsigned char *buf, unsigned int len) 151static void hexdump(unsigned char *buf, unsigned int len)
151{ 152{
@@ -203,7 +204,7 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
203} 204}
204 205
205static int ahash_partial_update(struct ahash_request **preq, 206static int ahash_partial_update(struct ahash_request **preq,
206 struct crypto_ahash *tfm, struct hash_testvec *template, 207 struct crypto_ahash *tfm, const struct hash_testvec *template,
207 void *hash_buff, int k, int temp, struct scatterlist *sg, 208 void *hash_buff, int k, int temp, struct scatterlist *sg,
208 const char *algo, char *result, struct tcrypt_result *tresult) 209 const char *algo, char *result, struct tcrypt_result *tresult)
209{ 210{
@@ -260,9 +261,9 @@ out_nostate:
260 return ret; 261 return ret;
261} 262}
262 263
263static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, 264static int __test_hash(struct crypto_ahash *tfm,
264 unsigned int tcount, bool use_digest, 265 const struct hash_testvec *template, unsigned int tcount,
265 const int align_offset) 266 bool use_digest, const int align_offset)
266{ 267{
267 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); 268 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
268 size_t digest_size = crypto_ahash_digestsize(tfm); 269 size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -538,7 +539,8 @@ out_nobuf:
538 return ret; 539 return ret;
539} 540}
540 541
541static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, 542static int test_hash(struct crypto_ahash *tfm,
543 const struct hash_testvec *template,
542 unsigned int tcount, bool use_digest) 544 unsigned int tcount, bool use_digest)
543{ 545{
544 unsigned int alignmask; 546 unsigned int alignmask;
@@ -566,7 +568,7 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
566} 568}
567 569
568static int __test_aead(struct crypto_aead *tfm, int enc, 570static int __test_aead(struct crypto_aead *tfm, int enc,
569 struct aead_testvec *template, unsigned int tcount, 571 const struct aead_testvec *template, unsigned int tcount,
570 const bool diff_dst, const int align_offset) 572 const bool diff_dst, const int align_offset)
571{ 573{
572 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); 574 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
@@ -957,7 +959,7 @@ out_noxbuf:
957} 959}
958 960
959static int test_aead(struct crypto_aead *tfm, int enc, 961static int test_aead(struct crypto_aead *tfm, int enc,
960 struct aead_testvec *template, unsigned int tcount) 962 const struct aead_testvec *template, unsigned int tcount)
961{ 963{
962 unsigned int alignmask; 964 unsigned int alignmask;
963 int ret; 965 int ret;
@@ -990,7 +992,8 @@ static int test_aead(struct crypto_aead *tfm, int enc,
990} 992}
991 993
992static int test_cipher(struct crypto_cipher *tfm, int enc, 994static int test_cipher(struct crypto_cipher *tfm, int enc,
993 struct cipher_testvec *template, unsigned int tcount) 995 const struct cipher_testvec *template,
996 unsigned int tcount)
994{ 997{
995 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm)); 998 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
996 unsigned int i, j, k; 999 unsigned int i, j, k;
@@ -1068,7 +1071,8 @@ out_nobuf:
1068} 1071}
1069 1072
1070static int __test_skcipher(struct crypto_skcipher *tfm, int enc, 1073static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1071 struct cipher_testvec *template, unsigned int tcount, 1074 const struct cipher_testvec *template,
1075 unsigned int tcount,
1072 const bool diff_dst, const int align_offset) 1076 const bool diff_dst, const int align_offset)
1073{ 1077{
1074 const char *algo = 1078 const char *algo =
@@ -1332,7 +1336,8 @@ out_nobuf:
1332} 1336}
1333 1337
1334static int test_skcipher(struct crypto_skcipher *tfm, int enc, 1338static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1335 struct cipher_testvec *template, unsigned int tcount) 1339 const struct cipher_testvec *template,
1340 unsigned int tcount)
1336{ 1341{
1337 unsigned int alignmask; 1342 unsigned int alignmask;
1338 int ret; 1343 int ret;
@@ -1364,8 +1369,10 @@ static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1364 return 0; 1369 return 0;
1365} 1370}
1366 1371
1367static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, 1372static int test_comp(struct crypto_comp *tfm,
1368 struct comp_testvec *dtemplate, int ctcount, int dtcount) 1373 const struct comp_testvec *ctemplate,
1374 const struct comp_testvec *dtemplate,
1375 int ctcount, int dtcount)
1369{ 1376{
1370 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); 1377 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1371 unsigned int i; 1378 unsigned int i;
@@ -1444,12 +1451,14 @@ out:
1444 return ret; 1451 return ret;
1445} 1452}
1446 1453
1447static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, 1454static int test_acomp(struct crypto_acomp *tfm,
1448 struct comp_testvec *dtemplate, int ctcount, int dtcount) 1455 const struct comp_testvec *ctemplate,
1456 const struct comp_testvec *dtemplate,
1457 int ctcount, int dtcount)
1449{ 1458{
1450 const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); 1459 const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1451 unsigned int i; 1460 unsigned int i;
1452 char *output; 1461 char *output, *decomp_out;
1453 int ret; 1462 int ret;
1454 struct scatterlist src, dst; 1463 struct scatterlist src, dst;
1455 struct acomp_req *req; 1464 struct acomp_req *req;
@@ -1459,6 +1468,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1459 if (!output) 1468 if (!output)
1460 return -ENOMEM; 1469 return -ENOMEM;
1461 1470
1471 decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1472 if (!decomp_out) {
1473 kfree(output);
1474 return -ENOMEM;
1475 }
1476
1462 for (i = 0; i < ctcount; i++) { 1477 for (i = 0; i < ctcount; i++) {
1463 unsigned int dlen = COMP_BUF_SIZE; 1478 unsigned int dlen = COMP_BUF_SIZE;
1464 int ilen = ctemplate[i].inlen; 1479 int ilen = ctemplate[i].inlen;
@@ -1497,7 +1512,23 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1497 goto out; 1512 goto out;
1498 } 1513 }
1499 1514
1500 if (req->dlen != ctemplate[i].outlen) { 1515 ilen = req->dlen;
1516 dlen = COMP_BUF_SIZE;
1517 sg_init_one(&src, output, ilen);
1518 sg_init_one(&dst, decomp_out, dlen);
1519 init_completion(&result.completion);
1520 acomp_request_set_params(req, &src, &dst, ilen, dlen);
1521
1522 ret = wait_async_op(&result, crypto_acomp_decompress(req));
1523 if (ret) {
1524 pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1525 i + 1, algo, -ret);
1526 kfree(input_vec);
1527 acomp_request_free(req);
1528 goto out;
1529 }
1530
1531 if (req->dlen != ctemplate[i].inlen) {
1501 pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", 1532 pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1502 i + 1, algo, req->dlen); 1533 i + 1, algo, req->dlen);
1503 ret = -EINVAL; 1534 ret = -EINVAL;
@@ -1506,7 +1537,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1506 goto out; 1537 goto out;
1507 } 1538 }
1508 1539
1509 if (memcmp(output, ctemplate[i].output, req->dlen)) { 1540 if (memcmp(input_vec, decomp_out, req->dlen)) {
1510 pr_err("alg: acomp: Compression test %d failed for %s\n", 1541 pr_err("alg: acomp: Compression test %d failed for %s\n",
1511 i + 1, algo); 1542 i + 1, algo);
1512 hexdump(output, req->dlen); 1543 hexdump(output, req->dlen);
@@ -1584,11 +1615,13 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1584 ret = 0; 1615 ret = 0;
1585 1616
1586out: 1617out:
1618 kfree(decomp_out);
1587 kfree(output); 1619 kfree(output);
1588 return ret; 1620 return ret;
1589} 1621}
1590 1622
1591static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, 1623static int test_cprng(struct crypto_rng *tfm,
1624 const struct cprng_testvec *template,
1592 unsigned int tcount) 1625 unsigned int tcount)
1593{ 1626{
1594 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); 1627 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
@@ -1865,7 +1898,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1865} 1898}
1866 1899
1867 1900
1868static int drbg_cavs_test(struct drbg_testvec *test, int pr, 1901static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1869 const char *driver, u32 type, u32 mask) 1902 const char *driver, u32 type, u32 mask)
1870{ 1903{
1871 int ret = -EAGAIN; 1904 int ret = -EAGAIN;
@@ -1939,7 +1972,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1939 int err = 0; 1972 int err = 0;
1940 int pr = 0; 1973 int pr = 0;
1941 int i = 0; 1974 int i = 0;
1942 struct drbg_testvec *template = desc->suite.drbg.vecs; 1975 const struct drbg_testvec *template = desc->suite.drbg.vecs;
1943 unsigned int tcount = desc->suite.drbg.count; 1976 unsigned int tcount = desc->suite.drbg.count;
1944 1977
1945 if (0 == memcmp(driver, "drbg_pr_", 8)) 1978 if (0 == memcmp(driver, "drbg_pr_", 8))
@@ -1958,7 +1991,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1958 1991
1959} 1992}
1960 1993
1961static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec, 1994static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1962 const char *alg) 1995 const char *alg)
1963{ 1996{
1964 struct kpp_request *req; 1997 struct kpp_request *req;
@@ -2050,7 +2083,7 @@ free_req:
2050} 2083}
2051 2084
2052static int test_kpp(struct crypto_kpp *tfm, const char *alg, 2085static int test_kpp(struct crypto_kpp *tfm, const char *alg,
2053 struct kpp_testvec *vecs, unsigned int tcount) 2086 const struct kpp_testvec *vecs, unsigned int tcount)
2054{ 2087{
2055 int ret, i; 2088 int ret, i;
2056 2089
@@ -2086,7 +2119,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
2086} 2119}
2087 2120
2088static int test_akcipher_one(struct crypto_akcipher *tfm, 2121static int test_akcipher_one(struct crypto_akcipher *tfm,
2089 struct akcipher_testvec *vecs) 2122 const struct akcipher_testvec *vecs)
2090{ 2123{
2091 char *xbuf[XBUFSIZE]; 2124 char *xbuf[XBUFSIZE];
2092 struct akcipher_request *req; 2125 struct akcipher_request *req;
@@ -2206,7 +2239,8 @@ free_xbuf:
2206} 2239}
2207 2240
2208static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, 2241static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
2209 struct akcipher_testvec *vecs, unsigned int tcount) 2242 const struct akcipher_testvec *vecs,
2243 unsigned int tcount)
2210{ 2244{
2211 const char *algo = 2245 const char *algo =
2212 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm)); 2246 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
@@ -2634,6 +2668,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2634 }, { 2668 }, {
2635 .alg = "ctr(des3_ede)", 2669 .alg = "ctr(des3_ede)",
2636 .test = alg_test_skcipher, 2670 .test = alg_test_skcipher,
2671 .fips_allowed = 1,
2637 .suite = { 2672 .suite = {
2638 .cipher = { 2673 .cipher = {
2639 .enc = __VECS(des3_ede_ctr_enc_tv_template), 2674 .enc = __VECS(des3_ede_ctr_enc_tv_template),
@@ -2875,6 +2910,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2875 }, { 2910 }, {
2876 .alg = "ecb(cipher_null)", 2911 .alg = "ecb(cipher_null)",
2877 .test = alg_test_null, 2912 .test = alg_test_null,
2913 .fips_allowed = 1,
2878 }, { 2914 }, {
2879 .alg = "ecb(des)", 2915 .alg = "ecb(des)",
2880 .test = alg_test_skcipher, 2916 .test = alg_test_skcipher,
@@ -3477,6 +3513,16 @@ static const struct alg_test_desc alg_test_descs[] = {
3477 .dec = __VECS(tf_xts_dec_tv_template) 3513 .dec = __VECS(tf_xts_dec_tv_template)
3478 } 3514 }
3479 } 3515 }
3516 }, {
3517 .alg = "zlib-deflate",
3518 .test = alg_test_comp,
3519 .fips_allowed = 1,
3520 .suite = {
3521 .comp = {
3522 .comp = __VECS(zlib_deflate_comp_tv_template),
3523 .decomp = __VECS(zlib_deflate_decomp_tv_template)
3524 }
3525 }
3480 } 3526 }
3481}; 3527};
3482 3528
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 03f473116f78..429357339dcc 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -34,9 +34,9 @@
34 34
35struct hash_testvec { 35struct hash_testvec {
36 /* only used with keyed hash algorithms */ 36 /* only used with keyed hash algorithms */
37 char *key; 37 const char *key;
38 char *plaintext; 38 const char *plaintext;
39 char *digest; 39 const char *digest;
40 unsigned char tap[MAX_TAP]; 40 unsigned char tap[MAX_TAP];
41 unsigned short psize; 41 unsigned short psize;
42 unsigned char np; 42 unsigned char np;
@@ -63,11 +63,11 @@ struct hash_testvec {
63 */ 63 */
64 64
65struct cipher_testvec { 65struct cipher_testvec {
66 char *key; 66 const char *key;
67 char *iv; 67 const char *iv;
68 char *iv_out; 68 const char *iv_out;
69 char *input; 69 const char *input;
70 char *result; 70 const char *result;
71 unsigned short tap[MAX_TAP]; 71 unsigned short tap[MAX_TAP];
72 int np; 72 int np;
73 unsigned char also_non_np; 73 unsigned char also_non_np;
@@ -80,11 +80,11 @@ struct cipher_testvec {
80}; 80};
81 81
82struct aead_testvec { 82struct aead_testvec {
83 char *key; 83 const char *key;
84 char *iv; 84 const char *iv;
85 char *input; 85 const char *input;
86 char *assoc; 86 const char *assoc;
87 char *result; 87 const char *result;
88 unsigned char tap[MAX_TAP]; 88 unsigned char tap[MAX_TAP];
89 unsigned char atap[MAX_TAP]; 89 unsigned char atap[MAX_TAP];
90 int np; 90 int np;
@@ -99,10 +99,10 @@ struct aead_testvec {
99}; 99};
100 100
101struct cprng_testvec { 101struct cprng_testvec {
102 char *key; 102 const char *key;
103 char *dt; 103 const char *dt;
104 char *v; 104 const char *v;
105 char *result; 105 const char *result;
106 unsigned char klen; 106 unsigned char klen;
107 unsigned short dtlen; 107 unsigned short dtlen;
108 unsigned short vlen; 108 unsigned short vlen;
@@ -111,24 +111,24 @@ struct cprng_testvec {
111}; 111};
112 112
113struct drbg_testvec { 113struct drbg_testvec {
114 unsigned char *entropy; 114 const unsigned char *entropy;
115 size_t entropylen; 115 size_t entropylen;
116 unsigned char *entpra; 116 const unsigned char *entpra;
117 unsigned char *entprb; 117 const unsigned char *entprb;
118 size_t entprlen; 118 size_t entprlen;
119 unsigned char *addtla; 119 const unsigned char *addtla;
120 unsigned char *addtlb; 120 const unsigned char *addtlb;
121 size_t addtllen; 121 size_t addtllen;
122 unsigned char *pers; 122 const unsigned char *pers;
123 size_t perslen; 123 size_t perslen;
124 unsigned char *expected; 124 const unsigned char *expected;
125 size_t expectedlen; 125 size_t expectedlen;
126}; 126};
127 127
128struct akcipher_testvec { 128struct akcipher_testvec {
129 unsigned char *key; 129 const unsigned char *key;
130 unsigned char *m; 130 const unsigned char *m;
131 unsigned char *c; 131 const unsigned char *c;
132 unsigned int key_len; 132 unsigned int key_len;
133 unsigned int m_size; 133 unsigned int m_size;
134 unsigned int c_size; 134 unsigned int c_size;
@@ -136,22 +136,22 @@ struct akcipher_testvec {
136}; 136};
137 137
138struct kpp_testvec { 138struct kpp_testvec {
139 unsigned char *secret; 139 const unsigned char *secret;
140 unsigned char *b_public; 140 const unsigned char *b_public;
141 unsigned char *expected_a_public; 141 const unsigned char *expected_a_public;
142 unsigned char *expected_ss; 142 const unsigned char *expected_ss;
143 unsigned short secret_size; 143 unsigned short secret_size;
144 unsigned short b_public_size; 144 unsigned short b_public_size;
145 unsigned short expected_a_public_size; 145 unsigned short expected_a_public_size;
146 unsigned short expected_ss_size; 146 unsigned short expected_ss_size;
147}; 147};
148 148
149static char zeroed_string[48]; 149static const char zeroed_string[48];
150 150
151/* 151/*
152 * RSA test vectors. Borrowed from openSSL. 152 * RSA test vectors. Borrowed from openSSL.
153 */ 153 */
154static struct akcipher_testvec rsa_tv_template[] = { 154static const struct akcipher_testvec rsa_tv_template[] = {
155 { 155 {
156#ifndef CONFIG_CRYPTO_FIPS 156#ifndef CONFIG_CRYPTO_FIPS
157 .key = 157 .key =
@@ -538,7 +538,7 @@ static struct akcipher_testvec rsa_tv_template[] = {
538 } 538 }
539}; 539};
540 540
541struct kpp_testvec dh_tv_template[] = { 541static const struct kpp_testvec dh_tv_template[] = {
542 { 542 {
543 .secret = 543 .secret =
544#ifdef __LITTLE_ENDIAN 544#ifdef __LITTLE_ENDIAN
@@ -755,7 +755,7 @@ struct kpp_testvec dh_tv_template[] = {
755 } 755 }
756}; 756};
757 757
758struct kpp_testvec ecdh_tv_template[] = { 758static const struct kpp_testvec ecdh_tv_template[] = {
759 { 759 {
760#ifndef CONFIG_CRYPTO_FIPS 760#ifndef CONFIG_CRYPTO_FIPS
761 .secret = 761 .secret =
@@ -846,7 +846,7 @@ struct kpp_testvec ecdh_tv_template[] = {
846/* 846/*
847 * MD4 test vectors from RFC1320 847 * MD4 test vectors from RFC1320
848 */ 848 */
849static struct hash_testvec md4_tv_template [] = { 849static const struct hash_testvec md4_tv_template[] = {
850 { 850 {
851 .plaintext = "", 851 .plaintext = "",
852 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31" 852 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
@@ -887,7 +887,7 @@ static struct hash_testvec md4_tv_template [] = {
887 }, 887 },
888}; 888};
889 889
890static struct hash_testvec sha3_224_tv_template[] = { 890static const struct hash_testvec sha3_224_tv_template[] = {
891 { 891 {
892 .plaintext = "", 892 .plaintext = "",
893 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7" 893 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
@@ -912,7 +912,7 @@ static struct hash_testvec sha3_224_tv_template[] = {
912 }, 912 },
913}; 913};
914 914
915static struct hash_testvec sha3_256_tv_template[] = { 915static const struct hash_testvec sha3_256_tv_template[] = {
916 { 916 {
917 .plaintext = "", 917 .plaintext = "",
918 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66" 918 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
@@ -938,7 +938,7 @@ static struct hash_testvec sha3_256_tv_template[] = {
938}; 938};
939 939
940 940
941static struct hash_testvec sha3_384_tv_template[] = { 941static const struct hash_testvec sha3_384_tv_template[] = {
942 { 942 {
943 .plaintext = "", 943 .plaintext = "",
944 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d" 944 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
@@ -970,7 +970,7 @@ static struct hash_testvec sha3_384_tv_template[] = {
970}; 970};
971 971
972 972
973static struct hash_testvec sha3_512_tv_template[] = { 973static const struct hash_testvec sha3_512_tv_template[] = {
974 { 974 {
975 .plaintext = "", 975 .plaintext = "",
976 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5" 976 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
@@ -1011,7 +1011,7 @@ static struct hash_testvec sha3_512_tv_template[] = {
1011/* 1011/*
1012 * MD5 test vectors from RFC1321 1012 * MD5 test vectors from RFC1321
1013 */ 1013 */
1014static struct hash_testvec md5_tv_template[] = { 1014static const struct hash_testvec md5_tv_template[] = {
1015 { 1015 {
1016 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04" 1016 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
1017 "\xe9\x80\x09\x98\xec\xf8\x42\x7e", 1017 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
@@ -1055,7 +1055,7 @@ static struct hash_testvec md5_tv_template[] = {
1055/* 1055/*
1056 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E) 1056 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
1057 */ 1057 */
1058static struct hash_testvec rmd128_tv_template[] = { 1058static const struct hash_testvec rmd128_tv_template[] = {
1059 { 1059 {
1060 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e" 1060 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
1061 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46", 1061 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
@@ -1117,7 +1117,7 @@ static struct hash_testvec rmd128_tv_template[] = {
1117/* 1117/*
1118 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E) 1118 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
1119 */ 1119 */
1120static struct hash_testvec rmd160_tv_template[] = { 1120static const struct hash_testvec rmd160_tv_template[] = {
1121 { 1121 {
1122 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28" 1122 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
1123 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31", 1123 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
@@ -1179,7 +1179,7 @@ static struct hash_testvec rmd160_tv_template[] = {
1179/* 1179/*
1180 * RIPEMD-256 test vectors 1180 * RIPEMD-256 test vectors
1181 */ 1181 */
1182static struct hash_testvec rmd256_tv_template[] = { 1182static const struct hash_testvec rmd256_tv_template[] = {
1183 { 1183 {
1184 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18" 1184 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
1185 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a" 1185 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
@@ -1245,7 +1245,7 @@ static struct hash_testvec rmd256_tv_template[] = {
1245/* 1245/*
1246 * RIPEMD-320 test vectors 1246 * RIPEMD-320 test vectors
1247 */ 1247 */
1248static struct hash_testvec rmd320_tv_template[] = { 1248static const struct hash_testvec rmd320_tv_template[] = {
1249 { 1249 {
1250 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1" 1250 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
1251 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25" 1251 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
@@ -1308,7 +1308,7 @@ static struct hash_testvec rmd320_tv_template[] = {
1308 } 1308 }
1309}; 1309};
1310 1310
1311static struct hash_testvec crct10dif_tv_template[] = { 1311static const struct hash_testvec crct10dif_tv_template[] = {
1312 { 1312 {
1313 .plaintext = "abc", 1313 .plaintext = "abc",
1314 .psize = 3, 1314 .psize = 3,
@@ -1358,7 +1358,7 @@ static struct hash_testvec crct10dif_tv_template[] = {
1358 * SHA1 test vectors from FIPS PUB 180-1 1358 * SHA1 test vectors from FIPS PUB 180-1
1359 * Long vector from CAVS 5.0 1359 * Long vector from CAVS 5.0
1360 */ 1360 */
1361static struct hash_testvec sha1_tv_template[] = { 1361static const struct hash_testvec sha1_tv_template[] = {
1362 { 1362 {
1363 .plaintext = "", 1363 .plaintext = "",
1364 .psize = 0, 1364 .psize = 0,
@@ -1548,7 +1548,7 @@ static struct hash_testvec sha1_tv_template[] = {
1548/* 1548/*
1549 * SHA224 test vectors from FIPS PUB 180-2 1549 * SHA224 test vectors from FIPS PUB 180-2
1550 */ 1550 */
1551static struct hash_testvec sha224_tv_template[] = { 1551static const struct hash_testvec sha224_tv_template[] = {
1552 { 1552 {
1553 .plaintext = "", 1553 .plaintext = "",
1554 .psize = 0, 1554 .psize = 0,
@@ -1720,7 +1720,7 @@ static struct hash_testvec sha224_tv_template[] = {
1720/* 1720/*
1721 * SHA256 test vectors from from NIST 1721 * SHA256 test vectors from from NIST
1722 */ 1722 */
1723static struct hash_testvec sha256_tv_template[] = { 1723static const struct hash_testvec sha256_tv_template[] = {
1724 { 1724 {
1725 .plaintext = "", 1725 .plaintext = "",
1726 .psize = 0, 1726 .psize = 0,
@@ -1891,7 +1891,7 @@ static struct hash_testvec sha256_tv_template[] = {
1891/* 1891/*
1892 * SHA384 test vectors from from NIST and kerneli 1892 * SHA384 test vectors from from NIST and kerneli
1893 */ 1893 */
1894static struct hash_testvec sha384_tv_template[] = { 1894static const struct hash_testvec sha384_tv_template[] = {
1895 { 1895 {
1896 .plaintext = "", 1896 .plaintext = "",
1897 .psize = 0, 1897 .psize = 0,
@@ -2083,7 +2083,7 @@ static struct hash_testvec sha384_tv_template[] = {
2083/* 2083/*
2084 * SHA512 test vectors from from NIST and kerneli 2084 * SHA512 test vectors from from NIST and kerneli
2085 */ 2085 */
2086static struct hash_testvec sha512_tv_template[] = { 2086static const struct hash_testvec sha512_tv_template[] = {
2087 { 2087 {
2088 .plaintext = "", 2088 .plaintext = "",
2089 .psize = 0, 2089 .psize = 0,
@@ -2290,7 +2290,7 @@ static struct hash_testvec sha512_tv_template[] = {
2290 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE 2290 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
2291 * submission 2291 * submission
2292 */ 2292 */
2293static struct hash_testvec wp512_tv_template[] = { 2293static const struct hash_testvec wp512_tv_template[] = {
2294 { 2294 {
2295 .plaintext = "", 2295 .plaintext = "",
2296 .psize = 0, 2296 .psize = 0,
@@ -2386,7 +2386,7 @@ static struct hash_testvec wp512_tv_template[] = {
2386 }, 2386 },
2387}; 2387};
2388 2388
2389static struct hash_testvec wp384_tv_template[] = { 2389static const struct hash_testvec wp384_tv_template[] = {
2390 { 2390 {
2391 .plaintext = "", 2391 .plaintext = "",
2392 .psize = 0, 2392 .psize = 0,
@@ -2466,7 +2466,7 @@ static struct hash_testvec wp384_tv_template[] = {
2466 }, 2466 },
2467}; 2467};
2468 2468
2469static struct hash_testvec wp256_tv_template[] = { 2469static const struct hash_testvec wp256_tv_template[] = {
2470 { 2470 {
2471 .plaintext = "", 2471 .plaintext = "",
2472 .psize = 0, 2472 .psize = 0,
@@ -2533,7 +2533,7 @@ static struct hash_testvec wp256_tv_template[] = {
2533/* 2533/*
2534 * TIGER test vectors from Tiger website 2534 * TIGER test vectors from Tiger website
2535 */ 2535 */
2536static struct hash_testvec tgr192_tv_template[] = { 2536static const struct hash_testvec tgr192_tv_template[] = {
2537 { 2537 {
2538 .plaintext = "", 2538 .plaintext = "",
2539 .psize = 0, 2539 .psize = 0,
@@ -2576,7 +2576,7 @@ static struct hash_testvec tgr192_tv_template[] = {
2576 }, 2576 },
2577}; 2577};
2578 2578
2579static struct hash_testvec tgr160_tv_template[] = { 2579static const struct hash_testvec tgr160_tv_template[] = {
2580 { 2580 {
2581 .plaintext = "", 2581 .plaintext = "",
2582 .psize = 0, 2582 .psize = 0,
@@ -2619,7 +2619,7 @@ static struct hash_testvec tgr160_tv_template[] = {
2619 }, 2619 },
2620}; 2620};
2621 2621
2622static struct hash_testvec tgr128_tv_template[] = { 2622static const struct hash_testvec tgr128_tv_template[] = {
2623 { 2623 {
2624 .plaintext = "", 2624 .plaintext = "",
2625 .psize = 0, 2625 .psize = 0,
@@ -2656,7 +2656,7 @@ static struct hash_testvec tgr128_tv_template[] = {
2656 }, 2656 },
2657}; 2657};
2658 2658
2659static struct hash_testvec ghash_tv_template[] = 2659static const struct hash_testvec ghash_tv_template[] =
2660{ 2660{
2661 { 2661 {
2662 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03" 2662 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
@@ -2771,7 +2771,7 @@ static struct hash_testvec ghash_tv_template[] =
2771 * HMAC-MD5 test vectors from RFC2202 2771 * HMAC-MD5 test vectors from RFC2202
2772 * (These need to be fixed to not use strlen). 2772 * (These need to be fixed to not use strlen).
2773 */ 2773 */
2774static struct hash_testvec hmac_md5_tv_template[] = 2774static const struct hash_testvec hmac_md5_tv_template[] =
2775{ 2775{
2776 { 2776 {
2777 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", 2777 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
@@ -2851,7 +2851,7 @@ static struct hash_testvec hmac_md5_tv_template[] =
2851/* 2851/*
2852 * HMAC-RIPEMD128 test vectors from RFC2286 2852 * HMAC-RIPEMD128 test vectors from RFC2286
2853 */ 2853 */
2854static struct hash_testvec hmac_rmd128_tv_template[] = { 2854static const struct hash_testvec hmac_rmd128_tv_template[] = {
2855 { 2855 {
2856 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", 2856 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
2857 .ksize = 16, 2857 .ksize = 16,
@@ -2930,7 +2930,7 @@ static struct hash_testvec hmac_rmd128_tv_template[] = {
2930/* 2930/*
2931 * HMAC-RIPEMD160 test vectors from RFC2286 2931 * HMAC-RIPEMD160 test vectors from RFC2286
2932 */ 2932 */
2933static struct hash_testvec hmac_rmd160_tv_template[] = { 2933static const struct hash_testvec hmac_rmd160_tv_template[] = {
2934 { 2934 {
2935 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", 2935 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
2936 .ksize = 20, 2936 .ksize = 20,
@@ -3009,7 +3009,7 @@ static struct hash_testvec hmac_rmd160_tv_template[] = {
3009/* 3009/*
3010 * HMAC-SHA1 test vectors from RFC2202 3010 * HMAC-SHA1 test vectors from RFC2202
3011 */ 3011 */
3012static struct hash_testvec hmac_sha1_tv_template[] = { 3012static const struct hash_testvec hmac_sha1_tv_template[] = {
3013 { 3013 {
3014 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", 3014 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
3015 .ksize = 20, 3015 .ksize = 20,
@@ -3090,7 +3090,7 @@ static struct hash_testvec hmac_sha1_tv_template[] = {
3090/* 3090/*
3091 * SHA224 HMAC test vectors from RFC4231 3091 * SHA224 HMAC test vectors from RFC4231
3092 */ 3092 */
3093static struct hash_testvec hmac_sha224_tv_template[] = { 3093static const struct hash_testvec hmac_sha224_tv_template[] = {
3094 { 3094 {
3095 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" 3095 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3096 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b" 3096 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -3203,7 +3203,7 @@ static struct hash_testvec hmac_sha224_tv_template[] = {
3203 * HMAC-SHA256 test vectors from 3203 * HMAC-SHA256 test vectors from
3204 * draft-ietf-ipsec-ciph-sha-256-01.txt 3204 * draft-ietf-ipsec-ciph-sha-256-01.txt
3205 */ 3205 */
3206static struct hash_testvec hmac_sha256_tv_template[] = { 3206static const struct hash_testvec hmac_sha256_tv_template[] = {
3207 { 3207 {
3208 .key = "\x01\x02\x03\x04\x05\x06\x07\x08" 3208 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
3209 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" 3209 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
@@ -3338,7 +3338,7 @@ static struct hash_testvec hmac_sha256_tv_template[] = {
 	},
 };
 
-static struct hash_testvec aes_cmac128_tv_template[] = {
+static const struct hash_testvec aes_cmac128_tv_template[] = {
 	{ /* From NIST Special Publication 800-38B, AES-128 */
 		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
 			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
@@ -3413,7 +3413,7 @@ static struct hash_testvec aes_cmac128_tv_template[] = {
 	}
 };
 
-static struct hash_testvec aes_cbcmac_tv_template[] = {
+static const struct hash_testvec aes_cbcmac_tv_template[] = {
 	{
 		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
 			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
@@ -3473,7 +3473,7 @@ static struct hash_testvec aes_cbcmac_tv_template[] = {
 	}
 };
 
-static struct hash_testvec des3_ede_cmac64_tv_template[] = {
+static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
 /*
  * From NIST Special Publication 800-38B, Three Key TDEA
  * Corrected test vectors from:
@@ -3519,7 +3519,7 @@ static struct hash_testvec des3_ede_cmac64_tv_template[] = {
 	}
 };
 
-static struct hash_testvec aes_xcbc128_tv_template[] = {
+static const struct hash_testvec aes_xcbc128_tv_template[] = {
 	{
 		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
@@ -3585,35 +3585,35 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
 	}
 };
 
-static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
+static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
 				'\x02', '\x03', '\x02', '\x02',
 				'\x02', '\x04', '\x01', '\x07',
 				'\x04', '\x01', '\x04', '\x03',};
-static char vmac_string2[128] = {'a', 'b', 'c',};
-static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+static const char vmac_string2[128] = {'a', 'b', 'c',};
+static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				'a', 'b', 'c', 'a', 'b', 'c',
 				};
 
-static char vmac_string4[17] = {'b', 'c', 'e', 'f',
+static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
 				'i', 'j', 'l', 'm',
 				'o', 'p', 'r', 's',
 				't', 'u', 'w', 'x', 'z'};
 
-static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
+static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
 				 'o', 'l', 'k', ']', '%',
 				 '9', '2', '7', '!', 'A'};
 
-static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
+static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
 				 'i', '!', '#', 'w', '0',
 				 'z', '/', '4', 'A', 'n'};
 
-static struct hash_testvec aes_vmac128_tv_template[] = {
+static const struct hash_testvec aes_vmac128_tv_template[] = {
 	{
 		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
@@ -3691,7 +3691,7 @@ static struct hash_testvec aes_vmac128_tv_template[] = {
  * SHA384 HMAC test vectors from RFC4231
  */
 
-static struct hash_testvec hmac_sha384_tv_template[] = {
+static const struct hash_testvec hmac_sha384_tv_template[] = {
 	{
 		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
 			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -3789,7 +3789,7 @@ static struct hash_testvec hmac_sha384_tv_template[] = {
  * SHA512 HMAC test vectors from RFC4231
  */
 
-static struct hash_testvec hmac_sha512_tv_template[] = {
+static const struct hash_testvec hmac_sha512_tv_template[] = {
 	{
 		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
 			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -3894,7 +3894,7 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
 	},
 };
 
-static struct hash_testvec hmac_sha3_224_tv_template[] = {
+static const struct hash_testvec hmac_sha3_224_tv_template[] = {
 	{
 		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
 			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -3983,7 +3983,7 @@ static struct hash_testvec hmac_sha3_224_tv_template[] = {
 	},
 };
 
-static struct hash_testvec hmac_sha3_256_tv_template[] = {
+static const struct hash_testvec hmac_sha3_256_tv_template[] = {
 	{
 		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
 			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -4072,7 +4072,7 @@ static struct hash_testvec hmac_sha3_256_tv_template[] = {
 	},
 };
 
-static struct hash_testvec hmac_sha3_384_tv_template[] = {
+static const struct hash_testvec hmac_sha3_384_tv_template[] = {
 	{
 		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
 			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -4169,7 +4169,7 @@ static struct hash_testvec hmac_sha3_384_tv_template[] = {
 	},
 };
 
-static struct hash_testvec hmac_sha3_512_tv_template[] = {
+static const struct hash_testvec hmac_sha3_512_tv_template[] = {
 	{
 		.key	= "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
 			  "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
@@ -4278,7 +4278,7 @@ static struct hash_testvec hmac_sha3_512_tv_template[] = {
  * Poly1305 test vectors from RFC7539 A.3.
  */
 
-static struct hash_testvec poly1305_tv_template[] = {
+static const struct hash_testvec poly1305_tv_template[] = {
 	{ /* Test Vector #1 */
 		.plaintext	= "\x00\x00\x00\x00\x00\x00\x00\x00"
 				  "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -4523,7 +4523,7 @@ static struct hash_testvec poly1305_tv_template[] = {
 /*
  * DES test vectors.
  */
-static struct cipher_testvec des_enc_tv_template[] = {
+static const struct cipher_testvec des_enc_tv_template[] = {
 	{ /* From Applied Cryptography */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef",
 		.klen	= 8,
@@ -4697,7 +4697,7 @@ static struct cipher_testvec des_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des_dec_tv_template[] = {
+static const struct cipher_testvec des_dec_tv_template[] = {
 	{ /* From Applied Cryptography */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef",
 		.klen	= 8,
@@ -4807,7 +4807,7 @@ static struct cipher_testvec des_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des_cbc_enc_tv_template[] = {
+static const struct cipher_testvec des_cbc_enc_tv_template[] = {
 	{ /* From OpenSSL */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef",
 		.klen	= 8,
@@ -4933,7 +4933,7 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des_cbc_dec_tv_template[] = {
+static const struct cipher_testvec des_cbc_dec_tv_template[] = {
 	{ /* FIPS Pub 81 */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef",
 		.klen	= 8,
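
The cipher_testvec arrays come in enc/dec pairs: the same .key/.klen, with input and result swapped. A sketch of how a kernel caller would drive one of these modes through the skcipher API (cbc_des_encrypt is a hypothetical wrapper; a real caller must also handle -EINPROGRESS/-EBUSY from asynchronous implementations, e.g. via a completion):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* sketch: one-shot cbc(des) encryption of an in-place buffer */
static int cbc_des_encrypt(const u8 *key, unsigned int klen,
			   u8 *iv /* 8 bytes */,
			   u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(des)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, klen);
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_skcipher_encrypt(req); /* async completion not handled */

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
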
@@ -5042,7 +5042,7 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des_ctr_enc_tv_template[] = {
+static const struct cipher_testvec des_ctr_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
 		.klen	= 8,
@@ -5188,7 +5188,7 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des_ctr_dec_tv_template[] = {
+static const struct cipher_testvec des_ctr_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
 		.klen	= 8,
@@ -5334,7 +5334,7 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des3_ede_enc_tv_template[] = {
+static const struct cipher_testvec des3_ede_enc_tv_template[] = {
 	{ /* These are from openssl */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef"
 			  "\x55\x55\x55\x55\x55\x55\x55\x55"
@@ -5499,7 +5499,7 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des3_ede_dec_tv_template[] = {
+static const struct cipher_testvec des3_ede_dec_tv_template[] = {
 	{ /* These are from openssl */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef"
 			  "\x55\x55\x55\x55\x55\x55\x55\x55"
@@ -5664,7 +5664,7 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
+static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
 	{ /* Generated from openssl */
 		.key	= "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
 			  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
@@ -5844,7 +5844,7 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
+static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
 	{ /* Generated from openssl */
 		.key	= "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
 			  "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
@@ -6024,7 +6024,7 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
+static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
 			  "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -6302,7 +6302,7 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
+static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
 			  "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -6583,7 +6583,7 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
 /*
  * Blowfish test vectors.
  */
-static struct cipher_testvec bf_enc_tv_template[] = {
+static const struct cipher_testvec bf_enc_tv_template[] = {
 	{ /* DES test vectors from OpenSSL */
 		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen	= 8,
@@ -6775,7 +6775,7 @@ static struct cipher_testvec bf_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec bf_dec_tv_template[] = {
+static const struct cipher_testvec bf_dec_tv_template[] = {
 	{ /* DES test vectors from OpenSSL */
 		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00",
 		.klen	= 8,
@@ -6967,7 +6967,7 @@ static struct cipher_testvec bf_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec bf_cbc_enc_tv_template[] = {
+static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
 	{ /* From OpenSSL */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef"
 			  "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
@@ -7124,7 +7124,7 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec bf_cbc_dec_tv_template[] = {
+static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
 	{ /* From OpenSSL */
 		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef"
 			  "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
@@ -7281,7 +7281,7 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec bf_ctr_enc_tv_template[] = {
+static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -7693,7 +7693,7 @@ static struct cipher_testvec bf_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec bf_ctr_dec_tv_template[] = {
+static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -8108,7 +8108,7 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
 /*
  * Twofish test vectors.
  */
-static struct cipher_testvec tf_enc_tv_template[] = {
+static const struct cipher_testvec tf_enc_tv_template[] = {
 	{
 		.key	= zeroed_string,
 		.klen	= 16,
@@ -8276,7 +8276,7 @@ static struct cipher_testvec tf_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_dec_tv_template[] = {
+static const struct cipher_testvec tf_dec_tv_template[] = {
 	{
 		.key	= zeroed_string,
 		.klen	= 16,
@@ -8444,7 +8444,7 @@ static struct cipher_testvec tf_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_cbc_enc_tv_template[] = {
+static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
 	{ /* Generated with Nettle */
 		.key	= zeroed_string,
 		.klen	= 16,
@@ -8627,7 +8627,7 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_cbc_dec_tv_template[] = {
+static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
 	{ /* Reverse of the first four above */
 		.key	= zeroed_string,
 		.klen	= 16,
@@ -8810,7 +8810,7 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_ctr_enc_tv_template[] = {
+static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -9221,7 +9221,7 @@ static struct cipher_testvec tf_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_ctr_dec_tv_template[] = {
+static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -9632,7 +9632,7 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_lrw_enc_tv_template[] = {
+static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
 	/* Generated from AES-LRW test vectors */
 	{
 		.key	= "\x45\x62\xac\x25\xf8\x28\x17\x6d"
@@ -9884,7 +9884,7 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_lrw_dec_tv_template[] = {
+static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
 	/* Generated from AES-LRW test vectors */
 	/* same as enc vectors with input and result reversed */
 	{
@@ -10137,7 +10137,7 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_xts_enc_tv_template[] = {
+static const struct cipher_testvec tf_xts_enc_tv_template[] = {
 	/* Generated from AES-XTS test vectors */
 {
 		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -10479,7 +10479,7 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tf_xts_dec_tv_template[] = {
+static const struct cipher_testvec tf_xts_dec_tv_template[] = {
 	/* Generated from AES-XTS test vectors */
 	/* same as enc vectors with input and result reversed */
 	{
@@ -10826,7 +10826,7 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = {
  * Serpent test vectors.  These are backwards because Serpent writes
  * octet sequences in right-to-left mode.
  */
-static struct cipher_testvec serpent_enc_tv_template[] = {
+static const struct cipher_testvec serpent_enc_tv_template[] = {
 	{
 		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
@@ -11002,7 +11002,7 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tnepres_enc_tv_template[] = {
+static const struct cipher_testvec tnepres_enc_tv_template[] = {
 	{ /* KeySize=128, PT=0, I=1 */
 		.input	= "\x00\x00\x00\x00\x00\x00\x00\x00"
 			  "\x00\x00\x00\x00\x00\x00\x00\x00",
@@ -11052,7 +11052,7 @@ static struct cipher_testvec tnepres_enc_tv_template[] = {
 };
 
 
-static struct cipher_testvec serpent_dec_tv_template[] = {
+static const struct cipher_testvec serpent_dec_tv_template[] = {
 	{
 		.input	= "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
 			  "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
@@ -11228,7 +11228,7 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec tnepres_dec_tv_template[] = {
+static const struct cipher_testvec tnepres_dec_tv_template[] = {
 	{
 		.input	= "\x41\xcc\x6b\x31\x59\x31\x45\x97"
 			  "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
@@ -11269,7 +11269,7 @@ static struct cipher_testvec tnepres_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
+static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -11410,7 +11410,7 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
+static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -11551,7 +11551,7 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
+static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -11962,7 +11962,7 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
+static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -12373,7 +12373,7 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
+static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
 	/* Generated from AES-LRW test vectors */
 	{
 		.key	= "\x45\x62\xac\x25\xf8\x28\x17\x6d"
@@ -12625,7 +12625,7 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
+static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
 	/* Generated from AES-LRW test vectors */
 	/* same as enc vectors with input and result reversed */
 	{
@@ -12878,7 +12878,7 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_xts_enc_tv_template[] = {
+static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
 	/* Generated from AES-XTS test vectors */
 	{
 		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -13220,7 +13220,7 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec serpent_xts_dec_tv_template[] = {
+static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
 	/* Generated from AES-XTS test vectors */
 	/* same as enc vectors with input and result reversed */
 	{
@@ -13564,7 +13564,7 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = {
 };
 
 /* Cast6 test vectors from RFC 2612 */
-static struct cipher_testvec cast6_enc_tv_template[] = {
+static const struct cipher_testvec cast6_enc_tv_template[] = {
 	{
 		.key	= "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
 			  "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
@@ -13735,7 +13735,7 @@ static struct cipher_testvec cast6_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_dec_tv_template[] = {
+static const struct cipher_testvec cast6_dec_tv_template[] = {
 	{
 		.key	= "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
 			  "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
@@ -13906,7 +13906,7 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
+static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -14047,7 +14047,7 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
+static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -14188,7 +14188,7 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
+static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -14345,7 +14345,7 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
+static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -14502,7 +14502,7 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
+static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
 			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
@@ -14649,7 +14649,7 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
+static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
 			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
@@ -14796,7 +14796,7 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_xts_enc_tv_template[] = {
+static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
 			  "\x23\x53\x60\x28\x74\x71\x35\x26"
@@ -14945,7 +14945,7 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast6_xts_dec_tv_template[] = {
+static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
 	{ /* Generated from TF test vectors */
 		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
 			  "\x23\x53\x60\x28\x74\x71\x35\x26"
@@ -15098,7 +15098,7 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
 /*
  * AES test vectors.
 */
-static struct cipher_testvec aes_enc_tv_template[] = {
+static const struct cipher_testvec aes_enc_tv_template[] = {
 	{ /* From FIPS-197 */
 		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
@@ -15270,7 +15270,7 @@ static struct cipher_testvec aes_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_dec_tv_template[] = {
+static const struct cipher_testvec aes_dec_tv_template[] = {
 	{ /* From FIPS-197 */
 		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
@@ -15442,7 +15442,7 @@ static struct cipher_testvec aes_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_cbc_enc_tv_template[] = {
+static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
 	{ /* From RFC 3602 */
 		.key	= "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
 			  "\x51\x2e\x03\xd5\x34\x12\x00\x06",
@@ -15664,7 +15664,7 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_cbc_dec_tv_template[] = {
+static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
 	{ /* From RFC 3602 */
 		.key	= "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
 			  "\x51\x2e\x03\xd5\x34\x12\x00\x06",
@@ -15886,7 +15886,7 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
+static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
 	{ /* Input data from RFC 2410 Case 1 */
 #ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
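
The authenc-style aead_testvec keys are not raw keys: each .key string begins with an rtattr header (hence the "rta length" comment and the __LITTLE_ENDIAN guard, since the header's byte layout is endian-dependent) carrying the encryption-key length, followed by the raw auth key and then the raw enc key. A sketch of that layout, assuming the <crypto/authenc.h> definitions (build_authenc_key is a hypothetical helper, not kernel code):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

/*
 * sketch of the blob these .key strings encode: an rtattr of type
 * CRYPTO_AUTHENC_KEYA_PARAM holding the enc-key length, then the
 * auth key, then the enc key.
 */
static unsigned int build_authenc_key(u8 *buf,
				      const u8 *authkey, unsigned int alen,
				      const u8 *enckey, unsigned int elen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(elen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, alen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);
	return RTA_SPACE(sizeof(*param)) + alen + elen;
}
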
@@ -15928,7 +15928,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
 	},
 };
 
-static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
+static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
 	{
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -15970,7 +15970,7 @@ static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 	{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -16239,7 +16239,7 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
 	{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -16285,7 +16285,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
 	{
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -16331,7 +16331,7 @@ static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 	{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -16614,7 +16614,7 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 	{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -16953,7 +16953,7 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17012,7 +17012,7 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17071,7 +17071,7 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17132,7 +17132,7 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17197,7 +17197,7 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17266,7 +17266,7 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17327,7 +17327,7 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17388,7 +17388,7 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17451,7 +17451,7 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17518,7 +17518,7 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
+static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
 	{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
 		.key    = "\x08\x00"		/* rta length */
@@ -17589,7 +17589,7 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
 	},
 };
 
-static struct cipher_testvec aes_lrw_enc_tv_template[] = {
+static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
 	/* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
 	{ /* LRW-32-AES 1 */
 		.key    = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
@@ -17842,7 +17842,7 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec aes_lrw_dec_tv_template[] = {
+static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
 	/* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
 	/* same as enc vectors with input and result reversed */
 	{ /* LRW-32-AES 1 */
@@ -18096,7 +18096,7 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec aes_xts_enc_tv_template[] = {
+static const struct cipher_testvec aes_xts_enc_tv_template[] = {
 	/* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
 	{ /* XTS-AES 1 */
 		.key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18439,7 +18439,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec aes_xts_dec_tv_template[] = {
+static const struct cipher_testvec aes_xts_dec_tv_template[] = {
 	/* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
 	{ /* XTS-AES 1 */
 		.key    = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18783,7 +18783,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
 };
 
 
-static struct cipher_testvec aes_ctr_enc_tv_template[] = {
+static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
 	{ /* From NIST Special Publication 800-38A, Appendix F.5 */
 		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
 			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
@@ -19138,7 +19138,7 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_ctr_dec_tv_template[] = {
+static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
 	{ /* From NIST Special Publication 800-38A, Appendix F.5 */
 		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
 			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
@@ -19493,7 +19493,7 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
+static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
 	{ /* From RFC 3686 */
 		.key	= "\xae\x68\x52\xf8\x12\x10\x67\xcc"
 			  "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
@@ -20625,7 +20625,7 @@ static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
+static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
 	{ /* From RFC 3686 */
 		.key	= "\xae\x68\x52\xf8\x12\x10\x67\xcc"
 			  "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
@@ -20716,7 +20716,7 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_ofb_enc_tv_template[] = {
+static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
 	/* From NIST Special Publication 800-38A, Appendix F.5 */
 	{
 		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
@@ -20745,7 +20745,7 @@ static struct cipher_testvec aes_ofb_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec aes_ofb_dec_tv_template[] = {
+static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
 	/* From NIST Special Publication 800-38A, Appendix F.5 */
 	{
 		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
@@ -20774,7 +20774,7 @@ static struct cipher_testvec aes_ofb_dec_tv_template[] = {
 	}
 };
 
-static struct aead_testvec aes_gcm_enc_tv_template[] = {
+static const struct aead_testvec aes_gcm_enc_tv_template[] = {
 	{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
 		.key    = zeroed_string,
 		.klen	= 16,
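
The GCM templates exercise the AEAD interface, where associated data and plaintext travel in one scatterlist and the authentication tag is appended to the ciphertext. A sketch of a one-shot gcm(aes) encryption with the aead API (gcm_aes_seal is a hypothetical wrapper; asynchronous return codes are not handled):

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* sketch: gcm(aes) seal of buf = assoc || plaintext, tag appended */
static int gcm_aes_seal(const u8 *key, unsigned int klen,
			u8 *iv /* 12 bytes */,
			u8 *buf, unsigned int assoclen,
			unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, klen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* buf must have 16 spare bytes after the plaintext for the tag */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	err = crypto_aead_encrypt(req);	/* async completion not handled */

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}
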
@@ -20934,7 +20934,7 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = {
20934 } 20934 }
20935}; 20935};
20936 20936
20937static struct aead_testvec aes_gcm_dec_tv_template[] = { 20937static const struct aead_testvec aes_gcm_dec_tv_template[] = {
20938 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ 20938 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
20939 .key = zeroed_string, 20939 .key = zeroed_string,
20940 .klen = 32, 20940 .klen = 32,
@@ -21136,7 +21136,7 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
21136 } 21136 }
21137}; 21137};
21138 21138
21139static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { 21139static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
21140 { /* Generated using Crypto++ */ 21140 { /* Generated using Crypto++ */
21141 .key = zeroed_string, 21141 .key = zeroed_string,
21142 .klen = 20, 21142 .klen = 20,
@@ -21749,7 +21749,7 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
21749 } 21749 }
21750}; 21750};
21751 21751
21752static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { 21752static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
21753 { /* Generated using Crypto++ */ 21753 { /* Generated using Crypto++ */
21754 .key = zeroed_string, 21754 .key = zeroed_string,
21755 .klen = 20, 21755 .klen = 20,
@@ -22363,7 +22363,7 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
22363 } 22363 }
22364}; 22364};
22365 22365
22366static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = { 22366static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
22367 { /* From draft-mcgrew-gcm-test-01 */ 22367 { /* From draft-mcgrew-gcm-test-01 */
22368 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" 22368 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
22369 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" 22369 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
@@ -22394,7 +22394,7 @@ static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
22394 } 22394 }
22395}; 22395};
22396 22396
22397static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = { 22397static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
22398 { /* From draft-mcgrew-gcm-test-01 */ 22398 { /* From draft-mcgrew-gcm-test-01 */
22399 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" 22399 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
22400 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" 22400 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
@@ -22453,7 +22453,7 @@ static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
22453 }, 22453 },
22454}; 22454};
22455 22455
22456static struct aead_testvec aes_ccm_enc_tv_template[] = { 22456static const struct aead_testvec aes_ccm_enc_tv_template[] = {
22457 { /* From RFC 3610 */ 22457 { /* From RFC 3610 */
22458 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" 22458 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
22459 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", 22459 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
@@ -22737,7 +22737,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
22737 } 22737 }
22738}; 22738};
22739 22739
22740static struct aead_testvec aes_ccm_dec_tv_template[] = { 22740static const struct aead_testvec aes_ccm_dec_tv_template[] = {
22741 { /* From RFC 3610 */ 22741 { /* From RFC 3610 */
22742 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" 22742 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
22743 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", 22743 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
@@ -23069,7 +23069,7 @@ static struct aead_testvec aes_ccm_dec_tv_template[] = {
  * These vectors are copied/generated from the ones for rfc4106 with
  * the key truncated by one byte.
  */
-static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
 	{ /* Generated using Crypto++ */
 	.key = zeroed_string,
 	.klen = 19,
@@ -23682,7 +23682,7 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
 	}
 };
 
-static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
+static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 	{ /* Generated using Crypto++ */
 	.key = zeroed_string,
 	.klen = 19,
@@ -24298,7 +24298,7 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 /*
  * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
  */
-static struct aead_testvec rfc7539_enc_tv_template[] = {
+static const struct aead_testvec rfc7539_enc_tv_template[] = {
 	{
 	.key = "\x80\x81\x82\x83\x84\x85\x86\x87"
 		"\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
@@ -24430,7 +24430,7 @@ static struct aead_testvec rfc7539_enc_tv_template[] = {
 	},
 };
 
-static struct aead_testvec rfc7539_dec_tv_template[] = {
+static const struct aead_testvec rfc7539_dec_tv_template[] = {
 	{
 	.key = "\x80\x81\x82\x83\x84\x85\x86\x87"
 		"\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
@@ -24565,7 +24565,7 @@ static struct aead_testvec rfc7539_dec_tv_template[] = {
 /*
  * draft-irtf-cfrg-chacha20-poly1305
  */
-static struct aead_testvec rfc7539esp_enc_tv_template[] = {
+static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
 	{
 	.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
 		"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -24653,7 +24653,7 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = {
 	},
 };
 
-static struct aead_testvec rfc7539esp_dec_tv_template[] = {
+static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
 	{
 	.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
 		"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -24749,7 +24749,7 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = {
  * semiblock of the ciphertext from the test vector. For decryption, iv is
  * the first semiblock of the ciphertext.
  */
-static struct cipher_testvec aes_kw_enc_tv_template[] = {
+static const struct cipher_testvec aes_kw_enc_tv_template[] = {
 	{
 	.key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
 		"\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
@@ -24764,7 +24764,7 @@ static struct cipher_testvec aes_kw_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec aes_kw_dec_tv_template[] = {
+static const struct cipher_testvec aes_kw_dec_tv_template[] = {
 	{
 	.key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
 		"\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
@@ -24787,7 +24787,7 @@ static struct cipher_testvec aes_kw_dec_tv_template[] = {
  * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
  * Only AES-128 is supported at this time.
  */
-static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
+static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
 	{
 	.key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
 		"\xed\x06\x1c\xab\xb8\xd4\x62\x02",
@@ -24883,7 +24883,7 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
  * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
  * w/o personalization string, w/ and w/o additional input string).
  */
-static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
+static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
@@ -25041,7 +25041,7 @@ static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
 	},
 };
 
-static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
+static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
@@ -25199,7 +25199,7 @@ static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
 	},
 };
 
-static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
+static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
@@ -25323,7 +25323,7 @@ static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
  * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
  * w/o personalization string, w/ and w/o additional input string).
  */
-static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
+static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
@@ -25445,7 +25445,7 @@ static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
 	},
 };
 
-static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
+static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
@@ -25567,7 +25567,7 @@ static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
 	},
 };
 
-static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
+static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
@@ -25591,7 +25591,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
 	},
 };
 
-static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
+static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
@@ -25615,7 +25615,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
 	},
 };
 
-static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
+static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
 	{
 	.entropy = (unsigned char *)
 		"\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
@@ -25704,7 +25704,7 @@ static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
 };
 
 /* Cast5 test vectors from RFC 2144 */
-static struct cipher_testvec cast5_enc_tv_template[] = {
+static const struct cipher_testvec cast5_enc_tv_template[] = {
 	{
 	.key = "\x01\x23\x45\x67\x12\x34\x56\x78"
 		"\x23\x45\x67\x89\x34\x56\x78\x9a",
@@ -25865,7 +25865,7 @@ static struct cipher_testvec cast5_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast5_dec_tv_template[] = {
+static const struct cipher_testvec cast5_dec_tv_template[] = {
 	{
 	.key = "\x01\x23\x45\x67\x12\x34\x56\x78"
 		"\x23\x45\x67\x89\x34\x56\x78\x9a",
@@ -26026,7 +26026,7 @@ static struct cipher_testvec cast5_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
+static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
 	{ /* Generated from TF test vectors */
 	.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 		"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
@@ -26164,7 +26164,7 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
+static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
 	{ /* Generated from TF test vectors */
 	.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 		"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
@@ -26302,7 +26302,7 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
+static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
 	{ /* Generated from TF test vectors */
 	.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 		"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
@@ -26453,7 +26453,7 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
+static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
 	{ /* Generated from TF test vectors */
 	.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 		"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
@@ -26607,7 +26607,7 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
 /*
  * ARC4 test vectors from OpenSSL
  */
-static struct cipher_testvec arc4_enc_tv_template[] = {
+static const struct cipher_testvec arc4_enc_tv_template[] = {
 	{
 	.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
 	.klen = 8,
@@ -26673,7 +26673,7 @@ static struct cipher_testvec arc4_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec arc4_dec_tv_template[] = {
+static const struct cipher_testvec arc4_dec_tv_template[] = {
 	{
 	.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
 	.klen = 8,
@@ -26742,7 +26742,7 @@ static struct cipher_testvec arc4_dec_tv_template[] = {
 /*
  * TEA test vectors
 */
-static struct cipher_testvec tea_enc_tv_template[] = {
+static const struct cipher_testvec tea_enc_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -26785,7 +26785,7 @@ static struct cipher_testvec tea_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec tea_dec_tv_template[] = {
+static const struct cipher_testvec tea_dec_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -26831,7 +26831,7 @@ static struct cipher_testvec tea_dec_tv_template[] = {
 /*
  * XTEA test vectors
 */
-static struct cipher_testvec xtea_enc_tv_template[] = {
+static const struct cipher_testvec xtea_enc_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -26874,7 +26874,7 @@ static struct cipher_testvec xtea_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec xtea_dec_tv_template[] = {
+static const struct cipher_testvec xtea_dec_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -26920,7 +26920,7 @@ static struct cipher_testvec xtea_dec_tv_template[] = {
 /*
  * KHAZAD test vectors.
 */
-static struct cipher_testvec khazad_enc_tv_template[] = {
+static const struct cipher_testvec khazad_enc_tv_template[] = {
 	{
 	.key = "\x80\x00\x00\x00\x00\x00\x00\x00"
 		"\x00\x00\x00\x00\x00\x00\x00\x00",
@@ -26966,7 +26966,7 @@ static struct cipher_testvec khazad_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec khazad_dec_tv_template[] = {
+static const struct cipher_testvec khazad_dec_tv_template[] = {
 	{
 	.key = "\x80\x00\x00\x00\x00\x00\x00\x00"
 		"\x00\x00\x00\x00\x00\x00\x00\x00",
@@ -27016,7 +27016,7 @@ static struct cipher_testvec khazad_dec_tv_template[] = {
  * Anubis test vectors.
 */
 
-static struct cipher_testvec anubis_enc_tv_template[] = {
+static const struct cipher_testvec anubis_enc_tv_template[] = {
 	{
 	.key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
 		"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
@@ -27079,7 +27079,7 @@ static struct cipher_testvec anubis_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec anubis_dec_tv_template[] = {
+static const struct cipher_testvec anubis_dec_tv_template[] = {
 	{
 	.key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
 		"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
@@ -27142,7 +27142,7 @@ static struct cipher_testvec anubis_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
+static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
 	{
 	.key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
 		"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
@@ -27177,7 +27177,7 @@ static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
+static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
 	{
 	.key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
 		"\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
@@ -27215,7 +27215,7 @@ static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
 /*
  * XETA test vectors
 */
-static struct cipher_testvec xeta_enc_tv_template[] = {
+static const struct cipher_testvec xeta_enc_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -27258,7 +27258,7 @@ static struct cipher_testvec xeta_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec xeta_dec_tv_template[] = {
+static const struct cipher_testvec xeta_dec_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -27304,7 +27304,7 @@ static struct cipher_testvec xeta_dec_tv_template[] = {
 /*
  * FCrypt test vectors
 */
-static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
+static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
 	{ /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
 	.key = "\x00\x00\x00\x00\x00\x00\x00\x00",
 	.klen = 8,
@@ -27365,7 +27365,7 @@ static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
+static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
 	{ /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
 	.key = "\x00\x00\x00\x00\x00\x00\x00\x00",
 	.klen = 8,
@@ -27429,7 +27429,7 @@ static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
 /*
  * CAMELLIA test vectors.
 */
-static struct cipher_testvec camellia_enc_tv_template[] = {
+static const struct cipher_testvec camellia_enc_tv_template[] = {
 	{
 	.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
 		"\xfe\xdc\xba\x98\x76\x54\x32\x10",
@@ -27729,7 +27729,7 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_dec_tv_template[] = {
+static const struct cipher_testvec camellia_dec_tv_template[] = {
 	{
 	.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
 		"\xfe\xdc\xba\x98\x76\x54\x32\x10",
@@ -28029,7 +28029,7 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
+static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
 	{
 	.key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
 		"\x51\x2e\x03\xd5\x34\x12\x00\x06",
@@ -28325,7 +28325,7 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
+static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
 	{
 	.key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
 		"\x51\x2e\x03\xd5\x34\x12\x00\x06",
@@ -28621,7 +28621,7 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
+static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
 	{ /* Generated with Crypto++ */
 	.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 		"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -29288,7 +29288,7 @@ static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
+static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
 	{ /* Generated with Crypto++ */
 	.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
 		"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -29955,7 +29955,7 @@ static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
+static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
 	/* Generated from AES-LRW test vectors */
 	{
 	.key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
@@ -30207,7 +30207,7 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
+static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
 	/* Generated from AES-LRW test vectors */
 	/* same as enc vectors with input and result reversed */
 	{
@@ -30460,7 +30460,7 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_xts_enc_tv_template[] = {
+static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
 	/* Generated from AES-XTS test vectors */
 	{
 	.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -30802,7 +30802,7 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec camellia_xts_dec_tv_template[] = {
+static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
 	/* Generated from AES-XTS test vectors */
 	/* same as enc vectors with input and result reversed */
 	{
@@ -31148,7 +31148,7 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = {
 /*
  * SEED test vectors
 */
-static struct cipher_testvec seed_enc_tv_template[] = {
+static const struct cipher_testvec seed_enc_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -31190,7 +31190,7 @@ static struct cipher_testvec seed_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec seed_dec_tv_template[] = {
+static const struct cipher_testvec seed_dec_tv_template[] = {
 	{
 	.key = zeroed_string,
 	.klen = 16,
@@ -31232,7 +31232,7 @@ static struct cipher_testvec seed_dec_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
+static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
 	/*
 	* Testvectors from verified.test-vectors submitted to ECRYPT.
 	* They are truncated to size 39, 64, 111, 129 to test a variety
@@ -32401,7 +32401,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
 	},
 };
 
-static struct cipher_testvec chacha20_enc_tv_template[] = {
+static const struct cipher_testvec chacha20_enc_tv_template[] = {
 	{ /* RFC7539 A.2. Test Vector #1 */
 	.key = "\x00\x00\x00\x00\x00\x00\x00\x00"
 		"\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -32912,7 +32912,7 @@ static struct cipher_testvec chacha20_enc_tv_template[] = {
 /*
  * CTS (Cipher Text Stealing) mode tests
 */
-static struct cipher_testvec cts_mode_enc_tv_template[] = {
+static const struct cipher_testvec cts_mode_enc_tv_template[] = {
 	{ /* from rfc3962 */
 	.klen = 16,
 	.key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
@@ -33014,7 +33014,7 @@ static struct cipher_testvec cts_mode_enc_tv_template[] = {
 	}
 };
 
-static struct cipher_testvec cts_mode_dec_tv_template[] = {
+static const struct cipher_testvec cts_mode_dec_tv_template[] = {
 	{ /* from rfc3962 */
 	.klen = 16,
 	.key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
@@ -33132,7 +33132,7 @@ struct comp_testvec {
  * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
 */
 
-static struct comp_testvec deflate_comp_tv_template[] = {
+static const struct comp_testvec deflate_comp_tv_template[] = {
 	{
 	.inlen = 70,
 	.outlen = 38,
@@ -33168,7 +33168,7 @@ static struct comp_testvec deflate_comp_tv_template[] = {
 	},
 };
 
-static struct comp_testvec deflate_decomp_tv_template[] = {
+static const struct comp_testvec deflate_decomp_tv_template[] = {
 	{
 	.inlen = 122,
 	.outlen = 191,
@@ -33204,10 +33204,85 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
 	},
 };
 
+static const struct comp_testvec zlib_deflate_comp_tv_template[] = {
+	{
+	.inlen = 70,
+	.outlen = 44,
+	.input = "Join us now and share the software "
+		"Join us now and share the software ",
+	.output = "\x78\x5e\xf3\xca\xcf\xcc\x53\x28"
+		"\x2d\x56\xc8\xcb\x2f\x57\x48\xcc"
+		"\x4b\x51\x28\xce\x48\x2c\x4a\x55"
+		"\x28\xc9\x48\x55\x28\xce\x4f\x2b"
+		"\x29\x07\x71\xbc\x08\x2b\x01\x00"
+		"\x7c\x65\x19\x3d",
+	}, {
+	.inlen = 191,
+	.outlen = 129,
+	.input = "This document describes a compression method based on the DEFLATE"
+		"compression algorithm.  This document defines the application of "
+		"the DEFLATE algorithm to the IP Payload Compression Protocol.",
+	.output = "\x78\x5e\x5d\xce\x41\x0a\xc3\x30"
+		"\x0c\x04\xc0\xaf\xec\x0b\xf2\x87"
+		"\xd2\xa6\x50\xe8\xc1\x07\x7f\x40"
+		"\xb1\x95\x5a\x60\x5b\xc6\x56\x0f"
+		"\xfd\x7d\x93\x1e\x42\xe8\x51\xec"
+		"\xee\x20\x9f\x64\x20\x6a\x78\x17"
+		"\xae\x86\xc8\x23\x74\x59\x78\x80"
+		"\x10\xb4\xb4\xce\x63\x88\x56\x14"
+		"\xb6\xa4\x11\x0b\x0d\x8e\xd8\x6e"
+		"\x4b\x8c\xdb\x7c\x7f\x5e\xfc\x7c"
+		"\xae\x51\x7e\x69\x17\x4b\x65\x02"
+		"\xfc\x1f\xbc\x4a\xdd\xd8\x7d\x48"
+		"\xad\x65\x09\x64\x3b\xac\xeb\xd9"
+		"\xc2\x01\xc0\xf4\x17\x3c\x1c\x1c"
+		"\x7d\xb2\x52\xc4\xf5\xf4\x8f\xeb"
+		"\x6a\x1a\x34\x4f\x5f\x2e\x32\x45"
+		"\x4e",
+	},
+};
+
+static const struct comp_testvec zlib_deflate_decomp_tv_template[] = {
+	{
+	.inlen = 128,
+	.outlen = 191,
+	.input = "\x78\x9c\x5d\x8d\x31\x0e\xc2\x30"
+		"\x10\x04\xbf\xb2\x2f\xc8\x1f\x10"
+		"\x04\x09\x89\xc2\x85\x3f\x70\xb1"
+		"\x2f\xf8\x24\xdb\x67\xd9\x47\xc1"
+		"\xef\x49\x68\x12\x51\xae\x76\x67"
+		"\xd6\x27\x19\x88\x1a\xde\x85\xab"
+		"\x21\xf2\x08\x5d\x16\x1e\x20\x04"
+		"\x2d\xad\xf3\x18\xa2\x15\x85\x2d"
+		"\x69\xc4\x42\x83\x23\xb6\x6c\x89"
+		"\x71\x9b\xef\xcf\x8b\x9f\xcf\x33"
+		"\xca\x2f\xed\x62\xa9\x4c\x80\xff"
+		"\x13\xaf\x52\x37\xed\x0e\x52\x6b"
+		"\x59\x02\xd9\x4e\xe8\x7a\x76\x1d"
+		"\x02\x98\xfe\x8a\x87\x83\xa3\x4f"
+		"\x56\x8a\xb8\x9e\x8e\x5c\x57\xd3"
+		"\xa0\x79\xfa\x02\x2e\x32\x45\x4e",
+	.output = "This document describes a compression method based on the DEFLATE"
+		"compression algorithm.  This document defines the application of "
+		"the DEFLATE algorithm to the IP Payload Compression Protocol.",
+	}, {
+	.inlen = 44,
+	.outlen = 70,
+	.input = "\x78\x9c\xf3\xca\xcf\xcc\x53\x28"
+		"\x2d\x56\xc8\xcb\x2f\x57\x48\xcc"
+		"\x4b\x51\x28\xce\x48\x2c\x4a\x55"
+		"\x28\xc9\x48\x55\x28\xce\x4f\x2b"
+		"\x29\x07\x71\xbc\x08\x2b\x01\x00"
+		"\x7c\x65\x19\x3d",
+	.output = "Join us now and share the software "
+		"Join us now and share the software ",
+	},
+};
+
 /*
  * LZO test vectors (null-terminated strings).
 */
-static struct comp_testvec lzo_comp_tv_template[] = {
+static const struct comp_testvec lzo_comp_tv_template[] = {
 	{
 	.inlen = 70,
 	.outlen = 57,
@@ -33247,7 +33322,7 @@ static struct comp_testvec lzo_comp_tv_template[] = {
 	},
 };
 
-static struct comp_testvec lzo_decomp_tv_template[] = {
+static const struct comp_testvec lzo_decomp_tv_template[] = {
 	{
 	.inlen = 133,
 	.outlen = 159,
@@ -33290,7 +33365,7 @@ static struct comp_testvec lzo_decomp_tv_template[] = {
 */
 #define MICHAEL_MIC_TEST_VECTORS 6
 
-static struct hash_testvec michael_mic_tv_template[] = {
+static const struct hash_testvec michael_mic_tv_template[] = {
 	{
 	.key = "\x00\x00\x00\x00\x00\x00\x00\x00",
 	.ksize = 8,
@@ -33338,7 +33413,7 @@ static struct hash_testvec michael_mic_tv_template[] = {
 /*
  * CRC32 test vectors
 */
-static struct hash_testvec crc32_tv_template[] = {
+static const struct hash_testvec crc32_tv_template[] = {
 	{
 	.key = "\x87\xa9\xcb\xed",
 	.ksize = 4,
@@ -33770,7 +33845,7 @@ static struct hash_testvec crc32_tv_template[] = {
 /*
  * CRC32C test vectors
 */
-static struct hash_testvec crc32c_tv_template[] = {
+static const struct hash_testvec crc32c_tv_template[] = {
 	{
 	.psize = 0,
 	.digest = "\x00\x00\x00\x00",
@@ -34206,7 +34281,7 @@ static struct hash_testvec crc32c_tv_template[] = {
 /*
  * Blackfin CRC test vectors
 */
-static struct hash_testvec bfin_crc_tv_template[] = {
+static const struct hash_testvec bfin_crc_tv_template[] = {
 	{
 	.psize = 0,
 	.digest = "\x00\x00\x00\x00",
@@ -34291,7 +34366,7 @@ static struct hash_testvec bfin_crc_tv_template[] = {
 
 };
 
-static struct comp_testvec lz4_comp_tv_template[] = {
+static const struct comp_testvec lz4_comp_tv_template[] = {
 	{
 	.inlen = 255,
 	.outlen = 218,
@@ -34322,7 +34397,7 @@ static struct comp_testvec lz4_comp_tv_template[] = {
 	},
 };
 
-static struct comp_testvec lz4_decomp_tv_template[] = {
+static const struct comp_testvec lz4_decomp_tv_template[] = {
 	{
 	.inlen = 218,
 	.outlen = 255,
@@ -34352,7 +34427,7 @@ static struct comp_testvec lz4_decomp_tv_template[] = {
 	},
 };
 
-static struct comp_testvec lz4hc_comp_tv_template[] = {
+static const struct comp_testvec lz4hc_comp_tv_template[] = {
 	{
 	.inlen = 255,
 	.outlen = 216,
@@ -34383,7 +34458,7 @@ static struct comp_testvec lz4hc_comp_tv_template[] = {
 	},
 };
 
-static struct comp_testvec lz4hc_decomp_tv_template[] = {
+static const struct comp_testvec lz4hc_decomp_tv_template[] = {
 	{
 	.inlen = 216,
 	.outlen = 255,
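
The zlib_deflate_* vectors added above differ from the raw-deflate ones only in framing: RFC 1950 prepends a two-byte header (the second byte, \x78\x5e here versus \x78\x9c in the decompression inputs, varies with the compression level encoded in FLG) and appends an Adler-32 checksum of the plaintext. The following userspace sketch shows how vectors of this shape can be regenerated with stock zlib; it is not part of the patch, and the level/memLevel passed to deflateInit2() are assumptions, so the emitted bytes need not match the template exactly. Build with: cc zlib_vec.c -lz

/*
 * Illustrative generator for zlib-framed (RFC 1950) test vectors.
 * windowBits = 15 selects the zlib wrapper; the raw-deflate vectors
 * earlier in this file correspond to a negative windowBits instead.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	static const char in[] =
		"Join us now and share the software "
		"Join us now and share the software ";
	unsigned char out[256];
	z_stream s;
	unsigned long i;

	memset(&s, 0, sizeof(s));
	if (deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15, 8,
			 Z_DEFAULT_STRATEGY) != Z_OK)
		return 1;

	s.next_in = (unsigned char *)in;
	s.avail_in = strlen(in);
	s.next_out = out;
	s.avail_out = sizeof(out);
	if (deflate(&s, Z_FINISH) != Z_STREAM_END)
		return 1;

	for (i = 0; i < s.total_out; i++)
		printf("\\x%02x", out[i]);	/* testvec-style escapes */
	printf("\n");
	return deflateEnd(&s) != Z_OK;
}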
diff --git a/crypto/xts.c b/crypto/xts.c
index 89ace5ebc2da..d86c11a8c882 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -39,11 +39,11 @@ struct xts_instance_ctx {
 };
 
 struct rctx {
-	be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
+	le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
 
-	be128 t;
+	le128 t;
 
-	be128 *ext;
+	le128 *ext;
 
 	struct scatterlist srcbuf[2];
 	struct scatterlist dstbuf[2];
@@ -99,7 +99,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
 static int post_crypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
-	be128 *buf = rctx->ext ?: rctx->buf;
+	le128 *buf = rctx->ext ?: rctx->buf;
 	struct skcipher_request *subreq;
 	const int bs = XTS_BLOCK_SIZE;
 	struct skcipher_walk w;
@@ -112,12 +112,12 @@ static int post_crypt(struct skcipher_request *req)
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
-		be128 *wdst;
+		le128 *wdst;
 
 		wdst = w.dst.virt.addr;
 
 		do {
-			be128_xor(wdst, buf++, wdst);
+			le128_xor(wdst, buf++, wdst);
 			wdst++;
 		} while ((avail -= bs) >= bs);
 
@@ -150,7 +150,7 @@ out:
 static int pre_crypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
-	be128 *buf = rctx->ext ?: rctx->buf;
+	le128 *buf = rctx->ext ?: rctx->buf;
 	struct skcipher_request *subreq;
 	const int bs = XTS_BLOCK_SIZE;
 	struct skcipher_walk w;
@@ -174,15 +174,15 @@ static int pre_crypt(struct skcipher_request *req)
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
-		be128 *wsrc;
-		be128 *wdst;
+		le128 *wsrc;
+		le128 *wdst;
 
 		wsrc = w.src.virt.addr;
 		wdst = w.dst.virt.addr;
 
 		do {
 			*buf++ = rctx->t;
-			be128_xor(wdst++, &rctx->t, wsrc++);
+			le128_xor(wdst++, &rctx->t, wsrc++);
 			gf128mul_x_ble(&rctx->t, &rctx->t);
 		} while ((avail -= bs) >= bs);
 
@@ -369,8 +369,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
 	const unsigned int max_blks = req->tbuflen / bsize;
 	struct blkcipher_walk walk;
 	unsigned int nblocks;
-	be128 *src, *dst, *t;
-	be128 *t_buf = req->tbuf;
+	le128 *src, *dst, *t;
+	le128 *t_buf = req->tbuf;
 	int err, i;
 
 	BUG_ON(max_blks < 1);
@@ -383,8 +383,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
 		return err;
 
 	nblocks = min(nbytes / bsize, max_blks);
-	src = (be128 *)walk.src.virt.addr;
-	dst = (be128 *)walk.dst.virt.addr;
+	src = (le128 *)walk.src.virt.addr;
+	dst = (le128 *)walk.dst.virt.addr;
 
 	/* calculate first value of T */
 	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
@@ -400,7 +400,7 @@ first:
 			t = &t_buf[i];
 
 			/* PP <- T xor P */
-			be128_xor(dst + i, t, src + i);
+			le128_xor(dst + i, t, src + i);
 		}
 
 		/* CC <- E(Key2,PP) */
@@ -409,7 +409,7 @@ first:
 
 		/* C <- T xor CC */
 		for (i = 0; i < nblocks; i++)
-			be128_xor(dst + i, dst + i, &t_buf[i]);
+			le128_xor(dst + i, dst + i, &t_buf[i]);
 
 		src += nblocks;
 		dst += nblocks;
@@ -417,7 +417,7 @@ first:
 		nblocks = min(nbytes / bsize, max_blks);
 	} while (nblocks > 0);
 
-	*(be128 *)walk.iv = *t;
+	*(le128 *)walk.iv = *t;
 
 	err = blkcipher_walk_done(desc, &walk, nbytes);
 	nbytes = walk.nbytes;
@@ -425,8 +425,8 @@ first:
 			break;
 
 		nblocks = min(nbytes / bsize, max_blks);
-		src = (be128 *)walk.src.virt.addr;
-		dst = (be128 *)walk.dst.virt.addr;
+		src = (le128 *)walk.src.virt.addr;
+		dst = (le128 *)walk.dst.virt.addr;
 	}
 
 	return err;
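
The xts.c change above is mechanical (be128 becomes le128 throughout), but it encodes a real convention: XTS per IEEE P1619 treats the 128-bit tweak as a little-endian value when multiplying it by x in GF(2^128), which is what gf128mul_x_ble() does between blocks. For illustration only (not from the patch), the tweak update can be written against a plain pair of 64-bit halves:

/*
 * Sketch of the XTS tweak update performed by gf128mul_x_ble(),
 * assuming "lo"/"hi" hold the little-endian halves of the tweak.
 * The reduction constant 0x87 comes from the GF(2^128) polynomial
 * x^128 + x^7 + x^2 + x + 1.
 */
#include <stdint.h>

struct xts_tweak {		/* hypothetical stand-in for le128 */
	uint64_t lo, hi;
};

static void xts_tweak_mul_x(struct xts_tweak *t)
{
	uint64_t carry = t->hi >> 63;	/* bit shifted out of the top */

	t->hi = (t->hi << 1) | (t->lo >> 63);
	t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);
}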
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b9918fb9587d..1b223c32a8ae 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -294,20 +294,6 @@ config HW_RANDOM_POWERNV
 
 	  If unsure, say Y.
 
-config HW_RANDOM_EXYNOS
-	tristate "EXYNOS HW random number generator support"
-	depends on ARCH_EXYNOS || COMPILE_TEST
-	depends on HAS_IOMEM
-	default HW_RANDOM
-	---help---
-	  This driver provides kernel-side support for the Random Number
-	  Generator hardware found on EXYNOS SOCs.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called exynos-rng.
-
-	  If unsure, say Y.
-
 config HW_RANDOM_TPM
 	tristate "TPM HW Random Number Generator support"
 	depends on TCG_TPM
@@ -423,6 +409,20 @@ config HW_RANDOM_CAVIUM
 
 	  If unsure, say Y.
 
+config HW_RANDOM_MTK
+	tristate "Mediatek Random Number Generator support"
+	depends on HW_RANDOM
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	default y
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Mediatek SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mtk-rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_S390
 	tristate "S390 True Random Number Generator support"
 	depends on S390
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index dd1765246255..b085975ec1d2 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
 obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
-obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
 obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
@@ -36,4 +35,5 @@ obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
 obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
 obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
 obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
+obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
 obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
deleted file mode 100644
index 23d358553b21..000000000000
--- a/drivers/char/hw_random/exynos-rng.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * exynos-rng.c - Random Number Generator driver for the exynos
- *
- * Copyright (C) 2012 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/hw_random.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <linux/err.h>
-
-#define EXYNOS_PRNG_STATUS_OFFSET	0x10
-#define EXYNOS_PRNG_SEED_OFFSET		0x140
-#define EXYNOS_PRNG_OUT1_OFFSET		0x160
-#define SEED_SETTING_DONE		BIT(1)
-#define PRNG_START			0x18
-#define PRNG_DONE			BIT(5)
-#define EXYNOS_AUTOSUSPEND_DELAY	100
-
-struct exynos_rng {
-	struct device *dev;
-	struct hwrng rng;
-	void __iomem *mem;
-	struct clk *clk;
-};
-
-static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
-{
-	return readl_relaxed(rng->mem + offset);
-}
-
-static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
-{
-	writel_relaxed(val, rng->mem + offset);
-}
-
-static int exynos_rng_configure(struct exynos_rng *exynos_rng)
-{
-	int i;
-	int ret = 0;
-
-	for (i = 0 ; i < 5 ; i++)
-		exynos_rng_writel(exynos_rng, jiffies,
-				  EXYNOS_PRNG_SEED_OFFSET + 4*i);
-
-	if (!(exynos_rng_readl(exynos_rng, EXYNOS_PRNG_STATUS_OFFSET)
-						& SEED_SETTING_DONE))
-		ret = -EIO;
-
-	return ret;
-}
-
-static int exynos_init(struct hwrng *rng)
-{
-	struct exynos_rng *exynos_rng = container_of(rng,
-						struct exynos_rng, rng);
-	int ret = 0;
-
-	pm_runtime_get_sync(exynos_rng->dev);
-	ret = exynos_rng_configure(exynos_rng);
-	pm_runtime_mark_last_busy(exynos_rng->dev);
-	pm_runtime_put_autosuspend(exynos_rng->dev);
-
-	return ret;
-}
-
-static int exynos_read(struct hwrng *rng, void *buf,
-					size_t max, bool wait)
-{
-	struct exynos_rng *exynos_rng = container_of(rng,
-						struct exynos_rng, rng);
-	u32 *data = buf;
-	int retry = 100;
-	int ret = 4;
-
-	pm_runtime_get_sync(exynos_rng->dev);
-
-	exynos_rng_writel(exynos_rng, PRNG_START, 0);
-
-	while (!(exynos_rng_readl(exynos_rng,
-			EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
-		cpu_relax();
-	if (!retry) {
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
-
-	*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
-
-out:
-	pm_runtime_mark_last_busy(exynos_rng->dev);
-	pm_runtime_put_sync_autosuspend(exynos_rng->dev);
-
-	return ret;
-}
-
-static int exynos_rng_probe(struct platform_device *pdev)
-{
-	struct exynos_rng *exynos_rng;
-	struct resource *res;
-	int ret;
-
-	exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
-					GFP_KERNEL);
-	if (!exynos_rng)
-		return -ENOMEM;
-
-	exynos_rng->dev = &pdev->dev;
-	exynos_rng->rng.name = "exynos";
-	exynos_rng->rng.init = exynos_init;
-	exynos_rng->rng.read = exynos_read;
-	exynos_rng->clk = devm_clk_get(&pdev->dev, "secss");
-	if (IS_ERR(exynos_rng->clk)) {
-		dev_err(&pdev->dev, "Couldn't get clock.\n");
-		return -ENOENT;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(exynos_rng->mem))
-		return PTR_ERR(exynos_rng->mem);
-
-	platform_set_drvdata(pdev, exynos_rng);
-
-	pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-
-	ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
-	if (ret) {
-		pm_runtime_dont_use_autosuspend(&pdev->dev);
-		pm_runtime_disable(&pdev->dev);
-	}
-
-	return ret;
-}
-
-static int exynos_rng_remove(struct platform_device *pdev)
-{
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
-static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
-
-	clk_disable_unprepare(exynos_rng->clk);
-
-	return 0;
-}
-
-static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
-
-	return clk_prepare_enable(exynos_rng->clk);
-}
-
-static int __maybe_unused exynos_rng_suspend(struct device *dev)
-{
-	return pm_runtime_force_suspend(dev);
-}
-
-static int __maybe_unused exynos_rng_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
-	int ret;
-
-	ret = pm_runtime_force_resume(dev);
-	if (ret)
-		return ret;
-
-	return exynos_rng_configure(exynos_rng);
-}
-
-static const struct dev_pm_ops exynos_rng_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
-	SET_RUNTIME_PM_OPS(exynos_rng_runtime_suspend,
-			   exynos_rng_runtime_resume, NULL)
-};
-
-static const struct of_device_id exynos_rng_dt_match[] = {
-	{
-		.compatible = "samsung,exynos4-rng",
-	},
-	{ },
-};
-MODULE_DEVICE_TABLE(of, exynos_rng_dt_match);
-
-static struct platform_driver exynos_rng_driver = {
-	.driver		= {
-		.name	= "exynos-rng",
-		.pm	= &exynos_rng_pm_ops,
-		.of_match_table = exynos_rng_dt_match,
-	},
-	.probe		= exynos_rng_probe,
-	.remove		= exynos_rng_remove,
-};
-
-module_platform_driver(exynos_rng_driver);
-
-MODULE_DESCRIPTION("EXYNOS 4 H/W Random Number Generator driver");
-MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_LICENSE("GPL");
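
Aside, not part of the patch: the removed exynos_read() bounds its wait with an open-coded --retry/cpu_relax() loop on PRNG_DONE. The iopoll helpers express the same bounded busy-wait more compactly, as the new mtk-rng driver further down does. A sketch, reusing the removed driver's register and bit names; the poll interval and timeout values are made up:

#include <linux/iopoll.h>

static int exynos_wait_done_sketch(void __iomem *mem)
{
	u32 status;

	/* poll every 10us, give up after 1000us; returns 0 or -ETIMEDOUT */
	return readl_poll_timeout(mem + EXYNOS_PRNG_STATUS_OFFSET, status,
				  status & PRNG_DONE, 10, 1000);
}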
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 119d698439ae..2e23be802a62 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -62,6 +62,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/of.h>
+#include <linux/clk.h>
 
 #define RNG_DATA 0x00
 
@@ -69,6 +70,7 @@ struct meson_rng_data {
 	void __iomem *base;
 	struct platform_device *pdev;
 	struct hwrng rng;
+	struct clk *core_clk;
 };
 
 static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -81,11 +83,17 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 	return sizeof(u32);
 }
 
+static void meson_rng_clk_disable(void *data)
+{
+	clk_disable_unprepare(data);
+}
+
 static int meson_rng_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct meson_rng_data *data;
 	struct resource *res;
+	int ret;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -98,6 +106,20 @@ static int meson_rng_probe(struct platform_device *pdev)
 	if (IS_ERR(data->base))
 		return PTR_ERR(data->base);
 
+	data->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(data->core_clk))
+		data->core_clk = NULL;
+
+	if (data->core_clk) {
+		ret = clk_prepare_enable(data->core_clk);
+		if (ret)
+			return ret;
+		ret = devm_add_action_or_reset(dev, meson_rng_clk_disable,
+					       data->core_clk);
+		if (ret)
+			return ret;
+	}
+
 	data->rng.name = pdev->name;
 	data->rng.read = meson_rng_read;
 
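
The meson-rng change leans on devm_add_action_or_reset() so the optional core clock is disabled automatically on probe failure or unbind, without adding a remove() callback. The pattern in isolation (a sketch, not from the patch; the function names are invented): once registration succeeds, the undo action runs at device teardown, and if registration itself fails the action is invoked immediately and the error returned.

static void sketch_clk_off(void *clk)
{
	clk_disable_unprepare(clk);
}

static int sketch_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;
	/* pairs the enable above with an automatic disable on teardown */
	return devm_add_action_or_reset(dev, sketch_clk_off, clk);
}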
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
new file mode 100644
index 000000000000..df8eb54fd5a3
--- /dev/null
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -0,0 +1,168 @@
1/*
2 * Driver for Mediatek Hardware Random Number Generator
3 *
4 * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of
9 * the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16#define MTK_RNG_DEV KBUILD_MODNAME
17
18#include <linux/clk.h>
19#include <linux/delay.h>
20#include <linux/err.h>
21#include <linux/hw_random.h>
22#include <linux/io.h>
23#include <linux/iopoll.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/of.h>
27#include <linux/platform_device.h>
28
29#define USEC_POLL 2
30#define TIMEOUT_POLL 20
31
32#define RNG_CTRL 0x00
33#define RNG_EN BIT(0)
34#define RNG_READY BIT(31)
35
36#define RNG_DATA 0x08
37
38#define to_mtk_rng(p) container_of(p, struct mtk_rng, rng)
39
40struct mtk_rng {
41 void __iomem *base;
42 struct clk *clk;
43 struct hwrng rng;
44};
45
46static int mtk_rng_init(struct hwrng *rng)
47{
48 struct mtk_rng *priv = to_mtk_rng(rng);
49 u32 val;
50 int err;
51
52 err = clk_prepare_enable(priv->clk);
53 if (err)
54 return err;
55
56 val = readl(priv->base + RNG_CTRL);
57 val |= RNG_EN;
58 writel(val, priv->base + RNG_CTRL);
59
60 return 0;
61}
62
63static void mtk_rng_cleanup(struct hwrng *rng)
64{
65 struct mtk_rng *priv = to_mtk_rng(rng);
66 u32 val;
67
68 val = readl(priv->base + RNG_CTRL);
69 val &= ~RNG_EN;
70 writel(val, priv->base + RNG_CTRL);
71
72 clk_disable_unprepare(priv->clk);
73}
74
75static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
76{
77 struct mtk_rng *priv = to_mtk_rng(rng);
78 int ready;
79
80 ready = readl(priv->base + RNG_CTRL) & RNG_READY;
81 if (!ready && wait)
82 readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
83 ready & RNG_READY, USEC_POLL,
84 TIMEOUT_POLL);
85 return !!ready;
86}
87
88static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
89{
90 struct mtk_rng *priv = to_mtk_rng(rng);
91 int retval = 0;
92
93 while (max >= sizeof(u32)) {
94 if (!mtk_rng_wait_ready(rng, wait))
95 break;
96
97 *(u32 *)buf = readl(priv->base + RNG_DATA);
98 retval += sizeof(u32);
99 buf += sizeof(u32);
100 max -= sizeof(u32);
101 }
102
103 return retval || !wait ? retval : -EIO;
104}
105
106static int mtk_rng_probe(struct platform_device *pdev)
107{
108 struct resource *res;
109 int ret;
110 struct mtk_rng *priv;
111
112 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
113 if (!res) {
114 dev_err(&pdev->dev, "no iomem resource\n");
115 return -ENXIO;
116 }
117
118 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
119 if (!priv)
120 return -ENOMEM;
121
122 priv->rng.name = pdev->name;
123 priv->rng.init = mtk_rng_init;
124 priv->rng.cleanup = mtk_rng_cleanup;
125 priv->rng.read = mtk_rng_read;
126
127 priv->clk = devm_clk_get(&pdev->dev, "rng");
128 if (IS_ERR(priv->clk)) {
129 ret = PTR_ERR(priv->clk);
130 dev_err(&pdev->dev, "no clock for device: %d\n", ret);
131 return ret;
132 }
133
134 priv->base = devm_ioremap_resource(&pdev->dev, res);
135 if (IS_ERR(priv->base))
136 return PTR_ERR(priv->base);
137
138 ret = devm_hwrng_register(&pdev->dev, &priv->rng);
139 if (ret) {
140 dev_err(&pdev->dev, "failed to register rng device: %d\n",
141 ret);
142 return ret;
143 }
144
145 dev_info(&pdev->dev, "registered RNG driver\n");
146
147 return 0;
148}
149
150static const struct of_device_id mtk_rng_match[] = {
151 { .compatible = "mediatek,mt7623-rng" },
152 {},
153};
154MODULE_DEVICE_TABLE(of, mtk_rng_match);
155
156static struct platform_driver mtk_rng_driver = {
157 .probe = mtk_rng_probe,
158 .driver = {
159 .name = MTK_RNG_DEV,
160 .of_match_table = mtk_rng_match,
161 },
162};
163
164module_platform_driver(mtk_rng_driver);
165
166MODULE_DESCRIPTION("Mediatek Random Number Generator Driver");
167MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
168MODULE_LICENSE("GPL");
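
Once mtk_rng_probe() succeeds, devm_hwrng_register() hands the device to the hw_random core, which feeds the kernel entropy pool and exposes the hardware through the standard /dev/hwrng character device. A minimal userspace consumer is just a read(); this sketch assumes the driver is bound and the caller has permission to open the node (usually root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return 1;

	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return 0;
}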
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 31cbdbbaebfc..92dd4e925315 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -748,9 +748,7 @@ static int n2rng_probe(struct platform_device *op)
748 748
749 dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n", 749 dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
750 np->hvapi_major, np->hvapi_minor); 750 np->hvapi_major, np->hvapi_minor);
751 751 np->units = devm_kcalloc(&op->dev, np->num_units, sizeof(*np->units),
752 np->units = devm_kzalloc(&op->dev,
753 sizeof(struct n2rng_unit) * np->num_units,
754 GFP_KERNEL); 752 GFP_KERNEL);
755 err = -ENOMEM; 753 err = -ENOMEM;
756 if (!np->units) 754 if (!np->units)
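
The n2rng change is a routine hardening conversion: devm_kzalloc(dev, sizeof(elem) * n, ...) becomes devm_kcalloc(dev, n, sizeof(elem), ...), which returns NULL when n * size would overflow instead of silently allocating a short buffer. The guard it buys is roughly the following (a sketch of what the kcalloc family does internally, not a suggested replacement):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static void *checked_array_alloc(struct device *dev, size_t n, size_t size)
{
	/* Reject n * size overflow up front. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return devm_kzalloc(dev, n * size, GFP_KERNEL);
}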
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b1ad12552b56..74d11ae6abe9 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -398,16 +398,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
398 return err; 398 return err;
399 } 399 }
400 400
401 priv->clk = devm_clk_get(&pdev->dev, NULL);
402 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
403 return -EPROBE_DEFER;
404 if (!IS_ERR(priv->clk)) {
405 err = clk_prepare_enable(priv->clk);
406 if (err)
407 dev_err(&pdev->dev, "unable to enable the clk, "
408 "err = %d\n", err);
409 }
410
411 /* 401 /*
412 * On OMAP4, enabling the shutdown_oflo interrupt is 402 * On OMAP4, enabling the shutdown_oflo interrupt is
413 * done in the interrupt mask register. There is no 403 * done in the interrupt mask register. There is no
@@ -478,6 +468,18 @@ static int omap_rng_probe(struct platform_device *pdev)
478 goto err_ioremap; 468 goto err_ioremap;
479 } 469 }
480 470
471 priv->clk = devm_clk_get(&pdev->dev, NULL);
472 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
473 return -EPROBE_DEFER;
474 if (!IS_ERR(priv->clk)) {
475 ret = clk_prepare_enable(priv->clk);
476 if (ret) {
477 dev_err(&pdev->dev,
478 "Unable to enable the clk: %d\n", ret);
479 goto err_register;
480 }
481 }
482
481 ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : 483 ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
482 get_omap_rng_device_details(priv); 484 get_omap_rng_device_details(priv);
483 if (ret) 485 if (ret)
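
Two things change in the omap-rng hunks: the clock is now acquired in omap_rng_probe() itself, where a failed clk_prepare_enable() can take the existing err_register unwind path, and -EPROBE_DEFER is still propagated so the driver core retries once the clock provider shows up. The optional-clock idiom used here separates "provider not ready yet" from "clock genuinely absent"; a condensed sketch with a hypothetical helper name (devm_clk_get_optional() did not exist yet at this point):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *my_get_optional_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk)) {
		/* Provider not probed yet: let the caller propagate this. */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return clk;
		/* Any other error: treat the clock as absent but optional. */
		clk = NULL;
	}

	return clk;
}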
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index cf37db263ecd..a0faa5f05deb 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -20,84 +20,100 @@
20 * TODO: add support for reading sizes other than 32bits and masking 20 * TODO: add support for reading sizes other than 32bits and masking
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/completion.h>
24#include <linux/kernel.h> 24#include <linux/delay.h>
25#include <linux/platform_device.h> 25#include <linux/hrtimer.h>
26#include <linux/of.h>
27#include <linux/hw_random.h> 26#include <linux/hw_random.h>
28#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/ktime.h>
29#include <linux/module.h>
30#include <linux/of.h>
31#include <linux/platform_device.h>
29#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/time.h>
30#include <linux/timeriomem-rng.h> 34#include <linux/timeriomem-rng.h>
31#include <linux/jiffies.h>
32#include <linux/sched.h>
33#include <linux/timer.h>
34#include <linux/completion.h>
35 35
36struct timeriomem_rng_private_data { 36struct timeriomem_rng_private {
37 void __iomem *io_base; 37 void __iomem *io_base;
38 unsigned int expires; 38 ktime_t period;
39 unsigned int period;
40 unsigned int present:1; 39 unsigned int present:1;
41 40
42 struct timer_list timer; 41 struct hrtimer timer;
43 struct completion completion; 42 struct completion completion;
44 43
45 struct hwrng timeriomem_rng_ops; 44 struct hwrng rng_ops;
46}; 45};
47 46
48#define to_rng_priv(rng) \ 47static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
49 ((struct timeriomem_rng_private_data *)rng->priv) 48 size_t max, bool wait)
50
51/*
52 * have data return 1, however return 0 if we have nothing
53 */
54static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
55{ 49{
56 struct timeriomem_rng_private_data *priv = to_rng_priv(rng); 50 struct timeriomem_rng_private *priv =
57 51 container_of(hwrng, struct timeriomem_rng_private, rng_ops);
58 if (!wait || priv->present) 52 int retval = 0;
59 return priv->present; 53 int period_us = ktime_to_us(priv->period);
54
55 /*
56 * The RNG provides 32-bits per read. Ensure there is enough space for
57 * at minimum one read.
58 */
59 if (max < sizeof(u32))
60 return 0;
61
62 /*
63 * There may not have been enough time for new data to be generated
64 * since the last request. If the caller doesn't want to wait, let them
65 * bail out. Otherwise, wait for the completion. If the new data has
66 * already been generated, the completion should already be available.
67 */
68 if (!wait && !priv->present)
69 return 0;
60 70
61 wait_for_completion(&priv->completion); 71 wait_for_completion(&priv->completion);
62 72
63 return 1; 73 do {
64} 74 /*
65 75 * After the first read, all additional reads will need to wait
66static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data) 76 * for the RNG to generate new data. Since the period can have
67{ 77 * a wide range of values (1us to 1s have been observed), allow
68 struct timeriomem_rng_private_data *priv = to_rng_priv(rng); 78 * for 1% tolerance in the sleep time rather than a fixed value.
69 unsigned long cur; 79 */
70 s32 delay; 80 if (retval > 0)
71 81 usleep_range(period_us,
72 *data = readl(priv->io_base); 82 period_us + min(1, period_us / 100));
73 83
74 cur = jiffies; 84 *(u32 *)data = readl(priv->io_base);
75 85 retval += sizeof(u32);
76 delay = cur - priv->expires; 86 data += sizeof(u32);
77 delay = priv->period - (delay % priv->period); 87 max -= sizeof(u32);
78 88 } while (wait && max > sizeof(u32));
79 priv->expires = cur + delay; 89
90 /*
91 * Block any new callers until the RNG has had time to generate new
92 * data.
93 */
80 priv->present = 0; 94 priv->present = 0;
81
82 reinit_completion(&priv->completion); 95 reinit_completion(&priv->completion);
83 mod_timer(&priv->timer, priv->expires); 96 hrtimer_forward_now(&priv->timer, priv->period);
97 hrtimer_restart(&priv->timer);
84 98
85 return 4; 99 return retval;
86} 100}
87 101
88static void timeriomem_rng_trigger(unsigned long data) 102static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer)
89{ 103{
90 struct timeriomem_rng_private_data *priv 104 struct timeriomem_rng_private *priv
91 = (struct timeriomem_rng_private_data *)data; 105 = container_of(timer, struct timeriomem_rng_private, timer);
92 106
93 priv->present = 1; 107 priv->present = 1;
94 complete(&priv->completion); 108 complete(&priv->completion);
109
110 return HRTIMER_NORESTART;
95} 111}
96 112
97static int timeriomem_rng_probe(struct platform_device *pdev) 113static int timeriomem_rng_probe(struct platform_device *pdev)
98{ 114{
99 struct timeriomem_rng_data *pdata = pdev->dev.platform_data; 115 struct timeriomem_rng_data *pdata = pdev->dev.platform_data;
100 struct timeriomem_rng_private_data *priv; 116 struct timeriomem_rng_private *priv;
101 struct resource *res; 117 struct resource *res;
102 int err = 0; 118 int err = 0;
103 int period; 119 int period;
@@ -119,7 +135,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
119 135
120 /* Allocate memory for the device structure (and zero it) */ 136 /* Allocate memory for the device structure (and zero it) */
121 priv = devm_kzalloc(&pdev->dev, 137 priv = devm_kzalloc(&pdev->dev,
122 sizeof(struct timeriomem_rng_private_data), GFP_KERNEL); 138 sizeof(struct timeriomem_rng_private), GFP_KERNEL);
123 if (!priv) 139 if (!priv)
124 return -ENOMEM; 140 return -ENOMEM;
125 141
@@ -139,54 +155,41 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
139 period = pdata->period; 155 period = pdata->period;
140 } 156 }
141 157
142 priv->period = usecs_to_jiffies(period); 158 priv->period = ns_to_ktime(period * NSEC_PER_USEC);
143 if (priv->period < 1) {
144 dev_err(&pdev->dev, "period is less than one jiffy\n");
145 return -EINVAL;
146 }
147
148 priv->expires = jiffies;
149 priv->present = 1;
150
151 init_completion(&priv->completion); 159 init_completion(&priv->completion);
152 complete(&priv->completion); 160 hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
161 priv->timer.function = timeriomem_rng_trigger;
153 162
154 setup_timer(&priv->timer, timeriomem_rng_trigger, (unsigned long)priv); 163 priv->rng_ops.name = dev_name(&pdev->dev);
155 164 priv->rng_ops.read = timeriomem_rng_read;
156 priv->timeriomem_rng_ops.name = dev_name(&pdev->dev);
157 priv->timeriomem_rng_ops.data_present = timeriomem_rng_data_present;
158 priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read;
159 priv->timeriomem_rng_ops.priv = (unsigned long)priv;
160 165
161 priv->io_base = devm_ioremap_resource(&pdev->dev, res); 166 priv->io_base = devm_ioremap_resource(&pdev->dev, res);
162 if (IS_ERR(priv->io_base)) { 167 if (IS_ERR(priv->io_base)) {
163 err = PTR_ERR(priv->io_base); 168 return PTR_ERR(priv->io_base);
164 goto out_timer;
165 } 169 }
166 170
167 err = hwrng_register(&priv->timeriomem_rng_ops); 171 /* Assume random data is already available. */
172 priv->present = 1;
173 complete(&priv->completion);
174
175 err = hwrng_register(&priv->rng_ops);
168 if (err) { 176 if (err) {
169 dev_err(&pdev->dev, "problem registering\n"); 177 dev_err(&pdev->dev, "problem registering\n");
170 goto out_timer; 178 return err;
171 } 179 }
172 180
173 dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", 181 dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
174 priv->io_base, period); 182 priv->io_base, period);
175 183
176 return 0; 184 return 0;
177
178out_timer:
179 del_timer_sync(&priv->timer);
180 return err;
181} 185}
182 186
183static int timeriomem_rng_remove(struct platform_device *pdev) 187static int timeriomem_rng_remove(struct platform_device *pdev)
184{ 188{
185 struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev); 189 struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
186
187 hwrng_unregister(&priv->timeriomem_rng_ops);
188 190
189 del_timer_sync(&priv->timer); 191 hwrng_unregister(&priv->rng_ops);
192 hrtimer_cancel(&priv->timer);
190 193
191 return 0; 194 return 0;
192} 195}
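
The timeriomem conversion retires the jiffies-based timer_list and the legacy data_present()/data_read() pair in favour of an hrtimer plus the modern read() hook, which returns the number of bytes copied (or a negative error when wait is set and nothing can be produced). The rearm sequence used in timeriomem_rng_read() above (hrtimer_forward_now() to push the expiry one period past "now", then hrtimer_restart()) is the standard way to rearm a one-shot hrtimer without accumulating drift. A self-contained sketch with hypothetical names, assuming the same HRTIMER_MODE_ABS setup as the probe above:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

/* Fires once per arm; signal readiness here, then stay stopped. */
static enum hrtimer_restart my_expire(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	my_timer.function = my_expire;
}

/* Called from the consumer path: rearm one period past "now". */
static void my_rearm(ktime_t period)
{
	hrtimer_forward_now(&my_timer, period);
	hrtimer_restart(&my_timer);
}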
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 8ee2022ce5d5..cbd62e46bb5b 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -193,7 +193,7 @@
193/* CLKID_I2C */ 193/* CLKID_I2C */
194/* #define CLKID_SAR_ADC */ 194/* #define CLKID_SAR_ADC */
195#define CLKID_SMART_CARD 24 195#define CLKID_SMART_CARD 24
196#define CLKID_RNG0 25 196/* CLKID_RNG0 */
197#define CLKID_UART0 26 197#define CLKID_UART0 26
198#define CLKID_SDHC 27 198#define CLKID_SDHC 27
199#define CLKID_STREAM 28 199#define CLKID_STREAM 28
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 473d31288ad8..fb1e60f5002e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -388,6 +388,21 @@ config CRYPTO_DEV_MXC_SCC
388 This option enables support for the Security Controller (SCC) 388 This option enables support for the Security Controller (SCC)
389 found in Freescale i.MX25 chips. 389 found in Freescale i.MX25 chips.
390 390
391config CRYPTO_DEV_EXYNOS_RNG
392 tristate "EXYNOS HW pseudo random number generator support"
393 depends on ARCH_EXYNOS || COMPILE_TEST
394 depends on HAS_IOMEM
395 select CRYPTO_RNG
396 ---help---
397 This driver provides kernel-side support through the
398 cryptographic API for the pseudo random number generator hardware
399 found on Exynos SoCs.
400
401 To compile this driver as a module, choose M here: the
402 module will be called exynos-rng.
403
404 If unsure, say Y.
405
391config CRYPTO_DEV_S5P 406config CRYPTO_DEV_S5P
392 tristate "Support for Samsung S5PV210/Exynos crypto accelerator" 407 tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
393 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST 408 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
@@ -515,6 +530,13 @@ config CRYPTO_DEV_MXS_DCP
515source "drivers/crypto/qat/Kconfig" 530source "drivers/crypto/qat/Kconfig"
516source "drivers/crypto/cavium/cpt/Kconfig" 531source "drivers/crypto/cavium/cpt/Kconfig"
517 532
533config CRYPTO_DEV_CAVIUM_ZIP
534 tristate "Cavium ZIP driver"
535 depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
536 ---help---
537 Select this option if you want to enable compression/decompression
538 acceleration on Cavium's ARM-based SoCs.
539
518config CRYPTO_DEV_QCE 540config CRYPTO_DEV_QCE
519 tristate "Qualcomm crypto engine accelerator" 541 tristate "Qualcomm crypto engine accelerator"
520 depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM 542 depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
@@ -619,4 +641,6 @@ config CRYPTO_DEV_BCM_SPU
619 Secure Processing Unit (SPU). The SPU driver registers ablkcipher, 641 Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
620 ahash, and aead algorithms with the kernel cryptographic API. 642 ahash, and aead algorithms with the kernel cryptographic API.
621 643
644source "drivers/crypto/stm32/Kconfig"
645
622endif # CRYPTO_HW 646endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 739609471169..463f33592d93 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,9 +2,11 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
2obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o 2obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
3obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o 3obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
4obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o 4obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
5obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
5obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ 6obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
6obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ 7obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
7obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ 8obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
9obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ 10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
9obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 11obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
10obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 12obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
@@ -30,6 +32,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
30obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ 32obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
31obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o 33obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
32obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o 34obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
35obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/
33obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ 36obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
34obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 37obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
35obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ 38obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index d10b4ae5e0da..fdc83a2281ca 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -50,7 +50,7 @@
50static void crypto4xx_hw_init(struct crypto4xx_device *dev) 50static void crypto4xx_hw_init(struct crypto4xx_device *dev)
51{ 51{
52 union ce_ring_size ring_size; 52 union ce_ring_size ring_size;
53 union ce_ring_contol ring_ctrl; 53 union ce_ring_control ring_ctrl;
54 union ce_part_ring_size part_ring_size; 54 union ce_part_ring_size part_ring_size;
55 union ce_io_threshold io_threshold; 55 union ce_io_threshold io_threshold;
56 u32 rand_num; 56 u32 rand_num;
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 46fe57c8f6eb..279b8725559f 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -180,7 +180,7 @@ union ce_ring_size {
180} __attribute__((packed)); 180} __attribute__((packed));
181 181
182#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54 182#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
183union ce_ring_contol { 183union ce_ring_control {
184 struct { 184 struct {
185 u32 continuous:1; 185 u32 continuous:1;
186 u32 rsv:5; 186 u32 rsv:5;
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 0502f460dacd..430c5570ea87 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -312,7 +312,7 @@ int do_shash(unsigned char *name, unsigned char *result,
312 } 312 }
313 rc = crypto_shash_final(&sdesc->shash, result); 313 rc = crypto_shash_final(&sdesc->shash, result);
314 if (rc) 314 if (rc)
315 pr_err("%s: Could not genereate %s hash", __func__, name); 315 pr_err("%s: Could not generate %s hash", __func__, name);
316 316
317do_shash_err: 317do_shash_err:
318 crypto_free_shash(hash); 318 crypto_free_shash(hash);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index bc0d3569f8d9..e36aeacd7635 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -87,6 +87,23 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
87 To compile this as a module, choose M here: the module 87 To compile this as a module, choose M here: the module
88 will be called caamalg. 88 will be called caamalg.
89 89
90config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
91 tristate "Queue Interface as Crypto API backend"
92 depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
93 default y
94 select CRYPTO_AUTHENC
95 select CRYPTO_BLKCIPHER
96 help
97 Selecting this will use CAAM Queue Interface (QI) for sending
98 & receiving crypto jobs to/from CAAM. This gives better performance
99 than job ring interface when the number of cores are more than the
100 number of job rings assigned to the kernel. The number of portals
101 assigned to the kernel should also be more than the number of
102 job rings.
103
104 To compile this as a module, choose M here: the module
105 will be called caamalg_qi.
106
90config CRYPTO_DEV_FSL_CAAM_AHASH_API 107config CRYPTO_DEV_FSL_CAAM_AHASH_API
91 tristate "Register hash algorithm implementations with Crypto API" 108 tristate "Register hash algorithm implementations with Crypto API"
92 depends on CRYPTO_DEV_FSL_CAAM_JR 109 depends on CRYPTO_DEV_FSL_CAAM_JR
@@ -136,4 +153,5 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
136 information in the CAAM driver. 153 information in the CAAM driver.
137 154
138config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 155config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
139 def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API 156 def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
157 CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 6554742f357e..9e2e98856b9b 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@ endif
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o 9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 14obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
@@ -16,3 +17,7 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
16caam-objs := ctrl.o 17caam-objs := ctrl.o
17caam_jr-objs := jr.o key_gen.o error.o 18caam_jr-objs := jr.o key_gen.o error.o
18caam_pkc-y := caampkc.o pkc_desc.o 19caam_pkc-y := caampkc.o pkc_desc.o
20ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
21 ccflags-y += -DCONFIG_CAAM_QI
22 caam-objs += qi.o
23endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9bc80eb06934..398807d1b77e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -266,8 +266,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
266 266
267 /* aead_encrypt shared descriptor */ 267 /* aead_encrypt shared descriptor */
268 desc = ctx->sh_desc_enc; 268 desc = ctx->sh_desc_enc;
269 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize, 269 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
270 is_rfc3686, nonce, ctx1_iv_off); 270 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
271 false);
271 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 272 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
272 desc_bytes(desc), DMA_TO_DEVICE); 273 desc_bytes(desc), DMA_TO_DEVICE);
273 274
@@ -299,7 +300,7 @@ skip_enc:
299 desc = ctx->sh_desc_dec; 300 desc = ctx->sh_desc_dec;
300 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, 301 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
301 ctx->authsize, alg->caam.geniv, is_rfc3686, 302 ctx->authsize, alg->caam.geniv, is_rfc3686,
302 nonce, ctx1_iv_off); 303 nonce, ctx1_iv_off, false);
303 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 304 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
304 desc_bytes(desc), DMA_TO_DEVICE); 305 desc_bytes(desc), DMA_TO_DEVICE);
305 306
@@ -333,7 +334,7 @@ skip_enc:
333 desc = ctx->sh_desc_enc; 334 desc = ctx->sh_desc_enc;
334 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, 335 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
335 ctx->authsize, is_rfc3686, nonce, 336 ctx->authsize, is_rfc3686, nonce,
336 ctx1_iv_off); 337 ctx1_iv_off, false);
337 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 338 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
338 desc_bytes(desc), DMA_TO_DEVICE); 339 desc_bytes(desc), DMA_TO_DEVICE);
339 340
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index f3f48c10b9d6..6f9c7ec0e339 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -265,17 +265,19 @@ static void init_sh_desc_key_aead(u32 * const desc,
265 * split key is to be used, the size of the split key itself is 265 * split key is to be used, the size of the split key itself is
266 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 266 * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
267 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 267 * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
268 * @ivsize: initialization vector size
268 * @icvsize: integrity check value (ICV) size (truncated or full) 269 * @icvsize: integrity check value (ICV) size (truncated or full)
269 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 270 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
270 * @nonce: pointer to rfc3686 nonce 271 * @nonce: pointer to rfc3686 nonce
271 * @ctx1_iv_off: IV offset in CONTEXT1 register 272 * @ctx1_iv_off: IV offset in CONTEXT1 register
273 * @is_qi: true when called from caam/qi
272 * 274 *
273 * Note: Requires an MDHA split key. 275 * Note: Requires an MDHA split key.
274 */ 276 */
275void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, 277void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
276 struct alginfo *adata, unsigned int icvsize, 278 struct alginfo *adata, unsigned int ivsize,
277 const bool is_rfc3686, u32 *nonce, 279 unsigned int icvsize, const bool is_rfc3686,
278 const u32 ctx1_iv_off) 280 u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
279{ 281{
280 /* Note: Context registers are saved. */ 282 /* Note: Context registers are saved. */
281 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 283 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@@ -284,6 +286,25 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
284 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | 286 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
285 OP_ALG_ENCRYPT); 287 OP_ALG_ENCRYPT);
286 288
289 if (is_qi) {
290 u32 *wait_load_cmd;
291
292 /* REG3 = assoclen */
293 append_seq_load(desc, 4, LDST_CLASS_DECO |
294 LDST_SRCDST_WORD_DECO_MATH3 |
295 (4 << LDST_OFFSET_SHIFT));
296
297 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
298 JUMP_COND_CALM | JUMP_COND_NCP |
299 JUMP_COND_NOP | JUMP_COND_NIP |
300 JUMP_COND_NIFP);
301 set_jump_tgt_here(desc, wait_load_cmd);
302
303 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
304 LDST_SRCDST_BYTE_CONTEXT |
305 (ctx1_iv_off << LDST_OFFSET_SHIFT));
306 }
307
287 /* Read and write assoclen bytes */ 308 /* Read and write assoclen bytes */
288 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 309 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
289 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 310 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -338,6 +359,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
338 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 359 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
339 * @nonce: pointer to rfc3686 nonce 360 * @nonce: pointer to rfc3686 nonce
340 * @ctx1_iv_off: IV offset in CONTEXT1 register 361 * @ctx1_iv_off: IV offset in CONTEXT1 register
362 * @is_qi: true when called from caam/qi
341 * 363 *
342 * Note: Requires an MDHA split key. 364 * Note: Requires an MDHA split key.
343 */ 365 */
@@ -345,7 +367,7 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
345 struct alginfo *adata, unsigned int ivsize, 367 struct alginfo *adata, unsigned int ivsize,
346 unsigned int icvsize, const bool geniv, 368 unsigned int icvsize, const bool geniv,
347 const bool is_rfc3686, u32 *nonce, 369 const bool is_rfc3686, u32 *nonce,
348 const u32 ctx1_iv_off) 370 const u32 ctx1_iv_off, const bool is_qi)
349{ 371{
350 /* Note: Context registers are saved. */ 372 /* Note: Context registers are saved. */
351 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 373 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@@ -354,6 +376,26 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
354 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | 376 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
355 OP_ALG_DECRYPT | OP_ALG_ICV_ON); 377 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
356 378
379 if (is_qi) {
380 u32 *wait_load_cmd;
381
382 /* REG3 = assoclen */
383 append_seq_load(desc, 4, LDST_CLASS_DECO |
384 LDST_SRCDST_WORD_DECO_MATH3 |
385 (4 << LDST_OFFSET_SHIFT));
386
387 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
388 JUMP_COND_CALM | JUMP_COND_NCP |
389 JUMP_COND_NOP | JUMP_COND_NIP |
390 JUMP_COND_NIFP);
391 set_jump_tgt_here(desc, wait_load_cmd);
392
393 if (!geniv)
394 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
395 LDST_SRCDST_BYTE_CONTEXT |
396 (ctx1_iv_off << LDST_OFFSET_SHIFT));
397 }
398
357 /* Read and write assoclen bytes */ 399 /* Read and write assoclen bytes */
358 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 400 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
359 if (geniv) 401 if (geniv)
@@ -423,21 +465,44 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
423 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 465 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
424 * @nonce: pointer to rfc3686 nonce 466 * @nonce: pointer to rfc3686 nonce
425 * @ctx1_iv_off: IV offset in CONTEXT1 register 467 * @ctx1_iv_off: IV offset in CONTEXT1 register
468 * @is_qi: true when called from caam/qi
426 * 469 *
427 * Note: Requires an MDHA split key. 470 * Note: Requires an MDHA split key.
428 */ 471 */
429void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, 472void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
430 struct alginfo *adata, unsigned int ivsize, 473 struct alginfo *adata, unsigned int ivsize,
431 unsigned int icvsize, const bool is_rfc3686, 474 unsigned int icvsize, const bool is_rfc3686,
432 u32 *nonce, const u32 ctx1_iv_off) 475 u32 *nonce, const u32 ctx1_iv_off,
476 const bool is_qi)
433{ 477{
434 u32 geniv, moveiv; 478 u32 geniv, moveiv;
435 479
436 /* Note: Context registers are saved. */ 480 /* Note: Context registers are saved. */
437 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 481 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
438 482
439 if (is_rfc3686) 483 if (is_qi) {
484 u32 *wait_load_cmd;
485
486 /* REG3 = assoclen */
487 append_seq_load(desc, 4, LDST_CLASS_DECO |
488 LDST_SRCDST_WORD_DECO_MATH3 |
489 (4 << LDST_OFFSET_SHIFT));
490
491 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
492 JUMP_COND_CALM | JUMP_COND_NCP |
493 JUMP_COND_NOP | JUMP_COND_NIP |
494 JUMP_COND_NIFP);
495 set_jump_tgt_here(desc, wait_load_cmd);
496 }
497
498 if (is_rfc3686) {
499 if (is_qi)
500 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
501 LDST_SRCDST_BYTE_CONTEXT |
502 (ctx1_iv_off << LDST_OFFSET_SHIFT));
503
440 goto copy_iv; 504 goto copy_iv;
505 }
441 506
442 /* Generate IV */ 507 /* Generate IV */
443 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | 508 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
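
Note the shape of the QI additions in all three constructors above: because caam/qi submits requests against a pre-built shared descriptor rather than a per-request job descriptor, per-request values such as assoclen and the IV arrive through the sequence input (they are the first S/G entries built by caamalg_qi.c below), and the shared descriptor must pull them in itself: load assoclen into DECO MATH3, stall on a conditional jump until that load settles, then seed the class-1 context with the IV. Purely as an illustration (this helper is not part of the patch), the repeated preamble could be hoisted with the same desc_constr.h primitives:

/* Hypothetical helper collecting the QI-only preamble shown above. */
static void append_qi_preamble(u32 * const desc, unsigned int ivsize,
			       const u32 ctx1_iv_off, bool load_iv)
{
	u32 *wait_load_cmd;

	/* REG3 = assoclen */
	append_seq_load(desc, 4, LDST_CLASS_DECO |
			LDST_SRCDST_WORD_DECO_MATH3 |
			(4 << LDST_OFFSET_SHIFT));

	/* Stall until the DECO load above has completed. */
	wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				    JUMP_COND_CALM | JUMP_COND_NCP |
				    JUMP_COND_NOP | JUMP_COND_NIP |
				    JUMP_COND_NIFP);
	set_jump_tgt_here(desc, wait_load_cmd);

	if (load_iv)
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
}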
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 95551737333a..8731e4a7ff05 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,6 +12,9 @@
12#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) 12#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
13#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) 13#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
14#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) 14#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
15#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
16#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
17#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
15 18
16/* Note: Nonce is counted in cdata.keylen */ 19/* Note: Nonce is counted in cdata.keylen */
17#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) 20#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
@@ -45,20 +48,22 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
45 unsigned int icvsize); 48 unsigned int icvsize);
46 49
47void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, 50void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
48 struct alginfo *adata, unsigned int icvsize, 51 struct alginfo *adata, unsigned int ivsize,
49 const bool is_rfc3686, u32 *nonce, 52 unsigned int icvsize, const bool is_rfc3686,
50 const u32 ctx1_iv_off); 53 u32 *nonce, const u32 ctx1_iv_off,
54 const bool is_qi);
51 55
52void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, 56void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
53 struct alginfo *adata, unsigned int ivsize, 57 struct alginfo *adata, unsigned int ivsize,
54 unsigned int icvsize, const bool geniv, 58 unsigned int icvsize, const bool geniv,
55 const bool is_rfc3686, u32 *nonce, 59 const bool is_rfc3686, u32 *nonce,
56 const u32 ctx1_iv_off); 60 const u32 ctx1_iv_off, const bool is_qi);
57 61
58void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, 62void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
59 struct alginfo *adata, unsigned int ivsize, 63 struct alginfo *adata, unsigned int ivsize,
60 unsigned int icvsize, const bool is_rfc3686, 64 unsigned int icvsize, const bool is_rfc3686,
61 u32 *nonce, const u32 ctx1_iv_off); 65 u32 *nonce, const u32 ctx1_iv_off,
66 const bool is_qi);
62 67
63void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, 68void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
64 unsigned int icvsize); 69 unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
new file mode 100644
index 000000000000..ea0e5b8b9171
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -0,0 +1,2387 @@
1/*
2 * Freescale FSL CAAM support for crypto API over QI backend.
3 * Based on caamalg.c
4 *
5 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6 * Copyright 2016-2017 NXP
7 */
8
9#include "compat.h"
10
11#include "regs.h"
12#include "intern.h"
13#include "desc_constr.h"
14#include "error.h"
15#include "sg_sw_sec4.h"
16#include "sg_sw_qm.h"
17#include "key_gen.h"
18#include "qi.h"
19#include "jr.h"
20#include "caamalg_desc.h"
21
22/*
23 * crypto alg
24 */
25#define CAAM_CRA_PRIORITY 2000
26/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
27#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
28 SHA512_DIGEST_SIZE * 2)
29
30#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
31 CAAM_MAX_KEY_SIZE)
32#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
33
34struct caam_alg_entry {
35 int class1_alg_type;
36 int class2_alg_type;
37 bool rfc3686;
38 bool geniv;
39};
40
41struct caam_aead_alg {
42 struct aead_alg aead;
43 struct caam_alg_entry caam;
44 bool registered;
45};
46
47/*
48 * per-session context
49 */
50struct caam_ctx {
51 struct device *jrdev;
52 u32 sh_desc_enc[DESC_MAX_USED_LEN];
53 u32 sh_desc_dec[DESC_MAX_USED_LEN];
54 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
55 u8 key[CAAM_MAX_KEY_SIZE];
56 dma_addr_t key_dma;
57 struct alginfo adata;
58 struct alginfo cdata;
59 unsigned int authsize;
60 struct device *qidev;
61 spinlock_t lock; /* Protects multiple init of driver context */
62 struct caam_drv_ctx *drv_ctx[NUM_OP];
63};
64
65static int aead_set_sh_desc(struct crypto_aead *aead)
66{
67 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
68 typeof(*alg), aead);
69 struct caam_ctx *ctx = crypto_aead_ctx(aead);
70 unsigned int ivsize = crypto_aead_ivsize(aead);
71 u32 ctx1_iv_off = 0;
72 u32 *nonce = NULL;
73 unsigned int data_len[2];
74 u32 inl_mask;
75 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
76 OP_ALG_AAI_CTR_MOD128);
77 const bool is_rfc3686 = alg->caam.rfc3686;
78
79 if (!ctx->cdata.keylen || !ctx->authsize)
80 return 0;
81
82 /*
83 * AES-CTR needs to load IV in CONTEXT1 reg
84 * at an offset of 128bits (16bytes)
85 * CONTEXT1[255:128] = IV
86 */
87 if (ctr_mode)
88 ctx1_iv_off = 16;
89
90 /*
91 * RFC3686 specific:
92 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
93 */
94 if (is_rfc3686) {
95 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
96 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
97 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
98 }
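	/*
	 * Worked example of the layout set up above (illustration only):
	 * for hmac(sha256) wrapped around rfc3686(ctr(aes)), ctx->key is
	 *
	 *   [ MDHA split key, padded to adata.keylen_pad ]
	 *   [ AES key: cdata.keylen - 4 bytes            ]
	 *   [ RFC3686 nonce: CTR_RFC3686_NONCE_SIZE = 4  ]
	 *
	 * (the nonce is counted in cdata.keylen, see caamalg_desc.h), and
	 * CONTEXT1[255:128] is filled as {NONCE, IV, COUNTER}, which is
	 * why the IV offset becomes 16 + 4 = 20 bytes.
	 */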
99
100 data_len[0] = ctx->adata.keylen_pad;
101 data_len[1] = ctx->cdata.keylen;
102
103 if (alg->caam.geniv)
104 goto skip_enc;
105
106 /* aead_encrypt shared descriptor */
107 if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
108 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
109 DESC_JOB_IO_LEN, data_len, &inl_mask,
110 ARRAY_SIZE(data_len)) < 0)
111 return -EINVAL;
112
113 if (inl_mask & 1)
114 ctx->adata.key_virt = ctx->key;
115 else
116 ctx->adata.key_dma = ctx->key_dma;
117
118 if (inl_mask & 2)
119 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
120 else
121 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
122
123 ctx->adata.key_inline = !!(inl_mask & 1);
124 ctx->cdata.key_inline = !!(inl_mask & 2);
125
126 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
127 ivsize, ctx->authsize, is_rfc3686, nonce,
128 ctx1_iv_off, true);
129
130skip_enc:
131 /* aead_decrypt shared descriptor */
132 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
133 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
134 DESC_JOB_IO_LEN, data_len, &inl_mask,
135 ARRAY_SIZE(data_len)) < 0)
136 return -EINVAL;
137
138 if (inl_mask & 1)
139 ctx->adata.key_virt = ctx->key;
140 else
141 ctx->adata.key_dma = ctx->key_dma;
142
143 if (inl_mask & 2)
144 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
145 else
146 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
147
148 ctx->adata.key_inline = !!(inl_mask & 1);
149 ctx->cdata.key_inline = !!(inl_mask & 2);
150
151 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
152 ivsize, ctx->authsize, alg->caam.geniv,
153 is_rfc3686, nonce, ctx1_iv_off, true);
154
155 if (!alg->caam.geniv)
156 goto skip_givenc;
157
158 /* aead_givencrypt shared descriptor */
159 if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
160 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
161 DESC_JOB_IO_LEN, data_len, &inl_mask,
162 ARRAY_SIZE(data_len)) < 0)
163 return -EINVAL;
164
165 if (inl_mask & 1)
166 ctx->adata.key_virt = ctx->key;
167 else
168 ctx->adata.key_dma = ctx->key_dma;
169
170 if (inl_mask & 2)
171 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
172 else
173 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
174
175 ctx->adata.key_inline = !!(inl_mask & 1);
176 ctx->cdata.key_inline = !!(inl_mask & 2);
177
178 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
179 ivsize, ctx->authsize, is_rfc3686, nonce,
180 ctx1_iv_off, true);
181
182skip_givenc:
183 return 0;
184}
185
186static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
187{
188 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
189
190 ctx->authsize = authsize;
191 aead_set_sh_desc(authenc);
192
193 return 0;
194}
195
196static int aead_setkey(struct crypto_aead *aead, const u8 *key,
197 unsigned int keylen)
198{
199 struct caam_ctx *ctx = crypto_aead_ctx(aead);
200 struct device *jrdev = ctx->jrdev;
201 struct crypto_authenc_keys keys;
202 int ret = 0;
203
204 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
205 goto badkey;
206
207#ifdef DEBUG
208 dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
209 keys.authkeylen + keys.enckeylen, keys.enckeylen,
210 keys.authkeylen);
211 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
212 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
213#endif
214
215 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
216 keys.authkeylen, CAAM_MAX_KEY_SIZE -
217 keys.enckeylen);
218 if (ret)
219 goto badkey;
220
221 /* append encryption key to auth split key */
222 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
223 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
224 keys.enckeylen, DMA_TO_DEVICE);
225#ifdef DEBUG
226 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
227 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
228 ctx->adata.keylen_pad + keys.enckeylen, 1);
229#endif
230
231 ctx->cdata.keylen = keys.enckeylen;
232
233 ret = aead_set_sh_desc(aead);
234 if (ret)
235 goto badkey;
236
237 /* Now update the driver contexts with the new shared descriptor */
238 if (ctx->drv_ctx[ENCRYPT]) {
239 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
240 ctx->sh_desc_enc);
241 if (ret) {
242 dev_err(jrdev, "driver enc context update failed\n");
243 goto badkey;
244 }
245 }
246
247 if (ctx->drv_ctx[DECRYPT]) {
248 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
249 ctx->sh_desc_dec);
250 if (ret) {
251 dev_err(jrdev, "driver dec context update failed\n");
252 goto badkey;
253 }
254 }
255
256 return ret;
257badkey:
258 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
259 return -EINVAL;
260}
261
262static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
263 const u8 *key, unsigned int keylen)
264{
265 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
266 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
267 const char *alg_name = crypto_tfm_alg_name(tfm);
268 struct device *jrdev = ctx->jrdev;
269 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
270 u32 ctx1_iv_off = 0;
271 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
272 OP_ALG_AAI_CTR_MOD128);
273 const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
274 int ret = 0;
275
276 memcpy(ctx->key, key, keylen);
277#ifdef DEBUG
278 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
279 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
280#endif
281 /*
282 * AES-CTR needs to load IV in CONTEXT1 reg
283 * at an offset of 128bits (16bytes)
284 * CONTEXT1[255:128] = IV
285 */
286 if (ctr_mode)
287 ctx1_iv_off = 16;
288
289 /*
290 * RFC3686 specific:
291 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
292 * | *key = {KEY, NONCE}
293 */
294 if (is_rfc3686) {
295 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
296 keylen -= CTR_RFC3686_NONCE_SIZE;
297 }
298
299 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
300 ctx->cdata.keylen = keylen;
301 ctx->cdata.key_virt = ctx->key;
302 ctx->cdata.key_inline = true;
303
304 /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
305 cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
306 is_rfc3686, ctx1_iv_off);
307 cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
308 is_rfc3686, ctx1_iv_off);
309 cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
310 ivsize, is_rfc3686, ctx1_iv_off);
311
312 /* Now update the driver contexts with the new shared descriptor */
313 if (ctx->drv_ctx[ENCRYPT]) {
314 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
315 ctx->sh_desc_enc);
316 if (ret) {
317 dev_err(jrdev, "driver enc context update failed\n");
318 goto badkey;
319 }
320 }
321
322 if (ctx->drv_ctx[DECRYPT]) {
323 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
324 ctx->sh_desc_dec);
325 if (ret) {
326 dev_err(jrdev, "driver dec context update failed\n");
327 goto badkey;
328 }
329 }
330
331 if (ctx->drv_ctx[GIVENCRYPT]) {
332 ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
333 ctx->sh_desc_givenc);
334 if (ret) {
335 dev_err(jrdev, "driver givenc context update failed\n");
336 goto badkey;
337 }
338 }
339
340 return ret;
341badkey:
342 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
343 return -EINVAL;
344}
345
346static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
347 const u8 *key, unsigned int keylen)
348{
349 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
350 struct device *jrdev = ctx->jrdev;
351 int ret = 0;
352
353 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
354 crypto_ablkcipher_set_flags(ablkcipher,
355 CRYPTO_TFM_RES_BAD_KEY_LEN);
356 dev_err(jrdev, "key size mismatch\n");
357 return -EINVAL;
358 }
359
360 memcpy(ctx->key, key, keylen);
361 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
362 ctx->cdata.keylen = keylen;
363 ctx->cdata.key_virt = ctx->key;
364 ctx->cdata.key_inline = true;
365
366 /* xts ablkcipher encrypt, decrypt shared descriptors */
367 cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
368 cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
369
370 /* Now update the driver contexts with the new shared descriptor */
371 if (ctx->drv_ctx[ENCRYPT]) {
372 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
373 ctx->sh_desc_enc);
374 if (ret) {
375 dev_err(jrdev, "driver enc context update failed\n");
376 goto badkey;
377 }
378 }
379
380 if (ctx->drv_ctx[DECRYPT]) {
381 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
382 ctx->sh_desc_dec);
383 if (ret) {
384 dev_err(jrdev, "driver dec context update failed\n");
385 goto badkey;
386 }
387 }
388
389 return ret;
390badkey:
391 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
392 return -EINVAL;
393}
394
395/*
396 * aead_edesc - s/w-extended aead descriptor
397 * @src_nents: number of segments in input scatterlist
398 * @dst_nents: number of segments in output scatterlist
399 * @iv_dma: dma address of iv for checking continuity and link table
400 * @qm_sg_bytes: length of dma mapped h/w link table
401 * @qm_sg_dma: bus physical mapped address of h/w link table
402 * @assoclen_dma: bus physical mapped address of req->assoclen
403 * @drv_req: driver-specific request structure
404 * @sgt: the h/w link table
405 */
406struct aead_edesc {
407 int src_nents;
408 int dst_nents;
409 dma_addr_t iv_dma;
410 int qm_sg_bytes;
411 dma_addr_t qm_sg_dma;
412 dma_addr_t assoclen_dma;
413 struct caam_drv_req drv_req;
414 struct qm_sg_entry sgt[0];
415};
416
417/*
418 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
419 * @src_nents: number of segments in input scatterlist
420 * @dst_nents: number of segments in output scatterlist
421 * @iv_dma: dma address of iv for checking continuity and link table
422 * @qm_sg_bytes: length of dma mapped h/w link table
423 * @qm_sg_dma: bus physical mapped address of h/w link table
424 * @drv_req: driver-specific request structure
425 * @sgt: the h/w link table
426 */
427struct ablkcipher_edesc {
428 int src_nents;
429 int dst_nents;
430 dma_addr_t iv_dma;
431 int qm_sg_bytes;
432 dma_addr_t qm_sg_dma;
433 struct caam_drv_req drv_req;
434 struct qm_sg_entry sgt[0];
435};
436
437static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
438 enum optype type)
439{
440 /*
441 * This function is called on the fast path with values of 'type'
442 * known at compile time. Invalid arguments are not expected and
443 * thus no checks are made.
444 */
445 struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
446 u32 *desc;
447
448 if (unlikely(!drv_ctx)) {
449 spin_lock(&ctx->lock);
450
451 /* Read again to check if some other core init drv_ctx */
452 drv_ctx = ctx->drv_ctx[type];
453 if (!drv_ctx) {
454 int cpu;
455
456 if (type == ENCRYPT)
457 desc = ctx->sh_desc_enc;
458 else if (type == DECRYPT)
459 desc = ctx->sh_desc_dec;
460 else /* (type == GIVENCRYPT) */
461 desc = ctx->sh_desc_givenc;
462
463 cpu = smp_processor_id();
464 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
465 if (likely(!IS_ERR_OR_NULL(drv_ctx)))
466 drv_ctx->op_type = type;
467
468 ctx->drv_ctx[type] = drv_ctx;
469 }
470
471 spin_unlock(&ctx->lock);
472 }
473
474 return drv_ctx;
475}
476
477static void caam_unmap(struct device *dev, struct scatterlist *src,
478 struct scatterlist *dst, int src_nents,
479 int dst_nents, dma_addr_t iv_dma, int ivsize,
480 enum optype op_type, dma_addr_t qm_sg_dma,
481 int qm_sg_bytes)
482{
483 if (dst != src) {
484 if (src_nents)
485 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
486 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
487 } else {
488 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
489 }
490
491 if (iv_dma)
492 dma_unmap_single(dev, iv_dma, ivsize,
493 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
494 DMA_TO_DEVICE);
495 if (qm_sg_bytes)
496 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
497}
498
499static void aead_unmap(struct device *dev,
500 struct aead_edesc *edesc,
501 struct aead_request *req)
502{
503 struct crypto_aead *aead = crypto_aead_reqtfm(req);
504 int ivsize = crypto_aead_ivsize(aead);
505
506 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
507 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
508 edesc->qm_sg_dma, edesc->qm_sg_bytes);
509 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
510}
511
512static void ablkcipher_unmap(struct device *dev,
513 struct ablkcipher_edesc *edesc,
514 struct ablkcipher_request *req)
515{
516 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
517 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
518
519 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
520 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
521 edesc->qm_sg_dma, edesc->qm_sg_bytes);
522}
523
524static void aead_done(struct caam_drv_req *drv_req, u32 status)
525{
526 struct device *qidev;
527 struct aead_edesc *edesc;
528 struct aead_request *aead_req = drv_req->app_ctx;
529 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
530 struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
531 int ecode = 0;
532
533 qidev = caam_ctx->qidev;
534
535 if (unlikely(status)) {
536 caam_jr_strstatus(qidev, status);
537 ecode = -EIO;
538 }
539
540 edesc = container_of(drv_req, typeof(*edesc), drv_req);
541 aead_unmap(qidev, edesc, aead_req);
542
543 aead_request_complete(aead_req, ecode);
544 qi_cache_free(edesc);
545}
546
547/*
548 * allocate and map the aead extended descriptor
549 */
550static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
551 bool encrypt)
552{
553 struct crypto_aead *aead = crypto_aead_reqtfm(req);
554 struct caam_ctx *ctx = crypto_aead_ctx(aead);
555 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
556 typeof(*alg), aead);
557 struct device *qidev = ctx->qidev;
558 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
559 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
560 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
561 struct aead_edesc *edesc;
562 dma_addr_t qm_sg_dma, iv_dma = 0;
563 int ivsize = 0;
564 unsigned int authsize = ctx->authsize;
565 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
566 int in_len, out_len;
567 struct qm_sg_entry *sg_table, *fd_sgt;
568 struct caam_drv_ctx *drv_ctx;
569 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
570
571 drv_ctx = get_drv_ctx(ctx, op_type);
572 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
573 return (struct aead_edesc *)drv_ctx;
574
575 /* allocate space for base edesc and hw desc commands, link tables */
576 edesc = qi_cache_alloc(GFP_DMA | flags);
577 if (unlikely(!edesc)) {
578 dev_err(qidev, "could not allocate extended descriptor\n");
579 return ERR_PTR(-ENOMEM);
580 }
581
582 if (likely(req->src == req->dst)) {
583 src_nents = sg_nents_for_len(req->src, req->assoclen +
584 req->cryptlen +
585 (encrypt ? authsize : 0));
586 if (unlikely(src_nents < 0)) {
587 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
588 req->assoclen + req->cryptlen +
589 (encrypt ? authsize : 0));
590 qi_cache_free(edesc);
591 return ERR_PTR(src_nents);
592 }
593
594 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
595 DMA_BIDIRECTIONAL);
596 if (unlikely(!mapped_src_nents)) {
597 dev_err(qidev, "unable to map source\n");
598 qi_cache_free(edesc);
599 return ERR_PTR(-ENOMEM);
600 }
601 } else {
602 src_nents = sg_nents_for_len(req->src, req->assoclen +
603 req->cryptlen);
604 if (unlikely(src_nents < 0)) {
605 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
606 req->assoclen + req->cryptlen);
607 qi_cache_free(edesc);
608 return ERR_PTR(src_nents);
609 }
610
611 dst_nents = sg_nents_for_len(req->dst, req->assoclen +
612 req->cryptlen +
613 (encrypt ? authsize :
614 (-authsize)));
615 if (unlikely(dst_nents < 0)) {
616 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
617 req->assoclen + req->cryptlen +
618 (encrypt ? authsize : (-authsize)));
619 qi_cache_free(edesc);
620 return ERR_PTR(dst_nents);
621 }
622
623 if (src_nents) {
624 mapped_src_nents = dma_map_sg(qidev, req->src,
625 src_nents, DMA_TO_DEVICE);
626 if (unlikely(!mapped_src_nents)) {
627 dev_err(qidev, "unable to map source\n");
628 qi_cache_free(edesc);
629 return ERR_PTR(-ENOMEM);
630 }
631 } else {
632 mapped_src_nents = 0;
633 }
634
635 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
636 DMA_FROM_DEVICE);
637 if (unlikely(!mapped_dst_nents)) {
638 dev_err(qidev, "unable to map destination\n");
639 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
640 qi_cache_free(edesc);
641 return ERR_PTR(-ENOMEM);
642 }
643 }
644
645 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
646 ivsize = crypto_aead_ivsize(aead);
647 iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
648 if (dma_mapping_error(qidev, iv_dma)) {
649 dev_err(qidev, "unable to map IV\n");
650 caam_unmap(qidev, req->src, req->dst, src_nents,
651 dst_nents, 0, 0, op_type, 0, 0);
652 qi_cache_free(edesc);
653 return ERR_PTR(-ENOMEM);
654 }
655 }
656
657 /*
658 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
659 * Input is not contiguous.
660 */
661 qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
662 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
663 sg_table = &edesc->sgt[0];
664 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
665
666 edesc->src_nents = src_nents;
667 edesc->dst_nents = dst_nents;
668 edesc->iv_dma = iv_dma;
669 edesc->drv_req.app_ctx = req;
670 edesc->drv_req.cbk = aead_done;
671 edesc->drv_req.drv_ctx = drv_ctx;
672
673 edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
674 DMA_TO_DEVICE);
675 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
676 dev_err(qidev, "unable to map assoclen\n");
677 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
678 iv_dma, ivsize, op_type, 0, 0);
679 qi_cache_free(edesc);
680 return ERR_PTR(-ENOMEM);
681 }
682
683 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
684 qm_sg_index++;
685 if (ivsize) {
686 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
687 qm_sg_index++;
688 }
689 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
690 qm_sg_index += mapped_src_nents;
691
692 if (mapped_dst_nents > 1)
693 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
694 qm_sg_index, 0);
695
696 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
697 if (dma_mapping_error(qidev, qm_sg_dma)) {
698 dev_err(qidev, "unable to map S/G table\n");
699 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
700 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
701 iv_dma, ivsize, op_type, 0, 0);
702 qi_cache_free(edesc);
703 return ERR_PTR(-ENOMEM);
704 }
705
706 edesc->qm_sg_dma = qm_sg_dma;
707 edesc->qm_sg_bytes = qm_sg_bytes;
708
709 out_len = req->assoclen + req->cryptlen +
710 (encrypt ? ctx->authsize : (-ctx->authsize));
711 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
712
713 fd_sgt = &edesc->drv_req.fd_sgt[0];
714 dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
715
716 if (req->dst == req->src) {
717 if (mapped_src_nents == 1)
718 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
719 out_len, 0);
720 else
721 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
722 (1 + !!ivsize) * sizeof(*sg_table),
723 out_len, 0);
724 } else if (mapped_dst_nents == 1) {
725 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
726 0);
727 } else {
728 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
729 qm_sg_index, out_len, 0);
730 }
731
732 return edesc;
733}
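/*
 * Illustration (not part of the patch): for an encrypt request with a
 * 2-entry mapped src, a 3-entry mapped dst and an IV, the table built
 * above is
 *
 *   sg_table[0]    = req->assoclen (4 bytes)
 *   sg_table[1]    = IV
 *   sg_table[2..3] = req->src segments (last entry marked final)
 *   sg_table[4..6] = req->dst segments (last entry marked final)
 *
 * fd_sgt[1] (the input side) then covers in_len = 4 + ivsize +
 * assoclen + cryptlen starting at sg_table[0], and fd_sgt[0] (the
 * output side) points at sg_table[4], i.e. at
 * qm_sg_dma + qm_sg_index * sizeof(*sg_table).
 */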
734
735static inline int aead_crypt(struct aead_request *req, bool encrypt)
736{
737 struct aead_edesc *edesc;
738 struct crypto_aead *aead = crypto_aead_reqtfm(req);
739 struct caam_ctx *ctx = crypto_aead_ctx(aead);
740 int ret;
741
742 if (unlikely(caam_congested))
743 return -EAGAIN;
744
745 /* allocate extended descriptor */
746 edesc = aead_edesc_alloc(req, encrypt);
747 if (IS_ERR_OR_NULL(edesc))
748 return PTR_ERR(edesc);
749
750 /* Create and submit job descriptor */
751 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
752 if (!ret) {
753 ret = -EINPROGRESS;
754 } else {
755 aead_unmap(ctx->qidev, edesc, req);
756 qi_cache_free(edesc);
757 }
758
759 return ret;
760}
761
762static int aead_encrypt(struct aead_request *req)
763{
764 return aead_crypt(req, true);
765}
766
767static int aead_decrypt(struct aead_request *req)
768{
769 return aead_crypt(req, false);
770}
771
772static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
773{
774 struct ablkcipher_edesc *edesc;
775 struct ablkcipher_request *req = drv_req->app_ctx;
776 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
777 struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
778 struct device *qidev = caam_ctx->qidev;
779#ifdef DEBUG
780 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
781
782 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
783#endif
784
785 edesc = container_of(drv_req, typeof(*edesc), drv_req);
786
787 if (status)
788 caam_jr_strstatus(qidev, status);
789
790#ifdef DEBUG
791 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
792 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
793 edesc->src_nents > 1 ? 100 : ivsize, 1);
794 dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
795 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
796 edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
797#endif
798
799 ablkcipher_unmap(qidev, edesc, req);
800 qi_cache_free(edesc);
801
802 ablkcipher_request_complete(req, status);
803}
804
805static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
806 *req, bool encrypt)
807{
808 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
809 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
810 struct device *qidev = ctx->qidev;
811 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
812 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
813 GFP_KERNEL : GFP_ATOMIC;
814 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
815 struct ablkcipher_edesc *edesc;
816 dma_addr_t iv_dma;
817 bool in_contig;
818 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
819 int dst_sg_idx, qm_sg_ents;
820 struct qm_sg_entry *sg_table, *fd_sgt;
821 struct caam_drv_ctx *drv_ctx;
822 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
823
824 drv_ctx = get_drv_ctx(ctx, op_type);
825 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
826 return (struct ablkcipher_edesc *)drv_ctx;
827
828 src_nents = sg_nents_for_len(req->src, req->nbytes);
829 if (unlikely(src_nents < 0)) {
830 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
831 req->nbytes);
832 return ERR_PTR(src_nents);
833 }
834
835 if (unlikely(req->src != req->dst)) {
836 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
837 if (unlikely(dst_nents < 0)) {
838 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
839 req->nbytes);
840 return ERR_PTR(dst_nents);
841 }
842
843 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
844 DMA_TO_DEVICE);
845 if (unlikely(!mapped_src_nents)) {
846 dev_err(qidev, "unable to map source\n");
847 return ERR_PTR(-ENOMEM);
848 }
849
850 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
851 DMA_FROM_DEVICE);
852 if (unlikely(!mapped_dst_nents)) {
853 dev_err(qidev, "unable to map destination\n");
854 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
855 return ERR_PTR(-ENOMEM);
856 }
857 } else {
858 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
859 DMA_BIDIRECTIONAL);
860 if (unlikely(!mapped_src_nents)) {
861 dev_err(qidev, "unable to map source\n");
862 return ERR_PTR(-ENOMEM);
863 }
864 }
865
866 iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
867 if (dma_mapping_error(qidev, iv_dma)) {
868 dev_err(qidev, "unable to map IV\n");
869 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
870 0, 0, 0, 0);
871 return ERR_PTR(-ENOMEM);
872 }
873
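	/*
	 * If the IV is immediately followed in DMA space by a single mapped
	 * source segment, the input is already contiguous and no input S/G
	 * table is needed.
	 */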
874 if (mapped_src_nents == 1 &&
875 iv_dma + ivsize == sg_dma_address(req->src)) {
876 in_contig = true;
877 qm_sg_ents = 0;
878 } else {
879 in_contig = false;
880 qm_sg_ents = 1 + mapped_src_nents;
881 }
882 dst_sg_idx = qm_sg_ents;
883
884 /* allocate space for base edesc and link tables */
885 edesc = qi_cache_alloc(GFP_DMA | flags);
886 if (unlikely(!edesc)) {
887 dev_err(qidev, "could not allocate extended descriptor\n");
888 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
889 iv_dma, ivsize, op_type, 0, 0);
890 return ERR_PTR(-ENOMEM);
891 }
892
893 edesc->src_nents = src_nents;
894 edesc->dst_nents = dst_nents;
895 edesc->iv_dma = iv_dma;
896 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
897 sg_table = &edesc->sgt[0];
898 edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
899 edesc->drv_req.app_ctx = req;
900 edesc->drv_req.cbk = ablkcipher_done;
901 edesc->drv_req.drv_ctx = drv_ctx;
902
903 if (!in_contig) {
904 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
905 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
906 }
907
908 if (mapped_dst_nents > 1)
909 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
910 dst_sg_idx, 0);
911
912 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
913 DMA_TO_DEVICE);
914 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
915 dev_err(qidev, "unable to map S/G table\n");
916 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
917 iv_dma, ivsize, op_type, 0, 0);
918 qi_cache_free(edesc);
919 return ERR_PTR(-ENOMEM);
920 }
921
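	/* fd_sgt[1] describes the input (IV followed by data), fd_sgt[0] the output. */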
922 fd_sgt = &edesc->drv_req.fd_sgt[0];
923
924 if (!in_contig)
925 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
926 ivsize + req->nbytes, 0);
927 else
928 dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
929 0);
930
931 if (req->src == req->dst) {
932 if (!in_contig)
933 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
934 sizeof(*sg_table), req->nbytes, 0);
935 else
936 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
937 req->nbytes, 0);
938 } else if (mapped_dst_nents > 1) {
939 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
940 sizeof(*sg_table), req->nbytes, 0);
941 } else {
942 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
943 req->nbytes, 0);
944 }
945
946 return edesc;
947}
948
949static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
950 struct skcipher_givcrypt_request *creq)
951{
952 struct ablkcipher_request *req = &creq->creq;
953 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
954 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
955 struct device *qidev = ctx->qidev;
956 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
957 CRYPTO_TFM_REQ_MAY_SLEEP)) ?
958 GFP_KERNEL : GFP_ATOMIC;
959 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
960 struct ablkcipher_edesc *edesc;
961 dma_addr_t iv_dma;
962 bool out_contig;
963 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
964 struct qm_sg_entry *sg_table, *fd_sgt;
965 int dst_sg_idx, qm_sg_ents;
966 struct caam_drv_ctx *drv_ctx;
967
968 drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
969 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
970 return (struct ablkcipher_edesc *)drv_ctx;
971
972 src_nents = sg_nents_for_len(req->src, req->nbytes);
973 if (unlikely(src_nents < 0)) {
974 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
975 req->nbytes);
976 return ERR_PTR(src_nents);
977 }
978
979 if (unlikely(req->src != req->dst)) {
980 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
981 if (unlikely(dst_nents < 0)) {
982 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
983 req->nbytes);
984 return ERR_PTR(dst_nents);
985 }
986
987 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
988 DMA_TO_DEVICE);
989 if (unlikely(!mapped_src_nents)) {
990 dev_err(qidev, "unable to map source\n");
991 return ERR_PTR(-ENOMEM);
992 }
993
994 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
995 DMA_FROM_DEVICE);
996 if (unlikely(!mapped_dst_nents)) {
997 dev_err(qidev, "unable to map destination\n");
998 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
999 return ERR_PTR(-ENOMEM);
1000 }
1001 } else {
1002 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1003 DMA_BIDIRECTIONAL);
1004 if (unlikely(!mapped_src_nents)) {
1005 dev_err(qidev, "unable to map source\n");
1006 return ERR_PTR(-ENOMEM);
1007 }
1008
1009 dst_nents = src_nents;
1010 mapped_dst_nents = src_nents;
1011 }
1012
1013 iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
1014 if (dma_mapping_error(qidev, iv_dma)) {
1015 dev_err(qidev, "unable to map IV\n");
1016 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1017 0, 0, 0, 0);
1018 return ERR_PTR(-ENOMEM);
1019 }
1020
1021 qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1022 dst_sg_idx = qm_sg_ents;
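	/*
	 * The generated IV and a single destination segment form a contiguous
	 * output when adjacent in DMA space; otherwise an output S/G table
	 * (IV followed by the destination segments) is built.
	 */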
1023 if (mapped_dst_nents == 1 &&
1024 iv_dma + ivsize == sg_dma_address(req->dst)) {
1025 out_contig = true;
1026 } else {
1027 out_contig = false;
1028 qm_sg_ents += 1 + mapped_dst_nents;
1029 }
1030
1031 /* allocate space for base edesc and link tables */
1032 edesc = qi_cache_alloc(GFP_DMA | flags);
1033 if (!edesc) {
1034 dev_err(qidev, "could not allocate extended descriptor\n");
1035 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1036 iv_dma, ivsize, GIVENCRYPT, 0, 0);
1037 return ERR_PTR(-ENOMEM);
1038 }
1039
1040 edesc->src_nents = src_nents;
1041 edesc->dst_nents = dst_nents;
1042 edesc->iv_dma = iv_dma;
1043 sg_table = &edesc->sgt[0];
1044 edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1045 edesc->drv_req.app_ctx = req;
1046 edesc->drv_req.cbk = ablkcipher_done;
1047 edesc->drv_req.drv_ctx = drv_ctx;
1048
1049 if (mapped_src_nents > 1)
1050 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1051
1052 if (!out_contig) {
1053 dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1054 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1055 dst_sg_idx + 1, 0);
1056 }
1057
1058 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1059 DMA_TO_DEVICE);
1060 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1061 dev_err(qidev, "unable to map S/G table\n");
1062 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1063 iv_dma, ivsize, GIVENCRYPT, 0, 0);
1064 qi_cache_free(edesc);
1065 return ERR_PTR(-ENOMEM);
1066 }
1067
1068 fd_sgt = &edesc->drv_req.fd_sgt[0];
1069
1070 if (mapped_src_nents > 1)
1071 dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
1072 0);
1073 else
1074 dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1075 req->nbytes, 0);
1076
1077 if (!out_contig)
1078 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1079 sizeof(*sg_table), ivsize + req->nbytes,
1080 0);
1081 else
1082 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1083 ivsize + req->nbytes, 0);
1084
1085 return edesc;
1086}
1087
1088static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1089{
1090 struct ablkcipher_edesc *edesc;
1091 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1092 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1093 int ret;
1094
1095 if (unlikely(caam_congested))
1096 return -EAGAIN;
1097
1098 /* allocate extended descriptor */
1099 edesc = ablkcipher_edesc_alloc(req, encrypt);
1100 if (IS_ERR(edesc))
1101 return PTR_ERR(edesc);
1102
1103 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1104 if (!ret) {
1105 ret = -EINPROGRESS;
1106 } else {
1107 ablkcipher_unmap(ctx->qidev, edesc, req);
1108 qi_cache_free(edesc);
1109 }
1110
1111 return ret;
1112}
1113
1114static int ablkcipher_encrypt(struct ablkcipher_request *req)
1115{
1116 return ablkcipher_crypt(req, true);
1117}
1118
1119static int ablkcipher_decrypt(struct ablkcipher_request *req)
1120{
1121 return ablkcipher_crypt(req, false);
1122}
1123
1124static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1125{
1126 struct ablkcipher_request *req = &creq->creq;
1127 struct ablkcipher_edesc *edesc;
1128 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1129 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1130 int ret;
1131
1132 if (unlikely(caam_congested))
1133 return -EAGAIN;
1134
1135 /* allocate extended descriptor */
1136 edesc = ablkcipher_giv_edesc_alloc(creq);
1137 if (IS_ERR(edesc))
1138 return PTR_ERR(edesc);
1139
1140 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1141 if (!ret) {
1142 ret = -EINPROGRESS;
1143 } else {
1144 ablkcipher_unmap(ctx->qidev, edesc, req);
1145 qi_cache_free(edesc);
1146 }
1147
1148 return ret;
1149}
1150
1151#define template_ablkcipher template_u.ablkcipher
1152struct caam_alg_template {
1153 char name[CRYPTO_MAX_ALG_NAME];
1154 char driver_name[CRYPTO_MAX_ALG_NAME];
1155 unsigned int blocksize;
1156 u32 type;
1157 union {
1158 struct ablkcipher_alg ablkcipher;
1159 } template_u;
1160 u32 class1_alg_type;
1161 u32 class2_alg_type;
1162};
1163
1164static struct caam_alg_template driver_algs[] = {
1165 /* ablkcipher descriptor */
1166 {
1167 .name = "cbc(aes)",
1168 .driver_name = "cbc-aes-caam-qi",
1169 .blocksize = AES_BLOCK_SIZE,
1170 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1171 .template_ablkcipher = {
1172 .setkey = ablkcipher_setkey,
1173 .encrypt = ablkcipher_encrypt,
1174 .decrypt = ablkcipher_decrypt,
1175 .givencrypt = ablkcipher_givencrypt,
1176 .geniv = "<built-in>",
1177 .min_keysize = AES_MIN_KEY_SIZE,
1178 .max_keysize = AES_MAX_KEY_SIZE,
1179 .ivsize = AES_BLOCK_SIZE,
1180 },
1181 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1182 },
1183 {
1184 .name = "cbc(des3_ede)",
1185 .driver_name = "cbc-3des-caam-qi",
1186 .blocksize = DES3_EDE_BLOCK_SIZE,
1187 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1188 .template_ablkcipher = {
1189 .setkey = ablkcipher_setkey,
1190 .encrypt = ablkcipher_encrypt,
1191 .decrypt = ablkcipher_decrypt,
1192 .givencrypt = ablkcipher_givencrypt,
1193 .geniv = "<built-in>",
1194 .min_keysize = DES3_EDE_KEY_SIZE,
1195 .max_keysize = DES3_EDE_KEY_SIZE,
1196 .ivsize = DES3_EDE_BLOCK_SIZE,
1197 },
1198 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1199 },
1200 {
1201 .name = "cbc(des)",
1202 .driver_name = "cbc-des-caam-qi",
1203 .blocksize = DES_BLOCK_SIZE,
1204 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1205 .template_ablkcipher = {
1206 .setkey = ablkcipher_setkey,
1207 .encrypt = ablkcipher_encrypt,
1208 .decrypt = ablkcipher_decrypt,
1209 .givencrypt = ablkcipher_givencrypt,
1210 .geniv = "<built-in>",
1211 .min_keysize = DES_KEY_SIZE,
1212 .max_keysize = DES_KEY_SIZE,
1213 .ivsize = DES_BLOCK_SIZE,
1214 },
1215 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1216 },
1217 {
1218 .name = "ctr(aes)",
1219 .driver_name = "ctr-aes-caam-qi",
1220 .blocksize = 1,
1221 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1222 .template_ablkcipher = {
1223 .setkey = ablkcipher_setkey,
1224 .encrypt = ablkcipher_encrypt,
1225 .decrypt = ablkcipher_decrypt,
1226 .geniv = "chainiv",
1227 .min_keysize = AES_MIN_KEY_SIZE,
1228 .max_keysize = AES_MAX_KEY_SIZE,
1229 .ivsize = AES_BLOCK_SIZE,
1230 },
1231 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1232 },
1233 {
1234 .name = "rfc3686(ctr(aes))",
1235 .driver_name = "rfc3686-ctr-aes-caam-qi",
1236 .blocksize = 1,
1237 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1238 .template_ablkcipher = {
1239 .setkey = ablkcipher_setkey,
1240 .encrypt = ablkcipher_encrypt,
1241 .decrypt = ablkcipher_decrypt,
1242 .givencrypt = ablkcipher_givencrypt,
1243 .geniv = "<built-in>",
1244 .min_keysize = AES_MIN_KEY_SIZE +
1245 CTR_RFC3686_NONCE_SIZE,
1246 .max_keysize = AES_MAX_KEY_SIZE +
1247 CTR_RFC3686_NONCE_SIZE,
1248 .ivsize = CTR_RFC3686_IV_SIZE,
1249 },
1250 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1251 },
1252 {
1253 .name = "xts(aes)",
1254 .driver_name = "xts-aes-caam-qi",
1255 .blocksize = AES_BLOCK_SIZE,
1256 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1257 .template_ablkcipher = {
1258 .setkey = xts_ablkcipher_setkey,
1259 .encrypt = ablkcipher_encrypt,
1260 .decrypt = ablkcipher_decrypt,
1261 .geniv = "eseqiv",
1262 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1263 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1264 .ivsize = AES_BLOCK_SIZE,
1265 },
1266 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1267 },
1268};
1269
1270static struct caam_aead_alg driver_aeads[] = {
1271 /* single-pass ipsec_esp descriptor */
1272 {
1273 .aead = {
1274 .base = {
1275 .cra_name = "authenc(hmac(md5),cbc(aes))",
1276 .cra_driver_name = "authenc-hmac-md5-"
1277 "cbc-aes-caam-qi",
1278 .cra_blocksize = AES_BLOCK_SIZE,
1279 },
1280 .setkey = aead_setkey,
1281 .setauthsize = aead_setauthsize,
1282 .encrypt = aead_encrypt,
1283 .decrypt = aead_decrypt,
1284 .ivsize = AES_BLOCK_SIZE,
1285 .maxauthsize = MD5_DIGEST_SIZE,
1286 },
1287 .caam = {
1288 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1289 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1290 OP_ALG_AAI_HMAC_PRECOMP,
1291 }
1292 },
1293 {
1294 .aead = {
1295 .base = {
1296 .cra_name = "echainiv(authenc(hmac(md5),"
1297 "cbc(aes)))",
1298 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1299 "cbc-aes-caam-qi",
1300 .cra_blocksize = AES_BLOCK_SIZE,
1301 },
1302 .setkey = aead_setkey,
1303 .setauthsize = aead_setauthsize,
1304 .encrypt = aead_encrypt,
1305 .decrypt = aead_decrypt,
1306 .ivsize = AES_BLOCK_SIZE,
1307 .maxauthsize = MD5_DIGEST_SIZE,
1308 },
1309 .caam = {
1310 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1311 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1312 OP_ALG_AAI_HMAC_PRECOMP,
1313 .geniv = true,
1314 }
1315 },
1316 {
1317 .aead = {
1318 .base = {
1319 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1320 .cra_driver_name = "authenc-hmac-sha1-"
1321 "cbc-aes-caam-qi",
1322 .cra_blocksize = AES_BLOCK_SIZE,
1323 },
1324 .setkey = aead_setkey,
1325 .setauthsize = aead_setauthsize,
1326 .encrypt = aead_encrypt,
1327 .decrypt = aead_decrypt,
1328 .ivsize = AES_BLOCK_SIZE,
1329 .maxauthsize = SHA1_DIGEST_SIZE,
1330 },
1331 .caam = {
1332 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1333 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1334 OP_ALG_AAI_HMAC_PRECOMP,
1335 }
1336 },
1337 {
1338 .aead = {
1339 .base = {
1340 .cra_name = "echainiv(authenc(hmac(sha1),"
1341 "cbc(aes)))",
1342 .cra_driver_name = "echainiv-authenc-"
1343 "hmac-sha1-cbc-aes-caam-qi",
1344 .cra_blocksize = AES_BLOCK_SIZE,
1345 },
1346 .setkey = aead_setkey,
1347 .setauthsize = aead_setauthsize,
1348 .encrypt = aead_encrypt,
1349 .decrypt = aead_decrypt,
1350 .ivsize = AES_BLOCK_SIZE,
1351 .maxauthsize = SHA1_DIGEST_SIZE,
1352 },
1353 .caam = {
1354 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1355 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1356 OP_ALG_AAI_HMAC_PRECOMP,
1357 .geniv = true,
1358 },
1359 },
1360 {
1361 .aead = {
1362 .base = {
1363 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1364 .cra_driver_name = "authenc-hmac-sha224-"
1365 "cbc-aes-caam-qi",
1366 .cra_blocksize = AES_BLOCK_SIZE,
1367 },
1368 .setkey = aead_setkey,
1369 .setauthsize = aead_setauthsize,
1370 .encrypt = aead_encrypt,
1371 .decrypt = aead_decrypt,
1372 .ivsize = AES_BLOCK_SIZE,
1373 .maxauthsize = SHA224_DIGEST_SIZE,
1374 },
1375 .caam = {
1376 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1377 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1378 OP_ALG_AAI_HMAC_PRECOMP,
1379 }
1380 },
1381 {
1382 .aead = {
1383 .base = {
1384 .cra_name = "echainiv(authenc(hmac(sha224),"
1385 "cbc(aes)))",
1386 .cra_driver_name = "echainiv-authenc-"
1387 "hmac-sha224-cbc-aes-caam-qi",
1388 .cra_blocksize = AES_BLOCK_SIZE,
1389 },
1390 .setkey = aead_setkey,
1391 .setauthsize = aead_setauthsize,
1392 .encrypt = aead_encrypt,
1393 .decrypt = aead_decrypt,
1394 .ivsize = AES_BLOCK_SIZE,
1395 .maxauthsize = SHA224_DIGEST_SIZE,
1396 },
1397 .caam = {
1398 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1399 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1400 OP_ALG_AAI_HMAC_PRECOMP,
1401 .geniv = true,
1402 }
1403 },
1404 {
1405 .aead = {
1406 .base = {
1407 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1408 .cra_driver_name = "authenc-hmac-sha256-"
1409 "cbc-aes-caam-qi",
1410 .cra_blocksize = AES_BLOCK_SIZE,
1411 },
1412 .setkey = aead_setkey,
1413 .setauthsize = aead_setauthsize,
1414 .encrypt = aead_encrypt,
1415 .decrypt = aead_decrypt,
1416 .ivsize = AES_BLOCK_SIZE,
1417 .maxauthsize = SHA256_DIGEST_SIZE,
1418 },
1419 .caam = {
1420 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1421 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1422 OP_ALG_AAI_HMAC_PRECOMP,
1423 }
1424 },
1425 {
1426 .aead = {
1427 .base = {
1428 .cra_name = "echainiv(authenc(hmac(sha256),"
1429 "cbc(aes)))",
1430 .cra_driver_name = "echainiv-authenc-"
1431 "hmac-sha256-cbc-aes-"
1432 "caam-qi",
1433 .cra_blocksize = AES_BLOCK_SIZE,
1434 },
1435 .setkey = aead_setkey,
1436 .setauthsize = aead_setauthsize,
1437 .encrypt = aead_encrypt,
1438 .decrypt = aead_decrypt,
1439 .ivsize = AES_BLOCK_SIZE,
1440 .maxauthsize = SHA256_DIGEST_SIZE,
1441 },
1442 .caam = {
1443 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1444 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1445 OP_ALG_AAI_HMAC_PRECOMP,
1446 .geniv = true,
1447 }
1448 },
1449 {
1450 .aead = {
1451 .base = {
1452 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1453 .cra_driver_name = "authenc-hmac-sha384-"
1454 "cbc-aes-caam-qi",
1455 .cra_blocksize = AES_BLOCK_SIZE,
1456 },
1457 .setkey = aead_setkey,
1458 .setauthsize = aead_setauthsize,
1459 .encrypt = aead_encrypt,
1460 .decrypt = aead_decrypt,
1461 .ivsize = AES_BLOCK_SIZE,
1462 .maxauthsize = SHA384_DIGEST_SIZE,
1463 },
1464 .caam = {
1465 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1466 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1467 OP_ALG_AAI_HMAC_PRECOMP,
1468 }
1469 },
1470 {
1471 .aead = {
1472 .base = {
1473 .cra_name = "echainiv(authenc(hmac(sha384),"
1474 "cbc(aes)))",
1475 .cra_driver_name = "echainiv-authenc-"
1476 "hmac-sha384-cbc-aes-"
1477 "caam-qi",
1478 .cra_blocksize = AES_BLOCK_SIZE,
1479 },
1480 .setkey = aead_setkey,
1481 .setauthsize = aead_setauthsize,
1482 .encrypt = aead_encrypt,
1483 .decrypt = aead_decrypt,
1484 .ivsize = AES_BLOCK_SIZE,
1485 .maxauthsize = SHA384_DIGEST_SIZE,
1486 },
1487 .caam = {
1488 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1489 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1490 OP_ALG_AAI_HMAC_PRECOMP,
1491 .geniv = true,
1492 }
1493 },
1494 {
1495 .aead = {
1496 .base = {
1497 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1498 .cra_driver_name = "authenc-hmac-sha512-"
1499 "cbc-aes-caam-qi",
1500 .cra_blocksize = AES_BLOCK_SIZE,
1501 },
1502 .setkey = aead_setkey,
1503 .setauthsize = aead_setauthsize,
1504 .encrypt = aead_encrypt,
1505 .decrypt = aead_decrypt,
1506 .ivsize = AES_BLOCK_SIZE,
1507 .maxauthsize = SHA512_DIGEST_SIZE,
1508 },
1509 .caam = {
1510 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1511 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1512 OP_ALG_AAI_HMAC_PRECOMP,
1513 }
1514 },
1515 {
1516 .aead = {
1517 .base = {
1518 .cra_name = "echainiv(authenc(hmac(sha512),"
1519 "cbc(aes)))",
1520 .cra_driver_name = "echainiv-authenc-"
1521 "hmac-sha512-cbc-aes-"
1522 "caam-qi",
1523 .cra_blocksize = AES_BLOCK_SIZE,
1524 },
1525 .setkey = aead_setkey,
1526 .setauthsize = aead_setauthsize,
1527 .encrypt = aead_encrypt,
1528 .decrypt = aead_decrypt,
1529 .ivsize = AES_BLOCK_SIZE,
1530 .maxauthsize = SHA512_DIGEST_SIZE,
1531 },
1532 .caam = {
1533 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1534 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1535 OP_ALG_AAI_HMAC_PRECOMP,
1536 .geniv = true,
1537 }
1538 },
1539 {
1540 .aead = {
1541 .base = {
1542 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1543 .cra_driver_name = "authenc-hmac-md5-"
1544 "cbc-des3_ede-caam-qi",
1545 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1546 },
1547 .setkey = aead_setkey,
1548 .setauthsize = aead_setauthsize,
1549 .encrypt = aead_encrypt,
1550 .decrypt = aead_decrypt,
1551 .ivsize = DES3_EDE_BLOCK_SIZE,
1552 .maxauthsize = MD5_DIGEST_SIZE,
1553 },
1554 .caam = {
1555 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1556 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1557 OP_ALG_AAI_HMAC_PRECOMP,
1558 }
1559 },
1560 {
1561 .aead = {
1562 .base = {
1563 .cra_name = "echainiv(authenc(hmac(md5),"
1564 "cbc(des3_ede)))",
1565 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1566 "cbc-des3_ede-caam-qi",
1567 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1568 },
1569 .setkey = aead_setkey,
1570 .setauthsize = aead_setauthsize,
1571 .encrypt = aead_encrypt,
1572 .decrypt = aead_decrypt,
1573 .ivsize = DES3_EDE_BLOCK_SIZE,
1574 .maxauthsize = MD5_DIGEST_SIZE,
1575 },
1576 .caam = {
1577 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1578 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1579 OP_ALG_AAI_HMAC_PRECOMP,
1580 .geniv = true,
1581 }
1582 },
1583 {
1584 .aead = {
1585 .base = {
1586 .cra_name = "authenc(hmac(sha1),"
1587 "cbc(des3_ede))",
1588 .cra_driver_name = "authenc-hmac-sha1-"
1589 "cbc-des3_ede-caam-qi",
1590 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1591 },
1592 .setkey = aead_setkey,
1593 .setauthsize = aead_setauthsize,
1594 .encrypt = aead_encrypt,
1595 .decrypt = aead_decrypt,
1596 .ivsize = DES3_EDE_BLOCK_SIZE,
1597 .maxauthsize = SHA1_DIGEST_SIZE,
1598 },
1599 .caam = {
1600 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1601 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1602 OP_ALG_AAI_HMAC_PRECOMP,
1603 },
1604 },
1605 {
1606 .aead = {
1607 .base = {
1608 .cra_name = "echainiv(authenc(hmac(sha1),"
1609 "cbc(des3_ede)))",
1610 .cra_driver_name = "echainiv-authenc-"
1611 "hmac-sha1-"
1612 "cbc-des3_ede-caam-qi",
1613 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1614 },
1615 .setkey = aead_setkey,
1616 .setauthsize = aead_setauthsize,
1617 .encrypt = aead_encrypt,
1618 .decrypt = aead_decrypt,
1619 .ivsize = DES3_EDE_BLOCK_SIZE,
1620 .maxauthsize = SHA1_DIGEST_SIZE,
1621 },
1622 .caam = {
1623 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1624 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1625 OP_ALG_AAI_HMAC_PRECOMP,
1626 .geniv = true,
1627 }
1628 },
1629 {
1630 .aead = {
1631 .base = {
1632 .cra_name = "authenc(hmac(sha224),"
1633 "cbc(des3_ede))",
1634 .cra_driver_name = "authenc-hmac-sha224-"
1635 "cbc-des3_ede-caam-qi",
1636 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1637 },
1638 .setkey = aead_setkey,
1639 .setauthsize = aead_setauthsize,
1640 .encrypt = aead_encrypt,
1641 .decrypt = aead_decrypt,
1642 .ivsize = DES3_EDE_BLOCK_SIZE,
1643 .maxauthsize = SHA224_DIGEST_SIZE,
1644 },
1645 .caam = {
1646 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1647 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1648 OP_ALG_AAI_HMAC_PRECOMP,
1649 },
1650 },
1651 {
1652 .aead = {
1653 .base = {
1654 .cra_name = "echainiv(authenc(hmac(sha224),"
1655 "cbc(des3_ede)))",
1656 .cra_driver_name = "echainiv-authenc-"
1657 "hmac-sha224-"
1658 "cbc-des3_ede-caam-qi",
1659 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1660 },
1661 .setkey = aead_setkey,
1662 .setauthsize = aead_setauthsize,
1663 .encrypt = aead_encrypt,
1664 .decrypt = aead_decrypt,
1665 .ivsize = DES3_EDE_BLOCK_SIZE,
1666 .maxauthsize = SHA224_DIGEST_SIZE,
1667 },
1668 .caam = {
1669 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1670 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1671 OP_ALG_AAI_HMAC_PRECOMP,
1672 .geniv = true,
1673 }
1674 },
1675 {
1676 .aead = {
1677 .base = {
1678 .cra_name = "authenc(hmac(sha256),"
1679 "cbc(des3_ede))",
1680 .cra_driver_name = "authenc-hmac-sha256-"
1681 "cbc-des3_ede-caam-qi",
1682 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1683 },
1684 .setkey = aead_setkey,
1685 .setauthsize = aead_setauthsize,
1686 .encrypt = aead_encrypt,
1687 .decrypt = aead_decrypt,
1688 .ivsize = DES3_EDE_BLOCK_SIZE,
1689 .maxauthsize = SHA256_DIGEST_SIZE,
1690 },
1691 .caam = {
1692 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1693 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1694 OP_ALG_AAI_HMAC_PRECOMP,
1695 },
1696 },
1697 {
1698 .aead = {
1699 .base = {
1700 .cra_name = "echainiv(authenc(hmac(sha256),"
1701 "cbc(des3_ede)))",
1702 .cra_driver_name = "echainiv-authenc-"
1703 "hmac-sha256-"
1704 "cbc-des3_ede-caam-qi",
1705 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1706 },
1707 .setkey = aead_setkey,
1708 .setauthsize = aead_setauthsize,
1709 .encrypt = aead_encrypt,
1710 .decrypt = aead_decrypt,
1711 .ivsize = DES3_EDE_BLOCK_SIZE,
1712 .maxauthsize = SHA256_DIGEST_SIZE,
1713 },
1714 .caam = {
1715 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1716 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1717 OP_ALG_AAI_HMAC_PRECOMP,
1718 .geniv = true,
1719 }
1720 },
1721 {
1722 .aead = {
1723 .base = {
1724 .cra_name = "authenc(hmac(sha384),"
1725 "cbc(des3_ede))",
1726 .cra_driver_name = "authenc-hmac-sha384-"
1727 "cbc-des3_ede-caam-qi",
1728 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1729 },
1730 .setkey = aead_setkey,
1731 .setauthsize = aead_setauthsize,
1732 .encrypt = aead_encrypt,
1733 .decrypt = aead_decrypt,
1734 .ivsize = DES3_EDE_BLOCK_SIZE,
1735 .maxauthsize = SHA384_DIGEST_SIZE,
1736 },
1737 .caam = {
1738 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1739 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1740 OP_ALG_AAI_HMAC_PRECOMP,
1741 },
1742 },
1743 {
1744 .aead = {
1745 .base = {
1746 .cra_name = "echainiv(authenc(hmac(sha384),"
1747 "cbc(des3_ede)))",
1748 .cra_driver_name = "echainiv-authenc-"
1749 "hmac-sha384-"
1750 "cbc-des3_ede-caam-qi",
1751 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1752 },
1753 .setkey = aead_setkey,
1754 .setauthsize = aead_setauthsize,
1755 .encrypt = aead_encrypt,
1756 .decrypt = aead_decrypt,
1757 .ivsize = DES3_EDE_BLOCK_SIZE,
1758 .maxauthsize = SHA384_DIGEST_SIZE,
1759 },
1760 .caam = {
1761 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1762 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1763 OP_ALG_AAI_HMAC_PRECOMP,
1764 .geniv = true,
1765 }
1766 },
1767 {
1768 .aead = {
1769 .base = {
1770 .cra_name = "authenc(hmac(sha512),"
1771 "cbc(des3_ede))",
1772 .cra_driver_name = "authenc-hmac-sha512-"
1773 "cbc-des3_ede-caam-qi",
1774 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1775 },
1776 .setkey = aead_setkey,
1777 .setauthsize = aead_setauthsize,
1778 .encrypt = aead_encrypt,
1779 .decrypt = aead_decrypt,
1780 .ivsize = DES3_EDE_BLOCK_SIZE,
1781 .maxauthsize = SHA512_DIGEST_SIZE,
1782 },
1783 .caam = {
1784 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1785 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1786 OP_ALG_AAI_HMAC_PRECOMP,
1787 },
1788 },
1789 {
1790 .aead = {
1791 .base = {
1792 .cra_name = "echainiv(authenc(hmac(sha512),"
1793 "cbc(des3_ede)))",
1794 .cra_driver_name = "echainiv-authenc-"
1795 "hmac-sha512-"
1796 "cbc-des3_ede-caam-qi",
1797 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1798 },
1799 .setkey = aead_setkey,
1800 .setauthsize = aead_setauthsize,
1801 .encrypt = aead_encrypt,
1802 .decrypt = aead_decrypt,
1803 .ivsize = DES3_EDE_BLOCK_SIZE,
1804 .maxauthsize = SHA512_DIGEST_SIZE,
1805 },
1806 .caam = {
1807 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1808 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1809 OP_ALG_AAI_HMAC_PRECOMP,
1810 .geniv = true,
1811 }
1812 },
1813 {
1814 .aead = {
1815 .base = {
1816 .cra_name = "authenc(hmac(md5),cbc(des))",
1817 .cra_driver_name = "authenc-hmac-md5-"
1818 "cbc-des-caam-qi",
1819 .cra_blocksize = DES_BLOCK_SIZE,
1820 },
1821 .setkey = aead_setkey,
1822 .setauthsize = aead_setauthsize,
1823 .encrypt = aead_encrypt,
1824 .decrypt = aead_decrypt,
1825 .ivsize = DES_BLOCK_SIZE,
1826 .maxauthsize = MD5_DIGEST_SIZE,
1827 },
1828 .caam = {
1829 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1830 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1831 OP_ALG_AAI_HMAC_PRECOMP,
1832 },
1833 },
1834 {
1835 .aead = {
1836 .base = {
1837 .cra_name = "echainiv(authenc(hmac(md5),"
1838 "cbc(des)))",
1839 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1840 "cbc-des-caam-qi",
1841 .cra_blocksize = DES_BLOCK_SIZE,
1842 },
1843 .setkey = aead_setkey,
1844 .setauthsize = aead_setauthsize,
1845 .encrypt = aead_encrypt,
1846 .decrypt = aead_decrypt,
1847 .ivsize = DES_BLOCK_SIZE,
1848 .maxauthsize = MD5_DIGEST_SIZE,
1849 },
1850 .caam = {
1851 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1852 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1853 OP_ALG_AAI_HMAC_PRECOMP,
1854 .geniv = true,
1855 }
1856 },
1857 {
1858 .aead = {
1859 .base = {
1860 .cra_name = "authenc(hmac(sha1),cbc(des))",
1861 .cra_driver_name = "authenc-hmac-sha1-"
1862 "cbc-des-caam-qi",
1863 .cra_blocksize = DES_BLOCK_SIZE,
1864 },
1865 .setkey = aead_setkey,
1866 .setauthsize = aead_setauthsize,
1867 .encrypt = aead_encrypt,
1868 .decrypt = aead_decrypt,
1869 .ivsize = DES_BLOCK_SIZE,
1870 .maxauthsize = SHA1_DIGEST_SIZE,
1871 },
1872 .caam = {
1873 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1874 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1875 OP_ALG_AAI_HMAC_PRECOMP,
1876 },
1877 },
1878 {
1879 .aead = {
1880 .base = {
1881 .cra_name = "echainiv(authenc(hmac(sha1),"
1882 "cbc(des)))",
1883 .cra_driver_name = "echainiv-authenc-"
1884 "hmac-sha1-cbc-des-caam-qi",
1885 .cra_blocksize = DES_BLOCK_SIZE,
1886 },
1887 .setkey = aead_setkey,
1888 .setauthsize = aead_setauthsize,
1889 .encrypt = aead_encrypt,
1890 .decrypt = aead_decrypt,
1891 .ivsize = DES_BLOCK_SIZE,
1892 .maxauthsize = SHA1_DIGEST_SIZE,
1893 },
1894 .caam = {
1895 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1896 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1897 OP_ALG_AAI_HMAC_PRECOMP,
1898 .geniv = true,
1899 }
1900 },
1901 {
1902 .aead = {
1903 .base = {
1904 .cra_name = "authenc(hmac(sha224),cbc(des))",
1905 .cra_driver_name = "authenc-hmac-sha224-"
1906 "cbc-des-caam-qi",
1907 .cra_blocksize = DES_BLOCK_SIZE,
1908 },
1909 .setkey = aead_setkey,
1910 .setauthsize = aead_setauthsize,
1911 .encrypt = aead_encrypt,
1912 .decrypt = aead_decrypt,
1913 .ivsize = DES_BLOCK_SIZE,
1914 .maxauthsize = SHA224_DIGEST_SIZE,
1915 },
1916 .caam = {
1917 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1918 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1919 OP_ALG_AAI_HMAC_PRECOMP,
1920 },
1921 },
1922 {
1923 .aead = {
1924 .base = {
1925 .cra_name = "echainiv(authenc(hmac(sha224),"
1926 "cbc(des)))",
1927 .cra_driver_name = "echainiv-authenc-"
1928 "hmac-sha224-cbc-des-"
1929 "caam-qi",
1930 .cra_blocksize = DES_BLOCK_SIZE,
1931 },
1932 .setkey = aead_setkey,
1933 .setauthsize = aead_setauthsize,
1934 .encrypt = aead_encrypt,
1935 .decrypt = aead_decrypt,
1936 .ivsize = DES_BLOCK_SIZE,
1937 .maxauthsize = SHA224_DIGEST_SIZE,
1938 },
1939 .caam = {
1940 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1941 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1942 OP_ALG_AAI_HMAC_PRECOMP,
1943 .geniv = true,
1944 }
1945 },
1946 {
1947 .aead = {
1948 .base = {
1949 .cra_name = "authenc(hmac(sha256),cbc(des))",
1950 .cra_driver_name = "authenc-hmac-sha256-"
1951 "cbc-des-caam-qi",
1952 .cra_blocksize = DES_BLOCK_SIZE,
1953 },
1954 .setkey = aead_setkey,
1955 .setauthsize = aead_setauthsize,
1956 .encrypt = aead_encrypt,
1957 .decrypt = aead_decrypt,
1958 .ivsize = DES_BLOCK_SIZE,
1959 .maxauthsize = SHA256_DIGEST_SIZE,
1960 },
1961 .caam = {
1962 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1963 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1964 OP_ALG_AAI_HMAC_PRECOMP,
1965 },
1966 },
1967 {
1968 .aead = {
1969 .base = {
1970 .cra_name = "echainiv(authenc(hmac(sha256),"
1971 "cbc(des)))",
1972 .cra_driver_name = "echainiv-authenc-"
 1973 "hmac-sha256-cbc-des-"
1974 "caam-qi",
1975 .cra_blocksize = DES_BLOCK_SIZE,
1976 },
1977 .setkey = aead_setkey,
1978 .setauthsize = aead_setauthsize,
1979 .encrypt = aead_encrypt,
1980 .decrypt = aead_decrypt,
1981 .ivsize = DES_BLOCK_SIZE,
1982 .maxauthsize = SHA256_DIGEST_SIZE,
1983 },
1984 .caam = {
1985 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1986 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1987 OP_ALG_AAI_HMAC_PRECOMP,
1988 .geniv = true,
1989 },
1990 },
1991 {
1992 .aead = {
1993 .base = {
1994 .cra_name = "authenc(hmac(sha384),cbc(des))",
1995 .cra_driver_name = "authenc-hmac-sha384-"
1996 "cbc-des-caam-qi",
1997 .cra_blocksize = DES_BLOCK_SIZE,
1998 },
1999 .setkey = aead_setkey,
2000 .setauthsize = aead_setauthsize,
2001 .encrypt = aead_encrypt,
2002 .decrypt = aead_decrypt,
2003 .ivsize = DES_BLOCK_SIZE,
2004 .maxauthsize = SHA384_DIGEST_SIZE,
2005 },
2006 .caam = {
2007 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2008 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2009 OP_ALG_AAI_HMAC_PRECOMP,
2010 },
2011 },
2012 {
2013 .aead = {
2014 .base = {
2015 .cra_name = "echainiv(authenc(hmac(sha384),"
2016 "cbc(des)))",
2017 .cra_driver_name = "echainiv-authenc-"
2018 "hmac-sha384-cbc-des-"
2019 "caam-qi",
2020 .cra_blocksize = DES_BLOCK_SIZE,
2021 },
2022 .setkey = aead_setkey,
2023 .setauthsize = aead_setauthsize,
2024 .encrypt = aead_encrypt,
2025 .decrypt = aead_decrypt,
2026 .ivsize = DES_BLOCK_SIZE,
2027 .maxauthsize = SHA384_DIGEST_SIZE,
2028 },
2029 .caam = {
2030 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2031 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2032 OP_ALG_AAI_HMAC_PRECOMP,
2033 .geniv = true,
2034 }
2035 },
2036 {
2037 .aead = {
2038 .base = {
2039 .cra_name = "authenc(hmac(sha512),cbc(des))",
2040 .cra_driver_name = "authenc-hmac-sha512-"
2041 "cbc-des-caam-qi",
2042 .cra_blocksize = DES_BLOCK_SIZE,
2043 },
2044 .setkey = aead_setkey,
2045 .setauthsize = aead_setauthsize,
2046 .encrypt = aead_encrypt,
2047 .decrypt = aead_decrypt,
2048 .ivsize = DES_BLOCK_SIZE,
2049 .maxauthsize = SHA512_DIGEST_SIZE,
2050 },
2051 .caam = {
2052 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2053 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2054 OP_ALG_AAI_HMAC_PRECOMP,
2055 }
2056 },
2057 {
2058 .aead = {
2059 .base = {
2060 .cra_name = "echainiv(authenc(hmac(sha512),"
2061 "cbc(des)))",
2062 .cra_driver_name = "echainiv-authenc-"
2063 "hmac-sha512-cbc-des-"
2064 "caam-qi",
2065 .cra_blocksize = DES_BLOCK_SIZE,
2066 },
2067 .setkey = aead_setkey,
2068 .setauthsize = aead_setauthsize,
2069 .encrypt = aead_encrypt,
2070 .decrypt = aead_decrypt,
2071 .ivsize = DES_BLOCK_SIZE,
2072 .maxauthsize = SHA512_DIGEST_SIZE,
2073 },
2074 .caam = {
2075 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2076 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2077 OP_ALG_AAI_HMAC_PRECOMP,
2078 .geniv = true,
2079 }
2080 },
2081};
2082
2083struct caam_crypto_alg {
2084 struct list_head entry;
2085 struct crypto_alg crypto_alg;
2086 struct caam_alg_entry caam;
2087};
2088
2089static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2090{
2091 struct caam_drv_private *priv;
2092
2093 /*
2094 * distribute tfms across job rings to ensure in-order
2095 * crypto request processing per tfm
2096 */
2097 ctx->jrdev = caam_jr_alloc();
2098 if (IS_ERR(ctx->jrdev)) {
2099 pr_err("Job Ring Device allocation for transform failed\n");
2100 return PTR_ERR(ctx->jrdev);
2101 }
2102
2103 ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
2104 DMA_TO_DEVICE);
2105 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
2106 dev_err(ctx->jrdev, "unable to map key\n");
2107 caam_jr_free(ctx->jrdev);
2108 return -ENOMEM;
2109 }
2110
2111 /* copy descriptor header template value */
2112 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
2113 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
2114
2115 priv = dev_get_drvdata(ctx->jrdev->parent);
2116 ctx->qidev = priv->qidev;
2117
2118 spin_lock_init(&ctx->lock);
2119 ctx->drv_ctx[ENCRYPT] = NULL;
2120 ctx->drv_ctx[DECRYPT] = NULL;
2121 ctx->drv_ctx[GIVENCRYPT] = NULL;
2122
2123 return 0;
2124}
2125
2126static int caam_cra_init(struct crypto_tfm *tfm)
2127{
2128 struct crypto_alg *alg = tfm->__crt_alg;
2129 struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2130 crypto_alg);
2131 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2132
2133 return caam_init_common(ctx, &caam_alg->caam);
2134}
2135
2136static int caam_aead_init(struct crypto_aead *tfm)
2137{
2138 struct aead_alg *alg = crypto_aead_alg(tfm);
2139 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
2140 aead);
2141 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2142
2143 return caam_init_common(ctx, &caam_alg->caam);
2144}
2145
2146static void caam_exit_common(struct caam_ctx *ctx)
2147{
2148 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2149 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2150 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2151
2152 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
2153 DMA_TO_DEVICE);
2154
2155 caam_jr_free(ctx->jrdev);
2156}
2157
2158static void caam_cra_exit(struct crypto_tfm *tfm)
2159{
2160 caam_exit_common(crypto_tfm_ctx(tfm));
2161}
2162
2163static void caam_aead_exit(struct crypto_aead *tfm)
2164{
2165 caam_exit_common(crypto_aead_ctx(tfm));
2166}
2167
2168static struct list_head alg_list;
2169static void __exit caam_qi_algapi_exit(void)
2170{
2171 struct caam_crypto_alg *t_alg, *n;
2172 int i;
2173
2174 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2175 struct caam_aead_alg *t_alg = driver_aeads + i;
2176
2177 if (t_alg->registered)
2178 crypto_unregister_aead(&t_alg->aead);
2179 }
2180
2181 if (!alg_list.next)
2182 return;
2183
2184 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
2185 crypto_unregister_alg(&t_alg->crypto_alg);
2186 list_del(&t_alg->entry);
2187 kfree(t_alg);
2188 }
2189}
2190
2191static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
2192 *template)
2193{
2194 struct caam_crypto_alg *t_alg;
2195 struct crypto_alg *alg;
2196
2197 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2198 if (!t_alg)
2199 return ERR_PTR(-ENOMEM);
2200
2201 alg = &t_alg->crypto_alg;
2202
2203 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2204 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2205 template->driver_name);
2206 alg->cra_module = THIS_MODULE;
2207 alg->cra_init = caam_cra_init;
2208 alg->cra_exit = caam_cra_exit;
2209 alg->cra_priority = CAAM_CRA_PRIORITY;
2210 alg->cra_blocksize = template->blocksize;
2211 alg->cra_alignmask = 0;
2212 alg->cra_ctxsize = sizeof(struct caam_ctx);
2213 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2214 template->type;
2215 switch (template->type) {
2216 case CRYPTO_ALG_TYPE_GIVCIPHER:
2217 alg->cra_type = &crypto_givcipher_type;
2218 alg->cra_ablkcipher = template->template_ablkcipher;
2219 break;
2220 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2221 alg->cra_type = &crypto_ablkcipher_type;
2222 alg->cra_ablkcipher = template->template_ablkcipher;
2223 break;
2224 }
2225
2226 t_alg->caam.class1_alg_type = template->class1_alg_type;
2227 t_alg->caam.class2_alg_type = template->class2_alg_type;
2228
2229 return t_alg;
2230}
2231
2232static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2233{
2234 struct aead_alg *alg = &t_alg->aead;
2235
2236 alg->base.cra_module = THIS_MODULE;
2237 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2238 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2239 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2240
2241 alg->init = caam_aead_init;
2242 alg->exit = caam_aead_exit;
2243}
2244
2245static int __init caam_qi_algapi_init(void)
2246{
2247 struct device_node *dev_node;
2248 struct platform_device *pdev;
2249 struct device *ctrldev;
2250 struct caam_drv_private *priv;
2251 int i = 0, err = 0;
2252 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 unsigned int md_limit = SHA512_DIGEST_SIZE;
2254 bool registered = false;
2255
2256 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2257 if (!dev_node) {
2258 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2259 if (!dev_node)
2260 return -ENODEV;
2261 }
2262
2263 pdev = of_find_device_by_node(dev_node);
2264 of_node_put(dev_node);
2265 if (!pdev)
2266 return -ENODEV;
2267
2268 ctrldev = &pdev->dev;
2269 priv = dev_get_drvdata(ctrldev);
2270
2271 /*
2272 * If priv is NULL, it's probably because the caam driver wasn't
2273 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2274 */
2275 if (!priv || !priv->qi_present)
2276 return -ENODEV;
2277
2278 INIT_LIST_HEAD(&alg_list);
2279
2280 /*
2281 * Register crypto algorithms the device supports.
2282 * First, detect presence and attributes of DES, AES, and MD blocks.
2283 */
2284 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2285 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2286 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2287 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2288 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2289
2290 /* If MD is present, limit digest size based on LP256 */
2291 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2292 md_limit = SHA256_DIGEST_SIZE;
2293
2294 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2295 struct caam_crypto_alg *t_alg;
2296 struct caam_alg_template *alg = driver_algs + i;
2297 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
2298
2299 /* Skip DES algorithms if not supported by device */
2300 if (!des_inst &&
2301 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
2302 (alg_sel == OP_ALG_ALGSEL_DES)))
2303 continue;
2304
2305 /* Skip AES algorithms if not supported by device */
2306 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2307 continue;
2308
2309 t_alg = caam_alg_alloc(alg);
2310 if (IS_ERR(t_alg)) {
2311 err = PTR_ERR(t_alg);
2312 dev_warn(priv->qidev, "%s alg allocation failed\n",
2313 alg->driver_name);
2314 continue;
2315 }
2316
2317 err = crypto_register_alg(&t_alg->crypto_alg);
2318 if (err) {
2319 dev_warn(priv->qidev, "%s alg registration failed\n",
2320 t_alg->crypto_alg.cra_driver_name);
2321 kfree(t_alg);
2322 continue;
2323 }
2324
2325 list_add_tail(&t_alg->entry, &alg_list);
2326 registered = true;
2327 }
2328
2329 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
2330 struct caam_aead_alg *t_alg = driver_aeads + i;
2331 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
2332 OP_ALG_ALGSEL_MASK;
2333 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
2334 OP_ALG_ALGSEL_MASK;
2335 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
2336
2337 /* Skip DES algorithms if not supported by device */
2338 if (!des_inst &&
2339 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
2340 (c1_alg_sel == OP_ALG_ALGSEL_DES)))
2341 continue;
2342
2343 /* Skip AES algorithms if not supported by device */
2344 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2345 continue;
2346
2347 /*
2348 * Check support for AES algorithms not available
2349 * on LP devices.
2350 */
2351 if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
2352 (alg_aai == OP_ALG_AAI_GCM))
2353 continue;
2354
2355 /*
2356 * Skip algorithms requiring message digests
2357 * if MD or MD size is not supported by device.
2358 */
2359 if (c2_alg_sel &&
2360 (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2361 continue;
2362
2363 caam_aead_alg_init(t_alg);
2364
2365 err = crypto_register_aead(&t_alg->aead);
2366 if (err) {
2367 pr_warn("%s alg registration failed\n",
2368 t_alg->aead.base.cra_driver_name);
2369 continue;
2370 }
2371
2372 t_alg->registered = true;
2373 registered = true;
2374 }
2375
2376 if (registered)
2377 dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
2378
2379 return err;
2380}
2381
2382module_init(caam_qi_algapi_init);
2383module_exit(caam_qi_algapi_exit);
2384
2385MODULE_LICENSE("GPL");
2386MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
2387MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 5d7f73d60515..dd353e342c12 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -18,6 +18,10 @@
 18bool caam_little_end;
 19EXPORT_SYMBOL(caam_little_end);
 20
21#ifdef CONFIG_CAAM_QI
22#include "qi.h"
23#endif
24
 21/*
 22 * i.MX targets tend to have clock control subsystems that can
 23 * enable/disable clocking to our device.
@@ -310,6 +314,11 @@ static int caam_remove(struct platform_device *pdev)
310 /* Remove platform devices under the crypto node */
311 of_platform_depopulate(ctrldev);
312
317#ifdef CONFIG_CAAM_QI
318 if (ctrlpriv->qidev)
319 caam_qi_shutdown(ctrlpriv->qidev);
320#endif
321
313 /* De-initialize RNG state handles initialized by this driver. */
314 if (ctrlpriv->rng4_sh_init)
315 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
@@ -400,23 +409,6 @@ int caam_get_era(void)
400}
401EXPORT_SYMBOL(caam_get_era);
402
403#ifdef CONFIG_DEBUG_FS
404static int caam_debugfs_u64_get(void *data, u64 *val)
405{
406 *val = caam64_to_cpu(*(u64 *)data);
407 return 0;
408}
409
410static int caam_debugfs_u32_get(void *data, u64 *val)
411{
412 *val = caam32_to_cpu(*(u32 *)data);
413 return 0;
414}
415
416DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
417DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
418#endif
419
420static const struct of_device_id caam_match[] = {
421 {
422 .compatible = "fsl,sec-v4.0",
@@ -613,6 +605,18 @@ static int caam_probe(struct platform_device *pdev)
613 goto iounmap_ctrl;
614 }
615
608#ifdef CONFIG_DEBUG_FS
609 /*
610 * FIXME: needs better naming distinction, as some amalgamation of
611 * "caam" and nprop->full_name. The OF name isn't distinctive,
612 * but does separate instances
613 */
614 perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
615
616 ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
617 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
618#endif
619
616 ring = 0;
617 for_each_available_child_of_node(nprop, np)
618 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
@@ -637,6 +641,13 @@ static int caam_probe(struct platform_device *pdev)
637 );
638 /* This is all that's required to physically enable QI */
639 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
644
645 /* If QMAN driver is present, init CAAM-QI backend */
646#ifdef CONFIG_CAAM_QI
647 ret = caam_qi_init(pdev);
648 if (ret)
649 dev_err(dev, "caam qi i/f init failed: %d\n", ret);
650#endif
640 }
641
642 /* If no QI and no rings specified, quit and go home */
@@ -724,17 +735,6 @@ static int caam_probe(struct platform_device *pdev)
724 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
725
726#ifdef CONFIG_DEBUG_FS
727 /*
728 * FIXME: needs better naming distinction, as some amalgamation of
729 * "caam" and nprop->full_name. The OF name isn't distinctive,
730 * but does separate instances
731 */
732 perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
733
734 ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
735 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
736
737 /* Controller-level - performance monitor counters */
738
739 ctrlpriv->ctl_rq_dequeued =
740 debugfs_create_file("rq_dequeued",
@@ -817,6 +817,9 @@ static int caam_probe(struct platform_device *pdev)
817 return 0;
818
819caam_remove:
820#ifdef CONFIG_DEBUG_FS
821 debugfs_remove_recursive(ctrlpriv->dfs_root);
822#endif
820 caam_remove(pdev);
821 return ret;
822
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index b9c8d98ef826..d8e83ca104e0 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -4,6 +4,9 @@
4 * Copyright 2008-2012 Freescale Semiconductor, Inc.
5 */
6
7#ifndef DESC_CONSTR_H
8#define DESC_CONSTR_H
9
7#include "desc.h"
8#include "regs.h"
9
@@ -491,3 +494,5 @@ static inline int desc_inline_query(unsigned int sd_base_len,
491
492 return (rem_bytes >= 0) ? 0 : -1;
493}
497
498#endif /* DESC_CONSTR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index dbed8baeebe5..85b6c5835b8f 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,6 +66,9 @@ struct caam_drv_private_jr {
66struct caam_drv_private {
67
68 struct device *dev;
69#ifdef CONFIG_CAAM_QI
70 struct device *qidev;
71#endif
69 struct platform_device *pdev;
70
71 /* Physical-presence section */
@@ -109,9 +112,30 @@ struct caam_drv_private {
109
110 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
111 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
115#ifdef CONFIG_CAAM_QI
116 struct dentry *qi_congested;
117#endif
112#endif
113};
114
115void caam_jr_algapi_init(struct device *dev);
116void caam_jr_algapi_remove(struct device *dev);
123
124#ifdef CONFIG_DEBUG_FS
125static int caam_debugfs_u64_get(void *data, u64 *val)
126{
127 *val = caam64_to_cpu(*(u64 *)data);
128 return 0;
129}
130
131static int caam_debugfs_u32_get(void *data, u64 *val)
132{
133 *val = caam32_to_cpu(*(u32 *)data);
134 return 0;
135}
136
137DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
138DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
139#endif
140
117#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
new file mode 100644
index 000000000000..1990ed460c46
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
@@ -0,0 +1,805 @@
1/*
2 * CAAM/SEC 4.x QI transport/backend driver
3 * Queue Interface backend functionality
4 *
5 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6 * Copyright 2016-2017 NXP
7 */
8
9#include <linux/cpumask.h>
10#include <linux/kthread.h>
11#include <soc/fsl/qman.h>
12
13#include "regs.h"
14#include "qi.h"
15#include "desc.h"
16#include "intern.h"
17#include "desc_constr.h"
18
19#define PREHDR_RSLS_SHIFT 31
20
21/*
22 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
23 * so that resources used by the in-flight buffers do not become a memory hog.
24 */
25#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
26
27/* Length of a single buffer in the QI driver memory cache */
28#define CAAM_QI_MEMCACHE_SIZE 512
29
30#define CAAM_QI_ENQUEUE_RETRIES 10000
31
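/* NAPI budget: maximum number of frames processed per poll invocation. */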
32#define CAAM_NAPI_WEIGHT 63
33
34/*
35 * caam_napi - struct holding CAAM NAPI-related params
36 * @irqtask: IRQ task for QI backend
37 * @p: QMan portal
38 */
39struct caam_napi {
40 struct napi_struct irqtask;
41 struct qman_portal *p;
42};
43
44/*
 45 * caam_qi_pcpu_priv - percpu private data structure maintaining the list of
 46 * pending responses expected on each cpu.
47 * @caam_napi: CAAM NAPI params
48 * @net_dev: netdev used by NAPI
49 * @rsp_fq: response FQ from CAAM
50 */
51struct caam_qi_pcpu_priv {
52 struct caam_napi caam_napi;
53 struct net_device net_dev;
54 struct qman_fq *rsp_fq;
55} ____cacheline_aligned;
56
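/* One caam_qi_pcpu_priv instance per CPU; cacheline alignment avoids false sharing. */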
57static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
58
59/*
60 * caam_qi_priv - CAAM QI backend private params
61 * @cgr: QMan congestion group
62 * @qi_pdev: platform device for QI backend
63 */
64struct caam_qi_priv {
65 struct qman_cgr cgr;
66 struct platform_device *qi_pdev;
67};
68
69static struct caam_qi_priv qipriv ____cacheline_aligned;
70
71/*
72 * This is written by only one core - the one that initialized the CGR - and
73 * read by multiple cores (all the others).
74 */
75bool caam_congested __read_mostly;
76EXPORT_SYMBOL(caam_congested);
77
78#ifdef CONFIG_DEBUG_FS
79/*
80 * This is a counter for the number of times the congestion group (where all
 81 * the request and response queues are) reached congestion. Incremented
82 * each time the congestion callback is called with congested == true.
83 */
84static u64 times_congested;
85#endif
86
87/*
 88 * CPU from which the module was initialised. This is required because the
 89 * QMan driver requires CGRs to be removed from the same CPU on which they
 90 * were originally allocated.
91 */
92static int mod_init_cpu;
93
94/*
 95 * This is a cache of buffers, from which users of the CAAM QI driver
96 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
97 * doing malloc on the hotpath.
98 * NOTE: A more elegant solution would be to have some headroom in the frames
99 * being processed. This could be added by the dpaa-ethernet driver.
100 * This would pose a problem for userspace application processing which
101 * cannot know of this limitation. So for now, this will work.
 102 * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
103 */
104static struct kmem_cache *qi_cache;
105
106int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
107{
108 struct qm_fd fd;
109 dma_addr_t addr;
110 int ret;
111 int num_retries = 0;
112
113 qm_fd_clear_fd(&fd);
114 qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
115
116 addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
117 DMA_BIDIRECTIONAL);
118 if (dma_mapping_error(qidev, addr)) {
119 dev_err(qidev, "DMA mapping error for QI enqueue request\n");
120 return -EIO;
121 }
122 qm_fd_addr_set64(&fd, addr);
123
124 do {
125 ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
126 if (likely(!ret))
127 return 0;
128
129 if (ret != -EBUSY)
130 break;
131 num_retries++;
132 } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
133
134 dev_err(qidev, "qman_enqueue failed: %d\n", ret);
135
136 return ret;
137}
138EXPORT_SYMBOL(caam_qi_enqueue);
139
140static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
141 const union qm_mr_entry *msg)
142{
143 const struct qm_fd *fd;
144 struct caam_drv_req *drv_req;
145 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
146
147 fd = &msg->ern.fd;
148
149 if (qm_fd_get_format(fd) != qm_fd_compound) {
150 dev_err(qidev, "Non-compound FD from CAAM\n");
151 return;
152 }
153
154 drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
155 if (!drv_req) {
156 dev_err(qidev,
157 "Can't find original request for CAAM response\n");
158 return;
159 }
160
161 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
162 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
163
164 drv_req->cbk(drv_req, -EIO);
165}
166
167static struct qman_fq *create_caam_req_fq(struct device *qidev,
168 struct qman_fq *rsp_fq,
169 dma_addr_t hwdesc,
170 int fq_sched_flag)
171{
172 int ret;
173 struct qman_fq *req_fq;
174 struct qm_mcc_initfq opts;
175
176 req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
177 if (!req_fq)
178 return ERR_PTR(-ENOMEM);
179
180 req_fq->cb.ern = caam_fq_ern_cb;
181 req_fq->cb.fqs = NULL;
182
183 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
184 QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
185 if (ret) {
186 dev_err(qidev, "Failed to create session req FQ\n");
187 goto create_req_fq_fail;
188 }
189
190 memset(&opts, 0, sizeof(opts));
191 opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
192 QM_INITFQ_WE_CONTEXTB |
193 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
194 opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
195 qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
196 opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
197 qm_fqd_context_a_set64(&opts.fqd, hwdesc);
198 opts.fqd.cgid = qipriv.cgr.cgrid;
199
200 ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
201 if (ret) {
202 dev_err(qidev, "Failed to init session req FQ\n");
203 goto init_req_fq_fail;
204 }
205
206 dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
207 smp_processor_id());
208 return req_fq;
209
210init_req_fq_fail:
211 qman_destroy_fq(req_fq);
212create_req_fq_fail:
213 kfree(req_fq);
214 return ERR_PTR(ret);
215}
216
217static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
218{
219 int ret;
220
221 ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
222 QMAN_VOLATILE_FLAG_FINISH,
223 QM_VDQCR_PRECEDENCE_VDQCR |
224 QM_VDQCR_NUMFRAMES_TILLEMPTY);
225 if (ret) {
226 dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
227 return ret;
228 }
229
230 do {
231 struct qman_portal *p;
232
233 p = qman_get_affine_portal(smp_processor_id());
234 qman_p_poll_dqrr(p, 16);
235 } while (fq->flags & QMAN_FQ_STATE_NE);
236
237 return 0;
238}
239
240static int kill_fq(struct device *qidev, struct qman_fq *fq)
241{
242 u32 flags;
243 int ret;
244
245 ret = qman_retire_fq(fq, &flags);
246 if (ret < 0) {
247 dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
248 return ret;
249 }
250
251 if (!ret)
252 goto empty_fq;
253
254 /* Async FQ retirement condition */
255 if (ret == 1) {
256		/* Retry till FQ gets into the retired state */
257 do {
258 msleep(20);
259 } while (fq->state != qman_fq_state_retired);
260
261 WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
262 WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
263 }
264
265empty_fq:
266 if (fq->flags & QMAN_FQ_STATE_NE) {
267 ret = empty_retired_fq(qidev, fq);
268 if (ret) {
269 dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
270 fq->fqid);
271 return ret;
272 }
273 }
274
275 ret = qman_oos_fq(fq);
276 if (ret)
277 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
278
279 qman_destroy_fq(fq);
280
281 return ret;
282}
283
284static int empty_caam_fq(struct qman_fq *fq)
285{
286 int ret;
287 struct qm_mcr_queryfq_np np;
288
289	/* Wait till the older CAAM FQ gets empty */
290 do {
291 ret = qman_query_fq_np(fq, &np);
292 if (ret)
293 return ret;
294
295 if (!qm_mcr_np_get(&np, frm_cnt))
296 break;
297
298 msleep(20);
299 } while (1);
300
301 /*
302 * Give extra time for pending jobs from this FQ in holding tanks
303 * to get processed
304 */
305 msleep(20);
306 return 0;
307}
308
309int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
310{
311 int ret;
312 u32 num_words;
313 struct qman_fq *new_fq, *old_fq;
314 struct device *qidev = drv_ctx->qidev;
315
316 num_words = desc_len(sh_desc);
317 if (num_words > MAX_SDLEN) {
318 dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
319 return -EINVAL;
320 }
321
322 /* Note down older req FQ */
323 old_fq = drv_ctx->req_fq;
324
325 /* Create a new req FQ in parked state */
326 new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
327 drv_ctx->context_a, 0);
328 if (unlikely(IS_ERR_OR_NULL(new_fq))) {
329 dev_err(qidev, "FQ allocation for shdesc update failed\n");
330 return PTR_ERR(new_fq);
331 }
332
333 /* Hook up new FQ to context so that new requests keep queuing */
334 drv_ctx->req_fq = new_fq;
335
336 /* Empty and remove the older FQ */
337 ret = empty_caam_fq(old_fq);
338 if (ret) {
339 dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
340
341 /* We can revert to older FQ */
342 drv_ctx->req_fq = old_fq;
343
344 if (kill_fq(qidev, new_fq))
345 dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
346 new_fq->fqid);
347
348 return ret;
349 }
350
351 /*
352 * Re-initialise pre-header. Set RSLS and SDLEN.
353 * Update the shared descriptor for driver context.
354 */
355 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
356 num_words);
357 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
358 dma_sync_single_for_device(qidev, drv_ctx->context_a,
359 sizeof(drv_ctx->sh_desc) +
360 sizeof(drv_ctx->prehdr),
361 DMA_BIDIRECTIONAL);
362
363 /* Put the new FQ in scheduled state */
364 ret = qman_schedule_fq(new_fq);
365 if (ret) {
366 dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
367
368 /*
369	 * We can kill the new FQ and revert to the old FQ. Since the desc
370	 * is already modified, this is treated as a success case.
371 */
372
373 drv_ctx->req_fq = old_fq;
374
375 if (kill_fq(qidev, new_fq))
376 dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
377 new_fq->fqid);
378 } else if (kill_fq(qidev, old_fq)) {
379 dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
380 }
381
382 return 0;
383}
384EXPORT_SYMBOL(caam_drv_ctx_update);
385
386struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
387 int *cpu,
388 u32 *sh_desc)
389{
390 size_t size;
391 u32 num_words;
392 dma_addr_t hwdesc;
393 struct caam_drv_ctx *drv_ctx;
394 const cpumask_t *cpus = qman_affine_cpus();
395 static DEFINE_PER_CPU(int, last_cpu);
396
397 num_words = desc_len(sh_desc);
398 if (num_words > MAX_SDLEN) {
399 dev_err(qidev, "Invalid descriptor len: %d words\n",
400 num_words);
401 return ERR_PTR(-EINVAL);
402 }
403
404 drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
405 if (!drv_ctx)
406 return ERR_PTR(-ENOMEM);
407
408 /*
409 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
410 * and dma-map them.
411 */
412 drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
413 num_words);
414 memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
415 size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
416 hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
417 DMA_BIDIRECTIONAL);
418 if (dma_mapping_error(qidev, hwdesc)) {
419 dev_err(qidev, "DMA map error for preheader + shdesc\n");
420 kfree(drv_ctx);
421 return ERR_PTR(-ENOMEM);
422 }
423 drv_ctx->context_a = hwdesc;
424
425 /* If given CPU does not own the portal, choose another one that does */
426 if (!cpumask_test_cpu(*cpu, cpus)) {
427 int *pcpu = &get_cpu_var(last_cpu);
428
429 *pcpu = cpumask_next(*pcpu, cpus);
430 if (*pcpu >= nr_cpu_ids)
431 *pcpu = cpumask_first(cpus);
432 *cpu = *pcpu;
433
434 put_cpu_var(last_cpu);
435 }
436 drv_ctx->cpu = *cpu;
437
438 /* Find response FQ hooked with this CPU */
439 drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
440
441 /* Attach request FQ */
442 drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
443 QMAN_INITFQ_FLAG_SCHED);
444 if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
445 dev_err(qidev, "create_caam_req_fq failed\n");
446 dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
447 kfree(drv_ctx);
448 return ERR_PTR(-ENOMEM);
449 }
450
451 drv_ctx->qidev = qidev;
452 return drv_ctx;
453}
454EXPORT_SYMBOL(caam_drv_ctx_init);
455
456void *qi_cache_alloc(gfp_t flags)
457{
458 return kmem_cache_alloc(qi_cache, flags);
459}
460EXPORT_SYMBOL(qi_cache_alloc);
461
462void qi_cache_free(void *obj)
463{
464 kmem_cache_free(qi_cache, obj);
465}
466EXPORT_SYMBOL(qi_cache_free);
467
468static int caam_qi_poll(struct napi_struct *napi, int budget)
469{
470 struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
471
472 int cleaned = qman_p_poll_dqrr(np->p, budget);
473
474 if (cleaned < budget) {
475 napi_complete(napi);
476 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
477 }
478
479 return cleaned;
480}
481
482void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
483{
484 if (IS_ERR_OR_NULL(drv_ctx))
485 return;
486
487 /* Remove request FQ */
488 if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
489 dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
490
491 dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
492 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
493 DMA_BIDIRECTIONAL);
494 kfree(drv_ctx);
495}
496EXPORT_SYMBOL(caam_drv_ctx_rel);
497
498int caam_qi_shutdown(struct device *qidev)
499{
500 int i, ret;
501 struct caam_qi_priv *priv = dev_get_drvdata(qidev);
502 const cpumask_t *cpus = qman_affine_cpus();
503 struct cpumask old_cpumask = current->cpus_allowed;
504
505 for_each_cpu(i, cpus) {
506 struct napi_struct *irqtask;
507
508 irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
509 napi_disable(irqtask);
510 netif_napi_del(irqtask);
511
512 if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
513 dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
514 kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
515 }
516
517 /*
518	 * The QMan driver requires CGRs to be deleted from the same CPU on
519	 * which they were instantiated. Hence we make module removal execute
520	 * on the same CPU on which the module was originally inserted.
521 */
522 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
523
524 ret = qman_delete_cgr(&priv->cgr);
525 if (ret)
526 dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
527 else
528 qman_release_cgrid(priv->cgr.cgrid);
529
530 kmem_cache_destroy(qi_cache);
531
532 /* Now that we're done with the CGRs, restore the cpus allowed mask */
533 set_cpus_allowed_ptr(current, &old_cpumask);
534
535 platform_device_unregister(priv->qi_pdev);
536 return ret;
537}
538
539static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
540{
541 caam_congested = congested;
542
543 if (congested) {
544#ifdef CONFIG_DEBUG_FS
545 times_congested++;
546#endif
547 pr_debug_ratelimited("CAAM entered congestion\n");
548
549 } else {
550 pr_debug_ratelimited("CAAM exited congestion\n");
551 }
552}
553
554static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
555{
556 /*
557	 * In case of threaded ISR, for RT kernels in_irq() does not return an
558	 * appropriate value, so use in_serving_softirq() to distinguish between
559 * softirq and irq contexts.
560 */
561 if (unlikely(in_irq() || !in_serving_softirq())) {
562 /* Disable QMan IRQ source and invoke NAPI */
563 qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
564 np->p = p;
565 napi_schedule(&np->irqtask);
566 return 1;
567 }
568 return 0;
569}
570
571static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
572 struct qman_fq *rsp_fq,
573 const struct qm_dqrr_entry *dqrr)
574{
575 struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
576 struct caam_drv_req *drv_req;
577 const struct qm_fd *fd;
578 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
579 u32 status;
580
581 if (caam_qi_napi_schedule(p, caam_napi))
582 return qman_cb_dqrr_stop;
583
584 fd = &dqrr->fd;
585 status = be32_to_cpu(fd->status);
586 if (unlikely(status))
587 dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
588
589 if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
590 dev_err(qidev, "Non-compound FD from CAAM\n");
591 return qman_cb_dqrr_consume;
592 }
593
594 drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
595 if (unlikely(!drv_req)) {
596 dev_err(qidev,
597 "Can't find original request for caam response\n");
598 return qman_cb_dqrr_consume;
599 }
600
601 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
602 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
603
604 drv_req->cbk(drv_req, status);
605 return qman_cb_dqrr_consume;
606}
607
608static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
609{
610 struct qm_mcc_initfq opts;
611 struct qman_fq *fq;
612 int ret;
613
614 fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
615 if (!fq)
616 return -ENOMEM;
617
618 fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
619
620 ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
621 QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
622 if (ret) {
623 dev_err(qidev, "Rsp FQ create failed\n");
624 kfree(fq);
625 return -ENODEV;
626 }
627
628 memset(&opts, 0, sizeof(opts));
629 opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
630 QM_INITFQ_WE_CONTEXTB |
631 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
632 opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
633 QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
634 qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
635 opts.fqd.cgid = qipriv.cgr.cgrid;
636 opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
637 QM_STASHING_EXCL_DATA;
638 qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
639
640 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
641 if (ret) {
642 dev_err(qidev, "Rsp FQ init failed\n");
643 kfree(fq);
644 return -ENODEV;
645 }
646
647 per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
648
649	dev_info(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
650 return 0;
651}
652
653static int init_cgr(struct device *qidev)
654{
655 int ret;
656 struct qm_mcc_initcgr opts;
657 const u64 cpus = *(u64 *)qman_affine_cpus();
658 const int num_cpus = hweight64(cpus);
659 const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
660
661 ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
662 if (ret) {
663 dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
664 return ret;
665 }
666
667 qipriv.cgr.cb = cgr_cb;
668 memset(&opts, 0, sizeof(opts));
669 opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
670 QM_CGR_WE_MODE);
671 opts.cgr.cscn_en = QM_CGR_EN;
672 opts.cgr.mode = QMAN_CGR_MODE_FRAME;
673 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
674
675 ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
676 if (ret) {
677 dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
678 qipriv.cgr.cgrid);
679 return ret;
680 }
681
682 dev_info(qidev, "Congestion threshold set to %llu\n", val);
683 return 0;
684}
685
686static int alloc_rsp_fqs(struct device *qidev)
687{
688 int ret, i;
689 const cpumask_t *cpus = qman_affine_cpus();
690
691	/* Now create response FQs */
692 for_each_cpu(i, cpus) {
693 ret = alloc_rsp_fq_cpu(qidev, i);
694 if (ret) {
695			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
696 return ret;
697 }
698 }
699
700 return 0;
701}
702
703static void free_rsp_fqs(void)
704{
705 int i;
706 const cpumask_t *cpus = qman_affine_cpus();
707
708 for_each_cpu(i, cpus)
709 kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
710}
711
712int caam_qi_init(struct platform_device *caam_pdev)
713{
714 int err, i;
715 struct platform_device *qi_pdev;
716 struct device *ctrldev = &caam_pdev->dev, *qidev;
717 struct caam_drv_private *ctrlpriv;
718 const cpumask_t *cpus = qman_affine_cpus();
719 struct cpumask old_cpumask = current->cpus_allowed;
720 static struct platform_device_info qi_pdev_info = {
721 .name = "caam_qi",
722 .id = PLATFORM_DEVID_NONE
723 };
724
725 /*
726	 * QMan requires CGRs to be removed from the same CPU+portal on which
727	 * they were originally allocated. Hence we need to note down the
728	 * initialisation CPU and use the same CPU for module exit.
729	 * We select the first CPU from the list of portal-owning CPUs.
730 * Then we pin module init to this CPU.
731 */
732 mod_init_cpu = cpumask_first(cpus);
733 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
734
735 qi_pdev_info.parent = ctrldev;
736 qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
737 qi_pdev = platform_device_register_full(&qi_pdev_info);
738 if (IS_ERR(qi_pdev))
739 return PTR_ERR(qi_pdev);
740
741 ctrlpriv = dev_get_drvdata(ctrldev);
742 qidev = &qi_pdev->dev;
743
744 qipriv.qi_pdev = qi_pdev;
745 dev_set_drvdata(qidev, &qipriv);
746
747 /* Initialize the congestion detection */
748 err = init_cgr(qidev);
749 if (err) {
750 dev_err(qidev, "CGR initialization failed: %d\n", err);
751 platform_device_unregister(qi_pdev);
752 return err;
753 }
754
755 /* Initialise response FQs */
756 err = alloc_rsp_fqs(qidev);
757 if (err) {
758 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
759 free_rsp_fqs();
760 platform_device_unregister(qi_pdev);
761 return err;
762 }
763
764 /*
765	 * Enable the NAPI contexts on each core that has an affine
766 * portal.
767 */
768 for_each_cpu(i, cpus) {
769 struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
770 struct caam_napi *caam_napi = &priv->caam_napi;
771 struct napi_struct *irqtask = &caam_napi->irqtask;
772 struct net_device *net_dev = &priv->net_dev;
773
774 net_dev->dev = *qidev;
775 INIT_LIST_HEAD(&net_dev->napi_list);
776
777 netif_napi_add(net_dev, irqtask, caam_qi_poll,
778 CAAM_NAPI_WEIGHT);
779
780 napi_enable(irqtask);
781 }
782
783 /* Hook up QI device to parent controlling caam device */
784 ctrlpriv->qidev = qidev;
785
786 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
787 SLAB_CACHE_DMA, NULL);
788 if (!qi_cache) {
789 dev_err(qidev, "Can't allocate CAAM cache\n");
790 free_rsp_fqs();
791 platform_device_unregister(qi_pdev);
792 return -ENOMEM;
793 }
794
795 /* Done with the CGRs; restore the cpus allowed mask */
796 set_cpus_allowed_ptr(current, &old_cpumask);
797#ifdef CONFIG_DEBUG_FS
798 ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
799 ctrlpriv->ctl,
800 &times_congested,
801 &caam_fops_u64_ro);
802#endif
803 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
804 return 0;
805}
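
The caam_congested flag exported above is the backend's backpressure signal: cgr_cb() flips it when the congestion group crosses the threshold set up in init_cgr(). A frontend built on this backend could consult it before enqueuing; the following is only an illustrative sketch (my_enqueue_with_backoff() is hypothetical, not code from this patch):

/* Hypothetical caller-side backoff built on caam_congested */
static int my_enqueue_with_backoff(struct device *qidev,
				   struct caam_drv_req *req)
{
	int ret;

	do {
		/* congestion group over threshold: let NAPI drain responses */
		while (caam_congested)
			cpu_relax();

		/* caam_qi_enqueue() already retries -EBUSY internally */
		ret = caam_qi_enqueue(qidev, req);
	} while (ret == -EBUSY);

	return ret;
}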
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
new file mode 100644
index 000000000000..33b0433f5f22
--- /dev/null
+++ b/drivers/crypto/caam/qi.h
@@ -0,0 +1,201 @@
1/*
2 * Public definitions for the CAAM/QI (Queue Interface) backend.
3 *
4 * Copyright 2013-2016 Freescale Semiconductor, Inc.
5 * Copyright 2016-2017 NXP
6 */
7
8#ifndef __QI_H__
9#define __QI_H__
10
11#include <soc/fsl/qman.h>
12#include "compat.h"
13#include "desc.h"
14#include "desc_constr.h"
15
16/*
17 * CAAM hardware constructs a job descriptor which points to a shared descriptor
 18 * (as pointed to by context_a of the to-CAAM FQ).
19 * When the job descriptor is executed by DECO, the whole job descriptor
20 * together with shared descriptor gets loaded in DECO buffer, which is
21 * 64 words (each 32-bit) long.
22 *
23 * The job descriptor constructed by CAAM hardware has the following layout:
24 *
25 * HEADER (1 word)
26 * Shdesc ptr (1 or 2 words)
27 * SEQ_OUT_PTR (1 word)
28 * Out ptr (1 or 2 words)
29 * Out length (1 word)
30 * SEQ_IN_PTR (1 word)
31 * In ptr (1 or 2 words)
32 * In length (1 word)
33 *
34 * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
35 *
 36 * Apart from the shdesc contents, the total number of words that get loaded
 37 * in the DECO buffer is 8 or 11. The remaining words in the DECO buffer can
 38 * be used for storing the shared descriptor.
39 */
40#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
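/*
 * Worked example, assuming desc_constr.h defines DESC_JOB_IO_LEN as
 * 5 commands plus 3 pointers: with 64-bit pointers the hardware-built job
 * descriptor takes 5 + 3 * 2 = 11 of the 64 DECO words, so MAX_SDLEN is
 * 53 words; with 32-bit pointers it takes 5 + 3 = 8 words, leaving 56.
 */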
41
42extern bool caam_congested __read_mostly;
43
44/*
45 * This is the request structure the driver application should fill while
 46 * submitting a job to the driver.
47 */
48struct caam_drv_req;
49
50/*
 51 * caam_qi_cbk - application's callback function invoked by the driver when
 52 * the request has been processed, successfully or not.
53 * @drv_req: original request that was submitted
54 * @status: completion status of request (0 - success, non-zero - error code)
55 */
56typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
57
58enum optype {
59 ENCRYPT,
60 DECRYPT,
61 GIVENCRYPT,
62 NUM_OP
63};
64
65/**
66 * caam_drv_ctx - CAAM/QI backend driver context
67 *
68 * The jobs are processed by the driver against a driver context.
69 * With every cryptographic context, a driver context is attached.
 70 * The driver context contains data for private use by the driver.
71 * For the applications, this is an opaque structure.
72 *
73 * @prehdr: preheader placed before shrd desc
74 * @sh_desc: shared descriptor
75 * @context_a: shared descriptor dma address
76 * @req_fq: to-CAAM request frame queue
77 * @rsp_fq: from-CAAM response frame queue
78 * @cpu: cpu on which to receive CAAM response
79 * @op_type: operation type
80 * @qidev: device pointer for CAAM/QI backend
81 */
82struct caam_drv_ctx {
83 u32 prehdr[2];
84 u32 sh_desc[MAX_SDLEN];
85 dma_addr_t context_a;
86 struct qman_fq *req_fq;
87 struct qman_fq *rsp_fq;
88 int cpu;
89 enum optype op_type;
90 struct device *qidev;
91} ____cacheline_aligned;
92
93/**
94 * caam_drv_req - The request structure the driver application should fill while
 95 * submitting a job to the driver.
96 * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
97 * buffers.
98 * @cbk: callback function to invoke when job is completed
99 * @app_ctx: arbitrary context attached with request by the application
100 *
 101 * The field mentioned below should not be modified by the application once
 102 * the request has been submitted. It is for use by the driver.
 103 *
 104 * @drv_ctx: driver context with which this request is associated (see
 105 * caam_drv_ctx_init())
106 */
107struct caam_drv_req {
108 struct qm_sg_entry fd_sgt[2];
109 struct caam_drv_ctx *drv_ctx;
110 caam_qi_cbk cbk;
111 void *app_ctx;
112} ____cacheline_aligned;
113
114/**
115 * caam_drv_ctx_init - Initialise a CAAM/QI driver context
116 *
 117 * A CAAM/QI driver context must be attached to each cryptographic context.
118 * This function allocates memory for CAAM/QI context and returns a handle to
119 * the application. This handle must be submitted along with each enqueue
120 * request to the driver by the application.
121 *
 122 * @cpu: CPU on which the application prefers the driver to receive CAAM
 123 * responses. The request completion callback is issued from this
124 * CPU.
125 * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
126 * context.
127 *
128 * Returns a driver context on success or negative error code on failure.
129 */
130struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
131 u32 *sh_desc);
132
133/**
134 * caam_qi_enqueue - Submit a request to QI backend driver.
135 *
136 * The request structure must be properly filled as described above.
137 *
138 * @qidev: device pointer for QI backend
139 * @req: CAAM QI request structure
140 *
141 * Returns 0 on success or negative error code on failure.
142 */
143int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
144
145/**
 146 * caam_drv_ctx_busy - Check whether there are too many jobs pending with
 147 * CAAM or too many CAAM responses pending to be processed.
 148 * @drv_ctx: driver context for which the job is to be submitted
 149 *
 150 * Returns the CAAM congestion status (true/false).
151 */
152bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
153
154/**
155 * caam_drv_ctx_update - Update QI driver context
156 *
 157 * Invoked when the shared descriptor in the driver context needs to be changed.
158 *
159 * @drv_ctx: driver context to be updated
160 * @sh_desc: new shared descriptor pointer to be updated in QI driver context
161 *
162 * Returns 0 on success or negative error code on failure.
163 */
164int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
165
166/**
167 * caam_drv_ctx_rel - Release a QI driver context
168 * @drv_ctx: context to be released
169 */
170void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
171
172int caam_qi_init(struct platform_device *pdev);
173int caam_qi_shutdown(struct device *dev);
174
175/**
176 * qi_cache_alloc - Allocate buffers from CAAM-QI cache
177 *
178 * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
179 * to be allocated on the hotpath. Instead of using malloc, one can use the
180 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 181 * will have a size of CAAM_QI_MEMCACHE_SIZE (512B), which is sufficient for hosting 16 SG entries.
182 *
183 * @flags: flags that would be used for the equivalent malloc(..) call
184 *
185 * Returns a pointer to a retrieved buffer on success or NULL on failure.
186 */
187void *qi_cache_alloc(gfp_t flags);
188
189/**
190 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
191 *
192 * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
193 * the buffer previously allocated by a qi_cache_alloc call.
 194 * No checking is done; the call is a passthrough to
 195 * kmem_cache_free(...).
196 *
197 * @obj: object previously allocated using qi_cache_alloc()
198 */
199void qi_cache_free(void *obj);
200
201#endif /* __QI_H__ */
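
Taken together, the declarations above imply the following life cycle for a backend user such as caamalg-qi: initialise a driver context, fill a request from the QI memory cache, enqueue it and wait for the callback. The sketch below is illustrative only; my_done_cbk()/my_run_job() are hypothetical, the data buffers are assumed to be DMA-mapped already, and the dma_to_qm_sg_one*() helpers come from sg_sw_qm.h (added next in this patch):

#include <linux/completion.h>
#include "qi.h"
#include "sg_sw_qm.h"

static void my_done_cbk(struct caam_drv_req *req, u32 status)
{
	/* status is 0 on success, a CAAM error code otherwise */
	complete(req->app_ctx);
}

static int my_run_job(struct device *qidev, u32 *sh_desc,
		      dma_addr_t out, u32 outlen, dma_addr_t in, u32 inlen)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct caam_drv_ctx *drv_ctx;
	struct caam_drv_req *req;
	int cpu = 0;	/* preferred CPU; rewritten if it owns no portal */
	int ret;

	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	req = qi_cache_alloc(GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_rel;
	}

	/* compound frame: fd_sgt[0] is the output, fd_sgt[1] the input */
	dma_to_qm_sg_one(&req->fd_sgt[0], out, outlen, 0);
	dma_to_qm_sg_one_last(&req->fd_sgt[1], in, inlen, 0);
	req->drv_ctx = drv_ctx;
	req->cbk = my_done_cbk;
	req->app_ctx = &done;

	ret = caam_qi_enqueue(qidev, req);
	if (!ret)
		wait_for_completion(&done);

	qi_cache_free(req);
out_rel:
	caam_drv_ctx_rel(drv_ctx);
	return ret;
}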
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
new file mode 100644
index 000000000000..d000b4df745f
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -0,0 +1,108 @@
1/*
2 * Copyright 2013-2016 Freescale Semiconductor, Inc.
3 * Copyright 2016-2017 NXP
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Freescale Semiconductor nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") as published by the Free Software
19 * Foundation, either version 2 of that License or (at your option) any
20 * later version.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
23 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
26 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#ifndef __SG_SW_QM_H
35#define __SG_SW_QM_H
36
37#include <soc/fsl/qman.h>
38#include "regs.h"
39
40static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
41 u16 offset)
42{
43 qm_sg_entry_set64(qm_sg_ptr, dma);
44 qm_sg_ptr->__reserved2 = 0;
45 qm_sg_ptr->bpid = 0;
46 qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
47}
48
49static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
50 dma_addr_t dma, u32 len, u16 offset)
51{
52 __dma_to_qm_sg(qm_sg_ptr, dma, offset);
53 qm_sg_entry_set_len(qm_sg_ptr, len);
54}
55
56static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
57 dma_addr_t dma, u32 len, u16 offset)
58{
59 __dma_to_qm_sg(qm_sg_ptr, dma, offset);
60 qm_sg_entry_set_f(qm_sg_ptr, len);
61}
62
63static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
64 dma_addr_t dma, u32 len, u16 offset)
65{
66 __dma_to_qm_sg(qm_sg_ptr, dma, offset);
67 qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
68}
69
70static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
71 dma_addr_t dma, u32 len,
72 u16 offset)
73{
74 __dma_to_qm_sg(qm_sg_ptr, dma, offset);
75 qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
76 (len & QM_SG_LEN_MASK));
77}
78
79/*
 80 * convert scatterlist to h/w link table format,
 81 * without setting the final bit; instead, returns the last entry
82 */
83static inline struct qm_sg_entry *
84sg_to_qm_sg(struct scatterlist *sg, int sg_count,
85 struct qm_sg_entry *qm_sg_ptr, u16 offset)
86{
87 while (sg_count && sg) {
88 dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
89 sg_dma_len(sg), offset);
90 qm_sg_ptr++;
91 sg = sg_next(sg);
92 sg_count--;
93 }
94 return qm_sg_ptr - 1;
95}
96
97/*
98 * convert scatterlist to h/w link table format
 99 * the scatterlist must have been previously DMA mapped
100 */
101static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
102 struct qm_sg_entry *qm_sg_ptr, u16 offset)
103{
104 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
105 qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
106}
107
108#endif /* __SG_SW_QM_H */
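
For payloads spanning several scatterlist entries, sg_to_qm_sg_last() pairs with the DMA API as in this sketch (my_fill_table() and its preallocated table are hypothetical):

#include <linux/dma-mapping.h>
#include "sg_sw_qm.h"

static int my_fill_table(struct device *dev, struct scatterlist *sg,
			 int nents, struct qm_sg_entry *table)
{
	int count;

	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	/* fill 'count' entries; the last one gets the final (F) bit */
	sg_to_qm_sg_last(sg, count, table, 0);

	return count;
}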
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
new file mode 100644
index 000000000000..641268b784be
--- /dev/null
+++ b/drivers/crypto/cavium/Makefile
@@ -0,0 +1,4 @@
1#
2# Makefile for Cavium crypto device drivers
3#
4obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
new file mode 100644
index 000000000000..b2f3baaff757
--- /dev/null
+++ b/drivers/crypto/cavium/zip/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for Cavium's ZIP Driver.
3#
4
5obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
6thunderx_zip-y := zip_main.o \
7 zip_device.o \
8 zip_crypto.o \
9 zip_mem.o \
10 zip_deflate.o \
11 zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
new file mode 100644
index 000000000000..dc451e0a43c5
--- /dev/null
+++ b/drivers/crypto/cavium/zip/common.h
@@ -0,0 +1,202 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __COMMON_H__
47#define __COMMON_H__
48
49#include <linux/init.h>
50#include <linux/interrupt.h>
51#include <linux/kernel.h>
52#include <linux/module.h>
53#include <linux/pci.h>
54#include <linux/seq_file.h>
55#include <linux/string.h>
56#include <linux/types.h>
57#include <linux/version.h>
58
59/* Device specific zlib function definitions */
60#include "zip_device.h"
61
62/* ZIP device definitions */
63#include "zip_main.h"
64
65/* ZIP memory allocation/deallocation related definitions */
66#include "zip_mem.h"
67
68/* Device specific structure definitions */
69#include "zip_regs.h"
70
71#define ZIP_ERROR -1
72
73#define ZIP_FLUSH_FINISH 4
74
75#define RAW_FORMAT 0 /* for rawpipe */
76#define ZLIB_FORMAT 1 /* for zpipe */
77#define GZIP_FORMAT 2 /* for gzpipe */
78#define LZS_FORMAT 3 /* for lzspipe */
79
80/* Max number of ZIP devices supported */
81#define MAX_ZIP_DEVICES 2
82
83/* Configures the number of zip queues to be used */
84#define ZIP_NUM_QUEUES 2
85
86#define DYNAMIC_STOP_EXCESS 1024
87
88/* Maximum buffer sizes in direct mode */
89#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
90#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
91
92/**
93 * struct zip_operation - common data structure for comp and decomp operations
94 * @input: Next input byte is read from here
95 * @output: Next output byte written here
96 * @ctx_addr: Inflate context buffer address
97 * @history: Pointer to the history buffer
98 * @input_len: Number of bytes available at next_in
99 * @input_total_len: Total number of input bytes read
100 * @output_len: Remaining free space at next_out
101 * @output_total_len: Total number of bytes output so far
102 * @csum: Checksum value of the uncompressed data
103 * @flush: Flush flag
104 * @format: Format (depends on stream's wrap)
 105 * @speed: Speed (depends on stream's level)
 106 * @ccode: Compression code (stream's strategy)
107 * @lzs_flag: Flag for LZS support
108 * @begin_file: Beginning of file indication for inflate
109 * @history_len: Size of the history data
 110 * @end_file: End of file indication for inflate
111 * @compcode: Completion status of the ZIP invocation
112 * @bytes_read: Input bytes read in current instruction
113 * @bits_processed: Total bits processed for entire file
114 * @sizeofptr: To distinguish between ILP32 and LP64
115 * @sizeofzops: Optional just for padding
116 *
117 * This structure is used to maintain the required meta data for the
118 * comp and decomp operations.
119 */
120struct zip_operation {
121 u8 *input;
122 u8 *output;
123 u64 ctx_addr;
124 u64 history;
125
126 u32 input_len;
127 u32 input_total_len;
128
129 u32 output_len;
130 u32 output_total_len;
131
132 u32 csum;
133 u32 flush;
134
135 u32 format;
136 u32 speed;
137 u32 ccode;
138 u32 lzs_flag;
139
140 u32 begin_file;
141 u32 history_len;
142
143 u32 end_file;
144 u32 compcode;
145 u32 bytes_read;
146 u32 bits_processed;
147
148 u32 sizeofptr;
149 u32 sizeofzops;
150};
151
152/* error messages */
153#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
154 fmt "\n", __func__, __LINE__, ## args)
155
156#ifdef MSG_ENABLE
157/* Enable all messages */
158#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
159#else
160#define zip_msg(fmt, args...)
161#endif
162
163#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
164
165#ifdef DEBUG_LEVEL
166
167#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
168 strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
169
170#if DEBUG_LEVEL >= 4
171
172#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
173 fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
174
175#elif DEBUG_LEVEL >= 3
176
177#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
178 fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
179
180#elif DEBUG_LEVEL >= 2
181
182#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
183 fmt "\n", __func__, __LINE__, ## args)
184
185#else
186
187#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
188
189#endif /* DEBUG LEVEL >=4 */
190
191#else
192
193#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
194
195#endif /* DEBUG_LEVEL */
196#else
197
198#define zip_dbg(fmt, args...)
199
200#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
201
202#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
new file mode 100644
index 000000000000..8df4d26cf9d4
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -0,0 +1,313 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include "zip_crypto.h"
47
48static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
49 int lzs_flag)
50{
51 zip_ops->flush = ZIP_FLUSH_FINISH;
52
53	/* equivalent to level 6 of open-source zlib */
54 zip_ops->speed = 1;
55
56 if (!lzs_flag) {
57 zip_ops->ccode = 0; /* Auto Huffman */
58 zip_ops->lzs_flag = 0;
59 zip_ops->format = ZLIB_FORMAT;
60 } else {
61 zip_ops->ccode = 3; /* LZS Encoding */
62 zip_ops->lzs_flag = 1;
63 zip_ops->format = LZS_FORMAT;
64 }
65 zip_ops->begin_file = 1;
66 zip_ops->history_len = 0;
67 zip_ops->end_file = 1;
68 zip_ops->compcode = 0;
69 zip_ops->csum = 1; /* Adler checksum desired */
70}
71
72int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
73{
74 struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
75 struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
76
77 zip_static_init_zip_ops(comp_ctx, lzs_flag);
78 zip_static_init_zip_ops(decomp_ctx, lzs_flag);
79
80 comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
81 if (!comp_ctx->input)
82 return -ENOMEM;
83
84 comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
85 if (!comp_ctx->output)
86 goto err_comp_input;
87
88 decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
89 if (!decomp_ctx->input)
90 goto err_comp_output;
91
92 decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
93 if (!decomp_ctx->output)
94 goto err_decomp_input;
95
96 return 0;
97
98err_decomp_input:
99 zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
100
101err_comp_output:
102 zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
103
104err_comp_input:
105 zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
106
107 return -ENOMEM;
108}
109
110void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
111{
112 struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
113 struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
114
115 zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
116 zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
117
118 zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
119 zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
120}
121
122int zip_compress(const u8 *src, unsigned int slen,
123 u8 *dst, unsigned int *dlen,
124 struct zip_kernel_ctx *zip_ctx)
125{
126 struct zip_operation *zip_ops = NULL;
127 struct zip_state zip_state;
128 struct zip_device *zip = NULL;
129 int ret;
130
131 if (!zip_ctx || !src || !dst || !dlen)
132 return -ENOMEM;
133
134 zip = zip_get_device(zip_get_node_id());
135 if (!zip)
136 return -ENODEV;
137
138 memset(&zip_state, 0, sizeof(struct zip_state));
139 zip_ops = &zip_ctx->zip_comp;
140
141 zip_ops->input_len = slen;
142 zip_ops->output_len = *dlen;
143 memcpy(zip_ops->input, src, slen);
144
145 ret = zip_deflate(zip_ops, &zip_state, zip);
146
147 if (!ret) {
148 *dlen = zip_ops->output_len;
149 memcpy(dst, zip_ops->output, *dlen);
150 }
151
152 return ret;
153}
154
155int zip_decompress(const u8 *src, unsigned int slen,
156 u8 *dst, unsigned int *dlen,
157 struct zip_kernel_ctx *zip_ctx)
158{
159 struct zip_operation *zip_ops = NULL;
160 struct zip_state zip_state;
161 struct zip_device *zip = NULL;
162 int ret;
163
164 if (!zip_ctx || !src || !dst || !dlen)
165 return -ENOMEM;
166
167 zip = zip_get_device(zip_get_node_id());
168 if (!zip)
169 return -ENODEV;
170
171 memset(&zip_state, 0, sizeof(struct zip_state));
172 zip_ops = &zip_ctx->zip_decomp;
173 memcpy(zip_ops->input, src, slen);
174
175	/* Workaround for a bug in zlib which sometimes needs an extra byte */
176 if (zip_ops->ccode != 3) /* Not LZS Encoding */
177 zip_ops->input[slen++] = 0;
178
179 zip_ops->input_len = slen;
180 zip_ops->output_len = *dlen;
181
182 ret = zip_inflate(zip_ops, &zip_state, zip);
183
184 if (!ret) {
185 *dlen = zip_ops->output_len;
186 memcpy(dst, zip_ops->output, *dlen);
187 }
188
189 return ret;
190}
191
192/* Legacy Compress framework start */
193int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
194{
195 int ret;
196 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
197
198 ret = zip_ctx_init(zip_ctx, 0);
199
200 return ret;
201}
202
203int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
204{
205 int ret;
206 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
207
208 ret = zip_ctx_init(zip_ctx, 1);
209
210 return ret;
211}
212
213void zip_free_comp_ctx(struct crypto_tfm *tfm)
214{
215 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
216
217 zip_ctx_exit(zip_ctx);
218}
219
220int zip_comp_compress(struct crypto_tfm *tfm,
221 const u8 *src, unsigned int slen,
222 u8 *dst, unsigned int *dlen)
223{
224 int ret;
225 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
226
227 ret = zip_compress(src, slen, dst, dlen, zip_ctx);
228
229 return ret;
230}
231
232int zip_comp_decompress(struct crypto_tfm *tfm,
233 const u8 *src, unsigned int slen,
234 u8 *dst, unsigned int *dlen)
235{
236 int ret;
237 struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
238
239 ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
240
241 return ret;
242} /* Legacy compress framework end */
243
244/* SCOMP framework start */
245void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
246{
247 int ret;
248 struct zip_kernel_ctx *zip_ctx;
249
250 zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
251 if (!zip_ctx)
252 return ERR_PTR(-ENOMEM);
253
254 ret = zip_ctx_init(zip_ctx, 0);
255
256 if (ret) {
257 kzfree(zip_ctx);
258 return ERR_PTR(ret);
259 }
260
261 return zip_ctx;
262}
263
264void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
265{
266 int ret;
267 struct zip_kernel_ctx *zip_ctx;
268
269 zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
270 if (!zip_ctx)
271 return ERR_PTR(-ENOMEM);
272
273 ret = zip_ctx_init(zip_ctx, 1);
274
275 if (ret) {
276 kzfree(zip_ctx);
277 return ERR_PTR(ret);
278 }
279
280 return zip_ctx;
281}
282
283void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
284{
285 struct zip_kernel_ctx *zip_ctx = ctx;
286
287 zip_ctx_exit(zip_ctx);
288 kzfree(zip_ctx);
289}
290
291int zip_scomp_compress(struct crypto_scomp *tfm,
292 const u8 *src, unsigned int slen,
293 u8 *dst, unsigned int *dlen, void *ctx)
294{
295 int ret;
296 struct zip_kernel_ctx *zip_ctx = ctx;
297
298 ret = zip_compress(src, slen, dst, dlen, zip_ctx);
299
300 return ret;
301}
302
303int zip_scomp_decompress(struct crypto_scomp *tfm,
304 const u8 *src, unsigned int slen,
305 u8 *dst, unsigned int *dlen, void *ctx)
306{
307 int ret;
308 struct zip_kernel_ctx *zip_ctx = ctx;
309
310 ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
311
312 return ret;
313} /* SCOMP framework end */
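
The scomp hooks above are meant to be wired into the crypto API by the driver's probe path (zip_main.c, outside this hunk). A registration sketch along those lines, with illustrative cra_* values rather than the driver's actual ones:

#include <linux/module.h>
#include "zip_crypto.h"

static struct scomp_alg zip_scomp_deflate_sketch = {
	.alloc_ctx	= zip_alloc_scomp_ctx_deflate,
	.free_ctx	= zip_free_scomp_ctx,
	.compress	= zip_scomp_compress,
	.decompress	= zip_scomp_decompress,
	.base		= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-scomp-cavium-sketch",
		.cra_module		= THIS_MODULE,
		.cra_priority		= 300,
	},
};

/* to be registered at probe time: crypto_register_scomp(&zip_scomp_deflate_sketch); */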
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
new file mode 100644
index 000000000000..b59ddfcacd34
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.h
@@ -0,0 +1,79 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_CRYPTO_H__
47#define __ZIP_CRYPTO_H__
48
49#include <linux/crypto.h>
50#include <crypto/internal/scompress.h>
51#include "common.h"
52#include "zip_deflate.h"
53#include "zip_inflate.h"
54
55struct zip_kernel_ctx {
56 struct zip_operation zip_comp;
57 struct zip_operation zip_decomp;
58};
59
60int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
61int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
62void zip_free_comp_ctx(struct crypto_tfm *tfm);
63int zip_comp_compress(struct crypto_tfm *tfm,
64 const u8 *src, unsigned int slen,
65 u8 *dst, unsigned int *dlen);
66int zip_comp_decompress(struct crypto_tfm *tfm,
67 const u8 *src, unsigned int slen,
68 u8 *dst, unsigned int *dlen);
69
70void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
71void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
72void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
73int zip_scomp_compress(struct crypto_scomp *tfm,
74 const u8 *src, unsigned int slen,
75 u8 *dst, unsigned int *dlen, void *ctx);
76int zip_scomp_decompress(struct crypto_scomp *tfm,
77 const u8 *src, unsigned int slen,
78 u8 *dst, unsigned int *dlen, void *ctx);
79#endif
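
The legacy hooks declared above surface through a "deflate" crypto_comp transform once the algorithm is registered; a minimal, hypothetical in-kernel user would look like:

#include <linux/crypto.h>
#include <linux/err.h>

static int my_deflate(const u8 *src, unsigned int slen,
		      u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* on entry *dlen is the output capacity, on exit the produced length */
	err = crypto_comp_compress(tfm, src, slen, dst, dlen);
	crypto_free_comp(tfm);

	return err;
}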
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
new file mode 100644
index 000000000000..9a944b8c1e29
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.c
@@ -0,0 +1,200 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include <linux/delay.h>
47#include <linux/sched.h>
48
49#include "common.h"
50#include "zip_deflate.h"
51
52/* Prepares the deflate zip command */
53static int prepare_zip_command(struct zip_operation *zip_ops,
54 struct zip_state *s, union zip_inst_s *zip_cmd)
55{
56 union zip_zres_s *result_ptr = &s->result;
57
58 memset(zip_cmd, 0, sizeof(s->zip_cmd));
59 memset(result_ptr, 0, sizeof(s->result));
60
61 /* IWORD #0 */
62 /* History gather */
63 zip_cmd->s.hg = 0;
64 /* compression enable = 1 for deflate */
65 zip_cmd->s.ce = 1;
66 /* sf (sync flush) */
67 zip_cmd->s.sf = 1;
68 /* ef (end of file) */
69 if (zip_ops->flush == ZIP_FLUSH_FINISH) {
70 zip_cmd->s.ef = 1;
71 zip_cmd->s.sf = 0;
72 }
73
74 zip_cmd->s.cc = zip_ops->ccode;
75 /* ss (compression speed/storage) */
76 zip_cmd->s.ss = zip_ops->speed;
77
78 /* IWORD #1 */
79 /* adler checksum */
80 zip_cmd->s.adlercrc32 = zip_ops->csum;
81 zip_cmd->s.historylength = zip_ops->history_len;
82 zip_cmd->s.dg = 0;
83
84 /* IWORD # 6 and 7 - compression input/history pointer */
85 zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
86 zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
87 zip_ops->history_len);
88 zip_cmd->s.ds = 0;
89
90 /* IWORD # 8 and 9 - Output pointer */
91 zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
92 zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
93 /* maximum number of output-stream bytes that can be written */
94 zip_cmd->s.totaloutputlength = zip_ops->output_len;
95
96 /* IWORD # 10 and 11 - Result pointer */
97 zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
98 /* Clearing completion code */
99 result_ptr->s.compcode = 0;
100
101 return 0;
102}
103
104/**
105 * zip_deflate - API to offload deflate operation to hardware
106 * @zip_ops: Pointer to zip operation structure
107 * @s: Pointer to the structure representing zip state
108 * @zip_dev: Pointer to zip device structure
109 *
110 * This function prepares the zip deflate command and submits it to the zip
111 * engine for processing.
112 *
113 * Return: 0 if successful or error code
114 */
115int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
116 struct zip_device *zip_dev)
117{
118 union zip_inst_s *zip_cmd = &s->zip_cmd;
119 union zip_zres_s *result_ptr = &s->result;
120 u32 queue;
121
122 /* Prepares zip command based on the input parameters */
123 prepare_zip_command(zip_ops, s, zip_cmd);
124
125 atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
126	/* Loads the zip command into the command queue and rings the doorbell */
127 queue = zip_load_instr(zip_cmd, zip_dev);
128
129 /* Stats update for compression requests submitted */
130 atomic64_inc(&zip_dev->stats.comp_req_submit);
131
132	while (!result_ptr->s.compcode)
133		continue; /* busy-poll until the engine posts a completion code */
134
135 /* Stats update for compression requests completed */
136 atomic64_inc(&zip_dev->stats.comp_req_complete);
137
138 zip_ops->compcode = result_ptr->s.compcode;
139 switch (zip_ops->compcode) {
140 case ZIP_CMD_NOTDONE:
141 zip_dbg("Zip instruction not yet completed");
142 return ZIP_ERROR;
143
144 case ZIP_CMD_SUCCESS:
145 zip_dbg("Zip instruction completed successfully");
146 zip_update_cmd_bufs(zip_dev, queue);
147 break;
148
149 case ZIP_CMD_DTRUNC:
150 zip_dbg("Output Truncate error");
151 /* Returning ZIP_ERROR to avoid copy to user */
152 return ZIP_ERROR;
153
154 default:
155 zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
156 return ZIP_ERROR;
157 }
158
159 /* Update the CRC depending on the format */
160 switch (zip_ops->format) {
161 case RAW_FORMAT:
162 zip_dbg("RAW Format: %d ", zip_ops->format);
163		/* Get checksum from engine; it is fed back via adlercrc32 on the next chunk */
164 zip_ops->csum = result_ptr->s.adler32;
165 break;
166
167 case ZLIB_FORMAT:
168 zip_dbg("ZLIB Format: %d ", zip_ops->format);
169 zip_ops->csum = result_ptr->s.adler32;
170 break;
171
172 case GZIP_FORMAT:
173 zip_dbg("GZIP Format: %d ", zip_ops->format);
174 zip_ops->csum = result_ptr->s.crc32;
175 break;
176
177 case LZS_FORMAT:
178 zip_dbg("LZS Format: %d ", zip_ops->format);
179 break;
180
181 default:
182 zip_err("Unknown Format:%d\n", zip_ops->format);
183 }
184
185 atomic64_add(result_ptr->s.totalbyteswritten,
186 &zip_dev->stats.comp_out_bytes);
187
188 /* Update output_len */
189 if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
190		/* Can occur under dynamic stop, when output_len is below the ONFSIZE constant */
191 zip_err("output_len (%d) < total bytes written(%d)\n",
192 zip_ops->output_len, result_ptr->s.totalbyteswritten);
193 zip_ops->output_len = 0;
194
195 } else {
196 zip_ops->output_len = result_ptr->s.totalbyteswritten;
197 }
198
199 return 0;
200}
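
For illustration, a minimal synchronous caller might look like the sketch below. The helper name, return convention, and buffer setup are assumptions (the real callers live in zip_crypto.c, outside this hunk); the state is heap-allocated because prepare_zip_command() hands physical addresses of its members to the engine via __pa().

/* Hypothetical single-shot compression helper -- a sketch, not driver code */
static int example_zip_compress(struct zip_device *zip_dev,
				u8 *src, u32 slen, u8 *dst, u32 dlen)
{
	struct zip_operation ops = {};
	struct zip_state *s;
	int ret;

	s = kzalloc(sizeof(*s), GFP_KERNEL);	/* __pa() needs direct-mapped memory */
	if (!s)
		return -ENOMEM;

	ops.input      = src;
	ops.input_len  = slen;
	ops.output     = dst;
	ops.output_len = dlen;
	ops.ccode      = 0;			/* deflate coding */
	ops.format     = RAW_FORMAT;		/* csum seeding is format-dependent */
	ops.flush      = ZIP_FLUSH_FINISH;	/* single shot: EF set, SF cleared */

	ret = zip_deflate(&ops, s, zip_dev);	/* polls until compcode is posted */
	if (!ret)
		ret = ops.output_len;		/* bytes the engine wrote */

	kfree(s);
	return ret;
}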
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
new file mode 100644
index 000000000000..1d32e76edc4d
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.h
@@ -0,0 +1,62 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_DEFLATE_H__
47#define __ZIP_DEFLATE_H__
48
49/**
50 * zip_deflate - API to offload deflate operation to hardware
51 * @zip_ops: Pointer to zip operation structure
52 * @s: Pointer to the structure representing zip state
53 * @zip_dev: Pointer to the structure representing zip device
54 *
55 * This function prepares the zip deflate command and submits it to the zip
56 * engine by ringing the doorbell.
57 *
58 * Return: 0 if successful or error code
59 */
60int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
61 struct zip_device *zip_dev);
62#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
new file mode 100644
index 000000000000..ccf21fb91513
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.c
@@ -0,0 +1,202 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include "common.h"
47#include "zip_deflate.h"
48
49/**
50 * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
51 *
52 * @zip_dev: Pointer to zip device structure
53 * @queue: Queue number
54 *
55 * Return: Bytes consumed in the command queue buffer.
56 */
57static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
58{
59	return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
60		sizeof(u64)); /* pointer difference is in u64 units, 8 bytes each */
61}
62
63/**
64 * zip_load_instr - Submits the instruction into the ZIP command queue
65 * @instr: Pointer to the instruction to be submitted
66 * @zip_dev: Pointer to ZIP device structure to which the instruction is to
67 * be submitted
68 *
69 * This function copies the ZIP instruction to the command queue and rings the
70 * doorbell to notify the engine of the instruction submission. The command
71 * queue is maintained in a circular fashion. When there is space for exactly
72 * one instruction left, the queue's next-chunk pointer is made to point
73 * back to the head of the queue, keeping the queue circular.
74 *
75 * Return: Queue number to which the instruction was submitted
76 */
77u32 zip_load_instr(union zip_inst_s *instr,
78 struct zip_device *zip_dev)
79{
80 union zip_quex_doorbell dbell;
81 u32 queue = 0;
82 u32 consumed = 0;
83 u64 *ncb_ptr = NULL;
84 union zip_nptr_s ncp;
85
86 /*
87 * Distribute the instructions between the enabled queues based on
88 * the CPU id.
89 */
90 if (smp_processor_id() % 2 == 0)
91 queue = 0;
92 else
93 queue = 1;
94
95 zip_dbg("CPU Core: %d Queue number:%d", smp_processor_id(), queue);
96
97 /* Take cmd buffer lock */
98 spin_lock(&zip_dev->iq[queue].lock);
99
100 /*
101 * Command Queue implementation
102	 * 1. If there is space for new instructions, push the cmd at sw_head.
103	 * 2. If there is space for exactly one instruction, push the new cmd
104 * at the sw_head. Make sw_head point to the sw_tail to make it
105 * circular. Write sw_head's physical address to the "Next-Chunk
106 * Buffer Ptr" to make it cmd_hw_tail.
107 * 3. Ring the door bell.
108 */
109 zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
110 zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
111
112 consumed = zip_cmd_queue_consumed(zip_dev, queue);
113 /* Check if there is space to push just one cmd */
114 if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
115 zip_dbg("Cmd queue space available for single command");
116		/* Space for one cmd; push it and make the queue circular */
117 memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
118 sizeof(union zip_inst_s));
119 zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
120
121 /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
122 ncb_ptr = zip_dev->iq[queue].sw_head;
123
124 zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
125 ncb_ptr, zip_dev->iq[queue].sw_head - 16);
126
127 /* Using Circular command queue */
128 zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
129 /* Mark this buffer for free */
130 zip_dev->iq[queue].free_flag = 1;
131
132 /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
133 ncp.u_reg64 = 0ull;
134 ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
135 *ncb_ptr = ncp.u_reg64;
136 zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
137 *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
138
139 zip_dev->iq[queue].pend_cnt++;
140
141 } else {
142 zip_dbg("Enough space is available for commands");
143 /* Push this cmd to cmd queue buffer */
144 memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
145 sizeof(union zip_inst_s));
146 zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
147
148 zip_dev->iq[queue].pend_cnt++;
149 }
150 zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
151 zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
152 zip_dev->iq[queue].hw_tail);
153
154 zip_dbg(" Pushed the new cmd : pend_cnt : %d",
155 zip_dev->iq[queue].pend_cnt);
156
157 /* Ring the doorbell */
158 dbell.u_reg64 = 0ull;
159 dbell.s.dbell_cnt = 1;
160 zip_reg_write(dbell.u_reg64,
161 (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
162
163 /* Unlock cmd buffer lock */
164 spin_unlock(&zip_dev->iq[queue].lock);
165
166 return queue;
167}
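
Worked numbers for the wrap check above, using the constants from zip_main.h:

/*
 * Each instruction is 16 x 64-bit words = 128 bytes, so consumed advances
 * by 128 per submission. ZIP_CMD_QBUF_SIZE is 8064 + 8, the trailing
 * 8 bytes holding the next-chunk pointer, so the buffer fits
 * 8064 / 128 = 63 instructions. After 62 submissions,
 * consumed = 62 * 128 = 7936, and 7936 + 128 == 8072 - 8: the 63rd
 * command takes the wrap path -- it is pushed, the next-chunk pointer is
 * written just past it, and sw_head snaps back to sw_tail.
 */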
168
169/**
170 * zip_update_cmd_bufs - Updates the queue statistics after posting the
171 * instruction
172 * @zip_dev: Pointer to zip device structure
173 * @queue: Queue number
174 */
175void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
176{
177 /* Take cmd buffer lock */
178 spin_lock(&zip_dev->iq[queue].lock);
179
180 /* Check if the previous buffer can be freed */
181 if (zip_dev->iq[queue].free_flag == 1) {
182 zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
183 /* Reset the free flag */
184 zip_dev->iq[queue].free_flag = 0;
185
186 /* Point the hw_tail to start of the new chunk buffer */
187 zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
188 } else {
189 zip_dbg("Free flag not set. increment hw tail");
190 zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
191 }
192
193 zip_dev->iq[queue].done_cnt++;
194 zip_dev->iq[queue].pend_cnt--;
195
196 zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
197 zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
198 zip_dev->iq[queue].hw_tail);
199 zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
200
201 spin_unlock(&zip_dev->iq[queue].lock);
202}
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
new file mode 100644
index 000000000000..9e18b3b93d38
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.h
@@ -0,0 +1,108 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_DEVICE_H__
47#define __ZIP_DEVICE_H__
48
49#include <linux/types.h>
50#include "zip_main.h"
51
52struct sg_info {
53 /*
54 * Pointer to the input data when scatter_gather == 0 and
55 * pointer to the input gather list buffer when scatter_gather == 1
56 */
57 union zip_zptr_s *gather;
58
59 /*
60 * Pointer to the output data when scatter_gather == 0 and
61 * pointer to the output scatter list buffer when scatter_gather == 1
62 */
63 union zip_zptr_s *scatter;
64
65 /*
66 * Holds size of the output buffer pointed by scatter list
67 * when scatter_gather == 1
68 */
69 u64 scatter_buf_size;
70
71 /* for gather data */
72 u64 gather_enable;
73
74 /* for scatter data */
75 u64 scatter_enable;
76
77 /* Number of gather list pointers for gather data */
78 u32 gbuf_cnt;
79
80 /* Number of scatter list pointers for scatter data */
81 u32 sbuf_cnt;
82
83 /* Buffers allocation state */
84 u8 alloc_state;
85};
86
87/**
88 * struct zip_state - Structure representing the required information related
89 * to a command
90 * @zip_cmd: Pointer to zip instruction structure
91 * @result: Pointer to zip result structure
92 * @ctx: Context pointer for inflate
93 * @history: Decompression history pointer
94 * @sginfo: Scatter-gather info structure
95 */
96struct zip_state {
97 union zip_inst_s zip_cmd;
98 union zip_zres_s result;
99 union zip_zptr_s *ctx;
100 union zip_zptr_s *history;
101 struct sg_info sginfo;
102};
103
104#define ZIP_CONTEXT_SIZE 2048
105#define ZIP_INFLATE_HISTORY_SIZE 32768
106#define ZIP_DEFLATE_HISTORY_SIZE 32768
107
108#endif
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
new file mode 100644
index 000000000000..50cbdd83dbf2
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.c
@@ -0,0 +1,223 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include <linux/delay.h>
47#include <linux/sched.h>
48
49#include "common.h"
50#include "zip_inflate.h"
51
52static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
53 struct zip_state *s, union zip_inst_s *zip_cmd)
54{
55 union zip_zres_s *result_ptr = &s->result;
56
57 memset(zip_cmd, 0, sizeof(s->zip_cmd));
58 memset(result_ptr, 0, sizeof(s->result));
59
60 /* IWORD#0 */
61
62 /* Decompression History Gather list - no gather list */
63 zip_cmd->s.hg = 0;
64 /* For decompression, CE must be 0x0. */
65 zip_cmd->s.ce = 0;
66 /* For decompression, SS must be 0x0. */
67 zip_cmd->s.ss = 0;
68 /* For decompression, SF should always be set. */
69 zip_cmd->s.sf = 1;
70
71 /* Begin File */
72 if (zip_ops->begin_file == 0)
73 zip_cmd->s.bf = 0;
74 else
75 zip_cmd->s.bf = 1;
76
77 zip_cmd->s.ef = 1;
78 /* 0: for Deflate decompression, 3: for LZS decompression */
79 zip_cmd->s.cc = zip_ops->ccode;
80
81 /* IWORD #1*/
82
83 /* adler checksum */
84 zip_cmd->s.adlercrc32 = zip_ops->csum;
85
86 /*
87 * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
88 * History data is added to a decompression operation via IWORD3.
89 */
90 zip_cmd->s.historylength = 0;
91 zip_cmd->s.ds = 0;
92
93 /* IWORD # 8 and 9 - Output pointer */
94 zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
95 zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
96
97 /* Maximum number of output-stream bytes that can be written */
98 zip_cmd->s.totaloutputlength = zip_ops->output_len;
99
100 zip_dbg("Data Direct Input case ");
101
102 /* IWORD # 6 and 7 - input pointer */
103 zip_cmd->s.dg = 0;
104 zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
105 zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
106
107 /* IWORD # 10 and 11 - Result pointer */
108 zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
109
110 /* Clearing completion code */
111 result_ptr->s.compcode = 0;
112
113	/* Always returns 0 for the time being. */
114 return 0;
115}
116
117/**
118 * zip_inflate - API to offload inflate operation to hardware
119 * @zip_ops: Pointer to zip operation structure
120 * @s: Pointer to the structure representing zip state
121 * @zip_dev: Pointer to zip device structure
122 *
123 * This function prepares the zip inflate command and submits it to the zip
124 * engine for processing.
125 *
126 * Return: 0 if successful or error code
127 */
128int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
129 struct zip_device *zip_dev)
130{
131 union zip_inst_s *zip_cmd = &s->zip_cmd;
132 union zip_zres_s *result_ptr = &s->result;
133 u32 queue;
134
135 /* Prepare inflate zip command */
136 prepare_inflate_zcmd(zip_ops, s, zip_cmd);
137
138 atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
139
140 /* Load inflate command to zip queue and ring the doorbell */
141 queue = zip_load_instr(zip_cmd, zip_dev);
142
143 /* Decompression requests submitted stats update */
144 atomic64_inc(&zip_dev->stats.decomp_req_submit);
145
146	while (!result_ptr->s.compcode)
147		continue; /* busy-poll until the engine posts a completion code */
148
149 /* Decompression requests completed stats update */
150 atomic64_inc(&zip_dev->stats.decomp_req_complete);
151
152 zip_ops->compcode = result_ptr->s.compcode;
153 switch (zip_ops->compcode) {
154 case ZIP_CMD_NOTDONE:
155 zip_dbg("Zip Instruction not yet completed\n");
156 return ZIP_ERROR;
157
158 case ZIP_CMD_SUCCESS:
159 zip_dbg("Zip Instruction completed successfully\n");
160 break;
161
162 case ZIP_CMD_DYNAMIC_STOP:
163 zip_dbg(" Dynamic stop Initiated\n");
164 break;
165
166 default:
167 zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
168 atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
169 zip_update_cmd_bufs(zip_dev, queue);
170 return ZIP_ERROR;
171 }
172
173 zip_update_cmd_bufs(zip_dev, queue);
174
175 if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
176 (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
177 result_ptr->s.ef = 1;
178
179 zip_ops->csum = result_ptr->s.adler32;
180
181 atomic64_add(result_ptr->s.totalbyteswritten,
182 &zip_dev->stats.decomp_out_bytes);
183
184 if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
185 zip_err("output_len (%d) < total bytes written (%d)\n",
186 zip_ops->output_len, result_ptr->s.totalbyteswritten);
187 zip_ops->output_len = 0;
188 } else {
189 zip_ops->output_len = result_ptr->s.totalbyteswritten;
190 }
191
192 zip_ops->bytes_read = result_ptr->s.totalbytesread;
193 zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
194 zip_ops->end_file = result_ptr->s.ef;
195 if (zip_ops->end_file) {
196 switch (zip_ops->format) {
197 case RAW_FORMAT:
198 zip_dbg("RAW Format: %d ", zip_ops->format);
199 /* Get checksum from engine */
200 zip_ops->csum = result_ptr->s.adler32;
201 break;
202
203 case ZLIB_FORMAT:
204 zip_dbg("ZLIB Format: %d ", zip_ops->format);
205 zip_ops->csum = result_ptr->s.adler32;
206 break;
207
208 case GZIP_FORMAT:
209 zip_dbg("GZIP Format: %d ", zip_ops->format);
210 zip_ops->csum = result_ptr->s.crc32;
211 break;
212
213 case LZS_FORMAT:
214 zip_dbg("LZS Format: %d ", zip_ops->format);
215 break;
216
217 default:
218 zip_err("Format error:%d\n", zip_ops->format);
219 }
220 }
221
222 return 0;
223}
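
As with deflate, a hedged caller sketch; the field names come from prepare_inflate_zcmd() above, while the helper name and single-chunk policy are assumptions:

/* Hypothetical single-shot decompression helper -- sketch only */
static int example_zip_decompress(struct zip_device *zip_dev,
				  u8 *src, u32 slen, u8 *dst, u32 dlen)
{
	struct zip_operation ops = {};
	struct zip_state *s;
	int ret;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	ops.input      = src;
	ops.input_len  = slen;
	ops.output     = dst;
	ops.output_len = dlen;
	ops.ccode      = 0;	/* 0 = deflate, 3 = LZS (see IWORD#0 above) */
	ops.begin_file = 1;	/* first and only chunk, so BF is set */

	ret = zip_inflate(&ops, s, zip_dev);
	if (!ret)
		ret = ops.output_len;	/* decompressed size reported back */

	kfree(s);
	return ret;
}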
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
new file mode 100644
index 000000000000..6b20f179978e
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.h
@@ -0,0 +1,62 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_INFLATE_H__
47#define __ZIP_INFLATE_H__
48
49/**
50 * zip_inflate - API to offload inflate operation to hardware
51 * @zip_ops: Pointer to zip operation structure
52 * @s: Pointer to the structure representing zip state
53 * @zip_dev: Pointer to the structure representing zip device
54 *
55 * This function prepares the zip inflate command and submits it to the zip
56 * engine for processing.
57 *
58 * Return: 0 if successful or error code
59 */
60int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
61 struct zip_device *zip_dev);
62#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
new file mode 100644
index 000000000000..1cd8aa488185
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -0,0 +1,729 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include "common.h"
47#include "zip_crypto.h"
48
49#define DRV_NAME "ThunderX-ZIP"
50
51static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
52
53static const struct pci_device_id zip_id_table[] = {
54 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
55 { 0, }
56};
57
58void zip_reg_write(u64 val, u64 __iomem *addr)
59{
60 writeq(val, addr);
61}
62
63u64 zip_reg_read(u64 __iomem *addr)
64{
65 return readq(addr);
66}
67
68/*
69 * Allocates new ZIP device structure
70 * Returns zip_device pointer or NULL if cannot allocate memory for zip_device
71 */
72static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
73{
74 struct zip_device *zip = NULL;
75 int idx;
76
77 for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
78 if (!zip_dev[idx])
79 break;
80 }
81
82 /* To ensure that the index is within the limit */
83 if (idx < MAX_ZIP_DEVICES)
84 zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
85
86 if (!zip)
87 return NULL;
88
89 zip_dev[idx] = zip;
90 zip->index = idx;
91 return zip;
92}
93
94/**
95 * zip_get_device - Get ZIP device based on node id of cpu
96 *
97 * @node: Node id of the current cpu
98 * Return: Pointer to Zip device structure
99 */
100struct zip_device *zip_get_device(int node)
101{
102 if ((node < MAX_ZIP_DEVICES) && (node >= 0))
103 return zip_dev[node];
104
105 zip_err("ZIP device not found for node id %d\n", node);
106 return NULL;
107}
108
109/**
110 * zip_get_node_id - Get the node id of the current cpu
111 *
112 * Return: Node id of the current cpu
113 */
114int zip_get_node_id(void)
115{
116 return cpu_to_node(smp_processor_id());
117}
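
The two helpers are designed to be used together, so a submission path running on any CPU reaches its node-local engine; a sketch (the real call sites are in zip_crypto.c, outside this hunk):

/* Hypothetical: pick the ZIP device local to the calling CPU's node */
static struct zip_device *example_pick_device(void)
{
	struct zip_device *zip = zip_get_device(zip_get_node_id());

	if (!zip)
		pr_err("No ZIP device for this node\n");
	return zip;
}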
118
119/* Initializes the ZIP h/w sub-system */
120static int zip_init_hw(struct zip_device *zip)
121{
122 union zip_cmd_ctl cmd_ctl;
123 union zip_constants constants;
124 union zip_que_ena que_ena;
125 union zip_quex_map que_map;
126 union zip_que_pri que_pri;
127
128 union zip_quex_sbuf_addr que_sbuf_addr;
129 union zip_quex_sbuf_ctl que_sbuf_ctl;
130
131 int q = 0;
132
133 /* Enable the ZIP Engine(Core) Clock */
134 cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
135 cmd_ctl.s.forceclk = 1;
136 zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
137
138 zip_msg("ZIP_CMD_CTL : 0x%016llx",
139 zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
140
141 constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
142 zip->depth = constants.s.depth;
143 zip->onfsize = constants.s.onfsize;
144 zip->ctxsize = constants.s.ctxsize;
145
146 zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
147 zip->depth, zip->onfsize, zip->ctxsize);
148
149 /*
150 * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
151 * have the correct buffer pointer and size configured for each
152 * instruction queue.
153 */
154 for (q = 0; q < ZIP_NUM_QUEUES; q++) {
155 que_sbuf_ctl.u_reg64 = 0ull;
156 que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
157 que_sbuf_ctl.s.inst_be = 0;
158 que_sbuf_ctl.s.stream_id = 0;
159 zip_reg_write(que_sbuf_ctl.u_reg64,
160 (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
161
162 zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
163 zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
164 }
165
166 for (q = 0; q < ZIP_NUM_QUEUES; q++) {
167 memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
168
169 spin_lock_init(&zip->iq[q].lock);
170
171 if (zip_cmd_qbuf_alloc(zip, q)) {
172 while (q != 0) {
173 q--;
174 zip_cmd_qbuf_free(zip, q);
175 }
176 return -ENOMEM;
177 }
178
179 /* Initialize tail ptr to head */
180 zip->iq[q].sw_tail = zip->iq[q].sw_head;
181 zip->iq[q].hw_tail = zip->iq[q].sw_head;
182
183 /* Write the physical addr to register */
184 que_sbuf_addr.u_reg64 = 0ull;
185 que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
186 ZIP_128B_ALIGN);
187
188 zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
189 (u64)que_sbuf_addr.s.ptr);
190
191 zip_reg_write(que_sbuf_addr.u_reg64,
192 (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
193
194 zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
195 zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
196
197 zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
198 zip->iq[q].sw_head, zip->iq[q].sw_tail,
199 zip->iq[q].hw_tail);
200 zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
201 }
202
203 /*
204 * Queue-to-ZIP core mapping
205 * If a queue is not mapped to a particular core, it is equivalent to
206 * the ZIP core being disabled.
207 */
208 que_ena.u_reg64 = 0x0ull;
209 /* Enabling queues based on ZIP_NUM_QUEUES */
210 for (q = 0; q < ZIP_NUM_QUEUES; q++)
211 que_ena.s.ena |= (0x1 << q);
212 zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
213
214 zip_msg("QUE_ENA : 0x%016llx",
215 zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
216
217 for (q = 0; q < ZIP_NUM_QUEUES; q++) {
218 que_map.u_reg64 = 0ull;
219 /* Mapping each queue to two ZIP cores */
220 que_map.s.zce = 0x3;
221 zip_reg_write(que_map.u_reg64,
222 (zip->reg_base + ZIP_QUEX_MAP(q)));
223
224 zip_msg("QUE_MAP(%d) : 0x%016llx", q,
225 zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
226 }
227
228 que_pri.u_reg64 = 0ull;
229 for (q = 0; q < ZIP_NUM_QUEUES; q++)
230 que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
231 zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
232
233 zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
234
235 return 0;
236}
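
Assuming ZIP_NUM_QUEUES is 2 -- which matches the even/odd CPU split in zip_load_instr() -- the register values programmed above work out as follows:

/*
 * ZIP_QUE_ENA            ena = 0b11   -> queues 0 and 1 enabled
 * ZIP_QUE_PRI            pri = 0b11   -> both queues in high-priority RR
 * ZIP_QUEX_MAP(q)        zce = 0x3    -> each queue feeds ZIP cores 0 and 1
 * ZIP_QUEX_SBUF_CTL(q)  size = 8072 / 8 = 1009 64-bit words per queue
 */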
237
238static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
239{
240 struct device *dev = &pdev->dev;
241 struct zip_device *zip = NULL;
242 int err;
243
244 zip = zip_alloc_device(pdev);
245 if (!zip)
246 return -ENOMEM;
247
248 dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
249 pdev->vendor, pdev->device, dev_to_node(dev));
250
251 pci_set_drvdata(pdev, zip);
252 zip->pdev = pdev;
253
254 err = pci_enable_device(pdev);
255 if (err) {
256 dev_err(dev, "Failed to enable PCI device");
257 goto err_free_device;
258 }
259
260 err = pci_request_regions(pdev, DRV_NAME);
261 if (err) {
262 dev_err(dev, "PCI request regions failed 0x%x", err);
263 goto err_disable_device;
264 }
265
266 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
267 if (err) {
268 dev_err(dev, "Unable to get usable DMA configuration\n");
269 goto err_release_regions;
270 }
271
272 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
273 if (err) {
274 dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
275 goto err_release_regions;
276 }
277
278 /* MAP configuration registers */
279 zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
280 if (!zip->reg_base) {
281 dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
282 err = -ENOMEM;
283 goto err_release_regions;
284 }
285
286 /* Initialize ZIP Hardware */
287 err = zip_init_hw(zip);
288 if (err)
289 goto err_release_regions;
290
291 return 0;
292
293err_release_regions:
294 if (zip->reg_base)
295 iounmap(zip->reg_base);
296 pci_release_regions(pdev);
297
298err_disable_device:
299 pci_disable_device(pdev);
300
301err_free_device:
302 pci_set_drvdata(pdev, NULL);
303
304 /* Remove zip_dev from zip_device list, free the zip_device memory */
305 zip_dev[zip->index] = NULL;
306 devm_kfree(dev, zip);
307
308 return err;
309}
310
311static void zip_remove(struct pci_dev *pdev)
312{
313 struct zip_device *zip = pci_get_drvdata(pdev);
314 union zip_cmd_ctl cmd_ctl;
315 int q = 0;
316
317 if (!zip)
318 return;
319
320 if (zip->reg_base) {
321 cmd_ctl.u_reg64 = 0x0ull;
322 cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
323 zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
324 iounmap(zip->reg_base);
325 }
326
327 pci_release_regions(pdev);
328 pci_disable_device(pdev);
329
330 /*
331 * Free Command Queue buffers. This free should be called for all
332 * the enabled Queues.
333 */
334 for (q = 0; q < ZIP_NUM_QUEUES; q++)
335 zip_cmd_qbuf_free(zip, q);
336
337 pci_set_drvdata(pdev, NULL);
338 /* remove zip device from zip device list */
339 zip_dev[zip->index] = NULL;
340}
341
342/* PCI Sub-System Interface */
343static struct pci_driver zip_driver = {
344 .name = DRV_NAME,
345 .id_table = zip_id_table,
346 .probe = zip_probe,
347 .remove = zip_remove,
348};
349
350/* Kernel Crypto Subsystem Interface */
351
352static struct crypto_alg zip_comp_deflate = {
353 .cra_name = "deflate",
354 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
355 .cra_ctxsize = sizeof(struct zip_kernel_ctx),
356 .cra_priority = 300,
357 .cra_module = THIS_MODULE,
358 .cra_init = zip_alloc_comp_ctx_deflate,
359 .cra_exit = zip_free_comp_ctx,
360 .cra_u = { .compress = {
361 .coa_compress = zip_comp_compress,
362 .coa_decompress = zip_comp_decompress
363 } }
364};
365
366static struct crypto_alg zip_comp_lzs = {
367 .cra_name = "lzs",
368 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
369 .cra_ctxsize = sizeof(struct zip_kernel_ctx),
370 .cra_priority = 300,
371 .cra_module = THIS_MODULE,
372 .cra_init = zip_alloc_comp_ctx_lzs,
373 .cra_exit = zip_free_comp_ctx,
374 .cra_u = { .compress = {
375 .coa_compress = zip_comp_compress,
376 .coa_decompress = zip_comp_decompress
377 } }
378};
379
380static struct scomp_alg zip_scomp_deflate = {
381 .alloc_ctx = zip_alloc_scomp_ctx_deflate,
382 .free_ctx = zip_free_scomp_ctx,
383 .compress = zip_scomp_compress,
384 .decompress = zip_scomp_decompress,
385 .base = {
386 .cra_name = "deflate",
387 .cra_driver_name = "deflate-scomp",
388 .cra_module = THIS_MODULE,
389 .cra_priority = 300,
390 }
391};
392
393static struct scomp_alg zip_scomp_lzs = {
394 .alloc_ctx = zip_alloc_scomp_ctx_lzs,
395 .free_ctx = zip_free_scomp_ctx,
396 .compress = zip_scomp_compress,
397 .decompress = zip_scomp_decompress,
398 .base = {
399 .cra_name = "lzs",
400 .cra_driver_name = "lzs-scomp",
401 .cra_module = THIS_MODULE,
402 .cra_priority = 300,
403 }
404};
405
406static int zip_register_compression_device(void)
407{
408 int ret;
409
410 ret = crypto_register_alg(&zip_comp_deflate);
411 if (ret < 0) {
412 zip_err("Deflate algorithm registration failed\n");
413 return ret;
414 }
415
416 ret = crypto_register_alg(&zip_comp_lzs);
417 if (ret < 0) {
418 zip_err("LZS algorithm registration failed\n");
419 goto err_unregister_alg_deflate;
420 }
421
422 ret = crypto_register_scomp(&zip_scomp_deflate);
423 if (ret < 0) {
424 zip_err("Deflate scomp algorithm registration failed\n");
425 goto err_unregister_alg_lzs;
426 }
427
428 ret = crypto_register_scomp(&zip_scomp_lzs);
429 if (ret < 0) {
430 zip_err("LZS scomp algorithm registration failed\n");
431 goto err_unregister_scomp_deflate;
432 }
433
434 return ret;
435
436err_unregister_scomp_deflate:
437 crypto_unregister_scomp(&zip_scomp_deflate);
438err_unregister_alg_lzs:
439 crypto_unregister_alg(&zip_comp_lzs);
440err_unregister_alg_deflate:
441 crypto_unregister_alg(&zip_comp_deflate);
442
443 return ret;
444}
445
446static void zip_unregister_compression_device(void)
447{
448 crypto_unregister_alg(&zip_comp_deflate);
449 crypto_unregister_alg(&zip_comp_lzs);
450 crypto_unregister_scomp(&zip_scomp_deflate);
451 crypto_unregister_scomp(&zip_scomp_lzs);
452}
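
From the crypto API side, either registration can be exercised by algorithm name. A hedged consumer sketch using the legacy synchronous interface (crypto_alloc_comp() and friends are standard kernel crypto API; the helper itself is hypothetical):

/* Hypothetical consumer of the "deflate" algorithm registered above */
static int example_use_deflate(const u8 *src, unsigned int slen,
			       u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
	crypto_free_comp(tfm);
	return ret;
}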
453
454/*
455 * debugfs functions
456 */
457#ifdef CONFIG_DEBUG_FS
458#include <linux/debugfs.h>
459
460/* Displays ZIP device statistics */
461static int zip_show_stats(struct seq_file *s, void *unused)
462{
463 u64 val = 0ull;
464 u64 avg_chunk = 0ull, avg_cr = 0ull;
465 u32 q = 0;
466
467 int index = 0;
468 struct zip_device *zip;
469 struct zip_stats *st;
470
471 for (index = 0; index < MAX_ZIP_DEVICES; index++) {
472 if (zip_dev[index]) {
473 zip = zip_dev[index];
474 st = &zip->stats;
475
476 /* Get all the pending requests */
477 for (q = 0; q < ZIP_NUM_QUEUES; q++) {
478 val = zip_reg_read((zip->reg_base +
479 ZIP_DBG_COREX_STA(q)));
480 val = (val >> 32);
481 val = val & 0xffffff;
482 atomic64_add(val, &st->pending_req);
483 }
484
485			if (atomic64_read(&st->comp_req_complete))
486				avg_chunk = (atomic64_read(&st->comp_in_bytes) / atomic64_read(&st->comp_req_complete));
487			if (atomic64_read(&st->comp_out_bytes))
488				avg_cr = (atomic64_read(&st->comp_in_bytes) / atomic64_read(&st->comp_out_bytes));
489 seq_printf(s, " ZIP Device %d Stats\n"
490 "-----------------------------------\n"
491 "Comp Req Submitted : \t%lld\n"
492 "Comp Req Completed : \t%lld\n"
493 "Compress In Bytes : \t%lld\n"
494 "Compressed Out Bytes : \t%lld\n"
495 "Average Chunk size : \t%llu\n"
496 "Average Compression ratio : \t%llu\n"
497 "Decomp Req Submitted : \t%lld\n"
498 "Decomp Req Completed : \t%lld\n"
499 "Decompress In Bytes : \t%lld\n"
500 "Decompressed Out Bytes : \t%lld\n"
501 "Decompress Bad requests : \t%lld\n"
502 "Pending Req : \t%lld\n"
503 "---------------------------------\n",
504 index,
505 (u64)atomic64_read(&st->comp_req_submit),
506 (u64)atomic64_read(&st->comp_req_complete),
507 (u64)atomic64_read(&st->comp_in_bytes),
508 (u64)atomic64_read(&st->comp_out_bytes),
509 avg_chunk,
510 avg_cr,
511 (u64)atomic64_read(&st->decomp_req_submit),
512 (u64)atomic64_read(&st->decomp_req_complete),
513 (u64)atomic64_read(&st->decomp_in_bytes),
514 (u64)atomic64_read(&st->decomp_out_bytes),
515 (u64)atomic64_read(&st->decomp_bad_reqs),
516 (u64)atomic64_read(&st->pending_req));
517
518 /* Reset pending requests count */
519 atomic64_set(&st->pending_req, 0);
520 }
521 }
522 return 0;
523}
524
525/* Clears stats data */
526static int zip_clear_stats(struct seq_file *s, void *unused)
527{
528 int index = 0;
529
530 for (index = 0; index < MAX_ZIP_DEVICES; index++) {
531 if (zip_dev[index]) {
532 memset(&zip_dev[index]->stats, 0,
533 sizeof(struct zip_stats));
534 seq_printf(s, "Cleared stats for zip %d\n", index);
535 }
536 }
537
538 return 0;
539}
540
541static struct zip_registers zipregs[64] = {
542 {"ZIP_CMD_CTL ", 0x0000ull},
543 {"ZIP_THROTTLE ", 0x0010ull},
544 {"ZIP_CONSTANTS ", 0x00A0ull},
545 {"ZIP_QUE0_MAP ", 0x1400ull},
546 {"ZIP_QUE1_MAP ", 0x1408ull},
547 {"ZIP_QUE_ENA ", 0x0500ull},
548 {"ZIP_QUE_PRI ", 0x0508ull},
549 {"ZIP_QUE0_DONE ", 0x2000ull},
550 {"ZIP_QUE1_DONE ", 0x2008ull},
551 {"ZIP_QUE0_DOORBELL ", 0x4000ull},
552 {"ZIP_QUE1_DOORBELL ", 0x4008ull},
553 {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
554 {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
555 {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
556 {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
557 { NULL, 0}
558};
559
560/* Prints registers' contents */
561static int zip_print_regs(struct seq_file *s, void *unused)
562{
563 u64 val = 0;
564 int i = 0, index = 0;
565
566 for (index = 0; index < MAX_ZIP_DEVICES; index++) {
567 if (zip_dev[index]) {
568 seq_printf(s, "--------------------------------\n"
569 " ZIP Device %d Registers\n"
570 "--------------------------------\n",
571 index);
572
573 i = 0;
574
575 while (zipregs[i].reg_name) {
576 val = zip_reg_read((zip_dev[index]->reg_base +
577 zipregs[i].reg_offset));
578 seq_printf(s, "%s: 0x%016llx\n",
579 zipregs[i].reg_name, val);
580 i++;
581 }
582 }
583 }
584 return 0;
585}
586
587static int zip_stats_open(struct inode *inode, struct file *file)
588{
589 return single_open(file, zip_show_stats, NULL);
590}
591
592static const struct file_operations zip_stats_fops = {
593 .owner = THIS_MODULE,
594 .open = zip_stats_open,
595 .read = seq_read,
596};
597
598static int zip_clear_open(struct inode *inode, struct file *file)
599{
600 return single_open(file, zip_clear_stats, NULL);
601}
602
603static const struct file_operations zip_clear_fops = {
604 .owner = THIS_MODULE,
605 .open = zip_clear_open,
606 .read = seq_read,
607};
608
609static int zip_regs_open(struct inode *inode, struct file *file)
610{
611 return single_open(file, zip_print_regs, NULL);
612}
613
614static const struct file_operations zip_regs_fops = {
615 .owner = THIS_MODULE,
616 .open = zip_regs_open,
617 .read = seq_read,
618};
619
620/* Root directory for thunderx_zip debugfs entry */
621static struct dentry *zip_debugfs_root;
622
623static int __init zip_debugfs_init(void)
624{
625 struct dentry *zip_stats, *zip_clear, *zip_regs;
626
627 if (!debugfs_initialized())
628 return -ENODEV;
629
630 zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
631 if (!zip_debugfs_root)
632 return -ENOMEM;
633
634 /* Creating files for entries inside thunderx_zip directory */
635 zip_stats = debugfs_create_file("zip_stats", 0444,
636 zip_debugfs_root,
637 NULL, &zip_stats_fops);
638 if (!zip_stats)
639 goto failed_to_create;
640
641 zip_clear = debugfs_create_file("zip_clear", 0444,
642 zip_debugfs_root,
643 NULL, &zip_clear_fops);
644 if (!zip_clear)
645 goto failed_to_create;
646
647 zip_regs = debugfs_create_file("zip_regs", 0444,
648 zip_debugfs_root,
649 NULL, &zip_regs_fops);
650 if (!zip_regs)
651 goto failed_to_create;
652
653 return 0;
654
655failed_to_create:
656 debugfs_remove_recursive(zip_debugfs_root);
657 return -ENOENT;
658}
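
With debugfs mounted in the usual place, the calls above yield three read-only entries; the paths follow directly from the directory and file names passed in:

/*
 * /sys/kernel/debug/thunderx_zip/zip_stats  - per-device counters
 * /sys/kernel/debug/thunderx_zip/zip_clear  - reading it zeroes the stats
 * /sys/kernel/debug/thunderx_zip/zip_regs   - register dump
 */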
659
660static void __exit zip_debugfs_exit(void)
661{
662 debugfs_remove_recursive(zip_debugfs_root);
663}
664
665#else
666static int __init zip_debugfs_init(void)
667{
668 return 0;
669}
670
671static void __exit zip_debugfs_exit(void) { }
672
673#endif
674/* debugfs - end */
675
676static int __init zip_init_module(void)
677{
678 int ret;
679
680 zip_msg("%s\n", DRV_NAME);
681
682 ret = pci_register_driver(&zip_driver);
683 if (ret < 0) {
684 zip_err("ZIP: pci_register_driver() failed\n");
685 return ret;
686 }
687
688 /* Register with the Kernel Crypto Interface */
689 ret = zip_register_compression_device();
690 if (ret < 0) {
691 zip_err("ZIP: Kernel Crypto Registration failed\n");
692 goto err_pci_unregister;
693 }
694
695 /* comp-decomp statistics are handled with debugfs interface */
696 ret = zip_debugfs_init();
697 if (ret < 0) {
698 zip_err("ZIP: debugfs initialization failed\n");
699 goto err_crypto_unregister;
700 }
701
702 return ret;
703
704err_crypto_unregister:
705 zip_unregister_compression_device();
706
707err_pci_unregister:
708 pci_unregister_driver(&zip_driver);
709 return ret;
710}
711
712static void __exit zip_cleanup_module(void)
713{
714 zip_debugfs_exit();
715
716 /* Unregister from the kernel crypto interface */
717 zip_unregister_compression_device();
718
719 /* Unregister this driver for pci zip devices */
720 pci_unregister_driver(&zip_driver);
721}
722
723module_init(zip_init_module);
724module_exit(zip_cleanup_module);
725
726MODULE_AUTHOR("Cavium Inc");
727MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
728MODULE_LICENSE("GPL v2");
729MODULE_DEVICE_TABLE(pci, zip_id_table);
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
new file mode 100644
index 000000000000..64e051f60784
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.h
@@ -0,0 +1,121 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_MAIN_H__
47#define __ZIP_MAIN_H__
48
49#include "zip_device.h"
50#include "zip_regs.h"
51
52/* PCI device IDs */
53#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
54
55/* ZIP device BARs */
56#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
57
58/* Maximum available zip queues */
59#define ZIP_MAX_NUM_QUEUES 8
60
61#define ZIP_128B_ALIGN 7
62
63/* Command queue buffer size */
64#define ZIP_CMD_QBUF_SIZE (8064 + 8)
65
66struct zip_registers {
67 char *reg_name;
68 u64 reg_offset;
69};
70
71/* ZIP Compression - Decompression stats */
72struct zip_stats {
73 atomic64_t comp_req_submit;
74 atomic64_t comp_req_complete;
75 atomic64_t decomp_req_submit;
76 atomic64_t decomp_req_complete;
77 atomic64_t pending_req;
78 atomic64_t comp_in_bytes;
79 atomic64_t comp_out_bytes;
80 atomic64_t decomp_in_bytes;
81 atomic64_t decomp_out_bytes;
82 atomic64_t decomp_bad_reqs;
83};
84
85/* ZIP Instruction Queue */
86struct zip_iq {
87 u64 *sw_head;
88 u64 *sw_tail;
89 u64 *hw_tail;
90 u64 done_cnt;
91 u64 pend_cnt;
92 u64 free_flag;
93
94 /* ZIP IQ lock */
95 spinlock_t lock;
96};
97
98/* ZIP Device */
99struct zip_device {
100 u32 index;
101 void __iomem *reg_base;
102 struct pci_dev *pdev;
103
104 /* Different ZIP Constants */
105 u64 depth;
106 u64 onfsize;
107 u64 ctxsize;
108
109 struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
110 struct zip_stats stats;
111};
112
113/* Prototypes */
114struct zip_device *zip_get_device(int node_id);
115int zip_get_node_id(void);
116void zip_reg_write(u64 val, u64 __iomem *addr);
117u64 zip_reg_read(u64 __iomem *addr);
118void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
119u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
120
121#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
new file mode 100644
index 000000000000..b3e0843a9169
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.c
@@ -0,0 +1,114 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#include <linux/types.h>
47#include <linux/vmalloc.h>
48
49#include "common.h"
50
51/**
52 * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
53 * @zip: Pointer to zip device structure
54 * @q: Queue number to allocate buffer to
55 * Return: 0 if successful, -ENOMEM otherwise
56 */
57int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
58{
59 zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
60 get_order(ZIP_CMD_QBUF_SIZE));
61
62 if (!zip->iq[q].sw_head)
63 return -ENOMEM;
64
65 memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
66
67 zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
68 return 0;
69}
70
71/**
72 * zip_cmd_qbuf_free - Frees the cmd Queue buffer
73 * @zip: Pointer to zip device structure
74 * @q: Queue number to free buffer of
75 */
76void zip_cmd_qbuf_free(struct zip_device *zip, int q)
77{
78	zip_dbg("Freeing cmd_qbuf %p\n", zip->iq[q].sw_tail);
79
80	free_pages((unsigned long)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
81}
82
83/**
84 * zip_data_buf_alloc - Allocates memory for a data buffer
85 * @size: Size of the buffer to allocate
86 * Returns: Pointer to the buffer allocated
87 */
88u8 *zip_data_buf_alloc(u64 size)
89{
90 u8 *ptr;
91
92 ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
93 get_order(size));
94
95 if (!ptr)
96 return NULL;
97
98 memset(ptr, 0, size);
99
100 zip_dbg("Data buffer allocation success\n");
101 return ptr;
102}
103
104/**
105 * zip_data_buf_free - Frees the memory of a data buffer
106 * @ptr: Pointer to the buffer
107 * @size: Buffer size
108 */
109void zip_data_buf_free(u8 *ptr, u64 size)
110{
111	zip_dbg("Freeing data buffer %p\n", ptr);
112
113	free_pages((unsigned long)ptr, get_order(size));
114}
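
A plausible pairing of the four helpers above, e.g. from a probe or self-test path; zip_mem_example is hypothetical, and the queue index and buffer size are placeholders rather than the driver's actual call sites.

/* Illustrative setup/teardown using the helpers in this file. */
static int zip_mem_example(struct zip_device *zip)
{
	u8 *data;

	if (zip_cmd_qbuf_alloc(zip, 0))		/* -ENOMEM on failure */
		return -ENOMEM;

	data = zip_data_buf_alloc(2 * PAGE_SIZE);	/* size: placeholder */
	if (!data) {
		zip_cmd_qbuf_free(zip, 0);
		return -ENOMEM;
	}

	/* ... use the zeroed, DMA-able buffers here ... */

	zip_data_buf_free(data, 2 * PAGE_SIZE);
	zip_cmd_qbuf_free(zip, 0);
	return 0;
}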
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
new file mode 100644
index 000000000000..f8f2f08c4a5c
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.h
@@ -0,0 +1,78 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_MEM_H__
47#define __ZIP_MEM_H__
48
49/**
50 * zip_cmd_qbuf_free - Frees the cmd Queue buffer
51 * @zip: Pointer to zip device structure
52 * @q: Queue number to free buffer of
53 */
54void zip_cmd_qbuf_free(struct zip_device *zip, int q);
55
56/**
57 * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
58 * @zip: Pointer to zip device structure
59 * @q: Queue number to allocate buffer to
60 * Return: 0 if successful, -ENOMEM otherwise
61 */
62int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
63
64/**
65 * zip_data_buf_alloc - Allocates memory for a data buffer
66 * @size: Size of the buffer to allocate
67 * Returns: Pointer to the buffer allocated
68 */
69u8 *zip_data_buf_alloc(u64 size);
70
71/**
72 * zip_data_buf_free - Frees the memory of a data buffer
73 * @ptr: Pointer to the buffer
74 * @size: Buffer size
75 */
76void zip_data_buf_free(u8 *ptr, u64 size);
77
78#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
new file mode 100644
index 000000000000..d0be682305c1
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_regs.h
@@ -0,0 +1,1347 @@
1/***********************license start************************************
2 * Copyright (c) 2003-2017 Cavium, Inc.
3 * All rights reserved.
4 *
5 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
6 *
7 * This file is provided under the terms of the Cavium License (see below)
8 * or under the terms of GNU General Public License, Version 2, as
9 * published by the Free Software Foundation. When using or redistributing
10 * this file, you may do so under either license.
11 *
12 * Cavium License: Redistribution and use in source and binary forms, with
13 * or without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * * Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * * Neither the name of Cavium Inc. nor the names of its contributors may be
25 * used to endorse or promote products derived from this software without
26 * specific prior written permission.
27 *
28 * This Software, including technical data, may be subject to U.S. export
29 * control laws, including the U.S. Export Administration Act and its
30 * associated regulations, and may be subject to export or import
31 * regulations in other countries.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
34 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
35 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
36 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
37 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
38 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
39 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
40 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
41 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
42 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
43 * WITH YOU.
44 ***********************license end**************************************/
45
46#ifndef __ZIP_REGS_H__
47#define __ZIP_REGS_H__
48
49/*
50 * Configuration and status register (CSR) address and type definitions for
51 * Cavium ZIP.
52 */
53
54#include <linux/kern_levels.h>
55
56/* ZIP invocation result completion status codes */
57#define ZIP_CMD_NOTDONE 0x0
58
59/* Successful completion. */
60#define ZIP_CMD_SUCCESS 0x1
61
62/* Output truncated */
63#define ZIP_CMD_DTRUNC 0x2
64
65/* Dynamic Stop */
66#define ZIP_CMD_DYNAMIC_STOP 0x3
67
68/* Uncompress ran out of input data when IWORD0[EF] was set */
69#define ZIP_CMD_ITRUNC 0x4
70
71/* Uncompress found the reserved block type 3 */
72#define ZIP_CMD_RBLOCK 0x5
73
74/*
75 * Uncompress found LEN != ~NLEN in a stored (uncompressed) block in the input.
76 */
77#define ZIP_CMD_NLEN 0x6
78
79/* Uncompress found a bad code in the main Huffman codes. */
80#define ZIP_CMD_BADCODE 0x7
81
82/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
83#define ZIP_CMD_BADCODE2 0x8
84
85/* Compress found a zero-length input. */
86#define ZIP_CMD_ZERO_LEN 0x9
87
88/* The compress or decompress encountered an internal parity error. */
89#define ZIP_CMD_PARITY 0xA
90
91/*
92 * Uncompress found a string identifier that precedes the uncompressed data and
93 * decompression history.
94 */
95#define ZIP_CMD_FATAL 0xB
96
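How a consumer folds these codes into errno space is up to the driver; one illustrative mapping follows (zip_compcode_to_errno and the choice of errnos are assumptions, not taken from this driver).

/* Illustrative translation of ZIP completion codes to errnos. */
static int zip_compcode_to_errno(u8 compcode)
{
	switch (compcode) {
	case ZIP_CMD_SUCCESS:
	case ZIP_CMD_DYNAMIC_STOP:
		return 0;		/* operation completed */
	case ZIP_CMD_NOTDONE:
		return -EBUSY;		/* still in flight */
	case ZIP_CMD_DTRUNC:
		return -ENOSPC;		/* output was truncated */
	case ZIP_CMD_ZERO_LEN:
	case ZIP_CMD_ITRUNC:
		return -EINVAL;		/* bad or short input */
	default:
		return -EIO;		/* stream or hardware error */
	}
}
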
97/**
98 * enum zip_int_vec_e - ZIP MSI-X vector enumeration; enumerates the MSI-X
99 * interrupt vectors.
100 */
101enum zip_int_vec_e {
102 ZIP_INT_VEC_E_ECCE = 0x10,
103 ZIP_INT_VEC_E_FIFE = 0x11,
104 ZIP_INT_VEC_E_QUE0_DONE = 0x0,
105 ZIP_INT_VEC_E_QUE0_ERR = 0x8,
106 ZIP_INT_VEC_E_QUE1_DONE = 0x1,
107 ZIP_INT_VEC_E_QUE1_ERR = 0x9,
108 ZIP_INT_VEC_E_QUE2_DONE = 0x2,
109 ZIP_INT_VEC_E_QUE2_ERR = 0xa,
110 ZIP_INT_VEC_E_QUE3_DONE = 0x3,
111 ZIP_INT_VEC_E_QUE3_ERR = 0xb,
112 ZIP_INT_VEC_E_QUE4_DONE = 0x4,
113 ZIP_INT_VEC_E_QUE4_ERR = 0xc,
114 ZIP_INT_VEC_E_QUE5_DONE = 0x5,
115 ZIP_INT_VEC_E_QUE5_ERR = 0xd,
116 ZIP_INT_VEC_E_QUE6_DONE = 0x6,
117 ZIP_INT_VEC_E_QUE6_ERR = 0xe,
118 ZIP_INT_VEC_E_QUE7_DONE = 0x7,
119 ZIP_INT_VEC_E_QUE7_ERR = 0xf,
120 ZIP_INT_VEC_E_ENUM_LAST = 0x12,
121};
122
123/**
124 * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
125 *
126 * It is the generic format of pointers in ZIP_INST_S.
127 */
128union zip_zptr_addr_s {
129 u64 u_reg64;
130 struct {
131#if defined(__BIG_ENDIAN_BITFIELD)
132 u64 reserved_49_63 : 15;
133 u64 addr : 49;
134#elif defined(__LITTLE_ENDIAN_BITFIELD)
135 u64 addr : 49;
136 u64 reserved_49_63 : 15;
137#endif
138 } s;
139
140};
141
142/**
143 * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
144 *
145 * It is the generic format of pointers in ZIP_INST_S.
146 */
147union zip_zptr_ctl_s {
148 u64 u_reg64;
149 struct {
150#if defined(__BIG_ENDIAN_BITFIELD)
151 u64 reserved_112_127 : 16;
152 u64 length : 16;
153 u64 reserved_67_95 : 29;
154 u64 fw : 1;
155 u64 nc : 1;
156 u64 data_be : 1;
157#elif defined(__LITTLE_ENDIAN_BITFIELD)
158 u64 data_be : 1;
159 u64 nc : 1;
160 u64 fw : 1;
161 u64 reserved_67_95 : 29;
162 u64 length : 16;
163 u64 reserved_112_127 : 16;
164#endif
165 } s;
166};
167
168/**
169 * union zip_inst_s - ZIP Instruction Structure.
170 * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
171 * the structure).
172 */
173union zip_inst_s {
174 u64 u_reg64[16];
175 struct {
176#if defined(__BIG_ENDIAN_BITFIELD)
177 u64 doneint : 1;
178 u64 reserved_56_62 : 7;
179 u64 totaloutputlength : 24;
180 u64 reserved_27_31 : 5;
181 u64 exn : 3;
182 u64 reserved_23_23 : 1;
183 u64 exbits : 7;
184 u64 reserved_12_15 : 4;
185 u64 sf : 1;
186 u64 ss : 2;
187 u64 cc : 2;
188 u64 ef : 1;
189 u64 bf : 1;
190 u64 ce : 1;
191 u64 reserved_3_3 : 1;
192 u64 ds : 1;
193 u64 dg : 1;
194 u64 hg : 1;
195#elif defined(__LITTLE_ENDIAN_BITFIELD)
196 u64 hg : 1;
197 u64 dg : 1;
198 u64 ds : 1;
199 u64 reserved_3_3 : 1;
200 u64 ce : 1;
201 u64 bf : 1;
202 u64 ef : 1;
203 u64 cc : 2;
204 u64 ss : 2;
205 u64 sf : 1;
206 u64 reserved_12_15 : 4;
207 u64 exbits : 7;
208 u64 reserved_23_23 : 1;
209 u64 exn : 3;
210 u64 reserved_27_31 : 5;
211 u64 totaloutputlength : 24;
212 u64 reserved_56_62 : 7;
213 u64 doneint : 1;
214#endif
215#if defined(__BIG_ENDIAN_BITFIELD)
216 u64 historylength : 16;
217 u64 reserved_96_111 : 16;
218 u64 adlercrc32 : 32;
219#elif defined(__LITTLE_ENDIAN_BITFIELD)
220 u64 adlercrc32 : 32;
221 u64 reserved_96_111 : 16;
222 u64 historylength : 16;
223#endif
224 union zip_zptr_addr_s ctx_ptr_addr;
225 union zip_zptr_ctl_s ctx_ptr_ctl;
226 union zip_zptr_addr_s his_ptr_addr;
227 union zip_zptr_ctl_s his_ptr_ctl;
228 union zip_zptr_addr_s inp_ptr_addr;
229 union zip_zptr_ctl_s inp_ptr_ctl;
230 union zip_zptr_addr_s out_ptr_addr;
231 union zip_zptr_ctl_s out_ptr_ctl;
232 union zip_zptr_addr_s res_ptr_addr;
233 union zip_zptr_ctl_s res_ptr_ctl;
234#if defined(__BIG_ENDIAN_BITFIELD)
235 u64 reserved_817_831 : 15;
236 u64 wq_ptr : 49;
237#elif defined(__LITTLE_ENDIAN_BITFIELD)
238 u64 wq_ptr : 49;
239 u64 reserved_817_831 : 15;
240#endif
241#if defined(__BIG_ENDIAN_BITFIELD)
242 u64 reserved_882_895 : 14;
243 u64 tt : 2;
244 u64 reserved_874_879 : 6;
245 u64 grp : 10;
246 u64 tag : 32;
247#elif defined(__LITTLE_ENDIAN_BITFIELD)
248 u64 tag : 32;
249 u64 grp : 10;
250 u64 reserved_874_879 : 6;
251 u64 tt : 2;
252 u64 reserved_882_895 : 14;
253#endif
254#if defined(__BIG_ENDIAN_BITFIELD)
255 u64 reserved_896_959 : 64;
256#elif defined(__LITTLE_ENDIAN_BITFIELD)
257 u64 reserved_896_959 : 64;
258#endif
259#if defined(__BIG_ENDIAN_BITFIELD)
260 u64 reserved_960_1023 : 64;
261#elif defined(__LITTLE_ENDIAN_BITFIELD)
262 u64 reserved_960_1023 : 64;
263#endif
264 } s;
265};
266
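Each of the five buffer operands occupies an addr/ctl pair of these words. As a sketch, filling the input pointer from an already-mapped DMA address might look like the following; zip_fill_input is a hypothetical helper and field choices beyond addr/length/ef are illustrative.

/* Illustrative: aim the input pointer at in_len bytes of DMA memory. */
static void zip_fill_input(union zip_inst_s *inst, u64 dma_addr, u16 in_len)
{
	memset(inst, 0, sizeof(*inst));
	inst->s.inp_ptr_addr.s.addr  = dma_addr;	/* 49-bit address */
	inst->s.inp_ptr_ctl.s.length = in_len;		/* bytes at addr */
	inst->s.ef = 1;			/* this chunk ends the input */
}
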
267/**
268 * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
269 * Structure
270 *
271 * ZIP_NPTR structure is used to chain all the zip instruction buffers
272 * together. ZIP instruction buffers are managed (allocated and released) by
273 * the software.
274 */
275union zip_nptr_s {
276 u64 u_reg64;
277 struct {
278#if defined(__BIG_ENDIAN_BITFIELD)
279 u64 reserved_49_63 : 15;
280 u64 addr : 49;
281#elif defined(__LITTLE_ENDIAN_BITFIELD)
282 u64 addr : 49;
283 u64 reserved_49_63 : 15;
284#endif
285 } s;
286};
287
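In other words, software links command buffers by writing an NPTR as the final word of the current chunk so the hardware continues fetching from the next one. A hedged sketch of that hand-off; zip_chain_chunk is hypothetical and the exact tail-word convention is an assumption.

/* Illustrative: chain the tail word of one command chunk to the next. */
static void zip_chain_chunk(u64 *chunk_tail, u64 next_chunk_dma)
{
	union zip_nptr_s nptr = { .u_reg64 = 0 };

	nptr.s.addr = next_chunk_dma;	/* 49-bit address of next buffer */
	*chunk_tail = nptr.u_reg64;
}
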
288/**
289 * union zip_zptr_s - ZIP Generic Pointer Structure.
290 *
291 * It is the generic format of pointers in ZIP_INST_S.
292 */
293union zip_zptr_s {
294 u64 u_reg64[2];
295 struct {
296#if defined(__BIG_ENDIAN_BITFIELD)
297 u64 reserved_49_63 : 15;
298 u64 addr : 49;
299#elif defined(__LITTLE_ENDIAN_BITFIELD)
300 u64 addr : 49;
301 u64 reserved_49_63 : 15;
302#endif
303#if defined(__BIG_ENDIAN_BITFIELD)
304 u64 reserved_112_127 : 16;
305 u64 length : 16;
306 u64 reserved_67_95 : 29;
307 u64 fw : 1;
308 u64 nc : 1;
309 u64 data_be : 1;
310#elif defined(__LITTLE_ENDIAN_BITFIELD)
311 u64 data_be : 1;
312 u64 nc : 1;
313 u64 fw : 1;
314 u64 reserved_67_95 : 29;
315 u64 length : 16;
316 u64 reserved_112_127 : 16;
317#endif
318 } s;
319};
320
321/**
322 * union zip_zres_s - ZIP Result Structure
323 *
324 * The ZIP coprocessor writes the result structure after it completes the
325 * invocation. The result structure is exactly 24 bytes, and each invocation of
326 * the ZIP coprocessor produces exactly one result structure.
327 */
328union zip_zres_s {
329 u64 u_reg64[3];
330 struct {
331#if defined(__BIG_ENDIAN_BITFIELD)
332 u64 crc32 : 32;
333 u64 adler32 : 32;
334#elif defined(__LITTLE_ENDIAN_BITFIELD)
335 u64 adler32 : 32;
336 u64 crc32 : 32;
337#endif
338#if defined(__BIG_ENDIAN_BITFIELD)
339 u64 totalbyteswritten : 32;
340 u64 totalbytesread : 32;
341#elif defined(__LITTLE_ENDIAN_BITFIELD)
342 u64 totalbytesread : 32;
343 u64 totalbyteswritten : 32;
344#endif
345#if defined(__BIG_ENDIAN_BITFIELD)
346 u64 totalbitsprocessed : 32;
347 u64 doneint : 1;
348 u64 reserved_155_158 : 4;
349 u64 exn : 3;
350 u64 reserved_151_151 : 1;
351 u64 exbits : 7;
352 u64 reserved_137_143 : 7;
353 u64 ef : 1;
354
355 volatile u64 compcode : 8;
356#elif defined(__LITTLE_ENDIAN_BITFIELD)
357
358 volatile u64 compcode : 8;
359 u64 ef : 1;
360 u64 reserved_137_143 : 7;
361 u64 exbits : 7;
362 u64 reserved_151_151 : 1;
363 u64 exn : 3;
364 u64 reserved_155_158 : 4;
365 u64 doneint : 1;
366 u64 totalbitsprocessed : 32;
367#endif
368 } s;
369};
370
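Since the hardware posts the result asynchronously, compcode is declared volatile and software typically polls it. A minimal busy-wait sketch; zip_wait_result is hypothetical and a real caller would add a timeout.

/* Illustrative: spin until the coprocessor posts a completion code. */
static u8 zip_wait_result(union zip_zres_s *res)
{
	while (res->s.compcode == ZIP_CMD_NOTDONE)
		cpu_relax();	/* result memory is written by hardware */

	return res->s.compcode;
}
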
371/**
372 * union zip_cmd_ctl - Structure representing the register that controls
373 * clock and reset.
374 */
375union zip_cmd_ctl {
376 u64 u_reg64;
377 struct zip_cmd_ctl_s {
378#if defined(__BIG_ENDIAN_BITFIELD)
379 u64 reserved_2_63 : 62;
380 u64 forceclk : 1;
381 u64 reset : 1;
382#elif defined(__LITTLE_ENDIAN_BITFIELD)
383 u64 reset : 1;
384 u64 forceclk : 1;
385 u64 reserved_2_63 : 62;
386#endif
387 } s;
388};
389
390#define ZIP_CMD_CTL 0x0ull
391
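A typical use is a block reset during probe, via the zip_reg_write()/zip_reg_read() helpers declared in zip_main.h. A sketch follows; zip_example_reset is hypothetical and reading the register back as a flush is an assumption.

/* Illustrative soft reset through ZIP_CMD_CTL. */
static void zip_example_reset(struct zip_device *zip)
{
	union zip_cmd_ctl cmd_ctl = { .u_reg64 = 0 };

	cmd_ctl.s.reset = 1;
	zip_reg_write(cmd_ctl.u_reg64,
		      (u64 __iomem *)(zip->reg_base + ZIP_CMD_CTL));
	zip_reg_read((u64 __iomem *)(zip->reg_base + ZIP_CMD_CTL));
}
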
392/**
393 * union zip_constants - Data structure representing the register that contains
394 * all of the current implementation-related parameters of the zip core in this
395 * chip.
396 */
397union zip_constants {
398 u64 u_reg64;
399 struct zip_constants_s {
400#if defined(__BIG_ENDIAN_BITFIELD)
401 u64 nexec : 8;
402 u64 reserved_49_55 : 7;
403 u64 syncflush_capable : 1;
404 u64 depth : 16;
405 u64 onfsize : 12;
406 u64 ctxsize : 12;
407 u64 reserved_1_7 : 7;
408 u64 disabled : 1;
409#elif defined(__LITTLE_ENDIAN_BITFIELD)
410 u64 disabled : 1;
411 u64 reserved_1_7 : 7;
412 u64 ctxsize : 12;
413 u64 onfsize : 12;
414 u64 depth : 16;
415 u64 syncflush_capable : 1;
416 u64 reserved_49_55 : 7;
417 u64 nexec : 8;
418#endif
419 } s;
420};
421
422#define ZIP_CONSTANTS 0x00A0ull
423
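The depth, onfsize and ctxsize fields of struct zip_device presumably cache these values; a decoding sketch (zip_read_constants is a hypothetical helper):

/* Illustrative: cache implementation parameters from ZIP_CONSTANTS. */
static void zip_read_constants(struct zip_device *zip)
{
	union zip_constants c;

	c.u_reg64 = zip_reg_read((u64 __iomem *)(zip->reg_base +
						 ZIP_CONSTANTS));
	zip->depth   = c.s.depth;
	zip->onfsize = c.s.onfsize;
	zip->ctxsize = c.s.ctxsize;
}
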
424/**
425 * union zip_corex_bist_status - Represents registers which have the BIST
426 * status of memories in zip cores.
427 *
428 * Each bit is the BIST result of an individual memory
429 * (per bit, 0 = pass and 1 = fail).
430 */
431union zip_corex_bist_status {
432 u64 u_reg64;
433 struct zip_corex_bist_status_s {
434#if defined(__BIG_ENDIAN_BITFIELD)
435 u64 reserved_53_63 : 11;
436 u64 bstatus : 53;
437#elif defined(__LITTLE_ENDIAN_BITFIELD)
438 u64 bstatus : 53;
439 u64 reserved_53_63 : 11;
440#endif
441 } s;
442};
443
444static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
445{
446	if (param1 <= 1)
447 return 0x0520ull + (param1 & 1) * 0x8ull;
448 pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
449 return 0;
450}
451
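All the per-instance CSRs below follow this base-plus-stride pattern: an inline helper validates the index and returns the offset, which is then added to reg_base. For example (a sketch; zip_cores_bist_ok is hypothetical and the driver's own BIST handling may differ):

/* Illustrative: report whether both ZIP cores passed BIST. */
static bool zip_cores_bist_ok(struct zip_device *zip)
{
	u64 core, status;

	for (core = 0; core < 2; core++) {
		status = zip_reg_read((u64 __iomem *)(zip->reg_base +
				      ZIP_COREX_BIST_STATUS(core)));
		if (status)	/* any set bit means a memory failed */
			return false;
	}
	return true;
}
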
452/**
453 * union zip_ctl_bist_status - Represents register that has the BIST status of
454 * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
455 * buffer, output data buffers).
456 *
457 * Each bit is the BIST result of an individual memory
458 * (per bit, 0 = pass and 1 = fail).
459 */
460union zip_ctl_bist_status {
461 u64 u_reg64;
462 struct zip_ctl_bist_status_s {
463#if defined(__BIG_ENDIAN_BITFIELD)
464 u64 reserved_9_63 : 55;
465 u64 bstatus : 9;
466#elif defined(__LITTLE_ENDIAN_BITFIELD)
467 u64 bstatus : 9;
468 u64 reserved_9_63 : 55;
469#endif
470 } s;
471};
472
473#define ZIP_CTL_BIST_STATUS 0x0510ull
474
475/**
476 * union zip_ctl_cfg - Represents the register that controls the behavior of
477 * the ZIP DMA engines.
478 *
479 * It is recommended to keep default values for normal operation. Changing the
480 * values of the fields may be useful for diagnostics.
481 */
482union zip_ctl_cfg {
483 u64 u_reg64;
484 struct zip_ctl_cfg_s {
485#if defined(__BIG_ENDIAN_BITFIELD)
486 u64 reserved_52_63 : 12;
487 u64 ildf : 4;
488 u64 reserved_36_47 : 12;
489 u64 drtf : 4;
490 u64 reserved_27_31 : 5;
491 u64 stcf : 3;
492 u64 reserved_19_23 : 5;
493 u64 ldf : 3;
494 u64 reserved_2_15 : 14;
495 u64 busy : 1;
496 u64 reserved_0_0 : 1;
497#elif defined(__LITTLE_ENDIAN_BITFIELD)
498 u64 reserved_0_0 : 1;
499 u64 busy : 1;
500 u64 reserved_2_15 : 14;
501 u64 ldf : 3;
502 u64 reserved_19_23 : 5;
503 u64 stcf : 3;
504 u64 reserved_27_31 : 5;
505 u64 drtf : 4;
506 u64 reserved_36_47 : 12;
507 u64 ildf : 4;
508 u64 reserved_52_63 : 12;
509#endif
510 } s;
511};
512
513#define ZIP_CTL_CFG 0x0560ull
514
515/**
516 * union zip_dbg_corex_inst - Represents the registers that reflect the status
517 * of the current instruction that the ZIP core is executing or has executed.
518 *
519 * These registers are only for debug use.
520 */
521union zip_dbg_corex_inst {
522 u64 u_reg64;
523 struct zip_dbg_corex_inst_s {
524#if defined(__BIG_ENDIAN_BITFIELD)
525 u64 busy : 1;
526 u64 reserved_35_62 : 28;
527 u64 qid : 3;
528 u64 iid : 32;
529#elif defined(__LITTLE_ENDIAN_BITFIELD)
530 u64 iid : 32;
531 u64 qid : 3;
532 u64 reserved_35_62 : 28;
533 u64 busy : 1;
534#endif
535 } s;
536};
537
538static inline u64 ZIP_DBG_COREX_INST(u64 param1)
539{
540	if (param1 <= 1)
541 return 0x0640ull + (param1 & 1) * 0x8ull;
542 pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
543 return 0;
544}
545
546/**
547 * union zip_dbg_corex_sta - Represents registers that reflect the status of
548 * the zip cores.
549 *
550 * They are for debug use only.
551 */
552union zip_dbg_corex_sta {
553 u64 u_reg64;
554 struct zip_dbg_corex_sta_s {
555#if defined(__BIG_ENDIAN_BITFIELD)
556 u64 busy : 1;
557 u64 reserved_37_62 : 26;
558 u64 ist : 5;
559 u64 nie : 32;
560#elif defined(__LITTLE_ENDIAN_BITFIELD)
561 u64 nie : 32;
562 u64 ist : 5;
563 u64 reserved_37_62 : 26;
564 u64 busy : 1;
565#endif
566 } s;
567};
568
569static inline u64 ZIP_DBG_COREX_STA(u64 param1)
570{
571	if (param1 <= 1)
572 return 0x0680ull + (param1 & 1) * 0x8ull;
573 pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
574 return 0;
575}
576
577/**
578 * union zip_dbg_quex_sta - Represents registers that reflect the status of
579 * the zip instruction queues.
580 *
581 * They are for debug use only.
582 */
583union zip_dbg_quex_sta {
584 u64 u_reg64;
585 struct zip_dbg_quex_sta_s {
586#if defined(__BIG_ENDIAN_BITFIELD)
587 u64 busy : 1;
588 u64 reserved_56_62 : 7;
589 u64 rqwc : 24;
590 u64 nii : 32;
591#elif defined(__LITTLE_ENDIAN_BITFIELD)
592 u64 nii : 32;
593 u64 rqwc : 24;
594 u64 reserved_56_62 : 7;
595 u64 busy : 1;
596#endif
597 } s;
598};
599
600static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
601{
602	if (param1 <= 7)
603 return 0x1800ull + (param1 & 7) * 0x8ull;
604 pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
605 return 0;
606}
607
608/**
609 * union zip_ecc_ctl - Represents the register that enables ECC for each
610 * individual internal memory that requires ECC.
611 *
612 * For debug purpose, it can also flip one or two bits in the ECC data.
613 */
614union zip_ecc_ctl {
615 u64 u_reg64;
616 struct zip_ecc_ctl_s {
617#if defined(__BIG_ENDIAN_BITFIELD)
618 u64 reserved_19_63 : 45;
619 u64 vmem_cdis : 1;
620 u64 vmem_fs : 2;
621 u64 reserved_15_15 : 1;
622 u64 idf1_cdis : 1;
623 u64 idf1_fs : 2;
624 u64 reserved_11_11 : 1;
625 u64 idf0_cdis : 1;
626 u64 idf0_fs : 2;
627 u64 reserved_7_7 : 1;
628 u64 gspf_cdis : 1;
629 u64 gspf_fs : 2;
630 u64 reserved_3_3 : 1;
631 u64 iqf_cdis : 1;
632 u64 iqf_fs : 2;
633#elif defined(__LITTLE_ENDIAN_BITFIELD)
634 u64 iqf_fs : 2;
635 u64 iqf_cdis : 1;
636 u64 reserved_3_3 : 1;
637 u64 gspf_fs : 2;
638 u64 gspf_cdis : 1;
639 u64 reserved_7_7 : 1;
640 u64 idf0_fs : 2;
641 u64 idf0_cdis : 1;
642 u64 reserved_11_11 : 1;
643 u64 idf1_fs : 2;
644 u64 idf1_cdis : 1;
645 u64 reserved_15_15 : 1;
646 u64 vmem_fs : 2;
647 u64 vmem_cdis : 1;
648 u64 reserved_19_63 : 45;
649#endif
650 } s;
651};
652
653#define ZIP_ECC_CTL 0x0568ull
654
655/* NCB - zip_ecce_ena_w1c */
656union zip_ecce_ena_w1c {
657 u64 u_reg64;
658 struct zip_ecce_ena_w1c_s {
659#if defined(__BIG_ENDIAN_BITFIELD)
660 u64 reserved_37_63 : 27;
661 u64 dbe : 5;
662 u64 reserved_5_31 : 27;
663 u64 sbe : 5;
664#elif defined(__LITTLE_ENDIAN_BITFIELD)
665 u64 sbe : 5;
666 u64 reserved_5_31 : 27;
667 u64 dbe : 5;
668 u64 reserved_37_63 : 27;
669#endif
670 } s;
671};
672
673#define ZIP_ECCE_ENA_W1C 0x0598ull
674
675/* NCB - zip_ecce_ena_w1s */
676union zip_ecce_ena_w1s {
677 u64 u_reg64;
678 struct zip_ecce_ena_w1s_s {
679#if defined(__BIG_ENDIAN_BITFIELD)
680 u64 reserved_37_63 : 27;
681 u64 dbe : 5;
682 u64 reserved_5_31 : 27;
683 u64 sbe : 5;
684#elif defined(__LITTLE_ENDIAN_BITFIELD)
685 u64 sbe : 5;
686 u64 reserved_5_31 : 27;
687 u64 dbe : 5;
688 u64 reserved_37_63 : 27;
689#endif
690 } s;
691};
692
693#define ZIP_ECCE_ENA_W1S 0x0590ull
694
695/**
696 * union zip_ecce_int - Represents the register that contains the status of the
697 * ECC interrupt sources.
698 */
699union zip_ecce_int {
700 u64 u_reg64;
701 struct zip_ecce_int_s {
702#if defined(__BIG_ENDIAN_BITFIELD)
703 u64 reserved_37_63 : 27;
704 u64 dbe : 5;
705 u64 reserved_5_31 : 27;
706 u64 sbe : 5;
707#elif defined(__LITTLE_ENDIAN_BITFIELD)
708 u64 sbe : 5;
709 u64 reserved_5_31 : 27;
710 u64 dbe : 5;
711 u64 reserved_37_63 : 27;
712#endif
713 } s;
714};
715
716#define ZIP_ECCE_INT 0x0580ull
717
718/* NCB - zip_ecce_int_w1s */
719union zip_ecce_int_w1s {
720 u64 u_reg64;
721 struct zip_ecce_int_w1s_s {
722#if defined(__BIG_ENDIAN_BITFIELD)
723 u64 reserved_37_63 : 27;
724 u64 dbe : 5;
725 u64 reserved_5_31 : 27;
726 u64 sbe : 5;
727#elif defined(__LITTLE_ENDIAN_BITFIELD)
728 u64 sbe : 5;
729 u64 reserved_5_31 : 27;
730 u64 dbe : 5;
731 u64 reserved_37_63 : 27;
732#endif
733 } s;
734};
735
736#define ZIP_ECCE_INT_W1S 0x0588ull
737
738/* NCB - zip_fife_ena_w1c */
739union zip_fife_ena_w1c {
740 u64 u_reg64;
741 struct zip_fife_ena_w1c_s {
742#if defined(__BIG_ENDIAN_BITFIELD)
743 u64 reserved_42_63 : 22;
744 u64 asserts : 42;
745#elif defined(__LITTLE_ENDIAN_BITFIELD)
746 u64 asserts : 42;
747 u64 reserved_42_63 : 22;
748#endif
749 } s;
750};
751
752#define ZIP_FIFE_ENA_W1C 0x0090ull
753
754/* NCB - zip_fife_ena_w1s */
755union zip_fife_ena_w1s {
756 u64 u_reg64;
757 struct zip_fife_ena_w1s_s {
758#if defined(__BIG_ENDIAN_BITFIELD)
759 u64 reserved_42_63 : 22;
760 u64 asserts : 42;
761#elif defined(__LITTLE_ENDIAN_BITFIELD)
762 u64 asserts : 42;
763 u64 reserved_42_63 : 22;
764#endif
765 } s;
766};
767
768#define ZIP_FIFE_ENA_W1S 0x0088ull
769
770/* NCB - zip_fife_int */
771union zip_fife_int {
772 u64 u_reg64;
773 struct zip_fife_int_s {
774#if defined(__BIG_ENDIAN_BITFIELD)
775 u64 reserved_42_63 : 22;
776 u64 asserts : 42;
777#elif defined(__LITTLE_ENDIAN_BITFIELD)
778 u64 asserts : 42;
779 u64 reserved_42_63 : 22;
780#endif
781 } s;
782};
783
784#define ZIP_FIFE_INT 0x0078ull
785
786/* NCB - zip_fife_int_w1s */
787union zip_fife_int_w1s {
788 u64 u_reg64;
789 struct zip_fife_int_w1s_s {
790#if defined(__BIG_ENDIAN_BITFIELD)
791 u64 reserved_42_63 : 22;
792 u64 asserts : 42;
793#elif defined(__LITTLE_ENDIAN_BITFIELD)
794 u64 asserts : 42;
795 u64 reserved_42_63 : 22;
796#endif
797 } s;
798};
799
800#define ZIP_FIFE_INT_W1S 0x0080ull
801
802/**
803 * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
804 *
805 * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
806 */
807union zip_msix_pbax {
808 u64 u_reg64;
809 struct zip_msix_pbax_s {
810#if defined(__BIG_ENDIAN_BITFIELD)
811 u64 pend : 64;
812#elif defined(__LITTLE_ENDIAN_BITFIELD)
813 u64 pend : 64;
814#endif
815 } s;
816};
817
818static inline u64 ZIP_MSIX_PBAX(u64 param1)
819{
820	if (param1 == 0)
821 return 0x0000838000FF0000ull;
822 pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
823 return 0;
824}
825
826/**
827 * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
828 * table, indexed by the ZIP_INT_VEC_E enumeration.
829 */
830union zip_msix_vecx_addr {
831 u64 u_reg64;
832 struct zip_msix_vecx_addr_s {
833#if defined(__BIG_ENDIAN_BITFIELD)
834 u64 reserved_49_63 : 15;
835 u64 addr : 47;
836 u64 reserved_1_1 : 1;
837 u64 secvec : 1;
838#elif defined(__LITTLE_ENDIAN_BITFIELD)
839 u64 secvec : 1;
840 u64 reserved_1_1 : 1;
841 u64 addr : 47;
842 u64 reserved_49_63 : 15;
843#endif
844 } s;
845};
846
847static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
848{
849	if (param1 <= 17)
850 return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
851 pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
852 return 0;
853}
854
855/**
856 * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
857 * table, indexed by the ZIP_INT_VEC_E enumeration.
858 */
859union zip_msix_vecx_ctl {
860 u64 u_reg64;
861 struct zip_msix_vecx_ctl_s {
862#if defined(__BIG_ENDIAN_BITFIELD)
863 u64 reserved_33_63 : 31;
864 u64 mask : 1;
865 u64 reserved_20_31 : 12;
866 u64 data : 20;
867#elif defined(__LITTLE_ENDIAN_BITFIELD)
868 u64 data : 20;
869 u64 reserved_20_31 : 12;
870 u64 mask : 1;
871 u64 reserved_33_63 : 31;
872#endif
873 } s;
874};
875
876static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
877{
878	if (param1 <= 17)
879 return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
880 pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
881 return 0;
882}
883
884/**
885 * union zip_quex_done - Represents the registers that contain the per-queue
886 * instruction done count.
887 */
888union zip_quex_done {
889 u64 u_reg64;
890 struct zip_quex_done_s {
891#if defined(__BIG_ENDIAN_BITFIELD)
892 u64 reserved_20_63 : 44;
893 u64 done : 20;
894#elif defined(__LITTLE_ENDIAN_BITFIELD)
895 u64 done : 20;
896 u64 reserved_20_63 : 44;
897#endif
898 } s;
899};
900
901static inline u64 ZIP_QUEX_DONE(u64 param1)
902{
903	if (param1 <= 7)
904 return 0x2000ull + (param1 & 7) * 0x8ull;
905 pr_err("ZIP_QUEX_DONE: %llu\n", param1);
906 return 0;
907}
908
909/**
910 * union zip_quex_done_ack - Represents the registers a write to which
911 * decrements the per-queue instruction done count.
912 */
913union zip_quex_done_ack {
914 u64 u_reg64;
915 struct zip_quex_done_ack_s {
916#if defined(__BIG_ENDIAN_BITFIELD)
917 u64 reserved_20_63 : 44;
918 u64 done_ack : 20;
919#elif defined(__LITTLE_ENDIAN_BITFIELD)
920 u64 done_ack : 20;
921 u64 reserved_20_63 : 44;
922#endif
923 } s;
924};
925
926static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
927{
928	if (param1 <= 7)
929 return 0x2200ull + (param1 & 7) * 0x8ull;
930 pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
931 return 0;
932}
933
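QUEX_DONE and QUEX_DONE_ACK form the usual count/acknowledge handshake: the handler reads how many instructions finished, processes them, then writes the same count back so the hardware decrements its counter. A hedged sketch; zip_example_service_done is hypothetical.

/* Illustrative DONEINT servicing for queue q. */
static void zip_example_service_done(struct zip_device *zip, u64 q)
{
	union zip_quex_done done;

	done.u_reg64 = zip_reg_read((u64 __iomem *)(zip->reg_base +
						    ZIP_QUEX_DONE(q)));

	/* ... complete done.s.done finished instructions here ... */

	zip_reg_write(done.u_reg64, (u64 __iomem *)(zip->reg_base +
						    ZIP_QUEX_DONE_ACK(q)));
}
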
934/**
935 * union zip_quex_done_ena_w1c - Represents the register which, when written
936 * with 1, disables the DONEINT interrupt for the queue.
937 */
938union zip_quex_done_ena_w1c {
939 u64 u_reg64;
940 struct zip_quex_done_ena_w1c_s {
941#if defined(__BIG_ENDIAN_BITFIELD)
942 u64 reserved_1_63 : 63;
943 u64 done_ena : 1;
944#elif defined(__LITTLE_ENDIAN_BITFIELD)
945 u64 done_ena : 1;
946 u64 reserved_1_63 : 63;
947#endif
948 } s;
949};
950
951static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
952{
953	if (param1 <= 7)
954 return 0x2600ull + (param1 & 7) * 0x8ull;
955 pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
956 return 0;
957}
958
959/**
960 * union zip_quex_done_ena_w1s - Represents the register which, when written
961 * with 1, enables the DONEINT interrupt for the queue.
962 */
963union zip_quex_done_ena_w1s {
964 u64 u_reg64;
965 struct zip_quex_done_ena_w1s_s {
966#if defined(__BIG_ENDIAN_BITFIELD)
967 u64 reserved_1_63 : 63;
968 u64 done_ena : 1;
969#elif defined(__LITTLE_ENDIAN_BITFIELD)
970 u64 done_ena : 1;
971 u64 reserved_1_63 : 63;
972#endif
973 } s;
974};
975
976static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
977{
978	if (param1 <= 7)
979 return 0x2400ull + (param1 & 7) * 0x8ull;
980 pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
981 return 0;
982}
983
984/**
985 * union zip_quex_done_wait - Represents the register that specifies the
986 * per-queue interrupt coalescing settings.
987 */
988union zip_quex_done_wait {
989 u64 u_reg64;
990 struct zip_quex_done_wait_s {
991#if defined(__BIG_ENDIAN_BITFIELD)
992 u64 reserved_48_63 : 16;
993 u64 time_wait : 16;
994 u64 reserved_20_31 : 12;
995 u64 num_wait : 20;
996#elif defined(__LITTLE_ENDIAN_BITFIELD)
997 u64 num_wait : 20;
998 u64 reserved_20_31 : 12;
999 u64 time_wait : 16;
1000 u64 reserved_48_63 : 16;
1001#endif
1002 } s;
1003};
1004
1005static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
1006{
1007	if (param1 <= 7)
1008 return 0x2800ull + (param1 & 7) * 0x8ull;
1009 pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
1010 return 0;
1011}
1012
1013/**
1014 * union zip_quex_doorbell - Represents doorbell registers for the ZIP
1015 * instruction queues.
1016 */
1017union zip_quex_doorbell {
1018 u64 u_reg64;
1019 struct zip_quex_doorbell_s {
1020#if defined(__BIG_ENDIAN_BITFIELD)
1021 u64 reserved_20_63 : 44;
1022 u64 dbell_cnt : 20;
1023#elif defined(__LITTLE_ENDIAN_BITFIELD)
1024 u64 dbell_cnt : 20;
1025 u64 reserved_20_63 : 44;
1026#endif
1027 } s;
1028};
1029
1030static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
1031{
1032	if (param1 <= 7)
1033 return 0x4000ull + (param1 & 7) * 0x8ull;
1034 pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
1035 return 0;
1036}
1037
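Submission ends by telling the queue how many instructions were just appended; a doorbell sketch (zip_example_ring_doorbell is hypothetical):

/* Illustrative: announce cnt freshly queued instructions on queue q. */
static void zip_example_ring_doorbell(struct zip_device *zip, u64 q, u32 cnt)
{
	union zip_quex_doorbell db = { .u_reg64 = 0 };

	db.s.dbell_cnt = cnt;
	zip_reg_write(db.u_reg64,
		      (u64 __iomem *)(zip->reg_base + ZIP_QUEX_DOORBELL(q)));
}
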
1038union zip_quex_err_ena_w1c {
1039 u64 u_reg64;
1040 struct zip_quex_err_ena_w1c_s {
1041#if defined(__BIG_ENDIAN_BITFIELD)
1042 u64 reserved_5_63 : 59;
1043 u64 mdbe : 1;
1044 u64 nwrp : 1;
1045 u64 nrrp : 1;
1046 u64 irde : 1;
1047 u64 dovf : 1;
1048#elif defined(__LITTLE_ENDIAN_BITFIELD)
1049 u64 dovf : 1;
1050 u64 irde : 1;
1051 u64 nrrp : 1;
1052 u64 nwrp : 1;
1053 u64 mdbe : 1;
1054 u64 reserved_5_63 : 59;
1055#endif
1056 } s;
1057};
1058
1059static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
1060{
1061	if (param1 <= 7)
1062 return 0x3600ull + (param1 & 7) * 0x8ull;
1063 pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
1064 return 0;
1065}
1066
1067union zip_quex_err_ena_w1s {
1068 u64 u_reg64;
1069 struct zip_quex_err_ena_w1s_s {
1070#if defined(__BIG_ENDIAN_BITFIELD)
1071 u64 reserved_5_63 : 59;
1072 u64 mdbe : 1;
1073 u64 nwrp : 1;
1074 u64 nrrp : 1;
1075 u64 irde : 1;
1076 u64 dovf : 1;
1077#elif defined(__LITTLE_ENDIAN_BITFIELD)
1078 u64 dovf : 1;
1079 u64 irde : 1;
1080 u64 nrrp : 1;
1081 u64 nwrp : 1;
1082 u64 mdbe : 1;
1083 u64 reserved_5_63 : 59;
1084#endif
1085 } s;
1086};
1087
1088static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
1089{
1090	if (param1 <= 7)
1091 return 0x3400ull + (param1 & 7) * 0x8ull;
1092 pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
1093 return 0;
1094}
1095
1096/**
1097 * union zip_quex_err_int - Represents registers that contain the per-queue
1098 * error interrupts.
1099 */
1100union zip_quex_err_int {
1101 u64 u_reg64;
1102 struct zip_quex_err_int_s {
1103#if defined(__BIG_ENDIAN_BITFIELD)
1104 u64 reserved_5_63 : 59;
1105 u64 mdbe : 1;
1106 u64 nwrp : 1;
1107 u64 nrrp : 1;
1108 u64 irde : 1;
1109 u64 dovf : 1;
1110#elif defined(__LITTLE_ENDIAN_BITFIELD)
1111 u64 dovf : 1;
1112 u64 irde : 1;
1113 u64 nrrp : 1;
1114 u64 nwrp : 1;
1115 u64 mdbe : 1;
1116 u64 reserved_5_63 : 59;
1117#endif
1118 } s;
1119};
1120
1121static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
1122{
1123	if (param1 <= 7)
1124 return 0x3000ull + (param1 & 7) * 0x8ull;
1125 pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
1126 return 0;
1127}
1128
1129/* NCB - zip_que#_err_int_w1s */
1130union zip_quex_err_int_w1s {
1131 u64 u_reg64;
1132 struct zip_quex_err_int_w1s_s {
1133#if defined(__BIG_ENDIAN_BITFIELD)
1134 u64 reserved_5_63 : 59;
1135 u64 mdbe : 1;
1136 u64 nwrp : 1;
1137 u64 nrrp : 1;
1138 u64 irde : 1;
1139 u64 dovf : 1;
1140#elif defined(__LITTLE_ENDIAN_BITFIELD)
1141 u64 dovf : 1;
1142 u64 irde : 1;
1143 u64 nrrp : 1;
1144 u64 nwrp : 1;
1145 u64 mdbe : 1;
1146 u64 reserved_5_63 : 59;
1147#endif
1148 } s;
1149};
1150
1151static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
1152{
1153	if (param1 <= 7)
1154 return 0x3200ull + (param1 & 7) * 0x8ull;
1155 pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
1156 return 0;
1157}
1158
1159/**
1160 * union zip_quex_gcfg - Represents the registers that reflect the status of
1161 * the zip instruction queues; debug use only.
1162 */
1163union zip_quex_gcfg {
1164 u64 u_reg64;
1165 struct zip_quex_gcfg_s {
1166#if defined(__BIG_ENDIAN_BITFIELD)
1167 u64 reserved_4_63 : 60;
1168 u64 iqb_ldwb : 1;
1169 u64 cbw_sty : 1;
1170 u64 l2ld_cmd : 2;
1171#elif defined(__LITTLE_ENDIAN_BITFIELD)
1172 u64 l2ld_cmd : 2;
1173 u64 cbw_sty : 1;
1174 u64 iqb_ldwb : 1;
1175 u64 reserved_4_63 : 60;
1176#endif
1177 } s;
1178};
1179
1180static inline u64 ZIP_QUEX_GCFG(u64 param1)
1181{
1182	if (param1 <= 7)
1183 return 0x1A00ull + (param1 & 7) * 0x8ull;
1184 pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
1185 return 0;
1186}
1187
1188/**
1189 * union zip_quex_map - Represents the registers that control how each
1190 * instruction queue maps to zip cores.
1191 */
1192union zip_quex_map {
1193 u64 u_reg64;
1194 struct zip_quex_map_s {
1195#if defined(__BIG_ENDIAN_BITFIELD)
1196 u64 reserved_2_63 : 62;
1197 u64 zce : 2;
1198#elif defined(__LITTLE_ENDIAN_BITFIELD)
1199 u64 zce : 2;
1200 u64 reserved_2_63 : 62;
1201#endif
1202 } s;
1203};
1204
1205static inline u64 ZIP_QUEX_MAP(u64 param1)
1206{
1207	if (param1 <= 7)
1208 return 0x1400ull + (param1 & 7) * 0x8ull;
1209 pr_err("ZIP_QUEX_MAP: %llu\n", param1);
1210 return 0;
1211}
1212
1213/**
1214 * union zip_quex_sbuf_addr - Represents the registers that set the buffer
1215 * parameters for the instruction queues.
1216 *
1217 * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
1218 * this register to effectively reset the command buffer state machine.
1219 * These registers must be programmed after SW programs the corresponding
1220 * ZIP_QUE(0..7)_SBUF_CTL.
1221 */
1222union zip_quex_sbuf_addr {
1223 u64 u_reg64;
1224 struct zip_quex_sbuf_addr_s {
1225#if defined(__BIG_ENDIAN_BITFIELD)
1226 u64 reserved_49_63 : 15;
1227 u64 ptr : 42;
1228 u64 off : 7;
1229#elif defined(__LITTLE_ENDIAN_BITFIELD)
1230 u64 off : 7;
1231 u64 ptr : 42;
1232 u64 reserved_49_63 : 15;
1233#endif
1234 } s;
1235};
1236
1237static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
1238{
1239	if (param1 <= 7)
1240 return 0x1000ull + (param1 & 7) * 0x8ull;
1241 pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
1242 return 0;
1243}
1244
1245/**
1246 * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
1247 * parameters for the instruction queues.
1248 *
1249 * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
1250 * this register to effectively reset the command buffer state machine.
1251 * These registers must be programmed before SW programs the corresponding
1252 * ZIP_QUE(0..7)_SBUF_ADDR.
1253 */
1254union zip_quex_sbuf_ctl {
1255 u64 u_reg64;
1256 struct zip_quex_sbuf_ctl_s {
1257#if defined(__BIG_ENDIAN_BITFIELD)
1258 u64 reserved_45_63 : 19;
1259 u64 size : 13;
1260 u64 inst_be : 1;
1261 u64 reserved_24_30 : 7;
1262 u64 stream_id : 8;
1263 u64 reserved_12_15 : 4;
1264 u64 aura : 12;
1265#elif defined(__LITTLE_ENDIAN_BITFIELD)
1266 u64 aura : 12;
1267 u64 reserved_12_15 : 4;
1268 u64 stream_id : 8;
1269 u64 reserved_24_30 : 7;
1270 u64 inst_be : 1;
1271 u64 size : 13;
1272 u64 reserved_45_63 : 19;
1273#endif
1274 } s;
1275};
1276
1277static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
1278{
1279	if (param1 <= 7)
1280 return 0x1200ull + (param1 & 7) * 0x8ull;
1281 pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
1282 return 0;
1283}
1284
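Note the ordering constraint spelled out in the two comments above: ZIP_QUEX_SBUF_CTL must be programmed before ZIP_QUEX_SBUF_ADDR. A bring-up sketch honoring that order; zip_example_queue_init is hypothetical, and the size units and the 7-bit off/ptr split handling are assumptions.

/* Illustrative queue bring-up: CTL first, then ADDR. */
static void zip_example_queue_init(struct zip_device *zip, u64 q, u64 buf_dma)
{
	union zip_quex_sbuf_ctl ctl = { .u_reg64 = 0 };
	union zip_quex_sbuf_addr addr = { .u_reg64 = 0 };

	ctl.s.size = ZIP_CMD_QBUF_SIZE / 8;	/* placeholder sizing */
	zip_reg_write(ctl.u_reg64,
		      (u64 __iomem *)(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));

	addr.s.ptr = buf_dma >> 7;	/* off:7 suggests 128-byte alignment */
	zip_reg_write(addr.u_reg64,
		      (u64 __iomem *)(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
}
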
1285/**
1286 * union zip_que_ena - Represents queue enable register
1287 *
1288 * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
1289 */
1290union zip_que_ena {
1291 u64 u_reg64;
1292 struct zip_que_ena_s {
1293#if defined(__BIG_ENDIAN_BITFIELD)
1294 u64 reserved_8_63 : 56;
1295 u64 ena : 8;
1296#elif defined(__LITTLE_ENDIAN_BITFIELD)
1297 u64 ena : 8;
1298 u64 reserved_8_63 : 56;
1299#endif
1300 } s;
1301};
1302
1303#define ZIP_QUE_ENA 0x0500ull
1304
1305/**
1306 * union zip_que_pri - Represents the register that defines the priority
1307 * between instruction queues.
1308 */
1309union zip_que_pri {
1310 u64 u_reg64;
1311 struct zip_que_pri_s {
1312#if defined(__BIG_ENDIAN_BITFIELD)
1313 u64 reserved_8_63 : 56;
1314 u64 pri : 8;
1315#elif defined(__LITTLE_ENDIAN_BITFIELD)
1316 u64 pri : 8;
1317 u64 reserved_8_63 : 56;
1318#endif
1319 } s;
1320};
1321
1322#define ZIP_QUE_PRI 0x0508ull
1323
1324/**
1325 * union zip_throttle - Represents the register that controls the maximum
1326 * number of in-flight X2I data fetch transactions.
1327 *
1328 * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
1329 * accesses; it is not recommended for normal operation, but may be useful for
1330 * diagnostics.
1331 */
1332union zip_throttle {
1333 u64 u_reg64;
1334 struct zip_throttle_s {
1335#if defined(__BIG_ENDIAN_BITFIELD)
1336 u64 reserved_6_63 : 58;
1337 u64 ld_infl : 6;
1338#elif defined(__LITTLE_ENDIAN_BITFIELD)
1339 u64 ld_infl : 6;
1340 u64 reserved_6_63 : 58;
1341#endif
1342 } s;
1343};
1344
1345#define ZIP_THROTTLE 0x0010ull
1346
1347#endif /* __ZIP_REGS_H__ */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 346ceb8f17bd..60919a3ec53b 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -12,4 +12,6 @@ ccp-crypto-objs := ccp-crypto-main.o \
12 12 ccp-crypto-aes.o \
13 13 ccp-crypto-aes-cmac.o \
14 14 ccp-crypto-aes-xts.o \
   15 ccp-crypto-aes-galois.o \
   16 ccp-crypto-des3.o \
15 17 ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
new file mode 100644
index 000000000000..38ee6f348ea9
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -0,0 +1,252 @@
1/*
2 * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
3 *
4 * Copyright (C) 2016 Advanced Micro Devices, Inc.
5 *
6 * Author: Gary R Hook <gary.hook@amd.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/delay.h>
16#include <linux/scatterlist.h>
17#include <linux/crypto.h>
18#include <crypto/internal/aead.h>
19#include <crypto/algapi.h>
20#include <crypto/aes.h>
21#include <crypto/ctr.h>
22#include <crypto/scatterwalk.h>
23#include <linux/delay.h>
24
25#include "ccp-crypto.h"
26
27#define AES_GCM_IVSIZE 12
28
29static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
30{
31 return ret;
32}
33
34static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
35 unsigned int key_len)
36{
37 struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
38
39 switch (key_len) {
40 case AES_KEYSIZE_128:
41 ctx->u.aes.type = CCP_AES_TYPE_128;
42 break;
43 case AES_KEYSIZE_192:
44 ctx->u.aes.type = CCP_AES_TYPE_192;
45 break;
46 case AES_KEYSIZE_256:
47 ctx->u.aes.type = CCP_AES_TYPE_256;
48 break;
49 default:
50 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
51 return -EINVAL;
52 }
53
54 ctx->u.aes.mode = CCP_AES_MODE_GCM;
55 ctx->u.aes.key_len = key_len;
56
57 memcpy(ctx->u.aes.key, key, key_len);
58 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
59
60 return 0;
61}
62
63static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
64 unsigned int authsize)
65{
66 return 0;
67}
68
69static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
70{
71 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
72 struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
73 struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
74 struct scatterlist *iv_sg = NULL;
75 unsigned int iv_len = 0;
76 int i;
77 int ret = 0;
78
79 if (!ctx->u.aes.key_len)
80 return -EINVAL;
81
82 if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
83 return -EINVAL;
84
85 if (!req->iv)
86 return -EINVAL;
87
88 /*
89 * 5 parts:
90 * plaintext/ciphertext input
91 * AAD
92 * key
93 * IV
94 * Destination+tag buffer
95 */
96
97 /* Prepare the IV: 12 bytes + an integer (counter) */
98 memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
99 for (i = 0; i < 3; i++)
100 rctx->iv[i + AES_GCM_IVSIZE] = 0;
101 rctx->iv[AES_BLOCK_SIZE - 1] = 1;
102
103 /* Set up a scatterlist for the IV */
104 iv_sg = &rctx->iv_sg;
105 iv_len = AES_BLOCK_SIZE;
106 sg_init_one(iv_sg, rctx->iv, iv_len);
107
108 /* The AAD + plaintext are concatenated in the src buffer */
109 memset(&rctx->cmd, 0, sizeof(rctx->cmd));
110 INIT_LIST_HEAD(&rctx->cmd.entry);
111 rctx->cmd.engine = CCP_ENGINE_AES;
112 rctx->cmd.u.aes.type = ctx->u.aes.type;
113 rctx->cmd.u.aes.mode = ctx->u.aes.mode;
114 rctx->cmd.u.aes.action = encrypt;
115 rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
116 rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
117 rctx->cmd.u.aes.iv = iv_sg;
118 rctx->cmd.u.aes.iv_len = iv_len;
119 rctx->cmd.u.aes.src = req->src;
120 rctx->cmd.u.aes.src_len = req->cryptlen;
121 rctx->cmd.u.aes.aad_len = req->assoclen;
122
123 /* The cipher text + the tag are in the dst buffer */
124 rctx->cmd.u.aes.dst = req->dst;
125
126 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
127
128 return ret;
129}
130
131static int ccp_aes_gcm_encrypt(struct aead_request *req)
132{
133 return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
134}
135
136static int ccp_aes_gcm_decrypt(struct aead_request *req)
137{
138 return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
139}
140
141static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
142{
143 struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
144
145 ctx->complete = ccp_aes_gcm_complete;
146 ctx->u.aes.key_len = 0;
147
148 crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
149
150 return 0;
151}
152
153static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
154{
155}
156
157static struct aead_alg ccp_aes_gcm_defaults = {
158 .setkey = ccp_aes_gcm_setkey,
159 .setauthsize = ccp_aes_gcm_setauthsize,
160 .encrypt = ccp_aes_gcm_encrypt,
161 .decrypt = ccp_aes_gcm_decrypt,
162 .init = ccp_aes_gcm_cra_init,
163 .ivsize = AES_GCM_IVSIZE,
164 .maxauthsize = AES_BLOCK_SIZE,
165 .base = {
166 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
167 CRYPTO_ALG_ASYNC |
168 CRYPTO_ALG_KERN_DRIVER_ONLY |
169 CRYPTO_ALG_NEED_FALLBACK,
170 .cra_blocksize = AES_BLOCK_SIZE,
171 .cra_ctxsize = sizeof(struct ccp_ctx),
172 .cra_priority = CCP_CRA_PRIORITY,
173 .cra_type = &crypto_ablkcipher_type,
174 .cra_exit = ccp_aes_gcm_cra_exit,
175 .cra_module = THIS_MODULE,
176 },
177};
178
179struct ccp_aes_aead_def {
180 enum ccp_aes_mode mode;
181 unsigned int version;
182 const char *name;
183 const char *driver_name;
184 unsigned int blocksize;
185 unsigned int ivsize;
186 struct aead_alg *alg_defaults;
187};
188
189static struct ccp_aes_aead_def aes_aead_algs[] = {
190 {
191 .mode = CCP_AES_MODE_GHASH,
192 .version = CCP_VERSION(5, 0),
193 .name = "gcm(aes)",
194 .driver_name = "gcm-aes-ccp",
195 .blocksize = 1,
196 .ivsize = AES_BLOCK_SIZE,
197 .alg_defaults = &ccp_aes_gcm_defaults,
198 },
199};
200
201static int ccp_register_aes_aead(struct list_head *head,
202 const struct ccp_aes_aead_def *def)
203{
204 struct ccp_crypto_aead *ccp_aead;
205 struct aead_alg *alg;
206 int ret;
207
208 ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
209 if (!ccp_aead)
210 return -ENOMEM;
211
212 INIT_LIST_HEAD(&ccp_aead->entry);
213
214 ccp_aead->mode = def->mode;
215
216 /* Copy the defaults and override as necessary */
217 alg = &ccp_aead->alg;
218 *alg = *def->alg_defaults;
219 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
220 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
221 def->driver_name);
222 alg->base.cra_blocksize = def->blocksize;
223 alg->base.cra_ablkcipher.ivsize = def->ivsize;
224
225 ret = crypto_register_aead(alg);
226 if (ret) {
227		pr_err("%s aead algorithm registration error (%d)\n",
228 alg->base.cra_name, ret);
229 kfree(ccp_aead);
230 return ret;
231 }
232
233 list_add(&ccp_aead->entry, head);
234
235 return 0;
236}
237
238int ccp_register_aes_aeads(struct list_head *head)
239{
240 int i, ret;
241 unsigned int ccpversion = ccp_version();
242
243 for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
244 if (aes_aead_algs[i].version > ccpversion)
245 continue;
246 ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
247 if (ret)
248 return ret;
249 }
250
251 return 0;
252}
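
Once registration succeeds, the transform is reachable through the kernel's generic AEAD API under its cra_name. A hedged caller sketch (needs <crypto/aead.h>; example_gcm_encrypt is hypothetical, the key size and 16-byte tag are placeholders, and treating the return value synchronously, ignoring -EINPROGRESS completion, is a simplification):

/* Illustrative use of the registered "gcm(aes)" transform. */
static int example_gcm_encrypt(struct scatterlist *src,
			       struct scatterlist *dst,
			       unsigned int assoclen, unsigned int cryptlen,
			       u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;
	ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	ret = crypto_aead_encrypt(req);	/* may return -EINPROGRESS */

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}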
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
new file mode 100644
index 000000000000..5af7347ae03c
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -0,0 +1,254 @@
1/*
2 * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
3 *
4 * Copyright (C) 2016 Advanced Micro Devices, Inc.
5 *
6 * Author: Gary R Hook <ghook@amd.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/delay.h>
16#include <linux/scatterlist.h>
17#include <linux/crypto.h>
18#include <crypto/algapi.h>
19#include <crypto/scatterwalk.h>
20#include <crypto/des.h>
21
22#include "ccp-crypto.h"
23
24static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
25{
26 struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
27 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
28 struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
29
30 if (ret)
31 return ret;
32
33 if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
34 memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
35
36 return 0;
37}
38
39static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
40 unsigned int key_len)
41{
42 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
43 struct ccp_crypto_ablkcipher_alg *alg =
44 ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
45 u32 *flags = &tfm->base.crt_flags;
46
47
48 /* From des_generic.c:
49 *
50 * RFC2451:
51 * If the first two or last two independent 64-bit keys are
52 * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
53 * same as DES. Implementers MUST reject keys that exhibit this
54 * property.
55 */
56 const u32 *K = (const u32 *)key;
57
58 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
59 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
60 (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
61 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
62 return -EINVAL;
63 }
64
65 /* It's not clear that there is any support for a keysize of 112.
66 * If needed, the caller should make K1 == K3
67 */
68 ctx->u.des3.type = CCP_DES3_TYPE_168;
69 ctx->u.des3.mode = alg->mode;
70 ctx->u.des3.key_len = key_len;
71
72 memcpy(ctx->u.des3.key, key, key_len);
73 sg_init_one(&ctx->u.des3.key_sg, ctx->u.des3.key, key_len);
74
75 return 0;
76}
77
78static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
79{
80 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
81 struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
82 struct scatterlist *iv_sg = NULL;
83 unsigned int iv_len = 0;
84 int ret;
85
86 if (!ctx->u.des3.key_len)
87 return -EINVAL;
88
89 if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
90 (ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
91 (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
92 return -EINVAL;
93
94 if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
95 if (!req->info)
96 return -EINVAL;
97
98 memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
99 iv_sg = &rctx->iv_sg;
100 iv_len = DES3_EDE_BLOCK_SIZE;
101 sg_init_one(iv_sg, rctx->iv, iv_len);
102 }
103
104 memset(&rctx->cmd, 0, sizeof(rctx->cmd));
105 INIT_LIST_HEAD(&rctx->cmd.entry);
106 rctx->cmd.engine = CCP_ENGINE_DES3;
107 rctx->cmd.u.des3.type = ctx->u.des3.type;
108 rctx->cmd.u.des3.mode = ctx->u.des3.mode;
109 rctx->cmd.u.des3.action = (encrypt)
110 ? CCP_DES3_ACTION_ENCRYPT
111 : CCP_DES3_ACTION_DECRYPT;
112 rctx->cmd.u.des3.key = &ctx->u.des3.key_sg;
113 rctx->cmd.u.des3.key_len = ctx->u.des3.key_len;
114 rctx->cmd.u.des3.iv = iv_sg;
115 rctx->cmd.u.des3.iv_len = iv_len;
116 rctx->cmd.u.des3.src = req->src;
117 rctx->cmd.u.des3.src_len = req->nbytes;
118 rctx->cmd.u.des3.dst = req->dst;
119
120 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
121
122 return ret;
123}
124
125static int ccp_des3_encrypt(struct ablkcipher_request *req)
126{
127 return ccp_des3_crypt(req, true);
128}
129
130static int ccp_des3_decrypt(struct ablkcipher_request *req)
131{
132 return ccp_des3_crypt(req, false);
133}
134
135static int ccp_des3_cra_init(struct crypto_tfm *tfm)
136{
137 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
138
139 ctx->complete = ccp_des3_complete;
140 ctx->u.des3.key_len = 0;
141
142 tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
143
144 return 0;
145}
146
147static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
148{
149}
150
151static struct crypto_alg ccp_des3_defaults = {
152 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
153 CRYPTO_ALG_ASYNC |
154 CRYPTO_ALG_KERN_DRIVER_ONLY |
155 CRYPTO_ALG_NEED_FALLBACK,
156 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
157 .cra_ctxsize = sizeof(struct ccp_ctx),
158 .cra_priority = CCP_CRA_PRIORITY,
159 .cra_type = &crypto_ablkcipher_type,
160 .cra_init = ccp_des3_cra_init,
161 .cra_exit = ccp_des3_cra_exit,
162 .cra_module = THIS_MODULE,
163 .cra_ablkcipher = {
164 .setkey = ccp_des3_setkey,
165 .encrypt = ccp_des3_encrypt,
166 .decrypt = ccp_des3_decrypt,
167 .min_keysize = DES3_EDE_KEY_SIZE,
168 .max_keysize = DES3_EDE_KEY_SIZE,
169 },
170};
171
172struct ccp_des3_def {
173 enum ccp_des3_mode mode;
174 unsigned int version;
175 const char *name;
176 const char *driver_name;
177 unsigned int blocksize;
178 unsigned int ivsize;
179 struct crypto_alg *alg_defaults;
180};
181
182static struct ccp_des3_def des3_algs[] = {
183 {
184 .mode = CCP_DES3_MODE_ECB,
185 .version = CCP_VERSION(5, 0),
186 .name = "ecb(des3_ede)",
187 .driver_name = "ecb-des3-ccp",
188 .blocksize = DES3_EDE_BLOCK_SIZE,
189 .ivsize = 0,
190 .alg_defaults = &ccp_des3_defaults,
191 },
192 {
193 .mode = CCP_DES3_MODE_CBC,
194 .version = CCP_VERSION(5, 0),
195 .name = "cbc(des3_ede)",
196 .driver_name = "cbc-des3-ccp",
197 .blocksize = DES3_EDE_BLOCK_SIZE,
198 .ivsize = DES3_EDE_BLOCK_SIZE,
199 .alg_defaults = &ccp_des3_defaults,
200 },
201};
202
203static int ccp_register_des3_alg(struct list_head *head,
204 const struct ccp_des3_def *def)
205{
206 struct ccp_crypto_ablkcipher_alg *ccp_alg;
207 struct crypto_alg *alg;
208 int ret;
209
210 ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
211 if (!ccp_alg)
212 return -ENOMEM;
213
214 INIT_LIST_HEAD(&ccp_alg->entry);
215
216 ccp_alg->mode = def->mode;
217
218 /* Copy the defaults and override as necessary */
219 alg = &ccp_alg->alg;
220 *alg = *def->alg_defaults;
221 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
222 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
223 def->driver_name);
224 alg->cra_blocksize = def->blocksize;
225 alg->cra_ablkcipher.ivsize = def->ivsize;
226
227 ret = crypto_register_alg(alg);
228 if (ret) {
229 pr_err("%s ablkcipher algorithm registration error (%d)\n",
230 alg->cra_name, ret);
231 kfree(ccp_alg);
232 return ret;
233 }
234
235 list_add(&ccp_alg->entry, head);
236
237 return 0;
238}
239
240int ccp_register_des3_algs(struct list_head *head)
241{
242 int i, ret;
243 unsigned int ccpversion = ccp_version();
244
245 for (i = 0; i < ARRAY_SIZE(des3_algs); i++) {
246 if (des3_algs[i].version > ccpversion)
247 continue;
248 ret = ccp_register_des3_alg(head, &des3_algs[i]);
249 if (ret)
250 return ret;
251 }
252
253 return 0;
254}
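
Likewise, the two transforms above become reachable as ordinary skciphers. A hedged in-place encryption sketch (needs <crypto/skcipher.h>; example_des3_encrypt is hypothetical and the synchronous treatment of -EINPROGRESS is a simplification):

/* Illustrative use of the registered "cbc(des3_ede)" transform. */
static int example_des3_encrypt(struct scatterlist *sg, unsigned int len,
				u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
	if (ret)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	skcipher_request_set_crypt(req, sg, sg, len, iv);	/* in place */
	ret = crypto_skcipher_encrypt(req);	/* may return -EINPROGRESS */

	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return ret;
}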
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index e0380e59c361..8dccbddabef1 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -33,9 +33,14 @@ static unsigned int sha_disable;
33 33 module_param(sha_disable, uint, 0444);
34 34 MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
35 35
   36 static unsigned int des3_disable;
   37 module_param(des3_disable, uint, 0444);
   38 MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
   39
36 40 /* List heads for the supported algorithms */
37 41 static LIST_HEAD(hash_algs);
38 42 static LIST_HEAD(cipher_algs);
   43 static LIST_HEAD(aead_algs);
39 44
40 45 /* For any tfm, requests for that tfm must be returned in the order
41 46  * received. With multiple queues available, the CCP can process more
@@ -335,6 +340,16 @@ static int ccp_register_algs(void)
335 340 ret = ccp_register_aes_xts_algs(&cipher_algs);
336 341 if (ret)
337 342 return ret;
    343
    344 ret = ccp_register_aes_aeads(&aead_algs);
    345 if (ret)
    346 return ret;
    347 }
    348
    349 if (!des3_disable) {
    350 ret = ccp_register_des3_algs(&cipher_algs);
    351 if (ret)
    352 return ret;
338 353 }
339 354
340 355 if (!sha_disable) {
@@ -350,6 +365,7 @@ static void ccp_unregister_algs(void)
350{ 365{
351 struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; 366 struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
352 struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp; 367 struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
368 struct ccp_crypto_aead *aead_alg, *aead_tmp;
353 369
354 list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { 370 list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
355 crypto_unregister_ahash(&ahash_alg->alg); 371 crypto_unregister_ahash(&ahash_alg->alg);
@@ -362,6 +378,12 @@ static void ccp_unregister_algs(void)
362 list_del(&ablk_alg->entry); 378 list_del(&ablk_alg->entry);
363 kfree(ablk_alg); 379 kfree(ablk_alg);
364 } 380 }
381
382 list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
383 crypto_unregister_aead(&aead_alg->alg);
384 list_del(&aead_alg->entry);
385 kfree(aead_alg);
386 }
365} 387}
366 388
367static int ccp_crypto_init(void) 389static int ccp_crypto_init(void)
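
The new aead_algs list is torn down the same way as the hash and cipher lists: with list_for_each_entry_safe(), because each node is unlinked and freed while the walk is in progress. A small self-contained sketch of that teardown shape (the node type is illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct node {
	struct list_head entry;
	/* ... the registered algorithm would live here ... */
};

static void teardown(struct list_head *head)
{
	struct node *n, *tmp;

	/* The _safe variant caches the next pointer up front, so the
	 * current node may be freed without breaking the iteration. */
	list_for_each_entry_safe(n, tmp, head, entry) {
		list_del(&n->entry);
		kfree(n);
	}
}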
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 84a652be4274..6b46eea94932 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -146,6 +146,12 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
146 case CCP_SHA_TYPE_256: 146 case CCP_SHA_TYPE_256:
147 rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE; 147 rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
148 break; 148 break;
149 case CCP_SHA_TYPE_384:
150 rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
151 break;
152 case CCP_SHA_TYPE_512:
153 rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
154 break;
149 default: 155 default:
150 /* Should never get here */ 156 /* Should never get here */
151 break; 157 break;
@@ -393,6 +399,22 @@ static struct ccp_sha_def sha_algs[] = {
393 .digest_size = SHA256_DIGEST_SIZE, 399 .digest_size = SHA256_DIGEST_SIZE,
394 .block_size = SHA256_BLOCK_SIZE, 400 .block_size = SHA256_BLOCK_SIZE,
395 }, 401 },
402 {
403 .version = CCP_VERSION(5, 0),
404 .name = "sha384",
405 .drv_name = "sha384-ccp",
406 .type = CCP_SHA_TYPE_384,
407 .digest_size = SHA384_DIGEST_SIZE,
408 .block_size = SHA384_BLOCK_SIZE,
409 },
410 {
411 .version = CCP_VERSION(5, 0),
412 .name = "sha512",
413 .drv_name = "sha512-ccp",
414 .type = CCP_SHA_TYPE_512,
415 .digest_size = SHA512_DIGEST_SIZE,
416 .block_size = SHA512_BLOCK_SIZE,
417 },
396}; 418};
397 419
398static int ccp_register_hmac_alg(struct list_head *head, 420static int ccp_register_hmac_alg(struct list_head *head,
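
The sha384/sha512 table entries are gated on CCP_VERSION(5, 0), so only v5 devices pick them up. A sketch of how such a packed version constant can be compared with a plain integer test (the shift width here is an assumption, not taken from this diff):

#define EXAMPLE_CCP_VERSION(v, r)	(((v) << 4) | (r))

static int sha512_supported(unsigned int dev_version)
{
	/* the table entry is skipped when dev_version is below 5.0 */
	return dev_version >= EXAMPLE_CCP_VERSION(5, 0);
}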
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 8335b32e815e..dd5bf15f06e5 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -19,10 +19,14 @@
19#include <linux/ccp.h> 19#include <linux/ccp.h>
20#include <crypto/algapi.h> 20#include <crypto/algapi.h>
21#include <crypto/aes.h> 21#include <crypto/aes.h>
22#include <crypto/internal/aead.h>
23#include <crypto/aead.h>
22#include <crypto/ctr.h> 24#include <crypto/ctr.h>
23#include <crypto/hash.h> 25#include <crypto/hash.h>
24#include <crypto/sha.h> 26#include <crypto/sha.h>
25 27
28#define CCP_LOG_LEVEL KERN_INFO
29
26#define CCP_CRA_PRIORITY 300 30#define CCP_CRA_PRIORITY 300
27 31
28struct ccp_crypto_ablkcipher_alg { 32struct ccp_crypto_ablkcipher_alg {
@@ -33,6 +37,14 @@ struct ccp_crypto_ablkcipher_alg {
33 struct crypto_alg alg; 37 struct crypto_alg alg;
34}; 38};
35 39
40struct ccp_crypto_aead {
41 struct list_head entry;
42
43 u32 mode;
44
45 struct aead_alg alg;
46};
47
36struct ccp_crypto_ahash_alg { 48struct ccp_crypto_ahash_alg {
37 struct list_head entry; 49 struct list_head entry;
38 50
@@ -95,6 +107,9 @@ struct ccp_aes_req_ctx {
95 struct scatterlist iv_sg; 107 struct scatterlist iv_sg;
96 u8 iv[AES_BLOCK_SIZE]; 108 u8 iv[AES_BLOCK_SIZE];
97 109
110 struct scatterlist tag_sg;
111 u8 tag[AES_BLOCK_SIZE];
112
98 /* Fields used for RFC3686 requests */ 113 /* Fields used for RFC3686 requests */
99 u8 *rfc3686_info; 114 u8 *rfc3686_info;
100 u8 rfc3686_iv[AES_BLOCK_SIZE]; 115 u8 rfc3686_iv[AES_BLOCK_SIZE];
@@ -137,9 +152,29 @@ struct ccp_aes_cmac_exp_ctx {
137 u8 buf[AES_BLOCK_SIZE]; 152 u8 buf[AES_BLOCK_SIZE];
138}; 153};
139 154
140/***** SHA related defines *****/
141#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
142#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
155/***** 3DES related defines *****/
156struct ccp_des3_ctx {
157 enum ccp_engine engine;
158 enum ccp_des3_type type;
159 enum ccp_des3_mode mode;
160
161 struct scatterlist key_sg;
162 unsigned int key_len;
163 u8 key[AES_MAX_KEY_SIZE];
164};
165
166struct ccp_des3_req_ctx {
167 struct scatterlist iv_sg;
168 u8 iv[AES_BLOCK_SIZE];
169
170 struct ccp_cmd cmd;
171};
172
173/* SHA-related defines
174 * These values must be large enough to accommodate any variant
175 */
176#define MAX_SHA_CONTEXT_SIZE SHA512_DIGEST_SIZE
177#define MAX_SHA_BLOCK_SIZE SHA512_BLOCK_SIZE
143 178
144struct ccp_sha_ctx { 179struct ccp_sha_ctx {
145 struct scatterlist opad_sg; 180 struct scatterlist opad_sg;
@@ -199,6 +234,7 @@ struct ccp_ctx {
199 union { 234 union {
200 struct ccp_aes_ctx aes; 235 struct ccp_aes_ctx aes;
201 struct ccp_sha_ctx sha; 236 struct ccp_sha_ctx sha;
237 struct ccp_des3_ctx des3;
202 } u; 238 } u;
203}; 239};
204 240
@@ -210,6 +246,8 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
210int ccp_register_aes_algs(struct list_head *head); 246int ccp_register_aes_algs(struct list_head *head);
211int ccp_register_aes_cmac_algs(struct list_head *head); 247int ccp_register_aes_cmac_algs(struct list_head *head);
212int ccp_register_aes_xts_algs(struct list_head *head); 248int ccp_register_aes_xts_algs(struct list_head *head);
249int ccp_register_aes_aeads(struct list_head *head);
213int ccp_register_sha_algs(struct list_head *head); 250int ccp_register_sha_algs(struct list_head *head);
251int ccp_register_des3_algs(struct list_head *head);
214 252
215#endif 253#endif
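
Bumping MAX_SHA_CONTEXT_SIZE and MAX_SHA_BLOCK_SIZE to the SHA-512 values relies on SHA-512 having the largest digest and block size of all supported variants. An illustrative compile-time check of that invariant (the driver itself does not carry these assertions):

#include <linux/bug.h>
#include <crypto/sha.h>

static inline void sha_size_invariants(void)
{
	BUILD_BUG_ON(SHA256_DIGEST_SIZE > SHA512_DIGEST_SIZE);
	BUILD_BUG_ON(SHA384_DIGEST_SIZE > SHA512_DIGEST_SIZE);
	BUILD_BUG_ON(SHA384_BLOCK_SIZE > SHA512_BLOCK_SIZE);
}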
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7bc09989e18a..367c2e30656f 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
315 return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); 315 return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
316} 316}
317 317
318static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
319{
320 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
321}
322
323static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
324{
325 iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
326}
327
328static void ccp_irq_bh(unsigned long data)
329{
330 struct ccp_device *ccp = (struct ccp_device *)data;
331 struct ccp_cmd_queue *cmd_q;
332 u32 q_int, status;
333 unsigned int i;
334
335 status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
336
337 for (i = 0; i < ccp->cmd_q_count; i++) {
338 cmd_q = &ccp->cmd_q[i];
339
340 q_int = status & (cmd_q->int_ok | cmd_q->int_err);
341 if (q_int) {
342 cmd_q->int_status = status;
343 cmd_q->q_status = ioread32(cmd_q->reg_status);
344 cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
345
346 /* On error, only save the first error value */
347 if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
348 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
349
350 cmd_q->int_rcvd = 1;
351
352 /* Acknowledge the interrupt and wake the kthread */
353 iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
354 wake_up_interruptible(&cmd_q->int_queue);
355 }
356 }
357 ccp_enable_queue_interrupts(ccp);
358}
359
360static irqreturn_t ccp_irq_handler(int irq, void *data)
361{
362 struct device *dev = data;
363 struct ccp_device *ccp = dev_get_drvdata(dev);
364
365 ccp_disable_queue_interrupts(ccp);
366 if (ccp->use_tasklet)
367 tasklet_schedule(&ccp->irq_tasklet);
368 else
369 ccp_irq_bh((unsigned long)ccp);
370
371 return IRQ_HANDLED;
372}
373
318static int ccp_init(struct ccp_device *ccp) 374static int ccp_init(struct ccp_device *ccp)
319{ 375{
320 struct device *dev = ccp->dev; 376 struct device *dev = ccp->dev;
321 struct ccp_cmd_queue *cmd_q; 377 struct ccp_cmd_queue *cmd_q;
322 struct dma_pool *dma_pool; 378 struct dma_pool *dma_pool;
323 char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; 379 char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
324 unsigned int qmr, qim, i;
380 unsigned int qmr, i;
325 int ret; 381 int ret;
326 382
327 /* Find available queues */ 383 /* Find available queues */
328 qim = 0;
384 ccp->qim = 0;
329 qmr = ioread32(ccp->io_regs + Q_MASK_REG); 385 qmr = ioread32(ccp->io_regs + Q_MASK_REG);
330 for (i = 0; i < MAX_HW_QUEUES; i++) { 386 for (i = 0; i < MAX_HW_QUEUES; i++) {
331 if (!(qmr & (1 << i))) 387 if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
370 init_waitqueue_head(&cmd_q->int_queue); 426 init_waitqueue_head(&cmd_q->int_queue);
371 427
372 /* Build queue interrupt mask (two interrupts per queue) */ 428 /* Build queue interrupt mask (two interrupts per queue) */
373 qim |= cmd_q->int_ok | cmd_q->int_err;
429 ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
374 430
375#ifdef CONFIG_ARM64 431#ifdef CONFIG_ARM64
376 /* For arm64 set the recommended queue cache settings */ 432 /* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
388 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); 444 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
389 445
390 /* Disable and clear interrupts until ready */ 446 /* Disable and clear interrupts until ready */
391 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
447 ccp_disable_queue_interrupts(ccp);
392 for (i = 0; i < ccp->cmd_q_count; i++) { 448 for (i = 0; i < ccp->cmd_q_count; i++) {
393 cmd_q = &ccp->cmd_q[i]; 449 cmd_q = &ccp->cmd_q[i];
394 450
395 ioread32(cmd_q->reg_int_status); 451 ioread32(cmd_q->reg_int_status);
396 ioread32(cmd_q->reg_status); 452 ioread32(cmd_q->reg_status);
397 } 453 }
398 iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
454 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
399 455
400 /* Request an irq */ 456 /* Request an irq */
401 ret = ccp->get_irq(ccp); 457 ret = ccp->get_irq(ccp);
@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *ccp)
404 goto e_pool; 460 goto e_pool;
405 } 461 }
406 462
463 /* Initialize the ISR tasklet */
464 if (ccp->use_tasklet)
465 tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
466 (unsigned long)ccp);
467
407 dev_dbg(dev, "Starting threads...\n"); 468 dev_dbg(dev, "Starting threads...\n");
408 /* Create a kthread for each queue */ 469 /* Create a kthread for each queue */
409 for (i = 0; i < ccp->cmd_q_count; i++) { 470 for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *ccp)
426 487
427 dev_dbg(dev, "Enabling interrupts...\n"); 488 dev_dbg(dev, "Enabling interrupts...\n");
428 /* Enable interrupts */ 489 /* Enable interrupts */
429 iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
490 ccp_enable_queue_interrupts(ccp);
430 491
431 dev_dbg(dev, "Registering device...\n"); 492 dev_dbg(dev, "Registering device...\n");
432 ccp_add_device(ccp); 493 ccp_add_device(ccp);
@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_device *ccp)
463{ 524{
464 struct ccp_cmd_queue *cmd_q; 525 struct ccp_cmd_queue *cmd_q;
465 struct ccp_cmd *cmd; 526 struct ccp_cmd *cmd;
466 unsigned int qim, i;
527 unsigned int i;
467 528
468 /* Unregister the DMA engine */ 529 /* Unregister the DMA engine */
469 ccp_dmaengine_unregister(ccp); 530 ccp_dmaengine_unregister(ccp);
@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_device *ccp)
474 /* Remove this device from the list of available units */ 535 /* Remove this device from the list of available units */
475 ccp_del_device(ccp); 536 ccp_del_device(ccp);
476 537
477 /* Build queue interrupt mask (two interrupt masks per queue) */
478 qim = 0;
479 for (i = 0; i < ccp->cmd_q_count; i++) {
480 cmd_q = &ccp->cmd_q[i];
481 qim |= cmd_q->int_ok | cmd_q->int_err;
482 }
483
484 /* Disable and clear interrupts */ 538 /* Disable and clear interrupts */
485 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
539 ccp_disable_queue_interrupts(ccp);
486 for (i = 0; i < ccp->cmd_q_count; i++) { 540 for (i = 0; i < ccp->cmd_q_count; i++) {
487 cmd_q = &ccp->cmd_q[i]; 541 cmd_q = &ccp->cmd_q[i];
488 542
489 ioread32(cmd_q->reg_int_status); 543 ioread32(cmd_q->reg_int_status);
490 ioread32(cmd_q->reg_status); 544 ioread32(cmd_q->reg_status);
491 } 545 }
492 iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
546 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
493 547
494 /* Stop the queue kthreads */ 548 /* Stop the queue kthreads */
495 for (i = 0; i < ccp->cmd_q_count; i++) 549 for (i = 0; i < ccp->cmd_q_count; i++)
@@ -516,43 +570,10 @@ static void ccp_destroy(struct ccp_device *ccp)
516 } 570 }
517} 571}
518 572
519static irqreturn_t ccp_irq_handler(int irq, void *data)
520{
521 struct device *dev = data;
522 struct ccp_device *ccp = dev_get_drvdata(dev);
523 struct ccp_cmd_queue *cmd_q;
524 u32 q_int, status;
525 unsigned int i;
526
527 status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
528
529 for (i = 0; i < ccp->cmd_q_count; i++) {
530 cmd_q = &ccp->cmd_q[i];
531
532 q_int = status & (cmd_q->int_ok | cmd_q->int_err);
533 if (q_int) {
534 cmd_q->int_status = status;
535 cmd_q->q_status = ioread32(cmd_q->reg_status);
536 cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
537
538 /* On error, only save the first error value */
539 if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
540 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
541
542 cmd_q->int_rcvd = 1;
543
544 /* Acknowledge the interrupt and wake the kthread */
545 iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
546 wake_up_interruptible(&cmd_q->int_queue);
547 }
548 }
549
550 return IRQ_HANDLED;
551}
552
553static const struct ccp_actions ccp3_actions = { 573static const struct ccp_actions ccp3_actions = {
554 .aes = ccp_perform_aes, 574 .aes = ccp_perform_aes,
555 .xts_aes = ccp_perform_xts_aes, 575 .xts_aes = ccp_perform_xts_aes,
576 .des3 = NULL,
556 .sha = ccp_perform_sha, 577 .sha = ccp_perform_sha,
557 .rsa = ccp_perform_rsa, 578 .rsa = ccp_perform_rsa,
558 .passthru = ccp_perform_passthru, 579 .passthru = ccp_perform_passthru,
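
The v3 rework above is the classic top half/bottom half split: the hard IRQ handler masks the queue interrupts and defers the register walk to a tasklet when one is available (MSI/MSI-X), falling back to inline handling otherwise. A generic sketch of that shape, with illustrative names:

#include <linux/interrupt.h>

struct dev_state {
	struct tasklet_struct bh;
	bool use_tasklet;
};

static void example_bh(unsigned long data)
{
	struct dev_state *st = (struct dev_state *)data;

	/* ... read and acknowledge per-queue status, wake waiters ... */
	/* re-enable the interrupt source only after draining */
	(void)st;
}

static irqreturn_t example_top_half(int irq, void *data)
{
	struct dev_state *st = data;

	/* quiesce the device, then leave hard-IRQ context quickly */
	if (st->use_tasklet)
		tasklet_schedule(&st->bh);
	else
		example_bh((unsigned long)st);
	return IRQ_HANDLED;
}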
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index fc08b4ed69d9..ccbe32d5dd1c 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -108,6 +108,12 @@ union ccp_function {
108 u16 type:2; 108 u16 type:2;
109 } aes_xts; 109 } aes_xts;
110 struct { 110 struct {
111 u16 size:7;
112 u16 encrypt:1;
113 u16 mode:5;
114 u16 type:2;
115 } des3;
116 struct {
111 u16 rsvd1:10; 117 u16 rsvd1:10;
112 u16 type:4; 118 u16 type:4;
113 u16 rsvd2:1; 119 u16 rsvd2:1;
@@ -139,6 +145,10 @@ union ccp_function {
139#define CCP_AES_TYPE(p) ((p)->aes.type) 145#define CCP_AES_TYPE(p) ((p)->aes.type)
140#define CCP_XTS_SIZE(p) ((p)->aes_xts.size) 146#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
141#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) 147#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
148#define CCP_DES3_SIZE(p) ((p)->des3.size)
149#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
150#define CCP_DES3_MODE(p) ((p)->des3.mode)
151#define CCP_DES3_TYPE(p) ((p)->des3.type)
142#define CCP_SHA_TYPE(p) ((p)->sha.type) 152#define CCP_SHA_TYPE(p) ((p)->sha.type)
143#define CCP_RSA_SIZE(p) ((p)->rsa.size) 153#define CCP_RSA_SIZE(p) ((p)->rsa.size)
144#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) 154#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
@@ -388,6 +398,47 @@ static int ccp5_perform_sha(struct ccp_op *op)
388 return ccp5_do_cmd(&desc, op->cmd_q); 398 return ccp5_do_cmd(&desc, op->cmd_q);
389} 399}
390 400
401static int ccp5_perform_des3(struct ccp_op *op)
402{
403 struct ccp5_desc desc;
404 union ccp_function function;
405 u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
406
407 /* Zero out all the fields of the command desc */
408 memset(&desc, 0, sizeof(struct ccp5_desc));
409
410 CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
411
412 CCP5_CMD_SOC(&desc) = op->soc;
413 CCP5_CMD_IOC(&desc) = 1;
414 CCP5_CMD_INIT(&desc) = op->init;
415 CCP5_CMD_EOM(&desc) = op->eom;
416 CCP5_CMD_PROT(&desc) = 0;
417
418 function.raw = 0;
419 CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
420 CCP_DES3_MODE(&function) = op->u.des3.mode;
421 CCP_DES3_TYPE(&function) = op->u.des3.type;
422 CCP5_CMD_FUNCTION(&desc) = function.raw;
423
424 CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
425
426 CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
427 CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
428 CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
429
430 CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
431 CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
432 CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
433
434 CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
435 CCP5_CMD_KEY_HI(&desc) = 0;
436 CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
437 CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
438
439 return ccp5_do_cmd(&desc, op->cmd_q);
440}
441
391static int ccp5_perform_rsa(struct ccp_op *op) 442static int ccp5_perform_rsa(struct ccp_op *op)
392{ 443{
393 struct ccp5_desc desc; 444 struct ccp5_desc desc;
@@ -435,6 +486,7 @@ static int ccp5_perform_passthru(struct ccp_op *op)
435 struct ccp_dma_info *saddr = &op->src.u.dma; 486 struct ccp_dma_info *saddr = &op->src.u.dma;
436 struct ccp_dma_info *daddr = &op->dst.u.dma; 487 struct ccp_dma_info *daddr = &op->dst.u.dma;
437 488
489
438 memset(&desc, 0, Q_DESC_SIZE); 490 memset(&desc, 0, Q_DESC_SIZE);
439 491
440 CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; 492 CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
@@ -653,6 +705,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
653 return rc; 705 return rc;
654} 706}
655 707
708static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
709{
710 unsigned int i;
711
712 for (i = 0; i < ccp->cmd_q_count; i++)
713 iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
714}
715
716static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
717{
718 unsigned int i;
719
720 for (i = 0; i < ccp->cmd_q_count; i++)
721 iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
722}
723
724static void ccp5_irq_bh(unsigned long data)
725{
726 struct ccp_device *ccp = (struct ccp_device *)data;
727 u32 status;
728 unsigned int i;
729
730 for (i = 0; i < ccp->cmd_q_count; i++) {
731 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
732
733 status = ioread32(cmd_q->reg_interrupt_status);
734
735 if (status) {
736 cmd_q->int_status = status;
737 cmd_q->q_status = ioread32(cmd_q->reg_status);
738 cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
739
740 /* On error, only save the first error value */
741 if ((status & INT_ERROR) && !cmd_q->cmd_error)
742 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
743
744 cmd_q->int_rcvd = 1;
745
746 /* Acknowledge the interrupt and wake the kthread */
747 iowrite32(status, cmd_q->reg_interrupt_status);
748 wake_up_interruptible(&cmd_q->int_queue);
749 }
750 }
751 ccp5_enable_queue_interrupts(ccp);
752}
753
754static irqreturn_t ccp5_irq_handler(int irq, void *data)
755{
756 struct device *dev = data;
757 struct ccp_device *ccp = dev_get_drvdata(dev);
758
759 ccp5_disable_queue_interrupts(ccp);
760 if (ccp->use_tasklet)
761 tasklet_schedule(&ccp->irq_tasklet);
762 else
763 ccp5_irq_bh((unsigned long)ccp);
764 return IRQ_HANDLED;
765}
766
656static int ccp5_init(struct ccp_device *ccp) 767static int ccp5_init(struct ccp_device *ccp)
657{ 768{
658 struct device *dev = ccp->dev; 769 struct device *dev = ccp->dev;
@@ -729,6 +840,7 @@ static int ccp5_init(struct ccp_device *ccp)
729 840
730 dev_dbg(dev, "queue #%u available\n", i); 841 dev_dbg(dev, "queue #%u available\n", i);
731 } 842 }
843
732 if (ccp->cmd_q_count == 0) { 844 if (ccp->cmd_q_count == 0) {
733 dev_notice(dev, "no command queues available\n"); 845 dev_notice(dev, "no command queues available\n");
734 ret = -EIO; 846 ret = -EIO;
@@ -736,19 +848,18 @@ static int ccp5_init(struct ccp_device *ccp)
736 } 848 }
737 849
738 /* Turn off the queues and disable interrupts until ready */ 850 /* Turn off the queues and disable interrupts until ready */
851 ccp5_disable_queue_interrupts(ccp);
739 for (i = 0; i < ccp->cmd_q_count; i++) { 852 for (i = 0; i < ccp->cmd_q_count; i++) {
740 cmd_q = &ccp->cmd_q[i]; 853 cmd_q = &ccp->cmd_q[i];
741 854
742 cmd_q->qcontrol = 0; /* Start with nothing */ 855 cmd_q->qcontrol = 0; /* Start with nothing */
743 iowrite32(cmd_q->qcontrol, cmd_q->reg_control); 856 iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
744 857
745 /* Disable the interrupts */
746 iowrite32(0x00, cmd_q->reg_int_enable);
747 ioread32(cmd_q->reg_int_status); 858 ioread32(cmd_q->reg_int_status);
748 ioread32(cmd_q->reg_status); 859 ioread32(cmd_q->reg_status);
749 860
750 /* Clear the interrupts */
751 iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
861 /* Clear the interrupt status */
862 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
752 } 863 }
753 864
754 dev_dbg(dev, "Requesting an IRQ...\n"); 865 dev_dbg(dev, "Requesting an IRQ...\n");
@@ -758,6 +869,10 @@ static int ccp5_init(struct ccp_device *ccp)
758 dev_err(dev, "unable to allocate an IRQ\n"); 869 dev_err(dev, "unable to allocate an IRQ\n");
759 goto e_pool; 870 goto e_pool;
760 } 871 }
872 /* Initialize the ISR tasklet */
873 if (ccp->use_tasklet)
874 tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
875 (unsigned long)ccp);
761 876
762 dev_dbg(dev, "Loading LSB map...\n"); 877 dev_dbg(dev, "Loading LSB map...\n");
763 /* Copy the private LSB mask to the public registers */ 878 /* Copy the private LSB mask to the public registers */
@@ -826,11 +941,7 @@ static int ccp5_init(struct ccp_device *ccp)
826 } 941 }
827 942
828 dev_dbg(dev, "Enabling interrupts...\n"); 943 dev_dbg(dev, "Enabling interrupts...\n");
829 /* Enable interrupts */ 944 ccp5_enable_queue_interrupts(ccp);
830 for (i = 0; i < ccp->cmd_q_count; i++) {
831 cmd_q = &ccp->cmd_q[i];
832 iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
833 }
834 945
835 dev_dbg(dev, "Registering device...\n"); 946 dev_dbg(dev, "Registering device...\n");
836 /* Put this on the unit list to make it available */ 947 /* Put this on the unit list to make it available */
@@ -882,17 +993,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
882 ccp_del_device(ccp); 993 ccp_del_device(ccp);
883 994
884 /* Disable and clear interrupts */ 995 /* Disable and clear interrupts */
996 ccp5_disable_queue_interrupts(ccp);
885 for (i = 0; i < ccp->cmd_q_count; i++) { 997 for (i = 0; i < ccp->cmd_q_count; i++) {
886 cmd_q = &ccp->cmd_q[i]; 998 cmd_q = &ccp->cmd_q[i];
887 999
888 /* Turn off the run bit */ 1000 /* Turn off the run bit */
889 iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); 1001 iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
890 1002
891 /* Disable the interrupts */
892 iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
893
894 /* Clear the interrupt status */ 1003 /* Clear the interrupt status */
895 iowrite32(0x00, cmd_q->reg_int_enable);
1004 iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
896 ioread32(cmd_q->reg_int_status); 1005 ioread32(cmd_q->reg_int_status);
897 ioread32(cmd_q->reg_status); 1006 ioread32(cmd_q->reg_status);
898 } 1007 }
@@ -925,38 +1034,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
925 } 1034 }
926} 1035}
927 1036
928static irqreturn_t ccp5_irq_handler(int irq, void *data)
929{
930 struct device *dev = data;
931 struct ccp_device *ccp = dev_get_drvdata(dev);
932 u32 status;
933 unsigned int i;
934
935 for (i = 0; i < ccp->cmd_q_count; i++) {
936 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
937
938 status = ioread32(cmd_q->reg_interrupt_status);
939
940 if (status) {
941 cmd_q->int_status = status;
942 cmd_q->q_status = ioread32(cmd_q->reg_status);
943 cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
944
945 /* On error, only save the first error value */
946 if ((status & INT_ERROR) && !cmd_q->cmd_error)
947 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
948
949 cmd_q->int_rcvd = 1;
950
951 /* Acknowledge the interrupt and wake the kthread */
952 iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
953 wake_up_interruptible(&cmd_q->int_queue);
954 }
955 }
956
957 return IRQ_HANDLED;
958}
959
960static void ccp5_config(struct ccp_device *ccp) 1037static void ccp5_config(struct ccp_device *ccp)
961{ 1038{
962 /* Public side */ 1039 /* Public side */
@@ -994,6 +1071,7 @@ static const struct ccp_actions ccp5_actions = {
994 .aes = ccp5_perform_aes, 1071 .aes = ccp5_perform_aes,
995 .xts_aes = ccp5_perform_xts_aes, 1072 .xts_aes = ccp5_perform_xts_aes,
996 .sha = ccp5_perform_sha, 1073 .sha = ccp5_perform_sha,
1074 .des3 = ccp5_perform_des3,
997 .rsa = ccp5_perform_rsa, 1075 .rsa = ccp5_perform_rsa,
998 .passthru = ccp5_perform_passthru, 1076 .passthru = ccp5_perform_passthru,
999 .ecc = ccp5_perform_ecc, 1077 .ecc = ccp5_perform_ecc,
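
The new des3 member of union ccp_function shows how the v5 descriptor packs per-engine parameters into one 16-bit function word: each engine names its own bitfields while ccp5_do_cmd() only ever sees function.raw. A userspace illustration of the same packing (the field encodings are hypothetical, and bitfield layout is compiler-dependent):

#include <stdint.h>
#include <stdio.h>

union fn {
	struct {
		uint16_t size:7;
		uint16_t encrypt:1;
		uint16_t mode:5;
		uint16_t type:2;
	} des3;
	uint16_t raw;
};

int main(void)
{
	union fn f = { .raw = 0 };

	f.des3.encrypt = 1;
	f.des3.mode = 2;	/* hypothetical CBC encoding */
	f.des3.type = 1;	/* hypothetical 168-bit 3DES encoding */
	printf("function word: 0x%04x\n", f.raw);
	return 0;
}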
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index aa36f3f81860..0cb09d0feeaf 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -109,9 +109,8 @@
109#define INT_COMPLETION 0x1 109#define INT_COMPLETION 0x1
110#define INT_ERROR 0x2 110#define INT_ERROR 0x2
111#define INT_QUEUE_STOPPED 0x4 111#define INT_QUEUE_STOPPED 0x4
112#define ALL_INTERRUPTS (INT_COMPLETION| \
113 INT_ERROR| \
114 INT_QUEUE_STOPPED)
112#define INT_EMPTY_QUEUE 0x8
113#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
115 114
116#define LSB_REGION_WIDTH 5 115#define LSB_REGION_WIDTH 5
117#define MAX_LSB_CNT 8 116#define MAX_LSB_CNT 8
@@ -194,6 +193,9 @@
194#define CCP_XTS_AES_KEY_SB_COUNT 1 193#define CCP_XTS_AES_KEY_SB_COUNT 1
195#define CCP_XTS_AES_CTX_SB_COUNT 1 194#define CCP_XTS_AES_CTX_SB_COUNT 1
196 195
196#define CCP_DES3_KEY_SB_COUNT 1
197#define CCP_DES3_CTX_SB_COUNT 1
198
197#define CCP_SHA_SB_COUNT 1 199#define CCP_SHA_SB_COUNT 1
198 200
199#define CCP_RSA_MAX_WIDTH 4096 201#define CCP_RSA_MAX_WIDTH 4096
@@ -337,7 +339,10 @@ struct ccp_device {
337 void *dev_specific; 339 void *dev_specific;
338 int (*get_irq)(struct ccp_device *ccp); 340 int (*get_irq)(struct ccp_device *ccp);
339 void (*free_irq)(struct ccp_device *ccp); 341 void (*free_irq)(struct ccp_device *ccp);
342 unsigned int qim;
340 unsigned int irq; 343 unsigned int irq;
344 bool use_tasklet;
345 struct tasklet_struct irq_tasklet;
341 346
342 /* I/O area used for device communication. The register mapping 347 /* I/O area used for device communication. The register mapping
343 * starts at an offset into the mapped bar. 348 * starts at an offset into the mapped bar.
@@ -424,33 +429,33 @@ enum ccp_memtype {
424}; 429};
425#define CCP_MEMTYPE_LSB CCP_MEMTYPE_KSB 430#define CCP_MEMTYPE_LSB CCP_MEMTYPE_KSB
426 431
432
427struct ccp_dma_info { 433struct ccp_dma_info {
428 dma_addr_t address; 434 dma_addr_t address;
429 unsigned int offset; 435 unsigned int offset;
430 unsigned int length; 436 unsigned int length;
431 enum dma_data_direction dir; 437 enum dma_data_direction dir;
432};
438} __packed __aligned(4);
433 439
434struct ccp_dm_workarea { 440struct ccp_dm_workarea {
435 struct device *dev; 441 struct device *dev;
436 struct dma_pool *dma_pool; 442 struct dma_pool *dma_pool;
437 unsigned int length;
438 443
439 u8 *address; 444 u8 *address;
440 struct ccp_dma_info dma; 445 struct ccp_dma_info dma;
446 unsigned int length;
441}; 447};
442 448
443struct ccp_sg_workarea { 449struct ccp_sg_workarea {
444 struct scatterlist *sg; 450 struct scatterlist *sg;
445 int nents; 451 int nents;
452 unsigned int sg_used;
446 453
447 struct scatterlist *dma_sg; 454 struct scatterlist *dma_sg;
448 struct device *dma_dev; 455 struct device *dma_dev;
449 unsigned int dma_count; 456 unsigned int dma_count;
450 enum dma_data_direction dma_dir; 457 enum dma_data_direction dma_dir;
451 458
452 unsigned int sg_used;
453
454 u64 bytes_left; 459 u64 bytes_left;
455}; 460};
456 461
@@ -479,6 +484,12 @@ struct ccp_xts_aes_op {
479 enum ccp_xts_aes_unit_size unit_size; 484 enum ccp_xts_aes_unit_size unit_size;
480}; 485};
481 486
487struct ccp_des3_op {
488 enum ccp_des3_type type;
489 enum ccp_des3_mode mode;
490 enum ccp_des3_action action;
491};
492
482struct ccp_sha_op { 493struct ccp_sha_op {
483 enum ccp_sha_type type; 494 enum ccp_sha_type type;
484 u64 msg_bits; 495 u64 msg_bits;
@@ -516,6 +527,7 @@ struct ccp_op {
516 union { 527 union {
517 struct ccp_aes_op aes; 528 struct ccp_aes_op aes;
518 struct ccp_xts_aes_op xts; 529 struct ccp_xts_aes_op xts;
530 struct ccp_des3_op des3;
519 struct ccp_sha_op sha; 531 struct ccp_sha_op sha;
520 struct ccp_rsa_op rsa; 532 struct ccp_rsa_op rsa;
521 struct ccp_passthru_op passthru; 533 struct ccp_passthru_op passthru;
@@ -624,13 +636,13 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
624struct ccp_actions { 636struct ccp_actions {
625 int (*aes)(struct ccp_op *); 637 int (*aes)(struct ccp_op *);
626 int (*xts_aes)(struct ccp_op *); 638 int (*xts_aes)(struct ccp_op *);
639 int (*des3)(struct ccp_op *);
627 int (*sha)(struct ccp_op *); 640 int (*sha)(struct ccp_op *);
628 int (*rsa)(struct ccp_op *); 641 int (*rsa)(struct ccp_op *);
629 int (*passthru)(struct ccp_op *); 642 int (*passthru)(struct ccp_op *);
630 int (*ecc)(struct ccp_op *); 643 int (*ecc)(struct ccp_op *);
631 u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); 644 u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
632 void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
633 unsigned int);
645 void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
634 unsigned int (*get_free_slots)(struct ccp_cmd_queue *); 646 unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
635 int (*init)(struct ccp_device *); 647 int (*init)(struct ccp_device *);
636 void (*destroy)(struct ccp_device *); 648 void (*destroy)(struct ccp_device *);
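
Marking struct ccp_dma_info __packed __aligned(4) removes inter-member padding and pins the struct's alignment, so its layout no longer depends on the natural alignment of dma_addr_t. A userspace approximation that makes the effect visible (the member types are stand-ins for the kernel's):

#include <stdio.h>
#include <stddef.h>

struct dma_info_packed {
	unsigned long long address;	/* stands in for dma_addr_t */
	unsigned int offset;
	unsigned int length;
	int dir;
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("sizeof  = %zu\n", sizeof(struct dma_info_packed));
	printf("alignof = %zu\n", _Alignof(struct dma_info_packed));
	printf("offsetof(length) = %zu\n",
	       offsetof(struct dma_info_packed, length));
	return 0;
}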
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index f1396c3aedac..c0dfdacbdff5 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -16,6 +16,7 @@
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <crypto/scatterwalk.h> 18#include <crypto/scatterwalk.h>
19#include <crypto/des.h>
19#include <linux/ccp.h> 20#include <linux/ccp.h>
20 21
21#include "ccp-dev.h" 22#include "ccp-dev.h"
@@ -41,6 +42,20 @@ static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
41 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 42 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
42}; 43};
43 44
45static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
46 cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
47 cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
48 cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
49 cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
50};
51
52static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
53 cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
54 cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
55 cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
56 cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
57};
58
44#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ 59#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
45 ccp_gen_jobid(ccp) : 0) 60 ccp_gen_jobid(ccp) : 0)
46 61
@@ -586,6 +601,255 @@ e_key:
586 return ret; 601 return ret;
587} 602}
588 603
604static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
605 struct ccp_cmd *cmd)
606{
607 struct ccp_aes_engine *aes = &cmd->u.aes;
608 struct ccp_dm_workarea key, ctx, final_wa, tag;
609 struct ccp_data src, dst;
610 struct ccp_data aad;
611 struct ccp_op op;
612
613 unsigned long long *final;
614 unsigned int dm_offset;
615 unsigned int ilen;
616 bool in_place = true; /* Default value */
617 int ret;
618
619 struct scatterlist *p_inp, sg_inp[2];
620 struct scatterlist *p_tag, sg_tag[2];
621 struct scatterlist *p_outp, sg_outp[2];
622 struct scatterlist *p_aad;
623
624 if (!aes->iv)
625 return -EINVAL;
626
627 if (!((aes->key_len == AES_KEYSIZE_128) ||
628 (aes->key_len == AES_KEYSIZE_192) ||
629 (aes->key_len == AES_KEYSIZE_256)))
630 return -EINVAL;
631
632 if (!aes->key) /* Gotta have a key SGL */
633 return -EINVAL;
634
635 /* First, decompose the source buffer into AAD & PT,
636 * and the destination buffer into AAD, CT & tag, or
637 * the input into CT & tag.
638 * It is expected that the input and output SGs will
639 * be valid, even if the AAD and input lengths are 0.
640 */
641 p_aad = aes->src;
642 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
643 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
644 if (aes->action == CCP_AES_ACTION_ENCRYPT) {
645 ilen = aes->src_len;
646 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
647 } else {
648 /* Input length for decryption includes tag */
649 ilen = aes->src_len - AES_BLOCK_SIZE;
650 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
651 }
652
653 memset(&op, 0, sizeof(op));
654 op.cmd_q = cmd_q;
655 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
656 op.sb_key = cmd_q->sb_key; /* Pre-allocated */
657 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
658 op.init = 1;
659 op.u.aes.type = aes->type;
660
661 /* Copy the key to the LSB */
662 ret = ccp_init_dm_workarea(&key, cmd_q,
663 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
664 DMA_TO_DEVICE);
665 if (ret)
666 return ret;
667
668 dm_offset = CCP_SB_BYTES - aes->key_len;
669 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
670 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
671 CCP_PASSTHRU_BYTESWAP_256BIT);
672 if (ret) {
673 cmd->engine_error = cmd_q->cmd_error;
674 goto e_key;
675 }
676
677 /* Copy the context (IV) to the LSB.
678 * There is an assumption here that the IV is 96 bits in length, plus
679 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
680 */
681 ret = ccp_init_dm_workarea(&ctx, cmd_q,
682 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
683 DMA_BIDIRECTIONAL);
684 if (ret)
685 goto e_key;
686
687 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
688 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
689
690 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
691 CCP_PASSTHRU_BYTESWAP_256BIT);
692 if (ret) {
693 cmd->engine_error = cmd_q->cmd_error;
694 goto e_ctx;
695 }
696
697 op.init = 1;
698 if (aes->aad_len > 0) {
699 /* Step 1: Run a GHASH over the Additional Authenticated Data */
700 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
701 AES_BLOCK_SIZE,
702 DMA_TO_DEVICE);
703 if (ret)
704 goto e_ctx;
705
706 op.u.aes.mode = CCP_AES_MODE_GHASH;
707 op.u.aes.action = CCP_AES_GHASHAAD;
708
709 while (aad.sg_wa.bytes_left) {
710 ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
711
712 ret = cmd_q->ccp->vdata->perform->aes(&op);
713 if (ret) {
714 cmd->engine_error = cmd_q->cmd_error;
715 goto e_aad;
716 }
717
718 ccp_process_data(&aad, NULL, &op);
719 op.init = 0;
720 }
721 }
722
723 op.u.aes.mode = CCP_AES_MODE_GCTR;
724 op.u.aes.action = aes->action;
725
726 if (ilen > 0) {
727 /* Step 2: Run a GCTR over the plaintext */
728 in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
729
730 ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
731 AES_BLOCK_SIZE,
732 in_place ? DMA_BIDIRECTIONAL
733 : DMA_TO_DEVICE);
734 if (ret)
735 goto e_ctx;
736
737 if (in_place) {
738 dst = src;
739 } else {
740 ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
741 AES_BLOCK_SIZE, DMA_FROM_DEVICE);
742 if (ret)
743 goto e_src;
744 }
745
746 op.soc = 0;
747 op.eom = 0;
748 op.init = 1;
749 while (src.sg_wa.bytes_left) {
750 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
751 if (!src.sg_wa.bytes_left) {
752 unsigned int nbytes = aes->src_len
753 % AES_BLOCK_SIZE;
754
755 if (nbytes) {
756 op.eom = 1;
757 op.u.aes.size = (nbytes * 8) - 1;
758 }
759 }
760
761 ret = cmd_q->ccp->vdata->perform->aes(&op);
762 if (ret) {
763 cmd->engine_error = cmd_q->cmd_error;
764 goto e_dst;
765 }
766
767 ccp_process_data(&src, &dst, &op);
768 op.init = 0;
769 }
770 }
771
772 /* Step 3: Update the IV portion of the context with the original IV */
773 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
774 CCP_PASSTHRU_BYTESWAP_256BIT);
775 if (ret) {
776 cmd->engine_error = cmd_q->cmd_error;
777 goto e_dst;
778 }
779
780 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
781
782 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
783 CCP_PASSTHRU_BYTESWAP_256BIT);
784 if (ret) {
785 cmd->engine_error = cmd_q->cmd_error;
786 goto e_dst;
787 }
788
789 /* Step 4: Concatenate the lengths of the AAD and source, and
790 * hash that 16 byte buffer.
791 */
792 ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
793 DMA_BIDIRECTIONAL);
794 if (ret)
795 goto e_dst;
796 final = (unsigned long long *) final_wa.address;
797 final[0] = cpu_to_be64(aes->aad_len * 8);
798 final[1] = cpu_to_be64(ilen * 8);
799
800 op.u.aes.mode = CCP_AES_MODE_GHASH;
801 op.u.aes.action = CCP_AES_GHASHFINAL;
802 op.src.type = CCP_MEMTYPE_SYSTEM;
803 op.src.u.dma.address = final_wa.dma.address;
804 op.src.u.dma.length = AES_BLOCK_SIZE;
805 op.dst.type = CCP_MEMTYPE_SYSTEM;
806 op.dst.u.dma.address = final_wa.dma.address;
807 op.dst.u.dma.length = AES_BLOCK_SIZE;
808 op.eom = 1;
809 op.u.aes.size = 0;
810 ret = cmd_q->ccp->vdata->perform->aes(&op);
811 if (ret)
812 goto e_dst;
813
814 if (aes->action == CCP_AES_ACTION_ENCRYPT) {
815 /* Put the ciphered tag after the ciphertext. */
816 ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
817 } else {
818 /* Does this ciphered tag match the input? */
819 ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
820 DMA_BIDIRECTIONAL);
821 if (ret)
822 goto e_tag;
823 ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
824
825 ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
826 ccp_dm_free(&tag);
827 }
828
829e_tag:
830 ccp_dm_free(&final_wa);
831
832e_dst:
833 if (aes->src_len && !in_place)
834 ccp_free_data(&dst, cmd_q);
835
836e_src:
837 if (aes->src_len)
838 ccp_free_data(&src, cmd_q);
839
840e_aad:
841 if (aes->aad_len)
842 ccp_free_data(&aad, cmd_q);
843
844e_ctx:
845 ccp_dm_free(&ctx);
846
847e_key:
848 ccp_dm_free(&key);
849
850 return ret;
851}
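
Step 4 above feeds GHASH a final 16-byte block holding len(AAD) and len(C), each expressed in bits and stored big-endian, exactly as GCM requires. A standalone sketch of building that length block:

#include <stdint.h>

static void gcm_len_block(uint8_t out[16], uint64_t aad_len, uint64_t ilen)
{
	uint64_t aad_bits = aad_len * 8;
	uint64_t txt_bits = ilen * 8;
	int i;

	/* 64-bit big-endian stores: high byte first */
	for (i = 0; i < 8; i++) {
		out[7 - i] = (uint8_t)(aad_bits >> (8 * i));
		out[15 - i] = (uint8_t)(txt_bits >> (8 * i));
	}
}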
852
589static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 853static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
590{ 854{
591 struct ccp_aes_engine *aes = &cmd->u.aes; 855 struct ccp_aes_engine *aes = &cmd->u.aes;
@@ -599,6 +863,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
599 if (aes->mode == CCP_AES_MODE_CMAC) 863 if (aes->mode == CCP_AES_MODE_CMAC)
600 return ccp_run_aes_cmac_cmd(cmd_q, cmd); 864 return ccp_run_aes_cmac_cmd(cmd_q, cmd);
601 865
866 if (aes->mode == CCP_AES_MODE_GCM)
867 return ccp_run_aes_gcm_cmd(cmd_q, cmd);
868
602 if (!((aes->key_len == AES_KEYSIZE_128) || 869 if (!((aes->key_len == AES_KEYSIZE_128) ||
603 (aes->key_len == AES_KEYSIZE_192) || 870 (aes->key_len == AES_KEYSIZE_192) ||
604 (aes->key_len == AES_KEYSIZE_256))) 871 (aes->key_len == AES_KEYSIZE_256)))
@@ -925,6 +1192,200 @@ e_key:
925 return ret; 1192 return ret;
926} 1193}
927 1194
1195static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1196{
1197 struct ccp_des3_engine *des3 = &cmd->u.des3;
1198
1199 struct ccp_dm_workarea key, ctx;
1200 struct ccp_data src, dst;
1201 struct ccp_op op;
1202 unsigned int dm_offset;
1203 unsigned int len_singlekey;
1204 bool in_place = false;
1205 int ret;
1206
1207 /* Error checks */
1208 if (!cmd_q->ccp->vdata->perform->des3)
1209 return -EINVAL;
1210
1211 if (des3->key_len != DES3_EDE_KEY_SIZE)
1212 return -EINVAL;
1213
1214 if (((des3->mode == CCP_DES3_MODE_ECB) ||
1215 (des3->mode == CCP_DES3_MODE_CBC)) &&
1216 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
1217 return -EINVAL;
1218
1219 if (!des3->key || !des3->src || !des3->dst)
1220 return -EINVAL;
1221
1222 if (des3->mode != CCP_DES3_MODE_ECB) {
1223 if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
1224 return -EINVAL;
1225
1226 if (!des3->iv)
1227 return -EINVAL;
1228 }
1229
1230 ret = -EIO;
1231 /* Zero out all the fields of the command desc */
1232 memset(&op, 0, sizeof(op));
1233
1234 /* Set up the Function field */
1235 op.cmd_q = cmd_q;
1236 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1237 op.sb_key = cmd_q->sb_key;
1238
1239 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
1240 op.u.des3.type = des3->type;
1241 op.u.des3.mode = des3->mode;
1242 op.u.des3.action = des3->action;
1243
1244 /*
1245 * All supported key sizes fit in a single (32-byte) KSB entry and
1246 * (like AES) must be in little endian format. Use the 256-bit byte
1247 * swap passthru option to convert from big endian to little endian.
1248 */
1249 ret = ccp_init_dm_workarea(&key, cmd_q,
1250 CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
1251 DMA_TO_DEVICE);
1252 if (ret)
1253 return ret;
1254
1255 /*
1256 * The contents of the key triplet are in the reverse order of what
1257 * is required by the engine. Copy the 3 pieces individually to put
1258 * them where they belong.
1259 */
1260 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
1261
1262 len_singlekey = des3->key_len / 3;
1263 ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
1264 des3->key, 0, len_singlekey);
1265 ccp_set_dm_area(&key, dm_offset + len_singlekey,
1266 des3->key, len_singlekey, len_singlekey);
1267 ccp_set_dm_area(&key, dm_offset,
1268 des3->key, 2 * len_singlekey, len_singlekey);
1269
1270 /* Copy the key to the SB */
1271 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
1272 CCP_PASSTHRU_BYTESWAP_256BIT);
1273 if (ret) {
1274 cmd->engine_error = cmd_q->cmd_error;
1275 goto e_key;
1276 }
1277
1278 /*
1279 * The DES3 context fits in a single (32-byte) KSB entry and
1280 * must be in little endian format. Use the 256-bit byte swap
1281 * passthru option to convert from big endian to little endian.
1282 */
1283 if (des3->mode != CCP_DES3_MODE_ECB) {
1284 u32 load_mode;
1285
1286 op.sb_ctx = cmd_q->sb_ctx;
1287
1288 ret = ccp_init_dm_workarea(&ctx, cmd_q,
1289 CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
1290 DMA_BIDIRECTIONAL);
1291 if (ret)
1292 goto e_key;
1293
1294 /* Load the context into the LSB */
1295 dm_offset = CCP_SB_BYTES - des3->iv_len;
1296 ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
1297
1298 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
1299 load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
1300 else
1301 load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
1302 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1303 load_mode);
1304 if (ret) {
1305 cmd->engine_error = cmd_q->cmd_error;
1306 goto e_ctx;
1307 }
1308 }
1309
1310 /*
1311 * Prepare the input and output data workareas. For in-place
1312 * operations we need to set the dma direction to BIDIRECTIONAL
1313 * and copy the src workarea to the dst workarea.
1314 */
1315 if (sg_virt(des3->src) == sg_virt(des3->dst))
1316 in_place = true;
1317
1318 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
1319 DES3_EDE_BLOCK_SIZE,
1320 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1321 if (ret)
1322 goto e_ctx;
1323
1324 if (in_place)
1325 dst = src;
1326 else {
1327 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
1328 DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
1329 if (ret)
1330 goto e_src;
1331 }
1332
1333 /* Send data to the CCP DES3 engine */
1334 while (src.sg_wa.bytes_left) {
1335 ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
1336 if (!src.sg_wa.bytes_left) {
1337 op.eom = 1;
1338
1339 /* Since we don't retrieve the context in ECB mode
1340 * we have to wait for the operation to complete
1341 * on the last piece of data
1342 */
1343 op.soc = 0;
1344 }
1345
1346 ret = cmd_q->ccp->vdata->perform->des3(&op);
1347 if (ret) {
1348 cmd->engine_error = cmd_q->cmd_error;
1349 goto e_dst;
1350 }
1351
1352 ccp_process_data(&src, &dst, &op);
1353 }
1354
1355 if (des3->mode != CCP_DES3_MODE_ECB) {
1356 /* Retrieve the context and make BE */
1357 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1358 CCP_PASSTHRU_BYTESWAP_256BIT);
1359 if (ret) {
1360 cmd->engine_error = cmd_q->cmd_error;
1361 goto e_dst;
1362 }
1363
1364 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
1365 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
1366 dm_offset = CCP_SB_BYTES - des3->iv_len;
1367 else
1368 dm_offset = 0;
1369 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
1370 DES3_EDE_BLOCK_SIZE);
1371 }
1372e_dst:
1373 if (!in_place)
1374 ccp_free_data(&dst, cmd_q);
1375
1376e_src:
1377 ccp_free_data(&src, cmd_q);
1378
1379e_ctx:
1380 if (des3->mode != CCP_DES3_MODE_ECB)
1381 ccp_dm_free(&ctx);
1382
1383e_key:
1384 ccp_dm_free(&key);
1385
1386 return ret;
1387}
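
The key-loading comments above note that the engine wants the 3DES triplet reversed: K3 is copied to the lowest used offset of the 32-byte slot and K1 to the highest. A plain-memory sketch of that copy, mirroring the three ccp_set_dm_area() calls:

#include <stdint.h>
#include <string.h>

#define SB_BYTES	32	/* one LSB/KSB slot, as in the driver */
#define DES3_KEY_LEN	24	/* K1 || K2 || K3, 8 bytes each */

static void load_des3_key(uint8_t slot[SB_BYTES],
			  const uint8_t key[DES3_KEY_LEN])
{
	unsigned int one = DES3_KEY_LEN / 3;
	unsigned int base = SB_BYTES - DES3_KEY_LEN;

	memset(slot, 0, SB_BYTES);
	memcpy(slot + base + 2 * one, key, one);	/* K1 highest */
	memcpy(slot + base + one, key + one, one);	/* K2 middle */
	memcpy(slot + base, key + 2 * one, one);	/* K3 lowest */
}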
1388
928static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1389static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
929{ 1390{
930 struct ccp_sha_engine *sha = &cmd->u.sha; 1391 struct ccp_sha_engine *sha = &cmd->u.sha;
@@ -955,6 +1416,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
955 return -EINVAL; 1416 return -EINVAL;
956 block_size = SHA256_BLOCK_SIZE; 1417 block_size = SHA256_BLOCK_SIZE;
957 break; 1418 break;
1419 case CCP_SHA_TYPE_384:
1420 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
1421 || sha->ctx_len < SHA384_DIGEST_SIZE)
1422 return -EINVAL;
1423 block_size = SHA384_BLOCK_SIZE;
1424 break;
1425 case CCP_SHA_TYPE_512:
1426 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
1427 || sha->ctx_len < SHA512_DIGEST_SIZE)
1428 return -EINVAL;
1429 block_size = SHA512_BLOCK_SIZE;
1430 break;
958 default: 1431 default:
959 return -EINVAL; 1432 return -EINVAL;
960 } 1433 }
@@ -1042,6 +1515,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1042 sb_count = 1; 1515 sb_count = 1;
1043 ooffset = ioffset = 0; 1516 ooffset = ioffset = 0;
1044 break; 1517 break;
1518 case CCP_SHA_TYPE_384:
1519 digest_size = SHA384_DIGEST_SIZE;
1520 init = (void *) ccp_sha384_init;
1521 ctx_size = SHA512_DIGEST_SIZE;
1522 sb_count = 2;
1523 ioffset = 0;
1524 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
1525 break;
1526 case CCP_SHA_TYPE_512:
1527 digest_size = SHA512_DIGEST_SIZE;
1528 init = (void *) ccp_sha512_init;
1529 ctx_size = SHA512_DIGEST_SIZE;
1530 sb_count = 2;
1531 ooffset = ioffset = 0;
1532 break;
1045 default: 1533 default:
1046 ret = -EINVAL; 1534 ret = -EINVAL;
1047 goto e_data; 1535 goto e_data;
@@ -1060,6 +1548,11 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1060 op.u.sha.type = sha->type; 1548 op.u.sha.type = sha->type;
1061 op.u.sha.msg_bits = sha->msg_bits; 1549 op.u.sha.msg_bits = sha->msg_bits;
1062 1550
1551 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
1552 * SHA384/512 require 2 adjacent SB slots, with the right half in the
1553 * first slot, and the left half in the second. Each portion must then
1554 * be in little endian format: use the 256-bit byte swap option.
1555 */
1063 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, 1556 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
1064 DMA_BIDIRECTIONAL); 1557 DMA_BIDIRECTIONAL);
1065 if (ret) 1558 if (ret)
@@ -1071,6 +1564,13 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1071 case CCP_SHA_TYPE_256: 1564 case CCP_SHA_TYPE_256:
1072 memcpy(ctx.address + ioffset, init, ctx_size); 1565 memcpy(ctx.address + ioffset, init, ctx_size);
1073 break; 1566 break;
1567 case CCP_SHA_TYPE_384:
1568 case CCP_SHA_TYPE_512:
1569 memcpy(ctx.address + ctx_size / 2, init,
1570 ctx_size / 2);
1571 memcpy(ctx.address, init + ctx_size / 2,
1572 ctx_size / 2);
1573 break;
1074 default: 1574 default:
1075 ret = -EINVAL; 1575 ret = -EINVAL;
1076 goto e_ctx; 1576 goto e_ctx;
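
For SHA-384/512 the 64-byte initial state is split across two adjacent 32-byte LSB slots with the halves swapped, as the memcpy pair above shows: the right half of the state lands in the first slot and the left half in the second. The same operation in isolation:

#include <stdint.h>
#include <string.h>

#define LSB_SLOT	32	/* one 32-byte storage-block entry */

static void load_sha512_state(uint8_t dst[2 * LSB_SLOT],
			      const uint8_t init[2 * LSB_SLOT])
{
	memcpy(dst + LSB_SLOT, init, LSB_SLOT);	/* left half -> 2nd slot */
	memcpy(dst, init + LSB_SLOT, LSB_SLOT);	/* right half -> 1st slot */
}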
@@ -1137,6 +1637,15 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1137 sha->ctx, 0, 1637 sha->ctx, 0,
1138 digest_size); 1638 digest_size);
1139 break; 1639 break;
1640 case CCP_SHA_TYPE_384:
1641 case CCP_SHA_TYPE_512:
1642 ccp_get_dm_area(&ctx, 0,
1643 sha->ctx, LSB_ITEM_SIZE - ooffset,
1644 LSB_ITEM_SIZE);
1645 ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
1646 sha->ctx, 0,
1647 LSB_ITEM_SIZE - ooffset);
1648 break;
1140 default: 1649 default:
1141 ret = -EINVAL; 1650 ret = -EINVAL;
1142 goto e_ctx; 1651 goto e_ctx;
@@ -1174,6 +1683,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1174 ctx.address + ooffset, 1683 ctx.address + ooffset,
1175 digest_size); 1684 digest_size);
1176 break; 1685 break;
1686 case CCP_SHA_TYPE_384:
1687 case CCP_SHA_TYPE_512:
1688 memcpy(hmac_buf + block_size,
1689 ctx.address + LSB_ITEM_SIZE + ooffset,
1690 LSB_ITEM_SIZE);
1691 memcpy(hmac_buf + block_size +
1692 (LSB_ITEM_SIZE - ooffset),
1693 ctx.address,
1694 LSB_ITEM_SIZE);
1695 break;
1177 default: 1696 default:
1178 ret = -EINVAL; 1697 ret = -EINVAL;
1179 goto e_ctx; 1698 goto e_ctx;
@@ -1831,6 +2350,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1831 case CCP_ENGINE_XTS_AES_128: 2350 case CCP_ENGINE_XTS_AES_128:
1832 ret = ccp_run_xts_aes_cmd(cmd_q, cmd); 2351 ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
1833 break; 2352 break;
2353 case CCP_ENGINE_DES3:
2354 ret = ccp_run_des3_cmd(cmd_q, cmd);
2355 break;
1834 case CCP_ENGINE_SHA: 2356 case CCP_ENGINE_SHA:
1835 ret = ccp_run_sha_cmd(cmd_q, cmd); 2357 ret = ccp_run_sha_cmd(cmd_q, cmd);
1836 break; 2358 break;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996c1085..e880d4cf4ada 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
69 goto e_irq; 69 goto e_irq;
70 } 70 }
71 } 71 }
72 ccp->use_tasklet = true;
72 73
73 return 0; 74 return 0;
74 75
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
100 dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); 101 dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
101 goto e_msi; 102 goto e_msi;
102 } 103 }
104 ccp->use_tasklet = true;
103 105
104 return 0; 106 return 0;
105 107
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 41bc7f4f58cd..f00e0d8bd039 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -294,7 +294,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
294 294
295static struct crypto_shash *chcr_alloc_shash(unsigned int ds) 295static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
296{ 296{
297 struct crypto_shash *base_hash = NULL;
297 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
298 298
299 switch (ds) { 299 switch (ds) {
300 case SHA1_DIGEST_SIZE: 300 case SHA1_DIGEST_SIZE:
@@ -522,7 +522,7 @@ static inline void create_wreq(struct chcr_context *ctx,
522{ 522{
523 struct uld_ctx *u_ctx = ULD_CTX(ctx); 523 struct uld_ctx *u_ctx = ULD_CTX(ctx);
524 int iv_loc = IV_DSGL; 524 int iv_loc = IV_DSGL;
525 int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
525 int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
526 unsigned int immdatalen = 0, nr_frags = 0; 526 unsigned int immdatalen = 0, nr_frags = 0;
527 527
528 if (is_ofld_imm(skb)) { 528 if (is_ofld_imm(skb)) {
@@ -543,7 +543,7 @@ static inline void create_wreq(struct chcr_context *ctx,
543 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); 543 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
544 chcr_req->wreq.rx_chid_to_rx_q_id = 544 chcr_req->wreq.rx_chid_to_rx_q_id =
545 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, 545 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
546 is_iv ? iv_loc : IV_NOP, ctx->tx_channel_id);
546 is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
547 547
548 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, 548 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
549 qid); 549 qid);
@@ -721,19 +721,19 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
721 struct sk_buff *skb; 721 struct sk_buff *skb;
722 722
723 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 723 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
724 ctx->tx_channel_id))) {
724 ctx->tx_qidx))) {
725 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 725 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
726 return -EBUSY; 726 return -EBUSY;
727 } 727 }
728 728
729 skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
729 skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
730 CHCR_ENCRYPT_OP); 730 CHCR_ENCRYPT_OP);
731 if (IS_ERR(skb)) { 731 if (IS_ERR(skb)) {
732 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); 732 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
733 return PTR_ERR(skb); 733 return PTR_ERR(skb);
734 } 734 }
735 skb->dev = u_ctx->lldi.ports[0]; 735 skb->dev = u_ctx->lldi.ports[0];
736 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
736 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
737 chcr_send_wr(skb); 737 chcr_send_wr(skb);
738 return -EINPROGRESS; 738 return -EINPROGRESS;
739} 739}
@@ -746,19 +746,19 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
746 struct sk_buff *skb; 746 struct sk_buff *skb;
747 747
748 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 748 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
749 ctx->tx_channel_id))) {
749 ctx->tx_qidx))) {
750 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 750 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
751 return -EBUSY; 751 return -EBUSY;
752 } 752 }
753 753
754 skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
754 skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
755 CHCR_DECRYPT_OP); 755 CHCR_DECRYPT_OP);
756 if (IS_ERR(skb)) { 756 if (IS_ERR(skb)) {
757 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); 757 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
758 return PTR_ERR(skb); 758 return PTR_ERR(skb);
759 } 759 }
760 skb->dev = u_ctx->lldi.ports[0]; 760 skb->dev = u_ctx->lldi.ports[0];
761 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
761 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
762 chcr_send_wr(skb); 762 chcr_send_wr(skb);
763 return -EINPROGRESS; 763 return -EINPROGRESS;
764} 764}
@@ -766,7 +766,9 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
766static int chcr_device_init(struct chcr_context *ctx) 766static int chcr_device_init(struct chcr_context *ctx)
767{ 767{
768 struct uld_ctx *u_ctx; 768 struct uld_ctx *u_ctx;
769 struct adapter *adap;
769 unsigned int id; 770 unsigned int id;
771 int txq_perchan, txq_idx, ntxq;
770 int err = 0, rxq_perchan, rxq_idx; 772 int err = 0, rxq_perchan, rxq_idx;
771 773
772 id = smp_processor_id(); 774 id = smp_processor_id();
@@ -777,11 +779,18 @@ static int chcr_device_init(struct chcr_context *ctx)
777 goto out; 779 goto out;
778 } 780 }
779 u_ctx = ULD_CTX(ctx); 781 u_ctx = ULD_CTX(ctx);
782 adap = padap(ctx->dev);
783 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
784 adap->vres.ncrypto_fc);
780 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; 785 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
786 txq_perchan = ntxq / u_ctx->lldi.nchan;
781 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan; 787 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
782 rxq_idx += id % rxq_perchan; 788 rxq_idx += id % rxq_perchan;
789 txq_idx = ctx->dev->tx_channel_id * txq_perchan;
790 txq_idx += id % txq_perchan;
783 spin_lock(&ctx->dev->lock_chcr_dev); 791 spin_lock(&ctx->dev->lock_chcr_dev);
784 ctx->tx_channel_id = rxq_idx;
792 ctx->rx_qidx = rxq_idx;
793 ctx->tx_qidx = txq_idx;
785 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; 794 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
786 ctx->dev->rx_channel_id = 0; 795 ctx->dev->rx_channel_id = 0;
787 spin_unlock(&ctx->dev->lock_chcr_dev); 796 spin_unlock(&ctx->dev->lock_chcr_dev);
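
The rewritten chcr_device_init() spreads contexts across the queues of a channel: the channel selects a contiguous block of queue ids, and the CPU id picks one queue inside that block. The index arithmetic in isolation:

#include <stdio.h>

static unsigned int pick_queue(unsigned int chan, unsigned int per_chan,
			       unsigned int cpu)
{
	/* base of this channel's block, plus a CPU-derived offset */
	return chan * per_chan + cpu % per_chan;
}

int main(void)
{
	/* e.g. channel 1 of 2, 4 queues per channel, created on CPU 6 */
	printf("qidx = %u\n", pick_queue(1, 4, 6));	/* prints 6 */
	return 0;
}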
@@ -935,7 +944,7 @@ static int chcr_ahash_update(struct ahash_request *req)
935 944
936 u_ctx = ULD_CTX(ctx); 945 u_ctx = ULD_CTX(ctx);
937 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 946 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
938 ctx->tx_channel_id))) {
947 ctx->tx_qidx))) {
939 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 948 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
940 return -EBUSY; 949 return -EBUSY;
941 } 950 }
@@ -975,7 +984,7 @@ static int chcr_ahash_update(struct ahash_request *req)
975 } 984 }
976 req_ctx->reqlen = remainder; 985 req_ctx->reqlen = remainder;
977 skb->dev = u_ctx->lldi.ports[0]; 986 skb->dev = u_ctx->lldi.ports[0];
978 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
987 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
979 chcr_send_wr(skb); 988 chcr_send_wr(skb);
980 989
981 return -EINPROGRESS; 990 return -EINPROGRESS;
@@ -1028,7 +1037,7 @@ static int chcr_ahash_final(struct ahash_request *req)
1028 return -ENOMEM; 1037 return -ENOMEM;
1029 1038
1030 skb->dev = u_ctx->lldi.ports[0]; 1039 skb->dev = u_ctx->lldi.ports[0];
1031 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1040 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1032 chcr_send_wr(skb); 1041 chcr_send_wr(skb);
1033 return -EINPROGRESS; 1042 return -EINPROGRESS;
1034} 1043}
@@ -1047,7 +1056,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
1047 u_ctx = ULD_CTX(ctx); 1056 u_ctx = ULD_CTX(ctx);
1048 1057
1049 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1058 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1050 ctx->tx_channel_id))) { 1059 ctx->tx_qidx))) {
1051 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1060 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1052 return -EBUSY; 1061 return -EBUSY;
1053 } 1062 }
@@ -1079,7 +1088,7 @@ static int chcr_ahash_finup(struct ahash_request *req)
1079 return -ENOMEM; 1088 return -ENOMEM;
1080 1089
1081 skb->dev = u_ctx->lldi.ports[0]; 1090 skb->dev = u_ctx->lldi.ports[0];
1082 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); 1091 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1083 chcr_send_wr(skb); 1092 chcr_send_wr(skb);
1084 1093
1085 return -EINPROGRESS; 1094 return -EINPROGRESS;
@@ -1100,7 +1109,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
1100 1109
1101 u_ctx = ULD_CTX(ctx); 1110 u_ctx = ULD_CTX(ctx);
1102 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1111 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1103 ctx->tx_channel_id))) { 1112 ctx->tx_qidx))) {
1104 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1113 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1105 return -EBUSY; 1114 return -EBUSY;
1106 } 1115 }
@@ -1130,7 +1139,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
1130 return -ENOMEM; 1139 return -ENOMEM;
1131 1140
1132 skb->dev = u_ctx->lldi.ports[0]; 1141 skb->dev = u_ctx->lldi.ports[0];
1133 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); 1142 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1134 chcr_send_wr(skb); 1143 chcr_send_wr(skb);
1135 return -EINPROGRESS; 1144 return -EINPROGRESS;
1136} 1145}
@@ -1334,20 +1343,36 @@ static int chcr_copy_assoc(struct aead_request *req,
1334 1343
1335 return crypto_skcipher_encrypt(skreq); 1344 return crypto_skcipher_encrypt(skreq);
1336} 1345}
1337 1346static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1338static unsigned char get_hmac(unsigned int authsize) 1347 int aadmax, int wrlen,
1348 unsigned short op_type)
1339{ 1349{
1340 switch (authsize) { 1350 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1341 case ICV_8: 1351
1342 return CHCR_SCMD_HMAC_CTRL_PL1; 1352 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1343 case ICV_10: 1353 (req->assoclen > aadmax) ||
1344 return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; 1354 (src_nent > MAX_SKB_FRAGS) ||
1345 case ICV_12: 1355 (wrlen > MAX_WR_SIZE))
1346 return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; 1356 return 1;
1347 } 1357 return 0;
1348 return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
1349} 1358}
1350 1359
1360static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1361{
1362 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1363 struct chcr_context *ctx = crypto_aead_ctx(tfm);
1364 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1365 struct aead_request *subreq = aead_request_ctx(req);
1366
1367 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1368 aead_request_set_callback(subreq, req->base.flags,
1369 req->base.complete, req->base.data);
1370 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
1371 req->iv);
1372 aead_request_set_ad(subreq, req->assoclen);
1373 return op_type ? crypto_aead_decrypt(subreq) :
1374 crypto_aead_encrypt(subreq);
1375}
1351 1376
1352static struct sk_buff *create_authenc_wr(struct aead_request *req, 1377static struct sk_buff *create_authenc_wr(struct aead_request *req,
1353 unsigned short qid, 1378 unsigned short qid,
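[Note] chcr_aead_fallback() above is the standard kernel shape for delegating an AEAD request to a software implementation: a sub-request is carved out of the original request's context (sized for this in ->init() further below) and mirrors every parameter of the original. A self-contained sketch of the same shape, with illustrative names:

#include <crypto/aead.h>

static int sw_fallback(struct aead_request *req, struct crypto_aead *fb,
		       bool decrypt)
{
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, fb);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return decrypt ? crypto_aead_decrypt(subreq) :
			 crypto_aead_encrypt(subreq);
}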
@@ -1371,7 +1396,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1371 unsigned short stop_offset = 0; 1396 unsigned short stop_offset = 0;
1372 unsigned int assoclen = req->assoclen; 1397 unsigned int assoclen = req->assoclen;
1373 unsigned int authsize = crypto_aead_authsize(tfm); 1398 unsigned int authsize = crypto_aead_authsize(tfm);
1374 int err = 0; 1399 int err = -EINVAL, src_nent;
1375 int null = 0; 1400 int null = 0;
1376 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1401 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1377 GFP_ATOMIC; 1402 GFP_ATOMIC;
@@ -1381,8 +1406,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1381 1406
1382 if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) 1407 if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1383 goto err; 1408 goto err;
1384 1409 src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1385 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) 1410 if (src_nent < 0)
1386 goto err; 1411 goto err;
1387 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); 1412 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1388 reqctx->dst = src; 1413 reqctx->dst = src;
@@ -1400,7 +1425,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1400 } 1425 }
1401 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + 1426 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1402 (op_type ? -authsize : authsize)); 1427 (op_type ? -authsize : authsize));
1403 if (reqctx->dst_nents <= 0) { 1428 if (reqctx->dst_nents < 0) {
1404 pr_err("AUTHENC:Invalid Destination sg entries\n"); 1429 pr_err("AUTHENC:Invalid Destination sg entries\n");
1405 goto err; 1430 goto err;
1406 } 1431 }
@@ -1408,6 +1433,12 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1408 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) 1433 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1409 - sizeof(chcr_req->key_ctx); 1434 - sizeof(chcr_req->key_ctx);
1410 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 1435 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1436 if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
1437 T6_MAX_AAD_SIZE,
1438 transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
1439 op_type)) {
1440 return ERR_PTR(chcr_aead_fallback(req, op_type));
1441 }
1411 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 1442 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1412 if (!skb) 1443 if (!skb)
1413 goto err; 1444 goto err;
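[Note] The fallback test above bounds the work request before any skb is allocated: sgl_len() is the cxgb4 helper that returns the number of 8-byte flits an n-entry scatter-gather list occupies, so sgl_len(n) * 8 is its byte footprint on top of the transport header. Roughly, the condition being enforced is:

/* Illustrative restatement of the size bound in chcr_aead_need_fallback() */
static bool wr_fits(unsigned int transhdr_len, unsigned int nents)
{
	return transhdr_len + sgl_len(nents) * 8 <= MAX_WR_SIZE;	/* 512 */
}

Requests that fail this, exceed the AAD ceiling, or need more than MAX_SKB_FRAGS entries are handed to the software cipher instead of being rejected.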
@@ -1489,24 +1520,6 @@ err:
1489 return ERR_PTR(-EINVAL); 1520 return ERR_PTR(-EINVAL);
1490} 1521}
1491 1522
1492static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
1493 unsigned short offset)
1494{
1495 struct page *spage;
1496 unsigned char *addr;
1497
1498 spage = sg_page(sg);
1499 get_page(spage); /* so that it is not freed by NIC */
1500#ifdef KMAP_ATOMIC_ARGS
1501 addr = kmap_atomic(spage, KM_SOFTIRQ0);
1502#else
1503 addr = kmap_atomic(spage);
1504#endif
1505 memset(addr + sg->offset, 0, offset + 1);
1506
1507 kunmap_atomic(addr);
1508}
1509
1510static int set_msg_len(u8 *block, unsigned int msglen, int csize) 1523static int set_msg_len(u8 *block, unsigned int msglen, int csize)
1511{ 1524{
1512 __be32 data; 1525 __be32 data;
@@ -1570,11 +1583,6 @@ static int ccm_format_packet(struct aead_request *req,
1570 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 1583 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1571 int rc = 0; 1584 int rc = 0;
1572 1585
1573 if (req->assoclen > T5_MAX_AAD_SIZE) {
1574 pr_err("CCM: Unsupported AAD data. It should be < %d\n",
1575 T5_MAX_AAD_SIZE);
1576 return -EINVAL;
1577 }
1578 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { 1586 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1579 reqctx->iv[0] = 3; 1587 reqctx->iv[0] = 3;
1580 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); 1588 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
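[Note] The two lines above start the RFC 4309 counter block: an 11-byte nonce (the 3-byte salt fixed at setkey time plus, per RFC 4309, an 8-byte per-request IV copied in the elided code) leaves a 4-byte length field, and the flags byte stores L - 1 = 3. Sketch of the layout, with illustrative buffer names:

u8 iv[AES_BLOCK_SIZE];

iv[0] = 3;			/* L' = L - 1, 4-byte length field */
memcpy(&iv[1], salt, 3);	/* implicit salt from setkey */
memcpy(&iv[4], req_iv, 8);	/* explicit per-request IV */
/* iv[12..15] carry the running block counter */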
@@ -1600,13 +1608,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
1600 struct chcr_context *chcrctx) 1608 struct chcr_context *chcrctx)
1601{ 1609{
1602 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1610 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1611 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1603 unsigned int ivsize = AES_BLOCK_SIZE; 1612 unsigned int ivsize = AES_BLOCK_SIZE;
1604 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; 1613 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
1605 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; 1614 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
1606 unsigned int c_id = chcrctx->dev->rx_channel_id; 1615 unsigned int c_id = chcrctx->dev->rx_channel_id;
1607 unsigned int ccm_xtra; 1616 unsigned int ccm_xtra;
1608 unsigned char tag_offset = 0, auth_offset = 0; 1617 unsigned char tag_offset = 0, auth_offset = 0;
1609 unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
1610 unsigned int assoclen; 1618 unsigned int assoclen;
1611 1619
1612 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) 1620 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -1642,8 +1650,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
1642 crypto_aead_authsize(tfm)); 1650 crypto_aead_authsize(tfm));
1643 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 1651 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1644 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, 1652 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
1645 cipher_mode, mac_mode, hmac_ctrl, 1653 cipher_mode, mac_mode,
1646 ivsize >> 1); 1654 aeadctx->hmac_ctrl, ivsize >> 1);
1647 1655
1648 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, 1656 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1649 1, dst_size); 1657 1, dst_size);
@@ -1719,16 +1727,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1719 unsigned int dst_size = 0, kctx_len; 1727 unsigned int dst_size = 0, kctx_len;
1720 unsigned int sub_type; 1728 unsigned int sub_type;
1721 unsigned int authsize = crypto_aead_authsize(tfm); 1729 unsigned int authsize = crypto_aead_authsize(tfm);
1722 int err = 0; 1730 int err = -EINVAL, src_nent;
1723 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1731 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1724 GFP_ATOMIC; 1732 GFP_ATOMIC;
1725 1733
1726 1734
1727 if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) 1735 if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1728 goto err; 1736 goto err;
1729 1737 src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1730 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) 1738 if (src_nent < 0)
1731 goto err; 1739 goto err;
1740
1732 sub_type = get_aead_subtype(tfm); 1741 sub_type = get_aead_subtype(tfm);
1733 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); 1742 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1734 reqctx->dst = src; 1743 reqctx->dst = src;
@@ -1744,7 +1753,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1744 } 1753 }
1745 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + 1754 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1746 (op_type ? -authsize : authsize)); 1755 (op_type ? -authsize : authsize));
1747 if (reqctx->dst_nents <= 0) { 1756 if (reqctx->dst_nents < 0) {
1748 pr_err("CCM:Invalid Destination sg entries\n"); 1757 pr_err("CCM:Invalid Destination sg entries\n");
1749 goto err; 1758 goto err;
1750 } 1759 }
@@ -1756,6 +1765,13 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1756 dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); 1765 dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1757 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; 1766 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
1758 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 1767 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1768 if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
1769 T6_MAX_AAD_SIZE - 18,
1770 transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
1771 op_type)) {
1772 return ERR_PTR(chcr_aead_fallback(req, op_type));
1773 }
1774
1759 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 1775 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1760 1776
1761 if (!skb) 1777 if (!skb)
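[Note] The CCM path passes T6_MAX_AAD_SIZE - 18 rather than the full ceiling because CCM prepends the 16-byte B0 block and the 2-byte AAD length field to the caller's AAD, and both come out of the same hardware budget (the constants appear in chcr_crypto.h further below):

/* Usable caller AAD for CCM on T6 */
aadmax = T6_MAX_AAD_SIZE - CCM_B0_SIZE - CCM_AAD_FIELD_SIZE; /* 511 - 16 - 2 */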
@@ -1820,8 +1836,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1820 unsigned char tag_offset = 0; 1836 unsigned char tag_offset = 0;
1821 unsigned int crypt_len = 0; 1837 unsigned int crypt_len = 0;
1822 unsigned int authsize = crypto_aead_authsize(tfm); 1838 unsigned int authsize = crypto_aead_authsize(tfm);
1823 unsigned char hmac_ctrl = get_hmac(authsize); 1839 int err = -EINVAL, src_nent;
1824 int err = 0;
1825 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1840 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1826 GFP_ATOMIC; 1841 GFP_ATOMIC;
1827 1842
@@ -1831,8 +1846,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1831 1846
1832 if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) 1847 if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1833 goto err; 1848 goto err;
1834 1849 src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1835 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) 1850 if (src_nent < 0)
1836 goto err; 1851 goto err;
1837 1852
1838 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); 1853 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
@@ -1854,7 +1869,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1854 crypt_len = req->cryptlen; 1869 crypt_len = req->cryptlen;
1855 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + 1870 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1856 (op_type ? -authsize : authsize)); 1871 (op_type ? -authsize : authsize));
1857 if (reqctx->dst_nents <= 0) { 1872 if (reqctx->dst_nents < 0) {
1858 pr_err("GCM:Invalid Destination sg entries\n"); 1873 pr_err("GCM:Invalid Destination sg entries\n");
1859 goto err; 1874 goto err;
1860 } 1875 }
@@ -1864,6 +1879,12 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1864 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + 1879 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
1865 AEAD_H_SIZE; 1880 AEAD_H_SIZE;
1866 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 1881 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1882 if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
1883 T6_MAX_AAD_SIZE,
1884 transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
1885 op_type)) {
1886 return ERR_PTR(chcr_aead_fallback(req, op_type));
1887 }
1867 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 1888 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1868 if (!skb) 1889 if (!skb)
1869 goto err; 1890 goto err;
@@ -1881,11 +1902,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1881 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( 1902 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
1882 ctx->dev->rx_channel_id, 2, (ivsize ? 1903 ctx->dev->rx_channel_id, 2, (ivsize ?
1883 (req->assoclen + 1) : 0)); 1904 (req->assoclen + 1) : 0));
1884 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len); 1905 chcr_req->sec_cpl.pldlen =
1906 htonl(req->assoclen + ivsize + req->cryptlen);
1885 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 1907 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1886 req->assoclen ? 1 : 0, req->assoclen, 1908 req->assoclen ? 1 : 0, req->assoclen,
1887 req->assoclen + ivsize + 1, 0); 1909 req->assoclen + ivsize + 1, 0);
1888 if (req->cryptlen) {
1889 chcr_req->sec_cpl.cipherstop_lo_authinsert = 1910 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1890 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1, 1911 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
1891 tag_offset, tag_offset); 1912 tag_offset, tag_offset);
@@ -1893,17 +1914,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1893 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == 1914 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
1894 CHCR_ENCRYPT_OP) ? 1 : 0, 1915 CHCR_ENCRYPT_OP) ? 1 : 0,
1895 CHCR_SCMD_CIPHER_MODE_AES_GCM, 1916 CHCR_SCMD_CIPHER_MODE_AES_GCM,
1896 CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl, 1917 CHCR_SCMD_AUTH_MODE_GHASH,
1897 ivsize >> 1); 1918 aeadctx->hmac_ctrl, ivsize >> 1);
1898 } else {
1899 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1900 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
1901 chcr_req->sec_cpl.seqno_numivs =
1902 FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1903 (op_type == CHCR_ENCRYPT_OP) ?
1904 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
1905 0, 0, ivsize >> 1);
1906 }
1907 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 1919 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1908 0, 1, dst_size); 1920 0, 1, dst_size);
1909 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 1921 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
@@ -1936,15 +1948,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1936 write_sg_to_skb(skb, &frags, req->src, req->assoclen); 1948 write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1937 1949
1938 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); 1950 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1939 1951 write_sg_to_skb(skb, &frags, src, req->cryptlen);
1940 if (req->cryptlen) {
1941 write_sg_to_skb(skb, &frags, src, req->cryptlen);
1942 } else {
1943 aes_gcm_empty_pld_pad(req->dst, authsize - 1);
1944 write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
1945
1946 }
1947
1948 create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, 1952 create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1949 sizeof(struct cpl_rx_phys_dsgl) + dst_size); 1953 sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1950 reqctx->skb = skb; 1954 reqctx->skb = skb;
@@ -1965,8 +1969,15 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
1965{ 1969{
1966 struct chcr_context *ctx = crypto_aead_ctx(tfm); 1970 struct chcr_context *ctx = crypto_aead_ctx(tfm);
1967 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 1971 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1968 1972 struct aead_alg *alg = crypto_aead_alg(tfm);
1969 crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx)); 1973
1974 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
1975 CRYPTO_ALG_NEED_FALLBACK);
1976 if (IS_ERR(aeadctx->sw_cipher))
1977 return PTR_ERR(aeadctx->sw_cipher);
1978 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
1979 sizeof(struct aead_request) +
1980 crypto_aead_reqsize(aeadctx->sw_cipher)));
1970 aeadctx->null = crypto_get_default_null_skcipher(); 1981 aeadctx->null = crypto_get_default_null_skcipher();
1971 if (IS_ERR(aeadctx->null)) 1982 if (IS_ERR(aeadctx->null))
1972 return PTR_ERR(aeadctx->null); 1983 return PTR_ERR(aeadctx->null);
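[Note] Two details of the init path above are worth spelling out. Passing CRYPTO_ALG_NEED_FALLBACK as the mask to crypto_alloc_aead() excludes implementations that themselves set that flag, so the driver can never end up with another offload needing a fallback as its own fallback. And the reqsize becomes the larger of the two layouts because the same per-request memory must serve either as the driver's context or as the fallback sub-request. A hedged sketch of the allocation:

struct crypto_aead *fb;

/* type = 0, mask = CRYPTO_ALG_NEED_FALLBACK: any implementation of
 * this cra_name that does not itself require a fallback. */
fb = crypto_alloc_aead(crypto_aead_alg(tfm)->base.cra_name, 0,
		       CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fb))
	return PTR_ERR(fb);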
@@ -1975,7 +1986,11 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
1975 1986
1976static void chcr_aead_cra_exit(struct crypto_aead *tfm) 1987static void chcr_aead_cra_exit(struct crypto_aead *tfm)
1977{ 1988{
1989 struct chcr_context *ctx = crypto_aead_ctx(tfm);
1990 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1991
1978 crypto_put_default_null_skcipher(); 1992 crypto_put_default_null_skcipher();
1993 crypto_free_aead(aeadctx->sw_cipher);
1979} 1994}
1980 1995
1981static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, 1996static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
@@ -1985,7 +2000,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
1985 2000
1986 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; 2001 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
1987 aeadctx->mayverify = VERIFY_HW; 2002 aeadctx->mayverify = VERIFY_HW;
1988 return 0; 2003 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
1989} 2004}
1990static int chcr_authenc_setauthsize(struct crypto_aead *tfm, 2005static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
1991 unsigned int authsize) 2006 unsigned int authsize)
@@ -2022,7 +2037,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2022 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 2037 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2023 aeadctx->mayverify = VERIFY_SW; 2038 aeadctx->mayverify = VERIFY_SW;
2024 } 2039 }
2025 return 0; 2040 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2026} 2041}
2027 2042
2028 2043
@@ -2062,7 +2077,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2062 CRYPTO_TFM_RES_BAD_KEY_LEN); 2077 CRYPTO_TFM_RES_BAD_KEY_LEN);
2063 return -EINVAL; 2078 return -EINVAL;
2064 } 2079 }
2065 return 0; 2080 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2066} 2081}
2067 2082
2068static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, 2083static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
@@ -2088,7 +2103,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2088 CRYPTO_TFM_RES_BAD_KEY_LEN); 2103 CRYPTO_TFM_RES_BAD_KEY_LEN);
2089 return -EINVAL; 2104 return -EINVAL;
2090 } 2105 }
2091 return 0; 2106 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2092} 2107}
2093 2108
2094static int chcr_ccm_setauthsize(struct crypto_aead *tfm, 2109static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
@@ -2130,10 +2145,10 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2130 CRYPTO_TFM_RES_BAD_KEY_LEN); 2145 CRYPTO_TFM_RES_BAD_KEY_LEN);
2131 return -EINVAL; 2146 return -EINVAL;
2132 } 2147 }
2133 return 0; 2148 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2134} 2149}
2135 2150
2136static int chcr_aead_ccm_setkey(struct crypto_aead *aead, 2151static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2137 const u8 *key, 2152 const u8 *key,
2138 unsigned int keylen) 2153 unsigned int keylen)
2139{ 2154{
@@ -2142,8 +2157,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2142 unsigned char ck_size, mk_size; 2157 unsigned char ck_size, mk_size;
2143 int key_ctx_size = 0; 2158 int key_ctx_size = 0;
2144 2159
2145 memcpy(aeadctx->key, key, keylen);
2146 aeadctx->enckey_len = keylen;
2147 key_ctx_size = sizeof(struct _key_ctx) + 2160 key_ctx_size = sizeof(struct _key_ctx) +
2148 ((DIV_ROUND_UP(keylen, 16)) << 4) * 2; 2161 ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
2149 if (keylen == AES_KEYSIZE_128) { 2162 if (keylen == AES_KEYSIZE_128) {
@@ -2163,9 +2176,32 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2163 } 2176 }
2164 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, 2177 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2165 key_ctx_size >> 4); 2178 key_ctx_size >> 4);
2179 memcpy(aeadctx->key, key, keylen);
2180 aeadctx->enckey_len = keylen;
2181
2166 return 0; 2182 return 0;
2167} 2183}
2168 2184
2185static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2186 const u8 *key,
2187 unsigned int keylen)
2188{
2189 struct chcr_context *ctx = crypto_aead_ctx(aead);
2190 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2191 int error;
2192
2193 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2194 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2195 CRYPTO_TFM_REQ_MASK);
2196 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2197 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2198 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2199 CRYPTO_TFM_RES_MASK);
2200 if (error)
2201 return error;
2202 return chcr_ccm_common_setkey(aead, key, keylen);
2203}
2204
2169static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, 2205static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2170 unsigned int keylen) 2206 unsigned int keylen)
2171{ 2207{
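[Note] Every setkey in this series now runs the same flag plumbing: CRYPTO_TFM_REQ_* flags flow down to the software cipher before its setkey, and CRYPTO_TFM_RES_* flags flow back up so the caller sees results such as CRYPTO_TFM_RES_BAD_KEY_LEN from either implementation. The shared shape, as a sketch with an illustrative helper name:

static int setkey_fallback(struct crypto_aead *tfm, struct crypto_aead *fb,
			   const u8 *key, unsigned int keylen)
{
	int err;

	crypto_aead_clear_flags(fb, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(fb, crypto_aead_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(fb, key, keylen);
	crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(tfm, crypto_aead_get_flags(fb) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}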
@@ -2180,7 +2216,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2180 } 2216 }
2181 keylen -= 3; 2217 keylen -= 3;
2182 memcpy(aeadctx->salt, key + keylen, 3); 2218 memcpy(aeadctx->salt, key + keylen, 3);
2183 return chcr_aead_ccm_setkey(aead, key, keylen); 2219 return chcr_ccm_common_setkey(aead, key, keylen);
2184} 2220}
2185 2221
2186static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, 2222static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
@@ -2193,6 +2229,17 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2193 unsigned int ck_size; 2229 unsigned int ck_size;
2194 int ret = 0, key_ctx_size = 0; 2230 int ret = 0, key_ctx_size = 0;
2195 2231
2232 aeadctx->enckey_len = 0;
2233 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2234 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2235 & CRYPTO_TFM_REQ_MASK);
2236 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2237 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2238 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2239 CRYPTO_TFM_RES_MASK);
2240 if (ret)
2241 goto out;
2242
2196 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && 2243 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2197 keylen > 3) { 2244 keylen > 3) {
2198 keylen -= 4; /* nonce/salt is present in the last 4 bytes */ 2245 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
@@ -2207,8 +2254,7 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2207 } else { 2254 } else {
2208 crypto_tfm_set_flags((struct crypto_tfm *)aead, 2255 crypto_tfm_set_flags((struct crypto_tfm *)aead,
2209 CRYPTO_TFM_RES_BAD_KEY_LEN); 2256 CRYPTO_TFM_RES_BAD_KEY_LEN);
2210 aeadctx->enckey_len = 0; 2257 pr_err("GCM: Invalid key length %d\n", keylen);
2211 pr_err("GCM: Invalid key length %d", keylen);
2212 ret = -EINVAL; 2258 ret = -EINVAL;
2213 goto out; 2259 goto out;
2214 } 2260 }
@@ -2259,11 +2305,21 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2259 int err = 0, i, key_ctx_len = 0; 2305 int err = 0, i, key_ctx_len = 0;
2260 unsigned char ck_size = 0; 2306 unsigned char ck_size = 0;
2261 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 }; 2307 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2262 struct crypto_shash *base_hash = NULL; 2308 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2263 struct algo_param param; 2309 struct algo_param param;
2264 int align; 2310 int align;
2265 u8 *o_ptr = NULL; 2311 u8 *o_ptr = NULL;
2266 2312
2313 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2314 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2315 & CRYPTO_TFM_REQ_MASK);
2316 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2317 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2318 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2319 & CRYPTO_TFM_RES_MASK);
2320 if (err)
2321 goto out;
2322
2267 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { 2323 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2268 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); 2324 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2269 goto out; 2325 goto out;
@@ -2296,7 +2352,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2296 base_hash = chcr_alloc_shash(max_authsize); 2352 base_hash = chcr_alloc_shash(max_authsize);
2297 if (IS_ERR(base_hash)) { 2353 if (IS_ERR(base_hash)) {
2298 pr_err("chcr : Base driver cannot be loaded\n"); 2354 pr_err("chcr : Base driver cannot be loaded\n");
2299 goto out; 2355 aeadctx->enckey_len = 0;
2356 return -EINVAL;
2300 } 2357 }
2301 { 2358 {
2302 SHASH_DESC_ON_STACK(shash, base_hash); 2359 SHASH_DESC_ON_STACK(shash, base_hash);
@@ -2351,7 +2408,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2351 } 2408 }
2352out: 2409out:
2353 aeadctx->enckey_len = 0; 2410 aeadctx->enckey_len = 0;
2354 if (base_hash) 2411 if (!IS_ERR(base_hash))
2355 chcr_free_shash(base_hash); 2412 chcr_free_shash(base_hash);
2356 return -EINVAL; 2413 return -EINVAL;
2357} 2414}
@@ -2363,11 +2420,21 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
2363 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2420 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2364 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 2421 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2365 struct crypto_authenc_keys keys; 2422 struct crypto_authenc_keys keys;
2366 2423 int err;
 2367 /* it contains both the auth and the cipher key */ 2424 /* it contains both the auth and the cipher key */
2368 int key_ctx_len = 0; 2425 int key_ctx_len = 0;
2369 unsigned char ck_size = 0; 2426 unsigned char ck_size = 0;
2370 2427
2428 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2429 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2430 & CRYPTO_TFM_REQ_MASK);
2431 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2432 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2433 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2434 & CRYPTO_TFM_RES_MASK);
2435 if (err)
2436 goto out;
2437
2371 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { 2438 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2372 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); 2439 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2373 goto out; 2440 goto out;
@@ -2465,22 +2532,20 @@ static int chcr_aead_op(struct aead_request *req,
2465 } 2532 }
2466 u_ctx = ULD_CTX(ctx); 2533 u_ctx = ULD_CTX(ctx);
2467 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 2534 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2468 ctx->tx_channel_id)) { 2535 ctx->tx_qidx)) {
2469 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 2536 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
2470 return -EBUSY; 2537 return -EBUSY;
2471 } 2538 }
2472 2539
2473 /* Form a WR from req */ 2540 /* Form a WR from req */
2474 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size, 2541 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
2475 op_type); 2542 op_type);
2476 2543
2477 if (IS_ERR(skb) || skb == NULL) { 2544 if (IS_ERR(skb) || !skb)
2478 pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
2479 return PTR_ERR(skb); 2545 return PTR_ERR(skb);
2480 }
2481 2546
2482 skb->dev = u_ctx->lldi.ports[0]; 2547 skb->dev = u_ctx->lldi.ports[0];
2483 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); 2548 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
2484 chcr_send_wr(skb); 2549 chcr_send_wr(skb);
2485 return -EINPROGRESS; 2550 return -EINPROGRESS;
2486} 2551}
@@ -2673,6 +2738,7 @@ static struct chcr_alg_template driver_algs[] = {
2673 .cra_name = "gcm(aes)", 2738 .cra_name = "gcm(aes)",
2674 .cra_driver_name = "gcm-aes-chcr", 2739 .cra_driver_name = "gcm-aes-chcr",
2675 .cra_blocksize = 1, 2740 .cra_blocksize = 1,
2741 .cra_priority = CHCR_AEAD_PRIORITY,
2676 .cra_ctxsize = sizeof(struct chcr_context) + 2742 .cra_ctxsize = sizeof(struct chcr_context) +
2677 sizeof(struct chcr_aead_ctx) + 2743 sizeof(struct chcr_aead_ctx) +
2678 sizeof(struct chcr_gcm_ctx), 2744 sizeof(struct chcr_gcm_ctx),
@@ -2691,6 +2757,7 @@ static struct chcr_alg_template driver_algs[] = {
2691 .cra_name = "rfc4106(gcm(aes))", 2757 .cra_name = "rfc4106(gcm(aes))",
2692 .cra_driver_name = "rfc4106-gcm-aes-chcr", 2758 .cra_driver_name = "rfc4106-gcm-aes-chcr",
2693 .cra_blocksize = 1, 2759 .cra_blocksize = 1,
2760 .cra_priority = CHCR_AEAD_PRIORITY + 1,
2694 .cra_ctxsize = sizeof(struct chcr_context) + 2761 .cra_ctxsize = sizeof(struct chcr_context) +
2695 sizeof(struct chcr_aead_ctx) + 2762 sizeof(struct chcr_aead_ctx) +
2696 sizeof(struct chcr_gcm_ctx), 2763 sizeof(struct chcr_gcm_ctx),
@@ -2710,6 +2777,7 @@ static struct chcr_alg_template driver_algs[] = {
2710 .cra_name = "ccm(aes)", 2777 .cra_name = "ccm(aes)",
2711 .cra_driver_name = "ccm-aes-chcr", 2778 .cra_driver_name = "ccm-aes-chcr",
2712 .cra_blocksize = 1, 2779 .cra_blocksize = 1,
2780 .cra_priority = CHCR_AEAD_PRIORITY,
2713 .cra_ctxsize = sizeof(struct chcr_context) + 2781 .cra_ctxsize = sizeof(struct chcr_context) +
2714 sizeof(struct chcr_aead_ctx), 2782 sizeof(struct chcr_aead_ctx),
2715 2783
@@ -2728,6 +2796,7 @@ static struct chcr_alg_template driver_algs[] = {
2728 .cra_name = "rfc4309(ccm(aes))", 2796 .cra_name = "rfc4309(ccm(aes))",
2729 .cra_driver_name = "rfc4309-ccm-aes-chcr", 2797 .cra_driver_name = "rfc4309-ccm-aes-chcr",
2730 .cra_blocksize = 1, 2798 .cra_blocksize = 1,
2799 .cra_priority = CHCR_AEAD_PRIORITY + 1,
2731 .cra_ctxsize = sizeof(struct chcr_context) + 2800 .cra_ctxsize = sizeof(struct chcr_context) +
2732 sizeof(struct chcr_aead_ctx), 2801 sizeof(struct chcr_aead_ctx),
2733 2802
@@ -2747,6 +2816,7 @@ static struct chcr_alg_template driver_algs[] = {
2747 .cra_driver_name = 2816 .cra_driver_name =
2748 "authenc-hmac-sha1-cbc-aes-chcr", 2817 "authenc-hmac-sha1-cbc-aes-chcr",
2749 .cra_blocksize = AES_BLOCK_SIZE, 2818 .cra_blocksize = AES_BLOCK_SIZE,
2819 .cra_priority = CHCR_AEAD_PRIORITY,
2750 .cra_ctxsize = sizeof(struct chcr_context) + 2820 .cra_ctxsize = sizeof(struct chcr_context) +
2751 sizeof(struct chcr_aead_ctx) + 2821 sizeof(struct chcr_aead_ctx) +
2752 sizeof(struct chcr_authenc_ctx), 2822 sizeof(struct chcr_authenc_ctx),
@@ -2768,6 +2838,7 @@ static struct chcr_alg_template driver_algs[] = {
2768 .cra_driver_name = 2838 .cra_driver_name =
2769 "authenc-hmac-sha256-cbc-aes-chcr", 2839 "authenc-hmac-sha256-cbc-aes-chcr",
2770 .cra_blocksize = AES_BLOCK_SIZE, 2840 .cra_blocksize = AES_BLOCK_SIZE,
2841 .cra_priority = CHCR_AEAD_PRIORITY,
2771 .cra_ctxsize = sizeof(struct chcr_context) + 2842 .cra_ctxsize = sizeof(struct chcr_context) +
2772 sizeof(struct chcr_aead_ctx) + 2843 sizeof(struct chcr_aead_ctx) +
2773 sizeof(struct chcr_authenc_ctx), 2844 sizeof(struct chcr_authenc_ctx),
@@ -2788,6 +2859,7 @@ static struct chcr_alg_template driver_algs[] = {
2788 .cra_driver_name = 2859 .cra_driver_name =
2789 "authenc-hmac-sha224-cbc-aes-chcr", 2860 "authenc-hmac-sha224-cbc-aes-chcr",
2790 .cra_blocksize = AES_BLOCK_SIZE, 2861 .cra_blocksize = AES_BLOCK_SIZE,
2862 .cra_priority = CHCR_AEAD_PRIORITY,
2791 .cra_ctxsize = sizeof(struct chcr_context) + 2863 .cra_ctxsize = sizeof(struct chcr_context) +
2792 sizeof(struct chcr_aead_ctx) + 2864 sizeof(struct chcr_aead_ctx) +
2793 sizeof(struct chcr_authenc_ctx), 2865 sizeof(struct chcr_authenc_ctx),
@@ -2807,6 +2879,7 @@ static struct chcr_alg_template driver_algs[] = {
2807 .cra_driver_name = 2879 .cra_driver_name =
2808 "authenc-hmac-sha384-cbc-aes-chcr", 2880 "authenc-hmac-sha384-cbc-aes-chcr",
2809 .cra_blocksize = AES_BLOCK_SIZE, 2881 .cra_blocksize = AES_BLOCK_SIZE,
2882 .cra_priority = CHCR_AEAD_PRIORITY,
2810 .cra_ctxsize = sizeof(struct chcr_context) + 2883 .cra_ctxsize = sizeof(struct chcr_context) +
2811 sizeof(struct chcr_aead_ctx) + 2884 sizeof(struct chcr_aead_ctx) +
2812 sizeof(struct chcr_authenc_ctx), 2885 sizeof(struct chcr_authenc_ctx),
@@ -2827,6 +2900,7 @@ static struct chcr_alg_template driver_algs[] = {
2827 .cra_driver_name = 2900 .cra_driver_name =
2828 "authenc-hmac-sha512-cbc-aes-chcr", 2901 "authenc-hmac-sha512-cbc-aes-chcr",
2829 .cra_blocksize = AES_BLOCK_SIZE, 2902 .cra_blocksize = AES_BLOCK_SIZE,
2903 .cra_priority = CHCR_AEAD_PRIORITY,
2830 .cra_ctxsize = sizeof(struct chcr_context) + 2904 .cra_ctxsize = sizeof(struct chcr_context) +
2831 sizeof(struct chcr_aead_ctx) + 2905 sizeof(struct chcr_aead_ctx) +
2832 sizeof(struct chcr_authenc_ctx), 2906 sizeof(struct chcr_authenc_ctx),
@@ -2847,6 +2921,7 @@ static struct chcr_alg_template driver_algs[] = {
2847 .cra_driver_name = 2921 .cra_driver_name =
2848 "authenc-digest_null-cbc-aes-chcr", 2922 "authenc-digest_null-cbc-aes-chcr",
2849 .cra_blocksize = AES_BLOCK_SIZE, 2923 .cra_blocksize = AES_BLOCK_SIZE,
2924 .cra_priority = CHCR_AEAD_PRIORITY,
2850 .cra_ctxsize = sizeof(struct chcr_context) + 2925 .cra_ctxsize = sizeof(struct chcr_context) +
2851 sizeof(struct chcr_aead_ctx) + 2926 sizeof(struct chcr_aead_ctx) +
2852 sizeof(struct chcr_authenc_ctx), 2927 sizeof(struct chcr_authenc_ctx),
@@ -2915,10 +2990,9 @@ static int chcr_register_alg(void)
2915 name = driver_algs[i].alg.crypto.cra_driver_name; 2990 name = driver_algs[i].alg.crypto.cra_driver_name;
2916 break; 2991 break;
2917 case CRYPTO_ALG_TYPE_AEAD: 2992 case CRYPTO_ALG_TYPE_AEAD:
2918 driver_algs[i].alg.aead.base.cra_priority =
2919 CHCR_CRA_PRIORITY;
2920 driver_algs[i].alg.aead.base.cra_flags = 2993 driver_algs[i].alg.aead.base.cra_flags =
2921 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; 2994 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
2995 CRYPTO_ALG_NEED_FALLBACK;
2922 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; 2996 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
2923 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; 2997 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
2924 driver_algs[i].alg.aead.init = chcr_aead_cra_init; 2998 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ba38bae7ce80..751d06a58101 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -218,6 +218,10 @@
218 218
219#define MAX_NK 8 219#define MAX_NK 8
220#define CRYPTO_MAX_IMM_TX_PKT_LEN 256 220#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
221#define MAX_WR_SIZE 512
222#define MIN_AUTH_SG 2 /*IV + AAD*/
223#define MIN_GCM_SG 2 /* IV + AAD*/
224#define MIN_CCM_SG 3 /*IV+AAD+B0*/
221 225
222struct algo_param { 226struct algo_param {
223 unsigned int auth_mode; 227 unsigned int auth_mode;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 79da22b5cdc9..cd0c35a18d92 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -54,6 +54,8 @@
54#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) 54#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
55#define MAX_SALT 4 55#define MAX_SALT 4
56 56
57#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
58
57struct uld_ctx; 59struct uld_ctx;
58 60
59struct _key_ctx { 61struct _key_ctx {
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 81cfd0ba132e..5b2fabb14229 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -41,15 +41,15 @@
41 41
42#define CCM_B0_SIZE 16 42#define CCM_B0_SIZE 16
43#define CCM_AAD_FIELD_SIZE 2 43#define CCM_AAD_FIELD_SIZE 2
44#define T5_MAX_AAD_SIZE 512 44#define T6_MAX_AAD_SIZE 511
45 45
46 46
47/* Define following if h/w is not dropping the AAD and IV data before 47/* Define following if h/w is not dropping the AAD and IV data before
48 * giving the processed data 48 * giving the processed data
49 */ 49 */
50 50
51#define CHCR_CRA_PRIORITY 3000 51#define CHCR_CRA_PRIORITY 500
52 52#define CHCR_AEAD_PRIORITY 6000
53#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */ 53#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
54#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */ 54#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
55 55
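[Note] The new priorities decide which implementation the crypto API binds when several register the same cra_name: AEADs move up to 6000 (with +1 on the rfc4106/rfc4309 variants, presumably so the dedicated implementations outrank templates built on top of the base chcr algorithms), while the plain ciphers and hashes drop to 500. A hedged usage sketch showing which provider wins for a given name:

struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

if (!IS_ERR(tfm)) {
	pr_info("gcm(aes) provider: %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
	crypto_free_aead(tfm);
}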
@@ -188,6 +188,7 @@ struct chcr_aead_ctx {
188 __be32 key_ctx_hdr; 188 __be32 key_ctx_hdr;
189 unsigned int enckey_len; 189 unsigned int enckey_len;
190 struct crypto_skcipher *null; 190 struct crypto_skcipher *null;
191 struct crypto_aead *sw_cipher;
191 u8 salt[MAX_SALT]; 192 u8 salt[MAX_SALT];
192 u8 key[CHCR_AES_MAX_KEY_LEN]; 193 u8 key[CHCR_AES_MAX_KEY_LEN];
193 u16 hmac_ctrl; 194 u16 hmac_ctrl;
@@ -211,7 +212,8 @@ struct __crypto_ctx {
211 212
212struct chcr_context { 213struct chcr_context {
213 struct chcr_dev *dev; 214 struct chcr_dev *dev;
214 unsigned char tx_channel_id; 215 unsigned char tx_qidx;
216 unsigned char rx_qidx;
215 struct __crypto_ctx crypto_ctx[0]; 217 struct __crypto_ctx crypto_ctx[0];
216}; 218};
217 219
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
new file mode 100644
index 000000000000..451620b475a0
--- /dev/null
+++ b/drivers/crypto/exynos-rng.c
@@ -0,0 +1,389 @@
1/*
2 * exynos-rng.c - Random Number Generator driver for the Exynos
3 *
4 * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
5 *
6 * Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
7 * Copyright (C) 2012 Samsung Electronics
8 * Jonghwa Lee <jonghwa3.lee@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation;
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/clk.h>
21#include <linux/crypto.h>
22#include <linux/err.h>
23#include <linux/io.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26
27#include <crypto/internal/rng.h>
28
29#define EXYNOS_RNG_CONTROL 0x0
30#define EXYNOS_RNG_STATUS 0x10
31#define EXYNOS_RNG_SEED_BASE 0x140
32#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
33#define EXYNOS_RNG_OUT_BASE 0x160
34#define EXYNOS_RNG_OUT(n) (EXYNOS_RNG_OUT_BASE + (n * 0x4))
35
36/* EXYNOS_RNG_CONTROL bit fields */
37#define EXYNOS_RNG_CONTROL_START 0x18
38/* EXYNOS_RNG_STATUS bit fields */
39#define EXYNOS_RNG_STATUS_SEED_SETTING_DONE BIT(1)
40#define EXYNOS_RNG_STATUS_RNG_DONE BIT(5)
41
42/* Five seed and output registers, each 4 bytes */
43#define EXYNOS_RNG_SEED_REGS 5
44#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
45
46/*
47 * Driver re-seeds itself with generated random numbers to increase
48 * the randomness.
49 *
50 * Time for next re-seed in ms.
51 */
52#define EXYNOS_RNG_RESEED_TIME 100
53/*
54 * In polling mode, do not wait infinitely for the engine to finish the work.
55 */
56#define EXYNOS_RNG_WAIT_RETRIES 100
57
58/* Context for crypto */
59struct exynos_rng_ctx {
60 struct exynos_rng_dev *rng;
61};
62
63/* Device associated memory */
64struct exynos_rng_dev {
65 struct device *dev;
66 void __iomem *mem;
67 struct clk *clk;
68 /* Generated numbers stored for seeding during resume */
69 u8 seed_save[EXYNOS_RNG_SEED_SIZE];
70 unsigned int seed_save_len;
71 /* Time of last seeding in jiffies */
72 unsigned long last_seeding;
73};
74
75static struct exynos_rng_dev *exynos_rng_dev;
76
77static u32 exynos_rng_readl(struct exynos_rng_dev *rng, u32 offset)
78{
79 return readl_relaxed(rng->mem + offset);
80}
81
82static void exynos_rng_writel(struct exynos_rng_dev *rng, u32 val, u32 offset)
83{
84 writel_relaxed(val, rng->mem + offset);
85}
86
87static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
88 const u8 *seed, unsigned int slen)
89{
90 u32 val;
91 int i;
92
93 /* Round seed length because loop iterates over full register size */
94 slen = ALIGN_DOWN(slen, 4);
95
96 if (slen < EXYNOS_RNG_SEED_SIZE)
97 return -EINVAL;
98
99 for (i = 0; i < slen ; i += 4) {
100 unsigned int seed_reg = (i / 4) % EXYNOS_RNG_SEED_REGS;
101
102 val = seed[i] << 24;
103 val |= seed[i + 1] << 16;
104 val |= seed[i + 2] << 8;
105 val |= seed[i + 3] << 0;
106
107 exynos_rng_writel(rng, val, EXYNOS_RNG_SEED(seed_reg));
108 }
109
110 val = exynos_rng_readl(rng, EXYNOS_RNG_STATUS);
111 if (!(val & EXYNOS_RNG_STATUS_SEED_SETTING_DONE)) {
112 dev_warn(rng->dev, "Seed setting not finished\n");
113 return -EIO;
114 }
115
116 rng->last_seeding = jiffies;
117
118 return 0;
119}
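[Note] The seed loop above packs four bytes MSB-first into each register write; with the kernel's unaligned helpers the same packing could be written as (a sketch, not the driver's code):

#include <asm/unaligned.h>

val = get_unaligned_be32(&seed[i]);	/* seed[i] << 24 | ... | seed[i + 3] */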
120
121/*
122 * Read from output registers and put the data under 'dst' array,
123 * up to dlen bytes.
124 *
125 * Returns number of bytes actually stored in 'dst' (dlen
126 * or EXYNOS_RNG_SEED_SIZE).
127 */
128static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
129 u8 *dst, unsigned int dlen)
130{
131 unsigned int cnt = 0;
132 int i, j;
133 u32 val;
134
135 for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
136 val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
137
138 for (i = 0; i < 4; i++) {
139 dst[cnt] = val & 0xff;
140 val >>= 8;
141 if (++cnt >= dlen)
142 return cnt;
143 }
144 }
145
146 return cnt;
147}
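[Note] exynos_rng_copy_random() unpacks each 32-bit register LSB-first, i.e. little-endian; the byte-at-a-time loop is kept so an arbitrary dlen tail is handled, but for a full 4-byte chunk it is equivalent to (sketch):

#include <asm/unaligned.h>

put_unaligned_le32(val, &dst[cnt]);	/* the four dst[cnt++] steps at once */
cnt += 4;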
148
149/*
150 * Start the engine and poll for finish. Then read from output registers
151 * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
152 * random data (EXYNOS_RNG_SEED_SIZE).
153 *
154 * On success: return 0 and store number of read bytes under 'read' address.
155 * On error: return -ERRNO.
156 */
157static int exynos_rng_get_random(struct exynos_rng_dev *rng,
158 u8 *dst, unsigned int dlen,
159 unsigned int *read)
160{
161 int retry = EXYNOS_RNG_WAIT_RETRIES;
162
163 exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
164 EXYNOS_RNG_CONTROL);
165
166 while (!(exynos_rng_readl(rng,
167 EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
168 cpu_relax();
169
170 if (!retry)
171 return -ETIMEDOUT;
172
173 /* Clear status bit */
174 exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
175 EXYNOS_RNG_STATUS);
176 *read = exynos_rng_copy_random(rng, dst, dlen);
177
178 return 0;
179}
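[Note] The wait above is bounded by a fixed retry count rather than a clock. The common alternative for the same bounded poll is the iopoll helper, roughly (sketch; the 10 us poll interval and 1 ms timeout are assumed values, not the driver's):

#include <linux/iopoll.h>

u32 status;
int ret = readl_poll_timeout(rng->mem + EXYNOS_RNG_STATUS, status,
			     status & EXYNOS_RNG_STATUS_RNG_DONE,
			     10, 1000);
if (ret)
	return ret;	/* -ETIMEDOUT */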
180
181/* Re-seed itself from time to time */
182static void exynos_rng_reseed(struct exynos_rng_dev *rng)
183{
184 unsigned long next_seeding = rng->last_seeding + \
185 msecs_to_jiffies(EXYNOS_RNG_RESEED_TIME);
186 unsigned long now = jiffies;
187 unsigned int read = 0;
188 u8 seed[EXYNOS_RNG_SEED_SIZE];
189
190 if (time_before(now, next_seeding))
191 return;
192
193 if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
194 return;
195
196 exynos_rng_set_seed(rng, seed, read);
197}
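[Note] The rate limit above relies on the jiffies comparison macros, which remain correct across jiffies wrap-around where an open-coded `<` would not. The idiom in isolation:

#include <linux/jiffies.h>

unsigned long deadline = rng->last_seeding +
			 msecs_to_jiffies(EXYNOS_RNG_RESEED_TIME);

if (time_before(jiffies, deadline))
	return;	/* re-seeded recently enough, skip this round */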
198
199static int exynos_rng_generate(struct crypto_rng *tfm,
200 const u8 *src, unsigned int slen,
201 u8 *dst, unsigned int dlen)
202{
203 struct exynos_rng_ctx *ctx = crypto_rng_ctx(tfm);
204 struct exynos_rng_dev *rng = ctx->rng;
205 unsigned int read = 0;
206 int ret;
207
208 ret = clk_prepare_enable(rng->clk);
209 if (ret)
210 return ret;
211
212 do {
213 ret = exynos_rng_get_random(rng, dst, dlen, &read);
214 if (ret)
215 break;
216
217 dlen -= read;
218 dst += read;
219
220 exynos_rng_reseed(rng);
221 } while (dlen > 0);
222
223 clk_disable_unprepare(rng->clk);
224
225 return ret;
226}
227
228static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
229 unsigned int slen)
230{
231 struct exynos_rng_ctx *ctx = crypto_rng_ctx(tfm);
232 struct exynos_rng_dev *rng = ctx->rng;
233 int ret;
234
235 ret = clk_prepare_enable(rng->clk);
236 if (ret)
237 return ret;
238
239 ret = exynos_rng_set_seed(ctx->rng, seed, slen);
240
241 clk_disable_unprepare(rng->clk);
242
243 return ret;
244}
245
246static int exynos_rng_kcapi_init(struct crypto_tfm *tfm)
247{
248 struct exynos_rng_ctx *ctx = crypto_tfm_ctx(tfm);
249
250 ctx->rng = exynos_rng_dev;
251
252 return 0;
253}
254
255static struct rng_alg exynos_rng_alg = {
256 .generate = exynos_rng_generate,
257 .seed = exynos_rng_seed,
258 .seedsize = EXYNOS_RNG_SEED_SIZE,
259 .base = {
260 .cra_name = "stdrng",
261 .cra_driver_name = "exynos_rng",
262 .cra_priority = 100,
263 .cra_ctxsize = sizeof(struct exynos_rng_ctx),
264 .cra_module = THIS_MODULE,
265 .cra_init = exynos_rng_kcapi_init,
266 }
267};
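[Note] Registering under the generic "stdrng" name means consumers reach this engine through the kernel RNG API rather than a device node, and the modest cra_priority of 100 lets other stdrng providers outrank it. A hedged usage sketch:

#include <crypto/rng.h>

struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
u8 buf[32];

if (!IS_ERR(rng)) {
	if (crypto_rng_get_bytes(rng, buf, sizeof(buf)) < 0)
		pr_warn("stdrng read failed\n");
	crypto_free_rng(rng);
}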
268
269static int exynos_rng_probe(struct platform_device *pdev)
270{
271 struct exynos_rng_dev *rng;
272 struct resource *res;
273 int ret;
274
275 if (exynos_rng_dev)
276 return -EEXIST;
277
278 rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
279 if (!rng)
280 return -ENOMEM;
281
282 rng->dev = &pdev->dev;
283 rng->clk = devm_clk_get(&pdev->dev, "secss");
284 if (IS_ERR(rng->clk)) {
285 dev_err(&pdev->dev, "Couldn't get clock.\n");
286 return PTR_ERR(rng->clk);
287 }
288
289 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
290 rng->mem = devm_ioremap_resource(&pdev->dev, res);
291 if (IS_ERR(rng->mem))
292 return PTR_ERR(rng->mem);
293
294 platform_set_drvdata(pdev, rng);
295
296 exynos_rng_dev = rng;
297
298 ret = crypto_register_rng(&exynos_rng_alg);
299 if (ret) {
300 dev_err(&pdev->dev,
301 "Couldn't register rng crypto alg: %d\n", ret);
302 exynos_rng_dev = NULL;
303 }
304
305 return ret;
306}
307
308static int exynos_rng_remove(struct platform_device *pdev)
309{
310 crypto_unregister_rng(&exynos_rng_alg);
311
312 exynos_rng_dev = NULL;
313
314 return 0;
315}
316
317static int __maybe_unused exynos_rng_suspend(struct device *dev)
318{
319 struct platform_device *pdev = to_platform_device(dev);
320 struct exynos_rng_dev *rng = platform_get_drvdata(pdev);
321 int ret;
322
323 /* If we were never seeded then after resume it will be the same */
324 if (!rng->last_seeding)
325 return 0;
326
327 rng->seed_save_len = 0;
328 ret = clk_prepare_enable(rng->clk);
329 if (ret)
330 return ret;
331
332 /* Get new random numbers and store them for seeding on resume. */
333 exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
334 &(rng->seed_save_len));
335 dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
336 rng->seed_save_len);
337
338 clk_disable_unprepare(rng->clk);
339
340 return 0;
341}
342
343static int __maybe_unused exynos_rng_resume(struct device *dev)
344{
345 struct platform_device *pdev = to_platform_device(dev);
346 struct exynos_rng_dev *rng = platform_get_drvdata(pdev);
347 int ret;
348
349 /* Never seeded so nothing to do */
350 if (!rng->last_seeding)
351 return 0;
352
353 ret = clk_prepare_enable(rng->clk);
354 if (ret)
355 return ret;
356
357 ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
358
359 clk_disable_unprepare(rng->clk);
360
361 return ret;
362}
363
364static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
365 exynos_rng_resume);
366
367static const struct of_device_id exynos_rng_dt_match[] = {
368 {
369 .compatible = "samsung,exynos4-rng",
370 },
371 { },
372};
373MODULE_DEVICE_TABLE(of, exynos_rng_dt_match);
374
375static struct platform_driver exynos_rng_driver = {
376 .driver = {
377 .name = "exynos-rng",
378 .pm = &exynos_rng_pm_ops,
379 .of_match_table = exynos_rng_dt_match,
380 },
381 .probe = exynos_rng_probe,
382 .remove = exynos_rng_remove,
383};
384
385module_platform_driver(exynos_rng_driver);
386
387MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
388MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
389MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7868765a70c5..771dd26c7076 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -806,7 +806,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
806 void *ptr; 806 void *ptr;
807 807
808 nbytes -= len; 808 nbytes -= len;
809 ptr = page_address(sg_page(sg)) + sg->offset; 809 ptr = sg_virt(sg);
810 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys); 810 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
811 if (!next_buf) { 811 if (!next_buf) {
812 buf = NULL; 812 buf = NULL;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 3a47cdb8f0c8..9e845e866dec 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -19,13 +19,10 @@
19#define AES_BUF_ORDER 2 19#define AES_BUF_ORDER 2
20#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \ 20#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
21 & ~(AES_BLOCK_SIZE - 1)) 21 & ~(AES_BLOCK_SIZE - 1))
22#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
23 AES_BLOCK_SIZE * 2)
24#define AES_MAX_CT_SIZE 6
22 25
23/* AES command token size */
24#define AES_CT_SIZE_ECB 2
25#define AES_CT_SIZE_CBC 3
26#define AES_CT_SIZE_CTR 3
27#define AES_CT_SIZE_GCM_OUT 5
28#define AES_CT_SIZE_GCM_IN 6
29#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) 26#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)
30 27
31/* AES-CBC/ECB/CTR command token */ 28/* AES-CBC/ECB/CTR command token */
@@ -50,6 +47,8 @@
50#define AES_TFM_128BITS cpu_to_le32(0xb << 16) 47#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
51#define AES_TFM_192BITS cpu_to_le32(0xd << 16) 48#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
52#define AES_TFM_256BITS cpu_to_le32(0xf << 16) 49#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
50#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
51#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
53/* AES transform information word 1 fields */ 52/* AES transform information word 1 fields */
54#define AES_TFM_ECB cpu_to_le32(0x0 << 0) 53#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
55#define AES_TFM_CBC cpu_to_le32(0x1 << 0) 54#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
@@ -59,10 +58,9 @@
59#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */ 58#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
60#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10) 59#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
61#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) 60#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)
62#define AES_TFM_GHASH_DIG cpu_to_le32(0x2 << 21)
63#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
64 61
65/* AES flags */ 62/* AES flags */
63#define AES_FLAGS_CIPHER_MSK GENMASK(2, 0)
66#define AES_FLAGS_ECB BIT(0) 64#define AES_FLAGS_ECB BIT(0)
67#define AES_FLAGS_CBC BIT(1) 65#define AES_FLAGS_CBC BIT(1)
68#define AES_FLAGS_CTR BIT(2) 66#define AES_FLAGS_CTR BIT(2)
@@ -70,19 +68,15 @@
70#define AES_FLAGS_ENCRYPT BIT(4) 68#define AES_FLAGS_ENCRYPT BIT(4)
71#define AES_FLAGS_BUSY BIT(5) 69#define AES_FLAGS_BUSY BIT(5)
72 70
71#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))
72
73/** 73/**
74 * Command token(CT) is a set of hardware instructions that 74 * mtk_aes_info - hardware information of AES
75 * are used to control engine's processing flow of AES. 75 * @cmd: command token, hardware instruction
76 * 76 * @tfm: transform state of cipher algorithm.
77 * Transform information(TFM) is used to define AES state and 77 * @state: contains keys and initial vectors.
78 * contains all keys and initial vectors.
79 *
80 * The engine requires CT and TFM to do:
81 * - Commands decoding and control of the engine's data path.
82 * - Coordinating hardware data fetch and store operations.
83 * - Result token construction and output.
84 * 78 *
85 * Memory map of GCM's TFM: 79 * Memory layout of GCM buffer:
86 * /-----------\ 80 * /-----------\
 87 * | AES KEY | 128/192/256 bits 81 * | AES KEY | 128/192/256 bits
88 * |-----------| 82 * |-----------|
@@ -90,14 +84,16 @@
90 * |-----------| 84 * |-----------|
91 * | IVs | 4 * 4 bytes 85 * | IVs | 4 * 4 bytes
92 * \-----------/ 86 * \-----------/
87 *
88 * The engine requires all these info to do:
89 * - Commands decoding and control of the engine's data path.
90 * - Coordinating hardware data fetch and store operations.
91 * - Result token construction and output.
93 */ 92 */
94struct mtk_aes_ct { 93struct mtk_aes_info {
95 __le32 cmd[AES_CT_SIZE_GCM_IN]; 94 __le32 cmd[AES_MAX_CT_SIZE];
96}; 95 __le32 tfm[2];
97 96 __le32 state[AES_MAX_STATE_BUF_SIZE];
98struct mtk_aes_tfm {
99 __le32 ctrl[2];
100 __le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2)];
101}; 97};
102 98
103struct mtk_aes_reqctx { 99struct mtk_aes_reqctx {
@@ -107,11 +103,12 @@ struct mtk_aes_reqctx {
107struct mtk_aes_base_ctx { 103struct mtk_aes_base_ctx {
108 struct mtk_cryp *cryp; 104 struct mtk_cryp *cryp;
109 u32 keylen; 105 u32 keylen;
106 __le32 keymode;
107
110 mtk_aes_fn start; 108 mtk_aes_fn start;
111 109
112 struct mtk_aes_ct ct; 110 struct mtk_aes_info info;
113 dma_addr_t ct_dma; 111 dma_addr_t ct_dma;
114 struct mtk_aes_tfm tfm;
115 dma_addr_t tfm_dma; 112 dma_addr_t tfm_dma;
116 113
117 __le32 ct_hdr; 114 __le32 ct_hdr;
@@ -248,6 +245,33 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
248 sg->length += dma->remainder; 245 sg->length += dma->remainder;
249} 246}
250 247
248static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
249{
250 int i;
251
252 for (i = 0; i < SIZE_IN_WORDS(size); i++)
253 dst[i] = cpu_to_le32(src[i]);
254}
255
256static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
257{
258 int i;
259
260 for (i = 0; i < SIZE_IN_WORDS(size); i++)
261 dst[i] = cpu_to_be32(src[i]);
262}
263
264static inline int mtk_aes_complete(struct mtk_cryp *cryp,
265 struct mtk_aes_rec *aes,
266 int err)
267{
268 aes->flags &= ~AES_FLAGS_BUSY;
269 aes->areq->complete(aes->areq, err);
270 /* Handle new request */
271 tasklet_schedule(&aes->queue_task);
272 return err;
273}
274
251/* 275/*
252 * Write descriptors for processing. This will configure the engine, load 276 * Write descriptors for processing. This will configure the engine, load
253 * the transform information and then start the packet processing. 277 * the transform information and then start the packet processing.
@@ -262,7 +286,7 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
262 286
263 /* Write command descriptors */ 287 /* Write command descriptors */
264 for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) { 288 for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
265 cmd = ring->cmd_base + ring->cmd_pos; 289 cmd = ring->cmd_next;
266 cmd->hdr = MTK_DESC_BUF_LEN(ssg->length); 290 cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
267 cmd->buf = cpu_to_le32(sg_dma_address(ssg)); 291 cmd->buf = cpu_to_le32(sg_dma_address(ssg));
268 292
@@ -274,25 +298,30 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
274 cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma); 298 cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
275 } 299 }
276 300
277 if (++ring->cmd_pos == MTK_DESC_NUM) 301 /* Shift ring buffer and check boundary */
278 ring->cmd_pos = 0; 302 if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
303 ring->cmd_next = ring->cmd_base;
279 } 304 }
280 cmd->hdr |= MTK_DESC_LAST; 305 cmd->hdr |= MTK_DESC_LAST;
281 306
282 /* Prepare result descriptors */ 307 /* Prepare result descriptors */
283 for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) { 308 for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
284 res = ring->res_base + ring->res_pos; 309 res = ring->res_next;
285 res->hdr = MTK_DESC_BUF_LEN(dsg->length); 310 res->hdr = MTK_DESC_BUF_LEN(dsg->length);
286 res->buf = cpu_to_le32(sg_dma_address(dsg)); 311 res->buf = cpu_to_le32(sg_dma_address(dsg));
287 312
288 if (nents == 0) 313 if (nents == 0)
289 res->hdr |= MTK_DESC_FIRST; 314 res->hdr |= MTK_DESC_FIRST;
290 315
291 if (++ring->res_pos == MTK_DESC_NUM) 316 /* Shift ring buffer and check boundary */
292 ring->res_pos = 0; 317 if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
318 ring->res_next = ring->res_base;
293 } 319 }
294 res->hdr |= MTK_DESC_LAST; 320 res->hdr |= MTK_DESC_LAST;
295 321
 322 /* Remember the last result descriptor; GCM reads its status */
323 ring->res_prev = res;
324
296 /* Prepare enough space for the authentication tag */ 325 /* Prepare enough space for the authentication tag */
297 if (aes->flags & AES_FLAGS_GCM) 326 if (aes->flags & AES_FLAGS_GCM)
298 res->hdr += AES_BLOCK_SIZE; 327 res->hdr += AES_BLOCK_SIZE;
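
Replacing the integer cmd_pos/res_pos indices with the cmd_next/res_next cursor pointers drops the base-plus-index addition on every descriptor and makes the wrap test explicit. A runnable sketch of the wrap logic, with an assumed ring size:

#include <stdio.h>

#define MTK_DESC_NUM 8   /* assumed ring size for illustration */

struct mtk_desc { unsigned int hdr; };

static struct mtk_desc ring[MTK_DESC_NUM];
static struct mtk_desc *cmd_next = ring;

/* Same shape as the patched loop: take the cursor, then wrap it when
 * it walks off the end of the descriptor array.
 */
static struct mtk_desc *next_desc(void)
{
	struct mtk_desc *cmd = cmd_next;

	if (++cmd_next == ring + MTK_DESC_NUM)
		cmd_next = ring;
	return cmd;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("slot %ld\n", (long)(next_desc() - ring)); /* 0..7,0,1 */
	return 0;
}
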
@@ -313,9 +342,7 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
313{ 342{
314 struct mtk_aes_base_ctx *ctx = aes->ctx; 343 struct mtk_aes_base_ctx *ctx = aes->ctx;
315 344
316 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct), 345 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
317 DMA_TO_DEVICE);
318 dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm),
319 DMA_TO_DEVICE); 346 DMA_TO_DEVICE);
320 347
321 if (aes->src.sg == aes->dst.sg) { 348 if (aes->src.sg == aes->dst.sg) {
@@ -346,16 +373,14 @@ static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
346static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 373static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
347{ 374{
348 struct mtk_aes_base_ctx *ctx = aes->ctx; 375 struct mtk_aes_base_ctx *ctx = aes->ctx;
376 struct mtk_aes_info *info = &ctx->info;
349 377
350 ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct), 378 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
351 DMA_TO_DEVICE); 379 DMA_TO_DEVICE);
352 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) 380 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
353 return -EINVAL; 381 goto exit;
354 382
355 ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm), 383 ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
356 DMA_TO_DEVICE);
357 if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma)))
358 goto tfm_map_err;
359 384
360 if (aes->src.sg == aes->dst.sg) { 385 if (aes->src.sg == aes->dst.sg) {
361 aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, 386 aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
@@ -382,13 +407,9 @@ static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
382 return mtk_aes_xmit(cryp, aes); 407 return mtk_aes_xmit(cryp, aes);
383 408
384sg_map_err: 409sg_map_err:
385 dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm), 410 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
386 DMA_TO_DEVICE); 411exit:
387tfm_map_err: 412 return mtk_aes_complete(cryp, aes, -EINVAL);
388 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct),
389 DMA_TO_DEVICE);
390
391 return -EINVAL;
392} 413}
393 414
394/* Initialize transform information of CBC/ECB/CTR mode */ 415/* Initialize transform information of CBC/ECB/CTR mode */
@@ -397,50 +418,43 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
397{ 418{
398 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); 419 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
399 struct mtk_aes_base_ctx *ctx = aes->ctx; 420 struct mtk_aes_base_ctx *ctx = aes->ctx;
421 struct mtk_aes_info *info = &ctx->info;
422 u32 cnt = 0;
400 423
401 ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); 424 ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
402 ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len); 425 info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
403 ctx->ct.cmd[1] = AES_CMD1; 426 info->cmd[cnt++] = AES_CMD1;
404 427
428 info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
405 if (aes->flags & AES_FLAGS_ENCRYPT) 429 if (aes->flags & AES_FLAGS_ENCRYPT)
406 ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT; 430 info->tfm[0] |= AES_TFM_BASIC_OUT;
407 else 431 else
408 ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN; 432 info->tfm[0] |= AES_TFM_BASIC_IN;
409 433
410 if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128)) 434 switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
411 ctx->tfm.ctrl[0] |= AES_TFM_128BITS; 435 case AES_FLAGS_CBC:
412 else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256)) 436 info->tfm[1] = AES_TFM_CBC;
413 ctx->tfm.ctrl[0] |= AES_TFM_256BITS; 437 break;
414 else 438 case AES_FLAGS_ECB:
415 ctx->tfm.ctrl[0] |= AES_TFM_192BITS; 439 info->tfm[1] = AES_TFM_ECB;
416 440 goto ecb;
417 if (aes->flags & AES_FLAGS_CBC) { 441 case AES_FLAGS_CTR:
418 const u32 *iv = (const u32 *)req->info; 442 info->tfm[1] = AES_TFM_CTR_LOAD;
419 u32 *iv_state = ctx->tfm.state + ctx->keylen; 443 goto ctr;
420 int i; 444
421 445 default:
422 ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen + 446 /* Should not happen... */
423 SIZE_IN_WORDS(AES_BLOCK_SIZE)); 447 return;
424 ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV;
425
426 for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
427 iv_state[i] = cpu_to_le32(iv[i]);
428
429 ctx->ct.cmd[2] = AES_CMD2;
430 ctx->ct_size = AES_CT_SIZE_CBC;
431 } else if (aes->flags & AES_FLAGS_ECB) {
432 ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen);
433 ctx->tfm.ctrl[1] = AES_TFM_ECB;
434
435 ctx->ct_size = AES_CT_SIZE_ECB;
436 } else if (aes->flags & AES_FLAGS_CTR) {
437 ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen +
438 SIZE_IN_WORDS(AES_BLOCK_SIZE));
439 ctx->tfm.ctrl[1] = AES_TFM_CTR_LOAD | AES_TFM_FULL_IV;
440
441 ctx->ct.cmd[2] = AES_CMD2;
442 ctx->ct_size = AES_CT_SIZE_CTR;
443 } 448 }
449
450 mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
451 AES_BLOCK_SIZE);
452ctr:
453 info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
454 info->tfm[1] |= AES_TFM_FULL_IV;
455 info->cmd[cnt++] = AES_CMD2;
456ecb:
457 ctx->ct_size = cnt;
444} 458}
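
The rewritten mtk_aes_info_init() counts command words as it goes and uses the two labels to share the IV tail: CBC falls out of the switch into the IV copy, CTR jumps past it (its IV is written per transfer in mtk_aes_ctr_transfer()), and ECB skips the IV handling entirely. A compilable control-flow sketch of the resulting ct_size values; only the counting is modeled, the real flag and tfm writes are elided:

#include <stdio.h>

enum mode { CBC, ECB, CTR };

static int count_cmd_words(enum mode m)
{
	int cnt = 2;        /* AES_CMD0 + AES_CMD1, always present */

	switch (m) {
	case CBC:
		break;      /* continue into the IV write below */
	case ECB:
		goto ecb;   /* no IV at all */
	case CTR:
		goto ctr;   /* IV handled later, per transfer */
	}
	/* (CBC only) the IV would be copied into the state buffer here */
ctr:
	cnt++;              /* AES_CMD2 instructs the engine to load IVs */
ecb:
	return cnt;         /* becomes ctx->ct_size */
}

int main(void)
{
	printf("CBC=%d ECB=%d CTR=%d\n", count_cmd_words(CBC),
	       count_cmd_words(ECB), count_cmd_words(CTR)); /* 3 2 3 */
	return 0;
}
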
445 459
446static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, 460static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -465,7 +479,7 @@ static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
465 padlen = mtk_aes_padlen(len); 479 padlen = mtk_aes_padlen(len);
466 480
467 if (len + padlen > AES_BUF_SIZE) 481 if (len + padlen > AES_BUF_SIZE)
468 return -ENOMEM; 482 return mtk_aes_complete(cryp, aes, -ENOMEM);
469 483
470 if (!src_aligned) { 484 if (!src_aligned) {
471 sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); 485 sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -525,13 +539,10 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
525 return ctx->start(cryp, aes); 539 return ctx->start(cryp, aes);
526} 540}
527 541
528static int mtk_aes_complete(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 542static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
543 struct mtk_aes_rec *aes)
529{ 544{
530 aes->flags &= ~AES_FLAGS_BUSY; 545 return mtk_aes_complete(cryp, aes, 0);
531 aes->areq->complete(aes->areq, 0);
532
533 /* Handle new request */
534 return mtk_aes_handle_queue(cryp, aes->id, NULL);
535} 546}
536 547
537static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 548static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
@@ -540,7 +551,7 @@ static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
540 struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req); 551 struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
541 552
542 mtk_aes_set_mode(aes, rctx); 553 mtk_aes_set_mode(aes, rctx);
543 aes->resume = mtk_aes_complete; 554 aes->resume = mtk_aes_transfer_complete;
544 555
545 return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes); 556 return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
546} 557}
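
Hoisting the resume assignment into mtk_aes_ctr_start() works because the continuation never changes between CTR chunks; the done tasklet simply keeps invoking ->resume until the request is finished. A small userspace sketch of that continuation pattern, with -EINPROGRESS spelled as its numeric value so the sketch stays freestanding:

#include <stdio.h>

#define EINPROGRESS_RET (-115)   /* stand-in for -EINPROGRESS */

struct rec;
typedef int (*resume_fn)(struct rec *);

struct rec { resume_fn resume; int chunks_left; };

static int transfer_complete(struct rec *r) { (void)r; return 0; }

/* Installed once at start; re-entered by the done tasklet per chunk */
static int ctr_transfer(struct rec *r)
{
	if (r->chunks_left-- == 0)
		return transfer_complete(r);
	return EINPROGRESS_RET;   /* next DMA chunk has been kicked */
}

int main(void)
{
	struct rec r = { .resume = ctr_transfer, .chunks_left = 3 };

	while (r.resume(&r) == EINPROGRESS_RET)
		;                 /* the done tasklet would drive this loop */
	printf("request complete\n");
	return 0;
}
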
@@ -557,15 +568,14 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
557 struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx); 568 struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
558 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); 569 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
559 struct scatterlist *src, *dst; 570 struct scatterlist *src, *dst;
560 int i; 571 u32 start, end, ctr, blocks;
561 u32 start, end, ctr, blocks, *iv_state;
562 size_t datalen; 572 size_t datalen;
563 bool fragmented = false; 573 bool fragmented = false;
564 574
565 /* Check for transfer completion. */ 575 /* Check for transfer completion. */
566 cctx->offset += aes->total; 576 cctx->offset += aes->total;
567 if (cctx->offset >= req->nbytes) 577 if (cctx->offset >= req->nbytes)
568 return mtk_aes_complete(cryp, aes); 578 return mtk_aes_transfer_complete(cryp, aes);
569 579
570 /* Compute data length. */ 580 /* Compute data length. */
571 datalen = req->nbytes - cctx->offset; 581 datalen = req->nbytes - cctx->offset;
@@ -587,9 +597,8 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
587 scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset)); 597 scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
588 598
589 /* Write IVs into transform state buffer. */ 599 /* Write IVs into transform state buffer. */
590 iv_state = ctx->tfm.state + ctx->keylen; 600 mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
591 for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++) 601 AES_BLOCK_SIZE);
592 iv_state[i] = cpu_to_le32(cctx->iv[i]);
593 602
594 if (unlikely(fragmented)) { 603 if (unlikely(fragmented)) {
595 /* 604 /*
@@ -599,7 +608,6 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
599 cctx->iv[3] = cpu_to_be32(ctr); 608 cctx->iv[3] = cpu_to_be32(ctr);
600 crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE); 609 crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
601 } 610 }
602 aes->resume = mtk_aes_ctr_transfer;
603 611
604 return mtk_aes_dma(cryp, aes, src, dst, datalen); 612 return mtk_aes_dma(cryp, aes, src, dst, datalen);
605} 613}
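
mtk_aes_ctr_transfer() must split a request whenever the low 32-bit word of the counter block would wrap inside it, since only crypto_inc() on the software IV copy can carry into the upper words. A runnable sketch of that wraparound test; block math only, values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* Returns how many bytes fit before ctr's low word wraps around */
static size_t ctr_chunk(uint32_t ctr, size_t datalen)
{
	uint32_t blocks = (uint32_t)((datalen + AES_BLOCK_SIZE - 1) /
				     AES_BLOCK_SIZE);
	uint32_t end = ctr + blocks - 1;

	if (end < ctr)   /* unsigned overflow: fragmented request */
		return (size_t)(0u - ctr) * AES_BLOCK_SIZE;
	return datalen;
}

int main(void)
{
	printf("%zu\n", ctr_chunk(0xfffffffeu, 16 * 10)); /* 32: 2 blocks */
	printf("%zu\n", ctr_chunk(1, 16 * 10));           /* 160: no wrap */
	return 0;
}
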
@@ -615,6 +623,7 @@ static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
615 memcpy(cctx->iv, req->info, AES_BLOCK_SIZE); 623 memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
616 cctx->offset = 0; 624 cctx->offset = 0;
617 aes->total = 0; 625 aes->total = 0;
626 aes->resume = mtk_aes_ctr_transfer;
618 627
619 return mtk_aes_ctr_transfer(cryp, aes); 628 return mtk_aes_ctr_transfer(cryp, aes);
620} 629}
@@ -624,21 +633,25 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
624 const u8 *key, u32 keylen) 633 const u8 *key, u32 keylen)
625{ 634{
626 struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm); 635 struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
627 const u32 *aes_key = (const u32 *)key;
628 u32 *key_state = ctx->tfm.state;
629 int i;
630 636
631 if (keylen != AES_KEYSIZE_128 && 637 switch (keylen) {
632 keylen != AES_KEYSIZE_192 && 638 case AES_KEYSIZE_128:
633 keylen != AES_KEYSIZE_256) { 639 ctx->keymode = AES_TFM_128BITS;
640 break;
641 case AES_KEYSIZE_192:
642 ctx->keymode = AES_TFM_192BITS;
643 break;
644 case AES_KEYSIZE_256:
645 ctx->keymode = AES_TFM_256BITS;
646 break;
647
648 default:
634 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 649 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
635 return -EINVAL; 650 return -EINVAL;
636 } 651 }
637 652
638 ctx->keylen = SIZE_IN_WORDS(keylen); 653 ctx->keylen = SIZE_IN_WORDS(keylen);
639 654 mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
640 for (i = 0; i < ctx->keylen; i++)
641 key_state[i] = cpu_to_le32(aes_key[i]);
642 655
643 return 0; 656 return 0;
644} 657}
@@ -789,6 +802,19 @@ mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
789 return container_of(ctx, struct mtk_aes_gcm_ctx, base); 802 return container_of(ctx, struct mtk_aes_gcm_ctx, base);
790} 803}
791 804
805/*
806 * The engine verifies and compares the tag automatically, so we just
807 * need to check the returned status stored in the result descriptor.
808 */
809static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
810 struct mtk_aes_rec *aes)
811{
812 u32 status = cryp->ring[aes->id]->res_prev->ct;
813
814 return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
815 -EBADMSG : 0);
816}
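
This works without any byte-swapping because AES_AUTH_TAG_ERR is defined pre-swapped (cpu_to_le32(BIT(26))), so it can be ANDed directly against the little-endian status word the engine wrote back. A userspace sketch of the same trick, assuming glibc/BSD <endian.h>:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)           (1u << (n))
#define AES_AUTH_TAG_ERR htole32(BIT(26))   /* mask stored pre-swapped */

int main(void)
{
	/* The engine writes the result-descriptor status little-endian */
	uint32_t status = htole32(BIT(26) | BIT(1));

	printf("%s\n", (status & AES_AUTH_TAG_ERR) ? "-EBADMSG" : "tag ok");
	return 0;
}
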
817
792/* Initialize transform information of GCM mode */ 818/* Initialize transform information of GCM mode */
793static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp, 819static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
794 struct mtk_aes_rec *aes, 820 struct mtk_aes_rec *aes,
@@ -797,45 +823,35 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
797 struct aead_request *req = aead_request_cast(aes->areq); 823 struct aead_request *req = aead_request_cast(aes->areq);
798 struct mtk_aes_base_ctx *ctx = aes->ctx; 824 struct mtk_aes_base_ctx *ctx = aes->ctx;
799 struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 825 struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
800 const u32 *iv = (const u32 *)req->iv; 826 struct mtk_aes_info *info = &ctx->info;
801 u32 *iv_state = ctx->tfm.state + ctx->keylen +
802 SIZE_IN_WORDS(AES_BLOCK_SIZE);
803 u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); 827 u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
804 int i; 828 u32 cnt = 0;
805 829
806 ctx->ct_hdr = AES_CT_CTRL_HDR | len; 830 ctx->ct_hdr = AES_CT_CTRL_HDR | len;
807 831
808 ctx->ct.cmd[0] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen); 832 info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
809 ctx->ct.cmd[1] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen); 833 info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
810 ctx->ct.cmd[2] = AES_GCM_CMD2; 834 info->cmd[cnt++] = AES_GCM_CMD2;
811 ctx->ct.cmd[3] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen); 835 info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
812 836
813 if (aes->flags & AES_FLAGS_ENCRYPT) { 837 if (aes->flags & AES_FLAGS_ENCRYPT) {
814 ctx->ct.cmd[4] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize); 838 info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
815 ctx->ct_size = AES_CT_SIZE_GCM_OUT; 839 info->tfm[0] = AES_TFM_GCM_OUT;
816 ctx->tfm.ctrl[0] = AES_TFM_GCM_OUT;
817 } else { 840 } else {
818 ctx->ct.cmd[4] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize); 841 info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
819 ctx->ct.cmd[5] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize); 842 info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
820 ctx->ct_size = AES_CT_SIZE_GCM_IN; 843 info->tfm[0] = AES_TFM_GCM_IN;
821 ctx->tfm.ctrl[0] = AES_TFM_GCM_IN;
822 } 844 }
845 ctx->ct_size = cnt;
823 846
824 if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128)) 847 info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
825 ctx->tfm.ctrl[0] |= AES_TFM_128BITS; 848 ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
826 else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256)) 849 ctx->keymode;
827 ctx->tfm.ctrl[0] |= AES_TFM_256BITS; 850 info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
828 else 851 AES_TFM_ENC_HASH;
829 ctx->tfm.ctrl[0] |= AES_TFM_192BITS;
830 852
831 ctx->tfm.ctrl[0] |= AES_TFM_GHASH_DIG | AES_TFM_GHASH | 853 mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
832 AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS( 854 AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
833 AES_BLOCK_SIZE + ivsize));
834 ctx->tfm.ctrl[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE |
835 AES_TFM_3IV | AES_TFM_ENC_HASH;
836
837 for (i = 0; i < SIZE_IN_WORDS(ivsize); i++)
838 iv_state[i] = cpu_to_le32(iv[i]);
839} 855}
840 856
841static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, 857static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -856,7 +872,7 @@ static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
856 872
857 if (!src_aligned || !dst_aligned) { 873 if (!src_aligned || !dst_aligned) {
858 if (aes->total > AES_BUF_SIZE) 874 if (aes->total > AES_BUF_SIZE)
859 return -ENOMEM; 875 return mtk_aes_complete(cryp, aes, -ENOMEM);
860 876
861 if (!src_aligned) { 877 if (!src_aligned) {
862 sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); 878 sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
@@ -892,6 +908,8 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
892 908
893 if (aes->flags & AES_FLAGS_ENCRYPT) { 909 if (aes->flags & AES_FLAGS_ENCRYPT) {
894 u32 tag[4]; 910 u32 tag[4];
911
912 aes->resume = mtk_aes_transfer_complete;
895 /* Compute total process length. */ 913 /* Compute total process length. */
896 aes->total = len + gctx->authsize; 914 aes->total = len + gctx->authsize;
897 /* Compute text length. */ 915 /* Compute text length. */
@@ -899,10 +917,10 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
899 /* Hardware will append the authentication tag to the output buffer */ 917 /* Hardware will append the authentication tag to the output buffer */
900 scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); 918 scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
901 } else { 919 } else {
920 aes->resume = mtk_aes_gcm_tag_verify;
902 aes->total = len; 921 aes->total = len;
903 gctx->textlen = req->cryptlen - gctx->authsize; 922 gctx->textlen = req->cryptlen - gctx->authsize;
904 } 923 }
905 aes->resume = mtk_aes_complete;
906 924
907 return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); 925 return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
908} 926}
@@ -915,7 +933,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
915 rctx->mode = AES_FLAGS_GCM | mode; 933 rctx->mode = AES_FLAGS_GCM | mode;
916 934
917 return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), 935 return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
918 &req->base); 936 &req->base);
919} 937}
920 938
921static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err) 939static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
@@ -949,24 +967,26 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
949 struct scatterlist sg[1]; 967 struct scatterlist sg[1];
950 struct skcipher_request req; 968 struct skcipher_request req;
951 } *data; 969 } *data;
952 const u32 *aes_key; 970 int err;
953 u32 *key_state, *hash_state;
954 int err, i;
955 971
956 if (keylen != AES_KEYSIZE_256 && 972 switch (keylen) {
957 keylen != AES_KEYSIZE_192 && 973 case AES_KEYSIZE_128:
958 keylen != AES_KEYSIZE_128) { 974 ctx->keymode = AES_TFM_128BITS;
975 break;
976 case AES_KEYSIZE_192:
977 ctx->keymode = AES_TFM_192BITS;
978 break;
979 case AES_KEYSIZE_256:
980 ctx->keymode = AES_TFM_256BITS;
981 break;
982
983 default:
959 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 984 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
960 return -EINVAL; 985 return -EINVAL;
961 } 986 }
962 987
963 key_state = ctx->tfm.state;
964 aes_key = (u32 *)key;
965 ctx->keylen = SIZE_IN_WORDS(keylen); 988 ctx->keylen = SIZE_IN_WORDS(keylen);
966 989
967 for (i = 0; i < ctx->keylen; i++)
968 ctx->tfm.state[i] = cpu_to_le32(aes_key[i]);
969
970 /* Same as crypto_gcm_setkey() from crypto/gcm.c */ 990 /* Same as crypto_gcm_setkey() from crypto/gcm.c */
971 crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); 991 crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
972 crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & 992 crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
@@ -1001,10 +1021,11 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
1001 if (err) 1021 if (err)
1002 goto out; 1022 goto out;
1003 1023
1004 hash_state = key_state + ctx->keylen; 1024 /* Write key into state buffer */
1005 1025 mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
1006 for (i = 0; i < 4; i++) 1026 /* Write key(H) into state buffer */
1007 hash_state[i] = cpu_to_be32(data->hash[i]); 1027 mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
1028 AES_BLOCK_SIZE);
1008out: 1029out:
1009 kzfree(data); 1030 kzfree(data);
1010 return err; 1031 return err;
@@ -1092,58 +1113,36 @@ static struct aead_alg aes_gcm_alg = {
1092 }, 1113 },
1093}; 1114};
1094 1115
1095static void mtk_aes_enc_task(unsigned long data) 1116static void mtk_aes_queue_task(unsigned long data)
1096{ 1117{
1097 struct mtk_cryp *cryp = (struct mtk_cryp *)data; 1118 struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
1098 struct mtk_aes_rec *aes = cryp->aes[0];
1099 1119
1100 mtk_aes_unmap(cryp, aes); 1120 mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
1101 aes->resume(cryp, aes);
1102} 1121}
1103 1122
1104static void mtk_aes_dec_task(unsigned long data) 1123static void mtk_aes_done_task(unsigned long data)
1105{ 1124{
1106 struct mtk_cryp *cryp = (struct mtk_cryp *)data; 1125 struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
1107 struct mtk_aes_rec *aes = cryp->aes[1]; 1126 struct mtk_cryp *cryp = aes->cryp;
1108 1127
1109 mtk_aes_unmap(cryp, aes); 1128 mtk_aes_unmap(cryp, aes);
1110 aes->resume(cryp, aes); 1129 aes->resume(cryp, aes);
1111} 1130}
1112 1131
1113static irqreturn_t mtk_aes_enc_irq(int irq, void *dev_id) 1132static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
1114{ 1133{
1115 struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id; 1134 struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
1116 struct mtk_aes_rec *aes = cryp->aes[0]; 1135 struct mtk_cryp *cryp = aes->cryp;
1117 u32 val = mtk_aes_read(cryp, RDR_STAT(RING0)); 1136 u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
1118 1137
1119 mtk_aes_write(cryp, RDR_STAT(RING0), val); 1138 mtk_aes_write(cryp, RDR_STAT(aes->id), val);
1120 1139
1121 if (likely(AES_FLAGS_BUSY & aes->flags)) { 1140 if (likely(AES_FLAGS_BUSY & aes->flags)) {
1122 mtk_aes_write(cryp, RDR_PROC_COUNT(RING0), MTK_CNT_RST); 1141 mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
1123 mtk_aes_write(cryp, RDR_THRESH(RING0), 1142 mtk_aes_write(cryp, RDR_THRESH(aes->id),
1124 MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE); 1143 MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1125 1144
1126 tasklet_schedule(&aes->task); 1145 tasklet_schedule(&aes->done_task);
1127 } else {
1128 dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
1129 }
1130 return IRQ_HANDLED;
1131}
1132
1133static irqreturn_t mtk_aes_dec_irq(int irq, void *dev_id)
1134{
1135 struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
1136 struct mtk_aes_rec *aes = cryp->aes[1];
1137 u32 val = mtk_aes_read(cryp, RDR_STAT(RING1));
1138
1139 mtk_aes_write(cryp, RDR_STAT(RING1), val);
1140
1141 if (likely(AES_FLAGS_BUSY & aes->flags)) {
1142 mtk_aes_write(cryp, RDR_PROC_COUNT(RING1), MTK_CNT_RST);
1143 mtk_aes_write(cryp, RDR_THRESH(RING1),
1144 MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1145
1146 tasklet_schedule(&aes->task);
1147 } else { 1146 } else {
1148 dev_warn(cryp->dev, "AES interrupt when no active requests.\n"); 1147 dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
1149 } 1148 }
@@ -1171,14 +1170,20 @@ static int mtk_aes_record_init(struct mtk_cryp *cryp)
1171 if (!aes[i]->buf) 1170 if (!aes[i]->buf)
1172 goto err_cleanup; 1171 goto err_cleanup;
1173 1172
1174 aes[i]->id = i; 1173 aes[i]->cryp = cryp;
1175 1174
1176 spin_lock_init(&aes[i]->lock); 1175 spin_lock_init(&aes[i]->lock);
1177 crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE); 1176 crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
1177
1178 tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
1179 (unsigned long)aes[i]);
1180 tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
1181 (unsigned long)aes[i]);
1178 } 1182 }
1179 1183
1180 tasklet_init(&aes[0]->task, mtk_aes_enc_task, (unsigned long)cryp); 1184 /* Link to ring0 and ring1 respectively */
1181 tasklet_init(&aes[1]->task, mtk_aes_dec_task, (unsigned long)cryp); 1185 aes[0]->id = MTK_RING0;
1186 aes[1]->id = MTK_RING1;
1182 1187
1183 return 0; 1188 return 0;
1184 1189
@@ -1196,7 +1201,9 @@ static void mtk_aes_record_free(struct mtk_cryp *cryp)
1196 int i; 1201 int i;
1197 1202
1198 for (i = 0; i < MTK_REC_NUM; i++) { 1203 for (i = 0; i < MTK_REC_NUM; i++) {
1199 tasklet_kill(&cryp->aes[i]->task); 1204 tasklet_kill(&cryp->aes[i]->done_task);
1205 tasklet_kill(&cryp->aes[i]->queue_task);
1206
1200 free_page((unsigned long)cryp->aes[i]->buf); 1207 free_page((unsigned long)cryp->aes[i]->buf);
1201 kfree(cryp->aes[i]); 1208 kfree(cryp->aes[i]);
1202 } 1209 }
@@ -1246,25 +1253,23 @@ int mtk_cipher_alg_register(struct mtk_cryp *cryp)
1246 if (ret) 1253 if (ret)
1247 goto err_record; 1254 goto err_record;
1248 1255
1249 /* Ring0 is use by encryption record */ 1256 ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
1250 ret = devm_request_irq(cryp->dev, cryp->irq[RING0], mtk_aes_enc_irq, 1257 0, "mtk-aes", cryp->aes[0]);
1251 IRQF_TRIGGER_LOW, "mtk-aes", cryp);
1252 if (ret) { 1258 if (ret) {
1253 dev_err(cryp->dev, "unable to request AES encryption irq.\n"); 1259 dev_err(cryp->dev, "unable to request AES irq.\n");
1254 goto err_res; 1260 goto err_res;
1255 } 1261 }
1256 1262
1257 /* Ring1 is use by decryption record */ 1263 ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
1258 ret = devm_request_irq(cryp->dev, cryp->irq[RING1], mtk_aes_dec_irq, 1264 0, "mtk-aes", cryp->aes[1]);
1259 IRQF_TRIGGER_LOW, "mtk-aes", cryp);
1260 if (ret) { 1265 if (ret) {
1261 dev_err(cryp->dev, "unable to request AES decryption irq.\n"); 1266 dev_err(cryp->dev, "unable to request AES irq.\n");
1262 goto err_res; 1267 goto err_res;
1263 } 1268 }
1264 1269
1265 /* Enable ring0 and ring1 interrupt */ 1270 /* Enable ring0 and ring1 interrupt */
1266 mtk_aes_write(cryp, AIC_ENABLE_SET(RING0), MTK_IRQ_RDR0); 1271 mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
1267 mtk_aes_write(cryp, AIC_ENABLE_SET(RING1), MTK_IRQ_RDR1); 1272 mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
1268 1273
1269 spin_lock(&mtk_aes.lock); 1274 spin_lock(&mtk_aes.lock);
1270 list_add_tail(&cryp->aes_list, &mtk_aes.dev_list); 1275 list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index a9c713d4c733..b6ecc288b540 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -334,7 +334,7 @@ static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
334 /* Enable the 4 rings for the packet engines. */ 334 /* Enable the 4 rings for the packet engines. */
335 mtk_desc_ring_link(cryp, 0xf); 335 mtk_desc_ring_link(cryp, 0xf);
336 336
337 for (i = 0; i < RING_MAX; i++) { 337 for (i = 0; i < MTK_RING_MAX; i++) {
338 mtk_cmd_desc_ring_setup(cryp, i, &cap); 338 mtk_cmd_desc_ring_setup(cryp, i, &cap);
339 mtk_res_desc_ring_setup(cryp, i, &cap); 339 mtk_res_desc_ring_setup(cryp, i, &cap);
340 } 340 }
@@ -359,7 +359,7 @@ static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
359{ 359{
360 u32 val; 360 u32 val;
361 361
362 if (hw == RING_MAX) 362 if (hw == MTK_RING_MAX)
363 val = readl(cryp->base + AIC_G_VERSION); 363 val = readl(cryp->base + AIC_G_VERSION);
364 else 364 else
365 val = readl(cryp->base + AIC_VERSION(hw)); 365 val = readl(cryp->base + AIC_VERSION(hw));
@@ -368,7 +368,7 @@ static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
368 if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12) 368 if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
369 return -ENXIO; 369 return -ENXIO;
370 370
371 if (hw == RING_MAX) 371 if (hw == MTK_RING_MAX)
372 val = readl(cryp->base + AIC_G_OPTIONS); 372 val = readl(cryp->base + AIC_G_OPTIONS);
373 else 373 else
374 val = readl(cryp->base + AIC_OPTIONS(hw)); 374 val = readl(cryp->base + AIC_OPTIONS(hw));
@@ -389,7 +389,7 @@ static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
389 return err; 389 return err;
390 390
391 /* Disable all interrupts and set initial configuration */ 391 /* Disable all interrupts and set initial configuration */
392 if (hw == RING_MAX) { 392 if (hw == MTK_RING_MAX) {
393 writel(0, cryp->base + AIC_G_ENABLE_CTRL); 393 writel(0, cryp->base + AIC_G_ENABLE_CTRL);
394 writel(0, cryp->base + AIC_G_POL_CTRL); 394 writel(0, cryp->base + AIC_G_POL_CTRL);
395 writel(0, cryp->base + AIC_G_TYPE_CTRL); 395 writel(0, cryp->base + AIC_G_TYPE_CTRL);
@@ -431,7 +431,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
431{ 431{
432 int i; 432 int i;
433 433
434 for (i = 0; i < RING_MAX; i++) { 434 for (i = 0; i < MTK_RING_MAX; i++) {
435 dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 435 dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
436 cryp->ring[i]->res_base, 436 cryp->ring[i]->res_base,
437 cryp->ring[i]->res_dma); 437 cryp->ring[i]->res_dma);
@@ -447,7 +447,7 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
447 struct mtk_ring **ring = cryp->ring; 447 struct mtk_ring **ring = cryp->ring;
448 int i, err = -ENOMEM; 448 int i, err = -ENOMEM;
449 449
450 for (i = 0; i < RING_MAX; i++) { 450 for (i = 0; i < MTK_RING_MAX; i++) {
451 ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); 451 ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
452 if (!ring[i]) 452 if (!ring[i])
453 goto err_cleanup; 453 goto err_cleanup;
@@ -465,6 +465,9 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
465 GFP_KERNEL); 465 GFP_KERNEL);
466 if (!ring[i]->res_base) 466 if (!ring[i]->res_base)
467 goto err_cleanup; 467 goto err_cleanup;
468
469 ring[i]->cmd_next = ring[i]->cmd_base;
470 ring[i]->res_next = ring[i]->res_base;
468 } 471 }
469 return 0; 472 return 0;
470 473
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h
index ed6d8717f7f4..303c152dc931 100644
--- a/drivers/crypto/mediatek/mtk-platform.h
+++ b/drivers/crypto/mediatek/mtk-platform.h
@@ -38,14 +38,14 @@
38 * Ring 2/3 are used by SHA. 38 * Ring 2/3 are used by SHA.
39 */ 39 */
40enum { 40enum {
41 RING0 = 0, 41 MTK_RING0,
42 RING1, 42 MTK_RING1,
43 RING2, 43 MTK_RING2,
44 RING3, 44 MTK_RING3,
45 RING_MAX, 45 MTK_RING_MAX
46}; 46};
47 47
48#define MTK_REC_NUM (RING_MAX / 2) 48#define MTK_REC_NUM (MTK_RING_MAX / 2)
49#define MTK_IRQ_NUM 5 49#define MTK_IRQ_NUM 5
50 50
51/** 51/**
@@ -84,11 +84,12 @@ struct mtk_desc {
84/** 84/**
85 * struct mtk_ring - Descriptor ring 85 * struct mtk_ring - Descriptor ring
86 * @cmd_base: pointer to command descriptor ring base 86 * @cmd_base: pointer to command descriptor ring base
87 * @cmd_next: pointer to the next command descriptor
87 * @cmd_dma: DMA address of command descriptor ring 88 * @cmd_dma: DMA address of command descriptor ring
88 * @cmd_pos: current position in the command descriptor ring
89 * @res_base: pointer to result descriptor ring base 89 * @res_base: pointer to result descriptor ring base
90 * @res_next: pointer to the next result descriptor
91 * @res_prev: pointer to the previous result descriptor
90 * @res_dma: DMA address of result descriptor ring 92 * @res_dma: DMA address of result descriptor ring
91 * @res_pos: current position in the result descriptor ring
92 * 93 *
93 * A descriptor ring is a circular buffer that is used to manage 94 * A descriptor ring is a circular buffer that is used to manage
94 * one or more descriptors. There are two type of descriptor rings; 95 * one or more descriptors. There are two type of descriptor rings;
@@ -96,11 +97,12 @@ struct mtk_desc {
96 */ 97 */
97struct mtk_ring { 98struct mtk_ring {
98 struct mtk_desc *cmd_base; 99 struct mtk_desc *cmd_base;
100 struct mtk_desc *cmd_next;
99 dma_addr_t cmd_dma; 101 dma_addr_t cmd_dma;
100 u32 cmd_pos;
101 struct mtk_desc *res_base; 102 struct mtk_desc *res_base;
103 struct mtk_desc *res_next;
104 struct mtk_desc *res_prev;
102 dma_addr_t res_dma; 105 dma_addr_t res_dma;
103 u32 res_pos;
104}; 106};
105 107
106/** 108/**
@@ -125,9 +127,11 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
125 127
126/** 128/**
127 * struct mtk_aes_rec - AES operation record 129 * struct mtk_aes_rec - AES operation record
130 * @cryp: pointer to Cryptographic device
128 * @queue: crypto request queue 131 * @queue: crypto request queue
129 * @areq: pointer to async request 132 * @areq: pointer to async request
130 * @task: the tasklet is use in AES interrupt 133 * @done_task: the tasklet used in the AES interrupt handler
 134 * @queue_task: the tasklet used to dequeue requests
131 * @ctx: pointer to current context 135 * @ctx: pointer to current context
132 * @src: the structure that holds source sg list info 136 * @src: the structure that holds source sg list info
133 * @dst: the structure that holds destination sg list info 137 * @dst: the structure that holds destination sg list info
@@ -136,16 +140,18 @@ typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
136 * @resume: pointer to resume function 140 * @resume: pointer to resume function
137 * @total: request buffer length 141 * @total: request buffer length
138 * @buf: pointer to page buffer 142 * @buf: pointer to page buffer
139 * @id: record identification 143 * @id: the ring used by this record
140 * @flags: it's describing AES operation state 144 * @flags: describes the AES operation state
141 * @lock: the async queue lock 145 * @lock: the async queue lock
142 * 146 *
143 * Structure used to record AES execution state. 147 * Structure used to record AES execution state.
144 */ 148 */
145struct mtk_aes_rec { 149struct mtk_aes_rec {
150 struct mtk_cryp *cryp;
146 struct crypto_queue queue; 151 struct crypto_queue queue;
147 struct crypto_async_request *areq; 152 struct crypto_async_request *areq;
148 struct tasklet_struct task; 153 struct tasklet_struct done_task;
154 struct tasklet_struct queue_task;
149 struct mtk_aes_base_ctx *ctx; 155 struct mtk_aes_base_ctx *ctx;
150 struct mtk_aes_dma src; 156 struct mtk_aes_dma src;
151 struct mtk_aes_dma dst; 157 struct mtk_aes_dma dst;
@@ -166,19 +172,23 @@ struct mtk_aes_rec {
166 172
167/** 173/**
168 * struct mtk_sha_rec - SHA operation record 174 * struct mtk_sha_rec - SHA operation record
175 * @cryp: pointer to Cryptographic device
169 * @queue: crypto request queue 176 * @queue: crypto request queue
170 * @req: pointer to ahash request 177 * @req: pointer to ahash request
171 * @task: the tasklet is use in SHA interrupt 178 * @done_task: the tasklet used in the SHA interrupt handler
172 * @id: record identification 179 * @queue_task: the tasklet used to dequeue requests
 180 * @id: the ring used by this record
173 * @flags: it's describing SHA operation state 181 * @flags: describes the SHA operation state
174 * @lock: the ablkcipher queue lock 182 * @lock: the async queue lock
175 * 183 *
176 * Structure used to record SHA execution state. 184 * Structure used to record SHA execution state.
177 */ 185 */
178struct mtk_sha_rec { 186struct mtk_sha_rec {
187 struct mtk_cryp *cryp;
179 struct crypto_queue queue; 188 struct crypto_queue queue;
180 struct ahash_request *req; 189 struct ahash_request *req;
181 struct tasklet_struct task; 190 struct tasklet_struct done_task;
191 struct tasklet_struct queue_task;
182 192
183 u8 id; 193 u8 id;
184 unsigned long flags; 194 unsigned long flags;
@@ -193,13 +203,11 @@ struct mtk_sha_rec {
193 * @clk_ethif: pointer to ethif clock 203 * @clk_ethif: pointer to ethif clock
194 * @clk_cryp: pointer to crypto clock 204 * @clk_cryp: pointer to crypto clock
195 * @irq: global system and rings IRQ 205 * @irq: global system and rings IRQ
196 * @ring: pointer to execution state of AES 206 * @ring: pointer to descriptor rings
197 * @aes: pointer to execution state of SHA 207 * @aes: pointer to operation record of AES
198 * @sha: each execution record map to a ring 208 * @sha: pointer to operation record of SHA
199 * @aes_list: device list of AES 209 * @aes_list: device list of AES
200 * @sha_list: device list of SHA 210 * @sha_list: device list of SHA
201 * @tmp: pointer to temporary buffer for internal use
202 * @tmp_dma: DMA address of temporary buffer
203 * @rec: used to select the SHA record for a tfm 211 * @rec: used to select the SHA record for a tfm
204 * 212 *
205 * Structure storing cryptographic device information. 213 * Structure storing cryptographic device information.
@@ -211,15 +219,13 @@ struct mtk_cryp {
211 struct clk *clk_cryp; 219 struct clk *clk_cryp;
212 int irq[MTK_IRQ_NUM]; 220 int irq[MTK_IRQ_NUM];
213 221
214 struct mtk_ring *ring[RING_MAX]; 222 struct mtk_ring *ring[MTK_RING_MAX];
215 struct mtk_aes_rec *aes[MTK_REC_NUM]; 223 struct mtk_aes_rec *aes[MTK_REC_NUM];
216 struct mtk_sha_rec *sha[MTK_REC_NUM]; 224 struct mtk_sha_rec *sha[MTK_REC_NUM];
217 225
218 struct list_head aes_list; 226 struct list_head aes_list;
219 struct list_head sha_list; 227 struct list_head sha_list;
220 228
221 void *tmp;
222 dma_addr_t tmp_dma;
223 bool rec; 229 bool rec;
224}; 230};
225 231
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 55e3805fba07..2226f12d1c7a 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -17,13 +17,13 @@
17 17
18#define SHA_ALIGN_MSK (sizeof(u32) - 1) 18#define SHA_ALIGN_MSK (sizeof(u32) - 1)
19#define SHA_QUEUE_SIZE 512 19#define SHA_QUEUE_SIZE 512
20#define SHA_TMP_BUF_SIZE 512
21#define SHA_BUF_SIZE ((u32)PAGE_SIZE) 20#define SHA_BUF_SIZE ((u32)PAGE_SIZE)
22 21
23#define SHA_OP_UPDATE 1 22#define SHA_OP_UPDATE 1
24#define SHA_OP_FINAL 2 23#define SHA_OP_FINAL 2
25 24
26#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0)) 25#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0))
26#define SHA_MAX_DIGEST_BUF_SIZE 32
27 27
28/* SHA command token */ 28/* SHA command token */
29#define SHA_CT_SIZE 5 29#define SHA_CT_SIZE 5
@@ -34,7 +34,6 @@
34 34
35/* SHA transform information */ 35/* SHA transform information */
36#define SHA_TFM_HASH cpu_to_le32(0x2 << 0) 36#define SHA_TFM_HASH cpu_to_le32(0x2 << 0)
37#define SHA_TFM_INNER_DIG cpu_to_le32(0x1 << 21)
38#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8) 37#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8)
39#define SHA_TFM_START cpu_to_le32(0x1 << 4) 38#define SHA_TFM_START cpu_to_le32(0x1 << 4)
40#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5) 39#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5)
@@ -61,31 +60,17 @@
61#define SHA_FLAGS_PAD BIT(10) 60#define SHA_FLAGS_PAD BIT(10)
62 61
63/** 62/**
64 * mtk_sha_ct is a set of hardware instructions(command token) 63 * mtk_sha_info - hardware information of SHA
65 * that are used to control engine's processing flow of SHA, 64 * @ctrl: command control, the first two words of the transform state
66 * and it contains the first two words of transform state. 65 * @cmd: command token, hardware instructions
 66 * @tfm: transform state of the hash algorithm
 67 * @digest: buffer that stores the computed digest
67 */ 68 */
68struct mtk_sha_ct { 69struct mtk_sha_info {
69 __le32 ctrl[2]; 70 __le32 ctrl[2];
70 __le32 cmd[3]; 71 __le32 cmd[3];
71}; 72 __le32 tfm[2];
72 73 __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
73/**
74 * mtk_sha_tfm is used to define SHA transform state
75 * and store result digest that produced by engine.
76 */
77struct mtk_sha_tfm {
78 __le32 ctrl[2];
79 __le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)];
80};
81
82/**
83 * mtk_sha_info consists of command token and transform state
84 * of SHA, its role is similar to mtk_aes_info.
85 */
86struct mtk_sha_info {
87 struct mtk_sha_ct ct;
88 struct mtk_sha_tfm tfm;
89}; 74};
90 75
91struct mtk_sha_reqctx { 76struct mtk_sha_reqctx {
@@ -94,7 +79,6 @@ struct mtk_sha_reqctx {
94 unsigned long op; 79 unsigned long op;
95 80
96 u64 digcnt; 81 u64 digcnt;
97 bool start;
98 size_t bufcnt; 82 size_t bufcnt;
99 dma_addr_t dma_addr; 83 dma_addr_t dma_addr;
100 84
@@ -153,6 +137,21 @@ static inline void mtk_sha_write(struct mtk_cryp *cryp,
153 writel_relaxed(value, cryp->base + offset); 137 writel_relaxed(value, cryp->base + offset);
154} 138}
155 139
140static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
141 struct mtk_desc **cmd_curr,
142 struct mtk_desc **res_curr,
143 int *count)
144{
145 *cmd_curr = ring->cmd_next++;
146 *res_curr = ring->res_next++;
147 (*count)++;
148
149 if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
150 ring->cmd_next = ring->cmd_base;
151 ring->res_next = ring->res_base;
152 }
153}
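
mtk_sha_ring_shift() hands out command/result descriptors strictly in pairs and counts them, so the caller can announce the exact number through the CDR/RDR prep-count registers. A userspace sketch of the pairing and counting, with an assumed ring size:

#include <stdio.h>

#define MTK_DESC_NUM 8   /* assumed ring size for illustration */

struct mtk_desc { unsigned int hdr; };

struct mtk_ring {
	struct mtk_desc cmd[MTK_DESC_NUM], res[MTK_DESC_NUM];
	struct mtk_desc *cmd_next, *res_next;
};

static void ring_shift(struct mtk_ring *r, struct mtk_desc **cmd,
		       struct mtk_desc **res, int *count)
{
	*cmd = r->cmd_next++;
	*res = r->res_next++;
	(*count)++;

	/* both rings are the same size, so one wrap test covers the pair */
	if (r->cmd_next == r->cmd + MTK_DESC_NUM) {
		r->cmd_next = r->cmd;
		r->res_next = r->res;
	}
}

int main(void)
{
	struct mtk_ring r = { .cmd_next = r.cmd, .res_next = r.res };
	struct mtk_desc *cmd, *res;
	int count = 0;

	ring_shift(&r, &cmd, &res, &count);   /* scatterlist segment */
	ring_shift(&r, &cmd, &res, &count);   /* buffered tail, if any */
	printf("MTK_DESC_CNT(%d)\n", count);  /* -> 2 */
	return 0;
}
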
154
156static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx) 155static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
157{ 156{
158 struct mtk_cryp *cryp = NULL; 157 struct mtk_cryp *cryp = NULL;
@@ -251,7 +250,9 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
251 bits[1] = cpu_to_be64(size << 3); 250 bits[1] = cpu_to_be64(size << 3);
252 bits[0] = cpu_to_be64(size >> 61); 251 bits[0] = cpu_to_be64(size >> 61);
253 252
254 if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) { 253 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
254 case SHA_FLAGS_SHA384:
255 case SHA_FLAGS_SHA512:
255 index = ctx->bufcnt & 0x7f; 256 index = ctx->bufcnt & 0x7f;
256 padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); 257 padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
257 *(ctx->buffer + ctx->bufcnt) = 0x80; 258 *(ctx->buffer + ctx->bufcnt) = 0x80;
@@ -259,7 +260,9 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
259 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); 260 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
260 ctx->bufcnt += padlen + 16; 261 ctx->bufcnt += padlen + 16;
261 ctx->flags |= SHA_FLAGS_PAD; 262 ctx->flags |= SHA_FLAGS_PAD;
262 } else { 263 break;
264
265 default:
263 index = ctx->bufcnt & 0x3f; 266 index = ctx->bufcnt & 0x3f;
264 padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); 267 padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
265 *(ctx->buffer + ctx->bufcnt) = 0x80; 268 *(ctx->buffer + ctx->bufcnt) = 0x80;
@@ -267,36 +270,35 @@ static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
267 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); 270 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
268 ctx->bufcnt += padlen + 8; 271 ctx->bufcnt += padlen + 8;
269 ctx->flags |= SHA_FLAGS_PAD; 272 ctx->flags |= SHA_FLAGS_PAD;
273 break;
270 } 274 }
271} 275}
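
The switch makes the two padding families explicit: SHA-384/512 pad within a 128-byte block and reserve 16 bytes for the bit length, while the others pad within 64 bytes with an 8-byte length. A runnable sketch of just the padlen arithmetic from this function:

#include <stdint.h>
#include <stdio.h>

/* Computes the number of 0x80/0x00 padding bytes, excluding the
 * trailing length field, exactly as mtk_sha_fill_padding() does.
 */
static uint32_t padlen(uint64_t bufcnt, int sha512_family)
{
	if (sha512_family) {
		uint32_t index = bufcnt & 0x7f;
		return (index < 112) ? (112 - index) : ((128 + 112) - index);
	} else {
		uint32_t index = bufcnt & 0x3f;
		return (index < 56) ? (56 - index) : ((64 + 56) - index);
	}
}

int main(void)
{
	printf("%u %u\n", padlen(0, 0), padlen(55, 0));   /* 56 1 */
	printf("%u %u\n", padlen(112, 1), padlen(0, 1));  /* 128 112 */
	return 0;
}
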
272 276
273/* Initialize basic transform information of SHA */ 277/* Initialize basic transform information of SHA */
274static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx) 278static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
275{ 279{
276 struct mtk_sha_ct *ct = &ctx->info.ct; 280 struct mtk_sha_info *info = &ctx->info;
277 struct mtk_sha_tfm *tfm = &ctx->info.tfm;
278 281
279 ctx->ct_hdr = SHA_CT_CTRL_HDR; 282 ctx->ct_hdr = SHA_CT_CTRL_HDR;
280 ctx->ct_size = SHA_CT_SIZE; 283 ctx->ct_size = SHA_CT_SIZE;
281 284
282 tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG | 285 info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
283 SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
284 286
285 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { 287 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
286 case SHA_FLAGS_SHA1: 288 case SHA_FLAGS_SHA1:
287 tfm->ctrl[0] |= SHA_TFM_SHA1; 289 info->tfm[0] |= SHA_TFM_SHA1;
288 break; 290 break;
289 case SHA_FLAGS_SHA224: 291 case SHA_FLAGS_SHA224:
290 tfm->ctrl[0] |= SHA_TFM_SHA224; 292 info->tfm[0] |= SHA_TFM_SHA224;
291 break; 293 break;
292 case SHA_FLAGS_SHA256: 294 case SHA_FLAGS_SHA256:
293 tfm->ctrl[0] |= SHA_TFM_SHA256; 295 info->tfm[0] |= SHA_TFM_SHA256;
294 break; 296 break;
295 case SHA_FLAGS_SHA384: 297 case SHA_FLAGS_SHA384:
296 tfm->ctrl[0] |= SHA_TFM_SHA384; 298 info->tfm[0] |= SHA_TFM_SHA384;
297 break; 299 break;
298 case SHA_FLAGS_SHA512: 300 case SHA_FLAGS_SHA512:
299 tfm->ctrl[0] |= SHA_TFM_SHA512; 301 info->tfm[0] |= SHA_TFM_SHA512;
300 break; 302 break;
301 303
302 default: 304 default:
@@ -304,13 +306,13 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
304 return; 306 return;
305 } 307 }
306 308
307 tfm->ctrl[1] = SHA_TFM_HASH_STORE; 309 info->tfm[1] = SHA_TFM_HASH_STORE;
308 ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START; 310 info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
309 ct->ctrl[1] = tfm->ctrl[1]; 311 info->ctrl[1] = info->tfm[1];
310 312
311 ct->cmd[0] = SHA_CMD0; 313 info->cmd[0] = SHA_CMD0;
312 ct->cmd[1] = SHA_CMD1; 314 info->cmd[1] = SHA_CMD1;
313 ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); 315 info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
314} 316}
315 317
316/* 318/*
@@ -319,23 +321,21 @@ static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
319 */ 321 */
320static int mtk_sha_info_update(struct mtk_cryp *cryp, 322static int mtk_sha_info_update(struct mtk_cryp *cryp,
321 struct mtk_sha_rec *sha, 323 struct mtk_sha_rec *sha,
322 size_t len) 324 size_t len1, size_t len2)
323{ 325{
324 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 326 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
325 struct mtk_sha_info *info = &ctx->info; 327 struct mtk_sha_info *info = &ctx->info;
326 struct mtk_sha_ct *ct = &info->ct;
327
328 if (ctx->start)
329 ctx->start = false;
330 else
331 ct->ctrl[0] &= ~SHA_TFM_START;
332 328
333 ctx->ct_hdr &= ~SHA_DATA_LEN_MSK; 329 ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
334 ctx->ct_hdr |= cpu_to_le32(len); 330 ctx->ct_hdr |= cpu_to_le32(len1 + len2);
335 ct->cmd[0] &= ~SHA_DATA_LEN_MSK; 331 info->cmd[0] &= ~SHA_DATA_LEN_MSK;
336 ct->cmd[0] |= cpu_to_le32(len); 332 info->cmd[0] |= cpu_to_le32(len1 + len2);
333
334 /* Setting SHA_TFM_START only for the first iteration */
335 if (ctx->digcnt)
336 info->ctrl[0] &= ~SHA_TFM_START;
337 337
338 ctx->digcnt += len; 338 ctx->digcnt += len1;
339 339
340 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), 340 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
341 DMA_BIDIRECTIONAL); 341 DMA_BIDIRECTIONAL);
@@ -343,7 +343,8 @@ static int mtk_sha_info_update(struct mtk_cryp *cryp,
343 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); 343 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
344 return -EINVAL; 344 return -EINVAL;
345 } 345 }
346 ctx->tfm_dma = ctx->ct_dma + sizeof(*ct); 346
347 ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
347 348
348 return 0; 349 return 0;
349} 350}
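
Both ct_hdr and cmd[0] carry the transfer byte count in their low bits, and SHA_DATA_LEN_MSK is pre-swapped like the AES masks, so splicing len1 + len2 in is a plain read-modify-write on the little-endian word. A userspace sketch; the fixed header bits are an assumed value:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define SHA_DATA_LEN_MSK htole32(GENMASK(16, 0))

int main(void)
{
	uint32_t ct_hdr = htole32(0x02220000); /* assumed fixed header bits */
	uint32_t len1 = 4096, len2 = 64;       /* sg data + buffered tail */

	ct_hdr &= ~SHA_DATA_LEN_MSK;           /* clear the old length */
	ct_hdr |= htole32(len1 + len2);        /* splice in the new one */
	printf("0x%08x\n", le32toh(ct_hdr));   /* 0x02221040 */
	return 0;
}
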
@@ -408,7 +409,6 @@ static int mtk_sha_init(struct ahash_request *req)
408 ctx->bufcnt = 0; 409 ctx->bufcnt = 0;
409 ctx->digcnt = 0; 410 ctx->digcnt = 0;
410 ctx->buffer = tctx->buf; 411 ctx->buffer = tctx->buf;
411 ctx->start = true;
412 412
413 if (tctx->flags & SHA_FLAGS_HMAC) { 413 if (tctx->flags & SHA_FLAGS_HMAC) {
414 struct mtk_sha_hmac_ctx *bctx = tctx->base; 414 struct mtk_sha_hmac_ctx *bctx = tctx->base;
@@ -422,89 +422,39 @@ static int mtk_sha_init(struct ahash_request *req)
422} 422}
423 423
424static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha, 424static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
425 dma_addr_t addr, size_t len) 425 dma_addr_t addr1, size_t len1,
426 dma_addr_t addr2, size_t len2)
426{ 427{
427 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 428 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
428 struct mtk_ring *ring = cryp->ring[sha->id]; 429 struct mtk_ring *ring = cryp->ring[sha->id];
429 struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos; 430 struct mtk_desc *cmd, *res;
430 struct mtk_desc *res = ring->res_base + ring->res_pos; 431 int err, count = 0;
431 int err;
432
433 err = mtk_sha_info_update(cryp, sha, len);
434 if (err)
435 return err;
436
437 /* Fill in the command/result descriptors */
438 res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
439 res->buf = cpu_to_le32(cryp->tmp_dma);
440
441 cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
442 MTK_DESC_CT_LEN(ctx->ct_size);
443
444 cmd->buf = cpu_to_le32(addr);
445 cmd->ct = cpu_to_le32(ctx->ct_dma);
446 cmd->ct_hdr = ctx->ct_hdr;
447 cmd->tfm = cpu_to_le32(ctx->tfm_dma);
448
449 if (++ring->cmd_pos == MTK_DESC_NUM)
450 ring->cmd_pos = 0;
451
452 ring->res_pos = ring->cmd_pos;
453 /*
454 * Make sure that all changes to the DMA ring are done before we
455 * start engine.
456 */
457 wmb();
458 /* Start DMA transfer */
459 mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
460 mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
461
462 return -EINPROGRESS;
463}
464
465static int mtk_sha_xmit2(struct mtk_cryp *cryp,
466 struct mtk_sha_rec *sha,
467 struct mtk_sha_reqctx *ctx,
468 size_t len1, size_t len2)
469{
470 struct mtk_ring *ring = cryp->ring[sha->id];
471 struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
472 struct mtk_desc *res = ring->res_base + ring->res_pos;
473 int err;
474 432
475 err = mtk_sha_info_update(cryp, sha, len1 + len2); 433 err = mtk_sha_info_update(cryp, sha, len1, len2);
476 if (err) 434 if (err)
477 return err; 435 return err;
478 436
479 /* Fill in the command/result descriptors */ 437 /* Fill in the command/result descriptors */
480 res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST; 438 mtk_sha_ring_shift(ring, &cmd, &res, &count);
481 res->buf = cpu_to_le32(cryp->tmp_dma);
482 439
483 cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST | 440 res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
441 cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
484 MTK_DESC_CT_LEN(ctx->ct_size); 442 MTK_DESC_CT_LEN(ctx->ct_size);
485 cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg)); 443 cmd->buf = cpu_to_le32(addr1);
486 cmd->ct = cpu_to_le32(ctx->ct_dma); 444 cmd->ct = cpu_to_le32(ctx->ct_dma);
487 cmd->ct_hdr = ctx->ct_hdr; 445 cmd->ct_hdr = ctx->ct_hdr;
488 cmd->tfm = cpu_to_le32(ctx->tfm_dma); 446 cmd->tfm = cpu_to_le32(ctx->tfm_dma);
489 447
490 if (++ring->cmd_pos == MTK_DESC_NUM) 448 if (len2) {
491 ring->cmd_pos = 0; 449 mtk_sha_ring_shift(ring, &cmd, &res, &count);
492
493 ring->res_pos = ring->cmd_pos;
494
495 cmd = ring->cmd_base + ring->cmd_pos;
496 res = ring->res_base + ring->res_pos;
497
498 res->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
499 res->buf = cpu_to_le32(cryp->tmp_dma);
500 450
501 cmd->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST; 451 res->hdr = MTK_DESC_BUF_LEN(len2);
502 cmd->buf = cpu_to_le32(ctx->dma_addr); 452 cmd->hdr = MTK_DESC_BUF_LEN(len2);
503 453 cmd->buf = cpu_to_le32(addr2);
504 if (++ring->cmd_pos == MTK_DESC_NUM) 454 }
505 ring->cmd_pos = 0;
506 455
507 ring->res_pos = ring->cmd_pos; 456 cmd->hdr |= MTK_DESC_LAST;
457 res->hdr |= MTK_DESC_LAST;
508 458
509 /* 459 /*
510 * Make sure that all changes to the DMA ring are done before we 460 * Make sure that all changes to the DMA ring are done before we
@@ -512,8 +462,8 @@ static int mtk_sha_xmit2(struct mtk_cryp *cryp,
512 */ 462 */
513 wmb(); 463 wmb();
514 /* Start DMA transfer */ 464 /* Start DMA transfer */
515 mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2)); 465 mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
516 mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2)); 466 mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
517 467
518 return -EINPROGRESS; 468 return -EINPROGRESS;
519} 469}
@@ -532,7 +482,7 @@ static int mtk_sha_dma_map(struct mtk_cryp *cryp,
532 482
533 ctx->flags &= ~SHA_FLAGS_SG; 483 ctx->flags &= ~SHA_FLAGS_SG;
534 484
535 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count); 485 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
536} 486}
537 487
538static int mtk_sha_update_slow(struct mtk_cryp *cryp, 488static int mtk_sha_update_slow(struct mtk_cryp *cryp,
@@ -625,7 +575,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
625 575
626 if (len == 0) { 576 if (len == 0) {
627 ctx->flags &= ~SHA_FLAGS_SG; 577 ctx->flags &= ~SHA_FLAGS_SG;
628 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count); 578 return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
579 count, 0, 0);
629 580
630 } else { 581 } else {
631 ctx->sg = sg; 582 ctx->sg = sg;
@@ -635,7 +586,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
635 } 586 }
636 587
637 ctx->flags |= SHA_FLAGS_SG; 588 ctx->flags |= SHA_FLAGS_SG;
638 return mtk_sha_xmit2(cryp, sha, ctx, len, count); 589 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
590 len, ctx->dma_addr, count);
639 } 591 }
640 } 592 }
641 593
@@ -646,7 +598,8 @@ static int mtk_sha_update_start(struct mtk_cryp *cryp,
646 598
647 ctx->flags |= SHA_FLAGS_SG; 599 ctx->flags |= SHA_FLAGS_SG;
648 600
649 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len); 601 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
602 len, 0, 0);
650} 603}
651 604
652static int mtk_sha_final_req(struct mtk_cryp *cryp, 605static int mtk_sha_final_req(struct mtk_cryp *cryp,
@@ -668,7 +621,7 @@ static int mtk_sha_final_req(struct mtk_cryp *cryp,
668static int mtk_sha_finish(struct ahash_request *req) 621static int mtk_sha_finish(struct ahash_request *req)
669{ 622{
670 struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 623 struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
671 u32 *digest = ctx->info.tfm.digest; 624 __le32 *digest = ctx->info.digest;
672 u32 *result = (u32 *)req->result; 625 u32 *result = (u32 *)req->result;
673 int i; 626 int i;
674 627
@@ -694,7 +647,7 @@ static void mtk_sha_finish_req(struct mtk_cryp *cryp,
694 sha->req->base.complete(&sha->req->base, err); 647 sha->req->base.complete(&sha->req->base, err);
695 648
696 /* Handle new request */ 649 /* Handle new request */
697 mtk_sha_handle_queue(cryp, sha->id - RING2, NULL); 650 tasklet_schedule(&sha->queue_task);
698} 651}
699 652
700static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id, 653static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
@@ -1216,60 +1169,38 @@ static struct ahash_alg algs_sha384_sha512[] = {
1216}, 1169},
1217}; 1170};
1218 1171
1219static void mtk_sha_task0(unsigned long data) 1172static void mtk_sha_queue_task(unsigned long data)
1220{ 1173{
1221 struct mtk_cryp *cryp = (struct mtk_cryp *)data; 1174 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
1222 struct mtk_sha_rec *sha = cryp->sha[0];
1223 1175
1224 mtk_sha_unmap(cryp, sha); 1176 mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
1225 mtk_sha_complete(cryp, sha);
1226} 1177}
1227 1178
1228static void mtk_sha_task1(unsigned long data) 1179static void mtk_sha_done_task(unsigned long data)
1229{ 1180{
1230 struct mtk_cryp *cryp = (struct mtk_cryp *)data; 1181 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
1231 struct mtk_sha_rec *sha = cryp->sha[1]; 1182 struct mtk_cryp *cryp = sha->cryp;
1232 1183
1233 mtk_sha_unmap(cryp, sha); 1184 mtk_sha_unmap(cryp, sha);
1234 mtk_sha_complete(cryp, sha); 1185 mtk_sha_complete(cryp, sha);
1235} 1186}
1236 1187
1237static irqreturn_t mtk_sha_ring2_irq(int irq, void *dev_id) 1188static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
1238{ 1189{
1239 struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id; 1190 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
1240 struct mtk_sha_rec *sha = cryp->sha[0]; 1191 struct mtk_cryp *cryp = sha->cryp;
1241 u32 val = mtk_sha_read(cryp, RDR_STAT(RING2)); 1192 u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
1242 1193
1243 mtk_sha_write(cryp, RDR_STAT(RING2), val); 1194 mtk_sha_write(cryp, RDR_STAT(sha->id), val);
1244 1195
1245 if (likely((SHA_FLAGS_BUSY & sha->flags))) { 1196 if (likely((SHA_FLAGS_BUSY & sha->flags))) {
1246 mtk_sha_write(cryp, RDR_PROC_COUNT(RING2), MTK_CNT_RST); 1197 mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
1247 mtk_sha_write(cryp, RDR_THRESH(RING2), 1198 mtk_sha_write(cryp, RDR_THRESH(sha->id),
1248 MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE); 1199 MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1249 1200
1250 tasklet_schedule(&sha->task); 1201 tasklet_schedule(&sha->done_task);
1251 } else { 1202 } else {
1252 dev_warn(cryp->dev, "AES interrupt when no active requests.\n"); 1203 dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
1253 }
1254 return IRQ_HANDLED;
1255}
1256
1257static irqreturn_t mtk_sha_ring3_irq(int irq, void *dev_id)
1258{
1259 struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
1260 struct mtk_sha_rec *sha = cryp->sha[1];
1261 u32 val = mtk_sha_read(cryp, RDR_STAT(RING3));
1262
1263 mtk_sha_write(cryp, RDR_STAT(RING3), val);
1264
1265 if (likely((SHA_FLAGS_BUSY & sha->flags))) {
1266 mtk_sha_write(cryp, RDR_PROC_COUNT(RING3), MTK_CNT_RST);
1267 mtk_sha_write(cryp, RDR_THRESH(RING3),
1268 MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1269
1270 tasklet_schedule(&sha->task);
1271 } else {
1272 dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
1273 } 1204 }
1274 return IRQ_HANDLED; 1205 return IRQ_HANDLED;
1275} 1206}
@@ -1288,14 +1219,20 @@ static int mtk_sha_record_init(struct mtk_cryp *cryp)
1288 if (!sha[i]) 1219 if (!sha[i])
1289 goto err_cleanup; 1220 goto err_cleanup;
1290 1221
1291 sha[i]->id = i + RING2; 1222 sha[i]->cryp = cryp;
1292 1223
1293 spin_lock_init(&sha[i]->lock); 1224 spin_lock_init(&sha[i]->lock);
1294 crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE); 1225 crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
1226
1227 tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
1228 (unsigned long)sha[i]);
1229 tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
1230 (unsigned long)sha[i]);
1295 } 1231 }
1296 1232
1297 tasklet_init(&sha[0]->task, mtk_sha_task0, (unsigned long)cryp); 1233 /* Link to ring2 and ring3 respectively */
1298 tasklet_init(&sha[1]->task, mtk_sha_task1, (unsigned long)cryp); 1234 sha[0]->id = MTK_RING2;
1235 sha[1]->id = MTK_RING3;
1299 1236
1300 cryp->rec = 1; 1237 cryp->rec = 1;
1301 1238
@@ -1312,7 +1249,9 @@ static void mtk_sha_record_free(struct mtk_cryp *cryp)
1312 int i; 1249 int i;
1313 1250
1314 for (i = 0; i < MTK_REC_NUM; i++) { 1251 for (i = 0; i < MTK_REC_NUM; i++) {
1315 tasklet_kill(&cryp->sha[i]->task); 1252 tasklet_kill(&cryp->sha[i]->done_task);
1253 tasklet_kill(&cryp->sha[i]->queue_task);
1254
1316 kfree(cryp->sha[i]); 1255 kfree(cryp->sha[i]);
1317 } 1256 }
1318} 1257}
@@ -1368,35 +1307,23 @@ int mtk_hash_alg_register(struct mtk_cryp *cryp)
1368 if (err) 1307 if (err)
1369 goto err_record; 1308 goto err_record;
1370 1309
1371 /* Ring2 is used by SHA record0 */ 1310 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
1372 err = devm_request_irq(cryp->dev, cryp->irq[RING2], 1311 0, "mtk-sha", cryp->sha[0]);
1373 mtk_sha_ring2_irq, IRQF_TRIGGER_LOW,
1374 "mtk-sha", cryp);
1375 if (err) { 1312 if (err) {
1376 dev_err(cryp->dev, "unable to request sha irq0.\n"); 1313 dev_err(cryp->dev, "unable to request sha irq0.\n");
1377 goto err_res; 1314 goto err_res;
1378 } 1315 }
1379 1316
1380 /* Ring3 is used by SHA record1 */ 1317 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
1381 err = devm_request_irq(cryp->dev, cryp->irq[RING3], 1318 0, "mtk-sha", cryp->sha[1]);
1382 mtk_sha_ring3_irq, IRQF_TRIGGER_LOW,
1383 "mtk-sha", cryp);
1384 if (err) { 1319 if (err) {
1385 dev_err(cryp->dev, "unable to request sha irq1.\n"); 1320 dev_err(cryp->dev, "unable to request sha irq1.\n");
1386 goto err_res; 1321 goto err_res;
1387 } 1322 }
1388 1323
1389 /* Enable ring2 and ring3 interrupt for hash */ 1324 /* Enable ring2 and ring3 interrupt for hash */
1390 mtk_sha_write(cryp, AIC_ENABLE_SET(RING2), MTK_IRQ_RDR2); 1325 mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
1391 mtk_sha_write(cryp, AIC_ENABLE_SET(RING3), MTK_IRQ_RDR3); 1326 mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
1392
1393 cryp->tmp = dma_alloc_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
1394 &cryp->tmp_dma, GFP_KERNEL);
1395 if (!cryp->tmp) {
1396 dev_err(cryp->dev, "unable to allocate tmp buffer.\n");
1397 err = -EINVAL;
1398 goto err_res;
1399 }
1400 1327
1401 spin_lock(&mtk_sha.lock); 1328 spin_lock(&mtk_sha.lock);
1402 list_add_tail(&cryp->sha_list, &mtk_sha.dev_list); 1329 list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
@@ -1412,8 +1339,6 @@ err_algs:
1412 spin_lock(&mtk_sha.lock); 1339 spin_lock(&mtk_sha.lock);
1413 list_del(&cryp->sha_list); 1340 list_del(&cryp->sha_list);
1414 spin_unlock(&mtk_sha.lock); 1341 spin_unlock(&mtk_sha.lock);
1415 dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
1416 cryp->tmp, cryp->tmp_dma);
1417err_res: 1342err_res:
1418 mtk_sha_record_free(cryp); 1343 mtk_sha_record_free(cryp);
1419err_record: 1344err_record:
@@ -1429,7 +1354,5 @@ void mtk_hash_alg_release(struct mtk_cryp *cryp)
1429 spin_unlock(&mtk_sha.lock); 1354 spin_unlock(&mtk_sha.lock);
1430 1355
1431 mtk_sha_unregister_algs(); 1356 mtk_sha_unregister_algs();
1432 dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
1433 cryp->tmp, cryp->tmp_dma);
1434 mtk_sha_record_free(cryp); 1357 mtk_sha_record_free(cryp);
1435} 1358}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 0d35dca2e925..2aab80bc241f 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -491,7 +491,7 @@ static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
491 ctx->g2 = false; 491 ctx->g2 = false;
492} 492}
493 493
494static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf, 494static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
495 unsigned int len) 495 unsigned int len)
496{ 496{
497 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); 497 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1b9da3dc799b..7ac657f46d15 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -170,6 +170,32 @@ struct s5p_aes_ctx {
170 int keylen; 170 int keylen;
171}; 171};
172 172
173/**
174 * struct s5p_aes_dev - Crypto device state container
175 * @dev: Associated device
176 * @clk: Clock for accessing hardware
177 * @ioaddr: Mapped IO memory region
 178 * @aes_ioaddr: Per-variant offset for AES block IO memory
179 * @irq_fc: Feed control interrupt line
180 * @req: Crypto request currently handled by the device
181 * @ctx: Configuration for currently handled crypto request
182 * @sg_src: Scatter list with source data for currently handled block
183 * in device. This is DMA-mapped into device.
184 * @sg_dst: Scatter list with destination data for currently handled block
185 * in device. This is DMA-mapped into device.
186 * @sg_src_cpy: In case of unaligned access, copied scatter list
187 * with source data.
188 * @sg_dst_cpy: In case of unaligned access, copied scatter list
189 * with destination data.
 190 * @tasklet: New request scheduling job
191 * @queue: Crypto queue
192 * @busy: Indicates whether the device is currently handling some request
193 * thus it uses some of the fields from this state, like:
194 * req, ctx, sg_src/dst (and copies). This essentially
195 * protects against concurrent access to these fields.
196 * @lock: Lock for protecting both access to device hardware registers
197 * and fields related to current request (including the busy field).
198 */
173struct s5p_aes_dev { 199struct s5p_aes_dev {
174 struct device *dev; 200 struct device *dev;
175 struct clk *clk; 201 struct clk *clk;
@@ -182,7 +208,6 @@ struct s5p_aes_dev {
182 struct scatterlist *sg_src; 208 struct scatterlist *sg_src;
183 struct scatterlist *sg_dst; 209 struct scatterlist *sg_dst;
184 210
185 /* In case of unaligned access: */
186 struct scatterlist *sg_src_cpy; 211 struct scatterlist *sg_src_cpy;
187 struct scatterlist *sg_dst_cpy; 212 struct scatterlist *sg_dst_cpy;
188 213
@@ -190,8 +215,6 @@ struct s5p_aes_dev {
190 struct crypto_queue queue; 215 struct crypto_queue queue;
191 bool busy; 216 bool busy;
192 spinlock_t lock; 217 spinlock_t lock;
193
194 struct samsung_aes_variant *variant;
195}; 218};
196 219
197static struct s5p_aes_dev *s5p_dev; 220static struct s5p_aes_dev *s5p_dev;
@@ -287,7 +310,6 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
287static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) 310static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
288{ 311{
289 dev->req->base.complete(&dev->req->base, err); 312 dev->req->base.complete(&dev->req->base, err);
290 dev->busy = false;
291} 313}
292 314
293static void s5p_unset_outdata(struct s5p_aes_dev *dev) 315static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -462,7 +484,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
462 spin_unlock_irqrestore(&dev->lock, flags); 484 spin_unlock_irqrestore(&dev->lock, flags);
463 485
464 s5p_aes_complete(dev, 0); 486 s5p_aes_complete(dev, 0);
465 dev->busy = true; 487 /* Device is still busy */
466 tasklet_schedule(&dev->tasklet); 488 tasklet_schedule(&dev->tasklet);
467 } else { 489 } else {
468 /* 490 /*
@@ -483,6 +505,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
483 505
484error: 506error:
485 s5p_sg_done(dev); 507 s5p_sg_done(dev);
508 dev->busy = false;
486 spin_unlock_irqrestore(&dev->lock, flags); 509 spin_unlock_irqrestore(&dev->lock, flags);
487 s5p_aes_complete(dev, err); 510 s5p_aes_complete(dev, err);
488 511
@@ -634,6 +657,7 @@ outdata_error:
634 657
635indata_error: 658indata_error:
636 s5p_sg_done(dev); 659 s5p_sg_done(dev);
660 dev->busy = false;
637 spin_unlock_irqrestore(&dev->lock, flags); 661 spin_unlock_irqrestore(&dev->lock, flags);
638 s5p_aes_complete(dev, err); 662 s5p_aes_complete(dev, err);
639} 663}
@@ -851,7 +875,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
851 } 875 }
852 876
853 pdata->busy = false; 877 pdata->busy = false;
854 pdata->variant = variant;
855 pdata->dev = dev; 878 pdata->dev = dev;
856 platform_set_drvdata(pdev, pdata); 879 platform_set_drvdata(pdev, pdata);
857 s5p_dev = pdata; 880 s5p_dev = pdata;
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
new file mode 100644
index 000000000000..09b4ec87c212
--- /dev/null
+++ b/drivers/crypto/stm32/Kconfig
@@ -0,0 +1,7 @@
1config CRYPTO_DEV_STM32
2 tristate "Support for STM32 crypto accelerators"
3 depends on ARCH_STM32
4 select CRYPTO_HASH
5 help
6 This enables support for the CRC32 hw accelerator which can be found
 7 on STMicroelectronics STM32 SoCs.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
new file mode 100644
index 000000000000..73b4c6e47f5f
--- /dev/null
+++ b/drivers/crypto/stm32/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o
2stm32_cryp-objs := stm32_crc32.o
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
new file mode 100644
index 000000000000..ec83b1e6bfe8
--- /dev/null
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2017
3 * Author: Fabien Dessenne <fabien.dessenne@st.com>
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <linux/bitrev.h>
8#include <linux/clk.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11
12#include <crypto/internal/hash.h>
13
14#include <asm/unaligned.h>
15
16#define DRIVER_NAME "stm32-crc32"
17#define CHKSUM_DIGEST_SIZE 4
18#define CHKSUM_BLOCK_SIZE 1
19
20/* Registers */
21#define CRC_DR 0x00000000
22#define CRC_CR 0x00000008
23#define CRC_INIT 0x00000010
24#define CRC_POL 0x00000014
25
26/* Registers values */
27#define CRC_CR_RESET BIT(0)
28#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
29#define CRC_INIT_DEFAULT 0xFFFFFFFF
30
31/* Polynomial reversed */
32#define POLY_CRC32 0xEDB88320
33#define POLY_CRC32C 0x82F63B78
34
35struct stm32_crc {
36 struct list_head list;
37 struct device *dev;
38 void __iomem *regs;
39 struct clk *clk;
40 u8 pending_data[sizeof(u32)];
41 size_t nb_pending_bytes;
42};
43
44struct stm32_crc_list {
45 struct list_head dev_list;
46 spinlock_t lock; /* protect dev_list */
47};
48
49static struct stm32_crc_list crc_list = {
50 .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
51 .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
52};
53
54struct stm32_crc_ctx {
55 u32 key;
56 u32 poly;
57};
58
59struct stm32_crc_desc_ctx {
60 u32 partial; /* crc32c: partial in first 4 bytes of that struct */
61 struct stm32_crc *crc;
62};
63
64static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
65{
66 struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
67
68 mctx->key = CRC_INIT_DEFAULT;
69 mctx->poly = POLY_CRC32;
70 return 0;
71}
72
73static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
74{
75 struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
76
77 mctx->key = CRC_INIT_DEFAULT;
78 mctx->poly = POLY_CRC32C;
79 return 0;
80}
81
82static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
83 unsigned int keylen)
84{
85 struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
86
87 if (keylen != sizeof(u32)) {
88 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
89 return -EINVAL;
90 }
91
92 mctx->key = get_unaligned_le32(key);
93 return 0;
94}
95
96static int stm32_crc_init(struct shash_desc *desc)
97{
98 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
99 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
100 struct stm32_crc *crc;
101
102 spin_lock_bh(&crc_list.lock);
103 list_for_each_entry(crc, &crc_list.dev_list, list) {
104 ctx->crc = crc;
105 break;
106 }
107 spin_unlock_bh(&crc_list.lock);
108
109 /* Reset, set key, poly and configure in bit reverse mode */
110 writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
111 writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
112 writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
113
114 /* Store partial result */
115 ctx->partial = readl(ctx->crc->regs + CRC_DR);
116 ctx->crc->nb_pending_bytes = 0;
117
118 return 0;
119}
120
121static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
122 unsigned int length)
123{
124 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
125 struct stm32_crc *crc = ctx->crc;
126 u32 *d32;
127 unsigned int i;
128
129 if (unlikely(crc->nb_pending_bytes)) {
130 while (crc->nb_pending_bytes != sizeof(u32) && length) {
131 /* Fill in pending data */
132 crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
133 length--;
134 }
135
136 if (crc->nb_pending_bytes == sizeof(u32)) {
137 /* Process completed pending data */
138 writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR);
139 crc->nb_pending_bytes = 0;
140 }
141 }
142
143 d32 = (u32 *)d8;
144 for (i = 0; i < length >> 2; i++)
145 /* Process 32 bits data */
146 writel(*(d32++), crc->regs + CRC_DR);
147
148 /* Store partial result */
149 ctx->partial = readl(crc->regs + CRC_DR);
150
151 /* Check for pending data (non 32 bits) */
152 length &= 3;
153 if (likely(!length))
154 return 0;
155
156 if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
 157 /* Should never happen */
158 dev_err(crc->dev, "Pending data overflow\n");
159 return -EINVAL;
160 }
161
162 d8 = (const u8 *)d32;
163 for (i = 0; i < length; i++)
164 /* Store pending data */
165 crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
166
167 return 0;
168}
169
170static int stm32_crc_final(struct shash_desc *desc, u8 *out)
171{
172 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
173 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
174
175 /* Send computed CRC */
176 put_unaligned_le32(mctx->poly == POLY_CRC32C ?
177 ~ctx->partial : ctx->partial, out);
178
179 return 0;
180}
181
182static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
183 unsigned int length, u8 *out)
184{
185 return stm32_crc_update(desc, data, length) ?:
186 stm32_crc_final(desc, out);
187}
188
189static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
190 unsigned int length, u8 *out)
191{
192 return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
193}
194
195static struct shash_alg algs[] = {
196 /* CRC-32 */
197 {
198 .setkey = stm32_crc_setkey,
199 .init = stm32_crc_init,
200 .update = stm32_crc_update,
201 .final = stm32_crc_final,
202 .finup = stm32_crc_finup,
203 .digest = stm32_crc_digest,
204 .descsize = sizeof(struct stm32_crc_desc_ctx),
205 .digestsize = CHKSUM_DIGEST_SIZE,
206 .base = {
207 .cra_name = "crc32",
208 .cra_driver_name = DRIVER_NAME,
209 .cra_priority = 200,
210 .cra_blocksize = CHKSUM_BLOCK_SIZE,
211 .cra_alignmask = 3,
212 .cra_ctxsize = sizeof(struct stm32_crc_ctx),
213 .cra_module = THIS_MODULE,
214 .cra_init = stm32_crc32_cra_init,
215 }
216 },
 217 /* CRC-32 Castagnoli */
218 {
219 .setkey = stm32_crc_setkey,
220 .init = stm32_crc_init,
221 .update = stm32_crc_update,
222 .final = stm32_crc_final,
223 .finup = stm32_crc_finup,
224 .digest = stm32_crc_digest,
225 .descsize = sizeof(struct stm32_crc_desc_ctx),
226 .digestsize = CHKSUM_DIGEST_SIZE,
227 .base = {
228 .cra_name = "crc32c",
229 .cra_driver_name = DRIVER_NAME,
230 .cra_priority = 200,
231 .cra_blocksize = CHKSUM_BLOCK_SIZE,
232 .cra_alignmask = 3,
233 .cra_ctxsize = sizeof(struct stm32_crc_ctx),
234 .cra_module = THIS_MODULE,
235 .cra_init = stm32_crc32c_cra_init,
236 }
237 }
238};
239
240static int stm32_crc_probe(struct platform_device *pdev)
241{
242 struct device *dev = &pdev->dev;
243 struct stm32_crc *crc;
244 struct resource *res;
245 int ret;
246
247 crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
248 if (!crc)
249 return -ENOMEM;
250
251 crc->dev = dev;
252
253 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
254 crc->regs = devm_ioremap_resource(dev, res);
255 if (IS_ERR(crc->regs)) {
256 dev_err(dev, "Cannot map CRC IO\n");
257 return PTR_ERR(crc->regs);
258 }
259
260 crc->clk = devm_clk_get(dev, NULL);
261 if (IS_ERR(crc->clk)) {
262 dev_err(dev, "Could not get clock\n");
263 return PTR_ERR(crc->clk);
264 }
265
266 ret = clk_prepare_enable(crc->clk);
267 if (ret) {
268 dev_err(crc->dev, "Failed to enable clock\n");
269 return ret;
270 }
271
272 platform_set_drvdata(pdev, crc);
273
274 spin_lock(&crc_list.lock);
275 list_add(&crc->list, &crc_list.dev_list);
276 spin_unlock(&crc_list.lock);
277
278 ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
279 if (ret) {
280 dev_err(dev, "Failed to register\n");
281 clk_disable_unprepare(crc->clk);
282 return ret;
283 }
284
285 dev_info(dev, "Initialized\n");
286
287 return 0;
288}
289
290static int stm32_crc_remove(struct platform_device *pdev)
291{
292 struct stm32_crc *crc = platform_get_drvdata(pdev);
293
294 spin_lock(&crc_list.lock);
295 list_del(&crc->list);
296 spin_unlock(&crc_list.lock);
297
298 crypto_unregister_shash(algs);
299
300 clk_disable_unprepare(crc->clk);
301
302 return 0;
303}
304
305static const struct of_device_id stm32_dt_ids[] = {
306 { .compatible = "st,stm32f7-crc", },
307 {},
308};
309MODULE_DEVICE_TABLE(of, stm32_dt_ids);
310
311static struct platform_driver stm32_crc_driver = {
312 .probe = stm32_crc_probe,
313 .remove = stm32_crc_remove,
314 .driver = {
315 .name = DRIVER_NAME,
316 .of_match_table = stm32_dt_ids,
317 },
318};
319
320module_platform_driver(stm32_crc_driver);
321
322MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
 323MODULE_DESCRIPTION("STMicroelectronics STM32 CRC32 hardware driver");
324MODULE_LICENSE("GPL");
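
For context, a minimal usage sketch (not part of this patch set): a kernel caller can reach the accelerator through the "crc32" shash registered above. The function name is illustrative, and per stm32_crc_setkey() the 4-byte key is the CRC seed, consumed little-endian:

	#include <crypto/hash.h>
	#include <linux/err.h>

	static int stm32_crc32_example(const u8 *buf, unsigned int len, u32 *out)
	{
		struct crypto_shash *tfm;
		__le32 seed = cpu_to_le32(0xffffffff);	/* conventional CRC-32 init value */
		int ret;

		tfm = crypto_alloc_shash("crc32", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
		if (!ret) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			/* one-shot init + update + final over buf */
			ret = crypto_shash_digest(desc, buf, len, (u8 *)out);
		}

		crypto_free_shash(tfm);
		return ret;
	}
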
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 8e8d60e9a1a2..b1f0d523dff9 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -37,7 +37,7 @@ struct udl_fbdev {
37}; 37};
38 38
39#define DL_ALIGN_UP(x, a) ALIGN(x, a) 39#define DL_ALIGN_UP(x, a) ALIGN(x, a)
40#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a) 40#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
41 41
42/** Read the red component (0..255) of a 32 bpp colour. */ 42/** Read the red component (0..255) of a 32 bpp colour. */
43#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF) 43#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index afb0967d2ce6..6faaca1b48b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3809,6 +3809,15 @@ static int adap_init0(struct adapter *adap)
3809 } 3809 }
3810 if (caps_cmd.cryptocaps) { 3810 if (caps_cmd.cryptocaps) {
3811 /* Should query params here...TODO */ 3811 /* Should query params here...TODO */
3812 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
3813 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
3814 params, val);
3815 if (ret < 0) {
3816 if (ret != -EINVAL)
3817 goto bye;
3818 } else {
3819 adap->vres.ncrypto_fc = val[0];
3820 }
3812 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; 3821 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
3813 adap->num_uld += 1; 3822 adap->num_uld += 1;
3814 } 3823 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4c856605fdfa..6e74040af49a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -272,6 +272,7 @@ struct cxgb4_virt_res { /* virtualized HW resources */
272 struct cxgb4_range qp; 272 struct cxgb4_range qp;
273 struct cxgb4_range cq; 273 struct cxgb4_range cq;
274 struct cxgb4_range ocq; 274 struct cxgb4_range ocq;
275 unsigned int ncrypto_fc;
275}; 276};
276 277
277#define OCQ_WIN_OFFSET(pdev, vres) \ 278#define OCQ_WIN_OFFSET(pdev, vres) \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ccc05f874419..8f8c079d0d2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1167,7 +1167,8 @@ enum fw_params_param_pfvf {
1167 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, 1167 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
1168 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, 1168 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
1169 FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, 1169 FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
1170 FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31 1170 FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
1171 FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32
1171}; 1172};
1172 1173
1173/* 1174/*
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 6f509f68085e..3d891db57ee6 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2019,8 +2019,7 @@ out:
2019 return ret; 2019 return ret;
2020} 2020}
2021 2021
2022static int qman_query_fq_np(struct qman_fq *fq, 2022int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
2023 struct qm_mcr_queryfq_np *np)
2024{ 2023{
2025 union qm_mc_command *mcc; 2024 union qm_mc_command *mcc;
2026 union qm_mc_result *mcr; 2025 union qm_mc_result *mcr;
@@ -2046,6 +2045,7 @@ out:
2046 put_affine_portal(); 2045 put_affine_portal();
2047 return ret; 2046 return ret;
2048} 2047}
2048EXPORT_SYMBOL(qman_query_fq_np);
2049 2049
2050static int qman_query_cgr(struct qman_cgr *cgr, 2050static int qman_query_cgr(struct qman_cgr *cgr,
2051 struct qm_mcr_querycgr *cgrd) 2051 struct qm_mcr_querycgr *cgrd)
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index f4e6e70de259..90bc40c48675 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -34,6 +34,8 @@ u16 qman_ip_rev;
34EXPORT_SYMBOL(qman_ip_rev); 34EXPORT_SYMBOL(qman_ip_rev);
35u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; 35u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
36EXPORT_SYMBOL(qm_channel_pool1); 36EXPORT_SYMBOL(qm_channel_pool1);
37u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
38EXPORT_SYMBOL(qm_channel_caam);
37 39
38/* Register offsets */ 40/* Register offsets */
39#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) 41#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
@@ -720,8 +722,10 @@ static int fsl_qman_probe(struct platform_device *pdev)
720 return -ENODEV; 722 return -ENODEV;
721 } 723 }
722 724
723 if ((qman_ip_rev & 0xff00) >= QMAN_REV30) 725 if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
724 qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; 726 qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
727 qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
728 }
725 729
726 ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); 730 ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
727 WARN_ON(ret); 731 WARN_ON(ret);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 53685b59718e..22725bdc6f15 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -89,67 +89,6 @@ static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
89 return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); 89 return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
90} 90}
91 91
92/* "Query FQ Non-Programmable Fields" */
93
94struct qm_mcr_queryfq_np {
95 u8 verb;
96 u8 result;
97 u8 __reserved1;
98 u8 state; /* QM_MCR_NP_STATE_*** */
99 u32 fqd_link; /* 24-bit, _res2[24-31] */
100 u16 odp_seq; /* 14-bit, _res3[14-15] */
101 u16 orp_nesn; /* 14-bit, _res4[14-15] */
102 u16 orp_ea_hseq; /* 15-bit, _res5[15] */
103 u16 orp_ea_tseq; /* 15-bit, _res6[15] */
104 u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
105 u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
106 u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
107 u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
108 u8 __reserved2[5];
109 u8 is; /* 1-bit, _res12[1-7] */
110 u16 ics_surp;
111 u32 byte_cnt;
112 u32 frm_cnt; /* 24-bit, _res13[24-31] */
113 u32 __reserved3;
114 u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
115 u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
116 u16 __reserved4;
117 u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
118 u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
119 u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
120} __packed;
121
122#define QM_MCR_NP_STATE_FE 0x10
123#define QM_MCR_NP_STATE_R 0x08
124#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
125#define QM_MCR_NP_STATE_OOS 0x00
126#define QM_MCR_NP_STATE_RETIRED 0x01
127#define QM_MCR_NP_STATE_TEN_SCHED 0x02
128#define QM_MCR_NP_STATE_TRU_SCHED 0x03
129#define QM_MCR_NP_STATE_PARKED 0x04
130#define QM_MCR_NP_STATE_ACTIVE 0x05
131#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
132#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
133#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
134#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
135#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
136
137enum qm_mcr_queryfq_np_masks {
138 qm_mcr_fqd_link_mask = BIT(24)-1,
139 qm_mcr_odp_seq_mask = BIT(14)-1,
140 qm_mcr_orp_nesn_mask = BIT(14)-1,
141 qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
142 qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
143 qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
144 qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
145 qm_mcr_pfdr_hptr_mask = BIT(24)-1,
146 qm_mcr_pfdr_tptr_mask = BIT(24)-1,
147 qm_mcr_is_mask = BIT(1)-1,
148 qm_mcr_frm_cnt_mask = BIT(24)-1,
149};
150#define qm_mcr_np_get(np, field) \
151 ((np)->field & (qm_mcr_##field##_mask))
152
153/* Congestion Groups */ 92/* Congestion Groups */
154 93
155/* 94/*
@@ -271,42 +210,6 @@ const struct qm_portal_config *qman_destroy_affine_portal(void);
271 */ 210 */
272int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd); 211int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
273 212
274/*
275 * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
276 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
277 * FQID(n) to fill in the frame queue ID.
278 */
279#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
280#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
281#define QM_VDQCR_EXACT 0x40000000
282#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
283#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
284#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
285#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
286
287#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
288#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
289#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
290
291/*
292 * qman_volatile_dequeue - Issue a volatile dequeue command
293 * @fq: the frame queue object to dequeue from
294 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
295 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
296 *
297 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
298 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
299 * the VDQCR is already in use, otherwise returns non-zero for failure. If
300 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 301 * the VDQCR command has finished executing (i.e. once the callback for the last
302 * DQRR entry resulting from the VDQCR command has been called). If not using
303 * the FINISH flag, completion can be determined either by detecting the
304 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
305 * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
306 * for the QMAN_FQ_STATE_VDQCR bit to disappear.
307 */
308int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
309
310int qman_alloc_fq_table(u32 num_fqids); 213int qman_alloc_fq_table(u32 num_fqids);
311 214
312/* QMan s/w corenet portal, low-level i/face */ 215/* QMan s/w corenet portal, low-level i/face */
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 592d47e565a8..0977fb18ff68 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -43,12 +43,13 @@
43 --------------------------------------------------------------------------- 43 ---------------------------------------------------------------------------
44 Issue Date: 31/01/2006 44 Issue Date: 31/01/2006
45 45
46 An implementation of field multiplication in Galois Field GF(128) 46 An implementation of field multiplication in Galois Field GF(2^128)
47*/ 47*/
48 48
49#ifndef _CRYPTO_GF128MUL_H 49#ifndef _CRYPTO_GF128MUL_H
50#define _CRYPTO_GF128MUL_H 50#define _CRYPTO_GF128MUL_H
51 51
52#include <asm/byteorder.h>
52#include <crypto/b128ops.h> 53#include <crypto/b128ops.h>
53#include <linux/slab.h> 54#include <linux/slab.h>
54 55
@@ -65,7 +66,7 @@
65 * are left and the lsb's are right. char b[16] is an array and b[0] is 66 * are left and the lsb's are right. char b[16] is an array and b[0] is
66 * the first octet. 67 * the first octet.
67 * 68 *
68 * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000 69 * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
69 * b[0] b[1] b[2] b[3] b[13] b[14] b[15] 70 * b[0] b[1] b[2] b[3] b[13] b[14] b[15]
70 * 71 *
71 * Every bit is a coefficient of some power of X. We can store the bits 72 * Every bit is a coefficient of some power of X. We can store the bits
@@ -85,15 +86,17 @@
85 * Both of the above formats are easy to implement on big-endian 86 * Both of the above formats are easy to implement on big-endian
86 * machines. 87 * machines.
87 * 88 *
88 * EME (which is patent encumbered) uses the ble format (bits are stored 89 * XTS and EME (the latter of which is patent encumbered) use the ble
89 * in big endian order and the bytes in little endian). The above buffer 90 * format (bits are stored in big endian order and the bytes in little
90 * represents X^7 in this case and the primitive polynomial is b[0] = 0x87. 91 * endian). The above buffer represents X^7 in this case and the
92 * primitive polynomial is b[0] = 0x87.
91 * 93 *
92 * The common machine word-size is smaller than 128 bits, so to make 94 * The common machine word-size is smaller than 128 bits, so to make
93 * an efficient implementation we must split into machine word sizes. 95 * an efficient implementation we must split into machine word sizes.
94 * This file uses one 32bit for the moment. Machine endianness comes into 96 * This implementation uses 64-bit words for the moment. Machine
95 * play. The lle format in relation to machine endianness is discussed 97 * endianness comes into play. The lle format in relation to machine
96 * below by the original author of gf128mul Dr Brian Gladman. 98 * endianness is discussed below by the original author of gf128mul Dr
99 * Brian Gladman.
97 * 100 *
98 * Let's look at the bbe and ble format on a little endian machine. 101 * Let's look at the bbe and ble format on a little endian machine.
99 * 102 *
@@ -127,10 +130,10 @@
 127 * machines this will automatically be aligned to wordsize and on a 64-bit 130
128 * machine also. 131 * machine also.
129 */ 132 */
130/* Multiply a GF128 field element by x. Field elements are held in arrays 133/* Multiply a GF(2^128) field element by x. Field elements are
131 of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower 134 held in arrays of bytes in which field bits 8n..8n + 7 are held in
132 indexed bits placed in the more numerically significant bit positions 135 byte[n], with lower indexed bits placed in the more numerically
133 within bytes. 136 significant bit positions within bytes.
134 137
135 On little endian machines the bit indexes translate into the bit 138 On little endian machines the bit indexes translate into the bit
136 positions within four 32-bit words in the following way 139 positions within four 32-bit words in the following way
@@ -161,8 +164,58 @@ void gf128mul_lle(be128 *a, const be128 *b);
161 164
162void gf128mul_bbe(be128 *a, const be128 *b); 165void gf128mul_bbe(be128 *a, const be128 *b);
163 166
164/* multiply by x in ble format, needed by XTS */ 167/*
165void gf128mul_x_ble(be128 *a, const be128 *b); 168 * The following functions multiply a field element by x in
169 * the polynomial field representation. They use 64-bit word operations
170 * to gain speed but compensate for machine endianness and hence work
171 * correctly on both styles of machine.
172 *
173 * They are defined here for performance.
174 */
175
176static inline u64 gf128mul_mask_from_bit(u64 x, int which)
177{
178 /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */
179 return ((s64)(x << (63 - which)) >> 63);
180}
181
182static inline void gf128mul_x_lle(be128 *r, const be128 *x)
183{
184 u64 a = be64_to_cpu(x->a);
185 u64 b = be64_to_cpu(x->b);
186
187 /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48
188 * (see crypto/gf128mul.c): */
189 u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56);
190
191 r->b = cpu_to_be64((b >> 1) | (a << 63));
192 r->a = cpu_to_be64((a >> 1) ^ _tt);
193}
194
195static inline void gf128mul_x_bbe(be128 *r, const be128 *x)
196{
197 u64 a = be64_to_cpu(x->a);
198 u64 b = be64_to_cpu(x->b);
199
200 /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */
201 u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
202
203 r->a = cpu_to_be64((a << 1) | (b >> 63));
204 r->b = cpu_to_be64((b << 1) ^ _tt);
205}
206
207/* needed by XTS */
208static inline void gf128mul_x_ble(le128 *r, const le128 *x)
209{
210 u64 a = le64_to_cpu(x->a);
211 u64 b = le64_to_cpu(x->b);
212
213 /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
214 u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
215
216 r->a = cpu_to_le64((a << 1) | (b >> 63));
217 r->b = cpu_to_le64((b << 1) ^ _tt);
218}
166 219
167/* 4k table optimization */ 220/* 4k table optimization */
168 221
@@ -172,8 +225,8 @@ struct gf128mul_4k {
172 225
173struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g); 226struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
174struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); 227struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
175void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t); 228void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
176void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t); 229void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
177 230
178static inline void gf128mul_free_4k(struct gf128mul_4k *t) 231static inline void gf128mul_free_4k(struct gf128mul_4k *t)
179{ 232{
@@ -194,6 +247,6 @@ struct gf128mul_64k {
194 */ 247 */
195struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g); 248struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
196void gf128mul_free_64k(struct gf128mul_64k *t); 249void gf128mul_free_64k(struct gf128mul_64k *t);
197void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t); 250void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
198 251
199#endif /* _CRYPTO_GF128MUL_H */ 252#endif /* _CRYPTO_GF128MUL_H */
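
To illustrate the new multiply-by-x helpers, a hedged sketch of the XTS-style tweak walk they exist for; the function name is hypothetical, and in real XTS code the initial tweak comes from encrypting the sector number with the tweak key. Note that gf128mul_x_ble() reads its source before writing, so in-place doubling (r == x) is safe:

	#include <crypto/gf128mul.h>

	static void xts_tweak_walk_sketch(le128 *t, unsigned int nblocks)
	{
		unsigned int i;

		for (i = 0; i < nblocks; i++) {
			/* ...XOR *t into the block, encrypt, XOR *t again... */
			gf128mul_x_ble(t, t);	/* t <- t * x in GF(2^128), ble format */
		}
	}
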
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 1de2b5af12d7..51052f65cefc 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -78,4 +78,7 @@ int crypto_register_acomp(struct acomp_alg *alg);
78 */ 78 */
79int crypto_unregister_acomp(struct acomp_alg *alg); 79int crypto_unregister_acomp(struct acomp_alg *alg);
80 80
81int crypto_register_acomps(struct acomp_alg *algs, int count);
82void crypto_unregister_acomps(struct acomp_alg *algs, int count);
83
81#endif 84#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 3fda3c5655a0..ccad9b2c9bd6 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -133,4 +133,7 @@ int crypto_register_scomp(struct scomp_alg *alg);
133 */ 133 */
134int crypto_unregister_scomp(struct scomp_alg *alg); 134int crypto_unregister_scomp(struct scomp_alg *alg);
135 135
136int crypto_register_scomps(struct scomp_alg *algs, int count);
137void crypto_unregister_scomps(struct scomp_alg *algs, int count);
138
136#endif 139#endif
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 4307a2f2365f..ce8e1f79374b 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -74,7 +74,7 @@ struct crypto_kpp {
74 * @base: Common crypto API algorithm data structure 74 * @base: Common crypto API algorithm data structure
75 */ 75 */
76struct kpp_alg { 76struct kpp_alg {
77 int (*set_secret)(struct crypto_kpp *tfm, void *buffer, 77 int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
78 unsigned int len); 78 unsigned int len);
79 int (*generate_public_key)(struct kpp_request *req); 79 int (*generate_public_key)(struct kpp_request *req);
80 int (*compute_shared_secret)(struct kpp_request *req); 80 int (*compute_shared_secret)(struct kpp_request *req);
@@ -273,8 +273,8 @@ struct kpp_secret {
273 * 273 *
274 * Return: zero on success; error code in case of error 274 * Return: zero on success; error code in case of error
275 */ 275 */
276static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer, 276static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
277 unsigned int len) 277 const void *buffer, unsigned int len)
278{ 278{
279 struct kpp_alg *alg = crypto_kpp_alg(tfm); 279 struct kpp_alg *alg = crypto_kpp_alg(tfm);
280 280
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 77b630672b2c..c0bde308b28a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -11,7 +11,7 @@ struct blkcipher_desc;
11#define XTS_BLOCK_SIZE 16 11#define XTS_BLOCK_SIZE 16
12 12
13struct xts_crypt_req { 13struct xts_crypt_req {
14 be128 *tbuf; 14 le128 *tbuf;
15 unsigned int tbuflen; 15 unsigned int tbuflen;
16 16
17 void *tweak_ctx; 17 void *tweak_ctx;
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 692846c7941b..63f4c2c44a1f 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -12,9 +12,10 @@
12#define CLKID_FCLK_DIV4 6 12#define CLKID_FCLK_DIV4 6
13#define CLKID_CLK81 12 13#define CLKID_CLK81 12
14#define CLKID_MPLL2 15 14#define CLKID_MPLL2 15
15#define CLKID_SPI 34
16#define CLKID_I2C 22 15#define CLKID_I2C 22
17#define CLKID_SAR_ADC 23 16#define CLKID_SAR_ADC 23
17#define CLKID_RNG0 25
18#define CLKID_SPI 34
18#define CLKID_ETH 36 19#define CLKID_ETH 36
19#define CLKID_USB0 50 20#define CLKID_USB0 50
20#define CLKID_USB1 51 21#define CLKID_USB1 51
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c41b8d99dd0e..3285c944194a 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -123,6 +123,10 @@ enum ccp_aes_mode {
123 CCP_AES_MODE_CFB, 123 CCP_AES_MODE_CFB,
124 CCP_AES_MODE_CTR, 124 CCP_AES_MODE_CTR,
125 CCP_AES_MODE_CMAC, 125 CCP_AES_MODE_CMAC,
126 CCP_AES_MODE_GHASH,
127 CCP_AES_MODE_GCTR,
128 CCP_AES_MODE_GCM,
129 CCP_AES_MODE_GMAC,
126 CCP_AES_MODE__LAST, 130 CCP_AES_MODE__LAST,
127}; 131};
128 132
@@ -137,6 +141,9 @@ enum ccp_aes_action {
137 CCP_AES_ACTION_ENCRYPT, 141 CCP_AES_ACTION_ENCRYPT,
138 CCP_AES_ACTION__LAST, 142 CCP_AES_ACTION__LAST,
139}; 143};
144/* Overloaded field */
145#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT
146#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT
140 147
141/** 148/**
142 * struct ccp_aes_engine - CCP AES operation 149 * struct ccp_aes_engine - CCP AES operation
@@ -181,6 +188,8 @@ struct ccp_aes_engine {
181 struct scatterlist *cmac_key; /* K1/K2 cmac key required for 188 struct scatterlist *cmac_key; /* K1/K2 cmac key required for
182 * final cmac cmd */ 189 * final cmac cmd */
183 u32 cmac_key_len; /* In bytes */ 190 u32 cmac_key_len; /* In bytes */
191
192 u32 aad_len; /* In bytes */
184}; 193};
185 194
186/***** XTS-AES engine *****/ 195/***** XTS-AES engine *****/
@@ -249,6 +258,8 @@ enum ccp_sha_type {
249 CCP_SHA_TYPE_1 = 1, 258 CCP_SHA_TYPE_1 = 1,
250 CCP_SHA_TYPE_224, 259 CCP_SHA_TYPE_224,
251 CCP_SHA_TYPE_256, 260 CCP_SHA_TYPE_256,
261 CCP_SHA_TYPE_384,
262 CCP_SHA_TYPE_512,
252 CCP_SHA_TYPE__LAST, 263 CCP_SHA_TYPE__LAST,
253}; 264};
254 265
@@ -290,6 +301,60 @@ struct ccp_sha_engine {
290 * final sha cmd */ 301 * final sha cmd */
291}; 302};
292 303
304/***** 3DES engine *****/
305enum ccp_des3_mode {
306 CCP_DES3_MODE_ECB = 0,
307 CCP_DES3_MODE_CBC,
308 CCP_DES3_MODE_CFB,
309 CCP_DES3_MODE__LAST,
310};
311
312enum ccp_des3_type {
313 CCP_DES3_TYPE_168 = 1,
314 CCP_DES3_TYPE__LAST,
 315};
316
317enum ccp_des3_action {
318 CCP_DES3_ACTION_DECRYPT = 0,
319 CCP_DES3_ACTION_ENCRYPT,
320 CCP_DES3_ACTION__LAST,
321};
322
323/**
324 * struct ccp_des3_engine - CCP SHA operation
325 * @type: Type of 3DES operation
326 * @mode: cipher mode
327 * @action: 3DES operation (decrypt/encrypt)
328 * @key: key to be used for this 3DES operation
329 * @key_len: length of key (in bytes)
 330 * @iv: IV to be used for this 3DES operation
331 * @iv_len: length in bytes of iv
332 * @src: input data to be used for this operation
333 * @src_len: length of input data used for this operation (in bytes)
334 * @dst: output data produced by this operation
335 *
336 * Variables required to be set when calling ccp_enqueue_cmd():
337 * - type, mode, action, key, key_len, src, dst, src_len
338 * - iv, iv_len for any mode other than ECB
339 *
340 * The iv variable is used as both input and output. On completion of the
341 * 3DES operation the new IV overwrites the old IV.
342 */
343struct ccp_des3_engine {
344 enum ccp_des3_type type;
345 enum ccp_des3_mode mode;
346 enum ccp_des3_action action;
347
348 struct scatterlist *key;
349 u32 key_len; /* In bytes */
350
351 struct scatterlist *iv;
352 u32 iv_len; /* In bytes */
353
354 struct scatterlist *src, *dst;
355 u64 src_len; /* In bytes */
356};
357
293/***** RSA engine *****/ 358/***** RSA engine *****/
294/** 359/**
295 * struct ccp_rsa_engine - CCP RSA operation 360 * struct ccp_rsa_engine - CCP RSA operation
@@ -539,7 +604,7 @@ struct ccp_ecc_engine {
539enum ccp_engine { 604enum ccp_engine {
540 CCP_ENGINE_AES = 0, 605 CCP_ENGINE_AES = 0,
541 CCP_ENGINE_XTS_AES_128, 606 CCP_ENGINE_XTS_AES_128,
542 CCP_ENGINE_RSVD1, 607 CCP_ENGINE_DES3,
543 CCP_ENGINE_SHA, 608 CCP_ENGINE_SHA,
544 CCP_ENGINE_RSA, 609 CCP_ENGINE_RSA,
545 CCP_ENGINE_PASSTHRU, 610 CCP_ENGINE_PASSTHRU,
@@ -587,6 +652,7 @@ struct ccp_cmd {
587 union { 652 union {
588 struct ccp_aes_engine aes; 653 struct ccp_aes_engine aes;
589 struct ccp_xts_aes_engine xts; 654 struct ccp_xts_aes_engine xts;
655 struct ccp_des3_engine des3;
590 struct ccp_sha_engine sha; 656 struct ccp_sha_engine sha;
591 struct ccp_rsa_engine rsa; 657 struct ccp_rsa_engine rsa;
592 struct ccp_passthru_engine passthru; 658 struct ccp_passthru_engine passthru;
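
As a hedged sketch of the submission pattern the ccp_des3_engine kernel-doc above describes (the scatterlists key_sg/iv_sg/src_sg/dst_sg and the length are assumed to be prepared by the caller; error handling is trimmed):

	struct ccp_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_DES3;
	cmd.u.des3.type = CCP_DES3_TYPE_168;
	cmd.u.des3.mode = CCP_DES3_MODE_CBC;
	cmd.u.des3.action = CCP_DES3_ACTION_ENCRYPT;
	cmd.u.des3.key = key_sg;	/* 24-byte 3DES key */
	cmd.u.des3.key_len = 24;
	cmd.u.des3.iv = iv_sg;		/* required for any mode other than ECB */
	cmd.u.des3.iv_len = 8;
	cmd.u.des3.src = src_sg;
	cmd.u.des3.dst = dst_sg;
	cmd.u.des3.src_len = len;

	ret = ccp_enqueue_cmd(&cmd);	/* on completion the new IV overwrites the old */
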
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c0b0cf3d2d2f..84da9978e951 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -123,7 +123,7 @@
123/* 123/*
124 * Miscellaneous stuff. 124 * Miscellaneous stuff.
125 */ 125 */
126#define CRYPTO_MAX_ALG_NAME 64 126#define CRYPTO_MAX_ALG_NAME 128
127 127
128/* 128/*
129 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual 129 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 3252799832cf..df4d3e943d28 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -10,9 +10,4 @@
10void sha_init(__u32 *buf); 10void sha_init(__u32 *buf);
11void sha_transform(__u32 *digest, const char *data, __u32 *W); 11void sha_transform(__u32 *digest, const char *data, __u32 *W);
12 12
13#define MD5_DIGEST_WORDS 4
14#define MD5_MESSAGE_BYTES 64
15
16void md5_transform(__u32 *hash, __u32 const *in);
17
18#endif 13#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7ae256717a32..13bc08aba704 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -47,6 +47,7 @@
47 47
48/* @a is a power of 2 value */ 48/* @a is a power of 2 value */
49#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) 49#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
50#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
50#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) 51#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
51#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) 52#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
52#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) 53#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
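
For a power-of-two a, ALIGN() rounds up while the new ALIGN_DOWN() rounds down: ALIGN(13, 8) == 16 and ALIGN_DOWN(13, 8) == 8, with already-aligned values left unchanged (ALIGN_DOWN(16, 8) == 16). This is the helper the DL_ALIGN_DOWN conversions in udl_fb.c and udlfb.h above switch to, replacing the open-coded ALIGN(x - (a - 1), a).
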
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 3d4df74a96de..d4dfefdee6c1 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -36,8 +36,11 @@
36/* Hardware constants */ 36/* Hardware constants */
37#define QM_CHANNEL_SWPORTAL0 0 37#define QM_CHANNEL_SWPORTAL0 0
38#define QMAN_CHANNEL_POOL1 0x21 38#define QMAN_CHANNEL_POOL1 0x21
39#define QMAN_CHANNEL_CAAM 0x80
39#define QMAN_CHANNEL_POOL1_REV3 0x401 40#define QMAN_CHANNEL_POOL1_REV3 0x401
41#define QMAN_CHANNEL_CAAM_REV3 0x840
40extern u16 qm_channel_pool1; 42extern u16 qm_channel_pool1;
43extern u16 qm_channel_caam;
41 44
42/* Portal processing (interrupt) sources */ 45/* Portal processing (interrupt) sources */
43#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ 46#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
@@ -165,6 +168,7 @@ static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
165#define qm_fd_set_contig_big(fd, len) \ 168#define qm_fd_set_contig_big(fd, len) \
166 qm_fd_set_param(fd, qm_fd_contig_big, 0, len) 169 qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
167#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len) 170#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
171#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)
168 172
169static inline void qm_fd_clear_fd(struct qm_fd *fd) 173static inline void qm_fd_clear_fd(struct qm_fd *fd)
170{ 174{
@@ -639,6 +643,7 @@ struct qm_mcc_initcgr {
639#define QM_CGR_WE_MODE 0x0001 643#define QM_CGR_WE_MODE 0x0001
640 644
641#define QMAN_CGR_FLAG_USE_INIT 0x00000001 645#define QMAN_CGR_FLAG_USE_INIT 0x00000001
646#define QMAN_CGR_MODE_FRAME 0x00000001
642 647
643 /* Portal and Frame Queues */ 648 /* Portal and Frame Queues */
644/* Represents a managed portal */ 649/* Represents a managed portal */
@@ -791,6 +796,84 @@ struct qman_cgr {
791#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */ 796#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
792#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */ 797#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
793 798
799/*
800 * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
801 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
802 * FQID(n) to fill in the frame queue ID.
803 */
804#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
805#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
806#define QM_VDQCR_EXACT 0x40000000
807#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
808#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
809#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
810#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
811
812#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
813#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
814#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
815
816/* "Query FQ Non-Programmable Fields" */
817struct qm_mcr_queryfq_np {
818 u8 verb;
819 u8 result;
820 u8 __reserved1;
821 u8 state; /* QM_MCR_NP_STATE_*** */
822 u32 fqd_link; /* 24-bit, _res2[24-31] */
823 u16 odp_seq; /* 14-bit, _res3[14-15] */
824 u16 orp_nesn; /* 14-bit, _res4[14-15] */
825 u16 orp_ea_hseq; /* 15-bit, _res5[15] */
826 u16 orp_ea_tseq; /* 15-bit, _res6[15] */
827 u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
828 u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
829 u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
830 u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
831 u8 __reserved2[5];
832 u8 is; /* 1-bit, _res12[1-7] */
833 u16 ics_surp;
834 u32 byte_cnt;
835 u32 frm_cnt; /* 24-bit, _res13[24-31] */
836 u32 __reserved3;
837 u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
838 u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
839 u16 __reserved4;
840 u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
841 u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
842 u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
843} __packed;
844
845#define QM_MCR_NP_STATE_FE 0x10
846#define QM_MCR_NP_STATE_R 0x08
847#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
848#define QM_MCR_NP_STATE_OOS 0x00
849#define QM_MCR_NP_STATE_RETIRED 0x01
850#define QM_MCR_NP_STATE_TEN_SCHED 0x02
851#define QM_MCR_NP_STATE_TRU_SCHED 0x03
852#define QM_MCR_NP_STATE_PARKED 0x04
853#define QM_MCR_NP_STATE_ACTIVE 0x05
854#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
855#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
856#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
857#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
858#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
859
860enum qm_mcr_queryfq_np_masks {
861 qm_mcr_fqd_link_mask = BIT(24) - 1,
862 qm_mcr_odp_seq_mask = BIT(14) - 1,
863 qm_mcr_orp_nesn_mask = BIT(14) - 1,
864 qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
865 qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
866 qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
867 qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
868 qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
869 qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
870 qm_mcr_is_mask = BIT(1) - 1,
871 qm_mcr_frm_cnt_mask = BIT(24) - 1,
872};
873
874#define qm_mcr_np_get(np, field) \
875 ((np)->field & (qm_mcr_##field##_mask))
876
794 /* Portal Management */ 877 /* Portal Management */
795/** 878/**
796 * qman_p_irqsource_add - add processing sources to be interrupt-driven 879 * qman_p_irqsource_add - add processing sources to be interrupt-driven
@@ -963,6 +1046,25 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
963 */ 1046 */
964int qman_oos_fq(struct qman_fq *fq); 1047int qman_oos_fq(struct qman_fq *fq);
965 1048
1049/*
1050 * qman_volatile_dequeue - Issue a volatile dequeue command
1051 * @fq: the frame queue object to dequeue from
1052 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
1053 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
1054 *
1055 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
1056 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
1057 * the VDQCR is already in use, otherwise returns non-zero for failure. If
1058 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 1059 * the VDQCR command has finished executing (i.e. once the callback for the last
1060 * DQRR entry resulting from the VDQCR command has been called). If not using
1061 * the FINISH flag, completion can be determined either by detecting the
1062 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
1063 * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
1064 * for the QMAN_FQ_STATE_VDQCR bit to disappear.
1065 */
1066int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
1067
966/** 1068/**
967 * qman_enqueue - Enqueue a frame to a frame queue 1069 * qman_enqueue - Enqueue a frame to a frame queue
968 * @fq: the frame queue object to enqueue to 1070 * @fq: the frame queue object to enqueue to
@@ -994,6 +1096,13 @@ int qman_alloc_fqid_range(u32 *result, u32 count);
994 */ 1096 */
995int qman_release_fqid(u32 fqid); 1097int qman_release_fqid(u32 fqid);
996 1098
1099/**
1100 * qman_query_fq_np - Queries non-programmable FQD fields
1101 * @fq: the frame queue object to be queried
1102 * @np: storage for the queried FQD fields
1103 */
1104int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
1105
997 /* Pool-channel management */ 1106 /* Pool-channel management */
998/** 1107/**
999 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs 1108 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
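
A brief sketch of the newly exported query path (the frame queue object is assumed to be initialized elsewhere; field handling is simplified):

	#include <linux/printk.h>
	#include <soc/fsl/qman.h>

	static void fq_report_sketch(struct qman_fq *fq)
	{
		struct qm_mcr_queryfq_np np;

		if (qman_query_fq_np(fq, &np))
			return;

		pr_info("fq state %u, %u frames\n",
			np.state & QM_MCR_NP_STATE_MASK,
			qm_mcr_np_get(&np, frm_cnt));	/* 24-bit field, mask applied */
	}
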
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 11d21fce14d6..b4def5c630e7 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -31,7 +31,7 @@ enum {
31#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) 31#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
32#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE) 32#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
33 33
34#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME 34#define CRYPTO_MAX_NAME 64
35 35
36/* Netlink message attributes. */ 36/* Netlink message attributes. */
37enum crypto_attr_type_t { 37enum crypto_attr_type_t {
@@ -53,9 +53,9 @@ enum crypto_attr_type_t {
53}; 53};
54 54
55struct crypto_user_alg { 55struct crypto_user_alg {
56 char cru_name[CRYPTO_MAX_ALG_NAME]; 56 char cru_name[CRYPTO_MAX_NAME];
57 char cru_driver_name[CRYPTO_MAX_ALG_NAME]; 57 char cru_driver_name[CRYPTO_MAX_NAME];
58 char cru_module_name[CRYPTO_MAX_ALG_NAME]; 58 char cru_module_name[CRYPTO_MAX_NAME];
59 __u32 cru_type; 59 __u32 cru_type;
60 __u32 cru_mask; 60 __u32 cru_mask;
61 __u32 cru_refcnt; 61 __u32 cru_refcnt;
@@ -73,7 +73,7 @@ struct crypto_report_hash {
73}; 73};
74 74
75struct crypto_report_cipher { 75struct crypto_report_cipher {
76 char type[CRYPTO_MAX_ALG_NAME]; 76 char type[CRYPTO_MAX_NAME];
77 unsigned int blocksize; 77 unsigned int blocksize;
78 unsigned int min_keysize; 78 unsigned int min_keysize;
79 unsigned int max_keysize; 79 unsigned int max_keysize;
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa54ba4..3ea90aea5617 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -92,6 +92,6 @@ struct dlfb_data {
92 92
93/* remove these once align.h patch is taken into kernel */ 93/* remove these once align.h patch is taken into kernel */
94#define DL_ALIGN_UP(x, a) ALIGN(x, a) 94#define DL_ALIGN_UP(x, a) ALIGN(x, a)
95#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a) 95#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
96 96
97#endif 97#endif
diff --git a/kernel/padata.c b/kernel/padata.c
index 3202aa17492c..ac8f1e524836 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -154,8 +154,6 @@ EXPORT_SYMBOL(padata_do_parallel);
154 * A pointer to the control struct of the next object that needs 154 * A pointer to the control struct of the next object that needs
155 * serialization, if present in one of the percpu reorder queues. 155 * serialization, if present in one of the percpu reorder queues.
156 * 156 *
157 * NULL, if all percpu reorder queues are empty.
158 *
159 * -EINPROGRESS, if the next object that needs serialization will 157 * -EINPROGRESS, if the next object that needs serialization will
160 * be parallel processed by another cpu and is not yet present in 158 * be parallel processed by another cpu and is not yet present in
161 * the cpu's reorder queue. 159 * the cpu's reorder queue.
@@ -182,8 +180,6 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 	cpu = padata_index_to_cpu(pd, next_index);
 	next_queue = per_cpu_ptr(pd->pqueue, cpu);
 
-	padata = NULL;
-
 	reorder = &next_queue->reorder;
 
 	spin_lock(&reorder->lock);
@@ -235,12 +231,11 @@ static void padata_reorder(struct parallel_data *pd)
 		padata = padata_get_next(pd);
 
 		/*
-		 * All reorder queues are empty, or the next object that needs
-		 * serialization is parallel processed by another cpu and is
-		 * still on it's way to the cpu's reorder queue, nothing to
-		 * do for now.
+		 * If the next object that needs serialization is parallel
+		 * processed by another cpu and is still on it's way to the
+		 * cpu's reorder queue, nothing to do for now.
 		 */
-		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+		if (PTR_ERR(padata) == -EINPROGRESS)
 			break;
 
 		/*
@@ -354,7 +349,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
 
 	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
 	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
-		free_cpumask_var(pd->cpumask.cbcpu);
+		free_cpumask_var(pd->cpumask.pcpu);
 		return -ENOMEM;
 	}
 
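Two independent padata fixes are folded in above: padata_get_next() can no longer return NULL (it always yields a valid pointer or an ERR_PTR), so the caller's NULL check is dropped, and the cpumask error path now frees the mask that was actually allocated. The second fix follows a generic pattern; a minimal userspace analogue of it, not the kernel code itself:

#include <errno.h>
#include <stdlib.h>

struct masks {
	unsigned long *pcpu;
	unsigned long *cbcpu;
};

/* When the second of two paired allocations fails, release the first
 * one; the old padata code freed the never-allocated second mask,
 * leaking the first. */
static int masks_alloc(struct masks *m, size_t n)
{
	m->pcpu = calloc(n, sizeof(*m->pcpu));
	if (!m->pcpu)
		return -ENOMEM;

	m->cbcpu = calloc(n, sizeof(*m->cbcpu));
	if (!m->cbcpu) {
		free(m->pcpu);	/* not m->cbcpu, which is NULL here */
		return -ENOMEM;
	}
	return 0;
}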
diff --git a/lib/Makefile b/lib/Makefile
index b47cf97e1e68..a155c73e3437 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
-	 sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
+	 sha1.o chacha20.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o \
diff --git a/lib/md5.c b/lib/md5.c
deleted file mode 100644
index bb0cd01d356d..000000000000
--- a/lib/md5.c
+++ /dev/null
@@ -1,95 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/cryptohash.h>
-
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, in, s) \
-	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-
-void md5_transform(__u32 *hash, __u32 const *in)
-{
-	u32 a, b, c, d;
-
-	a = hash[0];
-	b = hash[1];
-	c = hash[2];
-	d = hash[3];
-
-	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-	hash[0] += a;
-	hash[1] += b;
-	hash[2] += c;
-	hash[3] += d;
-}
-EXPORT_SYMBOL(md5_transform);
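With md5.o dropped from lib-y above, lib/md5.c goes away entirely. The only non-obvious construct in the deleted file is the combined add-and-rotate in MD5STEP; this standalone snippet, written purely for illustration, shows that the (w<<s | w>>(32-s)) expression is a 32-bit rotate-left, well defined here because every MD5 round uses a shift in 1..31:

#include <stdint.h>
#include <stdio.h>

/* 32-bit rotate-left, as used by the deleted MD5STEP macro. */
static uint32_t rol32(uint32_t w, int s)
{
	return (w << s) | (w >> (32 - s));
}

int main(void)
{
	/* 0x80000001 rotated left by 7 wraps the top bit around. */
	printf("%08x\n", rol32(0x80000001u, 7)); /* prints 000000c0 */
	return 0;
}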
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 40a8aa39220d..7916af0aadd2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -55,7 +55,7 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
 		return -EINVAL;
 	}
 
-	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
+	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
 	return 0;
 }
 
@@ -71,7 +71,7 @@ static int verify_auth_trunc(struct nlattr **attrs)
 	if (nla_len(rt) < xfrm_alg_auth_len(algp))
 		return -EINVAL;
 
-	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
+	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
 	return 0;
 }
 
@@ -87,7 +87,7 @@ static int verify_aead(struct nlattr **attrs)
 	if (nla_len(rt) < aead_len(algp))
 		return -EINVAL;
 
-	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
+	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
 	return 0;
 }
 
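The xfrm_user changes switch the forced NUL-termination from the CRYPTO_MAX_ALG_NAME constant to sizeof() on the field itself, so the UAPI struct, not the (now larger) in-kernel limit, decides where the terminator lands. A self-contained userspace illustration of the idiom, using a stand-in struct rather than the real xfrm types:

#include <stdio.h>
#include <string.h>

struct alg { char alg_name[64]; };

/* sizeof() ties the terminator index to the field's real size, so
 * the code stays correct even if the sizing macro changes. */
static void terminate_name(struct alg *a)
{
	a->alg_name[sizeof(a->alg_name) - 1] = '\0';
}

int main(void)
{
	struct alg a;

	memset(a.alg_name, 'x', sizeof(a.alg_name));
	terminate_name(&a);
	printf("len=%zu\n", strlen(a.alg_name)); /* prints len=63 */
	return 0;
}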