author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-18 15:11:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-18 15:11:14 -0400
commit     8b53c76533aa4356602aea98f98a2f3b4051464c (patch)
tree       ab10ba58e21501407f8108a6bb9003daa2176962
parent     6cfae0c26b21dce323fe8799b66cf4bc996e3565 (diff)
parent     9575d1a5c0780ea26ff8dd29c94a32be32ce3c85 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Add the ability to abort a skcipher walk.

  Algorithms:
   - Fix XTS to actually do the stealing.
   - Add library helpers for AES and DES for single-block users.
   - Add library helpers for SHA256.
   - Add new DES key verification helper.
   - Add surrounding bits for ESSIV generator.
   - Add accelerations for aegis128.
   - Add test vectors for lzo-rle.

  Drivers:
   - Add i.MX8MQ support to caam.
   - Add gcm/ccm/cfb/ofb aes support in inside-secure.
   - Add ofb/cfb aes support in media-tek.
   - Add HiSilicon ZIP accelerator support.

  Others:
   - Fix potential race condition in padata.
   - Use unbound workqueues in padata"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (311 commits)
  crypto: caam - Cast to long first before pointer conversion
  crypto: ccree - enable CTS support in AES-XTS
  crypto: inside-secure - Probe transform record cache RAM sizes
  crypto: inside-secure - Base RD fetchcount on actual RD FIFO size
  crypto: inside-secure - Base CD fetchcount on actual CD FIFO size
  crypto: inside-secure - Enable extended algorithms on newer HW
  crypto: inside-secure: Corrected configuration of EIP96_TOKEN_CTRL
  crypto: inside-secure - Add EIP97/EIP197 and endianness detection
  padata: remove cpu_index from the parallel_queue
  padata: unbind parallel jobs from specific CPUs
  padata: use separate workqueues for parallel and serial work
  padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible
  crypto: pcrypt - remove padata cpumask notifier
  padata: make padata_do_parallel find alternate callback CPU
  workqueue: require CPU hotplug read exclusion for apply_workqueue_attrs
  workqueue: unconfine alloc/apply/free_workqueue_attrs()
  padata: allocate workqueue internally
  arm64: dts: imx8mq: Add CAAM node
  random: Use wait_event_freezable() in add_hwgenerator_randomness()
  crypto: ux500 - Fix COMPILE_TEST warnings
  ...
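
As a quick orientation for the "library helpers for AES ... for single-block users" item above: the new lib/crypto/aes.c code exposes aes_expandkey(), aes_encrypt() and aes_decrypt() through <crypto/aes.h>. The fragment below is a minimal sketch of a single-block caller, not code taken from this merge; the function name and the key/data buffers are illustrative placeholders.

/* Minimal sketch: one AES block via the library helpers (assumed API). */
#include <crypto/aes.h>
#include <linux/string.h>

static int example_aes_one_block(const u8 *key, unsigned int key_len,
                                 const u8 in[AES_BLOCK_SIZE],
                                 u8 out[AES_BLOCK_SIZE])
{
        struct crypto_aes_ctx ctx;
        int err;

        err = aes_expandkey(&ctx, key, key_len);  /* 16-, 24- or 32-byte key */
        if (err)
                return err;

        aes_encrypt(&ctx, out, in);               /* exactly one 16-byte block */

        memzero_explicit(&ctx, sizeof(ctx));      /* scrub the expanded round keys */
        return 0;
}

The point of these helpers is that a single-block user gets a plain function call with no crypto_alloc_cipher() round trip; the DES and SHA256 library helpers listed above follow the same pattern.
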
-rw-r--r--Documentation/ABI/testing/debugfs-hisi-zip50
-rw-r--r--Documentation/crypto/crypto_engine.rst1
-rw-r--r--Documentation/devicetree/bindings/rng/timeriomem_rng.txt2
-rw-r--r--Documentation/padata.txt12
-rw-r--r--MAINTAINERS13
-rw-r--r--arch/arm/crypto/Kconfig2
-rw-r--r--arch/arm/crypto/aes-ce-core.S482
-rw-r--r--arch/arm/crypto/aes-ce-glue.c545
-rw-r--r--arch/arm/crypto/aes-cipher-core.S40
-rw-r--r--arch/arm/crypto/aes-cipher-glue.c11
-rw-r--r--arch/arm/crypto/aes-neonbs-core.S24
-rw-r--r--arch/arm/crypto/aes-neonbs-glue.c160
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c80
-rw-r--r--arch/arm/crypto/sha256_glue.c8
-rw-r--r--arch/arm/crypto/sha256_neon_glue.c24
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi30
-rw-r--r--arch/arm64/crypto/Kconfig10
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c18
-rw-r--r--arch/arm64/crypto/aes-ce-glue.c7
-rw-r--r--arch/arm64/crypto/aes-ce.S3
-rw-r--r--arch/arm64/crypto/aes-cipher-core.S40
-rw-r--r--arch/arm64/crypto/aes-cipher-glue.c11
-rw-r--r--arch/arm64/crypto/aes-ctr-fallback.h50
-rw-r--r--arch/arm64/crypto/aes-glue.c470
-rw-r--r--arch/arm64/crypto/aes-modes.S135
-rw-r--r--arch/arm64/crypto/aes-neon.S79
-rw-r--r--arch/arm64/crypto/aes-neonbs-core.S9
-rw-r--r--arch/arm64/crypto/aes-neonbs-glue.c140
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c30
-rw-r--r--arch/arm64/crypto/sha256-glue.c24
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/s390/crypto/aes_s390.c16
-rw-r--r--arch/s390/crypto/des_s390.c25
-rw-r--r--arch/s390/crypto/ghash_s390.c2
-rw-r--r--arch/s390/crypto/sha256_s390.c8
-rw-r--r--arch/s390/purgatory/Makefile4
-rw-r--r--arch/s390/purgatory/purgatory.c2
-rw-r--r--arch/sparc/crypto/aes_glue.c8
-rw-r--r--arch/sparc/crypto/des_glue.c37
-rw-r--r--arch/x86/crypto/Makefile21
-rw-r--r--arch/x86/crypto/aegis128l-aesni-asm.S823
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c293
-rw-r--r--arch/x86/crypto/aegis256-aesni-asm.S700
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c293
-rw-r--r--arch/x86/crypto/aes-i586-asm_32.S362
-rw-r--r--arch/x86/crypto/aes-x86_64-asm_64.S185
-rw-r--r--arch/x86/crypto/aes_glue.c70
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c46
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c4
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c4
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c4
-rw-r--r--arch/x86/crypto/des3_ede_glue.c38
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c3
-rw-r--r--arch/x86/crypto/glue_helper.c67
-rw-r--r--arch/x86/crypto/morus1280-avx2-asm.S619
-rw-r--r--arch/x86/crypto/morus1280-avx2-glue.c62
-rw-r--r--arch/x86/crypto/morus1280-sse2-asm.S893
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c61
-rw-r--r--arch/x86/crypto/morus1280_glue.c205
-rw-r--r--arch/x86/crypto/morus640-sse2-asm.S612
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c61
-rw-r--r--arch/x86/crypto/morus640_glue.c200
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c4
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c4
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c12
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c4
-rw-r--r--arch/x86/include/asm/crypto/aes.h12
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h2
-rw-r--r--arch/x86/purgatory/Makefile4
-rw-r--r--arch/x86/purgatory/purgatory.c2
-rw-r--r--crypto/Kconfig164
-rw-r--r--crypto/Makefile26
-rw-r--r--crypto/aead.c3
-rw-r--r--crypto/aegis.h39
-rw-r--r--crypto/aegis128-core.c (renamed from crypto/aegis128.c)74
-rw-r--r--crypto/aegis128-neon-inner.c212
-rw-r--r--crypto/aegis128-neon.c49
-rw-r--r--crypto/aegis128l.c522
-rw-r--r--crypto/aegis256.c473
-rw-r--r--crypto/aes_generic.c169
-rw-r--r--crypto/aes_ti.c313
-rw-r--r--crypto/cryptd.c44
-rw-r--r--crypto/crypto_engine.c2
-rw-r--r--crypto/crypto_user_base.c37
-rw-r--r--crypto/crypto_user_stat.c4
-rw-r--r--crypto/des_generic.c945
-rw-r--r--crypto/fips.c11
-rw-r--r--crypto/gcm.c47
-rw-r--r--crypto/ghash-generic.c31
-rw-r--r--crypto/morus1280.c542
-rw-r--r--crypto/morus640.c533
-rw-r--r--crypto/pcrypt.c167
-rw-r--r--crypto/sha256_generic.c224
-rw-r--r--crypto/skcipher.c42
-rw-r--r--crypto/streebog_generic.c46
-rw-r--r--crypto/tcrypt.c16
-rw-r--r--crypto/testmgr.c52
-rw-r--r--crypto/testmgr.h3268
-rw-r--r--crypto/xts.c152
-rw-r--r--drivers/char/hw_random/atmel-rng.c3
-rw-r--r--drivers/char/hw_random/cavium-rng-vf.c11
-rw-r--r--drivers/char/hw_random/core.c5
-rw-r--r--drivers/char/hw_random/exynos-trng.c3
-rw-r--r--drivers/char/hw_random/imx-rngc.c4
-rw-r--r--drivers/char/hw_random/mxc-rnga.c4
-rw-r--r--drivers/char/hw_random/n2-drv.c4
-rw-r--r--drivers/char/hw_random/nomadik-rng.c3
-rw-r--r--drivers/char/hw_random/omap-rng.c3
-rw-r--r--drivers/char/hw_random/powernv-rng.c10
-rw-r--r--drivers/char/hw_random/st-rng.c4
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c4
-rw-r--r--drivers/char/hw_random/xgene-rng.c4
-rw-r--r--drivers/char/random.c4
-rw-r--r--drivers/crypto/Kconfig56
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c24
-rw-r--r--drivers/crypto/atmel-aes.c1
-rw-r--r--drivers/crypto/atmel-i2c.c12
-rw-r--r--drivers/crypto/atmel-i2c.h12
-rw-r--r--drivers/crypto/atmel-sha.c1
-rw-r--r--drivers/crypto/atmel-sha204a.c3
-rw-r--r--drivers/crypto/atmel-tdes.c29
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c4
-rw-r--r--drivers/crypto/bcm/cipher.c92
-rw-r--r--drivers/crypto/caam/Kconfig4
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamalg.c268
-rw-r--r--drivers/crypto/caam/caamalg_desc.c56
-rw-r--r--drivers/crypto/caam/caamalg_desc.h4
-rw-r--r--drivers/crypto/caam/caamalg_qi.c257
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c325
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h31
-rw-r--r--drivers/crypto/caam/caamhash.c116
-rw-r--r--drivers/crypto/caam/caamhash_desc.c5
-rw-r--r--drivers/crypto/caam/caamhash_desc.h2
-rw-r--r--drivers/crypto/caam/caampkc.c99
-rw-r--r--drivers/crypto/caam/caamrng.c19
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.c255
-rw-r--r--drivers/crypto/caam/desc_constr.h81
-rw-r--r--drivers/crypto/caam/dpseci-debugfs.c79
-rw-r--r--drivers/crypto/caam/dpseci-debugfs.h18
-rw-r--r--drivers/crypto/caam/error.c64
-rw-r--r--drivers/crypto/caam/error.h2
-rw-r--r--drivers/crypto/caam/intern.h32
-rw-r--r--drivers/crypto/caam/jr.c124
-rw-r--r--drivers/crypto/caam/key_gen.c14
-rw-r--r--drivers/crypto/caam/pdb.h16
-rw-r--r--drivers/crypto/caam/pkc_desc.c8
-rw-r--r--drivers/crypto/caam/qi.c10
-rw-r--r--drivers/crypto/caam/qi.h26
-rw-r--r--drivers/crypto/caam/regs.h141
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c26
-rw-r--r--drivers/crypto/cavium/nitrox/Kconfig2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h235
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h8
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c158
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.h6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c66
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c148
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h30
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_sriov.c3
-rw-r--r--drivers/crypto/cavium/zip/zip_main.c3
-rw-r--r--drivers/crypto/ccp/Kconfig9
-rw-r--r--drivers/crypto/ccp/Makefile4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c25
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c7
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h8
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c3
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c26
-rw-r--r--drivers/crypto/ccp/ccp-dev.c29
-rw-r--r--drivers/crypto/ccp/ccp-dev.h3
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c13
-rw-r--r--drivers/crypto/ccp/ccp-ops.c56
-rw-r--r--drivers/crypto/ccp/psp-dev.h1
-rw-r--r--drivers/crypto/ccp/sp-dev.h1
-rw-r--r--drivers/crypto/ccp/sp-platform.c4
-rw-r--r--drivers/crypto/ccree/Makefile2
-rw-r--r--drivers/crypto/ccree/cc_aead.c129
-rw-r--r--drivers/crypto/ccree/cc_aead.h3
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c21
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_cipher.c31
-rw-r--r--drivers/crypto/ccree/cc_driver.c18
-rw-r--r--drivers/crypto/ccree/cc_driver.h10
-rw-r--r--drivers/crypto/ccree/cc_fips.c31
-rw-r--r--drivers/crypto/ccree/cc_hash.c153
-rw-r--r--drivers/crypto/ccree/cc_ivgen.c276
-rw-r--r--drivers/crypto/ccree/cc_ivgen.h55
-rw-r--r--drivers/crypto/ccree/cc_pm.c2
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c47
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c46
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h20
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c19
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_hw.c20
-rw-r--r--drivers/crypto/exynos-rng.c4
-rw-r--r--drivers/crypto/hifn_795x.c32
-rw-r--r--drivers/crypto/hisilicon/Kconfig25
-rw-r--r--drivers/crypto/hisilicon/Makefile3
-rw-r--r--drivers/crypto/hisilicon/qm.c1913
-rw-r--r--drivers/crypto/hisilicon/qm.h215
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c18
-rw-r--r--drivers/crypto/hisilicon/sgl.c214
-rw-r--r--drivers/crypto/hisilicon/sgl.h24
-rw-r--r--drivers/crypto/hisilicon/zip/Makefile2
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h71
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c653
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c1013
-rw-r--r--drivers/crypto/img-hash.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel.c1153
-rw-r--r--drivers/crypto/inside-secure/safexcel.h226
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c1528
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c625
-rw-r--r--drivers/crypto/inside-secure/safexcel_ring.c11
-rw-r--r--drivers/crypto/ixp4xx_crypto.c27
-rw-r--r--drivers/crypto/marvell/cipher.c27
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/mediatek/mtk-aes.c143
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c7
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c4
-rw-r--r--drivers/crypto/mxs-dcp.c8
-rw-r--r--drivers/crypto/n2_core.c42
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c1
-rw-r--r--drivers/crypto/nx/nx.h3
-rw-r--r--drivers/crypto/omap-aes.c1
-rw-r--r--drivers/crypto/omap-des.c28
-rw-r--r--drivers/crypto/omap-sham.c1
-rw-r--r--drivers/crypto/padlock-aes.c10
-rw-r--r--drivers/crypto/picoxcell_crypto.c29
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/qce/ablkcipher.c55
-rw-r--r--drivers/crypto/qce/core.c4
-rw-r--r--drivers/crypto/qcom-rng.c4
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c4
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c21
-rw-r--r--drivers/crypto/s5p-sss.c7
-rw-r--r--drivers/crypto/sahara.c4
-rw-r--r--drivers/crypto/stm32/Kconfig2
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c4
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c38
-rw-r--r--drivers/crypto/stm32/stm32-hash.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c26
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss.h2
-rw-r--r--drivers/crypto/talitos.c38
-rw-r--r--drivers/crypto/ux500/Kconfig2
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h4
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c39
-rw-r--r--drivers/crypto/ux500/hash/hash_alg.h4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c12
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c4
-rw-r--r--drivers/crypto/vmx/aes_xts.c2
-rw-r--r--fs/cifs/Kconfig2
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/smbencrypt.c18
-rw-r--r--include/asm-generic/Kbuild2
-rw-r--r--include/crypto/aes.h58
-rw-r--r--include/crypto/algapi.h2
-rw-r--r--include/crypto/ctr.h50
-rw-r--r--include/crypto/des.h77
-rw-r--r--include/crypto/gcm.h55
-rw-r--r--include/crypto/ghash.h2
-rw-r--r--include/crypto/internal/cryptouser.h7
-rw-r--r--include/crypto/internal/des.h152
-rw-r--r--include/crypto/internal/skcipher.h5
-rw-r--r--include/crypto/morus1280_glue.h97
-rw-r--r--include/crypto/morus640_glue.h97
-rw-r--r--include/crypto/morus_common.h18
-rw-r--r--include/crypto/sha.h47
-rw-r--r--include/crypto/sha1_base.h5
-rw-r--r--include/crypto/sha256_base.h29
-rw-r--r--include/crypto/sha512_base.h5
-rw-r--r--include/crypto/sm3_base.h5
-rw-r--r--include/linux/fips.h7
-rw-r--r--include/linux/padata.h29
-rw-r--r--include/linux/sha256.h28
-rw-r--r--include/linux/timeriomem-rng.h5
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/uapi/linux/cryptouser.h5
-rw-r--r--kernel/padata.c307
-rw-r--r--kernel/workqueue.c25
-rw-r--r--lib/crypto/Makefile9
-rw-r--r--lib/crypto/aes.c356
-rw-r--r--lib/crypto/des.c902
-rw-r--r--lib/crypto/sha256.c (renamed from lib/sha256.c)150
-rw-r--r--lib/mpi/longlong.h36
-rw-r--r--net/bluetooth/Kconfig3
-rw-r--r--net/bluetooth/smp.c103
-rw-r--r--tools/crypto/getstat.c294
297 files changed, 14595 insertions, 17387 deletions
diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip
new file mode 100644
index 000000000000..a7c63e6c4bc3
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-hisi-zip
@@ -0,0 +1,50 @@
1What: /sys/kernel/debug/hisi_zip/<bdf>/comp_core[01]/regs
2Date: Nov 2018
3Contact: linux-crypto@vger.kernel.org
4Description: Dump of compression cores related debug registers.
5 Only available for PF.
6
7What: /sys/kernel/debug/hisi_zip/<bdf>/decomp_core[0-5]/regs
8Date: Nov 2018
9Contact: linux-crypto@vger.kernel.org
10Description: Dump of decompression cores related debug registers.
11 Only available for PF.
12
13What: /sys/kernel/debug/hisi_zip/<bdf>/clear_enable
14Date: Nov 2018
15Contact: linux-crypto@vger.kernel.org
16Description: Compression/decompression core debug registers read clear
17 control. 1 means enable register read clear, otherwise 0.
18 Writing to this file has no functional effect, only enable or
19 disable counters clear after reading of these registers.
20 Only available for PF.
21
22What: /sys/kernel/debug/hisi_zip/<bdf>/current_qm
23Date: Nov 2018
24Contact: linux-crypto@vger.kernel.org
25Description: One ZIP controller has one PF and multiple VFs, each function
26 has a QM. Select the QM which below qm refers to.
27 Only available for PF.
28
29What: /sys/kernel/debug/hisi_zip/<bdf>/qm/qm_regs
30Date: Nov 2018
31Contact: linux-crypto@vger.kernel.org
32Description: Dump of QM related debug registers.
33 Available for PF and VF in host. VF in guest currently only
34 has one debug register.
35
36What: /sys/kernel/debug/hisi_zip/<bdf>/qm/current_q
37Date: Nov 2018
38Contact: linux-crypto@vger.kernel.org
39Description: One QM may contain multiple queues. Select specific queue to
40 show its debug registers in above qm_regs.
41 Only available for PF.
42
43What: /sys/kernel/debug/hisi_zip/<bdf>/qm/clear_enable
44Date: Nov 2018
45Contact: linux-crypto@vger.kernel.org
46Description: QM debug registers(qm_regs) read clear control. 1 means enable
47 register read clear, otherwise 0.
48 Writing to this file has no functional effect, only enable or
49 disable counters clear after reading of these registers.
50 Only available for PF.
diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst
index 236c674d6897..3baa23c2cd08 100644
--- a/Documentation/crypto/crypto_engine.rst
+++ b/Documentation/crypto/crypto_engine.rst
@@ -1,4 +1,5 @@
1.. SPDX-License-Identifier: GPL-2.0 1.. SPDX-License-Identifier: GPL-2.0
2
2Crypto Engine 3Crypto Engine
3============= 4=============
4 5
diff --git a/Documentation/devicetree/bindings/rng/timeriomem_rng.txt b/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
index 214940093b55..fb4846160047 100644
--- a/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
+++ b/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
@@ -12,7 +12,7 @@ Optional properties:
12 which disables using this rng to automatically fill the kernel's 12 which disables using this rng to automatically fill the kernel's
13 entropy pool. 13 entropy pool.
14 14
15N.B. currently 'reg' must be four bytes wide and aligned 15N.B. currently 'reg' must be at least four bytes wide and 32-bit aligned
16 16
17Example: 17Example:
18 18
diff --git a/Documentation/padata.txt b/Documentation/padata.txt
index b103d0c82000..b37ba1eaace3 100644
--- a/Documentation/padata.txt
+++ b/Documentation/padata.txt
@@ -16,10 +16,12 @@ overall control of how tasks are to be run::
16 16
17 #include <linux/padata.h> 17 #include <linux/padata.h>
18 18
19 struct padata_instance *padata_alloc(struct workqueue_struct *wq, 19 struct padata_instance *padata_alloc(const char *name,
20 const struct cpumask *pcpumask, 20 const struct cpumask *pcpumask,
21 const struct cpumask *cbcpumask); 21 const struct cpumask *cbcpumask);
22 22
23'name' simply identifies the instance.
24
23The pcpumask describes which processors will be used to execute work 25The pcpumask describes which processors will be used to execute work
24submitted to this instance in parallel. The cbcpumask defines which 26submitted to this instance in parallel. The cbcpumask defines which
25processors are allowed to be used as the serialization callback processor. 27processors are allowed to be used as the serialization callback processor.
@@ -128,8 +130,7 @@ in that CPU mask or about a not running instance.
128 130
129Each task submitted to padata_do_parallel() will, in turn, be passed to 131Each task submitted to padata_do_parallel() will, in turn, be passed to
130exactly one call to the above-mentioned parallel() function, on one CPU, so 132exactly one call to the above-mentioned parallel() function, on one CPU, so
131true parallelism is achieved by submitting multiple tasks. Despite the 133true parallelism is achieved by submitting multiple tasks. parallel() runs with
132fact that the workqueue is used to make these calls, parallel() is run with
133software interrupts disabled and thus cannot sleep. The parallel() 134software interrupts disabled and thus cannot sleep. The parallel()
134function gets the padata_priv structure pointer as its lone parameter; 135function gets the padata_priv structure pointer as its lone parameter;
135information about the actual work to be done is probably obtained by using 136information about the actual work to be done is probably obtained by using
@@ -148,7 +149,7 @@ fact with a call to::
148At some point in the future, padata_do_serial() will trigger a call to the 149At some point in the future, padata_do_serial() will trigger a call to the
149serial() function in the padata_priv structure. That call will happen on 150serial() function in the padata_priv structure. That call will happen on
150the CPU requested in the initial call to padata_do_parallel(); it, too, is 151the CPU requested in the initial call to padata_do_parallel(); it, too, is
151done through the workqueue, but with local software interrupts disabled. 152run with local software interrupts disabled.
152Note that this call may be deferred for a while since the padata code takes 153Note that this call may be deferred for a while since the padata code takes
153pains to ensure that tasks are completed in the order in which they were 154pains to ensure that tasks are completed in the order in which they were
154submitted. 155submitted.
@@ -159,5 +160,4 @@ when a padata instance is no longer needed::
159 void padata_free(struct padata_instance *pinst); 160 void padata_free(struct padata_instance *pinst);
160 161
161This function will busy-wait while any remaining tasks are completed, so it 162This function will busy-wait while any remaining tasks are completed, so it
162might be best not to call it while there is work outstanding. Shutting 163might be best not to call it while there is work outstanding.
163down the workqueue, if necessary, should be done separately.
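
Tying the padata_alloc()/parallel()/serial() description above together, here is a minimal sketch of a padata client as it would look after this series. The struct my_job wrapper, its callbacks and my_submit() are invented for illustration, and the padata_do_parallel() signature (callback CPU passed by pointer) is assumed from the "make padata_do_parallel find alternate callback CPU" patch in the shortlog; treat it as a sketch rather than reference code.

/* Hedged sketch of a padata client; all names here are illustrative only. */
#include <linux/kernel.h>
#include <linux/padata.h>
#include <linux/slab.h>

struct my_job {
        struct padata_priv padata;      /* embedded, handed to padata */
        bool parallel_done;             /* stand-in for real job state */
};

static void my_parallel(struct padata_priv *padata)
{
        struct my_job *job = container_of(padata, struct my_job, padata);

        /* Runs with softirqs disabled, so it must not sleep. */
        job->parallel_done = true;      /* the real CPU-heavy work goes here */

        padata_do_serial(padata);       /* queue for in-order completion */
}

static void my_serial(struct padata_priv *padata)
{
        struct my_job *job = container_of(padata, struct my_job, padata);

        /* Runs on the callback CPU, in submission order. */
        WARN_ON(!job->parallel_done);
        kfree(job);
}

static int my_submit(struct padata_instance *pinst, int *cb_cpu)
{
        struct my_job *job = kzalloc(sizeof(*job), GFP_ATOMIC);
        int err;

        if (!job)
                return -ENOMEM;

        job->padata.parallel = my_parallel;
        job->padata.serial = my_serial;

        err = padata_do_parallel(pinst, &job->padata, cb_cpu);
        if (err)
                kfree(job);             /* not queued, e.g. -EBUSY or -EINVAL */
        return err;
}

The instance itself would come from padata_alloc() (or padata_alloc_possible()) as shown in the hunk above, be started with padata_start(), and be torn down with padata_free() once all outstanding jobs have completed.
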
diff --git a/MAINTAINERS b/MAINTAINERS
index 6e1b0ac2441b..05033aa17f29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7350,6 +7350,17 @@ S: Supported
7350F: drivers/scsi/hisi_sas/ 7350F: drivers/scsi/hisi_sas/
7351F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt 7351F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
7352 7352
7353HISILICON QM AND ZIP Controller DRIVER
7354M: Zhou Wang <wangzhou1@hisilicon.com>
7355L: linux-crypto@vger.kernel.org
7356S: Maintained
7357F: drivers/crypto/hisilicon/qm.c
7358F: drivers/crypto/hisilicon/qm.h
7359F: drivers/crypto/hisilicon/sgl.c
7360F: drivers/crypto/hisilicon/sgl.h
7361F: drivers/crypto/hisilicon/zip/
7362F: Documentation/ABI/testing/debugfs-hisi-zip
7363
7353HMM - Heterogeneous Memory Management 7364HMM - Heterogeneous Memory Management
7354M: Jérôme Glisse <jglisse@redhat.com> 7365M: Jérôme Glisse <jglisse@redhat.com>
7355L: linux-mm@kvack.org 7366L: linux-mm@kvack.org
@@ -7703,7 +7714,7 @@ F: drivers/crypto/nx/nx-aes*
7703F: drivers/crypto/nx/nx-sha* 7714F: drivers/crypto/nx/nx-sha*
7704F: drivers/crypto/nx/nx.* 7715F: drivers/crypto/nx/nx.*
7705F: drivers/crypto/nx/nx_csbcpb.h 7716F: drivers/crypto/nx/nx_csbcpb.h
7706F: drivers/crypto/nx/nx_debugfs.h 7717F: drivers/crypto/nx/nx_debugfs.c
7707 7718
7708IBM Power Linux RAID adapter 7719IBM Power Linux RAID adapter
7709M: Brian King <brking@us.ibm.com> 7720M: Brian King <brking@us.ibm.com>
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index a95322b59799..b24df84a1d7a 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -82,8 +82,8 @@ config CRYPTO_AES_ARM_BS
82 tristate "Bit sliced AES using NEON instructions" 82 tristate "Bit sliced AES using NEON instructions"
83 depends on KERNEL_MODE_NEON 83 depends on KERNEL_MODE_NEON
84 select CRYPTO_BLKCIPHER 84 select CRYPTO_BLKCIPHER
85 select CRYPTO_LIB_AES
85 select CRYPTO_SIMD 86 select CRYPTO_SIMD
86 select CRYPTO_AES
87 help 87 help
88 Use a faster and more secure NEON based implementation of AES in CBC, 88 Use a faster and more secure NEON based implementation of AES in CBC,
89 CTR and XTS modes 89 CTR and XTS modes
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
index caac519d6249..b978cdf133af 100644
--- a/arch/arm/crypto/aes-ce-core.S
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -44,63 +44,73 @@
44 veor q0, q0, \key3 44 veor q0, q0, \key3
45 .endm 45 .endm
46 46
47 .macro enc_dround_3x, key1, key2 47 .macro enc_dround_4x, key1, key2
48 enc_round q0, \key1 48 enc_round q0, \key1
49 enc_round q1, \key1 49 enc_round q1, \key1
50 enc_round q2, \key1 50 enc_round q2, \key1
51 enc_round q3, \key1
51 enc_round q0, \key2 52 enc_round q0, \key2
52 enc_round q1, \key2 53 enc_round q1, \key2
53 enc_round q2, \key2 54 enc_round q2, \key2
55 enc_round q3, \key2
54 .endm 56 .endm
55 57
56 .macro dec_dround_3x, key1, key2 58 .macro dec_dround_4x, key1, key2
57 dec_round q0, \key1 59 dec_round q0, \key1
58 dec_round q1, \key1 60 dec_round q1, \key1
59 dec_round q2, \key1 61 dec_round q2, \key1
62 dec_round q3, \key1
60 dec_round q0, \key2 63 dec_round q0, \key2
61 dec_round q1, \key2 64 dec_round q1, \key2
62 dec_round q2, \key2 65 dec_round q2, \key2
66 dec_round q3, \key2
63 .endm 67 .endm
64 68
65 .macro enc_fround_3x, key1, key2, key3 69 .macro enc_fround_4x, key1, key2, key3
66 enc_round q0, \key1 70 enc_round q0, \key1
67 enc_round q1, \key1 71 enc_round q1, \key1
68 enc_round q2, \key1 72 enc_round q2, \key1
73 enc_round q3, \key1
69 aese.8 q0, \key2 74 aese.8 q0, \key2
70 aese.8 q1, \key2 75 aese.8 q1, \key2
71 aese.8 q2, \key2 76 aese.8 q2, \key2
77 aese.8 q3, \key2
72 veor q0, q0, \key3 78 veor q0, q0, \key3
73 veor q1, q1, \key3 79 veor q1, q1, \key3
74 veor q2, q2, \key3 80 veor q2, q2, \key3
81 veor q3, q3, \key3
75 .endm 82 .endm
76 83
77 .macro dec_fround_3x, key1, key2, key3 84 .macro dec_fround_4x, key1, key2, key3
78 dec_round q0, \key1 85 dec_round q0, \key1
79 dec_round q1, \key1 86 dec_round q1, \key1
80 dec_round q2, \key1 87 dec_round q2, \key1
88 dec_round q3, \key1
81 aesd.8 q0, \key2 89 aesd.8 q0, \key2
82 aesd.8 q1, \key2 90 aesd.8 q1, \key2
83 aesd.8 q2, \key2 91 aesd.8 q2, \key2
92 aesd.8 q3, \key2
84 veor q0, q0, \key3 93 veor q0, q0, \key3
85 veor q1, q1, \key3 94 veor q1, q1, \key3
86 veor q2, q2, \key3 95 veor q2, q2, \key3
96 veor q3, q3, \key3
87 .endm 97 .endm
88 98
89 .macro do_block, dround, fround 99 .macro do_block, dround, fround
90 cmp r3, #12 @ which key size? 100 cmp r3, #12 @ which key size?
91 vld1.8 {q10-q11}, [ip]! 101 vld1.32 {q10-q11}, [ip]!
92 \dround q8, q9 102 \dround q8, q9
93 vld1.8 {q12-q13}, [ip]! 103 vld1.32 {q12-q13}, [ip]!
94 \dround q10, q11 104 \dround q10, q11
95 vld1.8 {q10-q11}, [ip]! 105 vld1.32 {q10-q11}, [ip]!
96 \dround q12, q13 106 \dround q12, q13
97 vld1.8 {q12-q13}, [ip]! 107 vld1.32 {q12-q13}, [ip]!
98 \dround q10, q11 108 \dround q10, q11
99 blo 0f @ AES-128: 10 rounds 109 blo 0f @ AES-128: 10 rounds
100 vld1.8 {q10-q11}, [ip]! 110 vld1.32 {q10-q11}, [ip]!
101 \dround q12, q13 111 \dround q12, q13
102 beq 1f @ AES-192: 12 rounds 112 beq 1f @ AES-192: 12 rounds
103 vld1.8 {q12-q13}, [ip] 113 vld1.32 {q12-q13}, [ip]
104 \dround q10, q11 114 \dround q10, q11
1050: \fround q12, q13, q14 1150: \fround q12, q13, q14
106 bx lr 116 bx lr
@@ -114,8 +124,9 @@
114 * transforms. These should preserve all registers except q0 - q2 and ip 124 * transforms. These should preserve all registers except q0 - q2 and ip
115 * Arguments: 125 * Arguments:
116 * q0 : first in/output block 126 * q0 : first in/output block
117 * q1 : second in/output block (_3x version only) 127 * q1 : second in/output block (_4x version only)
118 * q2 : third in/output block (_3x version only) 128 * q2 : third in/output block (_4x version only)
129 * q3 : fourth in/output block (_4x version only)
119 * q8 : first round key 130 * q8 : first round key
120 * q9 : secound round key 131 * q9 : secound round key
121 * q14 : final round key 132 * q14 : final round key
@@ -136,44 +147,44 @@ aes_decrypt:
136ENDPROC(aes_decrypt) 147ENDPROC(aes_decrypt)
137 148
138 .align 6 149 .align 6
139aes_encrypt_3x: 150aes_encrypt_4x:
140 add ip, r2, #32 @ 3rd round key 151 add ip, r2, #32 @ 3rd round key
141 do_block enc_dround_3x, enc_fround_3x 152 do_block enc_dround_4x, enc_fround_4x
142ENDPROC(aes_encrypt_3x) 153ENDPROC(aes_encrypt_4x)
143 154
144 .align 6 155 .align 6
145aes_decrypt_3x: 156aes_decrypt_4x:
146 add ip, r2, #32 @ 3rd round key 157 add ip, r2, #32 @ 3rd round key
147 do_block dec_dround_3x, dec_fround_3x 158 do_block dec_dround_4x, dec_fround_4x
148ENDPROC(aes_decrypt_3x) 159ENDPROC(aes_decrypt_4x)
149 160
150 .macro prepare_key, rk, rounds 161 .macro prepare_key, rk, rounds
151 add ip, \rk, \rounds, lsl #4 162 add ip, \rk, \rounds, lsl #4
152 vld1.8 {q8-q9}, [\rk] @ load first 2 round keys 163 vld1.32 {q8-q9}, [\rk] @ load first 2 round keys
153 vld1.8 {q14}, [ip] @ load last round key 164 vld1.32 {q14}, [ip] @ load last round key
154 .endm 165 .endm
155 166
156 /* 167 /*
157 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 168 * aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
158 * int blocks) 169 * int blocks)
159 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 170 * aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
160 * int blocks) 171 * int blocks)
161 */ 172 */
162ENTRY(ce_aes_ecb_encrypt) 173ENTRY(ce_aes_ecb_encrypt)
163 push {r4, lr} 174 push {r4, lr}
164 ldr r4, [sp, #8] 175 ldr r4, [sp, #8]
165 prepare_key r2, r3 176 prepare_key r2, r3
166.Lecbencloop3x: 177.Lecbencloop4x:
167 subs r4, r4, #3 178 subs r4, r4, #4
168 bmi .Lecbenc1x 179 bmi .Lecbenc1x
169 vld1.8 {q0-q1}, [r1]! 180 vld1.8 {q0-q1}, [r1]!
170 vld1.8 {q2}, [r1]! 181 vld1.8 {q2-q3}, [r1]!
171 bl aes_encrypt_3x 182 bl aes_encrypt_4x
172 vst1.8 {q0-q1}, [r0]! 183 vst1.8 {q0-q1}, [r0]!
173 vst1.8 {q2}, [r0]! 184 vst1.8 {q2-q3}, [r0]!
174 b .Lecbencloop3x 185 b .Lecbencloop4x
175.Lecbenc1x: 186.Lecbenc1x:
176 adds r4, r4, #3 187 adds r4, r4, #4
177 beq .Lecbencout 188 beq .Lecbencout
178.Lecbencloop: 189.Lecbencloop:
179 vld1.8 {q0}, [r1]! 190 vld1.8 {q0}, [r1]!
@@ -189,17 +200,17 @@ ENTRY(ce_aes_ecb_decrypt)
189 push {r4, lr} 200 push {r4, lr}
190 ldr r4, [sp, #8] 201 ldr r4, [sp, #8]
191 prepare_key r2, r3 202 prepare_key r2, r3
192.Lecbdecloop3x: 203.Lecbdecloop4x:
193 subs r4, r4, #3 204 subs r4, r4, #4
194 bmi .Lecbdec1x 205 bmi .Lecbdec1x
195 vld1.8 {q0-q1}, [r1]! 206 vld1.8 {q0-q1}, [r1]!
196 vld1.8 {q2}, [r1]! 207 vld1.8 {q2-q3}, [r1]!
197 bl aes_decrypt_3x 208 bl aes_decrypt_4x
198 vst1.8 {q0-q1}, [r0]! 209 vst1.8 {q0-q1}, [r0]!
199 vst1.8 {q2}, [r0]! 210 vst1.8 {q2-q3}, [r0]!
200 b .Lecbdecloop3x 211 b .Lecbdecloop4x
201.Lecbdec1x: 212.Lecbdec1x:
202 adds r4, r4, #3 213 adds r4, r4, #4
203 beq .Lecbdecout 214 beq .Lecbdecout
204.Lecbdecloop: 215.Lecbdecloop:
205 vld1.8 {q0}, [r1]! 216 vld1.8 {q0}, [r1]!
@@ -212,9 +223,9 @@ ENTRY(ce_aes_ecb_decrypt)
212ENDPROC(ce_aes_ecb_decrypt) 223ENDPROC(ce_aes_ecb_decrypt)
213 224
214 /* 225 /*
215 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 226 * aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
216 * int blocks, u8 iv[]) 227 * int blocks, u8 iv[])
217 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 228 * aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
218 * int blocks, u8 iv[]) 229 * int blocks, u8 iv[])
219 */ 230 */
220ENTRY(ce_aes_cbc_encrypt) 231ENTRY(ce_aes_cbc_encrypt)
@@ -236,88 +247,181 @@ ENDPROC(ce_aes_cbc_encrypt)
236ENTRY(ce_aes_cbc_decrypt) 247ENTRY(ce_aes_cbc_decrypt)
237 push {r4-r6, lr} 248 push {r4-r6, lr}
238 ldrd r4, r5, [sp, #16] 249 ldrd r4, r5, [sp, #16]
239 vld1.8 {q6}, [r5] @ keep iv in q6 250 vld1.8 {q15}, [r5] @ keep iv in q15
240 prepare_key r2, r3 251 prepare_key r2, r3
241.Lcbcdecloop3x: 252.Lcbcdecloop4x:
242 subs r4, r4, #3 253 subs r4, r4, #4
243 bmi .Lcbcdec1x 254 bmi .Lcbcdec1x
244 vld1.8 {q0-q1}, [r1]! 255 vld1.8 {q0-q1}, [r1]!
245 vld1.8 {q2}, [r1]! 256 vld1.8 {q2-q3}, [r1]!
246 vmov q3, q0 257 vmov q4, q0
247 vmov q4, q1 258 vmov q5, q1
248 vmov q5, q2 259 vmov q6, q2
249 bl aes_decrypt_3x 260 vmov q7, q3
250 veor q0, q0, q6 261 bl aes_decrypt_4x
251 veor q1, q1, q3 262 veor q0, q0, q15
252 veor q2, q2, q4 263 veor q1, q1, q4
253 vmov q6, q5 264 veor q2, q2, q5
265 veor q3, q3, q6
266 vmov q15, q7
254 vst1.8 {q0-q1}, [r0]! 267 vst1.8 {q0-q1}, [r0]!
255 vst1.8 {q2}, [r0]! 268 vst1.8 {q2-q3}, [r0]!
256 b .Lcbcdecloop3x 269 b .Lcbcdecloop4x
257.Lcbcdec1x: 270.Lcbcdec1x:
258 adds r4, r4, #3 271 adds r4, r4, #4
259 beq .Lcbcdecout 272 beq .Lcbcdecout
260 vmov q15, q14 @ preserve last round key 273 vmov q6, q14 @ preserve last round key
261.Lcbcdecloop: 274.Lcbcdecloop:
262 vld1.8 {q0}, [r1]! @ get next ct block 275 vld1.8 {q0}, [r1]! @ get next ct block
263 veor q14, q15, q6 @ combine prev ct with last key 276 veor q14, q15, q6 @ combine prev ct with last key
264 vmov q6, q0 277 vmov q15, q0
265 bl aes_decrypt 278 bl aes_decrypt
266 vst1.8 {q0}, [r0]! 279 vst1.8 {q0}, [r0]!
267 subs r4, r4, #1 280 subs r4, r4, #1
268 bne .Lcbcdecloop 281 bne .Lcbcdecloop
269.Lcbcdecout: 282.Lcbcdecout:
270 vst1.8 {q6}, [r5] @ keep iv in q6 283 vst1.8 {q15}, [r5] @ keep iv in q15
271 pop {r4-r6, pc} 284 pop {r4-r6, pc}
272ENDPROC(ce_aes_cbc_decrypt) 285ENDPROC(ce_aes_cbc_decrypt)
273 286
287
274 /* 288 /*
275 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 289 * ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
290 * int rounds, int bytes, u8 const iv[])
291 * ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
292 * int rounds, int bytes, u8 const iv[])
293 */
294
295ENTRY(ce_aes_cbc_cts_encrypt)
296 push {r4-r6, lr}
297 ldrd r4, r5, [sp, #16]
298
299 movw ip, :lower16:.Lcts_permute_table
300 movt ip, :upper16:.Lcts_permute_table
301 sub r4, r4, #16
302 add lr, ip, #32
303 add ip, ip, r4
304 sub lr, lr, r4
305 vld1.8 {q5}, [ip]
306 vld1.8 {q6}, [lr]
307
308 add ip, r1, r4
309 vld1.8 {q0}, [r1] @ overlapping loads
310 vld1.8 {q3}, [ip]
311
312 vld1.8 {q1}, [r5] @ get iv
313 prepare_key r2, r3
314
315 veor q0, q0, q1 @ xor with iv
316 bl aes_encrypt
317
318 vtbl.8 d4, {d0-d1}, d10
319 vtbl.8 d5, {d0-d1}, d11
320 vtbl.8 d2, {d6-d7}, d12
321 vtbl.8 d3, {d6-d7}, d13
322
323 veor q0, q0, q1
324 bl aes_encrypt
325
326 add r4, r0, r4
327 vst1.8 {q2}, [r4] @ overlapping stores
328 vst1.8 {q0}, [r0]
329
330 pop {r4-r6, pc}
331ENDPROC(ce_aes_cbc_cts_encrypt)
332
333ENTRY(ce_aes_cbc_cts_decrypt)
334 push {r4-r6, lr}
335 ldrd r4, r5, [sp, #16]
336
337 movw ip, :lower16:.Lcts_permute_table
338 movt ip, :upper16:.Lcts_permute_table
339 sub r4, r4, #16
340 add lr, ip, #32
341 add ip, ip, r4
342 sub lr, lr, r4
343 vld1.8 {q5}, [ip]
344 vld1.8 {q6}, [lr]
345
346 add ip, r1, r4
347 vld1.8 {q0}, [r1] @ overlapping loads
348 vld1.8 {q1}, [ip]
349
350 vld1.8 {q3}, [r5] @ get iv
351 prepare_key r2, r3
352
353 bl aes_decrypt
354
355 vtbl.8 d4, {d0-d1}, d10
356 vtbl.8 d5, {d0-d1}, d11
357 vtbx.8 d0, {d2-d3}, d12
358 vtbx.8 d1, {d2-d3}, d13
359
360 veor q1, q1, q2
361 bl aes_decrypt
362 veor q0, q0, q3 @ xor with iv
363
364 add r4, r0, r4
365 vst1.8 {q1}, [r4] @ overlapping stores
366 vst1.8 {q0}, [r0]
367
368 pop {r4-r6, pc}
369ENDPROC(ce_aes_cbc_cts_decrypt)
370
371
372 /*
373 * aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
276 * int blocks, u8 ctr[]) 374 * int blocks, u8 ctr[])
277 */ 375 */
278ENTRY(ce_aes_ctr_encrypt) 376ENTRY(ce_aes_ctr_encrypt)
279 push {r4-r6, lr} 377 push {r4-r6, lr}
280 ldrd r4, r5, [sp, #16] 378 ldrd r4, r5, [sp, #16]
281 vld1.8 {q6}, [r5] @ load ctr 379 vld1.8 {q7}, [r5] @ load ctr
282 prepare_key r2, r3 380 prepare_key r2, r3
283 vmov r6, s27 @ keep swabbed ctr in r6 381 vmov r6, s31 @ keep swabbed ctr in r6
284 rev r6, r6 382 rev r6, r6
285 cmn r6, r4 @ 32 bit overflow? 383 cmn r6, r4 @ 32 bit overflow?
286 bcs .Lctrloop 384 bcs .Lctrloop
287.Lctrloop3x: 385.Lctrloop4x:
288 subs r4, r4, #3 386 subs r4, r4, #4
289 bmi .Lctr1x 387 bmi .Lctr1x
290 add r6, r6, #1 388 add r6, r6, #1
291 vmov q0, q6 389 vmov q0, q7
292 vmov q1, q6 390 vmov q1, q7
293 rev ip, r6 391 rev ip, r6
294 add r6, r6, #1 392 add r6, r6, #1
295 vmov q2, q6 393 vmov q2, q7
296 vmov s7, ip 394 vmov s7, ip
297 rev ip, r6 395 rev ip, r6
298 add r6, r6, #1 396 add r6, r6, #1
397 vmov q3, q7
299 vmov s11, ip 398 vmov s11, ip
300 vld1.8 {q3-q4}, [r1]! 399 rev ip, r6
301 vld1.8 {q5}, [r1]! 400 add r6, r6, #1
302 bl aes_encrypt_3x 401 vmov s15, ip
303 veor q0, q0, q3 402 vld1.8 {q4-q5}, [r1]!
304 veor q1, q1, q4 403 vld1.8 {q6}, [r1]!
305 veor q2, q2, q5 404 vld1.8 {q15}, [r1]!
405 bl aes_encrypt_4x
406 veor q0, q0, q4
407 veor q1, q1, q5
408 veor q2, q2, q6
409 veor q3, q3, q15
306 rev ip, r6 410 rev ip, r6
307 vst1.8 {q0-q1}, [r0]! 411 vst1.8 {q0-q1}, [r0]!
308 vst1.8 {q2}, [r0]! 412 vst1.8 {q2-q3}, [r0]!
309 vmov s27, ip 413 vmov s31, ip
310 b .Lctrloop3x 414 b .Lctrloop4x
311.Lctr1x: 415.Lctr1x:
312 adds r4, r4, #3 416 adds r4, r4, #4
313 beq .Lctrout 417 beq .Lctrout
314.Lctrloop: 418.Lctrloop:
315 vmov q0, q6 419 vmov q0, q7
316 bl aes_encrypt 420 bl aes_encrypt
317 421
318 adds r6, r6, #1 @ increment BE ctr 422 adds r6, r6, #1 @ increment BE ctr
319 rev ip, r6 423 rev ip, r6
320 vmov s27, ip 424 vmov s31, ip
321 bcs .Lctrcarry 425 bcs .Lctrcarry
322 426
323.Lctrcarrydone: 427.Lctrcarrydone:
@@ -329,7 +433,7 @@ ENTRY(ce_aes_ctr_encrypt)
329 bne .Lctrloop 433 bne .Lctrloop
330 434
331.Lctrout: 435.Lctrout:
332 vst1.8 {q6}, [r5] @ return next CTR value 436 vst1.8 {q7}, [r5] @ return next CTR value
333 pop {r4-r6, pc} 437 pop {r4-r6, pc}
334 438
335.Lctrtailblock: 439.Lctrtailblock:
@@ -337,7 +441,7 @@ ENTRY(ce_aes_ctr_encrypt)
337 b .Lctrout 441 b .Lctrout
338 442
339.Lctrcarry: 443.Lctrcarry:
340 .irp sreg, s26, s25, s24 444 .irp sreg, s30, s29, s28
341 vmov ip, \sreg @ load next word of ctr 445 vmov ip, \sreg @ load next word of ctr
342 rev ip, ip @ ... to handle the carry 446 rev ip, ip @ ... to handle the carry
343 adds ip, ip, #1 447 adds ip, ip, #1
@@ -349,10 +453,10 @@ ENTRY(ce_aes_ctr_encrypt)
349ENDPROC(ce_aes_ctr_encrypt) 453ENDPROC(ce_aes_ctr_encrypt)
350 454
351 /* 455 /*
352 * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, 456 * aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
353 * int blocks, u8 iv[], u8 const rk2[], int first) 457 * int bytes, u8 iv[], u32 const rk2[], int first)
354 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, 458 * aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
355 * int blocks, u8 iv[], u8 const rk2[], int first) 459 * int bytes, u8 iv[], u32 const rk2[], int first)
356 */ 460 */
357 461
358 .macro next_tweak, out, in, const, tmp 462 .macro next_tweak, out, in, const, tmp
@@ -363,13 +467,10 @@ ENDPROC(ce_aes_ctr_encrypt)
363 veor \out, \out, \tmp 467 veor \out, \out, \tmp
364 .endm 468 .endm
365 469
366 .align 3
367.Lxts_mul_x:
368 .quad 1, 0x87
369
370ce_aes_xts_init: 470ce_aes_xts_init:
371 vldr d14, .Lxts_mul_x 471 vmov.i32 d30, #0x87 @ compose tweak mask vector
372 vldr d15, .Lxts_mul_x + 8 472 vmovl.u32 q15, d30
473 vshr.u64 d30, d31, #7
373 474
374 ldrd r4, r5, [sp, #16] @ load args 475 ldrd r4, r5, [sp, #16] @ load args
375 ldr r6, [sp, #28] 476 ldr r6, [sp, #28]
@@ -390,49 +491,86 @@ ENTRY(ce_aes_xts_encrypt)
390 491
391 bl ce_aes_xts_init @ run shared prologue 492 bl ce_aes_xts_init @ run shared prologue
392 prepare_key r2, r3 493 prepare_key r2, r3
393 vmov q3, q0 494 vmov q4, q0
394 495
395 teq r6, #0 @ start of a block? 496 teq r6, #0 @ start of a block?
396 bne .Lxtsenc3x 497 bne .Lxtsenc4x
397 498
398.Lxtsencloop3x: 499.Lxtsencloop4x:
399 next_tweak q3, q3, q7, q6 500 next_tweak q4, q4, q15, q10
400.Lxtsenc3x: 501.Lxtsenc4x:
401 subs r4, r4, #3 502 subs r4, r4, #64
402 bmi .Lxtsenc1x 503 bmi .Lxtsenc1x
403 vld1.8 {q0-q1}, [r1]! @ get 3 pt blocks 504 vld1.8 {q0-q1}, [r1]! @ get 4 pt blocks
404 vld1.8 {q2}, [r1]! 505 vld1.8 {q2-q3}, [r1]!
405 next_tweak q4, q3, q7, q6 506 next_tweak q5, q4, q15, q10
406 veor q0, q0, q3 507 veor q0, q0, q4
407 next_tweak q5, q4, q7, q6 508 next_tweak q6, q5, q15, q10
408 veor q1, q1, q4 509 veor q1, q1, q5
409 veor q2, q2, q5 510 next_tweak q7, q6, q15, q10
410 bl aes_encrypt_3x 511 veor q2, q2, q6
411 veor q0, q0, q3 512 veor q3, q3, q7
412 veor q1, q1, q4 513 bl aes_encrypt_4x
413 veor q2, q2, q5 514 veor q0, q0, q4
414 vst1.8 {q0-q1}, [r0]! @ write 3 ct blocks 515 veor q1, q1, q5
415 vst1.8 {q2}, [r0]! 516 veor q2, q2, q6
416 vmov q3, q5 517 veor q3, q3, q7
518 vst1.8 {q0-q1}, [r0]! @ write 4 ct blocks
519 vst1.8 {q2-q3}, [r0]!
520 vmov q4, q7
417 teq r4, #0 521 teq r4, #0
418 beq .Lxtsencout 522 beq .Lxtsencret
419 b .Lxtsencloop3x 523 b .Lxtsencloop4x
420.Lxtsenc1x: 524.Lxtsenc1x:
421 adds r4, r4, #3 525 adds r4, r4, #64
422 beq .Lxtsencout 526 beq .Lxtsencout
527 subs r4, r4, #16
528 bmi .LxtsencctsNx
423.Lxtsencloop: 529.Lxtsencloop:
424 vld1.8 {q0}, [r1]! 530 vld1.8 {q0}, [r1]!
425 veor q0, q0, q3 531.Lxtsencctsout:
532 veor q0, q0, q4
426 bl aes_encrypt 533 bl aes_encrypt
427 veor q0, q0, q3 534 veor q0, q0, q4
428 vst1.8 {q0}, [r0]! 535 teq r4, #0
429 subs r4, r4, #1
430 beq .Lxtsencout 536 beq .Lxtsencout
431 next_tweak q3, q3, q7, q6 537 subs r4, r4, #16
538 next_tweak q4, q4, q15, q6
539 bmi .Lxtsenccts
540 vst1.8 {q0}, [r0]!
432 b .Lxtsencloop 541 b .Lxtsencloop
433.Lxtsencout: 542.Lxtsencout:
434 vst1.8 {q3}, [r5] 543 vst1.8 {q0}, [r0]
544.Lxtsencret:
545 vst1.8 {q4}, [r5]
435 pop {r4-r6, pc} 546 pop {r4-r6, pc}
547
548.LxtsencctsNx:
549 vmov q0, q3
550 sub r0, r0, #16
551.Lxtsenccts:
552 movw ip, :lower16:.Lcts_permute_table
553 movt ip, :upper16:.Lcts_permute_table
554
555 add r1, r1, r4 @ rewind input pointer
556 add r4, r4, #16 @ # bytes in final block
557 add lr, ip, #32
558 add ip, ip, r4
559 sub lr, lr, r4
560 add r4, r0, r4 @ output address of final block
561
562 vld1.8 {q1}, [r1] @ load final partial block
563 vld1.8 {q2}, [ip]
564 vld1.8 {q3}, [lr]
565
566 vtbl.8 d4, {d0-d1}, d4
567 vtbl.8 d5, {d0-d1}, d5
568 vtbx.8 d0, {d2-d3}, d6
569 vtbx.8 d1, {d2-d3}, d7
570
571 vst1.8 {q2}, [r4] @ overlapping stores
572 mov r4, #0
573 b .Lxtsencctsout
436ENDPROC(ce_aes_xts_encrypt) 574ENDPROC(ce_aes_xts_encrypt)
437 575
438 576
@@ -441,50 +579,90 @@ ENTRY(ce_aes_xts_decrypt)
441 579
442 bl ce_aes_xts_init @ run shared prologue 580 bl ce_aes_xts_init @ run shared prologue
443 prepare_key r2, r3 581 prepare_key r2, r3
444 vmov q3, q0 582 vmov q4, q0
583
584 /* subtract 16 bytes if we are doing CTS */
585 tst r4, #0xf
586 subne r4, r4, #0x10
445 587
446 teq r6, #0 @ start of a block? 588 teq r6, #0 @ start of a block?
447 bne .Lxtsdec3x 589 bne .Lxtsdec4x
448 590
449.Lxtsdecloop3x: 591.Lxtsdecloop4x:
450 next_tweak q3, q3, q7, q6 592 next_tweak q4, q4, q15, q10
451.Lxtsdec3x: 593.Lxtsdec4x:
452 subs r4, r4, #3 594 subs r4, r4, #64
453 bmi .Lxtsdec1x 595 bmi .Lxtsdec1x
454 vld1.8 {q0-q1}, [r1]! @ get 3 ct blocks 596 vld1.8 {q0-q1}, [r1]! @ get 4 ct blocks
455 vld1.8 {q2}, [r1]! 597 vld1.8 {q2-q3}, [r1]!
456 next_tweak q4, q3, q7, q6 598 next_tweak q5, q4, q15, q10
457 veor q0, q0, q3 599 veor q0, q0, q4
458 next_tweak q5, q4, q7, q6 600 next_tweak q6, q5, q15, q10
459 veor q1, q1, q4 601 veor q1, q1, q5
460 veor q2, q2, q5 602 next_tweak q7, q6, q15, q10
461 bl aes_decrypt_3x 603 veor q2, q2, q6
462 veor q0, q0, q3 604 veor q3, q3, q7
463 veor q1, q1, q4 605 bl aes_decrypt_4x
464 veor q2, q2, q5 606 veor q0, q0, q4
465 vst1.8 {q0-q1}, [r0]! @ write 3 pt blocks 607 veor q1, q1, q5
466 vst1.8 {q2}, [r0]! 608 veor q2, q2, q6
467 vmov q3, q5 609 veor q3, q3, q7
610 vst1.8 {q0-q1}, [r0]! @ write 4 pt blocks
611 vst1.8 {q2-q3}, [r0]!
612 vmov q4, q7
468 teq r4, #0 613 teq r4, #0
469 beq .Lxtsdecout 614 beq .Lxtsdecout
470 b .Lxtsdecloop3x 615 b .Lxtsdecloop4x
471.Lxtsdec1x: 616.Lxtsdec1x:
472 adds r4, r4, #3 617 adds r4, r4, #64
473 beq .Lxtsdecout 618 beq .Lxtsdecout
619 subs r4, r4, #16
474.Lxtsdecloop: 620.Lxtsdecloop:
475 vld1.8 {q0}, [r1]! 621 vld1.8 {q0}, [r1]!
476 veor q0, q0, q3 622 bmi .Lxtsdeccts
477 add ip, r2, #32 @ 3rd round key 623.Lxtsdecctsout:
624 veor q0, q0, q4
478 bl aes_decrypt 625 bl aes_decrypt
479 veor q0, q0, q3 626 veor q0, q0, q4
480 vst1.8 {q0}, [r0]! 627 vst1.8 {q0}, [r0]!
481 subs r4, r4, #1 628 teq r4, #0
482 beq .Lxtsdecout 629 beq .Lxtsdecout
483 next_tweak q3, q3, q7, q6 630 subs r4, r4, #16
631 next_tweak q4, q4, q15, q6
484 b .Lxtsdecloop 632 b .Lxtsdecloop
485.Lxtsdecout: 633.Lxtsdecout:
486 vst1.8 {q3}, [r5] 634 vst1.8 {q4}, [r5]
487 pop {r4-r6, pc} 635 pop {r4-r6, pc}
636
637.Lxtsdeccts:
638 movw ip, :lower16:.Lcts_permute_table
639 movt ip, :upper16:.Lcts_permute_table
640
641 add r1, r1, r4 @ rewind input pointer
642 add r4, r4, #16 @ # bytes in final block
643 add lr, ip, #32
644 add ip, ip, r4
645 sub lr, lr, r4
646 add r4, r0, r4 @ output address of final block
647
648 next_tweak q5, q4, q15, q6
649
650 vld1.8 {q1}, [r1] @ load final partial block
651 vld1.8 {q2}, [ip]
652 vld1.8 {q3}, [lr]
653
654 veor q0, q0, q5
655 bl aes_decrypt
656 veor q0, q0, q5
657
658 vtbl.8 d4, {d0-d1}, d4
659 vtbl.8 d5, {d0-d1}, d5
660 vtbx.8 d0, {d2-d3}, d6
661 vtbx.8 d1, {d2-d3}, d7
662
663 vst1.8 {q2}, [r4] @ overlapping stores
664 mov r4, #0
665 b .Lxtsdecctsout
488ENDPROC(ce_aes_xts_decrypt) 666ENDPROC(ce_aes_xts_decrypt)
489 667
490 /* 668 /*
@@ -505,8 +683,18 @@ ENDPROC(ce_aes_sub)
505 * operation on round key *src 683 * operation on round key *src
506 */ 684 */
507ENTRY(ce_aes_invert) 685ENTRY(ce_aes_invert)
508 vld1.8 {q0}, [r1] 686 vld1.32 {q0}, [r1]
509 aesimc.8 q0, q0 687 aesimc.8 q0, q0
510 vst1.8 {q0}, [r0] 688 vst1.32 {q0}, [r0]
511 bx lr 689 bx lr
512ENDPROC(ce_aes_invert) 690ENDPROC(ce_aes_invert)
691
692 .section ".rodata", "a"
693 .align 6
694.Lcts_permute_table:
695 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
696 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
697 .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
698 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
699 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
700 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 620aacf0d128..cdb1a07e7ad0 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -7,9 +7,13 @@
7 7
8#include <asm/hwcap.h> 8#include <asm/hwcap.h>
9#include <asm/neon.h> 9#include <asm/neon.h>
10#include <asm/simd.h>
11#include <asm/unaligned.h>
10#include <crypto/aes.h> 12#include <crypto/aes.h>
13#include <crypto/ctr.h>
11#include <crypto/internal/simd.h> 14#include <crypto/internal/simd.h>
12#include <crypto/internal/skcipher.h> 15#include <crypto/internal/skcipher.h>
16#include <crypto/scatterwalk.h>
13#include <linux/cpufeature.h> 17#include <linux/cpufeature.h>
14#include <linux/module.h> 18#include <linux/module.h>
15#include <crypto/xts.h> 19#include <crypto/xts.h>
@@ -22,25 +26,29 @@ MODULE_LICENSE("GPL v2");
22asmlinkage u32 ce_aes_sub(u32 input); 26asmlinkage u32 ce_aes_sub(u32 input);
23asmlinkage void ce_aes_invert(void *dst, void *src); 27asmlinkage void ce_aes_invert(void *dst, void *src);
24 28
25asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], 29asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
26 int rounds, int blocks); 30 int rounds, int blocks);
27asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], 31asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
28 int rounds, int blocks); 32 int rounds, int blocks);
29 33
30asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], 34asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
31 int rounds, int blocks, u8 iv[]); 35 int rounds, int blocks, u8 iv[]);
32asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], 36asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
33 int rounds, int blocks, u8 iv[]); 37 int rounds, int blocks, u8 iv[]);
38asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
39 int rounds, int bytes, u8 const iv[]);
40asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
41 int rounds, int bytes, u8 const iv[]);
34 42
35asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], 43asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
36 int rounds, int blocks, u8 ctr[]); 44 int rounds, int blocks, u8 ctr[]);
37 45
38asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], 46asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
39 int rounds, int blocks, u8 iv[], 47 int rounds, int bytes, u8 iv[],
40 u8 const rk2[], int first); 48 u32 const rk2[], int first);
41asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], 49asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
42 int rounds, int blocks, u8 iv[], 50 int rounds, int bytes, u8 iv[],
43 u8 const rk2[], int first); 51 u32 const rk2[], int first);
44 52
45struct aes_block { 53struct aes_block {
46 u8 b[AES_BLOCK_SIZE]; 54 u8 b[AES_BLOCK_SIZE];
@@ -77,21 +85,17 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
77 key_len != AES_KEYSIZE_256) 85 key_len != AES_KEYSIZE_256)
78 return -EINVAL; 86 return -EINVAL;
79 87
80 memcpy(ctx->key_enc, in_key, key_len);
81 ctx->key_length = key_len; 88 ctx->key_length = key_len;
89 for (i = 0; i < kwords; i++)
90 ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
82 91
83 kernel_neon_begin(); 92 kernel_neon_begin();
84 for (i = 0; i < sizeof(rcon); i++) { 93 for (i = 0; i < sizeof(rcon); i++) {
85 u32 *rki = ctx->key_enc + (i * kwords); 94 u32 *rki = ctx->key_enc + (i * kwords);
86 u32 *rko = rki + kwords; 95 u32 *rko = rki + kwords;
87 96
88#ifndef CONFIG_CPU_BIG_ENDIAN
89 rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8); 97 rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
90 rko[0] = rko[0] ^ rki[0] ^ rcon[i]; 98 rko[0] = rko[0] ^ rki[0] ^ rcon[i];
91#else
92 rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
93 rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
94#endif
95 rko[1] = rko[0] ^ rki[1]; 99 rko[1] = rko[0] ^ rki[1];
96 rko[2] = rko[1] ^ rki[2]; 100 rko[2] = rko[1] ^ rki[2];
97 rko[3] = rko[2] ^ rki[3]; 101 rko[3] = rko[2] ^ rki[3];
@@ -178,15 +182,15 @@ static int ecb_encrypt(struct skcipher_request *req)
178 unsigned int blocks; 182 unsigned int blocks;
179 int err; 183 int err;
180 184
181 err = skcipher_walk_virt(&walk, req, true); 185 err = skcipher_walk_virt(&walk, req, false);
182 186
183 kernel_neon_begin();
184 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 187 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
188 kernel_neon_begin();
185 ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 189 ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
186 (u8 *)ctx->key_enc, num_rounds(ctx), blocks); 190 ctx->key_enc, num_rounds(ctx), blocks);
191 kernel_neon_end();
187 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 192 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
188 } 193 }
189 kernel_neon_end();
190 return err; 194 return err;
191} 195}
192 196
@@ -198,58 +202,192 @@ static int ecb_decrypt(struct skcipher_request *req)
198 unsigned int blocks; 202 unsigned int blocks;
199 int err; 203 int err;
200 204
201 err = skcipher_walk_virt(&walk, req, true); 205 err = skcipher_walk_virt(&walk, req, false);
202 206
203 kernel_neon_begin();
204 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 207 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
208 kernel_neon_begin();
205 ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 209 ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
206 (u8 *)ctx->key_dec, num_rounds(ctx), blocks); 210 ctx->key_dec, num_rounds(ctx), blocks);
211 kernel_neon_end();
207 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 212 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
208 } 213 }
209 kernel_neon_end();
210 return err; 214 return err;
211} 215}
212 216
213static int cbc_encrypt(struct skcipher_request *req) 217static int cbc_encrypt_walk(struct skcipher_request *req,
218 struct skcipher_walk *walk)
214{ 219{
215 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 220 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
216 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 221 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
217 struct skcipher_walk walk;
218 unsigned int blocks; 222 unsigned int blocks;
223 int err = 0;
224
225 while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
226 kernel_neon_begin();
227 ce_aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
228 ctx->key_enc, num_rounds(ctx), blocks,
229 walk->iv);
230 kernel_neon_end();
231 err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
232 }
233 return err;
234}
235
236static int cbc_encrypt(struct skcipher_request *req)
237{
238 struct skcipher_walk walk;
219 int err; 239 int err;
220 240
221 err = skcipher_walk_virt(&walk, req, true); 241 err = skcipher_walk_virt(&walk, req, false);
242 if (err)
243 return err;
244 return cbc_encrypt_walk(req, &walk);
245}
222 246
223 kernel_neon_begin(); 247static int cbc_decrypt_walk(struct skcipher_request *req,
224 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 248 struct skcipher_walk *walk)
225 ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 249{
226 (u8 *)ctx->key_enc, num_rounds(ctx), blocks, 250 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
227 walk.iv); 251 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
228 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 252 unsigned int blocks;
253 int err = 0;
254
255 while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
256 kernel_neon_begin();
257 ce_aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
258 ctx->key_dec, num_rounds(ctx), blocks,
259 walk->iv);
260 kernel_neon_end();
261 err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
229 } 262 }
230 kernel_neon_end();
231 return err; 263 return err;
232} 264}
233 265
234static int cbc_decrypt(struct skcipher_request *req) 266static int cbc_decrypt(struct skcipher_request *req)
235{ 267{
268 struct skcipher_walk walk;
269 int err;
270
271 err = skcipher_walk_virt(&walk, req, false);
272 if (err)
273 return err;
274 return cbc_decrypt_walk(req, &walk);
275}
276
277static int cts_cbc_encrypt(struct skcipher_request *req)
278{
236 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 279 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
237 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 280 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
281 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
282 struct scatterlist *src = req->src, *dst = req->dst;
283 struct scatterlist sg_src[2], sg_dst[2];
284 struct skcipher_request subreq;
238 struct skcipher_walk walk; 285 struct skcipher_walk walk;
239 unsigned int blocks;
240 int err; 286 int err;
241 287
242 err = skcipher_walk_virt(&walk, req, true); 288 skcipher_request_set_tfm(&subreq, tfm);
289 skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
290 NULL, NULL);
291
292 if (req->cryptlen <= AES_BLOCK_SIZE) {
293 if (req->cryptlen < AES_BLOCK_SIZE)
294 return -EINVAL;
295 cbc_blocks = 1;
296 }
297
298 if (cbc_blocks > 0) {
299 skcipher_request_set_crypt(&subreq, req->src, req->dst,
300 cbc_blocks * AES_BLOCK_SIZE,
301 req->iv);
302
303 err = skcipher_walk_virt(&walk, &subreq, false) ?:
304 cbc_encrypt_walk(&subreq, &walk);
305 if (err)
306 return err;
307
308 if (req->cryptlen == AES_BLOCK_SIZE)
309 return 0;
310
311 dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
312 if (req->dst != req->src)
313 dst = scatterwalk_ffwd(sg_dst, req->dst,
314 subreq.cryptlen);
315 }
316
317 /* handle ciphertext stealing */
318 skcipher_request_set_crypt(&subreq, src, dst,
319 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
320 req->iv);
321
322 err = skcipher_walk_virt(&walk, &subreq, false);
323 if (err)
324 return err;
243 325
244 kernel_neon_begin(); 326 kernel_neon_begin();
245 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 327 ce_aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
246 ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 328 ctx->key_enc, num_rounds(ctx), walk.nbytes,
247 (u8 *)ctx->key_dec, num_rounds(ctx), blocks, 329 walk.iv);
248 walk.iv); 330 kernel_neon_end();
249 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 331
332 return skcipher_walk_done(&walk, 0);
333}
334
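The new cts_cbc_encrypt() above leaves the actual ciphertext stealing to the ce_aes_cbc_cts_encrypt asm routine. For readers following along, the final-block arrangement (CBC with CS3-style stealing, the same layout the generic cts template produces) can be sketched in portable C. This is an illustrative sketch only, not part of the patch; the block_fn callback and the helper name are assumptions.

#include <stdint.h>
#include <string.h>

#define BLK 16

/* single-block cipher callback, e.g. one AES encryption (assumed, not kernel API) */
typedef void (*block_fn)(const void *key, uint8_t dst[BLK], const uint8_t src[BLK]);

/*
 * CBC ciphertext stealing (CS3) over the last BLK + tail plaintext bytes.
 * 'iv' is the CBC chaining value left over from the preceding full blocks.
 * On return, out[0..15] holds the swapped second-to-last ciphertext block and
 * out[16..16+tail-1] holds the stolen, truncated final block.
 */
static void cbc_cs3_encrypt_tail(block_fn enc, const void *key,
                                 uint8_t *out, const uint8_t *in,
                                 unsigned int tail, const uint8_t iv[BLK])
{
        uint8_t x[BLK], cc[BLK], pad[BLK] = { 0 };
        unsigned int i;

        /* encrypt the last full plaintext block normally: CC = E(P_{n-1} ^ IV) */
        for (i = 0; i < BLK; i++)
                x[i] = in[i] ^ iv[i];
        enc(key, cc, x);

        /* the truncated final ciphertext block steals the head of CC */
        memcpy(out + BLK, cc, tail);

        /* C_{n-1} = E((P_n || 0...) ^ CC), emitted in the second-to-last slot */
        memcpy(pad, in + BLK, tail);
        for (i = 0; i < BLK; i++)
                pad[i] ^= cc[i];
        enc(key, out, pad);
}
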
335static int cts_cbc_decrypt(struct skcipher_request *req)
336{
337 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
338 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
339 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
340 struct scatterlist *src = req->src, *dst = req->dst;
341 struct scatterlist sg_src[2], sg_dst[2];
342 struct skcipher_request subreq;
343 struct skcipher_walk walk;
344 int err;
345
346 skcipher_request_set_tfm(&subreq, tfm);
347 skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
348 NULL, NULL);
349
350 if (req->cryptlen <= AES_BLOCK_SIZE) {
351 if (req->cryptlen < AES_BLOCK_SIZE)
352 return -EINVAL;
353 cbc_blocks = 1;
250 } 354 }
355
356 if (cbc_blocks > 0) {
357 skcipher_request_set_crypt(&subreq, req->src, req->dst,
358 cbc_blocks * AES_BLOCK_SIZE,
359 req->iv);
360
361 err = skcipher_walk_virt(&walk, &subreq, false) ?:
362 cbc_decrypt_walk(&subreq, &walk);
363 if (err)
364 return err;
365
366 if (req->cryptlen == AES_BLOCK_SIZE)
367 return 0;
368
369 dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
370 if (req->dst != req->src)
371 dst = scatterwalk_ffwd(sg_dst, req->dst,
372 subreq.cryptlen);
373 }
374
375 /* handle ciphertext stealing */
376 skcipher_request_set_crypt(&subreq, src, dst,
377 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
378 req->iv);
379
380 err = skcipher_walk_virt(&walk, &subreq, false);
381 if (err)
382 return err;
383
384 kernel_neon_begin();
385 ce_aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
386 ctx->key_dec, num_rounds(ctx), walk.nbytes,
387 walk.iv);
251 kernel_neon_end(); 388 kernel_neon_end();
252 return err; 389
390 return skcipher_walk_done(&walk, 0);
253} 391}
254 392
255static int ctr_encrypt(struct skcipher_request *req) 393static int ctr_encrypt(struct skcipher_request *req)
@@ -259,13 +397,14 @@ static int ctr_encrypt(struct skcipher_request *req)
259 struct skcipher_walk walk; 397 struct skcipher_walk walk;
260 int err, blocks; 398 int err, blocks;
261 399
262 err = skcipher_walk_virt(&walk, req, true); 400 err = skcipher_walk_virt(&walk, req, false);
263 401
264 kernel_neon_begin();
265 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 402 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
403 kernel_neon_begin();
266 ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 404 ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
267 (u8 *)ctx->key_enc, num_rounds(ctx), blocks, 405 ctx->key_enc, num_rounds(ctx), blocks,
268 walk.iv); 406 walk.iv);
407 kernel_neon_end();
269 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 408 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
270 } 409 }
271 if (walk.nbytes) { 410 if (walk.nbytes) {
@@ -279,36 +418,109 @@ static int ctr_encrypt(struct skcipher_request *req)
279 */ 418 */
280 blocks = -1; 419 blocks = -1;
281 420
282 ce_aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, 421 kernel_neon_begin();
283 num_rounds(ctx), blocks, walk.iv); 422 ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
423 blocks, walk.iv);
424 kernel_neon_end();
284 crypto_xor_cpy(tdst, tsrc, tail, nbytes); 425 crypto_xor_cpy(tdst, tsrc, tail, nbytes);
285 err = skcipher_walk_done(&walk, 0); 426 err = skcipher_walk_done(&walk, 0);
286 } 427 }
287 kernel_neon_end();
288
289 return err; 428 return err;
290} 429}
291 430
431static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
432{
433 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
434 unsigned long flags;
435
436 /*
437 * Temporarily disable interrupts to avoid races where
438 * cachelines are evicted when the CPU is interrupted
439 * to do something else.
440 */
441 local_irq_save(flags);
442 aes_encrypt(ctx, dst, src);
443 local_irq_restore(flags);
444}
445
446static int ctr_encrypt_sync(struct skcipher_request *req)
447{
448 if (!crypto_simd_usable())
449 return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
450
451 return ctr_encrypt(req);
452}
453
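ctr_encrypt_sync() falls back to crypto_ctr_encrypt_walk() with the single-block helper above whenever the NEON unit is unavailable. Conceptually the fallback is plain CTR mode: encrypt a big-endian counter block, XOR the keystream into the data, and bump the counter. A minimal standalone sketch (illustrative only; block_fn is an assumed callback, not the kernel API):

#include <stddef.h>
#include <stdint.h>

#define BLK 16

typedef void (*block_fn)(const void *key, uint8_t dst[BLK], const uint8_t src[BLK]);

/* increment a big-endian 128-bit counter block in place */
static void ctr_inc(uint8_t ctr[BLK])
{
        int i;

        for (i = BLK - 1; i >= 0; i--)
                if (++ctr[i] != 0)
                        break;
}

/* CTR keystream: encrypt the counter, XOR into the data, advance the counter */
static void ctr_crypt(block_fn enc, const void *key, uint8_t ctr[BLK],
                      uint8_t *dst, const uint8_t *src, size_t len)
{
        uint8_t ks[BLK];
        size_t i, n;

        while (len) {
                enc(key, ks, ctr);
                ctr_inc(ctr);
                n = len < BLK ? len : BLK;
                for (i = 0; i < n; i++)
                        dst[i] = src[i] ^ ks[i];
                dst += n;
                src += n;
                len -= n;
        }
}
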
292static int xts_encrypt(struct skcipher_request *req) 454static int xts_encrypt(struct skcipher_request *req)
293{ 455{
294 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 456 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
295 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 457 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
296 int err, first, rounds = num_rounds(&ctx->key1); 458 int err, first, rounds = num_rounds(&ctx->key1);
459 int tail = req->cryptlen % AES_BLOCK_SIZE;
460 struct scatterlist sg_src[2], sg_dst[2];
461 struct skcipher_request subreq;
462 struct scatterlist *src, *dst;
297 struct skcipher_walk walk; 463 struct skcipher_walk walk;
298 unsigned int blocks;
299 464
300 err = skcipher_walk_virt(&walk, req, true); 465 if (req->cryptlen < AES_BLOCK_SIZE)
466 return -EINVAL;
301 467
302 kernel_neon_begin(); 468 err = skcipher_walk_virt(&walk, req, false);
303 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 469
470 if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
471 int xts_blocks = DIV_ROUND_UP(req->cryptlen,
472 AES_BLOCK_SIZE) - 2;
473
474 skcipher_walk_abort(&walk);
475
476 skcipher_request_set_tfm(&subreq, tfm);
477 skcipher_request_set_callback(&subreq,
478 skcipher_request_flags(req),
479 NULL, NULL);
480 skcipher_request_set_crypt(&subreq, req->src, req->dst,
481 xts_blocks * AES_BLOCK_SIZE,
482 req->iv);
483 req = &subreq;
484 err = skcipher_walk_virt(&walk, req, false);
485 } else {
486 tail = 0;
487 }
488
489 for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
490 int nbytes = walk.nbytes;
491
492 if (walk.nbytes < walk.total)
493 nbytes &= ~(AES_BLOCK_SIZE - 1);
494
495 kernel_neon_begin();
304 ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 496 ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
305 (u8 *)ctx->key1.key_enc, rounds, blocks, 497 ctx->key1.key_enc, rounds, nbytes, walk.iv,
306 walk.iv, (u8 *)ctx->key2.key_enc, first); 498 ctx->key2.key_enc, first);
307 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 499 kernel_neon_end();
500 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
308 } 501 }
502
503 if (err || likely(!tail))
504 return err;
505
506 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
507 if (req->dst != req->src)
508 dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
509
510 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
511 req->iv);
512
513 err = skcipher_walk_virt(&walk, req, false);
514 if (err)
515 return err;
516
517 kernel_neon_begin();
518 ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
519 ctx->key1.key_enc, rounds, walk.nbytes, walk.iv,
520 ctx->key2.key_enc, first);
309 kernel_neon_end(); 521 kernel_neon_end();
310 522
311 return err; 523 return skcipher_walk_done(&walk, 0);
312} 524}
313 525
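The bookkeeping at the top of xts_encrypt() is easier to follow with concrete numbers. A hypothetical 70-byte request, worked through with the same expressions (illustrative only; the request size is assumed):

#include <stdio.h>

#define AES_BLOCK_SIZE     16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int cryptlen   = 70;                             /* assumed request size */
        unsigned int tail       = cryptlen % AES_BLOCK_SIZE;      /* 6 */
        unsigned int xts_blocks = DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE) - 2; /* 3 */

        /* bulk pass covers 48 bytes; the CTS pass covers the final full block plus the tail, 22 bytes */
        printf("bulk=%u cts=%u\n", xts_blocks * AES_BLOCK_SIZE,
               AES_BLOCK_SIZE + tail);
        return 0;
}
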
314static int xts_decrypt(struct skcipher_request *req) 526static int xts_decrypt(struct skcipher_request *req)
@@ -316,87 +528,165 @@ static int xts_decrypt(struct skcipher_request *req)
316 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 528 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
317 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 529 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
318 int err, first, rounds = num_rounds(&ctx->key1); 530 int err, first, rounds = num_rounds(&ctx->key1);
531 int tail = req->cryptlen % AES_BLOCK_SIZE;
532 struct scatterlist sg_src[2], sg_dst[2];
533 struct skcipher_request subreq;
534 struct scatterlist *src, *dst;
319 struct skcipher_walk walk; 535 struct skcipher_walk walk;
320 unsigned int blocks;
321 536
322 err = skcipher_walk_virt(&walk, req, true); 537 if (req->cryptlen < AES_BLOCK_SIZE)
538 return -EINVAL;
539
540 err = skcipher_walk_virt(&walk, req, false);
541
542 if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
543 int xts_blocks = DIV_ROUND_UP(req->cryptlen,
544 AES_BLOCK_SIZE) - 2;
545
546 skcipher_walk_abort(&walk);
547
548 skcipher_request_set_tfm(&subreq, tfm);
549 skcipher_request_set_callback(&subreq,
550 skcipher_request_flags(req),
551 NULL, NULL);
552 skcipher_request_set_crypt(&subreq, req->src, req->dst,
553 xts_blocks * AES_BLOCK_SIZE,
554 req->iv);
555 req = &subreq;
556 err = skcipher_walk_virt(&walk, req, false);
557 } else {
558 tail = 0;
559 }
560
561 for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
562 int nbytes = walk.nbytes;
323 563
324 kernel_neon_begin(); 564 if (walk.nbytes < walk.total)
325 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 565 nbytes &= ~(AES_BLOCK_SIZE - 1);
566
567 kernel_neon_begin();
326 ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 568 ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
327 (u8 *)ctx->key1.key_dec, rounds, blocks, 569 ctx->key1.key_dec, rounds, nbytes, walk.iv,
328 walk.iv, (u8 *)ctx->key2.key_enc, first); 570 ctx->key2.key_enc, first);
329 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 571 kernel_neon_end();
572 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
330 } 573 }
574
575 if (err || likely(!tail))
576 return err;
577
578 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
579 if (req->dst != req->src)
580 dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
581
582 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
583 req->iv);
584
585 err = skcipher_walk_virt(&walk, req, false);
586 if (err)
587 return err;
588
589 kernel_neon_begin();
590 ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
591 ctx->key1.key_dec, rounds, walk.nbytes, walk.iv,
592 ctx->key2.key_enc, first);
331 kernel_neon_end(); 593 kernel_neon_end();
332 594
333 return err; 595 return skcipher_walk_done(&walk, 0);
334} 596}
335 597
336static struct skcipher_alg aes_algs[] = { { 598static struct skcipher_alg aes_algs[] = { {
337 .base = { 599 .base.cra_name = "__ecb(aes)",
338 .cra_name = "__ecb(aes)", 600 .base.cra_driver_name = "__ecb-aes-ce",
339 .cra_driver_name = "__ecb-aes-ce", 601 .base.cra_priority = 300,
340 .cra_priority = 300, 602 .base.cra_flags = CRYPTO_ALG_INTERNAL,
341 .cra_flags = CRYPTO_ALG_INTERNAL, 603 .base.cra_blocksize = AES_BLOCK_SIZE,
342 .cra_blocksize = AES_BLOCK_SIZE, 604 .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
343 .cra_ctxsize = sizeof(struct crypto_aes_ctx), 605 .base.cra_module = THIS_MODULE,
344 .cra_module = THIS_MODULE, 606
345 }, 607 .min_keysize = AES_MIN_KEY_SIZE,
346 .min_keysize = AES_MIN_KEY_SIZE, 608 .max_keysize = AES_MAX_KEY_SIZE,
347 .max_keysize = AES_MAX_KEY_SIZE, 609 .setkey = ce_aes_setkey,
348 .setkey = ce_aes_setkey, 610 .encrypt = ecb_encrypt,
349 .encrypt = ecb_encrypt, 611 .decrypt = ecb_decrypt,
350 .decrypt = ecb_decrypt,
351}, { 612}, {
352 .base = { 613 .base.cra_name = "__cbc(aes)",
353 .cra_name = "__cbc(aes)", 614 .base.cra_driver_name = "__cbc-aes-ce",
354 .cra_driver_name = "__cbc-aes-ce", 615 .base.cra_priority = 300,
355 .cra_priority = 300, 616 .base.cra_flags = CRYPTO_ALG_INTERNAL,
356 .cra_flags = CRYPTO_ALG_INTERNAL, 617 .base.cra_blocksize = AES_BLOCK_SIZE,
357 .cra_blocksize = AES_BLOCK_SIZE, 618 .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
358 .cra_ctxsize = sizeof(struct crypto_aes_ctx), 619 .base.cra_module = THIS_MODULE,
359 .cra_module = THIS_MODULE, 620
360 }, 621 .min_keysize = AES_MIN_KEY_SIZE,
361 .min_keysize = AES_MIN_KEY_SIZE, 622 .max_keysize = AES_MAX_KEY_SIZE,
362 .max_keysize = AES_MAX_KEY_SIZE, 623 .ivsize = AES_BLOCK_SIZE,
363 .ivsize = AES_BLOCK_SIZE, 624 .setkey = ce_aes_setkey,
364 .setkey = ce_aes_setkey, 625 .encrypt = cbc_encrypt,
365 .encrypt = cbc_encrypt, 626 .decrypt = cbc_decrypt,
366 .decrypt = cbc_decrypt,
367}, { 627}, {
368 .base = { 628 .base.cra_name = "__cts(cbc(aes))",
369 .cra_name = "__ctr(aes)", 629 .base.cra_driver_name = "__cts-cbc-aes-ce",
370 .cra_driver_name = "__ctr-aes-ce", 630 .base.cra_priority = 300,
371 .cra_priority = 300, 631 .base.cra_flags = CRYPTO_ALG_INTERNAL,
372 .cra_flags = CRYPTO_ALG_INTERNAL, 632 .base.cra_blocksize = AES_BLOCK_SIZE,
373 .cra_blocksize = 1, 633 .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
374 .cra_ctxsize = sizeof(struct crypto_aes_ctx), 634 .base.cra_module = THIS_MODULE,
375 .cra_module = THIS_MODULE, 635
376 }, 636 .min_keysize = AES_MIN_KEY_SIZE,
377 .min_keysize = AES_MIN_KEY_SIZE, 637 .max_keysize = AES_MAX_KEY_SIZE,
378 .max_keysize = AES_MAX_KEY_SIZE, 638 .ivsize = AES_BLOCK_SIZE,
379 .ivsize = AES_BLOCK_SIZE, 639 .walksize = 2 * AES_BLOCK_SIZE,
380 .chunksize = AES_BLOCK_SIZE, 640 .setkey = ce_aes_setkey,
381 .setkey = ce_aes_setkey, 641 .encrypt = cts_cbc_encrypt,
382 .encrypt = ctr_encrypt, 642 .decrypt = cts_cbc_decrypt,
383 .decrypt = ctr_encrypt,
384}, { 643}, {
385 .base = { 644 .base.cra_name = "__ctr(aes)",
386 .cra_name = "__xts(aes)", 645 .base.cra_driver_name = "__ctr-aes-ce",
387 .cra_driver_name = "__xts-aes-ce", 646 .base.cra_priority = 300,
388 .cra_priority = 300, 647 .base.cra_flags = CRYPTO_ALG_INTERNAL,
389 .cra_flags = CRYPTO_ALG_INTERNAL, 648 .base.cra_blocksize = 1,
390 .cra_blocksize = AES_BLOCK_SIZE, 649 .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
391 .cra_ctxsize = sizeof(struct crypto_aes_xts_ctx), 650 .base.cra_module = THIS_MODULE,
392 .cra_module = THIS_MODULE, 651
393 }, 652 .min_keysize = AES_MIN_KEY_SIZE,
394 .min_keysize = 2 * AES_MIN_KEY_SIZE, 653 .max_keysize = AES_MAX_KEY_SIZE,
395 .max_keysize = 2 * AES_MAX_KEY_SIZE, 654 .ivsize = AES_BLOCK_SIZE,
396 .ivsize = AES_BLOCK_SIZE, 655 .chunksize = AES_BLOCK_SIZE,
397 .setkey = xts_set_key, 656 .setkey = ce_aes_setkey,
398 .encrypt = xts_encrypt, 657 .encrypt = ctr_encrypt,
399 .decrypt = xts_decrypt, 658 .decrypt = ctr_encrypt,
659}, {
660 .base.cra_name = "ctr(aes)",
661 .base.cra_driver_name = "ctr-aes-ce-sync",
662 .base.cra_priority = 300 - 1,
663 .base.cra_blocksize = 1,
664 .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
665 .base.cra_module = THIS_MODULE,
666
667 .min_keysize = AES_MIN_KEY_SIZE,
668 .max_keysize = AES_MAX_KEY_SIZE,
669 .ivsize = AES_BLOCK_SIZE,
670 .chunksize = AES_BLOCK_SIZE,
671 .setkey = ce_aes_setkey,
672 .encrypt = ctr_encrypt_sync,
673 .decrypt = ctr_encrypt_sync,
674}, {
675 .base.cra_name = "__xts(aes)",
676 .base.cra_driver_name = "__xts-aes-ce",
677 .base.cra_priority = 300,
678 .base.cra_flags = CRYPTO_ALG_INTERNAL,
679 .base.cra_blocksize = AES_BLOCK_SIZE,
680 .base.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
681 .base.cra_module = THIS_MODULE,
682
683 .min_keysize = 2 * AES_MIN_KEY_SIZE,
684 .max_keysize = 2 * AES_MAX_KEY_SIZE,
685 .ivsize = AES_BLOCK_SIZE,
686 .walksize = 2 * AES_BLOCK_SIZE,
687 .setkey = xts_set_key,
688 .encrypt = xts_encrypt,
689 .decrypt = xts_decrypt,
400} }; 690} };
401 691
402static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; 692static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
@@ -425,6 +715,9 @@ static int __init aes_init(void)
425 return err; 715 return err;
426 716
427 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { 717 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
718 if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
719 continue;
720
428 algname = aes_algs[i].base.cra_name + 2; 721 algname = aes_algs[i].base.cra_name + 2;
429 drvname = aes_algs[i].base.cra_driver_name + 2; 722 drvname = aes_algs[i].base.cra_driver_name + 2;
430 basename = aes_algs[i].base.cra_driver_name; 723 basename = aes_algs[i].base.cra_driver_name;
diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S
index 4460ed05d6ff..472e56d09eea 100644
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -219,43 +219,5 @@ ENDPROC(__aes_arm_encrypt)
219 219
220 .align 5 220 .align 5
221ENTRY(__aes_arm_decrypt) 221ENTRY(__aes_arm_decrypt)
222 do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0 222 do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
223ENDPROC(__aes_arm_decrypt) 223ENDPROC(__aes_arm_decrypt)
224
225 .section ".rodata", "a"
226 .align L1_CACHE_SHIFT
227 .type __aes_arm_inverse_sbox, %object
228__aes_arm_inverse_sbox:
229 .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
230 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
231 .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
232 .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
233 .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
234 .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
235 .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
236 .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
237 .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
238 .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
239 .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
240 .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
241 .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
242 .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
243 .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
244 .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
245 .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
246 .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
247 .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
248 .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
249 .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
250 .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
251 .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
252 .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
253 .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
254 .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
255 .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
256 .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
257 .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
258 .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
259 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
260 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
261 .size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox
diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c
index 128d0a1ac068..8cd00f56800e 100644
--- a/arch/arm/crypto/aes-cipher-glue.c
+++ b/arch/arm/crypto/aes-cipher-glue.c
@@ -11,12 +11,9 @@
11#include <linux/module.h> 11#include <linux/module.h>
12 12
13asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out); 13asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
14EXPORT_SYMBOL(__aes_arm_encrypt);
15
16asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out); 14asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
17EXPORT_SYMBOL(__aes_arm_decrypt);
18 15
19static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 16static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
20{ 17{
21 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 18 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
22 int rounds = 6 + ctx->key_length / 4; 19 int rounds = 6 + ctx->key_length / 4;
@@ -24,7 +21,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
24 __aes_arm_encrypt(ctx->key_enc, rounds, in, out); 21 __aes_arm_encrypt(ctx->key_enc, rounds, in, out);
25} 22}
26 23
27static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 24static void aes_arm_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
28{ 25{
29 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 26 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
30 int rounds = 6 + ctx->key_length / 4; 27 int rounds = 6 + ctx->key_length / 4;
@@ -44,8 +41,8 @@ static struct crypto_alg aes_alg = {
44 .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, 41 .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
45 .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, 42 .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
46 .cra_cipher.cia_setkey = crypto_aes_set_key, 43 .cra_cipher.cia_setkey = crypto_aes_set_key,
47 .cra_cipher.cia_encrypt = aes_encrypt, 44 .cra_cipher.cia_encrypt = aes_arm_encrypt,
48 .cra_cipher.cia_decrypt = aes_decrypt, 45 .cra_cipher.cia_decrypt = aes_arm_decrypt,
49 46
50#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 47#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
51 .cra_alignmask = 3, 48 .cra_alignmask = 3,
diff --git a/arch/arm/crypto/aes-neonbs-core.S b/arch/arm/crypto/aes-neonbs-core.S
index d3eab76b6e1b..cfaed4e67535 100644
--- a/arch/arm/crypto/aes-neonbs-core.S
+++ b/arch/arm/crypto/aes-neonbs-core.S
@@ -887,19 +887,17 @@ ENDPROC(aesbs_ctr_encrypt)
887 veor \out, \out, \tmp 887 veor \out, \out, \tmp
888 .endm 888 .endm
889 889
890 .align 4
891.Lxts_mul_x:
892 .quad 1, 0x87
893
894 /* 890 /*
895 * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 891 * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
896 * int blocks, u8 iv[]) 892 * int blocks, u8 iv[], int reorder_last_tweak)
897 * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 893 * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
898 * int blocks, u8 iv[]) 894 * int blocks, u8 iv[], int reorder_last_tweak)
899 */ 895 */
900__xts_prepare8: 896__xts_prepare8:
901 vld1.8 {q14}, [r7] // load iv 897 vld1.8 {q14}, [r7] // load iv
902 __ldr q15, .Lxts_mul_x // load tweak mask 898 vmov.i32 d30, #0x87 // compose tweak mask vector
899 vmovl.u32 q15, d30
900 vshr.u64 d30, d31, #7
903 vmov q12, q14 901 vmov q12, q14
904 902
905 __adr ip, 0f 903 __adr ip, 0f
@@ -946,17 +944,25 @@ __xts_prepare8:
946 944
947 vld1.8 {q7}, [r1]! 945 vld1.8 {q7}, [r1]!
948 next_tweak q14, q12, q15, q13 946 next_tweak q14, q12, q15, q13
949 veor q7, q7, q12 947THUMB( itt le )
948 W(cmple) r8, #0
949 ble 1f
9500: veor q7, q7, q12
950 vst1.8 {q12}, [r4, :128] 951 vst1.8 {q12}, [r4, :128]
951 952
9520: vst1.8 {q14}, [r7] // store next iv 953 vst1.8 {q14}, [r7] // store next iv
953 bx lr 954 bx lr
955
9561: vswp q12, q14
957 b 0b
954ENDPROC(__xts_prepare8) 958ENDPROC(__xts_prepare8)
955 959
956 .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 960 .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
957 push {r4-r8, lr} 961 push {r4-r8, lr}
958 mov r5, sp // preserve sp 962 mov r5, sp // preserve sp
959 ldrd r6, r7, [sp, #24] // get blocks and iv args 963 ldrd r6, r7, [sp, #24] // get blocks and iv args
964 ldr r8, [sp, #32] // reorder final tweak?
965 rsb r8, r8, #1
960 sub ip, sp, #128 // make room for 8x tweak 966 sub ip, sp, #128 // make room for 8x tweak
961 bic ip, ip, #0xf // align sp to 16 bytes 967 bic ip, ip, #0xf // align sp to 16 bytes
962 mov sp, ip 968 mov sp, ip
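The tweak mask composed by the vmov/vmovl/vshr sequence above replaces the old .Lxts_mul_x literal; either way it encodes multiplication of the XTS tweak by x in GF(2^128) with the 0x87 reduction constant. A plain-C equivalent of that per-block tweak update, using the little-endian byte order XTS defines for the tweak (illustrative sketch only):

#include <stdint.h>

/* multiply a 16-byte XTS tweak by x in GF(2^128), polynomial x^128 + x^7 + x^2 + x + 1 */
static void xts_mul_x(uint8_t t[16])
{
        uint8_t carry = 0;
        int i;

        for (i = 0; i < 16; i++) {
                uint8_t c = t[i] >> 7;          /* bit shifted out of this byte */

                t[i] = (uint8_t)((t[i] << 1) | carry);
                carry = c;
        }
        if (carry)
                t[0] ^= 0x87;                   /* fold the carry back in (reduction) */
}
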
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index bd0bee9c8f7b..e85839a8aaeb 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -6,10 +6,13 @@
6 */ 6 */
7 7
8#include <asm/neon.h> 8#include <asm/neon.h>
9#include <asm/simd.h>
9#include <crypto/aes.h> 10#include <crypto/aes.h>
10#include <crypto/cbc.h> 11#include <crypto/cbc.h>
12#include <crypto/ctr.h>
11#include <crypto/internal/simd.h> 13#include <crypto/internal/simd.h>
12#include <crypto/internal/skcipher.h> 14#include <crypto/internal/skcipher.h>
15#include <crypto/scatterwalk.h>
13#include <crypto/xts.h> 16#include <crypto/xts.h>
14#include <linux/module.h> 17#include <linux/module.h>
15 18
@@ -35,9 +38,9 @@ asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
35 int rounds, int blocks, u8 ctr[], u8 final[]); 38 int rounds, int blocks, u8 ctr[], u8 final[]);
36 39
37asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], 40asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
38 int rounds, int blocks, u8 iv[]); 41 int rounds, int blocks, u8 iv[], int);
39asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], 42asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
40 int rounds, int blocks, u8 iv[]); 43 int rounds, int blocks, u8 iv[], int);
41 44
42struct aesbs_ctx { 45struct aesbs_ctx {
43 int rounds; 46 int rounds;
@@ -51,9 +54,15 @@ struct aesbs_cbc_ctx {
51 54
52struct aesbs_xts_ctx { 55struct aesbs_xts_ctx {
53 struct aesbs_ctx key; 56 struct aesbs_ctx key;
57 struct crypto_cipher *cts_tfm;
54 struct crypto_cipher *tweak_tfm; 58 struct crypto_cipher *tweak_tfm;
55}; 59};
56 60
61struct aesbs_ctr_ctx {
62 struct aesbs_ctx key; /* must be first member */
63 struct crypto_aes_ctx fallback;
64};
65
57static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 66static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
58 unsigned int key_len) 67 unsigned int key_len)
59{ 68{
@@ -61,7 +70,7 @@ static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
61 struct crypto_aes_ctx rk; 70 struct crypto_aes_ctx rk;
62 int err; 71 int err;
63 72
64 err = crypto_aes_expand_key(&rk, in_key, key_len); 73 err = aes_expandkey(&rk, in_key, key_len);
65 if (err) 74 if (err)
66 return err; 75 return err;
67 76
@@ -83,9 +92,8 @@ static int __ecb_crypt(struct skcipher_request *req,
83 struct skcipher_walk walk; 92 struct skcipher_walk walk;
84 int err; 93 int err;
85 94
86 err = skcipher_walk_virt(&walk, req, true); 95 err = skcipher_walk_virt(&walk, req, false);
87 96
88 kernel_neon_begin();
89 while (walk.nbytes >= AES_BLOCK_SIZE) { 97 while (walk.nbytes >= AES_BLOCK_SIZE) {
90 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 98 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
91 99
@@ -93,12 +101,13 @@ static int __ecb_crypt(struct skcipher_request *req,
93 blocks = round_down(blocks, 101 blocks = round_down(blocks,
94 walk.stride / AES_BLOCK_SIZE); 102 walk.stride / AES_BLOCK_SIZE);
95 103
104 kernel_neon_begin();
96 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk, 105 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
97 ctx->rounds, blocks); 106 ctx->rounds, blocks);
107 kernel_neon_end();
98 err = skcipher_walk_done(&walk, 108 err = skcipher_walk_done(&walk,
99 walk.nbytes - blocks * AES_BLOCK_SIZE); 109 walk.nbytes - blocks * AES_BLOCK_SIZE);
100 } 110 }
101 kernel_neon_end();
102 111
103 return err; 112 return err;
104} 113}
@@ -120,7 +129,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
120 struct crypto_aes_ctx rk; 129 struct crypto_aes_ctx rk;
121 int err; 130 int err;
122 131
123 err = crypto_aes_expand_key(&rk, in_key, key_len); 132 err = aes_expandkey(&rk, in_key, key_len);
124 if (err) 133 if (err)
125 return err; 134 return err;
126 135
@@ -152,9 +161,8 @@ static int cbc_decrypt(struct skcipher_request *req)
152 struct skcipher_walk walk; 161 struct skcipher_walk walk;
153 int err; 162 int err;
154 163
155 err = skcipher_walk_virt(&walk, req, true); 164 err = skcipher_walk_virt(&walk, req, false);
156 165
157 kernel_neon_begin();
158 while (walk.nbytes >= AES_BLOCK_SIZE) { 166 while (walk.nbytes >= AES_BLOCK_SIZE) {
159 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 167 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
160 168
@@ -162,13 +170,14 @@ static int cbc_decrypt(struct skcipher_request *req)
162 blocks = round_down(blocks, 170 blocks = round_down(blocks,
163 walk.stride / AES_BLOCK_SIZE); 171 walk.stride / AES_BLOCK_SIZE);
164 172
173 kernel_neon_begin();
165 aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 174 aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
166 ctx->key.rk, ctx->key.rounds, blocks, 175 ctx->key.rk, ctx->key.rounds, blocks,
167 walk.iv); 176 walk.iv);
177 kernel_neon_end();
168 err = skcipher_walk_done(&walk, 178 err = skcipher_walk_done(&walk,
169 walk.nbytes - blocks * AES_BLOCK_SIZE); 179 walk.nbytes - blocks * AES_BLOCK_SIZE);
170 } 180 }
171 kernel_neon_end();
172 181
173 return err; 182 return err;
174} 183}
@@ -189,6 +198,25 @@ static void cbc_exit(struct crypto_tfm *tfm)
189 crypto_free_cipher(ctx->enc_tfm); 198 crypto_free_cipher(ctx->enc_tfm);
190} 199}
191 200
201static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
202 unsigned int key_len)
203{
204 struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
205 int err;
206
207 err = aes_expandkey(&ctx->fallback, in_key, key_len);
208 if (err)
209 return err;
210
211 ctx->key.rounds = 6 + key_len / 4;
212
213 kernel_neon_begin();
214 aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
215 kernel_neon_end();
216
217 return 0;
218}
219
192static int ctr_encrypt(struct skcipher_request *req) 220static int ctr_encrypt(struct skcipher_request *req)
193{ 221{
194 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 222 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -197,9 +225,8 @@ static int ctr_encrypt(struct skcipher_request *req)
197 u8 buf[AES_BLOCK_SIZE]; 225 u8 buf[AES_BLOCK_SIZE];
198 int err; 226 int err;
199 227
200 err = skcipher_walk_virt(&walk, req, true); 228 err = skcipher_walk_virt(&walk, req, false);
201 229
202 kernel_neon_begin();
203 while (walk.nbytes > 0) { 230 while (walk.nbytes > 0) {
204 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 231 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
205 u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL; 232 u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
@@ -210,8 +237,10 @@ static int ctr_encrypt(struct skcipher_request *req)
210 final = NULL; 237 final = NULL;
211 } 238 }
212 239
240 kernel_neon_begin();
213 aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 241 aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
214 ctx->rk, ctx->rounds, blocks, walk.iv, final); 242 ctx->rk, ctx->rounds, blocks, walk.iv, final);
243 kernel_neon_end();
215 244
216 if (final) { 245 if (final) {
217 u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; 246 u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -226,11 +255,33 @@ static int ctr_encrypt(struct skcipher_request *req)
226 err = skcipher_walk_done(&walk, 255 err = skcipher_walk_done(&walk,
227 walk.nbytes - blocks * AES_BLOCK_SIZE); 256 walk.nbytes - blocks * AES_BLOCK_SIZE);
228 } 257 }
229 kernel_neon_end();
230 258
231 return err; 259 return err;
232} 260}
233 261
262static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
263{
264 struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
265 unsigned long flags;
266
267 /*
268 * Temporarily disable interrupts to avoid races where
269 * cachelines are evicted when the CPU is interrupted
270 * to do something else.
271 */
272 local_irq_save(flags);
273 aes_encrypt(&ctx->fallback, dst, src);
274 local_irq_restore(flags);
275}
276
277static int ctr_encrypt_sync(struct skcipher_request *req)
278{
279 if (!crypto_simd_usable())
280 return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
281
282 return ctr_encrypt(req);
283}
284
234static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 285static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
235 unsigned int key_len) 286 unsigned int key_len)
236{ 287{
@@ -242,6 +293,9 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
242 return err; 293 return err;
243 294
244 key_len /= 2; 295 key_len /= 2;
296 err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
297 if (err)
298 return err;
245 err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len); 299 err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
246 if (err) 300 if (err)
247 return err; 301 return err;
@@ -253,7 +307,13 @@ static int xts_init(struct crypto_tfm *tfm)
253{ 307{
254 struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); 308 struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
255 309
310 ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
311 if (IS_ERR(ctx->cts_tfm))
312 return PTR_ERR(ctx->cts_tfm);
313
256 ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0); 314 ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
315 if (IS_ERR(ctx->tweak_tfm))
316 crypto_free_cipher(ctx->cts_tfm);
257 317
258 return PTR_ERR_OR_ZERO(ctx->tweak_tfm); 318 return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
259} 319}
@@ -263,49 +323,89 @@ static void xts_exit(struct crypto_tfm *tfm)
263 struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); 323 struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
264 324
265 crypto_free_cipher(ctx->tweak_tfm); 325 crypto_free_cipher(ctx->tweak_tfm);
326 crypto_free_cipher(ctx->cts_tfm);
266} 327}
267 328
268static int __xts_crypt(struct skcipher_request *req, 329static int __xts_crypt(struct skcipher_request *req, bool encrypt,
269 void (*fn)(u8 out[], u8 const in[], u8 const rk[], 330 void (*fn)(u8 out[], u8 const in[], u8 const rk[],
270 int rounds, int blocks, u8 iv[])) 331 int rounds, int blocks, u8 iv[], int))
271{ 332{
272 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 333 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
273 struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 334 struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
335 int tail = req->cryptlen % AES_BLOCK_SIZE;
336 struct skcipher_request subreq;
337 u8 buf[2 * AES_BLOCK_SIZE];
274 struct skcipher_walk walk; 338 struct skcipher_walk walk;
275 int err; 339 int err;
276 340
341 if (req->cryptlen < AES_BLOCK_SIZE)
342 return -EINVAL;
343
344 if (unlikely(tail)) {
345 skcipher_request_set_tfm(&subreq, tfm);
346 skcipher_request_set_callback(&subreq,
347 skcipher_request_flags(req),
348 NULL, NULL);
349 skcipher_request_set_crypt(&subreq, req->src, req->dst,
350 req->cryptlen - tail, req->iv);
351 req = &subreq;
352 }
353
277 err = skcipher_walk_virt(&walk, req, true); 354 err = skcipher_walk_virt(&walk, req, true);
278 if (err) 355 if (err)
279 return err; 356 return err;
280 357
281 crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); 358 crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
282 359
283 kernel_neon_begin();
284 while (walk.nbytes >= AES_BLOCK_SIZE) { 360 while (walk.nbytes >= AES_BLOCK_SIZE) {
285 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 361 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
362 int reorder_last_tweak = !encrypt && tail > 0;
286 363
287 if (walk.nbytes < walk.total) 364 if (walk.nbytes < walk.total) {
288 blocks = round_down(blocks, 365 blocks = round_down(blocks,
289 walk.stride / AES_BLOCK_SIZE); 366 walk.stride / AES_BLOCK_SIZE);
367 reorder_last_tweak = 0;
368 }
290 369
370 kernel_neon_begin();
291 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, 371 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
292 ctx->key.rounds, blocks, walk.iv); 372 ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
373 kernel_neon_end();
293 err = skcipher_walk_done(&walk, 374 err = skcipher_walk_done(&walk,
294 walk.nbytes - blocks * AES_BLOCK_SIZE); 375 walk.nbytes - blocks * AES_BLOCK_SIZE);
295 } 376 }
296 kernel_neon_end();
297 377
298 return err; 378 if (err || likely(!tail))
379 return err;
380
381 /* handle ciphertext stealing */
382 scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
383 AES_BLOCK_SIZE, 0);
384 memcpy(buf + AES_BLOCK_SIZE, buf, tail);
385 scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);
386
387 crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
388
389 if (encrypt)
390 crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
391 else
392 crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
393
394 crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
395
396 scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
397 AES_BLOCK_SIZE + tail, 1);
398 return 0;
299} 399}
300 400
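The ciphertext-stealing tail in __xts_crypt() works on a small stack buffer: the last full output block is read back, its leading bytes become the stolen partial block, the plaintext tail is overlaid on the front, and the merged block makes one pass through the scalar AES cipher with the tweak XORed in before and after. Sketched below for the encrypt direction only (illustrative; block_fn and the explicit tweak argument are assumptions, the driver carries the tweak in walk.iv/req->iv):

#include <stdint.h>
#include <string.h>

typedef void (*block_fn)(const void *key, uint8_t dst[16], const uint8_t src[16]);

/*
 * XTS ciphertext stealing on the final block.  'last_full' is the last full
 * block produced by the bulk pass, 'ptail' the remaining plaintext bytes,
 * 'out' receives 16 + tail bytes: the reworked block followed by the stolen tail.
 */
static void xts_cts_final(block_fn enc, const void *key, uint8_t *out,
                          const uint8_t last_full[16], const uint8_t *ptail,
                          unsigned int tail, const uint8_t tweak[16])
{
        uint8_t buf[32], tmp[16];
        unsigned int i;

        memcpy(buf, last_full, 16);     /* CC: output of the bulk pass */
        memcpy(buf + 16, buf, tail);    /* its head becomes the stolen tail block */
        memcpy(buf, ptail, tail);       /* overlay the plaintext tail, keep CC's padding */

        for (i = 0; i < 16; i++)        /* XTS: XOR tweak, one cipher call, XOR tweak */
                tmp[i] = buf[i] ^ tweak[i];
        enc(key, buf, tmp);
        for (i = 0; i < 16; i++)
                buf[i] ^= tweak[i];

        memcpy(out, buf, 16 + tail);    /* C_{n-1} followed by the tail-byte C_n */
}
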
301static int xts_encrypt(struct skcipher_request *req) 401static int xts_encrypt(struct skcipher_request *req)
302{ 402{
303 return __xts_crypt(req, aesbs_xts_encrypt); 403 return __xts_crypt(req, true, aesbs_xts_encrypt);
304} 404}
305 405
306static int xts_decrypt(struct skcipher_request *req) 406static int xts_decrypt(struct skcipher_request *req)
307{ 407{
308 return __xts_crypt(req, aesbs_xts_decrypt); 408 return __xts_crypt(req, false, aesbs_xts_decrypt);
309} 409}
310 410
311static struct skcipher_alg aes_algs[] = { { 411static struct skcipher_alg aes_algs[] = { {
@@ -359,6 +459,22 @@ static struct skcipher_alg aes_algs[] = { {
359 .encrypt = ctr_encrypt, 459 .encrypt = ctr_encrypt,
360 .decrypt = ctr_encrypt, 460 .decrypt = ctr_encrypt,
361}, { 461}, {
462 .base.cra_name = "ctr(aes)",
463 .base.cra_driver_name = "ctr-aes-neonbs-sync",
464 .base.cra_priority = 250 - 1,
465 .base.cra_blocksize = 1,
466 .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
467 .base.cra_module = THIS_MODULE,
468
469 .min_keysize = AES_MIN_KEY_SIZE,
470 .max_keysize = AES_MAX_KEY_SIZE,
471 .chunksize = AES_BLOCK_SIZE,
472 .walksize = 8 * AES_BLOCK_SIZE,
473 .ivsize = AES_BLOCK_SIZE,
474 .setkey = aesbs_ctr_setkey_sync,
475 .encrypt = ctr_encrypt_sync,
476 .decrypt = ctr_encrypt_sync,
477}, {
362 .base.cra_name = "__xts(aes)", 478 .base.cra_name = "__xts(aes)",
363 .base.cra_driver_name = "__xts-aes-neonbs", 479 .base.cra_driver_name = "__xts-aes-neonbs",
364 .base.cra_priority = 250, 480 .base.cra_priority = 250,
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 52d472a050e6..c691077679a6 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -9,6 +9,7 @@
9#include <asm/neon.h> 9#include <asm/neon.h>
10#include <asm/simd.h> 10#include <asm/simd.h>
11#include <asm/unaligned.h> 11#include <asm/unaligned.h>
12#include <crypto/b128ops.h>
12#include <crypto/cryptd.h> 13#include <crypto/cryptd.h>
13#include <crypto/internal/hash.h> 14#include <crypto/internal/hash.h>
14#include <crypto/internal/simd.h> 15#include <crypto/internal/simd.h>
@@ -17,7 +18,7 @@
17#include <linux/crypto.h> 18#include <linux/crypto.h>
18#include <linux/module.h> 19#include <linux/module.h>
19 20
20MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions"); 21MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
21MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 22MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
22MODULE_LICENSE("GPL v2"); 23MODULE_LICENSE("GPL v2");
23MODULE_ALIAS_CRYPTO("ghash"); 24MODULE_ALIAS_CRYPTO("ghash");
@@ -30,6 +31,8 @@ struct ghash_key {
30 u64 h2[2]; 31 u64 h2[2];
31 u64 h3[2]; 32 u64 h3[2];
32 u64 h4[2]; 33 u64 h4[2];
34
35 be128 k;
33}; 36};
34 37
35struct ghash_desc_ctx { 38struct ghash_desc_ctx {
@@ -62,6 +65,36 @@ static int ghash_init(struct shash_desc *desc)
62 return 0; 65 return 0;
63} 66}
64 67
68static void ghash_do_update(int blocks, u64 dg[], const char *src,
69 struct ghash_key *key, const char *head)
70{
71 if (likely(crypto_simd_usable())) {
72 kernel_neon_begin();
73 pmull_ghash_update(blocks, dg, src, key, head);
74 kernel_neon_end();
75 } else {
76 be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
77
78 do {
79 const u8 *in = src;
80
81 if (head) {
82 in = head;
83 blocks++;
84 head = NULL;
85 } else {
86 src += GHASH_BLOCK_SIZE;
87 }
88
89 crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
90 gf128mul_lle(&dst, &key->k);
91 } while (--blocks);
92
93 dg[0] = be64_to_cpu(dst.b);
94 dg[1] = be64_to_cpu(dst.a);
95 }
96}
97
65static int ghash_update(struct shash_desc *desc, const u8 *src, 98static int ghash_update(struct shash_desc *desc, const u8 *src,
66 unsigned int len) 99 unsigned int len)
67{ 100{
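When NEON is not usable, ghash_do_update() above folds each block into the digest as dst = (dst ^ block) * H in GF(2^128) via gf128mul_lle(). For reference, the sketch below spells that field multiplication out bytewise as standalone C; it is illustrative only, and the helper names are assumptions rather than kernel API.

#include <stdint.h>
#include <string.h>

/*
 * GF(2^128) multiplication with the GCM/GHASH bit convention: r = x * y.
 * Bytes are big-endian, bit 0 is the most significant bit of byte 0.
 */
static void ghash_gfmul(uint8_t r[16], const uint8_t x[16], const uint8_t y[16])
{
        uint8_t z[16] = { 0 };
        uint8_t v[16];
        int i, j, lsb;

        memcpy(v, y, 16);

        for (i = 0; i < 128; i++) {
                if ((x[i / 8] >> (7 - (i % 8))) & 1) {  /* bit i of x set? */
                        for (j = 0; j < 16; j++)
                                z[j] ^= v[j];           /* Z ^= V */
                }

                lsb = v[15] & 1;                        /* V's rightmost bit */
                for (j = 15; j > 0; j--)                /* V >>= 1 */
                        v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
                v[0] >>= 1;
                if (lsb)
                        v[0] ^= 0xe1;                   /* reduce by the GHASH polynomial */
        }
        memcpy(r, z, 16);
}

/* one GHASH absorption step: dg = (dg ^ block) * H */
static void ghash_fold(uint8_t dg[16], const uint8_t block[16], const uint8_t h[16])
{
        uint8_t t[16];
        int i;

        for (i = 0; i < 16; i++)
                t[i] = dg[i] ^ block[i];
        ghash_gfmul(dg, t, h);
}
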
@@ -85,10 +118,8 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
85 blocks = len / GHASH_BLOCK_SIZE; 118 blocks = len / GHASH_BLOCK_SIZE;
86 len %= GHASH_BLOCK_SIZE; 119 len %= GHASH_BLOCK_SIZE;
87 120
88 kernel_neon_begin(); 121 ghash_do_update(blocks, ctx->digest, src, key,
89 pmull_ghash_update(blocks, ctx->digest, src, key, 122 partial ? ctx->buf : NULL);
90 partial ? ctx->buf : NULL);
91 kernel_neon_end();
92 src += blocks * GHASH_BLOCK_SIZE; 123 src += blocks * GHASH_BLOCK_SIZE;
93 partial = 0; 124 partial = 0;
94 } 125 }
@@ -106,9 +137,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
106 struct ghash_key *key = crypto_shash_ctx(desc->tfm); 137 struct ghash_key *key = crypto_shash_ctx(desc->tfm);
107 138
108 memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); 139 memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
109 kernel_neon_begin(); 140 ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
110 pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
111 kernel_neon_end();
112 } 141 }
113 put_unaligned_be64(ctx->digest[1], dst); 142 put_unaligned_be64(ctx->digest[1], dst);
114 put_unaligned_be64(ctx->digest[0], dst + 8); 143 put_unaligned_be64(ctx->digest[0], dst + 8);
@@ -132,24 +161,25 @@ static int ghash_setkey(struct crypto_shash *tfm,
132 const u8 *inkey, unsigned int keylen) 161 const u8 *inkey, unsigned int keylen)
133{ 162{
134 struct ghash_key *key = crypto_shash_ctx(tfm); 163 struct ghash_key *key = crypto_shash_ctx(tfm);
135 be128 h, k; 164 be128 h;
136 165
137 if (keylen != GHASH_BLOCK_SIZE) { 166 if (keylen != GHASH_BLOCK_SIZE) {
138 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 167 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
139 return -EINVAL; 168 return -EINVAL;
140 } 169 }
141 170
142 memcpy(&k, inkey, GHASH_BLOCK_SIZE); 171 /* needed for the fallback */
143 ghash_reflect(key->h, &k); 172 memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
173 ghash_reflect(key->h, &key->k);
144 174
145 h = k; 175 h = key->k;
146 gf128mul_lle(&h, &k); 176 gf128mul_lle(&h, &key->k);
147 ghash_reflect(key->h2, &h); 177 ghash_reflect(key->h2, &h);
148 178
149 gf128mul_lle(&h, &k); 179 gf128mul_lle(&h, &key->k);
150 ghash_reflect(key->h3, &h); 180 ghash_reflect(key->h3, &h);
151 181
152 gf128mul_lle(&h, &k); 182 gf128mul_lle(&h, &key->k);
153 ghash_reflect(key->h4, &h); 183 ghash_reflect(key->h4, &h);
154 184
155 return 0; 185 return 0;
@@ -162,15 +192,13 @@ static struct shash_alg ghash_alg = {
162 .final = ghash_final, 192 .final = ghash_final,
163 .setkey = ghash_setkey, 193 .setkey = ghash_setkey,
164 .descsize = sizeof(struct ghash_desc_ctx), 194 .descsize = sizeof(struct ghash_desc_ctx),
165 .base = { 195
166 .cra_name = "__ghash", 196 .base.cra_name = "ghash",
167 .cra_driver_name = "__driver-ghash-ce", 197 .base.cra_driver_name = "ghash-ce-sync",
168 .cra_priority = 0, 198 .base.cra_priority = 300 - 1,
169 .cra_flags = CRYPTO_ALG_INTERNAL, 199 .base.cra_blocksize = GHASH_BLOCK_SIZE,
170 .cra_blocksize = GHASH_BLOCK_SIZE, 200 .base.cra_ctxsize = sizeof(struct ghash_key),
171 .cra_ctxsize = sizeof(struct ghash_key), 201 .base.cra_module = THIS_MODULE,
172 .cra_module = THIS_MODULE,
173 },
174}; 202};
175 203
176static int ghash_async_init(struct ahash_request *req) 204static int ghash_async_init(struct ahash_request *req)
@@ -285,9 +313,7 @@ static int ghash_async_init_tfm(struct crypto_tfm *tfm)
285 struct cryptd_ahash *cryptd_tfm; 313 struct cryptd_ahash *cryptd_tfm;
286 struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); 314 struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
287 315
288 cryptd_tfm = cryptd_alloc_ahash("__driver-ghash-ce", 316 cryptd_tfm = cryptd_alloc_ahash("ghash-ce-sync", 0, 0);
289 CRYPTO_ALG_INTERNAL,
290 CRYPTO_ALG_INTERNAL);
291 if (IS_ERR(cryptd_tfm)) 317 if (IS_ERR(cryptd_tfm))
292 return PTR_ERR(cryptd_tfm); 318 return PTR_ERR(cryptd_tfm);
293 ctx->cryptd_tfm = cryptd_tfm; 319 ctx->cryptd_tfm = cryptd_tfm;
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index 70efa9656bff..215497f011f2 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -39,7 +39,7 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
39} 39}
40EXPORT_SYMBOL(crypto_sha256_arm_update); 40EXPORT_SYMBOL(crypto_sha256_arm_update);
41 41
42static int sha256_final(struct shash_desc *desc, u8 *out) 42static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
43{ 43{
44 sha256_base_do_finalize(desc, 44 sha256_base_do_finalize(desc,
45 (sha256_block_fn *)sha256_block_data_order); 45 (sha256_block_fn *)sha256_block_data_order);
@@ -51,7 +51,7 @@ int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
51{ 51{
52 sha256_base_do_update(desc, data, len, 52 sha256_base_do_update(desc, data, len,
53 (sha256_block_fn *)sha256_block_data_order); 53 (sha256_block_fn *)sha256_block_data_order);
54 return sha256_final(desc, out); 54 return crypto_sha256_arm_final(desc, out);
55} 55}
56EXPORT_SYMBOL(crypto_sha256_arm_finup); 56EXPORT_SYMBOL(crypto_sha256_arm_finup);
57 57
@@ -59,7 +59,7 @@ static struct shash_alg algs[] = { {
59 .digestsize = SHA256_DIGEST_SIZE, 59 .digestsize = SHA256_DIGEST_SIZE,
60 .init = sha256_base_init, 60 .init = sha256_base_init,
61 .update = crypto_sha256_arm_update, 61 .update = crypto_sha256_arm_update,
62 .final = sha256_final, 62 .final = crypto_sha256_arm_final,
63 .finup = crypto_sha256_arm_finup, 63 .finup = crypto_sha256_arm_finup,
64 .descsize = sizeof(struct sha256_state), 64 .descsize = sizeof(struct sha256_state),
65 .base = { 65 .base = {
@@ -73,7 +73,7 @@ static struct shash_alg algs[] = { {
73 .digestsize = SHA224_DIGEST_SIZE, 73 .digestsize = SHA224_DIGEST_SIZE,
74 .init = sha224_base_init, 74 .init = sha224_base_init,
75 .update = crypto_sha256_arm_update, 75 .update = crypto_sha256_arm_update,
76 .final = sha256_final, 76 .final = crypto_sha256_arm_final,
77 .finup = crypto_sha256_arm_finup, 77 .finup = crypto_sha256_arm_finup,
78 .descsize = sizeof(struct sha256_state), 78 .descsize = sizeof(struct sha256_state),
79 .base = { 79 .base = {
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index a7ce38a36006..38645e415196 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -25,8 +25,8 @@
25asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data, 25asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
26 unsigned int num_blks); 26 unsigned int num_blks);
27 27
28static int sha256_update(struct shash_desc *desc, const u8 *data, 28static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data,
29 unsigned int len) 29 unsigned int len)
30{ 30{
31 struct sha256_state *sctx = shash_desc_ctx(desc); 31 struct sha256_state *sctx = shash_desc_ctx(desc);
32 32
@@ -42,8 +42,8 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
42 return 0; 42 return 0;
43} 43}
44 44
45static int sha256_finup(struct shash_desc *desc, const u8 *data, 45static int crypto_sha256_neon_finup(struct shash_desc *desc, const u8 *data,
46 unsigned int len, u8 *out) 46 unsigned int len, u8 *out)
47{ 47{
48 if (!crypto_simd_usable()) 48 if (!crypto_simd_usable())
49 return crypto_sha256_arm_finup(desc, data, len, out); 49 return crypto_sha256_arm_finup(desc, data, len, out);
@@ -59,17 +59,17 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
59 return sha256_base_finish(desc, out); 59 return sha256_base_finish(desc, out);
60} 60}
61 61
62static int sha256_final(struct shash_desc *desc, u8 *out) 62static int crypto_sha256_neon_final(struct shash_desc *desc, u8 *out)
63{ 63{
64 return sha256_finup(desc, NULL, 0, out); 64 return crypto_sha256_neon_finup(desc, NULL, 0, out);
65} 65}
66 66
67struct shash_alg sha256_neon_algs[] = { { 67struct shash_alg sha256_neon_algs[] = { {
68 .digestsize = SHA256_DIGEST_SIZE, 68 .digestsize = SHA256_DIGEST_SIZE,
69 .init = sha256_base_init, 69 .init = sha256_base_init,
70 .update = sha256_update, 70 .update = crypto_sha256_neon_update,
71 .final = sha256_final, 71 .final = crypto_sha256_neon_final,
72 .finup = sha256_finup, 72 .finup = crypto_sha256_neon_finup,
73 .descsize = sizeof(struct sha256_state), 73 .descsize = sizeof(struct sha256_state),
74 .base = { 74 .base = {
75 .cra_name = "sha256", 75 .cra_name = "sha256",
@@ -81,9 +81,9 @@ struct shash_alg sha256_neon_algs[] = { {
81}, { 81}, {
82 .digestsize = SHA224_DIGEST_SIZE, 82 .digestsize = SHA224_DIGEST_SIZE,
83 .init = sha224_base_init, 83 .init = sha224_base_init,
84 .update = sha256_update, 84 .update = crypto_sha256_neon_update,
85 .final = sha256_final, 85 .final = crypto_sha256_neon_final,
86 .finup = sha256_finup, 86 .finup = crypto_sha256_neon_finup,
87 .descsize = sizeof(struct sha256_state), 87 .descsize = sizeof(struct sha256_state),
88 .base = { 88 .base = {
89 .cra_name = "sha224", 89 .cra_name = "sha224",
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 6b2dc15b6dff..68ca86f85eb7 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += parport.h
17generic-y += preempt.h 17generic-y += preempt.h
18generic-y += seccomp.h 18generic-y += seccomp.h
19generic-y += serial.h 19generic-y += serial.h
20generic-y += simd.h
21generic-y += trace_clock.h 20generic-y += trace_clock.h
22 21
23generated-y += mach-types.h 22generated-y += mach-types.h
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 3f3594d9485c..04115ca6bfb5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -751,6 +751,36 @@
751 status = "disabled"; 751 status = "disabled";
752 }; 752 };
753 753
754 crypto: crypto@30900000 {
755 compatible = "fsl,sec-v4.0";
756 #address-cells = <1>;
757 #size-cells = <1>;
758 reg = <0x30900000 0x40000>;
759 ranges = <0 0x30900000 0x40000>;
760 interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
761 clocks = <&clk IMX8MQ_CLK_AHB>,
762 <&clk IMX8MQ_CLK_IPG_ROOT>;
763 clock-names = "aclk", "ipg";
764
765 sec_jr0: jr@1000 {
766 compatible = "fsl,sec-v4.0-job-ring";
767 reg = <0x1000 0x1000>;
768 interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
769 };
770
771 sec_jr1: jr@2000 {
772 compatible = "fsl,sec-v4.0-job-ring";
773 reg = <0x2000 0x1000>;
774 interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
775 };
776
777 sec_jr2: jr@3000 {
778 compatible = "fsl,sec-v4.0-job-ring";
779 reg = <0x3000 0x1000>;
780 interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
781 };
782 };
783
754 dphy: dphy@30a00300 { 784 dphy: dphy@30a00300 {
755 compatible = "fsl,imx8mq-mipi-dphy"; 785 compatible = "fsl,imx8mq-mipi-dphy";
756 reg = <0x30a00300 0x100>; 786 reg = <0x30a00300 0x100>;
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index d9a523ecdd83..4922c4451e7c 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -58,8 +58,7 @@ config CRYPTO_GHASH_ARM64_CE
58 depends on KERNEL_MODE_NEON 58 depends on KERNEL_MODE_NEON
59 select CRYPTO_HASH 59 select CRYPTO_HASH
60 select CRYPTO_GF128MUL 60 select CRYPTO_GF128MUL
61 select CRYPTO_AES 61 select CRYPTO_LIB_AES
62 select CRYPTO_AES_ARM64
63 62
64config CRYPTO_CRCT10DIF_ARM64_CE 63config CRYPTO_CRCT10DIF_ARM64_CE
65 tristate "CRCT10DIF digest algorithm using PMULL instructions" 64 tristate "CRCT10DIF digest algorithm using PMULL instructions"
@@ -74,15 +73,15 @@ config CRYPTO_AES_ARM64_CE
74 tristate "AES core cipher using ARMv8 Crypto Extensions" 73 tristate "AES core cipher using ARMv8 Crypto Extensions"
75 depends on ARM64 && KERNEL_MODE_NEON 74 depends on ARM64 && KERNEL_MODE_NEON
76 select CRYPTO_ALGAPI 75 select CRYPTO_ALGAPI
77 select CRYPTO_AES_ARM64 76 select CRYPTO_LIB_AES
78 77
79config CRYPTO_AES_ARM64_CE_CCM 78config CRYPTO_AES_ARM64_CE_CCM
80 tristate "AES in CCM mode using ARMv8 Crypto Extensions" 79 tristate "AES in CCM mode using ARMv8 Crypto Extensions"
81 depends on ARM64 && KERNEL_MODE_NEON 80 depends on ARM64 && KERNEL_MODE_NEON
82 select CRYPTO_ALGAPI 81 select CRYPTO_ALGAPI
83 select CRYPTO_AES_ARM64_CE 82 select CRYPTO_AES_ARM64_CE
84 select CRYPTO_AES_ARM64
85 select CRYPTO_AEAD 83 select CRYPTO_AEAD
84 select CRYPTO_LIB_AES
86 85
87config CRYPTO_AES_ARM64_CE_BLK 86config CRYPTO_AES_ARM64_CE_BLK
88 tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions" 87 tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
@@ -97,7 +96,7 @@ config CRYPTO_AES_ARM64_NEON_BLK
97 depends on KERNEL_MODE_NEON 96 depends on KERNEL_MODE_NEON
98 select CRYPTO_BLKCIPHER 97 select CRYPTO_BLKCIPHER
99 select CRYPTO_AES_ARM64 98 select CRYPTO_AES_ARM64
100 select CRYPTO_AES 99 select CRYPTO_LIB_AES
101 select CRYPTO_SIMD 100 select CRYPTO_SIMD
102 101
103config CRYPTO_CHACHA20_NEON 102config CRYPTO_CHACHA20_NEON
@@ -117,6 +116,7 @@ config CRYPTO_AES_ARM64_BS
117 select CRYPTO_BLKCIPHER 116 select CRYPTO_BLKCIPHER
118 select CRYPTO_AES_ARM64_NEON_BLK 117 select CRYPTO_AES_ARM64_NEON_BLK
119 select CRYPTO_AES_ARM64 118 select CRYPTO_AES_ARM64
119 select CRYPTO_LIB_AES
120 select CRYPTO_SIMD 120 select CRYPTO_SIMD
121 121
122endif 122endif
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 827e5473e5de..541cf9165748 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -43,8 +43,6 @@ asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
43asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[], 43asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
44 u32 rounds); 44 u32 rounds);
45 45
46asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
47
48static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key, 46static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
49 unsigned int key_len) 47 unsigned int key_len)
50{ 48{
@@ -124,8 +122,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
124 } 122 }
125 123
126 while (abytes >= AES_BLOCK_SIZE) { 124 while (abytes >= AES_BLOCK_SIZE) {
127 __aes_arm64_encrypt(key->key_enc, mac, mac, 125 aes_encrypt(key, mac, mac);
128 num_rounds(key));
129 crypto_xor(mac, in, AES_BLOCK_SIZE); 126 crypto_xor(mac, in, AES_BLOCK_SIZE);
130 127
131 in += AES_BLOCK_SIZE; 128 in += AES_BLOCK_SIZE;
@@ -133,8 +130,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
133 } 130 }
134 131
135 if (abytes > 0) { 132 if (abytes > 0) {
136 __aes_arm64_encrypt(key->key_enc, mac, mac, 133 aes_encrypt(key, mac, mac);
137 num_rounds(key));
138 crypto_xor(mac, in, abytes); 134 crypto_xor(mac, in, abytes);
139 *macp = abytes; 135 *macp = abytes;
140 } 136 }
@@ -206,10 +202,8 @@ static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
206 bsize = nbytes; 202 bsize = nbytes;
207 203
208 crypto_inc(walk->iv, AES_BLOCK_SIZE); 204 crypto_inc(walk->iv, AES_BLOCK_SIZE);
209 __aes_arm64_encrypt(ctx->key_enc, buf, walk->iv, 205 aes_encrypt(ctx, buf, walk->iv);
210 num_rounds(ctx)); 206 aes_encrypt(ctx, mac, mac);
211 __aes_arm64_encrypt(ctx->key_enc, mac, mac,
212 num_rounds(ctx));
213 if (enc) 207 if (enc)
214 crypto_xor(mac, src, bsize); 208 crypto_xor(mac, src, bsize);
215 crypto_xor_cpy(dst, src, buf, bsize); 209 crypto_xor_cpy(dst, src, buf, bsize);
@@ -224,8 +218,8 @@ static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
224 } 218 }
225 219
226 if (!err) { 220 if (!err) {
227 __aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx)); 221 aes_encrypt(ctx, buf, iv0);
228 __aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx)); 222 aes_encrypt(ctx, mac, mac);
229 crypto_xor(mac, buf, AES_BLOCK_SIZE); 223 crypto_xor(mac, buf, AES_BLOCK_SIZE);
230 } 224 }
231 return err; 225 return err;
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index d3bc97afde20..6d085dc56c51 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -20,9 +20,6 @@ MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
20MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 20MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
21MODULE_LICENSE("GPL v2"); 21MODULE_LICENSE("GPL v2");
22 22
23asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
24asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
25
26struct aes_block { 23struct aes_block {
27 u8 b[AES_BLOCK_SIZE]; 24 u8 b[AES_BLOCK_SIZE];
28}; 25};
@@ -51,7 +48,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
51 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 48 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
52 49
53 if (!crypto_simd_usable()) { 50 if (!crypto_simd_usable()) {
54 __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); 51 aes_encrypt(ctx, dst, src);
55 return; 52 return;
56 } 53 }
57 54
@@ -65,7 +62,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
65 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 62 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
66 63
67 if (!crypto_simd_usable()) { 64 if (!crypto_simd_usable()) {
68 __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); 65 aes_decrypt(ctx, dst, src);
69 return; 66 return;
70 } 67 }
71 68
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 00bd2885feaa..c132c49c89a8 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -21,6 +21,9 @@
21 .macro xts_reload_mask, tmp 21 .macro xts_reload_mask, tmp
22 .endm 22 .endm
23 23
24 .macro xts_cts_skip_tw, reg, lbl
25 .endm
26
24 /* preload all round keys */ 27 /* preload all round keys */
25 .macro load_round_keys, rounds, rk 28 .macro load_round_keys, rounds, rk
26 cmp \rounds, #12 29 cmp \rounds, #12
diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S
index f06df0d2080c..423d0aebc570 100644
--- a/arch/arm64/crypto/aes-cipher-core.S
+++ b/arch/arm64/crypto/aes-cipher-core.S
@@ -128,43 +128,5 @@ ENDPROC(__aes_arm64_encrypt)
128 128
129 .align 5 129 .align 5
130ENTRY(__aes_arm64_decrypt) 130ENTRY(__aes_arm64_decrypt)
131 do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0 131 do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
132ENDPROC(__aes_arm64_decrypt) 132ENDPROC(__aes_arm64_decrypt)
133
134 .section ".rodata", "a"
135 .align L1_CACHE_SHIFT
136 .type __aes_arm64_inverse_sbox, %object
137__aes_arm64_inverse_sbox:
138 .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
139 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
140 .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
141 .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
142 .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
143 .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
144 .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
145 .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
146 .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
147 .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
148 .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
149 .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
150 .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
151 .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
152 .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
153 .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
154 .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
155 .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
156 .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
157 .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
158 .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
159 .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
160 .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
161 .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
162 .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
163 .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
164 .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
165 .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
166 .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
167 .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
168 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
169 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
170 .size __aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox
diff --git a/arch/arm64/crypto/aes-cipher-glue.c b/arch/arm64/crypto/aes-cipher-glue.c
index 0913966aa6fa..8caf6dfefce8 100644
--- a/arch/arm64/crypto/aes-cipher-glue.c
+++ b/arch/arm64/crypto/aes-cipher-glue.c
@@ -10,12 +10,9 @@
10#include <linux/module.h> 10#include <linux/module.h>
11 11
12asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 12asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
13EXPORT_SYMBOL(__aes_arm64_encrypt);
14
15asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 13asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
16EXPORT_SYMBOL(__aes_arm64_decrypt);
17 14
18static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 15static void aes_arm64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
19{ 16{
20 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 17 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
21 int rounds = 6 + ctx->key_length / 4; 18 int rounds = 6 + ctx->key_length / 4;
@@ -23,7 +20,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
23 __aes_arm64_encrypt(ctx->key_enc, out, in, rounds); 20 __aes_arm64_encrypt(ctx->key_enc, out, in, rounds);
24} 21}
25 22
26static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 23static void aes_arm64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
27{ 24{
28 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 25 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
29 int rounds = 6 + ctx->key_length / 4; 26 int rounds = 6 + ctx->key_length / 4;
@@ -43,8 +40,8 @@ static struct crypto_alg aes_alg = {
43 .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, 40 .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
44 .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, 41 .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
45 .cra_cipher.cia_setkey = crypto_aes_set_key, 42 .cra_cipher.cia_setkey = crypto_aes_set_key,
46 .cra_cipher.cia_encrypt = aes_encrypt, 43 .cra_cipher.cia_encrypt = aes_arm64_encrypt,
47 .cra_cipher.cia_decrypt = aes_decrypt 44 .cra_cipher.cia_decrypt = aes_arm64_decrypt
48}; 45};
49 46
50static int __init aes_init(void) 47static int __init aes_init(void)
diff --git a/arch/arm64/crypto/aes-ctr-fallback.h b/arch/arm64/crypto/aes-ctr-fallback.h
deleted file mode 100644
index 3ac911990ec7..000000000000
--- a/arch/arm64/crypto/aes-ctr-fallback.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Fallback for sync aes(ctr) in contexts where kernel mode NEON
4 * is not allowed
5 *
6 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
7 */
8
9#include <crypto/aes.h>
10#include <crypto/internal/skcipher.h>
11
12asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
13
14static inline int aes_ctr_encrypt_fallback(struct crypto_aes_ctx *ctx,
15 struct skcipher_request *req)
16{
17 struct skcipher_walk walk;
18 u8 buf[AES_BLOCK_SIZE];
19 int err;
20
21 err = skcipher_walk_virt(&walk, req, true);
22
23 while (walk.nbytes > 0) {
24 u8 *dst = walk.dst.virt.addr;
25 u8 *src = walk.src.virt.addr;
26 int nbytes = walk.nbytes;
27 int tail = 0;
28
29 if (nbytes < walk.total) {
30 nbytes = round_down(nbytes, AES_BLOCK_SIZE);
31 tail = walk.nbytes % AES_BLOCK_SIZE;
32 }
33
34 do {
35 int bsize = min(nbytes, AES_BLOCK_SIZE);
36
37 __aes_arm64_encrypt(ctx->key_enc, buf, walk.iv,
38 6 + ctx->key_length / 4);
39 crypto_xor_cpy(dst, src, buf, bsize);
40 crypto_inc(walk.iv, AES_BLOCK_SIZE);
41
42 dst += AES_BLOCK_SIZE;
43 src += AES_BLOCK_SIZE;
44 nbytes -= AES_BLOCK_SIZE;
45 } while (nbytes > 0);
46
47 err = skcipher_walk_done(&walk, tail);
48 }
49 return err;
50}
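
The deleted aes-ctr-fallback.h is superseded later in this series by crypto_ctr_encrypt_walk() from <crypto/ctr.h>, driven by a single-block callback, as the aes-glue.c and aes-neonbs-glue.c hunks below show. A minimal sketch of that replacement shape, with hypothetical my_-prefixed names, could be:

#include <crypto/aes.h>
#include <crypto/ctr.h>			/* crypto_ctr_encrypt_walk() */
#include <crypto/internal/skcipher.h>

static void my_ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	aes_encrypt(ctx, dst, src);	/* encrypt a single counter block */
}

static int my_ctr_encrypt_fallback(struct skcipher_request *req)
{
	/* walks the request, XORs in the keystream and bumps the counter per block */
	return crypto_ctr_encrypt_walk(req, my_ctr_encrypt_one);
}
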
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 8d6c8932c841..aa57dc639f77 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -9,6 +9,8 @@
9#include <asm/hwcap.h> 9#include <asm/hwcap.h>
10#include <asm/simd.h> 10#include <asm/simd.h>
11#include <crypto/aes.h> 11#include <crypto/aes.h>
12#include <crypto/ctr.h>
13#include <crypto/sha.h>
12#include <crypto/internal/hash.h> 14#include <crypto/internal/hash.h>
13#include <crypto/internal/simd.h> 15#include <crypto/internal/simd.h>
14#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
@@ -18,12 +20,10 @@
18#include <crypto/xts.h> 20#include <crypto/xts.h>
19 21
20#include "aes-ce-setkey.h" 22#include "aes-ce-setkey.h"
21#include "aes-ctr-fallback.h"
22 23
23#ifdef USE_V8_CRYPTO_EXTENSIONS 24#ifdef USE_V8_CRYPTO_EXTENSIONS
24#define MODE "ce" 25#define MODE "ce"
25#define PRIO 300 26#define PRIO 300
26#define aes_setkey ce_aes_setkey
27#define aes_expandkey ce_aes_expandkey 27#define aes_expandkey ce_aes_expandkey
28#define aes_ecb_encrypt ce_aes_ecb_encrypt 28#define aes_ecb_encrypt ce_aes_ecb_encrypt
29#define aes_ecb_decrypt ce_aes_ecb_decrypt 29#define aes_ecb_decrypt ce_aes_ecb_decrypt
@@ -31,6 +31,8 @@
31#define aes_cbc_decrypt ce_aes_cbc_decrypt 31#define aes_cbc_decrypt ce_aes_cbc_decrypt
32#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt 32#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt
33#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt 33#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt
34#define aes_essiv_cbc_encrypt ce_aes_essiv_cbc_encrypt
35#define aes_essiv_cbc_decrypt ce_aes_essiv_cbc_decrypt
34#define aes_ctr_encrypt ce_aes_ctr_encrypt 36#define aes_ctr_encrypt ce_aes_ctr_encrypt
35#define aes_xts_encrypt ce_aes_xts_encrypt 37#define aes_xts_encrypt ce_aes_xts_encrypt
36#define aes_xts_decrypt ce_aes_xts_decrypt 38#define aes_xts_decrypt ce_aes_xts_decrypt
@@ -39,27 +41,31 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
39#else 41#else
40#define MODE "neon" 42#define MODE "neon"
41#define PRIO 200 43#define PRIO 200
42#define aes_setkey crypto_aes_set_key
43#define aes_expandkey crypto_aes_expand_key
44#define aes_ecb_encrypt neon_aes_ecb_encrypt 44#define aes_ecb_encrypt neon_aes_ecb_encrypt
45#define aes_ecb_decrypt neon_aes_ecb_decrypt 45#define aes_ecb_decrypt neon_aes_ecb_decrypt
46#define aes_cbc_encrypt neon_aes_cbc_encrypt 46#define aes_cbc_encrypt neon_aes_cbc_encrypt
47#define aes_cbc_decrypt neon_aes_cbc_decrypt 47#define aes_cbc_decrypt neon_aes_cbc_decrypt
48#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt 48#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt
49#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt 49#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt
50#define aes_essiv_cbc_encrypt neon_aes_essiv_cbc_encrypt
51#define aes_essiv_cbc_decrypt neon_aes_essiv_cbc_decrypt
50#define aes_ctr_encrypt neon_aes_ctr_encrypt 52#define aes_ctr_encrypt neon_aes_ctr_encrypt
51#define aes_xts_encrypt neon_aes_xts_encrypt 53#define aes_xts_encrypt neon_aes_xts_encrypt
52#define aes_xts_decrypt neon_aes_xts_decrypt 54#define aes_xts_decrypt neon_aes_xts_decrypt
53#define aes_mac_update neon_aes_mac_update 55#define aes_mac_update neon_aes_mac_update
54MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON"); 56MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
57#endif
58#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
55MODULE_ALIAS_CRYPTO("ecb(aes)"); 59MODULE_ALIAS_CRYPTO("ecb(aes)");
56MODULE_ALIAS_CRYPTO("cbc(aes)"); 60MODULE_ALIAS_CRYPTO("cbc(aes)");
57MODULE_ALIAS_CRYPTO("ctr(aes)"); 61MODULE_ALIAS_CRYPTO("ctr(aes)");
58MODULE_ALIAS_CRYPTO("xts(aes)"); 62MODULE_ALIAS_CRYPTO("xts(aes)");
63#endif
64MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
65MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
59MODULE_ALIAS_CRYPTO("cmac(aes)"); 66MODULE_ALIAS_CRYPTO("cmac(aes)");
60MODULE_ALIAS_CRYPTO("xcbc(aes)"); 67MODULE_ALIAS_CRYPTO("xcbc(aes)");
61MODULE_ALIAS_CRYPTO("cbcmac(aes)"); 68MODULE_ALIAS_CRYPTO("cbcmac(aes)");
62#endif
63 69
64MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 70MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
65MODULE_LICENSE("GPL v2"); 71MODULE_LICENSE("GPL v2");
@@ -84,25 +90,32 @@ asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
84 int rounds, int blocks, u8 ctr[]); 90 int rounds, int blocks, u8 ctr[]);
85 91
86asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], 92asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
87 int rounds, int blocks, u32 const rk2[], u8 iv[], 93 int rounds, int bytes, u32 const rk2[], u8 iv[],
88 int first); 94 int first);
89asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], 95asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
90 int rounds, int blocks, u32 const rk2[], u8 iv[], 96 int rounds, int bytes, u32 const rk2[], u8 iv[],
91 int first); 97 int first);
92 98
99asmlinkage void aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
100 int rounds, int blocks, u8 iv[],
101 u32 const rk2[]);
102asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
103 int rounds, int blocks, u8 iv[],
104 u32 const rk2[]);
105
93asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds, 106asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
94 int blocks, u8 dg[], int enc_before, 107 int blocks, u8 dg[], int enc_before,
95 int enc_after); 108 int enc_after);
96 109
97struct cts_cbc_req_ctx { 110struct crypto_aes_xts_ctx {
98 struct scatterlist sg_src[2]; 111 struct crypto_aes_ctx key1;
99 struct scatterlist sg_dst[2]; 112 struct crypto_aes_ctx __aligned(8) key2;
100 struct skcipher_request subreq;
101}; 113};
102 114
103struct crypto_aes_xts_ctx { 115struct crypto_aes_essiv_cbc_ctx {
104 struct crypto_aes_ctx key1; 116 struct crypto_aes_ctx key1;
105 struct crypto_aes_ctx __aligned(8) key2; 117 struct crypto_aes_ctx __aligned(8) key2;
118 struct crypto_shash *hash;
106}; 119};
107 120
108struct mac_tfm_ctx { 121struct mac_tfm_ctx {
@@ -118,11 +131,18 @@ struct mac_desc_ctx {
118static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 131static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
119 unsigned int key_len) 132 unsigned int key_len)
120{ 133{
121 return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len); 134 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
135 int ret;
136
137 ret = aes_expandkey(ctx, in_key, key_len);
138 if (ret)
139 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
140
141 return ret;
122} 142}
123 143
124static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key, 144static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
125 unsigned int key_len) 145 const u8 *in_key, unsigned int key_len)
126{ 146{
127 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 147 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
128 int ret; 148 int ret;
@@ -142,7 +162,33 @@ static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
142 return -EINVAL; 162 return -EINVAL;
143} 163}
144 164
145static int ecb_encrypt(struct skcipher_request *req) 165static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
166 const u8 *in_key,
167 unsigned int key_len)
168{
169 struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
170 SHASH_DESC_ON_STACK(desc, ctx->hash);
171 u8 digest[SHA256_DIGEST_SIZE];
172 int ret;
173
174 ret = aes_expandkey(&ctx->key1, in_key, key_len);
175 if (ret)
176 goto out;
177
178 desc->tfm = ctx->hash;
179 crypto_shash_digest(desc, in_key, key_len, digest);
180
181 ret = aes_expandkey(&ctx->key2, digest, sizeof(digest));
182 if (ret)
183 goto out;
184
185 return 0;
186out:
187 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
188 return -EINVAL;
189}
190
191static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
146{ 192{
147 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 193 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
148 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 194 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
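
essiv_cbc_set_key() above derives the second key by hashing the user key with SHA-256 and expanding the 32-byte digest as an AES-256 schedule; at request time the sector IV is encrypted under that key to form the actual ESSIV. A conceptual sketch of the IV derivation only (the helper name is hypothetical and the real work is done in the assembler routines added below):

#include <asm/unaligned.h>	/* put_unaligned_le64() */
#include <crypto/aes.h>
#include <linux/string.h>

static void essiv_make_iv(const struct crypto_aes_ctx *key2, u64 sector,
			  u8 iv[AES_BLOCK_SIZE])
{
	memset(iv, 0, AES_BLOCK_SIZE);
	put_unaligned_le64(sector, iv);	/* little-endian sector number, zero padded */
	aes_encrypt(key2, iv, iv);	/* IV = E_key2(sector) */
}
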
@@ -162,7 +208,7 @@ static int ecb_encrypt(struct skcipher_request *req)
162 return err; 208 return err;
163} 209}
164 210
165static int ecb_decrypt(struct skcipher_request *req) 211static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
166{ 212{
167 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 213 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
168 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 214 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -182,63 +228,78 @@ static int ecb_decrypt(struct skcipher_request *req)
182 return err; 228 return err;
183} 229}
184 230
185static int cbc_encrypt(struct skcipher_request *req) 231static int cbc_encrypt_walk(struct skcipher_request *req,
232 struct skcipher_walk *walk)
186{ 233{
187 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 234 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
188 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 235 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
189 int err, rounds = 6 + ctx->key_length / 4; 236 int err = 0, rounds = 6 + ctx->key_length / 4;
190 struct skcipher_walk walk;
191 unsigned int blocks; 237 unsigned int blocks;
192 238
193 err = skcipher_walk_virt(&walk, req, false); 239 while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
194
195 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
196 kernel_neon_begin(); 240 kernel_neon_begin();
197 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 241 aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
198 ctx->key_enc, rounds, blocks, walk.iv); 242 ctx->key_enc, rounds, blocks, walk->iv);
199 kernel_neon_end(); 243 kernel_neon_end();
200 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 244 err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
201 } 245 }
202 return err; 246 return err;
203} 247}
204 248
205static int cbc_decrypt(struct skcipher_request *req) 249static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
206{ 250{
207 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
208 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
209 int err, rounds = 6 + ctx->key_length / 4;
210 struct skcipher_walk walk; 251 struct skcipher_walk walk;
211 unsigned int blocks; 252 int err;
212 253
213 err = skcipher_walk_virt(&walk, req, false); 254 err = skcipher_walk_virt(&walk, req, false);
255 if (err)
256 return err;
257 return cbc_encrypt_walk(req, &walk);
258}
214 259
215 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 260static int cbc_decrypt_walk(struct skcipher_request *req,
261 struct skcipher_walk *walk)
262{
263 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
264 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
265 int err = 0, rounds = 6 + ctx->key_length / 4;
266 unsigned int blocks;
267
268 while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
216 kernel_neon_begin(); 269 kernel_neon_begin();
217 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 270 aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
218 ctx->key_dec, rounds, blocks, walk.iv); 271 ctx->key_dec, rounds, blocks, walk->iv);
219 kernel_neon_end(); 272 kernel_neon_end();
220 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 273 err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
221 } 274 }
222 return err; 275 return err;
223} 276}
224 277
225static int cts_cbc_init_tfm(struct crypto_skcipher *tfm) 278static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
226{ 279{
227 crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx)); 280 struct skcipher_walk walk;
228 return 0; 281 int err;
282
283 err = skcipher_walk_virt(&walk, req, false);
284 if (err)
285 return err;
286 return cbc_decrypt_walk(req, &walk);
229} 287}
230 288
231static int cts_cbc_encrypt(struct skcipher_request *req) 289static int cts_cbc_encrypt(struct skcipher_request *req)
232{ 290{
233 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 291 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
234 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 292 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
235 struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
236 int err, rounds = 6 + ctx->key_length / 4; 293 int err, rounds = 6 + ctx->key_length / 4;
237 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; 294 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
238 struct scatterlist *src = req->src, *dst = req->dst; 295 struct scatterlist *src = req->src, *dst = req->dst;
296 struct scatterlist sg_src[2], sg_dst[2];
297 struct skcipher_request subreq;
239 struct skcipher_walk walk; 298 struct skcipher_walk walk;
240 299
241 skcipher_request_set_tfm(&rctx->subreq, tfm); 300 skcipher_request_set_tfm(&subreq, tfm);
301 skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
302 NULL, NULL);
242 303
243 if (req->cryptlen <= AES_BLOCK_SIZE) { 304 if (req->cryptlen <= AES_BLOCK_SIZE) {
244 if (req->cryptlen < AES_BLOCK_SIZE) 305 if (req->cryptlen < AES_BLOCK_SIZE)
@@ -247,41 +308,30 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
247 } 308 }
248 309
249 if (cbc_blocks > 0) { 310 if (cbc_blocks > 0) {
250 unsigned int blocks; 311 skcipher_request_set_crypt(&subreq, req->src, req->dst,
251
252 skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
253 cbc_blocks * AES_BLOCK_SIZE, 312 cbc_blocks * AES_BLOCK_SIZE,
254 req->iv); 313 req->iv);
255 314
256 err = skcipher_walk_virt(&walk, &rctx->subreq, false); 315 err = skcipher_walk_virt(&walk, &subreq, false) ?:
257 316 cbc_encrypt_walk(&subreq, &walk);
258 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
259 kernel_neon_begin();
260 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
261 ctx->key_enc, rounds, blocks, walk.iv);
262 kernel_neon_end();
263 err = skcipher_walk_done(&walk,
264 walk.nbytes % AES_BLOCK_SIZE);
265 }
266 if (err) 317 if (err)
267 return err; 318 return err;
268 319
269 if (req->cryptlen == AES_BLOCK_SIZE) 320 if (req->cryptlen == AES_BLOCK_SIZE)
270 return 0; 321 return 0;
271 322
272 dst = src = scatterwalk_ffwd(rctx->sg_src, req->src, 323 dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
273 rctx->subreq.cryptlen);
274 if (req->dst != req->src) 324 if (req->dst != req->src)
275 dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, 325 dst = scatterwalk_ffwd(sg_dst, req->dst,
276 rctx->subreq.cryptlen); 326 subreq.cryptlen);
277 } 327 }
278 328
279 /* handle ciphertext stealing */ 329 /* handle ciphertext stealing */
280 skcipher_request_set_crypt(&rctx->subreq, src, dst, 330 skcipher_request_set_crypt(&subreq, src, dst,
281 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, 331 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
282 req->iv); 332 req->iv);
283 333
284 err = skcipher_walk_virt(&walk, &rctx->subreq, false); 334 err = skcipher_walk_virt(&walk, &subreq, false);
285 if (err) 335 if (err)
286 return err; 336 return err;
287 337
@@ -297,13 +347,16 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
297{ 347{
298 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 348 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
299 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 349 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
300 struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
301 int err, rounds = 6 + ctx->key_length / 4; 350 int err, rounds = 6 + ctx->key_length / 4;
302 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; 351 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
303 struct scatterlist *src = req->src, *dst = req->dst; 352 struct scatterlist *src = req->src, *dst = req->dst;
353 struct scatterlist sg_src[2], sg_dst[2];
354 struct skcipher_request subreq;
304 struct skcipher_walk walk; 355 struct skcipher_walk walk;
305 356
306 skcipher_request_set_tfm(&rctx->subreq, tfm); 357 skcipher_request_set_tfm(&subreq, tfm);
358 skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
359 NULL, NULL);
307 360
308 if (req->cryptlen <= AES_BLOCK_SIZE) { 361 if (req->cryptlen <= AES_BLOCK_SIZE) {
309 if (req->cryptlen < AES_BLOCK_SIZE) 362 if (req->cryptlen < AES_BLOCK_SIZE)
@@ -312,41 +365,30 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
312 } 365 }
313 366
314 if (cbc_blocks > 0) { 367 if (cbc_blocks > 0) {
315 unsigned int blocks; 368 skcipher_request_set_crypt(&subreq, req->src, req->dst,
316
317 skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
318 cbc_blocks * AES_BLOCK_SIZE, 369 cbc_blocks * AES_BLOCK_SIZE,
319 req->iv); 370 req->iv);
320 371
321 err = skcipher_walk_virt(&walk, &rctx->subreq, false); 372 err = skcipher_walk_virt(&walk, &subreq, false) ?:
322 373 cbc_decrypt_walk(&subreq, &walk);
323 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
324 kernel_neon_begin();
325 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
326 ctx->key_dec, rounds, blocks, walk.iv);
327 kernel_neon_end();
328 err = skcipher_walk_done(&walk,
329 walk.nbytes % AES_BLOCK_SIZE);
330 }
331 if (err) 374 if (err)
332 return err; 375 return err;
333 376
334 if (req->cryptlen == AES_BLOCK_SIZE) 377 if (req->cryptlen == AES_BLOCK_SIZE)
335 return 0; 378 return 0;
336 379
337 dst = src = scatterwalk_ffwd(rctx->sg_src, req->src, 380 dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
338 rctx->subreq.cryptlen);
339 if (req->dst != req->src) 381 if (req->dst != req->src)
340 dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, 382 dst = scatterwalk_ffwd(sg_dst, req->dst,
341 rctx->subreq.cryptlen); 383 subreq.cryptlen);
342 } 384 }
343 385
344 /* handle ciphertext stealing */ 386 /* handle ciphertext stealing */
345 skcipher_request_set_crypt(&rctx->subreq, src, dst, 387 skcipher_request_set_crypt(&subreq, src, dst,
346 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, 388 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
347 req->iv); 389 req->iv);
348 390
349 err = skcipher_walk_virt(&walk, &rctx->subreq, false); 391 err = skcipher_walk_virt(&walk, &subreq, false);
350 if (err) 392 if (err)
351 return err; 393 return err;
352 394
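
The cts(cbc(aes)) rework above drops the per-request context in favour of an on-stack sub-request: the bulk of the data runs through the normal CBC walk, then scatterwalk_ffwd() skips ahead to the last one or two blocks so a second, short walk can perform the ciphertext stealing. A condensed sketch of that two-pass flow, leaning on the driver-internal cbc_encrypt_walk() shown above and with error handling trimmed:

#include <crypto/aes.h>
#include <crypto/scatterwalk.h>		/* scatterwalk_ffwd() */
#include <crypto/internal/skcipher.h>

static int cts_two_pass_outline(struct skcipher_request *req,
				struct crypto_skcipher *tfm, int cbc_blocks)
{
	struct scatterlist sg_src[2], sg_dst[2];
	struct scatterlist *src = req->src, *dst = req->dst;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	/* pass 1: plain CBC over everything except the last two blocks */
	skcipher_request_set_crypt(&subreq, req->src, req->dst,
				   cbc_blocks * AES_BLOCK_SIZE, req->iv);
	err = skcipher_walk_virt(&walk, &subreq, false) ?:
	      cbc_encrypt_walk(&subreq, &walk);
	if (err)
		return err;

	/* pass 2: fast-forward to the tail and hand it to the CTS routine */
	dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);

	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);
	return skcipher_walk_virt(&walk, &subreq, false);
	/* ... followed by the aes_cbc_cts_* asm call and skcipher_walk_done() */
}
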
@@ -358,6 +400,66 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
358 return skcipher_walk_done(&walk, 0); 400 return skcipher_walk_done(&walk, 0);
359} 401}
360 402
403static int __maybe_unused essiv_cbc_init_tfm(struct crypto_skcipher *tfm)
404{
405 struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
406
407 ctx->hash = crypto_alloc_shash("sha256", 0, 0);
408
409 return PTR_ERR_OR_ZERO(ctx->hash);
410}
411
412static void __maybe_unused essiv_cbc_exit_tfm(struct crypto_skcipher *tfm)
413{
414 struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
415
416 crypto_free_shash(ctx->hash);
417}
418
419static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
420{
421 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
422 struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
423 int err, rounds = 6 + ctx->key1.key_length / 4;
424 struct skcipher_walk walk;
425 unsigned int blocks;
426
427 err = skcipher_walk_virt(&walk, req, false);
428
429 blocks = walk.nbytes / AES_BLOCK_SIZE;
430 if (blocks) {
431 kernel_neon_begin();
432 aes_essiv_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
433 ctx->key1.key_enc, rounds, blocks,
434 req->iv, ctx->key2.key_enc);
435 kernel_neon_end();
436 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
437 }
438 return err ?: cbc_encrypt_walk(req, &walk);
439}
440
441static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
442{
443 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
444 struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
445 int err, rounds = 6 + ctx->key1.key_length / 4;
446 struct skcipher_walk walk;
447 unsigned int blocks;
448
449 err = skcipher_walk_virt(&walk, req, false);
450
451 blocks = walk.nbytes / AES_BLOCK_SIZE;
452 if (blocks) {
453 kernel_neon_begin();
454 aes_essiv_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
455 ctx->key1.key_dec, rounds, blocks,
456 req->iv, ctx->key2.key_enc);
457 kernel_neon_end();
458 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
459 }
460 return err ?: cbc_decrypt_walk(req, &walk);
461}
462
361static int ctr_encrypt(struct skcipher_request *req) 463static int ctr_encrypt(struct skcipher_request *req)
362{ 464{
363 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 465 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -397,62 +499,176 @@ static int ctr_encrypt(struct skcipher_request *req)
397 return err; 499 return err;
398} 500}
399 501
400static int ctr_encrypt_sync(struct skcipher_request *req) 502static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
401{ 503{
402 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 504 const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
403 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 505 unsigned long flags;
506
507 /*
508 * Temporarily disable interrupts to avoid races where
509 * cachelines are evicted when the CPU is interrupted
510 * to do something else.
511 */
512 local_irq_save(flags);
513 aes_encrypt(ctx, dst, src);
514 local_irq_restore(flags);
515}
404 516
517static int __maybe_unused ctr_encrypt_sync(struct skcipher_request *req)
518{
405 if (!crypto_simd_usable()) 519 if (!crypto_simd_usable())
406 return aes_ctr_encrypt_fallback(ctx, req); 520 return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
407 521
408 return ctr_encrypt(req); 522 return ctr_encrypt(req);
409} 523}
410 524
411static int xts_encrypt(struct skcipher_request *req) 525static int __maybe_unused xts_encrypt(struct skcipher_request *req)
412{ 526{
413 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 527 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
414 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 528 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
415 int err, first, rounds = 6 + ctx->key1.key_length / 4; 529 int err, first, rounds = 6 + ctx->key1.key_length / 4;
530 int tail = req->cryptlen % AES_BLOCK_SIZE;
531 struct scatterlist sg_src[2], sg_dst[2];
532 struct skcipher_request subreq;
533 struct scatterlist *src, *dst;
416 struct skcipher_walk walk; 534 struct skcipher_walk walk;
417 unsigned int blocks; 535
536 if (req->cryptlen < AES_BLOCK_SIZE)
537 return -EINVAL;
418 538
419 err = skcipher_walk_virt(&walk, req, false); 539 err = skcipher_walk_virt(&walk, req, false);
420 540
421 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 541 if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
542 int xts_blocks = DIV_ROUND_UP(req->cryptlen,
543 AES_BLOCK_SIZE) - 2;
544
545 skcipher_walk_abort(&walk);
546
547 skcipher_request_set_tfm(&subreq, tfm);
548 skcipher_request_set_callback(&subreq,
549 skcipher_request_flags(req),
550 NULL, NULL);
551 skcipher_request_set_crypt(&subreq, req->src, req->dst,
552 xts_blocks * AES_BLOCK_SIZE,
553 req->iv);
554 req = &subreq;
555 err = skcipher_walk_virt(&walk, req, false);
556 } else {
557 tail = 0;
558 }
559
560 for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
561 int nbytes = walk.nbytes;
562
563 if (walk.nbytes < walk.total)
564 nbytes &= ~(AES_BLOCK_SIZE - 1);
565
422 kernel_neon_begin(); 566 kernel_neon_begin();
423 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 567 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
424 ctx->key1.key_enc, rounds, blocks, 568 ctx->key1.key_enc, rounds, nbytes,
425 ctx->key2.key_enc, walk.iv, first); 569 ctx->key2.key_enc, walk.iv, first);
426 kernel_neon_end(); 570 kernel_neon_end();
427 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 571 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
428 } 572 }
429 573
430 return err; 574 if (err || likely(!tail))
575 return err;
576
577 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
578 if (req->dst != req->src)
579 dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
580
581 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
582 req->iv);
583
584 err = skcipher_walk_virt(&walk, &subreq, false);
585 if (err)
586 return err;
587
588 kernel_neon_begin();
589 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
590 ctx->key1.key_enc, rounds, walk.nbytes,
591 ctx->key2.key_enc, walk.iv, first);
592 kernel_neon_end();
593
594 return skcipher_walk_done(&walk, 0);
431} 595}
432 596
433static int xts_decrypt(struct skcipher_request *req) 597static int __maybe_unused xts_decrypt(struct skcipher_request *req)
434{ 598{
435 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 599 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
436 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 600 struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
437 int err, first, rounds = 6 + ctx->key1.key_length / 4; 601 int err, first, rounds = 6 + ctx->key1.key_length / 4;
602 int tail = req->cryptlen % AES_BLOCK_SIZE;
603 struct scatterlist sg_src[2], sg_dst[2];
604 struct skcipher_request subreq;
605 struct scatterlist *src, *dst;
438 struct skcipher_walk walk; 606 struct skcipher_walk walk;
439 unsigned int blocks; 607
608 if (req->cryptlen < AES_BLOCK_SIZE)
609 return -EINVAL;
440 610
441 err = skcipher_walk_virt(&walk, req, false); 611 err = skcipher_walk_virt(&walk, req, false);
442 612
443 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 613 if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
614 int xts_blocks = DIV_ROUND_UP(req->cryptlen,
615 AES_BLOCK_SIZE) - 2;
616
617 skcipher_walk_abort(&walk);
618
619 skcipher_request_set_tfm(&subreq, tfm);
620 skcipher_request_set_callback(&subreq,
621 skcipher_request_flags(req),
622 NULL, NULL);
623 skcipher_request_set_crypt(&subreq, req->src, req->dst,
624 xts_blocks * AES_BLOCK_SIZE,
625 req->iv);
626 req = &subreq;
627 err = skcipher_walk_virt(&walk, req, false);
628 } else {
629 tail = 0;
630 }
631
632 for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
633 int nbytes = walk.nbytes;
634
635 if (walk.nbytes < walk.total)
636 nbytes &= ~(AES_BLOCK_SIZE - 1);
637
444 kernel_neon_begin(); 638 kernel_neon_begin();
445 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 639 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
446 ctx->key1.key_dec, rounds, blocks, 640 ctx->key1.key_dec, rounds, nbytes,
447 ctx->key2.key_enc, walk.iv, first); 641 ctx->key2.key_enc, walk.iv, first);
448 kernel_neon_end(); 642 kernel_neon_end();
449 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 643 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
450 } 644 }
451 645
452 return err; 646 if (err || likely(!tail))
647 return err;
648
649 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
650 if (req->dst != req->src)
651 dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
652
653 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
654 req->iv);
655
656 err = skcipher_walk_virt(&walk, &subreq, false);
657 if (err)
658 return err;
659
660
661 kernel_neon_begin();
662 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
663 ctx->key1.key_dec, rounds, walk.nbytes,
664 ctx->key2.key_enc, walk.iv, first);
665 kernel_neon_end();
666
667 return skcipher_walk_done(&walk, 0);
453} 668}
454 669
455static struct skcipher_alg aes_algs[] = { { 670static struct skcipher_alg aes_algs[] = { {
671#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
456 .base = { 672 .base = {
457 .cra_name = "__ecb(aes)", 673 .cra_name = "__ecb(aes)",
458 .cra_driver_name = "__ecb-aes-" MODE, 674 .cra_driver_name = "__ecb-aes-" MODE,
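
The xts_encrypt()/xts_decrypt() rework above moves the assembler interface from whole blocks to a byte count and adds ciphertext stealing for lengths that are not a multiple of AES_BLOCK_SIZE: when a tail exists and the walk would take more than one step, the walk is aborted and restarted over all but the last two blocks, and a final pass of AES_BLOCK_SIZE + tail bytes is fast-forwarded to separately. A hedged outline of just the length-splitting logic (NEON calls and the final CTS pass are elided):

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>		/* DIV_ROUND_UP() */

static int xts_split_outline(struct skcipher_request *req,
			     struct crypto_skcipher *tfm)
{
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;		/* XTS needs at least one full block */

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		/* multi-step walk with a partial tail: split off the CTS part */
		int xts_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE, req->iv);
		err = skcipher_walk_virt(&walk, &subreq, false);
	} else {
		tail = 0;	/* single step: the asm handles the tail in-line */
	}

	/* ... bulk pass over full blocks, then (if tail) a second pass of
	 * AES_BLOCK_SIZE + tail bytes via scatterwalk_ffwd(), as in the
	 * hunks above ... */
	return err;
}
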
@@ -485,24 +701,6 @@ static struct skcipher_alg aes_algs[] = { {
485 .decrypt = cbc_decrypt, 701 .decrypt = cbc_decrypt,
486}, { 702}, {
487 .base = { 703 .base = {
488 .cra_name = "__cts(cbc(aes))",
489 .cra_driver_name = "__cts-cbc-aes-" MODE,
490 .cra_priority = PRIO,
491 .cra_flags = CRYPTO_ALG_INTERNAL,
492 .cra_blocksize = AES_BLOCK_SIZE,
493 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
494 .cra_module = THIS_MODULE,
495 },
496 .min_keysize = AES_MIN_KEY_SIZE,
497 .max_keysize = AES_MAX_KEY_SIZE,
498 .ivsize = AES_BLOCK_SIZE,
499 .walksize = 2 * AES_BLOCK_SIZE,
500 .setkey = skcipher_aes_setkey,
501 .encrypt = cts_cbc_encrypt,
502 .decrypt = cts_cbc_decrypt,
503 .init = cts_cbc_init_tfm,
504}, {
505 .base = {
506 .cra_name = "__ctr(aes)", 704 .cra_name = "__ctr(aes)",
507 .cra_driver_name = "__ctr-aes-" MODE, 705 .cra_driver_name = "__ctr-aes-" MODE,
508 .cra_priority = PRIO, 706 .cra_priority = PRIO,
@@ -547,9 +745,46 @@ static struct skcipher_alg aes_algs[] = { {
547 .min_keysize = 2 * AES_MIN_KEY_SIZE, 745 .min_keysize = 2 * AES_MIN_KEY_SIZE,
548 .max_keysize = 2 * AES_MAX_KEY_SIZE, 746 .max_keysize = 2 * AES_MAX_KEY_SIZE,
549 .ivsize = AES_BLOCK_SIZE, 747 .ivsize = AES_BLOCK_SIZE,
748 .walksize = 2 * AES_BLOCK_SIZE,
550 .setkey = xts_set_key, 749 .setkey = xts_set_key,
551 .encrypt = xts_encrypt, 750 .encrypt = xts_encrypt,
552 .decrypt = xts_decrypt, 751 .decrypt = xts_decrypt,
752}, {
753#endif
754 .base = {
755 .cra_name = "__cts(cbc(aes))",
756 .cra_driver_name = "__cts-cbc-aes-" MODE,
757 .cra_priority = PRIO,
758 .cra_flags = CRYPTO_ALG_INTERNAL,
759 .cra_blocksize = AES_BLOCK_SIZE,
760 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
761 .cra_module = THIS_MODULE,
762 },
763 .min_keysize = AES_MIN_KEY_SIZE,
764 .max_keysize = AES_MAX_KEY_SIZE,
765 .ivsize = AES_BLOCK_SIZE,
766 .walksize = 2 * AES_BLOCK_SIZE,
767 .setkey = skcipher_aes_setkey,
768 .encrypt = cts_cbc_encrypt,
769 .decrypt = cts_cbc_decrypt,
770}, {
771 .base = {
772 .cra_name = "__essiv(cbc(aes),sha256)",
773 .cra_driver_name = "__essiv-cbc-aes-sha256-" MODE,
774 .cra_priority = PRIO + 1,
775 .cra_flags = CRYPTO_ALG_INTERNAL,
776 .cra_blocksize = AES_BLOCK_SIZE,
777 .cra_ctxsize = sizeof(struct crypto_aes_essiv_cbc_ctx),
778 .cra_module = THIS_MODULE,
779 },
780 .min_keysize = AES_MIN_KEY_SIZE,
781 .max_keysize = AES_MAX_KEY_SIZE,
782 .ivsize = AES_BLOCK_SIZE,
783 .setkey = essiv_cbc_set_key,
784 .encrypt = essiv_cbc_encrypt,
785 .decrypt = essiv_cbc_decrypt,
786 .init = essiv_cbc_init_tfm,
787 .exit = essiv_cbc_exit_tfm,
553} }; 788} };
554 789
555static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key, 790static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
@@ -646,15 +881,14 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
646 kernel_neon_end(); 881 kernel_neon_end();
647 } else { 882 } else {
648 if (enc_before) 883 if (enc_before)
649 __aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds); 884 aes_encrypt(ctx, dg, dg);
650 885
651 while (blocks--) { 886 while (blocks--) {
652 crypto_xor(dg, in, AES_BLOCK_SIZE); 887 crypto_xor(dg, in, AES_BLOCK_SIZE);
653 in += AES_BLOCK_SIZE; 888 in += AES_BLOCK_SIZE;
654 889
655 if (blocks || enc_after) 890 if (blocks || enc_after)
656 __aes_arm64_encrypt(ctx->key_enc, dg, dg, 891 aes_encrypt(ctx, dg, dg);
657 rounds);
658 } 892 }
659 } 893 }
660} 894}
@@ -837,5 +1071,7 @@ module_cpu_feature_match(AES, aes_init);
837module_init(aes_init); 1071module_init(aes_init);
838EXPORT_SYMBOL(neon_aes_ecb_encrypt); 1072EXPORT_SYMBOL(neon_aes_ecb_encrypt);
839EXPORT_SYMBOL(neon_aes_cbc_encrypt); 1073EXPORT_SYMBOL(neon_aes_cbc_encrypt);
1074EXPORT_SYMBOL(neon_aes_xts_encrypt);
1075EXPORT_SYMBOL(neon_aes_xts_decrypt);
840#endif 1076#endif
841module_exit(aes_exit); 1077module_exit(aes_exit);
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 324039b72094..131618389f1f 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -118,8 +118,23 @@ AES_ENDPROC(aes_ecb_decrypt)
118 * int blocks, u8 iv[]) 118 * int blocks, u8 iv[])
119 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 119 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
120 * int blocks, u8 iv[]) 120 * int blocks, u8 iv[])
121 * aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
122 * int rounds, int blocks, u8 iv[],
123 * u32 const rk2[]);
124 * aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
125 * int rounds, int blocks, u8 iv[],
126 * u32 const rk2[]);
121 */ 127 */
122 128
129AES_ENTRY(aes_essiv_cbc_encrypt)
130 ld1 {v4.16b}, [x5] /* get iv */
131
132 mov w8, #14 /* AES-256: 14 rounds */
133 enc_prepare w8, x6, x7
134 encrypt_block v4, w8, x6, x7, w9
135 enc_switch_key w3, x2, x6
136 b .Lcbcencloop4x
137
123AES_ENTRY(aes_cbc_encrypt) 138AES_ENTRY(aes_cbc_encrypt)
124 ld1 {v4.16b}, [x5] /* get iv */ 139 ld1 {v4.16b}, [x5] /* get iv */
125 enc_prepare w3, x2, x6 140 enc_prepare w3, x2, x6
@@ -153,13 +168,25 @@ AES_ENTRY(aes_cbc_encrypt)
153 st1 {v4.16b}, [x5] /* return iv */ 168 st1 {v4.16b}, [x5] /* return iv */
154 ret 169 ret
155AES_ENDPROC(aes_cbc_encrypt) 170AES_ENDPROC(aes_cbc_encrypt)
171AES_ENDPROC(aes_essiv_cbc_encrypt)
156 172
173AES_ENTRY(aes_essiv_cbc_decrypt)
174 stp x29, x30, [sp, #-16]!
175 mov x29, sp
176
177 ld1 {cbciv.16b}, [x5] /* get iv */
178
179 mov w8, #14 /* AES-256: 14 rounds */
180 enc_prepare w8, x6, x7
181 encrypt_block cbciv, w8, x6, x7, w9
182 b .Lessivcbcdecstart
157 183
158AES_ENTRY(aes_cbc_decrypt) 184AES_ENTRY(aes_cbc_decrypt)
159 stp x29, x30, [sp, #-16]! 185 stp x29, x30, [sp, #-16]!
160 mov x29, sp 186 mov x29, sp
161 187
162 ld1 {cbciv.16b}, [x5] /* get iv */ 188 ld1 {cbciv.16b}, [x5] /* get iv */
189.Lessivcbcdecstart:
163 dec_prepare w3, x2, x6 190 dec_prepare w3, x2, x6
164 191
165.LcbcdecloopNx: 192.LcbcdecloopNx:
@@ -212,6 +239,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
212 ldp x29, x30, [sp], #16 239 ldp x29, x30, [sp], #16
213 ret 240 ret
214AES_ENDPROC(aes_cbc_decrypt) 241AES_ENDPROC(aes_cbc_decrypt)
242AES_ENDPROC(aes_essiv_cbc_decrypt)
215 243
216 244
217 /* 245 /*
@@ -265,12 +293,11 @@ AES_ENTRY(aes_cbc_cts_decrypt)
265 ld1 {v5.16b}, [x5] /* get iv */ 293 ld1 {v5.16b}, [x5] /* get iv */
266 dec_prepare w3, x2, x6 294 dec_prepare w3, x2, x6
267 295
268 tbl v2.16b, {v1.16b}, v4.16b
269 decrypt_block v0, w3, x2, x6, w7 296 decrypt_block v0, w3, x2, x6, w7
270 eor v2.16b, v2.16b, v0.16b 297 tbl v2.16b, {v0.16b}, v3.16b
298 eor v2.16b, v2.16b, v1.16b
271 299
272 tbx v0.16b, {v1.16b}, v4.16b 300 tbx v0.16b, {v1.16b}, v4.16b
273 tbl v2.16b, {v2.16b}, v3.16b
274 decrypt_block v0, w3, x2, x6, w7 301 decrypt_block v0, w3, x2, x6, w7
275 eor v0.16b, v0.16b, v5.16b /* xor with iv */ 302 eor v0.16b, v0.16b, v5.16b /* xor with iv */
276 303
@@ -386,10 +413,10 @@ AES_ENDPROC(aes_ctr_encrypt)
386 413
387 414
388 /* 415 /*
416 * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
417 * int bytes, u8 const rk2[], u8 iv[], int first)
389 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, 418 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
390 * int blocks, u8 const rk2[], u8 iv[], int first) 419 * int bytes, u8 const rk2[], u8 iv[], int first)
391 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
392 * int blocks, u8 const rk2[], u8 iv[], int first)
393 */ 420 */
394 421
395 .macro next_tweak, out, in, tmp 422 .macro next_tweak, out, in, tmp
@@ -415,6 +442,7 @@ AES_ENTRY(aes_xts_encrypt)
415 cbz w7, .Lxtsencnotfirst 442 cbz w7, .Lxtsencnotfirst
416 443
417 enc_prepare w3, x5, x8 444 enc_prepare w3, x5, x8
445 xts_cts_skip_tw w7, .LxtsencNx
418 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ 446 encrypt_block v4, w3, x5, x8, w7 /* first tweak */
419 enc_switch_key w3, x2, x8 447 enc_switch_key w3, x2, x8
420 b .LxtsencNx 448 b .LxtsencNx
@@ -424,7 +452,7 @@ AES_ENTRY(aes_xts_encrypt)
424.LxtsencloopNx: 452.LxtsencloopNx:
425 next_tweak v4, v4, v8 453 next_tweak v4, v4, v8
426.LxtsencNx: 454.LxtsencNx:
427 subs w4, w4, #4 455 subs w4, w4, #64
428 bmi .Lxtsenc1x 456 bmi .Lxtsenc1x
429 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ 457 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
430 next_tweak v5, v4, v8 458 next_tweak v5, v4, v8
@@ -441,39 +469,74 @@ AES_ENTRY(aes_xts_encrypt)
441 eor v2.16b, v2.16b, v6.16b 469 eor v2.16b, v2.16b, v6.16b
442 st1 {v0.16b-v3.16b}, [x0], #64 470 st1 {v0.16b-v3.16b}, [x0], #64
443 mov v4.16b, v7.16b 471 mov v4.16b, v7.16b
444 cbz w4, .Lxtsencout 472 cbz w4, .Lxtsencret
445 xts_reload_mask v8 473 xts_reload_mask v8
446 b .LxtsencloopNx 474 b .LxtsencloopNx
447.Lxtsenc1x: 475.Lxtsenc1x:
448 adds w4, w4, #4 476 adds w4, w4, #64
449 beq .Lxtsencout 477 beq .Lxtsencout
478 subs w4, w4, #16
479 bmi .LxtsencctsNx
450.Lxtsencloop: 480.Lxtsencloop:
451 ld1 {v1.16b}, [x1], #16 481 ld1 {v0.16b}, [x1], #16
452 eor v0.16b, v1.16b, v4.16b 482.Lxtsencctsout:
483 eor v0.16b, v0.16b, v4.16b
453 encrypt_block v0, w3, x2, x8, w7 484 encrypt_block v0, w3, x2, x8, w7
454 eor v0.16b, v0.16b, v4.16b 485 eor v0.16b, v0.16b, v4.16b
455 st1 {v0.16b}, [x0], #16 486 cbz w4, .Lxtsencout
456 subs w4, w4, #1 487 subs w4, w4, #16
457 beq .Lxtsencout
458 next_tweak v4, v4, v8 488 next_tweak v4, v4, v8
489 bmi .Lxtsenccts
490 st1 {v0.16b}, [x0], #16
459 b .Lxtsencloop 491 b .Lxtsencloop
460.Lxtsencout: 492.Lxtsencout:
493 st1 {v0.16b}, [x0]
494.Lxtsencret:
461 st1 {v4.16b}, [x6] 495 st1 {v4.16b}, [x6]
462 ldp x29, x30, [sp], #16 496 ldp x29, x30, [sp], #16
463 ret 497 ret
464AES_ENDPROC(aes_xts_encrypt)
465 498
499.LxtsencctsNx:
500 mov v0.16b, v3.16b
501 sub x0, x0, #16
502.Lxtsenccts:
503 adr_l x8, .Lcts_permute_table
504
505 add x1, x1, w4, sxtw /* rewind input pointer */
506 add w4, w4, #16 /* # bytes in final block */
507 add x9, x8, #32
508 add x8, x8, x4
509 sub x9, x9, x4
510 add x4, x0, x4 /* output address of final block */
511
512 ld1 {v1.16b}, [x1] /* load final block */
513 ld1 {v2.16b}, [x8]
514 ld1 {v3.16b}, [x9]
515
516 tbl v2.16b, {v0.16b}, v2.16b
517 tbx v0.16b, {v1.16b}, v3.16b
518 st1 {v2.16b}, [x4] /* overlapping stores */
519 mov w4, wzr
520 b .Lxtsencctsout
521AES_ENDPROC(aes_xts_encrypt)
466 522
467AES_ENTRY(aes_xts_decrypt) 523AES_ENTRY(aes_xts_decrypt)
468 stp x29, x30, [sp, #-16]! 524 stp x29, x30, [sp, #-16]!
469 mov x29, sp 525 mov x29, sp
470 526
527 /* subtract 16 bytes if we are doing CTS */
528 sub w8, w4, #0x10
529 tst w4, #0xf
530 csel w4, w4, w8, eq
531
471 ld1 {v4.16b}, [x6] 532 ld1 {v4.16b}, [x6]
472 xts_load_mask v8 533 xts_load_mask v8
534 xts_cts_skip_tw w7, .Lxtsdecskiptw
473 cbz w7, .Lxtsdecnotfirst 535 cbz w7, .Lxtsdecnotfirst
474 536
475 enc_prepare w3, x5, x8 537 enc_prepare w3, x5, x8
476 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ 538 encrypt_block v4, w3, x5, x8, w7 /* first tweak */
539.Lxtsdecskiptw:
477 dec_prepare w3, x2, x8 540 dec_prepare w3, x2, x8
478 b .LxtsdecNx 541 b .LxtsdecNx
479 542
@@ -482,7 +545,7 @@ AES_ENTRY(aes_xts_decrypt)
482.LxtsdecloopNx: 545.LxtsdecloopNx:
483 next_tweak v4, v4, v8 546 next_tweak v4, v4, v8
484.LxtsdecNx: 547.LxtsdecNx:
485 subs w4, w4, #4 548 subs w4, w4, #64
486 bmi .Lxtsdec1x 549 bmi .Lxtsdec1x
487 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ 550 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
488 next_tweak v5, v4, v8 551 next_tweak v5, v4, v8
@@ -503,22 +566,52 @@ AES_ENTRY(aes_xts_decrypt)
503 xts_reload_mask v8 566 xts_reload_mask v8
504 b .LxtsdecloopNx 567 b .LxtsdecloopNx
505.Lxtsdec1x: 568.Lxtsdec1x:
506 adds w4, w4, #4 569 adds w4, w4, #64
507 beq .Lxtsdecout 570 beq .Lxtsdecout
571 subs w4, w4, #16
508.Lxtsdecloop: 572.Lxtsdecloop:
509 ld1 {v1.16b}, [x1], #16 573 ld1 {v0.16b}, [x1], #16
510 eor v0.16b, v1.16b, v4.16b 574 bmi .Lxtsdeccts
575.Lxtsdecctsout:
576 eor v0.16b, v0.16b, v4.16b
511 decrypt_block v0, w3, x2, x8, w7 577 decrypt_block v0, w3, x2, x8, w7
512 eor v0.16b, v0.16b, v4.16b 578 eor v0.16b, v0.16b, v4.16b
513 st1 {v0.16b}, [x0], #16 579 st1 {v0.16b}, [x0], #16
514 subs w4, w4, #1 580 cbz w4, .Lxtsdecout
515 beq .Lxtsdecout 581 subs w4, w4, #16
516 next_tweak v4, v4, v8 582 next_tweak v4, v4, v8
517 b .Lxtsdecloop 583 b .Lxtsdecloop
518.Lxtsdecout: 584.Lxtsdecout:
519 st1 {v4.16b}, [x6] 585 st1 {v4.16b}, [x6]
520 ldp x29, x30, [sp], #16 586 ldp x29, x30, [sp], #16
521 ret 587 ret
588
589.Lxtsdeccts:
590 adr_l x8, .Lcts_permute_table
591
592 add x1, x1, w4, sxtw /* rewind input pointer */
593 add w4, w4, #16 /* # bytes in final block */
594 add x9, x8, #32
595 add x8, x8, x4
596 sub x9, x9, x4
597 add x4, x0, x4 /* output address of final block */
598
599 next_tweak v5, v4, v8
600
601 ld1 {v1.16b}, [x1] /* load final block */
602 ld1 {v2.16b}, [x8]
603 ld1 {v3.16b}, [x9]
604
605 eor v0.16b, v0.16b, v5.16b
606 decrypt_block v0, w3, x2, x8, w7
607 eor v0.16b, v0.16b, v5.16b
608
609 tbl v2.16b, {v0.16b}, v2.16b
610 tbx v0.16b, {v1.16b}, v3.16b
611
612 st1 {v2.16b}, [x4] /* overlapping stores */
613 mov w4, wzr
614 b .Lxtsdecctsout
522AES_ENDPROC(aes_xts_decrypt) 615AES_ENDPROC(aes_xts_decrypt)
523 616
524 /* 617 /*
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 2bebccc73869..22d9b110cf78 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -19,6 +19,11 @@
19 xts_load_mask \tmp 19 xts_load_mask \tmp
20 .endm 20 .endm
21 21
22 /* special case for the neon-bs driver calling into this one for CTS */
23 .macro xts_cts_skip_tw, reg, lbl
24 tbnz \reg, #1, \lbl
25 .endm
26
22 /* multiply by polynomial 'x' in GF(2^8) */ 27 /* multiply by polynomial 'x' in GF(2^8) */
23 .macro mul_by_x, out, in, temp, const 28 .macro mul_by_x, out, in, temp, const
24 sshr \temp, \in, #7 29 sshr \temp, \in, #7
@@ -49,7 +54,7 @@
49 54
50 /* do preload for encryption */ 55 /* do preload for encryption */
51 .macro enc_prepare, ignore0, ignore1, temp 56 .macro enc_prepare, ignore0, ignore1, temp
52 prepare .LForward_Sbox, .LForward_ShiftRows, \temp 57 prepare crypto_aes_sbox, .LForward_ShiftRows, \temp
53 .endm 58 .endm
54 59
55 .macro enc_switch_key, ignore0, ignore1, temp 60 .macro enc_switch_key, ignore0, ignore1, temp
@@ -58,7 +63,7 @@
58 63
59 /* do preload for decryption */ 64 /* do preload for decryption */
60 .macro dec_prepare, ignore0, ignore1, temp 65 .macro dec_prepare, ignore0, ignore1, temp
61 prepare .LReverse_Sbox, .LReverse_ShiftRows, \temp 66 prepare crypto_aes_inv_sbox, .LReverse_ShiftRows, \temp
62 .endm 67 .endm
63 68
64 /* apply SubBytes transformation using the preloaded Sbox */ 69
@@ -234,75 +239,7 @@
234#include "aes-modes.S" 239#include "aes-modes.S"
235 240
236 .section ".rodata", "a" 241 .section ".rodata", "a"
237 .align 6 242 .align 4
238.LForward_Sbox:
239 .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
240 .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
241 .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
242 .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
243 .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
244 .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
245 .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
246 .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
247 .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
248 .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
249 .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
250 .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
251 .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
252 .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
253 .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
254 .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
255 .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
256 .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
257 .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
258 .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
259 .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
260 .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
261 .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
262 .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
263 .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
264 .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
265 .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
266 .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
267 .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
268 .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
269 .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
270 .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
271
272.LReverse_Sbox:
273 .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
274 .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
275 .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
276 .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
277 .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
278 .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
279 .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
280 .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
281 .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
282 .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
283 .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
284 .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
285 .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
286 .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
287 .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
288 .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
289 .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
290 .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
291 .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
292 .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
293 .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
294 .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
295 .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
296 .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
297 .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
298 .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
299 .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
300 .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
301 .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
302 .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
303 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
304 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
305
306.LForward_ShiftRows: 243.LForward_ShiftRows:
307 .octa 0x0b06010c07020d08030e09040f0a0500 244 .octa 0x0b06010c07020d08030e09040f0a0500
308 245
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index cf10ff8878a3..65982039fa36 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -730,11 +730,6 @@ ENDPROC(aesbs_cbc_decrypt)
730 eor \out\().16b, \out\().16b, \tmp\().16b 730 eor \out\().16b, \out\().16b, \tmp\().16b
731 .endm 731 .endm
732 732
733 .align 4
734.Lxts_mul_x:
735CPU_LE( .quad 1, 0x87 )
736CPU_BE( .quad 0x87, 1 )
737
738 /* 733 /*
739 * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 734 * aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
740 * int blocks, u8 iv[]) 735 * int blocks, u8 iv[])
@@ -806,7 +801,9 @@ ENDPROC(__xts_crypt8)
806 mov x23, x4 801 mov x23, x4
807 mov x24, x5 802 mov x24, x5
808 803
8090: ldr q30, .Lxts_mul_x 8040: movi v30.2s, #0x1
805 movi v25.2s, #0x87
806 uzp1 v30.4s, v30.4s, v25.4s
810 ld1 {v25.16b}, [x24] 807 ld1 {v25.16b}, [x24]
811 808
81299: adr x7, \do8 80999: adr x7, \do8
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index 281d23087697..ea873b8904c4 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -8,13 +8,13 @@
8#include <asm/neon.h> 8#include <asm/neon.h>
9#include <asm/simd.h> 9#include <asm/simd.h>
10#include <crypto/aes.h> 10#include <crypto/aes.h>
11#include <crypto/ctr.h>
11#include <crypto/internal/simd.h> 12#include <crypto/internal/simd.h>
12#include <crypto/internal/skcipher.h> 13#include <crypto/internal/skcipher.h>
14#include <crypto/scatterwalk.h>
13#include <crypto/xts.h> 15#include <crypto/xts.h>
14#include <linux/module.h> 16#include <linux/module.h>
15 17
16#include "aes-ctr-fallback.h"
17
18MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 18MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
19MODULE_LICENSE("GPL v2"); 19MODULE_LICENSE("GPL v2");
20 20
@@ -46,6 +46,12 @@ asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
46 int rounds, int blocks); 46 int rounds, int blocks);
47asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], 47asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
48 int rounds, int blocks, u8 iv[]); 48 int rounds, int blocks, u8 iv[]);
49asmlinkage void neon_aes_xts_encrypt(u8 out[], u8 const in[],
50 u32 const rk1[], int rounds, int bytes,
51 u32 const rk2[], u8 iv[], int first);
52asmlinkage void neon_aes_xts_decrypt(u8 out[], u8 const in[],
53 u32 const rk1[], int rounds, int bytes,
54 u32 const rk2[], u8 iv[], int first);
49 55
50struct aesbs_ctx { 56struct aesbs_ctx {
51 u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32]; 57 u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
@@ -65,6 +71,7 @@ struct aesbs_ctr_ctx {
65struct aesbs_xts_ctx { 71struct aesbs_xts_ctx {
66 struct aesbs_ctx key; 72 struct aesbs_ctx key;
67 u32 twkey[AES_MAX_KEYLENGTH_U32]; 73 u32 twkey[AES_MAX_KEYLENGTH_U32];
74 struct crypto_aes_ctx cts;
68}; 75};
69 76
70static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key, 77static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -74,7 +81,7 @@ static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
74 struct crypto_aes_ctx rk; 81 struct crypto_aes_ctx rk;
75 int err; 82 int err;
76 83
77 err = crypto_aes_expand_key(&rk, in_key, key_len); 84 err = aes_expandkey(&rk, in_key, key_len);
78 if (err) 85 if (err)
79 return err; 86 return err;
80 87
@@ -133,7 +140,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
133 struct crypto_aes_ctx rk; 140 struct crypto_aes_ctx rk;
134 int err; 141 int err;
135 142
136 err = crypto_aes_expand_key(&rk, in_key, key_len); 143 err = aes_expandkey(&rk, in_key, key_len);
137 if (err) 144 if (err)
138 return err; 145 return err;
139 146
@@ -205,7 +212,7 @@ static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
205 struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 212 struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
206 int err; 213 int err;
207 214
208 err = crypto_aes_expand_key(&ctx->fallback, in_key, key_len); 215 err = aes_expandkey(&ctx->fallback, in_key, key_len);
209 if (err) 216 if (err)
210 return err; 217 return err;
211 218
@@ -271,7 +278,11 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
271 return err; 278 return err;
272 279
273 key_len /= 2; 280 key_len /= 2;
274 err = crypto_aes_expand_key(&rk, in_key + key_len, key_len); 281 err = aes_expandkey(&ctx->cts, in_key, key_len);
282 if (err)
283 return err;
284
285 err = aes_expandkey(&rk, in_key + key_len, key_len);
275 if (err) 286 if (err)
276 return err; 287 return err;
277 288
@@ -280,59 +291,142 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
280 return aesbs_setkey(tfm, in_key, key_len); 291 return aesbs_setkey(tfm, in_key, key_len);
281} 292}
282 293
283static int ctr_encrypt_sync(struct skcipher_request *req) 294static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
284{ 295{
285 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
286 struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 296 struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
297 unsigned long flags;
298
299 /*
300 * Temporarily disable interrupts to avoid races where
301 * cachelines are evicted when the CPU is interrupted
302 * to do something else.
303 */
304 local_irq_save(flags);
305 aes_encrypt(&ctx->fallback, dst, src);
306 local_irq_restore(flags);
307}
287 308
309static int ctr_encrypt_sync(struct skcipher_request *req)
310{
288 if (!crypto_simd_usable()) 311 if (!crypto_simd_usable())
289 return aes_ctr_encrypt_fallback(&ctx->fallback, req); 312 return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
290 313
291 return ctr_encrypt(req); 314 return ctr_encrypt(req);
292} 315}
293 316
294static int __xts_crypt(struct skcipher_request *req, 317static int __xts_crypt(struct skcipher_request *req, bool encrypt,
295 void (*fn)(u8 out[], u8 const in[], u8 const rk[], 318 void (*fn)(u8 out[], u8 const in[], u8 const rk[],
296 int rounds, int blocks, u8 iv[])) 319 int rounds, int blocks, u8 iv[]))
297{ 320{
298 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 321 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
299 struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 322 struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
323 int tail = req->cryptlen % (8 * AES_BLOCK_SIZE);
324 struct scatterlist sg_src[2], sg_dst[2];
325 struct skcipher_request subreq;
326 struct scatterlist *src, *dst;
300 struct skcipher_walk walk; 327 struct skcipher_walk walk;
301 int err; 328 int nbytes, err;
329 int first = 1;
330 u8 *out, *in;
331
332 if (req->cryptlen < AES_BLOCK_SIZE)
333 return -EINVAL;
334
335 /* ensure that the cts tail is covered by a single step */
336 if (unlikely(tail > 0 && tail < AES_BLOCK_SIZE)) {
337 int xts_blocks = DIV_ROUND_UP(req->cryptlen,
338 AES_BLOCK_SIZE) - 2;
339
340 skcipher_request_set_tfm(&subreq, tfm);
341 skcipher_request_set_callback(&subreq,
342 skcipher_request_flags(req),
343 NULL, NULL);
344 skcipher_request_set_crypt(&subreq, req->src, req->dst,
345 xts_blocks * AES_BLOCK_SIZE,
346 req->iv);
347 req = &subreq;
348 } else {
349 tail = 0;
350 }
302 351
303 err = skcipher_walk_virt(&walk, req, false); 352 err = skcipher_walk_virt(&walk, req, false);
304 if (err) 353 if (err)
305 return err; 354 return err;
306 355
307 kernel_neon_begin();
308 neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
309 kernel_neon_end();
310
311 while (walk.nbytes >= AES_BLOCK_SIZE) { 356 while (walk.nbytes >= AES_BLOCK_SIZE) {
312 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 357 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
313 358
314 if (walk.nbytes < walk.total) 359 if (walk.nbytes < walk.total || walk.nbytes % AES_BLOCK_SIZE)
315 blocks = round_down(blocks, 360 blocks = round_down(blocks,
316 walk.stride / AES_BLOCK_SIZE); 361 walk.stride / AES_BLOCK_SIZE);
317 362
363 out = walk.dst.virt.addr;
364 in = walk.src.virt.addr;
365 nbytes = walk.nbytes;
366
318 kernel_neon_begin(); 367 kernel_neon_begin();
319 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, 368 if (likely(blocks > 6)) { /* plain NEON is faster otherwise */
320 ctx->key.rounds, blocks, walk.iv); 369 if (first)
370 neon_aes_ecb_encrypt(walk.iv, walk.iv,
371 ctx->twkey,
372 ctx->key.rounds, 1);
373 first = 0;
374
375 fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
376 walk.iv);
377
378 out += blocks * AES_BLOCK_SIZE;
379 in += blocks * AES_BLOCK_SIZE;
380 nbytes -= blocks * AES_BLOCK_SIZE;
381 }
382
383 if (walk.nbytes == walk.total && nbytes > 0)
384 goto xts_tail;
385
321 kernel_neon_end(); 386 kernel_neon_end();
322 err = skcipher_walk_done(&walk, 387 skcipher_walk_done(&walk, nbytes);
323 walk.nbytes - blocks * AES_BLOCK_SIZE);
324 } 388 }
325 return err; 389
390 if (err || likely(!tail))
391 return err;
392
393 /* handle ciphertext stealing */
394 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
395 if (req->dst != req->src)
396 dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
397
398 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
399 req->iv);
400
401 err = skcipher_walk_virt(&walk, req, false);
402 if (err)
403 return err;
404
405 out = walk.dst.virt.addr;
406 in = walk.src.virt.addr;
407 nbytes = walk.nbytes;
408
409 kernel_neon_begin();
410xts_tail:
411 if (encrypt)
412 neon_aes_xts_encrypt(out, in, ctx->cts.key_enc, ctx->key.rounds,
413 nbytes, ctx->twkey, walk.iv, first ?: 2);
414 else
415 neon_aes_xts_decrypt(out, in, ctx->cts.key_dec, ctx->key.rounds,
416 nbytes, ctx->twkey, walk.iv, first ?: 2);
417 kernel_neon_end();
418
419 return skcipher_walk_done(&walk, 0);
326} 420}
327 421
328static int xts_encrypt(struct skcipher_request *req) 422static int xts_encrypt(struct skcipher_request *req)
329{ 423{
330 return __xts_crypt(req, aesbs_xts_encrypt); 424 return __xts_crypt(req, true, aesbs_xts_encrypt);
331} 425}
332 426
333static int xts_decrypt(struct skcipher_request *req) 427static int xts_decrypt(struct skcipher_request *req)
334{ 428{
335 return __xts_crypt(req, aesbs_xts_decrypt); 429 return __xts_crypt(req, false, aesbs_xts_decrypt);
336} 430}
337 431
338static struct skcipher_alg aes_algs[] = { { 432static struct skcipher_alg aes_algs[] = { {
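
Note on the rewritten __xts_crypt() above: it adds ciphertext stealing by carving the final full block plus the partial tail out of the request (as a subrequest) and handing them to the new neon_aes_xts_encrypt()/neon_aes_xts_decrypt() routines, while the bulk keeps using the bit-sliced code. A stand-alone sketch of the length split, assuming the driver's 8-block walk stride (illustrative only, not kernel code):

        #include <stdio.h>

        #define AES_BLOCK_SIZE 16

        int main(void)
        {
                unsigned int cryptlen = 130;    /* example: 8 full blocks + 2 bytes */
                unsigned int tail = cryptlen % (8 * AES_BLOCK_SIZE);
                unsigned int bulk, cts;

                if (tail > 0 && tail < AES_BLOCK_SIZE) {
                        /* keep one full block together with the partial tail */
                        bulk = ((cryptlen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE - 2)
                               * AES_BLOCK_SIZE;
                        cts = cryptlen - bulk;
                } else {
                        bulk = cryptlen;        /* no separate subrequest needed */
                        cts = 0;
                }

                printf("bulk=%u bytes, cts tail=%u bytes\n", bulk, cts);
                /* prints: bulk=112 bytes, cts tail=18 bytes */
                return 0;
        }

For cryptlen = 130 the subrequest covers 7 blocks (112 bytes) and the stealing path sees 18 bytes, matching the xts_blocks = DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE) - 2 computation in the patch.
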
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 16c5da9be9fb..70b1469783f9 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -70,8 +70,6 @@ asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
70asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[], 70asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
71 u32 const rk[], int rounds); 71 u32 const rk[], int rounds);
72 72
73asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
74
75static int ghash_init(struct shash_desc *desc) 73static int ghash_init(struct shash_desc *desc)
76{ 74{
77 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); 75 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -309,14 +307,13 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
309 u8 key[GHASH_BLOCK_SIZE]; 307 u8 key[GHASH_BLOCK_SIZE];
310 int ret; 308 int ret;
311 309
312 ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen); 310 ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
313 if (ret) { 311 if (ret) {
314 tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 312 tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
315 return -EINVAL; 313 return -EINVAL;
316 } 314 }
317 315
318 __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){}, 316 aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
319 num_rounds(&ctx->aes_key));
320 317
321 return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128)); 318 return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
322} 319}
@@ -467,7 +464,7 @@ static int gcm_encrypt(struct aead_request *req)
467 rk = ctx->aes_key.key_enc; 464 rk = ctx->aes_key.key_enc;
468 } while (walk.nbytes >= 2 * AES_BLOCK_SIZE); 465 } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
469 } else { 466 } else {
470 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); 467 aes_encrypt(&ctx->aes_key, tag, iv);
471 put_unaligned_be32(2, iv + GCM_IV_SIZE); 468 put_unaligned_be32(2, iv + GCM_IV_SIZE);
472 469
473 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { 470 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -478,8 +475,7 @@ static int gcm_encrypt(struct aead_request *req)
478 int remaining = blocks; 475 int remaining = blocks;
479 476
480 do { 477 do {
481 __aes_arm64_encrypt(ctx->aes_key.key_enc, 478 aes_encrypt(&ctx->aes_key, ks, iv);
482 ks, iv, nrounds);
483 crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE); 479 crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
484 crypto_inc(iv, AES_BLOCK_SIZE); 480 crypto_inc(iv, AES_BLOCK_SIZE);
485 481
@@ -495,13 +491,10 @@ static int gcm_encrypt(struct aead_request *req)
495 walk.nbytes % (2 * AES_BLOCK_SIZE)); 491 walk.nbytes % (2 * AES_BLOCK_SIZE));
496 } 492 }
497 if (walk.nbytes) { 493 if (walk.nbytes) {
498 __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, 494 aes_encrypt(&ctx->aes_key, ks, iv);
499 nrounds);
500 if (walk.nbytes > AES_BLOCK_SIZE) { 495 if (walk.nbytes > AES_BLOCK_SIZE) {
501 crypto_inc(iv, AES_BLOCK_SIZE); 496 crypto_inc(iv, AES_BLOCK_SIZE);
502 __aes_arm64_encrypt(ctx->aes_key.key_enc, 497 aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
503 ks + AES_BLOCK_SIZE, iv,
504 nrounds);
505 } 498 }
506 } 499 }
507 } 500 }
@@ -605,7 +598,7 @@ static int gcm_decrypt(struct aead_request *req)
605 rk = ctx->aes_key.key_enc; 598 rk = ctx->aes_key.key_enc;
606 } while (walk.nbytes >= 2 * AES_BLOCK_SIZE); 599 } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
607 } else { 600 } else {
608 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); 601 aes_encrypt(&ctx->aes_key, tag, iv);
609 put_unaligned_be32(2, iv + GCM_IV_SIZE); 602 put_unaligned_be32(2, iv + GCM_IV_SIZE);
610 603
611 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { 604 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -618,8 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
618 pmull_ghash_update_p64); 611 pmull_ghash_update_p64);
619 612
620 do { 613 do {
621 __aes_arm64_encrypt(ctx->aes_key.key_enc, 614 aes_encrypt(&ctx->aes_key, buf, iv);
622 buf, iv, nrounds);
623 crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE); 615 crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
624 crypto_inc(iv, AES_BLOCK_SIZE); 616 crypto_inc(iv, AES_BLOCK_SIZE);
625 617
@@ -637,11 +629,9 @@ static int gcm_decrypt(struct aead_request *req)
637 memcpy(iv2, iv, AES_BLOCK_SIZE); 629 memcpy(iv2, iv, AES_BLOCK_SIZE);
638 crypto_inc(iv2, AES_BLOCK_SIZE); 630 crypto_inc(iv2, AES_BLOCK_SIZE);
639 631
640 __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2, 632 aes_encrypt(&ctx->aes_key, iv2, iv2);
641 iv2, nrounds);
642 } 633 }
643 __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, 634 aes_encrypt(&ctx->aes_key, iv, iv);
644 nrounds);
645 } 635 }
646 } 636 }
647 637
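
Note: this file is one of several converted from the arch-private __aes_arm64_encrypt() to the single-block AES library helpers, using the same aes_expandkey()/aes_encrypt() calls visible in the hunks. A condensed sketch of the pattern gcm_setkey() now follows, with a placeholder function name and the helpers assumed to be declared via <crypto/aes.h>:

        #include <linux/types.h>
        #include <crypto/aes.h>

        /* derive the GHASH key by encrypting an all-zero block, as
         * gcm_setkey() does above; example_derive_hash_key() is a
         * placeholder name, not kernel API */
        static int example_derive_hash_key(const u8 *inkey, unsigned int keylen,
                                           u8 out[AES_BLOCK_SIZE])
        {
                struct crypto_aes_ctx aes_key;
                int err;

                err = aes_expandkey(&aes_key, inkey, keylen);
                if (err)
                        return err;

                aes_encrypt(&aes_key, out, (u8[AES_BLOCK_SIZE]){});
                return 0;
        }

Because aes_encrypt()/aes_decrypt() are now global symbols, the s390 and sparc glue code further down renames its file-local functions of the same names without changing behaviour.
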
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 04b9d17b0733..e273faca924f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -30,15 +30,15 @@ EXPORT_SYMBOL(sha256_block_data_order);
30asmlinkage void sha256_block_neon(u32 *digest, const void *data, 30asmlinkage void sha256_block_neon(u32 *digest, const void *data,
31 unsigned int num_blks); 31 unsigned int num_blks);
32 32
33static int sha256_update(struct shash_desc *desc, const u8 *data, 33static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
34 unsigned int len) 34 unsigned int len)
35{ 35{
36 return sha256_base_do_update(desc, data, len, 36 return sha256_base_do_update(desc, data, len,
37 (sha256_block_fn *)sha256_block_data_order); 37 (sha256_block_fn *)sha256_block_data_order);
38} 38}
39 39
40static int sha256_finup(struct shash_desc *desc, const u8 *data, 40static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
41 unsigned int len, u8 *out) 41 unsigned int len, u8 *out)
42{ 42{
43 if (len) 43 if (len)
44 sha256_base_do_update(desc, data, len, 44 sha256_base_do_update(desc, data, len,
@@ -49,17 +49,17 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
49 return sha256_base_finish(desc, out); 49 return sha256_base_finish(desc, out);
50} 50}
51 51
52static int sha256_final(struct shash_desc *desc, u8 *out) 52static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
53{ 53{
54 return sha256_finup(desc, NULL, 0, out); 54 return crypto_sha256_arm64_finup(desc, NULL, 0, out);
55} 55}
56 56
57static struct shash_alg algs[] = { { 57static struct shash_alg algs[] = { {
58 .digestsize = SHA256_DIGEST_SIZE, 58 .digestsize = SHA256_DIGEST_SIZE,
59 .init = sha256_base_init, 59 .init = sha256_base_init,
60 .update = sha256_update, 60 .update = crypto_sha256_arm64_update,
61 .final = sha256_final, 61 .final = crypto_sha256_arm64_final,
62 .finup = sha256_finup, 62 .finup = crypto_sha256_arm64_finup,
63 .descsize = sizeof(struct sha256_state), 63 .descsize = sizeof(struct sha256_state),
64 .base.cra_name = "sha256", 64 .base.cra_name = "sha256",
65 .base.cra_driver_name = "sha256-arm64", 65 .base.cra_driver_name = "sha256-arm64",
@@ -69,9 +69,9 @@ static struct shash_alg algs[] = { {
69}, { 69}, {
70 .digestsize = SHA224_DIGEST_SIZE, 70 .digestsize = SHA224_DIGEST_SIZE,
71 .init = sha224_base_init, 71 .init = sha224_base_init,
72 .update = sha256_update, 72 .update = crypto_sha256_arm64_update,
73 .final = sha256_final, 73 .final = crypto_sha256_arm64_final,
74 .finup = sha256_finup, 74 .finup = crypto_sha256_arm64_finup,
75 .descsize = sizeof(struct sha256_state), 75 .descsize = sizeof(struct sha256_state),
76 .base.cra_name = "sha224", 76 .base.cra_name = "sha224",
77 .base.cra_driver_name = "sha224-arm64", 77 .base.cra_driver_name = "sha224-arm64",
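
Note on the renames above: they only avoid a symbol clash. sha256-glue.c includes <crypto/sha.h>, which now also declares the relocated SHA-256 library routines (previously in <linux/sha256.h>), so file-local functions named sha256_update()/sha256_final() would collide with them. Assuming the library keeps its old struct sha256_state interface, a minimal use looks like this sketch (placeholder function name, not kernel API):

        #include <crypto/sha.h>

        /* one-shot digest over a flat buffer using the library interface */
        static void example_sha256(const u8 *buf, unsigned int len,
                                   u8 digest[SHA256_DIGEST_SIZE])
        {
                struct sha256_state state;

                sha256_init(&state);
                sha256_update(&state, buf, len);
                sha256_final(&state, digest);
        }
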
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 9a1d2fc6ceb7..64870c7be4a3 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -11,4 +11,3 @@ generic-y += mcs_spinlock.h
11generic-y += preempt.h 11generic-y += preempt.h
12generic-y += vtime.h 12generic-y += vtime.h
13generic-y += msi.h 13generic-y += msi.h
14generic-y += simd.h
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 6d2dbb5089d5..9803e96d2924 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -108,7 +108,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
108 return 0; 108 return 0;
109} 109}
110 110
111static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 111static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
112{ 112{
113 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 113 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
114 114
@@ -119,7 +119,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
119 cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE); 119 cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
120} 120}
121 121
122static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 122static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
123{ 123{
124 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 124 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
125 125
@@ -172,8 +172,8 @@ static struct crypto_alg aes_alg = {
172 .cia_min_keysize = AES_MIN_KEY_SIZE, 172 .cia_min_keysize = AES_MIN_KEY_SIZE,
173 .cia_max_keysize = AES_MAX_KEY_SIZE, 173 .cia_max_keysize = AES_MAX_KEY_SIZE,
174 .cia_setkey = aes_set_key, 174 .cia_setkey = aes_set_key,
175 .cia_encrypt = aes_encrypt, 175 .cia_encrypt = crypto_aes_encrypt,
176 .cia_decrypt = aes_decrypt, 176 .cia_decrypt = crypto_aes_decrypt,
177 } 177 }
178 } 178 }
179}; 179};
@@ -512,7 +512,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
512 unsigned long fc; 512 unsigned long fc;
513 int err; 513 int err;
514 514
515 err = xts_check_key(tfm, in_key, key_len); 515 err = xts_fallback_setkey(tfm, in_key, key_len);
516 if (err) 516 if (err)
517 return err; 517 return err;
518 518
@@ -529,7 +529,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
529 /* Check if the function code is available */ 529 /* Check if the function code is available */
530 xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; 530 xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
531 if (!xts_ctx->fc) 531 if (!xts_ctx->fc)
532 return xts_fallback_setkey(tfm, in_key, key_len); 532 return 0;
533 533
534 /* Split the XTS key into the two subkeys */ 534 /* Split the XTS key into the two subkeys */
535 key_len = key_len / 2; 535 key_len = key_len / 2;
@@ -589,7 +589,7 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
589 if (!nbytes) 589 if (!nbytes)
590 return -EINVAL; 590 return -EINVAL;
591 591
592 if (unlikely(!xts_ctx->fc)) 592 if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
593 return xts_fallback_encrypt(desc, dst, src, nbytes); 593 return xts_fallback_encrypt(desc, dst, src, nbytes);
594 594
595 blkcipher_walk_init(&walk, dst, src, nbytes); 595 blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -606,7 +606,7 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
606 if (!nbytes) 606 if (!nbytes)
607 return -EINVAL; 607 return -EINVAL;
608 608
609 if (unlikely(!xts_ctx->fc)) 609 if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
610 return xts_fallback_decrypt(desc, dst, src, nbytes); 610 return xts_fallback_decrypt(desc, dst, src, nbytes);
611 611
612 blkcipher_walk_init(&walk, dst, src, nbytes); 612 blkcipher_walk_init(&walk, dst, src, nbytes);
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 374b42fc7637..439b100c6f2e 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -16,7 +16,7 @@
16#include <linux/fips.h> 16#include <linux/fips.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <crypto/algapi.h> 18#include <crypto/algapi.h>
19#include <crypto/des.h> 19#include <crypto/internal/des.h>
20#include <asm/cpacf.h> 20#include <asm/cpacf.h>
21 21
22#define DES3_KEY_SIZE (3 * DES_KEY_SIZE) 22#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
@@ -35,27 +35,24 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
35 unsigned int key_len) 35 unsigned int key_len)
36{ 36{
37 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); 37 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
38 u32 tmp[DES_EXPKEY_WORDS]; 38 int err;
39 39
40 /* check for weak keys */ 40 err = crypto_des_verify_key(tfm, key);
41 if (!des_ekey(tmp, key) && 41 if (err)
42 (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 42 return err;
43 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
44 return -EINVAL;
45 }
46 43
47 memcpy(ctx->key, key, key_len); 44 memcpy(ctx->key, key, key_len);
48 return 0; 45 return 0;
49} 46}
50 47
51static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 48static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
52{ 49{
53 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); 50 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
54 51
55 cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE); 52 cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE);
56} 53}
57 54
58static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 55static void s390_des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
59{ 56{
60 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); 57 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
61 58
@@ -76,8 +73,8 @@ static struct crypto_alg des_alg = {
76 .cia_min_keysize = DES_KEY_SIZE, 73 .cia_min_keysize = DES_KEY_SIZE,
77 .cia_max_keysize = DES_KEY_SIZE, 74 .cia_max_keysize = DES_KEY_SIZE,
78 .cia_setkey = des_setkey, 75 .cia_setkey = des_setkey,
79 .cia_encrypt = des_encrypt, 76 .cia_encrypt = s390_des_encrypt,
80 .cia_decrypt = des_decrypt, 77 .cia_decrypt = s390_des_decrypt,
81 } 78 }
82 } 79 }
83}; 80};
@@ -227,8 +224,8 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
227 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); 224 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
228 int err; 225 int err;
229 226
230 err = __des3_verify_key(&tfm->crt_flags, key); 227 err = crypto_des3_ede_verify_key(tfm, key);
231 if (unlikely(err)) 228 if (err)
232 return err; 229 return err;
233 230
234 memcpy(ctx->key, key, key_len); 231 memcpy(ctx->key, key, key_len);
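
Note: both DES conversions in this pull (s390 here, sparc below) drop their open-coded des_ekey()-based weak-key checks in favour of the helpers from <crypto/internal/des.h>. A condensed sketch of the resulting setkey shape, with placeholder context and function names; as used above, crypto_des_verify_key() is expected to apply the weak-key policy according to the tfm's CRYPTO_TFM_REQ_FORBID_WEAK_KEYS flag so drivers no longer re-implement it:

        #include <linux/crypto.h>
        #include <linux/string.h>
        #include <crypto/internal/des.h>

        struct example_des_ctx {
                u8 key[DES_KEY_SIZE];
        };

        static int example_des_setkey(struct crypto_tfm *tfm, const u8 *key,
                                      unsigned int key_len)
        {
                struct example_des_ctx *ctx = crypto_tfm_ctx(tfm);
                int err;

                err = crypto_des_verify_key(tfm, key);  /* weak-key policy lives here */
                if (err)
                        return err;

                memcpy(ctx->key, key, key_len);
                return 0;
        }
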
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index eeeb6a7737a4..a3e7400e031c 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -153,4 +153,4 @@ module_exit(ghash_mod_exit);
153MODULE_ALIAS_CRYPTO("ghash"); 153MODULE_ALIAS_CRYPTO("ghash");
154 154
155MODULE_LICENSE("GPL"); 155MODULE_LICENSE("GPL");
156MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation"); 156MODULE_DESCRIPTION("GHASH hash function, s390 implementation");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index af7505148f80..b52c87e44939 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -17,7 +17,7 @@
17 17
18#include "sha.h" 18#include "sha.h"
19 19
20static int sha256_init(struct shash_desc *desc) 20static int s390_sha256_init(struct shash_desc *desc)
21{ 21{
22 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); 22 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
23 23
@@ -60,7 +60,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
60 60
61static struct shash_alg sha256_alg = { 61static struct shash_alg sha256_alg = {
62 .digestsize = SHA256_DIGEST_SIZE, 62 .digestsize = SHA256_DIGEST_SIZE,
63 .init = sha256_init, 63 .init = s390_sha256_init,
64 .update = s390_sha_update, 64 .update = s390_sha_update,
65 .final = s390_sha_final, 65 .final = s390_sha_final,
66 .export = sha256_export, 66 .export = sha256_export,
@@ -76,7 +76,7 @@ static struct shash_alg sha256_alg = {
76 } 76 }
77}; 77};
78 78
79static int sha224_init(struct shash_desc *desc) 79static int s390_sha224_init(struct shash_desc *desc)
80{ 80{
81 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); 81 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
82 82
@@ -96,7 +96,7 @@ static int sha224_init(struct shash_desc *desc)
96 96
97static struct shash_alg sha224_alg = { 97static struct shash_alg sha224_alg = {
98 .digestsize = SHA224_DIGEST_SIZE, 98 .digestsize = SHA224_DIGEST_SIZE,
99 .init = sha224_init, 99 .init = s390_sha224_init,
100 .update = s390_sha_update, 100 .update = s390_sha_update,
101 .final = s390_sha_final, 101 .final = s390_sha_final,
102 .export = sha256_export, 102 .export = sha256_export,
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index dc1ae4ff79d7..bc0d7a0d0394 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -7,9 +7,11 @@ purgatory-y := head.o purgatory.o string.o sha256.o mem.o
7targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro 7targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro
8PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) 8PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
9 9
10$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE 10$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
11 $(call if_changed_rule,cc_o_c) 11 $(call if_changed_rule,cc_o_c)
12 12
13CFLAGS_sha256.o := -D__DISABLE_EXPORTS
14
13$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE 15$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
14 $(call if_changed_rule,as_o_S) 16 $(call if_changed_rule,as_o_S)
15 17
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c
index 3528e6da4e87..0a423bcf6746 100644
--- a/arch/s390/purgatory/purgatory.c
+++ b/arch/s390/purgatory/purgatory.c
@@ -8,8 +8,8 @@
8 */ 8 */
9 9
10#include <linux/kexec.h> 10#include <linux/kexec.h>
11#include <linux/sha256.h>
12#include <linux/string.h> 11#include <linux/string.h>
12#include <crypto/sha.h>
13#include <asm/purgatory.h> 13#include <asm/purgatory.h>
14 14
15int verify_sha256_digest(void) 15int verify_sha256_digest(void)
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 57b474113168..7b946b3dee9d 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -197,14 +197,14 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
197 return 0; 197 return 0;
198} 198}
199 199
200static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 200static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
201{ 201{
202 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); 202 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
203 203
204 ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst); 204 ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
205} 205}
206 206
207static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 207static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
208{ 208{
209 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); 209 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
210 210
@@ -396,8 +396,8 @@ static struct crypto_alg algs[] = { {
396 .cia_min_keysize = AES_MIN_KEY_SIZE, 396 .cia_min_keysize = AES_MIN_KEY_SIZE,
397 .cia_max_keysize = AES_MAX_KEY_SIZE, 397 .cia_max_keysize = AES_MAX_KEY_SIZE,
398 .cia_setkey = aes_set_key, 398 .cia_setkey = aes_set_key,
399 .cia_encrypt = aes_encrypt, 399 .cia_encrypt = crypto_aes_encrypt,
400 .cia_decrypt = aes_decrypt 400 .cia_decrypt = crypto_aes_decrypt
401 } 401 }
402 } 402 }
403}, { 403}, {
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 281448f72c90..db6010b4e52e 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -12,7 +12,7 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <crypto/algapi.h> 14#include <crypto/algapi.h>
15#include <crypto/des.h> 15#include <crypto/internal/des.h>
16 16
17#include <asm/fpumacro.h> 17#include <asm/fpumacro.h>
18#include <asm/pstate.h> 18#include <asm/pstate.h>
@@ -45,19 +45,15 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
45 unsigned int keylen) 45 unsigned int keylen)
46{ 46{
47 struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); 47 struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
48 u32 *flags = &tfm->crt_flags; 48 int err;
49 u32 tmp[DES_EXPKEY_WORDS];
50 int ret;
51 49
52 /* Even though we have special instructions for key expansion, 50 /* Even though we have special instructions for key expansion,
53 * we call des_ekey() so that we don't have to write our own 51 * we call des_verify_key() so that we don't have to write our own
54 * weak key detection code. 52 * weak key detection code.
55 */ 53 */
56 ret = des_ekey(tmp, key); 54 err = crypto_des_verify_key(tfm, key);
57 if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 55 if (err)
58 *flags |= CRYPTO_TFM_RES_WEAK_KEY; 56 return err;
59 return -EINVAL;
60 }
61 57
62 des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]); 58 des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]);
63 encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]); 59 encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]);
@@ -68,7 +64,7 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
68extern void des_sparc64_crypt(const u64 *key, const u64 *input, 64extern void des_sparc64_crypt(const u64 *key, const u64 *input,
69 u64 *output); 65 u64 *output);
70 66
71static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 67static void sparc_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
72{ 68{
73 struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); 69 struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
74 const u64 *K = ctx->encrypt_expkey; 70 const u64 *K = ctx->encrypt_expkey;
@@ -76,7 +72,7 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
76 des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); 72 des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
77} 73}
78 74
79static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 75static void sparc_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
80{ 76{
81 struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); 77 struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
82 const u64 *K = ctx->decrypt_expkey; 78 const u64 *K = ctx->decrypt_expkey;
@@ -202,14 +198,13 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
202 unsigned int keylen) 198 unsigned int keylen)
203{ 199{
204 struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); 200 struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
205 u32 *flags = &tfm->crt_flags;
206 u64 k1[DES_EXPKEY_WORDS / 2]; 201 u64 k1[DES_EXPKEY_WORDS / 2];
207 u64 k2[DES_EXPKEY_WORDS / 2]; 202 u64 k2[DES_EXPKEY_WORDS / 2];
208 u64 k3[DES_EXPKEY_WORDS / 2]; 203 u64 k3[DES_EXPKEY_WORDS / 2];
209 int err; 204 int err;
210 205
211 err = __des3_verify_key(flags, key); 206 err = crypto_des3_ede_verify_key(tfm, key);
212 if (unlikely(err)) 207 if (err)
213 return err; 208 return err;
214 209
215 des_sparc64_key_expand((const u32 *)key, k1); 210 des_sparc64_key_expand((const u32 *)key, k1);
@@ -235,7 +230,7 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
235extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input, 230extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input,
236 u64 *output); 231 u64 *output);
237 232
238static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 233static void sparc_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
239{ 234{
240 struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); 235 struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
241 const u64 *K = ctx->encrypt_expkey; 236 const u64 *K = ctx->encrypt_expkey;
@@ -243,7 +238,7 @@ static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
243 des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); 238 des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
244} 239}
245 240
246static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 241static void sparc_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
247{ 242{
248 struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); 243 struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
249 const u64 *K = ctx->decrypt_expkey; 244 const u64 *K = ctx->decrypt_expkey;
@@ -390,8 +385,8 @@ static struct crypto_alg algs[] = { {
390 .cia_min_keysize = DES_KEY_SIZE, 385 .cia_min_keysize = DES_KEY_SIZE,
391 .cia_max_keysize = DES_KEY_SIZE, 386 .cia_max_keysize = DES_KEY_SIZE,
392 .cia_setkey = des_set_key, 387 .cia_setkey = des_set_key,
393 .cia_encrypt = des_encrypt, 388 .cia_encrypt = sparc_des_encrypt,
394 .cia_decrypt = des_decrypt 389 .cia_decrypt = sparc_des_decrypt
395 } 390 }
396 } 391 }
397}, { 392}, {
@@ -447,8 +442,8 @@ static struct crypto_alg algs[] = { {
447 .cia_min_keysize = DES3_EDE_KEY_SIZE, 442 .cia_min_keysize = DES3_EDE_KEY_SIZE,
448 .cia_max_keysize = DES3_EDE_KEY_SIZE, 443 .cia_max_keysize = DES3_EDE_KEY_SIZE,
449 .cia_setkey = des3_ede_set_key, 444 .cia_setkey = des3_ede_set_key,
450 .cia_encrypt = des3_ede_encrypt, 445 .cia_encrypt = sparc_des3_ede_encrypt,
451 .cia_decrypt = des3_ede_decrypt 446 .cia_decrypt = sparc_des3_ede_decrypt
452 } 447 }
453 } 448 }
454}, { 449}, {
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 45734e1cf967..759b1a927826 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -14,11 +14,9 @@ sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
14 14
15obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o 15obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
16 16
17obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
18obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o 17obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
19obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o 18obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
20 19
21obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
22obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o 20obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
23obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o 21obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
24obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o 22obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
@@ -38,14 +36,6 @@ obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
38obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o 36obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
39 37
40obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o 38obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
41obj-$(CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2) += aegis128l-aesni.o
42obj-$(CONFIG_CRYPTO_AEGIS256_AESNI_SSE2) += aegis256-aesni.o
43
44obj-$(CONFIG_CRYPTO_MORUS640_GLUE) += morus640_glue.o
45obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o
46
47obj-$(CONFIG_CRYPTO_MORUS640_SSE2) += morus640-sse2.o
48obj-$(CONFIG_CRYPTO_MORUS1280_SSE2) += morus1280-sse2.o
49 39
50obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o 40obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
51obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o 41obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
@@ -64,15 +54,11 @@ endif
64ifeq ($(avx2_supported),yes) 54ifeq ($(avx2_supported),yes)
65 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o 55 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
66 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o 56 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
67
68 obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o
69endif 57endif
70 58
71aes-i586-y := aes-i586-asm_32.o aes_glue.o
72twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o 59twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
73serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o 60serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
74 61
75aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
76des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o 62des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
77camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o 63camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
78blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o 64blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
@@ -82,11 +68,6 @@ chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o
82serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o 68serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
83 69
84aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o 70aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
85aegis128l-aesni-y := aegis128l-aesni-asm.o aegis128l-aesni-glue.o
86aegis256-aesni-y := aegis256-aesni-asm.o aegis256-aesni-glue.o
87
88morus640-sse2-y := morus640-sse2-asm.o morus640-sse2-glue.o
89morus1280-sse2-y := morus1280-sse2-asm.o morus1280-sse2-glue.o
90 71
91nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o 72nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
92 73
@@ -106,8 +87,6 @@ ifeq ($(avx2_supported),yes)
106 chacha-x86_64-y += chacha-avx2-x86_64.o 87 chacha-x86_64-y += chacha-avx2-x86_64.o
107 serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o 88 serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
108 89
109 morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o
110
111 nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o 90 nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
112endif 91endif
113 92
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S
deleted file mode 100644
index 1461ef00c0e8..000000000000
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ /dev/null
@@ -1,823 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * AES-NI + SSE2 implementation of AEGIS-128L
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <linux/linkage.h>
10#include <asm/frame.h>
11
12#define STATE0 %xmm0
13#define STATE1 %xmm1
14#define STATE2 %xmm2
15#define STATE3 %xmm3
16#define STATE4 %xmm4
17#define STATE5 %xmm5
18#define STATE6 %xmm6
19#define STATE7 %xmm7
20#define MSG0 %xmm8
21#define MSG1 %xmm9
22#define T0 %xmm10
23#define T1 %xmm11
24#define T2 %xmm12
25#define T3 %xmm13
26
27#define STATEP %rdi
28#define LEN %rsi
29#define SRC %rdx
30#define DST %rcx
31
32.section .rodata.cst16.aegis128l_const, "aM", @progbits, 32
33.align 16
34.Laegis128l_const_0:
35 .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d
36 .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62
37.Laegis128l_const_1:
38 .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
39 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd
40
41.section .rodata.cst16.aegis128l_counter, "aM", @progbits, 16
42.align 16
43.Laegis128l_counter0:
44 .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
45 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
46.Laegis128l_counter1:
47 .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
48 .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
49
50.text
51
52/*
53 * __load_partial: internal ABI
54 * input:
55 * LEN - bytes
56 * SRC - src
57 * output:
58 * MSG0 - first message block
59 * MSG1 - second message block
60 * changed:
61 * T0
62 * %r8
63 * %r9
64 */
65__load_partial:
66 xor %r9d, %r9d
67 pxor MSG0, MSG0
68 pxor MSG1, MSG1
69
70 mov LEN, %r8
71 and $0x1, %r8
72 jz .Lld_partial_1
73
74 mov LEN, %r8
75 and $0x1E, %r8
76 add SRC, %r8
77 mov (%r8), %r9b
78
79.Lld_partial_1:
80 mov LEN, %r8
81 and $0x2, %r8
82 jz .Lld_partial_2
83
84 mov LEN, %r8
85 and $0x1C, %r8
86 add SRC, %r8
87 shl $0x10, %r9
88 mov (%r8), %r9w
89
90.Lld_partial_2:
91 mov LEN, %r8
92 and $0x4, %r8
93 jz .Lld_partial_4
94
95 mov LEN, %r8
96 and $0x18, %r8
97 add SRC, %r8
98 shl $32, %r9
99 mov (%r8), %r8d
100 xor %r8, %r9
101
102.Lld_partial_4:
103 movq %r9, MSG0
104
105 mov LEN, %r8
106 and $0x8, %r8
107 jz .Lld_partial_8
108
109 mov LEN, %r8
110 and $0x10, %r8
111 add SRC, %r8
112 pslldq $8, MSG0
113 movq (%r8), T0
114 pxor T0, MSG0
115
116.Lld_partial_8:
117 mov LEN, %r8
118 and $0x10, %r8
119 jz .Lld_partial_16
120
121 movdqa MSG0, MSG1
122 movdqu (SRC), MSG0
123
124.Lld_partial_16:
125 ret
126ENDPROC(__load_partial)
127
128/*
129 * __store_partial: internal ABI
130 * input:
131 * LEN - bytes
132 * DST - dst
133 * output:
134 * T0 - first message block
135 * T1 - second message block
136 * changed:
137 * %r8
138 * %r9
139 * %r10
140 */
141__store_partial:
142 mov LEN, %r8
143 mov DST, %r9
144
145 cmp $16, %r8
146 jl .Lst_partial_16
147
148 movdqu T0, (%r9)
149 movdqa T1, T0
150
151 sub $16, %r8
152 add $16, %r9
153
154.Lst_partial_16:
155 movq T0, %r10
156
157 cmp $8, %r8
158 jl .Lst_partial_8
159
160 mov %r10, (%r9)
161 psrldq $8, T0
162 movq T0, %r10
163
164 sub $8, %r8
165 add $8, %r9
166
167.Lst_partial_8:
168 cmp $4, %r8
169 jl .Lst_partial_4
170
171 mov %r10d, (%r9)
172 shr $32, %r10
173
174 sub $4, %r8
175 add $4, %r9
176
177.Lst_partial_4:
178 cmp $2, %r8
179 jl .Lst_partial_2
180
181 mov %r10w, (%r9)
182 shr $0x10, %r10
183
184 sub $2, %r8
185 add $2, %r9
186
187.Lst_partial_2:
188 cmp $1, %r8
189 jl .Lst_partial_1
190
191 mov %r10b, (%r9)
192
193.Lst_partial_1:
194 ret
195ENDPROC(__store_partial)
196
197.macro update
198 movdqa STATE7, T0
199 aesenc STATE0, STATE7
200 aesenc STATE1, STATE0
201 aesenc STATE2, STATE1
202 aesenc STATE3, STATE2
203 aesenc STATE4, STATE3
204 aesenc STATE5, STATE4
205 aesenc STATE6, STATE5
206 aesenc T0, STATE6
207.endm
208
209.macro update0
210 update
211 pxor MSG0, STATE7
212 pxor MSG1, STATE3
213.endm
214
215.macro update1
216 update
217 pxor MSG0, STATE6
218 pxor MSG1, STATE2
219.endm
220
221.macro update2
222 update
223 pxor MSG0, STATE5
224 pxor MSG1, STATE1
225.endm
226
227.macro update3
228 update
229 pxor MSG0, STATE4
230 pxor MSG1, STATE0
231.endm
232
233.macro update4
234 update
235 pxor MSG0, STATE3
236 pxor MSG1, STATE7
237.endm
238
239.macro update5
240 update
241 pxor MSG0, STATE2
242 pxor MSG1, STATE6
243.endm
244
245.macro update6
246 update
247 pxor MSG0, STATE1
248 pxor MSG1, STATE5
249.endm
250
251.macro update7
252 update
253 pxor MSG0, STATE0
254 pxor MSG1, STATE4
255.endm
256
257.macro state_load
258 movdqu 0x00(STATEP), STATE0
259 movdqu 0x10(STATEP), STATE1
260 movdqu 0x20(STATEP), STATE2
261 movdqu 0x30(STATEP), STATE3
262 movdqu 0x40(STATEP), STATE4
263 movdqu 0x50(STATEP), STATE5
264 movdqu 0x60(STATEP), STATE6
265 movdqu 0x70(STATEP), STATE7
266.endm
267
268.macro state_store s0 s1 s2 s3 s4 s5 s6 s7
269 movdqu \s7, 0x00(STATEP)
270 movdqu \s0, 0x10(STATEP)
271 movdqu \s1, 0x20(STATEP)
272 movdqu \s2, 0x30(STATEP)
273 movdqu \s3, 0x40(STATEP)
274 movdqu \s4, 0x50(STATEP)
275 movdqu \s5, 0x60(STATEP)
276 movdqu \s6, 0x70(STATEP)
277.endm
278
279.macro state_store0
280 state_store STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7
281.endm
282
283.macro state_store1
284 state_store STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6
285.endm
286
287.macro state_store2
288 state_store STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
289.endm
290
291.macro state_store3
292 state_store STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4
293.endm
294
295.macro state_store4
296 state_store STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3
297.endm
298
299.macro state_store5
300 state_store STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2
301.endm
302
303.macro state_store6
304 state_store STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1
305.endm
306
307.macro state_store7
308 state_store STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0
309.endm
310
311/*
312 * void crypto_aegis128l_aesni_init(void *state, const void *key, const void *iv);
313 */
314ENTRY(crypto_aegis128l_aesni_init)
315 FRAME_BEGIN
316
317 /* load key: */
318 movdqa (%rsi), MSG1
319 movdqa MSG1, STATE0
320 movdqa MSG1, STATE4
321 movdqa MSG1, STATE5
322 movdqa MSG1, STATE6
323 movdqa MSG1, STATE7
324
325 /* load IV: */
326 movdqu (%rdx), MSG0
327 pxor MSG0, STATE0
328 pxor MSG0, STATE4
329
330 /* load the constants: */
331 movdqa .Laegis128l_const_0, STATE2
332 movdqa .Laegis128l_const_1, STATE1
333 movdqa STATE1, STATE3
334 pxor STATE2, STATE5
335 pxor STATE1, STATE6
336 pxor STATE2, STATE7
337
338 /* update 10 times with IV and KEY: */
339 update0
340 update1
341 update2
342 update3
343 update4
344 update5
345 update6
346 update7
347 update0
348 update1
349
350 state_store1
351
352 FRAME_END
353 ret
354ENDPROC(crypto_aegis128l_aesni_init)
355
356.macro ad_block a i
357 movdq\a (\i * 0x20 + 0x00)(SRC), MSG0
358 movdq\a (\i * 0x20 + 0x10)(SRC), MSG1
359 update\i
360 sub $0x20, LEN
361 cmp $0x20, LEN
362 jl .Lad_out_\i
363.endm
364
365/*
366 * void crypto_aegis128l_aesni_ad(void *state, unsigned int length,
367 * const void *data);
368 */
369ENTRY(crypto_aegis128l_aesni_ad)
370 FRAME_BEGIN
371
372 cmp $0x20, LEN
373 jb .Lad_out
374
375 state_load
376
377 mov SRC, %r8
378 and $0xf, %r8
379 jnz .Lad_u_loop
380
381.align 8
382.Lad_a_loop:
383 ad_block a 0
384 ad_block a 1
385 ad_block a 2
386 ad_block a 3
387 ad_block a 4
388 ad_block a 5
389 ad_block a 6
390 ad_block a 7
391
392 add $0x100, SRC
393 jmp .Lad_a_loop
394
395.align 8
396.Lad_u_loop:
397 ad_block u 0
398 ad_block u 1
399 ad_block u 2
400 ad_block u 3
401 ad_block u 4
402 ad_block u 5
403 ad_block u 6
404 ad_block u 7
405
406 add $0x100, SRC
407 jmp .Lad_u_loop
408
409.Lad_out_0:
410 state_store0
411 FRAME_END
412 ret
413
414.Lad_out_1:
415 state_store1
416 FRAME_END
417 ret
418
419.Lad_out_2:
420 state_store2
421 FRAME_END
422 ret
423
424.Lad_out_3:
425 state_store3
426 FRAME_END
427 ret
428
429.Lad_out_4:
430 state_store4
431 FRAME_END
432 ret
433
434.Lad_out_5:
435 state_store5
436 FRAME_END
437 ret
438
439.Lad_out_6:
440 state_store6
441 FRAME_END
442 ret
443
444.Lad_out_7:
445 state_store7
446 FRAME_END
447 ret
448
449.Lad_out:
450 FRAME_END
451 ret
452ENDPROC(crypto_aegis128l_aesni_ad)
453
454.macro crypt m0 m1 s0 s1 s2 s3 s4 s5 s6 s7
455 pxor \s1, \m0
456 pxor \s6, \m0
457 movdqa \s2, T3
458 pand \s3, T3
459 pxor T3, \m0
460
461 pxor \s2, \m1
462 pxor \s5, \m1
463 movdqa \s6, T3
464 pand \s7, T3
465 pxor T3, \m1
466.endm
467
468.macro crypt0 m0 m1
469 crypt \m0 \m1 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7
470.endm
471
472.macro crypt1 m0 m1
473 crypt \m0 \m1 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6
474.endm
475
476.macro crypt2 m0 m1
477 crypt \m0 \m1 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
478.endm
479
480.macro crypt3 m0 m1
481 crypt \m0 \m1 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3 STATE4
482.endm
483
484.macro crypt4 m0 m1
485 crypt \m0 \m1 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2 STATE3
486.endm
487
488.macro crypt5 m0 m1
489 crypt \m0 \m1 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1 STATE2
490.endm
491
492.macro crypt6 m0 m1
493 crypt \m0 \m1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0 STATE1
494.endm
495
496.macro crypt7 m0 m1
497 crypt \m0 \m1 STATE1 STATE2 STATE3 STATE4 STATE5 STATE6 STATE7 STATE0
498.endm
499
500.macro encrypt_block a i
501 movdq\a (\i * 0x20 + 0x00)(SRC), MSG0
502 movdq\a (\i * 0x20 + 0x10)(SRC), MSG1
503 movdqa MSG0, T0
504 movdqa MSG1, T1
505 crypt\i T0, T1
506 movdq\a T0, (\i * 0x20 + 0x00)(DST)
507 movdq\a T1, (\i * 0x20 + 0x10)(DST)
508
509 update\i
510
511 sub $0x20, LEN
512 cmp $0x20, LEN
513 jl .Lenc_out_\i
514.endm
515
516.macro decrypt_block a i
517 movdq\a (\i * 0x20 + 0x00)(SRC), MSG0
518 movdq\a (\i * 0x20 + 0x10)(SRC), MSG1
519 crypt\i MSG0, MSG1
520 movdq\a MSG0, (\i * 0x20 + 0x00)(DST)
521 movdq\a MSG1, (\i * 0x20 + 0x10)(DST)
522
523 update\i
524
525 sub $0x20, LEN
526 cmp $0x20, LEN
527 jl .Ldec_out_\i
528.endm
529
530/*
531 * void crypto_aegis128l_aesni_enc(void *state, unsigned int length,
532 * const void *src, void *dst);
533 */
534ENTRY(crypto_aegis128l_aesni_enc)
535 FRAME_BEGIN
536
537 cmp $0x20, LEN
538 jb .Lenc_out
539
540 state_load
541
542 mov SRC, %r8
543 or DST, %r8
544 and $0xf, %r8
545 jnz .Lenc_u_loop
546
547.align 8
548.Lenc_a_loop:
549 encrypt_block a 0
550 encrypt_block a 1
551 encrypt_block a 2
552 encrypt_block a 3
553 encrypt_block a 4
554 encrypt_block a 5
555 encrypt_block a 6
556 encrypt_block a 7
557
558 add $0x100, SRC
559 add $0x100, DST
560 jmp .Lenc_a_loop
561
562.align 8
563.Lenc_u_loop:
564 encrypt_block u 0
565 encrypt_block u 1
566 encrypt_block u 2
567 encrypt_block u 3
568 encrypt_block u 4
569 encrypt_block u 5
570 encrypt_block u 6
571 encrypt_block u 7
572
573 add $0x100, SRC
574 add $0x100, DST
575 jmp .Lenc_u_loop
576
577.Lenc_out_0:
578 state_store0
579 FRAME_END
580 ret
581
582.Lenc_out_1:
583 state_store1
584 FRAME_END
585 ret
586
587.Lenc_out_2:
588 state_store2
589 FRAME_END
590 ret
591
592.Lenc_out_3:
593 state_store3
594 FRAME_END
595 ret
596
597.Lenc_out_4:
598 state_store4
599 FRAME_END
600 ret
601
602.Lenc_out_5:
603 state_store5
604 FRAME_END
605 ret
606
607.Lenc_out_6:
608 state_store6
609 FRAME_END
610 ret
611
612.Lenc_out_7:
613 state_store7
614 FRAME_END
615 ret
616
617.Lenc_out:
618 FRAME_END
619 ret
620ENDPROC(crypto_aegis128l_aesni_enc)
621
622/*
623 * void crypto_aegis128l_aesni_enc_tail(void *state, unsigned int length,
624 * const void *src, void *dst);
625 */
626ENTRY(crypto_aegis128l_aesni_enc_tail)
627 FRAME_BEGIN
628
629 state_load
630
631 /* encrypt message: */
632 call __load_partial
633
634 movdqa MSG0, T0
635 movdqa MSG1, T1
636 crypt0 T0, T1
637
638 call __store_partial
639
640 update0
641
642 state_store0
643
644 FRAME_END
645 ret
646ENDPROC(crypto_aegis128l_aesni_enc_tail)
647
648/*
649 * void crypto_aegis128l_aesni_dec(void *state, unsigned int length,
650 * const void *src, void *dst);
651 */
652ENTRY(crypto_aegis128l_aesni_dec)
653 FRAME_BEGIN
654
655 cmp $0x20, LEN
656 jb .Ldec_out
657
658 state_load
659
660 mov SRC, %r8
661 or DST, %r8
662 and $0xF, %r8
663 jnz .Ldec_u_loop
664
665.align 8
666.Ldec_a_loop:
667 decrypt_block a 0
668 decrypt_block a 1
669 decrypt_block a 2
670 decrypt_block a 3
671 decrypt_block a 4
672 decrypt_block a 5
673 decrypt_block a 6
674 decrypt_block a 7
675
676 add $0x100, SRC
677 add $0x100, DST
678 jmp .Ldec_a_loop
679
680.align 8
681.Ldec_u_loop:
682 decrypt_block u 0
683 decrypt_block u 1
684 decrypt_block u 2
685 decrypt_block u 3
686 decrypt_block u 4
687 decrypt_block u 5
688 decrypt_block u 6
689 decrypt_block u 7
690
691 add $0x100, SRC
692 add $0x100, DST
693 jmp .Ldec_u_loop
694
695.Ldec_out_0:
696 state_store0
697 FRAME_END
698 ret
699
700.Ldec_out_1:
701 state_store1
702 FRAME_END
703 ret
704
705.Ldec_out_2:
706 state_store2
707 FRAME_END
708 ret
709
710.Ldec_out_3:
711 state_store3
712 FRAME_END
713 ret
714
715.Ldec_out_4:
716 state_store4
717 FRAME_END
718 ret
719
720.Ldec_out_5:
721 state_store5
722 FRAME_END
723 ret
724
725.Ldec_out_6:
726 state_store6
727 FRAME_END
728 ret
729
730.Ldec_out_7:
731 state_store7
732 FRAME_END
733 ret
734
735.Ldec_out:
736 FRAME_END
737 ret
738ENDPROC(crypto_aegis128l_aesni_dec)
739
740/*
741 * void crypto_aegis128l_aesni_dec_tail(void *state, unsigned int length,
742 * const void *src, void *dst);
743 */
744ENTRY(crypto_aegis128l_aesni_dec_tail)
745 FRAME_BEGIN
746
747 state_load
748
749 /* decrypt message: */
750 call __load_partial
751
752 crypt0 MSG0, MSG1
753
754 movdqa MSG0, T0
755 movdqa MSG1, T1
756 call __store_partial
757
758 /* mask with byte count: */
759 movq LEN, T0
760 punpcklbw T0, T0
761 punpcklbw T0, T0
762 punpcklbw T0, T0
763 punpcklbw T0, T0
764 movdqa T0, T1
765 movdqa .Laegis128l_counter0, T2
766 movdqa .Laegis128l_counter1, T3
767 pcmpgtb T2, T0
768 pcmpgtb T3, T1
769 pand T0, MSG0
770 pand T1, MSG1
771
772 update0
773
774 state_store0
775
776 FRAME_END
777 ret
778ENDPROC(crypto_aegis128l_aesni_dec_tail)
779
780/*
781 * void crypto_aegis128l_aesni_final(void *state, void *tag_xor,
782 * u64 assoclen, u64 cryptlen);
783 */
784ENTRY(crypto_aegis128l_aesni_final)
785 FRAME_BEGIN
786
787 state_load
788
789 /* prepare length block: */
790 movq %rdx, MSG0
791 movq %rcx, T0
792 pslldq $8, T0
793 pxor T0, MSG0
794 psllq $3, MSG0 /* multiply by 8 (to get bit count) */
795
796 pxor STATE2, MSG0
797 movdqa MSG0, MSG1
798
799 /* update state: */
800 update0
801 update1
802 update2
803 update3
804 update4
805 update5
806 update6
807
808 /* xor tag: */
809 movdqu (%rsi), T0
810
811 pxor STATE1, T0
812 pxor STATE2, T0
813 pxor STATE3, T0
814 pxor STATE4, T0
815 pxor STATE5, T0
816 pxor STATE6, T0
817 pxor STATE7, T0
818
819 movdqu T0, (%rsi)
820
821 FRAME_END
822 ret
823ENDPROC(crypto_aegis128l_aesni_final)
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
deleted file mode 100644
index 19eb28b316f0..000000000000
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ /dev/null
@@ -1,293 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The AEGIS-128L Authenticated-Encryption Algorithm
4 * Glue for AES-NI + SSE2 implementation
5 *
6 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/simd.h>
12#include <crypto/internal/skcipher.h>
13#include <crypto/scatterwalk.h>
14#include <linux/module.h>
15#include <asm/fpu/api.h>
16#include <asm/cpu_device_id.h>
17
18#define AEGIS128L_BLOCK_ALIGN 16
19#define AEGIS128L_BLOCK_SIZE 32
20#define AEGIS128L_NONCE_SIZE 16
21#define AEGIS128L_STATE_BLOCKS 8
22#define AEGIS128L_KEY_SIZE 16
23#define AEGIS128L_MIN_AUTH_SIZE 8
24#define AEGIS128L_MAX_AUTH_SIZE 16
25
26asmlinkage void crypto_aegis128l_aesni_init(void *state, void *key, void *iv);
27
28asmlinkage void crypto_aegis128l_aesni_ad(
29 void *state, unsigned int length, const void *data);
30
31asmlinkage void crypto_aegis128l_aesni_enc(
32 void *state, unsigned int length, const void *src, void *dst);
33
34asmlinkage void crypto_aegis128l_aesni_dec(
35 void *state, unsigned int length, const void *src, void *dst);
36
37asmlinkage void crypto_aegis128l_aesni_enc_tail(
38 void *state, unsigned int length, const void *src, void *dst);
39
40asmlinkage void crypto_aegis128l_aesni_dec_tail(
41 void *state, unsigned int length, const void *src, void *dst);
42
43asmlinkage void crypto_aegis128l_aesni_final(
44 void *state, void *tag_xor, unsigned int cryptlen,
45 unsigned int assoclen);
46
47struct aegis_block {
48 u8 bytes[AEGIS128L_BLOCK_SIZE] __aligned(AEGIS128L_BLOCK_ALIGN);
49};
50
51struct aegis_state {
52 struct aegis_block blocks[AEGIS128L_STATE_BLOCKS];
53};
54
55struct aegis_ctx {
56 struct aegis_block key;
57};
58
59struct aegis_crypt_ops {
60 int (*skcipher_walk_init)(struct skcipher_walk *walk,
61 struct aead_request *req, bool atomic);
62
63 void (*crypt_blocks)(void *state, unsigned int length, const void *src,
64 void *dst);
65 void (*crypt_tail)(void *state, unsigned int length, const void *src,
66 void *dst);
67};
68
69static void crypto_aegis128l_aesni_process_ad(
70 struct aegis_state *state, struct scatterlist *sg_src,
71 unsigned int assoclen)
72{
73 struct scatter_walk walk;
74 struct aegis_block buf;
75 unsigned int pos = 0;
76
77 scatterwalk_start(&walk, sg_src);
78 while (assoclen != 0) {
79 unsigned int size = scatterwalk_clamp(&walk, assoclen);
80 unsigned int left = size;
81 void *mapped = scatterwalk_map(&walk);
82 const u8 *src = (const u8 *)mapped;
83
84 if (pos + size >= AEGIS128L_BLOCK_SIZE) {
85 if (pos > 0) {
86 unsigned int fill = AEGIS128L_BLOCK_SIZE - pos;
87 memcpy(buf.bytes + pos, src, fill);
88 crypto_aegis128l_aesni_ad(state,
89 AEGIS128L_BLOCK_SIZE,
90 buf.bytes);
91 pos = 0;
92 left -= fill;
93 src += fill;
94 }
95
96 crypto_aegis128l_aesni_ad(state, left, src);
97
98 src += left & ~(AEGIS128L_BLOCK_SIZE - 1);
99 left &= AEGIS128L_BLOCK_SIZE - 1;
100 }
101
102 memcpy(buf.bytes + pos, src, left);
103 pos += left;
104 assoclen -= size;
105
106 scatterwalk_unmap(mapped);
107 scatterwalk_advance(&walk, size);
108 scatterwalk_done(&walk, 0, assoclen);
109 }
110
111 if (pos > 0) {
112 memset(buf.bytes + pos, 0, AEGIS128L_BLOCK_SIZE - pos);
113 crypto_aegis128l_aesni_ad(state, AEGIS128L_BLOCK_SIZE, buf.bytes);
114 }
115}
116
117static void crypto_aegis128l_aesni_process_crypt(
118 struct aegis_state *state, struct skcipher_walk *walk,
119 const struct aegis_crypt_ops *ops)
120{
121 while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
122 ops->crypt_blocks(state, round_down(walk->nbytes,
123 AEGIS128L_BLOCK_SIZE),
124 walk->src.virt.addr, walk->dst.virt.addr);
125 skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
126 }
127
128 if (walk->nbytes) {
129 ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
130 walk->dst.virt.addr);
131 skcipher_walk_done(walk, 0);
132 }
133}
134
135static struct aegis_ctx *crypto_aegis128l_aesni_ctx(struct crypto_aead *aead)
136{
137 u8 *ctx = crypto_aead_ctx(aead);
138 ctx = PTR_ALIGN(ctx, __alignof__(struct aegis_ctx));
139 return (void *)ctx;
140}
141
142static int crypto_aegis128l_aesni_setkey(struct crypto_aead *aead,
143 const u8 *key, unsigned int keylen)
144{
145 struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(aead);
146
147 if (keylen != AEGIS128L_KEY_SIZE) {
148 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
149 return -EINVAL;
150 }
151
152 memcpy(ctx->key.bytes, key, AEGIS128L_KEY_SIZE);
153
154 return 0;
155}
156
157static int crypto_aegis128l_aesni_setauthsize(struct crypto_aead *tfm,
158 unsigned int authsize)
159{
160 if (authsize > AEGIS128L_MAX_AUTH_SIZE)
161 return -EINVAL;
162 if (authsize < AEGIS128L_MIN_AUTH_SIZE)
163 return -EINVAL;
164 return 0;
165}
166
167static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
168 struct aegis_block *tag_xor,
169 unsigned int cryptlen,
170 const struct aegis_crypt_ops *ops)
171{
172 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
173 struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
174 struct skcipher_walk walk;
175 struct aegis_state state;
176
177 ops->skcipher_walk_init(&walk, req, true);
178
179 kernel_fpu_begin();
180
181 crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
182 crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
183 crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
184 crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
185
186 kernel_fpu_end();
187}
188
189static int crypto_aegis128l_aesni_encrypt(struct aead_request *req)
190{
191 static const struct aegis_crypt_ops OPS = {
192 .skcipher_walk_init = skcipher_walk_aead_encrypt,
193 .crypt_blocks = crypto_aegis128l_aesni_enc,
194 .crypt_tail = crypto_aegis128l_aesni_enc_tail,
195 };
196
197 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
198 struct aegis_block tag = {};
199 unsigned int authsize = crypto_aead_authsize(tfm);
200 unsigned int cryptlen = req->cryptlen;
201
202 crypto_aegis128l_aesni_crypt(req, &tag, cryptlen, &OPS);
203
204 scatterwalk_map_and_copy(tag.bytes, req->dst,
205 req->assoclen + cryptlen, authsize, 1);
206 return 0;
207}
208
209static int crypto_aegis128l_aesni_decrypt(struct aead_request *req)
210{
211 static const struct aegis_block zeros = {};
212
213 static const struct aegis_crypt_ops OPS = {
214 .skcipher_walk_init = skcipher_walk_aead_decrypt,
215 .crypt_blocks = crypto_aegis128l_aesni_dec,
216 .crypt_tail = crypto_aegis128l_aesni_dec_tail,
217 };
218
219 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
220 struct aegis_block tag;
221 unsigned int authsize = crypto_aead_authsize(tfm);
222 unsigned int cryptlen = req->cryptlen - authsize;
223
224 scatterwalk_map_and_copy(tag.bytes, req->src,
225 req->assoclen + cryptlen, authsize, 0);
226
227 crypto_aegis128l_aesni_crypt(req, &tag, cryptlen, &OPS);
228
229 return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
230}
231
232static int crypto_aegis128l_aesni_init_tfm(struct crypto_aead *aead)
233{
234 return 0;
235}
236
237static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
238{
239}
240
241static struct aead_alg crypto_aegis128l_aesni_alg = {
242 .setkey = crypto_aegis128l_aesni_setkey,
243 .setauthsize = crypto_aegis128l_aesni_setauthsize,
244 .encrypt = crypto_aegis128l_aesni_encrypt,
245 .decrypt = crypto_aegis128l_aesni_decrypt,
246 .init = crypto_aegis128l_aesni_init_tfm,
247 .exit = crypto_aegis128l_aesni_exit_tfm,
248
249 .ivsize = AEGIS128L_NONCE_SIZE,
250 .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
251 .chunksize = AEGIS128L_BLOCK_SIZE,
252
253 .base = {
254 .cra_flags = CRYPTO_ALG_INTERNAL,
255 .cra_blocksize = 1,
256 .cra_ctxsize = sizeof(struct aegis_ctx) +
257 __alignof__(struct aegis_ctx),
258 .cra_alignmask = 0,
259 .cra_priority = 400,
260
261 .cra_name = "__aegis128l",
262 .cra_driver_name = "__aegis128l-aesni",
263
264 .cra_module = THIS_MODULE,
265 }
266};
267
268static struct simd_aead_alg *simd_alg;
269
270static int __init crypto_aegis128l_aesni_module_init(void)
271{
272 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
273 !boot_cpu_has(X86_FEATURE_AES) ||
274 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
275 return -ENODEV;
276
277 return simd_register_aeads_compat(&crypto_aegis128l_aesni_alg, 1,
278 &simd_alg);
279}
280
281static void __exit crypto_aegis128l_aesni_module_exit(void)
282{
283 simd_unregister_aeads(&crypto_aegis128l_aesni_alg, 1, &simd_alg);
284}
285
286module_init(crypto_aegis128l_aesni_module_init);
287module_exit(crypto_aegis128l_aesni_module_exit);
288
289MODULE_LICENSE("GPL");
290MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
291MODULE_DESCRIPTION("AEGIS-128L AEAD algorithm -- AESNI+SSE2 implementation");
292MODULE_ALIAS_CRYPTO("aegis128l");
293MODULE_ALIAS_CRYPTO("aegis128l-aesni");
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S
deleted file mode 100644
index 37d9b13dfd85..000000000000
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ /dev/null
@@ -1,700 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * AES-NI + SSE2 implementation of AEGIS-256
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <linux/linkage.h>
10#include <asm/frame.h>
11
12#define STATE0 %xmm0
13#define STATE1 %xmm1
14#define STATE2 %xmm2
15#define STATE3 %xmm3
16#define STATE4 %xmm4
17#define STATE5 %xmm5
18#define MSG %xmm6
19#define T0 %xmm7
20#define T1 %xmm8
21#define T2 %xmm9
22#define T3 %xmm10
23
24#define STATEP %rdi
25#define LEN %rsi
26#define SRC %rdx
27#define DST %rcx
28
29.section .rodata.cst16.aegis256_const, "aM", @progbits, 32
30.align 16
31.Laegis256_const_0:
32 .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d
33 .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62
34.Laegis256_const_1:
35 .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
36 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd
37
38.section .rodata.cst16.aegis256_counter, "aM", @progbits, 16
39.align 16
40.Laegis256_counter:
41 .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
42 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
43
44.text
45
46/*
47 * __load_partial: internal ABI
48 * input:
49 * LEN - bytes
50 * SRC - src
51 * output:
52 * MSG - message block
53 * changed:
54 * T0
55 * %r8
56 * %r9
57 */
58__load_partial:
59 xor %r9d, %r9d
60 pxor MSG, MSG
61
62 mov LEN, %r8
63 and $0x1, %r8
64 jz .Lld_partial_1
65
66 mov LEN, %r8
67 and $0x1E, %r8
68 add SRC, %r8
69 mov (%r8), %r9b
70
71.Lld_partial_1:
72 mov LEN, %r8
73 and $0x2, %r8
74 jz .Lld_partial_2
75
76 mov LEN, %r8
77 and $0x1C, %r8
78 add SRC, %r8
79 shl $0x10, %r9
80 mov (%r8), %r9w
81
82.Lld_partial_2:
83 mov LEN, %r8
84 and $0x4, %r8
85 jz .Lld_partial_4
86
87 mov LEN, %r8
88 and $0x18, %r8
89 add SRC, %r8
90 shl $32, %r9
91 mov (%r8), %r8d
92 xor %r8, %r9
93
94.Lld_partial_4:
95 movq %r9, MSG
96
97 mov LEN, %r8
98 and $0x8, %r8
99 jz .Lld_partial_8
100
101 mov LEN, %r8
102 and $0x10, %r8
103 add SRC, %r8
104 pslldq $8, MSG
105 movq (%r8), T0
106 pxor T0, MSG
107
108.Lld_partial_8:
109 ret
110ENDPROC(__load_partial)
111
112/*
113 * __store_partial: internal ABI
114 * input:
115 * LEN - bytes
116 * DST - dst
117 * output:
118 * T0 - message block
119 * changed:
120 * %r8
121 * %r9
122 * %r10
123 */
124__store_partial:
125 mov LEN, %r8
126 mov DST, %r9
127
128 movq T0, %r10
129
130 cmp $8, %r8
131 jl .Lst_partial_8
132
133 mov %r10, (%r9)
134 psrldq $8, T0
135 movq T0, %r10
136
137 sub $8, %r8
138 add $8, %r9
139
140.Lst_partial_8:
141 cmp $4, %r8
142 jl .Lst_partial_4
143
144 mov %r10d, (%r9)
145 shr $32, %r10
146
147 sub $4, %r8
148 add $4, %r9
149
150.Lst_partial_4:
151 cmp $2, %r8
152 jl .Lst_partial_2
153
154 mov %r10w, (%r9)
155 shr $0x10, %r10
156
157 sub $2, %r8
158 add $2, %r9
159
160.Lst_partial_2:
161 cmp $1, %r8
162 jl .Lst_partial_1
163
164 mov %r10b, (%r9)
165
166.Lst_partial_1:
167 ret
168ENDPROC(__store_partial)
169
170.macro update
171 movdqa STATE5, T0
172 aesenc STATE0, STATE5
173 aesenc STATE1, STATE0
174 aesenc STATE2, STATE1
175 aesenc STATE3, STATE2
176 aesenc STATE4, STATE3
177 aesenc T0, STATE4
178.endm
179
180.macro update0 m
181 update
182 pxor \m, STATE5
183.endm
184
185.macro update1 m
186 update
187 pxor \m, STATE4
188.endm
189
190.macro update2 m
191 update
192 pxor \m, STATE3
193.endm
194
195.macro update3 m
196 update
197 pxor \m, STATE2
198.endm
199
200.macro update4 m
201 update
202 pxor \m, STATE1
203.endm
204
205.macro update5 m
206 update
207 pxor \m, STATE0
208.endm
209
210.macro state_load
211 movdqu 0x00(STATEP), STATE0
212 movdqu 0x10(STATEP), STATE1
213 movdqu 0x20(STATEP), STATE2
214 movdqu 0x30(STATEP), STATE3
215 movdqu 0x40(STATEP), STATE4
216 movdqu 0x50(STATEP), STATE5
217.endm
218
219.macro state_store s0 s1 s2 s3 s4 s5
220 movdqu \s5, 0x00(STATEP)
221 movdqu \s0, 0x10(STATEP)
222 movdqu \s1, 0x20(STATEP)
223 movdqu \s2, 0x30(STATEP)
224 movdqu \s3, 0x40(STATEP)
225 movdqu \s4, 0x50(STATEP)
226.endm
227
228.macro state_store0
229 state_store STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
230.endm
231
232.macro state_store1
233 state_store STATE5 STATE0 STATE1 STATE2 STATE3 STATE4
234.endm
235
236.macro state_store2
237 state_store STATE4 STATE5 STATE0 STATE1 STATE2 STATE3
238.endm
239
240.macro state_store3
241 state_store STATE3 STATE4 STATE5 STATE0 STATE1 STATE2
242.endm
243
244.macro state_store4
245 state_store STATE2 STATE3 STATE4 STATE5 STATE0 STATE1
246.endm
247
248.macro state_store5
249 state_store STATE1 STATE2 STATE3 STATE4 STATE5 STATE0
250.endm
251
252/*
253 * void crypto_aegis256_aesni_init(void *state, const void *key, const void *iv);
254 */
255ENTRY(crypto_aegis256_aesni_init)
256 FRAME_BEGIN
257
258 /* load key: */
259 movdqa 0x00(%rsi), MSG
260 movdqa 0x10(%rsi), T1
261 movdqa MSG, STATE4
262 movdqa T1, STATE5
263
264 /* load IV: */
265 movdqu 0x00(%rdx), T2
266 movdqu 0x10(%rdx), T3
267 pxor MSG, T2
268 pxor T1, T3
269 movdqa T2, STATE0
270 movdqa T3, STATE1
271
272 /* load the constants: */
273 movdqa .Laegis256_const_0, STATE3
274 movdqa .Laegis256_const_1, STATE2
275 pxor STATE3, STATE4
276 pxor STATE2, STATE5
277
278 /* update 10 times with IV and KEY: */
279 update0 MSG
280 update1 T1
281 update2 T2
282 update3 T3
283 update4 MSG
284 update5 T1
285 update0 T2
286 update1 T3
287 update2 MSG
288 update3 T1
289 update4 T2
290 update5 T3
291 update0 MSG
292 update1 T1
293 update2 T2
294 update3 T3
295
296 state_store3
297
298 FRAME_END
299 ret
300ENDPROC(crypto_aegis256_aesni_init)
301
302.macro ad_block a i
303 movdq\a (\i * 0x10)(SRC), MSG
304 update\i MSG
305 sub $0x10, LEN
306 cmp $0x10, LEN
307 jl .Lad_out_\i
308.endm
309
310/*
311 * void crypto_aegis256_aesni_ad(void *state, unsigned int length,
312 * const void *data);
313 */
314ENTRY(crypto_aegis256_aesni_ad)
315 FRAME_BEGIN
316
317 cmp $0x10, LEN
318 jb .Lad_out
319
320 state_load
321
322 mov SRC, %r8
323 and $0xf, %r8
324 jnz .Lad_u_loop
325
326.align 8
327.Lad_a_loop:
328 ad_block a 0
329 ad_block a 1
330 ad_block a 2
331 ad_block a 3
332 ad_block a 4
333 ad_block a 5
334
335 add $0x60, SRC
336 jmp .Lad_a_loop
337
338.align 8
339.Lad_u_loop:
340 ad_block u 0
341 ad_block u 1
342 ad_block u 2
343 ad_block u 3
344 ad_block u 4
345 ad_block u 5
346
347 add $0x60, SRC
348 jmp .Lad_u_loop
349
350.Lad_out_0:
351 state_store0
352 FRAME_END
353 ret
354
355.Lad_out_1:
356 state_store1
357 FRAME_END
358 ret
359
360.Lad_out_2:
361 state_store2
362 FRAME_END
363 ret
364
365.Lad_out_3:
366 state_store3
367 FRAME_END
368 ret
369
370.Lad_out_4:
371 state_store4
372 FRAME_END
373 ret
374
375.Lad_out_5:
376 state_store5
377 FRAME_END
378 ret
379
380.Lad_out:
381 FRAME_END
382 ret
383ENDPROC(crypto_aegis256_aesni_ad)
384
385.macro crypt m s0 s1 s2 s3 s4 s5
386 pxor \s1, \m
387 pxor \s4, \m
388 pxor \s5, \m
389 movdqa \s2, T3
390 pand \s3, T3
391 pxor T3, \m
392.endm
393
394.macro crypt0 m
395 crypt \m STATE0 STATE1 STATE2 STATE3 STATE4 STATE5
396.endm
397
398.macro crypt1 m
399 crypt \m STATE5 STATE0 STATE1 STATE2 STATE3 STATE4
400.endm
401
402.macro crypt2 m
403 crypt \m STATE4 STATE5 STATE0 STATE1 STATE2 STATE3
404.endm
405
406.macro crypt3 m
407 crypt \m STATE3 STATE4 STATE5 STATE0 STATE1 STATE2
408.endm
409
410.macro crypt4 m
411 crypt \m STATE2 STATE3 STATE4 STATE5 STATE0 STATE1
412.endm
413
414.macro crypt5 m
415 crypt \m STATE1 STATE2 STATE3 STATE4 STATE5 STATE0
416.endm
417
418.macro encrypt_block a i
419 movdq\a (\i * 0x10)(SRC), MSG
420 movdqa MSG, T0
421 crypt\i T0
422 movdq\a T0, (\i * 0x10)(DST)
423
424 update\i MSG
425
426 sub $0x10, LEN
427 cmp $0x10, LEN
428 jl .Lenc_out_\i
429.endm
430
431.macro decrypt_block a i
432 movdq\a (\i * 0x10)(SRC), MSG
433 crypt\i MSG
434 movdq\a MSG, (\i * 0x10)(DST)
435
436 update\i MSG
437
438 sub $0x10, LEN
439 cmp $0x10, LEN
440 jl .Ldec_out_\i
441.endm
442
443/*
444 * void crypto_aegis256_aesni_enc(void *state, unsigned int length,
445 * const void *src, void *dst);
446 */
447ENTRY(crypto_aegis256_aesni_enc)
448 FRAME_BEGIN
449
450 cmp $0x10, LEN
451 jb .Lenc_out
452
453 state_load
454
455 mov SRC, %r8
456 or DST, %r8
457 and $0xf, %r8
458 jnz .Lenc_u_loop
459
460.align 8
461.Lenc_a_loop:
462 encrypt_block a 0
463 encrypt_block a 1
464 encrypt_block a 2
465 encrypt_block a 3
466 encrypt_block a 4
467 encrypt_block a 5
468
469 add $0x60, SRC
470 add $0x60, DST
471 jmp .Lenc_a_loop
472
473.align 8
474.Lenc_u_loop:
475 encrypt_block u 0
476 encrypt_block u 1
477 encrypt_block u 2
478 encrypt_block u 3
479 encrypt_block u 4
480 encrypt_block u 5
481
482 add $0x60, SRC
483 add $0x60, DST
484 jmp .Lenc_u_loop
485
486.Lenc_out_0:
487 state_store0
488 FRAME_END
489 ret
490
491.Lenc_out_1:
492 state_store1
493 FRAME_END
494 ret
495
496.Lenc_out_2:
497 state_store2
498 FRAME_END
499 ret
500
501.Lenc_out_3:
502 state_store3
503 FRAME_END
504 ret
505
506.Lenc_out_4:
507 state_store4
508 FRAME_END
509 ret
510
511.Lenc_out_5:
512 state_store5
513 FRAME_END
514 ret
515
516.Lenc_out:
517 FRAME_END
518 ret
519ENDPROC(crypto_aegis256_aesni_enc)
520
521/*
522 * void crypto_aegis256_aesni_enc_tail(void *state, unsigned int length,
523 * const void *src, void *dst);
524 */
525ENTRY(crypto_aegis256_aesni_enc_tail)
526 FRAME_BEGIN
527
528 state_load
529
530 /* encrypt message: */
531 call __load_partial
532
533 movdqa MSG, T0
534 crypt0 T0
535
536 call __store_partial
537
538 update0 MSG
539
540 state_store0
541
542 FRAME_END
543 ret
544ENDPROC(crypto_aegis256_aesni_enc_tail)
545
546/*
547 * void crypto_aegis256_aesni_dec(void *state, unsigned int length,
548 * const void *src, void *dst);
549 */
550ENTRY(crypto_aegis256_aesni_dec)
551 FRAME_BEGIN
552
553 cmp $0x10, LEN
554 jb .Ldec_out
555
556 state_load
557
558 mov SRC, %r8
559 or DST, %r8
560 and $0xF, %r8
561 jnz .Ldec_u_loop
562
563.align 8
564.Ldec_a_loop:
565 decrypt_block a 0
566 decrypt_block a 1
567 decrypt_block a 2
568 decrypt_block a 3
569 decrypt_block a 4
570 decrypt_block a 5
571
572 add $0x60, SRC
573 add $0x60, DST
574 jmp .Ldec_a_loop
575
576.align 8
577.Ldec_u_loop:
578 decrypt_block u 0
579 decrypt_block u 1
580 decrypt_block u 2
581 decrypt_block u 3
582 decrypt_block u 4
583 decrypt_block u 5
584
585 add $0x60, SRC
586 add $0x60, DST
587 jmp .Ldec_u_loop
588
589.Ldec_out_0:
590 state_store0
591 FRAME_END
592 ret
593
594.Ldec_out_1:
595 state_store1
596 FRAME_END
597 ret
598
599.Ldec_out_2:
600 state_store2
601 FRAME_END
602 ret
603
604.Ldec_out_3:
605 state_store3
606 FRAME_END
607 ret
608
609.Ldec_out_4:
610 state_store4
611 FRAME_END
612 ret
613
614.Ldec_out_5:
615 state_store5
616 FRAME_END
617 ret
618
619.Ldec_out:
620 FRAME_END
621 ret
622ENDPROC(crypto_aegis256_aesni_dec)
623
624/*
625 * void crypto_aegis256_aesni_dec_tail(void *state, unsigned int length,
626 * const void *src, void *dst);
627 */
628ENTRY(crypto_aegis256_aesni_dec_tail)
629 FRAME_BEGIN
630
631 state_load
632
633 /* decrypt message: */
634 call __load_partial
635
636 crypt0 MSG
637
638 movdqa MSG, T0
639 call __store_partial
640
641 /* mask with byte count: */
642 movq LEN, T0
643 punpcklbw T0, T0
644 punpcklbw T0, T0
645 punpcklbw T0, T0
646 punpcklbw T0, T0
647 movdqa .Laegis256_counter, T1
648 pcmpgtb T1, T0
649 pand T0, MSG
650
651 update0 MSG
652
653 state_store0
654
655 FRAME_END
656 ret
657ENDPROC(crypto_aegis256_aesni_dec_tail)
658
659/*
660 * void crypto_aegis256_aesni_final(void *state, void *tag_xor,
661 * u64 assoclen, u64 cryptlen);
662 */
663ENTRY(crypto_aegis256_aesni_final)
664 FRAME_BEGIN
665
666 state_load
667
668 /* prepare length block: */
669 movq %rdx, MSG
670 movq %rcx, T0
671 pslldq $8, T0
672 pxor T0, MSG
673 psllq $3, MSG /* multiply by 8 (to get bit count) */
674
675 pxor STATE3, MSG
676
677 /* update state: */
678 update0 MSG
679 update1 MSG
680 update2 MSG
681 update3 MSG
682 update4 MSG
683 update5 MSG
684 update0 MSG
685
686 /* xor tag: */
687 movdqu (%rsi), MSG
688
689 pxor STATE0, MSG
690 pxor STATE1, MSG
691 pxor STATE2, MSG
692 pxor STATE3, MSG
693 pxor STATE4, MSG
694 pxor STATE5, MSG
695
696 movdqu MSG, (%rsi)
697
698 FRAME_END
699 ret
700ENDPROC(crypto_aegis256_aesni_final)
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
deleted file mode 100644
index f84da27171d3..000000000000
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ /dev/null
@@ -1,293 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The AEGIS-256 Authenticated-Encryption Algorithm
4 * Glue for AES-NI + SSE2 implementation
5 *
6 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/simd.h>
12#include <crypto/internal/skcipher.h>
13#include <crypto/scatterwalk.h>
14#include <linux/module.h>
15#include <asm/fpu/api.h>
16#include <asm/cpu_device_id.h>
17
18#define AEGIS256_BLOCK_ALIGN 16
19#define AEGIS256_BLOCK_SIZE 16
20#define AEGIS256_NONCE_SIZE 32
21#define AEGIS256_STATE_BLOCKS 6
22#define AEGIS256_KEY_SIZE 32
23#define AEGIS256_MIN_AUTH_SIZE 8
24#define AEGIS256_MAX_AUTH_SIZE 16
25
26asmlinkage void crypto_aegis256_aesni_init(void *state, void *key, void *iv);
27
28asmlinkage void crypto_aegis256_aesni_ad(
29 void *state, unsigned int length, const void *data);
30
31asmlinkage void crypto_aegis256_aesni_enc(
32 void *state, unsigned int length, const void *src, void *dst);
33
34asmlinkage void crypto_aegis256_aesni_dec(
35 void *state, unsigned int length, const void *src, void *dst);
36
37asmlinkage void crypto_aegis256_aesni_enc_tail(
38 void *state, unsigned int length, const void *src, void *dst);
39
40asmlinkage void crypto_aegis256_aesni_dec_tail(
41 void *state, unsigned int length, const void *src, void *dst);
42
43asmlinkage void crypto_aegis256_aesni_final(
44		void *state, void *tag_xor, unsigned int assoclen,
45		unsigned int cryptlen);
46
47struct aegis_block {
48 u8 bytes[AEGIS256_BLOCK_SIZE] __aligned(AEGIS256_BLOCK_ALIGN);
49};
50
51struct aegis_state {
52 struct aegis_block blocks[AEGIS256_STATE_BLOCKS];
53};
54
55struct aegis_ctx {
56 struct aegis_block key[AEGIS256_KEY_SIZE / AEGIS256_BLOCK_SIZE];
57};
58
59struct aegis_crypt_ops {
60 int (*skcipher_walk_init)(struct skcipher_walk *walk,
61 struct aead_request *req, bool atomic);
62
63 void (*crypt_blocks)(void *state, unsigned int length, const void *src,
64 void *dst);
65 void (*crypt_tail)(void *state, unsigned int length, const void *src,
66 void *dst);
67};
68
69static void crypto_aegis256_aesni_process_ad(
70 struct aegis_state *state, struct scatterlist *sg_src,
71 unsigned int assoclen)
72{
73 struct scatter_walk walk;
74 struct aegis_block buf;
75 unsigned int pos = 0;
76
77 scatterwalk_start(&walk, sg_src);
78 while (assoclen != 0) {
79 unsigned int size = scatterwalk_clamp(&walk, assoclen);
80 unsigned int left = size;
81 void *mapped = scatterwalk_map(&walk);
82 const u8 *src = (const u8 *)mapped;
83
84 if (pos + size >= AEGIS256_BLOCK_SIZE) {
85 if (pos > 0) {
86 unsigned int fill = AEGIS256_BLOCK_SIZE - pos;
87 memcpy(buf.bytes + pos, src, fill);
88 crypto_aegis256_aesni_ad(state,
89 AEGIS256_BLOCK_SIZE,
90 buf.bytes);
91 pos = 0;
92 left -= fill;
93 src += fill;
94 }
95
96 crypto_aegis256_aesni_ad(state, left, src);
97
98 src += left & ~(AEGIS256_BLOCK_SIZE - 1);
99 left &= AEGIS256_BLOCK_SIZE - 1;
100 }
101
102 memcpy(buf.bytes + pos, src, left);
103 pos += left;
104 assoclen -= size;
105
106 scatterwalk_unmap(mapped);
107 scatterwalk_advance(&walk, size);
108 scatterwalk_done(&walk, 0, assoclen);
109 }
110
111 if (pos > 0) {
112 memset(buf.bytes + pos, 0, AEGIS256_BLOCK_SIZE - pos);
113 crypto_aegis256_aesni_ad(state, AEGIS256_BLOCK_SIZE, buf.bytes);
114 }
115}
116
117static void crypto_aegis256_aesni_process_crypt(
118 struct aegis_state *state, struct skcipher_walk *walk,
119 const struct aegis_crypt_ops *ops)
120{
121 while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
122 ops->crypt_blocks(state,
123 round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
124 walk->src.virt.addr, walk->dst.virt.addr);
125 skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
126 }
127
128 if (walk->nbytes) {
129 ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
130 walk->dst.virt.addr);
131 skcipher_walk_done(walk, 0);
132 }
133}
134
135static struct aegis_ctx *crypto_aegis256_aesni_ctx(struct crypto_aead *aead)
136{
137 u8 *ctx = crypto_aead_ctx(aead);
138 ctx = PTR_ALIGN(ctx, __alignof__(struct aegis_ctx));
139 return (void *)ctx;
140}
141
142static int crypto_aegis256_aesni_setkey(struct crypto_aead *aead, const u8 *key,
143 unsigned int keylen)
144{
145 struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(aead);
146
147 if (keylen != AEGIS256_KEY_SIZE) {
148 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
149 return -EINVAL;
150 }
151
152 memcpy(ctx->key, key, AEGIS256_KEY_SIZE);
153
154 return 0;
155}
156
157static int crypto_aegis256_aesni_setauthsize(struct crypto_aead *tfm,
158 unsigned int authsize)
159{
160 if (authsize > AEGIS256_MAX_AUTH_SIZE)
161 return -EINVAL;
162 if (authsize < AEGIS256_MIN_AUTH_SIZE)
163 return -EINVAL;
164 return 0;
165}
166
167static void crypto_aegis256_aesni_crypt(struct aead_request *req,
168 struct aegis_block *tag_xor,
169 unsigned int cryptlen,
170 const struct aegis_crypt_ops *ops)
171{
172 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
173 struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
174 struct skcipher_walk walk;
175 struct aegis_state state;
176
177 ops->skcipher_walk_init(&walk, req, true);
178
179 kernel_fpu_begin();
180
181 crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
182 crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
183 crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
184 crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
185
186 kernel_fpu_end();
187}
188
189static int crypto_aegis256_aesni_encrypt(struct aead_request *req)
190{
191 static const struct aegis_crypt_ops OPS = {
192 .skcipher_walk_init = skcipher_walk_aead_encrypt,
193 .crypt_blocks = crypto_aegis256_aesni_enc,
194 .crypt_tail = crypto_aegis256_aesni_enc_tail,
195 };
196
197 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
198 struct aegis_block tag = {};
199 unsigned int authsize = crypto_aead_authsize(tfm);
200 unsigned int cryptlen = req->cryptlen;
201
202 crypto_aegis256_aesni_crypt(req, &tag, cryptlen, &OPS);
203
204 scatterwalk_map_and_copy(tag.bytes, req->dst,
205 req->assoclen + cryptlen, authsize, 1);
206 return 0;
207}
208
209static int crypto_aegis256_aesni_decrypt(struct aead_request *req)
210{
211 static const struct aegis_block zeros = {};
212
213 static const struct aegis_crypt_ops OPS = {
214 .skcipher_walk_init = skcipher_walk_aead_decrypt,
215 .crypt_blocks = crypto_aegis256_aesni_dec,
216 .crypt_tail = crypto_aegis256_aesni_dec_tail,
217 };
218
219 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
220 struct aegis_block tag;
221 unsigned int authsize = crypto_aead_authsize(tfm);
222 unsigned int cryptlen = req->cryptlen - authsize;
223
224 scatterwalk_map_and_copy(tag.bytes, req->src,
225 req->assoclen + cryptlen, authsize, 0);
226
227 crypto_aegis256_aesni_crypt(req, &tag, cryptlen, &OPS);
228
229 return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
230}
231
232static int crypto_aegis256_aesni_init_tfm(struct crypto_aead *aead)
233{
234 return 0;
235}
236
237static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
238{
239}
240
241static struct aead_alg crypto_aegis256_aesni_alg = {
242 .setkey = crypto_aegis256_aesni_setkey,
243 .setauthsize = crypto_aegis256_aesni_setauthsize,
244 .encrypt = crypto_aegis256_aesni_encrypt,
245 .decrypt = crypto_aegis256_aesni_decrypt,
246 .init = crypto_aegis256_aesni_init_tfm,
247 .exit = crypto_aegis256_aesni_exit_tfm,
248
249 .ivsize = AEGIS256_NONCE_SIZE,
250 .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
251 .chunksize = AEGIS256_BLOCK_SIZE,
252
253 .base = {
254 .cra_flags = CRYPTO_ALG_INTERNAL,
255 .cra_blocksize = 1,
256 .cra_ctxsize = sizeof(struct aegis_ctx) +
257 __alignof__(struct aegis_ctx),
258 .cra_alignmask = 0,
259 .cra_priority = 400,
260
261 .cra_name = "__aegis256",
262 .cra_driver_name = "__aegis256-aesni",
263
264 .cra_module = THIS_MODULE,
265 }
266};
267
268static struct simd_aead_alg *simd_alg;
269
270static int __init crypto_aegis256_aesni_module_init(void)
271{
272 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
273 !boot_cpu_has(X86_FEATURE_AES) ||
274 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
275 return -ENODEV;
276
277 return simd_register_aeads_compat(&crypto_aegis256_aesni_alg, 1,
278 &simd_alg);
279}
280
281static void __exit crypto_aegis256_aesni_module_exit(void)
282{
283 simd_unregister_aeads(&crypto_aegis256_aesni_alg, 1, &simd_alg);
284}
285
286module_init(crypto_aegis256_aesni_module_init);
287module_exit(crypto_aegis256_aesni_module_exit);
288
289MODULE_LICENSE("GPL");
290MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
291MODULE_DESCRIPTION("AEGIS-256 AEAD algorithm -- AESNI+SSE2 implementation");
292MODULE_ALIAS_CRYPTO("aegis256");
293MODULE_ALIAS_CRYPTO("aegis256-aesni");
diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
deleted file mode 100644
index 2849dbc59e11..000000000000
--- a/arch/x86/crypto/aes-i586-asm_32.S
+++ /dev/null
@@ -1,362 +0,0 @@
1// -------------------------------------------------------------------------
2// Copyright (c) 2001, Dr Brian Gladman < >, Worcester, UK.
3// All rights reserved.
4//
5// LICENSE TERMS
6//
7// The free distribution and use of this software in both source and binary
8// form is allowed (with or without changes) provided that:
9//
10// 1. distributions of this source code include the above copyright
11// notice, this list of conditions and the following disclaimer//
12//
13// 2. distributions in binary form include the above copyright
14// notice, this list of conditions and the following disclaimer
15// in the documentation and/or other associated materials//
16//
17// 3. the copyright holder's name is not used to endorse products
18// built using this software without specific written permission.
19//
20//
21// ALTERNATIVELY, provided that this notice is retained in full, this product
22// may be distributed under the terms of the GNU General Public License (GPL),
23// in which case the provisions of the GPL apply INSTEAD OF those given above.
24//
25// Copyright (c) 2004 Linus Torvalds <torvalds@osdl.org>
26// Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
27
28// DISCLAIMER
29//
30// This software is provided 'as is' with no explicit or implied warranties
31// in respect of its properties including, but not limited to, correctness
32// and fitness for purpose.
33// -------------------------------------------------------------------------
34// Issue Date: 29/07/2002
35
36.file "aes-i586-asm.S"
37.text
38
39#include <linux/linkage.h>
40#include <asm/asm-offsets.h>
41
42#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
43
44/* offsets to parameters with one register pushed onto stack */
45#define ctx 8
46#define out_blk 12
47#define in_blk 16
48
49/* offsets in crypto_aes_ctx structure */
50#define klen (480)
51#define ekey (0)
52#define dkey (240)
53
54// register mapping for encrypt and decrypt subroutines
55
56#define r0 eax
57#define r1 ebx
58#define r2 ecx
59#define r3 edx
60#define r4 esi
61#define r5 edi
62
63#define eaxl al
64#define eaxh ah
65#define ebxl bl
66#define ebxh bh
67#define ecxl cl
68#define ecxh ch
69#define edxl dl
70#define edxh dh
71
72#define _h(reg) reg##h
73#define h(reg) _h(reg)
74
75#define _l(reg) reg##l
76#define l(reg) _l(reg)
77
78// This macro takes a 32-bit word representing a column and uses
79// each of its four bytes to index into four tables of 256 32-bit
80// words to obtain values that are then xored into the appropriate
81// output registers r0, r1, r4 or r5.
82
83// Parameters:
84// table table base address
85// %1 out_state[0]
86// %2 out_state[1]
87// %3 out_state[2]
88// %4 out_state[3]
89// idx input register for the round (destroyed)
90// tmp scratch register for the round
91// sched key schedule
92
93#define do_col(table, a1,a2,a3,a4, idx, tmp) \
94 movzx %l(idx),%tmp; \
95 xor table(,%tmp,4),%a1; \
96 movzx %h(idx),%tmp; \
97 shr $16,%idx; \
98 xor table+tlen(,%tmp,4),%a2; \
99 movzx %l(idx),%tmp; \
100 movzx %h(idx),%idx; \
101 xor table+2*tlen(,%tmp,4),%a3; \
102 xor table+3*tlen(,%idx,4),%a4;
103
104// initialise output registers from the key schedule
105// NB1: original value of a3 is in idx on exit
106// NB2: original values of a1,a2,a4 aren't used
107#define do_fcol(table, a1,a2,a3,a4, idx, tmp, sched) \
108 mov 0 sched,%a1; \
109 movzx %l(idx),%tmp; \
110 mov 12 sched,%a2; \
111 xor table(,%tmp,4),%a1; \
112 mov 4 sched,%a4; \
113 movzx %h(idx),%tmp; \
114 shr $16,%idx; \
115 xor table+tlen(,%tmp,4),%a2; \
116 movzx %l(idx),%tmp; \
117 movzx %h(idx),%idx; \
118 xor table+3*tlen(,%idx,4),%a4; \
119 mov %a3,%idx; \
120 mov 8 sched,%a3; \
121 xor table+2*tlen(,%tmp,4),%a3;
122
123// initialise output registers from the key schedule
124// NB1: original value of a3 is in idx on exit
125// NB2: original values of a1,a2,a4 aren't used
126#define do_icol(table, a1,a2,a3,a4, idx, tmp, sched) \
127 mov 0 sched,%a1; \
128 movzx %l(idx),%tmp; \
129 mov 4 sched,%a2; \
130 xor table(,%tmp,4),%a1; \
131 mov 12 sched,%a4; \
132 movzx %h(idx),%tmp; \
133 shr $16,%idx; \
134 xor table+tlen(,%tmp,4),%a2; \
135 movzx %l(idx),%tmp; \
136 movzx %h(idx),%idx; \
137 xor table+3*tlen(,%idx,4),%a4; \
138 mov %a3,%idx; \
139 mov 8 sched,%a3; \
140 xor table+2*tlen(,%tmp,4),%a3;
141
142
143// original Gladman had conditional saves to MMX regs.
144#define save(a1, a2) \
145 mov %a2,4*a1(%esp)
146
147#define restore(a1, a2) \
148 mov 4*a2(%esp),%a1
149
150// These macros perform a forward encryption cycle. They are entered with
151// the first previous round column values in r0,r1,r4,r5 and
152// exit with the final values in the same registers, using stack
153// for temporary storage.
154
155// round column values
156// on entry: r0,r1,r4,r5
157// on exit: r2,r1,r4,r5
158#define fwd_rnd1(arg, table) \
159 save (0,r1); \
160 save (1,r5); \
161 \
162 /* compute new column values */ \
163 do_fcol(table, r2,r5,r4,r1, r0,r3, arg); /* idx=r0 */ \
164 do_col (table, r4,r1,r2,r5, r0,r3); /* idx=r4 */ \
165 restore(r0,0); \
166 do_col (table, r1,r2,r5,r4, r0,r3); /* idx=r1 */ \
167 restore(r0,1); \
168 do_col (table, r5,r4,r1,r2, r0,r3); /* idx=r5 */
169
170// round column values
171// on entry: r2,r1,r4,r5
172// on exit: r0,r1,r4,r5
173#define fwd_rnd2(arg, table) \
174 save (0,r1); \
175 save (1,r5); \
176 \
177 /* compute new column values */ \
178 do_fcol(table, r0,r5,r4,r1, r2,r3, arg); /* idx=r2 */ \
179 do_col (table, r4,r1,r0,r5, r2,r3); /* idx=r4 */ \
180 restore(r2,0); \
181 do_col (table, r1,r0,r5,r4, r2,r3); /* idx=r1 */ \
182 restore(r2,1); \
183 do_col (table, r5,r4,r1,r0, r2,r3); /* idx=r5 */
184
185// These macros perform an inverse encryption cycle. They are entered with
186// the first previous round column values in r0,r1,r4,r5 and
187// exit with the final values in the same registers, using stack
188// for temporary storage
189
190// round column values
191// on entry: r0,r1,r4,r5
192// on exit: r2,r1,r4,r5
193#define inv_rnd1(arg, table) \
194 save (0,r1); \
195 save (1,r5); \
196 \
197 /* compute new column values */ \
198 do_icol(table, r2,r1,r4,r5, r0,r3, arg); /* idx=r0 */ \
199 do_col (table, r4,r5,r2,r1, r0,r3); /* idx=r4 */ \
200 restore(r0,0); \
201 do_col (table, r1,r4,r5,r2, r0,r3); /* idx=r1 */ \
202 restore(r0,1); \
203 do_col (table, r5,r2,r1,r4, r0,r3); /* idx=r5 */
204
205// round column values
206// on entry: r2,r1,r4,r5
207// on exit: r0,r1,r4,r5
208#define inv_rnd2(arg, table) \
209 save (0,r1); \
210 save (1,r5); \
211 \
212 /* compute new column values */ \
213 do_icol(table, r0,r1,r4,r5, r2,r3, arg); /* idx=r2 */ \
214 do_col (table, r4,r5,r0,r1, r2,r3); /* idx=r4 */ \
215 restore(r2,0); \
216 do_col (table, r1,r4,r5,r0, r2,r3); /* idx=r1 */ \
217 restore(r2,1); \
218 do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */
219
220// AES (Rijndael) Encryption Subroutine
221/* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
222
223.extern crypto_ft_tab
224.extern crypto_fl_tab
225
226ENTRY(aes_enc_blk)
227 push %ebp
228 mov ctx(%esp),%ebp
229
230// CAUTION: the order and the values used in these assigns
231// rely on the register mappings
232
2331: push %ebx
234 mov in_blk+4(%esp),%r2
235 push %esi
236 mov klen(%ebp),%r3 // key size
237 push %edi
238#if ekey != 0
239 lea ekey(%ebp),%ebp // key pointer
240#endif
241
242// input four columns and xor in first round key
243
244 mov (%r2),%r0
245 mov 4(%r2),%r1
246 mov 8(%r2),%r4
247 mov 12(%r2),%r5
248 xor (%ebp),%r0
249 xor 4(%ebp),%r1
250 xor 8(%ebp),%r4
251 xor 12(%ebp),%r5
252
253 sub $8,%esp // space for register saves on stack
254 add $16,%ebp // increment to next round key
255 cmp $24,%r3
256 jb 4f // 10 rounds for 128-bit key
257 lea 32(%ebp),%ebp
258 je 3f // 12 rounds for 192-bit key
259 lea 32(%ebp),%ebp
260
2612: fwd_rnd1( -64(%ebp), crypto_ft_tab) // 14 rounds for 256-bit key
262 fwd_rnd2( -48(%ebp), crypto_ft_tab)
2633: fwd_rnd1( -32(%ebp), crypto_ft_tab) // 12 rounds for 192-bit key
264 fwd_rnd2( -16(%ebp), crypto_ft_tab)
2654: fwd_rnd1( (%ebp), crypto_ft_tab) // 10 rounds for 128-bit key
266 fwd_rnd2( +16(%ebp), crypto_ft_tab)
267 fwd_rnd1( +32(%ebp), crypto_ft_tab)
268 fwd_rnd2( +48(%ebp), crypto_ft_tab)
269 fwd_rnd1( +64(%ebp), crypto_ft_tab)
270 fwd_rnd2( +80(%ebp), crypto_ft_tab)
271 fwd_rnd1( +96(%ebp), crypto_ft_tab)
272 fwd_rnd2(+112(%ebp), crypto_ft_tab)
273 fwd_rnd1(+128(%ebp), crypto_ft_tab)
274 fwd_rnd2(+144(%ebp), crypto_fl_tab) // last round uses a different table
275
276// move final values to the output array. CAUTION: the
277// order of these assigns relies on the register mappings
278
279 add $8,%esp
280 mov out_blk+12(%esp),%ebp
281 mov %r5,12(%ebp)
282 pop %edi
283 mov %r4,8(%ebp)
284 pop %esi
285 mov %r1,4(%ebp)
286 pop %ebx
287 mov %r0,(%ebp)
288 pop %ebp
289 ret
290ENDPROC(aes_enc_blk)
291
292// AES (Rijndael) Decryption Subroutine
293/* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
294
295.extern crypto_it_tab
296.extern crypto_il_tab
297
298ENTRY(aes_dec_blk)
299 push %ebp
300 mov ctx(%esp),%ebp
301
302// CAUTION: the order and the values used in these assigns
303// rely on the register mappings
304
3051: push %ebx
306 mov in_blk+4(%esp),%r2
307 push %esi
308 mov klen(%ebp),%r3 // key size
309 push %edi
310#if dkey != 0
311 lea dkey(%ebp),%ebp // key pointer
312#endif
313
314// input four columns and xor in first round key
315
316 mov (%r2),%r0
317 mov 4(%r2),%r1
318 mov 8(%r2),%r4
319 mov 12(%r2),%r5
320 xor (%ebp),%r0
321 xor 4(%ebp),%r1
322 xor 8(%ebp),%r4
323 xor 12(%ebp),%r5
324
325 sub $8,%esp // space for register saves on stack
326 add $16,%ebp // increment to next round key
327 cmp $24,%r3
328 jb 4f // 10 rounds for 128-bit key
329 lea 32(%ebp),%ebp
330 je 3f // 12 rounds for 192-bit key
331 lea 32(%ebp),%ebp
332
3332: inv_rnd1( -64(%ebp), crypto_it_tab) // 14 rounds for 256-bit key
334 inv_rnd2( -48(%ebp), crypto_it_tab)
3353: inv_rnd1( -32(%ebp), crypto_it_tab) // 12 rounds for 192-bit key
336 inv_rnd2( -16(%ebp), crypto_it_tab)
3374: inv_rnd1( (%ebp), crypto_it_tab) // 10 rounds for 128-bit key
338 inv_rnd2( +16(%ebp), crypto_it_tab)
339 inv_rnd1( +32(%ebp), crypto_it_tab)
340 inv_rnd2( +48(%ebp), crypto_it_tab)
341 inv_rnd1( +64(%ebp), crypto_it_tab)
342 inv_rnd2( +80(%ebp), crypto_it_tab)
343 inv_rnd1( +96(%ebp), crypto_it_tab)
344 inv_rnd2(+112(%ebp), crypto_it_tab)
345 inv_rnd1(+128(%ebp), crypto_it_tab)
346 inv_rnd2(+144(%ebp), crypto_il_tab) // last round uses a different table
347
348// move final values to the output array. CAUTION: the
349// order of these assigns relies on the register mappings
350
351 add $8,%esp
352 mov out_blk+12(%esp),%ebp
353 mov %r5,12(%ebp)
354 pop %edi
355 mov %r4,8(%ebp)
356 pop %esi
357 mov %r1,4(%ebp)
358 pop %ebx
359 mov %r0,(%ebp)
360 pop %ebp
361 ret
362ENDPROC(aes_dec_blk)
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
deleted file mode 100644
index 8739cf7795de..000000000000
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ /dev/null
@@ -1,185 +0,0 @@
1/* AES (Rijndael) implementation (FIPS PUB 197) for x86_64
2 *
3 * Copyright (C) 2005 Andreas Steinmetz, <ast@domdv.de>
4 *
5 * License:
6 * This code can be distributed under the terms of the GNU General Public
7 * License (GPL) Version 2 provided that the above header down to and
8 * including this sentence is retained in full.
9 */
10
11.extern crypto_ft_tab
12.extern crypto_it_tab
13.extern crypto_fl_tab
14.extern crypto_il_tab
15
16.text
17
18#include <linux/linkage.h>
19#include <asm/asm-offsets.h>
20
21#define R1 %rax
22#define R1E %eax
23#define R1X %ax
24#define R1H %ah
25#define R1L %al
26#define R2 %rbx
27#define R2E %ebx
28#define R2X %bx
29#define R2H %bh
30#define R2L %bl
31#define R3 %rcx
32#define R3E %ecx
33#define R3X %cx
34#define R3H %ch
35#define R3L %cl
36#define R4 %rdx
37#define R4E %edx
38#define R4X %dx
39#define R4H %dh
40#define R4L %dl
41#define R5 %rsi
42#define R5E %esi
43#define R6 %rdi
44#define R6E %edi
45#define R7 %r9 /* don't use %rbp; it breaks stack traces */
46#define R7E %r9d
47#define R8 %r8
48#define R10 %r10
49#define R11 %r11
50
51#define prologue(FUNC,KEY,B128,B192,r1,r2,r5,r6,r7,r8,r9,r10,r11) \
52 ENTRY(FUNC); \
53 movq r1,r2; \
54 leaq KEY+48(r8),r9; \
55 movq r10,r11; \
56 movl (r7),r5 ## E; \
57 movl 4(r7),r1 ## E; \
58 movl 8(r7),r6 ## E; \
59 movl 12(r7),r7 ## E; \
60 movl 480(r8),r10 ## E; \
61 xorl -48(r9),r5 ## E; \
62 xorl -44(r9),r1 ## E; \
63 xorl -40(r9),r6 ## E; \
64 xorl -36(r9),r7 ## E; \
65 cmpl $24,r10 ## E; \
66 jb B128; \
67 leaq 32(r9),r9; \
68 je B192; \
69 leaq 32(r9),r9;
70
71#define epilogue(FUNC,r1,r2,r5,r6,r7,r8,r9) \
72 movq r1,r2; \
73 movl r5 ## E,(r9); \
74 movl r6 ## E,4(r9); \
75 movl r7 ## E,8(r9); \
76 movl r8 ## E,12(r9); \
77 ret; \
78 ENDPROC(FUNC);
79
80#define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \
81 movzbl r2 ## H,r5 ## E; \
82 movzbl r2 ## L,r6 ## E; \
83 movl TAB+1024(,r5,4),r5 ## E;\
84 movw r4 ## X,r2 ## X; \
85 movl TAB(,r6,4),r6 ## E; \
86 roll $16,r2 ## E; \
87 shrl $16,r4 ## E; \
88 movzbl r4 ## L,r7 ## E; \
89 movzbl r4 ## H,r4 ## E; \
90 xorl OFFSET(r8),ra ## E; \
91 xorl OFFSET+4(r8),rb ## E; \
92 xorl TAB+3072(,r4,4),r5 ## E;\
93 xorl TAB+2048(,r7,4),r6 ## E;\
94 movzbl r1 ## L,r7 ## E; \
95 movzbl r1 ## H,r4 ## E; \
96 movl TAB+1024(,r4,4),r4 ## E;\
97 movw r3 ## X,r1 ## X; \
98 roll $16,r1 ## E; \
99 shrl $16,r3 ## E; \
100 xorl TAB(,r7,4),r5 ## E; \
101 movzbl r3 ## L,r7 ## E; \
102 movzbl r3 ## H,r3 ## E; \
103 xorl TAB+3072(,r3,4),r4 ## E;\
104 xorl TAB+2048(,r7,4),r5 ## E;\
105 movzbl r1 ## L,r7 ## E; \
106 movzbl r1 ## H,r3 ## E; \
107 shrl $16,r1 ## E; \
108 xorl TAB+3072(,r3,4),r6 ## E;\
109 movl TAB+2048(,r7,4),r3 ## E;\
110 movzbl r1 ## L,r7 ## E; \
111 movzbl r1 ## H,r1 ## E; \
112 xorl TAB+1024(,r1,4),r6 ## E;\
113 xorl TAB(,r7,4),r3 ## E; \
114 movzbl r2 ## H,r1 ## E; \
115 movzbl r2 ## L,r7 ## E; \
116 shrl $16,r2 ## E; \
117 xorl TAB+3072(,r1,4),r3 ## E;\
118 xorl TAB+2048(,r7,4),r4 ## E;\
119 movzbl r2 ## H,r1 ## E; \
120 movzbl r2 ## L,r2 ## E; \
121 xorl OFFSET+8(r8),rc ## E; \
122 xorl OFFSET+12(r8),rd ## E; \
123 xorl TAB+1024(,r1,4),r3 ## E;\
124 xorl TAB(,r2,4),r4 ## E;
125
126#define move_regs(r1,r2,r3,r4) \
127 movl r3 ## E,r1 ## E; \
128 movl r4 ## E,r2 ## E;
129
130#define entry(FUNC,KEY,B128,B192) \
131 prologue(FUNC,KEY,B128,B192,R2,R8,R1,R3,R4,R6,R10,R5,R11)
132
133#define return(FUNC) epilogue(FUNC,R8,R2,R5,R6,R3,R4,R11)
134
135#define encrypt_round(TAB,OFFSET) \
136 round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) \
137 move_regs(R1,R2,R5,R6)
138
139#define encrypt_final(TAB,OFFSET) \
140 round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4)
141
142#define decrypt_round(TAB,OFFSET) \
143 round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) \
144 move_regs(R1,R2,R5,R6)
145
146#define decrypt_final(TAB,OFFSET) \
147 round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4)
148
149/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
150
151 entry(aes_enc_blk,0,.Le128,.Le192)
152 encrypt_round(crypto_ft_tab,-96)
153 encrypt_round(crypto_ft_tab,-80)
154.Le192: encrypt_round(crypto_ft_tab,-64)
155 encrypt_round(crypto_ft_tab,-48)
156.Le128: encrypt_round(crypto_ft_tab,-32)
157 encrypt_round(crypto_ft_tab,-16)
158 encrypt_round(crypto_ft_tab, 0)
159 encrypt_round(crypto_ft_tab, 16)
160 encrypt_round(crypto_ft_tab, 32)
161 encrypt_round(crypto_ft_tab, 48)
162 encrypt_round(crypto_ft_tab, 64)
163 encrypt_round(crypto_ft_tab, 80)
164 encrypt_round(crypto_ft_tab, 96)
165 encrypt_final(crypto_fl_tab,112)
166 return(aes_enc_blk)
167
168/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
169
170 entry(aes_dec_blk,240,.Ld128,.Ld192)
171 decrypt_round(crypto_it_tab,-96)
172 decrypt_round(crypto_it_tab,-80)
173.Ld192: decrypt_round(crypto_it_tab,-64)
174 decrypt_round(crypto_it_tab,-48)
175.Ld128: decrypt_round(crypto_it_tab,-32)
176 decrypt_round(crypto_it_tab,-16)
177 decrypt_round(crypto_it_tab, 0)
178 decrypt_round(crypto_it_tab, 16)
179 decrypt_round(crypto_it_tab, 32)
180 decrypt_round(crypto_it_tab, 48)
181 decrypt_round(crypto_it_tab, 64)
182 decrypt_round(crypto_it_tab, 80)
183 decrypt_round(crypto_it_tab, 96)
184 decrypt_final(crypto_il_tab,112)
185 return(aes_dec_blk)
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
index 9e9d819e8bc3..7b7dc05fa1a4 100644
--- a/arch/x86/crypto/aes_glue.c
+++ b/arch/x86/crypto/aes_glue.c
@@ -1,71 +1 @@
1// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Glue Code for the asm optimized version of the AES Cipher Algorithm
4 *
5 */
6
7#include <linux/module.h>
8#include <crypto/aes.h>
9#include <asm/crypto/aes.h>
10
11asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
12asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
13
14void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
15{
16 aes_enc_blk(ctx, dst, src);
17}
18EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86);
19
20void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
21{
22 aes_dec_blk(ctx, dst, src);
23}
24EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86);
25
26static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
27{
28 aes_enc_blk(crypto_tfm_ctx(tfm), dst, src);
29}
30
31static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
32{
33 aes_dec_blk(crypto_tfm_ctx(tfm), dst, src);
34}
35
36static struct crypto_alg aes_alg = {
37 .cra_name = "aes",
38 .cra_driver_name = "aes-asm",
39 .cra_priority = 200,
40 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
41 .cra_blocksize = AES_BLOCK_SIZE,
42 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
43 .cra_module = THIS_MODULE,
44 .cra_u = {
45 .cipher = {
46 .cia_min_keysize = AES_MIN_KEY_SIZE,
47 .cia_max_keysize = AES_MAX_KEY_SIZE,
48 .cia_setkey = crypto_aes_set_key,
49 .cia_encrypt = aes_encrypt,
50 .cia_decrypt = aes_decrypt
51 }
52 }
53};
54
55static int __init aes_init(void)
56{
57 return crypto_register_alg(&aes_alg);
58}
59
60static void __exit aes_fini(void)
61{
62 crypto_unregister_alg(&aes_alg);
63}
64
65module_init(aes_init);
66module_exit(aes_fini);
67
68MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
69MODULE_LICENSE("GPL");
70MODULE_ALIAS_CRYPTO("aes");
71MODULE_ALIAS_CRYPTO("aes-asm");
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 73c0ccb009a0..3e707e81afdb 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -26,7 +26,6 @@
26#include <crypto/gcm.h> 26#include <crypto/gcm.h>
27#include <crypto/xts.h> 27#include <crypto/xts.h>
28#include <asm/cpu_device_id.h> 28#include <asm/cpu_device_id.h>
29#include <asm/crypto/aes.h>
30#include <asm/simd.h> 29#include <asm/simd.h>
31#include <crypto/scatterwalk.h> 30#include <crypto/scatterwalk.h>
32#include <crypto/internal/aead.h> 31#include <crypto/internal/aead.h>
@@ -329,7 +328,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
329 } 328 }
330 329
331 if (!crypto_simd_usable()) 330 if (!crypto_simd_usable())
332 err = crypto_aes_expand_key(ctx, in_key, key_len); 331 err = aes_expandkey(ctx, in_key, key_len);
333 else { 332 else {
334 kernel_fpu_begin(); 333 kernel_fpu_begin();
335 err = aesni_set_key(ctx, in_key, key_len); 334 err = aesni_set_key(ctx, in_key, key_len);
@@ -345,26 +344,26 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
345 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); 344 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
346} 345}
347 346
348static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 347static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
349{ 348{
350 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 349 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
351 350
352 if (!crypto_simd_usable()) 351 if (!crypto_simd_usable()) {
353 crypto_aes_encrypt_x86(ctx, dst, src); 352 aes_encrypt(ctx, dst, src);
354 else { 353 } else {
355 kernel_fpu_begin(); 354 kernel_fpu_begin();
356 aesni_enc(ctx, dst, src); 355 aesni_enc(ctx, dst, src);
357 kernel_fpu_end(); 356 kernel_fpu_end();
358 } 357 }
359} 358}
360 359
361static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 360static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
362{ 361{
363 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 362 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
364 363
365 if (!crypto_simd_usable()) 364 if (!crypto_simd_usable()) {
366 crypto_aes_decrypt_x86(ctx, dst, src); 365 aes_decrypt(ctx, dst, src);
367 else { 366 } else {
368 kernel_fpu_begin(); 367 kernel_fpu_begin();
369 aesni_dec(ctx, dst, src); 368 aesni_dec(ctx, dst, src);
370 kernel_fpu_end(); 369 kernel_fpu_end();
@@ -610,7 +609,8 @@ static int xts_encrypt(struct skcipher_request *req)
610 return glue_xts_req_128bit(&aesni_enc_xts, req, 609 return glue_xts_req_128bit(&aesni_enc_xts, req,
611 XTS_TWEAK_CAST(aesni_xts_tweak), 610 XTS_TWEAK_CAST(aesni_xts_tweak),
612 aes_ctx(ctx->raw_tweak_ctx), 611 aes_ctx(ctx->raw_tweak_ctx),
613 aes_ctx(ctx->raw_crypt_ctx)); 612 aes_ctx(ctx->raw_crypt_ctx),
613 false);
614} 614}
615 615
616static int xts_decrypt(struct skcipher_request *req) 616static int xts_decrypt(struct skcipher_request *req)
@@ -621,32 +621,28 @@ static int xts_decrypt(struct skcipher_request *req)
621 return glue_xts_req_128bit(&aesni_dec_xts, req, 621 return glue_xts_req_128bit(&aesni_dec_xts, req,
622 XTS_TWEAK_CAST(aesni_xts_tweak), 622 XTS_TWEAK_CAST(aesni_xts_tweak),
623 aes_ctx(ctx->raw_tweak_ctx), 623 aes_ctx(ctx->raw_tweak_ctx),
624 aes_ctx(ctx->raw_crypt_ctx)); 624 aes_ctx(ctx->raw_crypt_ctx),
625 true);
625} 626}
626 627
627static int 628static int
628rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) 629rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
629{ 630{
630 struct crypto_cipher *tfm; 631 struct crypto_aes_ctx ctx;
631 int ret; 632 int ret;
632 633
633 tfm = crypto_alloc_cipher("aes", 0, 0); 634 ret = aes_expandkey(&ctx, key, key_len);
634 if (IS_ERR(tfm))
635 return PTR_ERR(tfm);
636
637 ret = crypto_cipher_setkey(tfm, key, key_len);
638 if (ret) 635 if (ret)
639 goto out_free_cipher; 636 return ret;
640 637
641 /* Clear the data in the hash sub key container to zero.*/ 638 /* Clear the data in the hash sub key container to zero.*/
642 /* We want to cipher all zeros to create the hash sub key. */ 639 /* We want to cipher all zeros to create the hash sub key. */
643 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); 640 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
644 641
645 crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey); 642 aes_encrypt(&ctx, hash_subkey, hash_subkey);
646 643
647out_free_cipher: 644 memzero_explicit(&ctx, sizeof(ctx));
648 crypto_free_cipher(tfm); 645 return 0;
649 return ret;
650} 646}
651 647
652static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, 648static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
@@ -919,8 +915,8 @@ static struct crypto_alg aesni_cipher_alg = {
919 .cia_min_keysize = AES_MIN_KEY_SIZE, 915 .cia_min_keysize = AES_MIN_KEY_SIZE,
920 .cia_max_keysize = AES_MAX_KEY_SIZE, 916 .cia_max_keysize = AES_MAX_KEY_SIZE,
921 .cia_setkey = aes_set_key, 917 .cia_setkey = aes_set_key,
922 .cia_encrypt = aes_encrypt, 918 .cia_encrypt = aesni_encrypt,
923 .cia_decrypt = aes_decrypt 919 .cia_decrypt = aesni_decrypt
924 } 920 }
925 } 921 }
926}; 922};
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index abf298c272dc..a4f00128ea55 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -182,7 +182,7 @@ static int xts_encrypt(struct skcipher_request *req)
182 182
183 return glue_xts_req_128bit(&camellia_enc_xts, req, 183 return glue_xts_req_128bit(&camellia_enc_xts, req,
184 XTS_TWEAK_CAST(camellia_enc_blk), 184 XTS_TWEAK_CAST(camellia_enc_blk),
185 &ctx->tweak_ctx, &ctx->crypt_ctx); 185 &ctx->tweak_ctx, &ctx->crypt_ctx, false);
186} 186}
187 187
188static int xts_decrypt(struct skcipher_request *req) 188static int xts_decrypt(struct skcipher_request *req)
@@ -192,7 +192,7 @@ static int xts_decrypt(struct skcipher_request *req)
192 192
193 return glue_xts_req_128bit(&camellia_dec_xts, req, 193 return glue_xts_req_128bit(&camellia_dec_xts, req,
194 XTS_TWEAK_CAST(camellia_enc_blk), 194 XTS_TWEAK_CAST(camellia_enc_blk),
195 &ctx->tweak_ctx, &ctx->crypt_ctx); 195 &ctx->tweak_ctx, &ctx->crypt_ctx, true);
196} 196}
197 197
198static struct skcipher_alg camellia_algs[] = { 198static struct skcipher_alg camellia_algs[] = {
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 0c22d84750a3..f28d282779b8 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -208,7 +208,7 @@ static int xts_encrypt(struct skcipher_request *req)
208 208
209 return glue_xts_req_128bit(&camellia_enc_xts, req, 209 return glue_xts_req_128bit(&camellia_enc_xts, req,
210 XTS_TWEAK_CAST(camellia_enc_blk), 210 XTS_TWEAK_CAST(camellia_enc_blk),
211 &ctx->tweak_ctx, &ctx->crypt_ctx); 211 &ctx->tweak_ctx, &ctx->crypt_ctx, false);
212} 212}
213 213
214static int xts_decrypt(struct skcipher_request *req) 214static int xts_decrypt(struct skcipher_request *req)
@@ -218,7 +218,7 @@ static int xts_decrypt(struct skcipher_request *req)
218 218
219 return glue_xts_req_128bit(&camellia_dec_xts, req, 219 return glue_xts_req_128bit(&camellia_dec_xts, req,
220 XTS_TWEAK_CAST(camellia_enc_blk), 220 XTS_TWEAK_CAST(camellia_enc_blk),
221 &ctx->tweak_ctx, &ctx->crypt_ctx); 221 &ctx->tweak_ctx, &ctx->crypt_ctx, true);
222} 222}
223 223
224static struct skcipher_alg camellia_algs[] = { 224static struct skcipher_alg camellia_algs[] = {
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 645f8f16815c..a8a38fffb4a9 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -201,7 +201,7 @@ static int xts_encrypt(struct skcipher_request *req)
201 201
202 return glue_xts_req_128bit(&cast6_enc_xts, req, 202 return glue_xts_req_128bit(&cast6_enc_xts, req,
203 XTS_TWEAK_CAST(__cast6_encrypt), 203 XTS_TWEAK_CAST(__cast6_encrypt),
204 &ctx->tweak_ctx, &ctx->crypt_ctx); 204 &ctx->tweak_ctx, &ctx->crypt_ctx, false);
205} 205}
206 206
207static int xts_decrypt(struct skcipher_request *req) 207static int xts_decrypt(struct skcipher_request *req)
@@ -211,7 +211,7 @@ static int xts_decrypt(struct skcipher_request *req)
211 211
212 return glue_xts_req_128bit(&cast6_dec_xts, req, 212 return glue_xts_req_128bit(&cast6_dec_xts, req,
213 XTS_TWEAK_CAST(__cast6_encrypt), 213 XTS_TWEAK_CAST(__cast6_encrypt),
214 &ctx->tweak_ctx, &ctx->crypt_ctx); 214 &ctx->tweak_ctx, &ctx->crypt_ctx, true);
215} 215}
216 216
217static struct skcipher_alg cast6_algs[] = { 217static struct skcipher_alg cast6_algs[] = {
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 968386c21ef4..89830e531350 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -19,8 +19,8 @@
19#include <linux/types.h> 19#include <linux/types.h>
20 20
21struct des3_ede_x86_ctx { 21struct des3_ede_x86_ctx {
22 u32 enc_expkey[DES3_EDE_EXPKEY_WORDS]; 22 struct des3_ede_ctx enc;
23 u32 dec_expkey[DES3_EDE_EXPKEY_WORDS]; 23 struct des3_ede_ctx dec;
24}; 24};
25 25
26/* regular block cipher functions */ 26/* regular block cipher functions */
@@ -34,7 +34,7 @@ asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
34static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, 34static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
35 const u8 *src) 35 const u8 *src)
36{ 36{
37 u32 *enc_ctx = ctx->enc_expkey; 37 u32 *enc_ctx = ctx->enc.expkey;
38 38
39 des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); 39 des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
40} 40}
@@ -42,7 +42,7 @@ static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
42static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, 42static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
43 const u8 *src) 43 const u8 *src)
44{ 44{
45 u32 *dec_ctx = ctx->dec_expkey; 45 u32 *dec_ctx = ctx->dec.expkey;
46 46
47 des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); 47 des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
48} 48}
@@ -50,7 +50,7 @@ static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
50static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, 50static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
51 const u8 *src) 51 const u8 *src)
52{ 52{
53 u32 *enc_ctx = ctx->enc_expkey; 53 u32 *enc_ctx = ctx->enc.expkey;
54 54
55 des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src); 55 des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src);
56} 56}
@@ -58,7 +58,7 @@ static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
58static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, 58static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
59 const u8 *src) 59 const u8 *src)
60{ 60{
61 u32 *dec_ctx = ctx->dec_expkey; 61 u32 *dec_ctx = ctx->dec.expkey;
62 62
63 des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); 63 des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
64} 64}
@@ -122,7 +122,7 @@ static int ecb_encrypt(struct skcipher_request *req)
122 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 122 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
123 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); 123 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
124 124
125 return ecb_crypt(req, ctx->enc_expkey); 125 return ecb_crypt(req, ctx->enc.expkey);
126} 126}
127 127
128static int ecb_decrypt(struct skcipher_request *req) 128static int ecb_decrypt(struct skcipher_request *req)
@@ -130,7 +130,7 @@ static int ecb_decrypt(struct skcipher_request *req)
130 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 130 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
131 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); 131 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
132 132
133 return ecb_crypt(req, ctx->dec_expkey); 133 return ecb_crypt(req, ctx->dec.expkey);
134} 134}
135 135
136static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx, 136static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
@@ -348,20 +348,28 @@ static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
348 u32 i, j, tmp; 348 u32 i, j, tmp;
349 int err; 349 int err;
350 350
351 /* Generate encryption context using generic implementation. */ 351 err = des3_ede_expand_key(&ctx->enc, key, keylen);
352 err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen); 352 if (err == -ENOKEY) {
353 if (err < 0) 353 if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
354 err = -EINVAL;
355 else
356 err = 0;
357 }
358
359 if (err) {
360 memset(ctx, 0, sizeof(*ctx));
354 return err; 361 return err;
362 }
355 363
356 /* Fix encryption context for this implementation and form decryption 364 /* Fix encryption context for this implementation and form decryption
357 * context. */ 365 * context. */
358 j = DES3_EDE_EXPKEY_WORDS - 2; 366 j = DES3_EDE_EXPKEY_WORDS - 2;
359 for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { 367 for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
360 tmp = ror32(ctx->enc_expkey[i + 1], 4); 368 tmp = ror32(ctx->enc.expkey[i + 1], 4);
361 ctx->enc_expkey[i + 1] = tmp; 369 ctx->enc.expkey[i + 1] = tmp;
362 370
363 ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0]; 371 ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0];
364 ctx->dec_expkey[j + 1] = tmp; 372 ctx->dec.expkey[j + 1] = tmp;
365 } 373 }
366 374
367 return 0; 375 return 0;
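
Note on the setkey hunk above: it moves the x86 3DES glue from open-coded key expansion to the des3_ede_expand_key() library helper, which reports a weak key as -ENOKEY; the caller then makes that fatal only when CRYPTO_TFM_REQ_FORBID_WEAK_KEYS is set, and wipes the context on any failure. Below is a minimal standalone sketch of that policy decision, outside the kernel: expand_key() and FORBID_WEAK_KEYS are illustrative stand-ins for des3_ede_expand_key() and CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, and the "weak key" test is deliberately simplistic.

/* Standalone sketch (not kernel code) of the weak-key policy in the hunk
 * above: the expansion helper flags a weak key with -ENOKEY, and the caller
 * turns that into a hard error only when weak keys are forbidden. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define FORBID_WEAK_KEYS 0x1   /* stand-in for CRYPTO_TFM_REQ_FORBID_WEAK_KEYS */

static int expand_key(const unsigned char *key, size_t keylen)
{
	/* Toy check: treat K1 == K2 == K3 as a weak/degenerate 3DES key. */
	if (keylen == 24 && !memcmp(key, key + 8, 8) && !memcmp(key + 8, key + 16, 8))
		return -ENOKEY;
	return 0;
}

static int setkey(const unsigned char *key, size_t keylen, unsigned int flags)
{
	int err = expand_key(key, keylen);

	if (err == -ENOKEY)
		err = (flags & FORBID_WEAK_KEYS) ? -EINVAL : 0;

	return err;
}

int main(void)
{
	unsigned char weak[24] = { 0 };   /* K1 == K2 == K3 */

	printf("lenient: %d\n", setkey(weak, sizeof(weak), 0));                /* 0 */
	printf("strict:  %d\n", setkey(weak, sizeof(weak), FORBID_WEAK_KEYS)); /* -EINVAL */
	return 0;
}
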
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index ac76fe88ac4f..04d72a5a8ce9 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -357,6 +357,5 @@ module_init(ghash_pclmulqdqni_mod_init);
357module_exit(ghash_pclmulqdqni_mod_exit); 357module_exit(ghash_pclmulqdqni_mod_exit);
358 358
359MODULE_LICENSE("GPL"); 359MODULE_LICENSE("GPL");
360MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " 360MODULE_DESCRIPTION("GHASH hash function, accelerated by PCLMULQDQ-NI");
361 "accelerated by PCLMULQDQ-NI");
362MODULE_ALIAS_CRYPTO("ghash"); 361MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 901551445387..d15b99397480 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -14,6 +14,7 @@
14#include <crypto/b128ops.h> 14#include <crypto/b128ops.h>
15#include <crypto/gf128mul.h> 15#include <crypto/gf128mul.h>
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h>
17#include <crypto/xts.h> 18#include <crypto/xts.h>
18#include <asm/crypto/glue_helper.h> 19#include <asm/crypto/glue_helper.h>
19 20
@@ -259,17 +260,36 @@ done:
259int glue_xts_req_128bit(const struct common_glue_ctx *gctx, 260int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
260 struct skcipher_request *req, 261 struct skcipher_request *req,
261 common_glue_func_t tweak_fn, void *tweak_ctx, 262 common_glue_func_t tweak_fn, void *tweak_ctx,
262 void *crypt_ctx) 263 void *crypt_ctx, bool decrypt)
263{ 264{
265 const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
264 const unsigned int bsize = 128 / 8; 266 const unsigned int bsize = 128 / 8;
267 struct skcipher_request subreq;
265 struct skcipher_walk walk; 268 struct skcipher_walk walk;
266 bool fpu_enabled = false; 269 bool fpu_enabled = false;
267 unsigned int nbytes; 270 unsigned int nbytes, tail;
268 int err; 271 int err;
269 272
273 if (req->cryptlen < XTS_BLOCK_SIZE)
274 return -EINVAL;
275
276 if (unlikely(cts)) {
277 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
278
279 tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;
280
281 skcipher_request_set_tfm(&subreq, tfm);
282 skcipher_request_set_callback(&subreq,
283 crypto_skcipher_get_flags(tfm),
284 NULL, NULL);
285 skcipher_request_set_crypt(&subreq, req->src, req->dst,
286 req->cryptlen - tail, req->iv);
287 req = &subreq;
288 }
289
270 err = skcipher_walk_virt(&walk, req, false); 290 err = skcipher_walk_virt(&walk, req, false);
271 nbytes = walk.nbytes; 291 nbytes = walk.nbytes;
272 if (!nbytes) 292 if (err)
273 return err; 293 return err;
274 294
275 /* set minimum length to bsize, for tweak_fn */ 295 /* set minimum length to bsize, for tweak_fn */
@@ -287,6 +307,47 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
287 nbytes = walk.nbytes; 307 nbytes = walk.nbytes;
288 } 308 }
289 309
310 if (unlikely(cts)) {
311 u8 *next_tweak, *final_tweak = req->iv;
312 struct scatterlist *src, *dst;
313 struct scatterlist s[2], d[2];
314 le128 b[2];
315
316 dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
317 if (req->dst != req->src)
318 dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);
319
320 if (decrypt) {
321 next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
322 gf128mul_x_ble(b, b);
323 } else {
324 next_tweak = req->iv;
325 }
326
327 skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE,
328 next_tweak);
329
330 err = skcipher_walk_virt(&walk, req, false) ?:
331 skcipher_walk_done(&walk,
332 __glue_xts_req_128bit(gctx, crypt_ctx, &walk));
333 if (err)
334 goto out;
335
336 scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0);
337 memcpy(b + 1, b, tail - XTS_BLOCK_SIZE);
338 scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE,
339 tail - XTS_BLOCK_SIZE, 0);
340 scatterwalk_map_and_copy(b, dst, 0, tail, 1);
341
342 skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE,
343 final_tweak);
344
345 err = skcipher_walk_virt(&walk, req, false) ?:
346 skcipher_walk_done(&walk,
347 __glue_xts_req_128bit(gctx, crypt_ctx, &walk));
348 }
349
350out:
290 glue_fpu_end(fpu_enabled); 351 glue_fpu_end(fpu_enabled);
291 352
292 return err; 353 return err;
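
Note on the glue_helper.c hunk above: it teaches glue_xts_req_128bit() to do ciphertext stealing, so request lengths that are not a multiple of the 16-byte XTS block are no longer mishandled. The bulk of the data minus a "tail" of (cryptlen % 16) + 16 bytes goes through the normal block path via a subrequest, and the last full block plus the partial block are then processed with the CTS swap. A standalone sketch of the length bookkeeping only (no crypto) follows; split_request() is a hypothetical helper using the same tail computation as the patch.

/* Standalone sketch of the XTS ciphertext-stealing length split used above:
 * when cryptlen is not a multiple of XTS_BLOCK_SIZE, the partial block plus
 * one full block are carved off as the tail and handled separately. */
#include <stdbool.h>
#include <stdio.h>

#define XTS_BLOCK_SIZE 16

static void split_request(unsigned int cryptlen)
{
	bool cts = (cryptlen % XTS_BLOCK_SIZE) != 0;
	unsigned int tail = 0, bulk = cryptlen;

	if (cryptlen < XTS_BLOCK_SIZE) {
		printf("cryptlen=%u: rejected\n", cryptlen);
		return;
	}

	if (cts) {
		tail = cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE; /* partial + one full block */
		bulk = cryptlen - tail;
	}

	printf("cryptlen=%u: bulk=%u tail=%u cts=%s\n",
	       cryptlen, bulk, tail, cts ? "yes" : "no");
}

int main(void)
{
	split_request(10);   /* shorter than one block: rejected */
	split_request(32);   /* exact multiple: no stealing needed */
	split_request(37);   /* bulk=16, tail=21: stealing path */
	return 0;
}
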
diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S
deleted file mode 100644
index 5413fee33481..000000000000
--- a/arch/x86/crypto/morus1280-avx2-asm.S
+++ /dev/null
@@ -1,619 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * AVX2 implementation of MORUS-1280
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <linux/linkage.h>
10#include <asm/frame.h>
11
12#define SHUFFLE_MASK(i0, i1, i2, i3) \
13 (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6))
14
15#define MASK1 SHUFFLE_MASK(3, 0, 1, 2)
16#define MASK2 SHUFFLE_MASK(2, 3, 0, 1)
17#define MASK3 SHUFFLE_MASK(1, 2, 3, 0)
18
19#define STATE0 %ymm0
20#define STATE0_LOW %xmm0
21#define STATE1 %ymm1
22#define STATE2 %ymm2
23#define STATE3 %ymm3
24#define STATE4 %ymm4
25#define KEY %ymm5
26#define MSG %ymm5
27#define MSG_LOW %xmm5
28#define T0 %ymm6
29#define T0_LOW %xmm6
30#define T1 %ymm7
31
32.section .rodata.cst32.morus1280_const, "aM", @progbits, 32
33.align 32
34.Lmorus1280_const:
35 .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d
36 .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62
37 .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
38 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd
39
40.section .rodata.cst32.morus1280_counter, "aM", @progbits, 32
41.align 32
42.Lmorus1280_counter:
43 .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
44 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
45 .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
46 .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
47
48.text
49
50.macro morus1280_round s0, s1, s2, s3, s4, b, w
51 vpand \s1, \s2, T0
52 vpxor T0, \s0, \s0
53 vpxor \s3, \s0, \s0
54 vpsllq $\b, \s0, T0
55 vpsrlq $(64 - \b), \s0, \s0
56 vpxor T0, \s0, \s0
57 vpermq $\w, \s3, \s3
58.endm
59
60/*
61 * __morus1280_update: internal ABI
62 * input:
63 * STATE[0-4] - input state
64 * MSG - message block
65 * output:
66 * STATE[0-4] - output state
67 * changed:
68 * T0
69 */
70__morus1280_update:
71 morus1280_round STATE0, STATE1, STATE2, STATE3, STATE4, 13, MASK1
72 vpxor MSG, STATE1, STATE1
73 morus1280_round STATE1, STATE2, STATE3, STATE4, STATE0, 46, MASK2
74 vpxor MSG, STATE2, STATE2
75 morus1280_round STATE2, STATE3, STATE4, STATE0, STATE1, 38, MASK3
76 vpxor MSG, STATE3, STATE3
77 morus1280_round STATE3, STATE4, STATE0, STATE1, STATE2, 7, MASK2
78 vpxor MSG, STATE4, STATE4
79 morus1280_round STATE4, STATE0, STATE1, STATE2, STATE3, 4, MASK1
80 ret
81ENDPROC(__morus1280_update)
82
83/*
84 * __morus1280_update_zero: internal ABI
85 * input:
86 * STATE[0-4] - input state
87 * output:
88 * STATE[0-4] - output state
89 * changed:
90 * T0
91 */
92__morus1280_update_zero:
93 morus1280_round STATE0, STATE1, STATE2, STATE3, STATE4, 13, MASK1
94 morus1280_round STATE1, STATE2, STATE3, STATE4, STATE0, 46, MASK2
95 morus1280_round STATE2, STATE3, STATE4, STATE0, STATE1, 38, MASK3
96 morus1280_round STATE3, STATE4, STATE0, STATE1, STATE2, 7, MASK2
97 morus1280_round STATE4, STATE0, STATE1, STATE2, STATE3, 4, MASK1
98 ret
99ENDPROC(__morus1280_update_zero)
100
101/*
102 * __load_partial: internal ABI
103 * input:
104 * %rsi - src
105 * %rcx - bytes
106 * output:
107 * MSG - message block
108 * changed:
109 * %r8
110 * %r9
111 */
112__load_partial:
113 xor %r9d, %r9d
114 vpxor MSG, MSG, MSG
115
116 mov %rcx, %r8
117 and $0x1, %r8
118 jz .Lld_partial_1
119
120 mov %rcx, %r8
121 and $0x1E, %r8
122 add %rsi, %r8
123 mov (%r8), %r9b
124
125.Lld_partial_1:
126 mov %rcx, %r8
127 and $0x2, %r8
128 jz .Lld_partial_2
129
130 mov %rcx, %r8
131 and $0x1C, %r8
132 add %rsi, %r8
133 shl $16, %r9
134 mov (%r8), %r9w
135
136.Lld_partial_2:
137 mov %rcx, %r8
138 and $0x4, %r8
139 jz .Lld_partial_4
140
141 mov %rcx, %r8
142 and $0x18, %r8
143 add %rsi, %r8
144 shl $32, %r9
145 mov (%r8), %r8d
146 xor %r8, %r9
147
148.Lld_partial_4:
149 movq %r9, MSG_LOW
150
151 mov %rcx, %r8
152 and $0x8, %r8
153 jz .Lld_partial_8
154
155 mov %rcx, %r8
156 and $0x10, %r8
157 add %rsi, %r8
158 pshufd $MASK2, MSG_LOW, MSG_LOW
159 pinsrq $0, (%r8), MSG_LOW
160
161.Lld_partial_8:
162 mov %rcx, %r8
163 and $0x10, %r8
164 jz .Lld_partial_16
165
166 vpermq $MASK2, MSG, MSG
167 movdqu (%rsi), MSG_LOW
168
169.Lld_partial_16:
170 ret
171ENDPROC(__load_partial)
172
173/*
174 * __store_partial: internal ABI
175 * input:
176 * %rdx - dst
177 * %rcx - bytes
178 * output:
179 * T0 - message block
180 * changed:
181 * %r8
182 * %r9
183 * %r10
184 */
185__store_partial:
186 mov %rcx, %r8
187 mov %rdx, %r9
188
189 cmp $16, %r8
190 jl .Lst_partial_16
191
192 movdqu T0_LOW, (%r9)
193 vpermq $MASK2, T0, T0
194
195 sub $16, %r8
196 add $16, %r9
197
198.Lst_partial_16:
199 movq T0_LOW, %r10
200
201 cmp $8, %r8
202 jl .Lst_partial_8
203
204 mov %r10, (%r9)
205 pextrq $1, T0_LOW, %r10
206
207 sub $8, %r8
208 add $8, %r9
209
210.Lst_partial_8:
211 cmp $4, %r8
212 jl .Lst_partial_4
213
214 mov %r10d, (%r9)
215 shr $32, %r10
216
217 sub $4, %r8
218 add $4, %r9
219
220.Lst_partial_4:
221 cmp $2, %r8
222 jl .Lst_partial_2
223
224 mov %r10w, (%r9)
225 shr $16, %r10
226
227 sub $2, %r8
228 add $2, %r9
229
230.Lst_partial_2:
231 cmp $1, %r8
232 jl .Lst_partial_1
233
234 mov %r10b, (%r9)
235
236.Lst_partial_1:
237 ret
238ENDPROC(__store_partial)
239
240/*
241 * void crypto_morus1280_avx2_init(void *state, const void *key,
242 * const void *iv);
243 */
244ENTRY(crypto_morus1280_avx2_init)
245 FRAME_BEGIN
246
247 /* load IV: */
248 vpxor STATE0, STATE0, STATE0
249 movdqu (%rdx), STATE0_LOW
250 /* load key: */
251 vmovdqu (%rsi), KEY
252 vmovdqa KEY, STATE1
253 /* load all ones: */
254 vpcmpeqd STATE2, STATE2, STATE2
255 /* load all zeros: */
256 vpxor STATE3, STATE3, STATE3
257 /* load the constant: */
258 vmovdqa .Lmorus1280_const, STATE4
259
260 /* update 16 times with zero: */
261 call __morus1280_update_zero
262 call __morus1280_update_zero
263 call __morus1280_update_zero
264 call __morus1280_update_zero
265 call __morus1280_update_zero
266 call __morus1280_update_zero
267 call __morus1280_update_zero
268 call __morus1280_update_zero
269 call __morus1280_update_zero
270 call __morus1280_update_zero
271 call __morus1280_update_zero
272 call __morus1280_update_zero
273 call __morus1280_update_zero
274 call __morus1280_update_zero
275 call __morus1280_update_zero
276 call __morus1280_update_zero
277
278 /* xor-in the key again after updates: */
279 vpxor KEY, STATE1, STATE1
280
281 /* store the state: */
282 vmovdqu STATE0, (0 * 32)(%rdi)
283 vmovdqu STATE1, (1 * 32)(%rdi)
284 vmovdqu STATE2, (2 * 32)(%rdi)
285 vmovdqu STATE3, (3 * 32)(%rdi)
286 vmovdqu STATE4, (4 * 32)(%rdi)
287
288 FRAME_END
289 ret
290ENDPROC(crypto_morus1280_avx2_init)
291
292/*
293 * void crypto_morus1280_avx2_ad(void *state, const void *data,
294 * unsigned int length);
295 */
296ENTRY(crypto_morus1280_avx2_ad)
297 FRAME_BEGIN
298
299 cmp $32, %rdx
300 jb .Lad_out
301
302 /* load the state: */
303 vmovdqu (0 * 32)(%rdi), STATE0
304 vmovdqu (1 * 32)(%rdi), STATE1
305 vmovdqu (2 * 32)(%rdi), STATE2
306 vmovdqu (3 * 32)(%rdi), STATE3
307 vmovdqu (4 * 32)(%rdi), STATE4
308
309 mov %rsi, %r8
310 and $0x1F, %r8
311 jnz .Lad_u_loop
312
313.align 4
314.Lad_a_loop:
315 vmovdqa (%rsi), MSG
316 call __morus1280_update
317 sub $32, %rdx
318 add $32, %rsi
319 cmp $32, %rdx
320 jge .Lad_a_loop
321
322 jmp .Lad_cont
323.align 4
324.Lad_u_loop:
325 vmovdqu (%rsi), MSG
326 call __morus1280_update
327 sub $32, %rdx
328 add $32, %rsi
329 cmp $32, %rdx
330 jge .Lad_u_loop
331
332.Lad_cont:
333 /* store the state: */
334 vmovdqu STATE0, (0 * 32)(%rdi)
335 vmovdqu STATE1, (1 * 32)(%rdi)
336 vmovdqu STATE2, (2 * 32)(%rdi)
337 vmovdqu STATE3, (3 * 32)(%rdi)
338 vmovdqu STATE4, (4 * 32)(%rdi)
339
340.Lad_out:
341 FRAME_END
342 ret
343ENDPROC(crypto_morus1280_avx2_ad)
344
345/*
346 * void crypto_morus1280_avx2_enc(void *state, const void *src, void *dst,
347 * unsigned int length);
348 */
349ENTRY(crypto_morus1280_avx2_enc)
350 FRAME_BEGIN
351
352 cmp $32, %rcx
353 jb .Lenc_out
354
355 /* load the state: */
356 vmovdqu (0 * 32)(%rdi), STATE0
357 vmovdqu (1 * 32)(%rdi), STATE1
358 vmovdqu (2 * 32)(%rdi), STATE2
359 vmovdqu (3 * 32)(%rdi), STATE3
360 vmovdqu (4 * 32)(%rdi), STATE4
361
362 mov %rsi, %r8
363 or %rdx, %r8
364 and $0x1F, %r8
365 jnz .Lenc_u_loop
366
367.align 4
368.Lenc_a_loop:
369 vmovdqa (%rsi), MSG
370 vmovdqa MSG, T0
371 vpxor STATE0, T0, T0
372 vpermq $MASK3, STATE1, T1
373 vpxor T1, T0, T0
374 vpand STATE2, STATE3, T1
375 vpxor T1, T0, T0
376 vmovdqa T0, (%rdx)
377
378 call __morus1280_update
379 sub $32, %rcx
380 add $32, %rsi
381 add $32, %rdx
382 cmp $32, %rcx
383 jge .Lenc_a_loop
384
385 jmp .Lenc_cont
386.align 4
387.Lenc_u_loop:
388 vmovdqu (%rsi), MSG
389 vmovdqa MSG, T0
390 vpxor STATE0, T0, T0
391 vpermq $MASK3, STATE1, T1
392 vpxor T1, T0, T0
393 vpand STATE2, STATE3, T1
394 vpxor T1, T0, T0
395 vmovdqu T0, (%rdx)
396
397 call __morus1280_update
398 sub $32, %rcx
399 add $32, %rsi
400 add $32, %rdx
401 cmp $32, %rcx
402 jge .Lenc_u_loop
403
404.Lenc_cont:
405 /* store the state: */
406 vmovdqu STATE0, (0 * 32)(%rdi)
407 vmovdqu STATE1, (1 * 32)(%rdi)
408 vmovdqu STATE2, (2 * 32)(%rdi)
409 vmovdqu STATE3, (3 * 32)(%rdi)
410 vmovdqu STATE4, (4 * 32)(%rdi)
411
412.Lenc_out:
413 FRAME_END
414 ret
415ENDPROC(crypto_morus1280_avx2_enc)
416
417/*
418 * void crypto_morus1280_avx2_enc_tail(void *state, const void *src, void *dst,
419 * unsigned int length);
420 */
421ENTRY(crypto_morus1280_avx2_enc_tail)
422 FRAME_BEGIN
423
424 /* load the state: */
425 vmovdqu (0 * 32)(%rdi), STATE0
426 vmovdqu (1 * 32)(%rdi), STATE1
427 vmovdqu (2 * 32)(%rdi), STATE2
428 vmovdqu (3 * 32)(%rdi), STATE3
429 vmovdqu (4 * 32)(%rdi), STATE4
430
431 /* encrypt message: */
432 call __load_partial
433
434 vmovdqa MSG, T0
435 vpxor STATE0, T0, T0
436 vpermq $MASK3, STATE1, T1
437 vpxor T1, T0, T0
438 vpand STATE2, STATE3, T1
439 vpxor T1, T0, T0
440
441 call __store_partial
442
443 call __morus1280_update
444
445 /* store the state: */
446 vmovdqu STATE0, (0 * 32)(%rdi)
447 vmovdqu STATE1, (1 * 32)(%rdi)
448 vmovdqu STATE2, (2 * 32)(%rdi)
449 vmovdqu STATE3, (3 * 32)(%rdi)
450 vmovdqu STATE4, (4 * 32)(%rdi)
451
452 FRAME_END
453 ret
454ENDPROC(crypto_morus1280_avx2_enc_tail)
455
456/*
457 * void crypto_morus1280_avx2_dec(void *state, const void *src, void *dst,
458 * unsigned int length);
459 */
460ENTRY(crypto_morus1280_avx2_dec)
461 FRAME_BEGIN
462
463 cmp $32, %rcx
464 jb .Ldec_out
465
466 /* load the state: */
467 vmovdqu (0 * 32)(%rdi), STATE0
468 vmovdqu (1 * 32)(%rdi), STATE1
469 vmovdqu (2 * 32)(%rdi), STATE2
470 vmovdqu (3 * 32)(%rdi), STATE3
471 vmovdqu (4 * 32)(%rdi), STATE4
472
473 mov %rsi, %r8
474 or %rdx, %r8
475 and $0x1F, %r8
476 jnz .Ldec_u_loop
477
478.align 4
479.Ldec_a_loop:
480 vmovdqa (%rsi), MSG
481 vpxor STATE0, MSG, MSG
482 vpermq $MASK3, STATE1, T0
483 vpxor T0, MSG, MSG
484 vpand STATE2, STATE3, T0
485 vpxor T0, MSG, MSG
486 vmovdqa MSG, (%rdx)
487
488 call __morus1280_update
489 sub $32, %rcx
490 add $32, %rsi
491 add $32, %rdx
492 cmp $32, %rcx
493 jge .Ldec_a_loop
494
495 jmp .Ldec_cont
496.align 4
497.Ldec_u_loop:
498 vmovdqu (%rsi), MSG
499 vpxor STATE0, MSG, MSG
500 vpermq $MASK3, STATE1, T0
501 vpxor T0, MSG, MSG
502 vpand STATE2, STATE3, T0
503 vpxor T0, MSG, MSG
504 vmovdqu MSG, (%rdx)
505
506 call __morus1280_update
507 sub $32, %rcx
508 add $32, %rsi
509 add $32, %rdx
510 cmp $32, %rcx
511 jge .Ldec_u_loop
512
513.Ldec_cont:
514 /* store the state: */
515 vmovdqu STATE0, (0 * 32)(%rdi)
516 vmovdqu STATE1, (1 * 32)(%rdi)
517 vmovdqu STATE2, (2 * 32)(%rdi)
518 vmovdqu STATE3, (3 * 32)(%rdi)
519 vmovdqu STATE4, (4 * 32)(%rdi)
520
521.Ldec_out:
522 FRAME_END
523 ret
524ENDPROC(crypto_morus1280_avx2_dec)
525
526/*
527 * void crypto_morus1280_avx2_dec_tail(void *state, const void *src, void *dst,
528 * unsigned int length);
529 */
530ENTRY(crypto_morus1280_avx2_dec_tail)
531 FRAME_BEGIN
532
533 /* load the state: */
534 vmovdqu (0 * 32)(%rdi), STATE0
535 vmovdqu (1 * 32)(%rdi), STATE1
536 vmovdqu (2 * 32)(%rdi), STATE2
537 vmovdqu (3 * 32)(%rdi), STATE3
538 vmovdqu (4 * 32)(%rdi), STATE4
539
540 /* decrypt message: */
541 call __load_partial
542
543 vpxor STATE0, MSG, MSG
544 vpermq $MASK3, STATE1, T0
545 vpxor T0, MSG, MSG
546 vpand STATE2, STATE3, T0
547 vpxor T0, MSG, MSG
548 vmovdqa MSG, T0
549
550 call __store_partial
551
552 /* mask with byte count: */
553 movq %rcx, T0_LOW
554 vpbroadcastb T0_LOW, T0
555 vmovdqa .Lmorus1280_counter, T1
556 vpcmpgtb T1, T0, T0
557 vpand T0, MSG, MSG
558
559 call __morus1280_update
560
561 /* store the state: */
562 vmovdqu STATE0, (0 * 32)(%rdi)
563 vmovdqu STATE1, (1 * 32)(%rdi)
564 vmovdqu STATE2, (2 * 32)(%rdi)
565 vmovdqu STATE3, (3 * 32)(%rdi)
566 vmovdqu STATE4, (4 * 32)(%rdi)
567
568 FRAME_END
569 ret
570ENDPROC(crypto_morus1280_avx2_dec_tail)
571
572/*
573 * void crypto_morus1280_avx2_final(void *state, void *tag_xor,
574 * u64 assoclen, u64 cryptlen);
575 */
576ENTRY(crypto_morus1280_avx2_final)
577 FRAME_BEGIN
578
579 /* load the state: */
580 vmovdqu (0 * 32)(%rdi), STATE0
581 vmovdqu (1 * 32)(%rdi), STATE1
582 vmovdqu (2 * 32)(%rdi), STATE2
583 vmovdqu (3 * 32)(%rdi), STATE3
584 vmovdqu (4 * 32)(%rdi), STATE4
585
586 /* xor state[0] into state[4]: */
587 vpxor STATE0, STATE4, STATE4
588
589 /* prepare length block: */
590 vpxor MSG, MSG, MSG
591 vpinsrq $0, %rdx, MSG_LOW, MSG_LOW
592 vpinsrq $1, %rcx, MSG_LOW, MSG_LOW
593 vpsllq $3, MSG, MSG /* multiply by 8 (to get bit count) */
594
595 /* update state: */
596 call __morus1280_update
597 call __morus1280_update
598 call __morus1280_update
599 call __morus1280_update
600 call __morus1280_update
601 call __morus1280_update
602 call __morus1280_update
603 call __morus1280_update
604 call __morus1280_update
605 call __morus1280_update
606
607 /* xor tag: */
608 vmovdqu (%rsi), MSG
609
610 vpxor STATE0, MSG, MSG
611 vpermq $MASK3, STATE1, T0
612 vpxor T0, MSG, MSG
613 vpand STATE2, STATE3, T0
614 vpxor T0, MSG, MSG
615 vmovdqu MSG, (%rsi)
616
617 FRAME_END
618 ret
619ENDPROC(crypto_morus1280_avx2_final)
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
deleted file mode 100644
index 2d000d66ba4c..000000000000
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ /dev/null
@@ -1,62 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-1280 Authenticated-Encryption Algorithm
4 * Glue for AVX2 implementation
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/simd.h>
12#include <crypto/morus1280_glue.h>
13#include <linux/module.h>
14#include <asm/fpu/api.h>
15#include <asm/cpu_device_id.h>
16
17asmlinkage void crypto_morus1280_avx2_init(void *state, const void *key,
18 const void *iv);
19asmlinkage void crypto_morus1280_avx2_ad(void *state, const void *data,
20 unsigned int length);
21
22asmlinkage void crypto_morus1280_avx2_enc(void *state, const void *src,
23 void *dst, unsigned int length);
24asmlinkage void crypto_morus1280_avx2_dec(void *state, const void *src,
25 void *dst, unsigned int length);
26
27asmlinkage void crypto_morus1280_avx2_enc_tail(void *state, const void *src,
28 void *dst, unsigned int length);
29asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src,
30 void *dst, unsigned int length);
31
32asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
33 u64 assoclen, u64 cryptlen);
34
35MORUS1280_DECLARE_ALG(avx2, "morus1280-avx2", 400);
36
37static struct simd_aead_alg *simd_alg;
38
39static int __init crypto_morus1280_avx2_module_init(void)
40{
41 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
42 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
43 !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
44 return -ENODEV;
45
46 return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
47 &simd_alg);
48}
49
50static void __exit crypto_morus1280_avx2_module_exit(void)
51{
52 simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &simd_alg);
53}
54
55module_init(crypto_morus1280_avx2_module_init);
56module_exit(crypto_morus1280_avx2_module_exit);
57
58MODULE_LICENSE("GPL");
59MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
60MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm -- AVX2 implementation");
61MODULE_ALIAS_CRYPTO("morus1280");
62MODULE_ALIAS_CRYPTO("morus1280-avx2");
diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S
deleted file mode 100644
index 0eece772866b..000000000000
--- a/arch/x86/crypto/morus1280-sse2-asm.S
+++ /dev/null
@@ -1,893 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * SSE2 implementation of MORUS-1280
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <linux/linkage.h>
10#include <asm/frame.h>
11
12#define SHUFFLE_MASK(i0, i1, i2, i3) \
13 (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6))
14
15#define MASK2 SHUFFLE_MASK(2, 3, 0, 1)
16
17#define STATE0_LO %xmm0
18#define STATE0_HI %xmm1
19#define STATE1_LO %xmm2
20#define STATE1_HI %xmm3
21#define STATE2_LO %xmm4
22#define STATE2_HI %xmm5
23#define STATE3_LO %xmm6
24#define STATE3_HI %xmm7
25#define STATE4_LO %xmm8
26#define STATE4_HI %xmm9
27#define KEY_LO %xmm10
28#define KEY_HI %xmm11
29#define MSG_LO %xmm10
30#define MSG_HI %xmm11
31#define T0_LO %xmm12
32#define T0_HI %xmm13
33#define T1_LO %xmm14
34#define T1_HI %xmm15
35
36.section .rodata.cst16.morus640_const, "aM", @progbits, 16
37.align 16
38.Lmorus640_const_0:
39 .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d
40 .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62
41.Lmorus640_const_1:
42 .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
43 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd
44
45.section .rodata.cst16.morus640_counter, "aM", @progbits, 16
46.align 16
47.Lmorus640_counter_0:
48 .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
49 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
50.Lmorus640_counter_1:
51 .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
52 .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
53
54.text
55
56.macro rol1 hi, lo
57 /*
58 * HI_1 | HI_0 || LO_1 | LO_0
59 * ==>
60 * HI_0 | HI_1 || LO_1 | LO_0
61 * ==>
62 * HI_0 | LO_1 || LO_0 | HI_1
63 */
64 pshufd $MASK2, \hi, \hi
65 movdqa \hi, T0_LO
66 punpcklqdq \lo, T0_LO
67 punpckhqdq \hi, \lo
68 movdqa \lo, \hi
69 movdqa T0_LO, \lo
70.endm
71
72.macro rol2 hi, lo
73 movdqa \lo, T0_LO
74 movdqa \hi, \lo
75 movdqa T0_LO, \hi
76.endm
77
78.macro rol3 hi, lo
79 /*
80 * HI_1 | HI_0 || LO_1 | LO_0
81 * ==>
82 * HI_0 | HI_1 || LO_1 | LO_0
83 * ==>
84 * LO_0 | HI_1 || HI_0 | LO_1
85 */
86 pshufd $MASK2, \hi, \hi
87 movdqa \lo, T0_LO
88 punpckhqdq \hi, T0_LO
89 punpcklqdq \lo, \hi
90 movdqa T0_LO, \lo
91.endm
92
93.macro morus1280_round s0_l, s0_h, s1_l, s1_h, s2_l, s2_h, s3_l, s3_h, s4_l, s4_h, b, w
94 movdqa \s1_l, T0_LO
95 pand \s2_l, T0_LO
96 pxor T0_LO, \s0_l
97
98 movdqa \s1_h, T0_LO
99 pand \s2_h, T0_LO
100 pxor T0_LO, \s0_h
101
102 pxor \s3_l, \s0_l
103 pxor \s3_h, \s0_h
104
105 movdqa \s0_l, T0_LO
106 psllq $\b, T0_LO
107 psrlq $(64 - \b), \s0_l
108 pxor T0_LO, \s0_l
109
110 movdqa \s0_h, T0_LO
111 psllq $\b, T0_LO
112 psrlq $(64 - \b), \s0_h
113 pxor T0_LO, \s0_h
114
115 \w \s3_h, \s3_l
116.endm
117
118/*
119 * __morus1280_update: internal ABI
120 * input:
121 * STATE[0-4] - input state
122 * MSG - message block
123 * output:
124 * STATE[0-4] - output state
125 * changed:
126 * T0
127 */
128__morus1280_update:
129 morus1280_round \
130 STATE0_LO, STATE0_HI, \
131 STATE1_LO, STATE1_HI, \
132 STATE2_LO, STATE2_HI, \
133 STATE3_LO, STATE3_HI, \
134 STATE4_LO, STATE4_HI, \
135 13, rol1
136 pxor MSG_LO, STATE1_LO
137 pxor MSG_HI, STATE1_HI
138 morus1280_round \
139 STATE1_LO, STATE1_HI, \
140 STATE2_LO, STATE2_HI, \
141 STATE3_LO, STATE3_HI, \
142 STATE4_LO, STATE4_HI, \
143 STATE0_LO, STATE0_HI, \
144 46, rol2
145 pxor MSG_LO, STATE2_LO
146 pxor MSG_HI, STATE2_HI
147 morus1280_round \
148 STATE2_LO, STATE2_HI, \
149 STATE3_LO, STATE3_HI, \
150 STATE4_LO, STATE4_HI, \
151 STATE0_LO, STATE0_HI, \
152 STATE1_LO, STATE1_HI, \
153 38, rol3
154 pxor MSG_LO, STATE3_LO
155 pxor MSG_HI, STATE3_HI
156 morus1280_round \
157 STATE3_LO, STATE3_HI, \
158 STATE4_LO, STATE4_HI, \
159 STATE0_LO, STATE0_HI, \
160 STATE1_LO, STATE1_HI, \
161 STATE2_LO, STATE2_HI, \
162 7, rol2
163 pxor MSG_LO, STATE4_LO
164 pxor MSG_HI, STATE4_HI
165 morus1280_round \
166 STATE4_LO, STATE4_HI, \
167 STATE0_LO, STATE0_HI, \
168 STATE1_LO, STATE1_HI, \
169 STATE2_LO, STATE2_HI, \
170 STATE3_LO, STATE3_HI, \
171 4, rol1
172 ret
173ENDPROC(__morus1280_update)
174
175/*
176 * __morus1280_update_zero: internal ABI
177 * input:
178 * STATE[0-4] - input state
179 * output:
180 * STATE[0-4] - output state
181 * changed:
182 * T0
183 */
184__morus1280_update_zero:
185 morus1280_round \
186 STATE0_LO, STATE0_HI, \
187 STATE1_LO, STATE1_HI, \
188 STATE2_LO, STATE2_HI, \
189 STATE3_LO, STATE3_HI, \
190 STATE4_LO, STATE4_HI, \
191 13, rol1
192 morus1280_round \
193 STATE1_LO, STATE1_HI, \
194 STATE2_LO, STATE2_HI, \
195 STATE3_LO, STATE3_HI, \
196 STATE4_LO, STATE4_HI, \
197 STATE0_LO, STATE0_HI, \
198 46, rol2
199 morus1280_round \
200 STATE2_LO, STATE2_HI, \
201 STATE3_LO, STATE3_HI, \
202 STATE4_LO, STATE4_HI, \
203 STATE0_LO, STATE0_HI, \
204 STATE1_LO, STATE1_HI, \
205 38, rol3
206 morus1280_round \
207 STATE3_LO, STATE3_HI, \
208 STATE4_LO, STATE4_HI, \
209 STATE0_LO, STATE0_HI, \
210 STATE1_LO, STATE1_HI, \
211 STATE2_LO, STATE2_HI, \
212 7, rol2
213 morus1280_round \
214 STATE4_LO, STATE4_HI, \
215 STATE0_LO, STATE0_HI, \
216 STATE1_LO, STATE1_HI, \
217 STATE2_LO, STATE2_HI, \
218 STATE3_LO, STATE3_HI, \
219 4, rol1
220 ret
221ENDPROC(__morus1280_update_zero)
222
223/*
224 * __load_partial: internal ABI
225 * input:
226 * %rsi - src
227 * %rcx - bytes
228 * output:
229 * MSG - message block
230 * changed:
231 * %r8
232 * %r9
233 */
234__load_partial:
235 xor %r9d, %r9d
236 pxor MSG_LO, MSG_LO
237 pxor MSG_HI, MSG_HI
238
239 mov %rcx, %r8
240 and $0x1, %r8
241 jz .Lld_partial_1
242
243 mov %rcx, %r8
244 and $0x1E, %r8
245 add %rsi, %r8
246 mov (%r8), %r9b
247
248.Lld_partial_1:
249 mov %rcx, %r8
250 and $0x2, %r8
251 jz .Lld_partial_2
252
253 mov %rcx, %r8
254 and $0x1C, %r8
255 add %rsi, %r8
256 shl $16, %r9
257 mov (%r8), %r9w
258
259.Lld_partial_2:
260 mov %rcx, %r8
261 and $0x4, %r8
262 jz .Lld_partial_4
263
264 mov %rcx, %r8
265 and $0x18, %r8
266 add %rsi, %r8
267 shl $32, %r9
268 mov (%r8), %r8d
269 xor %r8, %r9
270
271.Lld_partial_4:
272 movq %r9, MSG_LO
273
274 mov %rcx, %r8
275 and $0x8, %r8
276 jz .Lld_partial_8
277
278 mov %rcx, %r8
279 and $0x10, %r8
280 add %rsi, %r8
281 pslldq $8, MSG_LO
282 movq (%r8), T0_LO
283 pxor T0_LO, MSG_LO
284
285.Lld_partial_8:
286 mov %rcx, %r8
287 and $0x10, %r8
288 jz .Lld_partial_16
289
290 movdqa MSG_LO, MSG_HI
291 movdqu (%rsi), MSG_LO
292
293.Lld_partial_16:
294 ret
295ENDPROC(__load_partial)
296
297/*
298 * __store_partial: internal ABI
299 * input:
300 * %rdx - dst
301 * %rcx - bytes
302 * output:
303 * T0 - message block
304 * changed:
305 * %r8
306 * %r9
307 * %r10
308 */
309__store_partial:
310 mov %rcx, %r8
311 mov %rdx, %r9
312
313 cmp $16, %r8
314 jl .Lst_partial_16
315
316 movdqu T0_LO, (%r9)
317 movdqa T0_HI, T0_LO
318
319 sub $16, %r8
320 add $16, %r9
321
322.Lst_partial_16:
323 movq T0_LO, %r10
324
325 cmp $8, %r8
326 jl .Lst_partial_8
327
328 mov %r10, (%r9)
329 psrldq $8, T0_LO
330 movq T0_LO, %r10
331
332 sub $8, %r8
333 add $8, %r9
334
335.Lst_partial_8:
336 cmp $4, %r8
337 jl .Lst_partial_4
338
339 mov %r10d, (%r9)
340 shr $32, %r10
341
342 sub $4, %r8
343 add $4, %r9
344
345.Lst_partial_4:
346 cmp $2, %r8
347 jl .Lst_partial_2
348
349 mov %r10w, (%r9)
350 shr $16, %r10
351
352 sub $2, %r8
353 add $2, %r9
354
355.Lst_partial_2:
356 cmp $1, %r8
357 jl .Lst_partial_1
358
359 mov %r10b, (%r9)
360
361.Lst_partial_1:
362 ret
363ENDPROC(__store_partial)
364
365/*
366 * void crypto_morus1280_sse2_init(void *state, const void *key,
367 * const void *iv);
368 */
369ENTRY(crypto_morus1280_sse2_init)
370 FRAME_BEGIN
371
372 /* load IV: */
373 pxor STATE0_HI, STATE0_HI
374 movdqu (%rdx), STATE0_LO
375 /* load key: */
376 movdqu 0(%rsi), KEY_LO
377 movdqu 16(%rsi), KEY_HI
378 movdqa KEY_LO, STATE1_LO
379 movdqa KEY_HI, STATE1_HI
380 /* load all ones: */
381 pcmpeqd STATE2_LO, STATE2_LO
382 pcmpeqd STATE2_HI, STATE2_HI
383 /* load all zeros: */
384 pxor STATE3_LO, STATE3_LO
385 pxor STATE3_HI, STATE3_HI
386 /* load the constant: */
387 movdqa .Lmorus640_const_0, STATE4_LO
388 movdqa .Lmorus640_const_1, STATE4_HI
389
390 /* update 16 times with zero: */
391 call __morus1280_update_zero
392 call __morus1280_update_zero
393 call __morus1280_update_zero
394 call __morus1280_update_zero
395 call __morus1280_update_zero
396 call __morus1280_update_zero
397 call __morus1280_update_zero
398 call __morus1280_update_zero
399 call __morus1280_update_zero
400 call __morus1280_update_zero
401 call __morus1280_update_zero
402 call __morus1280_update_zero
403 call __morus1280_update_zero
404 call __morus1280_update_zero
405 call __morus1280_update_zero
406 call __morus1280_update_zero
407
408 /* xor-in the key again after updates: */
409 pxor KEY_LO, STATE1_LO
410 pxor KEY_HI, STATE1_HI
411
412 /* store the state: */
413 movdqu STATE0_LO, (0 * 16)(%rdi)
414 movdqu STATE0_HI, (1 * 16)(%rdi)
415 movdqu STATE1_LO, (2 * 16)(%rdi)
416 movdqu STATE1_HI, (3 * 16)(%rdi)
417 movdqu STATE2_LO, (4 * 16)(%rdi)
418 movdqu STATE2_HI, (5 * 16)(%rdi)
419 movdqu STATE3_LO, (6 * 16)(%rdi)
420 movdqu STATE3_HI, (7 * 16)(%rdi)
421 movdqu STATE4_LO, (8 * 16)(%rdi)
422 movdqu STATE4_HI, (9 * 16)(%rdi)
423
424 FRAME_END
425 ret
426ENDPROC(crypto_morus1280_sse2_init)
427
428/*
429 * void crypto_morus1280_sse2_ad(void *state, const void *data,
430 * unsigned int length);
431 */
432ENTRY(crypto_morus1280_sse2_ad)
433 FRAME_BEGIN
434
435 cmp $32, %rdx
436 jb .Lad_out
437
438 /* load the state: */
439 movdqu (0 * 16)(%rdi), STATE0_LO
440 movdqu (1 * 16)(%rdi), STATE0_HI
441 movdqu (2 * 16)(%rdi), STATE1_LO
442 movdqu (3 * 16)(%rdi), STATE1_HI
443 movdqu (4 * 16)(%rdi), STATE2_LO
444 movdqu (5 * 16)(%rdi), STATE2_HI
445 movdqu (6 * 16)(%rdi), STATE3_LO
446 movdqu (7 * 16)(%rdi), STATE3_HI
447 movdqu (8 * 16)(%rdi), STATE4_LO
448 movdqu (9 * 16)(%rdi), STATE4_HI
449
450 mov %rsi, %r8
451 and $0xF, %r8
452 jnz .Lad_u_loop
453
454.align 4
455.Lad_a_loop:
456 movdqa 0(%rsi), MSG_LO
457 movdqa 16(%rsi), MSG_HI
458 call __morus1280_update
459 sub $32, %rdx
460 add $32, %rsi
461 cmp $32, %rdx
462 jge .Lad_a_loop
463
464 jmp .Lad_cont
465.align 4
466.Lad_u_loop:
467 movdqu 0(%rsi), MSG_LO
468 movdqu 16(%rsi), MSG_HI
469 call __morus1280_update
470 sub $32, %rdx
471 add $32, %rsi
472 cmp $32, %rdx
473 jge .Lad_u_loop
474
475.Lad_cont:
476 /* store the state: */
477 movdqu STATE0_LO, (0 * 16)(%rdi)
478 movdqu STATE0_HI, (1 * 16)(%rdi)
479 movdqu STATE1_LO, (2 * 16)(%rdi)
480 movdqu STATE1_HI, (3 * 16)(%rdi)
481 movdqu STATE2_LO, (4 * 16)(%rdi)
482 movdqu STATE2_HI, (5 * 16)(%rdi)
483 movdqu STATE3_LO, (6 * 16)(%rdi)
484 movdqu STATE3_HI, (7 * 16)(%rdi)
485 movdqu STATE4_LO, (8 * 16)(%rdi)
486 movdqu STATE4_HI, (9 * 16)(%rdi)
487
488.Lad_out:
489 FRAME_END
490 ret
491ENDPROC(crypto_morus1280_sse2_ad)
492
493/*
494 * void crypto_morus1280_sse2_enc(void *state, const void *src, void *dst,
495 * unsigned int length);
496 */
497ENTRY(crypto_morus1280_sse2_enc)
498 FRAME_BEGIN
499
500 cmp $32, %rcx
501 jb .Lenc_out
502
503 /* load the state: */
504 movdqu (0 * 16)(%rdi), STATE0_LO
505 movdqu (1 * 16)(%rdi), STATE0_HI
506 movdqu (2 * 16)(%rdi), STATE1_LO
507 movdqu (3 * 16)(%rdi), STATE1_HI
508 movdqu (4 * 16)(%rdi), STATE2_LO
509 movdqu (5 * 16)(%rdi), STATE2_HI
510 movdqu (6 * 16)(%rdi), STATE3_LO
511 movdqu (7 * 16)(%rdi), STATE3_HI
512 movdqu (8 * 16)(%rdi), STATE4_LO
513 movdqu (9 * 16)(%rdi), STATE4_HI
514
515 mov %rsi, %r8
516 or %rdx, %r8
517 and $0xF, %r8
518 jnz .Lenc_u_loop
519
520.align 4
521.Lenc_a_loop:
522 movdqa 0(%rsi), MSG_LO
523 movdqa 16(%rsi), MSG_HI
524 movdqa STATE1_LO, T1_LO
525 movdqa STATE1_HI, T1_HI
526 rol3 T1_HI, T1_LO
527 movdqa MSG_LO, T0_LO
528 movdqa MSG_HI, T0_HI
529 pxor T1_LO, T0_LO
530 pxor T1_HI, T0_HI
531 pxor STATE0_LO, T0_LO
532 pxor STATE0_HI, T0_HI
533 movdqa STATE2_LO, T1_LO
534 movdqa STATE2_HI, T1_HI
535 pand STATE3_LO, T1_LO
536 pand STATE3_HI, T1_HI
537 pxor T1_LO, T0_LO
538 pxor T1_HI, T0_HI
539 movdqa T0_LO, 0(%rdx)
540 movdqa T0_HI, 16(%rdx)
541
542 call __morus1280_update
543 sub $32, %rcx
544 add $32, %rsi
545 add $32, %rdx
546 cmp $32, %rcx
547 jge .Lenc_a_loop
548
549 jmp .Lenc_cont
550.align 4
551.Lenc_u_loop:
552 movdqu 0(%rsi), MSG_LO
553 movdqu 16(%rsi), MSG_HI
554 movdqa STATE1_LO, T1_LO
555 movdqa STATE1_HI, T1_HI
556 rol3 T1_HI, T1_LO
557 movdqa MSG_LO, T0_LO
558 movdqa MSG_HI, T0_HI
559 pxor T1_LO, T0_LO
560 pxor T1_HI, T0_HI
561 pxor STATE0_LO, T0_LO
562 pxor STATE0_HI, T0_HI
563 movdqa STATE2_LO, T1_LO
564 movdqa STATE2_HI, T1_HI
565 pand STATE3_LO, T1_LO
566 pand STATE3_HI, T1_HI
567 pxor T1_LO, T0_LO
568 pxor T1_HI, T0_HI
569 movdqu T0_LO, 0(%rdx)
570 movdqu T0_HI, 16(%rdx)
571
572 call __morus1280_update
573 sub $32, %rcx
574 add $32, %rsi
575 add $32, %rdx
576 cmp $32, %rcx
577 jge .Lenc_u_loop
578
579.Lenc_cont:
580 /* store the state: */
581 movdqu STATE0_LO, (0 * 16)(%rdi)
582 movdqu STATE0_HI, (1 * 16)(%rdi)
583 movdqu STATE1_LO, (2 * 16)(%rdi)
584 movdqu STATE1_HI, (3 * 16)(%rdi)
585 movdqu STATE2_LO, (4 * 16)(%rdi)
586 movdqu STATE2_HI, (5 * 16)(%rdi)
587 movdqu STATE3_LO, (6 * 16)(%rdi)
588 movdqu STATE3_HI, (7 * 16)(%rdi)
589 movdqu STATE4_LO, (8 * 16)(%rdi)
590 movdqu STATE4_HI, (9 * 16)(%rdi)
591
592.Lenc_out:
593 FRAME_END
594 ret
595ENDPROC(crypto_morus1280_sse2_enc)
596
597/*
598 * void crypto_morus1280_sse2_enc_tail(void *state, const void *src, void *dst,
599 * unsigned int length);
600 */
601ENTRY(crypto_morus1280_sse2_enc_tail)
602 FRAME_BEGIN
603
604 /* load the state: */
605 movdqu (0 * 16)(%rdi), STATE0_LO
606 movdqu (1 * 16)(%rdi), STATE0_HI
607 movdqu (2 * 16)(%rdi), STATE1_LO
608 movdqu (3 * 16)(%rdi), STATE1_HI
609 movdqu (4 * 16)(%rdi), STATE2_LO
610 movdqu (5 * 16)(%rdi), STATE2_HI
611 movdqu (6 * 16)(%rdi), STATE3_LO
612 movdqu (7 * 16)(%rdi), STATE3_HI
613 movdqu (8 * 16)(%rdi), STATE4_LO
614 movdqu (9 * 16)(%rdi), STATE4_HI
615
616 /* encrypt message: */
617 call __load_partial
618
619 movdqa STATE1_LO, T1_LO
620 movdqa STATE1_HI, T1_HI
621 rol3 T1_HI, T1_LO
622 movdqa MSG_LO, T0_LO
623 movdqa MSG_HI, T0_HI
624 pxor T1_LO, T0_LO
625 pxor T1_HI, T0_HI
626 pxor STATE0_LO, T0_LO
627 pxor STATE0_HI, T0_HI
628 movdqa STATE2_LO, T1_LO
629 movdqa STATE2_HI, T1_HI
630 pand STATE3_LO, T1_LO
631 pand STATE3_HI, T1_HI
632 pxor T1_LO, T0_LO
633 pxor T1_HI, T0_HI
634
635 call __store_partial
636
637 call __morus1280_update
638
639 /* store the state: */
640 movdqu STATE0_LO, (0 * 16)(%rdi)
641 movdqu STATE0_HI, (1 * 16)(%rdi)
642 movdqu STATE1_LO, (2 * 16)(%rdi)
643 movdqu STATE1_HI, (3 * 16)(%rdi)
644 movdqu STATE2_LO, (4 * 16)(%rdi)
645 movdqu STATE2_HI, (5 * 16)(%rdi)
646 movdqu STATE3_LO, (6 * 16)(%rdi)
647 movdqu STATE3_HI, (7 * 16)(%rdi)
648 movdqu STATE4_LO, (8 * 16)(%rdi)
649 movdqu STATE4_HI, (9 * 16)(%rdi)
650
651 FRAME_END
652 ret
653ENDPROC(crypto_morus1280_sse2_enc_tail)
654
655/*
656 * void crypto_morus1280_sse2_dec(void *state, const void *src, void *dst,
657 * unsigned int length);
658 */
659ENTRY(crypto_morus1280_sse2_dec)
660 FRAME_BEGIN
661
662 cmp $32, %rcx
663 jb .Ldec_out
664
665 /* load the state: */
666 movdqu (0 * 16)(%rdi), STATE0_LO
667 movdqu (1 * 16)(%rdi), STATE0_HI
668 movdqu (2 * 16)(%rdi), STATE1_LO
669 movdqu (3 * 16)(%rdi), STATE1_HI
670 movdqu (4 * 16)(%rdi), STATE2_LO
671 movdqu (5 * 16)(%rdi), STATE2_HI
672 movdqu (6 * 16)(%rdi), STATE3_LO
673 movdqu (7 * 16)(%rdi), STATE3_HI
674 movdqu (8 * 16)(%rdi), STATE4_LO
675 movdqu (9 * 16)(%rdi), STATE4_HI
676
677 mov %rsi, %r8
678 or %rdx, %r8
679 and $0xF, %r8
680 jnz .Ldec_u_loop
681
682.align 4
683.Ldec_a_loop:
684 movdqa 0(%rsi), MSG_LO
685 movdqa 16(%rsi), MSG_HI
686 pxor STATE0_LO, MSG_LO
687 pxor STATE0_HI, MSG_HI
688 movdqa STATE1_LO, T1_LO
689 movdqa STATE1_HI, T1_HI
690 rol3 T1_HI, T1_LO
691 pxor T1_LO, MSG_LO
692 pxor T1_HI, MSG_HI
693 movdqa STATE2_LO, T1_LO
694 movdqa STATE2_HI, T1_HI
695 pand STATE3_LO, T1_LO
696 pand STATE3_HI, T1_HI
697 pxor T1_LO, MSG_LO
698 pxor T1_HI, MSG_HI
699 movdqa MSG_LO, 0(%rdx)
700 movdqa MSG_HI, 16(%rdx)
701
702 call __morus1280_update
703 sub $32, %rcx
704 add $32, %rsi
705 add $32, %rdx
706 cmp $32, %rcx
707 jge .Ldec_a_loop
708
709 jmp .Ldec_cont
710.align 4
711.Ldec_u_loop:
712 movdqu 0(%rsi), MSG_LO
713 movdqu 16(%rsi), MSG_HI
714 pxor STATE0_LO, MSG_LO
715 pxor STATE0_HI, MSG_HI
716 movdqa STATE1_LO, T1_LO
717 movdqa STATE1_HI, T1_HI
718 rol3 T1_HI, T1_LO
719 pxor T1_LO, MSG_LO
720 pxor T1_HI, MSG_HI
721 movdqa STATE2_LO, T1_LO
722 movdqa STATE2_HI, T1_HI
723 pand STATE3_LO, T1_LO
724 pand STATE3_HI, T1_HI
725 pxor T1_LO, MSG_LO
726 pxor T1_HI, MSG_HI
727 movdqu MSG_LO, 0(%rdx)
728 movdqu MSG_HI, 16(%rdx)
729
730 call __morus1280_update
731 sub $32, %rcx
732 add $32, %rsi
733 add $32, %rdx
734 cmp $32, %rcx
735 jge .Ldec_u_loop
736
737.Ldec_cont:
738 /* store the state: */
739 movdqu STATE0_LO, (0 * 16)(%rdi)
740 movdqu STATE0_HI, (1 * 16)(%rdi)
741 movdqu STATE1_LO, (2 * 16)(%rdi)
742 movdqu STATE1_HI, (3 * 16)(%rdi)
743 movdqu STATE2_LO, (4 * 16)(%rdi)
744 movdqu STATE2_HI, (5 * 16)(%rdi)
745 movdqu STATE3_LO, (6 * 16)(%rdi)
746 movdqu STATE3_HI, (7 * 16)(%rdi)
747 movdqu STATE4_LO, (8 * 16)(%rdi)
748 movdqu STATE4_HI, (9 * 16)(%rdi)
749
750.Ldec_out:
751 FRAME_END
752 ret
753ENDPROC(crypto_morus1280_sse2_dec)
754
755/*
756 * void crypto_morus1280_sse2_dec_tail(void *state, const void *src, void *dst,
757 * unsigned int length);
758 */
759ENTRY(crypto_morus1280_sse2_dec_tail)
760 FRAME_BEGIN
761
762 /* load the state: */
763 movdqu (0 * 16)(%rdi), STATE0_LO
764 movdqu (1 * 16)(%rdi), STATE0_HI
765 movdqu (2 * 16)(%rdi), STATE1_LO
766 movdqu (3 * 16)(%rdi), STATE1_HI
767 movdqu (4 * 16)(%rdi), STATE2_LO
768 movdqu (5 * 16)(%rdi), STATE2_HI
769 movdqu (6 * 16)(%rdi), STATE3_LO
770 movdqu (7 * 16)(%rdi), STATE3_HI
771 movdqu (8 * 16)(%rdi), STATE4_LO
772 movdqu (9 * 16)(%rdi), STATE4_HI
773
774 /* decrypt message: */
775 call __load_partial
776
777 pxor STATE0_LO, MSG_LO
778 pxor STATE0_HI, MSG_HI
779 movdqa STATE1_LO, T1_LO
780 movdqa STATE1_HI, T1_HI
781 rol3 T1_HI, T1_LO
782 pxor T1_LO, MSG_LO
783 pxor T1_HI, MSG_HI
784 movdqa STATE2_LO, T1_LO
785 movdqa STATE2_HI, T1_HI
786 pand STATE3_LO, T1_LO
787 pand STATE3_HI, T1_HI
788 pxor T1_LO, MSG_LO
789 pxor T1_HI, MSG_HI
790 movdqa MSG_LO, T0_LO
791 movdqa MSG_HI, T0_HI
792
793 call __store_partial
794
795 /* mask with byte count: */
796 movq %rcx, T0_LO
797 punpcklbw T0_LO, T0_LO
798 punpcklbw T0_LO, T0_LO
799 punpcklbw T0_LO, T0_LO
800 punpcklbw T0_LO, T0_LO
801 movdqa T0_LO, T0_HI
802 movdqa .Lmorus640_counter_0, T1_LO
803 movdqa .Lmorus640_counter_1, T1_HI
804 pcmpgtb T1_LO, T0_LO
805 pcmpgtb T1_HI, T0_HI
806 pand T0_LO, MSG_LO
807 pand T0_HI, MSG_HI
808
809 call __morus1280_update
810
811 /* store the state: */
812 movdqu STATE0_LO, (0 * 16)(%rdi)
813 movdqu STATE0_HI, (1 * 16)(%rdi)
814 movdqu STATE1_LO, (2 * 16)(%rdi)
815 movdqu STATE1_HI, (3 * 16)(%rdi)
816 movdqu STATE2_LO, (4 * 16)(%rdi)
817 movdqu STATE2_HI, (5 * 16)(%rdi)
818 movdqu STATE3_LO, (6 * 16)(%rdi)
819 movdqu STATE3_HI, (7 * 16)(%rdi)
820 movdqu STATE4_LO, (8 * 16)(%rdi)
821 movdqu STATE4_HI, (9 * 16)(%rdi)
822
823 FRAME_END
824 ret
825ENDPROC(crypto_morus1280_sse2_dec_tail)
826
827/*
828 * void crypto_morus1280_sse2_final(void *state, void *tag_xor,
829 * u64 assoclen, u64 cryptlen);
830 */
831ENTRY(crypto_morus1280_sse2_final)
832 FRAME_BEGIN
833
834 /* load the state: */
835 movdqu (0 * 16)(%rdi), STATE0_LO
836 movdqu (1 * 16)(%rdi), STATE0_HI
837 movdqu (2 * 16)(%rdi), STATE1_LO
838 movdqu (3 * 16)(%rdi), STATE1_HI
839 movdqu (4 * 16)(%rdi), STATE2_LO
840 movdqu (5 * 16)(%rdi), STATE2_HI
841 movdqu (6 * 16)(%rdi), STATE3_LO
842 movdqu (7 * 16)(%rdi), STATE3_HI
843 movdqu (8 * 16)(%rdi), STATE4_LO
844 movdqu (9 * 16)(%rdi), STATE4_HI
845
846 /* xor state[0] into state[4]: */
847 pxor STATE0_LO, STATE4_LO
848 pxor STATE0_HI, STATE4_HI
849
850 /* prepare length block: */
851 movq %rdx, MSG_LO
852 movq %rcx, T0_LO
853 pslldq $8, T0_LO
854 pxor T0_LO, MSG_LO
855 psllq $3, MSG_LO /* multiply by 8 (to get bit count) */
856 pxor MSG_HI, MSG_HI
857
858 /* update state: */
859 call __morus1280_update
860 call __morus1280_update
861 call __morus1280_update
862 call __morus1280_update
863 call __morus1280_update
864 call __morus1280_update
865 call __morus1280_update
866 call __morus1280_update
867 call __morus1280_update
868 call __morus1280_update
869
870 /* xor tag: */
871 movdqu 0(%rsi), MSG_LO
872 movdqu 16(%rsi), MSG_HI
873
874 pxor STATE0_LO, MSG_LO
875 pxor STATE0_HI, MSG_HI
876 movdqa STATE1_LO, T0_LO
877 movdqa STATE1_HI, T0_HI
878 rol3 T0_HI, T0_LO
879 pxor T0_LO, MSG_LO
880 pxor T0_HI, MSG_HI
881 movdqa STATE2_LO, T0_LO
882 movdqa STATE2_HI, T0_HI
883 pand STATE3_LO, T0_LO
884 pand STATE3_HI, T0_HI
885 pxor T0_LO, MSG_LO
886 pxor T0_HI, MSG_HI
887
888 movdqu MSG_LO, 0(%rsi)
889 movdqu MSG_HI, 16(%rsi)
890
891 FRAME_END
892 ret
893ENDPROC(crypto_morus1280_sse2_final)
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
deleted file mode 100644
index aada9d774293..000000000000
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ /dev/null
@@ -1,61 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-1280 Authenticated-Encryption Algorithm
4 * Glue for SSE2 implementation
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/simd.h>
12#include <crypto/morus1280_glue.h>
13#include <linux/module.h>
14#include <asm/fpu/api.h>
15#include <asm/cpu_device_id.h>
16
17asmlinkage void crypto_morus1280_sse2_init(void *state, const void *key,
18 const void *iv);
19asmlinkage void crypto_morus1280_sse2_ad(void *state, const void *data,
20 unsigned int length);
21
22asmlinkage void crypto_morus1280_sse2_enc(void *state, const void *src,
23 void *dst, unsigned int length);
24asmlinkage void crypto_morus1280_sse2_dec(void *state, const void *src,
25 void *dst, unsigned int length);
26
27asmlinkage void crypto_morus1280_sse2_enc_tail(void *state, const void *src,
28 void *dst, unsigned int length);
29asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src,
30 void *dst, unsigned int length);
31
32asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
33 u64 assoclen, u64 cryptlen);
34
35MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);
36
37static struct simd_aead_alg *simd_alg;
38
39static int __init crypto_morus1280_sse2_module_init(void)
40{
41 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
42 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
43 return -ENODEV;
44
45 return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
46 &simd_alg);
47}
48
49static void __exit crypto_morus1280_sse2_module_exit(void)
50{
51 simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
52}
53
54module_init(crypto_morus1280_sse2_module_init);
55module_exit(crypto_morus1280_sse2_module_exit);
56
57MODULE_LICENSE("GPL");
58MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
59MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm -- SSE2 implementation");
60MODULE_ALIAS_CRYPTO("morus1280");
61MODULE_ALIAS_CRYPTO("morus1280-sse2");
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
deleted file mode 100644
index ffbde8b22838..000000000000
--- a/arch/x86/crypto/morus1280_glue.c
+++ /dev/null
@@ -1,205 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-1280 Authenticated-Encryption Algorithm
4 * Common x86 SIMD glue skeleton
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/skcipher.h>
12#include <crypto/morus1280_glue.h>
13#include <crypto/scatterwalk.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/scatterlist.h>
19#include <asm/fpu/api.h>
20
21struct morus1280_state {
22 struct morus1280_block s[MORUS_STATE_BLOCKS];
23};
24
25struct morus1280_ops {
26 int (*skcipher_walk_init)(struct skcipher_walk *walk,
27 struct aead_request *req, bool atomic);
28
29 void (*crypt_blocks)(void *state, const void *src, void *dst,
30 unsigned int length);
31 void (*crypt_tail)(void *state, const void *src, void *dst,
32 unsigned int length);
33};
34
35static void crypto_morus1280_glue_process_ad(
36 struct morus1280_state *state,
37 const struct morus1280_glue_ops *ops,
38 struct scatterlist *sg_src, unsigned int assoclen)
39{
40 struct scatter_walk walk;
41 struct morus1280_block buf;
42 unsigned int pos = 0;
43
44 scatterwalk_start(&walk, sg_src);
45 while (assoclen != 0) {
46 unsigned int size = scatterwalk_clamp(&walk, assoclen);
47 unsigned int left = size;
48 void *mapped = scatterwalk_map(&walk);
49 const u8 *src = (const u8 *)mapped;
50
51 if (pos + size >= MORUS1280_BLOCK_SIZE) {
52 if (pos > 0) {
53 unsigned int fill = MORUS1280_BLOCK_SIZE - pos;
54 memcpy(buf.bytes + pos, src, fill);
55 ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
56 pos = 0;
57 left -= fill;
58 src += fill;
59 }
60
61 ops->ad(state, src, left);
62 src += left & ~(MORUS1280_BLOCK_SIZE - 1);
63 left &= MORUS1280_BLOCK_SIZE - 1;
64 }
65
66 memcpy(buf.bytes + pos, src, left);
67
68 pos += left;
69 assoclen -= size;
70 scatterwalk_unmap(mapped);
71 scatterwalk_advance(&walk, size);
72 scatterwalk_done(&walk, 0, assoclen);
73 }
74
75 if (pos > 0) {
76 memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
77 ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
78 }
79}
80
81static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
82 struct morus1280_ops ops,
83 struct skcipher_walk *walk)
84{
85 while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
86 ops.crypt_blocks(state, walk->src.virt.addr,
87 walk->dst.virt.addr,
88 round_down(walk->nbytes,
89 MORUS1280_BLOCK_SIZE));
90 skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
91 }
92
93 if (walk->nbytes) {
94 ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
95 walk->nbytes);
96 skcipher_walk_done(walk, 0);
97 }
98}
99
100int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
101 unsigned int keylen)
102{
103 struct morus1280_ctx *ctx = crypto_aead_ctx(aead);
104
105 if (keylen == MORUS1280_BLOCK_SIZE) {
106 memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE);
107 } else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
108 memcpy(ctx->key.bytes, key, keylen);
109 memcpy(ctx->key.bytes + keylen, key, keylen);
110 } else {
111 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
112 return -EINVAL;
113 }
114
115 return 0;
116}
117EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey);
118
119int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
120 unsigned int authsize)
121{
122 return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
123}
124EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize);
125
126static void crypto_morus1280_glue_crypt(struct aead_request *req,
127 struct morus1280_ops ops,
128 unsigned int cryptlen,
129 struct morus1280_block *tag_xor)
130{
131 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
132 struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
133 struct morus1280_state state;
134 struct skcipher_walk walk;
135
136 ops.skcipher_walk_init(&walk, req, true);
137
138 kernel_fpu_begin();
139
140 ctx->ops->init(&state, &ctx->key, req->iv);
141 crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
142 crypto_morus1280_glue_process_crypt(&state, ops, &walk);
143 ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
144
145 kernel_fpu_end();
146}
147
148int crypto_morus1280_glue_encrypt(struct aead_request *req)
149{
150 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
151 struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
152 struct morus1280_ops OPS = {
153 .skcipher_walk_init = skcipher_walk_aead_encrypt,
154 .crypt_blocks = ctx->ops->enc,
155 .crypt_tail = ctx->ops->enc_tail,
156 };
157
158 struct morus1280_block tag = {};
159 unsigned int authsize = crypto_aead_authsize(tfm);
160 unsigned int cryptlen = req->cryptlen;
161
162 crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag);
163
164 scatterwalk_map_and_copy(tag.bytes, req->dst,
165 req->assoclen + cryptlen, authsize, 1);
166 return 0;
167}
168EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt);
169
170int crypto_morus1280_glue_decrypt(struct aead_request *req)
171{
172 static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};
173
174 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
175 struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
176 struct morus1280_ops OPS = {
177 .skcipher_walk_init = skcipher_walk_aead_decrypt,
178 .crypt_blocks = ctx->ops->dec,
179 .crypt_tail = ctx->ops->dec_tail,
180 };
181
182 struct morus1280_block tag;
183 unsigned int authsize = crypto_aead_authsize(tfm);
184 unsigned int cryptlen = req->cryptlen - authsize;
185
186 scatterwalk_map_and_copy(tag.bytes, req->src,
187 req->assoclen + cryptlen, authsize, 0);
188
189 crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag);
190
191 return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
192}
193EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt);
194
195void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
196 const struct morus1280_glue_ops *ops)
197{
198 struct morus1280_ctx *ctx = crypto_aead_ctx(aead);
199 ctx->ops = ops;
200}
201EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
202
203MODULE_LICENSE("GPL");
204MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
205MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
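
Note on the removed morus1280_glue.c above: its crypto_morus1280_glue_process_ad() buffered associated data into MORUS1280_BLOCK_SIZE chunks while walking the scatterlist, flushing the buffer whenever it filled and zero-padding the final partial block. A standalone sketch of that block-buffering pattern over a flat byte stream (no scatterlist) is below; BLOCK_SIZE, process_block(), ad_feed() and ad_finish() are illustrative stand-ins, not kernel APIs.

/* Standalone sketch of block-buffered associated-data absorption, as done by
 * the removed glue code: arbitrary-sized chunks arrive, full blocks are
 * consumed directly, leftovers are carried over, and the last partial block
 * is zero-padded before absorption. */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 32

static void process_block(const unsigned char *block)
{
	/* Stand-in for ops->ad(state, block, BLOCK_SIZE). */
	printf("absorbed block starting with 0x%02x\n", block[0]);
}

struct ad_buffer {
	unsigned char buf[BLOCK_SIZE];
	unsigned int pos;
};

static void ad_feed(struct ad_buffer *ad, const unsigned char *src, size_t len)
{
	if (ad->pos + len >= BLOCK_SIZE) {
		if (ad->pos > 0) {
			size_t fill = BLOCK_SIZE - ad->pos;

			memcpy(ad->buf + ad->pos, src, fill);
			process_block(ad->buf);
			ad->pos = 0;
			src += fill;
			len -= fill;
		}
		while (len >= BLOCK_SIZE) {
			process_block(src);
			src += BLOCK_SIZE;
			len -= BLOCK_SIZE;
		}
	}
	memcpy(ad->buf + ad->pos, src, len);
	ad->pos += len;
}

static void ad_finish(struct ad_buffer *ad)
{
	if (ad->pos > 0) {
		memset(ad->buf + ad->pos, 0, BLOCK_SIZE - ad->pos);
		process_block(ad->buf);
		ad->pos = 0;
	}
}

int main(void)
{
	unsigned char data[64];
	struct ad_buffer ad = { .pos = 0 };

	memset(data, 0xab, sizeof(data));
	ad_feed(&ad, data, 10);   /* buffered only */
	ad_feed(&ad, data, 40);   /* fills and absorbs one block, buffers the rest */
	ad_finish(&ad);           /* zero-pads and absorbs the final partial block */
	return 0;
}
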
diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S
deleted file mode 100644
index a60891101bbd..000000000000
--- a/arch/x86/crypto/morus640-sse2-asm.S
+++ /dev/null
@@ -1,612 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * SSE2 implementation of MORUS-640
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <linux/linkage.h>
10#include <asm/frame.h>
11
12#define SHUFFLE_MASK(i0, i1, i2, i3) \
13 (i0 | (i1 << 2) | (i2 << 4) | (i3 << 6))
14
15#define MASK1 SHUFFLE_MASK(3, 0, 1, 2)
16#define MASK2 SHUFFLE_MASK(2, 3, 0, 1)
17#define MASK3 SHUFFLE_MASK(1, 2, 3, 0)
18
19#define STATE0 %xmm0
20#define STATE1 %xmm1
21#define STATE2 %xmm2
22#define STATE3 %xmm3
23#define STATE4 %xmm4
24#define KEY %xmm5
25#define MSG %xmm5
26#define T0 %xmm6
27#define T1 %xmm7
28
29.section .rodata.cst16.morus640_const, "aM", @progbits, 32
30.align 16
31.Lmorus640_const_0:
32 .byte 0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d
33 .byte 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62
34.Lmorus640_const_1:
35 .byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
36 .byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd
37
38.section .rodata.cst16.morus640_counter, "aM", @progbits, 16
39.align 16
40.Lmorus640_counter:
41 .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
42 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
43
44.text
45
46.macro morus640_round s0, s1, s2, s3, s4, b, w
47 movdqa \s1, T0
48 pand \s2, T0
49 pxor T0, \s0
50 pxor \s3, \s0
51 movdqa \s0, T0
52 pslld $\b, T0
53 psrld $(32 - \b), \s0
54 pxor T0, \s0
55 pshufd $\w, \s3, \s3
56.endm
57
58/*
59 * __morus640_update: internal ABI
60 * input:
61 * STATE[0-4] - input state
62 * MSG - message block
63 * output:
64 * STATE[0-4] - output state
65 * changed:
66 * T0
67 */
68__morus640_update:
69 morus640_round STATE0, STATE1, STATE2, STATE3, STATE4, 5, MASK1
70 pxor MSG, STATE1
71 morus640_round STATE1, STATE2, STATE3, STATE4, STATE0, 31, MASK2
72 pxor MSG, STATE2
73 morus640_round STATE2, STATE3, STATE4, STATE0, STATE1, 7, MASK3
74 pxor MSG, STATE3
75 morus640_round STATE3, STATE4, STATE0, STATE1, STATE2, 22, MASK2
76 pxor MSG, STATE4
77 morus640_round STATE4, STATE0, STATE1, STATE2, STATE3, 13, MASK1
78 ret
79ENDPROC(__morus640_update)
80
81
82/*
83 * __morus640_update_zero: internal ABI
84 * input:
85 * STATE[0-4] - input state
86 * output:
87 * STATE[0-4] - output state
88 * changed:
89 * T0
90 */
91__morus640_update_zero:
92 morus640_round STATE0, STATE1, STATE2, STATE3, STATE4, 5, MASK1
93 morus640_round STATE1, STATE2, STATE3, STATE4, STATE0, 31, MASK2
94 morus640_round STATE2, STATE3, STATE4, STATE0, STATE1, 7, MASK3
95 morus640_round STATE3, STATE4, STATE0, STATE1, STATE2, 22, MASK2
96 morus640_round STATE4, STATE0, STATE1, STATE2, STATE3, 13, MASK1
97 ret
98ENDPROC(__morus640_update_zero)
99
100/*
101 * __load_partial: internal ABI
102 * input:
103 * %rsi - src
104 * %rcx - bytes
105 * output:
106 * MSG - message block
107 * changed:
108 * T0
109 * %r8
110 * %r9
111 */
112__load_partial:
113 xor %r9d, %r9d
114 pxor MSG, MSG
115
116 mov %rcx, %r8
117 and $0x1, %r8
118 jz .Lld_partial_1
119
120 mov %rcx, %r8
121 and $0x1E, %r8
122 add %rsi, %r8
123 mov (%r8), %r9b
124
125.Lld_partial_1:
126 mov %rcx, %r8
127 and $0x2, %r8
128 jz .Lld_partial_2
129
130 mov %rcx, %r8
131 and $0x1C, %r8
132 add %rsi, %r8
133 shl $16, %r9
134 mov (%r8), %r9w
135
136.Lld_partial_2:
137 mov %rcx, %r8
138 and $0x4, %r8
139 jz .Lld_partial_4
140
141 mov %rcx, %r8
142 and $0x18, %r8
143 add %rsi, %r8
144 shl $32, %r9
145 mov (%r8), %r8d
146 xor %r8, %r9
147
148.Lld_partial_4:
149 movq %r9, MSG
150
151 mov %rcx, %r8
152 and $0x8, %r8
153 jz .Lld_partial_8
154
155 mov %rcx, %r8
156 and $0x10, %r8
157 add %rsi, %r8
158 pslldq $8, MSG
159 movq (%r8), T0
160 pxor T0, MSG
161
162.Lld_partial_8:
163 ret
164ENDPROC(__load_partial)
165
166/*
167 * __store_partial: internal ABI
168 * input:
169 * %rdx - dst
170 * %rcx - bytes
171 * output:
172 * T0 - message block
173 * changed:
174 * %r8
175 * %r9
176 * %r10
177 */
178__store_partial:
179 mov %rcx, %r8
180 mov %rdx, %r9
181
182 movq T0, %r10
183
184 cmp $8, %r8
185 jl .Lst_partial_8
186
187 mov %r10, (%r9)
188 psrldq $8, T0
189 movq T0, %r10
190
191 sub $8, %r8
192 add $8, %r9
193
194.Lst_partial_8:
195 cmp $4, %r8
196 jl .Lst_partial_4
197
198 mov %r10d, (%r9)
199 shr $32, %r10
200
201 sub $4, %r8
202 add $4, %r9
203
204.Lst_partial_4:
205 cmp $2, %r8
206 jl .Lst_partial_2
207
208 mov %r10w, (%r9)
209 shr $16, %r10
210
211 sub $2, %r8
212 add $2, %r9
213
214.Lst_partial_2:
215 cmp $1, %r8
216 jl .Lst_partial_1
217
218 mov %r10b, (%r9)
219
220.Lst_partial_1:
221 ret
222ENDPROC(__store_partial)
223
224/*
225 * void crypto_morus640_sse2_init(void *state, const void *key, const void *iv);
226 */
227ENTRY(crypto_morus640_sse2_init)
228 FRAME_BEGIN
229
230 /* load IV: */
231 movdqu (%rdx), STATE0
232 /* load key: */
233 movdqu (%rsi), KEY
234 movdqa KEY, STATE1
235 /* load all ones: */
236 pcmpeqd STATE2, STATE2
237 /* load the constants: */
238 movdqa .Lmorus640_const_0, STATE3
239 movdqa .Lmorus640_const_1, STATE4
240
241 /* update 16 times with zero: */
242 call __morus640_update_zero
243 call __morus640_update_zero
244 call __morus640_update_zero
245 call __morus640_update_zero
246 call __morus640_update_zero
247 call __morus640_update_zero
248 call __morus640_update_zero
249 call __morus640_update_zero
250 call __morus640_update_zero
251 call __morus640_update_zero
252 call __morus640_update_zero
253 call __morus640_update_zero
254 call __morus640_update_zero
255 call __morus640_update_zero
256 call __morus640_update_zero
257 call __morus640_update_zero
258
259 /* xor-in the key again after updates: */
260 pxor KEY, STATE1
261
262 /* store the state: */
263 movdqu STATE0, (0 * 16)(%rdi)
264 movdqu STATE1, (1 * 16)(%rdi)
265 movdqu STATE2, (2 * 16)(%rdi)
266 movdqu STATE3, (3 * 16)(%rdi)
267 movdqu STATE4, (4 * 16)(%rdi)
268
269 FRAME_END
270 ret
271ENDPROC(crypto_morus640_sse2_init)
272
273/*
274 * void crypto_morus640_sse2_ad(void *state, const void *data,
275 * unsigned int length);
276 */
277ENTRY(crypto_morus640_sse2_ad)
278 FRAME_BEGIN
279
280 cmp $16, %rdx
281 jb .Lad_out
282
283 /* load the state: */
284 movdqu (0 * 16)(%rdi), STATE0
285 movdqu (1 * 16)(%rdi), STATE1
286 movdqu (2 * 16)(%rdi), STATE2
287 movdqu (3 * 16)(%rdi), STATE3
288 movdqu (4 * 16)(%rdi), STATE4
289
290 mov %rsi, %r8
291 and $0xF, %r8
292 jnz .Lad_u_loop
293
294.align 4
295.Lad_a_loop:
296 movdqa (%rsi), MSG
297 call __morus640_update
298 sub $16, %rdx
299 add $16, %rsi
300 cmp $16, %rdx
301 jge .Lad_a_loop
302
303 jmp .Lad_cont
304.align 4
305.Lad_u_loop:
306 movdqu (%rsi), MSG
307 call __morus640_update
308 sub $16, %rdx
309 add $16, %rsi
310 cmp $16, %rdx
311 jge .Lad_u_loop
312
313.Lad_cont:
314 /* store the state: */
315 movdqu STATE0, (0 * 16)(%rdi)
316 movdqu STATE1, (1 * 16)(%rdi)
317 movdqu STATE2, (2 * 16)(%rdi)
318 movdqu STATE3, (3 * 16)(%rdi)
319 movdqu STATE4, (4 * 16)(%rdi)
320
321.Lad_out:
322 FRAME_END
323 ret
324ENDPROC(crypto_morus640_sse2_ad)
325
326/*
327 * void crypto_morus640_sse2_enc(void *state, const void *src, void *dst,
328 * unsigned int length);
329 */
330ENTRY(crypto_morus640_sse2_enc)
331 FRAME_BEGIN
332
333 cmp $16, %rcx
334 jb .Lenc_out
335
336 /* load the state: */
337 movdqu (0 * 16)(%rdi), STATE0
338 movdqu (1 * 16)(%rdi), STATE1
339 movdqu (2 * 16)(%rdi), STATE2
340 movdqu (3 * 16)(%rdi), STATE3
341 movdqu (4 * 16)(%rdi), STATE4
342
343 mov %rsi, %r8
344 or %rdx, %r8
345 and $0xF, %r8
346 jnz .Lenc_u_loop
347
348.align 4
349.Lenc_a_loop:
350 movdqa (%rsi), MSG
351 movdqa MSG, T0
352 pxor STATE0, T0
353 pshufd $MASK3, STATE1, T1
354 pxor T1, T0
355 movdqa STATE2, T1
356 pand STATE3, T1
357 pxor T1, T0
358 movdqa T0, (%rdx)
359
360 call __morus640_update
361 sub $16, %rcx
362 add $16, %rsi
363 add $16, %rdx
364 cmp $16, %rcx
365 jge .Lenc_a_loop
366
367 jmp .Lenc_cont
368.align 4
369.Lenc_u_loop:
370 movdqu (%rsi), MSG
371 movdqa MSG, T0
372 pxor STATE0, T0
373 pshufd $MASK3, STATE1, T1
374 pxor T1, T0
375 movdqa STATE2, T1
376 pand STATE3, T1
377 pxor T1, T0
378 movdqu T0, (%rdx)
379
380 call __morus640_update
381 sub $16, %rcx
382 add $16, %rsi
383 add $16, %rdx
384 cmp $16, %rcx
385 jge .Lenc_u_loop
386
387.Lenc_cont:
388 /* store the state: */
389 movdqu STATE0, (0 * 16)(%rdi)
390 movdqu STATE1, (1 * 16)(%rdi)
391 movdqu STATE2, (2 * 16)(%rdi)
392 movdqu STATE3, (3 * 16)(%rdi)
393 movdqu STATE4, (4 * 16)(%rdi)
394
395.Lenc_out:
396 FRAME_END
397 ret
398ENDPROC(crypto_morus640_sse2_enc)
399
400/*
401 * void crypto_morus640_sse2_enc_tail(void *state, const void *src, void *dst,
402 * unsigned int length);
403 */
404ENTRY(crypto_morus640_sse2_enc_tail)
405 FRAME_BEGIN
406
407 /* load the state: */
408 movdqu (0 * 16)(%rdi), STATE0
409 movdqu (1 * 16)(%rdi), STATE1
410 movdqu (2 * 16)(%rdi), STATE2
411 movdqu (3 * 16)(%rdi), STATE3
412 movdqu (4 * 16)(%rdi), STATE4
413
414 /* encrypt message: */
415 call __load_partial
416
417 movdqa MSG, T0
418 pxor STATE0, T0
419 pshufd $MASK3, STATE1, T1
420 pxor T1, T0
421 movdqa STATE2, T1
422 pand STATE3, T1
423 pxor T1, T0
424
425 call __store_partial
426
427 call __morus640_update
428
429 /* store the state: */
430 movdqu STATE0, (0 * 16)(%rdi)
431 movdqu STATE1, (1 * 16)(%rdi)
432 movdqu STATE2, (2 * 16)(%rdi)
433 movdqu STATE3, (3 * 16)(%rdi)
434 movdqu STATE4, (4 * 16)(%rdi)
435
436 FRAME_END
437 ret
438ENDPROC(crypto_morus640_sse2_enc_tail)
439
440/*
441 * void crypto_morus640_sse2_dec(void *state, const void *src, void *dst,
442 * unsigned int length);
443 */
444ENTRY(crypto_morus640_sse2_dec)
445 FRAME_BEGIN
446
447 cmp $16, %rcx
448 jb .Ldec_out
449
450 /* load the state: */
451 movdqu (0 * 16)(%rdi), STATE0
452 movdqu (1 * 16)(%rdi), STATE1
453 movdqu (2 * 16)(%rdi), STATE2
454 movdqu (3 * 16)(%rdi), STATE3
455 movdqu (4 * 16)(%rdi), STATE4
456
457 mov %rsi, %r8
458 or %rdx, %r8
459 and $0xF, %r8
460 jnz .Ldec_u_loop
461
462.align 4
463.Ldec_a_loop:
464 movdqa (%rsi), MSG
465 pxor STATE0, MSG
466 pshufd $MASK3, STATE1, T0
467 pxor T0, MSG
468 movdqa STATE2, T0
469 pand STATE3, T0
470 pxor T0, MSG
471 movdqa MSG, (%rdx)
472
473 call __morus640_update
474 sub $16, %rcx
475 add $16, %rsi
476 add $16, %rdx
477 cmp $16, %rcx
478 jge .Ldec_a_loop
479
480 jmp .Ldec_cont
481.align 4
482.Ldec_u_loop:
483 movdqu (%rsi), MSG
484 pxor STATE0, MSG
485 pshufd $MASK3, STATE1, T0
486 pxor T0, MSG
487 movdqa STATE2, T0
488 pand STATE3, T0
489 pxor T0, MSG
490 movdqu MSG, (%rdx)
491
492 call __morus640_update
493 sub $16, %rcx
494 add $16, %rsi
495 add $16, %rdx
496 cmp $16, %rcx
497 jge .Ldec_u_loop
498
499.Ldec_cont:
500 /* store the state: */
501 movdqu STATE0, (0 * 16)(%rdi)
502 movdqu STATE1, (1 * 16)(%rdi)
503 movdqu STATE2, (2 * 16)(%rdi)
504 movdqu STATE3, (3 * 16)(%rdi)
505 movdqu STATE4, (4 * 16)(%rdi)
506
507.Ldec_out:
508 FRAME_END
509 ret
510ENDPROC(crypto_morus640_sse2_dec)
511
512/*
513 * void crypto_morus640_sse2_dec_tail(void *state, const void *src, void *dst,
514 * unsigned int length);
515 */
516ENTRY(crypto_morus640_sse2_dec_tail)
517 FRAME_BEGIN
518
519 /* load the state: */
520 movdqu (0 * 16)(%rdi), STATE0
521 movdqu (1 * 16)(%rdi), STATE1
522 movdqu (2 * 16)(%rdi), STATE2
523 movdqu (3 * 16)(%rdi), STATE3
524 movdqu (4 * 16)(%rdi), STATE4
525
526 /* decrypt message: */
527 call __load_partial
528
529 pxor STATE0, MSG
530 pshufd $MASK3, STATE1, T0
531 pxor T0, MSG
532 movdqa STATE2, T0
533 pand STATE3, T0
534 pxor T0, MSG
535 movdqa MSG, T0
536
537 call __store_partial
538
539 /* mask with byte count: */
540 movq %rcx, T0
541 punpcklbw T0, T0
542 punpcklbw T0, T0
543 punpcklbw T0, T0
544 punpcklbw T0, T0
545 movdqa .Lmorus640_counter, T1
546 pcmpgtb T1, T0
547 pand T0, MSG
548
549 call __morus640_update
550
551 /* store the state: */
552 movdqu STATE0, (0 * 16)(%rdi)
553 movdqu STATE1, (1 * 16)(%rdi)
554 movdqu STATE2, (2 * 16)(%rdi)
555 movdqu STATE3, (3 * 16)(%rdi)
556 movdqu STATE4, (4 * 16)(%rdi)
557
558 FRAME_END
559 ret
560ENDPROC(crypto_morus640_sse2_dec_tail)
561
562/*
563 * void crypto_morus640_sse2_final(void *state, void *tag_xor,
564 * u64 assoclen, u64 cryptlen);
565 */
566ENTRY(crypto_morus640_sse2_final)
567 FRAME_BEGIN
568
569 /* load the state: */
570 movdqu (0 * 16)(%rdi), STATE0
571 movdqu (1 * 16)(%rdi), STATE1
572 movdqu (2 * 16)(%rdi), STATE2
573 movdqu (3 * 16)(%rdi), STATE3
574 movdqu (4 * 16)(%rdi), STATE4
575
576 /* xor state[0] into state[4]: */
577 pxor STATE0, STATE4
578
579 /* prepare length block: */
580 movq %rdx, MSG
581 movq %rcx, T0
582 pslldq $8, T0
583 pxor T0, MSG
584 psllq $3, MSG /* multiply by 8 (to get bit count) */
585
586 /* update state: */
587 call __morus640_update
588 call __morus640_update
589 call __morus640_update
590 call __morus640_update
591 call __morus640_update
592 call __morus640_update
593 call __morus640_update
594 call __morus640_update
595 call __morus640_update
596 call __morus640_update
597
598 /* xor tag: */
599 movdqu (%rsi), MSG
600
601 pxor STATE0, MSG
602 pshufd $MASK3, STATE1, T0
603 pxor T0, MSG
604 movdqa STATE2, T0
605 pand STATE3, T0
606 pxor T0, MSG
607
608 movdqu MSG, (%rsi)
609
610 FRAME_END
611 ret
612ENDPROC(crypto_morus640_sse2_final)
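
For reference, a minimal C sketch of the per-block keystream combine that the
.Lenc_a_loop/.Ldec_a_loop code above builds out of pxor/pand/pshufd. It assumes
the MORUS-640 relation C = P ^ S0 ^ (S1 <<< 96) ^ (S2 & S3) and a little-endian
word layout; the struct and helper names below are hypothetical, not taken from
the patch.

#include <stdint.h>

struct m640_reg { uint32_t w[4]; };	/* one 128-bit SSE state register */

/* rotate a 128-bit register left by 96 bits (the pshufd $MASK3 above) */
static struct m640_reg rotl96(struct m640_reg a)
{
	struct m640_reg r = { { a.w[1], a.w[2], a.w[3], a.w[0] } };
	return r;
}

/* C = P ^ S0 ^ (S1 <<< 96) ^ (S2 & S3), one 16-byte block at a time */
static void m640_xcrypt_block(uint8_t *dst, const uint8_t *src,
			      const struct m640_reg s[5])
{
	struct m640_reg ks = rotl96(s[1]);
	unsigned int i;

	for (i = 0; i < 4; i++)
		ks.w[i] ^= s[0].w[i] ^ (s[2].w[i] & s[3].w[i]);

	for (i = 0; i < 16; i++)
		dst[i] = src[i] ^ ((const uint8_t *)ks.w)[i];
}
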
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
deleted file mode 100644
index 8ef68134aef4..000000000000
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ /dev/null
@@ -1,61 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-640 Authenticated-Encryption Algorithm
4 * Glue for SSE2 implementation
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/simd.h>
12#include <crypto/morus640_glue.h>
13#include <linux/module.h>
14#include <asm/fpu/api.h>
15#include <asm/cpu_device_id.h>
16
17asmlinkage void crypto_morus640_sse2_init(void *state, const void *key,
18 const void *iv);
19asmlinkage void crypto_morus640_sse2_ad(void *state, const void *data,
20 unsigned int length);
21
22asmlinkage void crypto_morus640_sse2_enc(void *state, const void *src,
23 void *dst, unsigned int length);
24asmlinkage void crypto_morus640_sse2_dec(void *state, const void *src,
25 void *dst, unsigned int length);
26
27asmlinkage void crypto_morus640_sse2_enc_tail(void *state, const void *src,
28 void *dst, unsigned int length);
29asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src,
30 void *dst, unsigned int length);
31
32asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
33 u64 assoclen, u64 cryptlen);
34
35MORUS640_DECLARE_ALG(sse2, "morus640-sse2", 400);
36
37static struct simd_aead_alg *simd_alg;
38
39static int __init crypto_morus640_sse2_module_init(void)
40{
41 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
42 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
43 return -ENODEV;
44
45 return simd_register_aeads_compat(&crypto_morus640_sse2_alg, 1,
46 &simd_alg);
47}
48
49static void __exit crypto_morus640_sse2_module_exit(void)
50{
51 simd_unregister_aeads(&crypto_morus640_sse2_alg, 1, &simd_alg);
52}
53
54module_init(crypto_morus640_sse2_module_init);
55module_exit(crypto_morus640_sse2_module_exit);
56
57MODULE_LICENSE("GPL");
58MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
59MODULE_DESCRIPTION("MORUS-640 AEAD algorithm -- SSE2 implementation");
60MODULE_ALIAS_CRYPTO("morus640");
61MODULE_ALIAS_CRYPTO("morus640-sse2");
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
deleted file mode 100644
index d8b5fd6cef29..000000000000
--- a/arch/x86/crypto/morus640_glue.c
+++ /dev/null
@@ -1,200 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-640 Authenticated-Encryption Algorithm
4 * Common x86 SIMD glue skeleton
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#include <crypto/internal/aead.h>
11#include <crypto/internal/skcipher.h>
12#include <crypto/morus640_glue.h>
13#include <crypto/scatterwalk.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/scatterlist.h>
19#include <asm/fpu/api.h>
20
21struct morus640_state {
22 struct morus640_block s[MORUS_STATE_BLOCKS];
23};
24
25struct morus640_ops {
26 int (*skcipher_walk_init)(struct skcipher_walk *walk,
27 struct aead_request *req, bool atomic);
28
29 void (*crypt_blocks)(void *state, const void *src, void *dst,
30 unsigned int length);
31 void (*crypt_tail)(void *state, const void *src, void *dst,
32 unsigned int length);
33};
34
35static void crypto_morus640_glue_process_ad(
36 struct morus640_state *state,
37 const struct morus640_glue_ops *ops,
38 struct scatterlist *sg_src, unsigned int assoclen)
39{
40 struct scatter_walk walk;
41 struct morus640_block buf;
42 unsigned int pos = 0;
43
44 scatterwalk_start(&walk, sg_src);
45 while (assoclen != 0) {
46 unsigned int size = scatterwalk_clamp(&walk, assoclen);
47 unsigned int left = size;
48 void *mapped = scatterwalk_map(&walk);
49 const u8 *src = (const u8 *)mapped;
50
51 if (pos + size >= MORUS640_BLOCK_SIZE) {
52 if (pos > 0) {
53 unsigned int fill = MORUS640_BLOCK_SIZE - pos;
54 memcpy(buf.bytes + pos, src, fill);
55 ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
56 pos = 0;
57 left -= fill;
58 src += fill;
59 }
60
61 ops->ad(state, src, left);
62 src += left & ~(MORUS640_BLOCK_SIZE - 1);
63 left &= MORUS640_BLOCK_SIZE - 1;
64 }
65
66 memcpy(buf.bytes + pos, src, left);
67
68 pos += left;
69 assoclen -= size;
70 scatterwalk_unmap(mapped);
71 scatterwalk_advance(&walk, size);
72 scatterwalk_done(&walk, 0, assoclen);
73 }
74
75 if (pos > 0) {
76 memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos);
77 ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
78 }
79}
80
81static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
82 struct morus640_ops ops,
83 struct skcipher_walk *walk)
84{
85 while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
86 ops.crypt_blocks(state, walk->src.virt.addr,
87 walk->dst.virt.addr,
88 round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
89 skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
90 }
91
92 if (walk->nbytes) {
93 ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
94 walk->nbytes);
95 skcipher_walk_done(walk, 0);
96 }
97}
98
99int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
100 unsigned int keylen)
101{
102 struct morus640_ctx *ctx = crypto_aead_ctx(aead);
103
104 if (keylen != MORUS640_BLOCK_SIZE) {
105 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
106 return -EINVAL;
107 }
108
109 memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE);
110 return 0;
111}
112EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey);
113
114int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
115 unsigned int authsize)
116{
117 return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
118}
119EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize);
120
121static void crypto_morus640_glue_crypt(struct aead_request *req,
122 struct morus640_ops ops,
123 unsigned int cryptlen,
124 struct morus640_block *tag_xor)
125{
126 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
127 struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
128 struct morus640_state state;
129 struct skcipher_walk walk;
130
131 ops.skcipher_walk_init(&walk, req, true);
132
133 kernel_fpu_begin();
134
135 ctx->ops->init(&state, &ctx->key, req->iv);
136 crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
137 crypto_morus640_glue_process_crypt(&state, ops, &walk);
138 ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
139
140 kernel_fpu_end();
141}
142
143int crypto_morus640_glue_encrypt(struct aead_request *req)
144{
145 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
146 struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
147 struct morus640_ops OPS = {
148 .skcipher_walk_init = skcipher_walk_aead_encrypt,
149 .crypt_blocks = ctx->ops->enc,
150 .crypt_tail = ctx->ops->enc_tail,
151 };
152
153 struct morus640_block tag = {};
154 unsigned int authsize = crypto_aead_authsize(tfm);
155 unsigned int cryptlen = req->cryptlen;
156
157 crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag);
158
159 scatterwalk_map_and_copy(tag.bytes, req->dst,
160 req->assoclen + cryptlen, authsize, 1);
161 return 0;
162}
163EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt);
164
165int crypto_morus640_glue_decrypt(struct aead_request *req)
166{
167 static const u8 zeros[MORUS640_BLOCK_SIZE] = {};
168
169 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
170 struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
171 struct morus640_ops OPS = {
172 .skcipher_walk_init = skcipher_walk_aead_decrypt,
173 .crypt_blocks = ctx->ops->dec,
174 .crypt_tail = ctx->ops->dec_tail,
175 };
176
177 struct morus640_block tag;
178 unsigned int authsize = crypto_aead_authsize(tfm);
179 unsigned int cryptlen = req->cryptlen - authsize;
180
181 scatterwalk_map_and_copy(tag.bytes, req->src,
182 req->assoclen + cryptlen, authsize, 0);
183
184 crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag);
185
186 return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
187}
188EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt);
189
190void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
191 const struct morus640_glue_ops *ops)
192{
193 struct morus640_ctx *ctx = crypto_aead_ctx(aead);
194 ctx->ops = ops;
195}
196EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
197
198MODULE_LICENSE("GPL");
199MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
200MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
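
A self-contained sketch (not from the patch) of the tag-check convention used by
crypto_morus640_glue_decrypt() above: the expected tag is handed to final() as
tag_xor, the computed tag is XORed into it, and the result must be all zero,
compared in constant time the way crypto_memneq() is used against a zero block.
The helper name below is hypothetical.

#include <stdint.h>

static int morus_tag_ok(const uint8_t *xored_tag, unsigned int authsize)
{
	uint8_t acc = 0;
	unsigned int i;

	/* constant-time "is it all zero?" check, standing in for crypto_memneq() */
	for (i = 0; i < authsize; i++)
		acc |= xored_tag[i];

	return acc == 0;
}
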
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index b871728e0b2f..13fd8d3d2da0 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -167,7 +167,7 @@ static int xts_encrypt(struct skcipher_request *req)
167 167
168 return glue_xts_req_128bit(&serpent_enc_xts, req, 168 return glue_xts_req_128bit(&serpent_enc_xts, req,
169 XTS_TWEAK_CAST(__serpent_encrypt), 169 XTS_TWEAK_CAST(__serpent_encrypt),
170 &ctx->tweak_ctx, &ctx->crypt_ctx); 170 &ctx->tweak_ctx, &ctx->crypt_ctx, false);
171} 171}
172 172
173static int xts_decrypt(struct skcipher_request *req) 173static int xts_decrypt(struct skcipher_request *req)
@@ -177,7 +177,7 @@ static int xts_decrypt(struct skcipher_request *req)
177 177
178 return glue_xts_req_128bit(&serpent_dec_xts, req, 178 return glue_xts_req_128bit(&serpent_dec_xts, req,
179 XTS_TWEAK_CAST(__serpent_encrypt), 179 XTS_TWEAK_CAST(__serpent_encrypt),
180 &ctx->tweak_ctx, &ctx->crypt_ctx); 180 &ctx->tweak_ctx, &ctx->crypt_ctx, true);
181} 181}
182 182
183static struct skcipher_alg serpent_algs[] = { 183static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 4a9a9f2ee1d8..7d3dca38a5a2 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -207,7 +207,7 @@ static int xts_encrypt(struct skcipher_request *req)
207 207
208 return glue_xts_req_128bit(&serpent_enc_xts, req, 208 return glue_xts_req_128bit(&serpent_enc_xts, req,
209 XTS_TWEAK_CAST(__serpent_encrypt), 209 XTS_TWEAK_CAST(__serpent_encrypt),
210 &ctx->tweak_ctx, &ctx->crypt_ctx); 210 &ctx->tweak_ctx, &ctx->crypt_ctx, false);
211} 211}
212 212
213static int xts_decrypt(struct skcipher_request *req) 213static int xts_decrypt(struct skcipher_request *req)
@@ -217,7 +217,7 @@ static int xts_decrypt(struct skcipher_request *req)
217 217
218 return glue_xts_req_128bit(&serpent_dec_xts, req, 218 return glue_xts_req_128bit(&serpent_dec_xts, req,
219 XTS_TWEAK_CAST(__serpent_encrypt), 219 XTS_TWEAK_CAST(__serpent_encrypt),
220 &ctx->tweak_ctx, &ctx->crypt_ctx); 220 &ctx->tweak_ctx, &ctx->crypt_ctx, true);
221} 221}
222 222
223static struct skcipher_alg serpent_algs[] = { 223static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 73867da3cbee..f9aff31fe59e 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -45,8 +45,8 @@ asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
45 u64 rounds); 45 u64 rounds);
46typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds); 46typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds);
47 47
48static int sha256_update(struct shash_desc *desc, const u8 *data, 48static int _sha256_update(struct shash_desc *desc, const u8 *data,
49 unsigned int len, sha256_transform_fn *sha256_xform) 49 unsigned int len, sha256_transform_fn *sha256_xform)
50{ 50{
51 struct sha256_state *sctx = shash_desc_ctx(desc); 51 struct sha256_state *sctx = shash_desc_ctx(desc);
52 52
@@ -84,7 +84,7 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
84static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data, 84static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data,
85 unsigned int len) 85 unsigned int len)
86{ 86{
87 return sha256_update(desc, data, len, sha256_transform_ssse3); 87 return _sha256_update(desc, data, len, sha256_transform_ssse3);
88} 88}
89 89
90static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data, 90static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data,
@@ -151,7 +151,7 @@ asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
151static int sha256_avx_update(struct shash_desc *desc, const u8 *data, 151static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
152 unsigned int len) 152 unsigned int len)
153{ 153{
154 return sha256_update(desc, data, len, sha256_transform_avx); 154 return _sha256_update(desc, data, len, sha256_transform_avx);
155} 155}
156 156
157static int sha256_avx_finup(struct shash_desc *desc, const u8 *data, 157static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
@@ -233,7 +233,7 @@ asmlinkage void sha256_transform_rorx(u32 *digest, const char *data,
233static int sha256_avx2_update(struct shash_desc *desc, const u8 *data, 233static int sha256_avx2_update(struct shash_desc *desc, const u8 *data,
234 unsigned int len) 234 unsigned int len)
235{ 235{
236 return sha256_update(desc, data, len, sha256_transform_rorx); 236 return _sha256_update(desc, data, len, sha256_transform_rorx);
237} 237}
238 238
239static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data, 239static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data,
@@ -313,7 +313,7 @@ asmlinkage void sha256_ni_transform(u32 *digest, const char *data,
313static int sha256_ni_update(struct shash_desc *desc, const u8 *data, 313static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
314 unsigned int len) 314 unsigned int len)
315{ 315{
316 return sha256_update(desc, data, len, sha256_ni_transform); 316 return _sha256_update(desc, data, len, sha256_ni_transform);
317} 317}
318 318
319static int sha256_ni_finup(struct shash_desc *desc, const u8 *data, 319static int sha256_ni_finup(struct shash_desc *desc, const u8 *data,
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 0dbf8e8b09d7..d561c821788b 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -210,7 +210,7 @@ static int xts_encrypt(struct skcipher_request *req)
210 210
211 return glue_xts_req_128bit(&twofish_enc_xts, req, 211 return glue_xts_req_128bit(&twofish_enc_xts, req,
212 XTS_TWEAK_CAST(twofish_enc_blk), 212 XTS_TWEAK_CAST(twofish_enc_blk),
213 &ctx->tweak_ctx, &ctx->crypt_ctx); 213 &ctx->tweak_ctx, &ctx->crypt_ctx, false);
214} 214}
215 215
216static int xts_decrypt(struct skcipher_request *req) 216static int xts_decrypt(struct skcipher_request *req)
@@ -220,7 +220,7 @@ static int xts_decrypt(struct skcipher_request *req)
220 220
221 return glue_xts_req_128bit(&twofish_dec_xts, req, 221 return glue_xts_req_128bit(&twofish_dec_xts, req,
222 XTS_TWEAK_CAST(twofish_enc_blk), 222 XTS_TWEAK_CAST(twofish_enc_blk),
223 &ctx->tweak_ctx, &ctx->crypt_ctx); 223 &ctx->tweak_ctx, &ctx->crypt_ctx, true);
224} 224}
225 225
226static struct skcipher_alg twofish_algs[] = { 226static struct skcipher_alg twofish_algs[] = {
diff --git a/arch/x86/include/asm/crypto/aes.h b/arch/x86/include/asm/crypto/aes.h
deleted file mode 100644
index c508521dd190..000000000000
--- a/arch/x86/include/asm/crypto/aes.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef ASM_X86_AES_H
3#define ASM_X86_AES_H
4
5#include <linux/crypto.h>
6#include <crypto/aes.h>
7
8void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
9 const u8 *src);
10void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
11 const u8 *src);
12#endif
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index d1818634ae7e..8d4a8e1226ee 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -114,7 +114,7 @@ extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
114extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, 114extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
115 struct skcipher_request *req, 115 struct skcipher_request *req,
116 common_glue_func_t tweak_fn, void *tweak_ctx, 116 common_glue_func_t tweak_fn, void *tweak_ctx,
117 void *crypt_ctx); 117 void *crypt_ctx, bool decrypt);
118 118
119extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, 119extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
120 le128 *iv, common_glue_func_t fn); 120 le128 *iv, common_glue_func_t fn);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 10fb42da0007..527749066d31 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -9,9 +9,11 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
9$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE 9$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
10 $(call if_changed_rule,cc_o_c) 10 $(call if_changed_rule,cc_o_c)
11 11
12$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE 12$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
13 $(call if_changed_rule,cc_o_c) 13 $(call if_changed_rule,cc_o_c)
14 14
15CFLAGS_sha256.o := -D__DISABLE_EXPORTS
16
15LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib 17LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
16targets += purgatory.ro 18targets += purgatory.ro
17 19
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index b607bda786f6..3b95410ff0f8 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -9,7 +9,7 @@
9 */ 9 */
10 10
11#include <linux/bug.h> 11#include <linux/bug.h>
12#include <linux/sha256.h> 12#include <crypto/sha.h>
13#include <asm/purgatory.h> 13#include <asm/purgatory.h>
14 14
15#include "../boot/string.h" 15#include "../boot/string.h"
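
For orientation, a hedged sketch of how purgatory-side code consumes the
relocated SHA-256 library after this change. The sha256_init()/sha256_update()/
sha256_final() names come from lib/crypto/sha256.c as merged in this series;
the helper below is illustrative and assumes memcmp() from purgatory's own
string routines.

#include <crypto/sha.h>

static int verify_region_digest(const u8 *buf, unsigned long len,
				const u8 expect[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;
	u8 digest[SHA256_DIGEST_SIZE];

	sha256_init(&sctx);
	sha256_update(&sctx, buf, len);
	sha256_final(&sctx, digest);

	return memcmp(digest, expect, sizeof(digest)) ? -1 : 0;
}
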
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e801450bcb1c..ad86463de715 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -306,19 +306,10 @@ config CRYPTO_AEGIS128
306 help 306 help
307 Support for the AEGIS-128 dedicated AEAD algorithm. 307 Support for the AEGIS-128 dedicated AEAD algorithm.
308 308
309config CRYPTO_AEGIS128L 309config CRYPTO_AEGIS128_SIMD
310 tristate "AEGIS-128L AEAD algorithm" 310 bool "Support SIMD acceleration for AEGIS-128"
311 select CRYPTO_AEAD 311 depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
312 select CRYPTO_AES # for AES S-box tables 312 default y
313 help
314 Support for the AEGIS-128L dedicated AEAD algorithm.
315
316config CRYPTO_AEGIS256
317 tristate "AEGIS-256 AEAD algorithm"
318 select CRYPTO_AEAD
319 select CRYPTO_AES # for AES S-box tables
320 help
321 Support for the AEGIS-256 dedicated AEAD algorithm.
322 313
323config CRYPTO_AEGIS128_AESNI_SSE2 314config CRYPTO_AEGIS128_AESNI_SSE2
324 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" 315 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
@@ -328,78 +319,6 @@ config CRYPTO_AEGIS128_AESNI_SSE2
328 help 319 help
329 AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm. 320 AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
330 321
331config CRYPTO_AEGIS128L_AESNI_SSE2
332 tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
333 depends on X86 && 64BIT
334 select CRYPTO_AEAD
335 select CRYPTO_SIMD
336 help
337 AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
338
339config CRYPTO_AEGIS256_AESNI_SSE2
340 tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
341 depends on X86 && 64BIT
342 select CRYPTO_AEAD
343 select CRYPTO_SIMD
344 help
345 AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
346
347config CRYPTO_MORUS640
348 tristate "MORUS-640 AEAD algorithm"
349 select CRYPTO_AEAD
350 help
351 Support for the MORUS-640 dedicated AEAD algorithm.
352
353config CRYPTO_MORUS640_GLUE
354 tristate
355 depends on X86
356 select CRYPTO_AEAD
357 select CRYPTO_SIMD
358 help
359 Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
360 algorithm.
361
362config CRYPTO_MORUS640_SSE2
363 tristate "MORUS-640 AEAD algorithm (x86_64 SSE2 implementation)"
364 depends on X86 && 64BIT
365 select CRYPTO_AEAD
366 select CRYPTO_MORUS640_GLUE
367 help
368 SSE2 implementation of the MORUS-640 dedicated AEAD algorithm.
369
370config CRYPTO_MORUS1280
371 tristate "MORUS-1280 AEAD algorithm"
372 select CRYPTO_AEAD
373 help
374 Support for the MORUS-1280 dedicated AEAD algorithm.
375
376config CRYPTO_MORUS1280_GLUE
377 tristate
378 depends on X86
379 select CRYPTO_AEAD
380 select CRYPTO_SIMD
381 help
382 Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
383 algorithm.
384
385config CRYPTO_MORUS1280_SSE2
386 tristate "MORUS-1280 AEAD algorithm (x86_64 SSE2 implementation)"
387 depends on X86 && 64BIT
388 select CRYPTO_AEAD
389 select CRYPTO_MORUS1280_GLUE
390 help
 391	  SSE2 optimized implementation of the MORUS-1280 dedicated AEAD
392 algorithm.
393
394config CRYPTO_MORUS1280_AVX2
395 tristate "MORUS-1280 AEAD algorithm (x86_64 AVX2 implementation)"
396 depends on X86 && 64BIT
397 select CRYPTO_AEAD
398 select CRYPTO_MORUS1280_GLUE
399 help
400 AVX2 optimized implementation of the MORUS-1280 dedicated AEAD
401 algorithm.
402
403config CRYPTO_SEQIV 322config CRYPTO_SEQIV
404 tristate "Sequence Number IV Generator" 323 tristate "Sequence Number IV Generator"
405 select CRYPTO_AEAD 324 select CRYPTO_AEAD
@@ -728,11 +647,12 @@ config CRYPTO_VPMSUM_TESTER
728 Unless you are testing these algorithms, you don't need this. 647 Unless you are testing these algorithms, you don't need this.
729 648
730config CRYPTO_GHASH 649config CRYPTO_GHASH
731 tristate "GHASH digest algorithm" 650 tristate "GHASH hash function"
732 select CRYPTO_GF128MUL 651 select CRYPTO_GF128MUL
733 select CRYPTO_HASH 652 select CRYPTO_HASH
734 help 653 help
735 GHASH is message digest algorithm for GCM (Galois/Counter Mode). 654 GHASH is the hash function used in GCM (Galois/Counter Mode).
655 It is not a general-purpose cryptographic hash function.
736 656
737config CRYPTO_POLY1305 657config CRYPTO_POLY1305
738 tristate "Poly1305 authenticator algorithm" 658 tristate "Poly1305 authenticator algorithm"
@@ -929,9 +849,13 @@ config CRYPTO_SHA1_PPC_SPE
929 SHA-1 secure hash standard (DFIPS 180-4) implemented 849 SHA-1 secure hash standard (DFIPS 180-4) implemented
930 using powerpc SPE SIMD instruction set. 850 using powerpc SPE SIMD instruction set.
931 851
852config CRYPTO_LIB_SHA256
853 tristate
854
932config CRYPTO_SHA256 855config CRYPTO_SHA256
933 tristate "SHA224 and SHA256 digest algorithm" 856 tristate "SHA224 and SHA256 digest algorithm"
934 select CRYPTO_HASH 857 select CRYPTO_HASH
858 select CRYPTO_LIB_SHA256
935 help 859 help
936 SHA256 secure hash standard (DFIPS 180-2). 860 SHA256 secure hash standard (DFIPS 180-2).
937 861
@@ -1057,18 +981,22 @@ config CRYPTO_WP512
1057 <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html> 981 <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
1058 982
1059config CRYPTO_GHASH_CLMUL_NI_INTEL 983config CRYPTO_GHASH_CLMUL_NI_INTEL
1060 tristate "GHASH digest algorithm (CLMUL-NI accelerated)" 984 tristate "GHASH hash function (CLMUL-NI accelerated)"
1061 depends on X86 && 64BIT 985 depends on X86 && 64BIT
1062 select CRYPTO_CRYPTD 986 select CRYPTO_CRYPTD
1063 help 987 help
1064 GHASH is message digest algorithm for GCM (Galois/Counter Mode). 988 This is the x86_64 CLMUL-NI accelerated implementation of
1065 The implementation is accelerated by CLMUL-NI of Intel. 989 GHASH, the hash function used in GCM (Galois/Counter mode).
1066 990
1067comment "Ciphers" 991comment "Ciphers"
1068 992
993config CRYPTO_LIB_AES
994 tristate
995
1069config CRYPTO_AES 996config CRYPTO_AES
1070 tristate "AES cipher algorithms" 997 tristate "AES cipher algorithms"
1071 select CRYPTO_ALGAPI 998 select CRYPTO_ALGAPI
999 select CRYPTO_LIB_AES
1072 help 1000 help
1073 AES cipher algorithms (FIPS-197). AES uses the Rijndael 1001 AES cipher algorithms (FIPS-197). AES uses the Rijndael
1074 algorithm. 1002 algorithm.
@@ -1089,6 +1017,7 @@ config CRYPTO_AES
1089config CRYPTO_AES_TI 1017config CRYPTO_AES_TI
1090 tristate "Fixed time AES cipher" 1018 tristate "Fixed time AES cipher"
1091 select CRYPTO_ALGAPI 1019 select CRYPTO_ALGAPI
1020 select CRYPTO_LIB_AES
1092 help 1021 help
1093 This is a generic implementation of AES that attempts to eliminate 1022 This is a generic implementation of AES that attempts to eliminate
1094 data dependent latencies as much as possible without affecting 1023 data dependent latencies as much as possible without affecting
@@ -1104,56 +1033,11 @@ config CRYPTO_AES_TI
1104 block. Interrupts are also disabled to avoid races where cachelines 1033 block. Interrupts are also disabled to avoid races where cachelines
1105 are evicted when the CPU is interrupted to do something else. 1034 are evicted when the CPU is interrupted to do something else.
1106 1035
1107config CRYPTO_AES_586
1108 tristate "AES cipher algorithms (i586)"
1109 depends on (X86 || UML_X86) && !64BIT
1110 select CRYPTO_ALGAPI
1111 select CRYPTO_AES
1112 help
1113 AES cipher algorithms (FIPS-197). AES uses the Rijndael
1114 algorithm.
1115
1116 Rijndael appears to be consistently a very good performer in
1117 both hardware and software across a wide range of computing
1118 environments regardless of its use in feedback or non-feedback
1119 modes. Its key setup time is excellent, and its key agility is
1120 good. Rijndael's very low memory requirements make it very well
1121 suited for restricted-space environments, in which it also
1122 demonstrates excellent performance. Rijndael's operations are
1123 among the easiest to defend against power and timing attacks.
1124
1125 The AES specifies three key sizes: 128, 192 and 256 bits
1126
1127 See <http://csrc.nist.gov/encryption/aes/> for more information.
1128
1129config CRYPTO_AES_X86_64
1130 tristate "AES cipher algorithms (x86_64)"
1131 depends on (X86 || UML_X86) && 64BIT
1132 select CRYPTO_ALGAPI
1133 select CRYPTO_AES
1134 help
1135 AES cipher algorithms (FIPS-197). AES uses the Rijndael
1136 algorithm.
1137
1138 Rijndael appears to be consistently a very good performer in
1139 both hardware and software across a wide range of computing
1140 environments regardless of its use in feedback or non-feedback
1141 modes. Its key setup time is excellent, and its key agility is
1142 good. Rijndael's very low memory requirements make it very well
1143 suited for restricted-space environments, in which it also
1144 demonstrates excellent performance. Rijndael's operations are
1145 among the easiest to defend against power and timing attacks.
1146
1147 The AES specifies three key sizes: 128, 192 and 256 bits
1148
1149 See <http://csrc.nist.gov/encryption/aes/> for more information.
1150
1151config CRYPTO_AES_NI_INTEL 1036config CRYPTO_AES_NI_INTEL
1152 tristate "AES cipher algorithms (AES-NI)" 1037 tristate "AES cipher algorithms (AES-NI)"
1153 depends on X86 1038 depends on X86
1154 select CRYPTO_AEAD 1039 select CRYPTO_AEAD
1155 select CRYPTO_AES_X86_64 if 64BIT 1040 select CRYPTO_LIB_AES
1156 select CRYPTO_AES_586 if !64BIT
1157 select CRYPTO_ALGAPI 1041 select CRYPTO_ALGAPI
1158 select CRYPTO_BLKCIPHER 1042 select CRYPTO_BLKCIPHER
1159 select CRYPTO_GLUE_HELPER_X86 if 64BIT 1043 select CRYPTO_GLUE_HELPER_X86 if 64BIT
@@ -1426,9 +1310,13 @@ config CRYPTO_CAST6_AVX_X86_64
1426 This module provides the Cast6 cipher algorithm that processes 1310 This module provides the Cast6 cipher algorithm that processes
1427 eight blocks parallel using the AVX instruction set. 1311 eight blocks parallel using the AVX instruction set.
1428 1312
1313config CRYPTO_LIB_DES
1314 tristate
1315
1429config CRYPTO_DES 1316config CRYPTO_DES
1430 tristate "DES and Triple DES EDE cipher algorithms" 1317 tristate "DES and Triple DES EDE cipher algorithms"
1431 select CRYPTO_ALGAPI 1318 select CRYPTO_ALGAPI
1319 select CRYPTO_LIB_DES
1432 help 1320 help
1433 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 1321 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
1434 1322
@@ -1436,7 +1324,7 @@ config CRYPTO_DES_SPARC64
1436 tristate "DES and Triple DES EDE cipher algorithms (SPARC64)" 1324 tristate "DES and Triple DES EDE cipher algorithms (SPARC64)"
1437 depends on SPARC64 1325 depends on SPARC64
1438 select CRYPTO_ALGAPI 1326 select CRYPTO_ALGAPI
1439 select CRYPTO_DES 1327 select CRYPTO_LIB_DES
1440 help 1328 help
1441 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), 1329 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
1442 optimized using SPARC64 crypto opcodes. 1330 optimized using SPARC64 crypto opcodes.
@@ -1445,7 +1333,7 @@ config CRYPTO_DES3_EDE_X86_64
1445 tristate "Triple DES EDE cipher algorithm (x86-64)" 1333 tristate "Triple DES EDE cipher algorithm (x86-64)"
1446 depends on X86 && 64BIT 1334 depends on X86 && 64BIT
1447 select CRYPTO_BLKCIPHER 1335 select CRYPTO_BLKCIPHER
1448 select CRYPTO_DES 1336 select CRYPTO_LIB_DES
1449 help 1337 help
1450 Triple DES EDE (FIPS 46-3) algorithm. 1338 Triple DES EDE (FIPS 46-3) algorithm.
1451 1339
diff --git a/crypto/Makefile b/crypto/Makefile
index 9479e1a45d8c..0d2cdd523fd9 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -90,10 +90,26 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o
90obj-$(CONFIG_CRYPTO_CCM) += ccm.o 90obj-$(CONFIG_CRYPTO_CCM) += ccm.o
91obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o 91obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
92obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o 92obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
93obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o 93aegis128-y := aegis128-core.o
94obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o 94
95obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o 95ifeq ($(ARCH),arm)
96obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o 96CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp
97CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8
98aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
99endif
100ifeq ($(ARCH),arm64)
101aegis128-cflags-y := -ffreestanding -mcpu=generic+crypto
102aegis128-cflags-$(CONFIG_CC_IS_GCC) += -ffixed-q16 -ffixed-q17 -ffixed-q18 \
103 -ffixed-q19 -ffixed-q20 -ffixed-q21 \
104 -ffixed-q22 -ffixed-q23 -ffixed-q24 \
105 -ffixed-q25 -ffixed-q26 -ffixed-q27 \
106 -ffixed-q28 -ffixed-q29 -ffixed-q30 \
107 -ffixed-q31
108CFLAGS_aegis128-neon-inner.o += $(aegis128-cflags-y)
109CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only
110aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
111endif
112
97obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o 113obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
98obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o 114obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
99obj-$(CONFIG_CRYPTO_DES) += des_generic.o 115obj-$(CONFIG_CRYPTO_DES) += des_generic.o
@@ -136,6 +152,8 @@ obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
136obj-$(CONFIG_CRYPTO_DRBG) += drbg.o 152obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
137obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o 153obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o
138CFLAGS_jitterentropy.o = -O0 154CFLAGS_jitterentropy.o = -O0
155KASAN_SANITIZE_jitterentropy.o = n
156UBSAN_SANITIZE_jitterentropy.o = n
139jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o 157jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
140obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o 158obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
141obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o 159obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
diff --git a/crypto/aead.c b/crypto/aead.c
index fbf0ec93bc8e..ce035589cf57 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -70,7 +70,8 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
70{ 70{
71 int err; 71 int err;
72 72
73 if (authsize > crypto_aead_maxauthsize(tfm)) 73 if ((!authsize && crypto_aead_maxauthsize(tfm)) ||
74 authsize > crypto_aead_maxauthsize(tfm))
74 return -EINVAL; 75 return -EINVAL;
75 76
76 if (crypto_aead_alg(tfm)->setauthsize) { 77 if (crypto_aead_alg(tfm)->setauthsize) {
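
In plain C, the tightened rule above amounts to the following predicate: a zero
tag length is now rejected for any AEAD whose maxauthsize is non-zero. This is
a restatement of the new condition in crypto_aead_setauthsize(), not additional
behaviour.

#include <linux/errno.h>

static int authsize_acceptable(unsigned int authsize, unsigned int maxauthsize)
{
	/* same condition as the new check in crypto_aead_setauthsize() */
	if ((!authsize && maxauthsize) || authsize > maxauthsize)
		return -EINVAL;

	return 0;
}
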
diff --git a/crypto/aegis.h b/crypto/aegis.h
index 41a3090cda8e..6920ebe77679 100644
--- a/crypto/aegis.h
+++ b/crypto/aegis.h
@@ -10,6 +10,7 @@
10#define _CRYPTO_AEGIS_H 10#define _CRYPTO_AEGIS_H
11 11
12#include <crypto/aes.h> 12#include <crypto/aes.h>
13#include <linux/bitops.h>
13#include <linux/types.h> 14#include <linux/types.h>
14 15
15#define AEGIS_BLOCK_SIZE 16 16#define AEGIS_BLOCK_SIZE 16
@@ -23,46 +24,32 @@ union aegis_block {
23#define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block)) 24#define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block))
24#define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN) 25#define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN)
25 26
26static const union aegis_block crypto_aegis_const[2] = { 27static __always_inline void crypto_aegis_block_xor(union aegis_block *dst,
27 { .words64 = { 28 const union aegis_block *src)
28 cpu_to_le64(U64_C(0x0d08050302010100)),
29 cpu_to_le64(U64_C(0x6279e99059372215)),
30 } },
31 { .words64 = {
32 cpu_to_le64(U64_C(0xf12fc26d55183ddb)),
33 cpu_to_le64(U64_C(0xdd28b57342311120)),
34 } },
35};
36
37static void crypto_aegis_block_xor(union aegis_block *dst,
38 const union aegis_block *src)
39{ 29{
40 dst->words64[0] ^= src->words64[0]; 30 dst->words64[0] ^= src->words64[0];
41 dst->words64[1] ^= src->words64[1]; 31 dst->words64[1] ^= src->words64[1];
42} 32}
43 33
44static void crypto_aegis_block_and(union aegis_block *dst, 34static __always_inline void crypto_aegis_block_and(union aegis_block *dst,
45 const union aegis_block *src) 35 const union aegis_block *src)
46{ 36{
47 dst->words64[0] &= src->words64[0]; 37 dst->words64[0] &= src->words64[0];
48 dst->words64[1] &= src->words64[1]; 38 dst->words64[1] &= src->words64[1];
49} 39}
50 40
51static void crypto_aegis_aesenc(union aegis_block *dst, 41static __always_inline void crypto_aegis_aesenc(union aegis_block *dst,
52 const union aegis_block *src, 42 const union aegis_block *src,
53 const union aegis_block *key) 43 const union aegis_block *key)
54{ 44{
55 const u8 *s = src->bytes; 45 const u8 *s = src->bytes;
56 const u32 *t0 = crypto_ft_tab[0]; 46 const u32 *t = crypto_ft_tab[0];
57 const u32 *t1 = crypto_ft_tab[1];
58 const u32 *t2 = crypto_ft_tab[2];
59 const u32 *t3 = crypto_ft_tab[3];
60 u32 d0, d1, d2, d3; 47 u32 d0, d1, d2, d3;
61 48
62 d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]]; 49 d0 = t[s[ 0]] ^ rol32(t[s[ 5]], 8) ^ rol32(t[s[10]], 16) ^ rol32(t[s[15]], 24);
63 d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]]; 50 d1 = t[s[ 4]] ^ rol32(t[s[ 9]], 8) ^ rol32(t[s[14]], 16) ^ rol32(t[s[ 3]], 24);
64 d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]]; 51 d2 = t[s[ 8]] ^ rol32(t[s[13]], 8) ^ rol32(t[s[ 2]], 16) ^ rol32(t[s[ 7]], 24);
65 d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]]; 52 d3 = t[s[12]] ^ rol32(t[s[ 1]], 8) ^ rol32(t[s[ 6]], 16) ^ rol32(t[s[11]], 24);
66 53
67 dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0]; 54 dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0];
68 dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1]; 55 dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1];
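
The table rewrite above relies on the four AES forward tables being byte
rotations of one another, i.e. crypto_ft_tab[n][x] == rol32(crypto_ft_tab[0][x],
8 * n), so a single 1 KiB table plus rol32() reproduces the old four-table
lookups. A minimal sketch of the per-column computation, with the table passed
in as a parameter rather than naming the kernel symbol:

#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned int s)
{
	return (v << s) | (v >> (32 - s));
}

/* one output column of the AES round, as in the new crypto_aegis_aesenc() */
static uint32_t aes_enc_column(const uint32_t *t,
			       uint8_t s0, uint8_t s1, uint8_t s2, uint8_t s3)
{
	return t[s0] ^ rol32(t[s1], 8) ^ rol32(t[s2], 16) ^ rol32(t[s3], 24);
}
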
diff --git a/crypto/aegis128.c b/crypto/aegis128-core.c
index d78f77fc5dd1..80e73611bd5c 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128-core.c
@@ -8,6 +8,7 @@
8 8
9#include <crypto/algapi.h> 9#include <crypto/algapi.h>
10#include <crypto/internal/aead.h> 10#include <crypto/internal/aead.h>
11#include <crypto/internal/simd.h>
11#include <crypto/internal/skcipher.h> 12#include <crypto/internal/skcipher.h>
12#include <crypto/scatterwalk.h> 13#include <crypto/scatterwalk.h>
13#include <linux/err.h> 14#include <linux/err.h>
@@ -16,6 +17,8 @@
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/scatterlist.h> 18#include <linux/scatterlist.h>
18 19
20#include <asm/simd.h>
21
19#include "aegis.h" 22#include "aegis.h"
20 23
21#define AEGIS128_NONCE_SIZE 16 24#define AEGIS128_NONCE_SIZE 16
@@ -40,6 +43,35 @@ struct aegis128_ops {
40 const u8 *src, unsigned int size); 43 const u8 *src, unsigned int size);
41}; 44};
42 45
46static bool have_simd;
47
48static const union aegis_block crypto_aegis_const[2] = {
49 { .words64 = {
50 cpu_to_le64(U64_C(0x0d08050302010100)),
51 cpu_to_le64(U64_C(0x6279e99059372215)),
52 } },
53 { .words64 = {
54 cpu_to_le64(U64_C(0xf12fc26d55183ddb)),
55 cpu_to_le64(U64_C(0xdd28b57342311120)),
56 } },
57};
58
59static bool aegis128_do_simd(void)
60{
61#ifdef CONFIG_CRYPTO_AEGIS128_SIMD
62 if (have_simd)
63 return crypto_simd_usable();
64#endif
65 return false;
66}
67
68bool crypto_aegis128_have_simd(void);
69void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
70void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
71 const u8 *src, unsigned int size);
72void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
73 const u8 *src, unsigned int size);
74
43static void crypto_aegis128_update(struct aegis_state *state) 75static void crypto_aegis128_update(struct aegis_state *state)
44{ 76{
45 union aegis_block tmp; 77 union aegis_block tmp;
@@ -55,12 +87,22 @@ static void crypto_aegis128_update(struct aegis_state *state)
55static void crypto_aegis128_update_a(struct aegis_state *state, 87static void crypto_aegis128_update_a(struct aegis_state *state,
56 const union aegis_block *msg) 88 const union aegis_block *msg)
57{ 89{
90 if (aegis128_do_simd()) {
91 crypto_aegis128_update_simd(state, msg);
92 return;
93 }
94
58 crypto_aegis128_update(state); 95 crypto_aegis128_update(state);
59 crypto_aegis_block_xor(&state->blocks[0], msg); 96 crypto_aegis_block_xor(&state->blocks[0], msg);
60} 97}
61 98
62static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) 99static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg)
63{ 100{
101 if (aegis128_do_simd()) {
102 crypto_aegis128_update_simd(state, msg);
103 return;
104 }
105
64 crypto_aegis128_update(state); 106 crypto_aegis128_update(state);
65 crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); 107 crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);
66} 108}
@@ -365,7 +407,7 @@ static void crypto_aegis128_crypt(struct aead_request *req,
365 407
366static int crypto_aegis128_encrypt(struct aead_request *req) 408static int crypto_aegis128_encrypt(struct aead_request *req)
367{ 409{
368 static const struct aegis128_ops ops = { 410 const struct aegis128_ops *ops = &(struct aegis128_ops){
369 .skcipher_walk_init = skcipher_walk_aead_encrypt, 411 .skcipher_walk_init = skcipher_walk_aead_encrypt,
370 .crypt_chunk = crypto_aegis128_encrypt_chunk, 412 .crypt_chunk = crypto_aegis128_encrypt_chunk,
371 }; 413 };
@@ -375,7 +417,12 @@ static int crypto_aegis128_encrypt(struct aead_request *req)
375 unsigned int authsize = crypto_aead_authsize(tfm); 417 unsigned int authsize = crypto_aead_authsize(tfm);
376 unsigned int cryptlen = req->cryptlen; 418 unsigned int cryptlen = req->cryptlen;
377 419
378 crypto_aegis128_crypt(req, &tag, cryptlen, &ops); 420 if (aegis128_do_simd())
421 ops = &(struct aegis128_ops){
422 .skcipher_walk_init = skcipher_walk_aead_encrypt,
423 .crypt_chunk = crypto_aegis128_encrypt_chunk_simd };
424
425 crypto_aegis128_crypt(req, &tag, cryptlen, ops);
379 426
380 scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, 427 scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
381 authsize, 1); 428 authsize, 1);
@@ -384,7 +431,7 @@ static int crypto_aegis128_encrypt(struct aead_request *req)
384 431
385static int crypto_aegis128_decrypt(struct aead_request *req) 432static int crypto_aegis128_decrypt(struct aead_request *req)
386{ 433{
387 static const struct aegis128_ops ops = { 434 const struct aegis128_ops *ops = &(struct aegis128_ops){
388 .skcipher_walk_init = skcipher_walk_aead_decrypt, 435 .skcipher_walk_init = skcipher_walk_aead_decrypt,
389 .crypt_chunk = crypto_aegis128_decrypt_chunk, 436 .crypt_chunk = crypto_aegis128_decrypt_chunk,
390 }; 437 };
@@ -398,18 +445,14 @@ static int crypto_aegis128_decrypt(struct aead_request *req)
398 scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, 445 scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
399 authsize, 0); 446 authsize, 0);
400 447
401 crypto_aegis128_crypt(req, &tag, cryptlen, &ops); 448 if (aegis128_do_simd())
449 ops = &(struct aegis128_ops){
450 .skcipher_walk_init = skcipher_walk_aead_decrypt,
451 .crypt_chunk = crypto_aegis128_decrypt_chunk_simd };
402 452
403 return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0; 453 crypto_aegis128_crypt(req, &tag, cryptlen, ops);
404}
405
406static int crypto_aegis128_init_tfm(struct crypto_aead *tfm)
407{
408 return 0;
409}
410 454
411static void crypto_aegis128_exit_tfm(struct crypto_aead *tfm) 455 return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
412{
413} 456}
414 457
415static struct aead_alg crypto_aegis128_alg = { 458static struct aead_alg crypto_aegis128_alg = {
@@ -417,8 +460,6 @@ static struct aead_alg crypto_aegis128_alg = {
417 .setauthsize = crypto_aegis128_setauthsize, 460 .setauthsize = crypto_aegis128_setauthsize,
418 .encrypt = crypto_aegis128_encrypt, 461 .encrypt = crypto_aegis128_encrypt,
419 .decrypt = crypto_aegis128_decrypt, 462 .decrypt = crypto_aegis128_decrypt,
420 .init = crypto_aegis128_init_tfm,
421 .exit = crypto_aegis128_exit_tfm,
422 463
423 .ivsize = AEGIS128_NONCE_SIZE, 464 .ivsize = AEGIS128_NONCE_SIZE,
424 .maxauthsize = AEGIS128_MAX_AUTH_SIZE, 465 .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
@@ -440,6 +481,9 @@ static struct aead_alg crypto_aegis128_alg = {
440 481
441static int __init crypto_aegis128_module_init(void) 482static int __init crypto_aegis128_module_init(void)
442{ 483{
484 if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD))
485 have_simd = crypto_aegis128_have_simd();
486
443 return crypto_register_aead(&crypto_aegis128_alg); 487 return crypto_register_aead(&crypto_aegis128_alg);
444} 488}
445 489
diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c
new file mode 100644
index 000000000000..f05310ca22aa
--- /dev/null
+++ b/crypto/aegis128-neon-inner.c
@@ -0,0 +1,212 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
4 */
5
6#ifdef CONFIG_ARM64
7#include <asm/neon-intrinsics.h>
8
9#define AES_ROUND "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b"
10#else
11#include <arm_neon.h>
12
13#define AES_ROUND "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0"
14#endif
15
16#define AEGIS_BLOCK_SIZE 16
17
18#include <stddef.h>
19
20extern int aegis128_have_aes_insn;
21
22void *memcpy(void *dest, const void *src, size_t n);
23void *memset(void *s, int c, size_t n);
24
25struct aegis128_state {
26 uint8x16_t v[5];
27};
28
29extern const uint8_t crypto_aes_sbox[];
30
31static struct aegis128_state aegis128_load_state_neon(const void *state)
32{
33 return (struct aegis128_state){ {
34 vld1q_u8(state),
35 vld1q_u8(state + 16),
36 vld1q_u8(state + 32),
37 vld1q_u8(state + 48),
38 vld1q_u8(state + 64)
39 } };
40}
41
42static void aegis128_save_state_neon(struct aegis128_state st, void *state)
43{
44 vst1q_u8(state, st.v[0]);
45 vst1q_u8(state + 16, st.v[1]);
46 vst1q_u8(state + 32, st.v[2]);
47 vst1q_u8(state + 48, st.v[3]);
48 vst1q_u8(state + 64, st.v[4]);
49}
50
51static inline __attribute__((always_inline))
52uint8x16_t aegis_aes_round(uint8x16_t w)
53{
54 uint8x16_t z = {};
55
56#ifdef CONFIG_ARM64
57 if (!__builtin_expect(aegis128_have_aes_insn, 1)) {
58 static const uint8_t shift_rows[] = {
59 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
60 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
61 };
62 static const uint8_t ror32by8[] = {
63 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
64 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
65 };
66 uint8x16_t v;
67
68 // shift rows
69 w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
70
71 // sub bytes
72#ifndef CONFIG_CC_IS_GCC
73 v = vqtbl4q_u8(vld1q_u8_x4(crypto_aes_sbox), w);
74 v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x40), w - 0x40);
75 v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x80), w - 0x80);
76 v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0xc0), w - 0xc0);
77#else
78 asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w));
79 w -= 0x40;
80 asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w));
81 w -= 0x40;
82 asm("tbx %0.16b, {v24.16b-v27.16b}, %1.16b" : "+w"(v) : "w"(w));
83 w -= 0x40;
84 asm("tbx %0.16b, {v28.16b-v31.16b}, %1.16b" : "+w"(v) : "w"(w));
85#endif
86
87 // mix columns
88 w = (v << 1) ^ (uint8x16_t)(((int8x16_t)v >> 7) & 0x1b);
89 w ^= (uint8x16_t)vrev32q_u16((uint16x8_t)v);
90 w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
91
92 return w;
93 }
94#endif
95
96 /*
97 * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics
98 * to force the compiler to issue the aese/aesmc instructions in pairs.
99 * This is much faster on many cores, where the instruction pair can
100 * execute in a single cycle.
101 */
102 asm(AES_ROUND : "+w"(w) : "w"(z));
103 return w;
104}
105
106static inline __attribute__((always_inline))
107struct aegis128_state aegis128_update_neon(struct aegis128_state st,
108 uint8x16_t m)
109{
110 m ^= aegis_aes_round(st.v[4]);
111 st.v[4] ^= aegis_aes_round(st.v[3]);
112 st.v[3] ^= aegis_aes_round(st.v[2]);
113 st.v[2] ^= aegis_aes_round(st.v[1]);
114 st.v[1] ^= aegis_aes_round(st.v[0]);
115 st.v[0] ^= m;
116
117 return st;
118}
119
120static inline __attribute__((always_inline))
121void preload_sbox(void)
122{
123 if (!IS_ENABLED(CONFIG_ARM64) ||
124 !IS_ENABLED(CONFIG_CC_IS_GCC) ||
125 __builtin_expect(aegis128_have_aes_insn, 1))
126 return;
127
128 asm("ld1 {v16.16b-v19.16b}, [%0], #64 \n\t"
129 "ld1 {v20.16b-v23.16b}, [%0], #64 \n\t"
130 "ld1 {v24.16b-v27.16b}, [%0], #64 \n\t"
131 "ld1 {v28.16b-v31.16b}, [%0] \n\t"
132 :: "r"(crypto_aes_sbox));
133}
134
135void crypto_aegis128_update_neon(void *state, const void *msg)
136{
137 struct aegis128_state st = aegis128_load_state_neon(state);
138
139 preload_sbox();
140
141 st = aegis128_update_neon(st, vld1q_u8(msg));
142
143 aegis128_save_state_neon(st, state);
144}
145
146void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
147 unsigned int size)
148{
149 struct aegis128_state st = aegis128_load_state_neon(state);
150 uint8x16_t msg;
151
152 preload_sbox();
153
154 while (size >= AEGIS_BLOCK_SIZE) {
155 uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
156
157 msg = vld1q_u8(src);
158 st = aegis128_update_neon(st, msg);
159 vst1q_u8(dst, msg ^ s);
160
161 size -= AEGIS_BLOCK_SIZE;
162 src += AEGIS_BLOCK_SIZE;
163 dst += AEGIS_BLOCK_SIZE;
164 }
165
166 if (size > 0) {
167 uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
168 uint8_t buf[AEGIS_BLOCK_SIZE] = {};
169
170 memcpy(buf, src, size);
171 msg = vld1q_u8(buf);
172 st = aegis128_update_neon(st, msg);
173 vst1q_u8(buf, msg ^ s);
174 memcpy(dst, buf, size);
175 }
176
177 aegis128_save_state_neon(st, state);
178}
179
180void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
181 unsigned int size)
182{
183 struct aegis128_state st = aegis128_load_state_neon(state);
184 uint8x16_t msg;
185
186 preload_sbox();
187
188 while (size >= AEGIS_BLOCK_SIZE) {
189 msg = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
190 st = aegis128_update_neon(st, msg);
191 vst1q_u8(dst, msg);
192
193 size -= AEGIS_BLOCK_SIZE;
194 src += AEGIS_BLOCK_SIZE;
195 dst += AEGIS_BLOCK_SIZE;
196 }
197
198 if (size > 0) {
199 uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
200 uint8_t buf[AEGIS_BLOCK_SIZE];
201
202 vst1q_u8(buf, s);
203 memcpy(buf, src, size);
204 msg = vld1q_u8(buf) ^ s;
205 vst1q_u8(buf, msg);
206 memcpy(dst, buf, size);
207
208 st = aegis128_update_neon(st, msg);
209 }
210
211 aegis128_save_state_neon(st, state);
212}
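
One detail worth spelling out from aegis_aes_round() above: the expression
(v << 1) ^ (((int8x16_t)v >> 7) & 0x1b) is a branch-free, per-byte GF(2^8)
doubling (xtime), using the arithmetic shift to turn the top bit of each byte
into a 0x1b reduction mask. A scalar equivalent, purely for illustration:

#include <stdint.h>

/* multiply one byte by 2 in GF(2^8) with the AES reduction polynomial */
static uint8_t xtime(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}
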
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
new file mode 100644
index 000000000000..751f9c195aa4
--- /dev/null
+++ b/crypto/aegis128-neon.c
@@ -0,0 +1,49 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2019 Linaro Ltd <ard.biesheuvel@linaro.org>
4 */
5
6#include <asm/cpufeature.h>
7#include <asm/neon.h>
8
9#include "aegis.h"
10
11void crypto_aegis128_update_neon(void *state, const void *msg);
12void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
13 unsigned int size);
14void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
15 unsigned int size);
16
17int aegis128_have_aes_insn __ro_after_init;
18
19bool crypto_aegis128_have_simd(void)
20{
21 if (cpu_have_feature(cpu_feature(AES))) {
22 aegis128_have_aes_insn = 1;
23 return true;
24 }
25 return IS_ENABLED(CONFIG_ARM64);
26}
27
28void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
29{
30 kernel_neon_begin();
31 crypto_aegis128_update_neon(state, msg);
32 kernel_neon_end();
33}
34
35void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
36 const u8 *src, unsigned int size)
37{
38 kernel_neon_begin();
39 crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
40 kernel_neon_end();
41}
42
43void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
44 const u8 *src, unsigned int size)
45{
46 kernel_neon_begin();
47 crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
48 kernel_neon_end();
49}
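This glue file does two things: crypto_aegis128_have_simd() reports (and remembers) whether the CPU actually has the AES instructions, and the *_simd wrappers bracket every call into the NEON code with kernel_neon_begin()/kernel_neon_end(), since the kernel does not otherwise preserve the FP/SIMD register file. A hedged sketch of how generic code might dispatch on that probe follows; the have_simd flag and the *_generic fallback name are stand-ins, not taken from this patch.

	/*
	 * Illustrative caller-side dispatch; apart from the _simd helper and
	 * crypto_simd_usable(), the names here are hypothetical.
	 */
	static bool have_simd;

	static void aegis128_update(union aegis_block *state, const void *msg)
	{
		if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
		    have_simd && crypto_simd_usable())
			crypto_aegis128_update_simd(state, msg);	/* NEON path */
		else
			crypto_aegis128_update_generic(state, msg);	/* portable C path */
	}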
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
deleted file mode 100644
index 9bca3d619a22..000000000000
--- a/crypto/aegis128l.c
+++ /dev/null
@@ -1,522 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The AEGIS-128L Authenticated-Encryption Algorithm
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <crypto/algapi.h>
10#include <crypto/internal/aead.h>
11#include <crypto/internal/skcipher.h>
12#include <crypto/scatterwalk.h>
13#include <linux/err.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/scatterlist.h>
18
19#include "aegis.h"
20
21#define AEGIS128L_CHUNK_BLOCKS 2
22#define AEGIS128L_CHUNK_SIZE (AEGIS128L_CHUNK_BLOCKS * AEGIS_BLOCK_SIZE)
23#define AEGIS128L_NONCE_SIZE 16
24#define AEGIS128L_STATE_BLOCKS 8
25#define AEGIS128L_KEY_SIZE 16
26#define AEGIS128L_MIN_AUTH_SIZE 8
27#define AEGIS128L_MAX_AUTH_SIZE 16
28
29union aegis_chunk {
30 union aegis_block blocks[AEGIS128L_CHUNK_BLOCKS];
31 u8 bytes[AEGIS128L_CHUNK_SIZE];
32};
33
34struct aegis_state {
35 union aegis_block blocks[AEGIS128L_STATE_BLOCKS];
36};
37
38struct aegis_ctx {
39 union aegis_block key;
40};
41
42struct aegis128l_ops {
43 int (*skcipher_walk_init)(struct skcipher_walk *walk,
44 struct aead_request *req, bool atomic);
45
46 void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
47 const u8 *src, unsigned int size);
48};
49
50static void crypto_aegis128l_update(struct aegis_state *state)
51{
52 union aegis_block tmp;
53 unsigned int i;
54
55 tmp = state->blocks[AEGIS128L_STATE_BLOCKS - 1];
56 for (i = AEGIS128L_STATE_BLOCKS - 1; i > 0; i--)
57 crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
58 &state->blocks[i]);
59 crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
60}
61
62static void crypto_aegis128l_update_a(struct aegis_state *state,
63 const union aegis_chunk *msg)
64{
65 crypto_aegis128l_update(state);
66 crypto_aegis_block_xor(&state->blocks[0], &msg->blocks[0]);
67 crypto_aegis_block_xor(&state->blocks[4], &msg->blocks[1]);
68}
69
70static void crypto_aegis128l_update_u(struct aegis_state *state,
71 const void *msg)
72{
73 crypto_aegis128l_update(state);
74 crypto_xor(state->blocks[0].bytes, msg + 0 * AEGIS_BLOCK_SIZE,
75 AEGIS_BLOCK_SIZE);
76 crypto_xor(state->blocks[4].bytes, msg + 1 * AEGIS_BLOCK_SIZE,
77 AEGIS_BLOCK_SIZE);
78}
79
80static void crypto_aegis128l_init(struct aegis_state *state,
81 const union aegis_block *key,
82 const u8 *iv)
83{
84 union aegis_block key_iv;
85 union aegis_chunk chunk;
86 unsigned int i;
87
88 memcpy(chunk.blocks[0].bytes, iv, AEGIS_BLOCK_SIZE);
89 chunk.blocks[1] = *key;
90
91 key_iv = *key;
92 crypto_aegis_block_xor(&key_iv, &chunk.blocks[0]);
93
94 state->blocks[0] = key_iv;
95 state->blocks[1] = crypto_aegis_const[1];
96 state->blocks[2] = crypto_aegis_const[0];
97 state->blocks[3] = crypto_aegis_const[1];
98 state->blocks[4] = key_iv;
99 state->blocks[5] = *key;
100 state->blocks[6] = *key;
101 state->blocks[7] = *key;
102
103 crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[0]);
104 crypto_aegis_block_xor(&state->blocks[6], &crypto_aegis_const[1]);
105 crypto_aegis_block_xor(&state->blocks[7], &crypto_aegis_const[0]);
106
107 for (i = 0; i < 10; i++) {
108 crypto_aegis128l_update_a(state, &chunk);
109 }
110}
111
112static void crypto_aegis128l_ad(struct aegis_state *state,
113 const u8 *src, unsigned int size)
114{
115 if (AEGIS_ALIGNED(src)) {
116 const union aegis_chunk *src_chunk =
117 (const union aegis_chunk *)src;
118
119 while (size >= AEGIS128L_CHUNK_SIZE) {
120 crypto_aegis128l_update_a(state, src_chunk);
121
122 size -= AEGIS128L_CHUNK_SIZE;
123 src_chunk += 1;
124 }
125 } else {
126 while (size >= AEGIS128L_CHUNK_SIZE) {
127 crypto_aegis128l_update_u(state, src);
128
129 size -= AEGIS128L_CHUNK_SIZE;
130 src += AEGIS128L_CHUNK_SIZE;
131 }
132 }
133}
134
135static void crypto_aegis128l_encrypt_chunk(struct aegis_state *state, u8 *dst,
136 const u8 *src, unsigned int size)
137{
138 union aegis_chunk tmp;
139 union aegis_block *tmp0 = &tmp.blocks[0];
140 union aegis_block *tmp1 = &tmp.blocks[1];
141
142 if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
143 while (size >= AEGIS128L_CHUNK_SIZE) {
144 union aegis_chunk *dst_blk =
145 (union aegis_chunk *)dst;
146 const union aegis_chunk *src_blk =
147 (const union aegis_chunk *)src;
148
149 *tmp0 = state->blocks[2];
150 crypto_aegis_block_and(tmp0, &state->blocks[3]);
151 crypto_aegis_block_xor(tmp0, &state->blocks[6]);
152 crypto_aegis_block_xor(tmp0, &state->blocks[1]);
153 crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]);
154
155 *tmp1 = state->blocks[6];
156 crypto_aegis_block_and(tmp1, &state->blocks[7]);
157 crypto_aegis_block_xor(tmp1, &state->blocks[5]);
158 crypto_aegis_block_xor(tmp1, &state->blocks[2]);
159 crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]);
160
161 crypto_aegis128l_update_a(state, src_blk);
162
163 *dst_blk = tmp;
164
165 size -= AEGIS128L_CHUNK_SIZE;
166 src += AEGIS128L_CHUNK_SIZE;
167 dst += AEGIS128L_CHUNK_SIZE;
168 }
169 } else {
170 while (size >= AEGIS128L_CHUNK_SIZE) {
171 *tmp0 = state->blocks[2];
172 crypto_aegis_block_and(tmp0, &state->blocks[3]);
173 crypto_aegis_block_xor(tmp0, &state->blocks[6]);
174 crypto_aegis_block_xor(tmp0, &state->blocks[1]);
175 crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE,
176 AEGIS_BLOCK_SIZE);
177
178 *tmp1 = state->blocks[6];
179 crypto_aegis_block_and(tmp1, &state->blocks[7]);
180 crypto_aegis_block_xor(tmp1, &state->blocks[5]);
181 crypto_aegis_block_xor(tmp1, &state->blocks[2]);
182 crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE,
183 AEGIS_BLOCK_SIZE);
184
185 crypto_aegis128l_update_u(state, src);
186
187 memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE);
188
189 size -= AEGIS128L_CHUNK_SIZE;
190 src += AEGIS128L_CHUNK_SIZE;
191 dst += AEGIS128L_CHUNK_SIZE;
192 }
193 }
194
195 if (size > 0) {
196 union aegis_chunk msg = {};
197 memcpy(msg.bytes, src, size);
198
199 *tmp0 = state->blocks[2];
200 crypto_aegis_block_and(tmp0, &state->blocks[3]);
201 crypto_aegis_block_xor(tmp0, &state->blocks[6]);
202 crypto_aegis_block_xor(tmp0, &state->blocks[1]);
203
204 *tmp1 = state->blocks[6];
205 crypto_aegis_block_and(tmp1, &state->blocks[7]);
206 crypto_aegis_block_xor(tmp1, &state->blocks[5]);
207 crypto_aegis_block_xor(tmp1, &state->blocks[2]);
208
209 crypto_aegis128l_update_a(state, &msg);
210
211 crypto_aegis_block_xor(&msg.blocks[0], tmp0);
212 crypto_aegis_block_xor(&msg.blocks[1], tmp1);
213
214 memcpy(dst, msg.bytes, size);
215 }
216}
217
218static void crypto_aegis128l_decrypt_chunk(struct aegis_state *state, u8 *dst,
219 const u8 *src, unsigned int size)
220{
221 union aegis_chunk tmp;
222 union aegis_block *tmp0 = &tmp.blocks[0];
223 union aegis_block *tmp1 = &tmp.blocks[1];
224
225 if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
226 while (size >= AEGIS128L_CHUNK_SIZE) {
227 union aegis_chunk *dst_blk =
228 (union aegis_chunk *)dst;
229 const union aegis_chunk *src_blk =
230 (const union aegis_chunk *)src;
231
232 *tmp0 = state->blocks[2];
233 crypto_aegis_block_and(tmp0, &state->blocks[3]);
234 crypto_aegis_block_xor(tmp0, &state->blocks[6]);
235 crypto_aegis_block_xor(tmp0, &state->blocks[1]);
236 crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]);
237
238 *tmp1 = state->blocks[6];
239 crypto_aegis_block_and(tmp1, &state->blocks[7]);
240 crypto_aegis_block_xor(tmp1, &state->blocks[5]);
241 crypto_aegis_block_xor(tmp1, &state->blocks[2]);
242 crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]);
243
244 crypto_aegis128l_update_a(state, &tmp);
245
246 *dst_blk = tmp;
247
248 size -= AEGIS128L_CHUNK_SIZE;
249 src += AEGIS128L_CHUNK_SIZE;
250 dst += AEGIS128L_CHUNK_SIZE;
251 }
252 } else {
253 while (size >= AEGIS128L_CHUNK_SIZE) {
254 *tmp0 = state->blocks[2];
255 crypto_aegis_block_and(tmp0, &state->blocks[3]);
256 crypto_aegis_block_xor(tmp0, &state->blocks[6]);
257 crypto_aegis_block_xor(tmp0, &state->blocks[1]);
258 crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE,
259 AEGIS_BLOCK_SIZE);
260
261 *tmp1 = state->blocks[6];
262 crypto_aegis_block_and(tmp1, &state->blocks[7]);
263 crypto_aegis_block_xor(tmp1, &state->blocks[5]);
264 crypto_aegis_block_xor(tmp1, &state->blocks[2]);
265 crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE,
266 AEGIS_BLOCK_SIZE);
267
268 crypto_aegis128l_update_a(state, &tmp);
269
270 memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE);
271
272 size -= AEGIS128L_CHUNK_SIZE;
273 src += AEGIS128L_CHUNK_SIZE;
274 dst += AEGIS128L_CHUNK_SIZE;
275 }
276 }
277
278 if (size > 0) {
279 union aegis_chunk msg = {};
280 memcpy(msg.bytes, src, size);
281
282 *tmp0 = state->blocks[2];
283 crypto_aegis_block_and(tmp0, &state->blocks[3]);
284 crypto_aegis_block_xor(tmp0, &state->blocks[6]);
285 crypto_aegis_block_xor(tmp0, &state->blocks[1]);
286 crypto_aegis_block_xor(&msg.blocks[0], tmp0);
287
288 *tmp1 = state->blocks[6];
289 crypto_aegis_block_and(tmp1, &state->blocks[7]);
290 crypto_aegis_block_xor(tmp1, &state->blocks[5]);
291 crypto_aegis_block_xor(tmp1, &state->blocks[2]);
292 crypto_aegis_block_xor(&msg.blocks[1], tmp1);
293
294 memset(msg.bytes + size, 0, AEGIS128L_CHUNK_SIZE - size);
295
296 crypto_aegis128l_update_a(state, &msg);
297
298 memcpy(dst, msg.bytes, size);
299 }
300}
301
302static void crypto_aegis128l_process_ad(struct aegis_state *state,
303 struct scatterlist *sg_src,
304 unsigned int assoclen)
305{
306 struct scatter_walk walk;
307 union aegis_chunk buf;
308 unsigned int pos = 0;
309
310 scatterwalk_start(&walk, sg_src);
311 while (assoclen != 0) {
312 unsigned int size = scatterwalk_clamp(&walk, assoclen);
313 unsigned int left = size;
314 void *mapped = scatterwalk_map(&walk);
315 const u8 *src = (const u8 *)mapped;
316
317 if (pos + size >= AEGIS128L_CHUNK_SIZE) {
318 if (pos > 0) {
319 unsigned int fill = AEGIS128L_CHUNK_SIZE - pos;
320 memcpy(buf.bytes + pos, src, fill);
321 crypto_aegis128l_update_a(state, &buf);
322 pos = 0;
323 left -= fill;
324 src += fill;
325 }
326
327 crypto_aegis128l_ad(state, src, left);
328 src += left & ~(AEGIS128L_CHUNK_SIZE - 1);
329 left &= AEGIS128L_CHUNK_SIZE - 1;
330 }
331
332 memcpy(buf.bytes + pos, src, left);
333
334 pos += left;
335 assoclen -= size;
336 scatterwalk_unmap(mapped);
337 scatterwalk_advance(&walk, size);
338 scatterwalk_done(&walk, 0, assoclen);
339 }
340
341 if (pos > 0) {
342 memset(buf.bytes + pos, 0, AEGIS128L_CHUNK_SIZE - pos);
343 crypto_aegis128l_update_a(state, &buf);
344 }
345}
346
347static void crypto_aegis128l_process_crypt(struct aegis_state *state,
348 struct aead_request *req,
349 const struct aegis128l_ops *ops)
350{
351 struct skcipher_walk walk;
352
353 ops->skcipher_walk_init(&walk, req, false);
354
355 while (walk.nbytes) {
356 unsigned int nbytes = walk.nbytes;
357
358 if (nbytes < walk.total)
359 nbytes = round_down(nbytes, walk.stride);
360
361 ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
362 nbytes);
363
364 skcipher_walk_done(&walk, walk.nbytes - nbytes);
365 }
366}
367
368static void crypto_aegis128l_final(struct aegis_state *state,
369 union aegis_block *tag_xor,
370 u64 assoclen, u64 cryptlen)
371{
372 u64 assocbits = assoclen * 8;
373 u64 cryptbits = cryptlen * 8;
374
375 union aegis_chunk tmp;
376 unsigned int i;
377
378 tmp.blocks[0].words64[0] = cpu_to_le64(assocbits);
379 tmp.blocks[0].words64[1] = cpu_to_le64(cryptbits);
380
381 crypto_aegis_block_xor(&tmp.blocks[0], &state->blocks[2]);
382
383 tmp.blocks[1] = tmp.blocks[0];
384 for (i = 0; i < 7; i++)
385 crypto_aegis128l_update_a(state, &tmp);
386
387 for (i = 0; i < 7; i++)
388 crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
389}
390
391static int crypto_aegis128l_setkey(struct crypto_aead *aead, const u8 *key,
392 unsigned int keylen)
393{
394 struct aegis_ctx *ctx = crypto_aead_ctx(aead);
395
396 if (keylen != AEGIS128L_KEY_SIZE) {
397 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
398 return -EINVAL;
399 }
400
401 memcpy(ctx->key.bytes, key, AEGIS128L_KEY_SIZE);
402 return 0;
403}
404
405static int crypto_aegis128l_setauthsize(struct crypto_aead *tfm,
406 unsigned int authsize)
407{
408 if (authsize > AEGIS128L_MAX_AUTH_SIZE)
409 return -EINVAL;
410 if (authsize < AEGIS128L_MIN_AUTH_SIZE)
411 return -EINVAL;
412 return 0;
413}
414
415static void crypto_aegis128l_crypt(struct aead_request *req,
416 union aegis_block *tag_xor,
417 unsigned int cryptlen,
418 const struct aegis128l_ops *ops)
419{
420 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
421 struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
422 struct aegis_state state;
423
424 crypto_aegis128l_init(&state, &ctx->key, req->iv);
425 crypto_aegis128l_process_ad(&state, req->src, req->assoclen);
426 crypto_aegis128l_process_crypt(&state, req, ops);
427 crypto_aegis128l_final(&state, tag_xor, req->assoclen, cryptlen);
428}
429
430static int crypto_aegis128l_encrypt(struct aead_request *req)
431{
432 static const struct aegis128l_ops ops = {
433 .skcipher_walk_init = skcipher_walk_aead_encrypt,
434 .crypt_chunk = crypto_aegis128l_encrypt_chunk,
435 };
436
437 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
438 union aegis_block tag = {};
439 unsigned int authsize = crypto_aead_authsize(tfm);
440 unsigned int cryptlen = req->cryptlen;
441
442 crypto_aegis128l_crypt(req, &tag, cryptlen, &ops);
443
444 scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
445 authsize, 1);
446 return 0;
447}
448
449static int crypto_aegis128l_decrypt(struct aead_request *req)
450{
451 static const struct aegis128l_ops ops = {
452 .skcipher_walk_init = skcipher_walk_aead_decrypt,
453 .crypt_chunk = crypto_aegis128l_decrypt_chunk,
454 };
455 static const u8 zeros[AEGIS128L_MAX_AUTH_SIZE] = {};
456
457 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
458 union aegis_block tag;
459 unsigned int authsize = crypto_aead_authsize(tfm);
460 unsigned int cryptlen = req->cryptlen - authsize;
461
462 scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
463 authsize, 0);
464
465 crypto_aegis128l_crypt(req, &tag, cryptlen, &ops);
466
467 return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
468}
469
470static int crypto_aegis128l_init_tfm(struct crypto_aead *tfm)
471{
472 return 0;
473}
474
475static void crypto_aegis128l_exit_tfm(struct crypto_aead *tfm)
476{
477}
478
479static struct aead_alg crypto_aegis128l_alg = {
480 .setkey = crypto_aegis128l_setkey,
481 .setauthsize = crypto_aegis128l_setauthsize,
482 .encrypt = crypto_aegis128l_encrypt,
483 .decrypt = crypto_aegis128l_decrypt,
484 .init = crypto_aegis128l_init_tfm,
485 .exit = crypto_aegis128l_exit_tfm,
486
487 .ivsize = AEGIS128L_NONCE_SIZE,
488 .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
489 .chunksize = AEGIS128L_CHUNK_SIZE,
490
491 .base = {
492 .cra_blocksize = 1,
493 .cra_ctxsize = sizeof(struct aegis_ctx),
494 .cra_alignmask = 0,
495
496 .cra_priority = 100,
497
498 .cra_name = "aegis128l",
499 .cra_driver_name = "aegis128l-generic",
500
501 .cra_module = THIS_MODULE,
502 }
503};
504
505static int __init crypto_aegis128l_module_init(void)
506{
507 return crypto_register_aead(&crypto_aegis128l_alg);
508}
509
510static void __exit crypto_aegis128l_module_exit(void)
511{
512 crypto_unregister_aead(&crypto_aegis128l_alg);
513}
514
515subsys_initcall(crypto_aegis128l_module_init);
516module_exit(crypto_aegis128l_module_exit);
517
518MODULE_LICENSE("GPL");
519MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
520MODULE_DESCRIPTION("AEGIS-128L AEAD algorithm");
521MODULE_ALIAS_CRYPTO("aegis128l");
522MODULE_ALIAS_CRYPTO("aegis128l-generic");
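Most of the deleted file is straight-line AEGIS-128L reference code; the piece worth noting is crypto_aegis128l_process_ad(), which walks the associated data through a scatterlist and therefore carries partially filled chunks across segment boundaries in a stack buffer, zero-padding and absorbing whatever remains at the end. The same buffering idiom, reduced to plain memory with a hypothetical absorb() callback, looks roughly like the sketch below (block size and helper name are illustrative).

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define BLKSZ 32	/* AEGIS-128L chunk size, used here only for illustration */

	/*
	 * Feed len bytes into absorb() in BLKSZ pieces, carrying a partial block
	 * in buf[] and *pos across calls; pass final == true on the last call to
	 * flush the zero-padded remainder.
	 */
	static void absorb_buffered(uint8_t buf[BLKSZ], size_t *pos,
				    const uint8_t *src, size_t len, bool final,
				    void (*absorb)(const uint8_t block[BLKSZ]))
	{
		while (len) {
			size_t take = BLKSZ - *pos;

			if (take > len)
				take = len;
			memcpy(buf + *pos, src, take);
			*pos += take;
			src += take;
			len -= take;
			if (*pos == BLKSZ) {
				absorb(buf);
				*pos = 0;
			}
		}
		if (final && *pos) {
			memset(buf + *pos, 0, BLKSZ - *pos);
			absorb(buf);
			*pos = 0;
		}
	}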
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
deleted file mode 100644
index b47fd39595ad..000000000000
--- a/crypto/aegis256.c
+++ /dev/null
@@ -1,473 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The AEGIS-256 Authenticated-Encryption Algorithm
4 *
5 * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <crypto/algapi.h>
10#include <crypto/internal/aead.h>
11#include <crypto/internal/skcipher.h>
12#include <crypto/scatterwalk.h>
13#include <linux/err.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/scatterlist.h>
18
19#include "aegis.h"
20
21#define AEGIS256_NONCE_SIZE 32
22#define AEGIS256_STATE_BLOCKS 6
23#define AEGIS256_KEY_SIZE 32
24#define AEGIS256_MIN_AUTH_SIZE 8
25#define AEGIS256_MAX_AUTH_SIZE 16
26
27struct aegis_state {
28 union aegis_block blocks[AEGIS256_STATE_BLOCKS];
29};
30
31struct aegis_ctx {
32 union aegis_block key[AEGIS256_KEY_SIZE / AEGIS_BLOCK_SIZE];
33};
34
35struct aegis256_ops {
36 int (*skcipher_walk_init)(struct skcipher_walk *walk,
37 struct aead_request *req, bool atomic);
38
39 void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
40 const u8 *src, unsigned int size);
41};
42
43static void crypto_aegis256_update(struct aegis_state *state)
44{
45 union aegis_block tmp;
46 unsigned int i;
47
48 tmp = state->blocks[AEGIS256_STATE_BLOCKS - 1];
49 for (i = AEGIS256_STATE_BLOCKS - 1; i > 0; i--)
50 crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
51 &state->blocks[i]);
52 crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
53}
54
55static void crypto_aegis256_update_a(struct aegis_state *state,
56 const union aegis_block *msg)
57{
58 crypto_aegis256_update(state);
59 crypto_aegis_block_xor(&state->blocks[0], msg);
60}
61
62static void crypto_aegis256_update_u(struct aegis_state *state, const void *msg)
63{
64 crypto_aegis256_update(state);
65 crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);
66}
67
68static void crypto_aegis256_init(struct aegis_state *state,
69 const union aegis_block *key,
70 const u8 *iv)
71{
72 union aegis_block key_iv[2];
73 unsigned int i;
74
75 key_iv[0] = key[0];
76 key_iv[1] = key[1];
77 crypto_xor(key_iv[0].bytes, iv + 0 * AEGIS_BLOCK_SIZE,
78 AEGIS_BLOCK_SIZE);
79 crypto_xor(key_iv[1].bytes, iv + 1 * AEGIS_BLOCK_SIZE,
80 AEGIS_BLOCK_SIZE);
81
82 state->blocks[0] = key_iv[0];
83 state->blocks[1] = key_iv[1];
84 state->blocks[2] = crypto_aegis_const[1];
85 state->blocks[3] = crypto_aegis_const[0];
86 state->blocks[4] = key[0];
87 state->blocks[5] = key[1];
88
89 crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[0]);
90 crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[1]);
91
92 for (i = 0; i < 4; i++) {
93 crypto_aegis256_update_a(state, &key[0]);
94 crypto_aegis256_update_a(state, &key[1]);
95 crypto_aegis256_update_a(state, &key_iv[0]);
96 crypto_aegis256_update_a(state, &key_iv[1]);
97 }
98}
99
100static void crypto_aegis256_ad(struct aegis_state *state,
101 const u8 *src, unsigned int size)
102{
103 if (AEGIS_ALIGNED(src)) {
104 const union aegis_block *src_blk =
105 (const union aegis_block *)src;
106
107 while (size >= AEGIS_BLOCK_SIZE) {
108 crypto_aegis256_update_a(state, src_blk);
109
110 size -= AEGIS_BLOCK_SIZE;
111 src_blk++;
112 }
113 } else {
114 while (size >= AEGIS_BLOCK_SIZE) {
115 crypto_aegis256_update_u(state, src);
116
117 size -= AEGIS_BLOCK_SIZE;
118 src += AEGIS_BLOCK_SIZE;
119 }
120 }
121}
122
123static void crypto_aegis256_encrypt_chunk(struct aegis_state *state, u8 *dst,
124 const u8 *src, unsigned int size)
125{
126 union aegis_block tmp;
127
128 if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
129 while (size >= AEGIS_BLOCK_SIZE) {
130 union aegis_block *dst_blk =
131 (union aegis_block *)dst;
132 const union aegis_block *src_blk =
133 (const union aegis_block *)src;
134
135 tmp = state->blocks[2];
136 crypto_aegis_block_and(&tmp, &state->blocks[3]);
137 crypto_aegis_block_xor(&tmp, &state->blocks[5]);
138 crypto_aegis_block_xor(&tmp, &state->blocks[4]);
139 crypto_aegis_block_xor(&tmp, &state->blocks[1]);
140 crypto_aegis_block_xor(&tmp, src_blk);
141
142 crypto_aegis256_update_a(state, src_blk);
143
144 *dst_blk = tmp;
145
146 size -= AEGIS_BLOCK_SIZE;
147 src += AEGIS_BLOCK_SIZE;
148 dst += AEGIS_BLOCK_SIZE;
149 }
150 } else {
151 while (size >= AEGIS_BLOCK_SIZE) {
152 tmp = state->blocks[2];
153 crypto_aegis_block_and(&tmp, &state->blocks[3]);
154 crypto_aegis_block_xor(&tmp, &state->blocks[5]);
155 crypto_aegis_block_xor(&tmp, &state->blocks[4]);
156 crypto_aegis_block_xor(&tmp, &state->blocks[1]);
157 crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
158
159 crypto_aegis256_update_u(state, src);
160
161 memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
162
163 size -= AEGIS_BLOCK_SIZE;
164 src += AEGIS_BLOCK_SIZE;
165 dst += AEGIS_BLOCK_SIZE;
166 }
167 }
168
169 if (size > 0) {
170 union aegis_block msg = {};
171 memcpy(msg.bytes, src, size);
172
173 tmp = state->blocks[2];
174 crypto_aegis_block_and(&tmp, &state->blocks[3]);
175 crypto_aegis_block_xor(&tmp, &state->blocks[5]);
176 crypto_aegis_block_xor(&tmp, &state->blocks[4]);
177 crypto_aegis_block_xor(&tmp, &state->blocks[1]);
178
179 crypto_aegis256_update_a(state, &msg);
180
181 crypto_aegis_block_xor(&msg, &tmp);
182
183 memcpy(dst, msg.bytes, size);
184 }
185}
186
187static void crypto_aegis256_decrypt_chunk(struct aegis_state *state, u8 *dst,
188 const u8 *src, unsigned int size)
189{
190 union aegis_block tmp;
191
192 if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
193 while (size >= AEGIS_BLOCK_SIZE) {
194 union aegis_block *dst_blk =
195 (union aegis_block *)dst;
196 const union aegis_block *src_blk =
197 (const union aegis_block *)src;
198
199 tmp = state->blocks[2];
200 crypto_aegis_block_and(&tmp, &state->blocks[3]);
201 crypto_aegis_block_xor(&tmp, &state->blocks[5]);
202 crypto_aegis_block_xor(&tmp, &state->blocks[4]);
203 crypto_aegis_block_xor(&tmp, &state->blocks[1]);
204 crypto_aegis_block_xor(&tmp, src_blk);
205
206 crypto_aegis256_update_a(state, &tmp);
207
208 *dst_blk = tmp;
209
210 size -= AEGIS_BLOCK_SIZE;
211 src += AEGIS_BLOCK_SIZE;
212 dst += AEGIS_BLOCK_SIZE;
213 }
214 } else {
215 while (size >= AEGIS_BLOCK_SIZE) {
216 tmp = state->blocks[2];
217 crypto_aegis_block_and(&tmp, &state->blocks[3]);
218 crypto_aegis_block_xor(&tmp, &state->blocks[5]);
219 crypto_aegis_block_xor(&tmp, &state->blocks[4]);
220 crypto_aegis_block_xor(&tmp, &state->blocks[1]);
221 crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
222
223 crypto_aegis256_update_a(state, &tmp);
224
225 memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
226
227 size -= AEGIS_BLOCK_SIZE;
228 src += AEGIS_BLOCK_SIZE;
229 dst += AEGIS_BLOCK_SIZE;
230 }
231 }
232
233 if (size > 0) {
234 union aegis_block msg = {};
235 memcpy(msg.bytes, src, size);
236
237 tmp = state->blocks[2];
238 crypto_aegis_block_and(&tmp, &state->blocks[3]);
239 crypto_aegis_block_xor(&tmp, &state->blocks[5]);
240 crypto_aegis_block_xor(&tmp, &state->blocks[4]);
241 crypto_aegis_block_xor(&tmp, &state->blocks[1]);
242 crypto_aegis_block_xor(&msg, &tmp);
243
244 memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size);
245
246 crypto_aegis256_update_a(state, &msg);
247
248 memcpy(dst, msg.bytes, size);
249 }
250}
251
252static void crypto_aegis256_process_ad(struct aegis_state *state,
253 struct scatterlist *sg_src,
254 unsigned int assoclen)
255{
256 struct scatter_walk walk;
257 union aegis_block buf;
258 unsigned int pos = 0;
259
260 scatterwalk_start(&walk, sg_src);
261 while (assoclen != 0) {
262 unsigned int size = scatterwalk_clamp(&walk, assoclen);
263 unsigned int left = size;
264 void *mapped = scatterwalk_map(&walk);
265 const u8 *src = (const u8 *)mapped;
266
267 if (pos + size >= AEGIS_BLOCK_SIZE) {
268 if (pos > 0) {
269 unsigned int fill = AEGIS_BLOCK_SIZE - pos;
270 memcpy(buf.bytes + pos, src, fill);
271 crypto_aegis256_update_a(state, &buf);
272 pos = 0;
273 left -= fill;
274 src += fill;
275 }
276
277 crypto_aegis256_ad(state, src, left);
278 src += left & ~(AEGIS_BLOCK_SIZE - 1);
279 left &= AEGIS_BLOCK_SIZE - 1;
280 }
281
282 memcpy(buf.bytes + pos, src, left);
283
284 pos += left;
285 assoclen -= size;
286 scatterwalk_unmap(mapped);
287 scatterwalk_advance(&walk, size);
288 scatterwalk_done(&walk, 0, assoclen);
289 }
290
291 if (pos > 0) {
292 memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos);
293 crypto_aegis256_update_a(state, &buf);
294 }
295}
296
297static void crypto_aegis256_process_crypt(struct aegis_state *state,
298 struct aead_request *req,
299 const struct aegis256_ops *ops)
300{
301 struct skcipher_walk walk;
302
303 ops->skcipher_walk_init(&walk, req, false);
304
305 while (walk.nbytes) {
306 unsigned int nbytes = walk.nbytes;
307
308 if (nbytes < walk.total)
309 nbytes = round_down(nbytes, walk.stride);
310
311 ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
312 nbytes);
313
314 skcipher_walk_done(&walk, walk.nbytes - nbytes);
315 }
316}
317
318static void crypto_aegis256_final(struct aegis_state *state,
319 union aegis_block *tag_xor,
320 u64 assoclen, u64 cryptlen)
321{
322 u64 assocbits = assoclen * 8;
323 u64 cryptbits = cryptlen * 8;
324
325 union aegis_block tmp;
326 unsigned int i;
327
328 tmp.words64[0] = cpu_to_le64(assocbits);
329 tmp.words64[1] = cpu_to_le64(cryptbits);
330
331 crypto_aegis_block_xor(&tmp, &state->blocks[3]);
332
333 for (i = 0; i < 7; i++)
334 crypto_aegis256_update_a(state, &tmp);
335
336 for (i = 0; i < AEGIS256_STATE_BLOCKS; i++)
337 crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
338}
339
340static int crypto_aegis256_setkey(struct crypto_aead *aead, const u8 *key,
341 unsigned int keylen)
342{
343 struct aegis_ctx *ctx = crypto_aead_ctx(aead);
344
345 if (keylen != AEGIS256_KEY_SIZE) {
346 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
347 return -EINVAL;
348 }
349
350 memcpy(ctx->key[0].bytes, key, AEGIS_BLOCK_SIZE);
351 memcpy(ctx->key[1].bytes, key + AEGIS_BLOCK_SIZE,
352 AEGIS_BLOCK_SIZE);
353 return 0;
354}
355
356static int crypto_aegis256_setauthsize(struct crypto_aead *tfm,
357 unsigned int authsize)
358{
359 if (authsize > AEGIS256_MAX_AUTH_SIZE)
360 return -EINVAL;
361 if (authsize < AEGIS256_MIN_AUTH_SIZE)
362 return -EINVAL;
363 return 0;
364}
365
366static void crypto_aegis256_crypt(struct aead_request *req,
367 union aegis_block *tag_xor,
368 unsigned int cryptlen,
369 const struct aegis256_ops *ops)
370{
371 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
372 struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
373 struct aegis_state state;
374
375 crypto_aegis256_init(&state, ctx->key, req->iv);
376 crypto_aegis256_process_ad(&state, req->src, req->assoclen);
377 crypto_aegis256_process_crypt(&state, req, ops);
378 crypto_aegis256_final(&state, tag_xor, req->assoclen, cryptlen);
379}
380
381static int crypto_aegis256_encrypt(struct aead_request *req)
382{
383 static const struct aegis256_ops ops = {
384 .skcipher_walk_init = skcipher_walk_aead_encrypt,
385 .crypt_chunk = crypto_aegis256_encrypt_chunk,
386 };
387
388 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
389 union aegis_block tag = {};
390 unsigned int authsize = crypto_aead_authsize(tfm);
391 unsigned int cryptlen = req->cryptlen;
392
393 crypto_aegis256_crypt(req, &tag, cryptlen, &ops);
394
395 scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
396 authsize, 1);
397 return 0;
398}
399
400static int crypto_aegis256_decrypt(struct aead_request *req)
401{
402 static const struct aegis256_ops ops = {
403 .skcipher_walk_init = skcipher_walk_aead_decrypt,
404 .crypt_chunk = crypto_aegis256_decrypt_chunk,
405 };
406 static const u8 zeros[AEGIS256_MAX_AUTH_SIZE] = {};
407
408 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
409 union aegis_block tag;
410 unsigned int authsize = crypto_aead_authsize(tfm);
411 unsigned int cryptlen = req->cryptlen - authsize;
412
413 scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
414 authsize, 0);
415
416 crypto_aegis256_crypt(req, &tag, cryptlen, &ops);
417
418 return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
419}
420
421static int crypto_aegis256_init_tfm(struct crypto_aead *tfm)
422{
423 return 0;
424}
425
426static void crypto_aegis256_exit_tfm(struct crypto_aead *tfm)
427{
428}
429
430static struct aead_alg crypto_aegis256_alg = {
431 .setkey = crypto_aegis256_setkey,
432 .setauthsize = crypto_aegis256_setauthsize,
433 .encrypt = crypto_aegis256_encrypt,
434 .decrypt = crypto_aegis256_decrypt,
435 .init = crypto_aegis256_init_tfm,
436 .exit = crypto_aegis256_exit_tfm,
437
438 .ivsize = AEGIS256_NONCE_SIZE,
439 .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
440 .chunksize = AEGIS_BLOCK_SIZE,
441
442 .base = {
443 .cra_blocksize = 1,
444 .cra_ctxsize = sizeof(struct aegis_ctx),
445 .cra_alignmask = 0,
446
447 .cra_priority = 100,
448
449 .cra_name = "aegis256",
450 .cra_driver_name = "aegis256-generic",
451
452 .cra_module = THIS_MODULE,
453 }
454};
455
456static int __init crypto_aegis256_module_init(void)
457{
458 return crypto_register_aead(&crypto_aegis256_alg);
459}
460
461static void __exit crypto_aegis256_module_exit(void)
462{
463 crypto_unregister_aead(&crypto_aegis256_alg);
464}
465
466subsys_initcall(crypto_aegis256_module_init);
467module_exit(crypto_aegis256_module_exit);
468
469MODULE_LICENSE("GPL");
470MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
471MODULE_DESCRIPTION("AEGIS-256 AEAD algorithm");
472MODULE_ALIAS_CRYPTO("aegis256");
473MODULE_ALIAS_CRYPTO("aegis256-generic");
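AEGIS-256, removed here as well, finishes the same way as AEGIS-128L: the associated-data and message lengths are encoded in bits, little-endian, XORed with a state column, folded through seven extra state updates, and the resulting columns are XORed into the caller's tag_xor block. Because the decrypt path passes the received tag in as tag_xor, a correct tag cancels out to all zeroes, and the constant-time crypto_memneq() comparison (from <crypto/algapi.h>) against a zero block decides the verdict. Reduced to its core, that check is the short kernel-context sketch below.

	/*
	 * Sketch of the decrypt-side tag check: 'folded' is the received tag
	 * with the computed tag already XORed in, so a match leaves only
	 * zero bytes behind.
	 */
	static int aegis_check_tag(const u8 *folded, unsigned int authsize)
	{
		static const u8 zeros[16] = {};

		return crypto_memneq(folded, zeros, authsize) ? -EBADMSG : 0;
	}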
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index f217568917e4..22e5867177f1 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -61,8 +61,6 @@ static inline u8 byte(const u32 x, const unsigned n)
61 return x >> (n << 3); 61 return x >> (n << 3);
62} 62}
63 63
64static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
65
66/* cacheline-aligned to facilitate prefetching into cache */ 64/* cacheline-aligned to facilitate prefetching into cache */
67__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = { 65__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
68 { 66 {
@@ -328,7 +326,7 @@ __visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
328 } 326 }
329}; 327};
330 328
331__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = { 329static const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
332 { 330 {
333 0x00000063, 0x0000007c, 0x00000077, 0x0000007b, 331 0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
334 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5, 332 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -856,7 +854,7 @@ __visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
856 } 854 }
857}; 855};
858 856
859__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = { 857static const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
860 { 858 {
861 0x00000052, 0x00000009, 0x0000006a, 0x000000d5, 859 0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
862 0x00000030, 0x00000036, 0x000000a5, 0x00000038, 860 0x00000030, 0x00000036, 0x000000a5, 0x00000038,
@@ -1121,158 +1119,7 @@ __visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
1121}; 1119};
1122 1120
1123EXPORT_SYMBOL_GPL(crypto_ft_tab); 1121EXPORT_SYMBOL_GPL(crypto_ft_tab);
1124EXPORT_SYMBOL_GPL(crypto_fl_tab);
1125EXPORT_SYMBOL_GPL(crypto_it_tab); 1122EXPORT_SYMBOL_GPL(crypto_it_tab);
1126EXPORT_SYMBOL_GPL(crypto_il_tab);
1127
1128/* initialise the key schedule from the user supplied key */
1129
1130#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
1131
1132#define imix_col(y, x) do { \
1133 u = star_x(x); \
1134 v = star_x(u); \
1135 w = star_x(v); \
1136 t = w ^ (x); \
1137 (y) = u ^ v ^ w; \
1138 (y) ^= ror32(u ^ t, 8) ^ \
1139 ror32(v ^ t, 16) ^ \
1140 ror32(t, 24); \
1141} while (0)
1142
1143#define ls_box(x) \
1144 crypto_fl_tab[0][byte(x, 0)] ^ \
1145 crypto_fl_tab[1][byte(x, 1)] ^ \
1146 crypto_fl_tab[2][byte(x, 2)] ^ \
1147 crypto_fl_tab[3][byte(x, 3)]
1148
1149#define loop4(i) do { \
1150 t = ror32(t, 8); \
1151 t = ls_box(t) ^ rco_tab[i]; \
1152 t ^= ctx->key_enc[4 * i]; \
1153 ctx->key_enc[4 * i + 4] = t; \
1154 t ^= ctx->key_enc[4 * i + 1]; \
1155 ctx->key_enc[4 * i + 5] = t; \
1156 t ^= ctx->key_enc[4 * i + 2]; \
1157 ctx->key_enc[4 * i + 6] = t; \
1158 t ^= ctx->key_enc[4 * i + 3]; \
1159 ctx->key_enc[4 * i + 7] = t; \
1160} while (0)
1161
1162#define loop6(i) do { \
1163 t = ror32(t, 8); \
1164 t = ls_box(t) ^ rco_tab[i]; \
1165 t ^= ctx->key_enc[6 * i]; \
1166 ctx->key_enc[6 * i + 6] = t; \
1167 t ^= ctx->key_enc[6 * i + 1]; \
1168 ctx->key_enc[6 * i + 7] = t; \
1169 t ^= ctx->key_enc[6 * i + 2]; \
1170 ctx->key_enc[6 * i + 8] = t; \
1171 t ^= ctx->key_enc[6 * i + 3]; \
1172 ctx->key_enc[6 * i + 9] = t; \
1173 t ^= ctx->key_enc[6 * i + 4]; \
1174 ctx->key_enc[6 * i + 10] = t; \
1175 t ^= ctx->key_enc[6 * i + 5]; \
1176 ctx->key_enc[6 * i + 11] = t; \
1177} while (0)
1178
1179#define loop8tophalf(i) do { \
1180 t = ror32(t, 8); \
1181 t = ls_box(t) ^ rco_tab[i]; \
1182 t ^= ctx->key_enc[8 * i]; \
1183 ctx->key_enc[8 * i + 8] = t; \
1184 t ^= ctx->key_enc[8 * i + 1]; \
1185 ctx->key_enc[8 * i + 9] = t; \
1186 t ^= ctx->key_enc[8 * i + 2]; \
1187 ctx->key_enc[8 * i + 10] = t; \
1188 t ^= ctx->key_enc[8 * i + 3]; \
1189 ctx->key_enc[8 * i + 11] = t; \
1190} while (0)
1191
1192#define loop8(i) do { \
1193 loop8tophalf(i); \
1194 t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
1195 ctx->key_enc[8 * i + 12] = t; \
1196 t ^= ctx->key_enc[8 * i + 5]; \
1197 ctx->key_enc[8 * i + 13] = t; \
1198 t ^= ctx->key_enc[8 * i + 6]; \
1199 ctx->key_enc[8 * i + 14] = t; \
1200 t ^= ctx->key_enc[8 * i + 7]; \
1201 ctx->key_enc[8 * i + 15] = t; \
1202} while (0)
1203
1204/**
1205 * crypto_aes_expand_key - Expands the AES key as described in FIPS-197
1206 * @ctx: The location where the computed key will be stored.
1207 * @in_key: The supplied key.
1208 * @key_len: The length of the supplied key.
1209 *
1210 * Returns 0 on success. The function fails only if an invalid key size (or
1211 * pointer) is supplied.
1212 * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes
1213 * key schedule plus a 16 bytes key which is used before the first round).
1214 * The decryption key is prepared for the "Equivalent Inverse Cipher" as
1215 * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
1216 * for the initial combination, the second slot for the first round and so on.
1217 */
1218int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
1219 unsigned int key_len)
1220{
1221 u32 i, t, u, v, w, j;
1222
1223 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
1224 key_len != AES_KEYSIZE_256)
1225 return -EINVAL;
1226
1227 ctx->key_length = key_len;
1228
1229 ctx->key_enc[0] = get_unaligned_le32(in_key);
1230 ctx->key_enc[1] = get_unaligned_le32(in_key + 4);
1231 ctx->key_enc[2] = get_unaligned_le32(in_key + 8);
1232 ctx->key_enc[3] = get_unaligned_le32(in_key + 12);
1233
1234 ctx->key_dec[key_len + 24] = ctx->key_enc[0];
1235 ctx->key_dec[key_len + 25] = ctx->key_enc[1];
1236 ctx->key_dec[key_len + 26] = ctx->key_enc[2];
1237 ctx->key_dec[key_len + 27] = ctx->key_enc[3];
1238
1239 switch (key_len) {
1240 case AES_KEYSIZE_128:
1241 t = ctx->key_enc[3];
1242 for (i = 0; i < 10; ++i)
1243 loop4(i);
1244 break;
1245
1246 case AES_KEYSIZE_192:
1247 ctx->key_enc[4] = get_unaligned_le32(in_key + 16);
1248 t = ctx->key_enc[5] = get_unaligned_le32(in_key + 20);
1249 for (i = 0; i < 8; ++i)
1250 loop6(i);
1251 break;
1252
1253 case AES_KEYSIZE_256:
1254 ctx->key_enc[4] = get_unaligned_le32(in_key + 16);
1255 ctx->key_enc[5] = get_unaligned_le32(in_key + 20);
1256 ctx->key_enc[6] = get_unaligned_le32(in_key + 24);
1257 t = ctx->key_enc[7] = get_unaligned_le32(in_key + 28);
1258 for (i = 0; i < 6; ++i)
1259 loop8(i);
1260 loop8tophalf(i);
1261 break;
1262 }
1263
1264 ctx->key_dec[0] = ctx->key_enc[key_len + 24];
1265 ctx->key_dec[1] = ctx->key_enc[key_len + 25];
1266 ctx->key_dec[2] = ctx->key_enc[key_len + 26];
1267 ctx->key_dec[3] = ctx->key_enc[key_len + 27];
1268
1269 for (i = 4; i < key_len + 24; ++i) {
1270 j = key_len + 24 - (i & ~3) + (i & 3);
1271 imix_col(ctx->key_dec[j], ctx->key_enc[i]);
1272 }
1273 return 0;
1274}
1275EXPORT_SYMBOL_GPL(crypto_aes_expand_key);
1276 1123
1277/** 1124/**
1278 * crypto_aes_set_key - Set the AES key. 1125 * crypto_aes_set_key - Set the AES key.
@@ -1281,7 +1128,7 @@ EXPORT_SYMBOL_GPL(crypto_aes_expand_key);
1281 * @key_len: The size of the key. 1128 * @key_len: The size of the key.
1282 * 1129 *
1283 * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm 1130 * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
1284 * is set. The function uses crypto_aes_expand_key() to expand the key. 1131 * is set. The function uses aes_expand_key() to expand the key.
1285 * &crypto_aes_ctx _must_ be the private data embedded in @tfm which is 1132 * &crypto_aes_ctx _must_ be the private data embedded in @tfm which is
1286 * retrieved with crypto_tfm_ctx(). 1133 * retrieved with crypto_tfm_ctx().
1287 */ 1134 */
@@ -1292,7 +1139,7 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
1292 u32 *flags = &tfm->crt_flags; 1139 u32 *flags = &tfm->crt_flags;
1293 int ret; 1140 int ret;
1294 1141
1295 ret = crypto_aes_expand_key(ctx, in_key, key_len); 1142 ret = aes_expandkey(ctx, in_key, key_len);
1296 if (!ret) 1143 if (!ret)
1297 return 0; 1144 return 0;
1298 1145
@@ -1332,7 +1179,7 @@ EXPORT_SYMBOL_GPL(crypto_aes_set_key);
1332 f_rl(bo, bi, 3, k); \ 1179 f_rl(bo, bi, 3, k); \
1333} while (0) 1180} while (0)
1334 1181
1335static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 1182static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1336{ 1183{
1337 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 1184 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1338 u32 b0[4], b1[4]; 1185 u32 b0[4], b1[4];
@@ -1402,7 +1249,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1402 i_rl(bo, bi, 3, k); \ 1249 i_rl(bo, bi, 3, k); \
1403} while (0) 1250} while (0)
1404 1251
1405static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 1252static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1406{ 1253{
1407 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 1254 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1408 u32 b0[4], b1[4]; 1255 u32 b0[4], b1[4];
@@ -1454,8 +1301,8 @@ static struct crypto_alg aes_alg = {
1454 .cia_min_keysize = AES_MIN_KEY_SIZE, 1301 .cia_min_keysize = AES_MIN_KEY_SIZE,
1455 .cia_max_keysize = AES_MAX_KEY_SIZE, 1302 .cia_max_keysize = AES_MAX_KEY_SIZE,
1456 .cia_setkey = crypto_aes_set_key, 1303 .cia_setkey = crypto_aes_set_key,
1457 .cia_encrypt = aes_encrypt, 1304 .cia_encrypt = crypto_aes_encrypt,
1458 .cia_decrypt = aes_decrypt 1305 .cia_decrypt = crypto_aes_decrypt
1459 } 1306 }
1460 } 1307 }
1461}; 1308};
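The aes_generic.c change drops the private key-expansion routine and makes the encrypt/decrypt lookup tables static, relying instead on the shared AES library added elsewhere in this series (the "library helpers for AES ... for single-block users" from the merge description); crypto_aes_set_key() now calls aes_expandkey(), and the cipher entry points gain a crypto_ prefix to avoid colliding with the library's aes_encrypt()/aes_decrypt(). A hedged sketch of how an in-kernel single-block user would consume that library, assuming the <crypto/aes.h> declarations of aes_expandkey() and aes_encrypt(), follows.

	#include <crypto/aes.h>
	#include <linux/string.h>

	/*
	 * Encrypt one 16-byte block with the AES library cipher; returns 0 or a
	 * negative errno for an unsupported key length. Sketch only.
	 */
	static int encrypt_one_block(const u8 *key, unsigned int key_len,
				     const u8 in[AES_BLOCK_SIZE],
				     u8 out[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx ctx;
		int err;

		err = aes_expandkey(&ctx, key, key_len);
		if (err)
			return err;

		aes_encrypt(&ctx, out, in);
		memzero_explicit(&ctx, sizeof(ctx));	/* scrub the round keys */
		return 0;
	}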
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 798fc9a2c8d6..205c2c257d49 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -8,271 +8,19 @@
8#include <crypto/aes.h> 8#include <crypto/aes.h>
9#include <linux/crypto.h> 9#include <linux/crypto.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <asm/unaligned.h>
12
13/*
14 * Emit the sbox as volatile const to prevent the compiler from doing
15 * constant folding on sbox references involving fixed indexes.
16 */
17static volatile const u8 __cacheline_aligned __aesti_sbox[] = {
18 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
19 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
20 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
21 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
22 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
23 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
24 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
25 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
26 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
27 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
28 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
29 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
30 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
31 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
32 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
33 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
34 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
35 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
36 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
37 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
38 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
39 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
40 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
41 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
42 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
43 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
44 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
45 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
46 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
47 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
48 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
49 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
50};
51
52static volatile const u8 __cacheline_aligned __aesti_inv_sbox[] = {
53 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
54 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
55 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
56 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
57 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
58 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
59 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
60 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
61 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
62 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
63 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
64 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
65 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
66 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
67 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
68 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
69 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
70 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
71 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
72 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
73 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
74 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
75 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
76 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
77 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
78 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
79 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
80 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
81 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
82 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
83 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
84 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
85};
86
87static u32 mul_by_x(u32 w)
88{
89 u32 x = w & 0x7f7f7f7f;
90 u32 y = w & 0x80808080;
91
92 /* multiply by polynomial 'x' (0b10) in GF(2^8) */
93 return (x << 1) ^ (y >> 7) * 0x1b;
94}
95
96static u32 mul_by_x2(u32 w)
97{
98 u32 x = w & 0x3f3f3f3f;
99 u32 y = w & 0x80808080;
100 u32 z = w & 0x40404040;
101
102 /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */
103 return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
104}
105
106static u32 mix_columns(u32 x)
107{
108 /*
109 * Perform the following matrix multiplication in GF(2^8)
110 *
111 * | 0x2 0x3 0x1 0x1 | | x[0] |
112 * | 0x1 0x2 0x3 0x1 | | x[1] |
113 * | 0x1 0x1 0x2 0x3 | x | x[2] |
114 * | 0x3 0x1 0x1 0x2 | | x[3] |
115 */
116 u32 y = mul_by_x(x) ^ ror32(x, 16);
117
118 return y ^ ror32(x ^ y, 8);
119}
120
121static u32 inv_mix_columns(u32 x)
122{
123 /*
124 * Perform the following matrix multiplication in GF(2^8)
125 *
126 * | 0xe 0xb 0xd 0x9 | | x[0] |
127 * | 0x9 0xe 0xb 0xd | | x[1] |
128 * | 0xd 0x9 0xe 0xb | x | x[2] |
129 * | 0xb 0xd 0x9 0xe | | x[3] |
130 *
131 * which can conveniently be reduced to
132 *
133 * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] |
134 * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] |
135 * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] |
136 * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] |
137 */
138 u32 y = mul_by_x2(x);
139
140 return mix_columns(x ^ y ^ ror32(y, 16));
141}
142
143static __always_inline u32 subshift(u32 in[], int pos)
144{
145 return (__aesti_sbox[in[pos] & 0xff]) ^
146 (__aesti_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
147 (__aesti_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
148 (__aesti_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
149}
150
151static __always_inline u32 inv_subshift(u32 in[], int pos)
152{
153 return (__aesti_inv_sbox[in[pos] & 0xff]) ^
154 (__aesti_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
155 (__aesti_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
156 (__aesti_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
157}
158
159static u32 subw(u32 in)
160{
161 return (__aesti_sbox[in & 0xff]) ^
162 (__aesti_sbox[(in >> 8) & 0xff] << 8) ^
163 (__aesti_sbox[(in >> 16) & 0xff] << 16) ^
164 (__aesti_sbox[(in >> 24) & 0xff] << 24);
165}
166
167static int aesti_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
168 unsigned int key_len)
169{
170 u32 kwords = key_len / sizeof(u32);
171 u32 rc, i, j;
172
173 if (key_len != AES_KEYSIZE_128 &&
174 key_len != AES_KEYSIZE_192 &&
175 key_len != AES_KEYSIZE_256)
176 return -EINVAL;
177
178 ctx->key_length = key_len;
179
180 for (i = 0; i < kwords; i++)
181 ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
182
183 for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
184 u32 *rki = ctx->key_enc + (i * kwords);
185 u32 *rko = rki + kwords;
186
187 rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
188 rko[1] = rko[0] ^ rki[1];
189 rko[2] = rko[1] ^ rki[2];
190 rko[3] = rko[2] ^ rki[3];
191
192 if (key_len == 24) {
193 if (i >= 7)
194 break;
195 rko[4] = rko[3] ^ rki[4];
196 rko[5] = rko[4] ^ rki[5];
197 } else if (key_len == 32) {
198 if (i >= 6)
199 break;
200 rko[4] = subw(rko[3]) ^ rki[4];
201 rko[5] = rko[4] ^ rki[5];
202 rko[6] = rko[5] ^ rki[6];
203 rko[7] = rko[6] ^ rki[7];
204 }
205 }
206
207 /*
208 * Generate the decryption keys for the Equivalent Inverse Cipher.
209 * This involves reversing the order of the round keys, and applying
210 * the Inverse Mix Columns transformation to all but the first and
211 * the last one.
212 */
213 ctx->key_dec[0] = ctx->key_enc[key_len + 24];
214 ctx->key_dec[1] = ctx->key_enc[key_len + 25];
215 ctx->key_dec[2] = ctx->key_enc[key_len + 26];
216 ctx->key_dec[3] = ctx->key_enc[key_len + 27];
217
218 for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
219 ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]);
220 ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
221 ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
222 ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
223 }
224
225 ctx->key_dec[i] = ctx->key_enc[0];
226 ctx->key_dec[i + 1] = ctx->key_enc[1];
227 ctx->key_dec[i + 2] = ctx->key_enc[2];
228 ctx->key_dec[i + 3] = ctx->key_enc[3];
229
230 return 0;
231}
232 11
233static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key, 12static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
234 unsigned int key_len) 13 unsigned int key_len)
235{ 14{
236 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 15 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
237 int err;
238
239 err = aesti_expand_key(ctx, in_key, key_len);
240 if (err)
241 return err;
242
243 /*
244 * In order to force the compiler to emit data independent Sbox lookups
245 * at the start of each block, xor the first round key with values at
246 * fixed indexes in the Sbox. This will need to be repeated each time
247 * the key is used, which will pull the entire Sbox into the D-cache
248 * before any data dependent Sbox lookups are performed.
249 */
250 ctx->key_enc[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
251 ctx->key_enc[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
252 ctx->key_enc[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
253 ctx->key_enc[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224];
254
255 ctx->key_dec[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
256 ctx->key_dec[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
257 ctx->key_dec[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
258 ctx->key_dec[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224];
259 16
260 return 0; 17 return aes_expandkey(ctx, in_key, key_len);
261} 18}
262 19
263static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 20static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
264{ 21{
265 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 22 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
266 const u32 *rkp = ctx->key_enc + 4;
267 int rounds = 6 + ctx->key_length / 4;
268 u32 st0[4], st1[4];
269 unsigned long flags; 23 unsigned long flags;
270 int round;
271
272 st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
273 st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
274 st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
275 st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
276 24
277 /* 25 /*
278 * Temporarily disable interrupts to avoid races where cachelines are 26 * Temporarily disable interrupts to avoid races where cachelines are
@@ -280,30 +28,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
280 */ 28 */
281 local_irq_save(flags); 29 local_irq_save(flags);
282 30
283 st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128]; 31 aes_encrypt(ctx, out, in);
284 st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
285 st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
286 st0[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224];
287
288 for (round = 0;; round += 2, rkp += 8) {
289 st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
290 st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
291 st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
292 st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
293
294 if (round == rounds - 2)
295 break;
296
297 st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
298 st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
299 st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
300 st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
301 }
302
303 put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
304 put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
305 put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
306 put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
307 32
308 local_irq_restore(flags); 33 local_irq_restore(flags);
309} 34}
@@ -311,16 +36,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
311static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 36static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
312{ 37{
313 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 38 const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
314 const u32 *rkp = ctx->key_dec + 4;
315 int rounds = 6 + ctx->key_length / 4;
316 u32 st0[4], st1[4];
317 unsigned long flags; 39 unsigned long flags;
318 int round;
319
320 st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
321 st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
322 st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
323 st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
324 40
325 /* 41 /*
326 * Temporarily disable interrupts to avoid races where cachelines are 42 * Temporarily disable interrupts to avoid races where cachelines are
@@ -328,30 +44,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
328 */ 44 */
329 local_irq_save(flags); 45 local_irq_save(flags);
330 46
331 st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128]; 47 aes_decrypt(ctx, out, in);
332 st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
333 st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
334 st0[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224];
335
336 for (round = 0;; round += 2, rkp += 8) {
337 st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
338 st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
339 st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
340 st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
341
342 if (round == rounds - 2)
343 break;
344
345 st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
346 st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
347 st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
348 st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
349 }
350
351 put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
352 put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
353 put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
354 put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
355 48
356 local_irq_restore(flags); 49 local_irq_restore(flags);
357} 50}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 3748f9b4516d..927760b316a4 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -16,7 +16,7 @@
16#include <crypto/internal/aead.h> 16#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h> 17#include <crypto/internal/skcipher.h>
18#include <crypto/cryptd.h> 18#include <crypto/cryptd.h>
19#include <linux/atomic.h> 19#include <linux/refcount.h>
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -63,7 +63,7 @@ struct aead_instance_ctx {
63}; 63};
64 64
65struct cryptd_skcipher_ctx { 65struct cryptd_skcipher_ctx {
66 atomic_t refcnt; 66 refcount_t refcnt;
67 struct crypto_sync_skcipher *child; 67 struct crypto_sync_skcipher *child;
68}; 68};
69 69
@@ -72,7 +72,7 @@ struct cryptd_skcipher_request_ctx {
72}; 72};
73 73
74struct cryptd_hash_ctx { 74struct cryptd_hash_ctx {
75 atomic_t refcnt; 75 refcount_t refcnt;
76 struct crypto_shash *child; 76 struct crypto_shash *child;
77}; 77};
78 78
@@ -82,7 +82,7 @@ struct cryptd_hash_request_ctx {
82}; 82};
83 83
84struct cryptd_aead_ctx { 84struct cryptd_aead_ctx {
85 atomic_t refcnt; 85 refcount_t refcnt;
86 struct crypto_aead *child; 86 struct crypto_aead *child;
87}; 87};
88 88
@@ -127,7 +127,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
127{ 127{
128 int cpu, err; 128 int cpu, err;
129 struct cryptd_cpu_queue *cpu_queue; 129 struct cryptd_cpu_queue *cpu_queue;
130 atomic_t *refcnt; 130 refcount_t *refcnt;
131 131
132 cpu = get_cpu(); 132 cpu = get_cpu();
133 cpu_queue = this_cpu_ptr(queue->cpu_queue); 133 cpu_queue = this_cpu_ptr(queue->cpu_queue);
@@ -140,10 +140,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
140 140
141 queue_work_on(cpu, cryptd_wq, &cpu_queue->work); 141 queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
142 142
143 if (!atomic_read(refcnt)) 143 if (!refcount_read(refcnt))
144 goto out_put_cpu; 144 goto out_put_cpu;
145 145
146 atomic_inc(refcnt); 146 refcount_inc(refcnt);
147 147
148out_put_cpu: 148out_put_cpu:
149 put_cpu(); 149 put_cpu();
@@ -270,13 +270,13 @@ static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
270 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 270 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
271 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 271 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
272 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 272 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
273 int refcnt = atomic_read(&ctx->refcnt); 273 int refcnt = refcount_read(&ctx->refcnt);
274 274
275 local_bh_disable(); 275 local_bh_disable();
276 rctx->complete(&req->base, err); 276 rctx->complete(&req->base, err);
277 local_bh_enable(); 277 local_bh_enable();
278 278
279 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) 279 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
280 crypto_free_skcipher(tfm); 280 crypto_free_skcipher(tfm);
281} 281}
282 282
@@ -521,13 +521,13 @@ static void cryptd_hash_complete(struct ahash_request *req, int err)
521 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 521 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
522 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); 522 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
523 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 523 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
524 int refcnt = atomic_read(&ctx->refcnt); 524 int refcnt = refcount_read(&ctx->refcnt);
525 525
526 local_bh_disable(); 526 local_bh_disable();
527 rctx->complete(&req->base, err); 527 rctx->complete(&req->base, err);
528 local_bh_enable(); 528 local_bh_enable();
529 529
530 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) 530 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
531 crypto_free_ahash(tfm); 531 crypto_free_ahash(tfm);
532} 532}
533 533
@@ -772,13 +772,13 @@ static void cryptd_aead_crypt(struct aead_request *req,
772 772
773out: 773out:
774 ctx = crypto_aead_ctx(tfm); 774 ctx = crypto_aead_ctx(tfm);
775 refcnt = atomic_read(&ctx->refcnt); 775 refcnt = refcount_read(&ctx->refcnt);
776 776
777 local_bh_disable(); 777 local_bh_disable();
778 compl(&req->base, err); 778 compl(&req->base, err);
779 local_bh_enable(); 779 local_bh_enable();
780 780
781 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) 781 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
782 crypto_free_aead(tfm); 782 crypto_free_aead(tfm);
783} 783}
784 784
@@ -979,7 +979,7 @@ struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
979 } 979 }
980 980
981 ctx = crypto_skcipher_ctx(tfm); 981 ctx = crypto_skcipher_ctx(tfm);
982 atomic_set(&ctx->refcnt, 1); 982 refcount_set(&ctx->refcnt, 1);
983 983
984 return container_of(tfm, struct cryptd_skcipher, base); 984 return container_of(tfm, struct cryptd_skcipher, base);
985} 985}
@@ -997,7 +997,7 @@ bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
997{ 997{
998 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); 998 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
999 999
1000 return atomic_read(&ctx->refcnt) - 1; 1000 return refcount_read(&ctx->refcnt) - 1;
1001} 1001}
1002EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); 1002EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
1003 1003
@@ -1005,7 +1005,7 @@ void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
1005{ 1005{
1006 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); 1006 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1007 1007
1008 if (atomic_dec_and_test(&ctx->refcnt)) 1008 if (refcount_dec_and_test(&ctx->refcnt))
1009 crypto_free_skcipher(&tfm->base); 1009 crypto_free_skcipher(&tfm->base);
1010} 1010}
1011EXPORT_SYMBOL_GPL(cryptd_free_skcipher); 1011EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
@@ -1029,7 +1029,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
1029 } 1029 }
1030 1030
1031 ctx = crypto_ahash_ctx(tfm); 1031 ctx = crypto_ahash_ctx(tfm);
1032 atomic_set(&ctx->refcnt, 1); 1032 refcount_set(&ctx->refcnt, 1);
1033 1033
1034 return __cryptd_ahash_cast(tfm); 1034 return __cryptd_ahash_cast(tfm);
1035} 1035}
@@ -1054,7 +1054,7 @@ bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1054{ 1054{
1055 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 1055 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1056 1056
1057 return atomic_read(&ctx->refcnt) - 1; 1057 return refcount_read(&ctx->refcnt) - 1;
1058} 1058}
1059EXPORT_SYMBOL_GPL(cryptd_ahash_queued); 1059EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1060 1060
@@ -1062,7 +1062,7 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm)
1062{ 1062{
1063 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 1063 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1064 1064
1065 if (atomic_dec_and_test(&ctx->refcnt)) 1065 if (refcount_dec_and_test(&ctx->refcnt))
1066 crypto_free_ahash(&tfm->base); 1066 crypto_free_ahash(&tfm->base);
1067} 1067}
1068EXPORT_SYMBOL_GPL(cryptd_free_ahash); 1068EXPORT_SYMBOL_GPL(cryptd_free_ahash);
@@ -1086,7 +1086,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1086 } 1086 }
1087 1087
1088 ctx = crypto_aead_ctx(tfm); 1088 ctx = crypto_aead_ctx(tfm);
1089 atomic_set(&ctx->refcnt, 1); 1089 refcount_set(&ctx->refcnt, 1);
1090 1090
1091 return __cryptd_aead_cast(tfm); 1091 return __cryptd_aead_cast(tfm);
1092} 1092}
@@ -1104,7 +1104,7 @@ bool cryptd_aead_queued(struct cryptd_aead *tfm)
1104{ 1104{
1105 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); 1105 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1106 1106
1107 return atomic_read(&ctx->refcnt) - 1; 1107 return refcount_read(&ctx->refcnt) - 1;
1108} 1108}
1109EXPORT_SYMBOL_GPL(cryptd_aead_queued); 1109EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1110 1110
@@ -1112,7 +1112,7 @@ void cryptd_free_aead(struct cryptd_aead *tfm)
1112{ 1112{
1113 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); 1113 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1114 1114
1115 if (atomic_dec_and_test(&ctx->refcnt)) 1115 if (refcount_dec_and_test(&ctx->refcnt))
1116 crypto_free_aead(&tfm->base); 1116 crypto_free_aead(&tfm->base);
1117} 1117}
1118EXPORT_SYMBOL_GPL(cryptd_free_aead); 1118EXPORT_SYMBOL_GPL(cryptd_free_aead);
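
Note on the cryptd changes above: every per-transform reference count moves from atomic_t to refcount_t, which saturates instead of wrapping and warns on suspicious transitions, while the surrounding enqueue/complete/free logic keeps the same shape. A minimal sketch of the pattern, using a hypothetical example_ctx that is not part of this patch:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_ctx {
	refcount_t refcnt;
};

static struct example_ctx *example_alloc(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		refcount_set(&ctx->refcnt, 1);	/* initial reference */
	return ctx;
}

static struct example_ctx *example_get(struct example_ctx *ctx)
{
	refcount_inc(&ctx->refcnt);		/* take an extra reference */
	return ctx;
}

static void example_put(struct example_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcnt))	/* last reference gone */
		kfree(ctx);
}

The queued/complete paths in the hunks above follow the same read/inc/dec_and_test shape, only against the context embedded in the crypto tfm.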
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index d7502ec37f20..055d17977280 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -425,7 +425,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
425 */ 425 */
426struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) 426struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
427{ 427{
428 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 428 struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
429 struct crypto_engine *engine; 429 struct crypto_engine *engine;
430 430
431 if (!dev) 431 if (!dev)
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index c65e39005ce2..910e0b46012e 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -10,9 +10,10 @@
10#include <linux/crypto.h> 10#include <linux/crypto.h>
11#include <linux/cryptouser.h> 11#include <linux/cryptouser.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <net/netlink.h>
14#include <linux/security.h> 13#include <linux/security.h>
14#include <net/netlink.h>
15#include <net/net_namespace.h> 15#include <net/net_namespace.h>
16#include <net/sock.h>
16#include <crypto/internal/skcipher.h> 17#include <crypto/internal/skcipher.h>
17#include <crypto/internal/rng.h> 18#include <crypto/internal/rng.h>
18#include <crypto/akcipher.h> 19#include <crypto/akcipher.h>
@@ -25,9 +26,6 @@
25 26
26static DEFINE_MUTEX(crypto_cfg_mutex); 27static DEFINE_MUTEX(crypto_cfg_mutex);
27 28
28/* The crypto netlink socket */
29struct sock *crypto_nlsk;
30
31struct crypto_dump_info { 29struct crypto_dump_info {
32 struct sk_buff *in_skb; 30 struct sk_buff *in_skb;
33 struct sk_buff *out_skb; 31 struct sk_buff *out_skb;
@@ -186,6 +184,7 @@ out:
186static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, 184static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
187 struct nlattr **attrs) 185 struct nlattr **attrs)
188{ 186{
187 struct net *net = sock_net(in_skb->sk);
189 struct crypto_user_alg *p = nlmsg_data(in_nlh); 188 struct crypto_user_alg *p = nlmsg_data(in_nlh);
190 struct crypto_alg *alg; 189 struct crypto_alg *alg;
191 struct sk_buff *skb; 190 struct sk_buff *skb;
@@ -217,7 +216,7 @@ drop_alg:
217 if (err) 216 if (err)
218 return err; 217 return err;
219 218
220 return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); 219 return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
221} 220}
222 221
223static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) 222static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
@@ -420,6 +419,7 @@ static const struct crypto_link {
420static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 419static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
421 struct netlink_ext_ack *extack) 420 struct netlink_ext_ack *extack)
422{ 421{
422 struct net *net = sock_net(skb->sk);
423 struct nlattr *attrs[CRYPTOCFGA_MAX+1]; 423 struct nlattr *attrs[CRYPTOCFGA_MAX+1];
424 const struct crypto_link *link; 424 const struct crypto_link *link;
425 int type, err; 425 int type, err;
@@ -450,7 +450,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
450 .done = link->done, 450 .done = link->done,
451 .min_dump_alloc = min(dump_alloc, 65535UL), 451 .min_dump_alloc = min(dump_alloc, 65535UL),
452 }; 452 };
453 err = netlink_dump_start(crypto_nlsk, skb, nlh, &c); 453 err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
454 } 454 }
455 455
456 return err; 456 return err;
@@ -474,22 +474,35 @@ static void crypto_netlink_rcv(struct sk_buff *skb)
474 mutex_unlock(&crypto_cfg_mutex); 474 mutex_unlock(&crypto_cfg_mutex);
475} 475}
476 476
477static int __init crypto_user_init(void) 477static int __net_init crypto_netlink_init(struct net *net)
478{ 478{
479 struct netlink_kernel_cfg cfg = { 479 struct netlink_kernel_cfg cfg = {
480 .input = crypto_netlink_rcv, 480 .input = crypto_netlink_rcv,
481 }; 481 };
482 482
483 crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg); 483 net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg);
484 if (!crypto_nlsk) 484 return net->crypto_nlsk == NULL ? -ENOMEM : 0;
485 return -ENOMEM; 485}
486 486
487 return 0; 487static void __net_exit crypto_netlink_exit(struct net *net)
488{
489 netlink_kernel_release(net->crypto_nlsk);
490 net->crypto_nlsk = NULL;
491}
492
493static struct pernet_operations crypto_netlink_net_ops = {
494 .init = crypto_netlink_init,
495 .exit = crypto_netlink_exit,
496};
497
498static int __init crypto_user_init(void)
499{
500 return register_pernet_subsys(&crypto_netlink_net_ops);
488} 501}
489 502
490static void __exit crypto_user_exit(void) 503static void __exit crypto_user_exit(void)
491{ 504{
492 netlink_kernel_release(crypto_nlsk); 505 unregister_pernet_subsys(&crypto_netlink_net_ops);
493} 506}
494 507
495module_init(crypto_user_init); 508module_init(crypto_user_init);
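
Note on the crypto_user_base changes above: the single global NETLINK_CRYPTO socket becomes per network namespace, created and released through pernet_operations. The patch stores the socket directly in struct net (net->crypto_nlsk); the sketch below reaches for net_generic() storage instead and uses hypothetical names (struct example_pernet, example_net_ops), so it is illustrative only:

#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>

struct example_pernet {
	struct sock *nlsk;
};

static unsigned int example_net_id;

static int __net_init example_net_init(struct net *net)
{
	struct example_pernet *pn = net_generic(net, example_net_id);
	struct netlink_kernel_cfg cfg = { };

	/* one kernel-side netlink socket per namespace */
	pn->nlsk = netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
	return pn->nlsk ? 0 : -ENOMEM;
}

static void __net_exit example_net_exit(struct net *net)
{
	struct example_pernet *pn = net_generic(net, example_net_id);

	netlink_kernel_release(pn->nlsk);
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
	.id   = &example_net_id,
	.size = sizeof(struct example_pernet),
};

/* register_pernet_subsys(&example_net_ops) in module init,
 * unregister_pernet_subsys(&example_net_ops) in module exit. */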
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index a03f326a63d3..8bad88413de1 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -10,6 +10,7 @@
10#include <linux/cryptouser.h> 10#include <linux/cryptouser.h>
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <net/netlink.h> 12#include <net/netlink.h>
13#include <net/sock.h>
13#include <crypto/internal/skcipher.h> 14#include <crypto/internal/skcipher.h>
14#include <crypto/internal/rng.h> 15#include <crypto/internal/rng.h>
15#include <crypto/akcipher.h> 16#include <crypto/akcipher.h>
@@ -298,6 +299,7 @@ out:
298int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, 299int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
299 struct nlattr **attrs) 300 struct nlattr **attrs)
300{ 301{
302 struct net *net = sock_net(in_skb->sk);
301 struct crypto_user_alg *p = nlmsg_data(in_nlh); 303 struct crypto_user_alg *p = nlmsg_data(in_nlh);
302 struct crypto_alg *alg; 304 struct crypto_alg *alg;
303 struct sk_buff *skb; 305 struct sk_buff *skb;
@@ -329,7 +331,7 @@ drop_alg:
329 if (err) 331 if (err)
330 return err; 332 return err;
331 333
332 return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); 334 return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
333} 335}
334 336
335MODULE_LICENSE("GPL"); 337MODULE_LICENSE("GPL");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index dc085514408a..6e13a4a29ecb 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -13,934 +13,79 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/types.h>
17 16
18#include <crypto/des.h> 17#include <crypto/internal/des.h>
19
20#define ROL(x, r) ((x) = rol32((x), (r)))
21#define ROR(x, r) ((x) = ror32((x), (r)))
22
23struct des_ctx {
24 u32 expkey[DES_EXPKEY_WORDS];
25};
26
27struct des3_ede_ctx {
28 u32 expkey[DES3_EDE_EXPKEY_WORDS];
29};
30
31/* Lookup tables for key expansion */
32
33static const u8 pc1[256] = {
34 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14,
35 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54,
36 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16,
37 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56,
38 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c,
39 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c,
40 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e,
41 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e,
42 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34,
43 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74,
44 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36,
45 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76,
46 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c,
47 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c,
48 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e,
49 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e,
50 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94,
51 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4,
52 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96,
53 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6,
54 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c,
55 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc,
56 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e,
57 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde,
58 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4,
59 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4,
60 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6,
61 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6,
62 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc,
63 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc,
64 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe,
65 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe
66};
67
68static const u8 rs[256] = {
69 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82,
70 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86,
71 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a,
72 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e,
73 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92,
74 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96,
75 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a,
76 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e,
77 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2,
78 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6,
79 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa,
80 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae,
81 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2,
82 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6,
83 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba,
84 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe,
85 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2,
86 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6,
87 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca,
88 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce,
89 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2,
90 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6,
91 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda,
92 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde,
93 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2,
94 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6,
95 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea,
96 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee,
97 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2,
98 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6,
99 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa,
100 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe
101};
102
103static const u32 pc2[1024] = {
104 0x00000000, 0x00000000, 0x00000000, 0x00000000,
105 0x00040000, 0x00000000, 0x04000000, 0x00100000,
106 0x00400000, 0x00000008, 0x00000800, 0x40000000,
107 0x00440000, 0x00000008, 0x04000800, 0x40100000,
108 0x00000400, 0x00000020, 0x08000000, 0x00000100,
109 0x00040400, 0x00000020, 0x0c000000, 0x00100100,
110 0x00400400, 0x00000028, 0x08000800, 0x40000100,
111 0x00440400, 0x00000028, 0x0c000800, 0x40100100,
112 0x80000000, 0x00000010, 0x00000000, 0x00800000,
113 0x80040000, 0x00000010, 0x04000000, 0x00900000,
114 0x80400000, 0x00000018, 0x00000800, 0x40800000,
115 0x80440000, 0x00000018, 0x04000800, 0x40900000,
116 0x80000400, 0x00000030, 0x08000000, 0x00800100,
117 0x80040400, 0x00000030, 0x0c000000, 0x00900100,
118 0x80400400, 0x00000038, 0x08000800, 0x40800100,
119 0x80440400, 0x00000038, 0x0c000800, 0x40900100,
120 0x10000000, 0x00000000, 0x00200000, 0x00001000,
121 0x10040000, 0x00000000, 0x04200000, 0x00101000,
122 0x10400000, 0x00000008, 0x00200800, 0x40001000,
123 0x10440000, 0x00000008, 0x04200800, 0x40101000,
124 0x10000400, 0x00000020, 0x08200000, 0x00001100,
125 0x10040400, 0x00000020, 0x0c200000, 0x00101100,
126 0x10400400, 0x00000028, 0x08200800, 0x40001100,
127 0x10440400, 0x00000028, 0x0c200800, 0x40101100,
128 0x90000000, 0x00000010, 0x00200000, 0x00801000,
129 0x90040000, 0x00000010, 0x04200000, 0x00901000,
130 0x90400000, 0x00000018, 0x00200800, 0x40801000,
131 0x90440000, 0x00000018, 0x04200800, 0x40901000,
132 0x90000400, 0x00000030, 0x08200000, 0x00801100,
133 0x90040400, 0x00000030, 0x0c200000, 0x00901100,
134 0x90400400, 0x00000038, 0x08200800, 0x40801100,
135 0x90440400, 0x00000038, 0x0c200800, 0x40901100,
136 0x00000200, 0x00080000, 0x00000000, 0x00000004,
137 0x00040200, 0x00080000, 0x04000000, 0x00100004,
138 0x00400200, 0x00080008, 0x00000800, 0x40000004,
139 0x00440200, 0x00080008, 0x04000800, 0x40100004,
140 0x00000600, 0x00080020, 0x08000000, 0x00000104,
141 0x00040600, 0x00080020, 0x0c000000, 0x00100104,
142 0x00400600, 0x00080028, 0x08000800, 0x40000104,
143 0x00440600, 0x00080028, 0x0c000800, 0x40100104,
144 0x80000200, 0x00080010, 0x00000000, 0x00800004,
145 0x80040200, 0x00080010, 0x04000000, 0x00900004,
146 0x80400200, 0x00080018, 0x00000800, 0x40800004,
147 0x80440200, 0x00080018, 0x04000800, 0x40900004,
148 0x80000600, 0x00080030, 0x08000000, 0x00800104,
149 0x80040600, 0x00080030, 0x0c000000, 0x00900104,
150 0x80400600, 0x00080038, 0x08000800, 0x40800104,
151 0x80440600, 0x00080038, 0x0c000800, 0x40900104,
152 0x10000200, 0x00080000, 0x00200000, 0x00001004,
153 0x10040200, 0x00080000, 0x04200000, 0x00101004,
154 0x10400200, 0x00080008, 0x00200800, 0x40001004,
155 0x10440200, 0x00080008, 0x04200800, 0x40101004,
156 0x10000600, 0x00080020, 0x08200000, 0x00001104,
157 0x10040600, 0x00080020, 0x0c200000, 0x00101104,
158 0x10400600, 0x00080028, 0x08200800, 0x40001104,
159 0x10440600, 0x00080028, 0x0c200800, 0x40101104,
160 0x90000200, 0x00080010, 0x00200000, 0x00801004,
161 0x90040200, 0x00080010, 0x04200000, 0x00901004,
162 0x90400200, 0x00080018, 0x00200800, 0x40801004,
163 0x90440200, 0x00080018, 0x04200800, 0x40901004,
164 0x90000600, 0x00080030, 0x08200000, 0x00801104,
165 0x90040600, 0x00080030, 0x0c200000, 0x00901104,
166 0x90400600, 0x00080038, 0x08200800, 0x40801104,
167 0x90440600, 0x00080038, 0x0c200800, 0x40901104,
168 0x00000002, 0x00002000, 0x20000000, 0x00000001,
169 0x00040002, 0x00002000, 0x24000000, 0x00100001,
170 0x00400002, 0x00002008, 0x20000800, 0x40000001,
171 0x00440002, 0x00002008, 0x24000800, 0x40100001,
172 0x00000402, 0x00002020, 0x28000000, 0x00000101,
173 0x00040402, 0x00002020, 0x2c000000, 0x00100101,
174 0x00400402, 0x00002028, 0x28000800, 0x40000101,
175 0x00440402, 0x00002028, 0x2c000800, 0x40100101,
176 0x80000002, 0x00002010, 0x20000000, 0x00800001,
177 0x80040002, 0x00002010, 0x24000000, 0x00900001,
178 0x80400002, 0x00002018, 0x20000800, 0x40800001,
179 0x80440002, 0x00002018, 0x24000800, 0x40900001,
180 0x80000402, 0x00002030, 0x28000000, 0x00800101,
181 0x80040402, 0x00002030, 0x2c000000, 0x00900101,
182 0x80400402, 0x00002038, 0x28000800, 0x40800101,
183 0x80440402, 0x00002038, 0x2c000800, 0x40900101,
184 0x10000002, 0x00002000, 0x20200000, 0x00001001,
185 0x10040002, 0x00002000, 0x24200000, 0x00101001,
186 0x10400002, 0x00002008, 0x20200800, 0x40001001,
187 0x10440002, 0x00002008, 0x24200800, 0x40101001,
188 0x10000402, 0x00002020, 0x28200000, 0x00001101,
189 0x10040402, 0x00002020, 0x2c200000, 0x00101101,
190 0x10400402, 0x00002028, 0x28200800, 0x40001101,
191 0x10440402, 0x00002028, 0x2c200800, 0x40101101,
192 0x90000002, 0x00002010, 0x20200000, 0x00801001,
193 0x90040002, 0x00002010, 0x24200000, 0x00901001,
194 0x90400002, 0x00002018, 0x20200800, 0x40801001,
195 0x90440002, 0x00002018, 0x24200800, 0x40901001,
196 0x90000402, 0x00002030, 0x28200000, 0x00801101,
197 0x90040402, 0x00002030, 0x2c200000, 0x00901101,
198 0x90400402, 0x00002038, 0x28200800, 0x40801101,
199 0x90440402, 0x00002038, 0x2c200800, 0x40901101,
200 0x00000202, 0x00082000, 0x20000000, 0x00000005,
201 0x00040202, 0x00082000, 0x24000000, 0x00100005,
202 0x00400202, 0x00082008, 0x20000800, 0x40000005,
203 0x00440202, 0x00082008, 0x24000800, 0x40100005,
204 0x00000602, 0x00082020, 0x28000000, 0x00000105,
205 0x00040602, 0x00082020, 0x2c000000, 0x00100105,
206 0x00400602, 0x00082028, 0x28000800, 0x40000105,
207 0x00440602, 0x00082028, 0x2c000800, 0x40100105,
208 0x80000202, 0x00082010, 0x20000000, 0x00800005,
209 0x80040202, 0x00082010, 0x24000000, 0x00900005,
210 0x80400202, 0x00082018, 0x20000800, 0x40800005,
211 0x80440202, 0x00082018, 0x24000800, 0x40900005,
212 0x80000602, 0x00082030, 0x28000000, 0x00800105,
213 0x80040602, 0x00082030, 0x2c000000, 0x00900105,
214 0x80400602, 0x00082038, 0x28000800, 0x40800105,
215 0x80440602, 0x00082038, 0x2c000800, 0x40900105,
216 0x10000202, 0x00082000, 0x20200000, 0x00001005,
217 0x10040202, 0x00082000, 0x24200000, 0x00101005,
218 0x10400202, 0x00082008, 0x20200800, 0x40001005,
219 0x10440202, 0x00082008, 0x24200800, 0x40101005,
220 0x10000602, 0x00082020, 0x28200000, 0x00001105,
221 0x10040602, 0x00082020, 0x2c200000, 0x00101105,
222 0x10400602, 0x00082028, 0x28200800, 0x40001105,
223 0x10440602, 0x00082028, 0x2c200800, 0x40101105,
224 0x90000202, 0x00082010, 0x20200000, 0x00801005,
225 0x90040202, 0x00082010, 0x24200000, 0x00901005,
226 0x90400202, 0x00082018, 0x20200800, 0x40801005,
227 0x90440202, 0x00082018, 0x24200800, 0x40901005,
228 0x90000602, 0x00082030, 0x28200000, 0x00801105,
229 0x90040602, 0x00082030, 0x2c200000, 0x00901105,
230 0x90400602, 0x00082038, 0x28200800, 0x40801105,
231 0x90440602, 0x00082038, 0x2c200800, 0x40901105,
232
233 0x00000000, 0x00000000, 0x00000000, 0x00000000,
234 0x00000000, 0x00000008, 0x00080000, 0x10000000,
235 0x02000000, 0x00000000, 0x00000080, 0x00001000,
236 0x02000000, 0x00000008, 0x00080080, 0x10001000,
237 0x00004000, 0x00000000, 0x00000040, 0x00040000,
238 0x00004000, 0x00000008, 0x00080040, 0x10040000,
239 0x02004000, 0x00000000, 0x000000c0, 0x00041000,
240 0x02004000, 0x00000008, 0x000800c0, 0x10041000,
241 0x00020000, 0x00008000, 0x08000000, 0x00200000,
242 0x00020000, 0x00008008, 0x08080000, 0x10200000,
243 0x02020000, 0x00008000, 0x08000080, 0x00201000,
244 0x02020000, 0x00008008, 0x08080080, 0x10201000,
245 0x00024000, 0x00008000, 0x08000040, 0x00240000,
246 0x00024000, 0x00008008, 0x08080040, 0x10240000,
247 0x02024000, 0x00008000, 0x080000c0, 0x00241000,
248 0x02024000, 0x00008008, 0x080800c0, 0x10241000,
249 0x00000000, 0x01000000, 0x00002000, 0x00000020,
250 0x00000000, 0x01000008, 0x00082000, 0x10000020,
251 0x02000000, 0x01000000, 0x00002080, 0x00001020,
252 0x02000000, 0x01000008, 0x00082080, 0x10001020,
253 0x00004000, 0x01000000, 0x00002040, 0x00040020,
254 0x00004000, 0x01000008, 0x00082040, 0x10040020,
255 0x02004000, 0x01000000, 0x000020c0, 0x00041020,
256 0x02004000, 0x01000008, 0x000820c0, 0x10041020,
257 0x00020000, 0x01008000, 0x08002000, 0x00200020,
258 0x00020000, 0x01008008, 0x08082000, 0x10200020,
259 0x02020000, 0x01008000, 0x08002080, 0x00201020,
260 0x02020000, 0x01008008, 0x08082080, 0x10201020,
261 0x00024000, 0x01008000, 0x08002040, 0x00240020,
262 0x00024000, 0x01008008, 0x08082040, 0x10240020,
263 0x02024000, 0x01008000, 0x080020c0, 0x00241020,
264 0x02024000, 0x01008008, 0x080820c0, 0x10241020,
265 0x00000400, 0x04000000, 0x00100000, 0x00000004,
266 0x00000400, 0x04000008, 0x00180000, 0x10000004,
267 0x02000400, 0x04000000, 0x00100080, 0x00001004,
268 0x02000400, 0x04000008, 0x00180080, 0x10001004,
269 0x00004400, 0x04000000, 0x00100040, 0x00040004,
270 0x00004400, 0x04000008, 0x00180040, 0x10040004,
271 0x02004400, 0x04000000, 0x001000c0, 0x00041004,
272 0x02004400, 0x04000008, 0x001800c0, 0x10041004,
273 0x00020400, 0x04008000, 0x08100000, 0x00200004,
274 0x00020400, 0x04008008, 0x08180000, 0x10200004,
275 0x02020400, 0x04008000, 0x08100080, 0x00201004,
276 0x02020400, 0x04008008, 0x08180080, 0x10201004,
277 0x00024400, 0x04008000, 0x08100040, 0x00240004,
278 0x00024400, 0x04008008, 0x08180040, 0x10240004,
279 0x02024400, 0x04008000, 0x081000c0, 0x00241004,
280 0x02024400, 0x04008008, 0x081800c0, 0x10241004,
281 0x00000400, 0x05000000, 0x00102000, 0x00000024,
282 0x00000400, 0x05000008, 0x00182000, 0x10000024,
283 0x02000400, 0x05000000, 0x00102080, 0x00001024,
284 0x02000400, 0x05000008, 0x00182080, 0x10001024,
285 0x00004400, 0x05000000, 0x00102040, 0x00040024,
286 0x00004400, 0x05000008, 0x00182040, 0x10040024,
287 0x02004400, 0x05000000, 0x001020c0, 0x00041024,
288 0x02004400, 0x05000008, 0x001820c0, 0x10041024,
289 0x00020400, 0x05008000, 0x08102000, 0x00200024,
290 0x00020400, 0x05008008, 0x08182000, 0x10200024,
291 0x02020400, 0x05008000, 0x08102080, 0x00201024,
292 0x02020400, 0x05008008, 0x08182080, 0x10201024,
293 0x00024400, 0x05008000, 0x08102040, 0x00240024,
294 0x00024400, 0x05008008, 0x08182040, 0x10240024,
295 0x02024400, 0x05008000, 0x081020c0, 0x00241024,
296 0x02024400, 0x05008008, 0x081820c0, 0x10241024,
297 0x00000800, 0x00010000, 0x20000000, 0x00000010,
298 0x00000800, 0x00010008, 0x20080000, 0x10000010,
299 0x02000800, 0x00010000, 0x20000080, 0x00001010,
300 0x02000800, 0x00010008, 0x20080080, 0x10001010,
301 0x00004800, 0x00010000, 0x20000040, 0x00040010,
302 0x00004800, 0x00010008, 0x20080040, 0x10040010,
303 0x02004800, 0x00010000, 0x200000c0, 0x00041010,
304 0x02004800, 0x00010008, 0x200800c0, 0x10041010,
305 0x00020800, 0x00018000, 0x28000000, 0x00200010,
306 0x00020800, 0x00018008, 0x28080000, 0x10200010,
307 0x02020800, 0x00018000, 0x28000080, 0x00201010,
308 0x02020800, 0x00018008, 0x28080080, 0x10201010,
309 0x00024800, 0x00018000, 0x28000040, 0x00240010,
310 0x00024800, 0x00018008, 0x28080040, 0x10240010,
311 0x02024800, 0x00018000, 0x280000c0, 0x00241010,
312 0x02024800, 0x00018008, 0x280800c0, 0x10241010,
313 0x00000800, 0x01010000, 0x20002000, 0x00000030,
314 0x00000800, 0x01010008, 0x20082000, 0x10000030,
315 0x02000800, 0x01010000, 0x20002080, 0x00001030,
316 0x02000800, 0x01010008, 0x20082080, 0x10001030,
317 0x00004800, 0x01010000, 0x20002040, 0x00040030,
318 0x00004800, 0x01010008, 0x20082040, 0x10040030,
319 0x02004800, 0x01010000, 0x200020c0, 0x00041030,
320 0x02004800, 0x01010008, 0x200820c0, 0x10041030,
321 0x00020800, 0x01018000, 0x28002000, 0x00200030,
322 0x00020800, 0x01018008, 0x28082000, 0x10200030,
323 0x02020800, 0x01018000, 0x28002080, 0x00201030,
324 0x02020800, 0x01018008, 0x28082080, 0x10201030,
325 0x00024800, 0x01018000, 0x28002040, 0x00240030,
326 0x00024800, 0x01018008, 0x28082040, 0x10240030,
327 0x02024800, 0x01018000, 0x280020c0, 0x00241030,
328 0x02024800, 0x01018008, 0x280820c0, 0x10241030,
329 0x00000c00, 0x04010000, 0x20100000, 0x00000014,
330 0x00000c00, 0x04010008, 0x20180000, 0x10000014,
331 0x02000c00, 0x04010000, 0x20100080, 0x00001014,
332 0x02000c00, 0x04010008, 0x20180080, 0x10001014,
333 0x00004c00, 0x04010000, 0x20100040, 0x00040014,
334 0x00004c00, 0x04010008, 0x20180040, 0x10040014,
335 0x02004c00, 0x04010000, 0x201000c0, 0x00041014,
336 0x02004c00, 0x04010008, 0x201800c0, 0x10041014,
337 0x00020c00, 0x04018000, 0x28100000, 0x00200014,
338 0x00020c00, 0x04018008, 0x28180000, 0x10200014,
339 0x02020c00, 0x04018000, 0x28100080, 0x00201014,
340 0x02020c00, 0x04018008, 0x28180080, 0x10201014,
341 0x00024c00, 0x04018000, 0x28100040, 0x00240014,
342 0x00024c00, 0x04018008, 0x28180040, 0x10240014,
343 0x02024c00, 0x04018000, 0x281000c0, 0x00241014,
344 0x02024c00, 0x04018008, 0x281800c0, 0x10241014,
345 0x00000c00, 0x05010000, 0x20102000, 0x00000034,
346 0x00000c00, 0x05010008, 0x20182000, 0x10000034,
347 0x02000c00, 0x05010000, 0x20102080, 0x00001034,
348 0x02000c00, 0x05010008, 0x20182080, 0x10001034,
349 0x00004c00, 0x05010000, 0x20102040, 0x00040034,
350 0x00004c00, 0x05010008, 0x20182040, 0x10040034,
351 0x02004c00, 0x05010000, 0x201020c0, 0x00041034,
352 0x02004c00, 0x05010008, 0x201820c0, 0x10041034,
353 0x00020c00, 0x05018000, 0x28102000, 0x00200034,
354 0x00020c00, 0x05018008, 0x28182000, 0x10200034,
355 0x02020c00, 0x05018000, 0x28102080, 0x00201034,
356 0x02020c00, 0x05018008, 0x28182080, 0x10201034,
357 0x00024c00, 0x05018000, 0x28102040, 0x00240034,
358 0x00024c00, 0x05018008, 0x28182040, 0x10240034,
359 0x02024c00, 0x05018000, 0x281020c0, 0x00241034,
360 0x02024c00, 0x05018008, 0x281820c0, 0x10241034
361};
362
363/* S-box lookup tables */
364
365static const u32 S1[64] = {
366 0x01010400, 0x00000000, 0x00010000, 0x01010404,
367 0x01010004, 0x00010404, 0x00000004, 0x00010000,
368 0x00000400, 0x01010400, 0x01010404, 0x00000400,
369 0x01000404, 0x01010004, 0x01000000, 0x00000004,
370 0x00000404, 0x01000400, 0x01000400, 0x00010400,
371 0x00010400, 0x01010000, 0x01010000, 0x01000404,
372 0x00010004, 0x01000004, 0x01000004, 0x00010004,
373 0x00000000, 0x00000404, 0x00010404, 0x01000000,
374 0x00010000, 0x01010404, 0x00000004, 0x01010000,
375 0x01010400, 0x01000000, 0x01000000, 0x00000400,
376 0x01010004, 0x00010000, 0x00010400, 0x01000004,
377 0x00000400, 0x00000004, 0x01000404, 0x00010404,
378 0x01010404, 0x00010004, 0x01010000, 0x01000404,
379 0x01000004, 0x00000404, 0x00010404, 0x01010400,
380 0x00000404, 0x01000400, 0x01000400, 0x00000000,
381 0x00010004, 0x00010400, 0x00000000, 0x01010004
382};
383
384static const u32 S2[64] = {
385 0x80108020, 0x80008000, 0x00008000, 0x00108020,
386 0x00100000, 0x00000020, 0x80100020, 0x80008020,
387 0x80000020, 0x80108020, 0x80108000, 0x80000000,
388 0x80008000, 0x00100000, 0x00000020, 0x80100020,
389 0x00108000, 0x00100020, 0x80008020, 0x00000000,
390 0x80000000, 0x00008000, 0x00108020, 0x80100000,
391 0x00100020, 0x80000020, 0x00000000, 0x00108000,
392 0x00008020, 0x80108000, 0x80100000, 0x00008020,
393 0x00000000, 0x00108020, 0x80100020, 0x00100000,
394 0x80008020, 0x80100000, 0x80108000, 0x00008000,
395 0x80100000, 0x80008000, 0x00000020, 0x80108020,
396 0x00108020, 0x00000020, 0x00008000, 0x80000000,
397 0x00008020, 0x80108000, 0x00100000, 0x80000020,
398 0x00100020, 0x80008020, 0x80000020, 0x00100020,
399 0x00108000, 0x00000000, 0x80008000, 0x00008020,
400 0x80000000, 0x80100020, 0x80108020, 0x00108000
401};
402
403static const u32 S3[64] = {
404 0x00000208, 0x08020200, 0x00000000, 0x08020008,
405 0x08000200, 0x00000000, 0x00020208, 0x08000200,
406 0x00020008, 0x08000008, 0x08000008, 0x00020000,
407 0x08020208, 0x00020008, 0x08020000, 0x00000208,
408 0x08000000, 0x00000008, 0x08020200, 0x00000200,
409 0x00020200, 0x08020000, 0x08020008, 0x00020208,
410 0x08000208, 0x00020200, 0x00020000, 0x08000208,
411 0x00000008, 0x08020208, 0x00000200, 0x08000000,
412 0x08020200, 0x08000000, 0x00020008, 0x00000208,
413 0x00020000, 0x08020200, 0x08000200, 0x00000000,
414 0x00000200, 0x00020008, 0x08020208, 0x08000200,
415 0x08000008, 0x00000200, 0x00000000, 0x08020008,
416 0x08000208, 0x00020000, 0x08000000, 0x08020208,
417 0x00000008, 0x00020208, 0x00020200, 0x08000008,
418 0x08020000, 0x08000208, 0x00000208, 0x08020000,
419 0x00020208, 0x00000008, 0x08020008, 0x00020200
420};
421
422static const u32 S4[64] = {
423 0x00802001, 0x00002081, 0x00002081, 0x00000080,
424 0x00802080, 0x00800081, 0x00800001, 0x00002001,
425 0x00000000, 0x00802000, 0x00802000, 0x00802081,
426 0x00000081, 0x00000000, 0x00800080, 0x00800001,
427 0x00000001, 0x00002000, 0x00800000, 0x00802001,
428 0x00000080, 0x00800000, 0x00002001, 0x00002080,
429 0x00800081, 0x00000001, 0x00002080, 0x00800080,
430 0x00002000, 0x00802080, 0x00802081, 0x00000081,
431 0x00800080, 0x00800001, 0x00802000, 0x00802081,
432 0x00000081, 0x00000000, 0x00000000, 0x00802000,
433 0x00002080, 0x00800080, 0x00800081, 0x00000001,
434 0x00802001, 0x00002081, 0x00002081, 0x00000080,
435 0x00802081, 0x00000081, 0x00000001, 0x00002000,
436 0x00800001, 0x00002001, 0x00802080, 0x00800081,
437 0x00002001, 0x00002080, 0x00800000, 0x00802001,
438 0x00000080, 0x00800000, 0x00002000, 0x00802080
439};
440
441static const u32 S5[64] = {
442 0x00000100, 0x02080100, 0x02080000, 0x42000100,
443 0x00080000, 0x00000100, 0x40000000, 0x02080000,
444 0x40080100, 0x00080000, 0x02000100, 0x40080100,
445 0x42000100, 0x42080000, 0x00080100, 0x40000000,
446 0x02000000, 0x40080000, 0x40080000, 0x00000000,
447 0x40000100, 0x42080100, 0x42080100, 0x02000100,
448 0x42080000, 0x40000100, 0x00000000, 0x42000000,
449 0x02080100, 0x02000000, 0x42000000, 0x00080100,
450 0x00080000, 0x42000100, 0x00000100, 0x02000000,
451 0x40000000, 0x02080000, 0x42000100, 0x40080100,
452 0x02000100, 0x40000000, 0x42080000, 0x02080100,
453 0x40080100, 0x00000100, 0x02000000, 0x42080000,
454 0x42080100, 0x00080100, 0x42000000, 0x42080100,
455 0x02080000, 0x00000000, 0x40080000, 0x42000000,
456 0x00080100, 0x02000100, 0x40000100, 0x00080000,
457 0x00000000, 0x40080000, 0x02080100, 0x40000100
458};
459
460static const u32 S6[64] = {
461 0x20000010, 0x20400000, 0x00004000, 0x20404010,
462 0x20400000, 0x00000010, 0x20404010, 0x00400000,
463 0x20004000, 0x00404010, 0x00400000, 0x20000010,
464 0x00400010, 0x20004000, 0x20000000, 0x00004010,
465 0x00000000, 0x00400010, 0x20004010, 0x00004000,
466 0x00404000, 0x20004010, 0x00000010, 0x20400010,
467 0x20400010, 0x00000000, 0x00404010, 0x20404000,
468 0x00004010, 0x00404000, 0x20404000, 0x20000000,
469 0x20004000, 0x00000010, 0x20400010, 0x00404000,
470 0x20404010, 0x00400000, 0x00004010, 0x20000010,
471 0x00400000, 0x20004000, 0x20000000, 0x00004010,
472 0x20000010, 0x20404010, 0x00404000, 0x20400000,
473 0x00404010, 0x20404000, 0x00000000, 0x20400010,
474 0x00000010, 0x00004000, 0x20400000, 0x00404010,
475 0x00004000, 0x00400010, 0x20004010, 0x00000000,
476 0x20404000, 0x20000000, 0x00400010, 0x20004010
477};
478
479static const u32 S7[64] = {
480 0x00200000, 0x04200002, 0x04000802, 0x00000000,
481 0x00000800, 0x04000802, 0x00200802, 0x04200800,
482 0x04200802, 0x00200000, 0x00000000, 0x04000002,
483 0x00000002, 0x04000000, 0x04200002, 0x00000802,
484 0x04000800, 0x00200802, 0x00200002, 0x04000800,
485 0x04000002, 0x04200000, 0x04200800, 0x00200002,
486 0x04200000, 0x00000800, 0x00000802, 0x04200802,
487 0x00200800, 0x00000002, 0x04000000, 0x00200800,
488 0x04000000, 0x00200800, 0x00200000, 0x04000802,
489 0x04000802, 0x04200002, 0x04200002, 0x00000002,
490 0x00200002, 0x04000000, 0x04000800, 0x00200000,
491 0x04200800, 0x00000802, 0x00200802, 0x04200800,
492 0x00000802, 0x04000002, 0x04200802, 0x04200000,
493 0x00200800, 0x00000000, 0x00000002, 0x04200802,
494 0x00000000, 0x00200802, 0x04200000, 0x00000800,
495 0x04000002, 0x04000800, 0x00000800, 0x00200002
496};
497
498static const u32 S8[64] = {
499 0x10001040, 0x00001000, 0x00040000, 0x10041040,
500 0x10000000, 0x10001040, 0x00000040, 0x10000000,
501 0x00040040, 0x10040000, 0x10041040, 0x00041000,
502 0x10041000, 0x00041040, 0x00001000, 0x00000040,
503 0x10040000, 0x10000040, 0x10001000, 0x00001040,
504 0x00041000, 0x00040040, 0x10040040, 0x10041000,
505 0x00001040, 0x00000000, 0x00000000, 0x10040040,
506 0x10000040, 0x10001000, 0x00041040, 0x00040000,
507 0x00041040, 0x00040000, 0x10041000, 0x00001000,
508 0x00000040, 0x10040040, 0x00001000, 0x00041040,
509 0x10001000, 0x00000040, 0x10000040, 0x10040000,
510 0x10040040, 0x10000000, 0x00040000, 0x10001040,
511 0x00000000, 0x10041040, 0x00040040, 0x10000040,
512 0x10040000, 0x10001000, 0x10001040, 0x00000000,
513 0x10041040, 0x00041000, 0x00041000, 0x00001040,
514 0x00001040, 0x00040040, 0x10000000, 0x10041000
515};
516
517/* Encryption components: IP, FP, and round function */
518
519#define IP(L, R, T) \
520 ROL(R, 4); \
521 T = L; \
522 L ^= R; \
523 L &= 0xf0f0f0f0; \
524 R ^= L; \
525 L ^= T; \
526 ROL(R, 12); \
527 T = L; \
528 L ^= R; \
529 L &= 0xffff0000; \
530 R ^= L; \
531 L ^= T; \
532 ROR(R, 14); \
533 T = L; \
534 L ^= R; \
535 L &= 0xcccccccc; \
536 R ^= L; \
537 L ^= T; \
538 ROL(R, 6); \
539 T = L; \
540 L ^= R; \
541 L &= 0xff00ff00; \
542 R ^= L; \
543 L ^= T; \
544 ROR(R, 7); \
545 T = L; \
546 L ^= R; \
547 L &= 0xaaaaaaaa; \
548 R ^= L; \
549 L ^= T; \
550 ROL(L, 1);
551
552#define FP(L, R, T) \
553 ROR(L, 1); \
554 T = L; \
555 L ^= R; \
556 L &= 0xaaaaaaaa; \
557 R ^= L; \
558 L ^= T; \
559 ROL(R, 7); \
560 T = L; \
561 L ^= R; \
562 L &= 0xff00ff00; \
563 R ^= L; \
564 L ^= T; \
565 ROR(R, 6); \
566 T = L; \
567 L ^= R; \
568 L &= 0xcccccccc; \
569 R ^= L; \
570 L ^= T; \
571 ROL(R, 14); \
572 T = L; \
573 L ^= R; \
574 L &= 0xffff0000; \
575 R ^= L; \
576 L ^= T; \
577 ROR(R, 12); \
578 T = L; \
579 L ^= R; \
580 L &= 0xf0f0f0f0; \
581 R ^= L; \
582 L ^= T; \
583 ROR(R, 4);
584
585#define ROUND(L, R, A, B, K, d) \
586 B = K[0]; A = K[1]; K += d; \
587 B ^= R; A ^= R; \
588 B &= 0x3f3f3f3f; ROR(A, 4); \
589 L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \
590 L ^= S6[0xff & (B >> 8)]; B >>= 16; \
591 L ^= S7[0xff & A]; \
592 L ^= S5[0xff & (A >> 8)]; A >>= 16; \
593 L ^= S4[0xff & B]; \
594 L ^= S2[0xff & (B >> 8)]; \
595 L ^= S3[0xff & A]; \
596 L ^= S1[0xff & (A >> 8)];
597
598/*
599 * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved
600 * tables of 128 elements. One set is for C_i and the other for D_i, while
601 * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i.
602 *
603 * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i
604 * or D_i in bits 7-1 (bit 0 being the least significant).
605 */
606
607#define T1(x) pt[2 * (x) + 0]
608#define T2(x) pt[2 * (x) + 1]
609#define T3(x) pt[2 * (x) + 2]
610#define T4(x) pt[2 * (x) + 3]
611
612#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
613
614/*
615 * Encryption key expansion
616 *
617 * RFC2451: Weak key checks SHOULD be performed.
618 *
619 * FIPS 74:
620 *
621 * Keys having duals are keys which produce all zeros, all ones, or
622 * alternating zero-one patterns in the C and D registers after Permuted
623 * Choice 1 has operated on the key.
624 *
625 */
626unsigned long des_ekey(u32 *pe, const u8 *k)
627{
628 /* K&R: long is at least 32 bits */
629 unsigned long a, b, c, d, w;
630 const u32 *pt = pc2;
631
632 d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
633 c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
634 b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
635 a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
636
637 pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
638 pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
639 pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
640 pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
641 pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
642 pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
643 pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
644 pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
645 pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
646 pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
647 pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
648 pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
649 pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
650 pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
651 pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
652 pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
653
654 /* Check if first half is weak */
655 w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
656
657 /* Skip to next table set */
658 pt += 512;
659
660 d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
661 c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
662 b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
663 a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
664
665 /* Check if second half is weak */
666 w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
667
668 pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
669 pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
670 pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
671 pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
672 pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
673 pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
674 pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
675 pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
676 pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
677 pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
678 pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
679 pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
680 pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
681 pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
682 pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
683 pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
684
685 /* Fixup: 2413 5768 -> 1357 2468 */
686 for (d = 0; d < 16; ++d) {
687 a = pe[2 * d];
688 b = pe[2 * d + 1];
689 c = a ^ b;
690 c &= 0xffff0000;
691 a ^= c;
692 b ^= c;
693 ROL(b, 18);
694 pe[2 * d] = a;
695 pe[2 * d + 1] = b;
696 }
697
698 /* Zero if weak key */
699 return w;
700}
701EXPORT_SYMBOL_GPL(des_ekey);
702
703/*
704 * Decryption key expansion
705 *
706 * No weak key checking is performed, as this is only used by triple DES
707 *
708 */
709static void dkey(u32 *pe, const u8 *k)
710{
711 /* K&R: long is at least 32 bits */
712 unsigned long a, b, c, d;
713 const u32 *pt = pc2;
714
715 d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
716 c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
717 b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
718 a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
719
720 pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
721 pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
722 pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
723 pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
724 pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
725 pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
726 pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
727 pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
728 pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
729 pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
730 pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
731 pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
732 pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
733 pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
734 pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
735 pe[15 * 2] = DES_PC2(b, c, d, a);
736
737 /* Skip to next table set */
738 pt += 512;
739
740 d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
741 c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
742 b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
743 a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
744
745 pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
746 pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
747 pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
748 pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
749 pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
750 pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
751 pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
752 pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
753 pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
754 pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
755 pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
756 pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
757 pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
758 pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
759 pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
760 pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
761
762 /* Fixup: 2413 5768 -> 1357 2468 */
763 for (d = 0; d < 16; ++d) {
764 a = pe[2 * d];
765 b = pe[2 * d + 1];
766 c = a ^ b;
767 c &= 0xffff0000;
768 a ^= c;
769 b ^= c;
770 ROL(b, 18);
771 pe[2 * d] = a;
772 pe[2 * d + 1] = b;
773 }
774}
775 18
776static int des_setkey(struct crypto_tfm *tfm, const u8 *key, 19static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
777 unsigned int keylen) 20 unsigned int keylen)
778{ 21{
779 struct des_ctx *dctx = crypto_tfm_ctx(tfm); 22 struct des_ctx *dctx = crypto_tfm_ctx(tfm);
780 u32 *flags = &tfm->crt_flags; 23 int err;
781 u32 tmp[DES_EXPKEY_WORDS];
782 int ret;
783
784 /* Expand to tmp */
785 ret = des_ekey(tmp, key);
786 24
787 if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 25 err = des_expand_key(dctx, key, keylen);
788 *flags |= CRYPTO_TFM_RES_WEAK_KEY; 26 if (err == -ENOKEY) {
789 return -EINVAL; 27 if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
28 err = -EINVAL;
29 else
30 err = 0;
790 } 31 }
791 32
792 /* Copy to output */ 33 if (err) {
793 memcpy(dctx->expkey, tmp, sizeof(dctx->expkey)); 34 memset(dctx, 0, sizeof(*dctx));
794 35 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
795 return 0;
796}
797
798static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
799{
800 struct des_ctx *ctx = crypto_tfm_ctx(tfm);
801 const u32 *K = ctx->expkey;
802 const __le32 *s = (const __le32 *)src;
803 __le32 *d = (__le32 *)dst;
804 u32 L, R, A, B;
805 int i;
806
807 L = le32_to_cpu(s[0]);
808 R = le32_to_cpu(s[1]);
809
810 IP(L, R, A);
811 for (i = 0; i < 8; i++) {
812 ROUND(L, R, A, B, K, 2);
813 ROUND(R, L, A, B, K, 2);
814 } 36 }
815 FP(R, L, A); 37 return err;
816
817 d[0] = cpu_to_le32(R);
818 d[1] = cpu_to_le32(L);
819} 38}
820 39
821static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 40static void crypto_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
822{ 41{
823 struct des_ctx *ctx = crypto_tfm_ctx(tfm); 42 const struct des_ctx *dctx = crypto_tfm_ctx(tfm);
824 const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
825 const __le32 *s = (const __le32 *)src;
826 __le32 *d = (__le32 *)dst;
827 u32 L, R, A, B;
828 int i;
829
830 L = le32_to_cpu(s[0]);
831 R = le32_to_cpu(s[1]);
832 43
833 IP(L, R, A); 44 des_encrypt(dctx, dst, src);
834 for (i = 0; i < 8; i++) {
835 ROUND(L, R, A, B, K, -2);
836 ROUND(R, L, A, B, K, -2);
837 }
838 FP(R, L, A);
839
840 d[0] = cpu_to_le32(R);
841 d[1] = cpu_to_le32(L);
842} 45}
843 46
844/* 47static void crypto_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
845 * RFC2451:
846 *
847 * For DES-EDE3, there is no known need to reject weak or
848 * complementation keys. Any weakness is obviated by the use of
849 * multiple keys.
850 *
851 * However, if the first two or last two independent 64-bit keys are
852 * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
853 * same as DES. Implementers MUST reject keys that exhibit this
854 * property.
855 *
856 */
857int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
858 unsigned int keylen)
859{ 48{
860 int err; 49 const struct des_ctx *dctx = crypto_tfm_ctx(tfm);
861 50
862 err = __des3_verify_key(flags, key); 51 des_decrypt(dctx, dst, src);
863 if (unlikely(err))
864 return err;
865
866 des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
867 dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
868 des_ekey(expkey, key);
869
870 return 0;
871} 52}
872EXPORT_SYMBOL_GPL(__des3_ede_setkey);
873 53
874static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, 54static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
875 unsigned int keylen) 55 unsigned int keylen)
876{ 56{
877 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); 57 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
878 u32 *flags = &tfm->crt_flags; 58 int err;
879 u32 *expkey = dctx->expkey;
880
881 return __des3_ede_setkey(expkey, flags, key, keylen);
882}
883
884static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
885{
886 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
887 const u32 *K = dctx->expkey;
888 const __le32 *s = (const __le32 *)src;
889 __le32 *d = (__le32 *)dst;
890 u32 L, R, A, B;
891 int i;
892
893 L = le32_to_cpu(s[0]);
894 R = le32_to_cpu(s[1]);
895 59
896 IP(L, R, A); 60 err = des3_ede_expand_key(dctx, key, keylen);
897 for (i = 0; i < 8; i++) { 61 if (err == -ENOKEY) {
898 ROUND(L, R, A, B, K, 2); 62 if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
899 ROUND(R, L, A, B, K, 2); 63 err = -EINVAL;
64 else
65 err = 0;
900 } 66 }
901 for (i = 0; i < 8; i++) {
902 ROUND(R, L, A, B, K, 2);
903 ROUND(L, R, A, B, K, 2);
904 }
905 for (i = 0; i < 8; i++) {
906 ROUND(L, R, A, B, K, 2);
907 ROUND(R, L, A, B, K, 2);
908 }
909 FP(R, L, A);
910 67
911 d[0] = cpu_to_le32(R); 68 if (err) {
912 d[1] = cpu_to_le32(L); 69 memset(dctx, 0, sizeof(*dctx));
70 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
71 }
72 return err;
913} 73}
914 74
915static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 75static void crypto_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst,
76 const u8 *src)
916{ 77{
917 struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); 78 const struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
918 const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
919 const __le32 *s = (const __le32 *)src;
920 __le32 *d = (__le32 *)dst;
921 u32 L, R, A, B;
922 int i;
923 79
924 L = le32_to_cpu(s[0]); 80 des3_ede_encrypt(dctx, dst, src);
925 R = le32_to_cpu(s[1]); 81}
926 82
927 IP(L, R, A); 83static void crypto_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst,
928 for (i = 0; i < 8; i++) { 84 const u8 *src)
929 ROUND(L, R, A, B, K, -2); 85{
930 ROUND(R, L, A, B, K, -2); 86 const struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
931 }
932 for (i = 0; i < 8; i++) {
933 ROUND(R, L, A, B, K, -2);
934 ROUND(L, R, A, B, K, -2);
935 }
936 for (i = 0; i < 8; i++) {
937 ROUND(L, R, A, B, K, -2);
938 ROUND(R, L, A, B, K, -2);
939 }
940 FP(R, L, A);
941 87
942 d[0] = cpu_to_le32(R); 88 des3_ede_decrypt(dctx, dst, src);
943 d[1] = cpu_to_le32(L);
944} 89}
945 90
946static struct crypto_alg des_algs[2] = { { 91static struct crypto_alg des_algs[2] = { {
@@ -951,13 +96,12 @@ static struct crypto_alg des_algs[2] = { {
951 .cra_blocksize = DES_BLOCK_SIZE, 96 .cra_blocksize = DES_BLOCK_SIZE,
952 .cra_ctxsize = sizeof(struct des_ctx), 97 .cra_ctxsize = sizeof(struct des_ctx),
953 .cra_module = THIS_MODULE, 98 .cra_module = THIS_MODULE,
954 .cra_alignmask = 3,
955 .cra_u = { .cipher = { 99 .cra_u = { .cipher = {
956 .cia_min_keysize = DES_KEY_SIZE, 100 .cia_min_keysize = DES_KEY_SIZE,
957 .cia_max_keysize = DES_KEY_SIZE, 101 .cia_max_keysize = DES_KEY_SIZE,
958 .cia_setkey = des_setkey, 102 .cia_setkey = des_setkey,
959 .cia_encrypt = des_encrypt, 103 .cia_encrypt = crypto_des_encrypt,
960 .cia_decrypt = des_decrypt } } 104 .cia_decrypt = crypto_des_decrypt } }
961}, { 105}, {
962 .cra_name = "des3_ede", 106 .cra_name = "des3_ede",
963 .cra_driver_name = "des3_ede-generic", 107 .cra_driver_name = "des3_ede-generic",
@@ -966,13 +110,12 @@ static struct crypto_alg des_algs[2] = { {
966 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 110 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
967 .cra_ctxsize = sizeof(struct des3_ede_ctx), 111 .cra_ctxsize = sizeof(struct des3_ede_ctx),
968 .cra_module = THIS_MODULE, 112 .cra_module = THIS_MODULE,
969 .cra_alignmask = 3,
970 .cra_u = { .cipher = { 113 .cra_u = { .cipher = {
971 .cia_min_keysize = DES3_EDE_KEY_SIZE, 114 .cia_min_keysize = DES3_EDE_KEY_SIZE,
972 .cia_max_keysize = DES3_EDE_KEY_SIZE, 115 .cia_max_keysize = DES3_EDE_KEY_SIZE,
973 .cia_setkey = des3_ede_setkey, 116 .cia_setkey = des3_ede_setkey,
974 .cia_encrypt = des3_ede_encrypt, 117 .cia_encrypt = crypto_des3_ede_encrypt,
975 .cia_decrypt = des3_ede_decrypt } } 118 .cia_decrypt = crypto_des3_ede_decrypt } }
976} }; 119} };
977 120
978static int __init des_generic_mod_init(void) 121static int __init des_generic_mod_init(void)
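
Note on the des_generic rewrite above: key expansion and the weak-key test now live in library helpers, and a weak key is reported as -ENOKEY rather than only through tfm result flags. A sketch of that convention as the hunks suggest it, assuming des_expand_key() is exposed via <crypto/des.h>; the helper name example_des_prepare is hypothetical:

#include <crypto/des.h>
#include <linux/string.h>

static int example_des_prepare(struct des_ctx *ctx, const u8 *key,
			       unsigned int keylen, bool forbid_weak)
{
	int err = des_expand_key(ctx, key, keylen);

	if (err == -ENOKEY)
		err = forbid_weak ? -EINVAL : 0;	/* weak key: caller's policy */
	if (err)
		memset(ctx, 0, sizeof(*ctx));	/* do not leave a half-set schedule */
	return err;
}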
diff --git a/crypto/fips.c b/crypto/fips.c
index c0b3a3c3452d..7b1d8caee669 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -11,10 +11,14 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/sysctl.h> 13#include <linux/sysctl.h>
14#include <linux/notifier.h>
14 15
15int fips_enabled; 16int fips_enabled;
16EXPORT_SYMBOL_GPL(fips_enabled); 17EXPORT_SYMBOL_GPL(fips_enabled);
17 18
19ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain);
20EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
21
18/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */ 22/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
19static int fips_enable(char *str) 23static int fips_enable(char *str)
20{ 24{
@@ -58,6 +62,13 @@ static void crypto_proc_fips_exit(void)
58 unregister_sysctl_table(crypto_sysctls); 62 unregister_sysctl_table(crypto_sysctls);
59} 63}
60 64
65void fips_fail_notify(void)
66{
67 if (fips_enabled)
68 atomic_notifier_call_chain(&fips_fail_notif_chain, 0, NULL);
69}
70EXPORT_SYMBOL_GPL(fips_fail_notify);
71
61static int __init fips_init(void) 72static int __init fips_init(void)
62{ 73{
63 crypto_proc_fips_init(); 74 crypto_proc_fips_init();
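
Note on the fips.c change above: fips_fail_notif_chain gives interested drivers a hook that fires from fips_fail_notify() when a self-test failure must be acted on in FIPS mode. A sketch of a subscriber, with hypothetical names (example_fips_fail, example_fips_nb):

#include <linux/notifier.h>
#include <linux/printk.h>

extern struct atomic_notifier_head fips_fail_notif_chain;

static int example_fips_fail(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	pr_emerg("FIPS self-test failure reported, disabling crypto services\n");
	return NOTIFY_OK;
}

static struct notifier_block example_fips_nb = {
	.notifier_call = example_fips_fail,
};

/* In driver init/exit:
 *	atomic_notifier_chain_register(&fips_fail_notif_chain, &example_fips_nb);
 *	atomic_notifier_chain_unregister(&fips_fail_notif_chain, &example_fips_nb);
 */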
diff --git a/crypto/gcm.c b/crypto/gcm.c
index f254e2d4c206..73884208f075 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,20 +152,7 @@ out:
 static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
 				  unsigned int authsize)
 {
-	switch (authsize) {
-	case 4:
-	case 8:
-	case 12:
-	case 13:
-	case 14:
-	case 15:
-	case 16:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
+	return crypto_gcm_check_authsize(authsize);
 }
 
 static void crypto_gcm_init_common(struct aead_request *req)
@@ -762,15 +749,11 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
 				      unsigned int authsize)
 {
 	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
+	int err;
 
-	switch (authsize) {
-	case 8:
-	case 12:
-	case 16:
-		break;
-	default:
-		return -EINVAL;
-	}
+	err = crypto_rfc4106_check_authsize(authsize);
+	if (err)
+		return err;
 
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
@@ -818,8 +801,11 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 
 static int crypto_rfc4106_encrypt(struct aead_request *req)
 {
-	if (req->assoclen != 16 && req->assoclen != 20)
-		return -EINVAL;
+	int err;
+
+	err = crypto_ipsec_check_assoclen(req->assoclen);
+	if (err)
+		return err;
 
 	req = crypto_rfc4106_crypt(req);
 
@@ -828,8 +814,11 @@ static int crypto_rfc4106_encrypt(struct aead_request *req)
 
 static int crypto_rfc4106_decrypt(struct aead_request *req)
 {
-	if (req->assoclen != 16 && req->assoclen != 20)
-		return -EINVAL;
+	int err;
+
+	err = crypto_ipsec_check_assoclen(req->assoclen);
+	if (err)
+		return err;
 
 	req = crypto_rfc4106_crypt(req);
 
@@ -1045,12 +1034,14 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
 
 static int crypto_rfc4543_encrypt(struct aead_request *req)
 {
-	return crypto_rfc4543_crypt(req, true);
+	return crypto_ipsec_check_assoclen(req->assoclen) ?:
+	       crypto_rfc4543_crypt(req, true);
 }
 
 static int crypto_rfc4543_decrypt(struct aead_request *req)
 {
-	return crypto_rfc4543_crypt(req, false);
+	return crypto_ipsec_check_assoclen(req->assoclen) ?:
+	       crypto_rfc4543_crypt(req, false);
 }
 
 static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
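The open-coded switch statements above are replaced by shared inline helpers defined in a header added elsewhere in this series. A sketch of what those helpers check, inferred only from the values accepted by the code being removed (the real definitions may differ in form):

/* Sketch inferred from the removed switch statements above; the actual
 * helpers added by this series may be written differently.
 */
static inline int crypto_gcm_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12 ... 16:
		return 0;
	default:
		return -EINVAL;
	}
}

static inline int crypto_rfc4106_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		return 0;
	default:
		return -EINVAL;
	}
}

static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
{
	switch (assoclen) {
	case 16:
	case 20:
		return 0;
	default:
		return -EINVAL;
	}
}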
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index dad9e1f91a78..5027b3461c92 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -1,12 +1,37 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * GHASH: digest algorithm for GCM (Galois/Counter Mode).
+ * GHASH: hash function for GCM (Galois/Counter Mode).
  *
  * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
  * Copyright (c) 2009 Intel Corp.
  * Author: Huang Ying <ying.huang@intel.com>
+ */
+
+/*
+ * GHASH is a keyed hash function used in GCM authentication tag generation.
+ *
+ * The original GCM paper [1] presents GHASH as a function GHASH(H, A, C) which
+ * takes a 16-byte hash key H, additional authenticated data A, and a ciphertext
+ * C. It formats A and C into a single byte string X, interprets X as a
+ * polynomial over GF(2^128), and evaluates this polynomial at the point H.
+ *
+ * However, the NIST standard for GCM [2] presents GHASH as GHASH(H, X) where X
+ * is the already-formatted byte string containing both A and C.
+ *
+ * "ghash" in the Linux crypto API uses the 'X' (pre-formatted) convention,
+ * since the API supports only a single data stream per hash. Thus, the
+ * formatting of 'A' and 'C' is done in the "gcm" template, not in "ghash".
+ *
+ * The reason "ghash" is separate from "gcm" is to allow "gcm" to use an
+ * accelerated "ghash" when a standalone accelerated "gcm(aes)" is unavailable.
+ * It is generally inappropriate to use "ghash" for other purposes, since it is
+ * an "ε-almost-XOR-universal hash function", not a cryptographic hash function.
+ * It can only be used securely in crypto modes specially designed to use it.
  *
- * The algorithm implementation is copied from gcm.c.
+ * [1] The Galois/Counter Mode of Operation (GCM)
+ *     (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.694.695&rep=rep1&type=pdf)
+ * [2] Recommendation for Block Cipher Modes of Operation: Galois/Counter Mode (GCM) and GMAC
+ *     (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
  */
 
 #include <crypto/algapi.h>
@@ -156,6 +181,6 @@ subsys_initcall(ghash_mod_init);
 module_exit(ghash_mod_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
+MODULE_DESCRIPTION("GHASH hash function");
 MODULE_ALIAS_CRYPTO("ghash");
 MODULE_ALIAS_CRYPTO("ghash-generic");
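The new comment describes GHASH(H, X) as evaluating a polynomial over GF(2^128) at the point H. As a concrete illustration of that arithmetic, here is a bit-at-a-time userspace sketch of GHASH over an already-formatted, block-aligned X. It is a reference for the math only, not the kernel implementation (which uses gf128mul tables), and it is neither constant-time nor optimized.

/*
 * Userspace reference sketch of GHASH(H, X), following the bitwise algorithm
 * in the GCM spec. Not the kernel code; for illustration only.
 */
#include <stdint.h>
#include <string.h>

/* Multiply two GF(2^128) elements using GCM's bit ordering. */
static void gf128_mul(uint8_t z[16], const uint8_t x[16], const uint8_t y[16])
{
	uint8_t v[16];
	int i, j, k;

	memcpy(v, y, 16);
	memset(z, 0, 16);

	for (i = 0; i < 16; i++) {
		for (j = 7; j >= 0; j--) {
			int carry;

			if ((x[i] >> j) & 1)		/* bits of x, MSB first */
				for (k = 0; k < 16; k++)
					z[k] ^= v[k];

			/* v = v * t, reduced by the GCM polynomial (0xe1 || 0^120) */
			carry = v[15] & 1;
			for (k = 15; k > 0; k--)
				v[k] = (v[k] >> 1) | (v[k - 1] << 7);
			v[0] >>= 1;
			if (carry)
				v[0] ^= 0xe1;
		}
	}
}

/* GHASH over a pre-formatted input X; len must be a multiple of 16. */
static void ghash(uint8_t out[16], const uint8_t h[16],
		  const uint8_t *x, size_t len)
{
	uint8_t y[16] = { 0 }, t[16];
	int k;

	while (len >= 16) {
		for (k = 0; k < 16; k++)
			t[k] = y[k] ^ x[k];
		gf128_mul(y, t, h);	/* y = (y ^ x_i) * h */
		x += 16;
		len -= 16;
	}
	memcpy(out, y, 16);
}

As the comment says, formatting A and C into X (including the length block) is the "gcm" template's job; this sketch starts from the already-formatted string.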
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
deleted file mode 100644
index f8734c6576af..000000000000
--- a/crypto/morus1280.c
+++ /dev/null
@@ -1,542 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-1280 Authenticated-Encryption Algorithm
4 *
5 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <asm/unaligned.h>
10#include <crypto/algapi.h>
11#include <crypto/internal/aead.h>
12#include <crypto/internal/skcipher.h>
13#include <crypto/morus_common.h>
14#include <crypto/scatterwalk.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/scatterlist.h>
20
21#define MORUS1280_WORD_SIZE 8
22#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE)
23#define MORUS1280_BLOCK_ALIGN (__alignof__(__le64))
24#define MORUS1280_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS1280_BLOCK_ALIGN)
25
26struct morus1280_block {
27 u64 words[MORUS_BLOCK_WORDS];
28};
29
30union morus1280_block_in {
31 __le64 words[MORUS_BLOCK_WORDS];
32 u8 bytes[MORUS1280_BLOCK_SIZE];
33};
34
35struct morus1280_state {
36 struct morus1280_block s[MORUS_STATE_BLOCKS];
37};
38
39struct morus1280_ctx {
40 struct morus1280_block key;
41};
42
43struct morus1280_ops {
44 int (*skcipher_walk_init)(struct skcipher_walk *walk,
45 struct aead_request *req, bool atomic);
46
47 void (*crypt_chunk)(struct morus1280_state *state,
48 u8 *dst, const u8 *src, unsigned int size);
49};
50
51static const struct morus1280_block crypto_morus1280_const[1] = {
52 { .words = {
53 U64_C(0x0d08050302010100),
54 U64_C(0x6279e99059372215),
55 U64_C(0xf12fc26d55183ddb),
56 U64_C(0xdd28b57342311120),
57 } },
58};
59
60static void crypto_morus1280_round(struct morus1280_block *b0,
61 struct morus1280_block *b1,
62 struct morus1280_block *b2,
63 struct morus1280_block *b3,
64 struct morus1280_block *b4,
65 const struct morus1280_block *m,
66 unsigned int b, unsigned int w)
67{
68 unsigned int i;
69 struct morus1280_block tmp;
70
71 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
72 b0->words[i] ^= b1->words[i] & b2->words[i];
73 b0->words[i] ^= b3->words[i];
74 b0->words[i] ^= m->words[i];
75 b0->words[i] = rol64(b0->words[i], b);
76 }
77
78 tmp = *b3;
79 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
80 b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i];
81}
82
83static void crypto_morus1280_update(struct morus1280_state *state,
84 const struct morus1280_block *m)
85{
86 static const struct morus1280_block z = {};
87
88 struct morus1280_block *s = state->s;
89
90 crypto_morus1280_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 13, 1);
91 crypto_morus1280_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 46, 2);
92 crypto_morus1280_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 38, 3);
93 crypto_morus1280_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 7, 2);
94 crypto_morus1280_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 4, 1);
95}
96
97static void crypto_morus1280_load_a(struct morus1280_block *dst, const u8 *src)
98{
99 unsigned int i;
100 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
101 dst->words[i] = le64_to_cpu(*(const __le64 *)src);
102 src += MORUS1280_WORD_SIZE;
103 }
104}
105
106static void crypto_morus1280_load_u(struct morus1280_block *dst, const u8 *src)
107{
108 unsigned int i;
109 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
110 dst->words[i] = get_unaligned_le64(src);
111 src += MORUS1280_WORD_SIZE;
112 }
113}
114
115static void crypto_morus1280_load(struct morus1280_block *dst, const u8 *src)
116{
117 if (MORUS1280_ALIGNED(src))
118 crypto_morus1280_load_a(dst, src);
119 else
120 crypto_morus1280_load_u(dst, src);
121}
122
123static void crypto_morus1280_store_a(u8 *dst, const struct morus1280_block *src)
124{
125 unsigned int i;
126 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
127 *(__le64 *)dst = cpu_to_le64(src->words[i]);
128 dst += MORUS1280_WORD_SIZE;
129 }
130}
131
132static void crypto_morus1280_store_u(u8 *dst, const struct morus1280_block *src)
133{
134 unsigned int i;
135 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
136 put_unaligned_le64(src->words[i], dst);
137 dst += MORUS1280_WORD_SIZE;
138 }
139}
140
141static void crypto_morus1280_store(u8 *dst, const struct morus1280_block *src)
142{
143 if (MORUS1280_ALIGNED(dst))
144 crypto_morus1280_store_a(dst, src);
145 else
146 crypto_morus1280_store_u(dst, src);
147}
148
149static void crypto_morus1280_ad(struct morus1280_state *state, const u8 *src,
150 unsigned int size)
151{
152 struct morus1280_block m;
153
154 if (MORUS1280_ALIGNED(src)) {
155 while (size >= MORUS1280_BLOCK_SIZE) {
156 crypto_morus1280_load_a(&m, src);
157 crypto_morus1280_update(state, &m);
158
159 size -= MORUS1280_BLOCK_SIZE;
160 src += MORUS1280_BLOCK_SIZE;
161 }
162 } else {
163 while (size >= MORUS1280_BLOCK_SIZE) {
164 crypto_morus1280_load_u(&m, src);
165 crypto_morus1280_update(state, &m);
166
167 size -= MORUS1280_BLOCK_SIZE;
168 src += MORUS1280_BLOCK_SIZE;
169 }
170 }
171}
172
173static void crypto_morus1280_core(const struct morus1280_state *state,
174 struct morus1280_block *blk)
175{
176 unsigned int i;
177
178 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
179 blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i];
180
181 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
182 blk->words[i] ^= state->s[0].words[i];
183 blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i];
184 }
185}
186
187static void crypto_morus1280_encrypt_chunk(struct morus1280_state *state,
188 u8 *dst, const u8 *src,
189 unsigned int size)
190{
191 struct morus1280_block c, m;
192
193 if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) {
194 while (size >= MORUS1280_BLOCK_SIZE) {
195 crypto_morus1280_load_a(&m, src);
196 c = m;
197 crypto_morus1280_core(state, &c);
198 crypto_morus1280_store_a(dst, &c);
199 crypto_morus1280_update(state, &m);
200
201 src += MORUS1280_BLOCK_SIZE;
202 dst += MORUS1280_BLOCK_SIZE;
203 size -= MORUS1280_BLOCK_SIZE;
204 }
205 } else {
206 while (size >= MORUS1280_BLOCK_SIZE) {
207 crypto_morus1280_load_u(&m, src);
208 c = m;
209 crypto_morus1280_core(state, &c);
210 crypto_morus1280_store_u(dst, &c);
211 crypto_morus1280_update(state, &m);
212
213 src += MORUS1280_BLOCK_SIZE;
214 dst += MORUS1280_BLOCK_SIZE;
215 size -= MORUS1280_BLOCK_SIZE;
216 }
217 }
218
219 if (size > 0) {
220 union morus1280_block_in tail;
221
222 memcpy(tail.bytes, src, size);
223 memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size);
224
225 crypto_morus1280_load_a(&m, tail.bytes);
226 c = m;
227 crypto_morus1280_core(state, &c);
228 crypto_morus1280_store_a(tail.bytes, &c);
229 crypto_morus1280_update(state, &m);
230
231 memcpy(dst, tail.bytes, size);
232 }
233}
234
235static void crypto_morus1280_decrypt_chunk(struct morus1280_state *state,
236 u8 *dst, const u8 *src,
237 unsigned int size)
238{
239 struct morus1280_block m;
240
241 if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) {
242 while (size >= MORUS1280_BLOCK_SIZE) {
243 crypto_morus1280_load_a(&m, src);
244 crypto_morus1280_core(state, &m);
245 crypto_morus1280_store_a(dst, &m);
246 crypto_morus1280_update(state, &m);
247
248 src += MORUS1280_BLOCK_SIZE;
249 dst += MORUS1280_BLOCK_SIZE;
250 size -= MORUS1280_BLOCK_SIZE;
251 }
252 } else {
253 while (size >= MORUS1280_BLOCK_SIZE) {
254 crypto_morus1280_load_u(&m, src);
255 crypto_morus1280_core(state, &m);
256 crypto_morus1280_store_u(dst, &m);
257 crypto_morus1280_update(state, &m);
258
259 src += MORUS1280_BLOCK_SIZE;
260 dst += MORUS1280_BLOCK_SIZE;
261 size -= MORUS1280_BLOCK_SIZE;
262 }
263 }
264
265 if (size > 0) {
266 union morus1280_block_in tail;
267
268 memcpy(tail.bytes, src, size);
269 memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size);
270
271 crypto_morus1280_load_a(&m, tail.bytes);
272 crypto_morus1280_core(state, &m);
273 crypto_morus1280_store_a(tail.bytes, &m);
274 memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size);
275 crypto_morus1280_load_a(&m, tail.bytes);
276 crypto_morus1280_update(state, &m);
277
278 memcpy(dst, tail.bytes, size);
279 }
280}
281
282static void crypto_morus1280_init(struct morus1280_state *state,
283 const struct morus1280_block *key,
284 const u8 *iv)
285{
286 static const struct morus1280_block z = {};
287
288 union morus1280_block_in tmp;
289 unsigned int i;
290
291 memcpy(tmp.bytes, iv, MORUS_NONCE_SIZE);
292 memset(tmp.bytes + MORUS_NONCE_SIZE, 0,
293 MORUS1280_BLOCK_SIZE - MORUS_NONCE_SIZE);
294
295 crypto_morus1280_load(&state->s[0], tmp.bytes);
296 state->s[1] = *key;
297 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
298 state->s[2].words[i] = U64_C(0xFFFFFFFFFFFFFFFF);
299 state->s[3] = z;
300 state->s[4] = crypto_morus1280_const[0];
301
302 for (i = 0; i < 16; i++)
303 crypto_morus1280_update(state, &z);
304
305 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
306 state->s[1].words[i] ^= key->words[i];
307}
308
309static void crypto_morus1280_process_ad(struct morus1280_state *state,
310 struct scatterlist *sg_src,
311 unsigned int assoclen)
312{
313 struct scatter_walk walk;
314 struct morus1280_block m;
315 union morus1280_block_in buf;
316 unsigned int pos = 0;
317
318 scatterwalk_start(&walk, sg_src);
319 while (assoclen != 0) {
320 unsigned int size = scatterwalk_clamp(&walk, assoclen);
321 unsigned int left = size;
322 void *mapped = scatterwalk_map(&walk);
323 const u8 *src = (const u8 *)mapped;
324
325 if (pos + size >= MORUS1280_BLOCK_SIZE) {
326 if (pos > 0) {
327 unsigned int fill = MORUS1280_BLOCK_SIZE - pos;
328 memcpy(buf.bytes + pos, src, fill);
329
330 crypto_morus1280_load_a(&m, buf.bytes);
331 crypto_morus1280_update(state, &m);
332
333 pos = 0;
334 left -= fill;
335 src += fill;
336 }
337
338 crypto_morus1280_ad(state, src, left);
339 src += left & ~(MORUS1280_BLOCK_SIZE - 1);
340 left &= MORUS1280_BLOCK_SIZE - 1;
341 }
342
343 memcpy(buf.bytes + pos, src, left);
344
345 pos += left;
346 assoclen -= size;
347 scatterwalk_unmap(mapped);
348 scatterwalk_advance(&walk, size);
349 scatterwalk_done(&walk, 0, assoclen);
350 }
351
352 if (pos > 0) {
353 memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
354
355 crypto_morus1280_load_a(&m, buf.bytes);
356 crypto_morus1280_update(state, &m);
357 }
358}
359
360static void crypto_morus1280_process_crypt(struct morus1280_state *state,
361 struct aead_request *req,
362 const struct morus1280_ops *ops)
363{
364 struct skcipher_walk walk;
365
366 ops->skcipher_walk_init(&walk, req, false);
367
368 while (walk.nbytes) {
369 unsigned int nbytes = walk.nbytes;
370
371 if (nbytes < walk.total)
372 nbytes = round_down(nbytes, walk.stride);
373
374 ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
375 nbytes);
376
377 skcipher_walk_done(&walk, walk.nbytes - nbytes);
378 }
379}
380
381static void crypto_morus1280_final(struct morus1280_state *state,
382 struct morus1280_block *tag_xor,
383 u64 assoclen, u64 cryptlen)
384{
385 struct morus1280_block tmp;
386 unsigned int i;
387
388 tmp.words[0] = assoclen * 8;
389 tmp.words[1] = cryptlen * 8;
390 tmp.words[2] = 0;
391 tmp.words[3] = 0;
392
393 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
394 state->s[4].words[i] ^= state->s[0].words[i];
395
396 for (i = 0; i < 10; i++)
397 crypto_morus1280_update(state, &tmp);
398
399 crypto_morus1280_core(state, tag_xor);
400}
401
402static int crypto_morus1280_setkey(struct crypto_aead *aead, const u8 *key,
403 unsigned int keylen)
404{
405 struct morus1280_ctx *ctx = crypto_aead_ctx(aead);
406 union morus1280_block_in tmp;
407
408 if (keylen == MORUS1280_BLOCK_SIZE)
409 crypto_morus1280_load(&ctx->key, key);
410 else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
411 memcpy(tmp.bytes, key, keylen);
412 memcpy(tmp.bytes + keylen, key, keylen);
413
414 crypto_morus1280_load(&ctx->key, tmp.bytes);
415 } else {
416 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
417 return -EINVAL;
418 }
419
420 return 0;
421}
422
423static int crypto_morus1280_setauthsize(struct crypto_aead *tfm,
424 unsigned int authsize)
425{
426 return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
427}
428
429static void crypto_morus1280_crypt(struct aead_request *req,
430 struct morus1280_block *tag_xor,
431 unsigned int cryptlen,
432 const struct morus1280_ops *ops)
433{
434 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
435 struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
436 struct morus1280_state state;
437
438 crypto_morus1280_init(&state, &ctx->key, req->iv);
439 crypto_morus1280_process_ad(&state, req->src, req->assoclen);
440 crypto_morus1280_process_crypt(&state, req, ops);
441 crypto_morus1280_final(&state, tag_xor, req->assoclen, cryptlen);
442}
443
444static int crypto_morus1280_encrypt(struct aead_request *req)
445{
446 static const struct morus1280_ops ops = {
447 .skcipher_walk_init = skcipher_walk_aead_encrypt,
448 .crypt_chunk = crypto_morus1280_encrypt_chunk,
449 };
450
451 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
452 struct morus1280_block tag = {};
453 union morus1280_block_in tag_out;
454 unsigned int authsize = crypto_aead_authsize(tfm);
455 unsigned int cryptlen = req->cryptlen;
456
457 crypto_morus1280_crypt(req, &tag, cryptlen, &ops);
458 crypto_morus1280_store(tag_out.bytes, &tag);
459
460 scatterwalk_map_and_copy(tag_out.bytes, req->dst,
461 req->assoclen + cryptlen, authsize, 1);
462 return 0;
463}
464
465static int crypto_morus1280_decrypt(struct aead_request *req)
466{
467 static const struct morus1280_ops ops = {
468 .skcipher_walk_init = skcipher_walk_aead_decrypt,
469 .crypt_chunk = crypto_morus1280_decrypt_chunk,
470 };
471 static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};
472
473 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
474 union morus1280_block_in tag_in;
475 struct morus1280_block tag;
476 unsigned int authsize = crypto_aead_authsize(tfm);
477 unsigned int cryptlen = req->cryptlen - authsize;
478
479 scatterwalk_map_and_copy(tag_in.bytes, req->src,
480 req->assoclen + cryptlen, authsize, 0);
481
482 crypto_morus1280_load(&tag, tag_in.bytes);
483 crypto_morus1280_crypt(req, &tag, cryptlen, &ops);
484 crypto_morus1280_store(tag_in.bytes, &tag);
485
486 return crypto_memneq(tag_in.bytes, zeros, authsize) ? -EBADMSG : 0;
487}
488
489static int crypto_morus1280_init_tfm(struct crypto_aead *tfm)
490{
491 return 0;
492}
493
494static void crypto_morus1280_exit_tfm(struct crypto_aead *tfm)
495{
496}
497
498static struct aead_alg crypto_morus1280_alg = {
499 .setkey = crypto_morus1280_setkey,
500 .setauthsize = crypto_morus1280_setauthsize,
501 .encrypt = crypto_morus1280_encrypt,
502 .decrypt = crypto_morus1280_decrypt,
503 .init = crypto_morus1280_init_tfm,
504 .exit = crypto_morus1280_exit_tfm,
505
506 .ivsize = MORUS_NONCE_SIZE,
507 .maxauthsize = MORUS_MAX_AUTH_SIZE,
508 .chunksize = MORUS1280_BLOCK_SIZE,
509
510 .base = {
511 .cra_blocksize = 1,
512 .cra_ctxsize = sizeof(struct morus1280_ctx),
513 .cra_alignmask = 0,
514
515 .cra_priority = 100,
516
517 .cra_name = "morus1280",
518 .cra_driver_name = "morus1280-generic",
519
520 .cra_module = THIS_MODULE,
521 }
522};
523
524
525static int __init crypto_morus1280_module_init(void)
526{
527 return crypto_register_aead(&crypto_morus1280_alg);
528}
529
530static void __exit crypto_morus1280_module_exit(void)
531{
532 crypto_unregister_aead(&crypto_morus1280_alg);
533}
534
535subsys_initcall(crypto_morus1280_module_init);
536module_exit(crypto_morus1280_module_exit);
537
538MODULE_LICENSE("GPL");
539MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
540MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm");
541MODULE_ALIAS_CRYPTO("morus1280");
542MODULE_ALIAS_CRYPTO("morus1280-generic");
diff --git a/crypto/morus640.c b/crypto/morus640.c
deleted file mode 100644
index ae5aa9482cb4..000000000000
--- a/crypto/morus640.c
+++ /dev/null
@@ -1,533 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * The MORUS-640 Authenticated-Encryption Algorithm
4 *
5 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
6 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
7 */
8
9#include <asm/unaligned.h>
10#include <crypto/algapi.h>
11#include <crypto/internal/aead.h>
12#include <crypto/internal/skcipher.h>
13#include <crypto/morus_common.h>
14#include <crypto/scatterwalk.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/scatterlist.h>
20
21#define MORUS640_WORD_SIZE 4
22#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE)
23#define MORUS640_BLOCK_ALIGN (__alignof__(__le32))
24#define MORUS640_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS640_BLOCK_ALIGN)
25
26struct morus640_block {
27 u32 words[MORUS_BLOCK_WORDS];
28};
29
30union morus640_block_in {
31 __le32 words[MORUS_BLOCK_WORDS];
32 u8 bytes[MORUS640_BLOCK_SIZE];
33};
34
35struct morus640_state {
36 struct morus640_block s[MORUS_STATE_BLOCKS];
37};
38
39struct morus640_ctx {
40 struct morus640_block key;
41};
42
43struct morus640_ops {
44 int (*skcipher_walk_init)(struct skcipher_walk *walk,
45 struct aead_request *req, bool atomic);
46
47 void (*crypt_chunk)(struct morus640_state *state,
48 u8 *dst, const u8 *src, unsigned int size);
49};
50
51static const struct morus640_block crypto_morus640_const[2] = {
52 { .words = {
53 U32_C(0x02010100),
54 U32_C(0x0d080503),
55 U32_C(0x59372215),
56 U32_C(0x6279e990),
57 } },
58 { .words = {
59 U32_C(0x55183ddb),
60 U32_C(0xf12fc26d),
61 U32_C(0x42311120),
62 U32_C(0xdd28b573),
63 } },
64};
65
66static void crypto_morus640_round(struct morus640_block *b0,
67 struct morus640_block *b1,
68 struct morus640_block *b2,
69 struct morus640_block *b3,
70 struct morus640_block *b4,
71 const struct morus640_block *m,
72 unsigned int b, unsigned int w)
73{
74 unsigned int i;
75 struct morus640_block tmp;
76
77 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
78 b0->words[i] ^= b1->words[i] & b2->words[i];
79 b0->words[i] ^= b3->words[i];
80 b0->words[i] ^= m->words[i];
81 b0->words[i] = rol32(b0->words[i], b);
82 }
83
84 tmp = *b3;
85 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
86 b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i];
87}
88
89static void crypto_morus640_update(struct morus640_state *state,
90 const struct morus640_block *m)
91{
92 static const struct morus640_block z = {};
93
94 struct morus640_block *s = state->s;
95
96 crypto_morus640_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 5, 1);
97 crypto_morus640_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 31, 2);
98 crypto_morus640_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 7, 3);
99 crypto_morus640_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 22, 2);
100 crypto_morus640_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 13, 1);
101}
102
103static void crypto_morus640_load_a(struct morus640_block *dst, const u8 *src)
104{
105 unsigned int i;
106 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
107 dst->words[i] = le32_to_cpu(*(const __le32 *)src);
108 src += MORUS640_WORD_SIZE;
109 }
110}
111
112static void crypto_morus640_load_u(struct morus640_block *dst, const u8 *src)
113{
114 unsigned int i;
115 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
116 dst->words[i] = get_unaligned_le32(src);
117 src += MORUS640_WORD_SIZE;
118 }
119}
120
121static void crypto_morus640_load(struct morus640_block *dst, const u8 *src)
122{
123 if (MORUS640_ALIGNED(src))
124 crypto_morus640_load_a(dst, src);
125 else
126 crypto_morus640_load_u(dst, src);
127}
128
129static void crypto_morus640_store_a(u8 *dst, const struct morus640_block *src)
130{
131 unsigned int i;
132 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
133 *(__le32 *)dst = cpu_to_le32(src->words[i]);
134 dst += MORUS640_WORD_SIZE;
135 }
136}
137
138static void crypto_morus640_store_u(u8 *dst, const struct morus640_block *src)
139{
140 unsigned int i;
141 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
142 put_unaligned_le32(src->words[i], dst);
143 dst += MORUS640_WORD_SIZE;
144 }
145}
146
147static void crypto_morus640_store(u8 *dst, const struct morus640_block *src)
148{
149 if (MORUS640_ALIGNED(dst))
150 crypto_morus640_store_a(dst, src);
151 else
152 crypto_morus640_store_u(dst, src);
153}
154
155static void crypto_morus640_ad(struct morus640_state *state, const u8 *src,
156 unsigned int size)
157{
158 struct morus640_block m;
159
160 if (MORUS640_ALIGNED(src)) {
161 while (size >= MORUS640_BLOCK_SIZE) {
162 crypto_morus640_load_a(&m, src);
163 crypto_morus640_update(state, &m);
164
165 size -= MORUS640_BLOCK_SIZE;
166 src += MORUS640_BLOCK_SIZE;
167 }
168 } else {
169 while (size >= MORUS640_BLOCK_SIZE) {
170 crypto_morus640_load_u(&m, src);
171 crypto_morus640_update(state, &m);
172
173 size -= MORUS640_BLOCK_SIZE;
174 src += MORUS640_BLOCK_SIZE;
175 }
176 }
177}
178
179static void crypto_morus640_core(const struct morus640_state *state,
180 struct morus640_block *blk)
181{
182 unsigned int i;
183
184 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
185 blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i];
186
187 for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
188 blk->words[i] ^= state->s[0].words[i];
189 blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i];
190 }
191}
192
193static void crypto_morus640_encrypt_chunk(struct morus640_state *state, u8 *dst,
194 const u8 *src, unsigned int size)
195{
196 struct morus640_block c, m;
197
198 if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) {
199 while (size >= MORUS640_BLOCK_SIZE) {
200 crypto_morus640_load_a(&m, src);
201 c = m;
202 crypto_morus640_core(state, &c);
203 crypto_morus640_store_a(dst, &c);
204 crypto_morus640_update(state, &m);
205
206 src += MORUS640_BLOCK_SIZE;
207 dst += MORUS640_BLOCK_SIZE;
208 size -= MORUS640_BLOCK_SIZE;
209 }
210 } else {
211 while (size >= MORUS640_BLOCK_SIZE) {
212 crypto_morus640_load_u(&m, src);
213 c = m;
214 crypto_morus640_core(state, &c);
215 crypto_morus640_store_u(dst, &c);
216 crypto_morus640_update(state, &m);
217
218 src += MORUS640_BLOCK_SIZE;
219 dst += MORUS640_BLOCK_SIZE;
220 size -= MORUS640_BLOCK_SIZE;
221 }
222 }
223
224 if (size > 0) {
225 union morus640_block_in tail;
226
227 memcpy(tail.bytes, src, size);
228 memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
229
230 crypto_morus640_load_a(&m, tail.bytes);
231 c = m;
232 crypto_morus640_core(state, &c);
233 crypto_morus640_store_a(tail.bytes, &c);
234 crypto_morus640_update(state, &m);
235
236 memcpy(dst, tail.bytes, size);
237 }
238}
239
240static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
241 const u8 *src, unsigned int size)
242{
243 struct morus640_block m;
244
245 if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) {
246 while (size >= MORUS640_BLOCK_SIZE) {
247 crypto_morus640_load_a(&m, src);
248 crypto_morus640_core(state, &m);
249 crypto_morus640_store_a(dst, &m);
250 crypto_morus640_update(state, &m);
251
252 src += MORUS640_BLOCK_SIZE;
253 dst += MORUS640_BLOCK_SIZE;
254 size -= MORUS640_BLOCK_SIZE;
255 }
256 } else {
257 while (size >= MORUS640_BLOCK_SIZE) {
258 crypto_morus640_load_u(&m, src);
259 crypto_morus640_core(state, &m);
260 crypto_morus640_store_u(dst, &m);
261 crypto_morus640_update(state, &m);
262
263 src += MORUS640_BLOCK_SIZE;
264 dst += MORUS640_BLOCK_SIZE;
265 size -= MORUS640_BLOCK_SIZE;
266 }
267 }
268
269 if (size > 0) {
270 union morus640_block_in tail;
271
272 memcpy(tail.bytes, src, size);
273 memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
274
275 crypto_morus640_load_a(&m, tail.bytes);
276 crypto_morus640_core(state, &m);
277 crypto_morus640_store_a(tail.bytes, &m);
278 memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
279 crypto_morus640_load_a(&m, tail.bytes);
280 crypto_morus640_update(state, &m);
281
282 memcpy(dst, tail.bytes, size);
283 }
284}
285
286static void crypto_morus640_init(struct morus640_state *state,
287 const struct morus640_block *key,
288 const u8 *iv)
289{
290 static const struct morus640_block z = {};
291
292 unsigned int i;
293
294 crypto_morus640_load(&state->s[0], iv);
295 state->s[1] = *key;
296 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
297 state->s[2].words[i] = U32_C(0xFFFFFFFF);
298 state->s[3] = crypto_morus640_const[0];
299 state->s[4] = crypto_morus640_const[1];
300
301 for (i = 0; i < 16; i++)
302 crypto_morus640_update(state, &z);
303
304 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
305 state->s[1].words[i] ^= key->words[i];
306}
307
308static void crypto_morus640_process_ad(struct morus640_state *state,
309 struct scatterlist *sg_src,
310 unsigned int assoclen)
311{
312 struct scatter_walk walk;
313 struct morus640_block m;
314 union morus640_block_in buf;
315 unsigned int pos = 0;
316
317 scatterwalk_start(&walk, sg_src);
318 while (assoclen != 0) {
319 unsigned int size = scatterwalk_clamp(&walk, assoclen);
320 unsigned int left = size;
321 void *mapped = scatterwalk_map(&walk);
322 const u8 *src = (const u8 *)mapped;
323
324 if (pos + size >= MORUS640_BLOCK_SIZE) {
325 if (pos > 0) {
326 unsigned int fill = MORUS640_BLOCK_SIZE - pos;
327 memcpy(buf.bytes + pos, src, fill);
328
329 crypto_morus640_load_a(&m, buf.bytes);
330 crypto_morus640_update(state, &m);
331
332 pos = 0;
333 left -= fill;
334 src += fill;
335 }
336
337 crypto_morus640_ad(state, src, left);
338 src += left & ~(MORUS640_BLOCK_SIZE - 1);
339 left &= MORUS640_BLOCK_SIZE - 1;
340 }
341
342 memcpy(buf.bytes + pos, src, left);
343
344 pos += left;
345 assoclen -= size;
346 scatterwalk_unmap(mapped);
347 scatterwalk_advance(&walk, size);
348 scatterwalk_done(&walk, 0, assoclen);
349 }
350
351 if (pos > 0) {
352 memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos);
353
354 crypto_morus640_load_a(&m, buf.bytes);
355 crypto_morus640_update(state, &m);
356 }
357}
358
359static void crypto_morus640_process_crypt(struct morus640_state *state,
360 struct aead_request *req,
361 const struct morus640_ops *ops)
362{
363 struct skcipher_walk walk;
364
365 ops->skcipher_walk_init(&walk, req, false);
366
367 while (walk.nbytes) {
368 unsigned int nbytes = walk.nbytes;
369
370 if (nbytes < walk.total)
371 nbytes = round_down(nbytes, walk.stride);
372
373 ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
374 nbytes);
375
376 skcipher_walk_done(&walk, walk.nbytes - nbytes);
377 }
378}
379
380static void crypto_morus640_final(struct morus640_state *state,
381 struct morus640_block *tag_xor,
382 u64 assoclen, u64 cryptlen)
383{
384 struct morus640_block tmp;
385 unsigned int i;
386
387 tmp.words[0] = lower_32_bits(assoclen * 8);
388 tmp.words[1] = upper_32_bits(assoclen * 8);
389 tmp.words[2] = lower_32_bits(cryptlen * 8);
390 tmp.words[3] = upper_32_bits(cryptlen * 8);
391
392 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
393 state->s[4].words[i] ^= state->s[0].words[i];
394
395 for (i = 0; i < 10; i++)
396 crypto_morus640_update(state, &tmp);
397
398 crypto_morus640_core(state, tag_xor);
399}
400
401static int crypto_morus640_setkey(struct crypto_aead *aead, const u8 *key,
402 unsigned int keylen)
403{
404 struct morus640_ctx *ctx = crypto_aead_ctx(aead);
405
406 if (keylen != MORUS640_BLOCK_SIZE) {
407 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
408 return -EINVAL;
409 }
410
411 crypto_morus640_load(&ctx->key, key);
412 return 0;
413}
414
415static int crypto_morus640_setauthsize(struct crypto_aead *tfm,
416 unsigned int authsize)
417{
418 return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
419}
420
421static void crypto_morus640_crypt(struct aead_request *req,
422 struct morus640_block *tag_xor,
423 unsigned int cryptlen,
424 const struct morus640_ops *ops)
425{
426 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
427 struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
428 struct morus640_state state;
429
430 crypto_morus640_init(&state, &ctx->key, req->iv);
431 crypto_morus640_process_ad(&state, req->src, req->assoclen);
432 crypto_morus640_process_crypt(&state, req, ops);
433 crypto_morus640_final(&state, tag_xor, req->assoclen, cryptlen);
434}
435
436static int crypto_morus640_encrypt(struct aead_request *req)
437{
438 static const struct morus640_ops ops = {
439 .skcipher_walk_init = skcipher_walk_aead_encrypt,
440 .crypt_chunk = crypto_morus640_encrypt_chunk,
441 };
442
443 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
444 struct morus640_block tag = {};
445 union morus640_block_in tag_out;
446 unsigned int authsize = crypto_aead_authsize(tfm);
447 unsigned int cryptlen = req->cryptlen;
448
449 crypto_morus640_crypt(req, &tag, cryptlen, &ops);
450 crypto_morus640_store(tag_out.bytes, &tag);
451
452 scatterwalk_map_and_copy(tag_out.bytes, req->dst,
453 req->assoclen + cryptlen, authsize, 1);
454 return 0;
455}
456
457static int crypto_morus640_decrypt(struct aead_request *req)
458{
459 static const struct morus640_ops ops = {
460 .skcipher_walk_init = skcipher_walk_aead_decrypt,
461 .crypt_chunk = crypto_morus640_decrypt_chunk,
462 };
463 static const u8 zeros[MORUS640_BLOCK_SIZE] = {};
464
465 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
466 union morus640_block_in tag_in;
467 struct morus640_block tag;
468 unsigned int authsize = crypto_aead_authsize(tfm);
469 unsigned int cryptlen = req->cryptlen - authsize;
470
471 scatterwalk_map_and_copy(tag_in.bytes, req->src,
472 req->assoclen + cryptlen, authsize, 0);
473
474 crypto_morus640_load(&tag, tag_in.bytes);
475 crypto_morus640_crypt(req, &tag, cryptlen, &ops);
476 crypto_morus640_store(tag_in.bytes, &tag);
477
478 return crypto_memneq(tag_in.bytes, zeros, authsize) ? -EBADMSG : 0;
479}
480
481static int crypto_morus640_init_tfm(struct crypto_aead *tfm)
482{
483 return 0;
484}
485
486static void crypto_morus640_exit_tfm(struct crypto_aead *tfm)
487{
488}
489
490static struct aead_alg crypto_morus640_alg = {
491 .setkey = crypto_morus640_setkey,
492 .setauthsize = crypto_morus640_setauthsize,
493 .encrypt = crypto_morus640_encrypt,
494 .decrypt = crypto_morus640_decrypt,
495 .init = crypto_morus640_init_tfm,
496 .exit = crypto_morus640_exit_tfm,
497
498 .ivsize = MORUS_NONCE_SIZE,
499 .maxauthsize = MORUS_MAX_AUTH_SIZE,
500 .chunksize = MORUS640_BLOCK_SIZE,
501
502 .base = {
503 .cra_blocksize = 1,
504 .cra_ctxsize = sizeof(struct morus640_ctx),
505 .cra_alignmask = 0,
506
507 .cra_priority = 100,
508
509 .cra_name = "morus640",
510 .cra_driver_name = "morus640-generic",
511
512 .cra_module = THIS_MODULE,
513 }
514};
515
516static int __init crypto_morus640_module_init(void)
517{
518 return crypto_register_aead(&crypto_morus640_alg);
519}
520
521static void __exit crypto_morus640_module_exit(void)
522{
523 crypto_unregister_aead(&crypto_morus640_alg);
524}
525
526subsys_initcall(crypto_morus640_module_init);
527module_exit(crypto_morus640_module_exit);
528
529MODULE_LICENSE("GPL");
530MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
531MODULE_DESCRIPTION("MORUS-640 AEAD algorithm");
532MODULE_ALIAS_CRYPTO("morus640");
533MODULE_ALIAS_CRYPTO("morus640-generic");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 0edf5b54fc77..543792e0ebf0 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -18,34 +18,8 @@
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <crypto/pcrypt.h> 19#include <crypto/pcrypt.h>
20 20
21struct padata_pcrypt { 21static struct padata_instance *pencrypt;
22 struct padata_instance *pinst; 22static struct padata_instance *pdecrypt;
23 struct workqueue_struct *wq;
24
25 /*
26 * Cpumask for callback CPUs. It should be
27 * equal to serial cpumask of corresponding padata instance,
28 * so it is updated when padata notifies us about serial
29 * cpumask change.
30 *
31 * cb_cpumask is protected by RCU. This fact prevents us from
32 * using cpumask_var_t directly because the actual type of
33 * cpumsak_var_t depends on kernel configuration(particularly on
34 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
35 * cpumask_var_t may be either a pointer to the struct cpumask
36 * or a variable allocated on the stack. Thus we can not safely use
37 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
38 * rcu_dereference. So cpumask_var_t is wrapped with struct
39 * pcrypt_cpumask which makes possible to use it with RCU.
40 */
41 struct pcrypt_cpumask {
42 cpumask_var_t mask;
43 } *cb_cpumask;
44 struct notifier_block nblock;
45};
46
47static struct padata_pcrypt pencrypt;
48static struct padata_pcrypt pdecrypt;
49static struct kset *pcrypt_kset; 23static struct kset *pcrypt_kset;
50 24
51struct pcrypt_instance_ctx { 25struct pcrypt_instance_ctx {
@@ -58,35 +32,6 @@ struct pcrypt_aead_ctx {
58 unsigned int cb_cpu; 32 unsigned int cb_cpu;
59}; 33};
60 34
61static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
62 struct padata_pcrypt *pcrypt)
63{
64 unsigned int cpu_index, cpu, i;
65 struct pcrypt_cpumask *cpumask;
66
67 cpu = *cb_cpu;
68
69 rcu_read_lock_bh();
70 cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
71 if (cpumask_test_cpu(cpu, cpumask->mask))
72 goto out;
73
74 if (!cpumask_weight(cpumask->mask))
75 goto out;
76
77 cpu_index = cpu % cpumask_weight(cpumask->mask);
78
79 cpu = cpumask_first(cpumask->mask);
80 for (i = 0; i < cpu_index; i++)
81 cpu = cpumask_next(cpu, cpumask->mask);
82
83 *cb_cpu = cpu;
84
85out:
86 rcu_read_unlock_bh();
87 return padata_do_parallel(pcrypt->pinst, padata, cpu);
88}
89
90static int pcrypt_aead_setkey(struct crypto_aead *parent, 35static int pcrypt_aead_setkey(struct crypto_aead *parent,
91 const u8 *key, unsigned int keylen) 36 const u8 *key, unsigned int keylen)
92{ 37{
@@ -158,7 +103,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
158 req->cryptlen, req->iv); 103 req->cryptlen, req->iv);
159 aead_request_set_ad(creq, req->assoclen); 104 aead_request_set_ad(creq, req->assoclen);
160 105
161 err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); 106 err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu);
162 if (!err) 107 if (!err)
163 return -EINPROGRESS; 108 return -EINPROGRESS;
164 109
@@ -200,7 +145,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
200 req->cryptlen, req->iv); 145 req->cryptlen, req->iv);
201 aead_request_set_ad(creq, req->assoclen); 146 aead_request_set_ad(creq, req->assoclen);
202 147
203 err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); 148 err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu);
204 if (!err) 149 if (!err)
205 return -EINPROGRESS; 150 return -EINPROGRESS;
206 151
@@ -347,36 +292,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
347 return -EINVAL; 292 return -EINVAL;
348} 293}
349 294
350static int pcrypt_cpumask_change_notify(struct notifier_block *self,
351 unsigned long val, void *data)
352{
353 struct padata_pcrypt *pcrypt;
354 struct pcrypt_cpumask *new_mask, *old_mask;
355 struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
356
357 if (!(val & PADATA_CPU_SERIAL))
358 return 0;
359
360 pcrypt = container_of(self, struct padata_pcrypt, nblock);
361 new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
362 if (!new_mask)
363 return -ENOMEM;
364 if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
365 kfree(new_mask);
366 return -ENOMEM;
367 }
368
369 old_mask = pcrypt->cb_cpumask;
370
371 cpumask_copy(new_mask->mask, cpumask->cbcpu);
372 rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
373 synchronize_rcu();
374
375 free_cpumask_var(old_mask->mask);
376 kfree(old_mask);
377 return 0;
378}
379
380static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) 295static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
381{ 296{
382 int ret; 297 int ret;
@@ -389,71 +304,25 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
389 return ret; 304 return ret;
390} 305}
391 306
392static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, 307static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
393 const char *name)
394{ 308{
395 int ret = -ENOMEM; 309 int ret = -ENOMEM;
396 struct pcrypt_cpumask *mask;
397 310
398 get_online_cpus(); 311 *pinst = padata_alloc_possible(name);
399 312 if (!*pinst)
400 pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 313 return ret;
401 1, name);
402 if (!pcrypt->wq)
403 goto err;
404
405 pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
406 if (!pcrypt->pinst)
407 goto err_destroy_workqueue;
408
409 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
410 if (!mask)
411 goto err_free_padata;
412 if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
413 kfree(mask);
414 goto err_free_padata;
415 }
416 314
417 cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask); 315 ret = pcrypt_sysfs_add(*pinst, name);
418 rcu_assign_pointer(pcrypt->cb_cpumask, mask);
419
420 pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
421 ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
422 if (ret)
423 goto err_free_cpumask;
424
425 ret = pcrypt_sysfs_add(pcrypt->pinst, name);
426 if (ret) 316 if (ret)
427 goto err_unregister_notifier; 317 padata_free(*pinst);
428
429 put_online_cpus();
430
431 return ret;
432
433err_unregister_notifier:
434 padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
435err_free_cpumask:
436 free_cpumask_var(mask->mask);
437 kfree(mask);
438err_free_padata:
439 padata_free(pcrypt->pinst);
440err_destroy_workqueue:
441 destroy_workqueue(pcrypt->wq);
442err:
443 put_online_cpus();
444 318
445 return ret; 319 return ret;
446} 320}
447 321
448static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) 322static void pcrypt_fini_padata(struct padata_instance *pinst)
449{ 323{
450 free_cpumask_var(pcrypt->cb_cpumask->mask); 324 padata_stop(pinst);
451 kfree(pcrypt->cb_cpumask); 325 padata_free(pinst);
452
453 padata_stop(pcrypt->pinst);
454 padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
455 destroy_workqueue(pcrypt->wq);
456 padata_free(pcrypt->pinst);
457} 326}
458 327
459static struct crypto_template pcrypt_tmpl = { 328static struct crypto_template pcrypt_tmpl = {
@@ -478,13 +347,13 @@ static int __init pcrypt_init(void)
478 if (err) 347 if (err)
479 goto err_deinit_pencrypt; 348 goto err_deinit_pencrypt;
480 349
481 padata_start(pencrypt.pinst); 350 padata_start(pencrypt);
482 padata_start(pdecrypt.pinst); 351 padata_start(pdecrypt);
483 352
484 return crypto_register_template(&pcrypt_tmpl); 353 return crypto_register_template(&pcrypt_tmpl);
485 354
486err_deinit_pencrypt: 355err_deinit_pencrypt:
487 pcrypt_fini_padata(&pencrypt); 356 pcrypt_fini_padata(pencrypt);
488err_unreg_kset: 357err_unreg_kset:
489 kset_unregister(pcrypt_kset); 358 kset_unregister(pcrypt_kset);
490err: 359err:
@@ -493,8 +362,8 @@ err:
493 362
494static void __exit pcrypt_exit(void) 363static void __exit pcrypt_exit(void)
495{ 364{
496 pcrypt_fini_padata(&pencrypt); 365 pcrypt_fini_padata(pencrypt);
497 pcrypt_fini_padata(&pdecrypt); 366 pcrypt_fini_padata(pdecrypt);
498 367
499 kset_unregister(pcrypt_kset); 368 kset_unregister(pcrypt_kset);
500 crypto_unregister_template(&pcrypt_tmpl); 369 crypto_unregister_template(&pcrypt_tmpl);
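With padata now allocating its workqueues and choosing a fallback callback CPU internally, a pcrypt-style consumer reduces to the pattern visible in the new code above. A condensed sketch of that usage, with error handling trimmed and names taken from this diff:

/* Condensed from the new pcrypt code above; not a drop-in replacement. */
#include <linux/padata.h>

static struct padata_instance *pencrypt;

static int example_init(void)
{
	pencrypt = padata_alloc_possible("pencrypt");
	if (!pencrypt)
		return -ENOMEM;

	padata_start(pencrypt);
	return 0;
}

static int example_submit(struct padata_priv *padata, unsigned int *cb_cpu)
{
	/* padata now finds an alternate callback CPU itself if needed */
	return padata_do_parallel(pencrypt, padata, cb_cpu);
}

static void example_fini(void)
{
	padata_stop(pencrypt);
	padata_free(pencrypt);
}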
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index b7502a96a0d4..f2d7095d4f2d 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -1,11 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-or-later 1// SPDX-License-Identifier: GPL-2.0-or-later
2/* 2/*
3 * Cryptographic API. 3 * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c
4 *
5 * SHA-256, as specified in
6 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
7 *
8 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
9 * 4 *
10 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> 5 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
11 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> 6 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
@@ -38,229 +33,44 @@ const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
38}; 33};
39EXPORT_SYMBOL_GPL(sha256_zero_message_hash); 34EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
40 35
41static inline u32 Ch(u32 x, u32 y, u32 z) 36static int crypto_sha256_init(struct shash_desc *desc)
42{
43 return z ^ (x & (y ^ z));
44}
45
46static inline u32 Maj(u32 x, u32 y, u32 z)
47{ 37{
48 return (x & y) | (z & (x | y)); 38 return sha256_init(shash_desc_ctx(desc));
49} 39}
50 40
51#define e0(x) (ror32(x, 2) ^ ror32(x,13) ^ ror32(x,22)) 41static int crypto_sha224_init(struct shash_desc *desc)
52#define e1(x) (ror32(x, 6) ^ ror32(x,11) ^ ror32(x,25))
53#define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3))
54#define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10))
55
56static inline void LOAD_OP(int I, u32 *W, const u8 *input)
57{ 42{
58 W[I] = get_unaligned_be32((__u32 *)input + I); 43 return sha224_init(shash_desc_ctx(desc));
59}
60
61static inline void BLEND_OP(int I, u32 *W)
62{
63 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
64}
65
66static void sha256_transform(u32 *state, const u8 *input)
67{
68 u32 a, b, c, d, e, f, g, h, t1, t2;
69 u32 W[64];
70 int i;
71
72 /* load the input */
73 for (i = 0; i < 16; i++)
74 LOAD_OP(i, W, input);
75
76 /* now blend */
77 for (i = 16; i < 64; i++)
78 BLEND_OP(i, W);
79
80 /* load the state into our registers */
81 a=state[0]; b=state[1]; c=state[2]; d=state[3];
82 e=state[4]; f=state[5]; g=state[6]; h=state[7];
83
84 /* now iterate */
85 t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];
86 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
87 t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1];
88 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
89 t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2];
90 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
91 t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3];
92 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
93 t1 = d + e1(a) + Ch(a,b,c) + 0x3956c25b + W[ 4];
94 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
95 t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5];
96 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
97 t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6];
98 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
99 t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7];
100 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
101
102 t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8];
103 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
104 t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9];
105 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
106 t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10];
107 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
108 t1 = e + e1(b) + Ch(b,c,d) + 0x550c7dc3 + W[11];
109 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
110 t1 = d + e1(a) + Ch(a,b,c) + 0x72be5d74 + W[12];
111 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
112 t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13];
113 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
114 t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14];
115 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
116 t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15];
117 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
118
119 t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16];
120 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
121 t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17];
122 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
123 t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18];
124 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
125 t1 = e + e1(b) + Ch(b,c,d) + 0x240ca1cc + W[19];
126 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
127 t1 = d + e1(a) + Ch(a,b,c) + 0x2de92c6f + W[20];
128 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
129 t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21];
130 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
131 t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22];
132 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
133 t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23];
134 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
135
136 t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24];
137 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
138 t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25];
139 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
140 t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26];
141 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
142 t1 = e + e1(b) + Ch(b,c,d) + 0xbf597fc7 + W[27];
143 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
144 t1 = d + e1(a) + Ch(a,b,c) + 0xc6e00bf3 + W[28];
145 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
146 t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29];
147 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
148 t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30];
149 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
150 t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31];
151 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
152
153 t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32];
154 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
155 t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33];
156 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
157 t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34];
158 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
159 t1 = e + e1(b) + Ch(b,c,d) + 0x53380d13 + W[35];
160 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
161 t1 = d + e1(a) + Ch(a,b,c) + 0x650a7354 + W[36];
162 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
163 t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37];
164 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
165 t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38];
166 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
167 t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39];
168 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
169
170 t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40];
171 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
172 t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41];
173 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
174 t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42];
175 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
176 t1 = e + e1(b) + Ch(b,c,d) + 0xc76c51a3 + W[43];
177 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
178 t1 = d + e1(a) + Ch(a,b,c) + 0xd192e819 + W[44];
179 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
180 t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45];
181 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
182 t1 = b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46];
183 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
184 t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47];
185 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
186
187 t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48];
188 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
189 t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49];
190 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
191 t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50];
192 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
193 t1 = e + e1(b) + Ch(b,c,d) + 0x34b0bcb5 + W[51];
194 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
195 t1 = d + e1(a) + Ch(a,b,c) + 0x391c0cb3 + W[52];
196 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
197 t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53];
198 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
199 t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54];
200 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
201 t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55];
202 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
203
204 t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56];
205 t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
206 t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57];
207 t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
208 t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58];
209 t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
210 t1 = e + e1(b) + Ch(b,c,d) + 0x8cc70208 + W[59];
211 t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
212 t1 = d + e1(a) + Ch(a,b,c) + 0x90befffa + W[60];
213 t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
214 t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61];
215 t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
216 t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62];
217 t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
218 t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63];
219 t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
220
221 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
222 state[4] += e; state[5] += f; state[6] += g; state[7] += h;
223
224 /* clear any sensitive info... */
225 a = b = c = d = e = f = g = h = t1 = t2 = 0;
226 memzero_explicit(W, 64 * sizeof(u32));
227}
228
229static void sha256_generic_block_fn(struct sha256_state *sst, u8 const *src,
230 int blocks)
231{
232 while (blocks--) {
233 sha256_transform(sst->state, src);
234 src += SHA256_BLOCK_SIZE;
235 }
236} 44}
237 45
238int crypto_sha256_update(struct shash_desc *desc, const u8 *data, 46int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
239 unsigned int len) 47 unsigned int len)
240{ 48{
241 return sha256_base_do_update(desc, data, len, sha256_generic_block_fn); 49 return sha256_update(shash_desc_ctx(desc), data, len);
242} 50}
243EXPORT_SYMBOL(crypto_sha256_update); 51EXPORT_SYMBOL(crypto_sha256_update);
244 52
245static int sha256_final(struct shash_desc *desc, u8 *out) 53static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
246{ 54{
247 sha256_base_do_finalize(desc, sha256_generic_block_fn); 55 if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
248 return sha256_base_finish(desc, out); 56 return sha224_final(shash_desc_ctx(desc), out);
57 else
58 return sha256_final(shash_desc_ctx(desc), out);
249} 59}
250 60
251int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, 61int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
252 unsigned int len, u8 *hash) 62 unsigned int len, u8 *hash)
253{ 63{
254 sha256_base_do_update(desc, data, len, sha256_generic_block_fn); 64 sha256_update(shash_desc_ctx(desc), data, len);
255 return sha256_final(desc, hash); 65 return crypto_sha256_final(desc, hash);
256} 66}
257EXPORT_SYMBOL(crypto_sha256_finup); 67EXPORT_SYMBOL(crypto_sha256_finup);
258 68
259static struct shash_alg sha256_algs[2] = { { 69static struct shash_alg sha256_algs[2] = { {
260 .digestsize = SHA256_DIGEST_SIZE, 70 .digestsize = SHA256_DIGEST_SIZE,
261 .init = sha256_base_init, 71 .init = crypto_sha256_init,
262 .update = crypto_sha256_update, 72 .update = crypto_sha256_update,
263 .final = sha256_final, 73 .final = crypto_sha256_final,
264 .finup = crypto_sha256_finup, 74 .finup = crypto_sha256_finup,
265 .descsize = sizeof(struct sha256_state), 75 .descsize = sizeof(struct sha256_state),
266 .base = { 76 .base = {
@@ -272,9 +82,9 @@ static struct shash_alg sha256_algs[2] = { {
272 } 82 }
273}, { 83}, {
274 .digestsize = SHA224_DIGEST_SIZE, 84 .digestsize = SHA224_DIGEST_SIZE,
275 .init = sha224_base_init, 85 .init = crypto_sha224_init,
276 .update = crypto_sha256_update, 86 .update = crypto_sha256_update,
277 .final = sha256_final, 87 .final = crypto_sha256_final,
278 .finup = crypto_sha256_finup, 88 .finup = crypto_sha256_finup,
279 .descsize = sizeof(struct sha256_state), 89 .descsize = sizeof(struct sha256_state),
280 .base = { 90 .base = {
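sha256_generic.c is now a thin shash wrapper over the SHA-256 library routines it calls above. A single-shot user can call the library directly, roughly as follows; the header carrying the declarations is an assumption here, since it has moved between kernel versions.

/* Sketch of direct use of the SHA-256 library helpers seen above.
 * The header providing the declarations is an assumption.
 */
#include <crypto/sha.h>

static void example_sha256(const u8 *data, unsigned int len,
			   u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);
}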
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 5d836fc3df3e..22753c1c7202 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -90,7 +90,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
 	u8 *addr;
 
@@ -98,19 +98,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 	addr = skcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize,
 			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+	return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-	unsigned int n; /* bytes processed */
-	bool more;
+	unsigned int n = walk->nbytes;
+	unsigned int nbytes = 0;
 
-	if (unlikely(err < 0))
+	if (!n)
 		goto finish;
 
-	n = walk->nbytes - err;
-	walk->total -= n;
-	more = (walk->total != 0);
+	if (likely(err >= 0)) {
+		n -= err;
+		nbytes = walk->total - n;
+	}
 
 	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
 				    SKCIPHER_WALK_SLOW |
@@ -126,7 +128,7 @@ unmap_src:
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (err) {
+		if (err > 0) {
 			/*
 			 * Didn't process all bytes. Either the algorithm is
 			 * broken, or this was the last step and it turned out
@@ -134,27 +136,29 @@ unmap_src:
 			 * the algorithm requires it.
 			 */
 			err = -EINVAL;
-			goto finish;
-		}
-		skcipher_done_slow(walk, n);
-		goto already_advanced;
+			nbytes = 0;
+		} else
+			n = skcipher_done_slow(walk, n);
 	}
 
+	if (err > 0)
+		err = 0;
+
+	walk->total = nbytes;
+	walk->nbytes = 0;
+
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-already_advanced:
-	scatterwalk_done(&walk->in, 0, more);
-	scatterwalk_done(&walk->out, 1, more);
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
 
-	if (more) {
+	if (nbytes) {
 		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 		return skcipher_walk_next(walk);
 	}
-	err = 0;
-finish:
-	walk->nbytes = 0;
 
+finish:
 	/* Short-circuit for the common/fast path. */
 	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
 		goto out;
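The reworked skcipher_walk_done() treats a non-negative err as "bytes left unprocessed in this chunk" and keys the final cleanup off walk->nbytes. The calling convention for algorithm code remains the usual walk loop, sketched here with a placeholder cipher step (example_crypt_chunk() is not a real kernel function); the same pattern appears in the MORUS code removed earlier in this diff.

/* Sketch of the canonical walk loop that feeds skcipher_walk_done();
 * example_crypt_chunk() is a placeholder, not a real kernel function.
 */
static int example_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		example_crypt_chunk(walk.dst.virt.addr, walk.src.virt.addr,
				    nbytes);

		/* report the bytes left unprocessed in this chunk */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}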
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index 63663c3bab7e..dc625ffc54ad 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -148,52 +148,6 @@ static const struct streebog_uint512 C[12] = {
 	} }
 };
 
-static const u8 Tau[64] = {
-	0, 8, 16, 24, 32, 40, 48, 56,
-	1, 9, 17, 25, 33, 41, 49, 57,
-	2, 10, 18, 26, 34, 42, 50, 58,
-	3, 11, 19, 27, 35, 43, 51, 59,
-	4, 12, 20, 28, 36, 44, 52, 60,
-	5, 13, 21, 29, 37, 45, 53, 61,
-	6, 14, 22, 30, 38, 46, 54, 62,
-	7, 15, 23, 31, 39, 47, 55, 63
-};
-
-static const u8 Pi[256] = {
-	252, 238, 221, 17, 207, 110, 49, 22,
-	251, 196, 250, 218, 35, 197, 4, 77,
-	233, 119, 240, 219, 147, 46, 153, 186,
-	23, 54, 241, 187, 20, 205, 95, 193,
-	249, 24, 101, 90, 226, 92, 239, 33,
-	129, 28, 60, 66, 139, 1, 142, 79,
-	5, 132, 2, 174, 227, 106, 143, 160,
-	6, 11, 237, 152, 127, 212, 211, 31,
-	235, 52, 44, 81, 234, 200, 72, 171,
-	242, 42, 104, 162, 253, 58, 206, 204,
-	181, 112, 14, 86, 8, 12, 118, 18,
-	191, 114, 19, 71, 156, 183, 93, 135,
-	21, 161, 150, 41, 16, 123, 154, 199,
-	243, 145, 120, 111, 157, 158, 178, 177,
-	50, 117, 25, 61, 255, 53, 138, 126,
-	109, 84, 198, 128, 195, 189, 13, 87,
-	223, 245, 36, 169, 62, 168, 67, 201,
-	215, 121, 214, 246, 124, 34, 185, 3,
-	224, 15, 236, 222, 122, 148, 176, 188,
-	220, 232, 40, 80, 78, 51, 10, 74,
-	167, 151, 96, 115, 30, 0, 98, 68,
-	26, 184, 56, 130, 100, 159, 38, 65,
-	173, 69, 70, 146, 39, 94, 85, 47,
-	140, 163, 165, 125, 105, 213, 149, 59,
-	7, 88, 179, 64, 134, 172, 29, 247,
-	48, 55, 107, 228, 136, 217, 231, 137,
-	225, 27, 131, 73, 76, 63, 248, 254,
-	141, 83, 170, 144, 202, 216, 133, 97,
-	32, 113, 103, 164, 45, 43, 9, 91,
-	203, 155, 37, 208, 190, 229, 108, 82,
-	89, 166, 116, 210, 230, 244, 180, 192,
-	209, 102, 175, 194, 57, 75, 99, 182
-};
-
 static const unsigned long long Ax[8][256] = {
 	{
 		0xd01f715b5c7ef8e6ULL, 0x16fa240980778325ULL, 0xa8a42e857ee049c8ULL,
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index ad78ab5b93cb..83ad0b1fab30 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -2327,6 +2327,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
 				   0, speed_template_32);
 		break;
 
+	case 220:
+		test_acipher_speed("essiv(cbc(aes),sha256)",
+				ENCRYPT, sec, NULL, 0,
+				speed_template_16_24_32);
+		test_acipher_speed("essiv(cbc(aes),sha256)",
+				DECRYPT, sec, NULL, 0,
+				speed_template_16_24_32);
+		break;
+
+	case 221:
+		test_aead_speed("aegis128", ENCRYPT, sec,
+				NULL, 0, 16, 8, speed_template_16);
+		test_aead_speed("aegis128", DECRYPT, sec,
+				NULL, 0, 16, 8, speed_template_16);
+		break;
+
 	case 300:
 		if (alg) {
 			test_hash_speed(alg, sec, generic_hash_speed_template);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d0b5b33806a6..c39e39e55dc2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3887,18 +3887,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.aead = __VECS(aegis128_tv_template)
 		}
 	}, {
-		.alg = "aegis128l",
-		.test = alg_test_aead,
-		.suite = {
-			.aead = __VECS(aegis128l_tv_template)
-		}
-	}, {
-		.alg = "aegis256",
-		.test = alg_test_aead,
-		.suite = {
-			.aead = __VECS(aegis256_tv_template)
-		}
-	}, {
 		.alg = "ansi_cprng",
 		.test = alg_test_cprng,
 		.suite = {
@@ -4557,6 +4545,20 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.akcipher = __VECS(ecrdsa_tv_template)
 		}
 	}, {
+		.alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)",
+		.test = alg_test_aead,
+		.fips_allowed = 1,
+		.suite = {
+			.aead = __VECS(essiv_hmac_sha256_aes_cbc_tv_temp)
+		}
+	}, {
+		.alg = "essiv(cbc(aes),sha256)",
+		.test = alg_test_skcipher,
+		.fips_allowed = 1,
+		.suite = {
+			.cipher = __VECS(essiv_aes_cbc_tv_template)
+		}
+	}, {
 		.alg = "gcm(aes)",
 		.generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
 		.test = alg_test_aead,
@@ -4741,6 +4743,16 @@ static const struct alg_test_desc alg_test_descs[] = {
 			}
 		}
 	}, {
+		.alg = "lzo-rle",
+		.test = alg_test_comp,
+		.fips_allowed = 1,
+		.suite = {
+			.comp = {
+				.comp = __VECS(lzorle_comp_tv_template),
+				.decomp = __VECS(lzorle_decomp_tv_template)
+			}
+		}
+	}, {
 		.alg = "md4",
 		.test = alg_test_hash,
 		.suite = {
@@ -4759,18 +4771,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.hash = __VECS(michael_mic_tv_template)
 		}
 	}, {
-		.alg = "morus1280",
-		.test = alg_test_aead,
-		.suite = {
-			.aead = __VECS(morus1280_tv_template)
-		}
-	}, {
-		.alg = "morus640",
-		.test = alg_test_aead,
-		.suite = {
-			.aead = __VECS(morus640_tv_template)
-		}
-	}, {
 		.alg = "nhpoly1305",
 		.test = alg_test_hash,
 		.suite = {
@@ -5240,9 +5240,11 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 					     type, mask);
 
 test_done:
-	if (rc && (fips_enabled || panic_on_fail))
+	if (rc && (fips_enabled || panic_on_fail)) {
+		fips_fail_notify();
 		panic("alg: self-tests for %s (%s) failed in %s mode!\n",
 		      driver, alg, fips_enabled ? "fips" : "panic_on_fail");
+	}
 
 	if (fips_enabled && !rc)
 		pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 073bd2efafca..ef7d21f39d4a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -19489,2697 +19489,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
19489}; 19489};
19490 19490
19491/* 19491/*
19492 * AEGIS-128L test vectors - generated via reference implementation from
19493 * SUPERCOP (https://bench.cr.yp.to/supercop.html):
19494 *
19495 * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
19496 * (see crypto_aead/aegis128l/)
19497 */
19498static const struct aead_testvec aegis128l_tv_template[] = {
19499 {
19500 .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
19501 "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
19502 .klen = 16,
19503 .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d"
19504 "\x40\x6d\x59\x48\xfc\x92\x61\x03",
19505 .assoc = "",
19506 .alen = 0,
19507 .ptext = "",
19508 .plen = 0,
19509 .ctext = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6"
19510 "\x20\x72\x78\xdd\x93\xc8\x57\xef",
19511 .clen = 16,
19512 }, {
19513 .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
19514 "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
19515 .klen = 16,
19516 .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29"
19517 "\xc1\x47\x0b\xda\xf6\xb6\x23\x09",
19518 .assoc = "",
19519 .alen = 0,
19520 .ptext = "\x79",
19521 .plen = 1,
19522 .ctext = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb"
19523 "\x40\xb3\x71\xc5\x22\x58\x31\x77"
19524 "\x6d",
19525 .clen = 17,
19526 }, {
19527 .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
19528 "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
19529 .klen = 16,
19530 .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45"
19531 "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f",
19532 .assoc = "",
19533 .alen = 0,
19534 .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
19535 "\x82\x8e\x16\xb4\xed\x6d\x47",
19536 .plen = 15,
19537 .ctext = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03"
19538 "\x2b\xee\x62\x99\x7b\x98\x13\x1f"
19539 "\xe0\x76\x4c\x2e\x53\x99\x4f\xbe"
19540 "\xe1\xa8\x04\x7f\xe1\x71\xbe",
19541 .clen = 31,
19542 }, {
19543 .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
19544 "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
19545 .klen = 16,
19546 .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61"
19547 "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15",
19548 .assoc = "",
19549 .alen = 0,
19550 .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
19551 "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
19552 .plen = 16,
19553 .ctext = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c"
19554 "\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e"
19555 "\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb"
19556 "\xde\x54\x9b\xf5\x92\x8b\x93\xc5",
19557 .clen = 32,
19558 }, {
19559 .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
19560 "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
19561 .klen = 16,
19562 .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d"
19563 "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c",
19564 .assoc = "",
19565 .alen = 0,
19566 .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
19567 "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
19568 "\xd3",
19569 .plen = 17,
19570 .ctext = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b"
19571 "\x71\xc3\x8a\x91\x22\x4f\x1b\xd2"
19572 "\x33\x6d\x86\xbc\xf8\x2f\x06\xf9"
19573 "\x82\x64\xc7\x72\x00\x30\xfc\xf0"
19574 "\xf8",
19575 .clen = 33,
19576 }, {
19577 .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
19578 "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
19579 .klen = 16,
19580 .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98"
19581 "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22",
19582 .assoc = "",
19583 .alen = 0,
19584 .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
19585 "\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
19586 "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
19587 "\x88\x11\x39\x12\x1c\x3a\xbb",
19588 .plen = 31,
19589 .ctext = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5"
19590 "\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1"
19591 "\x45\x10\x96\xe0\xbf\x55\x0f\x4c"
19592 "\x1a\xfd\xf4\xda\x4e\x10\xde\xc9"
19593 "\x0e\x6f\xc7\x3c\x49\x94\x41\xfc"
19594 "\x59\x28\x88\x3c\x79\x10\x6b",
19595 .clen = 47,
19596 }, {
19597 .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
19598 "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
19599 .klen = 16,
19600 .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4"
19601 "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28",
19602 .assoc = "",
19603 .alen = 0,
19604 .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
19605 "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
19606 "\x28\x50\x51\x9d\x24\x60\x8d\xb3"
19607 "\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
19608 .plen = 32,
19609 .ctext = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d"
19610 "\x1c\x9f\x06\x54\x8b\x0b\xb4\x40"
19611 "\xde\x11\x59\x3e\xfd\x74\xf6\x42"
19612 "\x97\x17\xf7\x24\xb6\x7e\xc4\xc6"
19613 "\x06\xa3\x94\xda\x3d\x7f\x55\x0a"
19614 "\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc",
19615 .clen = 48,
19616 }, {
19617 .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
19618 "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
19619 .klen = 16,
19620 .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0"
19621 "\xc6\x64\x37\x42\xd2\x90\xb3\x2e",
19622 .assoc = "\xd5",
19623 .alen = 1,
19624 .ptext = "",
19625 .plen = 0,
19626 .ctext = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8"
19627 "\x0f\x9a\x15\x60\x0e\x9b\x13\x8f",
19628 .clen = 16,
19629 }, {
19630 .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
19631 "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
19632 .klen = 16,
19633 .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
19634 "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34",
19635 .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
19636 "\x68\x75\x16\xf8\xcb\x7e\xa7",
19637 .alen = 15,
19638 .ptext = "",
19639 .plen = 0,
19640 .ctext = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c"
19641 "\x84\xfb\x26\xfb\xd5\xca\x62\x39",
19642 .clen = 16,
19643 }, {
19644 .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
19645 "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
19646 .klen = 16,
19647 .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
19648 "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b",
19649 .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
19650 "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
19651 .alen = 16,
19652 .ptext = "",
19653 .plen = 0,
19654 .ctext = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1"
19655 "\x99\x4d\x6d\xb4\x67\x32\xb0\x3a",
19656 .clen = 16,
19657 }, {
19658 .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
19659 "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
19660 .klen = 16,
19661 .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
19662 "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41",
19663 .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab"
19664 "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
19665 "\x07",
19666 .alen = 17,
19667 .ptext = "",
19668 .plen = 0,
19669 .ctext = "\x33\xb1\x42\x97\x8e\x16\x7b\x63"
19670 "\x06\xba\x5b\xcb\xae\x6d\x8b\x56",
19671 .clen = 16,
19672 }, {
19673 .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
19674 "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
19675 .klen = 16,
19676 .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
19677 "\xca\xcd\xff\x88\xba\x22\xbe\x47",
19678 .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6"
19679 "\xea\x03\x2c\xac\xb9\xeb\xef\xc9"
19680 "\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
19681 "\xe0\x17\x3a\x2e\x83\x5c\x8f",
19682 .alen = 31,
19683 .ptext = "",
19684 .plen = 0,
19685 .ctext = "\xda\x44\x08\x8c\x2a\xa5\x07\x35"
19686 "\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f",
19687 .clen = 16,
19688 }, {
19689 .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
19690 "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
19691 .klen = 16,
19692 .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
19693 "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d",
19694 .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2"
19695 "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf"
19696 "\x5c\x2d\x14\x96\x01\x78\xb9\x47"
19697 "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
19698 .alen = 32,
19699 .ptext = "",
19700 .plen = 0,
19701 .ctext = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88"
19702 "\x40\x7f\x7b\x19\x7a\x52\x8c\xf0",
19703 .clen = 16,
19704 }, {
19705 .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
19706 "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
19707 .klen = 16,
19708 .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77"
19709 "\xcc\x81\x63\xab\xae\x6b\x43\x54",
19710 .assoc = "\x40",
19711 .alen = 1,
19712 .ptext = "\x4f",
19713 .plen = 1,
19714 .ctext = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9"
19715 "\xa0\x98\x09\x85\xbe\x56\x8e\x79"
19716 "\xf4",
19717 .clen = 17,
19718 }, {
19719 .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
19720 "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
19721 .klen = 16,
19722 .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
19723 "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a",
19724 .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
19725 "\x6d\x92\x42\x61\xa7\x58\x37",
19726 .alen = 15,
19727 .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
19728 "\x8d\xc8\x6e\x85\xa5\x21\x67",
19729 .plen = 15,
19730 .ctext = "\x99\x2e\x84\x50\x64\x5c\xab\x29"
19731 "\x20\xba\xb9\x2f\x62\x3a\xce\x2a"
19732 "\x75\x25\x3b\xe3\x40\xe0\x1d\xfc"
19733 "\x20\x63\x0b\x49\x7e\x97\x08",
19734 .clen = 31,
19735 }, {
19736 .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
19737 "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
19738 .klen = 16,
19739 .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
19740 "\xce\x36\xc7\xce\xa2\xb4\xc9\x60",
19741 .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
19742 "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
19743 .alen = 16,
19744 .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
19745 "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
19746 .plen = 16,
19747 .ctext = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee"
19748 "\x78\x08\x12\xec\x09\xaf\x53\x14"
19749 "\x90\x3e\x3d\x76\xad\x71\x21\x08"
19750 "\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb",
19751 .clen = 32,
19752 }, {
19753 .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
19754 "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
19755 .klen = 16,
19756 .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
19757 "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66",
19758 .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
19759 "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
19760 "\x05",
19761 .alen = 17,
19762 .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
19763 "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
19764 "\xd0",
19765 .plen = 17,
19766 .ctext = "\xf3\xe7\x95\x86\xcf\x34\x95\x96"
19767 "\x17\xfe\x1b\xae\x1b\x31\xf2\x1a"
19768 "\xbd\xbc\xc9\x4e\x11\x29\x09\x5c"
19769 "\x05\xd3\xb4\x2e\x4a\x74\x59\x49"
19770 "\x7d",
19771 .clen = 33,
19772 }, {
19773 .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
19774 "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
19775 .klen = 16,
19776 .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
19777 "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d",
19778 .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
19779 "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
19780 "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
19781 "\x68\x28\x73\x40\x9f\x96\x4a",
19782 .alen = 31,
19783 .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
19784 "\x10\x57\x85\x39\x93\x8f\xaf\x70"
19785 "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
19786 "\x98\x34\xab\x37\x56\xae\x32",
19787 .plen = 31,
19788 .ctext = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24"
19789 "\x0d\x19\x15\x61\x65\x3b\x06\x26"
19790 "\x71\xe8\x7e\x16\xdb\x96\x01\x01"
19791 "\x52\xcd\x49\x5b\x07\x33\x4e\xe7"
19792 "\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5"
19793 "\xed\x90\xce\xb9\xcd\xcc\xa1",
19794 .clen = 47,
19795 }, {
19796 .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
19797 "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
19798 .klen = 16,
19799 .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
19800 "\x50\xc4\xde\x82\x90\x21\x11\x73",
19801 .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
19802 "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
19803 "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
19804 "\x29\x56\x52\x19\x79\xf5\xe9\x37",
19805 .alen = 32,
19806 .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
19807 "\x91\x31\x37\xcb\x8d\xb3\x72\x76"
19808 "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
19809 "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
19810 .plen = 32,
19811 .ctext = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1"
19812 "\xbc\x0f\x35\x97\x97\x0c\x4b\x18"
19813 "\xce\x58\xc8\x3b\xd4\x85\x93\x79"
19814 "\xcc\x9c\xea\xc1\x73\x13\x0b\x4c"
19815 "\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56"
19816 "\x64\x4e\x47\xce\xb2\xb4\x92\xb4",
19817 .clen = 48,
19818 }, {
19819 .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
19820 "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
19821 .klen = 16,
19822 .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
19823 "\xd1\x9e\x90\x13\x8a\x45\xd3\x79",
19824 .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
19825 "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
19826 "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
19827 "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
19828 "\x9d",
19829 .alen = 33,
19830 .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
19831 "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
19832 "\x4f\x2e\xe8\x55\x66\x80\x27\x00"
19833 "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
19834 "\x21\x78\x55\x9d\x9c\x65\x7b\xcd"
19835 "\x0a\x34\x97\xff\x47\x37\xb0\x2a"
19836 "\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
19837 "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
19838 "\xbd",
19839 .plen = 65,
19840 .ctext = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8"
19841 "\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f"
19842 "\x4b\xfa\xc8\x2e\x67\x43\xf3\x63"
19843 "\x42\x8e\x99\x5a\x9c\x0b\x84\x77"
19844 "\xbc\x46\x76\x48\x82\xc7\x57\x96"
19845 "\xe1\x65\xd1\xed\x1d\xdd\x80\x24"
19846 "\xa6\x4d\xa9\xf1\x53\x8b\x5e\x0e"
19847 "\x26\xb9\xcc\x37\xe5\x43\xe1\x5a"
19848 "\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d"
19849 "\xf7\x33\x64\xc1\xd3\xf2\xfc\x35"
19850 "\x01",
19851 .clen = 81,
19852 }, {
19853 .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
19854 "\x32\x42\x15\x80\x85\xa1\x65\xfe",
19855 .klen = 16,
19856 .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
19857 "\x52\x79\x42\xa5\x84\x6a\x96\x7f",
19858 .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
19859 "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
19860 "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
19861 "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
19862 "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
19863 "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
19864 "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
19865 "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
19866 "\x54",
19867 .alen = 65,
19868 .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
19869 "\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
19870 "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
19871 "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
19872 "\x2f",
19873 .plen = 33,
19874 .ctext = "\x4c\xa9\xac\x71\xed\x10\xa6\x24"
19875 "\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb"
19876 "\x05\xc9\xd6\x97\xb6\x10\x7f\x17"
19877 "\xc2\xc0\x93\xcf\xe0\x94\xfd\x99"
19878 "\xf2\x62\x25\x28\x01\x23\x6f\x8b"
19879 "\x04\x52\xbc\xb0\x3e\x66\x52\x90"
19880 "\x9f",
19881 .clen = 49,
19882 }, {
19883 .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
19884 "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
19885 .klen = 16,
19886 .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
19887 "\xd3\x53\xf4\x36\x7e\x8e\x59\x85",
19888 .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
19889 "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
19890 .alen = 16,
19891 .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
19892 "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
19893 .plen = 16,
19894 .ctext = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5"
19895 "\x96\xe6\x97\xe4\x10\xeb\x40\x95"
19896 "\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec"
19897 "\x05\xa8\x31\x50\x11\x19\x44",
19898 .clen = 31,
19899 }, {
19900 .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
19901 "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
19902 .klen = 16,
19903 .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
19904 "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c",
19905 .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
19906 "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
19907 .alen = 16,
19908 .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
19909 "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
19910 .plen = 16,
19911 .ctext = "\x30\x95\x7d\xea\xdc\x62\xc0\x88"
19912 "\xa1\xe3\x8d\x8c\xac\x04\x10\xa7"
19913 "\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb"
19914 "\x21\x93\x2e\x31\x84\x83",
19915 .clen = 30,
19916 }, {
19917 .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
19918 "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
19919 .klen = 16,
19920 .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
19921 "\xd5\x07\x58\x59\x72\xd7\xde\x92",
19922 .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
19923 "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
19924 .alen = 16,
19925 .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
19926 "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
19927 .plen = 16,
19928 .ctext = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16"
19929 "\x63\x0d\x43\xd5\x49\xca\xa8\x85"
19930 "\x49\xc0\xae\x13\xbc\x26\x1d\x4b",
19931 .clen = 24,
19932 },
19933};
19934
19935/*
19936 * AEGIS-256 test vectors - generated via reference implementation from
19937 * SUPERCOP (https://bench.cr.yp.to/supercop.html):
19938 *
19939 * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
19940 * (see crypto_aead/aegis256/)
19941 */
19942static const struct aead_testvec aegis256_tv_template[] = {
19943 {
19944 .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
19945 "\x20\x36\x2c\x24\xfe\xc9\x30\x81"
19946 "\xca\xb0\x82\x21\x41\xa8\xe0\x06"
19947 "\x30\x0b\x37\xf6\xb6\x17\xe7\xb5",
19948 .klen = 32,
19949 .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d"
19950 "\x40\x6d\x59\x48\xfc\x92\x61\x03"
19951 "\x95\x61\x05\x42\x82\x50\xc0\x0c"
19952 "\x60\x16\x6f\xec\x6d\x2f\xcf\x6b",
19953 .assoc = "",
19954 .alen = 0,
19955 .ptext = "",
19956 .plen = 0,
19957 .ctext = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa"
19958 "\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2",
19959 .clen = 16,
19960 }, {
19961 .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
19962 "\xa1\x10\xde\xb5\xf8\xed\xf3\x87"
19963 "\xf4\x72\x8e\xa5\x46\x48\x62\x20"
19964 "\xf1\x38\x16\xce\x90\x76\x87\x8c",
19965 .klen = 32,
19966 .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29"
19967 "\xc1\x47\x0b\xda\xf6\xb6\x23\x09"
19968 "\xbf\x23\x11\xc6\x87\xf0\x42\x26"
19969 "\x22\x44\x4e\xc4\x47\x8e\x6e\x41",
19970 .assoc = "",
19971 .alen = 0,
19972 .ptext = "\x79",
19973 .plen = 1,
19974 .ctext = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16"
19975 "\x9e\x89\xd9\x06\xa6\xa8\x14\x29"
19976 "\x8b",
19977 .clen = 17,
19978 }, {
19979 .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
19980 "\x22\xea\x90\x47\xf2\x11\xb5\x8e"
19981 "\x1f\x35\x9a\x29\x4b\xe8\xe4\x39"
19982 "\xb3\x66\xf5\xa6\x6a\xd5\x26\x62",
19983 .klen = 32,
19984 .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45"
19985 "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f"
19986 "\xe9\xe5\x1d\x4a\x8c\x90\xc4\x40"
19987 "\xe3\x71\x2d\x9c\x21\xed\x0e\x18",
19988 .assoc = "",
19989 .alen = 0,
19990 .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
19991 "\x82\x8e\x16\xb4\xed\x6d\x47",
19992 .plen = 15,
19993 .ctext = "\x09\x94\x1f\xa6\x13\xc3\x74\x75"
19994 "\x17\xad\x8a\x0e\xd8\x66\x9a\x28"
19995 "\xd7\x30\x66\x09\x2a\xdc\xfa\x2a"
19996 "\x9f\x3b\xd7\xdd\x66\xd1\x2b",
19997 .clen = 31,
19998 }, {
19999 .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
20000 "\xa2\xc5\x42\xd8\xec\x36\x78\x94"
20001 "\x49\xf7\xa5\xad\x50\x88\x66\x53"
20002 "\x74\x94\xd4\x7f\x44\x34\xc5\x39",
20003 .klen = 32,
20004 .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61"
20005 "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15"
20006 "\x14\xa8\x28\xce\x92\x30\x46\x59"
20007 "\xa4\x9f\x0b\x75\xfb\x4c\xad\xee",
20008 .assoc = "",
20009 .alen = 0,
20010 .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
20011 "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
20012 .plen = 16,
20013 .ctext = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f"
20014 "\x54\x63\x4e\x7f\xc9\x8e\xfa\x70"
20015 "\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1"
20016 "\x29\x17\xc8\x3b\x52\xa4\x98\x72",
20017 .clen = 32,
20018 }, {
20019 .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
20020 "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a"
20021 "\x74\xb9\xb1\x32\x55\x28\xe8\x6d"
20022 "\x35\xc1\xb3\x57\x1f\x93\x64\x0f",
20023 .klen = 32,
20024 .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d"
20025 "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c"
20026 "\x3e\x6a\x34\x53\x97\xd0\xc8\x73"
20027 "\x66\xcd\xea\x4d\xd5\xab\x4c\xc5",
20028 .assoc = "",
20029 .alen = 0,
20030 .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
20031 "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f"
20032 "\xd3",
20033 .plen = 17,
20034 .ctext = "\x71\x6b\x37\x0b\x02\x61\x28\x12"
20035 "\x83\xab\x66\x90\x84\xc7\xd1\xc5"
20036 "\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2"
20037 "\xc0\x00\x39\x13\xb5\x51\x68\x44"
20038 "\xad",
20039 .clen = 33,
20040 }, {
20041 .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
20042 "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0"
20043 "\x9e\x7c\xbc\xb6\x5b\xc8\x6a\x86"
20044 "\xf7\xef\x91\x30\xf9\xf2\x04\xe6",
20045 .klen = 32,
20046 .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98"
20047 "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22"
20048 "\x69\x2c\x3f\xd7\x9c\x70\x4a\x8d"
20049 "\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c",
20050 .assoc = "",
20051 .alen = 0,
20052 .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
20053 "\x05\x1d\x2c\x68\xdb\xda\x8f\x25"
20054 "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99"
20055 "\x88\x11\x39\x12\x1c\x3a\xbb",
20056 .plen = 31,
20057 .ctext = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f"
20058 "\x06\x3b\x52\x18\x49\x75\x1b\xf0"
20059 "\x53\x09\x72\x7b\x45\x79\xe0\xbe"
20060 "\x89\x85\x23\x15\xb8\x79\x07\x4c"
20061 "\x53\x7a\x15\x37\x0a\xee\xb7\xfb"
20062 "\xc4\x1f\x12\x27\xcf\x77\x90",
20063 .clen = 47,
20064 }, {
20065 .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
20066 "\x25\x53\x58\x8c\xda\xa3\xc0\xa6"
20067 "\xc8\x3e\xc8\x3a\x60\x68\xec\xa0"
20068 "\xb8\x1c\x70\x08\xd3\x51\xa3\xbd",
20069 .klen = 32,
20070 .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4"
20071 "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28"
20072 "\x93\xef\x4b\x5b\xa1\x10\xcc\xa6"
20073 "\xe8\x28\xa8\xfe\x89\x69\x8b\x72",
20074 .assoc = "",
20075 .alen = 0,
20076 .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
20077 "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b"
20078 "\x28\x50\x51\x9d\x24\x60\x8d\xb3"
20079 "\x49\x3e\x17\xea\xf6\x99\x5a\xdd",
20080 .plen = 32,
20081 .ctext = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4"
20082 "\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7"
20083 "\xeb\x0f\x05\x2b\xba\xb3\xca\xef"
20084 "\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5"
20085 "\xbf\x7a\x3e\xcc\x31\x76\x09\x80"
20086 "\x32\x5d\xbb\xe8\x38\x0e\x77\xd3",
20087 .clen = 48,
20088 }, {
20089 .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
20090 "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad"
20091 "\xf3\x00\xd4\xbf\x65\x08\x6e\xb9"
20092 "\x7a\x4a\x4f\xe0\xad\xb0\x42\x93",
20093 .klen = 32,
20094 .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0"
20095 "\xc6\x64\x37\x42\xd2\x90\xb3\x2e"
20096 "\xbd\xb1\x57\xe0\xa6\xb0\x4e\xc0"
20097 "\xaa\x55\x87\xd6\x63\xc8\x2a\x49",
20098 .assoc = "\xd5",
20099 .alen = 1,
20100 .ptext = "",
20101 .plen = 0,
20102 .ctext = "\x96\x43\x30\xca\x6c\x4f\xd7\x12"
20103 "\xba\xd9\xb3\x18\x86\xdf\xc3\x52",
20104 .clen = 16,
20105 }, {
20106 .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
20107 "\x27\x08\xbd\xaf\xce\xec\x45\xb3"
20108 "\x1d\xc3\xdf\x43\x6a\xa8\xf0\xd3"
20109 "\x3b\x77\x2e\xb9\x87\x0f\xe1\x6a",
20110 .klen = 32,
20111 .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
20112 "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34"
20113 "\xe8\x73\x62\x64\xab\x50\xd0\xda"
20114 "\x6b\x83\x66\xaf\x3e\x27\xc9\x1f",
20115 .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73"
20116 "\x68\x75\x16\xf8\xcb\x7e\xa7",
20117 .alen = 15,
20118 .ptext = "",
20119 .plen = 0,
20120 .ctext = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83"
20121 "\x11\x9f\xb0\x74\xee\xc7\x03\xdd",
20122 .clen = 16,
20123 }, {
20124 .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
20125 "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9"
20126 "\x47\x85\xeb\xc7\x6f\x48\x72\xed"
20127 "\xfc\xa5\x0d\x91\x61\x6e\x81\x40",
20128 .klen = 32,
20129 .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
20130 "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b"
20131 "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3"
20132 "\x2d\xb0\x45\x87\x18\x86\x68\xf6",
20133 .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f"
20134 "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc",
20135 .alen = 16,
20136 .ptext = "",
20137 .plen = 0,
20138 .ctext = "\x16\x44\x73\x33\x5d\xf2\xb9\x04"
20139 "\x6b\x79\x98\xef\xdb\xd5\xc5\xf1",
20140 .clen = 16,
20141 }, {
20142 .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
20143 "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf"
20144 "\x72\x47\xf6\x4b\x74\xe8\xf4\x06"
20145 "\xbe\xd3\xec\x6a\x3b\xcd\x20\x17",
20146 .klen = 32,
20147 .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
20148 "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
20149 "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d"
20150 "\xee\xde\x23\x60\xf2\xe5\x08\xcc",
20151 .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab"
20152 "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2"
20153 "\x07",
20154 .alen = 17,
20155 .ptext = "",
20156 .plen = 0,
20157 .ctext = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45"
20158 "\x98\x54\x8c\xed\x3d\x17\xf0\xdd",
20159 .clen = 16,
20160 }, {
20161 .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
20162 "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6"
20163 "\x9c\x0a\x02\xd0\x79\x88\x76\x20"
20164 "\x7f\x00\xca\x42\x15\x2c\xbf\xed",
20165 .klen = 32,
20166 .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
20167 "\xca\xcd\xff\x88\xba\x22\xbe\x47"
20168 "\x67\xba\x85\xf1\xbb\x30\x56\x26"
20169 "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3",
20170 .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6"
20171 "\xea\x03\x2c\xac\xb9\xeb\xef\xc9"
20172 "\x31\x6b\x08\x12\xfc\xd8\x37\x2d"
20173 "\xe0\x17\x3a\x2e\x83\x5c\x8f",
20174 .alen = 31,
20175 .ptext = "",
20176 .plen = 0,
20177 .ctext = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0"
20178 "\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc",
20179 .clen = 16,
20180 }, {
20181 .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
20182 "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc"
20183 "\xc6\xcc\x0e\x54\x7f\x28\xf8\x3a"
20184 "\x40\x2e\xa9\x1a\xf0\x8b\x5e\xc4",
20185 .klen = 32,
20186 .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
20187 "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d"
20188 "\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
20189 "\x71\x39\xe1\x10\xa6\xa3\x46\x7a",
20190 .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2"
20191 "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf"
20192 "\x5c\x2d\x14\x96\x01\x78\xb9\x47"
20193 "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f",
20194 .alen = 32,
20195 .ptext = "",
20196 .plen = 0,
20197 .ctext = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1"
20198 "\xd2\x64\x3e\x66\x6a\xb2\x03\xc0",
20199 .clen = 16,
20200 }, {
20201 .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
20202 "\xac\x4b\x37\x86\xb0\xa2\x13\xd2"
20203 "\xf1\x8e\x19\xd8\x84\xc8\x7a\x53"
20204 "\x02\x5b\x88\xf3\xca\xea\xfe\x9b",
20205 .klen = 32,
20206 .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77"
20207 "\xcc\x81\x63\xab\xae\x6b\x43\x54"
20208 "\xbb\x3f\x9c\xf9\xc5\x70\x5a\x5a"
20209 "\x32\x67\xc0\xe9\x80\x02\xe5\x50",
20210 .assoc = "\x40",
20211 .alen = 1,
20212 .ptext = "\x4f",
20213 .plen = 1,
20214 .ctext = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b"
20215 "\x7a\x3f\x81\xf7\xfc\x1b\x79\x83"
20216 "\xc7",
20217 .clen = 17,
20218 }, {
20219 .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
20220 "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8"
20221 "\x1b\x50\x25\x5d\x89\x68\xfc\x6d"
20222 "\xc3\x89\x67\xcb\xa4\x49\x9d\x71",
20223 .klen = 32,
20224 .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
20225 "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a"
20226 "\xe6\x01\xa8\x7e\xca\x10\xdc\x73"
20227 "\xf4\x94\x9f\xc1\x5a\x61\x85\x27",
20228 .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
20229 "\x6d\x92\x42\x61\xa7\x58\x37",
20230 .alen = 15,
20231 .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
20232 "\x8d\xc8\x6e\x85\xa5\x21\x67",
20233 .plen = 15,
20234 .ctext = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba"
20235 "\x7e\x98\x83\x02\x34\x23\xf7\x94"
20236 "\xde\x35\xe6\x1d\x14\x18\xe5\x38"
20237 "\x14\x80\x6a\xa7\x1b\xae\x1d",
20238 .clen = 31,
20239 }, {
20240 .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
20241 "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf"
20242 "\x46\x13\x31\xe1\x8e\x08\x7e\x87"
20243 "\x85\xb6\x46\xa3\x7e\xa8\x3c\x48",
20244 .klen = 32,
20245 .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
20246 "\xce\x36\xc7\xce\xa2\xb4\xc9\x60"
20247 "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d"
20248 "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd",
20249 .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
20250 "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
20251 .alen = 16,
20252 .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
20253 "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
20254 .plen = 16,
20255 .ctext = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01"
20256 "\xbe\x28\x98\x10\x6f\xe9\x61\x32"
20257 "\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9"
20258 "\x46\x2d\x55\xe3\x42\x67\xf2\xaf",
20259 .clen = 32,
20260 }, {
20261 .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
20262 "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5"
20263 "\x70\xd5\x3c\x65\x93\xa8\x00\xa0"
20264 "\x46\xe4\x25\x7c\x58\x08\xdb\x1e",
20265 .klen = 32,
20266 .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
20267 "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
20268 "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7"
20269 "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4",
20270 .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
20271 "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
20272 "\x05",
20273 .alen = 17,
20274 .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
20275 "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69"
20276 "\xd0",
20277 .plen = 17,
20278 .ctext = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8"
20279 "\x45\x65\x1b\x72\x9b\xaa\xa3\x1e"
20280 "\x87\x9d\x26\xdf\xff\x81\x11\xd2"
20281 "\x47\x41\xb9\x24\xc1\x8a\xa3\x8b"
20282 "\x55",
20283 .clen = 33,
20284 }, {
20285 .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
20286 "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb"
20287 "\x9a\x97\x48\xe9\x98\x48\x82\xba"
20288 "\x07\x11\x04\x54\x32\x67\x7b\xf5",
20289 .klen = 32,
20290 .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
20291 "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d"
20292 "\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
20293 "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa",
20294 .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
20295 "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
20296 "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
20297 "\x68\x28\x73\x40\x9f\x96\x4a",
20298 .alen = 31,
20299 .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
20300 "\x10\x57\x85\x39\x93\x8f\xaf\x70"
20301 "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd"
20302 "\x98\x34\xab\x37\x56\xae\x32",
20303 .plen = 31,
20304 .ctext = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5"
20305 "\x69\x7f\x7a\xdd\x8c\x46\xda\xba"
20306 "\x0a\x5c\x0e\x7f\xac\xee\x02\xd2"
20307 "\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66"
20308 "\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b"
20309 "\xdf\xb8\xea\xd8\xa9\x38\xed",
20310 .clen = 47,
20311 }, {
20312 .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
20313 "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1"
20314 "\xc5\x5a\x53\x6e\x9d\xe8\x04\xd4"
20315 "\xc9\x3f\xe2\x2d\x0c\xc6\x1a\xcb",
20316 .klen = 32,
20317 .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
20318 "\x50\xc4\xde\x82\x90\x21\x11\x73"
20319 "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
20320 "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81",
20321 .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
20322 "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
20323 "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
20324 "\x29\x56\x52\x19\x79\xf5\xe9\x37",
20325 .alen = 32,
20326 .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
20327 "\x91\x31\x37\xcb\x8d\xb3\x72\x76"
20328 "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7"
20329 "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec",
20330 .plen = 32,
20331 .ctext = "\xd3\x68\x14\x70\x3c\x01\x43\x86"
20332 "\x02\xab\xbe\x75\xaa\xe7\xf5\x53"
20333 "\x5c\x05\xbd\x9b\x19\xbb\x2a\x61"
20334 "\x8f\x69\x05\x75\x8e\xca\x60\x0c"
20335 "\x5b\xa2\x48\x61\x32\x74\x11\x2b"
20336 "\xf6\xcf\x06\x78\x6f\x78\x1a\x4a",
20337 .clen = 48,
20338 }, {
20339 .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
20340 "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7"
20341 "\xef\x1c\x5f\xf2\xa3\x88\x86\xed"
20342 "\x8a\x6d\xc1\x05\xe7\x25\xb9\xa2",
20343 .klen = 32,
20344 .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
20345 "\xd1\x9e\x90\x13\x8a\x45\xd3\x79"
20346 "\xba\xcd\xe2\x13\xe4\x30\x66\xf4"
20347 "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58",
20348 .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
20349 "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
20350 "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
20351 "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
20352 "\x9d",
20353 .alen = 33,
20354 .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
20355 "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c"
20356 "\x4f\x2e\xe8\x55\x66\x80\x27\x00"
20357 "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3"
20358 "\x21\x78\x55\x9d\x9c\x65\x7b\xcd"
20359 "\x0a\x34\x97\xff\x47\x37\xb0\x2a"
20360 "\x80\x0d\x19\x98\x33\xa9\x7a\xe3"
20361 "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01"
20362 "\xbd",
20363 .plen = 65,
20364 .ctext = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2"
20365 "\x15\x3a\x6c\x72\x83\x9b\xb1\x75"
20366 "\xea\xf2\xfc\xff\xc6\xf1\x13\xa4"
20367 "\x1a\x93\x33\x79\x97\x82\x81\xc0"
20368 "\x96\xc2\x00\xab\x39\xae\xa1\x62"
20369 "\x53\xa3\x86\xc9\x07\x8c\xaf\x22"
20370 "\x47\x31\x29\xca\x4a\x95\xf5\xd5"
20371 "\x20\x63\x5a\x54\x80\x2c\x4a\x63"
20372 "\xfb\x18\x73\x31\x4f\x08\x21\x5d"
20373 "\x20\xe9\xc3\x7e\xea\x25\x77\x3a"
20374 "\x65",
20375 .clen = 81,
20376 }, {
20377 .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
20378 "\x32\x42\x15\x80\x85\xa1\x65\xfe"
20379 "\x19\xde\x6b\x76\xa8\x28\x08\x07"
20380 "\x4b\x9a\xa0\xdd\xc1\x84\x58\x79",
20381 .klen = 32,
20382 .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
20383 "\x52\x79\x42\xa5\x84\x6a\x96\x7f"
20384 "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d"
20385 "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e",
20386 .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
20387 "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
20388 "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
20389 "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
20390 "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
20391 "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
20392 "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
20393 "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
20394 "\x54",
20395 .alen = 65,
20396 .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
20397 "\x93\xe6\x9b\xee\x81\xfc\xf7\x82"
20398 "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a"
20399 "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99"
20400 "\x2f",
20401 .plen = 33,
20402 .ctext = "\x33\xc1\xda\xfa\x15\x21\x07\x8e"
20403 "\x93\x68\xea\x64\x7b\x3d\x4b\x6b"
20404 "\x71\x5e\x5e\x6b\x92\xaa\x65\xc2"
20405 "\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81"
20406 "\x26\x3a\x5a\x09\xe8\xce\x73\x72"
20407 "\xde\x7b\x58\x9e\x85\xb9\xa4\x28"
20408 "\xda",
20409 .clen = 49,
20410 }, {
20411 .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
20412 "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04"
20413 "\x44\xa1\x76\xfb\xad\xc8\x8a\x21"
20414 "\x0d\xc8\x7f\xb6\x9b\xe3\xf8\x4f",
20415 .klen = 32,
20416 .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
20417 "\xd3\x53\xf4\x36\x7e\x8e\x59\x85"
20418 "\x0e\x51\xf9\x1c\xee\x70\x6a\x27"
20419 "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05",
20420 .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
20421 "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
20422 .alen = 16,
20423 .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
20424 "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
20425 .plen = 16,
20426 .ctext = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02"
20427 "\x0f\xdf\xc9\x6e\x37\x1e\x57\x99"
20428 "\x07\x2a\x1a\xac\xd1\xda\xfd\x3b"
20429 "\xc7\xff\xbd\xbc\x85\x09\x0b",
20430 .clen = 31,
20431 }, {
20432 .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
20433 "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a"
20434 "\x6e\x63\x82\x7f\xb2\x68\x0c\x3a"
20435 "\xce\xf5\x5e\x8e\x75\x42\x97\x26",
20436 .klen = 32,
20437 .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
20438 "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c"
20439 "\x39\x14\x05\xa0\xf3\x10\xec\x41"
20440 "\xff\x01\x95\x84\x2b\x59\x7f\xdb",
20441 .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
20442 "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
20443 .alen = 16,
20444 .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
20445 "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
20446 .plen = 16,
20447 .ctext = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e"
20448 "\x6c\xd9\x84\x63\x70\x97\x61\x37"
20449 "\x08\x2f\x16\x90\x9e\x62\x30\x0d"
20450 "\x62\xd5\xc8\xf0\x46\x1a",
20451 .clen = 30,
20452 }, {
20453 .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
20454 "\xb5\xd1\x2b\x35\x73\x0e\xad\x10"
20455 "\x98\x25\x8d\x03\xb7\x08\x8e\x54"
20456 "\x90\x23\x3d\x67\x4f\xa1\x36\xfc",
20457 .klen = 32,
20458 .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
20459 "\xd5\x07\x58\x59\x72\xd7\xde\x92"
20460 "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a"
20461 "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2",
20462 .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
20463 "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
20464 .alen = 16,
20465 .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
20466 "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
20467 .plen = 16,
20468 .ctext = "\xce\xf3\x17\x87\x49\xc2\x00\x46"
20469 "\xc6\x12\x5c\x8f\x81\x38\xaa\x55"
20470 "\xf8\x67\x75\xf1\x75\xe3\x2a\x24",
20471 .clen = 24,
20472 },
20473};
20474
20475/*
20476 * MORUS-640 test vectors - generated via reference implementation from
20477 * SUPERCOP (https://bench.cr.yp.to/supercop.html):
20478 *
20479 * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
20480 * (see crypto_aead/morus640128v2/)
20481 */
20482static const struct aead_testvec morus640_tv_template[] = {
20483 {
20484 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
20485 "\x00\x00\x00\x00\x00\x00\x00\x00",
20486 .klen = 16,
20487 .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
20488 "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
20489 .assoc = "",
20490 .alen = 0,
20491 .ptext = "",
20492 .plen = 0,
20493 .ctext = "\x89\x62\x7d\xf3\x07\x9d\x52\x05"
20494 "\x53\xc3\x04\x60\x93\xb4\x37\x9a",
20495 .clen = 16,
20496 }, {
20497 .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
20498 "\x80\xda\xb2\x91\xf9\x24\xc2\x06",
20499 .klen = 16,
20500 .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
20501 "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
20502 .assoc = "",
20503 .alen = 0,
20504 .ptext = "\x69",
20505 .plen = 1,
20506 .ctext = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78"
20507 "\xb6\x10\x9a\x59\x5f\x61\x37\x70"
20508 "\x09",
20509 .clen = 17,
20510 }, {
20511 .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37"
20512 "\x01\xb4\x64\x22\xf3\x48\x85\x0c",
20513 .klen = 16,
20514 .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
20515 "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
20516 .assoc = "",
20517 .alen = 0,
20518 .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
20519 "\x62\x58\xe9\x8f\xef\xa4\x17",
20520 .plen = 15,
20521 .ctext = "\x76\xdd\xb9\x05\x3d\xce\x61\x38"
20522 "\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5"
20523 "\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b"
20524 "\xd4\x6e\xfe\xd9\xc8\x63\x4b",
20525 .clen = 31,
20526 }, {
20527 .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
20528 "\x82\x8e\x16\xb4\xed\x6d\x47\x12",
20529 .klen = 16,
20530 .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
20531 "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
20532 .assoc = "",
20533 .alen = 0,
20534 .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
20535 "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97",
20536 .plen = 16,
20537 .ctext = "\xdc\x72\xe8\x14\xfb\x63\xad\x72"
20538 "\x1f\x57\x9a\x1f\x88\x81\xdb\xd6"
20539 "\xc1\x91\x9d\xb9\x25\xc4\x99\x4c"
20540 "\x97\xcd\x8a\x0c\x9d\x68\x00\x1c",
20541 .clen = 32,
20542 }, {
20543 .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
20544 "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
20545 .klen = 16,
20546 .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
20547 "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
20548 .assoc = "",
20549 .alen = 0,
20550 .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
20551 "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d"
20552 "\x09",
20553 .plen = 17,
20554 .ctext = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82"
20555 "\x0a\xb8\x55\xee\xeb\x73\x4d\x7f"
20556 "\x54\x11\x3a\x8a\x31\xa3\xb5\xf2"
20557 "\xcd\x49\xdb\xf3\xee\x26\xbd\xa2"
20558 "\x0d",
20559 .clen = 33,
20560 }, {
20561 .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
20562 "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f",
20563 .klen = 16,
20564 .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
20565 "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
20566 .assoc = "",
20567 .alen = 0,
20568 .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
20569 "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3"
20570 "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93"
20571 "\x57\x05\x01\x1c\x66\x22\xd3",
20572 .plen = 31,
20573 .ctext = "\x59\xd1\x0f\x6b\xee\x27\x84\x92"
20574 "\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5"
20575 "\x50\x32\xb4\x9a\x2e\x35\x83\x55"
20576 "\x36\x12\x12\xed\xa3\x31\xc5\x30"
20577 "\xa7\xe2\x4a\x6d\x05\x59\x43\x91"
20578 "\x75\xfa\x6c\x17\xc6\x73\xca",
20579 .clen = 47,
20580 }, {
20581 .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
20582 "\x05\x1d\x2c\x68\xdb\xda\x8f\x25",
20583 .klen = 16,
20584 .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
20585 "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
20586 .assoc = "",
20587 .alen = 0,
20588 .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
20589 "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa"
20590 "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad"
20591 "\x19\x33\xe0\xf4\x40\x81\x72\x28",
20592 .plen = 32,
20593 .ctext = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1"
20594 "\xcf\x50\xb2\x4c\x32\xe1\xa6\x69"
20595 "\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39"
20596 "\x1b\xde\x68\x38\xcc\x27\x52\xc5"
20597 "\xf6\x3e\x74\xea\x66\x5b\x5f\x0c"
20598 "\x65\x9e\x58\xe6\x52\xa2\xfe\x59",
20599 .clen = 48,
20600 }, {
20601 .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
20602 "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b",
20603 .klen = 16,
20604 .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
20605 "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
20606 .assoc = "\xc5",
20607 .alen = 1,
20608 .ptext = "",
20609 .plen = 0,
20610 .ctext = "\x56\xe7\x24\x52\xdd\x95\x60\x5b"
20611 "\x09\x48\x39\x69\x9c\xb3\x62\x46",
20612 .clen = 16,
20613 }, {
20614 .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde"
20615 "\x07\xd1\x90\x8b\xcf\x23\x15\x31",
20616 .klen = 16,
20617 .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
20618 "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
20619 .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
20620 "\x47\x3e\xe9\xd4\xcc\xb5\x76",
20621 .alen = 15,
20622 .ptext = "",
20623 .plen = 0,
20624 .ctext = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01"
20625 "\x13\xe5\x73\x46\x46\xf2\x5c\xe1",
20626 .clen = 16,
20627 }, {
20628 .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa"
20629 "\x88\xab\x42\x1c\xc9\x47\xd7\x38",
20630 .klen = 16,
20631 .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
20632 "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
20633 .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
20634 "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b",
20635 .alen = 16,
20636 .ptext = "",
20637 .plen = 0,
20638 .ctext = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac"
20639 "\xa9\x21\x45\x0b\x16\x52\xf7\xe1",
20640 .clen = 16,
20641 }, {
20642 .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16"
20643 "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e",
20644 .klen = 16,
20645 .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
20646 "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
20647 .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
20648 "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
20649 "\x3c",
20650 .alen = 17,
20651 .ptext = "",
20652 .plen = 0,
20653 .ctext = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9"
20654 "\xbb\xa8\x62\xad\x0a\xf5\x48\x60",
20655 .clen = 16,
20656 }, {
20657 .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31"
20658 "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44",
20659 .klen = 16,
20660 .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
20661 "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
20662 .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
20663 "\xca\xcd\xff\x88\xba\x22\xbe\x47"
20664 "\x67\xba\x85\xf1\xbb\x30\x56\x26"
20665 "\xaf\x0b\x02\x38\xcc\x44\xa7",
20666 .alen = 31,
20667 .ptext = "",
20668 .plen = 0,
20669 .ctext = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d"
20670 "\x4b\xef\x75\x16\x0a\x99\xae\x6b",
20671 .clen = 16,
20672 }, {
20673 .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d"
20674 "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a",
20675 .klen = 16,
20676 .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
20677 "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
20678 .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
20679 "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d"
20680 "\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
20681 "\x71\x39\xe1\x10\xa6\xa3\x46\x7a",
20682 .alen = 32,
20683 .ptext = "",
20684 .plen = 0,
20685 .ctext = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f"
20686 "\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0",
20687 .clen = 16,
20688 }, {
20689 .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69"
20690 "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50",
20691 .klen = 16,
20692 .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
20693 "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
20694 .assoc = "\x31",
20695 .alen = 1,
20696 .ptext = "\x40",
20697 .plen = 1,
20698 .ctext = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38"
20699 "\x01\xfe\x84\x14\x85\xf8\xd1\xe3"
20700 "\x22",
20701 .clen = 17,
20702 }, {
20703 .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85"
20704 "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57",
20705 .klen = 16,
20706 .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
20707 "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
20708 .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
20709 "\x4d\x5b\x15\x3c\xa8\x8f\x06",
20710 .alen = 15,
20711 .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
20712 "\x6d\x92\x42\x61\xa7\x58\x37",
20713 .plen = 15,
20714 .ctext = "\x77\x32\x61\xeb\xb4\x33\x29\x92"
20715 "\x29\x95\xc5\x8e\x85\x76\xab\xfc"
20716 "\x07\x95\xa7\x44\x74\xf7\x22\xff"
20717 "\xd8\xd8\x36\x3d\x8a\x7f\x9e",
20718 .clen = 31,
20719 }, {
20720 .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
20721 "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d",
20722 .klen = 16,
20723 .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
20724 "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
20725 .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
20726 "\xce\x36\xc7\xce\xa2\xb4\xc9\x60",
20727 .alen = 16,
20728 .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
20729 "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2",
20730 .plen = 16,
20731 .ctext = "\xd8\xfd\x44\x45\xf6\x42\x12\x38"
20732 "\xf2\x0b\xea\x4f\x9e\x11\x61\x07"
20733 "\x48\x67\x98\x18\x9b\xd0\x0c\x59"
20734 "\x67\xa4\x11\xb3\x2b\xd6\xc1\x70",
20735 .clen = 32,
20736 }, {
20737 .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
20738 "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
20739 .klen = 16,
20740 .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
20741 "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
20742 .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
20743 "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
20744 "\x3b",
20745 .alen = 17,
20746 .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
20747 "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
20748 "\x05",
20749 .plen = 17,
20750 .ctext = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6"
20751 "\x71\x3a\x00\x9f\x41\x88\xb0\xb2"
20752 "\x71\x83\x85\x5f\xc8\x79\x0a\x99"
20753 "\x99\xdc\x89\x1c\x88\xd2\x3e\xf9"
20754 "\x83",
20755 .clen = 33,
20756 }, {
20757 .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
20758 "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69",
20759 .klen = 16,
20760 .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
20761 "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
20762 .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
20763 "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d"
20764 "\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
20765 "\x38\x1d\x3b\x4a\xe9\x7e\x62",
20766 .alen = 31,
20767 .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
20768 "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
20769 "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
20770 "\x68\x28\x73\x40\x9f\x96\x4a",
20771 .plen = 31,
20772 .ctext = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06"
20773 "\x5c\x7b\xef\x64\x87\x00\xd1\x37"
20774 "\xa7\x08\xbc\x7f\x8f\x41\x54\xd0"
20775 "\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a"
20776 "\x2d\x21\x30\xf9\x02\xdb\x06\x0c"
20777 "\xf1\x5a\x66\x69\xe0\xca\x83",
20778 .clen = 47,
20779 }, {
20780 .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
20781 "\x10\x57\x85\x39\x93\x8f\xaf\x70",
20782 .klen = 16,
20783 .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
20784 "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
20785 .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
20786 "\x50\xc4\xde\x82\x90\x21\x11\x73"
20787 "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
20788 "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81",
20789 .alen = 32,
20790 .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
20791 "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
20792 "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
20793 "\x29\x56\x52\x19\x79\xf5\xe9\x37",
20794 .plen = 32,
20795 .ctext = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2"
20796 "\x70\x57\x37\xc5\xc2\x4f\x8d\x14"
20797 "\xc6\xbf\x8b\xec\xf5\x62\x67\xf2"
20798 "\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54"
20799 "\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa"
20800 "\x7a\x00\x2e\x4d\x7f\x31\x2e\x81",
20801 .clen = 48,
20802 }, {
20803 .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
20804 "\x91\x31\x37\xcb\x8d\xb3\x72\x76",
20805 .klen = 16,
20806 .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
20807 "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
20808 .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
20809 "\xd1\x9e\x90\x13\x8a\x45\xd3\x79"
20810 "\xba\xcd\xe2\x13\xe4\x30\x66\xf4"
20811 "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58"
20812 "\x1a",
20813 .alen = 33,
20814 .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
20815 "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
20816 "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
20817 "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
20818 "\x9d\x4d\x54\x51\x84\x61\xf6\x8e"
20819 "\x03\x31\xf2\x25\x16\xcc\xaa\xc6"
20820 "\x75\x73\x20\x30\x59\x54\xb2\xf0"
20821 "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35"
20822 "\x8a",
20823 .plen = 65,
20824 .ctext = "\xc7\xca\x26\x61\x57\xee\xa2\xb9"
20825 "\xb1\x37\xde\x95\x06\x90\x11\x08"
20826 "\x4d\x30\x9f\x24\xc0\x56\xb7\xe1"
20827 "\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76"
20828 "\x56\x9a\xb4\x58\xc5\x08\xfc\xb5"
20829 "\xf2\x31\x9b\xc9\xcd\xb3\x64\xdb"
20830 "\x6f\x50\xbf\xf4\x73\x9d\xfb\x6b"
20831 "\xef\x35\x25\x48\xed\xcf\x29\xa8"
20832 "\xac\xc3\xb9\xcb\x61\x8f\x73\x92"
20833 "\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1"
20834 "\xc4",
20835 .clen = 81,
20836 }, {
20837 .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
20838 "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c",
20839 .klen = 16,
20840 .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
20841 "\x32\x42\x15\x80\x85\xa1\x65\xfe",
20842 .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
20843 "\x52\x79\x42\xa5\x84\x6a\x96\x7f"
20844 "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d"
20845 "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e"
20846 "\x28\xce\x57\x34\xcd\x6e\x84\x4c"
20847 "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1"
20848 "\x96\x41\x0d\x69\xe8\x54\x0a\xc8"
20849 "\x15\x4e\x91\x92\x89\x4b\xb7\x9b"
20850 "\x21",
20851 .alen = 65,
20852 .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
20853 "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
20854 "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
20855 "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
20856 "\xac",
20857 .plen = 33,
20858 .ctext = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b"
20859 "\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40"
20860 "\xf3\x78\x63\x34\x89\x79\x39\x6b"
20861 "\x61\x64\x4a\x9a\xfa\x70\xa4\xd3"
20862 "\x54\x0b\xea\x05\xa6\x95\x64\xed"
20863 "\x3d\x69\xa2\x0c\x27\x56\x2f\x34"
20864 "\x66",
20865 .clen = 49,
20866 }, {
20867 .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
20868 "\x93\xe6\x9b\xee\x81\xfc\xf7\x82",
20869 .klen = 16,
20870 .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
20871 "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
20872 .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
20873 "\xd3\x53\xf4\x36\x7e\x8e\x59\x85",
20874 .alen = 16,
20875 .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
20876 "\xf3\x89\x20\x5b\x7c\x57\x89\x07",
20877 .plen = 16,
20878 .ctext = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f"
20879 "\x33\x98\x87\xde\x08\xb6\xb6\xae"
20880 "\x3e\xa4\xf8\x19\xf1\x92\x60\x39"
20881 "\xb9\x6b\x3f\xdf\xc8\xcb\x30",
20882 .clen = 31,
20883 }, {
20884 .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
20885 "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
20886 .klen = 16,
20887 .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
20888 "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
20889 .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
20890 "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c",
20891 .alen = 16,
20892 .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
20893 "\x74\x63\xd2\xec\x76\x7c\x4c\x0d",
20894 .plen = 16,
20895 .ctext = "\x74\x7d\x70\x07\xe9\xba\x01\xee"
20896 "\x6c\xc6\x6f\x50\x25\x33\xbe\x50"
20897 "\x17\xb8\x17\x62\xed\x80\xa2\xf5"
20898 "\x03\xde\x85\x71\x5d\x34",
20899 .clen = 30,
20900 }, {
20901 .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
20902 "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
20903 .klen = 16,
20904 .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
20905 "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
20906 .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
20907 "\xd5\x07\x58\x59\x72\xd7\xde\x92",
20908 .alen = 16,
20909 .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
20910 "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13",
20911 .plen = 16,
20912 .ctext = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38"
20913 "\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf"
20914 "\xba\xea\xd4\xfa\x3f\x11\x33\x98",
20915 .clen = 24,
20916 }, {
20917 .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
20918 "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
20919 .klen = 16,
20920 .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22"
20921 "\x36\xab\xde\xc6\x6d\x32\x70\x17",
20922 .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9"
20923 "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98",
20924 .alen = 16,
20925 .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
20926 "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a",
20927 .plen = 16,
20928 .ctext = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86"
20929 "\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c"
20930 "\x6e",
20931 .clen = 17,
20932 },
20933};
20934
20935/*
20936 * MORUS-1280 test vectors - generated via reference implementation from
20937 * SUPERCOP (https://bench.cr.yp.to/supercop.html):
20938 *
20939 * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz
20940 * (see crypto_aead/morus1280128v2/ and crypto_aead/morus1280256v2/ )
20941 */
20942static const struct aead_testvec morus1280_tv_template[] = {
20943 {
20944 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
20945 "\x00\x00\x00\x00\x00\x00\x00\x00",
20946 .klen = 16,
20947 .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86"
20948 "\x20\x36\x2c\x24\xfe\xc9\x30\x81",
20949 .assoc = "",
20950 .alen = 0,
20951 .ptext = "",
20952 .plen = 0,
20953 .ctext = "\x91\x85\x0f\xf5\x52\x9e\xce\xce"
20954 "\x65\x99\xc7\xbf\xd3\x76\xe8\x98",
20955 .clen = 16,
20956 }, {
20957 .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
20958 "\x80\xda\xb2\x91\xf9\x24\xc2\x06",
20959 .klen = 16,
20960 .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
20961 "\xa1\x10\xde\xb5\xf8\xed\xf3\x87",
20962 .assoc = "",
20963 .alen = 0,
20964 .ptext = "\x69",
20965 .plen = 1,
20966 .ctext = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13"
20967 "\x96\xda\x76\x34\x33\x4e\xd5\x39"
20968 "\x73",
20969 .clen = 17,
20970 }, {
20971 .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37"
20972 "\x01\xb4\x64\x22\xf3\x48\x85\x0c",
20973 .klen = 16,
20974 .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe"
20975 "\x22\xea\x90\x47\xf2\x11\xb5\x8e",
20976 .assoc = "",
20977 .alen = 0,
20978 .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc"
20979 "\x62\x58\xe9\x8f\xef\xa4\x17\x91"
20980 "\xb4\x96\x9f\x6b\xce\x38\xa5\x46"
20981 "\x13\x7d\x64\x93\xd7\x05\xf5",
20982 .plen = 31,
20983 .ctext = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22"
20984 "\x75\x0b\x24\xa6\x0e\xc3\xde\x52"
20985 "\x97\x0b\x64\xd4\xce\x90\x52\xf7"
20986 "\xef\xdb\x6a\x38\xd2\xa8\xa1\x0d"
20987 "\xe0\x61\x33\x24\xc6\x4d\x51\xbc"
20988 "\xa4\x21\x74\xcf\x19\x16\x59",
20989 .clen = 47,
20990 }, {
20991 .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53"
20992 "\x82\x8e\x16\xb4\xed\x6d\x47\x12",
20993 .klen = 16,
20994 .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda"
20995 "\xa2\xc5\x42\xd8\xec\x36\x78\x94",
20996 .assoc = "",
20997 .alen = 0,
20998 .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8"
20999 "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97"
21000 "\xde\x58\xab\xf0\xd3\xd8\x27\x60"
21001 "\xd5\xaa\x43\x6b\xb1\x64\x95\xa4",
21002 .plen = 32,
21003 .ctext = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f"
21004 "\xde\x9f\x77\xb2\xda\x92\x61\x5c"
21005 "\x09\x0b\x2d\x9a\x26\xaa\x1c\x06"
21006 "\xab\x74\xb7\x2b\x95\x5f\x9f\xa1"
21007 "\x9a\xff\x50\xa0\xa2\xff\xc5\xad"
21008 "\x21\x8e\x84\x5c\x12\x61\xb2\xae",
21009 .clen = 48,
21010 }, {
21011 .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f"
21012 "\x03\x68\xc8\x45\xe7\x91\x0a\x18",
21013 .klen = 16,
21014 .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6"
21015 "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a",
21016 .assoc = "",
21017 .alen = 0,
21018 .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04"
21019 "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d"
21020 "\x09\x1a\xb7\x74\xd8\x78\xa9\x79"
21021 "\x96\xd8\x22\x43\x8c\xc3\x34\x7b"
21022 "\xc4",
21023 .plen = 33,
21024 .ctext = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17"
21025 "\xc1\x1d\x2a\xdd\x88\x67\xda\x1f"
21026 "\x6d\xe8\x37\x28\x5a\xc1\x5e\x9f"
21027 "\xa6\xec\xc6\x92\x05\x4b\xc0\xa3"
21028 "\x63\xef\x88\xa4\x9b\x0a\x5c\xed"
21029 "\x2b\x6a\xac\x63\x52\xaa\x10\x94"
21030 "\xd0",
21031 .clen = 49,
21032 }, {
21033 .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b"
21034 "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f",
21035 .klen = 16,
21036 .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12"
21037 "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0",
21038 .assoc = "",
21039 .alen = 0,
21040 .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f"
21041 "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3"
21042 "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93"
21043 "\x57\x05\x01\x1c\x66\x22\xd3\x51"
21044 "\xd3\xdf\x18\xc9\x30\x66\xed\xb1"
21045 "\x96\x58\xd5\x8c\x64\x8c\x7c\xf5"
21046 "\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1"
21047 "\xe6\x16\xa2\xac\xde\x47\x40",
21048 .plen = 63,
21049 .ctext = "\x7d\x61\x1a\x35\x20\xcc\x07\x88"
21050 "\x03\x98\x87\xcf\xc0\x6e\x4d\x19"
21051 "\xe3\xd4\x0b\xfb\x29\x8f\x49\x1a"
21052 "\x3a\x06\x77\xce\x71\x2c\xcd\xdd"
21053 "\xed\xf6\xc9\xbe\xa6\x3b\xb8\xfc"
21054 "\x6c\xbe\x77\xed\x74\x0e\x20\x85"
21055 "\xd0\x65\xde\x24\x6f\xe3\x25\xc5"
21056 "\xdf\x5b\x0f\xbd\x8a\x88\x78\xc9"
21057 "\xe5\x81\x37\xde\x84\x7a\xf6\x84"
21058 "\x99\x7a\x72\x9c\x54\x31\xa1",
21059 .clen = 79,
21060 }, {
21061 .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6"
21062 "\x05\x1d\x2c\x68\xdb\xda\x8f\x25",
21063 .klen = 16,
21064 .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d"
21065 "\x25\x53\x58\x8c\xda\xa3\xc0\xa6",
21066 .assoc = "",
21067 .alen = 0,
21068 .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b"
21069 "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa"
21070 "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad"
21071 "\x19\x33\xe0\xf4\x40\x81\x72\x28"
21072 "\xe1\x8b\x1c\xf8\x91\x78\xff\xaf"
21073 "\xb0\x68\x69\xf2\x27\x35\x91\x84"
21074 "\x2e\x37\x5b\x00\x04\xff\x16\x9c"
21075 "\xb5\x19\x39\xeb\xd9\xcd\x29\x9a",
21076 .plen = 64,
21077 .ctext = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c"
21078 "\xa5\x07\x12\xa7\x12\x39\x60\x66"
21079 "\x30\x81\x4a\x03\x78\x28\x45\x52"
21080 "\xd2\x2b\x24\xfd\x8b\xa5\xb7\x66"
21081 "\x6f\x45\xd7\x3b\x67\x6f\x51\xb9"
21082 "\xc0\x3d\x6c\xca\x1e\xae\xff\xb6"
21083 "\x79\xa9\xe4\x82\x5d\x4c\x2d\xdf"
21084 "\xeb\x71\x40\xc9\x2c\x40\x45\x6d"
21085 "\x73\x77\x01\xf3\x4f\xf3\x9d\x2a"
21086 "\x5d\x57\xa8\xa1\x18\xa2\xad\xcb",
21087 .clen = 80,
21088 }, {
21089 .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2"
21090 "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b",
21091 .klen = 16,
21092 .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49"
21093 "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad",
21094 .assoc = "\xc5",
21095 .alen = 1,
21096 .ptext = "",
21097 .plen = 0,
21098 .ctext = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e"
21099 "\x89\x3b\x9d\x0f\x83\x1c\x08\xc3",
21100 .clen = 16,
21101 }, {
21102 .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde"
21103 "\x07\xd1\x90\x8b\xcf\x23\x15\x31",
21104 .klen = 16,
21105 .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65"
21106 "\x27\x08\xbd\xaf\xce\xec\x45\xb3",
21107 .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec"
21108 "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34"
21109 "\xe8\x73\x62\x64\xab\x50\xd0\xda"
21110 "\x6b\x83\x66\xaf\x3e\x27\xc9",
21111 .alen = 31,
21112 .ptext = "",
21113 .plen = 0,
21114 .ctext = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38"
21115 "\x03\x12\xf9\xcc\x9e\x46\x42\x92",
21116 .clen = 16,
21117 }, {
21118 .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa"
21119 "\x88\xab\x42\x1c\xc9\x47\xd7\x38",
21120 .klen = 16,
21121 .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81"
21122 "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9",
21123 .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08"
21124 "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b"
21125 "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3"
21126 "\x2d\xb0\x45\x87\x18\x86\x68\xf6",
21127 .alen = 32,
21128 .ptext = "",
21129 .plen = 0,
21130 .ctext = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2"
21131 "\x6d\x65\xe0\x67\x9c\x1d\xa0\xf0",
21132 .clen = 16,
21133 }, {
21134 .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16"
21135 "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e",
21136 .klen = 16,
21137 .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d"
21138 "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf",
21139 .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24"
21140 "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41"
21141 "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d"
21142 "\xee\xde\x23\x60\xf2\xe5\x08\xcc"
21143 "\x97",
21144 .alen = 33,
21145 .ptext = "",
21146 .plen = 0,
21147 .ctext = "\x28\x64\x78\x51\x55\xd8\x56\x4a"
21148 "\x58\x3e\xf7\xbe\xee\x21\xfe\x94",
21149 .clen = 16,
21150 }, {
21151 .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31"
21152 "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44",
21153 .klen = 16,
21154 .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8"
21155 "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6",
21156 .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f"
21157 "\xca\xcd\xff\x88\xba\x22\xbe\x47"
21158 "\x67\xba\x85\xf1\xbb\x30\x56\x26"
21159 "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3"
21160 "\xa6\xbf\x31\x93\x60\xcd\xda\x63"
21161 "\x2c\xb1\xaa\x19\xc8\x19\xf8\xeb"
21162 "\x03\xa1\xe8\xbe\x37\x54\xec\xa2"
21163 "\xcd\x2c\x45\x58\xbd\x8e\x80",
21164 .alen = 63,
21165 .ptext = "",
21166 .plen = 0,
21167 .ctext = "\xb3\xa6\x00\x4e\x09\x20\xac\x21"
21168 "\x77\x72\x69\x76\x2d\x36\xe5\xc8",
21169 .clen = 16,
21170 }, {
21171 .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d"
21172 "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a",
21173 .klen = 16,
21174 .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4"
21175 "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc",
21176 .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b"
21177 "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d"
21178 "\x91\x7c\x91\x75\xc0\xd0\xd8\x40"
21179 "\x71\x39\xe1\x10\xa6\xa3\x46\x7a"
21180 "\xb4\x6b\x35\xc2\xc1\xdf\xed\x60"
21181 "\x46\xc1\x3e\x7f\x8c\xc2\x0e\x7a"
21182 "\x30\x08\xd0\x5f\xa0\xaa\x0c\x6d"
21183 "\x9c\x2f\xdb\x97\xb8\x15\x69\x01",
21184 .alen = 64,
21185 .ptext = "",
21186 .plen = 0,
21187 .ctext = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd"
21188 "\xe4\xb9\x4a\xaa\x9a\x21\xaa\x14",
21189 .clen = 16,
21190 }, {
21191 .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69"
21192 "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50",
21193 .klen = 16,
21194 .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0"
21195 "\xac\x4b\x37\x86\xb0\xa2\x13\xd2",
21196 .assoc = "\x31",
21197 .alen = 1,
21198 .ptext = "\x40",
21199 .plen = 1,
21200 .ctext = "\x1d\x47\x17\x34\x86\xf5\x54\x1a"
21201 "\x6d\x28\xb8\x5d\x6c\xcf\xa0\xb9"
21202 "\xbf",
21203 .clen = 17,
21204 }, {
21205 .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85"
21206 "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57",
21207 .klen = 16,
21208 .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c"
21209 "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8",
21210 .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93"
21211 "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a"
21212 "\xe6\x01\xa8\x7e\xca\x10\xdc\x73"
21213 "\xf4\x94\x9f\xc1\x5a\x61\x85",
21214 .alen = 31,
21215 .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a"
21216 "\x6d\x92\x42\x61\xa7\x58\x37\xdb"
21217 "\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a"
21218 "\x24\xa0\xd6\xb7\x11\x79\x6c",
21219 .plen = 31,
21220 .ctext = "\x78\x90\x52\xae\x0f\xf7\x2e\xef"
21221 "\x63\x09\x08\x58\xb5\x56\xbd\x72"
21222 "\x6e\x42\xcf\x27\x04\x7c\xdb\x92"
21223 "\x18\xe9\xa4\x33\x90\xba\x62\xb5"
21224 "\x70\xd3\x88\x9b\x4f\x05\xa7\x51"
21225 "\x85\x87\x17\x09\x42\xed\x4e",
21226 .clen = 47,
21227 }, {
21228 .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1"
21229 "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d",
21230 .klen = 16,
21231 .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28"
21232 "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf",
21233 .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf"
21234 "\xce\x36\xc7\xce\xa2\xb4\xc9\x60"
21235 "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d"
21236 "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd",
21237 .alen = 32,
21238 .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36"
21239 "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2"
21240 "\xdb\x74\x36\x23\x11\x58\x3f\x93"
21241 "\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3",
21242 .plen = 32,
21243 .ctext = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41"
21244 "\x2e\x71\xc8\x3b\x92\x43\x58\xaf"
21245 "\x5a\xfb\xad\x8f\xd9\xd5\x8a\x5e"
21246 "\xdb\xf3\xcd\x3a\x2b\xe1\x2c\x1a"
21247 "\xb0\xed\xe3\x0c\x6e\xf9\xf2\xd6"
21248 "\x90\xe6\xb1\x0e\xa5\x8a\xac\xb7",
21249 .clen = 48,
21250 }, {
21251 .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd"
21252 "\x0e\xa3\x21\x16\x9f\x46\x2a\x63",
21253 .klen = 16,
21254 .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44"
21255 "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5",
21256 .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca"
21257 "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66"
21258 "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7"
21259 "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4"
21260 "\xee",
21261 .alen = 33,
21262 .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51"
21263 "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8"
21264 "\x05\x36\x42\xa7\x16\xf8\xc1\xad"
21265 "\xa7\xfb\x94\x68\xc5\x37\xab\x8a"
21266 "\x72",
21267 .plen = 33,
21268 .ctext = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc"
21269 "\xfd\x2e\x4b\x46\x84\xff\x78\x4e"
21270 "\x50\xda\x5c\xb9\x61\x1d\xf5\xb9"
21271 "\xfe\xbb\x7f\xae\x8c\xc1\x24\xbd"
21272 "\x8c\x6f\x1f\x9b\xce\xc6\xc1\x37"
21273 "\x08\x06\x5a\xe5\x96\x10\x95\xc2"
21274 "\x5e",
21275 .clen = 49,
21276 }, {
21277 .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8"
21278 "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69",
21279 .klen = 16,
21280 .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f"
21281 "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb",
21282 .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6"
21283 "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d"
21284 "\x65\x48\xcb\x0a\xda\xf0\x62\xc0"
21285 "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa"
21286 "\xfd\xc9\x4a\xa9\xa9\x39\x4b\x54"
21287 "\xc8\x0e\x24\x7f\x5e\x10\x7a\x45"
21288 "\x10\x0b\x56\x85\xad\x54\xaa\x66"
21289 "\xa8\x43\xcd\xd4\x9b\xb7\xfa",
21290 .alen = 63,
21291 .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d"
21292 "\xf0\x20\x58\x15\x95\xc6\x7f\xee"
21293 "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7"
21294 "\x68\x28\x73\x40\x9f\x96\x4a\x60"
21295 "\x80\xf4\x4b\xf4\xc1\x3d\xd0\x93"
21296 "\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8"
21297 "\x1b\xa5\x50\xed\x87\xa9\x72\x59"
21298 "\x9c\x44\xb2\xa4\x99\x98\x34",
21299 .plen = 63,
21300 .ctext = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22"
21301 "\x49\x2d\x07\x92\xfc\x3d\x6d\x5f"
21302 "\xef\x36\x19\xae\x91\xfa\xd6\x63"
21303 "\x46\xea\x8a\x39\x14\x21\xa6\x37"
21304 "\x18\xfc\x97\x3e\x16\xa5\x4d\x39"
21305 "\x45\x2e\x69\xcc\x9c\x5f\xdf\x6d"
21306 "\x5e\xa2\xbf\xac\x83\x32\x72\x52"
21307 "\x58\x58\x23\x40\xfd\xa5\xc2\xe6"
21308 "\xe9\x5a\x50\x98\x00\x58\xc9\x86"
21309 "\x4f\x20\x37\xdb\x7b\x22\xa3",
21310 .clen = 79,
21311 }, {
21312 .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4"
21313 "\x10\x57\x85\x39\x93\x8f\xaf\x70",
21314 .klen = 16,
21315 .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b"
21316 "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1",
21317 .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02"
21318 "\x50\xc4\xde\x82\x90\x21\x11\x73"
21319 "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda"
21320 "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81"
21321 "\x0b\x76\x4f\xd7\x0a\x4b\x5e\x51"
21322 "\xe3\x1d\xb9\xe5\x21\xb9\x8f\xd4"
21323 "\x3d\x72\x3e\x26\x16\xa9\xca\x32"
21324 "\x77\x47\x63\x14\x95\x3d\xe4\x34",
21325 .alen = 64,
21326 .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89"
21327 "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4"
21328 "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0"
21329 "\x29\x56\x52\x19\x79\xf5\xe9\x37"
21330 "\x8f\xa1\x50\x23\x22\x4f\xe3\x91"
21331 "\xe9\x21\x5e\xbf\x52\x23\x95\x37"
21332 "\x48\x0c\x38\x8f\xf0\xff\x92\x24"
21333 "\x6b\x47\x49\xe3\x94\x1f\x1e\x01",
21334 .plen = 64,
21335 .ctext = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb"
21336 "\x23\xec\x35\xe3\xae\xc9\xfb\x0b"
21337 "\x90\x14\x46\xeb\xa8\x8d\xb0\x9b"
21338 "\x39\xda\x8b\x48\xec\xb2\x00\x4e"
21339 "\x80\x6f\x46\x4f\x9b\x1e\xbb\x35"
21340 "\xea\x5a\xbc\xa2\x36\xa5\x89\x45"
21341 "\xc2\xd6\xd7\x15\x0b\xf6\x6c\x56"
21342 "\xec\x99\x7d\x61\xb3\x15\x93\xed"
21343 "\x83\x1e\xd9\x48\x84\x0b\x37\xfe"
21344 "\x95\x74\x44\xd5\x54\xa6\x27\x06",
21345 .clen = 80,
21346 }, {
21347 .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10"
21348 "\x91\x31\x37\xcb\x8d\xb3\x72\x76",
21349 .klen = 16,
21350 .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97"
21351 "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7",
21352 .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e"
21353 "\xd1\x9e\x90\x13\x8a\x45\xd3\x79"
21354 "\xba\xcd\xe2\x13\xe4\x30\x66\xf4"
21355 "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58"
21356 "\x1a\x22\x53\x05\x6b\x5c\x71\x4f"
21357 "\xfd\x2d\x4d\x4c\xe5\x62\xa5\x63"
21358 "\x6a\xda\x26\xc8\x7f\xff\xea\xfd"
21359 "\x46\x4a\xfa\x53\x8f\xc4\xcd\x68"
21360 "\x58",
21361 .alen = 65,
21362 .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5"
21363 "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb"
21364 "\x84\x7d\x65\x34\x25\xd8\x47\xfa"
21365 "\xeb\x83\x31\xf1\x54\x54\x89\x0d"
21366 "\x9d\x4d\x54\x51\x84\x61\xf6\x8e"
21367 "\x03\x31\xf2\x25\x16\xcc\xaa\xc6"
21368 "\x75\x73\x20\x30\x59\x54\xb2\xf0"
21369 "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35"
21370 "\x8a\xdf\x27\xa0\xe4\x60\x99\xae"
21371 "\x8e\x43\xd9\x39\x7b\x10\x40\x67"
21372 "\x5c\x7e\xc9\x70\x63\x34\xca\x59"
21373 "\xfe\x86\xbc\xb7\x9c\x39\xf3\x6d"
21374 "\x6a\x41\x64\x6f\x16\x7f\x65\x7e"
21375 "\x89\x84\x68\xeb\xb0\x51\xbe\x55"
21376 "\x33\x16\x59\x6c\x3b\xef\x88\xad"
21377 "\x2f\xab\xbc\x25\x76\x87\x41\x2f"
21378 "\x36",
21379 .plen = 129,
21380 .ctext = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9"
21381 "\xd1\xcd\xdc\x16\xdd\x2c\xc1\xfb"
21382 "\x52\xb5\xb3\xab\x50\x99\x3f\xa0"
21383 "\x38\xa4\x74\xa5\x04\x15\x63\x05"
21384 "\x8f\x54\x81\x06\x5a\x6b\xa4\x63"
21385 "\x6d\xa7\x21\xcb\xff\x42\x30\x8e"
21386 "\x3b\xd1\xca\x3f\x4b\x1a\xb8\xc3"
21387 "\x42\x01\xe6\xbc\x75\x15\x87\xee"
21388 "\xc9\x8e\x65\x01\xd9\xd8\xb5\x9f"
21389 "\x48\x86\xa6\x5f\x2c\xc7\xb5\xb0"
21390 "\xed\x5d\x14\x7c\x3f\x40\xb1\x0b"
21391 "\x72\xef\x94\x8d\x7a\x85\x56\xe5"
21392 "\x56\x08\x15\x56\xba\xaf\xbd\xf0"
21393 "\x20\xef\xa0\xf6\xa9\xad\xa2\xc9"
21394 "\x1c\x3b\x28\x51\x7e\x77\xb2\x18"
21395 "\x4f\x61\x64\x37\x22\x36\x6d\x78"
21396 "\xed\xed\x35\xe8\x83\xa5\xec\x25"
21397 "\x6b\xff\x5f\x1a\x09\x96\x3d\xdc"
21398 "\x20",
21399 .clen = 145,
21400 }, {
21401 .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c"
21402 "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c",
21403 .klen = 16,
21404 .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3"
21405 "\x32\x42\x15\x80\x85\xa1\x65\xfe",
21406 .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a"
21407 "\x52\x79\x42\xa5\x84\x6a\x96\x7f"
21408 "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d"
21409 "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e"
21410 "\x28\xce\x57\x34\xcd\x6e\x84\x4c"
21411 "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1"
21412 "\x96\x41\x0d\x69\xe8\x54\x0a\xc8"
21413 "\x15\x4e\x91\x92\x89\x4b\xb7\x9b"
21414 "\x21\xf7\x42\x89\xac\x12\x2a\x54"
21415 "\x69\xee\x18\xc7\x8d\xed\xe8\xfd"
21416 "\xbb\x04\x28\xe6\x8a\x3c\x98\xc1"
21417 "\x04\x2d\xa9\xa1\x24\x83\xff\xe9"
21418 "\x55\x7a\xf0\xd1\xf6\x63\x05\xe1"
21419 "\xd9\x1e\x75\x72\xc1\x9f\xae\x32"
21420 "\xe1\x6b\xcd\x9e\x61\x19\x23\x86"
21421 "\xd9\xd2\xaf\x8e\xd5\xd3\xa8\xa9"
21422 "\x51",
21423 .alen = 129,
21424 .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1"
21425 "\x72\xaf\x6e\xc9\x82\x33\xc7\x01"
21426 "\xaf\x40\x70\xb8\x2a\x78\xc9\x14"
21427 "\xac\xb1\x10\xca\x2e\xb3\x28\xe4"
21428 "\xac\xfa\x58\x7f\xe5\x73\x09\x8c"
21429 "\x1d\x40\x87\x8c\xd9\x75\xc0\x55"
21430 "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb"
21431 "\x09\x4f\x77\x62\x88\x2d\xf2\x68"
21432 "\x54",
21433 .plen = 65,
21434 .ctext = "\x36\x78\xb9\x22\xde\x62\x35\x55"
21435 "\x1a\x7a\xf5\x45\xbc\xd7\x15\x82"
21436 "\x01\xe9\x5a\x07\xea\x46\xaf\x91"
21437 "\xcb\x73\xa5\xee\xe1\xb4\xbf\xc2"
21438 "\xdb\xd2\x9d\x59\xde\xfc\x83\x00"
21439 "\xf5\x46\xac\x97\xd5\x57\xa9\xb9"
21440 "\x1f\x8c\xe8\xca\x68\x8b\x91\x0c"
21441 "\x01\xbe\x0a\xaf\x7c\xf6\x67\xa4"
21442 "\xbf\xbc\x88\x3f\x5d\xd1\xf9\x19"
21443 "\x0f\x9d\xb2\xaf\xb9\x6e\x17\xdf"
21444 "\xa2",
21445 .clen = 81,
21446 }, {
21447 .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48"
21448 "\x93\xe6\x9b\xee\x81\xfc\xf7\x82",
21449 .klen = 16,
21450 .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf"
21451 "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04",
21452 .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56"
21453 "\xd3\x53\xf4\x36\x7e\x8e\x59\x85"
21454 "\x0e\x51\xf9\x1c\xee\x70\x6a\x27"
21455 "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05",
21456 .alen = 32,
21457 .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd"
21458 "\xf3\x89\x20\x5b\x7c\x57\x89\x07"
21459 "\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d"
21460 "\x6e\xde\xee\xa2\x08\x12\xc7\xba",
21461 .plen = 32,
21462 .ctext = "\x08\x1b\x95\x0e\x41\x95\x02\x4b"
21463 "\x9c\xbb\xa8\xd0\x7c\xd3\x44\x6e"
21464 "\x89\x14\x33\x70\x0a\xbc\xea\x39"
21465 "\x88\xaa\x2b\xd5\x73\x11\x55\xf5"
21466 "\x33\x33\x9c\xd7\x42\x34\x49\x8e"
21467 "\x2f\x03\x30\x05\x47\xaf\x34",
21468 .clen = 47,
21469 }, {
21470 .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63"
21471 "\x14\xc0\x4d\x7f\x7b\x20\xba\x89",
21472 .klen = 16,
21473 .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea"
21474 "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a",
21475 .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71"
21476 "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c"
21477 "\x39\x14\x05\xa0\xf3\x10\xec\x41"
21478 "\xff\x01\x95\x84\x2b\x59\x7f\xdb",
21479 .alen = 32,
21480 .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8"
21481 "\x74\x63\xd2\xec\x76\x7c\x4c\x0d"
21482 "\x03\xc4\x88\xc1\x35\xb8\xcd\x47"
21483 "\x2f\x0c\xcd\x7a\xe2\x71\x66\x91",
21484 .plen = 32,
21485 .ctext = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68"
21486 "\x0c\x60\xb9\x27\xdf\xaa\x41\xc6"
21487 "\x25\xd8\xf7\x1f\x10\x15\x48\x61"
21488 "\x4c\x95\x00\xdf\x51\x9b\x7f\xe6"
21489 "\x24\x40\x9e\xbe\x3b\xeb\x1b\x98"
21490 "\xb9\x9c\xe5\xef\xf2\x05",
21491 .clen = 46,
21492 }, {
21493 .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f"
21494 "\x95\x9a\xff\x10\x75\x45\x7d\x8f",
21495 .klen = 16,
21496 .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06"
21497 "\xb5\xd1\x2b\x35\x73\x0e\xad\x10",
21498 .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d"
21499 "\xd5\x07\x58\x59\x72\xd7\xde\x92"
21500 "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a"
21501 "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2",
21502 .alen = 32,
21503 .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14"
21504 "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13"
21505 "\x2e\x86\x93\x45\x3a\x58\x4f\x61"
21506 "\xf0\x3a\xac\x53\xbc\xd0\x06\x68",
21507 .plen = 32,
21508 .ctext = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d"
21509 "\xb5\xec\x9b\x4e\x12\x23\xa3\xcf"
21510 "\x1a\x5a\x70\x15\x5a\x10\x40\x51"
21511 "\xca\x47\x4c\x9d\xc9\x97\xf4\x77"
21512 "\xdb\xc8\x10\x2d\xdc\x65\x20\x3f",
21513 .clen = 40,
21514 }, {
21515 .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b"
21516 "\x15\x74\xb1\xa2\x6f\x69\x3f\x95",
21517 .klen = 16,
21518 .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22"
21519 "\x36\xab\xde\xc6\x6d\x32\x70\x17",
21520 .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9"
21521 "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98"
21522 "\x8d\x98\x1c\xa8\xfe\x50\xf0\x74"
21523 "\x81\x5c\x53\x35\xe0\x17\xbd\x88",
21524 .alen = 32,
21525 .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30"
21526 "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a"
21527 "\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a"
21528 "\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e",
21529 .plen = 32,
21530 .ctext = "\xf1\x62\x44\xc7\x5f\x19\xca\x43"
21531 "\x47\x2c\xaf\x68\x82\xbd\x51\xef"
21532 "\x3d\x65\xd8\x45\x2d\x06\x07\x78"
21533 "\x08\x2e\xb3\x23\xcd\x81\x12\x55"
21534 "\x1a",
21535 .clen = 33,
21536 }, {
21537 .key = "\xe9\x95\xa2\x8f\x93\x13\x7b\xb7"
21538 "\x96\x4e\x63\x33\x69\x8d\x02\x9b"
21539 "\x23\xf9\x22\xeb\x80\xa0\xb1\x81"
21540 "\xe2\x73\xc3\x21\x4d\x47\x8d\xf4",
21541 .klen = 32,
21542 .iv = "\xf8\x5e\x31\xf7\xd7\xb2\x25\x3e"
21543 "\xb7\x85\x90\x58\x67\x57\x33\x1d",
21544 .assoc = "",
21545 .alen = 0,
21546 .ptext = "",
21547 .plen = 0,
21548 .ctext = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf"
21549 "\xb9\xd2\x41\xf6\x80\xa1\x52\x70",
21550 .clen = 16,
21551 }, {
21552 .key = "\x25\xba\xdc\x2e\xa3\x8f\x24\xd3"
21553 "\x17\x29\x15\xc5\x63\xb2\xc5\xa1"
21554 "\x4d\xbc\x2d\x6f\x85\x40\x33\x9a"
21555 "\xa3\xa0\xa1\xfa\x27\xa6\x2c\xca",
21556 .klen = 32,
21557 .iv = "\x34\x83\x6a\x96\xe7\x2d\xce\x5a"
21558 "\x38\x5f\x42\xe9\x61\x7b\xf5\x23",
21559 .assoc = "",
21560 .alen = 0,
21561 .ptext = "\x53",
21562 .plen = 1,
21563 .ctext = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7"
21564 "\x01\xf4\x08\xe3\x0d\xf7\xf0\x78"
21565 "\x53",
21566 .clen = 17,
21567 }, {
21568 .key = "\x62\xdf\x16\xcd\xb3\x0a\xcc\xef"
21569 "\x98\x03\xc7\x56\x5d\xd6\x87\xa8"
21570 "\x77\x7e\x39\xf3\x8a\xe0\xb5\xb4"
21571 "\x65\xce\x80\xd2\x01\x05\xcb\xa1",
21572 .klen = 32,
21573 .iv = "\x71\xa8\xa4\x35\xf7\xa9\x76\x75"
21574 "\xb8\x39\xf4\x7a\x5b\x9f\xb8\x29",
21575 .assoc = "",
21576 .alen = 0,
21577 .ptext = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83"
21578 "\xf9\xa6\x4d\xc3\x58\x31\x19\x2c"
21579 "\xd7\x90\xc2\x56\x4e\xd8\x57\xc7"
21580 "\xf6\xf0\x27\xb4\x25\x4c\x83",
21581 .plen = 31,
21582 .ctext = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07"
21583 "\xff\x8e\x74\xf8\xa1\xa6\xd5\x37"
21584 "\xa5\x64\x31\x5c\xca\x73\x9b\x43"
21585 "\xe6\x70\x63\x46\x95\xcb\xf7\xb5"
21586 "\x20\x8c\x75\x7a\x2a\x17\x2f\xa9"
21587 "\xb8\x4d\x11\x42\xd1\xf8\xf1",
21588 .clen = 47,
21589 }, {
21590 .key = "\x9e\x03\x4f\x6d\xc3\x86\x75\x0a"
21591 "\x19\xdd\x79\xe8\x57\xfb\x4a\xae"
21592 "\xa2\x40\x45\x77\x90\x80\x37\xce"
21593 "\x26\xfb\x5f\xaa\xdb\x64\x6b\x77",
21594 .klen = 32,
21595 .iv = "\xae\xcc\xde\xd5\x07\x25\x1f\x91"
21596 "\x39\x14\xa6\x0c\x55\xc4\x7b\x30",
21597 .assoc = "",
21598 .alen = 0,
21599 .ptext = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f"
21600 "\x7a\x81\xff\x55\x52\x56\xdc\x33"
21601 "\x01\x52\xcd\xdb\x53\x78\xd9\xe1"
21602 "\xb7\x1d\x06\x8d\xff\xab\x22\x98",
21603 .plen = 32,
21604 .ctext = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37"
21605 "\x0a\xee\xc4\x30\x19\xd7\x3a\x6f"
21606 "\xf8\x2b\x67\xf5\x3b\x84\x87\x2a"
21607 "\xfb\x07\x7a\x82\xb5\xe4\x85\x26"
21608 "\x1e\xa8\xe5\x04\x54\xce\xe5\x5f"
21609 "\xb5\x3f\xc1\xd5\x7f\xbd\xd2\xa6",
21610 .clen = 48,
21611 }, {
21612 .key = "\xdb\x28\x89\x0c\xd3\x01\x1e\x26"
21613 "\x9a\xb7\x2b\x79\x51\x1f\x0d\xb4"
21614 "\xcc\x03\x50\xfc\x95\x20\xb9\xe7"
21615 "\xe8\x29\x3e\x83\xb5\xc3\x0a\x4e",
21616 .klen = 32,
21617 .iv = "\xea\xf1\x18\x74\x17\xa0\xc8\xad"
21618 "\xba\xee\x58\x9d\x4f\xe8\x3d\x36",
21619 .assoc = "",
21620 .alen = 0,
21621 .ptext = "\x08\x84\x34\x44\x9f\xde\x1c\xbb"
21622 "\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39"
21623 "\x2c\x14\xd9\x5f\x59\x18\x5b\xfb"
21624 "\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f"
21625 "\x2e",
21626 .plen = 33,
21627 .ctext = "\xc2\xf4\x40\x55\xf9\x59\xff\x73"
21628 "\x08\xf5\x98\x92\x0c\x7b\x35\x9a"
21629 "\xa8\xf4\x42\x7e\x6f\x93\xca\x22"
21630 "\x23\x06\x1e\xf8\x89\x22\xf4\x46"
21631 "\x7c\x7c\x67\x75\xab\xe5\x75\xaa"
21632 "\x15\xd7\x83\x19\xfd\x31\x59\x5b"
21633 "\x32",
21634 .clen = 49,
21635 }, {
21636 .key = "\x17\x4d\xc3\xab\xe3\x7d\xc7\x42"
21637 "\x1b\x91\xdd\x0a\x4b\x43\xcf\xba"
21638 "\xf6\xc5\x5c\x80\x9a\xc0\x3b\x01"
21639 "\xa9\x56\x1d\x5b\x8f\x22\xa9\x25",
21640 .klen = 32,
21641 .iv = "\x27\x16\x51\x13\x27\x1c\x71\xc9"
21642 "\x3b\xc8\x0a\x2f\x49\x0c\x00\x3c",
21643 .assoc = "",
21644 .alen = 0,
21645 .ptext = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7"
21646 "\x7c\x35\x63\x77\x46\x9f\x61\x3f"
21647 "\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14"
21648 "\x3a\x79\xc4\x3e\xb3\x69\x61\x46"
21649 "\x3c\xb6\x83\x4e\xb4\x26\xc7\x73"
21650 "\x22\xda\x52\x8b\x7d\x11\x98\xea"
21651 "\x62\xe1\x14\x1e\xdc\xfe\x0f\xad"
21652 "\x20\x76\x5a\xdc\x4e\x71\x13",
21653 .plen = 63,
21654 .ctext = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb"
21655 "\xa0\x8c\xd3\x3e\x7f\x8d\xe8\x28"
21656 "\x2a\xdc\xfa\x01\x84\x87\x9a\x70"
21657 "\x81\x75\x37\x0a\xd2\x75\xa9\xb6"
21658 "\x21\x72\xee\x7e\x65\x95\xe5\xcc"
21659 "\x01\xb7\x39\xa6\x51\x15\xca\xff"
21660 "\x61\xdc\x97\x38\xcc\xf4\xca\xc7"
21661 "\x83\x9b\x05\x11\x72\x60\xf0\xb4"
21662 "\x7e\x06\xab\x0a\xc0\xbb\x59\x23"
21663 "\xaa\x2d\xfc\x4e\x35\x05\x59",
21664 .clen = 79,
21665 }, {
21666 .key = "\x54\x71\xfd\x4b\xf3\xf9\x6f\x5e"
21667 "\x9c\x6c\x8f\x9c\x45\x68\x92\xc1"
21668 "\x21\x87\x67\x04\x9f\x60\xbd\x1b"
21669 "\x6a\x84\xfc\x34\x6a\x81\x48\xfb",
21670 .klen = 32,
21671 .iv = "\x63\x3b\x8b\xb3\x37\x98\x1a\xe5"
21672 "\xbc\xa2\xbc\xc0\x43\x31\xc2\x42",
21673 .assoc = "",
21674 .alen = 0,
21675 .ptext = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3"
21676 "\xfd\x0f\x15\x09\x40\xc3\x24\x45"
21677 "\x81\x99\xf0\x67\x63\x58\x5e\x2e"
21678 "\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c"
21679 "\x4b\x62\x87\x7c\x15\x38\xda\x70"
21680 "\x3d\xea\xe7\xf2\x40\xba\xae\x79"
21681 "\x8f\x48\xfc\xbf\x45\x53\x2e\x78"
21682 "\xef\x79\xf0\x1b\x49\xf7\xfd\x9c",
21683 .plen = 64,
21684 .ctext = "\x11\x7c\x7d\xef\xce\x29\x95\xec"
21685 "\x7e\x9f\x42\xa6\x26\x07\xa1\x75"
21686 "\x2f\x4e\x09\x9a\xf6\x6b\xc2\xfa"
21687 "\x0d\xd0\x17\xdc\x25\x1e\x9b\xdc"
21688 "\x5f\x8c\x1c\x60\x15\x4f\x9b\x20"
21689 "\x7b\xff\xcd\x82\x60\x84\xf4\xa5"
21690 "\x20\x9a\x05\x19\x5b\x02\x0a\x72"
21691 "\x43\x11\x26\x58\xcf\xc5\x41\xcf"
21692 "\x13\xcc\xde\x32\x92\xfa\x86\xf2"
21693 "\xaf\x16\xe8\x8f\xca\xb6\xfd\x54",
21694 .clen = 80,
21695 }, {
21696 .key = "\x90\x96\x36\xea\x03\x74\x18\x7a"
21697 "\x1d\x46\x42\x2d\x3f\x8c\x54\xc7"
21698 "\x4b\x4a\x73\x89\xa4\x00\x3f\x34"
21699 "\x2c\xb1\xdb\x0c\x44\xe0\xe8\xd2",
21700 .klen = 32,
21701 .iv = "\xa0\x5f\xc5\x52\x47\x13\xc2\x01"
21702 "\x3d\x7c\x6e\x52\x3d\x55\x85\x48",
21703 .assoc = "\xaf",
21704 .alen = 1,
21705 .ptext = "",
21706 .plen = 0,
21707 .ctext = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe"
21708 "\x69\xdf\xc4\xc4\x02\x46\x3a\xf0",
21709 .clen = 16,
21710 }, {
21711 .key = "\xcd\xbb\x70\x89\x13\xf0\xc1\x95"
21712 "\x9e\x20\xf4\xbf\x39\xb1\x17\xcd"
21713 "\x76\x0c\x7f\x0d\xa9\xa0\xc1\x4e"
21714 "\xed\xdf\xb9\xe4\x1e\x3f\x87\xa8",
21715 .klen = 32,
21716 .iv = "\xdc\x84\xfe\xf1\x58\x8f\x6b\x1c"
21717 "\xbe\x57\x20\xe3\x37\x7a\x48\x4f",
21718 .assoc = "\xeb\x4d\x8d\x59\x9c\x2e\x15\xa3"
21719 "\xde\x8d\x4d\x07\x36\x43\x78\xd0"
21720 "\x0b\x6d\x84\x4f\x2c\xf0\x82\x5b"
21721 "\x4e\xf6\x29\xd1\x8b\x6f\x56",
21722 .alen = 31,
21723 .ptext = "",
21724 .plen = 0,
21725 .ctext = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d"
21726 "\x2e\x9a\xd6\x61\x43\xc0\x74\x69",
21727 .clen = 16,
21728 }, {
21729 .key = "\x0a\xe0\xaa\x29\x24\x6c\x6a\xb1"
21730 "\x1f\xfa\xa6\x50\x33\xd5\xda\xd3"
21731 "\xa0\xce\x8a\x91\xae\x40\x43\x68"
21732 "\xae\x0d\x98\xbd\xf8\x9e\x26\x7f",
21733 .klen = 32,
21734 .iv = "\x19\xa9\x38\x91\x68\x0b\x14\x38"
21735 "\x3f\x31\xd2\x74\x31\x9e\x0a\x55",
21736 .assoc = "\x28\x72\xc7\xf8\xac\xaa\xbe\xbf"
21737 "\x5f\x67\xff\x99\x30\x67\x3b\xd6"
21738 "\x35\x2f\x90\xd3\x31\x90\x04\x74"
21739 "\x0f\x23\x08\xa9\x65\xce\xf6\xea",
21740 .alen = 32,
21741 .ptext = "",
21742 .plen = 0,
21743 .ctext = "\xb9\x57\x13\x3e\x82\x31\x61\x65"
21744 "\x0d\x7f\x6c\x96\x93\x5c\x50\xe2",
21745 .clen = 16,
21746 }, {
21747 .key = "\x46\x04\xe3\xc8\x34\xe7\x12\xcd"
21748 "\xa0\xd4\x58\xe2\x2d\xf9\x9c\xda"
21749 "\xca\x91\x96\x15\xb4\xe0\xc5\x81"
21750 "\x70\x3a\x77\x95\xd2\xfd\xc5\x55",
21751 .klen = 32,
21752 .iv = "\x55\xcd\x72\x30\x78\x86\xbd\x54"
21753 "\xc0\x0b\x84\x06\x2b\xc2\xcd\x5b",
21754 .assoc = "\x64\x97\x00\x98\xbc\x25\x67\xdb"
21755 "\xe0\x41\xb1\x2a\x2a\x8c\xfe\xdd"
21756 "\x5f\xf2\x9c\x58\x36\x30\x86\x8e"
21757 "\xd1\x51\xe6\x81\x3f\x2d\x95\xc1"
21758 "\x01",
21759 .alen = 33,
21760 .ptext = "",
21761 .plen = 0,
21762 .ctext = "\x81\x96\x34\xde\xbb\x36\xdd\x3e"
21763 "\x4e\x5e\xcb\x44\x21\xb8\x3f\xf1",
21764 .clen = 16,
21765 }, {
21766 .key = "\x83\x29\x1d\x67\x44\x63\xbb\xe9"
21767 "\x20\xaf\x0a\x73\x27\x1e\x5f\xe0"
21768 "\xf5\x53\xa1\x9a\xb9\x80\x47\x9b"
21769 "\x31\x68\x56\x6e\xac\x5c\x65\x2c",
21770 .klen = 32,
21771 .iv = "\x92\xf2\xac\xcf\x88\x02\x65\x70"
21772 "\x41\xe5\x36\x97\x25\xe7\x90\x61",
21773 .assoc = "\xa1\xbb\x3a\x37\xcc\xa1\x10\xf7"
21774 "\x61\x1c\x63\xbc\x24\xb0\xc0\xe3"
21775 "\x8a\xb4\xa7\xdc\x3b\xd0\x08\xa8"
21776 "\x92\x7f\xc5\x5a\x19\x8c\x34\x97"
21777 "\x0f\x95\x9b\x18\xe4\x8d\xb4\x24"
21778 "\xb9\x33\x28\x18\xe1\x9d\x14\xe0"
21779 "\x64\xb2\x89\x7d\x78\xa8\x05\x7e"
21780 "\x07\x8c\xfc\x88\x2d\xb8\x53",
21781 .alen = 63,
21782 .ptext = "",
21783 .plen = 0,
21784 .ctext = "\x2e\x99\xb6\x79\x57\x56\x80\x36"
21785 "\x8e\xc4\x1c\x12\x7d\x71\x36\x0c",
21786 .clen = 16,
21787 }, {
21788 .key = "\xbf\x4e\x57\x07\x54\xdf\x64\x05"
21789 "\xa1\x89\xbc\x04\x21\x42\x22\xe6"
21790 "\x1f\x15\xad\x1e\xbe\x20\xc9\xb4"
21791 "\xf3\x95\x35\x46\x86\xbb\x04\x03",
21792 .klen = 32,
21793 .iv = "\xce\x17\xe5\x6f\x98\x7e\x0e\x8c"
21794 "\xc2\xbf\xe8\x29\x1f\x0b\x52\x68",
21795 .assoc = "\xdd\xe0\x74\xd6\xdc\x1d\xb8\x13"
21796 "\xe2\xf6\x15\x4d\x1e\xd4\x83\xe9"
21797 "\xb4\x76\xb3\x60\x40\x70\x8a\xc1"
21798 "\x53\xac\xa4\x32\xf3\xeb\xd3\x6e"
21799 "\x1e\x42\xa0\x46\x45\x9f\xc7\x22"
21800 "\xd3\x43\xbc\x7e\xa5\x47\x2a\x6f"
21801 "\x91\x19\x70\x1e\xe1\xfe\x25\x49"
21802 "\xd6\x8f\x93\xc7\x28\x3f\x3d\x03",
21803 .alen = 64,
21804 .ptext = "",
21805 .plen = 0,
21806 .ctext = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce"
21807 "\x3b\x89\x40\x36\xba\x6d\x0e\xa2",
21808 .clen = 16,
21809 }, {
21810 .key = "\xfc\x72\x90\xa6\x64\x5a\x0d\x21"
21811 "\x22\x63\x6e\x96\x1b\x67\xe4\xec"
21812 "\x49\xd7\xb9\xa2\xc3\xc0\x4b\xce"
21813 "\xb4\xc3\x14\x1e\x61\x1a\xa3\xd9",
21814 .klen = 32,
21815 .iv = "\x0b\x3c\x1f\x0e\xa8\xf9\xb7\xa7"
21816 "\x42\x9a\x9a\xba\x19\x30\x15\x6e",
21817 .assoc = "\x1a",
21818 .alen = 1,
21819 .ptext = "\x29",
21820 .plen = 1,
21821 .ctext = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6"
21822 "\x17\x75\x81\x16\xdf\x26\xff\x67"
21823 "\x92",
21824 .clen = 17,
21825 }, {
21826 .key = "\x38\x97\xca\x45\x74\xd6\xb6\x3c"
21827 "\xa3\x3d\x20\x27\x15\x8b\xa7\xf2"
21828 "\x74\x9a\xc4\x27\xc8\x60\xcd\xe8"
21829 "\x75\xf0\xf2\xf7\x3b\x79\x42\xb0",
21830 .klen = 32,
21831 .iv = "\x47\x60\x59\xad\xb8\x75\x60\xc3"
21832 "\xc3\x74\x4c\x4c\x13\x54\xd8\x74",
21833 .assoc = "\x56\x29\xe7\x15\xfc\x14\x0a\x4a"
21834 "\xe4\xaa\x79\x70\x12\x1d\x08\xf6"
21835 "\x09\xfb\xca\x69\x4b\xb0\x8e\xf5"
21836 "\xd6\x07\x62\xe3\xa8\xa9\x12",
21837 .alen = 31,
21838 .ptext = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1"
21839 "\x04\xe1\xa6\x94\x10\xe6\x39\x77"
21840 "\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb"
21841 "\x06\x13\x9a\xd9\x5e\xc0\xfa",
21842 .plen = 31,
21843 .ctext = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd"
21844 "\x3c\xd1\x2a\xd4\x15\x86\x9d\xda"
21845 "\xea\x6c\x6f\xa1\x33\xb0\x7a\x01"
21846 "\x57\xe7\xf3\x7b\x73\xe7\x54\x10"
21847 "\xc6\x91\xe2\xc6\xa0\x69\xe7\xe6"
21848 "\x76\xc3\xf5\x3a\x76\xfd\x4a",
21849 .clen = 47,
21850 }, {
21851 .key = "\x75\xbc\x04\xe5\x84\x52\x5e\x58"
21852 "\x24\x17\xd2\xb9\x0e\xaf\x6a\xf9"
21853 "\x9e\x5c\xd0\xab\xcd\x00\x4f\x01"
21854 "\x37\x1e\xd1\xcf\x15\xd8\xe2\x86",
21855 .klen = 32,
21856 .iv = "\x84\x85\x92\x4d\xc8\xf1\x08\xdf"
21857 "\x44\x4e\xff\xdd\x0d\x78\x9a\x7a",
21858 .assoc = "\x93\x4e\x21\xb4\x0c\x90\xb3\x66"
21859 "\x65\x84\x2b\x01\x0b\x42\xcb\xfc"
21860 "\x33\xbd\xd6\xed\x50\x50\x10\x0e"
21861 "\x97\x35\x41\xbb\x82\x08\xb1\xf2",
21862 .alen = 32,
21863 .ptext = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed"
21864 "\x85\xbb\x58\x26\x0a\x0b\xfc\x7d"
21865 "\xfe\x6e\x59\x0e\x91\xf8\xf0\x15"
21866 "\xc8\x40\x78\xb1\x38\x1f\x99\xa7",
21867 .plen = 32,
21868 .ctext = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a"
21869 "\x71\xce\xe4\xaa\x45\x70\xe6\x84"
21870 "\x62\x48\x08\x64\x86\x6a\xdf\xec"
21871 "\xb4\xa0\xfb\x34\x03\x0c\x19\xf4"
21872 "\x2b\x7b\x36\x73\xec\x54\xa9\x1e"
21873 "\x30\x85\xdb\xe4\xac\xe9\x2c\xca",
21874 .clen = 48,
21875 }, {
21876 .key = "\xb1\xe1\x3e\x84\x94\xcd\x07\x74"
21877 "\xa5\xf2\x84\x4a\x08\xd4\x2c\xff"
21878 "\xc8\x1e\xdb\x2f\xd2\xa0\xd1\x1b"
21879 "\xf8\x4c\xb0\xa8\xef\x37\x81\x5d",
21880 .klen = 32,
21881 .iv = "\xc0\xaa\xcc\xec\xd8\x6c\xb1\xfb"
21882 "\xc5\x28\xb1\x6e\x07\x9d\x5d\x81",
21883 .assoc = "\xd0\x73\x5a\x54\x1d\x0b\x5b\x82"
21884 "\xe5\x5f\xdd\x93\x05\x66\x8e\x02"
21885 "\x5e\x80\xe1\x71\x55\xf0\x92\x28"
21886 "\x59\x62\x20\x94\x5c\x67\x50\xc8"
21887 "\x58",
21888 .alen = 33,
21889 .ptext = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09"
21890 "\x06\x95\x0a\xb7\x04\x2f\xbe\x84"
21891 "\x28\x30\x64\x92\x96\x98\x72\x2e"
21892 "\x89\x6e\x57\x8a\x13\x7e\x38\x7e"
21893 "\xdb",
21894 .plen = 33,
21895 .ctext = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60"
21896 "\xcb\x4a\x54\x94\xcf\xf5\x7e\x34"
21897 "\xe9\xf8\x80\x65\x53\xd0\x72\x70"
21898 "\x4f\x7d\x9d\xd1\x15\x6f\xb9\x2c"
21899 "\xfa\xe8\xdd\xac\x2e\xe1\x3f\x67"
21900 "\x63\x0f\x1a\x59\xb7\x89\xdb\xf4"
21901 "\xc3",
21902 .clen = 49,
21903 }, {
21904 .key = "\xee\x05\x77\x23\xa5\x49\xb0\x90"
21905 "\x26\xcc\x36\xdc\x02\xf8\xef\x05"
21906 "\xf3\xe1\xe7\xb3\xd8\x40\x53\x35"
21907 "\xb9\x79\x8f\x80\xc9\x96\x20\x33",
21908 .klen = 32,
21909 .iv = "\xfd\xce\x06\x8b\xe9\xe8\x5a\x17"
21910 "\x46\x02\x63\x00\x01\xc1\x20\x87",
21911 .assoc = "\x0c\x98\x94\xf3\x2d\x87\x04\x9e"
21912 "\x66\x39\x8f\x24\xff\x8a\x50\x08"
21913 "\x88\x42\xed\xf6\x5a\x90\x14\x42"
21914 "\x1a\x90\xfe\x6c\x36\xc6\xf0\x9f"
21915 "\x66\xa0\xb5\x2d\x2c\xf8\x25\x15"
21916 "\x55\x90\xa2\x7e\x77\x94\x96\x3a"
21917 "\x71\x1c\xf7\x44\xee\xa8\xc3\x42"
21918 "\xe2\xa3\x84\x04\x0b\xe1\xce",
21919 .alen = 63,
21920 .ptext = "\x1b\x61\x23\x5b\x71\x26\xae\x25"
21921 "\x87\x6f\xbc\x49\xfe\x53\x81\x8a"
21922 "\x53\xf2\x70\x17\x9b\x38\xf4\x48"
21923 "\x4b\x9b\x36\x62\xed\xdd\xd8\x54"
21924 "\xea\xcb\xb6\x79\x45\xfc\xaa\x54"
21925 "\x5c\x94\x47\x58\xa7\xff\x9c\x9e"
21926 "\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35"
21927 "\xd5\xa4\x6a\xd4\x09\xc2\x08",
21928 .plen = 63,
21929 .ctext = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a"
21930 "\xda\x1f\xd3\xff\xbb\xb2\xb0\xf8"
21931 "\xef\xe9\xeb\x9e\x7c\x80\xf4\x2b"
21932 "\x59\xc0\x79\xbc\x17\xa0\x15\x01"
21933 "\xf5\x72\xfb\x5a\xe7\xaf\x07\xe3"
21934 "\x1b\x49\x21\x34\x23\x63\x55\x5e"
21935 "\xee\x4f\x34\x17\xfa\xfe\xa5\x0c"
21936 "\xed\x0b\x23\xea\x9b\xda\x57\x2f"
21937 "\xf6\xa9\xae\x0d\x4e\x40\x96\x45"
21938 "\x7f\xfa\xf0\xbf\xc4\x98\x78",
21939 .clen = 79,
21940 }, {
21941 .key = "\x2a\x2a\xb1\xc3\xb5\xc5\x59\xac"
21942 "\xa7\xa6\xe8\x6d\xfc\x1d\xb2\x0b"
21943 "\x1d\xa3\xf3\x38\xdd\xe0\xd5\x4e"
21944 "\x7b\xa7\x6e\x58\xa3\xf5\xbf\x0a",
21945 .klen = 32,
21946 .iv = "\x39\xf3\x3f\x2b\xf9\x64\x03\x33"
21947 "\xc7\xdd\x15\x91\xfb\xe6\xe2\x8d",
21948 .assoc = "\x49\xbc\xce\x92\x3d\x02\xad\xba"
21949 "\xe7\x13\x41\xb6\xf9\xaf\x13\x0f"
21950 "\xb2\x04\xf8\x7a\x5f\x30\x96\x5b"
21951 "\xdc\xbd\xdd\x44\x10\x25\x8f\x75"
21952 "\x75\x4d\xb9\x5b\x8e\x0a\x38\x13"
21953 "\x6f\x9f\x36\xe4\x3a\x3e\xac\xc9"
21954 "\x9d\x83\xde\xe5\x57\xfd\xe3\x0e"
21955 "\xb1\xa7\x1b\x44\x05\x67\xb7\x37",
21956 .alen = 64,
21957 .ptext = "\x58\x85\x5c\xfa\x81\xa1\x57\x40"
21958 "\x08\x4a\x6e\xda\xf8\x78\x44\x90"
21959 "\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62"
21960 "\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b"
21961 "\xf8\x78\xba\xa7\xa6\x0e\xbd\x52"
21962 "\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d"
21963 "\xa9\x1d\xd8\x4e\x31\x53\xab\x00"
21964 "\xa5\xa7\x01\x13\x04\x49\xf2\x04",
21965 .plen = 64,
21966 .ctext = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1"
21967 "\x58\x06\x1a\x9b\x8c\x67\xdf\xeb"
21968 "\x35\x35\x60\x9d\x06\x40\x65\xc1"
21969 "\x93\xe8\xb3\x82\x50\x29\xdd\xb5"
21970 "\x2b\xcb\xde\x18\x78\x6b\x42\xbe"
21971 "\x6d\x24\xd0\xb2\x7d\xd7\x08\x8f"
21972 "\x4a\x18\x98\xad\x8c\xf2\x97\xb4"
21973 "\xf4\x77\xe4\xbf\x41\x3b\xc4\x06"
21974 "\xce\x9e\x34\x81\xf0\x89\x11\x13"
21975 "\x02\x65\xa1\x7c\xdf\x07\x33\x06",
21976 .clen = 80,
21977 }, {
21978 .key = "\x67\x4f\xeb\x62\xc5\x40\x01\xc7"
21979 "\x28\x80\x9a\xfe\xf6\x41\x74\x12"
21980 "\x48\x65\xfe\xbc\xe2\x80\x57\x68"
21981 "\x3c\xd4\x4d\x31\x7d\x54\x5f\xe1",
21982 .klen = 32,
21983 .iv = "\x76\x18\x79\xca\x09\xdf\xac\x4e"
21984 "\x48\xb7\xc7\x23\xf5\x0a\xa5\x93",
21985 .assoc = "\x85\xe1\x08\x32\x4d\x7e\x56\xd5"
21986 "\x68\xed\xf3\x47\xf3\xd3\xd6\x15"
21987 "\xdd\xc7\x04\xfe\x64\xd0\x18\x75"
21988 "\x9d\xeb\xbc\x1d\xea\x84\x2e\x4c"
21989 "\x83\xf9\xbe\x8a\xef\x1c\x4b\x10"
21990 "\x89\xaf\xcb\x4b\xfe\xe7\xc1\x58"
21991 "\xca\xea\xc6\x87\xc0\x53\x03\xd9"
21992 "\x80\xaa\xb2\x83\xff\xee\xa1\x6a"
21993 "\x04",
21994 .alen = 65,
21995 .ptext = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c"
21996 "\x88\x24\x20\x6b\xf2\x9c\x06\x96"
21997 "\xa7\x77\x87\x1f\xa6\x78\xf8\x7b"
21998 "\xcd\xf6\xf4\x13\xa1\x9b\x16\x02"
21999 "\x07\x24\xbf\xd5\x08\x20\xd0\x4f"
22000 "\x90\xb3\x70\x24\x2f\x51\xc7\xbb"
22001 "\xd6\x84\xc0\xef\x9a\xa8\xca\xcc"
22002 "\x74\xab\x97\x53\xfe\xd0\xdb\x37"
22003 "\x37\x6a\x0e\x9f\x3f\xa3\x2a\xe3"
22004 "\x1b\x34\x6d\x51\x72\x2b\x17\xe7"
22005 "\x4d\xaa\x2c\x18\xda\xa3\x33\x89"
22006 "\x2a\x9f\xf4\xd2\xed\x76\x3d\x3f"
22007 "\x3c\x15\x9d\x8e\x4f\x3c\x27\xb0"
22008 "\x42\x3f\x2f\x8a\xd4\xc2\x10\xb2"
22009 "\x27\x7f\xe3\x34\x80\x02\x49\x4b"
22010 "\x07\x68\x22\x2a\x88\x25\x53\xb2"
22011 "\x2f",
22012 .plen = 129,
22013 .ctext = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6"
22014 "\x85\x43\x88\xd0\xd7\x78\x60\x19"
22015 "\x3e\x1f\xb1\xa4\xd6\xc5\x96\xec"
22016 "\xf7\x84\x85\xc7\x27\x0f\x74\x57"
22017 "\x28\x9e\xdd\x90\x3c\x43\x12\xc5"
22018 "\x51\x3d\x39\x8f\xa5\xf4\xe0\x0b"
22019 "\x57\x04\xf1\x6d\xfe\x9b\x84\x27"
22020 "\xe8\xeb\x4d\xda\x02\x0a\xc5\x49"
22021 "\x1a\x55\x5e\x50\x56\x4d\x94\xda"
22022 "\x20\xf8\x12\x54\x50\xb3\x11\xda"
22023 "\xed\x44\x27\x67\xd5\xd1\x8b\x4b"
22024 "\x38\x67\x56\x65\x59\xda\xe6\x97"
22025 "\x81\xae\x2f\x92\x3b\xae\x22\x1c"
22026 "\x91\x59\x38\x18\x00\xe8\xba\x92"
22027 "\x04\x19\x56\xdf\xb0\x82\xeb\x6f"
22028 "\x2e\xdb\x54\x3c\x4b\xbb\x60\x90"
22029 "\x4c\x50\x10\x62\xba\x7a\xb1\x68"
22030 "\x37\xd7\x87\x4e\xe4\x66\x09\x1f"
22031 "\xa5",
22032 .clen = 145,
22033 }, {
22034 .key = "\xa3\x73\x24\x01\xd5\xbc\xaa\xe3"
22035 "\xa9\x5a\x4c\x90\xf0\x65\x37\x18"
22036 "\x72\x28\x0a\x40\xe7\x20\xd9\x82"
22037 "\xfe\x02\x2b\x09\x57\xb3\xfe\xb7",
22038 .klen = 32,
22039 .iv = "\xb3\x3d\xb3\x69\x19\x5b\x54\x6a"
22040 "\xc9\x91\x79\xb4\xef\x2e\x68\x99",
22041 .assoc = "\xc2\x06\x41\xd1\x5d\xfa\xff\xf1"
22042 "\xe9\xc7\xa5\xd9\xed\xf8\x98\x1b"
22043 "\x07\x89\x10\x82\x6a\x70\x9a\x8f"
22044 "\x5e\x19\x9b\xf5\xc5\xe3\xcd\x22"
22045 "\x92\xa5\xc2\xb8\x51\x2e\x5e\x0e"
22046 "\xa4\xbe\x5f\xb1\xc1\x90\xd7\xe7"
22047 "\xf7\x52\xae\x28\x29\xa8\x22\xa4"
22048 "\x4f\xae\x48\xc2\xfa\x75\x8b\x9e"
22049 "\xce\x83\x2a\x88\x07\x55\xbb\x89"
22050 "\xf6\xdf\xac\xdf\x83\x08\xbf\x7d"
22051 "\xac\x30\x8b\x8e\x02\xac\x00\xf1"
22052 "\x30\x46\xe1\xbc\x75\xbf\x49\xbb"
22053 "\x26\x4e\x29\xf0\x2f\x21\xc6\x13"
22054 "\x92\xd9\x3d\x11\xe4\x10\x00\x8e"
22055 "\xd4\xd4\x58\x65\xa6\x2b\xe3\x25"
22056 "\xb1\x8f\x15\x93\xe7\x71\xb9\x2c"
22057 "\x4b",
22058 .alen = 129,
22059 .ptext = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78"
22060 "\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d"
22061 "\xd2\x39\x93\xa3\xab\x18\x7a\x95"
22062 "\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8"
22063 "\x15\xd1\xc3\x04\x69\x32\xe3\x4d"
22064 "\xaa\xc2\x04\x8b\xf2\xfa\xdc\x4a"
22065 "\x02\xeb\xa8\x90\x03\xfd\xea\x97"
22066 "\x43\xaf\x2e\x92\xf8\x57\xc5\x6a"
22067 "\x00",
22068 .plen = 65,
22069 .ctext = "\x7d\xde\x53\x22\xe4\x23\x3b\x30"
22070 "\x78\xde\x35\x90\x7a\xd9\x0b\x93"
22071 "\xf6\x0e\x0b\xed\x40\xee\x10\x9c"
22072 "\x96\x3a\xd3\x34\xb2\xd0\x67\xcf"
22073 "\x63\x7f\x2d\x0c\xcf\x96\xec\x64"
22074 "\x1a\x87\xcc\x7d\x2c\x5e\x81\x4b"
22075 "\xd2\x8f\x4c\x7c\x00\xb1\xb4\xe0"
22076 "\x87\x4d\xb1\xbc\xd8\x78\x2c\x17"
22077 "\xf2\x3b\xd8\x28\x40\xe2\x76\xf6"
22078 "\x20\x13\x83\x46\xaf\xff\xe3\x0f"
22079 "\x72",
22080 .clen = 81,
22081 }, {
22082 .key = "\xe0\x98\x5e\xa1\xe5\x38\x53\xff"
22083 "\x2a\x35\xfe\x21\xea\x8a\xfa\x1e"
22084 "\x9c\xea\x15\xc5\xec\xc0\x5b\x9b"
22085 "\xbf\x2f\x0a\xe1\x32\x12\x9d\x8e",
22086 .klen = 32,
22087 .iv = "\xef\x61\xed\x08\x29\xd7\xfd\x86"
22088 "\x4a\x6b\x2b\x46\xe9\x53\x2a\xa0",
22089 .assoc = "\xfe\x2a\x7b\x70\x6d\x75\xa7\x0d"
22090 "\x6a\xa2\x57\x6a\xe7\x1c\x5b\x21"
22091 "\x31\x4b\x1b\x07\x6f\x10\x1c\xa8"
22092 "\x20\x46\x7a\xce\x9f\x42\x6d\xf9",
22093 .alen = 32,
22094 .ptext = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94"
22095 "\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3"
22096 "\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf"
22097 "\x50\x52\xb1\xc4\x55\x59\x55\xaf",
22098 .plen = 32,
22099 .ctext = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe"
22100 "\x53\xc7\xaa\x9a\x60\x74\x9c\xc4"
22101 "\xa2\xc2\xd0\x6d\xe1\x03\x63\xdc"
22102 "\xbb\x51\x7e\x9c\x89\x73\xde\x4e"
22103 "\x24\xf8\x52\x7c\x15\x41\x0e\xba"
22104 "\x69\x0e\x36\x5f\x2f\x22\x8c",
22105 .clen = 47,
22106 }, {
22107 .key = "\x1c\xbd\x98\x40\xf5\xb3\xfc\x1b"
22108 "\xaa\x0f\xb0\xb3\xe4\xae\xbc\x24"
22109 "\xc7\xac\x21\x49\xf1\x60\xdd\xb5"
22110 "\x80\x5d\xe9\xba\x0c\x71\x3c\x64",
22111 .klen = 32,
22112 .iv = "\x2c\x86\x26\xa8\x39\x52\xa6\xa2"
22113 "\xcb\x45\xdd\xd7\xe3\x77\xed\xa6",
22114 .assoc = "\x3b\x4f\xb5\x10\x7d\xf1\x50\x29"
22115 "\xeb\x7c\x0a\xfb\xe1\x40\x1e\x27"
22116 "\x5c\x0d\x27\x8b\x74\xb0\x9e\xc2"
22117 "\xe1\x74\x59\xa6\x79\xa1\x0c\xd0",
22118 .alen = 32,
22119 .ptext = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0"
22120 "\x0b\xb2\x36\x20\xe0\x09\x4e\xa9"
22121 "\x26\xbe\xaa\xac\xb5\x58\x7e\xc8"
22122 "\x11\x7f\x90\x9c\x2f\xb8\xf4\x85",
22123 .plen = 32,
22124 .ctext = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51"
22125 "\xb8\xda\x92\x3c\xfd\xda\xac\x8e"
22126 "\x8d\x88\xd7\x4d\x90\xe5\xeb\xa1"
22127 "\xab\xd6\x7c\x76\xad\xea\x7d\x76"
22128 "\x53\xee\xb0\xcd\xd0\x02\xbb\x70"
22129 "\x5b\x6f\x7b\xe2\x8c\xe8",
22130 .clen = 46,
22131 }, {
22132 .key = "\x59\xe1\xd2\xdf\x05\x2f\xa4\x37"
22133 "\x2b\xe9\x63\x44\xde\xd3\x7f\x2b"
22134 "\xf1\x6f\x2d\xcd\xf6\x00\x5f\xcf"
22135 "\x42\x8a\xc8\x92\xe6\xd0\xdc\x3b",
22136 .klen = 32,
22137 .iv = "\x68\xab\x60\x47\x49\xce\x4f\xbe"
22138 "\x4c\x20\x8f\x68\xdd\x9c\xb0\xac",
22139 .assoc = "\x77\x74\xee\xaf\x8d\x6d\xf9\x45"
22140 "\x6c\x56\xbc\x8d\xdb\x65\xe0\x2e"
22141 "\x86\xd0\x32\x0f\x79\x50\x20\xdb"
22142 "\xa2\xa1\x37\x7e\x53\x00\xab\xa6",
22143 .alen = 32,
22144 .ptext = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc"
22145 "\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf"
22146 "\x51\x80\xb5\x30\xba\xf8\x00\xe2"
22147 "\xd3\xad\x6f\x75\x09\x18\x93\x5c",
22148 .plen = 32,
22149 .ctext = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b"
22150 "\xe7\x68\x81\x51\xbb\xfb\xdf\x60"
22151 "\xbb\xac\xe8\xc1\xdc\x68\xae\x68"
22152 "\x3a\xcd\x7a\x06\x49\xfe\x80\x11"
22153 "\xe6\x61\x99\xe2\xdd\xbe\x2c\xbf",
22154 .clen = 40,
22155 }, {
22156 .key = "\x96\x06\x0b\x7f\x15\xab\x4d\x53"
22157 "\xac\xc3\x15\xd6\xd8\xf7\x42\x31"
22158 "\x1b\x31\x38\x51\xfc\xa0\xe1\xe8"
22159 "\x03\xb8\xa7\x6b\xc0\x2f\x7b\x11",
22160 .klen = 32,
22161 .iv = "\xa5\xcf\x9a\xe6\x59\x4a\xf7\xd9"
22162 "\xcd\xfa\x41\xfa\xd7\xc0\x72\xb2",
22163 .assoc = "\xb4\x99\x28\x4e\x9d\xe8\xa2\x60"
22164 "\xed\x30\x6e\x1e\xd5\x89\xa3\x34"
22165 "\xb1\x92\x3e\x93\x7e\xf0\xa2\xf5"
22166 "\x64\xcf\x16\x57\x2d\x5f\x4a\x7d",
22167 .alen = 32,
22168 .ptext = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7"
22169 "\x0d\x67\x9a\x43\xd4\x52\xd4\xb5"
22170 "\x7b\x43\xc1\xb5\xbf\x98\x82\xfc"
22171 "\x94\xda\x4e\x4d\xe4\x77\x32\x32",
22172 .plen = 32,
22173 .ctext = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e"
22174 "\x5b\x4e\x0b\x15\x6e\x39\xfb\x0c"
22175 "\x73\xc7\xd9\x6b\xbe\xce\x9b\x70"
22176 "\xc7\x4f\x96\x16\x03\xfc\xea\xfb"
22177 "\x56",
22178 .clen = 33,
22179 },
22180};
22181
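The morus1280_tv_template above was, per the comment preceding it, generated with the SUPERCOP reference implementation. As a minimal sketch of how one such entry could be regenerated outside the kernel, the snippet below drives the CAESAR-style crypto_aead_encrypt() one-shot entry point that SUPERCOP reference code exposes, using the key, nonce and one-byte plaintext of the second (plen = 1) entry above; the prototype is the standard CAESAR AEAD API, but compiling and linking against the reference sources (e.g. crypto_aead/morus1280128v2/) is an assumption left out here.

/*
 * Hedged sketch: regenerate one morus1280_tv_template entry with the
 * CAESAR-style one-shot API implemented by the SUPERCOP reference code.
 * Build and link against crypto_aead/morus1280128v2/ from the SUPERCOP
 * tarball referenced above (build details assumed, not shown).
 */
#include <stdio.h>

/* Standard CAESAR AEAD prototype provided by the reference implementation. */
int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
			const unsigned char *m, unsigned long long mlen,
			const unsigned char *ad, unsigned long long adlen,
			const unsigned char *nsec, const unsigned char *npub,
			const unsigned char *k);

int main(void)
{
	/* .key, .iv and .ptext of the second (plen = 1) entry above. */
	static const unsigned char key[16] =
		"\x3c\x24\x39\x9f\x10\x7b\xa8\x1b"
		"\x80\xda\xb2\x91\xf9\x24\xc2\x06";
	static const unsigned char npub[16] =
		"\x4b\xed\xc8\x07\x54\x1a\x52\xa2"
		"\xa1\x10\xde\xb5\xf8\xed\xf3\x87";
	static const unsigned char m[1] = { 0x69 };
	unsigned char c[1 + 16];		/* ciphertext plus 128-bit tag */
	unsigned long long clen = 0;

	/* No associated data and no secret nonce for this entry. */
	crypto_aead_encrypt(c, &clen, m, 1, NULL, 0, NULL, npub, key);

	/* Expected to print the 17 .ctext bytes of that entry. */
	for (unsigned long long i = 0; i < clen; i++)
		printf("%02x", (unsigned)c[i]);
	printf("\n");
	return 0;
}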
22182/*
22183 * All key wrapping test vectors taken from
22184 * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip
22185 *
@@ -32454,6 +29763,86 @@ static const struct comp_testvec lzo_decomp_tv_template[] = {
29763 },
29764};
29765
29766static const struct comp_testvec lzorle_comp_tv_template[] = {
29767 {
29768 .inlen = 70,
29769 .outlen = 59,
29770 .input = "Join us now and share the software "
29771 "Join us now and share the software ",
29772 .output = "\x11\x01\x00\x0d\x4a\x6f\x69\x6e"
29773 "\x20\x75\x73\x20\x6e\x6f\x77\x20"
29774 "\x61\x6e\x64\x20\x73\x68\x61\x72"
29775 "\x65\x20\x74\x68\x65\x20\x73\x6f"
29776 "\x66\x74\x77\x70\x01\x32\x88\x00"
29777 "\x0c\x65\x20\x74\x68\x65\x20\x73"
29778 "\x6f\x66\x74\x77\x61\x72\x65\x20"
29779 "\x11\x00\x00",
29780 }, {
29781 .inlen = 159,
29782 .outlen = 133,
29783 .input = "This document describes a compression method based on the LZO "
29784 "compression algorithm. This document defines the application of "
29785 "the LZO algorithm used in UBIFS.",
29786 .output = "\x11\x01\x00\x2c\x54\x68\x69\x73"
29787 "\x20\x64\x6f\x63\x75\x6d\x65\x6e"
29788 "\x74\x20\x64\x65\x73\x63\x72\x69"
29789 "\x62\x65\x73\x20\x61\x20\x63\x6f"
29790 "\x6d\x70\x72\x65\x73\x73\x69\x6f"
29791 "\x6e\x20\x6d\x65\x74\x68\x6f\x64"
29792 "\x20\x62\x61\x73\x65\x64\x20\x6f"
29793 "\x6e\x20\x74\x68\x65\x20\x4c\x5a"
29794 "\x4f\x20\x2a\x8c\x00\x09\x61\x6c"
29795 "\x67\x6f\x72\x69\x74\x68\x6d\x2e"
29796 "\x20\x20\x2e\x54\x01\x03\x66\x69"
29797 "\x6e\x65\x73\x20\x74\x06\x05\x61"
29798 "\x70\x70\x6c\x69\x63\x61\x74\x76"
29799 "\x0a\x6f\x66\x88\x02\x60\x09\x27"
29800 "\xf0\x00\x0c\x20\x75\x73\x65\x64"
29801 "\x20\x69\x6e\x20\x55\x42\x49\x46"
29802 "\x53\x2e\x11\x00\x00",
29803 },
29804};
29805
29806static const struct comp_testvec lzorle_decomp_tv_template[] = {
29807 {
29808 .inlen = 133,
29809 .outlen = 159,
29810 .input = "\x00\x2b\x54\x68\x69\x73\x20\x64"
29811 "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
29812 "\x64\x65\x73\x63\x72\x69\x62\x65"
29813 "\x73\x20\x61\x20\x63\x6f\x6d\x70"
29814 "\x72\x65\x73\x73\x69\x6f\x6e\x20"
29815 "\x6d\x65\x74\x68\x6f\x64\x20\x62"
29816 "\x61\x73\x65\x64\x20\x6f\x6e\x20"
29817 "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
29818 "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
29819 "\x69\x74\x68\x6d\x2e\x20\x20\x54"
29820 "\x68\x69\x73\x2a\x54\x01\x02\x66"
29821 "\x69\x6e\x65\x73\x94\x06\x05\x61"
29822 "\x70\x70\x6c\x69\x63\x61\x74\x76"
29823 "\x0a\x6f\x66\x88\x02\x60\x09\x27"
29824 "\xf0\x00\x0c\x20\x75\x73\x65\x64"
29825 "\x20\x69\x6e\x20\x55\x42\x49\x46"
29826 "\x53\x2e\x11\x00\x00",
29827 .output = "This document describes a compression method based on the LZO "
29828 "compression algorithm. This document defines the application of "
29829 "the LZO algorithm used in UBIFS.",
29830 }, {
29831 .inlen = 59,
29832 .outlen = 70,
29833 .input = "\x11\x01\x00\x0d\x4a\x6f\x69\x6e"
29834 "\x20\x75\x73\x20\x6e\x6f\x77\x20"
29835 "\x61\x6e\x64\x20\x73\x68\x61\x72"
29836 "\x65\x20\x74\x68\x65\x20\x73\x6f"
29837 "\x66\x74\x77\x70\x01\x32\x88\x00"
29838 "\x0c\x65\x20\x74\x68\x65\x20\x73"
29839 "\x6f\x66\x74\x77\x61\x72\x65\x20"
29840 "\x11\x00\x00",
29841 .output = "Join us now and share the software "
29842 "Join us now and share the software ",
29843 },
29844};
29845
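The two lzorle templates just added are deliberately paired: for the short "Join us now ..." case, the compressed stream expected from lzorle_comp_tv_template is byte-for-byte the input fed to lzorle_decomp_tv_template. A trivial standalone check of that pairing is sketched below, with the byte strings copied verbatim from the entries above; it exercises nothing of lzo-rle itself, only the internal consistency of the vectors.

#include <assert.h>
#include <string.h>

int main(void)
{
	/* .output of the 70-byte-input lzorle_comp_tv_template entry. */
	static const unsigned char comp_output[] =
		"\x11\x01\x00\x0d\x4a\x6f\x69\x6e"
		"\x20\x75\x73\x20\x6e\x6f\x77\x20"
		"\x61\x6e\x64\x20\x73\x68\x61\x72"
		"\x65\x20\x74\x68\x65\x20\x73\x6f"
		"\x66\x74\x77\x70\x01\x32\x88\x00"
		"\x0c\x65\x20\x74\x68\x65\x20\x73"
		"\x6f\x66\x74\x77\x61\x72\x65\x20"
		"\x11\x00\x00";
	/* .input of the matching lzorle_decomp_tv_template entry. */
	static const unsigned char decomp_input[] =
		"\x11\x01\x00\x0d\x4a\x6f\x69\x6e"
		"\x20\x75\x73\x20\x6e\x6f\x77\x20"
		"\x61\x6e\x64\x20\x73\x68\x61\x72"
		"\x65\x20\x74\x68\x65\x20\x73\x6f"
		"\x66\x74\x77\x70\x01\x32\x88\x00"
		"\x0c\x65\x20\x74\x68\x65\x20\x73"
		"\x6f\x66\x74\x77\x61\x72\x65\x20"
		"\x11\x00\x00";

	/* Both entries declare the compressed stream as 59 bytes. */
	assert(sizeof(comp_output) - 1 == 59);
	assert(sizeof(decomp_input) - 1 == 59);
	assert(memcmp(comp_output, decomp_input, 59) == 0);
	return 0;
}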
29846/*
29847 * Michael MIC test vectors from IEEE 802.11i
29848 */
@@ -33681,4 +31070,501 @@ static const struct comp_testvec zstd_decomp_tv_template[] = {
33681 "functions.", 31070 "functions.",
33682 }, 31071 },
33683}; 31072};
31073
31074/* based on aes_cbc_tv_template */
31075static const struct cipher_testvec essiv_aes_cbc_tv_template[] = {
31076 {
31077 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
31078 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
31079 .klen = 16,
31080 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
31081 "\x00\x00\x00\x00\x00\x00\x00\x00",
31082 .ptext = "Single block msg",
31083 .ctext = "\xfa\x59\xe7\x5f\x41\x56\x65\xc3"
31084 "\x36\xca\x6b\x72\x10\x9f\x8c\xd4",
31085 .len = 16,
31086 }, {
31087 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
31088 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
31089 .klen = 16,
31090 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
31091 "\x00\x00\x00\x00\x00\x00\x00\x00",
31092 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
31093 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
31094 "\x10\x11\x12\x13\x14\x15\x16\x17"
31095 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
31096 .ctext = "\xc8\x59\x9a\xfe\x79\xe6\x7b\x20"
31097 "\x06\x7d\x55\x0a\x5e\xc7\xb5\xa7"
31098 "\x0b\x9c\x80\xd2\x15\xa1\xb8\x6d"
31099 "\xc6\xab\x7b\x65\xd9\xfd\x88\xeb",
31100 .len = 32,
31101 }, {
31102 .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
31103 "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
31104 "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
31105 .klen = 24,
31106 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
31107 "\x00\x00\x00\x00\x00\x00\x00\x00",
31108 .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
31109 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
31110 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
31111 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
31112 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
31113 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
31114 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
31115 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
31116 .ctext = "\x96\x6d\xa9\x7a\x42\xe6\x01\xc7"
31117 "\x17\xfc\xa7\x41\xd3\x38\x0b\xe5"
31118 "\x51\x48\xf7\x7e\x5e\x26\xa9\xfe"
31119 "\x45\x72\x1c\xd9\xde\xab\xf3\x4d"
31120 "\x39\x47\xc5\x4f\x97\x3a\x55\x63"
31121 "\x80\x29\x64\x4c\x33\xe8\x21\x8a"
31122 "\x6a\xef\x6b\x6a\x8f\x43\xc0\xcb"
31123 "\xf0\xf3\x6e\x74\x54\x44\x92\x44",
31124 .len = 64,
31125 }, {
31126 .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
31127 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
31128 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
31129 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
31130 .klen = 32,
31131 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
31132 "\x00\x00\x00\x00\x00\x00\x00\x00",
31133 .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
31134 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
31135 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
31136 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
31137 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
31138 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
31139 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
31140 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
31141 .ctext = "\x24\x52\xf1\x48\x74\xd0\xa7\x93"
31142 "\x75\x9b\x63\x46\xc0\x1c\x1e\x17"
31143 "\x4d\xdc\x5b\x3a\x27\x93\x2a\x63"
31144 "\xf7\xf1\xc7\xb3\x54\x56\x5b\x50"
31145 "\xa3\x31\xa5\x8b\xd6\xfd\xb6\x3c"
31146 "\x8b\xf6\xf2\x45\x05\x0c\xc8\xbb"
31147 "\x32\x0b\x26\x1c\xe9\x8b\x02\xc0"
31148 "\xb2\x6f\x37\xa7\x5b\xa8\xa9\x42",
31149 .len = 64,
31150 }, {
31151 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
31152 "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
31153 "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
31154 "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
31155 .klen = 32,
31156 .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
31157 "\x00\x00\x00\x00\x00\x00\x00\x00",
31158 .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
31159 "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
31160 "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
31161 "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
31162 "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
31163 "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
31164 "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
31165 "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
31166 "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
31167 "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
31168 "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
31169 "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
31170 "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
31171 "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
31172 "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
31173 "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
31174 "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
31175 "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
31176 "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
31177 "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
31178 "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
31179 "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
31180 "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
31181 "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
31182 "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
31183 "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
31184 "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
31185 "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
31186 "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
31187 "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
31188 "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
31189 "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
31190 "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
31191 "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
31192 "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
31193 "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
31194 "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
31195 "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
31196 "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
31197 "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
31198 "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
31199 "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
31200 "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
31201 "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
31202 "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
31203 "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
31204 "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
31205 "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
31206 "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
31207 "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
31208 "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
31209 "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
31210 "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
31211 "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
31212 "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
31213 "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
31214 "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
31215 "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
31216 "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
31217 "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
31218 "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
31219 "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
31220 .ctext = "\x97\x7f\x69\x0f\x0f\x34\xa6\x33"
31221 "\x66\x49\x7e\xd0\x4d\x1b\xc9\x64"
31222 "\xf9\x61\x95\x98\x11\x00\x88\xf8"
31223 "\x2e\x88\x01\x0f\x2b\xe1\xae\x3e"
31224 "\xfe\xd6\x47\x30\x11\x68\x7d\x99"
31225 "\xad\x69\x6a\xe8\x41\x5f\x1e\x16"
31226 "\x00\x3a\x47\xdf\x8e\x7d\x23\x1c"
31227 "\x19\x5b\x32\x76\x60\x03\x05\xc1"
31228 "\xa0\xff\xcf\xcc\x74\x39\x46\x63"
31229 "\xfe\x5f\xa6\x35\xa7\xb4\xc1\xf9"
31230 "\x4b\x5e\x38\xcc\x8c\xc1\xa2\xcf"
31231 "\x9a\xc3\xae\x55\x42\x46\x93\xd9"
31232 "\xbd\x22\xd3\x8a\x19\x96\xc3\xb3"
31233 "\x7d\x03\x18\xf9\x45\x09\x9c\xc8"
31234 "\x90\xf3\x22\xb3\x25\x83\x9a\x75"
31235 "\xbb\x04\x48\x97\x3a\x63\x08\x04"
31236 "\xa0\x69\xf6\x52\xd4\x89\x93\x69"
31237 "\xb4\x33\xa2\x16\x58\xec\x4b\x26"
31238 "\x76\x54\x10\x0b\x6e\x53\x1e\xbc"
31239 "\x16\x18\x42\xb1\xb1\xd3\x4b\xda"
31240 "\x06\x9f\x8b\x77\xf7\xab\xd6\xed"
31241 "\xa3\x1d\x90\xda\x49\x38\x20\xb8"
31242 "\x6c\xee\xae\x3e\xae\x6c\x03\xb8"
31243 "\x0b\xed\xc8\xaa\x0e\xc5\x1f\x90"
31244 "\x60\xe2\xec\x1b\x76\xd0\xcf\xda"
31245 "\x29\x1b\xb8\x5a\xbc\xf4\xba\x13"
31246 "\x91\xa6\xcb\x83\x3f\xeb\xe9\x7b"
31247 "\x03\xba\x40\x9e\xe6\x7a\xb2\x4a"
31248 "\x73\x49\xfc\xed\xfb\x55\xa4\x24"
31249 "\xc7\xa4\xd7\x4b\xf5\xf7\x16\x62"
31250 "\x80\xd3\x19\x31\x52\x25\xa8\x69"
31251 "\xda\x9a\x87\xf5\xf2\xee\x5d\x61"
31252 "\xc1\x12\x72\x3e\x52\x26\x45\x3a"
31253 "\xd8\x9d\x57\xfa\x14\xe2\x9b\x2f"
31254 "\xd4\xaa\x5e\x31\xf4\x84\x89\xa4"
31255 "\xe3\x0e\xb0\x58\x41\x75\x6a\xcb"
31256 "\x30\x01\x98\x90\x15\x80\xf5\x27"
31257 "\x92\x13\x81\xf0\x1c\x1e\xfc\xb1"
31258 "\x33\xf7\x63\xb0\x67\xec\x2e\x5c"
31259 "\x85\xe3\x5b\xd0\x43\x8a\xb8\x5f"
31260 "\x44\x9f\xec\x19\xc9\x8f\xde\xdf"
31261 "\x79\xef\xf8\xee\x14\x87\xb3\x34"
31262 "\x76\x00\x3a\x9b\xc7\xed\xb1\x3d"
31263 "\xef\x07\xb0\xe4\xfd\x68\x9e\xeb"
31264 "\xc2\xb4\x1a\x85\x9a\x7d\x11\x88"
31265 "\xf8\xab\x43\x55\x2b\x8a\x4f\x60"
31266 "\x85\x9a\xf4\xba\xae\x48\x81\xeb"
31267 "\x93\x07\x97\x9e\xde\x2a\xfc\x4e"
31268 "\x31\xde\xaa\x44\xf7\x2a\xc3\xee"
31269 "\x60\xa2\x98\x2c\x0a\x88\x50\xc5"
31270 "\x6d\x89\xd3\xe4\xb6\xa7\xf4\xb0"
31271 "\xcf\x0e\x89\xe3\x5e\x8f\x82\xf4"
31272 "\x9d\xd1\xa9\x51\x50\x8a\xd2\x18"
31273 "\x07\xb2\xaa\x3b\x7f\x58\x9b\xf4"
31274 "\xb7\x24\x39\xd3\x66\x2f\x1e\xc0"
31275 "\x11\xa3\x56\x56\x2a\x10\x73\xbc"
31276 "\xe1\x23\xbf\xa9\x37\x07\x9c\xc3"
31277 "\xb2\xc9\xa8\x1c\x5b\x5c\x58\xa4"
31278 "\x77\x02\x26\xad\xc3\x40\x11\x53"
31279 "\x93\x68\x72\xde\x05\x8b\x10\xbc"
31280 "\xa6\xd4\x1b\xd9\x27\xd8\x16\x12"
31281 "\x61\x2b\x31\x2a\x44\x87\x96\x58",
31282 .len = 496,
31283 },
31284};
31285
31286/* based on hmac_sha256_aes_cbc_tv_temp */
31287static const struct aead_testvec essiv_hmac_sha256_aes_cbc_tv_temp[] = {
31288 {
31289#ifdef __LITTLE_ENDIAN
31290 .key = "\x08\x00" /* rta length */
31291 "\x01\x00" /* rta type */
31292#else
31293 .key = "\x00\x08" /* rta length */
31294 "\x00\x01" /* rta type */
31295#endif
31296 "\x00\x00\x00\x10" /* enc key length */
31297 "\x00\x00\x00\x00\x00\x00\x00\x00"
31298 "\x00\x00\x00\x00\x00\x00\x00\x00"
31299 "\x00\x00\x00\x00\x00\x00\x00\x00"
31300 "\x00\x00\x00\x00\x00\x00\x00\x00"
31301 "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
31302 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
31303 .klen = 8 + 32 + 16,
31304 .iv = "\xb3\x0c\x5a\x11\x41\xad\xc1\x04"
31305 "\xbc\x1e\x7e\x35\xb0\x5d\x78\x29",
31306 .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
31307 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
31308 .alen = 16,
31309 .ptext = "Single block msg",
31310 .plen = 16,
31311 .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
31312 "\x27\x08\x94\x2d\xbe\x77\x18\x1a"
31313 "\xcc\xde\x2d\x6a\xae\xf1\x0b\xcc"
31314 "\x38\x06\x38\x51\xb4\xb8\xf3\x5b"
31315 "\x5c\x34\xa6\xa3\x6e\x0b\x05\xe5"
31316 "\x6a\x6d\x44\xaa\x26\xa8\x44\xa5",
31317 .clen = 16 + 32,
31318 }, {
31319#ifdef __LITTLE_ENDIAN
31320 .key = "\x08\x00" /* rta length */
31321 "\x01\x00" /* rta type */
31322#else
31323 .key = "\x00\x08" /* rta length */
31324 "\x00\x01" /* rta type */
31325#endif
31326 "\x00\x00\x00\x10" /* enc key length */
31327 "\x20\x21\x22\x23\x24\x25\x26\x27"
31328 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
31329 "\x30\x31\x32\x33\x34\x35\x36\x37"
31330 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
31331 "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
31332 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
31333 .klen = 8 + 32 + 16,
31334 .iv = "\x56\xe8\x14\xa5\x74\x18\x75\x13"
31335 "\x2f\x79\xe7\xc8\x65\xe3\x48\x45",
31336 .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
31337 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
31338 .alen = 16,
31339 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
31340 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
31341 "\x10\x11\x12\x13\x14\x15\x16\x17"
31342 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
31343 .plen = 32,
31344 .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
31345 "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
31346 "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
31347 "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
31348 "\xf5\x33\x53\xf3\x68\x85\x2a\x99"
31349 "\x0e\x06\x58\x8f\xba\xf6\x06\xda"
31350 "\x49\x69\x0d\x5b\xd4\x36\x06\x62"
31351 "\x35\x5e\x54\x58\x53\x4d\xdf\xbf",
31352 .clen = 32 + 32,
31353 }, {
31354#ifdef __LITTLE_ENDIAN
31355 .key = "\x08\x00" /* rta length */
31356 "\x01\x00" /* rta type */
31357#else
31358 .key = "\x00\x08" /* rta length */
31359 "\x00\x01" /* rta type */
31360#endif
31361 "\x00\x00\x00\x10" /* enc key length */
31362 "\x11\x22\x33\x44\x55\x66\x77\x88"
31363 "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
31364 "\x22\x33\x44\x55\x66\x77\x88\x99"
31365 "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
31366 "\x6c\x3e\xa0\x47\x76\x30\xce\x21"
31367 "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd",
31368 .klen = 8 + 32 + 16,
31369 .iv = "\x1f\x6b\xfb\xd6\x6b\x72\x2f\xc9"
31370 "\xb6\x9f\x8c\x10\xa8\x96\x15\x64",
31371 .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
31372 "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
31373 .alen = 16,
31374 .ptext = "This is a 48-byte message (exactly 3 AES blocks)",
31375 .plen = 48,
31376 .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
31377 "\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
31378 "\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
31379 "\x50\x69\x39\x27\x67\x72\xf8\xd5"
31380 "\x02\x1c\x19\x21\x6b\xad\x52\x5c"
31381 "\x85\x79\x69\x5d\x83\xba\x26\x84"
31382 "\x68\xb9\x3e\x90\x38\xa0\x88\x01"
31383 "\xe7\xc6\xce\x10\x31\x2f\x9b\x1d"
31384 "\x24\x78\xfb\xbe\x02\xe0\x4f\x40"
31385 "\x10\xbd\xaa\xc6\xa7\x79\xe0\x1a",
31386 .clen = 48 + 32,
31387 }, {
31388#ifdef __LITTLE_ENDIAN
31389 .key = "\x08\x00" /* rta length */
31390 "\x01\x00" /* rta type */
31391#else
31392 .key = "\x00\x08" /* rta length */
31393 "\x00\x01" /* rta type */
31394#endif
31395 "\x00\x00\x00\x10" /* enc key length */
31396 "\x11\x22\x33\x44\x55\x66\x77\x88"
31397 "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
31398 "\x22\x33\x44\x55\x66\x77\x88\x99"
31399 "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
31400 "\x56\xe4\x7a\x38\xc5\x59\x89\x74"
31401 "\xbc\x46\x90\x3d\xba\x29\x03\x49",
31402 .klen = 8 + 32 + 16,
31403 .iv = "\x13\xe5\xf2\xef\x61\x97\x59\x35"
31404 "\x9b\x36\x84\x46\x4e\x63\xd1\x41",
31405 .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
31406 "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
31407 .alen = 16,
31408 .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
31409 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
31410 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
31411 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
31412 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
31413 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
31414 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
31415 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
31416 .plen = 64,
31417 .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
31418 "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
31419 "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
31420 "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
31421 "\x35\x90\x7a\xa6\x32\xc3\xff\xdf"
31422 "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad"
31423 "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d"
31424 "\x49\xa5\x3e\x87\xf4\xc3\xda\x55"
31425 "\x7a\x1b\xd4\x3c\xdb\x17\x95\xe2"
31426 "\xe0\x93\xec\xc9\x9f\xf7\xce\xd8"
31427 "\x3f\x54\xe2\x49\x39\xe3\x71\x25"
31428 "\x2b\x6c\xe9\x5d\xec\xec\x2b\x64",
31429 .clen = 64 + 32,
31430 }, {
31431#ifdef __LITTLE_ENDIAN
31432 .key = "\x08\x00" /* rta length */
31433 "\x01\x00" /* rta type */
31434#else
31435 .key = "\x00\x08" /* rta length */
31436 "\x00\x01" /* rta type */
31437#endif
31438 "\x00\x00\x00\x10" /* enc key length */
31439 "\x11\x22\x33\x44\x55\x66\x77\x88"
31440 "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
31441 "\x22\x33\x44\x55\x66\x77\x88\x99"
31442 "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
31443 "\x90\xd3\x82\xb4\x10\xee\xba\x7a"
31444 "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf",
31445 .klen = 8 + 32 + 16,
31446 .iv = "\xe4\x13\xa1\x15\xe9\x6b\xb8\x23"
31447 "\x81\x7a\x94\x29\xab\xfd\xd2\x2c",
31448 .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
31449 "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
31450 "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
31451 .alen = 24,
31452 .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
31453 "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
31454 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
31455 "\x10\x11\x12\x13\x14\x15\x16\x17"
31456 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
31457 "\x20\x21\x22\x23\x24\x25\x26\x27"
31458 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
31459 "\x30\x31\x32\x33\x34\x35\x36\x37"
31460 "\x01\x02\x03\x04\x05\x06\x07\x08"
31461 "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
31462 .plen = 80,
31463 .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
31464 "\xa9\x45\x3e\x19\x4e\x12\x08\x49"
31465 "\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
31466 "\x33\x00\x13\xb4\x89\x8d\xc8\x56"
31467 "\xa4\x69\x9e\x52\x3a\x55\xdb\x08"
31468 "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52"
31469 "\x77\x5b\x07\xd1\xdb\x34\xed\x9c"
31470 "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a"
31471 "\xa2\x69\xad\xd0\x47\xad\x2d\x59"
31472 "\x13\xac\x19\xb7\xcf\xba\xd4\xa6"
31473 "\xbb\xd4\x0f\xbe\xa3\x3b\x4c\xb8"
31474 "\x3a\xd2\xe1\x03\x86\xa5\x59\xb7"
31475 "\x73\xc3\x46\x20\x2c\xb1\xef\x68"
31476 "\xbb\x8a\x32\x7e\x12\x8c\x69\xcf",
31477 .clen = 80 + 32,
31478 }, {
31479#ifdef __LITTLE_ENDIAN
31480 .key = "\x08\x00" /* rta length */
31481 "\x01\x00" /* rta type */
31482#else
31483 .key = "\x00\x08" /* rta length */
31484 "\x00\x01" /* rta type */
31485#endif
31486 "\x00\x00\x00\x18" /* enc key length */
31487 "\x11\x22\x33\x44\x55\x66\x77\x88"
31488 "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
31489 "\x22\x33\x44\x55\x66\x77\x88\x99"
31490 "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
31491 "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
31492 "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
31493 "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
31494 .klen = 8 + 32 + 24,
31495 .iv = "\x49\xca\x41\xc9\x6b\xbf\x6c\x98"
31496 "\x38\x2f\xa7\x3d\x4d\x80\x49\xb0",
31497 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
31498 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
31499 .alen = 16,
31500 .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
31501 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
31502 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
31503 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
31504 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
31505 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
31506 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
31507 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
31508 .plen = 64,
31509 .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
31510 "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
31511 "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
31512 "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
31513 "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
31514 "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
31515 "\x08\xb0\xe2\x79\x88\x59\x88\x81"
31516 "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd"
31517 "\x2f\xee\x5f\xdb\x66\xfe\x79\x09"
31518 "\x61\x81\x31\xea\x5b\x3d\x8e\xfb"
31519 "\xca\x71\x85\x93\xf7\x85\x55\x8b"
31520 "\x7a\xe4\x94\xca\x8b\xba\x19\x33",
31521 .clen = 64 + 32,
31522 }, {
31523#ifdef __LITTLE_ENDIAN
31524 .key = "\x08\x00" /* rta length */
31525 "\x01\x00" /* rta type */
31526#else
31527 .key = "\x00\x08" /* rta length */
31528 "\x00\x01" /* rta type */
31529#endif
31530 "\x00\x00\x00\x20" /* enc key length */
31531 "\x11\x22\x33\x44\x55\x66\x77\x88"
31532 "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
31533 "\x22\x33\x44\x55\x66\x77\x88\x99"
31534 "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
31535 "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
31536 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
31537 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
31538 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
31539 .klen = 8 + 32 + 32,
31540 .iv = "\xdf\xab\xf2\x7c\xdc\xe0\x33\x4c"
31541 "\xf9\x75\xaf\xf9\x2f\x60\x3a\x9b",
31542 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
31543 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
31544 .alen = 16,
31545 .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
31546 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
31547 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
31548 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
31549 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
31550 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
31551 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
31552 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
31553 .plen = 64,
31554 .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
31555 "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
31556 "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
31557 "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
31558 "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
31559 "\xa5\x30\xe2\x63\x04\x23\x14\x61"
31560 "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
31561 "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b"
31562 "\x24\x29\xed\xc2\x31\x49\xdb\xb1"
31563 "\x8f\x74\xbd\x17\x92\x03\xbe\x8f"
31564 "\xf3\x61\xde\x1c\xe9\xdb\xcd\xd0"
31565 "\xcc\xce\xe9\x85\x57\xcf\x6f\x5f",
31566 .clen = 64 + 32,
31567 },
31568};
31569
33684#endif /* _CRYPTO_TESTMGR_H */ 31570#endif /* _CRYPTO_TESTMGR_H */
diff --git a/crypto/xts.c b/crypto/xts.c
index 11211003db7e..ab117633d64e 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -1,8 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-or-later 1// SPDX-License-Identifier: GPL-2.0-or-later
2/* XTS: as defined in IEEE1619/D16 2/* XTS: as defined in IEEE1619/D16
3 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf 3 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
4 * (sector sizes which are not a multiple of 16 bytes are,
5 * however currently unsupported)
6 * 4 *
7 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org> 5 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
8 * 6 *
@@ -34,6 +32,8 @@ struct xts_instance_ctx {
34 32
35struct rctx { 33struct rctx {
36 le128 t; 34 le128 t;
35 struct scatterlist *tail;
36 struct scatterlist sg[2];
37 struct skcipher_request subreq; 37 struct skcipher_request subreq;
38}; 38};
39 39
@@ -84,10 +84,11 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
84 * mutliple calls to the 'ecb(..)' instance, which usually would be slower than 84 * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
85 * just doing the gf128mul_x_ble() calls again. 85 * just doing the gf128mul_x_ble() calls again.
86 */ 86 */
87static int xor_tweak(struct skcipher_request *req, bool second_pass) 87static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
88{ 88{
89 struct rctx *rctx = skcipher_request_ctx(req); 89 struct rctx *rctx = skcipher_request_ctx(req);
90 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 90 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
91 const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
91 const int bs = XTS_BLOCK_SIZE; 92 const int bs = XTS_BLOCK_SIZE;
92 struct skcipher_walk w; 93 struct skcipher_walk w;
93 le128 t = rctx->t; 94 le128 t = rctx->t;
@@ -109,6 +110,20 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
109 wdst = w.dst.virt.addr; 110 wdst = w.dst.virt.addr;
110 111
111 do { 112 do {
113 if (unlikely(cts) &&
114 w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
115 if (!enc) {
116 if (second_pass)
117 rctx->t = t;
118 gf128mul_x_ble(&t, &t);
119 }
120 le128_xor(wdst, &t, wsrc);
121 if (enc && second_pass)
122 gf128mul_x_ble(&rctx->t, &t);
123 skcipher_walk_done(&w, avail - bs);
124 return 0;
125 }
126
112 le128_xor(wdst++, &t, wsrc++); 127 le128_xor(wdst++, &t, wsrc++);
113 gf128mul_x_ble(&t, &t); 128 gf128mul_x_ble(&t, &t);
114 } while ((avail -= bs) >= bs); 129 } while ((avail -= bs) >= bs);
@@ -119,17 +134,71 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
119 return err; 134 return err;
120} 135}
121 136
122static int xor_tweak_pre(struct skcipher_request *req) 137static int xor_tweak_pre(struct skcipher_request *req, bool enc)
123{ 138{
124 return xor_tweak(req, false); 139 return xor_tweak(req, false, enc);
125} 140}
126 141
127static int xor_tweak_post(struct skcipher_request *req) 142static int xor_tweak_post(struct skcipher_request *req, bool enc)
128{ 143{
129 return xor_tweak(req, true); 144 return xor_tweak(req, true, enc);
130} 145}
131 146
132static void crypt_done(struct crypto_async_request *areq, int err) 147static void cts_done(struct crypto_async_request *areq, int err)
148{
149 struct skcipher_request *req = areq->data;
150 le128 b;
151
152 if (!err) {
153 struct rctx *rctx = skcipher_request_ctx(req);
154
155 scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
156 le128_xor(&b, &rctx->t, &b);
157 scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
158 }
159
160 skcipher_request_complete(req, err);
161}
162
163static int cts_final(struct skcipher_request *req,
164 int (*crypt)(struct skcipher_request *req))
165{
166 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
167 int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
168 struct rctx *rctx = skcipher_request_ctx(req);
169 struct skcipher_request *subreq = &rctx->subreq;
170 int tail = req->cryptlen % XTS_BLOCK_SIZE;
171 le128 b[2];
172 int err;
173
174 rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
175 offset - XTS_BLOCK_SIZE);
176
177 scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
178 memcpy(b + 1, b, tail);
179 scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
180
181 le128_xor(b, &rctx->t, b);
182
183 scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
184
185 skcipher_request_set_tfm(subreq, ctx->child);
186 skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
187 skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
188 XTS_BLOCK_SIZE, NULL);
189
190 err = crypt(subreq);
191 if (err)
192 return err;
193
194 scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
195 le128_xor(b, &rctx->t, b);
196 scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
197
198 return 0;
199}
200
201static void encrypt_done(struct crypto_async_request *areq, int err)
133{ 202{
134 struct skcipher_request *req = areq->data; 203 struct skcipher_request *req = areq->data;
135 204
@@ -137,47 +206,90 @@ static void crypt_done(struct crypto_async_request *areq, int err)
137 struct rctx *rctx = skcipher_request_ctx(req); 206 struct rctx *rctx = skcipher_request_ctx(req);
138 207
139 rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 208 rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
140 err = xor_tweak_post(req); 209 err = xor_tweak_post(req, true);
210
211 if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
212 err = cts_final(req, crypto_skcipher_encrypt);
213 if (err == -EINPROGRESS)
214 return;
215 }
141 } 216 }
142 217
143 skcipher_request_complete(req, err); 218 skcipher_request_complete(req, err);
144} 219}
145 220
146static void init_crypt(struct skcipher_request *req) 221static void decrypt_done(struct crypto_async_request *areq, int err)
222{
223 struct skcipher_request *req = areq->data;
224
225 if (!err) {
226 struct rctx *rctx = skcipher_request_ctx(req);
227
228 rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
229 err = xor_tweak_post(req, false);
230
231 if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
232 err = cts_final(req, crypto_skcipher_decrypt);
233 if (err == -EINPROGRESS)
234 return;
235 }
236 }
237
238 skcipher_request_complete(req, err);
239}
240
241static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
147{ 242{
148 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); 243 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
149 struct rctx *rctx = skcipher_request_ctx(req); 244 struct rctx *rctx = skcipher_request_ctx(req);
150 struct skcipher_request *subreq = &rctx->subreq; 245 struct skcipher_request *subreq = &rctx->subreq;
151 246
247 if (req->cryptlen < XTS_BLOCK_SIZE)
248 return -EINVAL;
249
152 skcipher_request_set_tfm(subreq, ctx->child); 250 skcipher_request_set_tfm(subreq, ctx->child);
153 skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); 251 skcipher_request_set_callback(subreq, req->base.flags, compl, req);
154 skcipher_request_set_crypt(subreq, req->dst, req->dst, 252 skcipher_request_set_crypt(subreq, req->dst, req->dst,
155 req->cryptlen, NULL); 253 req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
156 254
157 /* calculate first value of T */ 255 /* calculate first value of T */
158 crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv); 256 crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
257
258 return 0;
159} 259}
160 260
161static int encrypt(struct skcipher_request *req) 261static int encrypt(struct skcipher_request *req)
162{ 262{
163 struct rctx *rctx = skcipher_request_ctx(req); 263 struct rctx *rctx = skcipher_request_ctx(req);
164 struct skcipher_request *subreq = &rctx->subreq; 264 struct skcipher_request *subreq = &rctx->subreq;
265 int err;
165 266
166 init_crypt(req); 267 err = init_crypt(req, encrypt_done) ?:
167 return xor_tweak_pre(req) ?: 268 xor_tweak_pre(req, true) ?:
168 crypto_skcipher_encrypt(subreq) ?: 269 crypto_skcipher_encrypt(subreq) ?:
169 xor_tweak_post(req); 270 xor_tweak_post(req, true);
271
272 if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
273 return err;
274
275 return cts_final(req, crypto_skcipher_encrypt);
170} 276}
171 277
172static int decrypt(struct skcipher_request *req) 278static int decrypt(struct skcipher_request *req)
173{ 279{
174 struct rctx *rctx = skcipher_request_ctx(req); 280 struct rctx *rctx = skcipher_request_ctx(req);
175 struct skcipher_request *subreq = &rctx->subreq; 281 struct skcipher_request *subreq = &rctx->subreq;
282 int err;
283
284 err = init_crypt(req, decrypt_done) ?:
285 xor_tweak_pre(req, false) ?:
286 crypto_skcipher_decrypt(subreq) ?:
287 xor_tweak_post(req, false);
288
289 if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
290 return err;
176 291
177 init_crypt(req); 292 return cts_final(req, crypto_skcipher_decrypt);
178 return xor_tweak_pre(req) ?:
179 crypto_skcipher_decrypt(subreq) ?:
180 xor_tweak_post(req);
181} 293}
182 294
183static int init_tfm(struct crypto_skcipher *tfm) 295static int init_tfm(struct crypto_skcipher *tfm)
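Note on the xts.c hunks above: they add ciphertext stealing, so the generic XTS template now accepts any message of at least one block instead of only multiples of 16 bytes (hence the removed header comment). To make the data movement easier to follow, here is a hedged userspace sketch of the stealing step only; the toy one-block permutation and the trivial tweak update are illustrative stand-ins for the real AES-ECB child transform and the gf128mul_x_ble() doubling, so this is not the kernel algorithm itself, just the CTS byte shuffling it performs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16

/* Toy stand-ins: NOT AES and NOT GF(2^128) doubling. */
static void toy_block_encrypt(uint8_t out[BS], const uint8_t in[BS])
{
	uint8_t tmp[BS];

	for (int i = 0; i < BS; i++)
		tmp[i] = (uint8_t)(in[(i + 3) % BS] ^ 0x5c);
	memcpy(out, tmp, BS);
}

static void toy_tweak_next(uint8_t t[BS])
{
	for (int i = 0; i < BS; i++)
		t[i] = (uint8_t)(t[i] + 1);
}

/* out = E(in XOR t) XOR t, the XTS per-block construction. */
static void xts_one_block(uint8_t out[BS], const uint8_t in[BS],
			  const uint8_t t[BS])
{
	uint8_t x[BS];

	for (int i = 0; i < BS; i++)
		x[i] = in[i] ^ t[i];
	toy_block_encrypt(x, x);
	for (int i = 0; i < BS; i++)
		out[i] = x[i] ^ t[i];
}

/*
 * Encrypt len >= BS bytes with ciphertext stealing: full blocks are handled
 * normally; the trailing partial block borrows the tail of the last full
 * ciphertext block, and the two final output blocks are emitted in swapped
 * order, mirroring what cts_final() arranges through rctx->tail.
 */
static void xts_cts_encrypt(uint8_t *dst, const uint8_t *src, size_t len,
			    uint8_t tweak[BS])
{
	size_t full = len / BS, tail = len % BS, i;
	uint8_t cc[BS], pp[BS];

	if (tail)
		full--;			/* last full block joins the stealing */

	for (i = 0; i < full; i++) {
		xts_one_block(dst + i * BS, src + i * BS, tweak);
		toy_tweak_next(tweak);
	}
	if (!tail)
		return;

	xts_one_block(cc, src + full * BS, tweak);	/* would-be last full block */
	toy_tweak_next(tweak);

	memcpy(pp, src + (full + 1) * BS, tail);	/* partial plaintext ... */
	memcpy(pp + tail, cc + tail, BS - tail);	/* ... plus stolen ciphertext */

	xts_one_block(dst + full * BS, pp, tweak);	/* full block, written first */
	memcpy(dst + (full + 1) * BS, cc, tail);	/* truncated block written last */
}

int main(void)
{
	uint8_t tweak[BS] = { 1 }, in[37], out[37];
	size_t i;

	for (i = 0; i < sizeof(in); i++)
		in[i] = (uint8_t)i;
	xts_cts_encrypt(out, in, sizeof(in), tweak);
	for (i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}

For a 37-byte input the sketch emits two full ciphertext blocks followed by a 5-byte tail, with the last two output blocks produced in the swapped order that IEEE 1619 ciphertext stealing requires.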
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 433426242b87..e55705745d5e 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -86,7 +86,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
86 trng->rng.name = pdev->name; 86 trng->rng.name = pdev->name;
87 trng->rng.read = atmel_trng_read; 87 trng->rng.read = atmel_trng_read;
88 88
89 ret = hwrng_register(&trng->rng); 89 ret = devm_hwrng_register(&pdev->dev, &trng->rng);
90 if (ret) 90 if (ret)
91 goto err_register; 91 goto err_register;
92 92
@@ -103,7 +103,6 @@ static int atmel_trng_remove(struct platform_device *pdev)
103{ 103{
104 struct atmel_trng *trng = platform_get_drvdata(pdev); 104 struct atmel_trng *trng = platform_get_drvdata(pdev);
105 105
106 hwrng_unregister(&trng->rng);
107 106
108 atmel_trng_disable(trng); 107 atmel_trng_disable(trng);
109 clk_disable_unprepare(trng->clk); 108 clk_disable_unprepare(trng->clk);
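This atmel-rng change is the first of a series of identical conversions below (cavium, exynos, n2, nomadik, omap, powernv, st, xgene, atmel-sha204a): hwrng_register() becomes devm_hwrng_register(), which ties the hwrng's lifetime to the device, so the remove paths no longer need an explicit hwrng_unregister(). A minimal hypothetical platform driver showing the resulting shape (the driver name and the empty read callback are illustrative only, not part of this series):

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* ... read up to max bytes of entropy into buf, return count ... */
	return 0;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
};

static int example_rng_probe(struct platform_device *pdev)
{
	/*
	 * Device-managed registration: the core unregisters the hwrng
	 * automatically when the device is removed, so the driver needs
	 * no hwrng_unregister() call in a .remove hook.
	 */
	return devm_hwrng_register(&pdev->dev, &example_rng);
}

static struct platform_driver example_rng_driver = {
	.driver = { .name = "example-rng" },
	.probe = example_rng_probe,
};
module_platform_driver(example_rng_driver);
MODULE_LICENSE("GPL");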
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 2d1352b67168..3de4a6a443ef 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -67,7 +67,7 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
67 67
68 pci_set_drvdata(pdev, rng); 68 pci_set_drvdata(pdev, rng);
69 69
70 ret = hwrng_register(&rng->ops); 70 ret = devm_hwrng_register(&pdev->dev, &rng->ops);
71 if (ret) { 71 if (ret) {
72 dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); 72 dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
73 return ret; 73 return ret;
@@ -76,14 +76,6 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
76 return 0; 76 return 0;
77} 77}
78 78
79/* Remove the VF */
80static void cavium_rng_remove_vf(struct pci_dev *pdev)
81{
82 struct cavium_rng *rng;
83
84 rng = pci_get_drvdata(pdev);
85 hwrng_unregister(&rng->ops);
86}
87 79
88static const struct pci_device_id cavium_rng_vf_id_table[] = { 80static const struct pci_device_id cavium_rng_vf_id_table[] = {
89 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, 81 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
@@ -95,7 +87,6 @@ static struct pci_driver cavium_rng_vf_driver = {
95 .name = "cavium_rng_vf", 87 .name = "cavium_rng_vf",
96 .id_table = cavium_rng_vf_id_table, 88 .id_table = cavium_rng_vf_id_table,
97 .probe = cavium_rng_probe_vf, 89 .probe = cavium_rng_probe_vf,
98 .remove = cavium_rng_remove_vf,
99}; 90};
100module_pci_driver(cavium_rng_vf_driver); 91module_pci_driver(cavium_rng_vf_driver);
101 92
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 9044d31ab1a1..bdab5d9af8d2 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/freezer.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
17#include <linux/hw_random.h> 18#include <linux/hw_random.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -421,7 +422,9 @@ static int hwrng_fillfn(void *unused)
421{ 422{
422 long rc; 423 long rc;
423 424
424 while (!kthread_should_stop()) { 425 set_freezable();
426
427 while (!kthread_freezable_should_stop(NULL)) {
425 struct hwrng *rng; 428 struct hwrng *rng;
426 429
427 rng = get_current_rng(); 430 rng = get_current_rng();
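The core.c hunk makes hwrng_fillfn() freezable so the entropy-filling kthread no longer blocks suspend/hibernate. The general shape of a freezable kthread loop, sketched with a placeholder body rather than the actual fill logic:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_fillfn(void *unused)
{
	set_freezable();	/* opt this kthread into the freezer */

	/* Returns true on a stop request and transparently handles freezing. */
	while (!kthread_freezable_should_stop(NULL)) {
		/* ... fetch data from the current rng and feed the pool ... */
		cond_resched();
	}
	return 0;
}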
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 94235761955c..b4b52ab23b6b 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -153,7 +153,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
153 goto err_clock; 153 goto err_clock;
154 } 154 }
155 155
156 ret = hwrng_register(&trng->rng); 156 ret = devm_hwrng_register(&pdev->dev, &trng->rng);
157 if (ret) { 157 if (ret) {
158 dev_err(&pdev->dev, "Could not register hwrng device.\n"); 158 dev_err(&pdev->dev, "Could not register hwrng device.\n");
159 goto err_register; 159 goto err_register;
@@ -179,7 +179,6 @@ static int exynos_trng_remove(struct platform_device *pdev)
179{ 179{
180 struct exynos_trng_dev *trng = platform_get_drvdata(pdev); 180 struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
181 181
182 hwrng_unregister(&trng->rng);
183 clk_disable_unprepare(trng->clk); 182 clk_disable_unprepare(trng->clk);
184 183
185 pm_runtime_put_sync(&pdev->dev); 184 pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 69f537980004..30cf00f8e9a0 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -196,7 +196,6 @@ static int imx_rngc_init(struct hwrng *rng)
196static int imx_rngc_probe(struct platform_device *pdev) 196static int imx_rngc_probe(struct platform_device *pdev)
197{ 197{
198 struct imx_rngc *rngc; 198 struct imx_rngc *rngc;
199 struct resource *res;
200 int ret; 199 int ret;
201 int irq; 200 int irq;
202 201
@@ -204,8 +203,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
204 if (!rngc) 203 if (!rngc)
205 return -ENOMEM; 204 return -ENOMEM;
206 205
207 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 206 rngc->base = devm_platform_ioremap_resource(pdev, 0);
208 rngc->base = devm_ioremap_resource(&pdev->dev, res);
209 if (IS_ERR(rngc->base)) 207 if (IS_ERR(rngc->base))
210 return PTR_ERR(rngc->base); 208 return PTR_ERR(rngc->base);
211 209
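imx-rngc (and mxc-rnga and artpec6 further down) switch from the platform_get_resource() plus devm_ioremap_resource() pair to devm_platform_ioremap_resource(), which looks up MEM resource 0 and ioremaps it in one call, dropping the struct resource local. A hypothetical probe using the helper:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Fetches IORESOURCE_MEM index 0 and ioremaps it in one step. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the device through base ... */
	return 0;
}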
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index ea2bf18b1fbb..025083c838f5 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -134,7 +134,6 @@ static void mxc_rnga_cleanup(struct hwrng *rng)
134static int __init mxc_rnga_probe(struct platform_device *pdev) 134static int __init mxc_rnga_probe(struct platform_device *pdev)
135{ 135{
136 int err; 136 int err;
137 struct resource *res;
138 struct mxc_rng *mxc_rng; 137 struct mxc_rng *mxc_rng;
139 138
140 mxc_rng = devm_kzalloc(&pdev->dev, sizeof(*mxc_rng), GFP_KERNEL); 139 mxc_rng = devm_kzalloc(&pdev->dev, sizeof(*mxc_rng), GFP_KERNEL);
@@ -158,8 +157,7 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
158 if (err) 157 if (err)
159 return err; 158 return err;
160 159
161 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 160 mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0);
162 mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
163 if (IS_ERR(mxc_rng->mem)) { 161 if (IS_ERR(mxc_rng->mem)) {
164 err = PTR_ERR(mxc_rng->mem); 162 err = PTR_ERR(mxc_rng->mem);
165 goto err_ioremap; 163 goto err_ioremap;
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index d4cab105796f..73e408146420 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -768,7 +768,7 @@ static int n2rng_probe(struct platform_device *op)
768 np->hwrng.data_read = n2rng_data_read; 768 np->hwrng.data_read = n2rng_data_read;
769 np->hwrng.priv = (unsigned long) np; 769 np->hwrng.priv = (unsigned long) np;
770 770
771 err = hwrng_register(&np->hwrng); 771 err = devm_hwrng_register(&op->dev, &np->hwrng);
772 if (err) 772 if (err)
773 goto out_hvapi_unregister; 773 goto out_hvapi_unregister;
774 774
@@ -793,8 +793,6 @@ static int n2rng_remove(struct platform_device *op)
793 793
794 cancel_delayed_work_sync(&np->work); 794 cancel_delayed_work_sync(&np->work);
795 795
796 hwrng_unregister(&np->hwrng);
797
798 sun4v_hvapi_unregister(HV_GRP_RNG); 796 sun4v_hvapi_unregister(HV_GRP_RNG);
799 797
800 return 0; 798 return 0;
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index fc0f6b0cb80d..74ed29f42e4f 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -57,7 +57,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
57 if (!base) 57 if (!base)
58 goto out_release; 58 goto out_release;
59 nmk_rng.priv = (unsigned long)base; 59 nmk_rng.priv = (unsigned long)base;
60 ret = hwrng_register(&nmk_rng); 60 ret = devm_hwrng_register(&dev->dev, &nmk_rng);
61 if (ret) 61 if (ret)
62 goto out_release; 62 goto out_release;
63 return 0; 63 return 0;
@@ -71,7 +71,6 @@ out_clk:
71 71
72static int nmk_rng_remove(struct amba_device *dev) 72static int nmk_rng_remove(struct amba_device *dev)
73{ 73{
74 hwrng_unregister(&nmk_rng);
75 amba_release_regions(dev); 74 amba_release_regions(dev);
76 clk_disable(rng_clk); 75 clk_disable(rng_clk);
77 return 0; 76 return 0;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index e9b6ac61fb7f..b27f39688b5e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -500,7 +500,7 @@ static int omap_rng_probe(struct platform_device *pdev)
500 if (ret) 500 if (ret)
501 goto err_register; 501 goto err_register;
502 502
503 ret = hwrng_register(&priv->rng); 503 ret = devm_hwrng_register(&pdev->dev, &priv->rng);
504 if (ret) 504 if (ret)
505 goto err_register; 505 goto err_register;
506 506
@@ -525,7 +525,6 @@ static int omap_rng_remove(struct platform_device *pdev)
525{ 525{
526 struct omap_rng_dev *priv = platform_get_drvdata(pdev); 526 struct omap_rng_dev *priv = platform_get_drvdata(pdev);
527 527
528 hwrng_unregister(&priv->rng);
529 528
530 priv->pdata->cleanup(priv); 529 priv->pdata->cleanup(priv);
531 530
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index f2e8272e276a..8da1d7917bdc 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -33,18 +33,11 @@ static struct hwrng powernv_hwrng = {
33 .read = powernv_rng_read, 33 .read = powernv_rng_read,
34}; 34};
35 35
36static int powernv_rng_remove(struct platform_device *pdev)
37{
38 hwrng_unregister(&powernv_hwrng);
39
40 return 0;
41}
42
43static int powernv_rng_probe(struct platform_device *pdev) 36static int powernv_rng_probe(struct platform_device *pdev)
44{ 37{
45 int rc; 38 int rc;
46 39
47 rc = hwrng_register(&powernv_hwrng); 40 rc = devm_hwrng_register(&pdev->dev, &powernv_hwrng);
48 if (rc) { 41 if (rc) {
49 /* We only register one device, ignore any others */ 42 /* We only register one device, ignore any others */
50 if (rc == -EEXIST) 43 if (rc == -EEXIST)
@@ -70,7 +63,6 @@ static struct platform_driver powernv_rng_driver = {
70 .of_match_table = powernv_rng_match, 63 .of_match_table = powernv_rng_match,
71 }, 64 },
72 .probe = powernv_rng_probe, 65 .probe = powernv_rng_probe,
73 .remove = powernv_rng_remove,
74}; 66};
75module_platform_driver(powernv_rng_driver); 67module_platform_driver(powernv_rng_driver);
76 68
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index bd6a98b3479b..863448360a7d 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -102,7 +102,7 @@ static int st_rng_probe(struct platform_device *pdev)
102 102
103 dev_set_drvdata(&pdev->dev, ddata); 103 dev_set_drvdata(&pdev->dev, ddata);
104 104
105 ret = hwrng_register(&ddata->ops); 105 ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
106 if (ret) { 106 if (ret) {
107 dev_err(&pdev->dev, "Failed to register HW RNG\n"); 107 dev_err(&pdev->dev, "Failed to register HW RNG\n");
108 clk_disable_unprepare(clk); 108 clk_disable_unprepare(clk);
@@ -118,8 +118,6 @@ static int st_rng_remove(struct platform_device *pdev)
118{ 118{
119 struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev); 119 struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
120 120
121 hwrng_unregister(&ddata->ops);
122
123 clk_disable_unprepare(ddata->clk); 121 clk_disable_unprepare(ddata->clk);
124 122
125 return 0; 123 return 0;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index ccd1f6e0696b..e262445fed5f 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -117,9 +117,9 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
117 if (!res) 117 if (!res)
118 return -ENXIO; 118 return -ENXIO;
119 119
120 if (res->start % 4 != 0 || resource_size(res) != 4) { 120 if (res->start % 4 != 0 || resource_size(res) < 4) {
121 dev_err(&pdev->dev, 121 dev_err(&pdev->dev,
122 "address must be four bytes wide and aligned\n"); 122 "address must be at least four bytes wide and 32-bit aligned\n");
123 return -EINVAL; 123 return -EINVAL;
124 } 124 }
125 125
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 8c6f9f63da5e..7e568db87ae2 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -361,7 +361,7 @@ static int xgene_rng_probe(struct platform_device *pdev)
361 361
362 xgene_rng_func.priv = (unsigned long) ctx; 362 xgene_rng_func.priv = (unsigned long) ctx;
363 363
364 rc = hwrng_register(&xgene_rng_func); 364 rc = devm_hwrng_register(&pdev->dev, &xgene_rng_func);
365 if (rc) { 365 if (rc) {
366 dev_err(&pdev->dev, "RNG registering failed error %d\n", rc); 366 dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
367 if (!IS_ERR(ctx->clk)) 367 if (!IS_ERR(ctx->clk))
@@ -375,7 +375,6 @@ static int xgene_rng_probe(struct platform_device *pdev)
375 rc); 375 rc);
376 if (!IS_ERR(ctx->clk)) 376 if (!IS_ERR(ctx->clk))
377 clk_disable_unprepare(ctx->clk); 377 clk_disable_unprepare(ctx->clk);
378 hwrng_unregister(&xgene_rng_func);
379 return rc; 378 return rc;
380 } 379 }
381 380
@@ -392,7 +391,6 @@ static int xgene_rng_remove(struct platform_device *pdev)
392 dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc); 391 dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
393 if (!IS_ERR(ctx->clk)) 392 if (!IS_ERR(ctx->clk))
394 clk_disable_unprepare(ctx->clk); 393 clk_disable_unprepare(ctx->clk);
395 hwrng_unregister(&xgene_rng_func);
396 394
397 return rc; 395 return rc;
398} 396}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 566922df4b7b..d3beed084c0a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -327,6 +327,7 @@
327#include <linux/percpu.h> 327#include <linux/percpu.h>
328#include <linux/cryptohash.h> 328#include <linux/cryptohash.h>
329#include <linux/fips.h> 329#include <linux/fips.h>
330#include <linux/freezer.h>
330#include <linux/ptrace.h> 331#include <linux/ptrace.h>
331#include <linux/workqueue.h> 332#include <linux/workqueue.h>
332#include <linux/irq.h> 333#include <linux/irq.h>
@@ -2439,7 +2440,8 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
2439 * We'll be woken up again once below random_write_wakeup_thresh, 2440 * We'll be woken up again once below random_write_wakeup_thresh,
2440 * or when the calling thread is about to terminate. 2441 * or when the calling thread is about to terminate.
2441 */ 2442 */
2442 wait_event_interruptible(random_write_wait, kthread_should_stop() || 2443 wait_event_freezable(random_write_wait,
2444 kthread_should_stop() ||
2443 ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); 2445 ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
2444 mix_pool_bytes(poolp, buffer, count); 2446 mix_pool_bytes(poolp, buffer, count);
2445 credit_entropy_bits(poolp, entropy); 2447 credit_entropy_bits(poolp, entropy);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index d7c85c79094b..1fb622f2a87d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_PADLOCK_AES
27 tristate "PadLock driver for AES algorithm" 27 tristate "PadLock driver for AES algorithm"
28 depends on CRYPTO_DEV_PADLOCK 28 depends on CRYPTO_DEV_PADLOCK
29 select CRYPTO_BLKCIPHER 29 select CRYPTO_BLKCIPHER
30 select CRYPTO_AES 30 select CRYPTO_LIB_AES
31 help 31 help
32 Use VIA PadLock for AES algorithm. 32 Use VIA PadLock for AES algorithm.
33 33
@@ -170,7 +170,7 @@ config CRYPTO_DES_S390
170 depends on S390 170 depends on S390
171 select CRYPTO_ALGAPI 171 select CRYPTO_ALGAPI
172 select CRYPTO_BLKCIPHER 172 select CRYPTO_BLKCIPHER
173 select CRYPTO_DES 173 select CRYPTO_LIB_DES
174 help 174 help
175 This is the s390 hardware accelerated implementation of the 175 This is the s390 hardware accelerated implementation of the
176 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 176 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
@@ -209,12 +209,12 @@ config S390_PRNG
209 It is available as of z9. 209 It is available as of z9.
210 210
211config CRYPTO_GHASH_S390 211config CRYPTO_GHASH_S390
212 tristate "GHASH digest algorithm" 212 tristate "GHASH hash function"
213 depends on S390 213 depends on S390
214 select CRYPTO_HASH 214 select CRYPTO_HASH
215 help 215 help
216 This is the s390 hardware accelerated implementation of the 216 This is the s390 hardware accelerated implementation of GHASH,
217 GHASH message digest algorithm for GCM (Galois/Counter Mode). 217 the hash function used in GCM (Galois/Counter mode).
218 218
219 It is available as of z196. 219 It is available as of z196.
220 220
@@ -234,8 +234,8 @@ config CRYPTO_CRC32_S390
234config CRYPTO_DEV_MARVELL_CESA 234config CRYPTO_DEV_MARVELL_CESA
235 tristate "Marvell's Cryptographic Engine driver" 235 tristate "Marvell's Cryptographic Engine driver"
236 depends on PLAT_ORION || ARCH_MVEBU 236 depends on PLAT_ORION || ARCH_MVEBU
237 select CRYPTO_AES 237 select CRYPTO_LIB_AES
238 select CRYPTO_DES 238 select CRYPTO_LIB_DES
239 select CRYPTO_BLKCIPHER 239 select CRYPTO_BLKCIPHER
240 select CRYPTO_HASH 240 select CRYPTO_HASH
241 select SRAM 241 select SRAM
@@ -247,7 +247,7 @@ config CRYPTO_DEV_MARVELL_CESA
247 247
248config CRYPTO_DEV_NIAGARA2 248config CRYPTO_DEV_NIAGARA2
249 tristate "Niagara2 Stream Processing Unit driver" 249 tristate "Niagara2 Stream Processing Unit driver"
250 select CRYPTO_DES 250 select CRYPTO_LIB_DES
251 select CRYPTO_BLKCIPHER 251 select CRYPTO_BLKCIPHER
252 select CRYPTO_HASH 252 select CRYPTO_HASH
253 select CRYPTO_MD5 253 select CRYPTO_MD5
@@ -264,7 +264,7 @@ config CRYPTO_DEV_NIAGARA2
264 264
265config CRYPTO_DEV_HIFN_795X 265config CRYPTO_DEV_HIFN_795X
266 tristate "Driver HIFN 795x crypto accelerator chips" 266 tristate "Driver HIFN 795x crypto accelerator chips"
267 select CRYPTO_DES 267 select CRYPTO_LIB_DES
268 select CRYPTO_BLKCIPHER 268 select CRYPTO_BLKCIPHER
269 select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG 269 select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
270 depends on PCI 270 depends on PCI
@@ -320,7 +320,7 @@ config CRYPTO_DEV_TALITOS2
320config CRYPTO_DEV_IXP4XX 320config CRYPTO_DEV_IXP4XX
321 tristate "Driver for IXP4xx crypto hardware acceleration" 321 tristate "Driver for IXP4xx crypto hardware acceleration"
322 depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE 322 depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
323 select CRYPTO_DES 323 select CRYPTO_LIB_DES
324 select CRYPTO_AEAD 324 select CRYPTO_AEAD
325 select CRYPTO_AUTHENC 325 select CRYPTO_AUTHENC
326 select CRYPTO_BLKCIPHER 326 select CRYPTO_BLKCIPHER
@@ -332,7 +332,7 @@ config CRYPTO_DEV_PPC4XX
332 depends on PPC && 4xx 332 depends on PPC && 4xx
333 select CRYPTO_HASH 333 select CRYPTO_HASH
334 select CRYPTO_AEAD 334 select CRYPTO_AEAD
335 select CRYPTO_AES 335 select CRYPTO_LIB_AES
336 select CRYPTO_CCM 336 select CRYPTO_CCM
337 select CRYPTO_CTR 337 select CRYPTO_CTR
338 select CRYPTO_GCM 338 select CRYPTO_GCM
@@ -386,7 +386,7 @@ config CRYPTO_DEV_OMAP_AES
386config CRYPTO_DEV_OMAP_DES 386config CRYPTO_DEV_OMAP_DES
387 tristate "Support for OMAP DES/3DES hw engine" 387 tristate "Support for OMAP DES/3DES hw engine"
388 depends on ARCH_OMAP2PLUS 388 depends on ARCH_OMAP2PLUS
389 select CRYPTO_DES 389 select CRYPTO_LIB_DES
390 select CRYPTO_BLKCIPHER 390 select CRYPTO_BLKCIPHER
391 select CRYPTO_ENGINE 391 select CRYPTO_ENGINE
392 help 392 help
@@ -404,7 +404,7 @@ config CRYPTO_DEV_PICOXCELL
404 select CRYPTO_AES 404 select CRYPTO_AES
405 select CRYPTO_AUTHENC 405 select CRYPTO_AUTHENC
406 select CRYPTO_BLKCIPHER 406 select CRYPTO_BLKCIPHER
407 select CRYPTO_DES 407 select CRYPTO_LIB_DES
408 select CRYPTO_CBC 408 select CRYPTO_CBC
409 select CRYPTO_ECB 409 select CRYPTO_ECB
410 select CRYPTO_SEQIV 410 select CRYPTO_SEQIV
@@ -413,7 +413,7 @@ config CRYPTO_DEV_PICOXCELL
413 Picochip picoXcell SoC devices. Select this for IPSEC ESP offload 413 Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
414 and for 3gpp Layer 2 ciphering support. 414 and for 3gpp Layer 2 ciphering support.
415 415
416 Saying m here will build a module named pipcoxcell_crypto. 416 Saying m here will build a module named picoxcell_crypto.
417 417
418config CRYPTO_DEV_SAHARA 418config CRYPTO_DEV_SAHARA
419 tristate "Support for SAHARA crypto accelerator" 419 tristate "Support for SAHARA crypto accelerator"
@@ -517,7 +517,7 @@ config CRYPTO_DEV_ATMEL_AES
517config CRYPTO_DEV_ATMEL_TDES 517config CRYPTO_DEV_ATMEL_TDES
518 tristate "Support for Atmel DES/TDES hw accelerator" 518 tristate "Support for Atmel DES/TDES hw accelerator"
519 depends on ARCH_AT91 || COMPILE_TEST 519 depends on ARCH_AT91 || COMPILE_TEST
520 select CRYPTO_DES 520 select CRYPTO_LIB_DES
521 select CRYPTO_BLKCIPHER 521 select CRYPTO_BLKCIPHER
522 help 522 help
523 Some Atmel processors have DES/TDES hw accelerator. 523 Some Atmel processors have DES/TDES hw accelerator.
@@ -615,7 +615,7 @@ config CRYPTO_DEV_QCE
615 depends on ARCH_QCOM || COMPILE_TEST 615 depends on ARCH_QCOM || COMPILE_TEST
616 depends on HAS_IOMEM 616 depends on HAS_IOMEM
617 select CRYPTO_AES 617 select CRYPTO_AES
618 select CRYPTO_DES 618 select CRYPTO_LIB_DES
619 select CRYPTO_ECB 619 select CRYPTO_ECB
620 select CRYPTO_CBC 620 select CRYPTO_CBC
621 select CRYPTO_XTS 621 select CRYPTO_XTS
@@ -663,7 +663,7 @@ config CRYPTO_DEV_SUN4I_SS
663 select CRYPTO_MD5 663 select CRYPTO_MD5
664 select CRYPTO_SHA1 664 select CRYPTO_SHA1
665 select CRYPTO_AES 665 select CRYPTO_AES
666 select CRYPTO_DES 666 select CRYPTO_LIB_DES
667 select CRYPTO_BLKCIPHER 667 select CRYPTO_BLKCIPHER
668 help 668 help
669 Some Allwinner SoC have a crypto accelerator named 669 Some Allwinner SoC have a crypto accelerator named
@@ -686,7 +686,7 @@ config CRYPTO_DEV_ROCKCHIP
686 tristate "Rockchip's Cryptographic Engine driver" 686 tristate "Rockchip's Cryptographic Engine driver"
687 depends on OF && ARCH_ROCKCHIP 687 depends on OF && ARCH_ROCKCHIP
688 select CRYPTO_AES 688 select CRYPTO_AES
689 select CRYPTO_DES 689 select CRYPTO_LIB_DES
690 select CRYPTO_MD5 690 select CRYPTO_MD5
691 select CRYPTO_SHA1 691 select CRYPTO_SHA1
692 select CRYPTO_SHA256 692 select CRYPTO_SHA256
@@ -723,7 +723,7 @@ config CRYPTO_DEV_BCM_SPU
723 depends on MAILBOX 723 depends on MAILBOX
724 default m 724 default m
725 select CRYPTO_AUTHENC 725 select CRYPTO_AUTHENC
726 select CRYPTO_DES 726 select CRYPTO_LIB_DES
727 select CRYPTO_MD5 727 select CRYPTO_MD5
728 select CRYPTO_SHA1 728 select CRYPTO_SHA1
729 select CRYPTO_SHA256 729 select CRYPTO_SHA256
@@ -737,12 +737,11 @@ source "drivers/crypto/stm32/Kconfig"
737 737
738config CRYPTO_DEV_SAFEXCEL 738config CRYPTO_DEV_SAFEXCEL
739 tristate "Inside Secure's SafeXcel cryptographic engine driver" 739 tristate "Inside Secure's SafeXcel cryptographic engine driver"
740 depends on OF 740 depends on OF || PCI || COMPILE_TEST
741 depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT) 741 select CRYPTO_LIB_AES
742 select CRYPTO_AES
743 select CRYPTO_AUTHENC 742 select CRYPTO_AUTHENC
744 select CRYPTO_BLKCIPHER 743 select CRYPTO_BLKCIPHER
745 select CRYPTO_DES 744 select CRYPTO_LIB_DES
746 select CRYPTO_HASH 745 select CRYPTO_HASH
747 select CRYPTO_HMAC 746 select CRYPTO_HMAC
748 select CRYPTO_MD5 747 select CRYPTO_MD5
@@ -750,10 +749,11 @@ config CRYPTO_DEV_SAFEXCEL
750 select CRYPTO_SHA256 749 select CRYPTO_SHA256
751 select CRYPTO_SHA512 750 select CRYPTO_SHA512
752 help 751 help
753 This driver interfaces with the SafeXcel EIP-197 cryptographic engine 752 This driver interfaces with the SafeXcel EIP-97 and EIP-197 cryptographic
754 designed by Inside Secure. Select this if you want to use CBC/ECB 753 engines designed by Inside Secure. It currently accelerates DES, 3DES and
755 chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash 754 AES block ciphers in ECB and CBC mode, as well as SHA1, SHA224, SHA256,
756 algorithms. 755 SHA384 and SHA512 hash algorithms for both basic hash and HMAC.
756 Additionally, it accelerates combined AES-CBC/HMAC-SHA AEAD operations.
757 757
758config CRYPTO_DEV_ARTPEC6 758config CRYPTO_DEV_ARTPEC6
759 tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration." 759 tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
@@ -780,7 +780,7 @@ config CRYPTO_DEV_CCREE
780 default n 780 default n
781 select CRYPTO_HASH 781 select CRYPTO_HASH
782 select CRYPTO_BLKCIPHER 782 select CRYPTO_BLKCIPHER
783 select CRYPTO_DES 783 select CRYPTO_LIB_DES
784 select CRYPTO_AEAD 784 select CRYPTO_AEAD
785 select CRYPTO_AUTHENC 785 select CRYPTO_AUTHENC
786 select CRYPTO_SHA1 786 select CRYPTO_SHA1
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index cbfc607282f4..a42f8619589d 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -527,28 +527,20 @@ static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
527static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key, 527static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
528 unsigned int keylen) 528 unsigned int keylen)
529{ 529{
530 struct crypto_cipher *aes_tfm = NULL; 530 struct crypto_aes_ctx ctx;
531 uint8_t src[16] = { 0 }; 531 uint8_t src[16] = { 0 };
532 int rc = 0; 532 int rc;
533
534 aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
535 if (IS_ERR(aes_tfm)) {
536 rc = PTR_ERR(aes_tfm);
537 pr_warn("could not load aes cipher driver: %d\n", rc);
538 return rc;
539 }
540 533
541 rc = crypto_cipher_setkey(aes_tfm, key, keylen); 534 rc = aes_expandkey(&ctx, key, keylen);
542 if (rc) { 535 if (rc) {
543 pr_err("setkey() failed: %d\n", rc); 536 pr_err("aes_expandkey() failed: %d\n", rc);
544 goto out; 537 return rc;
545 } 538 }
546 539
547 crypto_cipher_encrypt_one(aes_tfm, src, src); 540 aes_encrypt(&ctx, src, src);
548 crypto4xx_memcpy_to_le32(hash_start, src, 16); 541 crypto4xx_memcpy_to_le32(hash_start, src, 16);
549out: 542 memzero_explicit(&ctx, sizeof(ctx));
550 crypto_free_cipher(aes_tfm); 543 return 0;
551 return rc;
552} 544}
553 545
554int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, 546int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
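The crypto4xx hunk above is representative of this series' move to the AES library helpers: instead of allocating a full "aes" cipher transform just to encrypt one block, callers expand the key into a stack context with aes_expandkey(), run aes_encrypt(), and then wipe the round keys. A minimal sketch of the same pattern in a hypothetical helper (the function and its purpose are illustrative, not code from this merge):

#include <crypto/aes.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: derive E_K(0^16), as GHASH key setup typically does. */
static int example_hash_subkey(u8 *out, const u8 *key, unsigned int keylen)
{
	struct crypto_aes_ctx ctx;
	u8 zeroes[AES_BLOCK_SIZE] = { };
	int err;

	err = aes_expandkey(&ctx, key, keylen);	/* also validates keylen */
	if (err)
		return err;

	aes_encrypt(&ctx, out, zeroes);		/* single-block library call */
	memzero_explicit(&ctx, sizeof(ctx));	/* don't leave round keys behind */
	return 0;
}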
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 2b7af44c7b85..026f193556f9 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2673,7 +2673,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
2673 /* Get the IRQ */ 2673 /* Get the IRQ */
2674 aes_dd->irq = platform_get_irq(pdev, 0); 2674 aes_dd->irq = platform_get_irq(pdev, 0);
2675 if (aes_dd->irq < 0) { 2675 if (aes_dd->irq < 0) {
2676 dev_err(dev, "no IRQ resource info\n");
2677 err = aes_dd->irq; 2676 err = aes_dd->irq;
2678 goto res_err; 2677 goto res_err;
2679 } 2678 }
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index dc876fab2882..1d3355913b40 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -21,6 +21,18 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include "atmel-i2c.h" 22#include "atmel-i2c.h"
23 23
24static const struct {
25 u8 value;
26 const char *error_text;
27} error_list[] = {
28 { 0x01, "CheckMac or Verify miscompare" },
29 { 0x03, "Parse Error" },
30 { 0x05, "ECC Fault" },
31 { 0x0F, "Execution Error" },
32 { 0xEE, "Watchdog about to expire" },
33 { 0xFF, "CRC or other communication error" },
34};
35
24/** 36/**
25 * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC. 37 * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
26 * CRC16 verification of the count, opcode, param1, param2 and data bytes. 38 * CRC16 verification of the count, opcode, param1, param2 and data bytes.
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index 21860b99c3e3..63b97b104f16 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -62,18 +62,6 @@ struct atmel_i2c_cmd {
62#define STATUS_NOERR 0x00 62#define STATUS_NOERR 0x00
63#define STATUS_WAKE_SUCCESSFUL 0x11 63#define STATUS_WAKE_SUCCESSFUL 0x11
64 64
65static const struct {
66 u8 value;
67 const char *error_text;
68} error_list[] = {
69 { 0x01, "CheckMac or Verify miscompare" },
70 { 0x03, "Parse Error" },
71 { 0x05, "ECC Fault" },
72 { 0x0F, "Execution Error" },
73 { 0xEE, "Watchdog about to expire" },
74 { 0xFF, "CRC or other communication error" },
75};
76
77/* Definitions for eeprom organization */ 65/* Definitions for eeprom organization */
78#define CONFIG_ZONE 0 66#define CONFIG_ZONE 0
79 67
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index ab0cfe748931..84cb8748a795 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2779,7 +2779,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
2779 /* Get the IRQ */ 2779 /* Get the IRQ */
2780 sha_dd->irq = platform_get_irq(pdev, 0); 2780 sha_dd->irq = platform_get_irq(pdev, 0);
2781 if (sha_dd->irq < 0) { 2781 if (sha_dd->irq < 0) {
2782 dev_err(dev, "no IRQ resource info\n");
2783 err = sha_dd->irq; 2782 err = sha_dd->irq;
2784 goto res_err; 2783 goto res_err;
2785 } 2784 }
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index ea0d2068ea4f..c96c14e7dab1 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -109,7 +109,7 @@ static int atmel_sha204a_probe(struct i2c_client *client,
109 i2c_priv->hwrng.read = atmel_sha204a_rng_read; 109 i2c_priv->hwrng.read = atmel_sha204a_rng_read;
110 i2c_priv->hwrng.quality = 1024; 110 i2c_priv->hwrng.quality = 1024;
111 111
112 ret = hwrng_register(&i2c_priv->hwrng); 112 ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
113 if (ret) 113 if (ret)
114 dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); 114 dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
115 115
@@ -127,7 +127,6 @@ static int atmel_sha204a_remove(struct i2c_client *client)
127 127
128 if (i2c_priv->hwrng.priv) 128 if (i2c_priv->hwrng.priv)
129 kfree((void *)i2c_priv->hwrng.priv); 129 kfree((void *)i2c_priv->hwrng.priv);
130 hwrng_unregister(&i2c_priv->hwrng);
131 130
132 return 0; 131 return 0;
133} 132}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index fa76620281e8..1a6c86ae6148 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -33,7 +33,7 @@
33#include <linux/cryptohash.h> 33#include <linux/cryptohash.h>
34#include <crypto/scatterwalk.h> 34#include <crypto/scatterwalk.h>
35#include <crypto/algapi.h> 35#include <crypto/algapi.h>
36#include <crypto/des.h> 36#include <crypto/internal/des.h>
37#include <crypto/hash.h> 37#include <crypto/hash.h>
38#include <crypto/internal/hash.h> 38#include <crypto/internal/hash.h>
39#include <linux/platform_data/crypto-atmel.h> 39#include <linux/platform_data/crypto-atmel.h>
@@ -773,22 +773,12 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
773static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 773static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
774 unsigned int keylen) 774 unsigned int keylen)
775{ 775{
776 u32 tmp[DES_EXPKEY_WORDS];
777 int err;
778 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
779
780 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); 776 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
777 int err;
781 778
782 if (keylen != DES_KEY_SIZE) { 779 err = verify_ablkcipher_des_key(tfm, key);
783 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 780 if (err)
784 return -EINVAL; 781 return err;
785 }
786
787 err = des_ekey(tmp, key);
788 if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
789 ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
790 return -EINVAL;
791 }
792 782
793 memcpy(ctx->key, key, keylen); 783 memcpy(ctx->key, key, keylen);
794 ctx->keylen = keylen; 784 ctx->keylen = keylen;
@@ -800,15 +790,11 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
800 unsigned int keylen) 790 unsigned int keylen)
801{ 791{
802 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); 792 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
803 u32 flags;
804 int err; 793 int err;
805 794
806 flags = crypto_ablkcipher_get_flags(tfm); 795 err = verify_ablkcipher_des3_key(tfm, key);
807 err = __des3_verify_key(&flags, key); 796 if (err)
808 if (unlikely(err)) {
809 crypto_ablkcipher_set_flags(tfm, flags);
810 return err; 797 return err;
811 }
812 798
813 memcpy(ctx->key, key, keylen); 799 memcpy(ctx->key, key, keylen);
814 ctx->keylen = keylen; 800 ctx->keylen = keylen;
@@ -1281,7 +1267,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
1281 /* Get the IRQ */ 1267 /* Get the IRQ */
1282 tdes_dd->irq = platform_get_irq(pdev, 0); 1268 tdes_dd->irq = platform_get_irq(pdev, 0);
1283 if (tdes_dd->irq < 0) { 1269 if (tdes_dd->irq < 0) {
1284 dev_err(dev, "no IRQ resource info\n");
1285 err = tdes_dd->irq; 1270 err = tdes_dd->irq;
1286 goto res_err; 1271 goto res_err;
1287 } 1272 }
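atmel-tdes is one of many drivers in this pull converted to the DES key verification helpers from <crypto/internal/des.h>: verify_ablkcipher_des_key() and verify_ablkcipher_des3_key() check the key length and apply the weak/non-compliant key policy according to the tfm's CRYPTO_TFM_REQ_FORBID_WEAK_KEYS flag, replacing the open-coded des_ekey()/__des3_verify_key() logic. A hedged sketch of a driver setkey built on them (the context struct and names are hypothetical):

#include <crypto/des.h>
#include <crypto/internal/des.h>
#include <linux/crypto.h>
#include <linux/string.h>

struct example_des_ctx {
	u8 key[3 * DES_KEY_SIZE];
	unsigned int keylen;
};

static int example_des3_setkey(struct crypto_ablkcipher *tfm,
			       const u8 *key, unsigned int keylen)
{
	struct example_des_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int err;

	/* Length check plus weak/degenerate 3DES key policy in one call. */
	err = verify_ablkcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}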
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 80fa04ef215f..4b20606983a4 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2854,7 +2854,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
2854 struct artpec6_crypto *ac; 2854 struct artpec6_crypto *ac;
2855 struct device *dev = &pdev->dev; 2855 struct device *dev = &pdev->dev;
2856 void __iomem *base; 2856 void __iomem *base;
2857 struct resource *res;
2858 int irq; 2857 int irq;
2859 int err; 2858 int err;
2860 2859
@@ -2867,8 +2866,7 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
2867 2866
2868 variant = (enum artpec6_crypto_variant)match->data; 2867 variant = (enum artpec6_crypto_variant)match->data;
2869 2868
2870 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2869 base = devm_platform_ioremap_resource(pdev, 0);
2871 base = devm_ioremap_resource(&pdev->dev, res);
2872 if (IS_ERR(base)) 2870 if (IS_ERR(base))
2873 return PTR_ERR(base); 2871 return PTR_ERR(base);
2874 2872
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 869602fcfd96..f85356a48e7e 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -24,7 +24,7 @@
24#include <crypto/aead.h> 24#include <crypto/aead.h>
25#include <crypto/internal/aead.h> 25#include <crypto/internal/aead.h>
26#include <crypto/aes.h> 26#include <crypto/aes.h>
27#include <crypto/des.h> 27#include <crypto/internal/des.h>
28#include <crypto/hmac.h> 28#include <crypto/hmac.h>
29#include <crypto/sha.h> 29#include <crypto/sha.h>
30#include <crypto/md5.h> 30#include <crypto/md5.h>
@@ -1802,24 +1802,13 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1802 unsigned int keylen) 1802 unsigned int keylen)
1803{ 1803{
1804 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1804 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1805 u32 tmp[DES_EXPKEY_WORDS]; 1805 int err;
1806
1807 if (keylen == DES_KEY_SIZE) {
1808 if (des_ekey(tmp, key) == 0) {
1809 if (crypto_ablkcipher_get_flags(cipher) &
1810 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
1811 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1812 1806
1813 crypto_ablkcipher_set_flags(cipher, flags); 1807 err = verify_ablkcipher_des_key(cipher, key);
1814 return -EINVAL; 1808 if (err)
1815 } 1809 return err;
1816 }
1817 1810
1818 ctx->cipher_type = CIPHER_TYPE_DES; 1811 ctx->cipher_type = CIPHER_TYPE_DES;
1819 } else {
1820 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1821 return -EINVAL;
1822 }
1823 return 0; 1812 return 0;
1824} 1813}
1825 1814
@@ -1827,23 +1816,13 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1827 unsigned int keylen) 1816 unsigned int keylen)
1828{ 1817{
1829 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1818 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1819 int err;
1830 1820
1831 if (keylen == (DES_KEY_SIZE * 3)) { 1821 err = verify_ablkcipher_des3_key(cipher, key);
1832 u32 flags; 1822 if (err)
1833 int ret; 1823 return err;
1834
1835 flags = crypto_ablkcipher_get_flags(cipher);
1836 ret = __des3_verify_key(&flags, key);
1837 if (unlikely(ret)) {
1838 crypto_ablkcipher_set_flags(cipher, flags);
1839 return ret;
1840 }
1841 1824
1842 ctx->cipher_type = CIPHER_TYPE_3DES; 1825 ctx->cipher_type = CIPHER_TYPE_3DES;
1843 } else {
1844 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1845 return -EINVAL;
1846 }
1847 return 0; 1826 return 0;
1848} 1827}
1849 1828
@@ -2629,6 +2608,19 @@ static int aead_need_fallback(struct aead_request *req)
2629 return 1; 2608 return 1;
2630 } 2609 }
2631 2610
2611 /*
2612 * RFC4106 and RFC4543 cannot handle the case where AAD is other than
2613 * 16 or 20 bytes long. So use fallback in this case.
2614 */
2615 if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2616 ctx->cipher.alg == CIPHER_ALG_AES &&
2617 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2618 req->assoclen != 16 && req->assoclen != 20) {
2619 flow_log("RFC4106/RFC4543 needs fallback for assoclen"
2620 " other than 16 or 20 bytes\n");
2621 return 1;
2622 }
2623
2632 payload_len = req->cryptlen; 2624 payload_len = req->cryptlen;
2633 if (spu->spu_type == SPU_TYPE_SPUM) 2625 if (spu->spu_type == SPU_TYPE_SPUM)
2634 payload_len += req->assoclen; 2626 payload_len += req->assoclen;
@@ -2855,40 +2847,16 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2855 2847
2856 switch (ctx->alg->cipher_info.alg) { 2848 switch (ctx->alg->cipher_info.alg) {
2857 case CIPHER_ALG_DES: 2849 case CIPHER_ALG_DES:
2858 if (ctx->enckeylen == DES_KEY_SIZE) { 2850 if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2859 u32 tmp[DES_EXPKEY_WORDS]; 2851 return -EINVAL;
2860 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2861
2862 if (des_ekey(tmp, keys.enckey) == 0) {
2863 if (crypto_aead_get_flags(cipher) &
2864 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
2865 crypto_aead_set_flags(cipher, flags);
2866 return -EINVAL;
2867 }
2868 }
2869 2852
2870 ctx->cipher_type = CIPHER_TYPE_DES; 2853 ctx->cipher_type = CIPHER_TYPE_DES;
2871 } else {
2872 goto badkey;
2873 }
2874 break; 2854 break;
2875 case CIPHER_ALG_3DES: 2855 case CIPHER_ALG_3DES:
2876 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2856 if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2877 u32 flags;
2878
2879 flags = crypto_aead_get_flags(cipher);
2880 ret = __des3_verify_key(&flags, keys.enckey);
2881 if (unlikely(ret)) {
2882 crypto_aead_set_flags(cipher, flags);
2883 return ret;
2884 }
2885
2886 ctx->cipher_type = CIPHER_TYPE_3DES;
2887 } else {
2888 crypto_aead_set_flags(cipher,
2889 CRYPTO_TFM_RES_BAD_KEY_LEN);
2890 return -EINVAL; 2857 return -EINVAL;
2891 } 2858
2859 ctx->cipher_type = CIPHER_TYPE_3DES;
2892 break; 2860 break;
2893 case CIPHER_ALG_AES: 2861 case CIPHER_ALG_AES:
2894 switch (ctx->enckeylen) { 2862 switch (ctx->enckeylen) {
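The bcm/cipher.c hunks above replace the open-coded des_ekey()/__des3_verify_key() checks with the new helpers declared in <crypto/internal/des.h>. A rough sketch of the resulting setkey shape for an ablkcipher-based driver (names are illustrative, not from the patch):

	#include <crypto/internal/des.h>

	static int example_des_setkey(struct crypto_ablkcipher *cipher,
				      const u8 *key, unsigned int keylen)
	{
		/* Rejects wrong key lengths and, depending on the tfm's
		 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS flag, weak keys. */
		int err = verify_ablkcipher_des_key(cipher, key);

		if (err)
			return err;

		/* ... program the hardware key here ... */
		return 0;
	}

The 3DES case is analogous via verify_ablkcipher_des3_key(), while AEAD implementations call verify_aead_des_key()/verify_aead_des3_key() on the cipher key extracted by crypto_authenc_extractkeys(), as the aead_authenc_setkey() hunk shows.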
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 3720ddabb507..137ed3df0c74 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -98,7 +98,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
98 select CRYPTO_AEAD 98 select CRYPTO_AEAD
99 select CRYPTO_AUTHENC 99 select CRYPTO_AUTHENC
100 select CRYPTO_BLKCIPHER 100 select CRYPTO_BLKCIPHER
101 select CRYPTO_DES 101 select CRYPTO_LIB_DES
102 help 102 help
103 Selecting this will offload crypto for users of the 103 Selecting this will offload crypto for users of the
104 scatterlist crypto API (such as the linux native IPSec 104 scatterlist crypto API (such as the linux native IPSec
@@ -111,6 +111,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
111 select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 111 select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
112 select CRYPTO_AUTHENC 112 select CRYPTO_AUTHENC
113 select CRYPTO_BLKCIPHER 113 select CRYPTO_BLKCIPHER
114 select CRYPTO_DES
114 help 115 help
115 Selecting this will use CAAM Queue Interface (QI) for sending 116 Selecting this will use CAAM Queue Interface (QI) for sending
116 & receiving crypto jobs to/from CAAM. This gives better performance 117 & receiving crypto jobs to/from CAAM. This gives better performance
@@ -161,6 +162,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
161 select CRYPTO_AUTHENC 162 select CRYPTO_AUTHENC
162 select CRYPTO_AEAD 163 select CRYPTO_AEAD
163 select CRYPTO_HASH 164 select CRYPTO_HASH
165 select CRYPTO_DES
164 help 166 help
165 CAAM driver for QorIQ Data Path Acceleration Architecture 2. 167 CAAM driver for QorIQ Data Path Acceleration Architecture 2.
166 It handles DPSECI DPAA2 objects that sit on the Management Complex 168 It handles DPSECI DPAA2 objects that sit on the Management Complex
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 9ab4e81ea21e..68d5cc0f28e2 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -30,3 +30,4 @@ endif
30obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o 30obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
31 31
32dpaa2_caam-y := caamalg_qi2.o dpseci.o 32dpaa2_caam-y := caamalg_qi2.o dpseci.o
33dpaa2_caam-$(CONFIG_DEBUG_FS) += dpseci-debugfs.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 43f18253e5b6..2912006b946b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -74,7 +74,7 @@
74 74
75#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) 75#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
76 76
77#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) 77#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
78#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 78#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
79 79
80struct caam_alg_entry { 80struct caam_alg_entry {
@@ -205,6 +205,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
205 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 205 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
206 } 206 }
207 207
208 /*
209 * In case |user key| > |derived key|, using DKP<imm,imm>
210 * would result in invalid opcodes (last bytes of user key) in
211 * the resulting descriptor. Use DKP<ptr,imm> instead => both
212 * virtual and dma key addresses are needed.
213 */
214 ctx->adata.key_virt = ctx->key;
215 ctx->adata.key_dma = ctx->key_dma;
216
217 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
218 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
219
208 data_len[0] = ctx->adata.keylen_pad; 220 data_len[0] = ctx->adata.keylen_pad;
209 data_len[1] = ctx->cdata.keylen; 221 data_len[1] = ctx->cdata.keylen;
210 222
@@ -221,16 +233,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
221 ARRAY_SIZE(data_len)) < 0) 233 ARRAY_SIZE(data_len)) < 0)
222 return -EINVAL; 234 return -EINVAL;
223 235
224 if (inl_mask & 1)
225 ctx->adata.key_virt = ctx->key;
226 else
227 ctx->adata.key_dma = ctx->key_dma;
228
229 if (inl_mask & 2)
230 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
231 else
232 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
233
234 ctx->adata.key_inline = !!(inl_mask & 1); 236 ctx->adata.key_inline = !!(inl_mask & 1);
235 ctx->cdata.key_inline = !!(inl_mask & 2); 237 ctx->cdata.key_inline = !!(inl_mask & 2);
236 238
@@ -253,16 +255,6 @@ skip_enc:
253 ARRAY_SIZE(data_len)) < 0) 255 ARRAY_SIZE(data_len)) < 0)
254 return -EINVAL; 256 return -EINVAL;
255 257
256 if (inl_mask & 1)
257 ctx->adata.key_virt = ctx->key;
258 else
259 ctx->adata.key_dma = ctx->key_dma;
260
261 if (inl_mask & 2)
262 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
263 else
264 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
265
266 ctx->adata.key_inline = !!(inl_mask & 1); 258 ctx->adata.key_inline = !!(inl_mask & 1);
267 ctx->cdata.key_inline = !!(inl_mask & 2); 259 ctx->cdata.key_inline = !!(inl_mask & 2);
268 260
@@ -287,16 +279,6 @@ skip_enc:
287 ARRAY_SIZE(data_len)) < 0) 279 ARRAY_SIZE(data_len)) < 0)
288 return -EINVAL; 280 return -EINVAL;
289 281
290 if (inl_mask & 1)
291 ctx->adata.key_virt = ctx->key;
292 else
293 ctx->adata.key_dma = ctx->key_dma;
294
295 if (inl_mask & 2)
296 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
297 else
298 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
299
300 ctx->adata.key_inline = !!(inl_mask & 1); 282 ctx->adata.key_inline = !!(inl_mask & 1);
301 ctx->cdata.key_inline = !!(inl_mask & 2); 283 ctx->cdata.key_inline = !!(inl_mask & 2);
302 284
@@ -376,6 +358,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
376static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 358static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
377{ 359{
378 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 360 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
361 int err;
362
363 err = crypto_gcm_check_authsize(authsize);
364 if (err)
365 return err;
379 366
380 ctx->authsize = authsize; 367 ctx->authsize = authsize;
381 gcm_set_sh_desc(authenc); 368 gcm_set_sh_desc(authenc);
@@ -439,6 +426,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
439 unsigned int authsize) 426 unsigned int authsize)
440{ 427{
441 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 428 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
429 int err;
430
431 err = crypto_rfc4106_check_authsize(authsize);
432 if (err)
433 return err;
442 434
443 ctx->authsize = authsize; 435 ctx->authsize = authsize;
444 rfc4106_set_sh_desc(authenc); 436 rfc4106_set_sh_desc(authenc);
@@ -503,6 +495,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
503{ 495{
504 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 496 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
505 497
498 if (authsize != 16)
499 return -EINVAL;
500
506 ctx->authsize = authsize; 501 ctx->authsize = authsize;
507 rfc4543_set_sh_desc(authenc); 502 rfc4543_set_sh_desc(authenc);
508 503
@@ -633,33 +628,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
633 unsigned int keylen) 628 unsigned int keylen)
634{ 629{
635 struct crypto_authenc_keys keys; 630 struct crypto_authenc_keys keys;
636 u32 flags;
637 int err; 631 int err;
638 632
639 err = crypto_authenc_extractkeys(&keys, key, keylen); 633 err = crypto_authenc_extractkeys(&keys, key, keylen);
640 if (unlikely(err)) 634 if (unlikely(err))
641 goto badkey; 635 return err;
642
643 err = -EINVAL;
644 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
645 goto badkey;
646
647 flags = crypto_aead_get_flags(aead);
648 err = __des3_verify_key(&flags, keys.enckey);
649 if (unlikely(err)) {
650 crypto_aead_set_flags(aead, flags);
651 goto out;
652 }
653 636
654 err = aead_setkey(aead, key, keylen); 637 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
638 aead_setkey(aead, key, keylen);
655 639
656out:
657 memzero_explicit(&keys, sizeof(keys)); 640 memzero_explicit(&keys, sizeof(keys));
658 return err; 641 return err;
659
660badkey:
661 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
662 goto out;
663} 642}
664 643
665static int gcm_setkey(struct crypto_aead *aead, 644static int gcm_setkey(struct crypto_aead *aead,
@@ -667,6 +646,13 @@ static int gcm_setkey(struct crypto_aead *aead,
667{ 646{
668 struct caam_ctx *ctx = crypto_aead_ctx(aead); 647 struct caam_ctx *ctx = crypto_aead_ctx(aead);
669 struct device *jrdev = ctx->jrdev; 648 struct device *jrdev = ctx->jrdev;
649 int err;
650
651 err = aes_check_keylen(keylen);
652 if (err) {
653 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
654 return err;
655 }
670 656
671 print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 657 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
672 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 658 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -683,9 +669,13 @@ static int rfc4106_setkey(struct crypto_aead *aead,
683{ 669{
684 struct caam_ctx *ctx = crypto_aead_ctx(aead); 670 struct caam_ctx *ctx = crypto_aead_ctx(aead);
685 struct device *jrdev = ctx->jrdev; 671 struct device *jrdev = ctx->jrdev;
672 int err;
686 673
687 if (keylen < 4) 674 err = aes_check_keylen(keylen - 4);
688 return -EINVAL; 675 if (err) {
676 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
677 return err;
678 }
689 679
690 print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 680 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
691 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 681 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -707,9 +697,13 @@ static int rfc4543_setkey(struct crypto_aead *aead,
707{ 697{
708 struct caam_ctx *ctx = crypto_aead_ctx(aead); 698 struct caam_ctx *ctx = crypto_aead_ctx(aead);
709 struct device *jrdev = ctx->jrdev; 699 struct device *jrdev = ctx->jrdev;
700 int err;
710 701
711 if (keylen < 4) 702 err = aes_check_keylen(keylen - 4);
712 return -EINVAL; 703 if (err) {
704 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
705 return err;
706 }
713 707
714 print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 708 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
715 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 709 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -727,7 +721,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
727} 721}
728 722
729static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 723static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
730 unsigned int keylen) 724 unsigned int keylen, const u32 ctx1_iv_off)
731{ 725{
732 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 726 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
733 struct caam_skcipher_alg *alg = 727 struct caam_skcipher_alg *alg =
@@ -736,30 +730,10 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
736 struct device *jrdev = ctx->jrdev; 730 struct device *jrdev = ctx->jrdev;
737 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 731 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
738 u32 *desc; 732 u32 *desc;
739 u32 ctx1_iv_off = 0;
740 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
741 OP_ALG_AAI_CTR_MOD128);
742 const bool is_rfc3686 = alg->caam.rfc3686; 733 const bool is_rfc3686 = alg->caam.rfc3686;
743 734
744 print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 735 print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
745 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 736 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
746 /*
747 * AES-CTR needs to load IV in CONTEXT1 reg
748 * at an offset of 128bits (16bytes)
749 * CONTEXT1[255:128] = IV
750 */
751 if (ctr_mode)
752 ctx1_iv_off = 16;
753
754 /*
755 * RFC3686 specific:
756 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
757 * | *key = {KEY, NONCE}
758 */
759 if (is_rfc3686) {
760 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
761 keylen -= CTR_RFC3686_NONCE_SIZE;
762 }
763 737
764 ctx->cdata.keylen = keylen; 738 ctx->cdata.keylen = keylen;
765 ctx->cdata.key_virt = key; 739 ctx->cdata.key_virt = key;
@@ -782,25 +756,86 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
782 return 0; 756 return 0;
783} 757}
784 758
785static int des_skcipher_setkey(struct crypto_skcipher *skcipher, 759static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
786 const u8 *key, unsigned int keylen) 760 const u8 *key, unsigned int keylen)
787{ 761{
788 u32 tmp[DES3_EDE_EXPKEY_WORDS]; 762 int err;
789 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
790 763
791 if (keylen == DES3_EDE_KEY_SIZE && 764 err = aes_check_keylen(keylen);
792 __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) { 765 if (err) {
793 return -EINVAL; 766 crypto_skcipher_set_flags(skcipher,
767 CRYPTO_TFM_RES_BAD_KEY_LEN);
768 return err;
794 } 769 }
795 770
796 if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & 771 return skcipher_setkey(skcipher, key, keylen, 0);
797 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 772}
773
774static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
775 const u8 *key, unsigned int keylen)
776{
777 u32 ctx1_iv_off;
778 int err;
779
780 /*
781 * RFC3686 specific:
782 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
783 * | *key = {KEY, NONCE}
784 */
785 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
786 keylen -= CTR_RFC3686_NONCE_SIZE;
787
788 err = aes_check_keylen(keylen);
789 if (err) {
798 crypto_skcipher_set_flags(skcipher, 790 crypto_skcipher_set_flags(skcipher,
799 CRYPTO_TFM_RES_WEAK_KEY); 791 CRYPTO_TFM_RES_BAD_KEY_LEN);
800 return -EINVAL; 792 return err;
801 } 793 }
802 794
803 return skcipher_setkey(skcipher, key, keylen); 795 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
796}
797
798static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
799 const u8 *key, unsigned int keylen)
800{
801 u32 ctx1_iv_off;
802 int err;
803
804 /*
805 * AES-CTR needs to load IV in CONTEXT1 reg
806 * at an offset of 128bits (16bytes)
807 * CONTEXT1[255:128] = IV
808 */
809 ctx1_iv_off = 16;
810
811 err = aes_check_keylen(keylen);
812 if (err) {
813 crypto_skcipher_set_flags(skcipher,
814 CRYPTO_TFM_RES_BAD_KEY_LEN);
815 return err;
816 }
817
818 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
819}
820
821static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
822 const u8 *key, unsigned int keylen)
823{
824 return skcipher_setkey(skcipher, key, keylen, 0);
825}
826
827static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
828 const u8 *key, unsigned int keylen)
829{
830 return verify_skcipher_des_key(skcipher, key) ?:
831 skcipher_setkey(skcipher, key, keylen, 0);
832}
833
834static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
835 const u8 *key, unsigned int keylen)
836{
837 return verify_skcipher_des3_key(skcipher, key) ?:
838 skcipher_setkey(skcipher, key, keylen, 0);
804} 839}
805 840
806static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 841static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -930,19 +965,20 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
930{ 965{
931 struct aead_request *req = context; 966 struct aead_request *req = context;
932 struct aead_edesc *edesc; 967 struct aead_edesc *edesc;
968 int ecode = 0;
933 969
934 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 970 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
935 971
936 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 972 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
937 973
938 if (err) 974 if (err)
939 caam_jr_strstatus(jrdev, err); 975 ecode = caam_jr_strstatus(jrdev, err);
940 976
941 aead_unmap(jrdev, edesc, req); 977 aead_unmap(jrdev, edesc, req);
942 978
943 kfree(edesc); 979 kfree(edesc);
944 980
945 aead_request_complete(req, err); 981 aead_request_complete(req, ecode);
946} 982}
947 983
948static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 984static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -950,25 +986,20 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
950{ 986{
951 struct aead_request *req = context; 987 struct aead_request *req = context;
952 struct aead_edesc *edesc; 988 struct aead_edesc *edesc;
989 int ecode = 0;
953 990
954 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 991 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
955 992
956 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 993 edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
957 994
958 if (err) 995 if (err)
959 caam_jr_strstatus(jrdev, err); 996 ecode = caam_jr_strstatus(jrdev, err);
960 997
961 aead_unmap(jrdev, edesc, req); 998 aead_unmap(jrdev, edesc, req);
962 999
963 /*
964 * verify hw auth check passed else return -EBADMSG
965 */
966 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
967 err = -EBADMSG;
968
969 kfree(edesc); 1000 kfree(edesc);
970 1001
971 aead_request_complete(req, err); 1002 aead_request_complete(req, ecode);
972} 1003}
973 1004
974static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 1005static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -978,13 +1009,14 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
978 struct skcipher_edesc *edesc; 1009 struct skcipher_edesc *edesc;
979 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1010 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
980 int ivsize = crypto_skcipher_ivsize(skcipher); 1011 int ivsize = crypto_skcipher_ivsize(skcipher);
1012 int ecode = 0;
981 1013
982 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1014 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
983 1015
984 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 1016 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
985 1017
986 if (err) 1018 if (err)
987 caam_jr_strstatus(jrdev, err); 1019 ecode = caam_jr_strstatus(jrdev, err);
988 1020
989 skcipher_unmap(jrdev, edesc, req); 1021 skcipher_unmap(jrdev, edesc, req);
990 1022
@@ -993,10 +1025,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
993 * ciphertext block (CBC mode) or last counter (CTR mode). 1025 * ciphertext block (CBC mode) or last counter (CTR mode).
994 * This is used e.g. by the CTS mode. 1026 * This is used e.g. by the CTS mode.
995 */ 1027 */
996 if (ivsize) { 1028 if (ivsize && !ecode) {
997 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, 1029 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
998 ivsize); 1030 ivsize);
999
1000 print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", 1031 print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
1001 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1032 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1002 edesc->src_nents > 1 ? 100 : ivsize, 1); 1033 edesc->src_nents > 1 ? 100 : ivsize, 1);
@@ -1008,7 +1039,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1008 1039
1009 kfree(edesc); 1040 kfree(edesc);
1010 1041
1011 skcipher_request_complete(req, err); 1042 skcipher_request_complete(req, ecode);
1012} 1043}
1013 1044
1014static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 1045static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -1018,12 +1049,13 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1018 struct skcipher_edesc *edesc; 1049 struct skcipher_edesc *edesc;
1019 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1050 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1020 int ivsize = crypto_skcipher_ivsize(skcipher); 1051 int ivsize = crypto_skcipher_ivsize(skcipher);
1052 int ecode = 0;
1021 1053
1022 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1054 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1023 1055
1024 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); 1056 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
1025 if (err) 1057 if (err)
1026 caam_jr_strstatus(jrdev, err); 1058 ecode = caam_jr_strstatus(jrdev, err);
1027 1059
1028 skcipher_unmap(jrdev, edesc, req); 1060 skcipher_unmap(jrdev, edesc, req);
1029 1061
@@ -1032,7 +1064,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1032 * ciphertext block (CBC mode) or last counter (CTR mode). 1064 * ciphertext block (CBC mode) or last counter (CTR mode).
1033 * This is used e.g. by the CTS mode. 1065 * This is used e.g. by the CTS mode.
1034 */ 1066 */
1035 if (ivsize) { 1067 if (ivsize && !ecode) {
1036 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, 1068 memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
1037 ivsize); 1069 ivsize);
1038 1070
@@ -1047,7 +1079,7 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1047 1079
1048 kfree(edesc); 1080 kfree(edesc);
1049 1081
1050 skcipher_request_complete(req, err); 1082 skcipher_request_complete(req, ecode);
1051} 1083}
1052 1084
1053/* 1085/*
@@ -1525,10 +1557,7 @@ static int chachapoly_decrypt(struct aead_request *req)
1525 1557
1526static int ipsec_gcm_encrypt(struct aead_request *req) 1558static int ipsec_gcm_encrypt(struct aead_request *req)
1527{ 1559{
1528 if (req->assoclen < 8) 1560 return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
1529 return -EINVAL;
1530
1531 return gcm_encrypt(req);
1532} 1561}
1533 1562
1534static int aead_encrypt(struct aead_request *req) 1563static int aead_encrypt(struct aead_request *req)
@@ -1602,10 +1631,7 @@ static int gcm_decrypt(struct aead_request *req)
1602 1631
1603static int ipsec_gcm_decrypt(struct aead_request *req) 1632static int ipsec_gcm_decrypt(struct aead_request *req)
1604{ 1633{
1605 if (req->assoclen < 8) 1634 return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
1606 return -EINVAL;
1607
1608 return gcm_decrypt(req);
1609} 1635}
1610 1636
1611static int aead_decrypt(struct aead_request *req) 1637static int aead_decrypt(struct aead_request *req)
@@ -1817,6 +1843,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
1817 u32 *desc; 1843 u32 *desc;
1818 int ret = 0; 1844 int ret = 0;
1819 1845
1846 if (!req->cryptlen)
1847 return 0;
1848
1820 /* allocate extended descriptor */ 1849 /* allocate extended descriptor */
1821 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1850 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1822 if (IS_ERR(edesc)) 1851 if (IS_ERR(edesc))
@@ -1851,6 +1880,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
1851 u32 *desc; 1880 u32 *desc;
1852 int ret = 0; 1881 int ret = 0;
1853 1882
1883 if (!req->cryptlen)
1884 return 0;
1885
1854 /* allocate extended descriptor */ 1886 /* allocate extended descriptor */
1855 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1887 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1856 if (IS_ERR(edesc)) 1888 if (IS_ERR(edesc))
@@ -1883,7 +1915,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1883 .cra_driver_name = "cbc-aes-caam", 1915 .cra_driver_name = "cbc-aes-caam",
1884 .cra_blocksize = AES_BLOCK_SIZE, 1916 .cra_blocksize = AES_BLOCK_SIZE,
1885 }, 1917 },
1886 .setkey = skcipher_setkey, 1918 .setkey = aes_skcipher_setkey,
1887 .encrypt = skcipher_encrypt, 1919 .encrypt = skcipher_encrypt,
1888 .decrypt = skcipher_decrypt, 1920 .decrypt = skcipher_decrypt,
1889 .min_keysize = AES_MIN_KEY_SIZE, 1921 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1899,7 +1931,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1899 .cra_driver_name = "cbc-3des-caam", 1931 .cra_driver_name = "cbc-3des-caam",
1900 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1932 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1901 }, 1933 },
1902 .setkey = des_skcipher_setkey, 1934 .setkey = des3_skcipher_setkey,
1903 .encrypt = skcipher_encrypt, 1935 .encrypt = skcipher_encrypt,
1904 .decrypt = skcipher_decrypt, 1936 .decrypt = skcipher_decrypt,
1905 .min_keysize = DES3_EDE_KEY_SIZE, 1937 .min_keysize = DES3_EDE_KEY_SIZE,
@@ -1931,7 +1963,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1931 .cra_driver_name = "ctr-aes-caam", 1963 .cra_driver_name = "ctr-aes-caam",
1932 .cra_blocksize = 1, 1964 .cra_blocksize = 1,
1933 }, 1965 },
1934 .setkey = skcipher_setkey, 1966 .setkey = ctr_skcipher_setkey,
1935 .encrypt = skcipher_encrypt, 1967 .encrypt = skcipher_encrypt,
1936 .decrypt = skcipher_decrypt, 1968 .decrypt = skcipher_decrypt,
1937 .min_keysize = AES_MIN_KEY_SIZE, 1969 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1949,7 +1981,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1949 .cra_driver_name = "rfc3686-ctr-aes-caam", 1981 .cra_driver_name = "rfc3686-ctr-aes-caam",
1950 .cra_blocksize = 1, 1982 .cra_blocksize = 1,
1951 }, 1983 },
1952 .setkey = skcipher_setkey, 1984 .setkey = rfc3686_skcipher_setkey,
1953 .encrypt = skcipher_encrypt, 1985 .encrypt = skcipher_encrypt,
1954 .decrypt = skcipher_decrypt, 1986 .decrypt = skcipher_decrypt,
1955 .min_keysize = AES_MIN_KEY_SIZE + 1987 .min_keysize = AES_MIN_KEY_SIZE +
@@ -2003,7 +2035,7 @@ static struct caam_skcipher_alg driver_algs[] = {
2003 .cra_driver_name = "ecb-aes-caam", 2035 .cra_driver_name = "ecb-aes-caam",
2004 .cra_blocksize = AES_BLOCK_SIZE, 2036 .cra_blocksize = AES_BLOCK_SIZE,
2005 }, 2037 },
2006 .setkey = skcipher_setkey, 2038 .setkey = aes_skcipher_setkey,
2007 .encrypt = skcipher_encrypt, 2039 .encrypt = skcipher_encrypt,
2008 .decrypt = skcipher_decrypt, 2040 .decrypt = skcipher_decrypt,
2009 .min_keysize = AES_MIN_KEY_SIZE, 2041 .min_keysize = AES_MIN_KEY_SIZE,
@@ -2018,7 +2050,7 @@ static struct caam_skcipher_alg driver_algs[] = {
2018 .cra_driver_name = "ecb-des3-caam", 2050 .cra_driver_name = "ecb-des3-caam",
2019 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2051 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2020 }, 2052 },
2021 .setkey = des_skcipher_setkey, 2053 .setkey = des3_skcipher_setkey,
2022 .encrypt = skcipher_encrypt, 2054 .encrypt = skcipher_encrypt,
2023 .decrypt = skcipher_decrypt, 2055 .decrypt = skcipher_decrypt,
2024 .min_keysize = DES3_EDE_KEY_SIZE, 2056 .min_keysize = DES3_EDE_KEY_SIZE,
@@ -2033,7 +2065,7 @@ static struct caam_skcipher_alg driver_algs[] = {
2033 .cra_driver_name = "ecb-arc4-caam", 2065 .cra_driver_name = "ecb-arc4-caam",
2034 .cra_blocksize = ARC4_BLOCK_SIZE, 2066 .cra_blocksize = ARC4_BLOCK_SIZE,
2035 }, 2067 },
2036 .setkey = skcipher_setkey, 2068 .setkey = arc4_skcipher_setkey,
2037 .encrypt = skcipher_encrypt, 2069 .encrypt = skcipher_encrypt,
2038 .decrypt = skcipher_decrypt, 2070 .decrypt = skcipher_decrypt,
2039 .min_keysize = ARC4_MIN_KEY_SIZE, 2071 .min_keysize = ARC4_MIN_KEY_SIZE,
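The caamalg.c refactor above splits the one-size-fits-all skcipher_setkey() into thin per-mode wrappers that validate the key length and pass the mode-specific CONTEXT1 IV offset down to a common helper. A condensed sketch of that dispatch shape (illustrative only, mirroring the pattern used in the patch):

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>

	/* Common helper; takes the already-validated key plus the IV offset. */
	static int generic_setkey(struct crypto_skcipher *sk, const u8 *key,
				  unsigned int keylen, u32 ctx1_iv_off);

	static int example_ctr_aes_setkey(struct crypto_skcipher *sk,
					  const u8 *key, unsigned int keylen)
	{
		/* aes_check_keylen() accepts only 16-, 24- and 32-byte keys. */
		int err = aes_check_keylen(keylen);

		if (err)
			return err;

		/* AES-CTR keeps the IV at a 16-byte offset in CONTEXT1. */
		return generic_setkey(sk, key, keylen, 16);
	}

The DES/3DES wrappers chain the key check and the common call with the GCC ?: extension, e.g. return verify_skcipher_des_key(sk, key) ?: generic_setkey(sk, key, keylen, 0); the real wrappers in the patch additionally set CRYPTO_TFM_RES_BAD_KEY_LEN before returning an AES key-length error.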
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 72531837571e..aa9ccca67045 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -503,6 +503,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
503 const bool is_qi, int era) 503 const bool is_qi, int era)
504{ 504{
505 u32 geniv, moveiv; 505 u32 geniv, moveiv;
506 u32 *wait_cmd;
506 507
507 /* Note: Context registers are saved. */ 508 /* Note: Context registers are saved. */
508 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 509 init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
@@ -598,6 +599,14 @@ copy_iv:
598 599
599 /* Will read cryptlen */ 600 /* Will read cryptlen */
600 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 601 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
602
603 /*
604 * Wait for IV transfer (ofifo -> class2) to finish before starting
605 * ciphertext transfer (ofifo -> external memory).
606 */
607 wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
608 set_jump_tgt_here(desc, wait_cmd);
609
601 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | 610 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
602 FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); 611 FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
603 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); 612 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
@@ -843,13 +852,16 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
843 * @ivsize: initialization vector size 852 * @ivsize: initialization vector size
844 * @icvsize: integrity check value (ICV) size (truncated or full) 853 * @icvsize: integrity check value (ICV) size (truncated or full)
845 * @is_qi: true when called from caam/qi 854 * @is_qi: true when called from caam/qi
855 *
856 * Input sequence: AAD | PTXT
857 * Output sequence: AAD | CTXT | ICV
858 * AAD length (assoclen), which includes the IV length, is available in Math3.
846 */ 859 */
847void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, 860void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
848 unsigned int ivsize, unsigned int icvsize, 861 unsigned int ivsize, unsigned int icvsize,
849 const bool is_qi) 862 const bool is_qi)
850{ 863{
851 u32 *key_jump_cmd; 864 u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions;
852
853 init_sh_desc(desc, HDR_SHARE_SERIAL); 865 init_sh_desc(desc, HDR_SHARE_SERIAL);
854 866
855 /* Skip key loading if it is loaded due to sharing */ 867 /* Skip key loading if it is loaded due to sharing */
@@ -892,24 +904,26 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
892 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); 904 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
893 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 905 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
894 906
895 /* Read assoc data */ 907 /* Skip AAD */
896 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | 908 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
897 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
898 909
899 /* Skip IV */ 910 /* Read cryptlen and set this value into VARSEQOUTLEN */
900 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); 911 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
901 912
902 /* Will read cryptlen bytes */ 913 /* If cryptlen is ZERO jump to AAD command */
903 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 914 zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
915 JUMP_COND_MATH_Z);
904 916
905 /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ 917 /* Read AAD data */
906 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); 918 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
919 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
907 920
908 /* Skip assoc data */ 921 /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
909 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); 922 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA);
910 923
911 /* cryptlen = seqoutlen - assoclen */ 924 /* Skip IV */
912 append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); 925 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
926 append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ);
913 927
914 /* Write encrypted data */ 928 /* Write encrypted data */
915 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); 929 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -918,6 +932,18 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
918 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | 932 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
919 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); 933 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
920 934
935 /* Jump instructions to avoid double reading of AAD */
936 skip_instructions = append_jump(desc, JUMP_TEST_ALL);
937
938 /* There is no input data, cryptlen = 0 */
939 set_jump_tgt_here(desc, zero_cryptlen_jump_cmd);
940
941 /* Read AAD */
942 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
943 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
944
945 set_jump_tgt_here(desc, skip_instructions);
946
921 /* Write ICV */ 947 /* Write ICV */
922 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 948 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
923 LDST_SRCDST_BYTE_CONTEXT); 949 LDST_SRCDST_BYTE_CONTEXT);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index da4a4ee60c80..f2893393ba5e 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,7 +12,7 @@
12#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) 12#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
13#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) 13#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
14#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) 14#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
15#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) 15#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
16#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) 16#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
17#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) 17#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
18#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) 18#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
@@ -31,7 +31,7 @@
31#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ) 31#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
32 32
33#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) 33#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
34#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) 34#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 16 * CAAM_CMD_SZ)
35#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) 35#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
36#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ) 36#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
37#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ) 37#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 32f0f8a72067..8e3449670d2f 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -105,6 +105,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
105 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 105 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
106 } 106 }
107 107
108 /*
109 * In case |user key| > |derived key|, using DKP<imm,imm> would result
110 * in invalid opcodes (last bytes of user key) in the resulting
111 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
112 * addresses are needed.
113 */
114 ctx->adata.key_virt = ctx->key;
115 ctx->adata.key_dma = ctx->key_dma;
116
117 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
118 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
119
108 data_len[0] = ctx->adata.keylen_pad; 120 data_len[0] = ctx->adata.keylen_pad;
109 data_len[1] = ctx->cdata.keylen; 121 data_len[1] = ctx->cdata.keylen;
110 122
@@ -118,16 +130,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
118 ARRAY_SIZE(data_len)) < 0) 130 ARRAY_SIZE(data_len)) < 0)
119 return -EINVAL; 131 return -EINVAL;
120 132
121 if (inl_mask & 1)
122 ctx->adata.key_virt = ctx->key;
123 else
124 ctx->adata.key_dma = ctx->key_dma;
125
126 if (inl_mask & 2)
127 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
128 else
129 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
130
131 ctx->adata.key_inline = !!(inl_mask & 1); 133 ctx->adata.key_inline = !!(inl_mask & 1);
132 ctx->cdata.key_inline = !!(inl_mask & 2); 134 ctx->cdata.key_inline = !!(inl_mask & 2);
133 135
@@ -143,16 +145,6 @@ skip_enc:
143 ARRAY_SIZE(data_len)) < 0) 145 ARRAY_SIZE(data_len)) < 0)
144 return -EINVAL; 146 return -EINVAL;
145 147
146 if (inl_mask & 1)
147 ctx->adata.key_virt = ctx->key;
148 else
149 ctx->adata.key_dma = ctx->key_dma;
150
151 if (inl_mask & 2)
152 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
153 else
154 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
155
156 ctx->adata.key_inline = !!(inl_mask & 1); 148 ctx->adata.key_inline = !!(inl_mask & 1);
157 ctx->cdata.key_inline = !!(inl_mask & 2); 149 ctx->cdata.key_inline = !!(inl_mask & 2);
158 150
@@ -171,16 +163,6 @@ skip_enc:
171 ARRAY_SIZE(data_len)) < 0) 163 ARRAY_SIZE(data_len)) < 0)
172 return -EINVAL; 164 return -EINVAL;
173 165
174 if (inl_mask & 1)
175 ctx->adata.key_virt = ctx->key;
176 else
177 ctx->adata.key_dma = ctx->key_dma;
178
179 if (inl_mask & 2)
180 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
181 else
182 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
183
184 ctx->adata.key_inline = !!(inl_mask & 1); 166 ctx->adata.key_inline = !!(inl_mask & 1);
185 ctx->cdata.key_inline = !!(inl_mask & 2); 167 ctx->cdata.key_inline = !!(inl_mask & 2);
186 168
@@ -252,11 +234,10 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
252 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 234 dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
253 ctx->adata.keylen_pad + keys.enckeylen, 235 ctx->adata.keylen_pad + keys.enckeylen,
254 ctx->dir); 236 ctx->dir);
255#ifdef DEBUG 237
256 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 238 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
257 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 239 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
258 ctx->adata.keylen_pad + keys.enckeylen, 1); 240 ctx->adata.keylen_pad + keys.enckeylen, 1);
259#endif
260 241
261skip_split_key: 242skip_split_key:
262 ctx->cdata.keylen = keys.enckeylen; 243 ctx->cdata.keylen = keys.enckeylen;
@@ -296,33 +277,17 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
296 unsigned int keylen) 277 unsigned int keylen)
297{ 278{
298 struct crypto_authenc_keys keys; 279 struct crypto_authenc_keys keys;
299 u32 flags;
300 int err; 280 int err;
301 281
302 err = crypto_authenc_extractkeys(&keys, key, keylen); 282 err = crypto_authenc_extractkeys(&keys, key, keylen);
303 if (unlikely(err)) 283 if (unlikely(err))
304 goto badkey; 284 return err;
305
306 err = -EINVAL;
307 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
308 goto badkey;
309
310 flags = crypto_aead_get_flags(aead);
311 err = __des3_verify_key(&flags, keys.enckey);
312 if (unlikely(err)) {
313 crypto_aead_set_flags(aead, flags);
314 goto out;
315 }
316 285
317 err = aead_setkey(aead, key, keylen); 286 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
287 aead_setkey(aead, key, keylen);
318 288
319out:
320 memzero_explicit(&keys, sizeof(keys)); 289 memzero_explicit(&keys, sizeof(keys));
321 return err; 290 return err;
322
323badkey:
324 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
325 goto out;
326} 291}
327 292
328static int gcm_set_sh_desc(struct crypto_aead *aead) 293static int gcm_set_sh_desc(struct crypto_aead *aead)
@@ -371,6 +336,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
371static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 336static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
372{ 337{
373 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 338 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
339 int err;
340
341 err = crypto_gcm_check_authsize(authsize);
342 if (err)
343 return err;
374 344
375 ctx->authsize = authsize; 345 ctx->authsize = authsize;
376 gcm_set_sh_desc(authenc); 346 gcm_set_sh_desc(authenc);
@@ -385,6 +355,12 @@ static int gcm_setkey(struct crypto_aead *aead,
385 struct device *jrdev = ctx->jrdev; 355 struct device *jrdev = ctx->jrdev;
386 int ret; 356 int ret;
387 357
358 ret = aes_check_keylen(keylen);
359 if (ret) {
360 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
361 return ret;
362 }
363
388 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 364 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
389 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 365 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
390 366
@@ -466,6 +442,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
466 unsigned int authsize) 442 unsigned int authsize)
467{ 443{
468 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 444 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
445 int err;
446
447 err = crypto_rfc4106_check_authsize(authsize);
448 if (err)
449 return err;
469 450
470 ctx->authsize = authsize; 451 ctx->authsize = authsize;
471 rfc4106_set_sh_desc(authenc); 452 rfc4106_set_sh_desc(authenc);
@@ -480,8 +461,11 @@ static int rfc4106_setkey(struct crypto_aead *aead,
480 struct device *jrdev = ctx->jrdev; 461 struct device *jrdev = ctx->jrdev;
481 int ret; 462 int ret;
482 463
483 if (keylen < 4) 464 ret = aes_check_keylen(keylen - 4);
484 return -EINVAL; 465 if (ret) {
466 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
467 return ret;
468 }
485 469
486 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 470 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
487 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 471 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -569,6 +553,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
569{ 553{
570 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 554 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
571 555
556 if (authsize != 16)
557 return -EINVAL;
558
572 ctx->authsize = authsize; 559 ctx->authsize = authsize;
573 rfc4543_set_sh_desc(authenc); 560 rfc4543_set_sh_desc(authenc);
574 561
@@ -582,8 +569,11 @@ static int rfc4543_setkey(struct crypto_aead *aead,
582 struct device *jrdev = ctx->jrdev; 569 struct device *jrdev = ctx->jrdev;
583 int ret; 570 int ret;
584 571
585 if (keylen < 4) 572 ret = aes_check_keylen(keylen - 4);
586 return -EINVAL; 573 if (ret) {
574 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
575 return ret;
576 }
587 577
588 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 578 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 579 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -624,7 +614,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
624} 614}
625 615
626static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 616static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
627 unsigned int keylen) 617 unsigned int keylen, const u32 ctx1_iv_off)
628{ 618{
629 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 619 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
630 struct caam_skcipher_alg *alg = 620 struct caam_skcipher_alg *alg =
@@ -632,33 +622,12 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
632 skcipher); 622 skcipher);
633 struct device *jrdev = ctx->jrdev; 623 struct device *jrdev = ctx->jrdev;
634 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 624 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
635 u32 ctx1_iv_off = 0;
636 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
637 OP_ALG_AAI_CTR_MOD128);
638 const bool is_rfc3686 = alg->caam.rfc3686; 625 const bool is_rfc3686 = alg->caam.rfc3686;
639 int ret = 0; 626 int ret = 0;
640 627
641 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 628 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
642 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 629 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
643 630
644 /*
645 * AES-CTR needs to load IV in CONTEXT1 reg
646 * at an offset of 128bits (16bytes)
647 * CONTEXT1[255:128] = IV
648 */
649 if (ctr_mode)
650 ctx1_iv_off = 16;
651
652 /*
653 * RFC3686 specific:
654 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
655 * | *key = {KEY, NONCE}
656 */
657 if (is_rfc3686) {
658 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
659 keylen -= CTR_RFC3686_NONCE_SIZE;
660 }
661
662 ctx->cdata.keylen = keylen; 631 ctx->cdata.keylen = keylen;
663 ctx->cdata.key_virt = key; 632 ctx->cdata.key_virt = key;
664 ctx->cdata.key_inline = true; 633 ctx->cdata.key_inline = true;
@@ -694,11 +663,80 @@ badkey:
694 return -EINVAL; 663 return -EINVAL;
695} 664}
696 665
666static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
667 const u8 *key, unsigned int keylen)
668{
669 int err;
670
671 err = aes_check_keylen(keylen);
672 if (err) {
673 crypto_skcipher_set_flags(skcipher,
674 CRYPTO_TFM_RES_BAD_KEY_LEN);
675 return err;
676 }
677
678 return skcipher_setkey(skcipher, key, keylen, 0);
679}
680
681static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
682 const u8 *key, unsigned int keylen)
683{
684 u32 ctx1_iv_off;
685 int err;
686
687 /*
688 * RFC3686 specific:
689 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
690 * | *key = {KEY, NONCE}
691 */
692 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
693 keylen -= CTR_RFC3686_NONCE_SIZE;
694
695 err = aes_check_keylen(keylen);
696 if (err) {
697 crypto_skcipher_set_flags(skcipher,
698 CRYPTO_TFM_RES_BAD_KEY_LEN);
699 return err;
700 }
701
702 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
703}
704
705static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
706 const u8 *key, unsigned int keylen)
707{
708 u32 ctx1_iv_off;
709 int err;
710
711 /*
712 * AES-CTR needs to load IV in CONTEXT1 reg
713 * at an offset of 128bits (16bytes)
714 * CONTEXT1[255:128] = IV
715 */
716 ctx1_iv_off = 16;
717
718 err = aes_check_keylen(keylen);
719 if (err) {
720 crypto_skcipher_set_flags(skcipher,
721 CRYPTO_TFM_RES_BAD_KEY_LEN);
722 return err;
723 }
724
725 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
726}
727
697static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, 728static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
698 const u8 *key, unsigned int keylen) 729 const u8 *key, unsigned int keylen)
699{ 730{
700 return unlikely(des3_verify_key(skcipher, key)) ?: 731 return verify_skcipher_des3_key(skcipher, key) ?:
701 skcipher_setkey(skcipher, key, keylen); 732 skcipher_setkey(skcipher, key, keylen, 0);
733}
734
735static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
736 const u8 *key, unsigned int keylen)
737{
738 return verify_skcipher_des_key(skcipher, key) ?:
739 skcipher_setkey(skcipher, key, keylen, 0);
702} 740}
703 741
704static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 742static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -884,20 +922,8 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
884 922
885 qidev = caam_ctx->qidev; 923 qidev = caam_ctx->qidev;
886 924
887 if (unlikely(status)) { 925 if (unlikely(status))
888 u32 ssrc = status & JRSTA_SSRC_MASK; 926 ecode = caam_jr_strstatus(qidev, status);
889 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
890
891 caam_jr_strstatus(qidev, status);
892 /*
893 * verify hw auth check passed else return -EBADMSG
894 */
895 if (ssrc == JRSTA_SSRC_CCB_ERROR &&
896 err_id == JRSTA_CCBERR_ERRID_ICVCHK)
897 ecode = -EBADMSG;
898 else
899 ecode = -EIO;
900 }
901 927
902 edesc = container_of(drv_req, typeof(*edesc), drv_req); 928 edesc = container_of(drv_req, typeof(*edesc), drv_req);
903 aead_unmap(qidev, edesc, aead_req); 929 aead_unmap(qidev, edesc, aead_req);
@@ -1168,18 +1194,14 @@ static int aead_decrypt(struct aead_request *req)
1168 1194
1169static int ipsec_gcm_encrypt(struct aead_request *req) 1195static int ipsec_gcm_encrypt(struct aead_request *req)
1170{ 1196{
1171 if (req->assoclen < 8) 1197 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1172 return -EINVAL; 1198 true);
1173
1174 return aead_crypt(req, true);
1175} 1199}
1176 1200
1177static int ipsec_gcm_decrypt(struct aead_request *req) 1201static int ipsec_gcm_decrypt(struct aead_request *req)
1178{ 1202{
1179 if (req->assoclen < 8) 1203 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
1180 return -EINVAL; 1204 false);
1181
1182 return aead_crypt(req, false);
1183} 1205}
1184 1206
1185static void skcipher_done(struct caam_drv_req *drv_req, u32 status) 1207static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
@@ -1190,13 +1212,14 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
1190 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); 1212 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
1191 struct device *qidev = caam_ctx->qidev; 1213 struct device *qidev = caam_ctx->qidev;
1192 int ivsize = crypto_skcipher_ivsize(skcipher); 1214 int ivsize = crypto_skcipher_ivsize(skcipher);
1215 int ecode = 0;
1193 1216
1194 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1217 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
1195 1218
1196 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1219 edesc = container_of(drv_req, typeof(*edesc), drv_req);
1197 1220
1198 if (status) 1221 if (status)
1199 caam_jr_strstatus(qidev, status); 1222 ecode = caam_jr_strstatus(qidev, status);
1200 1223
1201 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1224 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1202 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1225 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1212,10 +1235,12 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
1212 * ciphertext block (CBC mode) or last counter (CTR mode). 1235 * ciphertext block (CBC mode) or last counter (CTR mode).
1213 * This is used e.g. by the CTS mode. 1236 * This is used e.g. by the CTS mode.
1214 */ 1237 */
1215 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1238 if (!ecode)
1239 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1240 ivsize);
1216 1241
1217 qi_cache_free(edesc); 1242 qi_cache_free(edesc);
1218 skcipher_request_complete(req, status); 1243 skcipher_request_complete(req, ecode);
1219} 1244}
1220 1245
1221static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, 1246static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
@@ -1377,6 +1402,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1377 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1402 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1378 int ret; 1403 int ret;
1379 1404
1405 if (!req->cryptlen)
1406 return 0;
1407
1380 if (unlikely(caam_congested)) 1408 if (unlikely(caam_congested))
1381 return -EAGAIN; 1409 return -EAGAIN;
1382 1410
@@ -1414,7 +1442,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1414 .cra_driver_name = "cbc-aes-caam-qi", 1442 .cra_driver_name = "cbc-aes-caam-qi",
1415 .cra_blocksize = AES_BLOCK_SIZE, 1443 .cra_blocksize = AES_BLOCK_SIZE,
1416 }, 1444 },
1417 .setkey = skcipher_setkey, 1445 .setkey = aes_skcipher_setkey,
1418 .encrypt = skcipher_encrypt, 1446 .encrypt = skcipher_encrypt,
1419 .decrypt = skcipher_decrypt, 1447 .decrypt = skcipher_decrypt,
1420 .min_keysize = AES_MIN_KEY_SIZE, 1448 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1446,7 +1474,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1446 .cra_driver_name = "cbc-des-caam-qi", 1474 .cra_driver_name = "cbc-des-caam-qi",
1447 .cra_blocksize = DES_BLOCK_SIZE, 1475 .cra_blocksize = DES_BLOCK_SIZE,
1448 }, 1476 },
1449 .setkey = skcipher_setkey, 1477 .setkey = des_skcipher_setkey,
1450 .encrypt = skcipher_encrypt, 1478 .encrypt = skcipher_encrypt,
1451 .decrypt = skcipher_decrypt, 1479 .decrypt = skcipher_decrypt,
1452 .min_keysize = DES_KEY_SIZE, 1480 .min_keysize = DES_KEY_SIZE,
@@ -1462,7 +1490,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1462 .cra_driver_name = "ctr-aes-caam-qi", 1490 .cra_driver_name = "ctr-aes-caam-qi",
1463 .cra_blocksize = 1, 1491 .cra_blocksize = 1,
1464 }, 1492 },
1465 .setkey = skcipher_setkey, 1493 .setkey = ctr_skcipher_setkey,
1466 .encrypt = skcipher_encrypt, 1494 .encrypt = skcipher_encrypt,
1467 .decrypt = skcipher_decrypt, 1495 .decrypt = skcipher_decrypt,
1468 .min_keysize = AES_MIN_KEY_SIZE, 1496 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1480,7 +1508,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1480 .cra_driver_name = "rfc3686-ctr-aes-caam-qi", 1508 .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
1481 .cra_blocksize = 1, 1509 .cra_blocksize = 1,
1482 }, 1510 },
1483 .setkey = skcipher_setkey, 1511 .setkey = rfc3686_skcipher_setkey,
1484 .encrypt = skcipher_encrypt, 1512 .encrypt = skcipher_encrypt,
1485 .decrypt = skcipher_decrypt, 1513 .decrypt = skcipher_decrypt,
1486 .min_keysize = AES_MIN_KEY_SIZE + 1514 .min_keysize = AES_MIN_KEY_SIZE +
@@ -2523,10 +2551,9 @@ int caam_qi_algapi_init(struct device *ctrldev)
2523 unsigned int md_limit = SHA512_DIGEST_SIZE; 2551 unsigned int md_limit = SHA512_DIGEST_SIZE;
2524 bool registered = false; 2552 bool registered = false;
2525 2553
2526 if (caam_dpaa2) { 2554 /* Make sure this runs only on (DPAA 1.x) QI */
2527 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); 2555 if (!priv->qi_present || caam_dpaa2)
2528 return -ENODEV; 2556 return 0;
2529 }
2530 2557
2531 /* 2558 /*
2532 * Register crypto algorithms the device supports. 2559 * Register crypto algorithms the device supports.
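A recurring change throughout the caam hunks is that caam_jr_strstatus() now returns a Linux error code, so the completion callbacks forward a translated status instead of the raw hardware word and can drop the open-coded -EBADMSG conversion for ICV-check failures. A schematic job-ring callback, assuming that return-value convention:

	static void example_done(struct device *jrdev, u32 *desc, u32 err,
				 void *context)
	{
		struct aead_request *req = context;
		int ecode = 0;

		/* Translate the hardware status word into an errno once. */
		if (err)
			ecode = caam_jr_strstatus(jrdev, err);

		/* ... unmap and free the extended descriptor here ... */

		aead_request_complete(req, ecode);
	}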
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 06bf32c32cbd..3443f6d6dd83 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -15,6 +15,7 @@
15#include "key_gen.h" 15#include "key_gen.h"
16#include "caamalg_desc.h" 16#include "caamalg_desc.h"
17#include "caamhash_desc.h" 17#include "caamhash_desc.h"
18#include "dpseci-debugfs.h"
18#include <linux/fsl/mc.h> 19#include <linux/fsl/mc.h>
19#include <soc/fsl/dpaa2-io.h> 20#include <soc/fsl/dpaa2-io.h>
20#include <soc/fsl/dpaa2-fd.h> 21#include <soc/fsl/dpaa2-fd.h>
@@ -198,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
198 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 199 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
199 } 200 }
200 201
202 /*
203 * In case |user key| > |derived key|, using DKP<imm,imm> would result
204 * in invalid opcodes (last bytes of user key) in the resulting
205 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
206 * addresses are needed.
207 */
208 ctx->adata.key_virt = ctx->key;
209 ctx->adata.key_dma = ctx->key_dma;
210
211 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
212 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
213
201 data_len[0] = ctx->adata.keylen_pad; 214 data_len[0] = ctx->adata.keylen_pad;
202 data_len[1] = ctx->cdata.keylen; 215 data_len[1] = ctx->cdata.keylen;
203 216
@@ -209,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
209 ARRAY_SIZE(data_len)) < 0) 222 ARRAY_SIZE(data_len)) < 0)
210 return -EINVAL; 223 return -EINVAL;
211 224
212 if (inl_mask & 1)
213 ctx->adata.key_virt = ctx->key;
214 else
215 ctx->adata.key_dma = ctx->key_dma;
216
217 if (inl_mask & 2)
218 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
219 else
220 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
221
222 ctx->adata.key_inline = !!(inl_mask & 1); 225 ctx->adata.key_inline = !!(inl_mask & 1);
223 ctx->cdata.key_inline = !!(inl_mask & 2); 226 ctx->cdata.key_inline = !!(inl_mask & 2);
224 227
@@ -247,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
247 ARRAY_SIZE(data_len)) < 0) 250 ARRAY_SIZE(data_len)) < 0)
248 return -EINVAL; 251 return -EINVAL;
249 252
250 if (inl_mask & 1)
251 ctx->adata.key_virt = ctx->key;
252 else
253 ctx->adata.key_dma = ctx->key_dma;
254
255 if (inl_mask & 2)
256 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
257 else
258 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
259
260 ctx->adata.key_inline = !!(inl_mask & 1); 253 ctx->adata.key_inline = !!(inl_mask & 1);
261 ctx->cdata.key_inline = !!(inl_mask & 2); 254 ctx->cdata.key_inline = !!(inl_mask & 2);
262 255
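The two hunks above stop choosing between key_virt and key_dma based on inl_mask and instead populate both unconditionally, because DKP must fall back to a pointer form when the user key is longer than the derived key. As a rough, purely illustrative sketch of that reasoning (not the driver's descriptor-construction code; all names here are made up), the inline-vs-pointer decision boils down to:

	#include <stdbool.h>

	/*
	 * Illustrative only: inlining the key into the descriptor is safe
	 * only when the whole user key fits in the space reserved for the
	 * derived (split) key; otherwise the trailing user-key bytes would
	 * be decoded as descriptor opcodes, so a DMA pointer must be used.
	 */
	static bool can_inline_key(unsigned int user_keylen,
				   unsigned int derived_keylen_pad,
				   bool fits_in_descriptor)
	{
		return fits_in_descriptor && user_keylen <= derived_keylen_pad;
	}

Keeping both addresses around lets the descriptor code make that call per shared descriptor without the setkey path having to guess.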
@@ -329,7 +322,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
329 unsigned int keylen) 322 unsigned int keylen)
330{ 323{
331 struct crypto_authenc_keys keys; 324 struct crypto_authenc_keys keys;
332 u32 flags;
333 int err; 325 int err;
334 326
335 err = crypto_authenc_extractkeys(&keys, key, keylen); 327 err = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -340,14 +332,8 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
340 if (keys.enckeylen != DES3_EDE_KEY_SIZE) 332 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
341 goto badkey; 333 goto badkey;
342 334
343 flags = crypto_aead_get_flags(aead); 335 err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
344 err = __des3_verify_key(&flags, keys.enckey); 336 aead_setkey(aead, key, keylen);
345 if (unlikely(err)) {
346 crypto_aead_set_flags(aead, flags);
347 goto out;
348 }
349
350 err = aead_setkey(aead, key, keylen);
351 337
352out: 338out:
353 memzero_explicit(&keys, sizeof(keys)); 339 memzero_explicit(&keys, sizeof(keys));
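The des3 setkey path now chains key verification and the generic setkey with the GNU "?:" (Elvis) extension: the left operand is returned if non-zero, otherwise the right operand is evaluated. A minimal standalone illustration of that control flow, with stand-in functions rather than the CAAM ones:

	#include <errno.h>
	#include <stdio.h>

	static int verify_key(int bad) { return bad ? -EINVAL : 0; }
	static int do_setkey(void)     { puts("setkey ran"); return 0; }

	static int setkey(int bad)
	{
		/* GNU extension: "a ?: b" means "a ? a : b", with a evaluated once. */
		return verify_key(bad) ?: do_setkey();
	}

	int main(void)
	{
		printf("good key -> %d\n", setkey(0)); /* runs do_setkey(), returns 0 */
		printf("bad key  -> %d\n", setkey(1)); /* returns -EINVAL, skips setkey */
		return 0;
	}

The same pattern shows up below for the DES/3DES skcipher setkey wrappers.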
@@ -719,6 +705,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
719static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 705static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
720{ 706{
721 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 707 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
708 int err;
709
710 err = crypto_gcm_check_authsize(authsize);
711 if (err)
712 return err;
722 713
723 ctx->authsize = authsize; 714 ctx->authsize = authsize;
724 gcm_set_sh_desc(authenc); 715 gcm_set_sh_desc(authenc);
@@ -731,7 +722,13 @@ static int gcm_setkey(struct crypto_aead *aead,
731{ 722{
732 struct caam_ctx *ctx = crypto_aead_ctx(aead); 723 struct caam_ctx *ctx = crypto_aead_ctx(aead);
733 struct device *dev = ctx->dev; 724 struct device *dev = ctx->dev;
725 int ret;
734 726
727 ret = aes_check_keylen(keylen);
728 if (ret) {
729 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
730 return ret;
731 }
735 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 732 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
736 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 733 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
737 734
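gcm_setkey (and, further down, the rfc4106/rfc4543 variants, which strip the trailing 4-byte nonce first) now delegates key-length validation to aes_check_keylen() before touching the hardware descriptors. The accepted AES key sizes are 128, 192 and 256 bits; a self-contained sketch of an equivalent check:

	#include <errno.h>

	/* Sketch of a check equivalent to aes_check_keylen(): only 16-, 24-
	 * and 32-byte keys are valid AES keys.
	 */
	static int check_aes_keylen(unsigned int keylen)
	{
		switch (keylen) {
		case 16:	/* AES-128 */
		case 24:	/* AES-192 */
		case 32:	/* AES-256 */
			return 0;
		default:
			return -EINVAL;
		}
	}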
@@ -805,6 +802,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
805 unsigned int authsize) 802 unsigned int authsize)
806{ 803{
807 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 804 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
805 int err;
806
807 err = crypto_rfc4106_check_authsize(authsize);
808 if (err)
809 return err;
808 810
809 ctx->authsize = authsize; 811 ctx->authsize = authsize;
810 rfc4106_set_sh_desc(authenc); 812 rfc4106_set_sh_desc(authenc);
@@ -817,9 +819,13 @@ static int rfc4106_setkey(struct crypto_aead *aead,
817{ 819{
818 struct caam_ctx *ctx = crypto_aead_ctx(aead); 820 struct caam_ctx *ctx = crypto_aead_ctx(aead);
819 struct device *dev = ctx->dev; 821 struct device *dev = ctx->dev;
822 int ret;
820 823
821 if (keylen < 4) 824 ret = aes_check_keylen(keylen - 4);
822 return -EINVAL; 825 if (ret) {
826 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
827 return ret;
828 }
823 829
824 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 830 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
825 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 831 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -900,6 +906,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
900{ 906{
901 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 907 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
902 908
909 if (authsize != 16)
910 return -EINVAL;
911
903 ctx->authsize = authsize; 912 ctx->authsize = authsize;
904 rfc4543_set_sh_desc(authenc); 913 rfc4543_set_sh_desc(authenc);
905 914
@@ -911,9 +920,13 @@ static int rfc4543_setkey(struct crypto_aead *aead,
911{ 920{
912 struct caam_ctx *ctx = crypto_aead_ctx(aead); 921 struct caam_ctx *ctx = crypto_aead_ctx(aead);
913 struct device *dev = ctx->dev; 922 struct device *dev = ctx->dev;
923 int ret;
914 924
915 if (keylen < 4) 925 ret = aes_check_keylen(keylen - 4);
916 return -EINVAL; 926 if (ret) {
927 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
928 return ret;
929 }
917 930
918 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 931 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
919 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 932 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -931,7 +944,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
931} 944}
932 945
933static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 946static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
934 unsigned int keylen) 947 unsigned int keylen, const u32 ctx1_iv_off)
935{ 948{
936 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 949 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
937 struct caam_skcipher_alg *alg = 950 struct caam_skcipher_alg *alg =
@@ -941,34 +954,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
941 struct caam_flc *flc; 954 struct caam_flc *flc;
942 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 955 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
943 u32 *desc; 956 u32 *desc;
944 u32 ctx1_iv_off = 0;
945 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
946 OP_ALG_AAI_CTR_MOD128) &&
947 ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
948 OP_ALG_ALGSEL_CHACHA20);
949 const bool is_rfc3686 = alg->caam.rfc3686; 957 const bool is_rfc3686 = alg->caam.rfc3686;
950 958
951 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 959 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
952 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 960 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
953 961
954 /*
955 * AES-CTR needs to load IV in CONTEXT1 reg
956 * at an offset of 128bits (16bytes)
957 * CONTEXT1[255:128] = IV
958 */
959 if (ctr_mode)
960 ctx1_iv_off = 16;
961
962 /*
963 * RFC3686 specific:
964 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
965 * | *key = {KEY, NONCE}
966 */
967 if (is_rfc3686) {
968 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
969 keylen -= CTR_RFC3686_NONCE_SIZE;
970 }
971
972 ctx->cdata.keylen = keylen; 962 ctx->cdata.keylen = keylen;
973 ctx->cdata.key_virt = key; 963 ctx->cdata.key_virt = key;
974 ctx->cdata.key_inline = true; 964 ctx->cdata.key_inline = true;
@@ -996,11 +986,92 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
996 return 0; 986 return 0;
997} 987}
998 988
989static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
990 const u8 *key, unsigned int keylen)
991{
992 int err;
993
994 err = aes_check_keylen(keylen);
995 if (err) {
996 crypto_skcipher_set_flags(skcipher,
997 CRYPTO_TFM_RES_BAD_KEY_LEN);
998 return err;
999 }
1000
1001 return skcipher_setkey(skcipher, key, keylen, 0);
1002}
1003
1004static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
1005 const u8 *key, unsigned int keylen)
1006{
1007 u32 ctx1_iv_off;
1008 int err;
1009
1010 /*
1011 * RFC3686 specific:
1012 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1013 * | *key = {KEY, NONCE}
1014 */
1015 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1016 keylen -= CTR_RFC3686_NONCE_SIZE;
1017
1018 err = aes_check_keylen(keylen);
1019 if (err) {
1020 crypto_skcipher_set_flags(skcipher,
1021 CRYPTO_TFM_RES_BAD_KEY_LEN);
1022 return err;
1023 }
1024
1025 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1026}
1027
1028static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1029 const u8 *key, unsigned int keylen)
1030{
1031 u32 ctx1_iv_off;
1032 int err;
1033
1034 /*
1035 * AES-CTR needs to load IV in CONTEXT1 reg
1036 * at an offset of 128bits (16bytes)
1037 * CONTEXT1[255:128] = IV
1038 */
1039 ctx1_iv_off = 16;
1040
1041 err = aes_check_keylen(keylen);
1042 if (err) {
1043 crypto_skcipher_set_flags(skcipher,
1044 CRYPTO_TFM_RES_BAD_KEY_LEN);
1045 return err;
1046 }
1047
1048 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1049}
1050
1051static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1052 const u8 *key, unsigned int keylen)
1053{
1054 if (keylen != CHACHA_KEY_SIZE) {
1055 crypto_skcipher_set_flags(skcipher,
1056 CRYPTO_TFM_RES_BAD_KEY_LEN);
1057 return -EINVAL;
1058 }
1059
1060 return skcipher_setkey(skcipher, key, keylen, 0);
1061}
1062
1063static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1064 const u8 *key, unsigned int keylen)
1065{
1066 return verify_skcipher_des_key(skcipher, key) ?:
1067 skcipher_setkey(skcipher, key, keylen, 0);
1068}
1069
999static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, 1070static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1000 const u8 *key, unsigned int keylen) 1071 const u8 *key, unsigned int keylen)
1001{ 1072{
1002 return unlikely(des3_verify_key(skcipher, key)) ?: 1073 return verify_skcipher_des3_key(skcipher, key) ?:
1003 skcipher_setkey(skcipher, key, keylen); 1074 skcipher_setkey(skcipher, key, keylen, 0);
1004} 1075}
1005 1076
1006static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 1077static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
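The mode sniffing inside the old skcipher_setkey() is replaced by per-mode wrappers that pass the CONTEXT1 IV offset explicitly. The offsets follow directly from the comments carried into the new helpers: plain ctr(aes) stores the IV at byte offset 16 of CONTEXT1, while rfc3686 additionally reserves room for the 4-byte nonce that trails the user key. A small worked sketch of that bookkeeping (CTR_RFC3686_NONCE_SIZE is assumed to be 4, matching the kernel's ctr definitions):

	#include <stdio.h>

	#define CTR_RFC3686_NONCE_SIZE 4	/* assumption: as in <crypto/ctr.h> */

	int main(void)
	{
		/* rfc3686(ctr(aes)) user key layout: AES key || 4-byte nonce */
		unsigned int user_keylen    = 20;	/* AES-128 key + nonce */
		unsigned int aes_keylen     = user_keylen - CTR_RFC3686_NONCE_SIZE;
		unsigned int ctr_iv_off     = 16;	/* CONTEXT1[255:128] = IV */
		unsigned int rfc3686_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; /* {NONCE, IV, COUNTER} */

		printf("AES key bytes:        %u\n", aes_keylen);	/* 16 */
		printf("ctr(aes) IV offset:   %u\n", ctr_iv_off);	/* 16 */
		printf("rfc3686 IV offset:    %u\n", rfc3686_iv_off);	/* 20 */
		return 0;
	}

Splitting the wrappers also lets each one run the right key check (AES length, DES/3DES weak-key verification, exact ChaCha20 size) before calling the shared skcipher_setkey().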
@@ -1227,10 +1298,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
1227 1298
1228 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1299 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1229 1300
1230 if (unlikely(status)) { 1301 if (unlikely(status))
1231 caam_qi2_strstatus(ctx->dev, status); 1302 ecode = caam_qi2_strstatus(ctx->dev, status);
1232 ecode = -EIO;
1233 }
1234 1303
1235 aead_unmap(ctx->dev, edesc, req); 1304 aead_unmap(ctx->dev, edesc, req);
1236 qi_cache_free(edesc); 1305 qi_cache_free(edesc);
@@ -1250,17 +1319,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
1250 1319
1251 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1320 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1252 1321
1253 if (unlikely(status)) { 1322 if (unlikely(status))
1254 caam_qi2_strstatus(ctx->dev, status); 1323 ecode = caam_qi2_strstatus(ctx->dev, status);
1255 /*
1256 * verify hw auth check passed else return -EBADMSG
1257 */
1258 if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1259 JRSTA_CCBERR_ERRID_ICVCHK)
1260 ecode = -EBADMSG;
1261 else
1262 ecode = -EIO;
1263 }
1264 1324
1265 aead_unmap(ctx->dev, edesc, req); 1325 aead_unmap(ctx->dev, edesc, req);
1266 qi_cache_free(edesc); 1326 qi_cache_free(edesc);
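caam_qi2_strstatus() now returns an error code instead of only logging, so the completion callbacks drop the hand-rolled -EIO/-EBADMSG mapping deleted here. Conceptually that mapping moves into the helper; a hedged sketch of what such a translation can look like, with the field names taken from the removed code and the constant values marked as assumptions (the helper's real body may differ):

	#include <linux/errno.h>

	#define JRSTA_CCBERR_ERRID_MASK   0x000000ff	/* assumption: mask layout */
	#define JRSTA_CCBERR_ERRID_ICVCHK 0x0000000a	/* assumption: ICV error id */

	/* Illustrative only: map a CAAM job status word to an errno, keeping
	 * the special case that an ICV (integrity check) failure surfaces as
	 * -EBADMSG so AEAD decryption reports authentication failure.
	 */
	static int status_to_errno(u32 status)
	{
		if (!status)
			return 0;
		if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
			return -EBADMSG;
		return -EIO;
	}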
@@ -1325,18 +1385,12 @@ static int aead_decrypt(struct aead_request *req)
1325 1385
1326static int ipsec_gcm_encrypt(struct aead_request *req) 1386static int ipsec_gcm_encrypt(struct aead_request *req)
1327{ 1387{
1328 if (req->assoclen < 8) 1388 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1329 return -EINVAL;
1330
1331 return aead_encrypt(req);
1332} 1389}
1333 1390
1334static int ipsec_gcm_decrypt(struct aead_request *req) 1391static int ipsec_gcm_decrypt(struct aead_request *req)
1335{ 1392{
1336 if (req->assoclen < 8) 1393 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1337 return -EINVAL;
1338
1339 return aead_decrypt(req);
1340} 1394}
1341 1395
1342static void skcipher_encrypt_done(void *cbk_ctx, u32 status) 1396static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
@@ -1352,10 +1406,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1352 1406
1353 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1407 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1354 1408
1355 if (unlikely(status)) { 1409 if (unlikely(status))
1356 caam_qi2_strstatus(ctx->dev, status); 1410 ecode = caam_qi2_strstatus(ctx->dev, status);
1357 ecode = -EIO;
1358 }
1359 1411
1360 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1412 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1361 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1413 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1371,7 +1423,9 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1371 * ciphertext block (CBC mode) or last counter (CTR mode). 1423 * ciphertext block (CBC mode) or last counter (CTR mode).
1372 * This is used e.g. by the CTS mode. 1424 * This is used e.g. by the CTS mode.
1373 */ 1425 */
1374 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1426 if (!ecode)
1427 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1428 ivsize);
1375 1429
1376 qi_cache_free(edesc); 1430 qi_cache_free(edesc);
1377 skcipher_request_complete(req, ecode); 1431 skcipher_request_complete(req, ecode);
@@ -1390,10 +1444,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1390 1444
1391 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 1445 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1392 1446
1393 if (unlikely(status)) { 1447 if (unlikely(status))
1394 caam_qi2_strstatus(ctx->dev, status); 1448 ecode = caam_qi2_strstatus(ctx->dev, status);
1395 ecode = -EIO;
1396 }
1397 1449
1398 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1450 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1399 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1451 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1409,7 +1461,9 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1409 * ciphertext block (CBC mode) or last counter (CTR mode). 1461 * ciphertext block (CBC mode) or last counter (CTR mode).
1410 * This is used e.g. by the CTS mode. 1462 * This is used e.g. by the CTS mode.
1411 */ 1463 */
1412 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1464 if (!ecode)
1465 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1466 ivsize);
1413 1467
1414 qi_cache_free(edesc); 1468 qi_cache_free(edesc);
1415 skcipher_request_complete(req, ecode); 1469 skcipher_request_complete(req, ecode);
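Both skcipher completion handlers now copy the output IV back into req->iv only when the job succeeded. The copy exists because the skcipher API expects the IV to be advanced to the last ciphertext block (CBC) or the final counter value (CTR) so a follow-up request chains correctly, e.g. for CTS. A tiny self-contained sketch of the CBC case:

	#include <stdio.h>
	#include <string.h>

	#define BLK 16	/* AES block size */

	int main(void)
	{
		unsigned char ct[3 * BLK];	/* stand-in for the ciphertext */
		unsigned char iv[BLK];
		size_t cryptlen = sizeof(ct);

		memset(ct, 0xab, sizeof(ct));

		/* CBC chaining: the IV for the *next* request is the last full
		 * ciphertext block of this one (the driver reads it back from
		 * its S/G table instead, but the result is the same).
		 */
		memcpy(iv, ct + cryptlen - BLK, BLK);

		printf("next IV starts with 0x%02x\n", iv[0]);
		return 0;
	}

Skipping the copy on error avoids propagating garbage from a failed job into the caller's IV.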
@@ -1423,6 +1477,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
1423 struct caam_request *caam_req = skcipher_request_ctx(req); 1477 struct caam_request *caam_req = skcipher_request_ctx(req);
1424 int ret; 1478 int ret;
1425 1479
1480 if (!req->cryptlen)
1481 return 0;
1482
1426 /* allocate extended descriptor */ 1483 /* allocate extended descriptor */
1427 edesc = skcipher_edesc_alloc(req); 1484 edesc = skcipher_edesc_alloc(req);
1428 if (IS_ERR(edesc)) 1485 if (IS_ERR(edesc))
@@ -1451,6 +1508,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
1451 struct caam_request *caam_req = skcipher_request_ctx(req); 1508 struct caam_request *caam_req = skcipher_request_ctx(req);
1452 int ret; 1509 int ret;
1453 1510
1511 if (!req->cryptlen)
1512 return 0;
1454 /* allocate extended descriptor */ 1513 /* allocate extended descriptor */
1455 edesc = skcipher_edesc_alloc(req); 1514 edesc = skcipher_edesc_alloc(req);
1456 if (IS_ERR(edesc)) 1515 if (IS_ERR(edesc))
@@ -1545,7 +1604,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1545 .cra_driver_name = "cbc-aes-caam-qi2", 1604 .cra_driver_name = "cbc-aes-caam-qi2",
1546 .cra_blocksize = AES_BLOCK_SIZE, 1605 .cra_blocksize = AES_BLOCK_SIZE,
1547 }, 1606 },
1548 .setkey = skcipher_setkey, 1607 .setkey = aes_skcipher_setkey,
1549 .encrypt = skcipher_encrypt, 1608 .encrypt = skcipher_encrypt,
1550 .decrypt = skcipher_decrypt, 1609 .decrypt = skcipher_decrypt,
1551 .min_keysize = AES_MIN_KEY_SIZE, 1610 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1577,7 +1636,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1577 .cra_driver_name = "cbc-des-caam-qi2", 1636 .cra_driver_name = "cbc-des-caam-qi2",
1578 .cra_blocksize = DES_BLOCK_SIZE, 1637 .cra_blocksize = DES_BLOCK_SIZE,
1579 }, 1638 },
1580 .setkey = skcipher_setkey, 1639 .setkey = des_skcipher_setkey,
1581 .encrypt = skcipher_encrypt, 1640 .encrypt = skcipher_encrypt,
1582 .decrypt = skcipher_decrypt, 1641 .decrypt = skcipher_decrypt,
1583 .min_keysize = DES_KEY_SIZE, 1642 .min_keysize = DES_KEY_SIZE,
@@ -1593,7 +1652,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1593 .cra_driver_name = "ctr-aes-caam-qi2", 1652 .cra_driver_name = "ctr-aes-caam-qi2",
1594 .cra_blocksize = 1, 1653 .cra_blocksize = 1,
1595 }, 1654 },
1596 .setkey = skcipher_setkey, 1655 .setkey = ctr_skcipher_setkey,
1597 .encrypt = skcipher_encrypt, 1656 .encrypt = skcipher_encrypt,
1598 .decrypt = skcipher_decrypt, 1657 .decrypt = skcipher_decrypt,
1599 .min_keysize = AES_MIN_KEY_SIZE, 1658 .min_keysize = AES_MIN_KEY_SIZE,
@@ -1611,7 +1670,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1611 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", 1670 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1612 .cra_blocksize = 1, 1671 .cra_blocksize = 1,
1613 }, 1672 },
1614 .setkey = skcipher_setkey, 1673 .setkey = rfc3686_skcipher_setkey,
1615 .encrypt = skcipher_encrypt, 1674 .encrypt = skcipher_encrypt,
1616 .decrypt = skcipher_decrypt, 1675 .decrypt = skcipher_decrypt,
1617 .min_keysize = AES_MIN_KEY_SIZE + 1676 .min_keysize = AES_MIN_KEY_SIZE +
@@ -1650,7 +1709,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1650 .cra_driver_name = "chacha20-caam-qi2", 1709 .cra_driver_name = "chacha20-caam-qi2",
1651 .cra_blocksize = 1, 1710 .cra_blocksize = 1,
1652 }, 1711 },
1653 .setkey = skcipher_setkey, 1712 .setkey = chacha20_skcipher_setkey,
1654 .encrypt = skcipher_encrypt, 1713 .encrypt = skcipher_encrypt,
1655 .decrypt = skcipher_decrypt, 1714 .decrypt = skcipher_decrypt,
1656 .min_keysize = CHACHA_KEY_SIZE, 1715 .min_keysize = CHACHA_KEY_SIZE,
@@ -2918,6 +2977,7 @@ enum hash_optype {
2918/** 2977/**
2919 * caam_hash_ctx - ahash per-session context 2978 * caam_hash_ctx - ahash per-session context
2920 * @flc: Flow Contexts array 2979 * @flc: Flow Contexts array
2980 * @key: authentication key
2921 * @flc_dma: I/O virtual addresses of the Flow Contexts 2981 * @flc_dma: I/O virtual addresses of the Flow Contexts
2922 * @dev: dpseci device 2982 * @dev: dpseci device
2923 * @ctx_len: size of Context Register 2983 * @ctx_len: size of Context Register
@@ -2925,6 +2985,7 @@ enum hash_optype {
2925 */ 2985 */
2926struct caam_hash_ctx { 2986struct caam_hash_ctx {
2927 struct caam_flc flc[HASH_NUM_OP]; 2987 struct caam_flc flc[HASH_NUM_OP];
2988 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2928 dma_addr_t flc_dma[HASH_NUM_OP]; 2989 dma_addr_t flc_dma[HASH_NUM_OP];
2929 struct device *dev; 2990 struct device *dev;
2930 int ctx_len; 2991 int ctx_len;
@@ -3094,10 +3155,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
3094 3155
3095 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 3156 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3096 3157
3097 if (err) 3158 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3098 caam_qi2_strstatus(res->dev, err);
3099
3100 res->err = err;
3101 complete(&res->completion); 3159 complete(&res->completion);
3102} 3160}
3103 3161
@@ -3228,6 +3286,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3228 ctx->adata.key_virt = key; 3286 ctx->adata.key_virt = key;
3229 ctx->adata.key_inline = true; 3287 ctx->adata.key_inline = true;
3230 3288
3289 /*
3290 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3291 * in invalid opcodes (last bytes of user key) in the resulting
3292 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3293 * addresses are needed.
3294 */
3295 if (keylen > ctx->adata.keylen_pad) {
3296 memcpy(ctx->key, key, keylen);
3297 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3298 ctx->adata.keylen_pad,
3299 DMA_TO_DEVICE);
3300 }
3301
3231 ret = ahash_set_sh_desc(ahash); 3302 ret = ahash_set_sh_desc(ahash);
3232 kfree(hashed_key); 3303 kfree(hashed_key);
3233 return ret; 3304 return ret;
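When the user key is longer than the derived key, the already DMA-mapped ctx->key buffer is refreshed with the raw key and pushed to the device. This is the usual streaming-DMA pattern: map once at init with CPU syncs skipped, then dma_sync_single_for_device() after every CPU write. A condensed, hedged sketch using only standard DMA-API calls ("dev", "buf", "len" and the function names are placeholders, error handling trimmed):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static dma_addr_t key_dma;

	static int key_buf_init(struct device *dev, void *buf, size_t len)
	{
		key_dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return dma_mapping_error(dev, key_dma) ? -ENOMEM : 0;
	}

	static void key_buf_update(struct device *dev, void *buf,
				   const void *key, size_t keylen)
	{
		/* CPU wrote the buffer: push it to the device's view. */
		memcpy(buf, key, keylen);
		dma_sync_single_for_device(dev, key_dma, keylen, DMA_TO_DEVICE);
	}

	static void key_buf_exit(struct device *dev, size_t len)
	{
		dma_unmap_single_attrs(dev, key_dma, len, DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
	}

The matching map/unmap calls appear in caam_hash_cra_init()/caam_hash_cra_exit() further down in this patch.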
@@ -3282,10 +3353,8 @@ static void ahash_done(void *cbk_ctx, u32 status)
3282 3353
3283 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3354 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3284 3355
3285 if (unlikely(status)) { 3356 if (unlikely(status))
3286 caam_qi2_strstatus(ctx->dev, status); 3357 ecode = caam_qi2_strstatus(ctx->dev, status);
3287 ecode = -EIO;
3288 }
3289 3358
3290 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); 3359 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3291 memcpy(req->result, state->caam_ctx, digestsize); 3360 memcpy(req->result, state->caam_ctx, digestsize);
@@ -3310,10 +3379,8 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
3310 3379
3311 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3380 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3312 3381
3313 if (unlikely(status)) { 3382 if (unlikely(status))
3314 caam_qi2_strstatus(ctx->dev, status); 3383 ecode = caam_qi2_strstatus(ctx->dev, status);
3315 ecode = -EIO;
3316 }
3317 3384
3318 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); 3385 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3319 switch_buf(state); 3386 switch_buf(state);
@@ -3343,10 +3410,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3343 3410
3344 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3411 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3345 3412
3346 if (unlikely(status)) { 3413 if (unlikely(status))
3347 caam_qi2_strstatus(ctx->dev, status); 3414 ecode = caam_qi2_strstatus(ctx->dev, status);
3348 ecode = -EIO;
3349 }
3350 3415
3351 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); 3416 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3352 memcpy(req->result, state->caam_ctx, digestsize); 3417 memcpy(req->result, state->caam_ctx, digestsize);
@@ -3371,10 +3436,8 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3371 3436
3372 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); 3437 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3373 3438
3374 if (unlikely(status)) { 3439 if (unlikely(status))
3375 caam_qi2_strstatus(ctx->dev, status); 3440 ecode = caam_qi2_strstatus(ctx->dev, status);
3376 ecode = -EIO;
3377 }
3378 3441
3379 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); 3442 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3380 switch_buf(state); 3443 switch_buf(state);
@@ -4466,11 +4529,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
4466 4529
4467 ctx->dev = caam_hash->dev; 4530 ctx->dev = caam_hash->dev;
4468 4531
4532 if (alg->setkey) {
4533 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4534 ARRAY_SIZE(ctx->key),
4535 DMA_TO_DEVICE,
4536 DMA_ATTR_SKIP_CPU_SYNC);
4537 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4538 dev_err(ctx->dev, "unable to map key\n");
4539 return -ENOMEM;
4540 }
4541 }
4542
4469 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), 4543 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4470 DMA_BIDIRECTIONAL, 4544 DMA_BIDIRECTIONAL,
4471 DMA_ATTR_SKIP_CPU_SYNC); 4545 DMA_ATTR_SKIP_CPU_SYNC);
4472 if (dma_mapping_error(ctx->dev, dma_addr)) { 4546 if (dma_mapping_error(ctx->dev, dma_addr)) {
4473 dev_err(ctx->dev, "unable to map shared descriptors\n"); 4547 dev_err(ctx->dev, "unable to map shared descriptors\n");
4548 if (ctx->adata.key_dma)
4549 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4550 ARRAY_SIZE(ctx->key),
4551 DMA_TO_DEVICE,
4552 DMA_ATTR_SKIP_CPU_SYNC);
4474 return -ENOMEM; 4553 return -ENOMEM;
4475 } 4554 }
4476 4555
@@ -4496,6 +4575,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4496 4575
4497 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), 4576 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4498 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); 4577 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4578 if (ctx->adata.key_dma)
4579 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4580 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4581 DMA_ATTR_SKIP_CPU_SYNC);
4499} 4582}
4500 4583
4501static struct caam_hash_alg *caam_hash_alloc(struct device *dev, 4584static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
@@ -4700,7 +4783,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4700 4783
4701 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; 4784 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4702 if (unlikely(fd_err)) 4785 if (unlikely(fd_err))
4703 dev_err(priv->dev, "FD error: %08x\n", fd_err); 4786 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4704 4787
4705 /* 4788 /*
4706 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported 4789 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
@@ -5098,6 +5181,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5098 goto err_bind; 5181 goto err_bind;
5099 } 5182 }
5100 5183
5184 dpaa2_dpseci_debugfs_init(priv);
5185
5101 /* register crypto algorithms the device supports */ 5186 /* register crypto algorithms the device supports */
5102 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 5187 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5103 struct caam_skcipher_alg *t_alg = driver_algs + i; 5188 struct caam_skcipher_alg *t_alg = driver_algs + i;
@@ -5265,6 +5350,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5265 dev = &ls_dev->dev; 5350 dev = &ls_dev->dev;
5266 priv = dev_get_drvdata(dev); 5351 priv = dev_get_drvdata(dev);
5267 5352
5353 dpaa2_dpseci_debugfs_exit(priv);
5354
5268 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 5355 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5269 struct caam_aead_alg *t_alg = driver_aeads + i; 5356 struct caam_aead_alg *t_alg = driver_aeads + i;
5270 5357
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index be5085451053..706736776b47 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -10,12 +10,13 @@
10#include <soc/fsl/dpaa2-io.h> 10#include <soc/fsl/dpaa2-io.h>
11#include <soc/fsl/dpaa2-fd.h> 11#include <soc/fsl/dpaa2-fd.h>
12#include <linux/threads.h> 12#include <linux/threads.h>
13#include <linux/netdevice.h>
13#include "dpseci.h" 14#include "dpseci.h"
14#include "desc_constr.h" 15#include "desc_constr.h"
15 16
16#define DPAA2_CAAM_STORE_SIZE 16 17#define DPAA2_CAAM_STORE_SIZE 16
17/* NAPI weight *must* be a multiple of the store size. */ 18/* NAPI weight *must* be a multiple of the store size. */
18#define DPAA2_CAAM_NAPI_WEIGHT 64 19#define DPAA2_CAAM_NAPI_WEIGHT 512
19 20
20/* The congestion entrance threshold was chosen so that on LS2088 21/* The congestion entrance threshold was chosen so that on LS2088
21 * we support the maximum throughput for the available memory 22 * we support the maximum throughput for the available memory
@@ -64,6 +65,7 @@ struct dpaa2_caam_priv {
64 struct iommu_domain *domain; 65 struct iommu_domain *domain;
65 66
66 struct dpaa2_caam_priv_per_cpu __percpu *ppriv; 67 struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
68 struct dentry *dfs_root;
67}; 69};
68 70
69/** 71/**
@@ -90,33 +92,6 @@ struct dpaa2_caam_priv_per_cpu {
90 struct dpaa2_io *dpio; 92 struct dpaa2_io *dpio;
91}; 93};
92 94
93/*
94 * The CAAM QI hardware constructs a job descriptor which points
95 * to shared descriptor (as pointed by context_a of FQ to CAAM).
96 * When the job descriptor is executed by deco, the whole job
97 * descriptor together with shared descriptor gets loaded in
98 * deco buffer which is 64 words long (each 32-bit).
99 *
100 * The job descriptor constructed by QI hardware has layout:
101 *
102 * HEADER (1 word)
103 * Shdesc ptr (1 or 2 words)
104 * SEQ_OUT_PTR (1 word)
105 * Out ptr (1 or 2 words)
106 * Out length (1 word)
107 * SEQ_IN_PTR (1 word)
108 * In ptr (1 or 2 words)
109 * In length (1 word)
110 *
111 * The shdesc ptr is used to fetch shared descriptor contents
112 * into deco buffer.
113 *
114 * Apart from shdesc contents, the total number of words that
115 * get loaded in deco buffer are '8' or '11'. The remaining words
116 * in deco buffer can be used for storing shared descriptor.
117 */
118#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
119
120/* Length of a single buffer in the QI driver memory cache */ 95/* Length of a single buffer in the QI driver memory cache */
121#define CAAM_QI_MEMCACHE_SIZE 512 96#define CAAM_QI_MEMCACHE_SIZE 512
122 97
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e4ac5d591ad6..65399cb2a770 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -95,8 +95,8 @@ struct caam_hash_ctx {
95 dma_addr_t sh_desc_update_first_dma; 95 dma_addr_t sh_desc_update_first_dma;
96 dma_addr_t sh_desc_fin_dma; 96 dma_addr_t sh_desc_fin_dma;
97 dma_addr_t sh_desc_digest_dma; 97 dma_addr_t sh_desc_digest_dma;
98 dma_addr_t key_dma;
99 enum dma_data_direction dir; 98 enum dma_data_direction dir;
99 enum dma_data_direction key_dir;
100 struct device *jrdev; 100 struct device *jrdev;
101 int ctx_len; 101 int ctx_len;
102 struct alginfo adata; 102 struct alginfo adata;
@@ -282,13 +282,10 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
282 struct device *jrdev = ctx->jrdev; 282 struct device *jrdev = ctx->jrdev;
283 u32 *desc; 283 u32 *desc;
284 284
285 /* key is loaded from memory for UPDATE and FINALIZE states */
286 ctx->adata.key_dma = ctx->key_dma;
287
288 /* shared descriptor for ahash_update */ 285 /* shared descriptor for ahash_update */
289 desc = ctx->sh_desc_update; 286 desc = ctx->sh_desc_update;
290 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 287 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
291 ctx->ctx_len, ctx->ctx_len, 0); 288 ctx->ctx_len, ctx->ctx_len);
292 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 289 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
293 desc_bytes(desc), ctx->dir); 290 desc_bytes(desc), ctx->dir);
294 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", 291 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
@@ -298,7 +295,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
298 /* shared descriptor for ahash_{final,finup} */ 295 /* shared descriptor for ahash_{final,finup} */
299 desc = ctx->sh_desc_fin; 296 desc = ctx->sh_desc_fin;
300 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 297 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
301 digestsize, ctx->ctx_len, 0); 298 digestsize, ctx->ctx_len);
302 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 299 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
303 desc_bytes(desc), ctx->dir); 300 desc_bytes(desc), ctx->dir);
304 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", 301 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
@@ -311,7 +308,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
311 /* shared descriptor for first invocation of ahash_update */ 308 /* shared descriptor for first invocation of ahash_update */
312 desc = ctx->sh_desc_update_first; 309 desc = ctx->sh_desc_update_first;
313 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 310 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
314 ctx->ctx_len, ctx->key_dma); 311 ctx->ctx_len);
315 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 312 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
316 desc_bytes(desc), ctx->dir); 313 desc_bytes(desc), ctx->dir);
317 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) 314 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
@@ -321,7 +318,7 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
321 /* shared descriptor for ahash_digest */ 318 /* shared descriptor for ahash_digest */
322 desc = ctx->sh_desc_digest; 319 desc = ctx->sh_desc_digest;
323 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 320 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
324 digestsize, ctx->ctx_len, 0); 321 digestsize, ctx->ctx_len);
325 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 322 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
326 desc_bytes(desc), ctx->dir); 323 desc_bytes(desc), ctx->dir);
327 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", 324 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
@@ -340,7 +337,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
340 /* shared descriptor for ahash_update */ 337 /* shared descriptor for ahash_update */
341 desc = ctx->sh_desc_update; 338 desc = ctx->sh_desc_update;
342 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 339 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
343 ctx->ctx_len, ctx->ctx_len, 0); 340 ctx->ctx_len, ctx->ctx_len);
344 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 341 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
345 desc_bytes(desc), ctx->dir); 342 desc_bytes(desc), ctx->dir);
346 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", 343 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
@@ -350,7 +347,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
350 /* shared descriptor for ahash_{final,finup} */ 347 /* shared descriptor for ahash_{final,finup} */
351 desc = ctx->sh_desc_fin; 348 desc = ctx->sh_desc_fin;
352 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 349 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
353 digestsize, ctx->ctx_len, 0); 350 digestsize, ctx->ctx_len);
354 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 351 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
355 desc_bytes(desc), ctx->dir); 352 desc_bytes(desc), ctx->dir);
356 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", 353 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
@@ -360,7 +357,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
360 /* shared descriptor for first invocation of ahash_update */ 357 /* shared descriptor for first invocation of ahash_update */
361 desc = ctx->sh_desc_update_first; 358 desc = ctx->sh_desc_update_first;
362 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 359 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
363 ctx->ctx_len, 0); 360 ctx->ctx_len);
364 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 361 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
365 desc_bytes(desc), ctx->dir); 362 desc_bytes(desc), ctx->dir);
366 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) 363 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
@@ -370,7 +367,7 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
370 /* shared descriptor for ahash_digest */ 367 /* shared descriptor for ahash_digest */
371 desc = ctx->sh_desc_digest; 368 desc = ctx->sh_desc_digest;
372 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 369 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
373 digestsize, ctx->ctx_len, 0); 370 digestsize, ctx->ctx_len);
374 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 371 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
375 desc_bytes(desc), ctx->dir); 372 desc_bytes(desc), ctx->dir);
376 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", 373 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
@@ -480,6 +477,18 @@ static int ahash_setkey(struct crypto_ahash *ahash,
480 goto bad_free_key; 477 goto bad_free_key;
481 478
482 memcpy(ctx->key, key, keylen); 479 memcpy(ctx->key, key, keylen);
480
481 /*
482 * In case |user key| > |derived key|, using DKP<imm,imm>
483 * would result in invalid opcodes (last bytes of user key) in
484 * the resulting descriptor. Use DKP<ptr,imm> instead => both
485 * virtual and dma key addresses are needed.
486 */
487 if (keylen > ctx->adata.keylen_pad)
488 dma_sync_single_for_device(ctx->jrdev,
489 ctx->adata.key_dma,
490 ctx->adata.keylen_pad,
491 DMA_TO_DEVICE);
483 } else { 492 } else {
484 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 493 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
485 keylen, CAAM_MAX_HASH_KEY_SIZE); 494 keylen, CAAM_MAX_HASH_KEY_SIZE);
@@ -501,8 +510,14 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
501 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 510 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
502 struct device *jrdev = ctx->jrdev; 511 struct device *jrdev = ctx->jrdev;
503 512
513 if (keylen != AES_KEYSIZE_128) {
514 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
515 return -EINVAL;
516 }
517
504 memcpy(ctx->key, key, keylen); 518 memcpy(ctx->key, key, keylen);
505 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 519 dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
520 DMA_TO_DEVICE);
506 ctx->adata.keylen = keylen; 521 ctx->adata.keylen = keylen;
507 522
508 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", 523 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
@@ -515,6 +530,13 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
515 unsigned int keylen) 530 unsigned int keylen)
516{ 531{
517 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 532 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
533 int err;
534
535 err = aes_check_keylen(keylen);
536 if (err) {
537 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
538 return err;
539 }
518 540
519 /* key is immediate data for all cmac shared descriptors */ 541 /* key is immediate data for all cmac shared descriptors */
520 ctx->adata.key_virt = key; 542 ctx->adata.key_virt = key;
@@ -538,7 +560,7 @@ struct ahash_edesc {
538 dma_addr_t sec4_sg_dma; 560 dma_addr_t sec4_sg_dma;
539 int src_nents; 561 int src_nents;
540 int sec4_sg_bytes; 562 int sec4_sg_bytes;
541 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 563 u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
542 struct sec4_sg_entry sec4_sg[0]; 564 struct sec4_sg_entry sec4_sg[0];
543}; 565};
544 566
@@ -584,12 +606,13 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
584 int digestsize = crypto_ahash_digestsize(ahash); 606 int digestsize = crypto_ahash_digestsize(ahash);
585 struct caam_hash_state *state = ahash_request_ctx(req); 607 struct caam_hash_state *state = ahash_request_ctx(req);
586 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 608 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
609 int ecode = 0;
587 610
588 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 611 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
589 612
590 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 613 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
591 if (err) 614 if (err)
592 caam_jr_strstatus(jrdev, err); 615 ecode = caam_jr_strstatus(jrdev, err);
593 616
594 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 617 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
595 memcpy(req->result, state->caam_ctx, digestsize); 618 memcpy(req->result, state->caam_ctx, digestsize);
@@ -599,7 +622,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
599 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 622 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
600 ctx->ctx_len, 1); 623 ctx->ctx_len, 1);
601 624
602 req->base.complete(&req->base, err); 625 req->base.complete(&req->base, ecode);
603} 626}
604 627
605static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 628static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
@@ -611,12 +634,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
611 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 634 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
612 struct caam_hash_state *state = ahash_request_ctx(req); 635 struct caam_hash_state *state = ahash_request_ctx(req);
613 int digestsize = crypto_ahash_digestsize(ahash); 636 int digestsize = crypto_ahash_digestsize(ahash);
637 int ecode = 0;
614 638
615 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 639 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
616 640
617 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 641 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
618 if (err) 642 if (err)
619 caam_jr_strstatus(jrdev, err); 643 ecode = caam_jr_strstatus(jrdev, err);
620 644
621 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 645 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
622 switch_buf(state); 646 switch_buf(state);
@@ -630,7 +654,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
630 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 654 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
631 digestsize, 1); 655 digestsize, 1);
632 656
633 req->base.complete(&req->base, err); 657 req->base.complete(&req->base, ecode);
634} 658}
635 659
636static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 660static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
@@ -642,12 +666,13 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
642 int digestsize = crypto_ahash_digestsize(ahash); 666 int digestsize = crypto_ahash_digestsize(ahash);
643 struct caam_hash_state *state = ahash_request_ctx(req); 667 struct caam_hash_state *state = ahash_request_ctx(req);
644 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 668 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
669 int ecode = 0;
645 670
646 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 671 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
647 672
648 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 673 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
649 if (err) 674 if (err)
650 caam_jr_strstatus(jrdev, err); 675 ecode = caam_jr_strstatus(jrdev, err);
651 676
652 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 677 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
653 memcpy(req->result, state->caam_ctx, digestsize); 678 memcpy(req->result, state->caam_ctx, digestsize);
@@ -657,7 +682,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
657 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 682 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
658 ctx->ctx_len, 1); 683 ctx->ctx_len, 1);
659 684
660 req->base.complete(&req->base, err); 685 req->base.complete(&req->base, ecode);
661} 686}
662 687
663static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 688static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
@@ -669,12 +694,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
669 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 694 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
670 struct caam_hash_state *state = ahash_request_ctx(req); 695 struct caam_hash_state *state = ahash_request_ctx(req);
671 int digestsize = crypto_ahash_digestsize(ahash); 696 int digestsize = crypto_ahash_digestsize(ahash);
697 int ecode = 0;
672 698
673 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 699 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
674 700
675 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 701 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
676 if (err) 702 if (err)
677 caam_jr_strstatus(jrdev, err); 703 ecode = caam_jr_strstatus(jrdev, err);
678 704
679 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 705 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
680 switch_buf(state); 706 switch_buf(state);
@@ -688,7 +714,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
688 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 714 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
689 digestsize, 1); 715 digestsize, 1);
690 716
691 req->base.complete(&req->base, err); 717 req->base.complete(&req->base, ecode);
692} 718}
693 719
694/* 720/*
@@ -1812,40 +1838,50 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
1812 1838
1813 if (is_xcbc_aes(caam_hash->alg_type)) { 1839 if (is_xcbc_aes(caam_hash->alg_type)) {
1814 ctx->dir = DMA_TO_DEVICE; 1840 ctx->dir = DMA_TO_DEVICE;
1841 ctx->key_dir = DMA_BIDIRECTIONAL;
1815 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1842 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1816 ctx->ctx_len = 48; 1843 ctx->ctx_len = 48;
1817
1818 ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1819 ARRAY_SIZE(ctx->key),
1820 DMA_BIDIRECTIONAL,
1821 DMA_ATTR_SKIP_CPU_SYNC);
1822 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
1823 dev_err(ctx->jrdev, "unable to map key\n");
1824 caam_jr_free(ctx->jrdev);
1825 return -ENOMEM;
1826 }
1827 } else if (is_cmac_aes(caam_hash->alg_type)) { 1844 } else if (is_cmac_aes(caam_hash->alg_type)) {
1828 ctx->dir = DMA_TO_DEVICE; 1845 ctx->dir = DMA_TO_DEVICE;
1846 ctx->key_dir = DMA_NONE;
1829 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1847 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1830 ctx->ctx_len = 32; 1848 ctx->ctx_len = 32;
1831 } else { 1849 } else {
1832 ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1850 if (priv->era >= 6) {
1851 ctx->dir = DMA_BIDIRECTIONAL;
1852 ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1853 } else {
1854 ctx->dir = DMA_TO_DEVICE;
1855 ctx->key_dir = DMA_NONE;
1856 }
1833 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1857 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1834 ctx->ctx_len = runninglen[(ctx->adata.algtype & 1858 ctx->ctx_len = runninglen[(ctx->adata.algtype &
1835 OP_ALG_ALGSEL_SUBMASK) >> 1859 OP_ALG_ALGSEL_SUBMASK) >>
1836 OP_ALG_ALGSEL_SHIFT]; 1860 OP_ALG_ALGSEL_SHIFT];
1837 } 1861 }
1838 1862
1863 if (ctx->key_dir != DMA_NONE) {
1864 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1865 ARRAY_SIZE(ctx->key),
1866 ctx->key_dir,
1867 DMA_ATTR_SKIP_CPU_SYNC);
1868 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1869 dev_err(ctx->jrdev, "unable to map key\n");
1870 caam_jr_free(ctx->jrdev);
1871 return -ENOMEM;
1872 }
1873 }
1874
1839 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1875 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1840 offsetof(struct caam_hash_ctx, key), 1876 offsetof(struct caam_hash_ctx, key),
1841 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1877 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1842 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1878 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1843 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1879 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1844 1880
1845 if (is_xcbc_aes(caam_hash->alg_type)) 1881 if (ctx->key_dir != DMA_NONE)
1846 dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 1882 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1847 ARRAY_SIZE(ctx->key), 1883 ARRAY_SIZE(ctx->key),
1848 DMA_BIDIRECTIONAL, 1884 ctx->key_dir,
1849 DMA_ATTR_SKIP_CPU_SYNC); 1885 DMA_ATTR_SKIP_CPU_SYNC);
1850 1886
1851 caam_jr_free(ctx->jrdev); 1887 caam_jr_free(ctx->jrdev);
@@ -1878,9 +1914,9 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1878 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1914 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1879 offsetof(struct caam_hash_ctx, key), 1915 offsetof(struct caam_hash_ctx, key),
1880 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1916 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1881 if (is_xcbc_aes(ctx->adata.algtype)) 1917 if (ctx->key_dir != DMA_NONE)
1882 dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 1918 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1883 ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, 1919 ARRAY_SIZE(ctx->key), ctx->key_dir,
1884 DMA_ATTR_SKIP_CPU_SYNC); 1920 DMA_ATTR_SKIP_CPU_SYNC);
1885 caam_jr_free(ctx->jrdev); 1921 caam_jr_free(ctx->jrdev);
1886} 1922}
@@ -1971,7 +2007,7 @@ int caam_algapi_hash_init(struct device *ctrldev)
1971 * is not present. 2007 * is not present.
1972 */ 2008 */
1973 if (!md_inst) 2009 if (!md_inst)
1974 return -ENODEV; 2010 return 0;
1975 2011
1976 /* Limit digest size based on LP256 */ 2012 /* Limit digest size based on LP256 */
1977 if (md_vid == CHA_VER_VID_MD_LP256) 2013 if (md_vid == CHA_VER_VID_MD_LP256)
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index 71d018343ee4..78383d77da99 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -83,10 +83,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ahash);
83 * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} 83 * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
84 * @digestsize: algorithm's digest size 84 * @digestsize: algorithm's digest size
85 * @ctx_len: size of Context Register 85 * @ctx_len: size of Context Register
86 * @key_dma: I/O Virtual Address of the key
87 */ 86 */
88void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, 87void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
89 int digestsize, int ctx_len, dma_addr_t key_dma) 88 int digestsize, int ctx_len)
90{ 89{
91 u32 *skip_key_load; 90 u32 *skip_key_load;
92 91
@@ -136,7 +135,7 @@ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
136 LDST_SRCDST_BYTE_CONTEXT); 135 LDST_SRCDST_BYTE_CONTEXT);
137 if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) 136 if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT)
138 /* Save K1 */ 137 /* Save K1 */
139 append_fifo_store(desc, key_dma, adata->keylen, 138 append_fifo_store(desc, adata->key_dma, adata->keylen,
140 LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); 139 LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
141} 140}
142EXPORT_SYMBOL(cnstr_shdsc_sk_hash); 141EXPORT_SYMBOL(cnstr_shdsc_sk_hash);
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
index 6947ee1f200c..4f369b8cb6ae 100644
--- a/drivers/crypto/caam/caamhash_desc.h
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -25,5 +25,5 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
25 int digestsize, int ctx_len, bool import_ctx, int era); 25 int digestsize, int ctx_len, bool import_ctx, int era);
26 26
27void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, 27void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
28 int digestsize, int ctx_len, dma_addr_t key_dma); 28 int digestsize, int ctx_len);
29#endif /* _CAAMHASH_DESC_H_ */ 29#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 80574106af29..83f96d4f86e0 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -17,18 +17,29 @@
17#include "sg_sw_sec4.h" 17#include "sg_sw_sec4.h"
18#include "caampkc.h" 18#include "caampkc.h"
19 19
20#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) 20#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
21#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ 21#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
22 sizeof(struct rsa_priv_f1_pdb)) 22 SIZEOF_RSA_PRIV_F1_PDB)
23#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ 23#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
24 sizeof(struct rsa_priv_f2_pdb)) 24 SIZEOF_RSA_PRIV_F2_PDB)
25#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ 25#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
26 sizeof(struct rsa_priv_f3_pdb)) 26 SIZEOF_RSA_PRIV_F3_PDB)
27#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ 27#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
28 28
29/* buffer filled with zeros, used for padding */ 29/* buffer filled with zeros, used for padding */
30static u8 *zero_buffer; 30static u8 *zero_buffer;
31 31
32/*
33 * variable used to avoid double free of resources in case
34 * algorithm registration was unsuccessful
35 */
36static bool init_done;
37
38struct caam_akcipher_alg {
39 struct akcipher_alg akcipher;
40 bool registered;
41};
42
32static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, 43static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
33 struct akcipher_request *req) 44 struct akcipher_request *req)
34{ 45{
@@ -107,9 +118,10 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
107{ 118{
108 struct akcipher_request *req = context; 119 struct akcipher_request *req = context;
109 struct rsa_edesc *edesc; 120 struct rsa_edesc *edesc;
121 int ecode = 0;
110 122
111 if (err) 123 if (err)
112 caam_jr_strstatus(dev, err); 124 ecode = caam_jr_strstatus(dev, err);
113 125
114 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 126 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
115 127
@@ -117,7 +129,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
117 rsa_io_unmap(dev, edesc, req); 129 rsa_io_unmap(dev, edesc, req);
118 kfree(edesc); 130 kfree(edesc);
119 131
120 akcipher_request_complete(req, err); 132 akcipher_request_complete(req, ecode);
121} 133}
122 134
123static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, 135static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
@@ -125,9 +137,10 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
125{ 137{
126 struct akcipher_request *req = context; 138 struct akcipher_request *req = context;
127 struct rsa_edesc *edesc; 139 struct rsa_edesc *edesc;
140 int ecode = 0;
128 141
129 if (err) 142 if (err)
130 caam_jr_strstatus(dev, err); 143 ecode = caam_jr_strstatus(dev, err);
131 144
132 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 145 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
133 146
@@ -135,7 +148,7 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
135 rsa_io_unmap(dev, edesc, req); 148 rsa_io_unmap(dev, edesc, req);
136 kfree(edesc); 149 kfree(edesc);
137 150
138 akcipher_request_complete(req, err); 151 akcipher_request_complete(req, ecode);
139} 152}
140 153
141static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, 154static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
@@ -143,9 +156,10 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
143{ 156{
144 struct akcipher_request *req = context; 157 struct akcipher_request *req = context;
145 struct rsa_edesc *edesc; 158 struct rsa_edesc *edesc;
159 int ecode = 0;
146 160
147 if (err) 161 if (err)
148 caam_jr_strstatus(dev, err); 162 ecode = caam_jr_strstatus(dev, err);
149 163
150 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 164 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
151 165
@@ -153,7 +167,7 @@ static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
153 rsa_io_unmap(dev, edesc, req); 167 rsa_io_unmap(dev, edesc, req);
154 kfree(edesc); 168 kfree(edesc);
155 169
156 akcipher_request_complete(req, err); 170 akcipher_request_complete(req, ecode);
157} 171}
158 172
159static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, 173static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
@@ -161,9 +175,10 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
161{ 175{
162 struct akcipher_request *req = context; 176 struct akcipher_request *req = context;
163 struct rsa_edesc *edesc; 177 struct rsa_edesc *edesc;
178 int ecode = 0;
164 179
165 if (err) 180 if (err)
166 caam_jr_strstatus(dev, err); 181 ecode = caam_jr_strstatus(dev, err);
167 182
168 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); 183 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
169 184
@@ -171,7 +186,7 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
171 rsa_io_unmap(dev, edesc, req); 186 rsa_io_unmap(dev, edesc, req);
172 kfree(edesc); 187 kfree(edesc);
173 188
174 akcipher_request_complete(req, err); 189 akcipher_request_complete(req, ecode);
175} 190}
176 191
177/** 192/**
@@ -867,7 +882,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
867 return ret; 882 return ret;
868 883
869 /* Copy key in DMA zone */ 884 /* Copy key in DMA zone */
870 rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); 885 rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
871 if (!rsa_key->e) 886 if (!rsa_key->e)
872 goto err; 887 goto err;
873 888
@@ -889,8 +904,6 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
889 rsa_key->e_sz = raw_key.e_sz; 904 rsa_key->e_sz = raw_key.e_sz;
890 rsa_key->n_sz = raw_key.n_sz; 905 rsa_key->n_sz = raw_key.n_sz;
891 906
892 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
893
894 return 0; 907 return 0;
895err: 908err:
896 caam_rsa_free_key(rsa_key); 909 caam_rsa_free_key(rsa_key);
@@ -971,11 +984,11 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
971 return ret; 984 return ret;
972 985
973 /* Copy key in DMA zone */ 986 /* Copy key in DMA zone */
974 rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL); 987 rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
975 if (!rsa_key->d) 988 if (!rsa_key->d)
976 goto err; 989 goto err;
977 990
978 rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); 991 rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
979 if (!rsa_key->e) 992 if (!rsa_key->e)
980 goto err; 993 goto err;
981 994
@@ -998,9 +1011,6 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
998 rsa_key->e_sz = raw_key.e_sz; 1011 rsa_key->e_sz = raw_key.e_sz;
999 rsa_key->n_sz = raw_key.n_sz; 1012 rsa_key->n_sz = raw_key.n_sz;
1000 1013
1001 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
1002 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
1003
1004 caam_rsa_set_priv_key_form(ctx, &raw_key); 1014 caam_rsa_set_priv_key_form(ctx, &raw_key);
1005 1015
1006 return 0; 1016 return 0;
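
Both setkey paths replace a kzalloc() into GFP_DMA memory plus a later memcpy() with a single kmemdup(); the two forms are equivalent, as this minimal sketch shows (the helper name is hypothetical):

/* Hypothetical helper: duplicate one raw RSA key component into DMA-able memory.
 * Equivalent to the removed kzalloc(len, GFP_DMA | GFP_KERNEL) + memcpy() pair. */
static u8 *caam_rsa_dup_key_component(const u8 *src, size_t len)
{
        return kmemdup(src, len, GFP_DMA | GFP_KERNEL);
}
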
@@ -1053,22 +1063,24 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
1053 caam_jr_free(ctx->dev); 1063 caam_jr_free(ctx->dev);
1054} 1064}
1055 1065
1056static struct akcipher_alg caam_rsa = { 1066static struct caam_akcipher_alg caam_rsa = {
1057 .encrypt = caam_rsa_enc, 1067 .akcipher = {
1058 .decrypt = caam_rsa_dec, 1068 .encrypt = caam_rsa_enc,
1059 .set_pub_key = caam_rsa_set_pub_key, 1069 .decrypt = caam_rsa_dec,
1060 .set_priv_key = caam_rsa_set_priv_key, 1070 .set_pub_key = caam_rsa_set_pub_key,
1061 .max_size = caam_rsa_max_size, 1071 .set_priv_key = caam_rsa_set_priv_key,
1062 .init = caam_rsa_init_tfm, 1072 .max_size = caam_rsa_max_size,
1063 .exit = caam_rsa_exit_tfm, 1073 .init = caam_rsa_init_tfm,
1064 .reqsize = sizeof(struct caam_rsa_req_ctx), 1074 .exit = caam_rsa_exit_tfm,
1065 .base = { 1075 .reqsize = sizeof(struct caam_rsa_req_ctx),
1066 .cra_name = "rsa", 1076 .base = {
1067 .cra_driver_name = "rsa-caam", 1077 .cra_name = "rsa",
1068 .cra_priority = 3000, 1078 .cra_driver_name = "rsa-caam",
1069 .cra_module = THIS_MODULE, 1079 .cra_priority = 3000,
1070 .cra_ctxsize = sizeof(struct caam_rsa_ctx), 1080 .cra_module = THIS_MODULE,
1071 }, 1081 .cra_ctxsize = sizeof(struct caam_rsa_ctx),
1082 },
1083 }
1072}; 1084};
1073 1085
1074/* Public Key Cryptography module initialization handler */ 1086/* Public Key Cryptography module initialization handler */
@@ -1077,6 +1089,7 @@ int caam_pkc_init(struct device *ctrldev)
1077 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 1089 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1078 u32 pk_inst; 1090 u32 pk_inst;
1079 int err; 1091 int err;
1092 init_done = false;
1080 1093
1081 /* Determine public key hardware accelerator presence. */ 1094 /* Determine public key hardware accelerator presence. */
1082 if (priv->era < 10) 1095 if (priv->era < 10)
@@ -1095,12 +1108,15 @@ int caam_pkc_init(struct device *ctrldev)
1095 if (!zero_buffer) 1108 if (!zero_buffer)
1096 return -ENOMEM; 1109 return -ENOMEM;
1097 1110
1098 err = crypto_register_akcipher(&caam_rsa); 1111 err = crypto_register_akcipher(&caam_rsa.akcipher);
1112
1099 if (err) { 1113 if (err) {
1100 kfree(zero_buffer); 1114 kfree(zero_buffer);
1101 dev_warn(ctrldev, "%s alg registration failed\n", 1115 dev_warn(ctrldev, "%s alg registration failed\n",
1102 caam_rsa.base.cra_driver_name); 1116 caam_rsa.akcipher.base.cra_driver_name);
1103 } else { 1117 } else {
1118 init_done = true;
1119 caam_rsa.registered = true;
1104 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); 1120 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1105 } 1121 }
1106 1122
@@ -1109,6 +1125,11 @@ int caam_pkc_init(struct device *ctrldev)
1109 1125
1110void caam_pkc_exit(void) 1126void caam_pkc_exit(void)
1111{ 1127{
1128 if (!init_done)
1129 return;
1130
1131 if (caam_rsa.registered)
1132 crypto_unregister_akcipher(&caam_rsa.akcipher);
1133
1112 kfree(zero_buffer); 1134 kfree(zero_buffer);
1113 crypto_unregister_akcipher(&caam_rsa);
1114} 1135}
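
The new init_done and per-algorithm registered flags make the exit path safe to call whether or not caam_pkc_init() completed: nothing is freed unless initialization got far enough, and the akcipher is only unregistered if registration actually succeeded (caamrng.c below applies the same guard). A small self-contained sketch of the pattern, with stand-in names, assuming a single buffer and a single registration step:

#include <stdbool.h>
#include <stdlib.h>

static bool init_done;          /* initialization completed */
static bool registered;         /* registration succeeded   */
static void *zero_buf;

/* stand-ins for crypto_register_akcipher()/crypto_unregister_akcipher() */
static int register_alg(void) { return 0; }
static void unregister_alg(void) { }

static int example_init(void)
{
        init_done = false;

        zero_buf = calloc(1, 512);
        if (!zero_buf)
                return -1;

        if (register_alg() != 0) {
                free(zero_buf);         /* failure path cleans up locally ... */
                return -1;              /* ... and init_done stays false      */
        }

        init_done = true;
        registered = true;
        return 0;
}

static void example_exit(void)
{
        if (!init_done)         /* init never completed: nothing left to undo */
                return;
        if (registered)
                unregister_alg();
        free(zero_buf);
}

int main(void)
{
        if (example_init() == 0)
                example_exit();
        return 0;
}
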
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 561bcb535184..e8baacaabe07 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -53,7 +53,7 @@
53 L1_CACHE_BYTES) 53 L1_CACHE_BYTES)
54 54
55/* length of descriptors */ 55/* length of descriptors */
56#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2) 56#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2)
57#define DESC_RNG_LEN (3 * CAAM_CMD_SZ) 57#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
58 58
59/* Buffer, its dma address and lock */ 59/* Buffer, its dma address and lock */
@@ -80,6 +80,12 @@ struct caam_rng_ctx {
80 80
81static struct caam_rng_ctx *rng_ctx; 81static struct caam_rng_ctx *rng_ctx;
82 82
83/*
84 * Variable used to avoid double free of resources in case
85 * algorithm registration was unsuccessful
86 */
87static bool init_done;
88
83static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd) 89static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
84{ 90{
85 if (bd->addr) 91 if (bd->addr)
@@ -296,6 +302,9 @@ static struct hwrng caam_rng = {
296 302
297void caam_rng_exit(void) 303void caam_rng_exit(void)
298{ 304{
305 if (!init_done)
306 return;
307
299 caam_jr_free(rng_ctx->jrdev); 308 caam_jr_free(rng_ctx->jrdev);
300 hwrng_unregister(&caam_rng); 309 hwrng_unregister(&caam_rng);
301 kfree(rng_ctx); 310 kfree(rng_ctx);
@@ -307,6 +316,7 @@ int caam_rng_init(struct device *ctrldev)
307 u32 rng_inst; 316 u32 rng_inst;
308 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 317 struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
309 int err; 318 int err;
319 init_done = false;
310 320
311 /* Check for an instantiated RNG before registration */ 321 /* Check for an instantiated RNG before registration */
312 if (priv->era < 10) 322 if (priv->era < 10)
@@ -333,7 +343,12 @@ int caam_rng_init(struct device *ctrldev)
333 goto free_rng_ctx; 343 goto free_rng_ctx;
334 344
335 dev_info(dev, "registering rng-caam\n"); 345 dev_info(dev, "registering rng-caam\n");
336 return hwrng_register(&caam_rng); 346
347 err = hwrng_register(&caam_rng);
348 if (!err) {
349 init_done = true;
350 return err;
351 }
337 352
338free_rng_ctx: 353free_rng_ctx:
339 kfree(rng_ctx); 354 kfree(rng_ctx);
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 8639b2df0371..60e2a54c19f1 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -32,7 +32,7 @@
32#include <crypto/null.h> 32#include <crypto/null.h>
33#include <crypto/aes.h> 33#include <crypto/aes.h>
34#include <crypto/ctr.h> 34#include <crypto/ctr.h>
35#include <crypto/des.h> 35#include <crypto/internal/des.h>
36#include <crypto/gcm.h> 36#include <crypto/gcm.h>
37#include <crypto/sha.h> 37#include <crypto/sha.h>
38#include <crypto/md5.h> 38#include <crypto/md5.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 4e43ca4d3656..db22777d59b4 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -26,16 +26,6 @@ EXPORT_SYMBOL(caam_dpaa2);
26#endif 26#endif
27 27
28/* 28/*
29 * i.MX targets tend to have clock control subsystems that can
30 * enable/disable clocking to our device.
31 */
32static inline struct clk *caam_drv_identify_clk(struct device *dev,
33 char *clk_name)
34{
35 return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
36}
37
38/*
39 * Descriptor to instantiate RNG State Handle 0 in normal mode and 29 * Descriptor to instantiate RNG State Handle 0 in normal mode and
40 * load the JDKEK, TDKEK and TDSK registers 30 * load the JDKEK, TDKEK and TDSK registers
41 */ 31 */
@@ -107,7 +97,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
107 int i; 97 int i;
108 98
109 99
110 if (ctrlpriv->virt_en == 1) { 100 if (ctrlpriv->virt_en == 1 ||
101 /*
 102 * Apparently on i.MX8MQ the virt_en setting does not matter;
 103 * the following steps should be performed regardless.
104 */
105 of_machine_is_compatible("fsl,imx8mq")) {
111 clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); 106 clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
112 107
113 while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && 108 while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -342,13 +337,6 @@ static int caam_remove(struct platform_device *pdev)
342 /* Unmap controller region */ 337 /* Unmap controller region */
343 iounmap(ctrl); 338 iounmap(ctrl);
344 339
345 /* shut clocks off before finalizing shutdown */
346 clk_disable_unprepare(ctrlpriv->caam_ipg);
347 if (ctrlpriv->caam_mem)
348 clk_disable_unprepare(ctrlpriv->caam_mem);
349 clk_disable_unprepare(ctrlpriv->caam_aclk);
350 if (ctrlpriv->caam_emi_slow)
351 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
352 return 0; 340 return 0;
353} 341}
354 342
@@ -497,20 +485,99 @@ static const struct of_device_id caam_match[] = {
497}; 485};
498MODULE_DEVICE_TABLE(of, caam_match); 486MODULE_DEVICE_TABLE(of, caam_match);
499 487
488struct caam_imx_data {
489 const struct clk_bulk_data *clks;
490 int num_clks;
491};
492
493static const struct clk_bulk_data caam_imx6_clks[] = {
494 { .id = "ipg" },
495 { .id = "mem" },
496 { .id = "aclk" },
497 { .id = "emi_slow" },
498};
499
500static const struct caam_imx_data caam_imx6_data = {
501 .clks = caam_imx6_clks,
502 .num_clks = ARRAY_SIZE(caam_imx6_clks),
503};
504
505static const struct clk_bulk_data caam_imx7_clks[] = {
506 { .id = "ipg" },
507 { .id = "aclk" },
508};
509
510static const struct caam_imx_data caam_imx7_data = {
511 .clks = caam_imx7_clks,
512 .num_clks = ARRAY_SIZE(caam_imx7_clks),
513};
514
515static const struct clk_bulk_data caam_imx6ul_clks[] = {
516 { .id = "ipg" },
517 { .id = "mem" },
518 { .id = "aclk" },
519};
520
521static const struct caam_imx_data caam_imx6ul_data = {
522 .clks = caam_imx6ul_clks,
523 .num_clks = ARRAY_SIZE(caam_imx6ul_clks),
524};
525
526static const struct soc_device_attribute caam_imx_soc_table[] = {
527 { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
528 { .soc_id = "i.MX6*", .data = &caam_imx6_data },
529 { .soc_id = "i.MX7*", .data = &caam_imx7_data },
530 { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
531 { .family = "Freescale i.MX" },
532 { /* sentinel */ }
533};
534
535static void disable_clocks(void *data)
536{
537 struct caam_drv_private *ctrlpriv = data;
538
539 clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
540}
541
542static int init_clocks(struct device *dev, const struct caam_imx_data *data)
543{
544 struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
545 int ret;
546
547 ctrlpriv->num_clks = data->num_clks;
548 ctrlpriv->clks = devm_kmemdup(dev, data->clks,
549 data->num_clks * sizeof(data->clks[0]),
550 GFP_KERNEL);
551 if (!ctrlpriv->clks)
552 return -ENOMEM;
553
554 ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
555 if (ret) {
556 dev_err(dev,
557 "Failed to request all necessary clocks\n");
558 return ret;
559 }
560
561 ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
562 if (ret) {
563 dev_err(dev,
564 "Failed to prepare/enable all necessary clocks\n");
565 return ret;
566 }
567
568 return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
569}
570
500/* Probe routine for CAAM top (controller) level */ 571/* Probe routine for CAAM top (controller) level */
501static int caam_probe(struct platform_device *pdev) 572static int caam_probe(struct platform_device *pdev)
502{ 573{
503 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 574 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
504 u64 caam_id; 575 u64 caam_id;
505 static const struct soc_device_attribute imx_soc[] = { 576 const struct soc_device_attribute *imx_soc_match;
506 {.family = "Freescale i.MX"},
507 {},
508 };
509 struct device *dev; 577 struct device *dev;
510 struct device_node *nprop, *np; 578 struct device_node *nprop, *np;
511 struct caam_ctrl __iomem *ctrl; 579 struct caam_ctrl __iomem *ctrl;
512 struct caam_drv_private *ctrlpriv; 580 struct caam_drv_private *ctrlpriv;
513 struct clk *clk;
514#ifdef CONFIG_DEBUG_FS 581#ifdef CONFIG_DEBUG_FS
515 struct caam_perfmon *perfmon; 582 struct caam_perfmon *perfmon;
516#endif 583#endif
@@ -527,103 +594,68 @@ static int caam_probe(struct platform_device *pdev)
527 dev_set_drvdata(dev, ctrlpriv); 594 dev_set_drvdata(dev, ctrlpriv);
528 nprop = pdev->dev.of_node; 595 nprop = pdev->dev.of_node;
529 596
530 caam_imx = (bool)soc_device_match(imx_soc); 597 imx_soc_match = soc_device_match(caam_imx_soc_table);
598 caam_imx = (bool)imx_soc_match;
531 599
532 /* Enable clocking */ 600 if (imx_soc_match) {
533 clk = caam_drv_identify_clk(&pdev->dev, "ipg"); 601 if (!imx_soc_match->data) {
534 if (IS_ERR(clk)) { 602 dev_err(dev, "No clock data provided for i.MX SoC");
535 ret = PTR_ERR(clk); 603 return -EINVAL;
536 dev_err(&pdev->dev,
537 "can't identify CAAM ipg clk: %d\n", ret);
538 return ret;
539 }
540 ctrlpriv->caam_ipg = clk;
541
542 if (!of_machine_is_compatible("fsl,imx7d") &&
543 !of_machine_is_compatible("fsl,imx7s") &&
544 !of_machine_is_compatible("fsl,imx7ulp")) {
545 clk = caam_drv_identify_clk(&pdev->dev, "mem");
546 if (IS_ERR(clk)) {
547 ret = PTR_ERR(clk);
548 dev_err(&pdev->dev,
549 "can't identify CAAM mem clk: %d\n", ret);
550 return ret;
551 } 604 }
552 ctrlpriv->caam_mem = clk;
553 }
554 605
555 clk = caam_drv_identify_clk(&pdev->dev, "aclk"); 606 ret = init_clocks(dev, imx_soc_match->data);
556 if (IS_ERR(clk)) { 607 if (ret)
557 ret = PTR_ERR(clk);
558 dev_err(&pdev->dev,
559 "can't identify CAAM aclk clk: %d\n", ret);
560 return ret;
561 }
562 ctrlpriv->caam_aclk = clk;
563
564 if (!of_machine_is_compatible("fsl,imx6ul") &&
565 !of_machine_is_compatible("fsl,imx7d") &&
566 !of_machine_is_compatible("fsl,imx7s") &&
567 !of_machine_is_compatible("fsl,imx7ulp")) {
568 clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
569 if (IS_ERR(clk)) {
570 ret = PTR_ERR(clk);
571 dev_err(&pdev->dev,
572 "can't identify CAAM emi_slow clk: %d\n", ret);
573 return ret; 608 return ret;
574 }
575 ctrlpriv->caam_emi_slow = clk;
576 }
577
578 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
579 if (ret < 0) {
580 dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
581 return ret;
582 }
583
584 if (ctrlpriv->caam_mem) {
585 ret = clk_prepare_enable(ctrlpriv->caam_mem);
586 if (ret < 0) {
587 dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
588 ret);
589 goto disable_caam_ipg;
590 }
591 } 609 }
592 610
593 ret = clk_prepare_enable(ctrlpriv->caam_aclk);
594 if (ret < 0) {
595 dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
596 goto disable_caam_mem;
597 }
598
599 if (ctrlpriv->caam_emi_slow) {
600 ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
601 if (ret < 0) {
602 dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
603 ret);
604 goto disable_caam_aclk;
605 }
606 }
607 611
608 /* Get configuration properties from device tree */ 612 /* Get configuration properties from device tree */
609 /* First, get register page */ 613 /* First, get register page */
610 ctrl = of_iomap(nprop, 0); 614 ctrl = of_iomap(nprop, 0);
611 if (ctrl == NULL) { 615 if (!ctrl) {
612 dev_err(dev, "caam: of_iomap() failed\n"); 616 dev_err(dev, "caam: of_iomap() failed\n");
613 ret = -ENOMEM; 617 return -ENOMEM;
614 goto disable_caam_emi_slow;
615 } 618 }
616 619
617 caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & 620 caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
618 (CSTA_PLEND | CSTA_ALT_PLEND)); 621 (CSTA_PLEND | CSTA_ALT_PLEND));
619
620 /* Finding the page size for using the CTPR_MS register */
621 comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); 622 comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
622 pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; 623 if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
624 caam_ptr_sz = sizeof(u64);
625 else
626 caam_ptr_sz = sizeof(u32);
627 caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
628 ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
629
630#ifdef CONFIG_CAAM_QI
631 /* If (DPAA 1.x) QI present, check whether dependencies are available */
632 if (ctrlpriv->qi_present && !caam_dpaa2) {
633 ret = qman_is_probed();
634 if (!ret) {
635 ret = -EPROBE_DEFER;
636 goto iounmap_ctrl;
637 } else if (ret < 0) {
638 dev_err(dev, "failing probe due to qman probe error\n");
639 ret = -ENODEV;
640 goto iounmap_ctrl;
641 }
642
643 ret = qman_portals_probed();
644 if (!ret) {
645 ret = -EPROBE_DEFER;
646 goto iounmap_ctrl;
647 } else if (ret < 0) {
648 dev_err(dev, "failing probe due to qman portals probe error\n");
649 ret = -ENODEV;
650 goto iounmap_ctrl;
651 }
652 }
653#endif
623 654
624 /* Allocating the BLOCK_OFFSET based on the supported page size on 655 /* Allocating the BLOCK_OFFSET based on the supported page size on
625 * the platform 656 * the platform
626 */ 657 */
658 pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
627 if (pg_size == 0) 659 if (pg_size == 0)
628 BLOCK_OFFSET = PG_SIZE_4K; 660 BLOCK_OFFSET = PG_SIZE_4K;
629 else 661 else
@@ -648,7 +680,6 @@ static int caam_probe(struct platform_device *pdev)
648 * In case of SoCs with Management Complex, MC f/w performs 680 * In case of SoCs with Management Complex, MC f/w performs
649 * the configuration. 681 * the configuration.
650 */ 682 */
651 caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
652 np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); 683 np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
653 ctrlpriv->mc_en = !!np; 684 ctrlpriv->mc_en = !!np;
654 of_node_put(np); 685 of_node_put(np);
@@ -688,16 +719,7 @@ static int caam_probe(struct platform_device *pdev)
688 JRSTART_JR1_START | JRSTART_JR2_START | 719 JRSTART_JR1_START | JRSTART_JR2_START |
689 JRSTART_JR3_START); 720 JRSTART_JR3_START);
690 721
691 if (sizeof(dma_addr_t) == sizeof(u64)) { 722 ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
692 if (caam_dpaa2)
693 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
694 else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
695 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
696 else
697 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
698 } else {
699 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
700 }
701 if (ret) { 723 if (ret) {
702 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); 724 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
703 goto iounmap_ctrl; 725 goto iounmap_ctrl;
@@ -719,7 +741,6 @@ static int caam_probe(struct platform_device *pdev)
719#endif 741#endif
720 742
721 /* Check to see if (DPAA 1.x) QI present. If so, enable */ 743 /* Check to see if (DPAA 1.x) QI present. If so, enable */
722 ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
723 if (ctrlpriv->qi_present && !caam_dpaa2) { 744 if (ctrlpriv->qi_present && !caam_dpaa2) {
724 ctrlpriv->qi = (struct caam_queue_if __iomem __force *) 745 ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
725 ((__force uint8_t *)ctrl + 746 ((__force uint8_t *)ctrl +
@@ -908,16 +929,6 @@ shutdown_qi:
908#endif 929#endif
909iounmap_ctrl: 930iounmap_ctrl:
910 iounmap(ctrl); 931 iounmap(ctrl);
911disable_caam_emi_slow:
912 if (ctrlpriv->caam_emi_slow)
913 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
914disable_caam_aclk:
915 clk_disable_unprepare(ctrlpriv->caam_aclk);
916disable_caam_mem:
917 if (ctrlpriv->caam_mem)
918 clk_disable_unprepare(ctrlpriv->caam_mem);
919disable_caam_ipg:
920 clk_disable_unprepare(ctrlpriv->caam_ipg);
921 return ret; 932 return ret;
922} 933}
923 934
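
The per-clock devm_clk_get()/clk_prepare_enable() ladder and its hand-rolled unwinding are replaced by a table-driven clk_bulk sequence whose teardown is registered as a devres action, which is why caam_remove() and the probe error labels no longer contain any clk_disable_unprepare() calls. A reduced consumer-side sketch, assuming a device with only "ipg" and "aclk" clocks (names and function names here are illustrative):

#include <linux/clk.h>
#include <linux/device.h>

static const struct clk_bulk_data example_clks[] = {
        { .id = "ipg" },
        { .id = "aclk" },
};

static void example_disable_clocks(void *data)
{
        clk_bulk_disable_unprepare(ARRAY_SIZE(example_clks), data);
}

/* @clks: writable copy of example_clks[] (the driver makes one with devm_kmemdup()) */
static int example_enable_clocks(struct device *dev, struct clk_bulk_data *clks)
{
        int ret;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), clks);
        if (ret)
                return ret;

        ret = clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), clks);
        if (ret)
                return ret;

        /* teardown now runs automatically on probe failure and on removal */
        return devm_add_action_or_reset(dev, example_disable_clocks, clks);
}
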
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 5988a26a2441..62ce6421bb3f 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -14,9 +14,41 @@
14 14
15#define IMMEDIATE (1 << 23) 15#define IMMEDIATE (1 << 23)
16#define CAAM_CMD_SZ sizeof(u32) 16#define CAAM_CMD_SZ sizeof(u32)
17#define CAAM_PTR_SZ sizeof(dma_addr_t) 17#define CAAM_PTR_SZ caam_ptr_sz
18#define CAAM_PTR_SZ_MAX sizeof(dma_addr_t)
19#define CAAM_PTR_SZ_MIN sizeof(u32)
18#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) 20#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
19#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3) 21#define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3)
22#define DESC_JOB_IO_LEN __DESC_JOB_IO_LEN(CAAM_PTR_SZ)
23#define DESC_JOB_IO_LEN_MAX __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MAX)
24#define DESC_JOB_IO_LEN_MIN __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MIN)
25
26/*
27 * The CAAM QI hardware constructs a job descriptor which points
28 * to shared descriptor (as pointed by context_a of FQ to CAAM).
29 * When the job descriptor is executed by deco, the whole job
30 * descriptor together with shared descriptor gets loaded in
31 * deco buffer which is 64 words long (each 32-bit).
32 *
33 * The job descriptor constructed by QI hardware has layout:
34 *
35 * HEADER (1 word)
36 * Shdesc ptr (1 or 2 words)
37 * SEQ_OUT_PTR (1 word)
38 * Out ptr (1 or 2 words)
39 * Out length (1 word)
40 * SEQ_IN_PTR (1 word)
41 * In ptr (1 or 2 words)
42 * In length (1 word)
43 *
44 * The shdesc ptr is used to fetch shared descriptor contents
45 * into deco buffer.
46 *
47 * Apart from shdesc contents, the total number of words that
 48 * get loaded in deco buffer is either '8' or '11'. The remaining words
49 * in deco buffer can be used for storing shared descriptor.
50 */
51#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ)
20 52
21#ifdef DEBUG 53#ifdef DEBUG
22#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ 54#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -37,6 +69,7 @@
37 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) 69 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
38 70
39extern bool caam_little_end; 71extern bool caam_little_end;
72extern size_t caam_ptr_sz;
40 73
41/* 74/*
42 * HW fetches 4 S/G table entries at a time, irrespective of how many entries 75 * HW fetches 4 S/G table entries at a time, irrespective of how many entries
@@ -103,9 +136,15 @@ static inline void init_job_desc_pdb(u32 * const desc, u32 options,
103 136
104static inline void append_ptr(u32 * const desc, dma_addr_t ptr) 137static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
105{ 138{
106 dma_addr_t *offset = (dma_addr_t *)desc_end(desc); 139 if (caam_ptr_sz == sizeof(dma_addr_t)) {
140 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
107 141
108 *offset = cpu_to_caam_dma(ptr); 142 *offset = cpu_to_caam_dma(ptr);
143 } else {
144 u32 *offset = (u32 *)desc_end(desc);
145
146 *offset = cpu_to_caam_dma(ptr);
147 }
109 148
110 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 149 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
111 CAAM_PTR_SZ / CAAM_CMD_SZ); 150 CAAM_PTR_SZ / CAAM_CMD_SZ);
@@ -457,8 +496,8 @@ do { \
457 * functions where it is used. 496 * functions where it is used.
458 * @keylen: length of the provided algorithm key, in bytes 497 * @keylen: length of the provided algorithm key, in bytes
459 * @keylen_pad: padded length of the provided algorithm key, in bytes 498 * @keylen_pad: padded length of the provided algorithm key, in bytes
460 * @key: address where algorithm key resides; virtual address if key_inline 499 * @key_dma: dma (bus) address where algorithm key resides
461 * is true, dma (bus) address if key_inline is false. 500 * @key_virt: virtual address where algorithm key resides
462 * @key_inline: true - key can be inlined in the descriptor; false - key is 501 * @key_inline: true - key can be inlined in the descriptor; false - key is
463 * referenced by the descriptor 502 * referenced by the descriptor
464 */ 503 */
@@ -466,10 +505,8 @@ struct alginfo {
466 u32 algtype; 505 u32 algtype;
467 unsigned int keylen; 506 unsigned int keylen;
468 unsigned int keylen_pad; 507 unsigned int keylen_pad;
469 union { 508 dma_addr_t key_dma;
470 dma_addr_t key_dma; 509 const void *key_virt;
471 const void *key_virt;
472 };
473 bool key_inline; 510 bool key_inline;
474}; 511};
475 512
@@ -535,14 +572,26 @@ static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
535 if (adata->key_inline) { 572 if (adata->key_inline) {
536 int words; 573 int words;
537 574
538 append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | 575 if (adata->keylen > adata->keylen_pad) {
539 OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | 576 append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
540 adata->keylen); 577 OP_PCL_DKP_SRC_PTR |
541 append_data(desc, adata->key_virt, adata->keylen); 578 OP_PCL_DKP_DST_IMM | adata->keylen);
579 append_ptr(desc, adata->key_dma);
580
581 words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
582 CAAM_PTR_SZ) / CAAM_CMD_SZ;
583 } else {
584 append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
585 OP_PCL_DKP_SRC_IMM |
586 OP_PCL_DKP_DST_IMM | adata->keylen);
587 append_data(desc, adata->key_virt, adata->keylen);
588
589 words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
590 ALIGN(adata->keylen, CAAM_CMD_SZ)) /
591 CAAM_CMD_SZ;
592 }
542 593
543 /* Reserve space in descriptor buffer for the derived key */ 594 /* Reserve space in descriptor buffer for the derived key */
544 words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
545 ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
546 if (words) 595 if (words)
547 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); 596 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
548 } else { 597 } else {
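
With CAAM_PTR_SZ now the runtime variable caam_ptr_sz, descriptor length budgeting has to track three values: DESC_JOB_IO_LEN (runtime), DESC_JOB_IO_LEN_MAX (compile-time sizeof(dma_addr_t)) and DESC_JOB_IO_LEN_MIN (32-bit pointers), with MAX_SDLEN derived from the MIN variant as defined above. A small self-contained check of the arithmetic, matching the "8 or 11 words" figure in the comment:

#include <stdio.h>

#define CAAM_CMD_SZ             4                       /* one descriptor command word */
#define MAX_CAAM_DESCSIZE       64                      /* DECO buffer: 64 words       */
#define CAAM_DESC_BYTES_MAX     (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
#define __DESC_JOB_IO_LEN(n)    (CAAM_CMD_SZ * 5 + (n) * 3)

int main(void)
{
        unsigned int io_min = __DESC_JOB_IO_LEN(4);     /* 32-bit pointers: 32 bytes = 8 words  */
        unsigned int io_max = __DESC_JOB_IO_LEN(8);     /* 64-bit pointers: 44 bytes = 11 words */
        unsigned int max_sdlen = (CAAM_DESC_BYTES_MAX - io_min) / CAAM_CMD_SZ;

        printf("job descriptor I/O overhead: %u..%u bytes\n", io_min, io_max);
        printf("MAX_SDLEN = %u words\n", max_sdlen);    /* 56 */
        return 0;
}
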
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
new file mode 100644
index 000000000000..c5bfc923abd8
--- /dev/null
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -0,0 +1,79 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/* Copyright 2019 NXP */
3
4#include <linux/module.h>
5#include <linux/device.h>
6#include <linux/debugfs.h>
7#include "dpseci-debugfs.h"
8
9static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
10{
11 struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
12 u32 fqid, fcnt, bcnt;
13 int i, err;
14
15 seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev));
16 seq_printf(file, "%s%16s%16s\n",
17 "Rx-VFQID",
18 "Pending frames",
19 "Pending bytes");
20
21 for (i = 0; i < priv->num_pairs; i++) {
22 fqid = priv->rx_queue_attr[i].fqid;
23 err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt);
24 if (err)
25 continue;
26
27 seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt);
28 }
29
30 seq_printf(file, "%s%16s%16s\n",
31 "Tx-VFQID",
32 "Pending frames",
33 "Pending bytes");
34
35 for (i = 0; i < priv->num_pairs; i++) {
36 fqid = priv->tx_queue_attr[i].fqid;
37 err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt);
38 if (err)
39 continue;
40
41 seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt);
42 }
43
44 return 0;
45}
46
47static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file)
48{
49 int err;
50 struct dpaa2_caam_priv *priv;
51
52 priv = (struct dpaa2_caam_priv *)inode->i_private;
53
54 err = single_open(file, dpseci_dbg_fqs_show, priv);
55 if (err < 0)
56 dev_err(priv->dev, "single_open() failed\n");
57
58 return err;
59}
60
61static const struct file_operations dpseci_dbg_fq_ops = {
62 .open = dpseci_dbg_fqs_open,
63 .read = seq_read,
64 .llseek = seq_lseek,
65 .release = single_release,
66};
67
68void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv)
69{
70 priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL);
71
72 debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv,
73 &dpseci_dbg_fq_ops);
74}
75
76void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv)
77{
78 debugfs_remove_recursive(priv->dfs_root);
79}
diff --git a/drivers/crypto/caam/dpseci-debugfs.h b/drivers/crypto/caam/dpseci-debugfs.h
new file mode 100644
index 000000000000..bc22af7bec37
--- /dev/null
+++ b/drivers/crypto/caam/dpseci-debugfs.h
@@ -0,0 +1,18 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/* Copyright 2019 NXP */
3
4#ifndef DPSECI_DEBUGFS_H
5#define DPSECI_DEBUGFS_H
6
7#include <linux/dcache.h>
8#include "caamalg_qi2.h"
9
10#ifdef CONFIG_DEBUG_FS
11void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv);
12void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv);
13#else
14static inline void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) {}
15static inline void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) {}
16#endif /* CONFIG_DEBUG_FS */
17
18#endif /* DPSECI_DEBUGFS_H */
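
The new dpseci-debugfs pair exposes per-pair Rx/Tx frame-queue counts under <debugfs>/<device-name>/fq_stats, and the header stubs both entry points when CONFIG_DEBUG_FS is off so callers need no #ifdef. The intended pairing on the DPAA2 driver side is roughly as follows (the actual call sites in caamalg_qi2.c are not part of this excerpt; function names here are illustrative):

/* Sketch only: how the two hooks are expected to be paired by the DPAA2 driver */
static int example_dpseci_setup(struct dpaa2_caam_priv *priv)
{
        /* ... queues, DMA and algorithm registration done first ... */
        dpaa2_dpseci_debugfs_init(priv);        /* creates <device-name>/fq_stats */
        return 0;
}

static void example_dpseci_teardown(struct dpaa2_caam_priv *priv)
{
        dpaa2_dpseci_debugfs_exit(priv);        /* removes the directory recursively */
        /* ... remaining teardown ... */
}
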
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 4f0d45865aa2..17c6108b6d41 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -56,6 +56,9 @@ EXPORT_SYMBOL(caam_little_end);
56bool caam_imx; 56bool caam_imx;
57EXPORT_SYMBOL(caam_imx); 57EXPORT_SYMBOL(caam_imx);
58 58
59size_t caam_ptr_sz;
60EXPORT_SYMBOL(caam_ptr_sz);
61
59static const struct { 62static const struct {
60 u8 value; 63 u8 value;
61 const char *error_text; 64 const char *error_text;
@@ -118,6 +121,7 @@ static const struct {
118 u8 value; 121 u8 value;
119 const char *error_text; 122 const char *error_text;
120} qi_error_list[] = { 123} qi_error_list[] = {
124 { 0x00, "No error" },
121 { 0x1F, "Job terminated by FQ or ICID flush" }, 125 { 0x1F, "Job terminated by FQ or ICID flush" },
122 { 0x20, "FD format error"}, 126 { 0x20, "FD format error"},
123 { 0x21, "FD command format error"}, 127 { 0x21, "FD command format error"},
@@ -210,8 +214,8 @@ static const char * const rng_err_id_list[] = {
210 "Secure key generation", 214 "Secure key generation",
211}; 215};
212 216
213static void report_ccb_status(struct device *jrdev, const u32 status, 217static int report_ccb_status(struct device *jrdev, const u32 status,
214 const char *error) 218 const char *error)
215{ 219{
216 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> 220 u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
217 JRSTA_CCBERR_CHAID_SHIFT; 221 JRSTA_CCBERR_CHAID_SHIFT;
@@ -247,22 +251,27 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
247 * CCB ICV check failures are part of normal operation life; 251 * CCB ICV check failures are part of normal operation life;
248 * we leave the upper layers to do what they want with them. 252 * we leave the upper layers to do what they want with them.
249 */ 253 */
250 if (err_id != JRSTA_CCBERR_ERRID_ICVCHK) 254 if (err_id == JRSTA_CCBERR_ERRID_ICVCHK)
251 dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", 255 return -EBADMSG;
252 status, error, idx_str, idx, 256
253 cha_str, cha_err_code, 257 dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status,
254 err_str, err_err_code); 258 error, idx_str, idx, cha_str, cha_err_code,
259 err_str, err_err_code);
260
261 return -EINVAL;
255} 262}
256 263
257static void report_jump_status(struct device *jrdev, const u32 status, 264static int report_jump_status(struct device *jrdev, const u32 status,
258 const char *error) 265 const char *error)
259{ 266{
260 dev_err(jrdev, "%08x: %s: %s() not implemented\n", 267 dev_err(jrdev, "%08x: %s: %s() not implemented\n",
261 status, error, __func__); 268 status, error, __func__);
269
270 return -EINVAL;
262} 271}
263 272
264static void report_deco_status(struct device *jrdev, const u32 status, 273static int report_deco_status(struct device *jrdev, const u32 status,
265 const char *error) 274 const char *error)
266{ 275{
267 u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; 276 u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
268 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> 277 u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
@@ -288,10 +297,12 @@ static void report_deco_status(struct device *jrdev, const u32 status,
288 297
289 dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", 298 dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
290 status, error, idx_str, idx, err_str, err_err_code); 299 status, error, idx_str, idx, err_str, err_err_code);
300
301 return -EINVAL;
291} 302}
292 303
293static void report_qi_status(struct device *qidev, const u32 status, 304static int report_qi_status(struct device *qidev, const u32 status,
294 const char *error) 305 const char *error)
295{ 306{
296 u8 err_id = status & JRSTA_QIERR_ERROR_MASK; 307 u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
297 const char *err_str = "unidentified error value 0x"; 308 const char *err_str = "unidentified error value 0x";
@@ -309,27 +320,33 @@ static void report_qi_status(struct device *qidev, const u32 status,
309 320
310 dev_err(qidev, "%08x: %s: %s%s\n", 321 dev_err(qidev, "%08x: %s: %s%s\n",
311 status, error, err_str, err_err_code); 322 status, error, err_str, err_err_code);
323
324 return -EINVAL;
312} 325}
313 326
314static void report_jr_status(struct device *jrdev, const u32 status, 327static int report_jr_status(struct device *jrdev, const u32 status,
315 const char *error) 328 const char *error)
316{ 329{
317 dev_err(jrdev, "%08x: %s: %s() not implemented\n", 330 dev_err(jrdev, "%08x: %s: %s() not implemented\n",
318 status, error, __func__); 331 status, error, __func__);
332
333 return -EINVAL;
319} 334}
320 335
321static void report_cond_code_status(struct device *jrdev, const u32 status, 336static int report_cond_code_status(struct device *jrdev, const u32 status,
322 const char *error) 337 const char *error)
323{ 338{
324 dev_err(jrdev, "%08x: %s: %s() not implemented\n", 339 dev_err(jrdev, "%08x: %s: %s() not implemented\n",
325 status, error, __func__); 340 status, error, __func__);
341
342 return -EINVAL;
326} 343}
327 344
328void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) 345int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
329{ 346{
330 static const struct stat_src { 347 static const struct stat_src {
331 void (*report_ssed)(struct device *jrdev, const u32 status, 348 int (*report_ssed)(struct device *jrdev, const u32 status,
332 const char *error); 349 const char *error);
333 const char *error; 350 const char *error;
334 } status_src[16] = { 351 } status_src[16] = {
335 { NULL, "No error" }, 352 { NULL, "No error" },
@@ -357,11 +374,14 @@ void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
357 * Otherwise print the error source name. 374 * Otherwise print the error source name.
358 */ 375 */
359 if (status_src[ssrc].report_ssed) 376 if (status_src[ssrc].report_ssed)
360 status_src[ssrc].report_ssed(jrdev, status, error); 377 return status_src[ssrc].report_ssed(jrdev, status, error);
361 else if (error) 378
379 if (error)
362 dev_err(jrdev, "%d: %s\n", ssrc, error); 380 dev_err(jrdev, "%d: %s\n", ssrc, error);
363 else 381 else
364 dev_err(jrdev, "%d: unknown error source\n", ssrc); 382 dev_err(jrdev, "%d: unknown error source\n", ssrc);
383
384 return -EINVAL;
365} 385}
366EXPORT_SYMBOL(caam_strstatus); 386EXPORT_SYMBOL(caam_strstatus);
367 387
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index d9726e66edbf..16809fa8fec7 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -12,7 +12,7 @@
12 12
13#define CAAM_ERROR_STR_MAX 302 13#define CAAM_ERROR_STR_MAX 302
14 14
15void caam_strstatus(struct device *dev, u32 status, bool qi_v2); 15int caam_strstatus(struct device *dev, u32 status, bool qi_v2);
16 16
17#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) 17#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
18#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) 18#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
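
caam_strstatus() (and therefore the caam_jr_strstatus()/caam_qi2_strstatus() wrappers) now tells its caller what happened instead of only logging: 0 for no error, -EBADMSG for a CCB ICV/authentication check failure, and -EINVAL for every other error source, with the verbose cases logged ratelimited inside the report_*_status() helpers. A condensed sketch of how a completion path consumes that (the function name is illustrative):

/* Sketch: turn a CAAM status word into the errno handed back to the crypto API */
static int example_translate_status(struct device *jrdev, u32 status)
{
        int ecode = 0;

        if (status)
                ecode = caam_jr_strstatus(jrdev, status);

        /*
         * -EBADMSG: CCB ICV check failed - part of normal operation,
         *           reported to the caller but not logged here.
         * -EINVAL:  any other CCB/DECO/QI/JR error - already logged
         *           (ratelimited) by the report_*_status() helpers.
         */
        return ecode;
}
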
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6af84bbc612c..731b06becd9c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -10,6 +10,8 @@
10#ifndef INTERN_H 10#ifndef INTERN_H
11#define INTERN_H 11#define INTERN_H
12 12
13#include "ctrl.h"
14
13/* Currently comes from Kconfig param as a ^2 (driver-required) */ 15/* Currently comes from Kconfig param as a ^2 (driver-required) */
14#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) 16#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
15 17
@@ -53,10 +55,11 @@ struct caam_drv_private_jr {
53 spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ 55 spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
54 u32 inpring_avail; /* Number of free entries in input ring */ 56 u32 inpring_avail; /* Number of free entries in input ring */
55 int head; /* entinfo (s/w ring) head index */ 57 int head; /* entinfo (s/w ring) head index */
56 dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ 58 void *inpring; /* Base of input ring, alloc
59 * DMA-safe */
57 int out_ring_read_index; /* Output index "tail" */ 60 int out_ring_read_index; /* Output index "tail" */
58 int tail; /* entinfo (s/w ring) tail index */ 61 int tail; /* entinfo (s/w ring) tail index */
59 struct jr_outentry *outring; /* Base of output ring, DMA-safe */ 62 void *outring; /* Base of output ring, DMA-safe */
60}; 63};
61 64
62/* 65/*
@@ -92,11 +95,8 @@ struct caam_drv_private {
92 Handles of the RNG4 block are initialized 95 Handles of the RNG4 block are initialized
93 by this driver */ 96 by this driver */
94 97
95 struct clk *caam_ipg; 98 struct clk_bulk_data *clks;
96 struct clk *caam_mem; 99 int num_clks;
97 struct clk *caam_aclk;
98 struct clk *caam_emi_slow;
99
100 /* 100 /*
101 * debugfs entries for developer view into driver/device 101 * debugfs entries for developer view into driver/device
102 * variables at runtime. 102 * variables at runtime.
@@ -215,4 +215,22 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
215DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); 215DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
216#endif 216#endif
217 217
218static inline u64 caam_get_dma_mask(struct device *dev)
219{
220 struct device_node *nprop = dev->of_node;
221
222 if (caam_ptr_sz != sizeof(u64))
223 return DMA_BIT_MASK(32);
224
225 if (caam_dpaa2)
226 return DMA_BIT_MASK(49);
227
228 if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") ||
229 of_device_is_compatible(nprop, "fsl,sec-v5.0"))
230 return DMA_BIT_MASK(40);
231
232 return DMA_BIT_MASK(36);
233}
234
235
218#endif /* INTERN_H */ 236#endif /* INTERN_H */
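
caam_get_dma_mask() folds the DMA-mask ladders previously open-coded in ctrl.c and jr.c into one place, choosing the mask from the runtime pointer size, the DPAA2 flag and the SEC compatible string. Both probe paths now reduce to a single call, roughly as below (the helper name is illustrative):

/* Sketch: what both the controller and job-ring probes now do */
static int example_set_dma_mask(struct device *dev)
{
        int ret;

        ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
        if (ret)
                dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);

        return ret;
}
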
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index cea811fed320..fc97cde27059 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -108,25 +108,12 @@ static int caam_reset_hw_jr(struct device *dev)
108static int caam_jr_shutdown(struct device *dev) 108static int caam_jr_shutdown(struct device *dev)
109{ 109{
110 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 110 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
111 dma_addr_t inpbusaddr, outbusaddr;
112 int ret; 111 int ret;
113 112
114 ret = caam_reset_hw_jr(dev); 113 ret = caam_reset_hw_jr(dev);
115 114
116 tasklet_kill(&jrp->irqtask); 115 tasklet_kill(&jrp->irqtask);
117 116
118 /* Release interrupt */
119 free_irq(jrp->irq, dev);
120
121 /* Free rings */
122 inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
123 outbusaddr = rd_reg64(&jrp->rregs->outring_base);
124 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
125 jrp->inpring, inpbusaddr);
126 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
127 jrp->outring, outbusaddr);
128 kfree(jrp->entinfo);
129
130 return ret; 117 return ret;
131} 118}
132 119
@@ -159,7 +146,6 @@ static int caam_jr_remove(struct platform_device *pdev)
159 ret = caam_jr_shutdown(jrdev); 146 ret = caam_jr_shutdown(jrdev);
160 if (ret) 147 if (ret)
161 dev_err(jrdev, "Failed to shut down job ring\n"); 148 dev_err(jrdev, "Failed to shut down job ring\n");
162 irq_dispose_mapping(jrpriv->irq);
163 149
164 return ret; 150 return ret;
165} 151}
@@ -224,7 +210,7 @@ static void caam_jr_dequeue(unsigned long devarg)
224 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { 210 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
225 sw_idx = (tail + i) & (JOBR_DEPTH - 1); 211 sw_idx = (tail + i) & (JOBR_DEPTH - 1);
226 212
227 if (jrp->outring[hw_idx].desc == 213 if (jr_outentry_desc(jrp->outring, hw_idx) ==
228 caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) 214 caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
229 break; /* found */ 215 break; /* found */
230 } 216 }
@@ -233,7 +219,8 @@ static void caam_jr_dequeue(unsigned long devarg)
233 219
234 /* Unmap just-run descriptor so we can post-process */ 220 /* Unmap just-run descriptor so we can post-process */
235 dma_unmap_single(dev, 221 dma_unmap_single(dev,
236 caam_dma_to_cpu(jrp->outring[hw_idx].desc), 222 caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
223 hw_idx)),
237 jrp->entinfo[sw_idx].desc_size, 224 jrp->entinfo[sw_idx].desc_size,
238 DMA_TO_DEVICE); 225 DMA_TO_DEVICE);
239 226
@@ -244,7 +231,8 @@ static void caam_jr_dequeue(unsigned long devarg)
244 usercall = jrp->entinfo[sw_idx].callbk; 231 usercall = jrp->entinfo[sw_idx].callbk;
245 userarg = jrp->entinfo[sw_idx].cbkarg; 232 userarg = jrp->entinfo[sw_idx].cbkarg;
246 userdesc = jrp->entinfo[sw_idx].desc_addr_virt; 233 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
247 userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus); 234 userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
235 hw_idx));
248 236
249 /* 237 /*
250 * Make sure all information from the job has been obtained 238 * Make sure all information from the job has been obtained
@@ -399,7 +387,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
399 head_entry->cbkarg = areq; 387 head_entry->cbkarg = areq;
400 head_entry->desc_addr_dma = desc_dma; 388 head_entry->desc_addr_dma = desc_dma;
401 389
402 jrp->inpring[head] = cpu_to_caam_dma(desc_dma); 390 jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));
403 391
404 /* 392 /*
405 * Guarantee that the descriptor's DMA address has been written to 393 * Guarantee that the descriptor's DMA address has been written to
@@ -441,35 +429,26 @@ static int caam_jr_init(struct device *dev)
441 429
442 jrp = dev_get_drvdata(dev); 430 jrp = dev_get_drvdata(dev);
443 431
444 tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
445
446 /* Connect job ring interrupt handler. */
447 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
448 dev_name(dev), dev);
449 if (error) {
450 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
451 jrp->ridx, jrp->irq);
452 goto out_kill_deq;
453 }
454
455 error = caam_reset_hw_jr(dev); 432 error = caam_reset_hw_jr(dev);
456 if (error) 433 if (error)
457 goto out_free_irq; 434 return error;
458 435
459 error = -ENOMEM; 436 jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
460 jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) * 437 JOBR_DEPTH, &inpbusaddr,
461 JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); 438 GFP_KERNEL);
462 if (!jrp->inpring) 439 if (!jrp->inpring)
463 goto out_free_irq; 440 return -ENOMEM;
464 441
465 jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) * 442 jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
466 JOBR_DEPTH, &outbusaddr, GFP_KERNEL); 443 JOBR_DEPTH, &outbusaddr,
444 GFP_KERNEL);
467 if (!jrp->outring) 445 if (!jrp->outring)
468 goto out_free_inpring; 446 return -ENOMEM;
469 447
470 jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); 448 jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
449 GFP_KERNEL);
471 if (!jrp->entinfo) 450 if (!jrp->entinfo)
472 goto out_free_outring; 451 return -ENOMEM;
473 452
474 for (i = 0; i < JOBR_DEPTH; i++) 453 for (i = 0; i < JOBR_DEPTH; i++)
475 jrp->entinfo[i].desc_addr_dma = !0; 454 jrp->entinfo[i].desc_addr_dma = !0;
@@ -493,22 +472,24 @@ static int caam_jr_init(struct device *dev)
493 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | 472 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
494 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); 473 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
495 474
496 return 0; 475 tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
476
477 /* Connect job ring interrupt handler. */
478 error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
479 dev_name(dev), dev);
480 if (error) {
481 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
482 jrp->ridx, jrp->irq);
483 tasklet_kill(&jrp->irqtask);
484 }
497 485
498out_free_outring:
499 dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
500 jrp->outring, outbusaddr);
501out_free_inpring:
502 dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
503 jrp->inpring, inpbusaddr);
504 dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
505out_free_irq:
506 free_irq(jrp->irq, dev);
507out_kill_deq:
508 tasklet_kill(&jrp->irqtask);
509 return error; 486 return error;
510} 487}
511 488
489static void caam_jr_irq_dispose_mapping(void *data)
490{
491 irq_dispose_mapping((unsigned long)data);
492}
512 493
513/* 494/*
514 * Probe routine for each detected JobR subsystem. 495 * Probe routine for each detected JobR subsystem.
@@ -520,6 +501,7 @@ static int caam_jr_probe(struct platform_device *pdev)
520 struct caam_job_ring __iomem *ctrl; 501 struct caam_job_ring __iomem *ctrl;
521 struct caam_drv_private_jr *jrpriv; 502 struct caam_drv_private_jr *jrpriv;
522 static int total_jobrs; 503 static int total_jobrs;
504 struct resource *r;
523 int error; 505 int error;
524 506
525 jrdev = &pdev->dev; 507 jrdev = &pdev->dev;
@@ -535,45 +517,43 @@ static int caam_jr_probe(struct platform_device *pdev)
535 nprop = pdev->dev.of_node; 517 nprop = pdev->dev.of_node;
536 /* Get configuration properties from device tree */ 518 /* Get configuration properties from device tree */
537 /* First, get register page */ 519 /* First, get register page */
538 ctrl = of_iomap(nprop, 0); 520 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
521 if (!r) {
522 dev_err(jrdev, "platform_get_resource() failed\n");
523 return -ENOMEM;
524 }
525
526 ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
539 if (!ctrl) { 527 if (!ctrl) {
540 dev_err(jrdev, "of_iomap() failed\n"); 528 dev_err(jrdev, "devm_ioremap() failed\n");
541 return -ENOMEM; 529 return -ENOMEM;
542 } 530 }
543 531
544 jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; 532 jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
545 533
546 if (sizeof(dma_addr_t) == sizeof(u64)) { 534 error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
547 if (caam_dpaa2)
548 error = dma_set_mask_and_coherent(jrdev,
549 DMA_BIT_MASK(49));
550 else if (of_device_is_compatible(nprop,
551 "fsl,sec-v5.0-job-ring"))
552 error = dma_set_mask_and_coherent(jrdev,
553 DMA_BIT_MASK(40));
554 else
555 error = dma_set_mask_and_coherent(jrdev,
556 DMA_BIT_MASK(36));
557 } else {
558 error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
559 }
560 if (error) { 535 if (error) {
561 dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", 536 dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
562 error); 537 error);
563 iounmap(ctrl);
564 return error; 538 return error;
565 } 539 }
566 540
567 /* Identify the interrupt */ 541 /* Identify the interrupt */
568 jrpriv->irq = irq_of_parse_and_map(nprop, 0); 542 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
543 if (!jrpriv->irq) {
544 dev_err(jrdev, "irq_of_parse_and_map failed\n");
545 return -EINVAL;
546 }
547
548 error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
549 (void *)(unsigned long)jrpriv->irq);
550 if (error)
551 return error;
569 552
570 /* Now do the platform independent part */ 553 /* Now do the platform independent part */
571 error = caam_jr_init(jrdev); /* now turn on hardware */ 554 error = caam_jr_init(jrdev); /* now turn on hardware */
572 if (error) { 555 if (error)
573 irq_dispose_mapping(jrpriv->irq);
574 iounmap(ctrl);
575 return error; 556 return error;
576 }
577 557
578 jrpriv->dev = jrdev; 558 jrpriv->dev = jrdev;
579 spin_lock(&driver_data.jr_alloc_lock); 559 spin_lock(&driver_data.jr_alloc_lock);
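
caam_jr_probe()/caam_jr_init() move to managed resources - devm_ioremap(), devm_request_irq(), dmam_alloc_coherent() and devm_kcalloc() - so the error labels that used to free rings, IRQs and mappings by hand disappear and caam_jr_shutdown() only resets the ring and kills the tasklet. The one resource devres cannot manage directly, the IRQ mapping from irq_of_parse_and_map(), is released through a devres action; a reduced sketch of that part (names are illustrative):

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

static void example_irq_dispose(void *data)
{
        irq_dispose_mapping((unsigned long)data);
}

static int example_map_jr_irq(struct device *dev, struct device_node *np,
                              unsigned int *irq_out)
{
        unsigned int irq = irq_of_parse_and_map(np, 0);

        if (!irq)
                return -EINVAL;

        *irq_out = irq;
        /* dispose of the mapping automatically on probe failure or removal */
        return devm_add_action_or_reset(dev, example_irq_dispose,
                                        (void *)(unsigned long)irq);
}
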
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 48dd3536060d..5a851ddc48fb 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -15,13 +15,14 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
15 void *context) 15 void *context)
16{ 16{
17 struct split_key_result *res = context; 17 struct split_key_result *res = context;
18 int ecode = 0;
18 19
19 dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 20 dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20 21
21 if (err) 22 if (err)
22 caam_jr_strstatus(dev, err); 23 ecode = caam_jr_strstatus(dev, err);
23 24
24 res->err = err; 25 res->err = ecode;
25 26
26 complete(&res->completion); 27 complete(&res->completion);
27} 28}
@@ -47,18 +48,20 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
47 u32 *desc; 48 u32 *desc;
48 struct split_key_result result; 49 struct split_key_result result;
49 dma_addr_t dma_addr; 50 dma_addr_t dma_addr;
51 unsigned int local_max;
50 int ret = -ENOMEM; 52 int ret = -ENOMEM;
51 53
52 adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); 54 adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
53 adata->keylen_pad = split_key_pad_len(adata->algtype & 55 adata->keylen_pad = split_key_pad_len(adata->algtype &
54 OP_ALG_ALGSEL_MASK); 56 OP_ALG_ALGSEL_MASK);
57 local_max = max(keylen, adata->keylen_pad);
55 58
56 dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", 59 dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
57 adata->keylen, adata->keylen_pad); 60 adata->keylen, adata->keylen_pad);
58 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", 61 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
59 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); 62 DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
60 63
61 if (adata->keylen_pad > max_keylen) 64 if (local_max > max_keylen)
62 return -EINVAL; 65 return -EINVAL;
63 66
64 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 67 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -69,8 +72,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
69 72
70 memcpy(key_out, key_in, keylen); 73 memcpy(key_out, key_in, keylen);
71 74
72 dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad, 75 dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL);
73 DMA_BIDIRECTIONAL);
74 if (dma_mapping_error(jrdev, dma_addr)) { 76 if (dma_mapping_error(jrdev, dma_addr)) {
75 dev_err(jrdev, "unable to map key memory\n"); 77 dev_err(jrdev, "unable to map key memory\n");
76 goto out_free; 78 goto out_free;
@@ -116,7 +118,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
116 adata->keylen_pad, 1); 118 adata->keylen_pad, 1);
117 } 119 }
118 120
119 dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); 121 dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL);
120out_free: 122out_free:
121 kfree(desc); 123 kfree(desc);
122 return ret; 124 return ret;
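
gen_split_key() first memcpy()s keylen bytes of the caller's key into key_out, then lets the CAAM overwrite it with keylen_pad bytes of split key; since either value can be the larger one (a user key can exceed the padded split-key length), both the bounds check and the DMA mapping now use max(keylen, keylen_pad) rather than keylen_pad alone. A condensed sketch of that sizing logic (the function name is hypothetical):

/* Condensed sketch of the buffer sizing now used by gen_split_key() */
static int example_map_split_key(struct device *jrdev, u8 *key_out,
                                 const u8 *key_in, unsigned int keylen,
                                 unsigned int keylen_pad, unsigned int max_keylen,
                                 dma_addr_t *dma_out)
{
        unsigned int local_max = max(keylen, keylen_pad);

        if (local_max > max_keylen)     /* must still fit the caller's buffer */
                return -EINVAL;

        memcpy(key_out, key_in, keylen);        /* CPU writes keylen bytes ...    */
        *dma_out = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL);
                                                /* ... hardware writes keylen_pad */
        return dma_mapping_error(jrdev, *dma_out) ? -ENOMEM : 0;
}
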
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 810f0bef0652..68c1fd5dee5d 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -512,7 +512,9 @@ struct rsa_pub_pdb {
512 dma_addr_t n_dma; 512 dma_addr_t n_dma;
513 dma_addr_t e_dma; 513 dma_addr_t e_dma;
514 u32 f_len; 514 u32 f_len;
515} __packed; 515};
516
517#define SIZEOF_RSA_PUB_PDB (2 * sizeof(u32) + 4 * caam_ptr_sz)
516 518
517/** 519/**
518 * RSA Decrypt PDB - Private Key Form #1 520 * RSA Decrypt PDB - Private Key Form #1
@@ -528,7 +530,9 @@ struct rsa_priv_f1_pdb {
528 dma_addr_t f_dma; 530 dma_addr_t f_dma;
529 dma_addr_t n_dma; 531 dma_addr_t n_dma;
530 dma_addr_t d_dma; 532 dma_addr_t d_dma;
531} __packed; 533};
534
535#define SIZEOF_RSA_PRIV_F1_PDB (sizeof(u32) + 4 * caam_ptr_sz)
532 536
533/** 537/**
534 * RSA Decrypt PDB - Private Key Form #2 538 * RSA Decrypt PDB - Private Key Form #2
@@ -554,7 +558,9 @@ struct rsa_priv_f2_pdb {
554 dma_addr_t tmp1_dma; 558 dma_addr_t tmp1_dma;
555 dma_addr_t tmp2_dma; 559 dma_addr_t tmp2_dma;
556 u32 p_q_len; 560 u32 p_q_len;
557} __packed; 561};
562
563#define SIZEOF_RSA_PRIV_F2_PDB (2 * sizeof(u32) + 7 * caam_ptr_sz)
558 564
559/** 565/**
560 * RSA Decrypt PDB - Private Key Form #3 566 * RSA Decrypt PDB - Private Key Form #3
@@ -586,6 +592,8 @@ struct rsa_priv_f3_pdb {
586 dma_addr_t tmp1_dma; 592 dma_addr_t tmp1_dma;
587 dma_addr_t tmp2_dma; 593 dma_addr_t tmp2_dma;
588 u32 p_q_len; 594 u32 p_q_len;
589} __packed; 595};
596
597#define SIZEOF_RSA_PRIV_F3_PDB (2 * sizeof(u32) + 9 * caam_ptr_sz)
590 598
591#endif 599#endif
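
With the __packed attribute gone and the pointer width decided at runtime, sizeof(struct rsa_*_pdb) no longer matches what the descriptor actually carries; the SIZEOF_* macros instead count the 32-bit fields and pointer fields explicitly and scale the latter by caam_ptr_sz. A small self-contained check of the arithmetic for the public-key PDB (sgf and f_len are the two u32 fields, f/g/n/e the four pointers):

#include <stdio.h>

/* mirrors SIZEOF_RSA_PUB_PDB above, with caam_ptr_sz passed in as a parameter */
static unsigned int rsa_pub_pdb_size(unsigned int ptr_sz)
{
        return 2 * sizeof(unsigned int) + 4 * ptr_sz;   /* sgf + f_len + 4 pointers */
}

int main(void)
{
        printf("RSA pub PDB: %u bytes with 32-bit pointers, %u bytes with 64-bit pointers\n",
               rsa_pub_pdb_size(4), rsa_pub_pdb_size(8));
        return 0;
}
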
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
index 2a8d87ea94bf..0d5ee762e036 100644
--- a/drivers/crypto/caam/pkc_desc.c
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -13,7 +13,7 @@
13/* Descriptor for RSA Public operation */ 13/* Descriptor for RSA Public operation */
14void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) 14void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
15{ 15{
16 init_job_desc_pdb(desc, 0, sizeof(*pdb)); 16 init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB);
17 append_cmd(desc, pdb->sgf); 17 append_cmd(desc, pdb->sgf);
18 append_ptr(desc, pdb->f_dma); 18 append_ptr(desc, pdb->f_dma);
19 append_ptr(desc, pdb->g_dma); 19 append_ptr(desc, pdb->g_dma);
@@ -26,7 +26,7 @@ void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
26/* Descriptor for RSA Private operation - Private Key Form #1 */ 26/* Descriptor for RSA Private operation - Private Key Form #1 */
27void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) 27void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
28{ 28{
29 init_job_desc_pdb(desc, 0, sizeof(*pdb)); 29 init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB);
30 append_cmd(desc, pdb->sgf); 30 append_cmd(desc, pdb->sgf);
31 append_ptr(desc, pdb->g_dma); 31 append_ptr(desc, pdb->g_dma);
32 append_ptr(desc, pdb->f_dma); 32 append_ptr(desc, pdb->f_dma);
@@ -39,7 +39,7 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
39/* Descriptor for RSA Private operation - Private Key Form #2 */ 39/* Descriptor for RSA Private operation - Private Key Form #2 */
40void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) 40void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
41{ 41{
42 init_job_desc_pdb(desc, 0, sizeof(*pdb)); 42 init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB);
43 append_cmd(desc, pdb->sgf); 43 append_cmd(desc, pdb->sgf);
44 append_ptr(desc, pdb->g_dma); 44 append_ptr(desc, pdb->g_dma);
45 append_ptr(desc, pdb->f_dma); 45 append_ptr(desc, pdb->f_dma);
@@ -56,7 +56,7 @@ void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
56/* Descriptor for RSA Private operation - Private Key Form #3 */ 56/* Descriptor for RSA Private operation - Private Key Form #3 */
57void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb) 57void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
58{ 58{
59 init_job_desc_pdb(desc, 0, sizeof(*pdb)); 59 init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB);
60 append_cmd(desc, pdb->sgf); 60 append_cmd(desc, pdb->sgf);
61 append_ptr(desc, pdb->g_dma); 61 append_ptr(desc, pdb->g_dma);
62 append_ptr(desc, pdb->f_dma); 62 append_ptr(desc, pdb->f_dma);
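The pdb.h and pkc_desc.c hunks above stop deriving the PDB length from sizeof(*pdb): once the structs lose __packed and the CAAM pointer width is decided at run time (the same series adds caam_ptr_sz), the host struct size need not match the on-device PDB layout, so the SIZEOF_* macros compute it from caam_ptr_sz instead. A minimal user-space sketch of that arithmetic (illustrative values only, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the pointer width the driver determines at probe time */
	static size_t caam_ptr_sz;

	/* mirrors SIZEOF_RSA_PUB_PDB: sgf + f_len (two 32-bit words) + 4 pointers */
	#define SIZEOF_RSA_PUB_PDB (2 * sizeof(uint32_t) + 4 * caam_ptr_sz)

	int main(void)
	{
		caam_ptr_sz = sizeof(uint32_t);	/* 32-bit CAAM pointers */
		printf("PDB length, 32-bit pointers: %zu bytes\n", SIZEOF_RSA_PUB_PDB);	/* 24 */

		caam_ptr_sz = sizeof(uint64_t);	/* 64-bit CAAM pointers */
		printf("PDB length, 64-bit pointers: %zu bytes\n", SIZEOF_RSA_PUB_PDB);	/* 40 */
		return 0;
	}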
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 0fe618e3804a..378f627e1d64 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -163,7 +163,10 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
163 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), 163 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
164 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); 164 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
165 165
166 drv_req->cbk(drv_req, -EIO); 166 if (fd->status)
167 drv_req->cbk(drv_req, be32_to_cpu(fd->status));
168 else
169 drv_req->cbk(drv_req, JRSTA_SSRC_QI);
167} 170}
168 171
169static struct qman_fq *create_caam_req_fq(struct device *qidev, 172static struct qman_fq *create_caam_req_fq(struct device *qidev,
@@ -574,8 +577,9 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
574 577
575 if (ssrc != JRSTA_SSRC_CCB_ERROR || 578 if (ssrc != JRSTA_SSRC_CCB_ERROR ||
576 err_id != JRSTA_CCBERR_ERRID_ICVCHK) 579 err_id != JRSTA_CCBERR_ERRID_ICVCHK)
577 dev_err(qidev, "Error: %#x in CAAM response FD\n", 580 dev_err_ratelimited(qidev,
578 status); 581 "Error: %#x in CAAM response FD\n",
582 status);
579 } 583 }
580 584
581 if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { 585 if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index f93c9c7ed430..db0549549e3b 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -14,32 +14,6 @@
14#include "desc.h" 14#include "desc.h"
15#include "desc_constr.h" 15#include "desc_constr.h"
16 16
17/*
18 * CAAM hardware constructs a job descriptor which points to a shared descriptor
19 * (as pointed by context_a of to-CAAM FQ).
20 * When the job descriptor is executed by DECO, the whole job descriptor
21 * together with shared descriptor gets loaded in DECO buffer, which is
22 * 64 words (each 32-bit) long.
23 *
24 * The job descriptor constructed by CAAM hardware has the following layout:
25 *
26 * HEADER (1 word)
27 * Shdesc ptr (1 or 2 words)
28 * SEQ_OUT_PTR (1 word)
29 * Out ptr (1 or 2 words)
30 * Out length (1 word)
31 * SEQ_IN_PTR (1 word)
32 * In ptr (1 or 2 words)
33 * In length (1 word)
34 *
35 * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
36 *
37 * Apart from shdesc contents, the total number of words that get loaded in DECO
38 * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
39 * storing shared descriptor.
40 */
41#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
42
43/* Length of a single buffer in the QI driver memory cache */ 17/* Length of a single buffer in the QI driver memory cache */
44#define CAAM_QI_MEMCACHE_SIZE 768 18#define CAAM_QI_MEMCACHE_SIZE 768
45 19
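The block comment removed from qi.h above explained how MAX_SDLEN is derived: the DECO buffer holds 64 32-bit words, and the hardware-built job descriptor occupies 8 or 11 of them depending on pointer width, leaving the remainder for the shared descriptor. A worked version of that computation, with the buffer and command sizes assumed from the removed comment rather than taken from this tree's headers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int deco_bytes = 64 * 4;	/* 64-word DECO buffer, 32-bit words */
		unsigned int cmd_sz = 4;		/* one descriptor command word */
		unsigned int job_io_64 = 11 * cmd_sz;	/* 2-word pointers: 11-word job desc */
		unsigned int job_io_32 = 8 * cmd_sz;	/* 1-word pointers:  8-word job desc */

		printf("max shared desc, 64-bit pointers: %u words\n",
		       (deco_bytes - job_io_64) / cmd_sz);	/* 53 */
		printf("max shared desc, 32-bit pointers: %u words\n",
		       (deco_bytes - job_io_32) / cmd_sz);	/* 56 */
		return 0;
	}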
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 8591914d5c51..05127b70527d 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/io-64-nonatomic-hi-lo.h>
15 16
16/* 17/*
17 * Architecture-specific register access methods 18 * Architecture-specific register access methods
@@ -70,6 +71,7 @@
70 71
71extern bool caam_little_end; 72extern bool caam_little_end;
72extern bool caam_imx; 73extern bool caam_imx;
74extern size_t caam_ptr_sz;
73 75
74#define caam_to_cpu(len) \ 76#define caam_to_cpu(len) \
75static inline u##len caam##len ## _to_cpu(u##len val) \ 77static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -137,46 +139,38 @@ static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
137 * base + 0x0000 : least-significant 32 bits 139 * base + 0x0000 : least-significant 32 bits
138 * base + 0x0004 : most-significant 32 bits 140 * base + 0x0004 : most-significant 32 bits
139 */ 141 */
140#ifdef CONFIG_64BIT
141static inline void wr_reg64(void __iomem *reg, u64 data) 142static inline void wr_reg64(void __iomem *reg, u64 data)
142{ 143{
143 if (caam_little_end) 144 if (caam_little_end) {
144 iowrite64(data, reg); 145 if (caam_imx) {
145 else 146 iowrite32(data >> 32, (u32 __iomem *)(reg));
147 iowrite32(data, (u32 __iomem *)(reg) + 1);
148 } else {
149 iowrite64(data, reg);
150 }
151 } else {
146 iowrite64be(data, reg); 152 iowrite64be(data, reg);
153 }
147} 154}
148 155
149static inline u64 rd_reg64(void __iomem *reg) 156static inline u64 rd_reg64(void __iomem *reg)
150{ 157{
151 if (caam_little_end) 158 if (caam_little_end) {
152 return ioread64(reg); 159 if (caam_imx) {
153 else 160 u32 low, high;
154 return ioread64be(reg);
155}
156 161
157#else /* CONFIG_64BIT */ 162 high = ioread32(reg);
158static inline void wr_reg64(void __iomem *reg, u64 data) 163 low = ioread32(reg + sizeof(u32));
159{ 164
160 if (!caam_imx && caam_little_end) { 165 return low + ((u64)high << 32);
161 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); 166 } else {
162 wr_reg32((u32 __iomem *)(reg), data); 167 return ioread64(reg);
168 }
163 } else { 169 } else {
164 wr_reg32((u32 __iomem *)(reg), data >> 32); 170 return ioread64be(reg);
165 wr_reg32((u32 __iomem *)(reg) + 1, data);
166 } 171 }
167} 172}
168 173
169static inline u64 rd_reg64(void __iomem *reg)
170{
171 if (!caam_imx && caam_little_end)
172 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
173 (u64)rd_reg32((u32 __iomem *)(reg)));
174
175 return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
176 (u64)rd_reg32((u32 __iomem *)(reg) + 1));
177}
178#endif /* CONFIG_64BIT */
179
180static inline u64 cpu_to_caam_dma64(dma_addr_t value) 174static inline u64 cpu_to_caam_dma64(dma_addr_t value)
181{ 175{
182 if (caam_imx) 176 if (caam_imx)
@@ -195,22 +189,89 @@ static inline u64 caam_dma64_to_cpu(u64 value)
195 return caam64_to_cpu(value); 189 return caam64_to_cpu(value);
196} 190}
197 191
198#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 192static inline u64 cpu_to_caam_dma(u64 value)
199#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) 193{
200#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) 194 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
201#else 195 caam_ptr_sz == sizeof(u64))
202#define cpu_to_caam_dma(value) cpu_to_caam32(value) 196 return cpu_to_caam_dma64(value);
203#define caam_dma_to_cpu(value) caam32_to_cpu(value) 197 else
204#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ 198 return cpu_to_caam32(value);
199}
200
201static inline u64 caam_dma_to_cpu(u64 value)
202{
203 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
204 caam_ptr_sz == sizeof(u64))
205 return caam_dma64_to_cpu(value);
206 else
207 return caam32_to_cpu(value);
208}
205 209
206/* 210/*
207 * jr_outentry 211 * jr_outentry
208 * Represents each entry in a JobR output ring 212 * Represents each entry in a JobR output ring
209 */ 213 */
210struct jr_outentry { 214
211 dma_addr_t desc;/* Pointer to completed descriptor */ 215static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc,
212 u32 jrstatus; /* Status for completed descriptor */ 216 u32 *jrstatus)
213} __packed; 217{
218
219 if (caam_ptr_sz == sizeof(u32)) {
220 struct {
221 u32 desc;
222 u32 jrstatus;
223 } __packed *outentry = outring;
224
225 *desc = outentry[hw_idx].desc;
226 *jrstatus = outentry[hw_idx].jrstatus;
227 } else {
228 struct {
229 dma_addr_t desc;/* Pointer to completed descriptor */
230 u32 jrstatus; /* Status for completed descriptor */
231 } __packed *outentry = outring;
232
233 *desc = outentry[hw_idx].desc;
234 *jrstatus = outentry[hw_idx].jrstatus;
235 }
236}
237
238#define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32))
239
240static inline dma_addr_t jr_outentry_desc(void *outring, int hw_idx)
241{
242 dma_addr_t desc;
243 u32 unused;
244
245 jr_outentry_get(outring, hw_idx, &desc, &unused);
246
247 return desc;
248}
249
250static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx)
251{
252 dma_addr_t unused;
253 u32 jrstatus;
254
255 jr_outentry_get(outring, hw_idx, &unused, &jrstatus);
256
257 return jrstatus;
258}
259
260static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val)
261{
262 if (caam_ptr_sz == sizeof(u32)) {
263 u32 *inpentry = inpring;
264
265 inpentry[hw_idx] = val;
266 } else {
267 dma_addr_t *inpentry = inpring;
268
269 inpentry[hw_idx] = val;
270 }
271}
272
273#define SIZEOF_JR_INPENTRY caam_ptr_sz
274
214 275
215/* Version registers (Era 10+) e80-eff */ 276/* Version registers (Era 10+) e80-eff */
216struct version_regs { 277struct version_regs {
@@ -338,6 +399,7 @@ struct caam_perfmon {
338 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/ 399 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
339#define CTPR_MS_QI_SHIFT 25 400#define CTPR_MS_QI_SHIFT 25
340#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) 401#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
402#define CTPR_MS_PS BIT(17)
341#define CTPR_MS_DPAA2 BIT(13) 403#define CTPR_MS_DPAA2 BIT(13)
342#define CTPR_MS_VIRT_EN_INCL 0x00000001 404#define CTPR_MS_VIRT_EN_INCL 0x00000001
343#define CTPR_MS_VIRT_EN_POR 0x00000002 405#define CTPR_MS_VIRT_EN_POR 0x00000002
@@ -641,6 +703,7 @@ struct caam_job_ring {
641#define JRSTA_SSRC_CCB_ERROR 0x20000000 703#define JRSTA_SSRC_CCB_ERROR 0x20000000
642#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 704#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
643#define JRSTA_SSRC_DECO 0x40000000 705#define JRSTA_SSRC_DECO 0x40000000
706#define JRSTA_SSRC_QI 0x50000000
644#define JRSTA_SSRC_JRERROR 0x60000000 707#define JRSTA_SSRC_JRERROR 0x60000000
645#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 708#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
646 709
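With struct jr_outentry gone, callers can no longer index the output ring as an array of fixed-size structs; the jr_outentry_*() accessors above select a 32-bit or 64-bit entry layout from caam_ptr_sz at run time, and SIZEOF_JR_OUTENTRY/SIZEOF_JR_INPENTRY replace sizeof() when sizing the rings. A hedged sketch of a completion loop consuming entries through the accessors (the function name and ring-depth parameter are illustrative, not the driver's jr.c code):

	static void drain_output_ring(struct device *dev, void *outring,
				      u32 head, u32 count, u32 depth)
	{
		u32 i;

		for (i = 0; i < count; i++) {
			u32 hw_idx = (head + i) & (depth - 1);	/* depth assumed power of two */
			dma_addr_t desc = caam_dma_to_cpu(jr_outentry_desc(outring, hw_idx));
			u32 status = caam32_to_cpu(jr_outentry_jrstatus(outring, hw_idx));

			dev_dbg(dev, "completed desc %pad, status 0x%08x\n", &desc, status);
			/* look up and complete the request that submitted 'desc' ... */
		}
	}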
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index ff3cb1f8f2b6..596ce28b957d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -7,7 +7,7 @@
7#include <crypto/aes.h> 7#include <crypto/aes.h>
8#include <crypto/algapi.h> 8#include <crypto/algapi.h>
9#include <crypto/authenc.h> 9#include <crypto/authenc.h>
10#include <crypto/des.h> 10#include <crypto/internal/des.h>
11#include <crypto/xts.h> 11#include <crypto/xts.h>
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13#include <linux/err.h> 13#include <linux/err.h>
@@ -322,31 +322,15 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
322static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 322static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
323 u32 keylen) 323 u32 keylen)
324{ 324{
325 u32 flags = crypto_ablkcipher_get_flags(cipher); 325 return verify_ablkcipher_des3_key(cipher, key) ?:
326 int err; 326 cvm_setkey(cipher, key, keylen, DES3_CBC);
327
328 err = __des3_verify_key(&flags, key);
329 if (unlikely(err)) {
330 crypto_ablkcipher_set_flags(cipher, flags);
331 return err;
332 }
333
334 return cvm_setkey(cipher, key, keylen, DES3_CBC);
335} 327}
336 328
337static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 329static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
338 u32 keylen) 330 u32 keylen)
339{ 331{
340 u32 flags = crypto_ablkcipher_get_flags(cipher); 332 return verify_ablkcipher_des3_key(cipher, key) ?:
341 int err; 333 cvm_setkey(cipher, key, keylen, DES3_ECB);
342
343 err = __des3_verify_key(&flags, key);
344 if (unlikely(err)) {
345 crypto_ablkcipher_set_flags(cipher, flags);
346 return err;
347 }
348
349 return cvm_setkey(cipher, key, keylen, DES3_ECB);
350} 334}
351 335
352static int cvm_enc_dec_init(struct crypto_tfm *tfm) 336static int cvm_enc_dec_init(struct crypto_tfm *tfm)
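The two setkey conversions above rely on the new verify_ablkcipher_des3_key() helper from <crypto/internal/des.h> together with GCC's two-operand conditional: the helper's non-zero error is returned directly, otherwise the expression falls through to the real setkey. Written out long-hand, a sketch equivalent to the CBC case above (not additional driver code):

	static int cvm_cbc_des3_setkey_longhand(struct crypto_ablkcipher *cipher,
						const u8 *key, u32 keylen)
	{
		int err = verify_ablkcipher_des3_key(cipher, key);

		if (err)
			return err;	/* weak or invalid 3DES key */

		return cvm_setkey(cipher, key, keylen, DES3_CBC);
	}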
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig
index dab162af41b8..7b1e751bb9cd 100644
--- a/drivers/crypto/cavium/nitrox/Kconfig
+++ b/drivers/crypto/cavium/nitrox/Kconfig
@@ -6,7 +6,7 @@ config CRYPTO_DEV_NITROX
6 tristate 6 tristate
7 select CRYPTO_BLKCIPHER 7 select CRYPTO_BLKCIPHER
8 select CRYPTO_AES 8 select CRYPTO_AES
9 select CRYPTO_DES 9 select CRYPTO_LIB_DES
10 select FW_LOADER 10 select FW_LOADER
11 11
12config CRYPTO_DEV_NITROX_CNN55XX 12config CRYPTO_DEV_NITROX_CNN55XX
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index a2a452642b38..1c8715ae0488 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -40,9 +40,77 @@
40#define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000)) 40#define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000))
41 41
42/* UCD registers */ 42/* UCD registers */
43#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000))
44#define UCD_AE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0008 + ((_i) * 0x800))
43#define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010 45#define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010
44#define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20)) 46#define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20))
45#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) 47#define UCD_SE_CNTX(_i) (0x12C0040 + ((_i) * 0x1000))
48#define UCD_AE_CNTX(_i) (0x12C0048 + ((_i) * 0x800))
49
50/* AQM registers */
51#define AQM_CTL 0x1300000
52#define AQM_INT 0x1300008
53#define AQM_DBELL_OVF_LO 0x1300010
54#define AQM_DBELL_OVF_HI 0x1300018
55#define AQM_DBELL_OVF_LO_W1S 0x1300020
56#define AQM_DBELL_OVF_LO_ENA_W1C 0x1300028
57#define AQM_DBELL_OVF_LO_ENA_W1S 0x1300030
58#define AQM_DBELL_OVF_HI_W1S 0x1300038
59#define AQM_DBELL_OVF_HI_ENA_W1C 0x1300040
60#define AQM_DBELL_OVF_HI_ENA_W1S 0x1300048
61#define AQM_DMA_RD_ERR_LO 0x1300050
62#define AQM_DMA_RD_ERR_HI 0x1300058
63#define AQM_DMA_RD_ERR_LO_W1S 0x1300060
64#define AQM_DMA_RD_ERR_LO_ENA_W1C 0x1300068
65#define AQM_DMA_RD_ERR_LO_ENA_W1S 0x1300070
66#define AQM_DMA_RD_ERR_HI_W1S 0x1300078
67#define AQM_DMA_RD_ERR_HI_ENA_W1C 0x1300080
68#define AQM_DMA_RD_ERR_HI_ENA_W1S 0x1300088
69#define AQM_EXEC_NA_LO 0x1300090
70#define AQM_EXEC_NA_HI 0x1300098
71#define AQM_EXEC_NA_LO_W1S 0x13000A0
72#define AQM_EXEC_NA_LO_ENA_W1C 0x13000A8
73#define AQM_EXEC_NA_LO_ENA_W1S 0x13000B0
74#define AQM_EXEC_NA_HI_W1S 0x13000B8
75#define AQM_EXEC_NA_HI_ENA_W1C 0x13000C0
76#define AQM_EXEC_NA_HI_ENA_W1S 0x13000C8
77#define AQM_EXEC_ERR_LO 0x13000D0
78#define AQM_EXEC_ERR_HI 0x13000D8
79#define AQM_EXEC_ERR_LO_W1S 0x13000E0
80#define AQM_EXEC_ERR_LO_ENA_W1C 0x13000E8
81#define AQM_EXEC_ERR_LO_ENA_W1S 0x13000F0
82#define AQM_EXEC_ERR_HI_W1S 0x13000F8
83#define AQM_EXEC_ERR_HI_ENA_W1C 0x1300100
84#define AQM_EXEC_ERR_HI_ENA_W1S 0x1300108
85#define AQM_ECC_INT 0x1300110
86#define AQM_ECC_INT_W1S 0x1300118
87#define AQM_ECC_INT_ENA_W1C 0x1300120
88#define AQM_ECC_INT_ENA_W1S 0x1300128
89#define AQM_ECC_CTL 0x1300130
90#define AQM_BIST_STATUS 0x1300138
91#define AQM_CMD_INF_THRX(x) (0x1300400 + ((x) * 0x8))
92#define AQM_CMD_INFX(x) (0x1300800 + ((x) * 0x8))
93#define AQM_GRP_EXECMSK_LOX(x) (0x1300C00 + ((x) * 0x10))
94#define AQM_GRP_EXECMSK_HIX(x) (0x1300C08 + ((x) * 0x10))
95#define AQM_ACTIVITY_STAT_LO 0x1300C80
96#define AQM_ACTIVITY_STAT_HI 0x1300C88
97#define AQM_Q_CMD_PROCX(x) (0x1301000 + ((x) * 0x8))
98#define AQM_PERF_CTL_LO 0x1301400
99#define AQM_PERF_CTL_HI 0x1301408
100#define AQM_PERF_CNT 0x1301410
101
102#define AQMQ_DRBLX(x) (0x20000 + ((x) * 0x40000))
103#define AQMQ_QSZX(x) (0x20008 + ((x) * 0x40000))
104#define AQMQ_BADRX(x) (0x20010 + ((x) * 0x40000))
105#define AQMQ_NXT_CMDX(x) (0x20018 + ((x) * 0x40000))
106#define AQMQ_CMD_CNTX(x) (0x20020 + ((x) * 0x40000))
107#define AQMQ_CMP_THRX(x) (0x20028 + ((x) * 0x40000))
108#define AQMQ_CMP_CNTX(x) (0x20030 + ((x) * 0x40000))
109#define AQMQ_TIM_LDX(x) (0x20038 + ((x) * 0x40000))
110#define AQMQ_TIMERX(x) (0x20040 + ((x) * 0x40000))
111#define AQMQ_ENX(x) (0x20048 + ((x) * 0x40000))
112#define AQMQ_ACTIVITY_STATX(x) (0x20050 + ((x) * 0x40000))
113#define AQM_VF_CMP_STATX(x) (0x28000 + ((x) * 0x40000))
46 114
47/* NPS core registers */ 115/* NPS core registers */
48#define NPS_CORE_GBL_VFCFG 0x1000000 116#define NPS_CORE_GBL_VFCFG 0x1000000
@@ -135,6 +203,171 @@
135#define PEM0_INT 0x1080428 203#define PEM0_INT 0x1080428
136 204
137/** 205/**
206 * struct ucd_core_eid_ucode_block_num - Core Eid to Ucode Blk Mapping Registers
207 * @ucode_len: Ucode length identifier 32KB or 64KB
208 * @ucode_blk: Ucode Block Number
209 */
210union ucd_core_eid_ucode_block_num {
211 u64 value;
212 struct {
213#if (defined(__BIG_ENDIAN_BITFIELD))
214 u64 raz_4_63 : 60;
215 u64 ucode_len : 1;
216 u64 ucode_blk : 3;
217#else
218 u64 ucode_blk : 3;
219 u64 ucode_len : 1;
220 u64 raz_4_63 : 60;
221#endif
222 };
223};
224
225/**
226 * struct aqm_grp_execmsk_lo - Available AE engines for the group
227 * @exec_0_to_39: AE engines 0 to 39 status
228 */
229union aqm_grp_execmsk_lo {
230 u64 value;
231 struct {
232#if (defined(__BIG_ENDIAN_BITFIELD))
233 u64 raz_40_63 : 24;
234 u64 exec_0_to_39 : 40;
235#else
236 u64 exec_0_to_39 : 40;
237 u64 raz_40_63 : 24;
238#endif
239 };
240};
241
242/**
243 * struct aqm_grp_execmsk_hi - Available AE engines for the group
244 * @exec_40_to_79: AE engines 40 to 79 status
245 */
246union aqm_grp_execmsk_hi {
247 u64 value;
248 struct {
249#if (defined(__BIG_ENDIAN_BITFIELD))
250 u64 raz_40_63 : 24;
251 u64 exec_40_to_79 : 40;
252#else
253 u64 exec_40_to_79 : 40;
254 u64 raz_40_63 : 24;
255#endif
256 };
257};
258
259/**
260 * struct aqmq_drbl - AQM Queue Doorbell Counter Registers
261 * @dbell_count: Doorbell Counter
262 */
263union aqmq_drbl {
264 u64 value;
265 struct {
266#if (defined(__BIG_ENDIAN_BITFIELD))
267 u64 raz_32_63 : 32;
268 u64 dbell_count : 32;
269#else
270 u64 dbell_count : 32;
271 u64 raz_32_63 : 32;
272#endif
273 };
274};
275
276/**
277 * struct aqmq_qsz - AQM Queue Host Queue Size Registers
278 * @host_queue_size: Size, in number of 'aqmq_command_s' commands,
279 * of the Host Ring.
280 */
281union aqmq_qsz {
282 u64 value;
283 struct {
284#if (defined(__BIG_ENDIAN_BITFIELD))
285 u64 raz_32_63 : 32;
286 u64 host_queue_size : 32;
287#else
288 u64 host_queue_size : 32;
289 u64 raz_32_63 : 32;
290#endif
291 };
292};
293
294/**
295 * struct aqmq_cmp_thr - AQM Queue Commands Completed Threshold Registers
296 * @commands_completed_threshold: Count of 'aqmq_command_s' commands executed
297 * by AE engines for which completion interrupt is asserted.
298 */
299union aqmq_cmp_thr {
300 u64 value;
301 struct {
302#if (defined(__BIG_ENDIAN_BITFIELD))
303 u64 raz_32_63 : 32;
304 u64 commands_completed_threshold : 32;
305#else
306 u64 commands_completed_threshold : 32;
307 u64 raz_32_63 : 32;
308#endif
309 };
310};
311
312/**
313 * struct aqmq_cmp_cnt - AQM Queue Commands Completed Count Registers
314 * @resend: Bit to request completion interrupt Resend.
315 * @completion_status: Command completion status of the ring.
316 * @commands_completed_count: Count of 'aqmq_command_s' commands executed by
317 * AE engines.
318 */
319union aqmq_cmp_cnt {
320 u64 value;
321 struct {
322#if (defined(__BIG_ENDIAN_BITFIELD))
323 u64 raz_34_63 : 30;
324 u64 resend : 1;
325 u64 completion_status : 1;
326 u64 commands_completed_count : 32;
327#else
328 u64 commands_completed_count : 32;
329 u64 completion_status : 1;
330 u64 resend : 1;
331 u64 raz_34_63 : 30;
332#endif
333 };
334};
335
336/**
337 * struct aqmq_en - AQM Queue Enable Registers
338 * @queue_status: 1 = AQMQ is enabled, 0 = AQMQ is disabled
339 */
340union aqmq_en {
341 u64 value;
342 struct {
343#if (defined(__BIG_ENDIAN_BITFIELD))
344 u64 raz_1_63 : 63;
345 u64 queue_enable : 1;
346#else
347 u64 queue_enable : 1;
348 u64 raz_1_63 : 63;
349#endif
350 };
351};
352
353/**
354 * struct aqmq_activity_stat - AQM Queue Activity Status Registers
355 * @queue_active: 1 = AQMQ is active, 0 = AQMQ is quiescent
356 */
357union aqmq_activity_stat {
358 u64 value;
359 struct {
360#if (defined(__BIG_ENDIAN_BITFIELD))
361 u64 raz_1_63 : 63;
362 u64 queue_active : 1;
363#else
364 u64 queue_active : 1;
365 u64 raz_1_63 : 63;
366#endif
367 };
368};
369
370/**
138 * struct emu_fuse_map - EMU Fuse Map Registers 371 * struct emu_fuse_map - EMU Fuse Map Registers
139 * @ae_fuse: Fuse settings for AE 19..0 372 * @ae_fuse: Fuse settings for AE 19..0
140 * @se_fuse: Fuse settings for SE 15..0 373 * @se_fuse: Fuse settings for SE 15..0
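Each of the unions above pairs a raw 64-bit register image with endian-guarded bitfields, so callers set named fields and then write .value through nitrox_write_csr(). As a hedged example (the helper name is made up; the encoding matches the 0x8 literal the pre-patch microcode-load path wrote), mapping an SE core to UCD block 0 holding a 32KB image looks like:

	static void example_map_se_core_to_block0(struct nitrox_device *ndev, int core)
	{
		union ucd_core_eid_ucode_block_num reg;

		reg.value = 0ULL;
		reg.ucode_blk = 0;	/* bits <2:0>: UCD block number */
		reg.ucode_len = 1;	/* bit 3 set: 32KB microcode image */

		/* same 0x8 value the old code wrote as a bare literal */
		nitrox_write_csr(ndev, UCD_SE_EID_UCODE_BLOCK_NUMX(core), reg.value);
	}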
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
index 848ec93d4333..16f7d0bd1303 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -9,7 +9,8 @@ static int firmware_show(struct seq_file *s, void *v)
9{ 9{
10 struct nitrox_device *ndev = s->private; 10 struct nitrox_device *ndev = s->private;
11 11
12 seq_printf(s, "Version: %s\n", ndev->hw.fw_name); 12 seq_printf(s, "Version: %s\n", ndev->hw.fw_name[0]);
13 seq_printf(s, "Version: %s\n", ndev->hw.fw_name[1]);
13 return 0; 14 return 0;
14} 15}
15 16
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 0338877b828f..2217a2736c8e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -10,6 +10,10 @@
10#define VERSION_LEN 32 10#define VERSION_LEN 32
11/* Maximum queues in PF mode */ 11/* Maximum queues in PF mode */
12#define MAX_PF_QUEUES 64 12#define MAX_PF_QUEUES 64
13/* Maximum device queues */
14#define MAX_DEV_QUEUES (MAX_PF_QUEUES)
15/* Maximum UCD Blocks */
16#define CNN55XX_MAX_UCD_BLOCKS 8
13 17
14/** 18/**
15 * struct nitrox_cmdq - NITROX command queue 19 * struct nitrox_cmdq - NITROX command queue
@@ -74,7 +78,7 @@ struct nitrox_cmdq {
74 */ 78 */
75struct nitrox_hw { 79struct nitrox_hw {
76 char partname[IFNAMSIZ * 2]; 80 char partname[IFNAMSIZ * 2];
77 char fw_name[VERSION_LEN]; 81 char fw_name[CNN55XX_MAX_UCD_BLOCKS][VERSION_LEN];
78 82
79 int freq; 83 int freq;
80 u16 vendor_id; 84 u16 vendor_id;
@@ -206,6 +210,7 @@ enum vf_mode {
206 * @mode: Device mode PF/VF 210 * @mode: Device mode PF/VF
207 * @ctx_pool: DMA pool for crypto context 211 * @ctx_pool: DMA pool for crypto context
208 * @pkt_inq: Packet input rings 212 * @pkt_inq: Packet input rings
213 * @aqmq: AQM command queues
209 * @qvec: MSI-X queue vectors information 214 * @qvec: MSI-X queue vectors information
210 * @iov: SR-IOV information 215 * @iov: SR-IOV information
211 * @num_vecs: number of MSI-X vectors 216 * @num_vecs: number of MSI-X vectors
@@ -232,6 +237,7 @@ struct nitrox_device {
232 237
233 struct dma_pool *ctx_pool; 238 struct dma_pool *ctx_pool;
234 struct nitrox_cmdq *pkt_inq; 239 struct nitrox_cmdq *pkt_inq;
240 struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;
235 241
236 struct nitrox_q_vector *qvec; 242 struct nitrox_q_vector *qvec;
237 struct nitrox_iov iov; 243 struct nitrox_iov iov;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index 3f0df60267a9..34a2f4f30a7e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -241,12 +241,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
241} 241}
242 242
243/** 243/**
244 * enable_nps_interrupts - enable NPS interrupts 244 * enable_nps_core_interrupts - enable NPS core interrupts
245 * @ndev: NITROX device. 245 * @ndev: NITROX device.
246 * 246 *
247 * This includes NPS core, packet in and slc interrupts. 247 * This includes NPS core interrupts.
248 */ 248 */
249static void enable_nps_interrupts(struct nitrox_device *ndev) 249static void enable_nps_core_interrupts(struct nitrox_device *ndev)
250{ 250{
251 union nps_core_int_ena_w1s core_int; 251 union nps_core_int_ena_w1s core_int;
252 252
@@ -258,18 +258,9 @@ static void enable_nps_interrupts(struct nitrox_device *ndev)
258 core_int.s.npco_dma_malform = 1; 258 core_int.s.npco_dma_malform = 1;
259 core_int.s.host_nps_wr_err = 1; 259 core_int.s.host_nps_wr_err = 1;
260 nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); 260 nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
261
262 /* NPS packet in ring interrupts */
263 nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
264 nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
265 nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
266 /* NPS packet slc port interrupts */
267 nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
268 nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
269 nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
270} 261}
271 262
272void nitrox_config_nps_unit(struct nitrox_device *ndev) 263void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
273{ 264{
274 union nps_core_gbl_vfcfg core_gbl_vfcfg; 265 union nps_core_gbl_vfcfg core_gbl_vfcfg;
275 266
@@ -281,12 +272,149 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
281 core_gbl_vfcfg.s.ilk_disable = 1; 272 core_gbl_vfcfg.s.ilk_disable = 1;
282 core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF; 273 core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
283 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); 274 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
275
276 /* enable nps core interrupts */
277 enable_nps_core_interrupts(ndev);
278}
279
280/**
281 * enable_nps_pkt_interrupts - enable NPS packet interrupts
282 * @ndev: NITROX device.
283 *
284 * This includes NPS packet in and slc interrupts.
285 */
286static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
287{
288 /* NPS packet in ring interrupts */
289 nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
290 nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
291 nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
292 /* NPS packet slc port interrupts */
293 nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
294 nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
295 nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
296}
297
298void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
299{
284 /* config input and solicit ports */ 300 /* config input and solicit ports */
285 nitrox_config_pkt_input_rings(ndev); 301 nitrox_config_pkt_input_rings(ndev);
286 nitrox_config_pkt_solicit_ports(ndev); 302 nitrox_config_pkt_solicit_ports(ndev);
287 303
288 /* enable interrupts */ 304 /* enable nps packet interrupts */
289 enable_nps_interrupts(ndev); 305 enable_nps_pkt_interrupts(ndev);
306}
307
308static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
309{
310 union aqmq_en aqmq_en_reg;
311 union aqmq_activity_stat activity_stat;
312 union aqmq_cmp_cnt cmp_cnt;
313 int max_retries = MAX_CSR_RETRIES;
314 u64 offset;
315
316 /* step 1: disable the queue */
317 offset = AQMQ_ENX(ring);
318 aqmq_en_reg.value = 0;
319 aqmq_en_reg.queue_enable = 0;
320 nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
321
322 /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */
323 usleep_range(100, 150);
324 offset = AQMQ_ACTIVITY_STATX(ring);
325 do {
326 activity_stat.value = nitrox_read_csr(ndev, offset);
327 if (!activity_stat.queue_active)
328 break;
329 udelay(50);
330 } while (max_retries--);
331
332 /* step 3: clear commands completed count */
333 offset = AQMQ_CMP_CNTX(ring);
334 cmp_cnt.value = nitrox_read_csr(ndev, offset);
335 nitrox_write_csr(ndev, offset, cmp_cnt.value);
336 usleep_range(50, 100);
337}
338
339void enable_aqm_ring(struct nitrox_device *ndev, int ring)
340{
341 union aqmq_en aqmq_en_reg;
342 u64 offset;
343
344 offset = AQMQ_ENX(ring);
345 aqmq_en_reg.value = 0;
346 aqmq_en_reg.queue_enable = 1;
347 nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
348 usleep_range(50, 100);
349}
350
351void nitrox_config_aqm_rings(struct nitrox_device *ndev)
352{
353 int ring;
354
355 for (ring = 0; ring < ndev->nr_queues; ring++) {
356 struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
357 union aqmq_drbl drbl;
358 union aqmq_qsz qsize;
359 union aqmq_cmp_thr cmp_thr;
360 u64 offset;
361
362 /* steps 1 - 3 */
363 reset_aqm_ring(ndev, ring);
364
365 /* step 4: clear doorbell count of ring */
366 offset = AQMQ_DRBLX(ring);
367 drbl.value = 0;
368 drbl.dbell_count = 0xFFFFFFFF;
369 nitrox_write_csr(ndev, offset, drbl.value);
370
371 /* step 5: configure host ring details */
372
373 /* set host address for next command of ring */
374 offset = AQMQ_NXT_CMDX(ring);
375 nitrox_write_csr(ndev, offset, 0ULL);
376
377 /* set host address of ring base */
378 offset = AQMQ_BADRX(ring);
379 nitrox_write_csr(ndev, offset, cmdq->dma);
380
381 /* set ring size */
382 offset = AQMQ_QSZX(ring);
383 qsize.value = 0;
384 qsize.host_queue_size = ndev->qlen;
385 nitrox_write_csr(ndev, offset, qsize.value);
386
387 /* set command completion threshold */
388 offset = AQMQ_CMP_THRX(ring);
389 cmp_thr.value = 0;
390 cmp_thr.commands_completed_threshold = 1;
391 nitrox_write_csr(ndev, offset, cmp_thr.value);
392
393 /* step 6: enable the queue */
394 enable_aqm_ring(ndev, ring);
395 }
396}
397
398static void enable_aqm_interrupts(struct nitrox_device *ndev)
399{
400 /* clear interrupt enable bits */
401 nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
402 nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
403 nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
404 nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
405 nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
406 nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
407 nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
408 nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
409}
410
411void nitrox_config_aqm_unit(struct nitrox_device *ndev)
412{
413 /* config aqm command queues */
414 nitrox_config_aqm_rings(ndev);
415
416 /* enable aqm interrupts */
417 enable_aqm_interrupts(ndev);
290} 418}
291 419
292void nitrox_config_pom_unit(struct nitrox_device *ndev) 420void nitrox_config_pom_unit(struct nitrox_device *ndev)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
index d6606418ba38..48b0af039099 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -4,10 +4,13 @@
4 4
5#include "nitrox_dev.h" 5#include "nitrox_dev.h"
6 6
7void nitrox_config_aqm_rings(struct nitrox_device *ndev);
8void nitrox_config_aqm_unit(struct nitrox_device *ndev);
7void nitrox_config_emu_unit(struct nitrox_device *ndev); 9void nitrox_config_emu_unit(struct nitrox_device *ndev);
8void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); 10void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
9void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); 11void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
10void nitrox_config_nps_unit(struct nitrox_device *ndev); 12void nitrox_config_nps_core_unit(struct nitrox_device *ndev);
13void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev);
11void nitrox_config_pom_unit(struct nitrox_device *ndev); 14void nitrox_config_pom_unit(struct nitrox_device *ndev);
12void nitrox_config_rand_unit(struct nitrox_device *ndev); 15void nitrox_config_rand_unit(struct nitrox_device *ndev);
13void nitrox_config_efl_unit(struct nitrox_device *ndev); 16void nitrox_config_efl_unit(struct nitrox_device *ndev);
@@ -15,6 +18,7 @@ void nitrox_config_bmi_unit(struct nitrox_device *ndev);
15void nitrox_config_bmo_unit(struct nitrox_device *ndev); 18void nitrox_config_bmo_unit(struct nitrox_device *ndev);
16void nitrox_config_lbc_unit(struct nitrox_device *ndev); 19void nitrox_config_lbc_unit(struct nitrox_device *ndev);
17void invalidate_lbc(struct nitrox_device *ndev); 20void invalidate_lbc(struct nitrox_device *ndev);
21void enable_aqm_ring(struct nitrox_device *ndev, int qno);
18void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); 22void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
19void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); 23void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
20void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); 24void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4ace9bcd603a..5cbc64b851b9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -19,6 +19,8 @@
19 19
20/* packet input ring alignments */ 20/* packet input ring alignments */
21#define PKTIN_Q_ALIGN_BYTES 16 21#define PKTIN_Q_ALIGN_BYTES 16
22/* AQM Queue input alignments */
23#define AQM_Q_ALIGN_BYTES 32
22 24
23static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) 25static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
24{ 26{
@@ -57,11 +59,15 @@ static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
57 59
58static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) 60static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
59{ 61{
60 struct nitrox_device *ndev = cmdq->ndev; 62 struct nitrox_device *ndev;
63
64 if (!cmdq)
65 return;
61 66
62 if (!cmdq->unalign_base) 67 if (!cmdq->unalign_base)
63 return; 68 return;
64 69
70 ndev = cmdq->ndev;
65 cancel_work_sync(&cmdq->backlog_qflush); 71 cancel_work_sync(&cmdq->backlog_qflush);
66 72
67 dma_free_coherent(DEV(ndev), cmdq->qsize, 73 dma_free_coherent(DEV(ndev), cmdq->qsize,
@@ -78,6 +84,57 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
78 cmdq->instr_size = 0; 84 cmdq->instr_size = 0;
79} 85}
80 86
87static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
88{
89 int i;
90
91 for (i = 0; i < ndev->nr_queues; i++) {
92 nitrox_cmdq_cleanup(ndev->aqmq[i]);
93 kzfree(ndev->aqmq[i]);
94 ndev->aqmq[i] = NULL;
95 }
96}
97
98static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
99{
100 int i, err;
101
102 for (i = 0; i < ndev->nr_queues; i++) {
103 struct nitrox_cmdq *cmdq;
104 u64 offset;
105
106 cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
107 if (!cmdq) {
108 err = -ENOMEM;
109 goto aqmq_fail;
110 }
111
112 cmdq->ndev = ndev;
113 cmdq->qno = i;
114 cmdq->instr_size = sizeof(struct aqmq_command_s);
115
116 /* AQM Queue Doorbell Counter Register Address */
117 offset = AQMQ_DRBLX(i);
118 cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
119 /* AQM Queue Commands Completed Count Register Address */
120 offset = AQMQ_CMD_CNTX(i);
121 cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
122
123 err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
124 if (err) {
125 kzfree(cmdq);
126 goto aqmq_fail;
127 }
128 ndev->aqmq[i] = cmdq;
129 }
130
131 return 0;
132
133aqmq_fail:
134 nitrox_free_aqm_queues(ndev);
135 return err;
136}
137
81static void nitrox_free_pktin_queues(struct nitrox_device *ndev) 138static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
82{ 139{
83 int i; 140 int i;
@@ -222,6 +279,12 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
222 if (err) 279 if (err)
223 destroy_crypto_dma_pool(ndev); 280 destroy_crypto_dma_pool(ndev);
224 281
282 err = nitrox_alloc_aqm_queues(ndev);
283 if (err) {
284 nitrox_free_pktin_queues(ndev);
285 destroy_crypto_dma_pool(ndev);
286 }
287
225 return err; 288 return err;
226} 289}
227 290
@@ -231,6 +294,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
231 */ 294 */
232void nitrox_common_sw_cleanup(struct nitrox_device *ndev) 295void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
233{ 296{
297 nitrox_free_aqm_queues(ndev);
234 nitrox_free_pktin_queues(ndev); 298 nitrox_free_pktin_queues(ndev);
235 destroy_crypto_dma_pool(ndev); 299 destroy_crypto_dma_pool(ndev);
236} 300}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fe825d0ef9ca..bc924980e10c 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -17,12 +17,17 @@
17 17
18#define CNN55XX_DEV_ID 0x12 18#define CNN55XX_DEV_ID 0x12
19#define UCODE_HLEN 48 19#define UCODE_HLEN 48
20#define SE_GROUP 0 20#define DEFAULT_SE_GROUP 0
21#define DEFAULT_AE_GROUP 0
21 22
22#define DRIVER_VERSION "1.1" 23#define DRIVER_VERSION "1.2"
24#define CNN55XX_UCD_BLOCK_SIZE 32768
25#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
23#define FW_DIR "cavium/" 26#define FW_DIR "cavium/"
24/* SE microcode */ 27/* SE microcode */
25#define SE_FW FW_DIR "cnn55xx_se.fw" 28#define SE_FW FW_DIR "cnn55xx_se.fw"
29/* AE microcode */
30#define AE_FW FW_DIR "cnn55xx_ae.fw"
26 31
27static const char nitrox_driver_name[] = "CNN55XX"; 32static const char nitrox_driver_name[] = "CNN55XX";
28 33
@@ -72,10 +77,10 @@ struct ucode {
72/** 77/**
73 * write_to_ucd_unit - Write Firmware to NITROX UCD unit 78 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
74 */ 79 */
75static void write_to_ucd_unit(struct nitrox_device *ndev, 80static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
76 struct ucode *ucode) 81 u64 *ucode_data, int block_num)
77{ 82{
78 u32 code_size = be32_to_cpu(ucode->code_size) * 2; 83 u32 code_size;
79 u64 offset, data; 84 u64 offset, data;
80 int i = 0; 85 int i = 0;
81 86
@@ -96,11 +101,12 @@ static void write_to_ucd_unit(struct nitrox_device *ndev,
96 101
97 /* set the block number */ 102 /* set the block number */
98 offset = UCD_UCODE_LOAD_BLOCK_NUM; 103 offset = UCD_UCODE_LOAD_BLOCK_NUM;
99 nitrox_write_csr(ndev, offset, 0); 104 nitrox_write_csr(ndev, offset, block_num);
100 105
106 code_size = ucode_size;
101 code_size = roundup(code_size, 8); 107 code_size = roundup(code_size, 8);
102 while (code_size) { 108 while (code_size) {
103 data = ucode->code[i]; 109 data = ucode_data[i];
104 /* write 8 bytes at a time */ 110 /* write 8 bytes at a time */
105 offset = UCD_UCODE_LOAD_IDX_DATAX(i); 111 offset = UCD_UCODE_LOAD_IDX_DATAX(i);
106 nitrox_write_csr(ndev, offset, data); 112 nitrox_write_csr(ndev, offset, data);
@@ -108,29 +114,74 @@ static void write_to_ucd_unit(struct nitrox_device *ndev,
108 i++; 114 i++;
109 } 115 }
110 116
111 /* put all SE cores in group 0 */
112 offset = POM_GRP_EXECMASKX(SE_GROUP);
113 nitrox_write_csr(ndev, offset, (~0ULL));
114
115 for (i = 0; i < ndev->hw.se_cores; i++) {
116 /*
117 * write block number and firmware length
118 * bit:<2:0> block number
119 * bit:3 is set SE uses 32KB microcode
120 * bit:3 is clear SE uses 64KB microcode
121 */
122 offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
123 nitrox_write_csr(ndev, offset, 0x8);
124 }
125 usleep_range(300, 400); 117 usleep_range(300, 400);
126} 118}
127 119
128static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) 120static int nitrox_load_fw(struct nitrox_device *ndev)
129{ 121{
130 const struct firmware *fw; 122 const struct firmware *fw;
123 const char *fw_name;
131 struct ucode *ucode; 124 struct ucode *ucode;
132 int ret; 125 u64 *ucode_data;
126 u64 offset;
127 union ucd_core_eid_ucode_block_num core_2_eid_val;
128 union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
129 union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
130 u32 ucode_size;
131 int ret, i = 0;
132
133 fw_name = SE_FW;
134 dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
135
136 ret = request_firmware(&fw, fw_name, DEV(ndev));
137 if (ret < 0) {
138 dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
139 return ret;
140 }
141
142 ucode = (struct ucode *)fw->data;
143
144 ucode_size = be32_to_cpu(ucode->code_size) * 2;
145 if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
146 dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
147 ucode_size, fw_name);
148 release_firmware(fw);
149 return -EINVAL;
150 }
151 ucode_data = ucode->code;
152
153 /* copy the firmware version */
154 memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
155 ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';
156
157 /* Load SE Firmware on UCD Block 0 */
158 write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);
133 159
160 release_firmware(fw);
161
162 /* put all SE cores in DEFAULT_SE_GROUP */
163 offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
164 nitrox_write_csr(ndev, offset, (~0ULL));
165
166 /* write block number and firmware length
167 * bit:<2:0> block number
168 * bit:3 is set SE uses 32KB microcode
169 * bit:3 is clear SE uses 64KB microcode
170 */
171 core_2_eid_val.value = 0ULL;
172 core_2_eid_val.ucode_blk = 0;
173 if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
174 core_2_eid_val.ucode_len = 1;
175 else
176 core_2_eid_val.ucode_len = 0;
177
178 for (i = 0; i < ndev->hw.se_cores; i++) {
179 offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
180 nitrox_write_csr(ndev, offset, core_2_eid_val.value);
181 }
182
183
184 fw_name = AE_FW;
134 dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); 185 dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
135 186
136 ret = request_firmware(&fw, fw_name, DEV(ndev)); 187 ret = request_firmware(&fw, fw_name, DEV(ndev));
@@ -140,13 +191,50 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
140 } 191 }
141 192
142 ucode = (struct ucode *)fw->data; 193 ucode = (struct ucode *)fw->data;
194
195 ucode_size = be32_to_cpu(ucode->code_size) * 2;
196 if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
197 dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
198 ucode_size, fw_name);
199 release_firmware(fw);
200 return -EINVAL;
201 }
202 ucode_data = ucode->code;
203
143 /* copy the firmware version */ 204 /* copy the firmware version */
144 memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2)); 205 memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
145 ndev->hw.fw_name[VERSION_LEN - 1] = '\0'; 206 ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';
207
208 /* Load AE Firmware on UCD Block 2 */
209 write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);
146 210
147 write_to_ucd_unit(ndev, ucode);
148 release_firmware(fw); 211 release_firmware(fw);
149 212
213 /* put all AE cores in DEFAULT_AE_GROUP */
214 offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
215 aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
216 nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
217 offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
218 aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
219 nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);
220
221 /* write block number and firmware length
222 * bit:<2:0> block number
223 * bit:3 is set SE uses 32KB microcode
224 * bit:3 is clear SE uses 64KB microcode
225 */
226 core_2_eid_val.value = 0ULL;
227 core_2_eid_val.ucode_blk = 0;
228 if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
229 core_2_eid_val.ucode_len = 1;
230 else
231 core_2_eid_val.ucode_len = 0;
232
233 for (i = 0; i < ndev->hw.ae_cores; i++) {
234 offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
235 nitrox_write_csr(ndev, offset, core_2_eid_val.value);
236 }
237
150 return 0; 238 return 0;
151} 239}
152 240
@@ -299,7 +387,9 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
299 /* get cores information */ 387 /* get cores information */
300 nitrox_get_hwinfo(ndev); 388 nitrox_get_hwinfo(ndev);
301 389
302 nitrox_config_nps_unit(ndev); 390 nitrox_config_nps_core_unit(ndev);
391 nitrox_config_aqm_unit(ndev);
392 nitrox_config_nps_pkt_unit(ndev);
303 nitrox_config_pom_unit(ndev); 393 nitrox_config_pom_unit(ndev);
304 nitrox_config_efl_unit(ndev); 394 nitrox_config_efl_unit(ndev);
305 /* configure IO units */ 395 /* configure IO units */
@@ -309,8 +399,8 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
309 nitrox_config_lbc_unit(ndev); 399 nitrox_config_lbc_unit(ndev);
310 nitrox_config_rand_unit(ndev); 400 nitrox_config_rand_unit(ndev);
311 401
312 /* load firmware on SE cores */ 402 /* load firmware on cores */
313 err = nitrox_load_fw(ndev, SE_FW); 403 err = nitrox_load_fw(ndev);
314 if (err) 404 if (err)
315 return err; 405 return err;
316 406
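The firmware loader above now bounds each image against the UCD block geometry before writing it; the * 2 in the hunk suggests the header's code_size counts 16-bit units, though that interpretation is an assumption rather than something the diff states. A worked example with an assumed image size:

	/*
	 * code_size header field = 14848  =>  ucode_size = 14848 * 2 = 29696 bytes
	 * 29696 <= CNN55XX_UCD_BLOCK_SIZE (32768)  =>  ucode_len = 1 (32KB microcode)
	 * 32768 < size <= CNN55XX_MAX_UCODE_SIZE (65536)  =>  ucode_len = 0 (64KB)
	 * size == 0 or size > 65536  =>  -EINVAL, firmware released, load aborted
	 *
	 * The SE image is written to UCD block 0 and the AE image to block 2; each
	 * core's EID register is then pointed at the block holding its microcode.
	 */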
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index efdbd0fc3e3b..f69ba02c4d25 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -400,6 +400,36 @@ struct nps_pkt_instr {
400}; 400};
401 401
402/** 402/**
403 * struct aqmq_command_s - The 32 byte command for AE processing.
404 * @opcode: Request opcode
405 * @param1: Request control parameter 1
406 * @param2: Request control parameter 2
407 * @dlen: Input length
408 * @dptr: Input pointer points to buffer in remote host
409 * @rptr: Result pointer points to buffer in remote host
410 * @grp: AQM Group (0..7)
411 * @cptr: Context pointer
412 */
413struct aqmq_command_s {
414 __be16 opcode;
415 __be16 param1;
416 __be16 param2;
417 __be16 dlen;
418 __be64 dptr;
419 __be64 rptr;
420 union {
421 __be64 word3;
422#if defined(__BIG_ENDIAN_BITFIELD)
423 u64 grp : 3;
424 u64 cptr : 61;
425#else
426 u64 cptr : 61;
427 u64 grp : 3;
428#endif
429 };
430};
431
432/**
403 * struct ctx_hdr - Book keeping data about the crypto context 433 * struct ctx_hdr - Book keeping data about the crypto context
404 * @pool: Pool used to allocate crypto context 434 * @pool: Pool used to allocate crypto context
405 * @dma: Base DMA address of the crypto context 435 * @dma: Base DMA address of the crypto context
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 7e4a5e69085e..3cdce1f0f257 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -7,7 +7,7 @@
7#include <crypto/aes.h> 7#include <crypto/aes.h>
8#include <crypto/skcipher.h> 8#include <crypto/skcipher.h>
9#include <crypto/ctr.h> 9#include <crypto/ctr.h>
10#include <crypto/des.h> 10#include <crypto/internal/des.h>
11#include <crypto/xts.h> 11#include <crypto/xts.h>
12 12
13#include "nitrox_dev.h" 13#include "nitrox_dev.h"
@@ -257,7 +257,7 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
257static int nitrox_3des_setkey(struct crypto_skcipher *cipher, 257static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
258 const u8 *key, unsigned int keylen) 258 const u8 *key, unsigned int keylen)
259{ 259{
260 return unlikely(des3_verify_key(cipher, key)) ?: 260 return verify_skcipher_des3_key(cipher, key) ?:
261 nitrox_skcipher_setkey(cipher, 0, key, keylen); 261 nitrox_skcipher_setkey(cipher, 0, key, keylen);
262} 262}
263 263
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
index bf439d8256ba..43287f8471d1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -109,6 +109,9 @@ static int nitrox_pf_reinit(struct nitrox_device *ndev)
109 return err; 109 return err;
110 } 110 }
111 111
112 /* configure the AQM queues */
113 nitrox_config_aqm_rings(ndev);
114
112 /* configure the packet queues */ 115 /* configure the packet queues */
113 nitrox_config_pkt_input_rings(ndev); 116 nitrox_config_pkt_input_rings(ndev);
114 nitrox_config_pkt_solicit_ports(ndev); 117 nitrox_config_pkt_solicit_ports(ndev);
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index a8447a3cf366..194624b4855b 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -593,6 +593,7 @@ static const struct file_operations zip_stats_fops = {
593 .owner = THIS_MODULE, 593 .owner = THIS_MODULE,
594 .open = zip_stats_open, 594 .open = zip_stats_open,
595 .read = seq_read, 595 .read = seq_read,
596 .release = single_release,
596}; 597};
597 598
598static int zip_clear_open(struct inode *inode, struct file *file) 599static int zip_clear_open(struct inode *inode, struct file *file)
@@ -604,6 +605,7 @@ static const struct file_operations zip_clear_fops = {
604 .owner = THIS_MODULE, 605 .owner = THIS_MODULE,
605 .open = zip_clear_open, 606 .open = zip_clear_open,
606 .read = seq_read, 607 .read = seq_read,
608 .release = single_release,
607}; 609};
608 610
609static int zip_regs_open(struct inode *inode, struct file *file) 611static int zip_regs_open(struct inode *inode, struct file *file)
@@ -615,6 +617,7 @@ static const struct file_operations zip_regs_fops = {
615 .owner = THIS_MODULE, 617 .owner = THIS_MODULE,
616 .open = zip_regs_open, 618 .open = zip_regs_open,
617 .read = seq_read, 619 .read = seq_read,
620 .release = single_release,
618}; 621};
619 622
620/* Root directory for thunderx_zip debugfs entry */ 623/* Root directory for thunderx_zip debugfs entry */
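The three one-line additions above all plug the same leak: these debugfs files are opened with single_open(), which allocates a seq_file and iterator state that only single_release() frees, so a file_operations without .release leaked that allocation on every close. The complete shape each of the three fops converges on (a sketch assembled from the hunks above, not a further change):

	static const struct file_operations zip_stats_fops = {
		.owner   = THIS_MODULE,
		.open    = zip_stats_open,	/* wraps single_open() */
		.read    = seq_read,
		.release = single_release,	/* frees what single_open() allocated */
	};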
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 48f3edc1e3fb..8fec733f567f 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -30,6 +30,7 @@ config CRYPTO_DEV_CCP_CRYPTO
30 select CRYPTO_BLKCIPHER 30 select CRYPTO_BLKCIPHER
31 select CRYPTO_AUTHENC 31 select CRYPTO_AUTHENC
32 select CRYPTO_RSA 32 select CRYPTO_RSA
33 select CRYPTO_LIB_AES
33 help 34 help
34 Support for using the cryptographic API with the AMD Cryptographic 35 Support for using the cryptographic API with the AMD Cryptographic
35 Coprocessor. This module supports offload of SHA and AES algorithms. 36 Coprocessor. This module supports offload of SHA and AES algorithms.
@@ -45,3 +46,11 @@ config CRYPTO_DEV_SP_PSP
45 management commands in Secure Encrypted Virtualization (SEV) mode, 46 management commands in Secure Encrypted Virtualization (SEV) mode,
46 along with software-based Trusted Execution Environment (TEE) to 47 along with software-based Trusted Execution Environment (TEE) to
47 enable third-party trusted applications. 48 enable third-party trusted applications.
49
50config CRYPTO_DEV_CCP_DEBUGFS
51 bool "Enable CCP Internals in DebugFS"
52 default n
53 depends on CRYPTO_DEV_SP_CCP
54 help
55 Expose CCP device information such as operation statistics, feature
56 information, and descriptor queue contents.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 51d1c0cf66c7..6b86f1e6d634 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -5,8 +5,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
5 ccp-ops.o \ 5 ccp-ops.o \
6 ccp-dev-v3.o \ 6 ccp-dev-v3.o \
7 ccp-dev-v5.o \ 7 ccp-dev-v5.o \
8 ccp-dmaengine.o \ 8 ccp-dmaengine.o
9 ccp-debugfs.o 9ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
10ccp-$(CONFIG_PCI) += sp-pci.o 10ccp-$(CONFIG_PCI) += sp-pci.o
11ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o 11ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
12 12
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index bb7219d36b2c..32f19f402073 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -261,6 +261,7 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
261 ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); 261 ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
262 u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; 262 u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
263 u64 rb_hi = 0x00, rb_lo = 0x87; 263 u64 rb_hi = 0x00, rb_lo = 0x87;
264 struct crypto_aes_ctx aes;
264 __be64 *gk; 265 __be64 *gk;
265 int ret; 266 int ret;
266 267
@@ -284,14 +285,14 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
284 ctx->u.aes.key_len = 0; 285 ctx->u.aes.key_len = 0;
285 286
286 /* Set the key for the AES cipher used to generate the keys */ 287 /* Set the key for the AES cipher used to generate the keys */
287 ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len); 288 ret = aes_expandkey(&aes, key, key_len);
288 if (ret) 289 if (ret)
289 return ret; 290 return ret;
290 291
291 /* Encrypt a block of zeroes - use key area in context */ 292 /* Encrypt a block of zeroes - use key area in context */
292 memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); 293 memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
293 crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key, 294 aes_encrypt(&aes, ctx->u.aes.key, ctx->u.aes.key);
294 ctx->u.aes.key); 295 memzero_explicit(&aes, sizeof(aes));
295 296
296 /* Generate K1 and K2 */ 297 /* Generate K1 and K2 */
297 k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); 298 k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
@@ -336,32 +337,15 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
336{ 337{
337 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 338 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
338 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 339 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
339 struct crypto_cipher *cipher_tfm;
340 340
341 ctx->complete = ccp_aes_cmac_complete; 341 ctx->complete = ccp_aes_cmac_complete;
342 ctx->u.aes.key_len = 0; 342 ctx->u.aes.key_len = 0;
343 343
344 crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); 344 crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
345 345
346 cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
347 if (IS_ERR(cipher_tfm)) {
348 pr_warn("could not load aes cipher driver\n");
349 return PTR_ERR(cipher_tfm);
350 }
351 ctx->u.aes.tfm_cipher = cipher_tfm;
352
353 return 0; 346 return 0;
354} 347}
355 348
356static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
357{
358 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
359
360 if (ctx->u.aes.tfm_cipher)
361 crypto_free_cipher(ctx->u.aes.tfm_cipher);
362 ctx->u.aes.tfm_cipher = NULL;
363}
364
365int ccp_register_aes_cmac_algs(struct list_head *head) 349int ccp_register_aes_cmac_algs(struct list_head *head)
366{ 350{
367 struct ccp_crypto_ahash_alg *ccp_alg; 351 struct ccp_crypto_ahash_alg *ccp_alg;
@@ -401,7 +385,6 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
401 base->cra_ctxsize = sizeof(struct ccp_ctx); 385 base->cra_ctxsize = sizeof(struct ccp_ctx);
402 base->cra_priority = CCP_CRA_PRIORITY; 386 base->cra_priority = CCP_CRA_PRIORITY;
403 base->cra_init = ccp_aes_cmac_cra_init; 387 base->cra_init = ccp_aes_cmac_cra_init;
404 base->cra_exit = ccp_aes_cmac_cra_exit;
405 base->cra_module = THIS_MODULE; 388 base->cra_module = THIS_MODULE;
406 389
407 ret = crypto_register_ahash(alg); 390 ret = crypto_register_ahash(alg);
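The setkey hunk above drops the crypto_cipher fallback and generates the CMAC K1/K2 subkeys with the new AES library helpers: expand the key, encrypt one all-zero block, then shift-and-conditionally-XOR with the Rb constant (0x87 in the low byte, matching rb_lo above). A self-contained sketch of that derivation per RFC 4493 using the same library calls; it mirrors the driver's math but is not its exact code:

	#include <crypto/aes.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static int example_cmac_subkeys(const u8 *key, unsigned int key_len,
					u8 k1[AES_BLOCK_SIZE], u8 k2[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes;
		u8 k0[AES_BLOCK_SIZE] = { };
		int i, ret;

		ret = aes_expandkey(&aes, key, key_len);
		if (ret)
			return ret;

		aes_encrypt(&aes, k0, k0);		/* K0 = AES-K(0^128) */
		memzero_explicit(&aes, sizeof(aes));

		/* K1 = K0 << 1, XOR Rb (0x87) into the last byte if K0's MSB was set */
		for (i = 0; i < AES_BLOCK_SIZE; i++)
			k1[i] = (k0[i] << 1) |
				(i < AES_BLOCK_SIZE - 1 ? k0[i + 1] >> 7 : 0);
		if (k0[0] & 0x80)
			k1[AES_BLOCK_SIZE - 1] ^= 0x87;

		/* K2 = K1 << 1, same conditional XOR */
		for (i = 0; i < AES_BLOCK_SIZE; i++)
			k2[i] = (k1[i] << 1) |
				(i < AES_BLOCK_SIZE - 1 ? k1[i + 1] >> 7 : 0);
		if (k1[0] & 0x80)
			k2[AES_BLOCK_SIZE - 1] ^= 0x87;

		return 0;
	}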
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 783ba75e0618..8e4a531f4f70 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -116,9 +116,6 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
116 if (!ctx->u.aes.key_len) 116 if (!ctx->u.aes.key_len)
117 return -EINVAL; 117 return -EINVAL;
118 118
119 if (req->nbytes & (AES_BLOCK_SIZE - 1))
120 return -EINVAL;
121
122 if (!req->info) 119 if (!req->info)
123 return -EINVAL; 120 return -EINVAL;
124 121
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 5f05f834c7cd..d2c49b2f0323 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -14,7 +14,7 @@
14#include <linux/crypto.h> 14#include <linux/crypto.h>
15#include <crypto/algapi.h> 15#include <crypto/algapi.h>
16#include <crypto/scatterwalk.h> 16#include <crypto/scatterwalk.h>
17#include <crypto/des.h> 17#include <crypto/internal/des.h>
18 18
19#include "ccp-crypto.h" 19#include "ccp-crypto.h"
20 20
@@ -39,11 +39,10 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
39 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); 39 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
40 struct ccp_crypto_ablkcipher_alg *alg = 40 struct ccp_crypto_ablkcipher_alg *alg =
41 ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm)); 41 ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
42 u32 *flags = &tfm->base.crt_flags;
43 int err; 42 int err;
44 43
45 err = __des3_verify_key(flags, key); 44 err = verify_ablkcipher_des3_key(tfm, key);
46 if (unlikely(err)) 45 if (err)
47 return err; 46 return err;
48 47
49 /* It's not clear that there is any support for a keysize of 112. 48 /* It's not clear that there is any support for a keysize of 112.
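
The hunk above moves the 3DES key check from the open-coded __des3_verify_key() to the verify_ablkcipher_des3_key() helper in <crypto/internal/des.h>. A minimal sketch of that pattern, under the invented names my_des3_ctx and my_des3_setkey (not part of the driver):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/internal/des.h>

struct my_des3_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
	unsigned int key_len;
};

static int my_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct my_des3_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
	int err;

	if (key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	/* Rejects keys with repeated DES components when weak keys are forbidden */
	err = verify_ablkcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, key_len);
	ctx->key_len = key_len;
	return 0;
}
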
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 818096490829..8ee4cb45a3f3 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -405,8 +405,10 @@ static int ccp_crypto_init(void)
405 int ret; 405 int ret;
406 406
407 ret = ccp_present(); 407 ret = ccp_present();
408 if (ret) 408 if (ret) {
409 pr_err("Cannot load: there are no available CCPs\n");
409 return ret; 410 return ret;
411 }
410 412
411 spin_lock_init(&req_queue_lock); 413 spin_lock_init(&req_queue_lock);
412 INIT_LIST_HEAD(&req_queue.cmds); 414 INIT_LIST_HEAD(&req_queue.cmds);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 622b34c17643..9015b5da6ba3 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -12,7 +12,6 @@
12 12
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/wait.h> 14#include <linux/wait.h>
15#include <linux/pci.h>
16#include <linux/ccp.h> 15#include <linux/ccp.h>
17#include <crypto/algapi.h> 16#include <crypto/algapi.h>
18#include <crypto/aes.h> 17#include <crypto/aes.h>
@@ -24,6 +23,10 @@
24#include <crypto/akcipher.h> 23#include <crypto/akcipher.h>
25#include <crypto/internal/rsa.h> 24#include <crypto/internal/rsa.h>
26 25
26/* We want the module name in front of our messages */
27#undef pr_fmt
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
27#define CCP_LOG_LEVEL KERN_INFO 30#define CCP_LOG_LEVEL KERN_INFO
28 31
29#define CCP_CRA_PRIORITY 300 32#define CCP_CRA_PRIORITY 300
@@ -87,9 +90,6 @@ struct ccp_aes_ctx {
87 /* Fallback cipher for XTS with unsupported unit sizes */ 90 /* Fallback cipher for XTS with unsupported unit sizes */
88 struct crypto_sync_skcipher *tfm_skcipher; 91 struct crypto_sync_skcipher *tfm_skcipher;
89 92
90 /* Cipher used to generate CMAC K1/K2 keys */
91 struct crypto_cipher *tfm_cipher;
92
93 enum ccp_engine engine; 93 enum ccp_engine engine;
94 enum ccp_aes_type type; 94 enum ccp_aes_type type;
95 enum ccp_aes_mode mode; 95 enum ccp_aes_mode mode;
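
The pr_fmt override added to ccp-crypto.h is the standard way to get a module-name prefix on every pr_*() message in files that include it. A self-contained sketch; the function name and message are illustrative:

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void report_missing_ccp(void)
{
	/* Emitted as "<module name>: Cannot load: there are no available CCPs" */
	pr_err("Cannot load: there are no available CCPs\n");
}
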
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 2b7d47ed5c74..0186b3df4c87 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -10,7 +10,6 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/kthread.h> 13#include <linux/kthread.h>
15#include <linux/interrupt.h> 14#include <linux/interrupt.h>
16#include <linux/ccp.h> 15#include <linux/ccp.h>
@@ -379,7 +378,7 @@ static int ccp_init(struct ccp_device *ccp)
379 /* Find available queues */ 378 /* Find available queues */
380 ccp->qim = 0; 379 ccp->qim = 0;
381 qmr = ioread32(ccp->io_regs + Q_MASK_REG); 380 qmr = ioread32(ccp->io_regs + Q_MASK_REG);
382 for (i = 0; i < MAX_HW_QUEUES; i++) { 381 for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
383 if (!(qmr & (1 << i))) 382 if (!(qmr & (1 << i)))
384 continue; 383 continue;
385 384
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 217e41bbadaf..57eb53b8ac21 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -2,16 +2,13 @@
2/* 2/*
3 * AMD Cryptographic Coprocessor (CCP) driver 3 * AMD Cryptographic Coprocessor (CCP) driver
4 * 4 *
5 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. 5 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
6 * 6 *
7 * Author: Gary R Hook <gary.hook@amd.com> 7 * Author: Gary R Hook <gary.hook@amd.com>
8 */ 8 */
9 9
10#include <linux/module.h>
11#include <linux/kernel.h> 10#include <linux/kernel.h>
12#include <linux/pci.h>
13#include <linux/kthread.h> 11#include <linux/kthread.h>
14#include <linux/debugfs.h>
15#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
16#include <linux/interrupt.h> 13#include <linux/interrupt.h>
17#include <linux/compiler.h> 14#include <linux/compiler.h>
@@ -792,8 +789,7 @@ static int ccp5_init(struct ccp_device *ccp)
792 789
793 /* Find available queues */ 790 /* Find available queues */
794 qmr = ioread32(ccp->io_regs + Q_MASK_REG); 791 qmr = ioread32(ccp->io_regs + Q_MASK_REG);
795 for (i = 0; i < MAX_HW_QUEUES; i++) { 792 for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
796
797 if (!(qmr & (1 << i))) 793 if (!(qmr & (1 << i)))
798 continue; 794 continue;
799 795
@@ -806,6 +802,7 @@ static int ccp5_init(struct ccp_device *ccp)
806 if (!dma_pool) { 802 if (!dma_pool) {
807 dev_err(dev, "unable to allocate dma pool\n"); 803 dev_err(dev, "unable to allocate dma pool\n");
808 ret = -ENOMEM; 804 ret = -ENOMEM;
805 goto e_pool;
809 } 806 }
810 807
811 cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; 808 cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
@@ -819,9 +816,9 @@ static int ccp5_init(struct ccp_device *ccp)
819 /* Page alignment satisfies our needs for N <= 128 */ 816 /* Page alignment satisfies our needs for N <= 128 */
820 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); 817 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
821 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); 818 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
822 cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, 819 cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
823 &cmd_q->qbase_dma, 820 &cmd_q->qbase_dma,
824 GFP_KERNEL); 821 GFP_KERNEL);
825 if (!cmd_q->qbase) { 822 if (!cmd_q->qbase) {
826 dev_err(dev, "unable to allocate command queue\n"); 823 dev_err(dev, "unable to allocate command queue\n");
827 ret = -ENOMEM; 824 ret = -ENOMEM;
@@ -970,8 +967,10 @@ static int ccp5_init(struct ccp_device *ccp)
970 if (ret) 967 if (ret)
971 goto e_hwrng; 968 goto e_hwrng;
972 969
970#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
973 /* Set up debugfs entries */ 971 /* Set up debugfs entries */
974 ccp5_debugfs_setup(ccp); 972 ccp5_debugfs_setup(ccp);
973#endif
975 974
976 return 0; 975 return 0;
977 976
@@ -995,7 +994,6 @@ e_pool:
995 994
996static void ccp5_destroy(struct ccp_device *ccp) 995static void ccp5_destroy(struct ccp_device *ccp)
997{ 996{
998 struct device *dev = ccp->dev;
999 struct ccp_cmd_queue *cmd_q; 997 struct ccp_cmd_queue *cmd_q;
1000 struct ccp_cmd *cmd; 998 struct ccp_cmd *cmd;
1001 unsigned int i; 999 unsigned int i;
@@ -1009,11 +1007,13 @@ static void ccp5_destroy(struct ccp_device *ccp)
1009 /* Remove this device from the list of available units first */ 1007 /* Remove this device from the list of available units first */
1010 ccp_del_device(ccp); 1008 ccp_del_device(ccp);
1011 1009
1010#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
1012 /* We're in the process of tearing down the entire driver; 1011 /* We're in the process of tearing down the entire driver;
1013 * when all the devices are gone clean up debugfs 1012 * when all the devices are gone clean up debugfs
1014 */ 1013 */
1015 if (ccp_present()) 1014 if (ccp_present())
1016 ccp5_debugfs_destroy(); 1015 ccp5_debugfs_destroy();
1016#endif
1017 1017
1018 /* Disable and clear interrupts */ 1018 /* Disable and clear interrupts */
1019 ccp5_disable_queue_interrupts(ccp); 1019 ccp5_disable_queue_interrupts(ccp);
@@ -1036,12 +1036,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
1036 1036
1037 sp_free_ccp_irq(ccp->sp, ccp); 1037 sp_free_ccp_irq(ccp->sp, ccp);
1038 1038
1039 for (i = 0; i < ccp->cmd_q_count; i++) {
1040 cmd_q = &ccp->cmd_q[i];
1041 dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
1042 cmd_q->qbase_dma);
1043 }
1044
1045 /* Flush the cmd and backlog queue */ 1039 /* Flush the cmd and backlog queue */
1046 while (!list_empty(&ccp->cmd)) { 1040 while (!list_empty(&ccp->cmd)) {
1047 /* Invoke the callback directly with an error code */ 1041 /* Invoke the callback directly with an error code */
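
The queue-ring allocation above switches to dmam_alloc_coherent(), the device-managed variant, which is why the matching dma_free_coherent() loop disappears from ccp5_destroy(). A rough sketch of the idiom; the wrapper name alloc_queue_ring is not part of the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_queue_ring(struct device *dev, size_t qsize,
			      dma_addr_t *dma_handle)
{
	/* Released automatically by devres when the device goes away */
	return dmam_alloc_coherent(dev, qsize, dma_handle, GFP_KERNEL);
}
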
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index edefa669153f..73acf0fdb793 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -2,12 +2,13 @@
2/* 2/*
3 * AMD Cryptographic Coprocessor (CCP) driver 3 * AMD Cryptographic Coprocessor (CCP) driver
4 * 4 *
5 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. 5 * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
6 * 6 *
7 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 * Author: Tom Lendacky <thomas.lendacky@amd.com>
8 * Author: Gary R Hook <gary.hook@amd.com> 8 * Author: Gary R Hook <gary.hook@amd.com>
9 */ 9 */
10 10
11#include <linux/module.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/kthread.h> 13#include <linux/kthread.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
@@ -19,6 +20,7 @@
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/hw_random.h> 21#include <linux/hw_random.h>
21#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/atomic.h>
22#ifdef CONFIG_X86 24#ifdef CONFIG_X86
23#include <asm/cpu_device_id.h> 25#include <asm/cpu_device_id.h>
24#endif 26#endif
@@ -26,6 +28,19 @@
26 28
27#include "ccp-dev.h" 29#include "ccp-dev.h"
28 30
31#define MAX_CCPS 32
32
 33/* Limit CCP use to a specified number of queues per device */
34static unsigned int nqueues = 0;
35module_param(nqueues, uint, 0444);
36MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");
37
38/* Limit the maximum number of configured CCPs */
39static atomic_t dev_count = ATOMIC_INIT(0);
40static unsigned int max_devs = MAX_CCPS;
41module_param(max_devs, uint, 0444);
42MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)");
43
29struct ccp_tasklet_data { 44struct ccp_tasklet_data {
30 struct completion completion; 45 struct completion completion;
31 struct ccp_cmd *cmd; 46 struct ccp_cmd *cmd;
@@ -594,12 +609,24 @@ int ccp_dev_init(struct sp_device *sp)
594 struct ccp_device *ccp; 609 struct ccp_device *ccp;
595 int ret; 610 int ret;
596 611
612 /*
613 * Check how many we have so far, and stop after reaching
614 * that number
615 */
616 if (atomic_inc_return(&dev_count) > max_devs)
617 return 0; /* don't fail the load */
618
597 ret = -ENOMEM; 619 ret = -ENOMEM;
598 ccp = ccp_alloc_struct(sp); 620 ccp = ccp_alloc_struct(sp);
599 if (!ccp) 621 if (!ccp)
600 goto e_err; 622 goto e_err;
601 sp->ccp_data = ccp; 623 sp->ccp_data = ccp;
602 624
625 if (!nqueues || (nqueues > MAX_HW_QUEUES))
626 ccp->max_q_count = MAX_HW_QUEUES;
627 else
628 ccp->max_q_count = nqueues;
629
603 ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; 630 ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
604 if (!ccp->vdata || !ccp->vdata->version) { 631 if (!ccp->vdata || !ccp->vdata->version) {
605 ret = -ENODEV; 632 ret = -ENODEV;
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 5e624920fd99..3f68262d9ab4 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -12,11 +12,11 @@
12#define __CCP_DEV_H__ 12#define __CCP_DEV_H__
13 13
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/pci.h>
16#include <linux/spinlock.h> 15#include <linux/spinlock.h>
17#include <linux/mutex.h> 16#include <linux/mutex.h>
18#include <linux/list.h> 17#include <linux/list.h>
19#include <linux/wait.h> 18#include <linux/wait.h>
19#include <linux/dma-direction.h>
20#include <linux/dmapool.h> 20#include <linux/dmapool.h>
21#include <linux/hw_random.h> 21#include <linux/hw_random.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
@@ -379,6 +379,7 @@ struct ccp_device {
379 */ 379 */
380 struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; 380 struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
381 unsigned int cmd_q_count; 381 unsigned int cmd_q_count;
382 unsigned int max_q_count;
382 383
383 /* Support for the CCP True RNG 384 /* Support for the CCP True RNG
384 */ 385 */
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 7f22a45bbc11..a54f9367a580 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -2,13 +2,14 @@
2/* 2/*
3 * AMD Cryptographic Coprocessor (CCP) driver 3 * AMD Cryptographic Coprocessor (CCP) driver
4 * 4 *
5 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. 5 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
6 * 6 *
7 * Author: Gary R Hook <gary.hook@amd.com> 7 * Author: Gary R Hook <gary.hook@amd.com>
8 */ 8 */
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/dma-mapping.h>
12#include <linux/dmaengine.h> 13#include <linux/dmaengine.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <linux/mutex.h> 15#include <linux/mutex.h>
@@ -35,6 +36,10 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT;
35module_param(dma_chan_attr, uint, 0444); 36module_param(dma_chan_attr, uint, 0444);
36MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); 37MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
37 38
39static unsigned int dmaengine = 1;
40module_param(dmaengine, uint, 0444);
41MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");
42
38static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) 43static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
39{ 44{
40 switch (dma_chan_attr) { 45 switch (dma_chan_attr) {
@@ -637,6 +642,9 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
637 unsigned int i; 642 unsigned int i;
638 int ret; 643 int ret;
639 644
645 if (!dmaengine)
646 return 0;
647
640 ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count, 648 ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
641 sizeof(*(ccp->ccp_dma_chan)), 649 sizeof(*(ccp->ccp_dma_chan)),
642 GFP_KERNEL); 650 GFP_KERNEL);
@@ -740,6 +748,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
740{ 748{
741 struct dma_device *dma_dev = &ccp->dma_dev; 749 struct dma_device *dma_dev = &ccp->dma_dev;
742 750
751 if (!dmaengine)
752 return;
753
743 dma_async_device_unregister(dma_dev); 754 dma_async_device_unregister(dma_dev);
744 755
745 kmem_cache_destroy(ccp->dma_desc_cache); 756 kmem_cache_destroy(ccp->dma_desc_cache);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 9bc3c62157d7..c8da8eb160da 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -10,7 +10,6 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
15#include <crypto/scatterwalk.h> 14#include <crypto/scatterwalk.h>
16#include <crypto/des.h> 15#include <crypto/des.h>
@@ -150,14 +149,13 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
150 if (len <= CCP_DMAPOOL_MAX_SIZE) { 149 if (len <= CCP_DMAPOOL_MAX_SIZE) {
151 wa->dma_pool = cmd_q->dma_pool; 150 wa->dma_pool = cmd_q->dma_pool;
152 151
153 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL, 152 wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL,
154 &wa->dma.address); 153 &wa->dma.address);
155 if (!wa->address) 154 if (!wa->address)
156 return -ENOMEM; 155 return -ENOMEM;
157 156
158 wa->dma.length = CCP_DMAPOOL_MAX_SIZE; 157 wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
159 158
160 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
161 } else { 159 } else {
162 wa->address = kzalloc(len, GFP_KERNEL); 160 wa->address = kzalloc(len, GFP_KERNEL);
163 if (!wa->address) 161 if (!wa->address)
@@ -455,8 +453,8 @@ static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
455 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); 453 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
456} 454}
457 455
458static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, 456static noinline_for_stack int
459 struct ccp_cmd *cmd) 457ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
460{ 458{
461 struct ccp_aes_engine *aes = &cmd->u.aes; 459 struct ccp_aes_engine *aes = &cmd->u.aes;
462 struct ccp_dm_workarea key, ctx; 460 struct ccp_dm_workarea key, ctx;
@@ -611,8 +609,8 @@ e_key:
611 return ret; 609 return ret;
612} 610}
613 611
614static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, 612static noinline_for_stack int
615 struct ccp_cmd *cmd) 613ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
616{ 614{
617 struct ccp_aes_engine *aes = &cmd->u.aes; 615 struct ccp_aes_engine *aes = &cmd->u.aes;
618 struct ccp_dm_workarea key, ctx, final_wa, tag; 616 struct ccp_dm_workarea key, ctx, final_wa, tag;
@@ -894,7 +892,8 @@ e_key:
894 return ret; 892 return ret;
895} 893}
896 894
897static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 895static noinline_for_stack int
896ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
898{ 897{
899 struct ccp_aes_engine *aes = &cmd->u.aes; 898 struct ccp_aes_engine *aes = &cmd->u.aes;
900 struct ccp_dm_workarea key, ctx; 899 struct ccp_dm_workarea key, ctx;
@@ -904,12 +903,6 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
904 bool in_place = false; 903 bool in_place = false;
905 int ret; 904 int ret;
906 905
907 if (aes->mode == CCP_AES_MODE_CMAC)
908 return ccp_run_aes_cmac_cmd(cmd_q, cmd);
909
910 if (aes->mode == CCP_AES_MODE_GCM)
911 return ccp_run_aes_gcm_cmd(cmd_q, cmd);
912
913 if (!((aes->key_len == AES_KEYSIZE_128) || 906 if (!((aes->key_len == AES_KEYSIZE_128) ||
914 (aes->key_len == AES_KEYSIZE_192) || 907 (aes->key_len == AES_KEYSIZE_192) ||
915 (aes->key_len == AES_KEYSIZE_256))) 908 (aes->key_len == AES_KEYSIZE_256)))
@@ -1076,8 +1069,8 @@ e_key:
1076 return ret; 1069 return ret;
1077} 1070}
1078 1071
1079static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, 1072static noinline_for_stack int
1080 struct ccp_cmd *cmd) 1073ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1081{ 1074{
1082 struct ccp_xts_aes_engine *xts = &cmd->u.xts; 1075 struct ccp_xts_aes_engine *xts = &cmd->u.xts;
1083 struct ccp_dm_workarea key, ctx; 1076 struct ccp_dm_workarea key, ctx;
@@ -1276,7 +1269,8 @@ e_key:
1276 return ret; 1269 return ret;
1277} 1270}
1278 1271
1279static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1272static noinline_for_stack int
1273ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1280{ 1274{
1281 struct ccp_des3_engine *des3 = &cmd->u.des3; 1275 struct ccp_des3_engine *des3 = &cmd->u.des3;
1282 1276
@@ -1472,7 +1466,8 @@ e_key:
1472 return ret; 1466 return ret;
1473} 1467}
1474 1468
1475static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1469static noinline_for_stack int
1470ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1476{ 1471{
1477 struct ccp_sha_engine *sha = &cmd->u.sha; 1472 struct ccp_sha_engine *sha = &cmd->u.sha;
1478 struct ccp_dm_workarea ctx; 1473 struct ccp_dm_workarea ctx;
@@ -1816,7 +1811,8 @@ e_ctx:
1816 return ret; 1811 return ret;
1817} 1812}
1818 1813
1819static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 1814static noinline_for_stack int
1815ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1820{ 1816{
1821 struct ccp_rsa_engine *rsa = &cmd->u.rsa; 1817 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
1822 struct ccp_dm_workarea exp, src, dst; 1818 struct ccp_dm_workarea exp, src, dst;
@@ -1947,8 +1943,8 @@ e_sb:
1947 return ret; 1943 return ret;
1948} 1944}
1949 1945
1950static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, 1946static noinline_for_stack int
1951 struct ccp_cmd *cmd) 1947ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1952{ 1948{
1953 struct ccp_passthru_engine *pt = &cmd->u.passthru; 1949 struct ccp_passthru_engine *pt = &cmd->u.passthru;
1954 struct ccp_dm_workarea mask; 1950 struct ccp_dm_workarea mask;
@@ -2079,7 +2075,8 @@ e_mask:
2079 return ret; 2075 return ret;
2080} 2076}
2081 2077
2082static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, 2078static noinline_for_stack int
2079ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
2083 struct ccp_cmd *cmd) 2080 struct ccp_cmd *cmd)
2084{ 2081{
2085 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; 2082 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
@@ -2420,7 +2417,8 @@ e_src:
2420 return ret; 2417 return ret;
2421} 2418}
2422 2419
2423static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) 2420static noinline_for_stack int
2421ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2424{ 2422{
2425 struct ccp_ecc_engine *ecc = &cmd->u.ecc; 2423 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2426 2424
@@ -2457,7 +2455,17 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2457 2455
2458 switch (cmd->engine) { 2456 switch (cmd->engine) {
2459 case CCP_ENGINE_AES: 2457 case CCP_ENGINE_AES:
2460 ret = ccp_run_aes_cmd(cmd_q, cmd); 2458 switch (cmd->u.aes.mode) {
2459 case CCP_AES_MODE_CMAC:
2460 ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
2461 break;
2462 case CCP_AES_MODE_GCM:
2463 ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
2464 break;
2465 default:
2466 ret = ccp_run_aes_cmd(cmd_q, cmd);
2467 break;
2468 }
2461 break; 2469 break;
2462 case CCP_ENGINE_XTS_AES_128: 2470 case CCP_ENGINE_XTS_AES_128:
2463 ret = ccp_run_xts_aes_cmd(cmd_q, cmd); 2471 ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
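
Two related changes run through ccp-ops.c: the CMAC/GCM dispatch moves out of ccp_run_aes_cmd() into the top-level switch, and every engine handler is marked noinline_for_stack so its large ccp_dm_workarea locals stay in a leaf frame rather than being inlined into ccp_run_cmd(). A toy illustration of the annotation; all names besides noinline_for_stack are made up:

#include <linux/compiler.h>
#include <linux/errno.h>

struct big_workarea {
	char buf[512];
};

static noinline_for_stack int run_one_engine(int arg)
{
	struct big_workarea wa = {};	/* confined to this frame */

	wa.buf[0] = (char)arg;
	return wa.buf[0];
}

static int dispatch(int engine, int arg)
{
	switch (engine) {
	case 0:
		return run_one_engine(arg);
	default:
		return -EINVAL;
	}
}
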
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c5e06c92d40e..82a084f02990 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -11,7 +11,6 @@
11#define __PSP_DEV_H__ 11#define __PSP_DEV_H__
12 12
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/pci.h>
15#include <linux/spinlock.h> 14#include <linux/spinlock.h>
16#include <linux/mutex.h> 15#include <linux/mutex.h>
17#include <linux/list.h> 16#include <linux/list.h>
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 8abe9ea7e76f..53c12562d31e 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -13,7 +13,6 @@
13#define __SP_DEV_H__ 13#define __SP_DEV_H__
14 14
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/pci.h>
17#include <linux/spinlock.h> 16#include <linux/spinlock.h>
18#include <linux/mutex.h> 17#include <linux/mutex.h>
19#include <linux/list.h> 18#include <linux/list.h>
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 1b45236e3716..831aac1393a2 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -125,7 +125,6 @@ static int sp_platform_probe(struct platform_device *pdev)
125 struct sp_platform *sp_platform; 125 struct sp_platform *sp_platform;
126 struct device *dev = &pdev->dev; 126 struct device *dev = &pdev->dev;
127 enum dev_dma_attr attr; 127 enum dev_dma_attr attr;
128 struct resource *ior;
129 int ret; 128 int ret;
130 129
131 ret = -ENOMEM; 130 ret = -ENOMEM;
@@ -146,8 +145,7 @@ static int sp_platform_probe(struct platform_device *pdev)
146 goto e_err; 145 goto e_err;
147 } 146 }
148 147
149 ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); 148 sp->io_map = devm_platform_ioremap_resource(pdev, 0);
150 sp->io_map = devm_ioremap_resource(dev, ior);
151 if (IS_ERR(sp->io_map)) { 149 if (IS_ERR(sp->io_map)) {
152 ret = PTR_ERR(sp->io_map); 150 ret = PTR_ERR(sp->io_map);
153 goto e_err; 151 goto e_err;
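
sp-platform.c now uses devm_platform_ioremap_resource(), which folds platform_get_resource() plus devm_ioremap_resource() into a single call. A minimal probe fragment showing the shape; my_probe is a placeholder name:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int my_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ...program registers through base... */
	return 0;
}
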
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index 145e50bdbf16..5cfda508ee41 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -2,7 +2,7 @@
2# Copyright (C) 2012-2019 ARM Limited (or its affiliates). 2# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
3 3
4obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o 4obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
5ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o 5ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o
6ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o 6ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
7ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o 7ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
8ccree-$(CONFIG_PM) += cc_pm.o 8ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 7aa4cbe19a86..d3e8faa03f15 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -6,7 +6,7 @@
6#include <crypto/algapi.h> 6#include <crypto/algapi.h>
7#include <crypto/internal/aead.h> 7#include <crypto/internal/aead.h>
8#include <crypto/authenc.h> 8#include <crypto/authenc.h>
9#include <crypto/des.h> 9#include <crypto/internal/des.h>
10#include <linux/rtnetlink.h> 10#include <linux/rtnetlink.h>
11#include "cc_driver.h" 11#include "cc_driver.h"
12#include "cc_buffer_mgr.h" 12#include "cc_buffer_mgr.h"
@@ -236,31 +236,17 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
236 /* In case of payload authentication failure, MUST NOT 236 /* In case of payload authentication failure, MUST NOT
237 * revealed the decrypted message --> zero its memory. 237 * revealed the decrypted message --> zero its memory.
238 */ 238 */
239 cc_zero_sgl(areq->dst, areq_ctx->cryptlen); 239 sg_zero_buffer(areq->dst, sg_nents(areq->dst),
240 areq->cryptlen, 0);
240 err = -EBADMSG; 241 err = -EBADMSG;
241 } 242 }
242 } else { /*ENCRYPT*/ 243 /*ENCRYPT*/
243 if (areq_ctx->is_icv_fragmented) { 244 } else if (areq_ctx->is_icv_fragmented) {
244 u32 skip = areq->cryptlen + areq_ctx->dst_offset; 245 u32 skip = areq->cryptlen + areq_ctx->dst_offset;
245
246 cc_copy_sg_portion(dev, areq_ctx->mac_buf,
247 areq_ctx->dst_sgl, skip,
248 (skip + ctx->authsize),
249 CC_SG_FROM_BUF);
250 }
251 246
252 /* If an IV was generated, copy it back to the user provided 247 cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
253 * buffer. 248 skip, (skip + ctx->authsize),
254 */ 249 CC_SG_FROM_BUF);
255 if (areq_ctx->backup_giv) {
256 if (ctx->cipher_mode == DRV_CIPHER_CTR)
257 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
258 CTR_RFC3686_NONCE_SIZE,
259 CTR_RFC3686_IV_SIZE);
260 else if (ctx->cipher_mode == DRV_CIPHER_CCM)
261 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
262 CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
263 }
264 } 250 }
265done: 251done:
266 aead_request_complete(areq, err); 252 aead_request_complete(areq, err);
@@ -663,33 +649,17 @@ static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
663 unsigned int keylen) 649 unsigned int keylen)
664{ 650{
665 struct crypto_authenc_keys keys; 651 struct crypto_authenc_keys keys;
666 u32 flags;
667 int err; 652 int err;
668 653
669 err = crypto_authenc_extractkeys(&keys, key, keylen); 654 err = crypto_authenc_extractkeys(&keys, key, keylen);
670 if (unlikely(err)) 655 if (unlikely(err))
671 goto badkey; 656 return err;
672
673 err = -EINVAL;
674 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
675 goto badkey;
676 657
677 flags = crypto_aead_get_flags(aead); 658 err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
678 err = __des3_verify_key(&flags, keys.enckey); 659 cc_aead_setkey(aead, key, keylen);
679 if (unlikely(err)) {
680 crypto_aead_set_flags(aead, flags);
681 goto out;
682 }
683
684 err = cc_aead_setkey(aead, key, keylen);
685 660
686out:
687 memzero_explicit(&keys, sizeof(keys)); 661 memzero_explicit(&keys, sizeof(keys));
688 return err; 662 return err;
689
690badkey:
691 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
692 goto out;
693} 663}
694 664
695static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, 665static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1975,9 +1945,8 @@ static int cc_proc_aead(struct aead_request *req,
1975 */ 1945 */
1976 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, 1946 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1977 CTR_RFC3686_NONCE_SIZE); 1947 CTR_RFC3686_NONCE_SIZE);
1978 if (!areq_ctx->backup_giv) /*User none-generated IV*/ 1948 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1979 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, 1949 CTR_RFC3686_IV_SIZE);
1980 req->iv, CTR_RFC3686_IV_SIZE);
1981 /* Initialize counter portion of counter block */ 1950 /* Initialize counter portion of counter block */
1982 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + 1951 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1983 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); 1952 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -2023,40 +1992,6 @@ static int cc_proc_aead(struct aead_request *req,
2023 goto exit; 1992 goto exit;
2024 } 1993 }
2025 1994
2026 /* do we need to generate IV? */
2027 if (areq_ctx->backup_giv) {
2028 /* set the DMA mapped IV address*/
2029 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
2030 cc_req.ivgen_dma_addr[0] =
2031 areq_ctx->gen_ctx.iv_dma_addr +
2032 CTR_RFC3686_NONCE_SIZE;
2033 cc_req.ivgen_dma_addr_len = 1;
2034 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
2035 /* In ccm, the IV needs to exist both inside B0 and
2036 * inside the counter.It is also copied to iv_dma_addr
2037 * for other reasons (like returning it to the user).
2038 * So, using 3 (identical) IV outputs.
2039 */
2040 cc_req.ivgen_dma_addr[0] =
2041 areq_ctx->gen_ctx.iv_dma_addr +
2042 CCM_BLOCK_IV_OFFSET;
2043 cc_req.ivgen_dma_addr[1] =
2044 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2045 CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
2046 cc_req.ivgen_dma_addr[2] =
2047 sg_dma_address(&areq_ctx->ccm_adata_sg) +
2048 CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
2049 cc_req.ivgen_dma_addr_len = 3;
2050 } else {
2051 cc_req.ivgen_dma_addr[0] =
2052 areq_ctx->gen_ctx.iv_dma_addr;
2053 cc_req.ivgen_dma_addr_len = 1;
2054 }
2055
2056 /* set the IV size (8/16 B long)*/
2057 cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2058 }
2059
2060 /* STAT_PHASE_2: Create sequence */ 1995 /* STAT_PHASE_2: Create sequence */
2061 1996
2062 /* Load MLLI tables to SRAM if necessary */ 1997 /* Load MLLI tables to SRAM if necessary */
@@ -2107,7 +2042,6 @@ static int cc_aead_encrypt(struct aead_request *req)
2107 /* No generated IV required */ 2042 /* No generated IV required */
2108 areq_ctx->backup_iv = req->iv; 2043 areq_ctx->backup_iv = req->iv;
2109 areq_ctx->assoclen = req->assoclen; 2044 areq_ctx->assoclen = req->assoclen;
2110 areq_ctx->backup_giv = NULL;
2111 areq_ctx->is_gcm4543 = false; 2045 areq_ctx->is_gcm4543 = false;
2112 2046
2113 areq_ctx->plaintext_authenticate_only = false; 2047 areq_ctx->plaintext_authenticate_only = false;
@@ -2139,7 +2073,6 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2139 /* No generated IV required */ 2073 /* No generated IV required */
2140 areq_ctx->backup_iv = req->iv; 2074 areq_ctx->backup_iv = req->iv;
2141 areq_ctx->assoclen = req->assoclen; 2075 areq_ctx->assoclen = req->assoclen;
2142 areq_ctx->backup_giv = NULL;
2143 areq_ctx->is_gcm4543 = true; 2076 areq_ctx->is_gcm4543 = true;
2144 2077
2145 cc_proc_rfc4309_ccm(req); 2078 cc_proc_rfc4309_ccm(req);
@@ -2161,7 +2094,6 @@ static int cc_aead_decrypt(struct aead_request *req)
2161 /* No generated IV required */ 2094 /* No generated IV required */
2162 areq_ctx->backup_iv = req->iv; 2095 areq_ctx->backup_iv = req->iv;
2163 areq_ctx->assoclen = req->assoclen; 2096 areq_ctx->assoclen = req->assoclen;
2164 areq_ctx->backup_giv = NULL;
2165 areq_ctx->is_gcm4543 = false; 2097 areq_ctx->is_gcm4543 = false;
2166 2098
2167 areq_ctx->plaintext_authenticate_only = false; 2099 areq_ctx->plaintext_authenticate_only = false;
@@ -2191,7 +2123,6 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2191 /* No generated IV required */ 2123 /* No generated IV required */
2192 areq_ctx->backup_iv = req->iv; 2124 areq_ctx->backup_iv = req->iv;
2193 areq_ctx->assoclen = req->assoclen; 2125 areq_ctx->assoclen = req->assoclen;
2194 areq_ctx->backup_giv = NULL;
2195 2126
2196 areq_ctx->is_gcm4543 = true; 2127 areq_ctx->is_gcm4543 = true;
2197 cc_proc_rfc4309_ccm(req); 2128 cc_proc_rfc4309_ccm(req);
@@ -2311,8 +2242,6 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2311 /* No generated IV required */ 2242 /* No generated IV required */
2312 areq_ctx->backup_iv = req->iv; 2243 areq_ctx->backup_iv = req->iv;
2313 areq_ctx->assoclen = req->assoclen; 2244 areq_ctx->assoclen = req->assoclen;
2314 areq_ctx->backup_giv = NULL;
2315
2316 areq_ctx->plaintext_authenticate_only = false; 2245 areq_ctx->plaintext_authenticate_only = false;
2317 2246
2318 cc_proc_rfc4_gcm(req); 2247 cc_proc_rfc4_gcm(req);
@@ -2328,9 +2257,16 @@ out:
2328static int cc_rfc4543_gcm_encrypt(struct aead_request *req) 2257static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2329{ 2258{
2330 /* Very similar to cc_aead_encrypt() above. */ 2259 /* Very similar to cc_aead_encrypt() above. */
2331 2260 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2261 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2262 struct device *dev = drvdata_to_dev(ctx->drvdata);
2332 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2263 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2333 int rc; 2264 int rc = -EINVAL;
2265
2266 if (!valid_assoclen(req)) {
2267 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2268 goto out;
2269 }
2334 2270
2335 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2271 memset(areq_ctx, 0, sizeof(*areq_ctx));
2336 2272
@@ -2340,7 +2276,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2340 /* No generated IV required */ 2276 /* No generated IV required */
2341 areq_ctx->backup_iv = req->iv; 2277 areq_ctx->backup_iv = req->iv;
2342 areq_ctx->assoclen = req->assoclen; 2278 areq_ctx->assoclen = req->assoclen;
2343 areq_ctx->backup_giv = NULL;
2344 2279
2345 cc_proc_rfc4_gcm(req); 2280 cc_proc_rfc4_gcm(req);
2346 areq_ctx->is_gcm4543 = true; 2281 areq_ctx->is_gcm4543 = true;
@@ -2348,7 +2283,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2348 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); 2283 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2349 if (rc != -EINPROGRESS && rc != -EBUSY) 2284 if (rc != -EINPROGRESS && rc != -EBUSY)
2350 req->iv = areq_ctx->backup_iv; 2285 req->iv = areq_ctx->backup_iv;
2351 2286out:
2352 return rc; 2287 return rc;
2353} 2288}
2354 2289
@@ -2372,8 +2307,6 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2372 /* No generated IV required */ 2307 /* No generated IV required */
2373 areq_ctx->backup_iv = req->iv; 2308 areq_ctx->backup_iv = req->iv;
2374 areq_ctx->assoclen = req->assoclen; 2309 areq_ctx->assoclen = req->assoclen;
2375 areq_ctx->backup_giv = NULL;
2376
2377 areq_ctx->plaintext_authenticate_only = false; 2310 areq_ctx->plaintext_authenticate_only = false;
2378 2311
2379 cc_proc_rfc4_gcm(req); 2312 cc_proc_rfc4_gcm(req);
@@ -2389,9 +2322,16 @@ out:
2389static int cc_rfc4543_gcm_decrypt(struct aead_request *req) 2322static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2390{ 2323{
2391 /* Very similar to cc_aead_decrypt() above. */ 2324 /* Very similar to cc_aead_decrypt() above. */
2392 2325 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2326 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2327 struct device *dev = drvdata_to_dev(ctx->drvdata);
2393 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2328 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2394 int rc; 2329 int rc = -EINVAL;
2330
2331 if (!valid_assoclen(req)) {
2332 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2333 goto out;
2334 }
2395 2335
2396 memset(areq_ctx, 0, sizeof(*areq_ctx)); 2336 memset(areq_ctx, 0, sizeof(*areq_ctx));
2397 2337
@@ -2401,7 +2341,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2401 /* No generated IV required */ 2341 /* No generated IV required */
2402 areq_ctx->backup_iv = req->iv; 2342 areq_ctx->backup_iv = req->iv;
2403 areq_ctx->assoclen = req->assoclen; 2343 areq_ctx->assoclen = req->assoclen;
2404 areq_ctx->backup_giv = NULL;
2405 2344
2406 cc_proc_rfc4_gcm(req); 2345 cc_proc_rfc4_gcm(req);
2407 areq_ctx->is_gcm4543 = true; 2346 areq_ctx->is_gcm4543 = true;
@@ -2409,7 +2348,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2409 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); 2348 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2410 if (rc != -EINPROGRESS && rc != -EBUSY) 2349 if (rc != -EINPROGRESS && rc != -EBUSY)
2411 req->iv = areq_ctx->backup_iv; 2350 req->iv = areq_ctx->backup_iv;
2412 2351out:
2413 return rc; 2352 return rc;
2414} 2353}
2415 2354
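
With the driver-local cc_zero_sgl() gone, cc_aead_complete() relies on the generic sg_zero_buffer() helper, which zeroes a byte range of a scatterlist starting at a given offset. A small sketch of the call; wipe_plaintext is an invented wrapper, not driver code:

#include <linux/scatterlist.h>

static void wipe_plaintext(struct scatterlist *dst, size_t len)
{
	/* Zero the first len bytes spanned by the destination S/G list */
	sg_zero_buffer(dst, sg_nents(dst), len, 0);
}
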
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index e51724b96c56..f12169b57f9d 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -65,8 +65,7 @@ struct aead_req_ctx {
65 unsigned int hw_iv_size ____cacheline_aligned; 65 unsigned int hw_iv_size ____cacheline_aligned;
66 /* used to prevent cache coherence problem */ 66 /* used to prevent cache coherence problem */
67 u8 backup_mac[MAX_MAC_SIZE]; 67 u8 backup_mac[MAX_MAC_SIZE];
68 u8 *backup_iv; /*store iv for generated IV flow*/ 68 u8 *backup_iv; /* store orig iv */
69 u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
70 u32 assoclen; /* internal assoclen */ 69 u32 assoclen; /* internal assoclen */
71 dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ 70 dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
72 /* buffer for internal ccm configurations */ 71 /* buffer for internal ccm configurations */
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index c81ad33f9115..a72586eccd81 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -100,27 +100,6 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
100} 100}
101 101
102/** 102/**
103 * cc_zero_sgl() - Zero scatter scatter list data.
104 *
105 * @sgl:
106 */
107void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
108{
109 struct scatterlist *current_sg = sgl;
110 int sg_index = 0;
111
112 while (sg_index <= data_len) {
113 if (!current_sg) {
114 /* reached the end of the sgl --> just return back */
115 return;
116 }
117 memset(sg_virt(current_sg), 0, current_sg->length);
118 sg_index += current_sg->length;
119 current_sg = sg_next(current_sg);
120 }
121}
122
123/**
124 * cc_copy_sg_portion() - Copy scatter list data, 103 * cc_copy_sg_portion() - Copy scatter list data,
125 * from to_skip to end, to dest and vice versa 104 * from to_skip to end, to dest and vice versa
126 * 105 *
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index a726016bdbc1..af434872c6ff 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -66,6 +66,4 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
66void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, 66void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
67 u32 to_skip, u32 end, enum cc_sg_cpy_direct direct); 67 u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
68 68
69void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
70
71#endif /*__BUFFER_MGR_H__*/ 69#endif /*__BUFFER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 5b58226ea24d..254b48797799 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -5,7 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <crypto/algapi.h> 6#include <crypto/algapi.h>
7#include <crypto/internal/skcipher.h> 7#include <crypto/internal/skcipher.h>
8#include <crypto/des.h> 8#include <crypto/internal/des.h>
9#include <crypto/xts.h> 9#include <crypto/xts.h>
10#include <crypto/sm4.h> 10#include <crypto/sm4.h>
11#include <crypto/scatterwalk.h> 11#include <crypto/scatterwalk.h>
@@ -116,10 +116,6 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
116 case S_DIN_to_AES: 116 case S_DIN_to_AES:
117 switch (ctx_p->cipher_mode) { 117 switch (ctx_p->cipher_mode) {
118 case DRV_CIPHER_XTS: 118 case DRV_CIPHER_XTS:
119 if (size >= AES_BLOCK_SIZE &&
120 IS_ALIGNED(size, AES_BLOCK_SIZE))
121 return 0;
122 break;
123 case DRV_CIPHER_CBC_CTS: 119 case DRV_CIPHER_CBC_CTS:
124 if (size >= AES_BLOCK_SIZE) 120 if (size >= AES_BLOCK_SIZE)
125 return 0; 121 return 0;
@@ -411,16 +407,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
411 * HW does the expansion on its own. 407 * HW does the expansion on its own.
412 */ 408 */
413 if (ctx_p->flow_mode == S_DIN_to_DES) { 409 if (ctx_p->flow_mode == S_DIN_to_DES) {
414 u32 tmp[DES3_EDE_EXPKEY_WORDS]; 410 if ((keylen == DES3_EDE_KEY_SIZE &&
415 if (keylen == DES3_EDE_KEY_SIZE && 411 verify_skcipher_des3_key(sktfm, key)) ||
416 __des3_ede_setkey(tmp, &tfm->crt_flags, key, 412 verify_skcipher_des_key(sktfm, key)) {
417 DES3_EDE_KEY_SIZE)) {
418 dev_dbg(dev, "weak 3DES key");
419 return -EINVAL;
420 } else if (!des_ekey(tmp, key) &&
421 (crypto_tfm_get_flags(tfm) &
422 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
423 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
424 dev_dbg(dev, "weak DES key"); 413 dev_dbg(dev, "weak DES key");
425 return -EINVAL; 414 return -EINVAL;
426 } 415 }
@@ -945,7 +934,7 @@ static const struct cc_alg_template skcipher_algs[] = {
945 { 934 {
946 .name = "xts(paes)", 935 .name = "xts(paes)",
947 .driver_name = "xts-paes-ccree", 936 .driver_name = "xts-paes-ccree",
948 .blocksize = AES_BLOCK_SIZE, 937 .blocksize = 1,
949 .template_skcipher = { 938 .template_skcipher = {
950 .setkey = cc_cipher_sethkey, 939 .setkey = cc_cipher_sethkey,
951 .encrypt = cc_cipher_encrypt, 940 .encrypt = cc_cipher_encrypt,
@@ -963,7 +952,7 @@ static const struct cc_alg_template skcipher_algs[] = {
963 { 952 {
964 .name = "xts512(paes)", 953 .name = "xts512(paes)",
965 .driver_name = "xts-paes-du512-ccree", 954 .driver_name = "xts-paes-du512-ccree",
966 .blocksize = AES_BLOCK_SIZE, 955 .blocksize = 1,
967 .template_skcipher = { 956 .template_skcipher = {
968 .setkey = cc_cipher_sethkey, 957 .setkey = cc_cipher_sethkey,
969 .encrypt = cc_cipher_encrypt, 958 .encrypt = cc_cipher_encrypt,
@@ -982,7 +971,7 @@ static const struct cc_alg_template skcipher_algs[] = {
982 { 971 {
983 .name = "xts4096(paes)", 972 .name = "xts4096(paes)",
984 .driver_name = "xts-paes-du4096-ccree", 973 .driver_name = "xts-paes-du4096-ccree",
985 .blocksize = AES_BLOCK_SIZE, 974 .blocksize = 1,
986 .template_skcipher = { 975 .template_skcipher = {
987 .setkey = cc_cipher_sethkey, 976 .setkey = cc_cipher_sethkey,
988 .encrypt = cc_cipher_encrypt, 977 .encrypt = cc_cipher_encrypt,
@@ -1203,7 +1192,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1203 { 1192 {
1204 .name = "xts(aes)", 1193 .name = "xts(aes)",
1205 .driver_name = "xts-aes-ccree", 1194 .driver_name = "xts-aes-ccree",
1206 .blocksize = AES_BLOCK_SIZE, 1195 .blocksize = 1,
1207 .template_skcipher = { 1196 .template_skcipher = {
1208 .setkey = cc_cipher_setkey, 1197 .setkey = cc_cipher_setkey,
1209 .encrypt = cc_cipher_encrypt, 1198 .encrypt = cc_cipher_encrypt,
@@ -1220,7 +1209,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1220 { 1209 {
1221 .name = "xts512(aes)", 1210 .name = "xts512(aes)",
1222 .driver_name = "xts-aes-du512-ccree", 1211 .driver_name = "xts-aes-du512-ccree",
1223 .blocksize = AES_BLOCK_SIZE, 1212 .blocksize = 1,
1224 .template_skcipher = { 1213 .template_skcipher = {
1225 .setkey = cc_cipher_setkey, 1214 .setkey = cc_cipher_setkey,
1226 .encrypt = cc_cipher_encrypt, 1215 .encrypt = cc_cipher_encrypt,
@@ -1238,7 +1227,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1238 { 1227 {
1239 .name = "xts4096(aes)", 1228 .name = "xts4096(aes)",
1240 .driver_name = "xts-aes-du4096-ccree", 1229 .driver_name = "xts-aes-du4096-ccree",
1241 .blocksize = AES_BLOCK_SIZE, 1230 .blocksize = 1,
1242 .template_skcipher = { 1231 .template_skcipher = {
1243 .setkey = cc_cipher_setkey, 1232 .setkey = cc_cipher_setkey,
1244 .encrypt = cc_cipher_encrypt, 1233 .encrypt = cc_cipher_encrypt,
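
cc_cipher_setkey() now leans on verify_skcipher_des_key() and verify_skcipher_des3_key() from <crypto/internal/des.h> instead of expanding the key just to detect weak values. A sketch of that check under an invented my_setkey(); the hardware-programming step is elided:

#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	int err;

	if (keylen == DES3_EDE_KEY_SIZE)
		err = verify_skcipher_des3_key(tfm, key);
	else
		err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	/* ...load the verified key into the hardware context... */
	return 0;
}
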
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 980aa04b655b..8b8eee513c27 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -22,7 +22,6 @@
22#include "cc_cipher.h" 22#include "cc_cipher.h"
23#include "cc_aead.h" 23#include "cc_aead.h"
24#include "cc_hash.h" 24#include "cc_hash.h"
25#include "cc_ivgen.h"
26#include "cc_sram_mgr.h" 25#include "cc_sram_mgr.h"
27#include "cc_pm.h" 26#include "cc_pm.h"
28#include "cc_fips.h" 27#include "cc_fips.h"
@@ -339,10 +338,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
339 338
340 /* Then IRQ */ 339 /* Then IRQ */
341 new_drvdata->irq = platform_get_irq(plat_dev, 0); 340 new_drvdata->irq = platform_get_irq(plat_dev, 0);
342 if (new_drvdata->irq < 0) { 341 if (new_drvdata->irq < 0)
343 dev_err(dev, "Failed getting IRQ resource\n");
344 return new_drvdata->irq; 342 return new_drvdata->irq;
345 }
346 343
347 init_completion(&new_drvdata->hw_queue_avail); 344 init_completion(&new_drvdata->hw_queue_avail);
348 345
@@ -421,7 +418,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
421 } 418 }
422 break; 419 break;
423 default: 420 default:
424 dev_err(dev, "Unsupported engines configration.\n"); 421 dev_err(dev, "Unsupported engines configuration.\n");
425 rc = -EINVAL; 422 rc = -EINVAL;
426 goto post_clk_err; 423 goto post_clk_err;
427 } 424 }
@@ -503,17 +500,11 @@ static int init_cc_resources(struct platform_device *plat_dev)
503 goto post_buf_mgr_err; 500 goto post_buf_mgr_err;
504 } 501 }
505 502
506 rc = cc_ivgen_init(new_drvdata);
507 if (rc) {
508 dev_err(dev, "cc_ivgen_init failed\n");
509 goto post_buf_mgr_err;
510 }
511
512 /* Allocate crypto algs */ 503 /* Allocate crypto algs */
513 rc = cc_cipher_alloc(new_drvdata); 504 rc = cc_cipher_alloc(new_drvdata);
514 if (rc) { 505 if (rc) {
515 dev_err(dev, "cc_cipher_alloc failed\n"); 506 dev_err(dev, "cc_cipher_alloc failed\n");
516 goto post_ivgen_err; 507 goto post_buf_mgr_err;
517 } 508 }
518 509
519 /* hash must be allocated before aead since hash exports APIs */ 510 /* hash must be allocated before aead since hash exports APIs */
@@ -544,8 +535,6 @@ post_hash_err:
544 cc_hash_free(new_drvdata); 535 cc_hash_free(new_drvdata);
545post_cipher_err: 536post_cipher_err:
546 cc_cipher_free(new_drvdata); 537 cc_cipher_free(new_drvdata);
547post_ivgen_err:
548 cc_ivgen_fini(new_drvdata);
549post_buf_mgr_err: 538post_buf_mgr_err:
550 cc_buffer_mgr_fini(new_drvdata); 539 cc_buffer_mgr_fini(new_drvdata);
551post_req_mgr_err: 540post_req_mgr_err:
@@ -577,7 +566,6 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
577 cc_aead_free(drvdata); 566 cc_aead_free(drvdata);
578 cc_hash_free(drvdata); 567 cc_hash_free(drvdata);
579 cc_cipher_free(drvdata); 568 cc_cipher_free(drvdata);
580 cc_ivgen_fini(drvdata);
581 cc_pm_fini(drvdata); 569 cc_pm_fini(drvdata);
582 cc_buffer_mgr_fini(drvdata); 570 cc_buffer_mgr_fini(drvdata);
583 cc_req_mgr_fini(drvdata); 571 cc_req_mgr_fini(drvdata);
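
The dropped dev_err() around platform_get_irq() reflects that the platform core already logs an error of its own when no IRQ is found, so callers can simply propagate the negative return. Roughly, under the illustrative name probe_irq_example:

#include <linux/platform_device.h>

static int probe_irq_example(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* already reported by the platform core */

	/* ...request the interrupt using irq... */
	return 0;
}
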
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 7cd99380bf1f..ab31d4a68c80 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -126,15 +126,6 @@ struct cc_cpp_req {
126struct cc_crypto_req { 126struct cc_crypto_req {
127 void (*user_cb)(struct device *dev, void *req, int err); 127 void (*user_cb)(struct device *dev, void *req, int err);
128 void *user_arg; 128 void *user_arg;
129 dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
130 /* For the first 'ivgen_dma_addr_len' addresses of this array,
131 * generated IV would be placed in it by send_request().
132 * Same generated IV for all addresses!
133 */
134 /* Amount of 'ivgen_dma_addr' elements to be filled. */
135 unsigned int ivgen_dma_addr_len;
136 /* The generated IV size required, 8/16 B allowed. */
137 unsigned int ivgen_size;
138 struct completion seq_compl; /* request completion */ 129 struct completion seq_compl; /* request completion */
139 struct cc_cpp_req cpp; 130 struct cc_cpp_req cpp;
140}; 131};
@@ -158,7 +149,6 @@ struct cc_drvdata {
158 void *aead_handle; 149 void *aead_handle;
159 void *request_mgr_handle; 150 void *request_mgr_handle;
160 void *fips_handle; 151 void *fips_handle;
161 void *ivgen_handle;
162 void *sram_mgr_handle; 152 void *sram_mgr_handle;
163 void *debugfs; 153 void *debugfs;
164 struct clk *clk; 154 struct clk *clk;
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 5ad3ffb7acaa..4c8bce33abcf 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -3,6 +3,7 @@
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/fips.h> 5#include <linux/fips.h>
6#include <linux/notifier.h>
6 7
7#include "cc_driver.h" 8#include "cc_driver.h"
8#include "cc_fips.h" 9#include "cc_fips.h"
@@ -11,6 +12,8 @@ static void fips_dsr(unsigned long devarg);
11 12
12struct cc_fips_handle { 13struct cc_fips_handle {
13 struct tasklet_struct tasklet; 14 struct tasklet_struct tasklet;
15 struct notifier_block nb;
16 struct cc_drvdata *drvdata;
14}; 17};
15 18
16/* The function called once at driver entry point to check 19/* The function called once at driver entry point to check
@@ -21,7 +24,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
21 u32 reg; 24 u32 reg;
22 25
23 reg = cc_ioread(drvdata, CC_REG(GPR_HOST)); 26 reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
24 return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)); 27 /* Did the TEE report status? */
28 if (reg & CC_FIPS_SYNC_TEE_STATUS)
29 /* Yes. Is it OK? */
30 return (reg & CC_FIPS_SYNC_MODULE_OK);
31
32 /* No. It's either not in use or will be reported later */
33 return true;
25} 34}
26 35
27/* 36/*
@@ -40,6 +49,21 @@ void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
40 cc_iowrite(drvdata, CC_REG(HOST_GPR0), val); 49 cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
41} 50}
42 51
52/* Push REE side FIPS test failure to TEE side */
53static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1,
54 void *unused2)
55{
56 struct cc_fips_handle *fips_h =
57 container_of(nb, struct cc_fips_handle, nb);
58 struct cc_drvdata *drvdata = fips_h->drvdata;
59 struct device *dev = drvdata_to_dev(drvdata);
60
61 cc_set_ree_fips_status(drvdata, false);
62 dev_info(dev, "Notifying TEE of FIPS test failure...\n");
63
64 return NOTIFY_OK;
65}
66
43void cc_fips_fini(struct cc_drvdata *drvdata) 67void cc_fips_fini(struct cc_drvdata *drvdata)
44{ 68{
45 struct cc_fips_handle *fips_h = drvdata->fips_handle; 69 struct cc_fips_handle *fips_h = drvdata->fips_handle;
@@ -47,6 +71,8 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
47 if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h) 71 if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
48 return; 72 return;
49 73
74 atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb);
75
50 /* Kill tasklet */ 76 /* Kill tasklet */
51 tasklet_kill(&fips_h->tasklet); 77 tasklet_kill(&fips_h->tasklet);
52 drvdata->fips_handle = NULL; 78 drvdata->fips_handle = NULL;
@@ -118,6 +144,9 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
118 144
119 dev_dbg(dev, "Initializing fips tasklet\n"); 145 dev_dbg(dev, "Initializing fips tasklet\n");
120 tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); 146 tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
147 fips_h->drvdata = p_drvdata;
148 fips_h->nb.notifier_call = cc_ree_fips_failure;
149 atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb);
121 150
122 cc_tee_handle_fips_error(p_drvdata); 151 cc_tee_handle_fips_error(p_drvdata);
123 152
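
cc_fips.c now hooks the driver into fips_fail_notif_chain so a FIPS self-test failure elsewhere in the kernel is pushed to the TEE. The generic atomic-notifier pattern looks like this; example_chain and the callback are illustrative, while the real code registers on the chain exported by <linux/fips.h>:

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_event(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	/* React to the event; NOTIFY_OK lets later subscribers run too */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

static void example_register(void)
{
	atomic_notifier_chain_register(&example_chain, &example_nb);
}

static void example_unregister(void)
{
	atomic_notifier_chain_unregister(&example_chain, &example_nb);
}
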
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index a6abe4e3bb0e..bc71bdf44a9f 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -25,27 +25,27 @@ struct cc_hash_handle {
25 struct list_head hash_list; 25 struct list_head hash_list;
26}; 26};
27 27
28static const u32 digest_len_init[] = { 28static const u32 cc_digest_len_init[] = {
29 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; 29 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
30static const u32 md5_init[] = { 30static const u32 cc_md5_init[] = {
31 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; 31 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
32static const u32 sha1_init[] = { 32static const u32 cc_sha1_init[] = {
33 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; 33 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
34static const u32 sha224_init[] = { 34static const u32 cc_sha224_init[] = {
35 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4, 35 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
36 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 }; 36 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
37static const u32 sha256_init[] = { 37static const u32 cc_sha256_init[] = {
38 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, 38 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
39 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; 39 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
40static const u32 digest_len_sha512_init[] = { 40static const u32 cc_digest_len_sha512_init[] = {
41 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; 41 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
42static u64 sha384_init[] = { 42static u64 cc_sha384_init[] = {
43 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4, 43 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
44 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 }; 44 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
45static u64 sha512_init[] = { 45static u64 cc_sha512_init[] = {
46 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, 46 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
47 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; 47 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
48static const u32 sm3_init[] = { 48static const u32 cc_sm3_init[] = {
49 SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, 49 SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
50 SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; 50 SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
51 51
@@ -144,10 +144,11 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
144 if (ctx->hash_mode == DRV_HASH_SHA512 || 144 if (ctx->hash_mode == DRV_HASH_SHA512 ||
145 ctx->hash_mode == DRV_HASH_SHA384) 145 ctx->hash_mode == DRV_HASH_SHA384)
146 memcpy(state->digest_bytes_len, 146 memcpy(state->digest_bytes_len,
147 digest_len_sha512_init, 147 cc_digest_len_sha512_init,
148 ctx->hash_len); 148 ctx->hash_len);
149 else 149 else
150 memcpy(state->digest_bytes_len, digest_len_init, 150 memcpy(state->digest_bytes_len,
151 cc_digest_len_init,
151 ctx->hash_len); 152 ctx->hash_len);
152 } 153 }
153 154
@@ -1873,26 +1874,26 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
1873 int rc = 0; 1874 int rc = 0;
1874 1875
1875 /* Copy-to-sram digest-len */ 1876 /* Copy-to-sram digest-len */
1876 cc_set_sram_desc(digest_len_init, sram_buff_ofs, 1877 cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
1877 ARRAY_SIZE(digest_len_init), larval_seq, 1878 ARRAY_SIZE(cc_digest_len_init), larval_seq,
1878 &larval_seq_len); 1879 &larval_seq_len);
1879 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1880 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1880 if (rc) 1881 if (rc)
1881 goto init_digest_const_err; 1882 goto init_digest_const_err;
1882 1883
1883 sram_buff_ofs += sizeof(digest_len_init); 1884 sram_buff_ofs += sizeof(cc_digest_len_init);
1884 larval_seq_len = 0; 1885 larval_seq_len = 0;
1885 1886
1886 if (large_sha_supported) { 1887 if (large_sha_supported) {
1887 /* Copy-to-sram digest-len for sha384/512 */ 1888 /* Copy-to-sram digest-len for sha384/512 */
1888 cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs, 1889 cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
1889 ARRAY_SIZE(digest_len_sha512_init), 1890 ARRAY_SIZE(cc_digest_len_sha512_init),
1890 larval_seq, &larval_seq_len); 1891 larval_seq, &larval_seq_len);
1891 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1892 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1892 if (rc) 1893 if (rc)
1893 goto init_digest_const_err; 1894 goto init_digest_const_err;
1894 1895
1895 sram_buff_ofs += sizeof(digest_len_sha512_init); 1896 sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
1896 larval_seq_len = 0; 1897 larval_seq_len = 0;
1897 } 1898 }
1898 1899
@@ -1900,64 +1901,64 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
1900 hash_handle->larval_digest_sram_addr = sram_buff_ofs; 1901 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1901 1902
1902 /* Copy-to-sram initial SHA* digests */ 1903 /* Copy-to-sram initial SHA* digests */
1903 cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init), 1904 cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
1904 larval_seq, &larval_seq_len); 1905 larval_seq, &larval_seq_len);
1905 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1906 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1906 if (rc) 1907 if (rc)
1907 goto init_digest_const_err; 1908 goto init_digest_const_err;
1908 sram_buff_ofs += sizeof(md5_init); 1909 sram_buff_ofs += sizeof(cc_md5_init);
1909 larval_seq_len = 0; 1910 larval_seq_len = 0;
1910 1911
1911 cc_set_sram_desc(sha1_init, sram_buff_ofs, 1912 cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
1912 ARRAY_SIZE(sha1_init), larval_seq, 1913 ARRAY_SIZE(cc_sha1_init), larval_seq,
1913 &larval_seq_len); 1914 &larval_seq_len);
1914 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1915 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1915 if (rc) 1916 if (rc)
1916 goto init_digest_const_err; 1917 goto init_digest_const_err;
1917 sram_buff_ofs += sizeof(sha1_init); 1918 sram_buff_ofs += sizeof(cc_sha1_init);
1918 larval_seq_len = 0; 1919 larval_seq_len = 0;
1919 1920
1920 cc_set_sram_desc(sha224_init, sram_buff_ofs, 1921 cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
1921 ARRAY_SIZE(sha224_init), larval_seq, 1922 ARRAY_SIZE(cc_sha224_init), larval_seq,
1922 &larval_seq_len); 1923 &larval_seq_len);
1923 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1924 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1924 if (rc) 1925 if (rc)
1925 goto init_digest_const_err; 1926 goto init_digest_const_err;
1926 sram_buff_ofs += sizeof(sha224_init); 1927 sram_buff_ofs += sizeof(cc_sha224_init);
1927 larval_seq_len = 0; 1928 larval_seq_len = 0;
1928 1929
1929 cc_set_sram_desc(sha256_init, sram_buff_ofs, 1930 cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
1930 ARRAY_SIZE(sha256_init), larval_seq, 1931 ARRAY_SIZE(cc_sha256_init), larval_seq,
1931 &larval_seq_len); 1932 &larval_seq_len);
1932 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1933 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1933 if (rc) 1934 if (rc)
1934 goto init_digest_const_err; 1935 goto init_digest_const_err;
1935 sram_buff_ofs += sizeof(sha256_init); 1936 sram_buff_ofs += sizeof(cc_sha256_init);
1936 larval_seq_len = 0; 1937 larval_seq_len = 0;
1937 1938
1938 if (sm3_supported) { 1939 if (sm3_supported) {
1939 cc_set_sram_desc(sm3_init, sram_buff_ofs, 1940 cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
1940 ARRAY_SIZE(sm3_init), larval_seq, 1941 ARRAY_SIZE(cc_sm3_init), larval_seq,
1941 &larval_seq_len); 1942 &larval_seq_len);
1942 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1943 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1943 if (rc) 1944 if (rc)
1944 goto init_digest_const_err; 1945 goto init_digest_const_err;
1945 sram_buff_ofs += sizeof(sm3_init); 1946 sram_buff_ofs += sizeof(cc_sm3_init);
1946 larval_seq_len = 0; 1947 larval_seq_len = 0;
1947 } 1948 }
1948 1949
1949 if (large_sha_supported) { 1950 if (large_sha_supported) {
1950 cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs, 1951 cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
1951 (ARRAY_SIZE(sha384_init) * 2), larval_seq, 1952 (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
1952 &larval_seq_len); 1953 &larval_seq_len);
1953 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1954 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1954 if (rc) 1955 if (rc)
1955 goto init_digest_const_err; 1956 goto init_digest_const_err;
1956 sram_buff_ofs += sizeof(sha384_init); 1957 sram_buff_ofs += sizeof(cc_sha384_init);
1957 larval_seq_len = 0; 1958 larval_seq_len = 0;
1958 1959
1959 cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs, 1960 cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
1960 (ARRAY_SIZE(sha512_init) * 2), larval_seq, 1961 (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
1961 &larval_seq_len); 1962 &larval_seq_len);
1962 rc = send_request_init(drvdata, larval_seq, larval_seq_len); 1963 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1963 if (rc) 1964 if (rc)
@@ -1986,8 +1987,8 @@ static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1986 */ 1987 */
1987void __init cc_hash_global_init(void) 1988void __init cc_hash_global_init(void)
1988{ 1989{
1989 cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2)); 1990 cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
1990 cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2)); 1991 cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
1991} 1992}
1992 1993
1993int cc_hash_alloc(struct cc_drvdata *drvdata) 1994int cc_hash_alloc(struct cc_drvdata *drvdata)
@@ -2006,18 +2007,18 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
2006 INIT_LIST_HEAD(&hash_handle->hash_list); 2007 INIT_LIST_HEAD(&hash_handle->hash_list);
2007 drvdata->hash_handle = hash_handle; 2008 drvdata->hash_handle = hash_handle;
2008 2009
2009 sram_size_to_alloc = sizeof(digest_len_init) + 2010 sram_size_to_alloc = sizeof(cc_digest_len_init) +
2010 sizeof(md5_init) + 2011 sizeof(cc_md5_init) +
2011 sizeof(sha1_init) + 2012 sizeof(cc_sha1_init) +
2012 sizeof(sha224_init) + 2013 sizeof(cc_sha224_init) +
2013 sizeof(sha256_init); 2014 sizeof(cc_sha256_init);
2014 2015
2015 if (drvdata->hw_rev >= CC_HW_REV_713) 2016 if (drvdata->hw_rev >= CC_HW_REV_713)
2016 sram_size_to_alloc += sizeof(sm3_init); 2017 sram_size_to_alloc += sizeof(cc_sm3_init);
2017 2018
2018 if (drvdata->hw_rev >= CC_HW_REV_712) 2019 if (drvdata->hw_rev >= CC_HW_REV_712)
2019 sram_size_to_alloc += sizeof(digest_len_sha512_init) + 2020 sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
2020 sizeof(sha384_init) + sizeof(sha512_init); 2021 sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
2021 2022
2022 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc); 2023 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
2023 if (sram_buff == NULL_SRAM_ADDR) { 2024 if (sram_buff == NULL_SRAM_ADDR) {
@@ -2258,22 +2259,22 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
2258{ 2259{
2259 switch (mode) { 2260 switch (mode) {
2260 case DRV_HASH_MD5: 2261 case DRV_HASH_MD5:
2261 return md5_init; 2262 return cc_md5_init;
2262 case DRV_HASH_SHA1: 2263 case DRV_HASH_SHA1:
2263 return sha1_init; 2264 return cc_sha1_init;
2264 case DRV_HASH_SHA224: 2265 case DRV_HASH_SHA224:
2265 return sha224_init; 2266 return cc_sha224_init;
2266 case DRV_HASH_SHA256: 2267 case DRV_HASH_SHA256:
2267 return sha256_init; 2268 return cc_sha256_init;
2268 case DRV_HASH_SHA384: 2269 case DRV_HASH_SHA384:
2269 return sha384_init; 2270 return cc_sha384_init;
2270 case DRV_HASH_SHA512: 2271 case DRV_HASH_SHA512:
2271 return sha512_init; 2272 return cc_sha512_init;
2272 case DRV_HASH_SM3: 2273 case DRV_HASH_SM3:
2273 return sm3_init; 2274 return cc_sm3_init;
2274 default: 2275 default:
2275 dev_err(dev, "Invalid hash mode (%d)\n", mode); 2276 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2276 return md5_init; 2277 return cc_md5_init;
2277 } 2278 }
2278} 2279}
2279 2280
@@ -2301,40 +2302,40 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2301 return (hash_handle->larval_digest_sram_addr); 2302 return (hash_handle->larval_digest_sram_addr);
2302 case DRV_HASH_SHA1: 2303 case DRV_HASH_SHA1:
2303 return (hash_handle->larval_digest_sram_addr + 2304 return (hash_handle->larval_digest_sram_addr +
2304 sizeof(md5_init)); 2305 sizeof(cc_md5_init));
2305 case DRV_HASH_SHA224: 2306 case DRV_HASH_SHA224:
2306 return (hash_handle->larval_digest_sram_addr + 2307 return (hash_handle->larval_digest_sram_addr +
2307 sizeof(md5_init) + 2308 sizeof(cc_md5_init) +
2308 sizeof(sha1_init)); 2309 sizeof(cc_sha1_init));
2309 case DRV_HASH_SHA256: 2310 case DRV_HASH_SHA256:
2310 return (hash_handle->larval_digest_sram_addr + 2311 return (hash_handle->larval_digest_sram_addr +
2311 sizeof(md5_init) + 2312 sizeof(cc_md5_init) +
2312 sizeof(sha1_init) + 2313 sizeof(cc_sha1_init) +
2313 sizeof(sha224_init)); 2314 sizeof(cc_sha224_init));
2314 case DRV_HASH_SM3: 2315 case DRV_HASH_SM3:
2315 return (hash_handle->larval_digest_sram_addr + 2316 return (hash_handle->larval_digest_sram_addr +
2316 sizeof(md5_init) + 2317 sizeof(cc_md5_init) +
2317 sizeof(sha1_init) + 2318 sizeof(cc_sha1_init) +
2318 sizeof(sha224_init) + 2319 sizeof(cc_sha224_init) +
2319 sizeof(sha256_init)); 2320 sizeof(cc_sha256_init));
2320 case DRV_HASH_SHA384: 2321 case DRV_HASH_SHA384:
2321 addr = (hash_handle->larval_digest_sram_addr + 2322 addr = (hash_handle->larval_digest_sram_addr +
2322 sizeof(md5_init) + 2323 sizeof(cc_md5_init) +
2323 sizeof(sha1_init) + 2324 sizeof(cc_sha1_init) +
2324 sizeof(sha224_init) + 2325 sizeof(cc_sha224_init) +
2325 sizeof(sha256_init)); 2326 sizeof(cc_sha256_init));
2326 if (sm3_supported) 2327 if (sm3_supported)
2327 addr += sizeof(sm3_init); 2328 addr += sizeof(cc_sm3_init);
2328 return addr; 2329 return addr;
2329 case DRV_HASH_SHA512: 2330 case DRV_HASH_SHA512:
2330 addr = (hash_handle->larval_digest_sram_addr + 2331 addr = (hash_handle->larval_digest_sram_addr +
2331 sizeof(md5_init) + 2332 sizeof(cc_md5_init) +
2332 sizeof(sha1_init) + 2333 sizeof(cc_sha1_init) +
2333 sizeof(sha224_init) + 2334 sizeof(cc_sha224_init) +
2334 sizeof(sha256_init) + 2335 sizeof(cc_sha256_init) +
2335 sizeof(sha384_init)); 2336 sizeof(cc_sha384_init));
2336 if (sm3_supported) 2337 if (sm3_supported)
2337 addr += sizeof(sm3_init); 2338 addr += sizeof(cc_sm3_init);
2338 return addr; 2339 return addr;
2339 default: 2340 default:
2340 dev_err(dev, "Invalid hash mode (%d)\n", mode); 2341 dev_err(dev, "Invalid hash mode (%d)\n", mode);
@@ -2360,7 +2361,7 @@ cc_digest_len_addr(void *drvdata, u32 mode)
2360#if (CC_DEV_SHA_MAX > 256) 2361#if (CC_DEV_SHA_MAX > 256)
2361 case DRV_HASH_SHA384: 2362 case DRV_HASH_SHA384:
2362 case DRV_HASH_SHA512: 2363 case DRV_HASH_SHA512:
2363 return digest_len_addr + sizeof(digest_len_init); 2364 return digest_len_addr + sizeof(cc_digest_len_init);
2364#endif 2365#endif
2365 default: 2366 default:
2366 return digest_len_addr; /*to avoid kernel crash*/ 2367 return digest_len_addr; /*to avoid kernel crash*/
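
The renamed cc_* larval-digest arrays are copied into SRAM back to back, so cc_larval_digest_addr() above resolves a digest's address by adding up the sizes of the arrays written before it. A minimal sketch of that arithmetic, assuming the array definitions shown in the hunk; sha256_larval_addr() is a hypothetical helper, not part of the driver:

static cc_sram_addr_t sha256_larval_addr(cc_sram_addr_t base)
{
	/* base is hash_handle->larval_digest_sram_addr */
	return base +
	       sizeof(cc_md5_init) +	/* 16-byte MD5 state */
	       sizeof(cc_sha1_init) +	/* 20-byte SHA-1 state */
	       sizeof(cc_sha224_init);	/* SHA-224 state, stored as 32 bytes */
}
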
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
deleted file mode 100644
index 99dc69383e20..000000000000
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ /dev/null
@@ -1,276 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3
4#include <crypto/ctr.h>
5#include "cc_driver.h"
6#include "cc_ivgen.h"
7#include "cc_request_mgr.h"
8#include "cc_sram_mgr.h"
9#include "cc_buffer_mgr.h"
10
11/* The max. size of pool *MUST* be <= SRAM total size */
12#define CC_IVPOOL_SIZE 1024
 13/* The first 32 bytes of the pool are dedicated to the
 14 * next encryption "key" & "IV" used for pool regeneration
 15 */
16#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
17#define CC_IVPOOL_GEN_SEQ_LEN 4
18
19/**
 20 * struct cc_ivgen_ctx - IV pool generation context
 21 * @pool: the start address of the iv-pool, residing in internal RAM
 22 * @ctr_key: address of the pool's encryption key material in internal RAM
 23 * @ctr_iv: address of the pool's counter IV in internal RAM
24 * @next_iv_ofs: the offset to the next available IV in pool
25 * @pool_meta: virt. address of the initial enc. key/IV
26 * @pool_meta_dma: phys. address of the initial enc. key/IV
27 */
28struct cc_ivgen_ctx {
29 cc_sram_addr_t pool;
30 cc_sram_addr_t ctr_key;
31 cc_sram_addr_t ctr_iv;
32 u32 next_iv_ofs;
33 u8 *pool_meta;
34 dma_addr_t pool_meta_dma;
35};
36
37/*!
 38 * Generates CC_IVPOOL_SIZE random bytes by
 39 * encrypting zeroes using AES-128-CTR.
40 *
41 * \param ivgen iv-pool context
42 * \param iv_seq IN/OUT array to the descriptors sequence
43 * \param iv_seq_len IN/OUT pointer to the sequence length
44 */
45static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
46 struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
47{
48 unsigned int idx = *iv_seq_len;
49
50 if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
51 /* The sequence will be longer than allowed */
52 return -EINVAL;
53 }
54 /* Setup key */
55 hw_desc_init(&iv_seq[idx]);
56 set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
57 set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
58 set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
59 set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
60 set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
61 set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
62 idx++;
63
64 /* Setup cipher state */
65 hw_desc_init(&iv_seq[idx]);
66 set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
67 set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
68 set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
69 set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
70 set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
71 set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
72 idx++;
73
74 /* Perform dummy encrypt to skip first block */
75 hw_desc_init(&iv_seq[idx]);
76 set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
77 set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
78 set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
79 idx++;
80
81 /* Generate IV pool */
82 hw_desc_init(&iv_seq[idx]);
83 set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
84 set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
85 set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
86 idx++;
87
88 *iv_seq_len = idx; /* Update sequence length */
89
90 /* queue ordering assures pool readiness */
91 ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
92
93 return 0;
94}
95
96/*!
97 * Generates the initial pool in SRAM.
 98 * This function should be invoked when resuming the driver.
99 *
100 * \param drvdata
101 *
102 * \return int Zero for success, negative value otherwise.
103 */
104int cc_init_iv_sram(struct cc_drvdata *drvdata)
105{
106 struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
107 struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
108 unsigned int iv_seq_len = 0;
109 int rc;
110
111 /* Generate initial enc. key/iv */
112 get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
113
 114	/* The first 32 bytes are reserved for the encryption key/IV */
115 ivgen_ctx->ctr_key = ivgen_ctx->pool;
116 ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
117
118 /* Copy initial enc. key and IV to SRAM at a single descriptor */
119 hw_desc_init(&iv_seq[iv_seq_len]);
120 set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
121 CC_IVPOOL_META_SIZE, NS_BIT);
122 set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
123 CC_IVPOOL_META_SIZE);
124 set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
125 iv_seq_len++;
126
127 /* Generate initial pool */
128 rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
129 if (rc)
130 return rc;
131
132 /* Fire-and-forget */
133 return send_request_init(drvdata, iv_seq, iv_seq_len);
134}
135
136/*!
137 * Free iv-pool and ivgen context.
138 *
139 * \param drvdata
140 */
141void cc_ivgen_fini(struct cc_drvdata *drvdata)
142{
143 struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
144 struct device *device = &drvdata->plat_dev->dev;
145
146 if (!ivgen_ctx)
147 return;
148
149 if (ivgen_ctx->pool_meta) {
150 memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
151 dma_free_coherent(device, CC_IVPOOL_META_SIZE,
152 ivgen_ctx->pool_meta,
153 ivgen_ctx->pool_meta_dma);
154 }
155
156 ivgen_ctx->pool = NULL_SRAM_ADDR;
157}
158
159/*!
160 * Allocates iv-pool and maps resources.
161 * This function generates the first IV pool.
162 *
163 * \param drvdata Driver's private context
164 *
165 * \return int Zero for success, negative value otherwise.
166 */
167int cc_ivgen_init(struct cc_drvdata *drvdata)
168{
169 struct cc_ivgen_ctx *ivgen_ctx;
170 struct device *device = &drvdata->plat_dev->dev;
171 int rc;
172
173 /* Allocate "this" context */
174 ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
175 if (!ivgen_ctx)
176 return -ENOMEM;
177
178 drvdata->ivgen_handle = ivgen_ctx;
179
180 /* Allocate pool's header for initial enc. key/IV */
181 ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
182 &ivgen_ctx->pool_meta_dma,
183 GFP_KERNEL);
184 if (!ivgen_ctx->pool_meta) {
185 dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
186 CC_IVPOOL_META_SIZE);
187 rc = -ENOMEM;
188 goto out;
189 }
190 /* Allocate IV pool in SRAM */
191 ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
192 if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
193 dev_err(device, "SRAM pool exhausted\n");
194 rc = -ENOMEM;
195 goto out;
196 }
197
198 return cc_init_iv_sram(drvdata);
199
200out:
201 cc_ivgen_fini(drvdata);
202 return rc;
203}
204
205/*!
 206 * Acquires a 16-byte IV from the iv-pool
207 *
208 * \param drvdata Driver private context
209 * \param iv_out_dma Array of physical IV out addresses
210 * \param iv_out_dma_len Length of iv_out_dma array (additional elements
 211 * of iv_out_dma array are ignored)
212 * \param iv_out_size May be 8 or 16 bytes long
213 * \param iv_seq IN/OUT array to the descriptors sequence
214 * \param iv_seq_len IN/OUT pointer to the sequence length
215 *
216 * \return int Zero for success, negative value otherwise.
217 */
218int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
219 unsigned int iv_out_dma_len, unsigned int iv_out_size,
220 struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
221{
222 struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
223 unsigned int idx = *iv_seq_len;
224 struct device *dev = drvdata_to_dev(drvdata);
225 unsigned int t;
226
227 if (iv_out_size != CC_AES_IV_SIZE &&
228 iv_out_size != CTR_RFC3686_IV_SIZE) {
229 return -EINVAL;
230 }
231 if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
232 /* The sequence will be longer than allowed */
233 return -EINVAL;
234 }
235
 236	/* Check that the number of generated IVs does not exceed the
 237	 * number of DMA addresses the IV output buffer can hold
 238	 */
239 if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
240 /* The sequence will be longer than allowed */
241 return -EINVAL;
242 }
243
244 for (t = 0; t < iv_out_dma_len; t++) {
245 /* Acquire IV from pool */
246 hw_desc_init(&iv_seq[idx]);
247 set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
248 ivgen_ctx->next_iv_ofs),
249 iv_out_size);
250 set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
251 NS_BIT, 0);
252 set_flow_mode(&iv_seq[idx], BYPASS);
253 idx++;
254 }
255
 256	/* The bypass operation is followed by the crypto sequence, so a
 257	 * memory barrier must ensure the bypass write transaction completes
 258	 */
259 hw_desc_init(&iv_seq[idx]);
260 set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
261 set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
262 idx++;
263
264 *iv_seq_len = idx; /* update seq length */
265
266 /* Update iv index */
267 ivgen_ctx->next_iv_ofs += iv_out_size;
268
269 if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
270 dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
 271		/* pool is drained - regenerate it! */
272 return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
273 }
274
275 return 0;
276}
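
The pool geometry the deleted code managed is worth a quick sanity check: a 1 KB SRAM pool minus the 32-byte key/IV header leaves room for 62 full-size AES IVs before cc_get_iv() has to regenerate the pool. A standalone sketch of that arithmetic, reusing the constants from the removed file (the figure of 62 assumes 16-byte IVs):

#include <stdio.h>

#define CC_AES_IV_SIZE		16
#define AES_KEYSIZE_128		16
#define CC_IVPOOL_SIZE		1024
#define CC_IVPOOL_META_SIZE	(CC_AES_IV_SIZE + AES_KEYSIZE_128)

int main(void)
{
	/* IVs available between the meta header and the regeneration point */
	printf("IVs per pool: %d\n",
	       (CC_IVPOOL_SIZE - CC_IVPOOL_META_SIZE) / CC_AES_IV_SIZE);
	return 0;
}
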
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
deleted file mode 100644
index a9f5e8bba4f1..000000000000
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3
4#ifndef __CC_IVGEN_H__
5#define __CC_IVGEN_H__
6
7#include "cc_hw_queue_defs.h"
8
9#define CC_IVPOOL_SEQ_LEN 8
10
11/*!
12 * Allocates iv-pool and maps resources.
13 * This function generates the first IV pool.
14 *
15 * \param drvdata Driver's private context
16 *
17 * \return int Zero for success, negative value otherwise.
18 */
19int cc_ivgen_init(struct cc_drvdata *drvdata);
20
21/*!
22 * Free iv-pool and ivgen context.
23 *
24 * \param drvdata
25 */
26void cc_ivgen_fini(struct cc_drvdata *drvdata);
27
28/*!
29 * Generates the initial pool in SRAM.
 30 * This function should be invoked when resuming the DX driver.
31 *
32 * \param drvdata
33 *
34 * \return int Zero for success, negative value otherwise.
35 */
36int cc_init_iv_sram(struct cc_drvdata *drvdata);
37
38/*!
 39 * Acquires a 16-byte IV from the iv-pool
40 *
41 * \param drvdata Driver private context
42 * \param iv_out_dma Array of physical IV out addresses
43 * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
 44 * iv_out_dma array are ignored)
45 * \param iv_out_size May be 8 or 16 bytes long
46 * \param iv_seq IN/OUT array to the descriptors sequence
47 * \param iv_seq_len IN/OUT pointer to the sequence length
48 *
49 * \return int Zero for success, negative value otherwise.
50 */
51int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
52 unsigned int iv_out_dma_len, unsigned int iv_out_size,
53 struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
54
55#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 899a52f05b7a..dbc508fb719b 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -8,7 +8,6 @@
8#include "cc_buffer_mgr.h" 8#include "cc_buffer_mgr.h"
9#include "cc_request_mgr.h" 9#include "cc_request_mgr.h"
10#include "cc_sram_mgr.h" 10#include "cc_sram_mgr.h"
11#include "cc_ivgen.h"
12#include "cc_hash.h" 11#include "cc_hash.h"
13#include "cc_pm.h" 12#include "cc_pm.h"
14#include "cc_fips.h" 13#include "cc_fips.h"
@@ -73,7 +72,6 @@ int cc_pm_resume(struct device *dev)
73 /* must be after the queue resuming as it uses the HW queue*/ 72 /* must be after the queue resuming as it uses the HW queue*/
74 cc_init_hash_sram(drvdata); 73 cc_init_hash_sram(drvdata);
75 74
76 cc_init_iv_sram(drvdata);
77 return 0; 75 return 0;
78} 76}
79 77
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 0bc6ccb0b899..a947d5a2cf35 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -6,7 +6,6 @@
6#include "cc_driver.h" 6#include "cc_driver.h"
7#include "cc_buffer_mgr.h" 7#include "cc_buffer_mgr.h"
8#include "cc_request_mgr.h" 8#include "cc_request_mgr.h"
9#include "cc_ivgen.h"
10#include "cc_pm.h" 9#include "cc_pm.h"
11 10
12#define CC_MAX_POLL_ITER 10 11#define CC_MAX_POLL_ITER 10
@@ -281,36 +280,12 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
281static int cc_do_send_request(struct cc_drvdata *drvdata, 280static int cc_do_send_request(struct cc_drvdata *drvdata,
282 struct cc_crypto_req *cc_req, 281 struct cc_crypto_req *cc_req,
283 struct cc_hw_desc *desc, unsigned int len, 282 struct cc_hw_desc *desc, unsigned int len,
284 bool add_comp, bool ivgen) 283 bool add_comp)
285{ 284{
286 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; 285 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
287 unsigned int used_sw_slots; 286 unsigned int used_sw_slots;
288 unsigned int iv_seq_len = 0;
289 unsigned int total_seq_len = len; /*initial sequence length*/ 287 unsigned int total_seq_len = len; /*initial sequence length*/
290 struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
291 struct device *dev = drvdata_to_dev(drvdata); 288 struct device *dev = drvdata_to_dev(drvdata);
292 int rc;
293
294 if (ivgen) {
295 dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
296 cc_req->ivgen_dma_addr_len,
297 &cc_req->ivgen_dma_addr[0],
298 &cc_req->ivgen_dma_addr[1],
299 &cc_req->ivgen_dma_addr[2],
300 cc_req->ivgen_size);
301
302 /* Acquire IV from pool */
303 rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
304 cc_req->ivgen_dma_addr_len,
305 cc_req->ivgen_size, iv_seq, &iv_seq_len);
306
307 if (rc) {
308 dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
309 return rc;
310 }
311
312 total_seq_len += iv_seq_len;
313 }
314 289
315 used_sw_slots = ((req_mgr_h->req_queue_head - 290 used_sw_slots = ((req_mgr_h->req_queue_head -
316 req_mgr_h->req_queue_tail) & 291 req_mgr_h->req_queue_tail) &
@@ -334,8 +309,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
334 wmb(); 309 wmb();
335 310
336 /* STAT_PHASE_4: Push sequence */ 311 /* STAT_PHASE_4: Push sequence */
337 if (ivgen)
338 enqueue_seq(drvdata, iv_seq, iv_seq_len);
339 312
340 enqueue_seq(drvdata, desc, len); 313 enqueue_seq(drvdata, desc, len);
341 314
@@ -380,8 +353,6 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
380 struct cc_bl_item *bli; 353 struct cc_bl_item *bli;
381 struct cc_crypto_req *creq; 354 struct cc_crypto_req *creq;
382 void *req; 355 void *req;
383 bool ivgen;
384 unsigned int total_len;
385 struct device *dev = drvdata_to_dev(drvdata); 356 struct device *dev = drvdata_to_dev(drvdata);
386 int rc; 357 int rc;
387 358
@@ -406,12 +377,9 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
406 bli->notif = true; 377 bli->notif = true;
407 } 378 }
408 379
409 ivgen = !!creq->ivgen_dma_addr_len;
410 total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
411
412 spin_lock(&mgr->hw_lock); 380 spin_lock(&mgr->hw_lock);
413 381
414 rc = cc_queues_status(drvdata, mgr, total_len); 382 rc = cc_queues_status(drvdata, mgr, bli->len);
415 if (rc) { 383 if (rc) {
416 /* 384 /*
417 * There is still not room in the FIFO for 385 * There is still not room in the FIFO for
@@ -423,7 +391,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
423 } 391 }
424 392
425 rc = cc_do_send_request(drvdata, &bli->creq, bli->desc, 393 rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
426 bli->len, false, ivgen); 394 bli->len, false);
427 395
428 spin_unlock(&mgr->hw_lock); 396 spin_unlock(&mgr->hw_lock);
429 397
@@ -447,8 +415,6 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
447{ 415{
448 int rc; 416 int rc;
449 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; 417 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
450 bool ivgen = !!cc_req->ivgen_dma_addr_len;
451 unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
452 struct device *dev = drvdata_to_dev(drvdata); 418 struct device *dev = drvdata_to_dev(drvdata);
453 bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; 419 bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
454 gfp_t flags = cc_gfp_flags(req); 420 gfp_t flags = cc_gfp_flags(req);
@@ -461,7 +427,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
461 } 427 }
462 428
463 spin_lock_bh(&mgr->hw_lock); 429 spin_lock_bh(&mgr->hw_lock);
464 rc = cc_queues_status(drvdata, mgr, total_len); 430 rc = cc_queues_status(drvdata, mgr, len);
465 431
466#ifdef CC_DEBUG_FORCE_BACKLOG 432#ifdef CC_DEBUG_FORCE_BACKLOG
467 if (backlog_ok) 433 if (backlog_ok)
@@ -486,8 +452,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
486 } 452 }
487 453
488 if (!rc) 454 if (!rc)
489 rc = cc_do_send_request(drvdata, cc_req, desc, len, false, 455 rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
490 ivgen);
491 456
492 spin_unlock_bh(&mgr->hw_lock); 457 spin_unlock_bh(&mgr->hw_lock);
493 return rc; 458 return rc;
@@ -527,7 +492,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
527 reinit_completion(&drvdata->hw_queue_avail); 492 reinit_completion(&drvdata->hw_queue_avail);
528 } 493 }
529 494
530 rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false); 495 rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
531 spin_unlock_bh(&mgr->hw_lock); 496 spin_unlock_bh(&mgr->hw_lock);
532 497
533 if (rc != -EINPROGRESS) { 498 if (rc != -EINPROGRESS) {
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4b9b37a130d3..250150560e68 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -2,6 +2,7 @@
2config CRYPTO_DEV_CHELSIO 2config CRYPTO_DEV_CHELSIO
3 tristate "Chelsio Crypto Co-processor Driver" 3 tristate "Chelsio Crypto Co-processor Driver"
4 depends on CHELSIO_T4 4 depends on CHELSIO_T4
5 select CRYPTO_LIB_AES
5 select CRYPTO_SHA1 6 select CRYPTO_SHA1
6 select CRYPTO_SHA256 7 select CRYPTO_SHA256
7 select CRYPTO_SHA512 8 select CRYPTO_SHA512
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 177f572b9589..38ee38b37ae6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1023,22 +1023,21 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1023 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1023 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1024 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 1024 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1025 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 1025 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1026 struct crypto_cipher *cipher; 1026 struct crypto_aes_ctx aes;
1027 int ret, i; 1027 int ret, i;
1028 u8 *key; 1028 u8 *key;
1029 unsigned int keylen; 1029 unsigned int keylen;
1030 int round = reqctx->last_req_len / AES_BLOCK_SIZE; 1030 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1031 int round8 = round / 8; 1031 int round8 = round / 8;
1032 1032
1033 cipher = ablkctx->aes_generic;
1034 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); 1033 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1035 1034
1036 keylen = ablkctx->enckey_len / 2; 1035 keylen = ablkctx->enckey_len / 2;
1037 key = ablkctx->key + keylen; 1036 key = ablkctx->key + keylen;
1038 ret = crypto_cipher_setkey(cipher, key, keylen); 1037 ret = aes_expandkey(&aes, key, keylen);
1039 if (ret) 1038 if (ret)
1040 goto out; 1039 return ret;
1041 crypto_cipher_encrypt_one(cipher, iv, iv); 1040 aes_encrypt(&aes, iv, iv);
1042 for (i = 0; i < round8; i++) 1041 for (i = 0; i < round8; i++)
1043 gf128mul_x8_ble((le128 *)iv, (le128 *)iv); 1042 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1044 1043
@@ -1046,9 +1045,10 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1046 gf128mul_x_ble((le128 *)iv, (le128 *)iv); 1045 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1047 1046
1048 if (!isfinal) 1047 if (!isfinal)
1049 crypto_cipher_decrypt_one(cipher, iv, iv); 1048 aes_decrypt(&aes, iv, iv);
1050out: 1049
1051 return ret; 1050 memzero_explicit(&aes, sizeof(aes));
1051 return 0;
1052} 1052}
1053 1053
1054static int chcr_update_cipher_iv(struct ablkcipher_request *req, 1054static int chcr_update_cipher_iv(struct ablkcipher_request *req,
@@ -1411,16 +1411,6 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
1411 return PTR_ERR(ablkctx->sw_cipher); 1411 return PTR_ERR(ablkctx->sw_cipher);
1412 } 1412 }
1413 1413
1414 if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1415 /* To update tweak*/
1416 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1417 if (IS_ERR(ablkctx->aes_generic)) {
1418 pr_err("failed to allocate aes cipher for tweak\n");
1419 return PTR_ERR(ablkctx->aes_generic);
1420 }
1421 } else
1422 ablkctx->aes_generic = NULL;
1423
1424 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); 1414 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1425 return chcr_device_init(crypto_tfm_ctx(tfm)); 1415 return chcr_device_init(crypto_tfm_ctx(tfm));
1426} 1416}
@@ -1451,8 +1441,6 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
1451 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1441 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1452 1442
1453 crypto_free_sync_skcipher(ablkctx->sw_cipher); 1443 crypto_free_sync_skcipher(ablkctx->sw_cipher);
1454 if (ablkctx->aes_generic)
1455 crypto_free_cipher(ablkctx->aes_generic);
1456} 1444}
1457 1445
1458static int get_alg_config(struct algo_param *params, 1446static int get_alg_config(struct algo_param *params,
@@ -3364,9 +3352,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3364{ 3352{
3365 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3353 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3366 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); 3354 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3367 struct crypto_cipher *cipher;
3368 unsigned int ck_size; 3355 unsigned int ck_size;
3369 int ret = 0, key_ctx_size = 0; 3356 int ret = 0, key_ctx_size = 0;
3357 struct crypto_aes_ctx aes;
3370 3358
3371 aeadctx->enckey_len = 0; 3359 aeadctx->enckey_len = 0;
3372 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 3360 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -3409,23 +3397,15 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3409 /* Calculate the H = CIPH(K, 0 repeated 16 times). 3397 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3410 * It will go in key context 3398 * It will go in key context
3411 */ 3399 */
3412 cipher = crypto_alloc_cipher("aes-generic", 0, 0); 3400 ret = aes_expandkey(&aes, key, keylen);
3413 if (IS_ERR(cipher)) {
3414 aeadctx->enckey_len = 0;
3415 ret = -ENOMEM;
3416 goto out;
3417 }
3418
3419 ret = crypto_cipher_setkey(cipher, key, keylen);
3420 if (ret) { 3401 if (ret) {
3421 aeadctx->enckey_len = 0; 3402 aeadctx->enckey_len = 0;
3422 goto out1; 3403 goto out;
3423 } 3404 }
3424 memset(gctx->ghash_h, 0, AEAD_H_SIZE); 3405 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3425 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h); 3406 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3407 memzero_explicit(&aes, sizeof(aes));
3426 3408
3427out1:
3428 crypto_free_cipher(cipher);
3429out: 3409out:
3430 return ret; 3410 return ret;
3431} 3411}
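
The chelsio hunks above all make the same substitution: instead of allocating an "aes-generic" crypto_cipher just to encrypt a single block, the key is expanded on the stack with the new AES library helpers and scrubbed afterwards. A hedged sketch of that pattern for deriving the GCM hash key H = E_K(0^128); compute_ghash_h() is a hypothetical helper name, not part of the patch:

#include <crypto/aes.h>
#include <linux/string.h>

static int compute_ghash_h(const u8 *key, unsigned int keylen, u8 *ghash_h)
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);	/* also validates the key length */
	if (ret)
		return ret;

	memset(ghash_h, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);	/* single-block library call */
	memzero_explicit(&aes, sizeof(aes));	/* scrub the expanded key from the stack */
	return 0;
}
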
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ee20dd899e83..d1e6b51df0ce 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -333,26 +333,26 @@ struct phys_sge_pairs {
333}; 333};
334 334
335 335
336static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { 336static const u32 chcr_sha1_init[SHA1_DIGEST_SIZE / 4] = {
337 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 337 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
338}; 338};
339 339
340static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { 340static const u32 chcr_sha224_init[SHA256_DIGEST_SIZE / 4] = {
341 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 341 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
342 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 342 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
343}; 343};
344 344
345static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { 345static const u32 chcr_sha256_init[SHA256_DIGEST_SIZE / 4] = {
346 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 346 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
347 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 347 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
348}; 348};
349 349
350static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = { 350static const u64 chcr_sha384_init[SHA512_DIGEST_SIZE / 8] = {
351 SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, 351 SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
352 SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, 352 SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
353}; 353};
354 354
355static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = { 355static const u64 chcr_sha512_init[SHA512_DIGEST_SIZE / 8] = {
356 SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, 356 SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
357 SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, 357 SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
358}; 358};
@@ -362,21 +362,21 @@ static inline void copy_hash_init_values(char *key, int digestsize)
362 u8 i; 362 u8 i;
363 __be32 *dkey = (__be32 *)key; 363 __be32 *dkey = (__be32 *)key;
364 u64 *ldkey = (u64 *)key; 364 u64 *ldkey = (u64 *)key;
365 __be64 *sha384 = (__be64 *)sha384_init; 365 __be64 *sha384 = (__be64 *)chcr_sha384_init;
366 __be64 *sha512 = (__be64 *)sha512_init; 366 __be64 *sha512 = (__be64 *)chcr_sha512_init;
367 367
368 switch (digestsize) { 368 switch (digestsize) {
369 case SHA1_DIGEST_SIZE: 369 case SHA1_DIGEST_SIZE:
370 for (i = 0; i < SHA1_INIT_STATE; i++) 370 for (i = 0; i < SHA1_INIT_STATE; i++)
371 dkey[i] = cpu_to_be32(sha1_init[i]); 371 dkey[i] = cpu_to_be32(chcr_sha1_init[i]);
372 break; 372 break;
373 case SHA224_DIGEST_SIZE: 373 case SHA224_DIGEST_SIZE:
374 for (i = 0; i < SHA224_INIT_STATE; i++) 374 for (i = 0; i < SHA224_INIT_STATE; i++)
375 dkey[i] = cpu_to_be32(sha224_init[i]); 375 dkey[i] = cpu_to_be32(chcr_sha224_init[i]);
376 break; 376 break;
377 case SHA256_DIGEST_SIZE: 377 case SHA256_DIGEST_SIZE:
378 for (i = 0; i < SHA256_INIT_STATE; i++) 378 for (i = 0; i < SHA256_INIT_STATE; i++)
379 dkey[i] = cpu_to_be32(sha256_init[i]); 379 dkey[i] = cpu_to_be32(chcr_sha256_init[i]);
380 break; 380 break;
381 case SHA384_DIGEST_SIZE: 381 case SHA384_DIGEST_SIZE:
382 for (i = 0; i < SHA384_INIT_STATE; i++) 382 for (i = 0; i < SHA384_INIT_STATE; i++)
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 655606f2e4d0..993c97e70565 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -172,7 +172,6 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
172 172
173struct ablk_ctx { 173struct ablk_ctx {
174 struct crypto_sync_skcipher *sw_cipher; 174 struct crypto_sync_skcipher *sw_cipher;
175 struct crypto_cipher *aes_generic;
176 __be32 key_ctx_hdr; 175 __be32 key_ctx_hdr;
177 unsigned int enckey_len; 176 unsigned int enckey_len;
178 unsigned char ciph_mode; 177 unsigned char ciph_mode;
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index f429aae72542..24355680f30a 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -132,11 +132,11 @@ static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
132static inline int chcr_ipsec_setkey(struct xfrm_state *x, 132static inline int chcr_ipsec_setkey(struct xfrm_state *x,
133 struct ipsec_sa_entry *sa_entry) 133 struct ipsec_sa_entry *sa_entry)
134{ 134{
135 struct crypto_cipher *cipher;
136 int keylen = (x->aead->alg_key_len + 7) / 8; 135 int keylen = (x->aead->alg_key_len + 7) / 8;
137 unsigned char *key = x->aead->alg_key; 136 unsigned char *key = x->aead->alg_key;
138 int ck_size, key_ctx_size = 0; 137 int ck_size, key_ctx_size = 0;
139 unsigned char ghash_h[AEAD_H_SIZE]; 138 unsigned char ghash_h[AEAD_H_SIZE];
139 struct crypto_aes_ctx aes;
140 int ret = 0; 140 int ret = 0;
141 141
142 if (keylen > 3) { 142 if (keylen > 3) {
@@ -170,26 +170,19 @@ static inline int chcr_ipsec_setkey(struct xfrm_state *x,
170 /* Calculate the H = CIPH(K, 0 repeated 16 times). 170 /* Calculate the H = CIPH(K, 0 repeated 16 times).
171 * It will go in key context 171 * It will go in key context
172 */ 172 */
173 cipher = crypto_alloc_cipher("aes-generic", 0, 0); 173 ret = aes_expandkey(&aes, key, keylen);
174 if (IS_ERR(cipher)) {
175 sa_entry->enckey_len = 0;
176 ret = -ENOMEM;
177 goto out;
178 }
179
180 ret = crypto_cipher_setkey(cipher, key, keylen);
181 if (ret) { 174 if (ret) {
182 sa_entry->enckey_len = 0; 175 sa_entry->enckey_len = 0;
183 goto out1; 176 goto out;
184 } 177 }
185 memset(ghash_h, 0, AEAD_H_SIZE); 178 memset(ghash_h, 0, AEAD_H_SIZE);
186 crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); 179 aes_encrypt(&aes, ghash_h, ghash_h);
180 memzero_explicit(&aes, sizeof(aes));
181
187 memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 182 memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
188 16), ghash_h, AEAD_H_SIZE); 183 16), ghash_h, AEAD_H_SIZE);
189 sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) + 184 sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
190 AEAD_H_SIZE; 185 AEAD_H_SIZE;
191out1:
192 crypto_free_cipher(cipher);
193out: 186out:
194 return ret; 187 return ret;
195} 188}
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index f2424f4c5f78..2a34035d3cfb 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -213,8 +213,8 @@ static int chtls_key_info(struct chtls_sock *csk,
213 unsigned char key[AES_KEYSIZE_128]; 213 unsigned char key[AES_KEYSIZE_128];
214 struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; 214 struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
215 unsigned char ghash_h[AEAD_H_SIZE]; 215 unsigned char ghash_h[AEAD_H_SIZE];
216 struct crypto_cipher *cipher;
217 int ck_size, key_ctx_size; 216 int ck_size, key_ctx_size;
217 struct crypto_aes_ctx aes;
218 int ret; 218 int ret;
219 219
220 gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *) 220 gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
@@ -234,18 +234,13 @@ static int chtls_key_info(struct chtls_sock *csk,
234 /* Calculate the H = CIPH(K, 0 repeated 16 times). 234 /* Calculate the H = CIPH(K, 0 repeated 16 times).
235 * It will go in key context 235 * It will go in key context
236 */ 236 */
237 cipher = crypto_alloc_cipher("aes", 0, 0); 237 ret = aes_expandkey(&aes, key, keylen);
238 if (IS_ERR(cipher)) {
239 ret = -ENOMEM;
240 goto out;
241 }
242
243 ret = crypto_cipher_setkey(cipher, key, keylen);
244 if (ret) 238 if (ret)
245 goto out1; 239 return ret;
246 240
247 memset(ghash_h, 0, AEAD_H_SIZE); 241 memset(ghash_h, 0, AEAD_H_SIZE);
248 crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); 242 aes_encrypt(&aes, ghash_h, ghash_h);
243 memzero_explicit(&aes, sizeof(aes));
249 csk->tlshws.keylen = key_ctx_size; 244 csk->tlshws.keylen = key_ctx_size;
250 245
251 /* Copy the Key context */ 246 /* Copy the Key context */
@@ -269,10 +264,7 @@ static int chtls_key_info(struct chtls_sock *csk,
269 /* erase key info from driver */ 264 /* erase key info from driver */
270 memset(gcm_ctx->key, 0, keylen); 265 memset(gcm_ctx->key, 0, keylen);
271 266
272out1: 267 return 0;
273 crypto_free_cipher(cipher);
274out:
275 return ret;
276} 268}
277 269
278static void chtls_set_scmd(struct chtls_sock *csk) 270static void chtls_set_scmd(struct chtls_sock *csk)
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 2cfabb99cb6e..cbd8ca6e52ee 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -268,7 +268,6 @@ static struct rng_alg exynos_rng_alg = {
268static int exynos_rng_probe(struct platform_device *pdev) 268static int exynos_rng_probe(struct platform_device *pdev)
269{ 269{
270 struct exynos_rng_dev *rng; 270 struct exynos_rng_dev *rng;
271 struct resource *res;
272 int ret; 271 int ret;
273 272
274 if (exynos_rng_dev) 273 if (exynos_rng_dev)
@@ -289,8 +288,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
289 return PTR_ERR(rng->clk); 288 return PTR_ERR(rng->clk);
290 } 289 }
291 290
292 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 291 rng->mem = devm_platform_ioremap_resource(pdev, 0);
293 rng->mem = devm_ioremap_resource(&pdev->dev, res);
294 if (IS_ERR(rng->mem)) 292 if (IS_ERR(rng->mem))
295 return PTR_ERR(rng->mem); 293 return PTR_ERR(rng->mem);
296 294
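
devm_platform_ioremap_resource() simply folds the platform_get_resource() + devm_ioremap_resource() pair into one call, which is all the exynos-rng hunk does. A minimal probe sketch using it; foo_probe() and struct foo_dev are illustrative names, not taken from the patch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dev {
	void __iomem *mem;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* map the first MEM resource; devres handles the unmap on detach */
	foo->mem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->mem))
		return PTR_ERR(foo->mem);

	return 0;
}
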
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 5c3f02e4aece..a18e62df68d9 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -21,7 +21,7 @@
21#include <linux/ktime.h> 21#include <linux/ktime.h>
22 22
23#include <crypto/algapi.h> 23#include <crypto/algapi.h>
24#include <crypto/des.h> 24#include <crypto/internal/des.h>
25 25
26static char hifn_pll_ref[sizeof("extNNN")] = "ext"; 26static char hifn_pll_ref[sizeof("extNNN")] = "ext";
27module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); 27module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
@@ -1939,25 +1939,13 @@ static void hifn_flush(struct hifn_device *dev)
1939static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1939static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1940 unsigned int len) 1940 unsigned int len)
1941{ 1941{
1942 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 1942 struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
1943 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
1944 struct hifn_device *dev = ctx->dev; 1943 struct hifn_device *dev = ctx->dev;
1944 int err;
1945 1945
1946 if (len > HIFN_MAX_CRYPT_KEY_LENGTH) { 1946 err = verify_ablkcipher_des_key(cipher, key);
1947 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1947 if (err)
1948 return -1; 1948 return err;
1949 }
1950
1951 if (len == HIFN_DES_KEY_LENGTH) {
1952 u32 tmp[DES_EXPKEY_WORDS];
1953 int ret = des_ekey(tmp, key);
1954
1955 if (unlikely(ret == 0) &&
1956 (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
1957 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
1958 return -EINVAL;
1959 }
1960 }
1961 1949
1962 dev->flags &= ~HIFN_FLAG_OLD_KEY; 1950 dev->flags &= ~HIFN_FLAG_OLD_KEY;
1963 1951
@@ -1972,15 +1960,11 @@ static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1972{ 1960{
1973 struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher); 1961 struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
1974 struct hifn_device *dev = ctx->dev; 1962 struct hifn_device *dev = ctx->dev;
1975 u32 flags;
1976 int err; 1963 int err;
1977 1964
1978 flags = crypto_ablkcipher_get_flags(cipher); 1965 err = verify_ablkcipher_des3_key(cipher, key);
1979 err = __des3_verify_key(&flags, key); 1966 if (err)
1980 if (unlikely(err)) {
1981 crypto_ablkcipher_set_flags(cipher, flags);
1982 return err; 1967 return err;
1983 }
1984 1968
1985 dev->flags &= ~HIFN_FLAG_OLD_KEY; 1969 dev->flags &= ~HIFN_FLAG_OLD_KEY;
1986 1970
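
The hifn conversion relies on the new DES key-verification helpers from <crypto/internal/des.h>, which centralise the length and weak-key checks (including the CRYPTO_TFM_REQ_FORBID_WEAK_KEYS handling) that drivers used to open-code around des_ekey(). A hedged sketch of a setkey callback built on them; foo_des_setkey() is a hypothetical driver function:

#include <linux/crypto.h>
#include <crypto/internal/des.h>

static int foo_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int len)
{
	int err;

	/* rejects wrong-length keys and, when requested, weak keys */
	err = verify_ablkcipher_des_key(cipher, key);
	if (err)
		return err;

	/* ... program the verified key into the hardware context ... */
	return 0;
}
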
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 8ca9c503bcb0..ebaf91e0146d 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_HISI_SEC
4 tristate "Support for Hisilicon SEC crypto block cipher accelerator" 4 tristate "Support for Hisilicon SEC crypto block cipher accelerator"
5 select CRYPTO_BLKCIPHER 5 select CRYPTO_BLKCIPHER
6 select CRYPTO_ALGAPI 6 select CRYPTO_ALGAPI
7 select CRYPTO_LIB_DES
7 select SG_SPLIT 8 select SG_SPLIT
8 depends on ARM64 || COMPILE_TEST 9 depends on ARM64 || COMPILE_TEST
9 depends on HAS_IOMEM 10 depends on HAS_IOMEM
@@ -12,3 +13,27 @@ config CRYPTO_DEV_HISI_SEC
12 13
13 To compile this as a module, choose M here: the module 14 To compile this as a module, choose M here: the module
14 will be called hisi_sec. 15 will be called hisi_sec.
16
17config CRYPTO_DEV_HISI_QM
18 tristate
19 depends on ARM64 && PCI && PCI_MSI
20 help
21 HiSilicon accelerator engines use a common queue management
 22	  interface. Specific engine drivers may use this module.
23
24config CRYPTO_HISI_SGL
25 tristate
26 depends on ARM64
27 help
28 HiSilicon accelerator engines use a common hardware scatterlist
 29	  interface for the data format. Specific engine drivers may use this
30 module.
31
32config CRYPTO_DEV_HISI_ZIP
33 tristate "Support for HiSilicon ZIP accelerator"
34 depends on ARM64 && PCI && PCI_MSI
35 select CRYPTO_DEV_HISI_QM
36 select CRYPTO_HISI_SGL
37 select SG_SPLIT
38 help
 39	  Support for the HiSilicon ZIP accelerator driver.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 463f46ace182..45a279741126 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -1,2 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/ 2obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
3obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o
4obj-$(CONFIG_CRYPTO_HISI_SGL) += sgl.o
5obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
new file mode 100644
index 000000000000..f975c393a603
--- /dev/null
+++ b/drivers/crypto/hisilicon/qm.c
@@ -0,0 +1,1913 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <asm/page.h>
4#include <linux/bitmap.h>
5#include <linux/debugfs.h>
6#include <linux/dma-mapping.h>
7#include <linux/io.h>
8#include <linux/irqreturn.h>
9#include <linux/log2.h>
10#include <linux/seq_file.h>
11#include <linux/slab.h>
12#include "qm.h"
13
14/* eq/aeq irq enable */
15#define QM_VF_AEQ_INT_SOURCE 0x0
16#define QM_VF_AEQ_INT_MASK 0x4
17#define QM_VF_EQ_INT_SOURCE 0x8
18#define QM_VF_EQ_INT_MASK 0xc
19#define QM_IRQ_NUM_V1 1
20#define QM_IRQ_NUM_PF_V2 4
21#define QM_IRQ_NUM_VF_V2 2
22
23#define QM_EQ_EVENT_IRQ_VECTOR 0
24#define QM_AEQ_EVENT_IRQ_VECTOR 1
25#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
26
27/* mailbox */
28#define QM_MB_CMD_SQC 0x0
29#define QM_MB_CMD_CQC 0x1
30#define QM_MB_CMD_EQC 0x2
31#define QM_MB_CMD_AEQC 0x3
32#define QM_MB_CMD_SQC_BT 0x4
33#define QM_MB_CMD_CQC_BT 0x5
34#define QM_MB_CMD_SQC_VFT_V2 0x6
35
36#define QM_MB_CMD_SEND_BASE 0x300
37#define QM_MB_EVENT_SHIFT 8
38#define QM_MB_BUSY_SHIFT 13
39#define QM_MB_OP_SHIFT 14
40#define QM_MB_CMD_DATA_ADDR_L 0x304
41#define QM_MB_CMD_DATA_ADDR_H 0x308
42
43/* sqc shift */
44#define QM_SQ_HOP_NUM_SHIFT 0
45#define QM_SQ_PAGE_SIZE_SHIFT 4
46#define QM_SQ_BUF_SIZE_SHIFT 8
47#define QM_SQ_SQE_SIZE_SHIFT 12
48#define QM_SQ_PRIORITY_SHIFT 0
49#define QM_SQ_ORDERS_SHIFT 4
50#define QM_SQ_TYPE_SHIFT 8
51
52#define QM_SQ_TYPE_MASK GENMASK(3, 0)
53
54/* cqc shift */
55#define QM_CQ_HOP_NUM_SHIFT 0
56#define QM_CQ_PAGE_SIZE_SHIFT 4
57#define QM_CQ_BUF_SIZE_SHIFT 8
58#define QM_CQ_CQE_SIZE_SHIFT 12
59#define QM_CQ_PHASE_SHIFT 0
60#define QM_CQ_FLAG_SHIFT 1
61
62#define QM_CQE_PHASE(cqe) ((cqe)->w7 & 0x1)
63#define QM_QC_CQE_SIZE 4
64
65/* eqc shift */
66#define QM_EQE_AEQE_SIZE (2UL << 12)
67#define QM_EQC_PHASE_SHIFT 16
68
69#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1)
70#define QM_EQE_CQN_MASK GENMASK(15, 0)
71
72#define QM_AEQE_PHASE(aeqe) (((aeqe)->dw0 >> 16) & 0x1)
73#define QM_AEQE_TYPE_SHIFT 17
74
75#define QM_DOORBELL_CMD_SQ 0
76#define QM_DOORBELL_CMD_CQ 1
77#define QM_DOORBELL_CMD_EQ 2
78#define QM_DOORBELL_CMD_AEQ 3
79
80#define QM_DOORBELL_BASE_V1 0x340
81#define QM_DB_CMD_SHIFT_V1 16
82#define QM_DB_INDEX_SHIFT_V1 32
83#define QM_DB_PRIORITY_SHIFT_V1 48
84#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
85#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
86#define QM_DB_CMD_SHIFT_V2 12
87#define QM_DB_RAND_SHIFT_V2 16
88#define QM_DB_INDEX_SHIFT_V2 32
89#define QM_DB_PRIORITY_SHIFT_V2 48
90
91#define QM_MEM_START_INIT 0x100040
92#define QM_MEM_INIT_DONE 0x100044
93#define QM_VFT_CFG_RDY 0x10006c
94#define QM_VFT_CFG_OP_WR 0x100058
95#define QM_VFT_CFG_TYPE 0x10005c
96#define QM_SQC_VFT 0x0
97#define QM_CQC_VFT 0x1
98#define QM_VFT_CFG 0x100060
99#define QM_VFT_CFG_OP_ENABLE 0x100054
100
101#define QM_VFT_CFG_DATA_L 0x100064
102#define QM_VFT_CFG_DATA_H 0x100068
103#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
104#define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
105#define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
106#define QM_SQC_VFT_START_SQN_SHIFT 28
107#define QM_SQC_VFT_VALID (1ULL << 44)
108#define QM_SQC_VFT_SQN_SHIFT 45
109#define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
110#define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
111#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
112#define QM_CQC_VFT_VALID (1ULL << 28)
113
114#define QM_SQC_VFT_BASE_SHIFT_V2 28
115#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
116#define QM_SQC_VFT_NUM_SHIFT_V2 45
117#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
118
119#define QM_DFX_CNT_CLR_CE 0x100118
120
121#define QM_ABNORMAL_INT_SOURCE 0x100000
122#define QM_ABNORMAL_INT_MASK 0x100004
123#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
124#define QM_ABNORMAL_INT_STATUS 0x100008
125#define QM_ABNORMAL_INF00 0x100010
126#define QM_FIFO_OVERFLOW_TYPE 0xc0
127#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
128#define QM_FIFO_OVERFLOW_VF 0x3f
129#define QM_ABNORMAL_INF01 0x100014
130#define QM_DB_TIMEOUT_TYPE 0xc0
131#define QM_DB_TIMEOUT_TYPE_SHIFT 6
132#define QM_DB_TIMEOUT_VF 0x3f
133#define QM_RAS_CE_ENABLE 0x1000ec
134#define QM_RAS_FE_ENABLE 0x1000f0
135#define QM_RAS_NFE_ENABLE 0x1000f4
136#define QM_RAS_CE_THRESHOLD 0x1000f8
137#define QM_RAS_CE_TIMES_PER_IRQ 1
138#define QM_RAS_MSI_INT_SEL 0x1040f4
139
140#define QM_CACHE_WB_START 0x204
141#define QM_CACHE_WB_DONE 0x208
142
143#define PCI_BAR_2 2
144#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
145#define QMC_ALIGN(sz) ALIGN(sz, 32)
146
147#define QM_DBG_TMP_BUF_LEN 22
148
149#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
150 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
151 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
152 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
153 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
154
155#define QM_MK_CQC_DW3_V2(cqe_sz) \
156 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
157
158#define QM_MK_SQC_W13(priority, orders, alg_type) \
159 (((priority) << QM_SQ_PRIORITY_SHIFT) | \
160 ((orders) << QM_SQ_ORDERS_SHIFT) | \
161 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
162
163#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
164 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
165 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
166 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
167 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
168
169#define QM_MK_SQC_DW3_V2(sqe_sz) \
170 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
171
172#define INIT_QC_COMMON(qc, base, pasid) do { \
173 (qc)->head = 0; \
174 (qc)->tail = 0; \
175 (qc)->base_l = lower_32_bits(base); \
176 (qc)->base_h = upper_32_bits(base); \
177 (qc)->dw3 = 0; \
178 (qc)->w8 = 0; \
179 (qc)->rsvd0 = 0; \
180 (qc)->pasid = pasid; \
181 (qc)->w11 = 0; \
182 (qc)->rsvd1 = 0; \
183} while (0)
184
185enum vft_type {
186 SQC_VFT = 0,
187 CQC_VFT,
188};
189
190struct qm_cqe {
191 __le32 rsvd0;
192 __le16 cmd_id;
193 __le16 rsvd1;
194 __le16 sq_head;
195 __le16 sq_num;
196 __le16 rsvd2;
197 __le16 w7;
198};
199
200struct qm_eqe {
201 __le32 dw0;
202};
203
204struct qm_aeqe {
205 __le32 dw0;
206};
207
208struct qm_sqc {
209 __le16 head;
210 __le16 tail;
211 __le32 base_l;
212 __le32 base_h;
213 __le32 dw3;
214 __le16 w8;
215 __le16 rsvd0;
216 __le16 pasid;
217 __le16 w11;
218 __le16 cq_num;
219 __le16 w13;
220 __le32 rsvd1;
221};
222
223struct qm_cqc {
224 __le16 head;
225 __le16 tail;
226 __le32 base_l;
227 __le32 base_h;
228 __le32 dw3;
229 __le16 w8;
230 __le16 rsvd0;
231 __le16 pasid;
232 __le16 w11;
233 __le32 dw6;
234 __le32 rsvd1;
235};
236
237struct qm_eqc {
238 __le16 head;
239 __le16 tail;
240 __le32 base_l;
241 __le32 base_h;
242 __le32 dw3;
243 __le32 rsvd[2];
244 __le32 dw6;
245};
246
247struct qm_aeqc {
248 __le16 head;
249 __le16 tail;
250 __le32 base_l;
251 __le32 base_h;
252 __le32 dw3;
253 __le32 rsvd[2];
254 __le32 dw6;
255};
256
257struct qm_mailbox {
258 __le16 w0;
259 __le16 queue_num;
260 __le32 base_l;
261 __le32 base_h;
262 __le32 rsvd;
263};
264
265struct qm_doorbell {
266 __le16 queue_num;
267 __le16 cmd;
268 __le16 index;
269 __le16 priority;
270};
271
272struct hisi_qm_hw_ops {
273 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
274 void (*qm_db)(struct hisi_qm *qm, u16 qn,
275 u8 cmd, u16 index, u8 priority);
276 u32 (*get_irq_num)(struct hisi_qm *qm);
277 int (*debug_init)(struct hisi_qm *qm);
278 void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
279 u32 msi);
280 pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
281};
282
283static const char * const qm_debug_file_name[] = {
284 [CURRENT_Q] = "current_q",
285 [CLEAR_ENABLE] = "clear_enable",
286};
287
288struct hisi_qm_hw_error {
289 u32 int_msk;
290 const char *msg;
291};
292
293static const struct hisi_qm_hw_error qm_hw_error[] = {
294 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
295 { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
296 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
297 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
298 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
299 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
300 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
301 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
302 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
303 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
304 { .int_msk = BIT(10), .msg = "qm_db_timeout" },
305 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
306 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
307 { /* sentinel */ }
308};
309
310static const char * const qm_db_timeout[] = {
311 "sq", "cq", "eq", "aeq",
312};
313
314static const char * const qm_fifo_overflow[] = {
315 "cq", "eq", "aeq",
316};
317
318/* return 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout */
319static int qm_wait_mb_ready(struct hisi_qm *qm)
320{
321 u32 val;
322
323 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
324 val, !((val >> QM_MB_BUSY_SHIFT) &
325 0x1), 10, 1000);
326}
327
328/* 128 bits must be written to the hardware in a single access to trigger a mailbox */
329static void qm_mb_write(struct hisi_qm *qm, const void *src)
330{
331 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
332 unsigned long tmp0 = 0, tmp1 = 0;
333
334 asm volatile("ldp %0, %1, %3\n"
335 "stp %0, %1, %2\n"
336 "dsb sy\n"
337 : "=&r" (tmp0),
338 "=&r" (tmp1),
339 "+Q" (*((char *)fun_base))
340 : "Q" (*((char *)src))
341 : "memory");
342}
343
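/*
 * A mailbox request is a 128-bit descriptor with the busy bit set in w0:
 * qm_mb() waits for the mailbox to go idle, writes the whole descriptor in
 * one 128-bit store via qm_mb_write(), then polls the busy bit clear again
 * to detect completion.
 */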
344static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
345 bool op)
346{
347 struct qm_mailbox mailbox;
348 int ret = 0;
349
350 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
351 queue, cmd, (unsigned long long)dma_addr);
352
353 mailbox.w0 = cmd |
354 (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
355 (0x1 << QM_MB_BUSY_SHIFT);
356 mailbox.queue_num = queue;
357 mailbox.base_l = lower_32_bits(dma_addr);
358 mailbox.base_h = upper_32_bits(dma_addr);
359 mailbox.rsvd = 0;
360
361 mutex_lock(&qm->mailbox_lock);
362
363 if (unlikely(qm_wait_mb_ready(qm))) {
364 ret = -EBUSY;
365		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start!\n");
366 goto busy_unlock;
367 }
368
369 qm_mb_write(qm, &mailbox);
370
371 if (unlikely(qm_wait_mb_ready(qm))) {
372 ret = -EBUSY;
373 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
374 goto busy_unlock;
375 }
376
377busy_unlock:
378 mutex_unlock(&qm->mailbox_lock);
379
380 return ret;
381}
382
383static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
384{
385 u64 doorbell;
386
387 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
388 ((u64)index << QM_DB_INDEX_SHIFT_V1) |
389 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
390
391 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
392}
393
394static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
395{
396 u64 doorbell;
397 u64 dbase;
398 u16 randata = 0;
399
400 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
401 dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
402 else
403 dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
404
405 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
406 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
407 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
408 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
409
410 writeq(doorbell, qm->io_base + dbase);
411}
412
413static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
414{
415 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
416 qn, cmd, index);
417
418 qm->ops->qm_db(qm, qn, cmd, index, priority);
419}
420
421static int qm_dev_mem_reset(struct hisi_qm *qm)
422{
423 u32 val;
424
425 writel(0x1, qm->io_base + QM_MEM_START_INIT);
426 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
427 val & BIT(0), 10, 1000);
428}
429
430static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
431{
432 return QM_IRQ_NUM_V1;
433}
434
435static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
436{
437 if (qm->fun_type == QM_HW_PF)
438 return QM_IRQ_NUM_PF_V2;
439 else
440 return QM_IRQ_NUM_VF_V2;
441}
442
443static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
444{
445 u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK;
446
447 return qm->qp_array[cqn];
448}
449
450static void qm_cq_head_update(struct hisi_qp *qp)
451{
452 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
453 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
454 qp->qp_status.cq_head = 0;
455 } else {
456 qp->qp_status.cq_head++;
457 }
458}
459
460static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
461{
462 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
463
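	/*
	 * Completion entries are consumed while the CQE phase bit matches the
	 * driver's expected phase; qm_cq_head_update() toggles that expectation
	 * each time cq_head wraps, so entries left over from the previous lap
	 * are never processed twice.
	 */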
464 if (qp->req_cb) {
465 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
466 dma_rmb();
467 qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head);
468 qm_cq_head_update(qp);
469 cqe = qp->cqe + qp->qp_status.cq_head;
470 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
471 qp->qp_status.cq_head, 0);
472 atomic_dec(&qp->qp_status.used);
473 }
474
475 /* set c_flag */
476 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
477 qp->qp_status.cq_head, 1);
478 }
479}
480
481static void qm_qp_work_func(struct work_struct *work)
482{
483 struct hisi_qp *qp;
484
485 qp = container_of(work, struct hisi_qp, work);
486 qm_poll_qp(qp, qp->qm);
487}
488
489static irqreturn_t qm_irq_handler(int irq, void *data)
490{
491 struct hisi_qm *qm = data;
492 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
493 struct hisi_qp *qp;
494 int eqe_num = 0;
495
496 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
497 eqe_num++;
498 qp = qm_to_hisi_qp(qm, eqe);
499 if (qp)
500 queue_work(qp->wq, &qp->work);
501
502 if (qm->status.eq_head == QM_Q_DEPTH - 1) {
503 qm->status.eqc_phase = !qm->status.eqc_phase;
504 eqe = qm->eqe;
505 qm->status.eq_head = 0;
506 } else {
507 eqe++;
508 qm->status.eq_head++;
509 }
510
511 if (eqe_num == QM_Q_DEPTH / 2 - 1) {
512 eqe_num = 0;
513 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
514 }
515 }
516
517 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
518
519 return IRQ_HANDLED;
520}
521
522static irqreturn_t qm_irq(int irq, void *data)
523{
524 struct hisi_qm *qm = data;
525
526 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
527 return qm_irq_handler(irq, data);
528
529 dev_err(&qm->pdev->dev, "invalid int source\n");
530 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
531
532 return IRQ_NONE;
533}
534
535static irqreturn_t qm_aeq_irq(int irq, void *data)
536{
537 struct hisi_qm *qm = data;
538 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
539 u32 type;
540
541 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
542 return IRQ_NONE;
543
544 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
545 type = aeqe->dw0 >> QM_AEQE_TYPE_SHIFT;
546 if (type < ARRAY_SIZE(qm_fifo_overflow))
547 dev_err(&qm->pdev->dev, "%s overflow\n",
548 qm_fifo_overflow[type]);
549 else
550 dev_err(&qm->pdev->dev, "unknown error type %d\n",
551 type);
552
553 if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
554 qm->status.aeqc_phase = !qm->status.aeqc_phase;
555 aeqe = qm->aeqe;
556 qm->status.aeq_head = 0;
557 } else {
558 aeqe++;
559 qm->status.aeq_head++;
560 }
561
562 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
563 }
564
565 return IRQ_HANDLED;
566}
567
568static irqreturn_t qm_abnormal_irq(int irq, void *data)
569{
570 const struct hisi_qm_hw_error *err = qm_hw_error;
571 struct hisi_qm *qm = data;
572 struct device *dev = &qm->pdev->dev;
573 u32 error_status, tmp;
574
575 /* read err sts */
576 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
577 error_status = qm->msi_mask & tmp;
578
579 while (err->msg) {
580 if (err->int_msk & error_status)
581 dev_err(dev, "%s [error status=0x%x] found\n",
582 err->msg, err->int_msk);
583
584 err++;
585 }
586
587 /* clear err sts */
588 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
589
590 return IRQ_HANDLED;
591}
592
593static int qm_irq_register(struct hisi_qm *qm)
594{
595 struct pci_dev *pdev = qm->pdev;
596 int ret;
597
598 ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
599 qm_irq, IRQF_SHARED, qm->dev_name, qm);
600 if (ret)
601 return ret;
602
603 if (qm->ver == QM_HW_V2) {
604 ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
605 qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
606 if (ret)
607 goto err_aeq_irq;
608
609 if (qm->fun_type == QM_HW_PF) {
610 ret = request_irq(pci_irq_vector(pdev,
611 QM_ABNORMAL_EVENT_IRQ_VECTOR),
612 qm_abnormal_irq, IRQF_SHARED,
613 qm->dev_name, qm);
614 if (ret)
615				goto err_abnormal_irq;
616 }
617 }
618
619 return 0;
620
621err_abnormal_irq:
622 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
623err_aeq_irq:
624 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
625 return ret;
626}
627
628static void qm_irq_unregister(struct hisi_qm *qm)
629{
630 struct pci_dev *pdev = qm->pdev;
631
632 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
633
634 if (qm->ver == QM_HW_V2) {
635 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
636
637 if (qm->fun_type == QM_HW_PF)
638 free_irq(pci_irq_vector(pdev,
639 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
640 }
641}
642
643static void qm_init_qp_status(struct hisi_qp *qp)
644{
645 struct hisi_qp_status *qp_status = &qp->qp_status;
646
647 qp_status->sq_tail = 0;
648 qp_status->cq_head = 0;
649 qp_status->cqc_phase = 1;
650 qp_status->flags = 0;
651}
652
653static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
654 u32 number)
655{
656 u64 tmp = 0;
657
658 if (number > 0) {
659 switch (type) {
660 case SQC_VFT:
661 switch (qm->ver) {
662 case QM_HW_V1:
663 tmp = QM_SQC_VFT_BUF_SIZE |
664 QM_SQC_VFT_SQC_SIZE |
665 QM_SQC_VFT_INDEX_NUMBER |
666 QM_SQC_VFT_VALID |
667 (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
668 break;
669 case QM_HW_V2:
670 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
671 QM_SQC_VFT_VALID |
672 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
673 break;
674 case QM_HW_UNKNOWN:
675 break;
676 }
677 break;
678 case CQC_VFT:
679 switch (qm->ver) {
680 case QM_HW_V1:
681 tmp = QM_CQC_VFT_BUF_SIZE |
682 QM_CQC_VFT_SQC_SIZE |
683 QM_CQC_VFT_INDEX_NUMBER |
684 QM_CQC_VFT_VALID;
685 break;
686 case QM_HW_V2:
687 tmp = QM_CQC_VFT_VALID;
688 break;
689 case QM_HW_UNKNOWN:
690 break;
691 }
692 break;
693 }
694 }
695
696 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
697 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
698}
699
700static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
701 u32 fun_num, u32 base, u32 number)
702{
703 unsigned int val;
704 int ret;
705
706 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
707 val & BIT(0), 10, 1000);
708 if (ret)
709 return ret;
710
711 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
712 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
713 writel(fun_num, qm->io_base + QM_VFT_CFG);
714
715 qm_vft_data_cfg(qm, type, base, number);
716
717 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
718 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
719
720 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
721 val & BIT(0), 10, 1000);
722}
723
724/* The config should be conducted after qm_dev_mem_reset() */
725static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
726 u32 number)
727{
728 int ret, i;
729
730 for (i = SQC_VFT; i <= CQC_VFT; i++) {
731 ret = qm_set_vft_common(qm, i, fun_num, base, number);
732 if (ret)
733 return ret;
734 }
735
736 return 0;
737}
738
739static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
740{
741 u64 sqc_vft;
742 int ret;
743
744 ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
745 if (ret)
746 return ret;
747
748 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
749 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
750 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
751 *number = (QM_SQC_VFT_NUM_MASK_v2 &
752 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
753
754 return 0;
755}
756
757static struct hisi_qm *file_to_qm(struct debugfs_file *file)
758{
759 struct qm_debug *debug = file->debug;
760
761 return container_of(debug, struct hisi_qm, debug);
762}
763
764static u32 current_q_read(struct debugfs_file *file)
765{
766 struct hisi_qm *qm = file_to_qm(file);
767
768 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
769}
770
771static int current_q_write(struct debugfs_file *file, u32 val)
772{
773 struct hisi_qm *qm = file_to_qm(file);
774 u32 tmp;
775
776 if (val >= qm->debug.curr_qm_qp_num)
777 return -EINVAL;
778
779 tmp = val << QM_DFX_QN_SHIFT |
780 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
781 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
782
783 tmp = val << QM_DFX_QN_SHIFT |
784 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
785 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
786
787 return 0;
788}
789
790static u32 clear_enable_read(struct debugfs_file *file)
791{
792 struct hisi_qm *qm = file_to_qm(file);
793
794 return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
795}
796
797/* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
798static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
799{
800 struct hisi_qm *qm = file_to_qm(file);
801
802 if (rd_clr_ctrl > 1)
803 return -EINVAL;
804
805 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
806
807 return 0;
808}
809
810static ssize_t qm_debug_read(struct file *filp, char __user *buf,
811 size_t count, loff_t *pos)
812{
813 struct debugfs_file *file = filp->private_data;
814 enum qm_debug_file index = file->index;
815 char tbuf[QM_DBG_TMP_BUF_LEN];
816 u32 val;
817 int ret;
818
819 mutex_lock(&file->lock);
820 switch (index) {
821 case CURRENT_Q:
822 val = current_q_read(file);
823 break;
824 case CLEAR_ENABLE:
825 val = clear_enable_read(file);
826 break;
827 default:
828 mutex_unlock(&file->lock);
829 return -EINVAL;
830 }
831 mutex_unlock(&file->lock);
832 ret = sprintf(tbuf, "%u\n", val);
833 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
834}
835
836static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
837 size_t count, loff_t *pos)
838{
839 struct debugfs_file *file = filp->private_data;
840 enum qm_debug_file index = file->index;
841 unsigned long val;
842 char tbuf[QM_DBG_TMP_BUF_LEN];
843 int len, ret;
844
845 if (*pos != 0)
846 return 0;
847
848 if (count >= QM_DBG_TMP_BUF_LEN)
849 return -ENOSPC;
850
851 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
852 count);
853 if (len < 0)
854 return len;
855
856 tbuf[len] = '\0';
857 if (kstrtoul(tbuf, 0, &val))
858 return -EFAULT;
859
860 mutex_lock(&file->lock);
861 switch (index) {
862 case CURRENT_Q:
863 ret = current_q_write(file, val);
864 if (ret)
865 goto err_input;
866 break;
867 case CLEAR_ENABLE:
868 ret = clear_enable_write(file, val);
869 if (ret)
870 goto err_input;
871 break;
872 default:
873 ret = -EINVAL;
874 goto err_input;
875 }
876 mutex_unlock(&file->lock);
877
878 return count;
879
880err_input:
881 mutex_unlock(&file->lock);
882 return ret;
883}
884
885static const struct file_operations qm_debug_fops = {
886 .owner = THIS_MODULE,
887 .open = simple_open,
888 .read = qm_debug_read,
889 .write = qm_debug_write,
890};
891
892struct qm_dfx_registers {
893 char *reg_name;
894 u64 reg_offset;
895};
896
897#define CNT_CYC_REGS_NUM 10
898static struct qm_dfx_registers qm_dfx_regs[] = {
899	/* XXX_CNT are read-clear registers */
900 {"QM_ECC_1BIT_CNT ", 0x104000ull},
901 {"QM_ECC_MBIT_CNT ", 0x104008ull},
902 {"QM_DFX_MB_CNT ", 0x104018ull},
903 {"QM_DFX_DB_CNT ", 0x104028ull},
904 {"QM_DFX_SQE_CNT ", 0x104038ull},
905 {"QM_DFX_CQE_CNT ", 0x104048ull},
906 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
907 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
908 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
909 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
910 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
911 {"QM_ECC_1BIT_INF ", 0x104004ull},
912 {"QM_ECC_MBIT_INF ", 0x10400cull},
913 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
914 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
915 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
916 {"QM_DFX_FF_ST0 ", 0x1040c8ull},
917 {"QM_DFX_FF_ST1 ", 0x1040ccull},
918 {"QM_DFX_FF_ST2 ", 0x1040d0ull},
919 {"QM_DFX_FF_ST3 ", 0x1040d4ull},
920 {"QM_DFX_FF_ST4 ", 0x1040d8ull},
921 {"QM_DFX_FF_ST5 ", 0x1040dcull},
922 {"QM_DFX_FF_ST6 ", 0x1040e0ull},
923 {"QM_IN_IDLE_ST ", 0x1040e4ull},
924 { NULL, 0}
925};
926
927static struct qm_dfx_registers qm_vf_dfx_regs[] = {
928 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
929 { NULL, 0}
930};
931
932static int qm_regs_show(struct seq_file *s, void *unused)
933{
934 struct hisi_qm *qm = s->private;
935 struct qm_dfx_registers *regs;
936 u32 val;
937
938 if (qm->fun_type == QM_HW_PF)
939 regs = qm_dfx_regs;
940 else
941 regs = qm_vf_dfx_regs;
942
943 while (regs->reg_name) {
944 val = readl(qm->io_base + regs->reg_offset);
945 seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
946 regs++;
947 }
948
949 return 0;
950}
951
952static int qm_regs_open(struct inode *inode, struct file *file)
953{
954 return single_open(file, qm_regs_show, inode->i_private);
955}
956
957static const struct file_operations qm_regs_fops = {
958 .owner = THIS_MODULE,
959 .open = qm_regs_open,
960 .read = seq_read,
961 .release = single_release,
962};
963
964static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
965{
966 struct dentry *qm_d = qm->debug.qm_d, *tmp;
967 struct debugfs_file *file = qm->debug.files + index;
968
969 tmp = debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
970 &qm_debug_fops);
971 if (IS_ERR(tmp))
972 return -ENOENT;
973
974 file->index = index;
975 mutex_init(&file->lock);
976 file->debug = &qm->debug;
977
978 return 0;
979}
980
981static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
982 u32 msi)
983{
984 dev_info(&qm->pdev->dev,
985		 "QM v%d does not support hw error handling\n", qm->ver);
986
987 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
988}
989
990static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
991 u32 msi)
992{
993 u32 irq_enable = ce | nfe | fe | msi;
994 u32 irq_unmask = ~irq_enable;
995
996 qm->error_mask = ce | nfe | fe;
997 qm->msi_mask = msi;
998
999 /* configure error type */
1000 writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
1001 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1002 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1003 writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
1004
1005 /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */
1006 writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL);
1007
1008 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1009 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1010}
1011
1012static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1013{
1014 const struct hisi_qm_hw_error *err = qm_hw_error;
1015 struct device *dev = &qm->pdev->dev;
1016 u32 reg_val, type, vf_num;
1017
1018 while (err->msg) {
1019 if (err->int_msk & error_status) {
1020 dev_err(dev, "%s [error status=0x%x] found\n",
1021 err->msg, err->int_msk);
1022
1023 if (error_status & QM_DB_TIMEOUT) {
1024 reg_val = readl(qm->io_base +
1025 QM_ABNORMAL_INF01);
1026 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1027 QM_DB_TIMEOUT_TYPE_SHIFT;
1028 vf_num = reg_val & QM_DB_TIMEOUT_VF;
1029 dev_err(dev, "qm %s doorbell timeout in function %u\n",
1030 qm_db_timeout[type], vf_num);
1031 }
1032
1033 if (error_status & QM_OF_FIFO_OF) {
1034 reg_val = readl(qm->io_base +
1035 QM_ABNORMAL_INF00);
1036 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1037 QM_FIFO_OVERFLOW_TYPE_SHIFT;
1038 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1039
1040 if (type < ARRAY_SIZE(qm_fifo_overflow))
1041 dev_err(dev, "qm %s fifo overflow in function %u\n",
1042 qm_fifo_overflow[type],
1043 vf_num);
1044 else
1045 dev_err(dev, "unknown error type\n");
1046 }
1047 }
1048 err++;
1049 }
1050}
1051
1052static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
1053{
1054 u32 error_status, tmp;
1055
1056 /* read err sts */
1057 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1058 error_status = qm->error_mask & tmp;
1059
1060 if (error_status) {
1061 qm_log_hw_error(qm, error_status);
1062
1063 /* clear err sts */
1064 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1065
1066 return PCI_ERS_RESULT_NEED_RESET;
1067 }
1068
1069 return PCI_ERS_RESULT_RECOVERED;
1070}
1071
1072static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1073 .qm_db = qm_db_v1,
1074 .get_irq_num = qm_get_irq_num_v1,
1075 .hw_error_init = qm_hw_error_init_v1,
1076};
1077
1078static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1079 .get_vft = qm_get_vft_v2,
1080 .qm_db = qm_db_v2,
1081 .get_irq_num = qm_get_irq_num_v2,
1082 .hw_error_init = qm_hw_error_init_v2,
1083 .hw_error_handle = qm_hw_error_handle_v2,
1084};
1085
1086static void *qm_get_avail_sqe(struct hisi_qp *qp)
1087{
1088 struct hisi_qp_status *qp_status = &qp->qp_status;
1089 u16 sq_tail = qp_status->sq_tail;
1090
1091 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH))
1092 return NULL;
1093
1094 return qp->sqe + sq_tail * qp->qm->sqe_size;
1095}
1096
1097/**
1098 * hisi_qm_create_qp() - Create a queue pair from qm.
1099 * @qm: The qm we create a qp from.
1100 * @alg_type: Accelerator specific algorithm type in sqc.
1101 *
1102 * Return the created qp on success, -EBUSY if all qps in the qm are allocated,
1103 * or -ENOMEM if allocating qp memory fails.
1104 */
1105struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1106{
1107 struct device *dev = &qm->pdev->dev;
1108 struct hisi_qp *qp;
1109 int qp_id, ret;
1110
1111 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1112 if (!qp)
1113 return ERR_PTR(-ENOMEM);
1114
1115 write_lock(&qm->qps_lock);
1116
1117 qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num);
1118 if (qp_id >= qm->qp_num) {
1119 write_unlock(&qm->qps_lock);
1120 dev_info(&qm->pdev->dev, "QM all queues are busy!\n");
1121 ret = -EBUSY;
1122 goto err_free_qp;
1123 }
1124 set_bit(qp_id, qm->qp_bitmap);
1125 qm->qp_array[qp_id] = qp;
1126
1127 write_unlock(&qm->qps_lock);
1128
1129 qp->qm = qm;
1130
1131 if (qm->use_dma_api) {
1132 qp->qdma.size = qm->sqe_size * QM_Q_DEPTH +
1133 sizeof(struct qm_cqe) * QM_Q_DEPTH;
1134 qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size,
1135 &qp->qdma.dma, GFP_KERNEL);
1136 if (!qp->qdma.va) {
1137 ret = -ENOMEM;
1138 goto err_clear_bit;
1139 }
1140
1141 dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n",
1142 qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
1143 }
1144
1145 qp->qp_id = qp_id;
1146 qp->alg_type = alg_type;
1147 INIT_WORK(&qp->work, qm_qp_work_func);
1148 qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
1149 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
1150 if (!qp->wq) {
1151 ret = -EFAULT;
1152 goto err_free_qp_mem;
1153 }
1154
1155 return qp;
1156
1157err_free_qp_mem:
1158 if (qm->use_dma_api)
1159 dma_free_coherent(dev, qp->qdma.size, qp->qdma.va,
1160 qp->qdma.dma);
1161err_clear_bit:
1162 write_lock(&qm->qps_lock);
1163 qm->qp_array[qp_id] = NULL;
1164 clear_bit(qp_id, qm->qp_bitmap);
1165 write_unlock(&qm->qps_lock);
1166err_free_qp:
1167 kfree(qp);
1168 return ERR_PTR(ret);
1169}
1170EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
1171
1172/**
1173 * hisi_qm_release_qp() - Release a qp back to its qm.
1174 * @qp: The qp we want to release.
1175 *
1176 * This function releases the resources of a qp.
1177 */
1178void hisi_qm_release_qp(struct hisi_qp *qp)
1179{
1180 struct hisi_qm *qm = qp->qm;
1181 struct qm_dma *qdma = &qp->qdma;
1182 struct device *dev = &qm->pdev->dev;
1183
1184 if (qm->use_dma_api && qdma->va)
1185 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
1186
1187 write_lock(&qm->qps_lock);
1188 qm->qp_array[qp->qp_id] = NULL;
1189 clear_bit(qp->qp_id, qm->qp_bitmap);
1190 write_unlock(&qm->qps_lock);
1191
1192 kfree(qp);
1193}
1194EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
1195
1196static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
1197{
1198 struct hisi_qm *qm = qp->qm;
1199 struct device *dev = &qm->pdev->dev;
1200 enum qm_hw_ver ver = qm->ver;
1201 struct qm_sqc *sqc;
1202 struct qm_cqc *cqc;
1203 dma_addr_t sqc_dma;
1204 dma_addr_t cqc_dma;
1205 int ret;
1206
1207 qm_init_qp_status(qp);
1208
1209 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
1210 if (!sqc)
1211 return -ENOMEM;
1212 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
1213 DMA_TO_DEVICE);
1214 if (dma_mapping_error(dev, sqc_dma)) {
1215 kfree(sqc);
1216 return -ENOMEM;
1217 }
1218
1219 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
1220 if (ver == QM_HW_V1) {
1221 sqc->dw3 = QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size);
1222 sqc->w8 = QM_Q_DEPTH - 1;
1223 } else if (ver == QM_HW_V2) {
1224 sqc->dw3 = QM_MK_SQC_DW3_V2(qm->sqe_size);
1225 sqc->w8 = 0; /* rand_qc */
1226 }
1227 sqc->cq_num = qp_id;
1228 sqc->w13 = QM_MK_SQC_W13(0, 1, qp->alg_type);
1229
1230 ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
1231 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
1232 kfree(sqc);
1233 if (ret)
1234 return ret;
1235
1236 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
1237 if (!cqc)
1238 return -ENOMEM;
1239 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
1240 DMA_TO_DEVICE);
1241 if (dma_mapping_error(dev, cqc_dma)) {
1242 kfree(cqc);
1243 return -ENOMEM;
1244 }
1245
1246 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
1247 if (ver == QM_HW_V1) {
1248 cqc->dw3 = QM_MK_CQC_DW3_V1(0, 0, 0, 4);
1249 cqc->w8 = QM_Q_DEPTH - 1;
1250 } else if (ver == QM_HW_V2) {
1251 cqc->dw3 = QM_MK_CQC_DW3_V2(4);
1252 cqc->w8 = 0;
1253 }
1254 cqc->dw6 = 1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT;
1255
1256 ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
1257 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
1258 kfree(cqc);
1259
1260 return ret;
1261}
1262
1263/**
1264 * hisi_qm_start_qp() - Start a qp into running.
1265 * @qp: The qp we want to start to run.
1266 * @arg: Accelerator specific argument.
1267 *
1268 * After this function, the qp can receive requests from user. Return qp_id on
1269 * success, or a negative error code on failure.
1270 */
1271int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
1272{
1273 struct hisi_qm *qm = qp->qm;
1274 struct device *dev = &qm->pdev->dev;
1275 enum qm_hw_ver ver = qm->ver;
1276 int qp_id = qp->qp_id;
1277 int pasid = arg;
1278 size_t off = 0;
1279 int ret;
1280
1281#define QP_INIT_BUF(qp, type, size) do { \
1282 (qp)->type = ((qp)->qdma.va + (off)); \
1283 (qp)->type##_dma = (qp)->qdma.dma + (off); \
1284 off += (size); \
1285} while (0)
1286
1287 if (!qp->qdma.dma) {
1288 dev_err(dev, "cannot get qm dma buffer\n");
1289 return -EINVAL;
1290 }
1291
1292	/* the sq needs 128-byte alignment */
1293 if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) {
1294		dev_err(dev, "qm sq is not aligned to 128 bytes\n");
1295 return -EINVAL;
1296 }
1297
1298 QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH);
1299 QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH);
1300
1301 dev_dbg(dev, "init qp buffer(v%d):\n"
1302 " sqe (%pK, %lx)\n"
1303 " cqe (%pK, %lx)\n",
1304 ver, qp->sqe, (unsigned long)qp->sqe_dma,
1305 qp->cqe, (unsigned long)qp->cqe_dma);
1306
1307 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
1308 if (ret)
1309 return ret;
1310
1311 dev_dbg(dev, "queue %d started\n", qp_id);
1312
1313 return qp_id;
1314}
1315EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
1316
1317/**
1318 * hisi_qm_stop_qp() - Stop a qp in qm.
1319 * @qp: The qp we want to stop.
1320 *
1321 * This function is the reverse of hisi_qm_start_qp(). Return 0 on success.
1322 */
1323int hisi_qm_stop_qp(struct hisi_qp *qp)
1324{
1325 struct device *dev = &qp->qm->pdev->dev;
1326 int i = 0;
1327
1328 /* it is stopped */
1329 if (test_bit(QP_STOP, &qp->qp_status.flags))
1330 return 0;
1331
1332 while (atomic_read(&qp->qp_status.used)) {
1333 i++;
1334 msleep(20);
1335 if (i == 10) {
1336			dev_err(dev, "Cannot drain out data for stopping, force to stop!\n");
1337 return 0;
1338 }
1339 }
1340
1341 set_bit(QP_STOP, &qp->qp_status.flags);
1342
1343 dev_dbg(dev, "stop queue %u!", qp->qp_id);
1344
1345 return 0;
1346}
1347EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
1348
1349/**
1350 * hisi_qp_send() - Queue up a task in the hardware queue.
1351 * @qp: The qp in which to put the message.
1352 * @msg: The message.
1353 *
1354 * This function will return -EBUSY if the qp is currently full, and -EAGAIN
1355 * if the qm the qp belongs to is resetting.
1356 */
1357int hisi_qp_send(struct hisi_qp *qp, const void *msg)
1358{
1359 struct hisi_qp_status *qp_status = &qp->qp_status;
1360 u16 sq_tail = qp_status->sq_tail;
1361 u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
1362 void *sqe = qm_get_avail_sqe(qp);
1363
1364 if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) {
1365 dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
1366 return -EAGAIN;
1367 }
1368
1369 if (!sqe)
1370 return -EBUSY;
1371
1372 memcpy(sqe, msg, qp->qm->sqe_size);
1373
1374 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
1375 atomic_inc(&qp->qp_status.used);
1376 qp_status->sq_tail = sq_tail_next;
1377
1378 return 0;
1379}
1380EXPORT_SYMBOL_GPL(hisi_qp_send);
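
/*
 * Minimal usage sketch of the qp lifecycle, assuming a hypothetical caller in
 * an accelerator driver (example_send_one() is illustrative and not part of
 * this driver): create a qp from a started qm, start it (optionally with a
 * pasid), push one sqe-sized message, then tear it down again.
 */
static int __maybe_unused example_send_one(struct hisi_qm *qm, u8 alg_type,
					   const void *msg)
{
	struct hisi_qp *qp;
	int qp_id, ret;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp_id = hisi_qm_start_qp(qp, 0);	/* 0: no pasid */
	if (qp_id < 0) {
		hisi_qm_release_qp(qp);
		return qp_id;
	}

	ret = hisi_qp_send(qp, msg);		/* msg is qm->sqe_size bytes */

	hisi_qm_stop_qp(qp);
	hisi_qm_release_qp(qp);
	return ret;
}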
1381
1382static void hisi_qm_cache_wb(struct hisi_qm *qm)
1383{
1384 unsigned int val;
1385
1386 if (qm->ver == QM_HW_V2) {
1387 writel(0x1, qm->io_base + QM_CACHE_WB_START);
1388 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
1389 val, val & BIT(0), 10, 1000))
1390 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
1391 }
1392}
1393
1394/**
1395 * hisi_qm_init() - Initialize the configuration of a qm.
1396 * @qm: The qm to be initialized.
1397 *
1398 * This function initializes the qm; hisi_qm_start() can then put the qm to work.
1399 */
1400int hisi_qm_init(struct hisi_qm *qm)
1401{
1402 struct pci_dev *pdev = qm->pdev;
1403 struct device *dev = &pdev->dev;
1404 unsigned int num_vec;
1405 int ret;
1406
1407 switch (qm->ver) {
1408 case QM_HW_V1:
1409 qm->ops = &qm_hw_ops_v1;
1410 break;
1411 case QM_HW_V2:
1412 qm->ops = &qm_hw_ops_v2;
1413 break;
1414 default:
1415 return -EINVAL;
1416 }
1417
1418 ret = pci_enable_device_mem(pdev);
1419 if (ret < 0) {
1420 dev_err(&pdev->dev, "Failed to enable device mem!\n");
1421 return ret;
1422 }
1423
1424 ret = pci_request_mem_regions(pdev, qm->dev_name);
1425 if (ret < 0) {
1426 dev_err(&pdev->dev, "Failed to request mem regions!\n");
1427 goto err_disable_pcidev;
1428 }
1429
1430 qm->io_base = ioremap(pci_resource_start(pdev, PCI_BAR_2),
1431 pci_resource_len(qm->pdev, PCI_BAR_2));
1432 if (!qm->io_base) {
1433 ret = -EIO;
1434 goto err_release_mem_regions;
1435 }
1436
1437 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
1438 if (ret < 0)
1439 goto err_iounmap;
1440 pci_set_master(pdev);
1441
1442 if (!qm->ops->get_irq_num) {
1443 ret = -EOPNOTSUPP;
1444 goto err_iounmap;
1445 }
1446 num_vec = qm->ops->get_irq_num(qm);
1447 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
1448 if (ret < 0) {
1449 dev_err(dev, "Failed to enable MSI vectors!\n");
1450 goto err_iounmap;
1451 }
1452
1453 ret = qm_irq_register(qm);
1454 if (ret)
1455 goto err_free_irq_vectors;
1456
1457 mutex_init(&qm->mailbox_lock);
1458 rwlock_init(&qm->qps_lock);
1459
1460 dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
1461 qm->use_dma_api ? "dma api" : "iommu api");
1462
1463 return 0;
1464
1465err_free_irq_vectors:
1466 pci_free_irq_vectors(pdev);
1467err_iounmap:
1468 iounmap(qm->io_base);
1469err_release_mem_regions:
1470 pci_release_mem_regions(pdev);
1471err_disable_pcidev:
1472 pci_disable_device(pdev);
1473
1474 return ret;
1475}
1476EXPORT_SYMBOL_GPL(hisi_qm_init);
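
/*
 * Hypothetical probe-time sketch (the function name, device name and queue
 * numbers are illustrative, not part of this driver): the caller identifies
 * the hardware, fills in the qm before hisi_qm_init(), and starts it once the
 * queue range owned by this function is known. A real driver embeds
 * struct hisi_qm in its device structure rather than using a local.
 */
static int __maybe_unused example_probe(struct pci_dev *pdev)
{
	struct hisi_qm qm = {};
	int ret;

	qm.pdev = pdev;
	qm.ver = hisi_qm_get_hw_version(pdev);
	if (qm.ver == QM_HW_UNKNOWN)
		return -EINVAL;

	qm.dev_name = "example_acc";
	qm.fun_type = QM_HW_PF;
	qm.sqe_size = 128;		/* accelerator specific */
	qm.use_dma_api = true;

	ret = hisi_qm_init(&qm);
	if (ret)
		return ret;

	qm.qp_base = 0;
	qm.qp_num = 16;

	ret = hisi_qm_start(&qm);
	if (ret)
		hisi_qm_uninit(&qm);

	return ret;
}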
1477
1478/**
1479 * hisi_qm_uninit() - Uninitialize qm.
1480 * @qm: The qm to be uninitialized.
1481 *
1482 * This function releases the qm related device resources.
1483 */
1484void hisi_qm_uninit(struct hisi_qm *qm)
1485{
1486 struct pci_dev *pdev = qm->pdev;
1487 struct device *dev = &pdev->dev;
1488
1489 if (qm->use_dma_api && qm->qdma.va) {
1490 hisi_qm_cache_wb(qm);
1491 dma_free_coherent(dev, qm->qdma.size,
1492 qm->qdma.va, qm->qdma.dma);
1493 memset(&qm->qdma, 0, sizeof(qm->qdma));
1494 }
1495
1496 qm_irq_unregister(qm);
1497 pci_free_irq_vectors(pdev);
1498 iounmap(qm->io_base);
1499 pci_release_mem_regions(pdev);
1500 pci_disable_device(pdev);
1501}
1502EXPORT_SYMBOL_GPL(hisi_qm_uninit);
1503
1504/**
1505 * hisi_qm_get_vft() - Get vft from a qm.
1506 * @qm: The qm we want to get its vft.
1507 * @base: The base number of queue in vft.
1508 * @number: The number of queues in vft.
1509 *
1510 * Multiple queues can be allocated to a qm by configuring the virtual function
1511 * table. This function reads that configuration back. Normally, it is called in
1512 * the VF driver to get the queue information.
1513 *
1514 * qm hw v1 does not support this interface.
1515 */
1516int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
1517{
1518 if (!base || !number)
1519 return -EINVAL;
1520
1521 if (!qm->ops->get_vft) {
1522 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
1523 return -EINVAL;
1524 }
1525
1526 return qm->ops->get_vft(qm, base, number);
1527}
1528EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
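
/*
 * Illustrative VF-side sketch (hypothetical helper, not part of this driver):
 * a VF driver reads back the queue range its PF assigned to it before
 * starting its own qm.
 */
static int __maybe_unused example_vf_read_queues(struct hisi_qm *qm)
{
	u32 base = 0, number = 0;
	int ret;

	ret = hisi_qm_get_vft(qm, &base, &number);
	if (ret)
		return ret;

	qm->qp_base = base;
	qm->qp_num = number;

	return 0;
}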
1529
1530/**
1531 * hisi_qm_set_vft() - Set "virtual function table" for a qm.
1532 * @fun_num: Number of operated function.
1533 * @qm: The qm in which to set vft, always in a PF.
1534 * @base: The base number of queue in vft.
1535 * @number: The number of queues in vft. 0 means invalid vft.
1536 *
1537 * This function is always called in the PF driver; it is used to assign queues
1538 * among the PF and VFs.
1539 *
1540 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
1541 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
1542 * (VF function number 0x2)
1543 */
1544int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1545 u32 number)
1546{
1547 u32 max_q_num = qm->ctrl_qp_num;
1548
1549 if (base >= max_q_num || number > max_q_num ||
1550 (base + number) > max_q_num)
1551 return -EINVAL;
1552
1553 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
1554}
1555EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
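
/*
 * Illustrative PF-side sketch following the examples in the kerneldoc above
 * (the queue split is hypothetical): give queues 0..15 to the PF and queues
 * 16..31 to the VF with function number 2.
 */
static int __maybe_unused example_pf_assign_queues(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_set_vft(qm, 0, 0, 16);	/* PF */
	if (ret)
		return ret;

	return hisi_qm_set_vft(qm, 2, 16, 16);	/* VF 2 */
}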
1556
1557static void qm_init_eq_aeq_status(struct hisi_qm *qm)
1558{
1559 struct hisi_qm_status *status = &qm->status;
1560
1561 status->eq_head = 0;
1562 status->aeq_head = 0;
1563 status->eqc_phase = 1;
1564 status->aeqc_phase = 1;
1565}
1566
1567static int qm_eq_ctx_cfg(struct hisi_qm *qm)
1568{
1569 struct device *dev = &qm->pdev->dev;
1570 struct qm_eqc *eqc;
1571 struct qm_aeqc *aeqc;
1572 dma_addr_t eqc_dma;
1573 dma_addr_t aeqc_dma;
1574 int ret;
1575
1576 qm_init_eq_aeq_status(qm);
1577
1578 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
1579 if (!eqc)
1580 return -ENOMEM;
1581 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
1582 DMA_TO_DEVICE);
1583 if (dma_mapping_error(dev, eqc_dma)) {
1584 kfree(eqc);
1585 return -ENOMEM;
1586 }
1587
1588 eqc->base_l = lower_32_bits(qm->eqe_dma);
1589 eqc->base_h = upper_32_bits(qm->eqe_dma);
1590 if (qm->ver == QM_HW_V1)
1591 eqc->dw3 = QM_EQE_AEQE_SIZE;
1592 eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
1593 ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
1594 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
1595 kfree(eqc);
1596 if (ret)
1597 return ret;
1598
1599 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
1600 if (!aeqc)
1601 return -ENOMEM;
1602 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
1603 DMA_TO_DEVICE);
1604 if (dma_mapping_error(dev, aeqc_dma)) {
1605 kfree(aeqc);
1606 return -ENOMEM;
1607 }
1608
1609 aeqc->base_l = lower_32_bits(qm->aeqe_dma);
1610 aeqc->base_h = upper_32_bits(qm->aeqe_dma);
1611 aeqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
1612
1613 ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
1614 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
1615 kfree(aeqc);
1616
1617 return ret;
1618}
1619
1620static int __hisi_qm_start(struct hisi_qm *qm)
1621{
1622 struct pci_dev *pdev = qm->pdev;
1623 struct device *dev = &pdev->dev;
1624 size_t off = 0;
1625 int ret;
1626
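/*
 * eqe, aeqe, sqc and cqc contexts all live in one coherent DMA allocation
 * (qm->qdma); QM_INIT_BUF() carves that buffer at a running offset and
 * records both the CPU and the DMA address of each region.
 */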
1627#define QM_INIT_BUF(qm, type, num) do { \
1628 (qm)->type = ((qm)->qdma.va + (off)); \
1629 (qm)->type##_dma = (qm)->qdma.dma + (off); \
1630 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
1631} while (0)
1632
1633 WARN_ON(!qm->qdma.dma);
1634
1635 if (qm->qp_num == 0)
1636 return -EINVAL;
1637
1638 if (qm->fun_type == QM_HW_PF) {
1639 ret = qm_dev_mem_reset(qm);
1640 if (ret)
1641 return ret;
1642
1643 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
1644 if (ret)
1645 return ret;
1646 }
1647
1648 QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
1649 QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
1650 QM_INIT_BUF(qm, sqc, qm->qp_num);
1651 QM_INIT_BUF(qm, cqc, qm->qp_num);
1652
1653 dev_dbg(dev, "init qm buffer:\n"
1654 " eqe (%pK, %lx)\n"
1655 " aeqe (%pK, %lx)\n"
1656 " sqc (%pK, %lx)\n"
1657 " cqc (%pK, %lx)\n",
1658 qm->eqe, (unsigned long)qm->eqe_dma,
1659 qm->aeqe, (unsigned long)qm->aeqe_dma,
1660 qm->sqc, (unsigned long)qm->sqc_dma,
1661 qm->cqc, (unsigned long)qm->cqc_dma);
1662
1663 ret = qm_eq_ctx_cfg(qm);
1664 if (ret)
1665 return ret;
1666
1667 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
1668 if (ret)
1669 return ret;
1670
1671 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
1672 if (ret)
1673 return ret;
1674
1675 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
1676 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
1677
1678 return 0;
1679}
1680
1681/**
1682 * hisi_qm_start() - start qm
1683 * @qm: The qm to be started.
1684 *
1685 * This function starts a qm; qps can then be allocated from it.
1686 */
1687int hisi_qm_start(struct hisi_qm *qm)
1688{
1689 struct device *dev = &qm->pdev->dev;
1690
1691 dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
1692
1693 if (!qm->qp_num) {
1694 dev_err(dev, "qp_num should not be 0\n");
1695 return -EINVAL;
1696 }
1697
1698 if (!qm->qp_bitmap) {
1699 qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num),
1700 sizeof(long), GFP_KERNEL);
1701 qm->qp_array = devm_kcalloc(dev, qm->qp_num,
1702 sizeof(struct hisi_qp *),
1703 GFP_KERNEL);
1704 if (!qm->qp_bitmap || !qm->qp_array)
1705 return -ENOMEM;
1706 }
1707
1708 if (!qm->use_dma_api) {
1709 dev_dbg(&qm->pdev->dev, "qm delay start\n");
1710 return 0;
1711 } else if (!qm->qdma.va) {
1712 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
1713 QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
1714 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
1715 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
1716 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size,
1717 &qm->qdma.dma, GFP_KERNEL);
1718 dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n",
1719 qm->qdma.va, &qm->qdma.dma, qm->qdma.size);
1720 if (!qm->qdma.va)
1721 return -ENOMEM;
1722 }
1723
1724 return __hisi_qm_start(qm);
1725}
1726EXPORT_SYMBOL_GPL(hisi_qm_start);
1727
1728/**
1729 * hisi_qm_stop() - Stop a qm.
1730 * @qm: The qm which will be stopped.
1731 *
1732 * This function stops the qm and its qps; the qm then cannot accept requests.
1733 * Related resources are not released in this state; hisi_qm_start() can be
1734 * used to start the qm again.
1735 */
1736int hisi_qm_stop(struct hisi_qm *qm)
1737{
1738 struct device *dev;
1739 struct hisi_qp *qp;
1740 int ret = 0, i;
1741
1742 if (!qm || !qm->pdev) {
1743 WARN_ON(1);
1744 return -EINVAL;
1745 }
1746
1747 dev = &qm->pdev->dev;
1748
1749 /* Mask eq and aeq irq */
1750 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
1751 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
1752
1753 /* Stop all qps belong to this qm */
1754 for (i = 0; i < qm->qp_num; i++) {
1755 qp = qm->qp_array[i];
1756 if (qp) {
1757 ret = hisi_qm_stop_qp(qp);
1758 if (ret < 0) {
1759 dev_err(dev, "Failed to stop qp%d!\n", i);
1760 return -EBUSY;
1761 }
1762 }
1763 }
1764
1765 if (qm->fun_type == QM_HW_PF) {
1766 ret = hisi_qm_set_vft(qm, 0, 0, 0);
1767 if (ret < 0)
1768 dev_err(dev, "Failed to set vft!\n");
1769 }
1770
1771 return ret;
1772}
1773EXPORT_SYMBOL_GPL(hisi_qm_stop);
1774
1775/**
1776 * hisi_qm_debug_init() - Initialize qm related debugfs files.
1777 * @qm: The qm for which we want to add debugfs files.
1778 *
1779 * Create qm related debugfs files.
1780 */
1781int hisi_qm_debug_init(struct hisi_qm *qm)
1782{
1783 struct dentry *qm_d, *qm_regs;
1784 int i, ret;
1785
1786 qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
1787 if (IS_ERR(qm_d))
1788 return -ENOENT;
1789 qm->debug.qm_d = qm_d;
1790
1791 /* only show this in PF */
1792 if (qm->fun_type == QM_HW_PF)
1793 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
1794 if (qm_create_debugfs_file(qm, i)) {
1795 ret = -ENOENT;
1796 goto failed_to_create;
1797 }
1798
1799 qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
1800 &qm_regs_fops);
1801 if (IS_ERR(qm_regs)) {
1802 ret = -ENOENT;
1803 goto failed_to_create;
1804 }
1805
1806 return 0;
1807
1808failed_to_create:
1809 debugfs_remove_recursive(qm_d);
1810 return ret;
1811}
1812EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
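
/*
 * Hypothetical setup sketch (the directory name and helper are illustrative):
 * the accelerator driver owns the debugfs root and hands it to the qm, which
 * then creates its own files ("qm_regs" and, on a PF, "current_q" and
 * "clear_enable") under a "qm" subdirectory.
 */
static void __maybe_unused example_debugfs_setup(struct hisi_qm *qm)
{
	qm->debug.debug_root = debugfs_create_dir("example_acc", NULL);

	if (hisi_qm_debug_init(qm))
		dev_warn(&qm->pdev->dev, "failed to create qm debugfs files\n");
}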
1813
1814/**
1815 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
1816 * @qm: The qm for which we want to clear its debug registers.
1817 */
1818void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
1819{
1820 struct qm_dfx_registers *regs;
1821 int i;
1822
1823 /* clear current_q */
1824 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1825 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1826
1827 /*
1828	 * these registers are read-clear, so enable clear-on-read and read
1829	 * them once to clear them.
1830 */
1831 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
1832
1833 regs = qm_dfx_regs;
1834 for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
1835 readl(qm->io_base + regs->reg_offset);
1836 regs++;
1837 }
1838
1839 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
1840}
1841EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
1842
1843/**
1844 * hisi_qm_hw_error_init() - Configure qm hardware error report method.
1845 * @qm: The qm which we want to configure.
1846 * @ce: Bit mask of correctable error configure.
1847 * @nfe: Bit mask of non-fatal error configure.
1848 * @fe: Bit mask of fatal error configure.
1849 * @msi: Bit mask of error reported by message signal interrupt.
1850 *
1851 * Hardware errors of the qm can be reported either by RAS interrupts, which
1852 * are handled by UEFI and then PCIe AER, or by device MSI. The user can
1853 * configure each error to use either of the above two methods. For RAS
1854 * interrupts, an error can be configured as a correctable error, a non-fatal
1855 * error or a fatal error.
1856 *
1857 * Bits indicating errors can be set in ce, nfe, fe and msi to enable the
1858 * related report methods. Error reporting is masked for any error whose bit
1859 * is not set in one of these masks.
1860 */
1861void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
1862 u32 msi)
1863{
1864 if (!qm->ops->hw_error_init) {
1865 dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error handling!\n",
1866 qm->ver);
1867 return;
1868 }
1869
1870 qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
1871}
1872EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
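
/*
 * Hypothetical configuration sketch: enable the default masks from qm.h,
 * treating single-bit ECC errors (QM_BASE_CE) as correctable and the
 * QM_BASE_NFE set as non-fatal RAS events, with fatal and MSI reporting left
 * disabled. Non-fatal events can later be passed to hisi_qm_hw_error_handle()
 * from the driver's PCI error handler.
 */
static void __maybe_unused example_enable_qm_errors(struct hisi_qm *qm)
{
	hisi_qm_hw_error_init(qm, QM_BASE_CE, QM_BASE_NFE, 0, 0);
}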
1873
1874/**
1875 * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
1876 * @qm: The qm which has non-fatal hardware errors.
1877 *
1878 * Accelerators use this function to handle qm non-fatal hardware errors.
1879 */
1880int hisi_qm_hw_error_handle(struct hisi_qm *qm)
1881{
1882 if (!qm->ops->hw_error_handle) {
1883 dev_err(&qm->pdev->dev, "QM version %d doesn't support hw error report!\n",
1884 qm->ver);
1885 return PCI_ERS_RESULT_NONE;
1886 }
1887
1888 return qm->ops->hw_error_handle(qm);
1889}
1890EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
1891
1892/**
1893 * hisi_qm_get_hw_version() - Get hardware version of a qm.
1894 * @pdev: The device whose hardware version we want to get.
1895 *
1896 * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
1897 * if the hardware version is not supported.
1898 */
1899enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
1900{
1901 switch (pdev->revision) {
1902 case QM_HW_V1:
1903 case QM_HW_V2:
1904 return pdev->revision;
1905 default:
1906 return QM_HW_UNKNOWN;
1907 }
1908}
1909EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
1910
1911MODULE_LICENSE("GPL v2");
1912MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
1913MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
new file mode 100644
index 000000000000..70e672ae86bf
--- /dev/null
+++ b/drivers/crypto/hisilicon/qm.h
@@ -0,0 +1,215 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019 HiSilicon Limited. */
3#ifndef HISI_ACC_QM_H
4#define HISI_ACC_QM_H
5
6#include <linux/bitfield.h>
7#include <linux/iopoll.h>
8#include <linux/module.h>
9#include <linux/pci.h>
10
11/* qm user domain */
12#define QM_ARUSER_M_CFG_1 0x100088
13#define AXUSER_SNOOP_ENABLE BIT(30)
14#define AXUSER_CMD_TYPE GENMASK(14, 12)
15#define AXUSER_CMD_SMMU_NORMAL 1
16#define AXUSER_NS BIT(6)
17#define AXUSER_NO BIT(5)
18#define AXUSER_FP BIT(4)
19#define AXUSER_SSV BIT(0)
20#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
21 FIELD_PREP(AXUSER_CMD_TYPE, \
22 AXUSER_CMD_SMMU_NORMAL) | \
23 AXUSER_NS | AXUSER_NO | AXUSER_FP)
24#define QM_ARUSER_M_CFG_ENABLE 0x100090
25#define ARUSER_M_CFG_ENABLE 0xfffffffe
26#define QM_AWUSER_M_CFG_1 0x100098
27#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
28#define AWUSER_M_CFG_ENABLE 0xfffffffe
29#define QM_WUSER_M_CFG_ENABLE 0x1000a8
30#define WUSER_M_CFG_ENABLE 0xffffffff
31
32/* qm cache */
33#define QM_CACHE_CTL 0x100050
34#define SQC_CACHE_ENABLE BIT(0)
35#define CQC_CACHE_ENABLE BIT(1)
36#define SQC_CACHE_WB_ENABLE BIT(4)
37#define SQC_CACHE_WB_THRD GENMASK(10, 5)
38#define CQC_CACHE_WB_ENABLE BIT(11)
39#define CQC_CACHE_WB_THRD GENMASK(17, 12)
40#define QM_AXI_M_CFG 0x1000ac
41#define AXI_M_CFG 0xffff
42#define QM_AXI_M_CFG_ENABLE 0x1000b0
43#define AXI_M_CFG_ENABLE 0xffffffff
44#define QM_PEH_AXUSER_CFG 0x1000cc
45#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
46#define PEH_AXUSER_CFG 0x401001
47#define PEH_AXUSER_CFG_ENABLE 0xffffffff
48
49#define QM_DFX_MB_CNT_VF 0x104010
50#define QM_DFX_DB_CNT_VF 0x104020
51#define QM_DFX_SQE_CNT_VF_SQN 0x104030
52#define QM_DFX_CQE_CNT_VF_CQN 0x104040
53#define QM_DFX_QN_SHIFT 16
54#define CURRENT_FUN_MASK GENMASK(5, 0)
55#define CURRENT_Q_MASK GENMASK(31, 16)
56
57#define QM_AXI_RRESP BIT(0)
58#define QM_AXI_BRESP BIT(1)
59#define QM_ECC_MBIT BIT(2)
60#define QM_ECC_1BIT BIT(3)
61#define QM_ACC_GET_TASK_TIMEOUT BIT(4)
62#define QM_ACC_DO_TASK_TIMEOUT BIT(5)
63#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6)
64#define QM_SQ_CQ_VF_INVALID BIT(7)
65#define QM_CQ_VF_INVALID BIT(8)
66#define QM_SQ_VF_INVALID BIT(9)
67#define QM_DB_TIMEOUT BIT(10)
68#define QM_OF_FIFO_OF BIT(11)
69#define QM_DB_RANDOM_INVALID BIT(12)
70
71#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
72 QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
73 QM_OF_FIFO_OF)
74#define QM_BASE_CE QM_ECC_1BIT
75
76#define QM_Q_DEPTH 1024
77
78enum qp_state {
79 QP_STOP,
80};
81
82enum qm_hw_ver {
83 QM_HW_UNKNOWN = -1,
84 QM_HW_V1 = 0x20,
85 QM_HW_V2 = 0x21,
86};
87
88enum qm_fun_type {
89 QM_HW_PF,
90 QM_HW_VF,
91};
92
93enum qm_debug_file {
94 CURRENT_Q,
95 CLEAR_ENABLE,
96 DEBUG_FILE_NUM,
97};
98
99struct debugfs_file {
100 enum qm_debug_file index;
101 struct mutex lock;
102 struct qm_debug *debug;
103};
104
105struct qm_debug {
106 u32 curr_qm_qp_num;
107 struct dentry *debug_root;
108 struct dentry *qm_d;
109 struct debugfs_file files[DEBUG_FILE_NUM];
110};
111
112struct qm_dma {
113 void *va;
114 dma_addr_t dma;
115 size_t size;
116};
117
118struct hisi_qm_status {
119 u32 eq_head;
120 bool eqc_phase;
121 u32 aeq_head;
122 bool aeqc_phase;
123 unsigned long flags;
124};
125
126struct hisi_qm {
127 enum qm_hw_ver ver;
128 enum qm_fun_type fun_type;
129 const char *dev_name;
130 struct pci_dev *pdev;
131 void __iomem *io_base;
132 u32 sqe_size;
133 u32 qp_base;
134 u32 qp_num;
135 u32 ctrl_qp_num;
136
137 struct qm_dma qdma;
138 struct qm_sqc *sqc;
139 struct qm_cqc *cqc;
140 struct qm_eqe *eqe;
141 struct qm_aeqe *aeqe;
142 dma_addr_t sqc_dma;
143 dma_addr_t cqc_dma;
144 dma_addr_t eqe_dma;
145 dma_addr_t aeqe_dma;
146
147 struct hisi_qm_status status;
148
149 rwlock_t qps_lock;
150 unsigned long *qp_bitmap;
151 struct hisi_qp **qp_array;
152
153 struct mutex mailbox_lock;
154
155 const struct hisi_qm_hw_ops *ops;
156
157 struct qm_debug debug;
158
159 u32 error_mask;
160 u32 msi_mask;
161
162 bool use_dma_api;
163};
164
165struct hisi_qp_status {
166 atomic_t used;
167 u16 sq_tail;
168 u16 cq_head;
169 bool cqc_phase;
170 unsigned long flags;
171};
172
173struct hisi_qp_ops {
174 int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
175};
176
177struct hisi_qp {
178 u32 qp_id;
179 u8 alg_type;
180 u8 req_type;
181
182 struct qm_dma qdma;
183 void *sqe;
184 struct qm_cqe *cqe;
185 dma_addr_t sqe_dma;
186 dma_addr_t cqe_dma;
187
188 struct hisi_qp_status qp_status;
189 struct hisi_qp_ops *hw_ops;
190 void *qp_ctx;
191 void (*req_cb)(struct hisi_qp *qp, void *data);
192 struct work_struct work;
193 struct workqueue_struct *wq;
194
195 struct hisi_qm *qm;
196};
197
198int hisi_qm_init(struct hisi_qm *qm);
199void hisi_qm_uninit(struct hisi_qm *qm);
200int hisi_qm_start(struct hisi_qm *qm);
201int hisi_qm_stop(struct hisi_qm *qm);
202struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
203int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
204int hisi_qm_stop_qp(struct hisi_qp *qp);
205void hisi_qm_release_qp(struct hisi_qp *qp);
206int hisi_qp_send(struct hisi_qp *qp, const void *msg);
207int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
208int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
209int hisi_qm_debug_init(struct hisi_qm *qm);
210void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
211 u32 msi);
212int hisi_qm_hw_error_handle(struct hisi_qm *qm);
213enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
214void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
215#endif
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 02768af0dccd..e0508ea160f1 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -9,7 +9,7 @@
9 9
10#include <crypto/aes.h> 10#include <crypto/aes.h>
11#include <crypto/algapi.h> 11#include <crypto/algapi.h>
12#include <crypto/des.h> 12#include <crypto/internal/des.h>
13#include <crypto/skcipher.h> 13#include <crypto/skcipher.h>
14#include <crypto/xts.h> 14#include <crypto/xts.h>
15#include <crypto/internal/skcipher.h> 15#include <crypto/internal/skcipher.h>
@@ -347,25 +347,21 @@ static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
347static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm, 347static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
348 const u8 *key, unsigned int keylen) 348 const u8 *key, unsigned int keylen)
349{ 349{
350 if (keylen != DES_KEY_SIZE) 350 return verify_skcipher_des_key(tfm, key) ?:
351 return -EINVAL; 351 sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
352
353 return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
354} 352}
355 353
356static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm, 354static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
357 const u8 *key, unsigned int keylen) 355 const u8 *key, unsigned int keylen)
358{ 356{
359 if (keylen != DES_KEY_SIZE) 357 return verify_skcipher_des_key(tfm, key) ?:
360 return -EINVAL; 358 sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
361
362 return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
363} 359}
364 360
365static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, 361static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
366 const u8 *key, unsigned int keylen) 362 const u8 *key, unsigned int keylen)
367{ 363{
368 return unlikely(des3_verify_key(tfm, key)) ?: 364 return verify_skcipher_des3_key(tfm, key) ?:
369 sec_alg_skcipher_setkey(tfm, key, keylen, 365 sec_alg_skcipher_setkey(tfm, key, keylen,
370 SEC_C_3DES_ECB_192_3KEY); 366 SEC_C_3DES_ECB_192_3KEY);
371} 367}
@@ -373,7 +369,7 @@ static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
373static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, 369static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
374 const u8 *key, unsigned int keylen) 370 const u8 *key, unsigned int keylen)
375{ 371{
376 return unlikely(des3_verify_key(tfm, key)) ?: 372 return verify_skcipher_des3_key(tfm, key) ?:
377 sec_alg_skcipher_setkey(tfm, key, keylen, 373 sec_alg_skcipher_setkey(tfm, key, keylen,
378 SEC_C_3DES_CBC_192_3KEY); 374 SEC_C_3DES_CBC_192_3KEY);
379} 375}
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
new file mode 100644
index 000000000000..e083d172b618
--- /dev/null
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -0,0 +1,214 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <linux/dma-mapping.h>
4#include <linux/module.h>
5#include "./sgl.h"
6
7#define HISI_ACC_SGL_SGE_NR_MIN 1
8#define HISI_ACC_SGL_SGE_NR_MAX 255
9#define HISI_ACC_SGL_SGE_NR_DEF 10
10#define HISI_ACC_SGL_NR_MAX 256
11#define HISI_ACC_SGL_ALIGN_SIZE 64
12
13static int acc_sgl_sge_set(const char *val, const struct kernel_param *kp)
14{
15 int ret;
16 u32 n;
17
18 if (!val)
19 return -EINVAL;
20
21 ret = kstrtou32(val, 10, &n);
22 if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0)
23 return -EINVAL;
24
25 return param_set_int(val, kp);
26}
27
28static const struct kernel_param_ops acc_sgl_sge_ops = {
29 .set = acc_sgl_sge_set,
30 .get = param_get_int,
31};
32
33static u32 acc_sgl_sge_nr = HISI_ACC_SGL_SGE_NR_DEF;
34module_param_cb(acc_sgl_sge_nr, &acc_sgl_sge_ops, &acc_sgl_sge_nr, 0444);
35MODULE_PARM_DESC(acc_sgl_sge_nr, "Number of sges in a sgl (1-255)");
36
37struct acc_hw_sge {
38 dma_addr_t buf;
39 void *page_ctrl;
40 __le32 len;
41 __le32 pad;
42 __le32 pad0;
43 __le32 pad1;
44};
45
46/* use default sgl head size 64B */
47struct hisi_acc_hw_sgl {
48 dma_addr_t next_dma;
49 __le16 entry_sum_in_chain;
50 __le16 entry_sum_in_sgl;
51 __le16 entry_length_in_sgl;
52 __le16 pad0;
53 __le64 pad1[5];
54 struct hisi_acc_hw_sgl *next;
55 struct acc_hw_sge sge_entries[];
56} __aligned(1);
57
58/**
59 * hisi_acc_create_sgl_pool() - Create a hw sgl pool.
60 * @dev: The device which hw sgl pool belongs to.
61 * @pool: Pointer of pool.
62 * @count: Count of hisi_acc_hw_sgl in pool.
63 *
64 * This function creates a hw sgl pool; afterwards the user can get hw sgl
65 * memory from it.
66 */
67int hisi_acc_create_sgl_pool(struct device *dev,
68 struct hisi_acc_sgl_pool *pool, u32 count)
69{
70 u32 sgl_size;
71 u32 size;
72
73 if (!dev || !pool || !count)
74 return -EINVAL;
75
76 sgl_size = sizeof(struct acc_hw_sge) * acc_sgl_sge_nr +
77 sizeof(struct hisi_acc_hw_sgl);
78 size = sgl_size * count;
79
80 pool->sgl = dma_alloc_coherent(dev, size, &pool->sgl_dma, GFP_KERNEL);
81 if (!pool->sgl)
82 return -ENOMEM;
83
84 pool->size = size;
85 pool->count = count;
86 pool->sgl_size = sgl_size;
87
88 return 0;
89}
90EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
91
92/**
93 * hisi_acc_free_sgl_pool() - Free a hw sgl pool.
94 * @dev: The device which hw sgl pool belongs to.
95 * @pool: Pointer of pool.
96 *
97 * This function frees memory of a hw sgl pool.
98 */
99void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
100{
101 dma_free_coherent(dev, pool->size, pool->sgl, pool->sgl_dma);
102 memset(pool, 0, sizeof(struct hisi_acc_sgl_pool));
103}
104EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
105
106struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index,
107 dma_addr_t *hw_sgl_dma)
108{
109 if (!pool || !hw_sgl_dma || index >= pool->count || !pool->sgl)
110 return ERR_PTR(-EINVAL);
111
112 *hw_sgl_dma = pool->sgl_dma + pool->sgl_size * index;
113 return (void *)pool->sgl + pool->sgl_size * index;
114}
115
116void acc_put_sgl(struct hisi_acc_sgl_pool *pool, u32 index) {}
117
118static void sg_map_to_hw_sg(struct scatterlist *sgl,
119 struct acc_hw_sge *hw_sge)
120{
121 hw_sge->buf = sgl->dma_address;
122 hw_sge->len = sgl->dma_length;
123}
124
125static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
126{
127 hw_sgl->entry_sum_in_sgl++;
128}
129
130static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
131{
132 hw_sgl->entry_sum_in_chain = sum;
133}
134
135/**
136 * hisi_acc_sg_buf_map_to_hw_sgl() - Map a scatterlist to a hw sgl.
137 * @dev: The device which hw sgl belongs to.
138 * @sgl: Scatterlist which will be mapped to hw sgl.
139 * @pool: Pool which hw sgl memory will be allocated in.
140 * @index: Index of hisi_acc_hw_sgl in pool.
141 * @hw_sgl_dma: The dma address of allocated hw sgl.
142 *
143 * This function builds a hw sgl according to the input sgl; the user can use
144 * hw_sgl_dma as src/dst in its BD. Only a single hw sgl is supported currently.
145 */
146struct hisi_acc_hw_sgl *
147hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
148 struct scatterlist *sgl,
149 struct hisi_acc_sgl_pool *pool,
150 u32 index, dma_addr_t *hw_sgl_dma)
151{
152 struct hisi_acc_hw_sgl *curr_hw_sgl;
153 dma_addr_t curr_sgl_dma = 0;
154 struct acc_hw_sge *curr_hw_sge;
155 struct scatterlist *sg;
156 int sg_n = sg_nents(sgl);
157 int i, ret;
158
159 if (!dev || !sgl || !pool || !hw_sgl_dma || sg_n > acc_sgl_sge_nr)
160 return ERR_PTR(-EINVAL);
161
162 ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
163 if (!ret)
164 return ERR_PTR(-EINVAL);
165
166 curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
167	if (IS_ERR(curr_hw_sgl)) {
168		ret = PTR_ERR(curr_hw_sgl);
169 goto err_unmap_sg;
170 }
171 curr_hw_sgl->entry_length_in_sgl = acc_sgl_sge_nr;
172 curr_hw_sge = curr_hw_sgl->sge_entries;
173
174 for_each_sg(sgl, sg, sg_n, i) {
175 sg_map_to_hw_sg(sg, curr_hw_sge);
176 inc_hw_sgl_sge(curr_hw_sgl);
177 curr_hw_sge++;
178 }
179
180 update_hw_sgl_sum_sge(curr_hw_sgl, acc_sgl_sge_nr);
181 *hw_sgl_dma = curr_sgl_dma;
182
183 return curr_hw_sgl;
184
185err_unmap_sg:
186 dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
187 return ERR_PTR(ret);
188}
189EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
190
191/**
192 * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl.
193 * @dev: The device which hw sgl belongs to.
194 * @sgl: Related scatterlist.
195 * @hw_sgl: Virtual address of hw sgl.
196 * @hw_sgl_dma: DMA address of hw sgl.
197 * @pool: Pool which hw sgl is allocated in.
198 *
199 * This function unmaps allocated hw sgl.
200 */
201void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
202 struct hisi_acc_hw_sgl *hw_sgl)
203{
204 dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
205
206 hw_sgl->entry_sum_in_chain = 0;
207 hw_sgl->entry_sum_in_sgl = 0;
208 hw_sgl->entry_length_in_sgl = 0;
209}
210EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
211
212MODULE_LICENSE("GPL v2");
213MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
214MODULE_DESCRIPTION("HiSilicon Accelerator SGL support");
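The four exported helpers above make up the whole hw SGL API of this file: create a pool, map a scatterlist into one of its slots, unmap it again, and free the pool. A minimal caller sketch, assuming a struct device *dev, a prepared scatterlist *sgl and a per-request slot index idx (these names and the slot count are illustrative, not taken from the patch):

	struct hisi_acc_sgl_pool pool;
	struct hisi_acc_hw_sgl *hw_sgl;
	dma_addr_t hw_sgl_dma;
	int ret;

	ret = hisi_acc_create_sgl_pool(dev, &pool, 512);	/* 512 request slots */
	if (ret)
		return ret;

	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, &pool, idx, &hw_sgl_dma);
	if (IS_ERR(hw_sgl)) {
		hisi_acc_free_sgl_pool(dev, &pool);
		return PTR_ERR(hw_sgl);
	}

	/* ... hand hw_sgl_dma to the hardware as the src/dst address of a BD ... */

	hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl);
	hisi_acc_free_sgl_pool(dev, &pool);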
diff --git a/drivers/crypto/hisilicon/sgl.h b/drivers/crypto/hisilicon/sgl.h
new file mode 100644
index 000000000000..3ac8871c7acf
--- /dev/null
+++ b/drivers/crypto/hisilicon/sgl.h
@@ -0,0 +1,24 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019 HiSilicon Limited. */
3#ifndef HISI_ACC_SGL_H
4#define HISI_ACC_SGL_H
5
6struct hisi_acc_sgl_pool {
7 struct hisi_acc_hw_sgl *sgl;
8 dma_addr_t sgl_dma;
9 size_t size;
10 u32 count;
11 size_t sgl_size;
12};
13
14struct hisi_acc_hw_sgl *
15hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
16 struct scatterlist *sgl,
17 struct hisi_acc_sgl_pool *pool,
18 u32 index, dma_addr_t *hw_sgl_dma);
19void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
20 struct hisi_acc_hw_sgl *hw_sgl);
21int hisi_acc_create_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool,
22 u32 count);
23void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool);
24#endif
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
new file mode 100644
index 000000000000..a936f099ee22
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
2hisi_zip-objs = zip_main.o zip_crypto.o
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
new file mode 100644
index 000000000000..ffb00d987d02
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -0,0 +1,71 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019 HiSilicon Limited. */
3#ifndef HISI_ZIP_H
4#define HISI_ZIP_H
5
6#undef pr_fmt
7#define pr_fmt(fmt) "hisi_zip: " fmt
8
9#include <linux/list.h>
10#include "../qm.h"
11#include "../sgl.h"
12
13/* hisi_zip_sqe dw3 */
14#define HZIP_BD_STATUS_M GENMASK(7, 0)
15/* hisi_zip_sqe dw9 */
16#define HZIP_REQ_TYPE_M GENMASK(7, 0)
17#define HZIP_ALG_TYPE_ZLIB 0x02
18#define HZIP_ALG_TYPE_GZIP 0x03
19#define HZIP_BUF_TYPE_M GENMASK(11, 8)
20#define HZIP_PBUFFER 0x0
21#define HZIP_SGL 0x1
22
23enum hisi_zip_error_type {
24 /* negative compression */
25 HZIP_NC_ERR = 0x0d,
26};
27
28struct hisi_zip_ctrl;
29
30struct hisi_zip {
31 struct hisi_qm qm;
32 struct list_head list;
33 struct hisi_zip_ctrl *ctrl;
34};
35
36struct hisi_zip_sqe {
37 u32 consumed;
38 u32 produced;
39 u32 comp_data_length;
40 u32 dw3;
41 u32 input_data_length;
42 u32 lba_l;
43 u32 lba_h;
44 u32 dw7;
45 u32 dw8;
46 u32 dw9;
47 u32 dw10;
48 u32 priv_info;
49 u32 dw12;
50 u32 tag;
51 u32 dest_avail_out;
52 u32 rsvd0;
53 u32 comp_head_addr_l;
54 u32 comp_head_addr_h;
55 u32 source_addr_l;
56 u32 source_addr_h;
57 u32 dest_addr_l;
58 u32 dest_addr_h;
59 u32 stream_ctx_addr_l;
60 u32 stream_ctx_addr_h;
61 u32 cipher_key1_addr_l;
62 u32 cipher_key1_addr_h;
63 u32 cipher_key2_addr_l;
64 u32 cipher_key2_addr_h;
65 u32 rsvd1[4];
66};
67
68struct hisi_zip *find_zip_device(int node);
69int hisi_zip_register_to_crypto(void);
70void hisi_zip_unregister_from_crypto(void);
71#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
new file mode 100644
index 000000000000..5a3f84dcdcde
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -0,0 +1,653 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <crypto/internal/acompress.h>
4#include <linux/bitfield.h>
5#include <linux/dma-mapping.h>
6#include <linux/scatterlist.h>
7#include "zip.h"
8
9#define HZIP_ZLIB_HEAD_SIZE 2
10#define HZIP_GZIP_HEAD_SIZE 10
11
12#define GZIP_HEAD_FHCRC_BIT BIT(1)
13#define GZIP_HEAD_FEXTRA_BIT BIT(2)
14#define GZIP_HEAD_FNAME_BIT BIT(3)
15#define GZIP_HEAD_FCOMMENT_BIT BIT(4)
16
17#define GZIP_HEAD_FLG_SHIFT 3
18#define GZIP_HEAD_FEXTRA_SHIFT 10
19#define GZIP_HEAD_FEXTRA_XLEN 2
20#define GZIP_HEAD_FHCRC_SIZE 2
21
22#define HZIP_CTX_Q_NUM 2
23#define HZIP_GZIP_HEAD_BUF 256
24#define HZIP_ALG_PRIORITY 300
25
26static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
27static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0,
28 0x0, 0x0, 0x0, 0x0, 0x03};
29enum hisi_zip_alg_type {
30 HZIP_ALG_TYPE_COMP = 0,
31 HZIP_ALG_TYPE_DECOMP = 1,
32};
33
34#define COMP_NAME_TO_TYPE(alg_name) \
35 (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \
36 !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \
37
38#define TO_HEAD_SIZE(req_type) \
39 (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \
40 ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \
41
42#define TO_HEAD(req_type) \
43 (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \
44 ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : 0) \
45
46struct hisi_zip_req {
47 struct acomp_req *req;
48 struct scatterlist *src;
49 struct scatterlist *dst;
50 size_t slen;
51 size_t dlen;
52 struct hisi_acc_hw_sgl *hw_src;
53 struct hisi_acc_hw_sgl *hw_dst;
54 dma_addr_t dma_src;
55 dma_addr_t dma_dst;
56 int req_id;
57};
58
59struct hisi_zip_req_q {
60 struct hisi_zip_req *q;
61 unsigned long *req_bitmap;
62 rwlock_t req_lock;
63 u16 size;
64};
65
66struct hisi_zip_qp_ctx {
67 struct hisi_qp *qp;
68 struct hisi_zip_sqe zip_sqe;
69 struct hisi_zip_req_q req_q;
70 struct hisi_acc_sgl_pool sgl_pool;
71 struct hisi_zip *zip_dev;
72 struct hisi_zip_ctx *ctx;
73};
74
75struct hisi_zip_ctx {
76#define QPC_COMP 0
77#define QPC_DECOMP 1
78 struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
79};
80
81static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
82{
83 u32 val;
84
85 val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
86 val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
87 sqe->dw9 = val;
88}
89
90static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
91{
92 sqe->tag = tag;
93}
94
95static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
96 dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
97 u32 dlen)
98{
99 memset(sqe, 0, sizeof(struct hisi_zip_sqe));
100
101 sqe->input_data_length = slen;
102 sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
103 sqe->dest_avail_out = dlen;
104 sqe->source_addr_l = lower_32_bits(s_addr);
105 sqe->source_addr_h = upper_32_bits(s_addr);
106 sqe->dest_addr_l = lower_32_bits(d_addr);
107 sqe->dest_addr_h = upper_32_bits(d_addr);
108}
109
110static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
111 int alg_type, int req_type)
112{
113 struct hisi_qp *qp;
114 int ret;
115
116 qp = hisi_qm_create_qp(qm, alg_type);
117 if (IS_ERR(qp))
118 return PTR_ERR(qp);
119
120 qp->req_type = req_type;
121 qp->qp_ctx = ctx;
122 ctx->qp = qp;
123
124 ret = hisi_qm_start_qp(qp, 0);
125 if (ret < 0)
126 goto err_release_qp;
127
128 return 0;
129
130err_release_qp:
131 hisi_qm_release_qp(qp);
132 return ret;
133}
134
135static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
136{
137 hisi_qm_stop_qp(ctx->qp);
138 hisi_qm_release_qp(ctx->qp);
139}
140
141static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
142{
143 struct hisi_zip *hisi_zip;
144 struct hisi_qm *qm;
145 int ret, i, j;
146
147 /* find the proper zip device */
148 hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
149 if (!hisi_zip) {
150 pr_err("Failed to find a proper ZIP device!\n");
151 return -ENODEV;
152 }
153 qm = &hisi_zip->qm;
154
155 for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
156 /* alg_type = 0 for compress, 1 for decompress in hw sqe */
157 ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
158 req_type);
159 if (ret)
160 goto err;
161
162 hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
163 }
164
165 return 0;
166err:
167 for (j = i - 1; j >= 0; j--)
168 hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
169
170 return ret;
171}
172
173static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
174{
175 int i;
176
177 for (i = 1; i >= 0; i--)
178 hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
179}
180
181static u16 get_extra_field_size(const u8 *start)
182{
183 return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
184}
185
186static u32 get_name_field_size(const u8 *start)
187{
188 return strlen(start) + 1;
189}
190
191static u32 get_comment_field_size(const u8 *start)
192{
193 return strlen(start) + 1;
194}
195
196static u32 __get_gzip_head_size(const u8 *src)
197{
198 u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
199 u32 size = GZIP_HEAD_FEXTRA_SHIFT;
200
201 if (head_flg & GZIP_HEAD_FEXTRA_BIT)
202 size += get_extra_field_size(src + size);
203 if (head_flg & GZIP_HEAD_FNAME_BIT)
204 size += get_name_field_size(src + size);
205 if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
206 size += get_comment_field_size(src + size);
207 if (head_flg & GZIP_HEAD_FHCRC_BIT)
208 size += GZIP_HEAD_FHCRC_SIZE;
209
210 return size;
211}
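/*
 * Worked example for the parser above (values are illustrative): for a gzip
 * stream whose FLG byte has only FNAME set and whose stored file name is
 * "a.txt", the header is the fixed 10 bytes plus strlen("a.txt") + 1 = 6
 * name bytes, so __get_gzip_head_size() returns 16.
 */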
212
213static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
214{
215 struct hisi_zip_req_q *req_q;
216 int i, ret;
217
218 for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
219 req_q = &ctx->qp_ctx[i].req_q;
220 req_q->size = QM_Q_DEPTH;
221
222 req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size),
223 sizeof(long), GFP_KERNEL);
224 if (!req_q->req_bitmap) {
225 ret = -ENOMEM;
226 if (i == 0)
227 return ret;
228
229 goto err_free_loop0;
230 }
231 rwlock_init(&req_q->req_lock);
232
233 req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
234 GFP_KERNEL);
235 if (!req_q->q) {
236 ret = -ENOMEM;
237 if (i == 0)
238 goto err_free_bitmap;
239 else
240 goto err_free_loop1;
241 }
242 }
243
244 return 0;
245
246err_free_loop1:
247 kfree(ctx->qp_ctx[QPC_DECOMP].req_q.req_bitmap);
248err_free_loop0:
249 kfree(ctx->qp_ctx[QPC_COMP].req_q.q);
250err_free_bitmap:
251 kfree(ctx->qp_ctx[QPC_COMP].req_q.req_bitmap);
252 return ret;
253}
254
255static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
256{
257 int i;
258
259 for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
260 kfree(ctx->qp_ctx[i].req_q.q);
261 kfree(ctx->qp_ctx[i].req_q.req_bitmap);
262 }
263}
264
265static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
266{
267 struct hisi_zip_qp_ctx *tmp;
268 int i, ret;
269
270 for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
271 tmp = &ctx->qp_ctx[i];
272 ret = hisi_acc_create_sgl_pool(&tmp->qp->qm->pdev->dev,
273 &tmp->sgl_pool,
274 QM_Q_DEPTH << 1);
275 if (ret < 0) {
276 if (i == 1)
277 goto err_free_sgl_pool0;
278 return -ENOMEM;
279 }
280 }
281
282 return 0;
283
284err_free_sgl_pool0:
285 hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev,
286 &ctx->qp_ctx[QPC_COMP].sgl_pool);
287 return -ENOMEM;
288}
289
290static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
291{
292 int i;
293
294 for (i = 0; i < HZIP_CTX_Q_NUM; i++)
295 hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
296 &ctx->qp_ctx[i].sgl_pool);
297}
298
299static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
300 struct hisi_zip_req *req)
301{
302 struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
303
304 if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP)
305 kfree(req->dst);
306 else
307 kfree(req->src);
308
309 write_lock(&req_q->req_lock);
310 clear_bit(req->req_id, req_q->req_bitmap);
311 memset(req, 0, sizeof(struct hisi_zip_req));
312 write_unlock(&req_q->req_lock);
313}
314
315static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
316{
317 struct hisi_zip_sqe *sqe = data;
318 struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
319 struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
320 struct hisi_zip_req *req = req_q->q + sqe->tag;
321 struct acomp_req *acomp_req = req->req;
322 struct device *dev = &qp->qm->pdev->dev;
323 u32 status, dlen, head_size;
324 int err = 0;
325
326 status = sqe->dw3 & HZIP_BD_STATUS_M;
327
328 if (status != 0 && status != HZIP_NC_ERR) {
329 dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
330 (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
331 sqe->produced);
332 err = -EIO;
333 }
334 dlen = sqe->produced;
335
336 hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
337 hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
338
339 head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
340 acomp_req->dlen = dlen + head_size;
341
342 if (acomp_req->base.complete)
343 acomp_request_complete(acomp_req, err);
344
345 hisi_zip_remove_req(qp_ctx, req);
346}
347
348static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
349 void (*fn)(struct hisi_qp *, void *))
350{
351 int i;
352
353 for (i = 0; i < HZIP_CTX_Q_NUM; i++)
354 ctx->qp_ctx[i].qp->req_cb = fn;
355}
356
357static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
358{
359 const char *alg_name = crypto_tfm_alg_name(&tfm->base);
360 struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
361 int ret;
362
363 ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name));
364 if (ret)
365 return ret;
366
367 ret = hisi_zip_create_req_q(ctx);
368 if (ret)
369 goto err_ctx_exit;
370
371 ret = hisi_zip_create_sgl_pool(ctx);
372 if (ret)
373 goto err_release_req_q;
374
375 hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);
376
377 return 0;
378
379err_release_req_q:
380 hisi_zip_release_req_q(ctx);
381err_ctx_exit:
382 hisi_zip_ctx_exit(ctx);
383 return ret;
384}
385
386static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
387{
388 struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
389
390 hisi_zip_set_acomp_cb(ctx, NULL);
391 hisi_zip_release_sgl_pool(ctx);
392 hisi_zip_release_req_q(ctx);
393 hisi_zip_ctx_exit(ctx);
394}
395
396static int add_comp_head(struct scatterlist *dst, u8 req_type)
397{
398 int head_size = TO_HEAD_SIZE(req_type);
399 const u8 *head = TO_HEAD(req_type);
400 int ret;
401
402 ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
403 if (ret != head_size)
404 return -ENOMEM;
405
406 return head_size;
407}
408
409static size_t get_gzip_head_size(struct scatterlist *sgl)
410{
411 char buf[HZIP_GZIP_HEAD_BUF];
412
413 sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
414
415 return __get_gzip_head_size(buf);
416}
417
418static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
419{
420 switch (req_type) {
421 case HZIP_ALG_TYPE_ZLIB:
422 return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
423 case HZIP_ALG_TYPE_GZIP:
424 return get_gzip_head_size(src);
425 default:
426		pr_err("request type is not supported!\n");
427 return -EINVAL;
428 }
429}
430
431static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes,
432 size_t remains, struct scatterlist **out)
433{
434#define SPLIT_NUM 2
435 size_t split_sizes[SPLIT_NUM];
436 int out_mapped_nents[SPLIT_NUM];
437
438 split_sizes[0] = bytes;
439 split_sizes[1] = remains;
440
441 return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out,
442 out_mapped_nents, GFP_KERNEL);
443}
444
445static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
446 struct hisi_zip_qp_ctx *qp_ctx,
447 size_t head_size, bool is_comp)
448{
449 struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
450 struct hisi_zip_req *q = req_q->q;
451 struct hisi_zip_req *req_cache;
452 struct scatterlist *out[2];
453 struct scatterlist *sgl;
454 size_t len;
455 int ret, req_id;
456
457 /*
458	 * Remove or add the zlib/gzip head, since the hardware does not handle
459	 * the comp head: for decompression, split req->src to get an sgl without
460	 * the head; for compression, write the comp head to req->dst before the
461	 * hardware fills the sgl split from req->dst with compressed data.
462 */
463 if (is_comp) {
464 sgl = req->dst;
465 len = req->dlen - head_size;
466 } else {
467 sgl = req->src;
468 len = req->slen - head_size;
469 }
470
471 ret = get_sg_skip_bytes(sgl, head_size, len, out);
472 if (ret)
473 return ERR_PTR(ret);
474
475 /* sgl for comp head is useless, so free it now */
476 kfree(out[0]);
477
478 write_lock(&req_q->req_lock);
479
480 req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
481 if (req_id >= req_q->size) {
482 write_unlock(&req_q->req_lock);
483 dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
484 kfree(out[1]);
485 return ERR_PTR(-EBUSY);
486 }
487 set_bit(req_id, req_q->req_bitmap);
488
489 req_cache = q + req_id;
490 req_cache->req_id = req_id;
491 req_cache->req = req;
492 if (is_comp) {
493 req_cache->src = req->src;
494 req_cache->dst = out[1];
495 req_cache->slen = req->slen;
496 req_cache->dlen = req->dlen - head_size;
497 } else {
498 req_cache->src = out[1];
499 req_cache->dst = req->dst;
500 req_cache->slen = req->slen - head_size;
501 req_cache->dlen = req->dlen;
502 }
503
504 write_unlock(&req_q->req_lock);
505
506 return req_cache;
507}
508
509static int hisi_zip_do_work(struct hisi_zip_req *req,
510 struct hisi_zip_qp_ctx *qp_ctx)
511{
512 struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe;
513 struct hisi_qp *qp = qp_ctx->qp;
514 struct device *dev = &qp->qm->pdev->dev;
515 struct hisi_acc_sgl_pool *pool = &qp_ctx->sgl_pool;
516 dma_addr_t input;
517 dma_addr_t output;
518 int ret;
519
520 if (!req->src || !req->slen || !req->dst || !req->dlen)
521 return -EINVAL;
522
523 req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool,
524 req->req_id << 1, &input);
525 if (IS_ERR(req->hw_src))
526 return PTR_ERR(req->hw_src);
527 req->dma_src = input;
528
529 req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool,
530 (req->req_id << 1) + 1,
531 &output);
532 if (IS_ERR(req->hw_dst)) {
533 ret = PTR_ERR(req->hw_dst);
534 goto err_unmap_input;
535 }
536 req->dma_dst = output;
537
538 hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen,
539 req->dlen);
540 hisi_zip_config_buf_type(zip_sqe, HZIP_SGL);
541 hisi_zip_config_tag(zip_sqe, req->req_id);
542
543 /* send command to start a task */
544 ret = hisi_qp_send(qp, zip_sqe);
545 if (ret < 0)
546 goto err_unmap_output;
547
548 return -EINPROGRESS;
549
550err_unmap_output:
551 hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst);
552err_unmap_input:
553 hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src);
554 return ret;
555}
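/*
 * Note on the indexing above: the sgl pool was created with QM_Q_DEPTH << 1
 * entries (see hisi_zip_create_sgl_pool()), so every request id owns two
 * consecutive slots: 2 * req_id for the source sgl and 2 * req_id + 1 for
 * the destination sgl.
 */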
556
557static int hisi_zip_acompress(struct acomp_req *acomp_req)
558{
559 struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
560 struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP];
561 struct hisi_zip_req *req;
562	int head_size;
563 int ret;
564
565 /* let's output compression head now */
566 head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
567 if (head_size < 0)
568 return -ENOMEM;
569
570 req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
571 if (IS_ERR(req))
572 return PTR_ERR(req);
573
574 ret = hisi_zip_do_work(req, qp_ctx);
575 if (ret != -EINPROGRESS)
576 hisi_zip_remove_req(qp_ctx, req);
577
578 return ret;
579}
580
581static int hisi_zip_adecompress(struct acomp_req *acomp_req)
582{
583 struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
584 struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP];
585 struct hisi_zip_req *req;
586 size_t head_size;
587 int ret;
588
589 head_size = get_comp_head_size(acomp_req->src, qp_ctx->qp->req_type);
590
591 req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
592 if (IS_ERR(req))
593 return PTR_ERR(req);
594
595 ret = hisi_zip_do_work(req, qp_ctx);
596 if (ret != -EINPROGRESS)
597 hisi_zip_remove_req(qp_ctx, req);
598
599 return ret;
600}
601
602static struct acomp_alg hisi_zip_acomp_zlib = {
603 .init = hisi_zip_acomp_init,
604 .exit = hisi_zip_acomp_exit,
605 .compress = hisi_zip_acompress,
606 .decompress = hisi_zip_adecompress,
607 .base = {
608 .cra_name = "zlib-deflate",
609 .cra_driver_name = "hisi-zlib-acomp",
610 .cra_module = THIS_MODULE,
611 .cra_priority = HZIP_ALG_PRIORITY,
612 .cra_ctxsize = sizeof(struct hisi_zip_ctx),
613 }
614};
615
616static struct acomp_alg hisi_zip_acomp_gzip = {
617 .init = hisi_zip_acomp_init,
618 .exit = hisi_zip_acomp_exit,
619 .compress = hisi_zip_acompress,
620 .decompress = hisi_zip_adecompress,
621 .base = {
622 .cra_name = "gzip",
623 .cra_driver_name = "hisi-gzip-acomp",
624 .cra_module = THIS_MODULE,
625 .cra_priority = HZIP_ALG_PRIORITY,
626 .cra_ctxsize = sizeof(struct hisi_zip_ctx),
627 }
628};
629
630int hisi_zip_register_to_crypto(void)
631{
632 int ret = 0;
633
634 ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
635 if (ret) {
636 pr_err("Zlib acomp algorithm registration failed\n");
637 return ret;
638 }
639
640 ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
641 if (ret) {
642 pr_err("Gzip acomp algorithm registration failed\n");
643 crypto_unregister_acomp(&hisi_zip_acomp_zlib);
644 }
645
646 return ret;
647}
648
649void hisi_zip_unregister_from_crypto(void)
650{
651 crypto_unregister_acomp(&hisi_zip_acomp_gzip);
652 crypto_unregister_acomp(&hisi_zip_acomp_zlib);
653}
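Nothing in zip_crypto.c is called by users directly; the two acomp_alg instances above are reached through the generic acomp API. A minimal, hypothetical in-kernel caller of the "zlib-deflate" instance might look like the sketch below (src, dst, slen and dlen are assumed to be prepared scatterlists and lengths; my_done and zlib_compress_example are placeholder names):

	#include <crypto/acompress.h>

	static void my_done(struct crypto_async_request *areq, int err)
	{
		/* the request completed asynchronously; consume the result here */
	}

	static int zlib_compress_example(struct scatterlist *src, struct scatterlist *dst,
					 unsigned int slen, unsigned int dlen)
	{
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		int ret;

		tfm = crypto_alloc_acomp("zlib-deflate", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			crypto_free_acomp(tfm);
			return -ENOMEM;
		}

		acomp_request_set_params(req, src, dst, slen, dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, my_done, NULL);

		/* ends up in hisi_zip_acompress(); -EINPROGRESS means async completion */
		ret = crypto_acomp_compress(req);

		/* in real code, free req and tfm only after my_done() has run */
		return ret;
	}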
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
new file mode 100644
index 000000000000..6e0ca75585d4
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -0,0 +1,1013 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <linux/acpi.h>
4#include <linux/aer.h>
5#include <linux/bitops.h>
6#include <linux/debugfs.h>
7#include <linux/init.h>
8#include <linux/io.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/seq_file.h>
13#include <linux/topology.h>
14#include "zip.h"
15
16#define PCI_DEVICE_ID_ZIP_PF 0xa250
17#define PCI_DEVICE_ID_ZIP_VF 0xa251
18
19#define HZIP_VF_NUM 63
20#define HZIP_QUEUE_NUM_V1 4096
21#define HZIP_QUEUE_NUM_V2 1024
22
23#define HZIP_CLOCK_GATE_CTRL 0x301004
24#define COMP0_ENABLE BIT(0)
25#define COMP1_ENABLE BIT(1)
26#define DECOMP0_ENABLE BIT(2)
27#define DECOMP1_ENABLE BIT(3)
28#define DECOMP2_ENABLE BIT(4)
29#define DECOMP3_ENABLE BIT(5)
30#define DECOMP4_ENABLE BIT(6)
31#define DECOMP5_ENABLE BIT(7)
32#define ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
33 DECOMP0_ENABLE | DECOMP1_ENABLE | \
34 DECOMP2_ENABLE | DECOMP3_ENABLE | \
35 DECOMP4_ENABLE | DECOMP5_ENABLE)
36#define DECOMP_CHECK_ENABLE BIT(16)
37#define HZIP_FSM_MAX_CNT 0x301008
38
39#define HZIP_PORT_ARCA_CHE_0 0x301040
40#define HZIP_PORT_ARCA_CHE_1 0x301044
41#define HZIP_PORT_AWCA_CHE_0 0x301060
42#define HZIP_PORT_AWCA_CHE_1 0x301064
43#define CACHE_ALL_EN 0xffffffff
44
45#define HZIP_BD_RUSER_32_63 0x301110
46#define HZIP_SGL_RUSER_32_63 0x30111c
47#define HZIP_DATA_RUSER_32_63 0x301128
48#define HZIP_DATA_WUSER_32_63 0x301134
49#define HZIP_BD_WUSER_32_63 0x301140
50
51#define HZIP_QM_IDEL_STATUS 0x3040e4
52
53#define HZIP_CORE_DEBUG_COMP_0 0x302000
54#define HZIP_CORE_DEBUG_COMP_1 0x303000
55#define HZIP_CORE_DEBUG_DECOMP_0 0x304000
56#define HZIP_CORE_DEBUG_DECOMP_1 0x305000
57#define HZIP_CORE_DEBUG_DECOMP_2 0x306000
58#define HZIP_CORE_DEBUG_DECOMP_3 0x307000
59#define HZIP_CORE_DEBUG_DECOMP_4 0x308000
60#define HZIP_CORE_DEBUG_DECOMP_5 0x309000
61
62#define HZIP_CORE_INT_SOURCE 0x3010A0
63#define HZIP_CORE_INT_MASK 0x3010A4
64#define HZIP_CORE_INT_STATUS 0x3010AC
65#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
66#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
67#define SRAM_ECC_ERR_NUM_SHIFT 16
68#define SRAM_ECC_ERR_ADDR_SHIFT 24
69#define HZIP_CORE_INT_DISABLE 0x000007FF
70#define HZIP_COMP_CORE_NUM 2
71#define HZIP_DECOMP_CORE_NUM 6
72#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
73 HZIP_DECOMP_CORE_NUM)
74#define HZIP_SQE_SIZE 128
75#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
76#define HZIP_PF_DEF_Q_NUM 64
77#define HZIP_PF_DEF_Q_BASE 0
78
79#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
80#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
81
82#define HZIP_NUMA_DISTANCE 100
83#define HZIP_BUF_SIZE 22
84
85static const char hisi_zip_name[] = "hisi_zip";
86static struct dentry *hzip_debugfs_root;
87LIST_HEAD(hisi_zip_list);
88DEFINE_MUTEX(hisi_zip_list_lock);
89
90#ifdef CONFIG_NUMA
91static struct hisi_zip *find_zip_device_numa(int node)
92{
93 struct hisi_zip *zip = NULL;
94 struct hisi_zip *hisi_zip;
95 int min_distance = HZIP_NUMA_DISTANCE;
96 struct device *dev;
97
98 list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
99 dev = &hisi_zip->qm.pdev->dev;
100 if (node_distance(dev->numa_node, node) < min_distance) {
101 zip = hisi_zip;
102 min_distance = node_distance(dev->numa_node, node);
103 }
104 }
105
106 return zip;
107}
108#endif
109
110struct hisi_zip *find_zip_device(int node)
111{
112 struct hisi_zip *zip = NULL;
113
114 mutex_lock(&hisi_zip_list_lock);
115#ifdef CONFIG_NUMA
116 zip = find_zip_device_numa(node);
117#else
118 zip = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
119#endif
120 mutex_unlock(&hisi_zip_list_lock);
121
122 return zip;
123}
124
125struct hisi_zip_hw_error {
126 u32 int_msk;
127 const char *msg;
128};
129
130static const struct hisi_zip_hw_error zip_hw_error[] = {
131	{ .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" },
132 { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
133 { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
134 { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
135 { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
136 { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
137 { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
138 { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
139 { .int_msk = BIT(8), .msg = "zip_com_inf_err" },
140 { .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
141 { .int_msk = BIT(10), .msg = "zip_pre_out_err" },
142 { /* sentinel */ }
143};
144
145enum ctrl_debug_file_index {
146 HZIP_CURRENT_QM,
147 HZIP_CLEAR_ENABLE,
148 HZIP_DEBUG_FILE_NUM,
149};
150
151static const char * const ctrl_debug_file_name[] = {
152 [HZIP_CURRENT_QM] = "current_qm",
153 [HZIP_CLEAR_ENABLE] = "clear_enable",
154};
155
156struct ctrl_debug_file {
157 enum ctrl_debug_file_index index;
158 spinlock_t lock;
159 struct hisi_zip_ctrl *ctrl;
160};
161
162/*
163 * One ZIP controller has one PF and multiple VFs; the global configuration
164 * owned by the PF is kept in this structure.
165 *
166 * Only relevant for the PF.
167 */
168struct hisi_zip_ctrl {
169 u32 num_vfs;
170 struct hisi_zip *hisi_zip;
171 struct dentry *debug_root;
172 struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
173};
174
175enum {
176 HZIP_COMP_CORE0,
177 HZIP_COMP_CORE1,
178 HZIP_DECOMP_CORE0,
179 HZIP_DECOMP_CORE1,
180 HZIP_DECOMP_CORE2,
181 HZIP_DECOMP_CORE3,
182 HZIP_DECOMP_CORE4,
183 HZIP_DECOMP_CORE5,
184};
185
186static const u64 core_offsets[] = {
187 [HZIP_COMP_CORE0] = 0x302000,
188 [HZIP_COMP_CORE1] = 0x303000,
189 [HZIP_DECOMP_CORE0] = 0x304000,
190 [HZIP_DECOMP_CORE1] = 0x305000,
191 [HZIP_DECOMP_CORE2] = 0x306000,
192 [HZIP_DECOMP_CORE3] = 0x307000,
193 [HZIP_DECOMP_CORE4] = 0x308000,
194 [HZIP_DECOMP_CORE5] = 0x309000,
195};
196
197static struct debugfs_reg32 hzip_dfx_regs[] = {
198 {"HZIP_GET_BD_NUM ", 0x00ull},
199 {"HZIP_GET_RIGHT_BD ", 0x04ull},
200 {"HZIP_GET_ERROR_BD ", 0x08ull},
201 {"HZIP_DONE_BD_NUM ", 0x0cull},
202 {"HZIP_WORK_CYCLE ", 0x10ull},
203 {"HZIP_IDLE_CYCLE ", 0x18ull},
204 {"HZIP_MAX_DELAY ", 0x20ull},
205 {"HZIP_MIN_DELAY ", 0x24ull},
206 {"HZIP_AVG_DELAY ", 0x28ull},
207 {"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
208 {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
209 {"HZIP_COMSUMED_BYTE ", 0x38ull},
210 {"HZIP_PRODUCED_BYTE ", 0x40ull},
211 {"HZIP_COMP_INF ", 0x70ull},
212 {"HZIP_PRE_OUT ", 0x78ull},
213 {"HZIP_BD_RD ", 0x7cull},
214 {"HZIP_BD_WR ", 0x80ull},
215 {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull},
216 {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull},
217 {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull},
218 {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
219 {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
220};
221
222static int pf_q_num_set(const char *val, const struct kernel_param *kp)
223{
224 struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
225 PCI_DEVICE_ID_ZIP_PF, NULL);
226 u32 n, q_num;
227 u8 rev_id;
228 int ret;
229
230 if (!val)
231 return -EINVAL;
232
233 if (!pdev) {
234 q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
235		pr_info("No device found currently, assuming queue number is %d\n",
236 q_num);
237 } else {
238 rev_id = pdev->revision;
239 switch (rev_id) {
240 case QM_HW_V1:
241 q_num = HZIP_QUEUE_NUM_V1;
242 break;
243 case QM_HW_V2:
244 q_num = HZIP_QUEUE_NUM_V2;
245 break;
246 default:
247 return -EINVAL;
248 }
249 }
250
251 ret = kstrtou32(val, 10, &n);
252 if (ret != 0 || n > q_num || n == 0)
253 return -EINVAL;
254
255 return param_set_int(val, kp);
256}
257
258static const struct kernel_param_ops pf_q_num_ops = {
259 .set = pf_q_num_set,
260 .get = param_get_int,
261};
262
263static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
264module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
265MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
266
267static int uacce_mode;
268module_param(uacce_mode, int, 0);
269
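/*
 * Module-parameter example (illustrative invocation):
 *
 *   modprobe hisi_zip pf_q_num=256 uacce_mode=0
 *
 * pf_q_num is validated by pf_q_num_set() above against the queue limit of
 * the detected hardware revision; uacce_mode 0 and 2 keep qm->use_dma_api
 * set while 1 clears it (see hisi_zip_probe()), and the acomp algorithms
 * are only registered for modes 0 and 2 (see hisi_zip_init()).
 */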
270static const struct pci_device_id hisi_zip_dev_ids[] = {
271 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
272 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
273 { 0, }
274};
275MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
276
277static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
278{
279 mutex_lock(&hisi_zip_list_lock);
280 list_add_tail(&hisi_zip->list, &hisi_zip_list);
281 mutex_unlock(&hisi_zip_list_lock);
282}
283
284static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
285{
286 mutex_lock(&hisi_zip_list_lock);
287 list_del(&hisi_zip->list);
288 mutex_unlock(&hisi_zip_list_lock);
289}
290
291static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
292{
293 void __iomem *base = hisi_zip->qm.io_base;
294
295 /* qm user domain */
296 writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
297 writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
298 writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
299 writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
300 writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);
301
302 /* qm cache */
303 writel(AXI_M_CFG, base + QM_AXI_M_CFG);
304 writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
305 /* disable FLR triggered by BME(bus master enable) */
306 writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
307 writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);
308
309 /* cache */
310 writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
311 writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
312 writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
313 writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
314
315 /* user domain configurations */
316 writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
317 writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
318 writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
319 writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
320 writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
321
322 /* let's open all compression/decompression cores */
323 writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
324 base + HZIP_CLOCK_GATE_CTRL);
325
326 /* enable sqc writeback */
327 writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
328 CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
329 FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
330}
331
332static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
333{
334 struct hisi_qm *qm = &hisi_zip->qm;
335
336 if (qm->ver == QM_HW_V1) {
337 writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
338		dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handling\n",
339 qm->ver);
340 return;
341 }
342
343 if (state) {
344		/* clear the ZIP hw error source, if any */
345 writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
346 HZIP_CORE_INT_SOURCE);
347 /* enable ZIP hw error interrupts */
348 writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
349 } else {
350 /* disable ZIP hw error interrupts */
351 writel(HZIP_CORE_INT_DISABLE,
352 hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
353 }
354}
355
356static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
357{
358 struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
359
360 return &hisi_zip->qm;
361}
362
363static u32 current_qm_read(struct ctrl_debug_file *file)
364{
365 struct hisi_qm *qm = file_to_qm(file);
366
367 return readl(qm->io_base + QM_DFX_MB_CNT_VF);
368}
369
370static int current_qm_write(struct ctrl_debug_file *file, u32 val)
371{
372 struct hisi_qm *qm = file_to_qm(file);
373 struct hisi_zip_ctrl *ctrl = file->ctrl;
374 u32 vfq_num;
375 u32 tmp;
376
377 if (val > ctrl->num_vfs)
378 return -EINVAL;
379
380 /* Calculate curr_qm_qp_num and store */
381 if (val == 0) {
382 qm->debug.curr_qm_qp_num = qm->qp_num;
383 } else {
384 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs;
385 if (val == ctrl->num_vfs)
386 qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
387 qm->qp_num - (ctrl->num_vfs - 1) * vfq_num;
388 else
389 qm->debug.curr_qm_qp_num = vfq_num;
390 }
391
392 writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
393 writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
394
395 tmp = val |
396 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
397 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
398
399 tmp = val |
400 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
401 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
402
403 return 0;
404}
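/*
 * Worked example for the calculation above (hypothetical numbers): with
 * ctrl_qp_num = 1024, qp_num = 64 and num_vfs = 7, vfq_num = (1024 - 64) / 7
 * = 137, so writing 1..6 selects a VF with 137 queues while writing 7 (the
 * last VF) selects 1024 - 64 - 6 * 137 = 138 queues, absorbing the division
 * remainder.
 */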
405
406static u32 clear_enable_read(struct ctrl_debug_file *file)
407{
408 struct hisi_qm *qm = file_to_qm(file);
409
410 return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
411 SOFT_CTRL_CNT_CLR_CE_BIT;
412}
413
414static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
415{
416 struct hisi_qm *qm = file_to_qm(file);
417 u32 tmp;
418
419 if (val != 1 && val != 0)
420 return -EINVAL;
421
422 tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
423 ~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
424 writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
425
426 return 0;
427}
428
429static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
430 size_t count, loff_t *pos)
431{
432 struct ctrl_debug_file *file = filp->private_data;
433 char tbuf[HZIP_BUF_SIZE];
434 u32 val;
435 int ret;
436
437 spin_lock_irq(&file->lock);
438 switch (file->index) {
439 case HZIP_CURRENT_QM:
440 val = current_qm_read(file);
441 break;
442 case HZIP_CLEAR_ENABLE:
443 val = clear_enable_read(file);
444 break;
445 default:
446 spin_unlock_irq(&file->lock);
447 return -EINVAL;
448 }
449 spin_unlock_irq(&file->lock);
450 ret = sprintf(tbuf, "%u\n", val);
451 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
452}
453
454static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
455 size_t count, loff_t *pos)
456{
457 struct ctrl_debug_file *file = filp->private_data;
458 char tbuf[HZIP_BUF_SIZE];
459 unsigned long val;
460 int len, ret;
461
462 if (*pos != 0)
463 return 0;
464
465 if (count >= HZIP_BUF_SIZE)
466 return -ENOSPC;
467
468 len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
469 if (len < 0)
470 return len;
471
472 tbuf[len] = '\0';
473 if (kstrtoul(tbuf, 0, &val))
474 return -EFAULT;
475
476 spin_lock_irq(&file->lock);
477 switch (file->index) {
478 case HZIP_CURRENT_QM:
479 ret = current_qm_write(file, val);
480 if (ret)
481 goto err_input;
482 break;
483 case HZIP_CLEAR_ENABLE:
484 ret = clear_enable_write(file, val);
485 if (ret)
486 goto err_input;
487 break;
488 default:
489 ret = -EINVAL;
490 goto err_input;
491 }
492 spin_unlock_irq(&file->lock);
493
494 return count;
495
496err_input:
497 spin_unlock_irq(&file->lock);
498 return ret;
499}
500
501static const struct file_operations ctrl_debug_fops = {
502 .owner = THIS_MODULE,
503 .open = simple_open,
504 .read = ctrl_debug_read,
505 .write = ctrl_debug_write,
506};
507
508static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
509{
510 struct hisi_zip *hisi_zip = ctrl->hisi_zip;
511 struct hisi_qm *qm = &hisi_zip->qm;
512 struct device *dev = &qm->pdev->dev;
513 struct debugfs_regset32 *regset;
514 struct dentry *tmp_d, *tmp;
515 char buf[HZIP_BUF_SIZE];
516 int i;
517
518 for (i = 0; i < HZIP_CORE_NUM; i++) {
519 if (i < HZIP_COMP_CORE_NUM)
520 sprintf(buf, "comp_core%d", i);
521 else
522 sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
523
524 tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
525 if (!tmp_d)
526 return -ENOENT;
527
528 regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
529 if (!regset)
530 return -ENOENT;
531
532 regset->regs = hzip_dfx_regs;
533 regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
534 regset->base = qm->io_base + core_offsets[i];
535
536 tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
537 if (!tmp)
538 return -ENOENT;
539 }
540
541 return 0;
542}
543
544static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
545{
546 struct dentry *tmp;
547 int i;
548
549 for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
550 spin_lock_init(&ctrl->files[i].lock);
551 ctrl->files[i].ctrl = ctrl;
552 ctrl->files[i].index = i;
553
554 tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
555 ctrl->debug_root, ctrl->files + i,
556 &ctrl_debug_fops);
557 if (!tmp)
558 return -ENOENT;
559 }
560
561 return hisi_zip_core_debug_init(ctrl);
562}
563
564static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
565{
566 struct hisi_qm *qm = &hisi_zip->qm;
567 struct device *dev = &qm->pdev->dev;
568 struct dentry *dev_d;
569 int ret;
570
571 dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
572 if (!dev_d)
573 return -ENOENT;
574
575 qm->debug.debug_root = dev_d;
576 ret = hisi_qm_debug_init(qm);
577 if (ret)
578 goto failed_to_create;
579
580 if (qm->fun_type == QM_HW_PF) {
581 hisi_zip->ctrl->debug_root = dev_d;
582 ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
583 if (ret)
584 goto failed_to_create;
585 }
586
587 return 0;
588
589failed_to_create:
590 debugfs_remove_recursive(hzip_debugfs_root);
591 return ret;
592}
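/*
 * Resulting debugfs layout (device name is illustrative), with the ctrl
 * files and per-core register dumps only present on the PF:
 *
 *   /sys/kernel/debug/hisi_zip/0000:75:00.0/current_qm
 *   /sys/kernel/debug/hisi_zip/0000:75:00.0/clear_enable
 *   /sys/kernel/debug/hisi_zip/0000:75:00.0/comp_core0/regs
 *   /sys/kernel/debug/hisi_zip/0000:75:00.0/decomp_core0/regs
 *   ...
 *
 * e.g. "echo 1 > .../clear_enable" sets SOFT_CTRL_CNT_CLR_CE_BIT via
 * clear_enable_write().
 */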
593
594static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
595{
596 struct hisi_qm *qm = &hisi_zip->qm;
597
598 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
599 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
600 writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
601
602 hisi_qm_debug_regs_clear(qm);
603}
604
605static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
606{
607 struct hisi_qm *qm = &hisi_zip->qm;
608
609 debugfs_remove_recursive(qm->debug.debug_root);
610
611 if (qm->fun_type == QM_HW_PF)
612 hisi_zip_debug_regs_clear(hisi_zip);
613}
614
615static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
616{
617 hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
618 QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
619 QM_DB_RANDOM_INVALID);
620 hisi_zip_hw_error_set_state(hisi_zip, true);
621}
622
623static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
624{
625 struct hisi_qm *qm = &hisi_zip->qm;
626 struct hisi_zip_ctrl *ctrl;
627
628 ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
629 if (!ctrl)
630 return -ENOMEM;
631
632 hisi_zip->ctrl = ctrl;
633 ctrl->hisi_zip = hisi_zip;
634
635 switch (qm->ver) {
636 case QM_HW_V1:
637 qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
638 break;
639
640 case QM_HW_V2:
641 qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
642 break;
643
644 default:
645 return -EINVAL;
646 }
647
648 hisi_zip_set_user_domain_and_cache(hisi_zip);
649 hisi_zip_hw_error_init(hisi_zip);
650 hisi_zip_debug_regs_clear(hisi_zip);
651
652 return 0;
653}
654
655static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
656{
657 struct hisi_zip *hisi_zip;
658 enum qm_hw_ver rev_id;
659 struct hisi_qm *qm;
660 int ret;
661
662 rev_id = hisi_qm_get_hw_version(pdev);
663 if (rev_id == QM_HW_UNKNOWN)
664 return -EINVAL;
665
666 hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
667 if (!hisi_zip)
668 return -ENOMEM;
669 pci_set_drvdata(pdev, hisi_zip);
670
671 qm = &hisi_zip->qm;
672 qm->pdev = pdev;
673 qm->ver = rev_id;
674
675 qm->sqe_size = HZIP_SQE_SIZE;
676 qm->dev_name = hisi_zip_name;
677 qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
678 QM_HW_VF;
679 switch (uacce_mode) {
680 case 0:
681 qm->use_dma_api = true;
682 break;
683 case 1:
684 qm->use_dma_api = false;
685 break;
686 case 2:
687 qm->use_dma_api = true;
688 break;
689 default:
690 return -EINVAL;
691 }
692
693 ret = hisi_qm_init(qm);
694 if (ret) {
695 dev_err(&pdev->dev, "Failed to init qm!\n");
696 return ret;
697 }
698
699 if (qm->fun_type == QM_HW_PF) {
700 ret = hisi_zip_pf_probe_init(hisi_zip);
701 if (ret)
702 return ret;
703
704 qm->qp_base = HZIP_PF_DEF_Q_BASE;
705 qm->qp_num = pf_q_num;
706 } else if (qm->fun_type == QM_HW_VF) {
707 /*
708		 * There is no way to get the qm configuration from a VM on v1
709		 * hardware, so the PF is currently forced to use HZIP_PF_DEF_Q_NUM
710		 * and only one VF can be triggered on v1 hardware.
711 *
712 * v2 hardware has no such problem.
713 */
714 if (qm->ver == QM_HW_V1) {
715 qm->qp_base = HZIP_PF_DEF_Q_NUM;
716 qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
717 } else if (qm->ver == QM_HW_V2)
718 /* v2 starts to support get vft by mailbox */
719 hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
720 }
721
722 ret = hisi_qm_start(qm);
723 if (ret)
724 goto err_qm_uninit;
725
726 ret = hisi_zip_debugfs_init(hisi_zip);
727 if (ret)
728 dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
729
730 hisi_zip_add_to_list(hisi_zip);
731
732 return 0;
733
734err_qm_uninit:
735 hisi_qm_uninit(qm);
736 return ret;
737}
738
739/* Currently we only support equal assignment */
740static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
741{
742 struct hisi_qm *qm = &hisi_zip->qm;
743 u32 qp_num = qm->qp_num;
744 u32 q_base = qp_num;
745 u32 q_num, remain_q_num, i;
746 int ret;
747
748 if (!num_vfs)
749 return -EINVAL;
750
751 remain_q_num = qm->ctrl_qp_num - qp_num;
752 if (remain_q_num < num_vfs)
753 return -EINVAL;
754
755 q_num = remain_q_num / num_vfs;
756 for (i = 1; i <= num_vfs; i++) {
757 if (i == num_vfs)
758 q_num += remain_q_num % num_vfs;
759 ret = hisi_qm_set_vft(qm, i, q_base, q_num);
760 if (ret)
761 return ret;
762 q_base += q_num;
763 }
764
765 return 0;
766}
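/*
 * Worked example of the equal assignment (hypothetical numbers): with
 * ctrl_qp_num = 1024, qp_num = 64 and num_vfs = 3, remain_q_num = 960 and
 * q_num = 320, so the three VFs get queue ranges starting at 64, 384 and
 * 704; any division remainder is added to the last VF.
 */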
767
768static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
769{
770 struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
771 struct hisi_qm *qm = &hisi_zip->qm;
772 u32 i, num_vfs = ctrl->num_vfs;
773 int ret;
774
775 for (i = 1; i <= num_vfs; i++) {
776 ret = hisi_qm_set_vft(qm, i, 0, 0);
777 if (ret)
778 return ret;
779 }
780
781 ctrl->num_vfs = 0;
782
783 return 0;
784}
785
786static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
787{
788#ifdef CONFIG_PCI_IOV
789 struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
790 int pre_existing_vfs, num_vfs, ret;
791
792 pre_existing_vfs = pci_num_vf(pdev);
793
794 if (pre_existing_vfs) {
795 dev_err(&pdev->dev,
796 "Can't enable VF. Please disable pre-enabled VFs!\n");
797 return 0;
798 }
799
800 num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
801
802 ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
803 if (ret) {
804 dev_err(&pdev->dev, "Can't assign queues for VF!\n");
805 return ret;
806 }
807
808 hisi_zip->ctrl->num_vfs = num_vfs;
809
810 ret = pci_enable_sriov(pdev, num_vfs);
811 if (ret) {
812 dev_err(&pdev->dev, "Can't enable VF!\n");
813 hisi_zip_clear_vft_config(hisi_zip);
814 return ret;
815 }
816
817 return num_vfs;
818#else
819 return 0;
820#endif
821}
822
823static int hisi_zip_sriov_disable(struct pci_dev *pdev)
824{
825 struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
826
827 if (pci_vfs_assigned(pdev)) {
828 dev_err(&pdev->dev,
829 "Can't disable VFs while VFs are assigned!\n");
830 return -EPERM;
831 }
832
833	/* the remove callback of hisi_zip_pci_driver will be called to free VF resources */
834 pci_disable_sriov(pdev);
835
836 return hisi_zip_clear_vft_config(hisi_zip);
837}
838
839static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
840{
841 if (num_vfs == 0)
842 return hisi_zip_sriov_disable(pdev);
843 else
844 return hisi_zip_sriov_enable(pdev, num_vfs);
845}
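/*
 * .sriov_configure is reached through the standard PCI sysfs knob, e.g.
 * (the BDF is illustrative):
 *
 *   echo 3 > /sys/bus/pci/devices/0000:75:00.0/sriov_numvfs   # enable 3 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:75:00.0/sriov_numvfs   # disable them
 */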
846
847static void hisi_zip_remove(struct pci_dev *pdev)
848{
849 struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
850 struct hisi_qm *qm = &hisi_zip->qm;
851
852 if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
853 hisi_zip_sriov_disable(pdev);
854
855 hisi_zip_debugfs_exit(hisi_zip);
856 hisi_qm_stop(qm);
857
858 if (qm->fun_type == QM_HW_PF)
859 hisi_zip_hw_error_set_state(hisi_zip, false);
860
861 hisi_qm_uninit(qm);
862 hisi_zip_remove_from_list(hisi_zip);
863}
864
865static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts)
866{
867 const struct hisi_zip_hw_error *err = zip_hw_error;
868 struct device *dev = &hisi_zip->qm.pdev->dev;
869 u32 err_val;
870
871 while (err->msg) {
872 if (err->int_msk & err_sts) {
873 dev_warn(dev, "%s [error status=0x%x] found\n",
874 err->msg, err->int_msk);
875
876 if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) {
877 err_val = readl(hisi_zip->qm.io_base +
878 HZIP_CORE_SRAM_ECC_ERR_INFO);
879 dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n",
880 ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) &
881 0xFF));
882 dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n",
883 (err_val >> SRAM_ECC_ERR_ADDR_SHIFT));
884 }
885 }
886 err++;
887 }
888}
889
890static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip)
891{
892 u32 err_sts;
893
894 /* read err sts */
895 err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS);
896
897 if (err_sts) {
898 hisi_zip_log_hw_error(hisi_zip, err_sts);
899 /* clear error interrupts */
900 writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE);
901
902 return PCI_ERS_RESULT_NEED_RESET;
903 }
904
905 return PCI_ERS_RESULT_RECOVERED;
906}
907
908static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev)
909{
910 struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
911 struct device *dev = &pdev->dev;
912 pci_ers_result_t qm_ret, zip_ret;
913
914 if (!hisi_zip) {
915 dev_err(dev,
916			"Can't recover ZIP error that occurred during device init\n");
917 return PCI_ERS_RESULT_NONE;
918 }
919
920 qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm);
921
922 zip_ret = hisi_zip_hw_error_handle(hisi_zip);
923
924 return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
925 zip_ret == PCI_ERS_RESULT_NEED_RESET) ?
926 PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
927}
928
929static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
930 pci_channel_state_t state)
931{
932 if (pdev->is_virtfn)
933 return PCI_ERS_RESULT_NONE;
934
935	dev_info(&pdev->dev, "PCI error detected, state = %d\n", state);
936 if (state == pci_channel_io_perm_failure)
937 return PCI_ERS_RESULT_DISCONNECT;
938
939 return hisi_zip_process_hw_error(pdev);
940}
941
942static const struct pci_error_handlers hisi_zip_err_handler = {
943 .error_detected = hisi_zip_error_detected,
944};
945
946static struct pci_driver hisi_zip_pci_driver = {
947 .name = "hisi_zip",
948 .id_table = hisi_zip_dev_ids,
949 .probe = hisi_zip_probe,
950 .remove = hisi_zip_remove,
951 .sriov_configure = hisi_zip_sriov_configure,
952 .err_handler = &hisi_zip_err_handler,
953};
954
955static void hisi_zip_register_debugfs(void)
956{
957 if (!debugfs_initialized())
958 return;
959
960 hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
961 if (IS_ERR_OR_NULL(hzip_debugfs_root))
962 hzip_debugfs_root = NULL;
963}
964
965static void hisi_zip_unregister_debugfs(void)
966{
967 debugfs_remove_recursive(hzip_debugfs_root);
968}
969
970static int __init hisi_zip_init(void)
971{
972 int ret;
973
974 hisi_zip_register_debugfs();
975
976 ret = pci_register_driver(&hisi_zip_pci_driver);
977 if (ret < 0) {
978 pr_err("Failed to register pci driver.\n");
979 goto err_pci;
980 }
981
982 if (uacce_mode == 0 || uacce_mode == 2) {
983 ret = hisi_zip_register_to_crypto();
984 if (ret < 0) {
985 pr_err("Failed to register driver to crypto.\n");
986 goto err_crypto;
987 }
988 }
989
990 return 0;
991
992err_crypto:
993 pci_unregister_driver(&hisi_zip_pci_driver);
994err_pci:
995 hisi_zip_unregister_debugfs();
996
997 return ret;
998}
999
1000static void __exit hisi_zip_exit(void)
1001{
1002 if (uacce_mode == 0 || uacce_mode == 2)
1003 hisi_zip_unregister_from_crypto();
1004 pci_unregister_driver(&hisi_zip_pci_driver);
1005 hisi_zip_unregister_debugfs();
1006}
1007
1008module_init(hisi_zip_init);
1009module_exit(hisi_zip_exit);
1010
1011MODULE_LICENSE("GPL v2");
1012MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
1013MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index d27c812c3d8d..fe4cc8babe1c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -958,9 +958,7 @@ static int img_hash_probe(struct platform_device *pdev)
958 crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH); 958 crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
959 959
960 /* Register bank */ 960 /* Register bank */
961 hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 961 hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
962
963 hdev->io_base = devm_ioremap_resource(dev, hash_res);
964 if (IS_ERR(hdev->io_base)) { 962 if (IS_ERR(hdev->io_base)) {
965 err = PTR_ERR(hdev->io_base); 963 err = PTR_ERR(hdev->io_base);
966 dev_err(dev, "can't ioremap, returned %d\n", err); 964 dev_err(dev, "can't ioremap, returned %d\n", err);
@@ -980,7 +978,6 @@ static int img_hash_probe(struct platform_device *pdev)
980 978
981 irq = platform_get_irq(pdev, 0); 979 irq = platform_get_irq(pdev, 0);
982 if (irq < 0) { 980 if (irq < 0) {
983 dev_err(dev, "no IRQ resource info\n");
984 err = irq; 981 err = irq;
985 goto res_err; 982 goto res_err;
986 } 983 }
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index df43a2c6933b..b456b85f46d3 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/of_platform.h> 15#include <linux/of_platform.h>
16#include <linux/of_irq.h> 16#include <linux/of_irq.h>
17#include <linux/pci.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/workqueue.h> 19#include <linux/workqueue.h>
19 20
@@ -27,62 +28,205 @@ static u32 max_rings = EIP197_MAX_RINGS;
27module_param(max_rings, uint, 0644); 28module_param(max_rings, uint, 0644);
28MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); 29MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
29 30
30static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) 31static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
31{ 32{
32 u32 val, htable_offset; 33 int i;
33 int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; 34
34 35 /*
35 if (priv->version == EIP197B) { 36 * Map all interfaces/rings to register index 0
36 cs_rc_max = EIP197B_CS_RC_MAX; 37 * so they can share contexts. Without this, the EIP197 will
37 cs_ht_wc = EIP197B_CS_HT_WC; 38 * assume each interface/ring to be in its own memory domain
38 cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; 39 * i.e. have its own subset of UNIQUE memory addresses.
39 cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; 40 * Which would cause records with the SAME memory address to
40 } else { 41 * use DIFFERENT cache buffers, causing both poor cache utilization
41 cs_rc_max = EIP197D_CS_RC_MAX; 42 * AND serious coherence/invalidation issues.
42 cs_ht_wc = EIP197D_CS_HT_WC; 43 */
43 cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC; 44 for (i = 0; i < 4; i++)
44 cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC; 45 writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
46
47 /*
48 * Initialize other virtualization regs for cache
49 * These may not be in their reset state ...
50 */
51 for (i = 0; i < priv->config.rings; i++) {
52 writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
53 writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
54 writel(EIP197_FLUE_CONFIG_MAGIC,
55 priv->base + EIP197_FLUE_CONFIG(i));
45 } 56 }
57 writel(0, priv->base + EIP197_FLUE_OFFSETS);
58 writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
59}
46 60
47 /* Enable the record cache memory access */ 61static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
48 val = readl(priv->base + EIP197_CS_RAM_CTRL); 62 u32 addrmid, int *actbank)
49 val &= ~EIP197_TRC_ENABLE_MASK; 63{
50 val |= EIP197_TRC_ENABLE_0; 64 u32 val;
51 writel(val, priv->base + EIP197_CS_RAM_CTRL); 65 int curbank;
66
67 curbank = addrmid >> 16;
68 if (curbank != *actbank) {
69 val = readl(priv->base + EIP197_CS_RAM_CTRL);
70 val = (val & ~EIP197_CS_BANKSEL_MASK) |
71 (curbank << EIP197_CS_BANKSEL_OFS);
72 writel(val, priv->base + EIP197_CS_RAM_CTRL);
73 *actbank = curbank;
74 }
75}
52 76
53 /* Clear all ECC errors */ 77static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
54 writel(0, priv->base + EIP197_TRC_ECCCTRL); 78 int maxbanks, u32 probemask)
79{
80 u32 val, addrhi, addrlo, addrmid;
81 int actbank;
55 82
56 /* 83 /*
57 * Make sure the cache memory is accessible by taking record cache into 84 * And probe the actual size of the physically attached cache data RAM
58 * reset. 85 * Using a binary subdivision algorithm downto 32 byte cache lines.
59 */ 86 */
60 val = readl(priv->base + EIP197_TRC_PARAMS); 87 addrhi = 1 << (16 + maxbanks);
61 val |= EIP197_TRC_PARAMS_SW_RESET; 88 addrlo = 0;
62 val &= ~EIP197_TRC_PARAMS_DATA_ACCESS; 89 actbank = min(maxbanks - 1, 0);
63 writel(val, priv->base + EIP197_TRC_PARAMS); 90 while ((addrhi - addrlo) > 32) {
91 /* write marker to lowest address in top half */
92 addrmid = (addrhi + addrlo) >> 1;
93 eip197_trc_cache_banksel(priv, addrmid, &actbank);
94 writel((addrmid | (addrlo << 16)) & probemask,
95 priv->base + EIP197_CLASSIFICATION_RAMS +
96 (addrmid & 0xffff));
97
98 /* write marker to lowest address in bottom half */
99 eip197_trc_cache_banksel(priv, addrlo, &actbank);
100 writel((addrlo | (addrhi << 16)) & probemask,
101 priv->base + EIP197_CLASSIFICATION_RAMS +
102 (addrlo & 0xffff));
103
104 /* read back marker from top half */
105 eip197_trc_cache_banksel(priv, addrmid, &actbank);
106 val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
107 (addrmid & 0xffff));
108
109 if (val == ((addrmid | (addrlo << 16)) & probemask)) {
110 /* read back correct, continue with top half */
111 addrlo = addrmid;
112 } else {
113 /* not read back correct, continue with bottom half */
114 addrhi = addrmid;
115 }
116 }
117 return addrhi;
118}
119
120static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
121 int cs_rc_max, int cs_ht_wc)
122{
123 int i;
124 u32 htable_offset, val, offset;
64 125
65 /* Clear all records */ 126 /* Clear all records in administration RAM */
66 for (i = 0; i < cs_rc_max; i++) { 127 for (i = 0; i < cs_rc_max; i++) {
67 u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; 128 offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
68 129
69 writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | 130 writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
70 EIP197_CS_RC_PREV(EIP197_RC_NULL), 131 EIP197_CS_RC_PREV(EIP197_RC_NULL),
71 priv->base + offset); 132 priv->base + offset);
72 133
73 val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1); 134 val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
74 if (i == 0) 135 if (i == 0)
75 val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); 136 val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
76 else if (i == cs_rc_max - 1) 137 else if (i == cs_rc_max - 1)
77 val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); 138 val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
78 writel(val, priv->base + offset + sizeof(u32)); 139 writel(val, priv->base + offset + 4);
140 /* must also initialize the address key due to ECC! */
141 writel(0, priv->base + offset + 8);
142 writel(0, priv->base + offset + 12);
79 } 143 }
80 144
81 /* Clear the hash table entries */ 145 /* Clear the hash table entries */
82 htable_offset = cs_rc_max * EIP197_CS_RC_SIZE; 146 htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
83 for (i = 0; i < cs_ht_wc; i++) 147 for (i = 0; i < cs_ht_wc; i++)
84 writel(GENMASK(29, 0), 148 writel(GENMASK(29, 0),
85 priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32)); 149 priv->base + EIP197_CLASSIFICATION_RAMS +
150 htable_offset + i * sizeof(u32));
151}
152
153static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
154{
155 u32 val, dsize, asize;
156 int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
157 int cs_rc_abs_max, cs_ht_sz;
158 int maxbanks;
159
160 /* Setup (dummy) virtualization for cache */
161 eip197_trc_cache_setupvirt(priv);
162
163 /*
164 * Enable the record cache memory access and
165 * probe the bank select width
166 */
167 val = readl(priv->base + EIP197_CS_RAM_CTRL);
168 val &= ~EIP197_TRC_ENABLE_MASK;
169 val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
170 writel(val, priv->base + EIP197_CS_RAM_CTRL);
171 val = readl(priv->base + EIP197_CS_RAM_CTRL);
 172	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;
173
174 /* Clear all ECC errors */
175 writel(0, priv->base + EIP197_TRC_ECCCTRL);
176
177 /*
178 * Make sure the cache memory is accessible by taking record cache into
179 * reset. Need data memory access here, not admin access.
180 */
181 val = readl(priv->base + EIP197_TRC_PARAMS);
182 val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
183 writel(val, priv->base + EIP197_TRC_PARAMS);
184
185 /* Probed data RAM size in bytes */
186 dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);
187
188 /*
 189	 * Now probe the administration RAM size pretty much the same way,
 190	 * except that only the lower 30 bits are writable and we don't need
 191	 * bank selects.
192 */
193 val = readl(priv->base + EIP197_TRC_PARAMS);
194 /* admin access now */
195 val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
196 writel(val, priv->base + EIP197_TRC_PARAMS);
197
198 /* Probed admin RAM size in admin words */
199 asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;
200
201 /* Clear any ECC errors detected while probing! */
202 writel(0, priv->base + EIP197_TRC_ECCCTRL);
203
204 /*
205 * Determine optimal configuration from RAM sizes
206 * Note that we assume that the physical RAM configuration is sane
207 * Therefore, we don't do any parameter error checking here ...
208 */
209
210 /* For now, just use a single record format covering everything */
211 cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
212 cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;
213
214 /*
215 * Step #1: How many records will physically fit?
216 * Hard upper limit is 1023!
217 */
218 cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
219 /* Step #2: Need at least 2 words in the admin RAM per record */
220 cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
221 /* Step #3: Determine log2 of hash table size */
222 cs_ht_sz = __fls(asize - cs_rc_max) - 2;
223 /* Step #4: determine current size of hash table in dwords */
 224	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
225 /* Step #5: add back excess words and see if we can fit more records */
226 cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4));
227
228 /* Clear the cache RAMs */
229 eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
86 230
87 /* Disable the record cache memory access */ 231 /* Disable the record cache memory access */
88 val = readl(priv->base + EIP197_CS_RAM_CTRL); 232 val = readl(priv->base + EIP197_CS_RAM_CTRL);
@@ -102,119 +246,231 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
102 /* Configure the record cache #2 */ 246 /* Configure the record cache #2 */
103 val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) | 247 val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
104 EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | 248 EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
105 EIP197_TRC_PARAMS_HTABLE_SZ(2); 249 EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
106 writel(val, priv->base + EIP197_TRC_PARAMS); 250 writel(val, priv->base + EIP197_TRC_PARAMS);
251
252 dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
253 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
107} 254}
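
The five sizing steps above can be followed with concrete numbers. The standalone sketch below plugs in made-up probe results (the dsize and asize values are purely illustrative), assumes a large-record size of 64 words for EIP197_CS_TRC_REC_WC, and replaces __fls() with a GCC/Clang builtin so the arithmetic can be run in user space.

#include <stdio.h>

#define DSIZE		32768	/* probed data RAM size in bytes (example)        */
#define ASIZE		4096	/* probed admin RAM size in admin words (example) */
#define TRC_REC_WC	64	/* assumed large record size in 32-bit words      */

static int fls_idx(unsigned int x)	/* user-space stand-in for __fls() */
{
	return 31 - __builtin_clz(x);
}

int main(void)
{
	int cs_rc_abs_max, cs_rc_max, cs_ht_sz, cs_ht_wc;

	/* Step 1: how many records physically fit in the data RAM (cap 1023) */
	cs_rc_abs_max = (DSIZE >> 2) / TRC_REC_WC;
	if (cs_rc_abs_max > 1023)
		cs_rc_abs_max = 1023;

	/* Step 2: need at least 2 admin words per record */
	cs_rc_max = cs_rc_abs_max < (ASIZE >> 1) ? cs_rc_abs_max : ASIZE >> 1;

	/* Step 3: log2 of the hash table size from the leftover admin words */
	cs_ht_sz = fls_idx(ASIZE - cs_rc_max) - 2;

	/* Step 4: hash table size in dwords */
	cs_ht_wc = 16 << cs_ht_sz;

	/* Step 5: hand back unused admin words and recheck the record count */
	if (cs_rc_max > ASIZE - (cs_ht_wc >> 4))
		cs_rc_max = ASIZE - (cs_ht_wc >> 4);

	/* With the example numbers: 128 records, hash table 2^9 -> 8192 dwords */
	printf("cs_rc_max=%d cs_ht_sz=%d cs_ht_wc=%d\n",
	       cs_rc_max, cs_ht_sz, cs_ht_wc);
	return 0;
}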
108 255
109static void eip197_write_firmware(struct safexcel_crypto_priv *priv, 256static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
110 const struct firmware *fw, int pe, u32 ctrl,
111 u32 prog_en)
112{ 257{
113 const u32 *data = (const u32 *)fw->data; 258 int pe, i;
114 u32 val; 259 u32 val;
115 int i;
116 260
117 /* Reset the engine to make its program memory accessible */ 261 for (pe = 0; pe < priv->config.pes; pe++) {
118 writel(EIP197_PE_ICE_x_CTRL_SW_RESET | 262 /* Configure the token FIFO's */
119 EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | 263 writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
120 EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, 264 writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
121 EIP197_PE(priv) + ctrl);
122 265
123 /* Enable access to the program memory */ 266 /* Clear the ICE scratchpad memory */
124 writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); 267 val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
268 val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
269 EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
270 EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
271 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
272 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
273
274 /* clear the scratchpad RAM using 32 bit writes only */
275 for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
276 writel(0, EIP197_PE(priv) +
277 EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));
278
279 /* Reset the IFPP engine to make its program mem accessible */
280 writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
281 EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
282 EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
283 EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
284
285 /* Reset the IPUE engine to make its program mem accessible */
286 writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
287 EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
288 EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
289 EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
290
291 /* Enable access to all IFPP program memories */
292 writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
293 EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
294 }
295
296}
297
298static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
299 const struct firmware *fw)
300{
301 const u32 *data = (const u32 *)fw->data;
302 int i;
125 303
126 /* Write the firmware */ 304 /* Write the firmware */
127 for (i = 0; i < fw->size / sizeof(u32); i++) 305 for (i = 0; i < fw->size / sizeof(u32); i++)
128 writel(be32_to_cpu(data[i]), 306 writel(be32_to_cpu(data[i]),
129 priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); 307 priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
130 308
131 /* Disable access to the program memory */ 309 /* Exclude final 2 NOPs from size */
132 writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); 310 return i - EIP197_FW_TERMINAL_NOPS;
311}
312
313/*
314 * If FW is actual production firmware, then poll for its initialization
315 * to complete and check if it is good for the HW, otherwise just return OK.
316 */
317static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
318{
319 int pe, pollcnt;
320 u32 base, pollofs;
133 321
134 /* Release engine from reset */ 322 if (fpp)
135 val = readl(EIP197_PE(priv) + ctrl); 323 pollofs = EIP197_FW_FPP_READY;
136 val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; 324 else
137 writel(val, EIP197_PE(priv) + ctrl); 325 pollofs = EIP197_FW_PUE_READY;
326
327 for (pe = 0; pe < priv->config.pes; pe++) {
328 base = EIP197_PE_ICE_SCRATCH_RAM(pe);
329 pollcnt = EIP197_FW_START_POLLCNT;
330 while (pollcnt &&
331 (readl_relaxed(EIP197_PE(priv) + base +
332 pollofs) != 1)) {
333 pollcnt--;
334 }
335 if (!pollcnt) {
336 dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
337 fpp, pe);
338 return false;
339 }
340 }
341 return true;
342}
343
344static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
345 int ipuesz, int ifppsz, int minifw)
346{
347 int pe;
348 u32 val;
349
350 for (pe = 0; pe < priv->config.pes; pe++) {
351 /* Disable access to all program memory */
352 writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
353
354 /* Start IFPP microengines */
355 if (minifw)
356 val = 0;
357 else
358 val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
359 EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
360 EIP197_PE_ICE_UENG_DEBUG_RESET;
361 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
362
363 /* Start IPUE microengines */
364 if (minifw)
365 val = 0;
366 else
367 val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
368 EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
369 EIP197_PE_ICE_UENG_DEBUG_RESET;
370 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
371 }
372
373 /* For miniFW startup, there is no initialization, so always succeed */
374 if (minifw)
375 return true;
376
377 /* Wait until all the firmwares have properly started up */
378 if (!poll_fw_ready(priv, 1))
379 return false;
380 if (!poll_fw_ready(priv, 0))
381 return false;
382
383 return true;
138} 384}
139 385
140static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) 386static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
141{ 387{
142 const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; 388 const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
143 const struct firmware *fw[FW_NB]; 389 const struct firmware *fw[FW_NB];
144 char fw_path[31], *dir = NULL; 390 char fw_path[37], *dir = NULL;
145 int i, j, ret = 0, pe; 391 int i, j, ret = 0, pe;
146 u32 val; 392 int ipuesz, ifppsz, minifw = 0;
147 393
148 switch (priv->version) { 394 if (priv->version == EIP197D_MRVL)
149 case EIP197B:
150 dir = "eip197b";
151 break;
152 case EIP197D:
153 dir = "eip197d"; 395 dir = "eip197d";
154 break; 396 else if (priv->version == EIP197B_MRVL ||
155 default: 397 priv->version == EIP197_DEVBRD)
156 /* No firmware is required */ 398 dir = "eip197b";
157 return 0; 399 else
158 } 400 return -ENODEV;
159 401
402retry_fw:
160 for (i = 0; i < FW_NB; i++) { 403 for (i = 0; i < FW_NB; i++) {
161 snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]); 404 snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
162 ret = request_firmware(&fw[i], fw_path, priv->dev); 405 ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
163 if (ret) { 406 if (ret) {
164 if (priv->version != EIP197B) 407 if (minifw || priv->version != EIP197B_MRVL)
165 goto release_fw; 408 goto release_fw;
166 409
167 /* Fallback to the old firmware location for the 410 /* Fallback to the old firmware location for the
168 * EIP197b. 411 * EIP197b.
169 */ 412 */
170 ret = request_firmware(&fw[i], fw_name[i], priv->dev); 413 ret = firmware_request_nowarn(&fw[i], fw_name[i],
171 if (ret) { 414 priv->dev);
172 dev_err(priv->dev, 415 if (ret)
173 "Failed to request firmware %s (%d)\n",
174 fw_name[i], ret);
175 goto release_fw; 416 goto release_fw;
176 }
177 } 417 }
178 } 418 }
179 419
180 for (pe = 0; pe < priv->config.pes; pe++) { 420 eip197_init_firmware(priv);
181 /* Clear the scratchpad memory */ 421
182 val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); 422 ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
183 val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
184 EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
185 EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
186 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
187 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
188 423
189 memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0, 424 /* Enable access to IPUE program memories */
190 EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); 425 for (pe = 0; pe < priv->config.pes; pe++)
426 writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
427 EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
191 428
192 eip197_write_firmware(priv, fw[FW_IFPP], pe, 429 ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
193 EIP197_PE_ICE_FPP_CTRL(pe),
194 EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
195 430
196 eip197_write_firmware(priv, fw[FW_IPUE], pe, 431 if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
197 EIP197_PE_ICE_PUE_CTRL(pe), 432 dev_dbg(priv->dev, "Firmware loaded successfully\n");
198 EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); 433 return 0;
199 } 434 }
200 435
436 ret = -ENODEV;
437
201release_fw: 438release_fw:
202 for (j = 0; j < i; j++) 439 for (j = 0; j < i; j++)
203 release_firmware(fw[j]); 440 release_firmware(fw[j]);
204 441
442 if (!minifw) {
443 /* Retry with minifw path */
444 dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
445 dir = "eip197_minifw";
446 minifw = 1;
447 goto retry_fw;
448 }
449
450 dev_dbg(priv->dev, "Firmware load failed.\n");
451
205 return ret; 452 return ret;
206} 453}
207 454
208static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) 455static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
209{ 456{
210 u32 hdw, cd_size_rnd, val; 457 u32 cd_size_rnd, val;
211 int i; 458 int i, cd_fetch_cnt;
212 459
213 hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 460 cd_size_rnd = (priv->config.cd_size +
214 hdw &= GENMASK(27, 25); 461 (BIT(priv->hwconfig.hwdataw) - 1)) >>
215 hdw >>= 25; 462 priv->hwconfig.hwdataw;
216 463 /* determine number of CD's we can fetch into the CD FIFO as 1 block */
217 cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw; 464 if (priv->flags & SAFEXCEL_HW_EIP197) {
465 /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
466 cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
467 cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
468 (priv->config.pes * EIP197_FETCH_DEPTH));
469 } else {
470 /* for the EIP97, just fetch all that fits minus 1 */
471 cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
472 cd_size_rnd) - 1;
473 }
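
The command-descriptor fetch count derived above first rounds the descriptor size up to the host interface data width (hwdataw is a log2 word count), then sees how many of those rounded descriptors fit into the CD FIFO, whose depth is 2^hwcfsize in the same rounded units; the EIP197 is additionally capped at EIP197_FETCH_DEPTH descriptors per processing engine, while the EIP97 simply takes whatever fits minus one. A small runnable sketch with made-up parameter values (the cd_size, hwdataw, hwcfsize and pes numbers below are only examples; EIP197_FETCH_DEPTH of 2 comes from the header change in this patch). The result-descriptor setup further down performs the analogous computation with EIP197_RD64_FETCH_SIZE and hwrfsize.

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define EIP197_FETCH_DEPTH	2	/* per the header change in this patch */

int main(void)
{
	unsigned int cd_size = 10;	/* descriptor size in 32-bit words (example) */
	unsigned int hwdataw = 2;	/* log2 of data width in words -> 4 words    */
	unsigned int hwcfsize = 6;	/* log2 of CD FIFO size -> 64 units          */
	unsigned int pes = 2;		/* number of processing engines (example)    */
	unsigned int cd_size_rnd, eip197_cnt, eip97_cnt;

	/* Round the descriptor size up to the data width */
	cd_size_rnd = (cd_size + (BIT(hwdataw) - 1)) >> hwdataw;	/* 3 */

	/* EIP197: fetch enough to keep all pipes busy, capped per PE */
	eip197_cnt = (1 << hwcfsize) / cd_size_rnd;			/* 21 */
	if (eip197_cnt > pes * EIP197_FETCH_DEPTH)
		eip197_cnt = pes * EIP197_FETCH_DEPTH;			/* capped to 4 */

	/* EIP97: just fetch whatever fits, minus one */
	eip97_cnt = (1 << hwcfsize) / cd_size_rnd - 1;			/* 20 */

	printf("cd_size_rnd=%u eip197=%u eip97=%u\n",
	       cd_size_rnd, eip197_cnt, eip97_cnt);
	return 0;
}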
218 474
219 for (i = 0; i < priv->config.rings; i++) { 475 for (i = 0; i < priv->config.rings; i++) {
220 /* ring base address */ 476 /* ring base address */
@@ -226,8 +482,9 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
226 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | 482 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
227 priv->config.cd_size, 483 priv->config.cd_size,
228 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); 484 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
229 writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | 485 writel(((cd_fetch_cnt *
230 (EIP197_FETCH_COUNT * priv->config.cd_offset), 486 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
487 (cd_fetch_cnt * priv->config.cd_offset),
231 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); 488 EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
232 489
233 /* Configure DMA tx control */ 490 /* Configure DMA tx control */
@@ -245,14 +502,23 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
245 502
246static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) 503static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
247{ 504{
248 u32 hdw, rd_size_rnd, val; 505 u32 rd_size_rnd, val;
249 int i; 506 int i, rd_fetch_cnt;
250 507
251 hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 508 /* determine number of RD's we can fetch into the FIFO as one block */
252 hdw &= GENMASK(27, 25); 509 rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
253 hdw >>= 25; 510 (BIT(priv->hwconfig.hwdataw) - 1)) >>
254 511 priv->hwconfig.hwdataw;
255 rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw; 512 if (priv->flags & SAFEXCEL_HW_EIP197) {
513 /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
514 rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
515 rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
516 (priv->config.pes * EIP197_FETCH_DEPTH));
517 } else {
518 /* for the EIP97, just fetch all that fits minus 1 */
519 rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
520 rd_size_rnd) - 1;
521 }
256 522
257 for (i = 0; i < priv->config.rings; i++) { 523 for (i = 0; i < priv->config.rings; i++) {
258 /* ring base address */ 524 /* ring base address */
@@ -265,8 +531,9 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
265 priv->config.rd_size, 531 priv->config.rd_size,
266 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); 532 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
267 533
268 writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | 534 writel(((rd_fetch_cnt *
269 (EIP197_FETCH_COUNT * priv->config.rd_offset), 535 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
536 (rd_fetch_cnt * priv->config.rd_offset),
270 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); 537 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
271 538
272 /* Configure DMA tx control */ 539 /* Configure DMA tx control */
@@ -291,23 +558,21 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
291 558
292static int safexcel_hw_init(struct safexcel_crypto_priv *priv) 559static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
293{ 560{
294 u32 version, val; 561 u32 val;
295 int i, ret, pe; 562 int i, ret, pe;
296 563
297 /* Determine endianess and configure byte swap */ 564 dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
298 version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); 565 priv->config.pes, priv->config.rings);
299 val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
300 566
301 if ((version & 0xffff) == EIP197_HIA_VERSION_BE) 567 /*
302 val |= EIP197_MST_CTRL_BYTE_SWAP; 568 * For EIP197's only set maximum number of TX commands to 2^5 = 32
303 else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) 569 * Skip for the EIP97 as it does not have this field.
304 val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); 570 */
305 571 if (priv->flags & SAFEXCEL_HW_EIP197) {
306 /* For EIP197 set maximum number of TX commands to 2^5 = 32 */ 572 val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
307 if (priv->version == EIP197B || priv->version == EIP197D)
308 val |= EIP197_MST_CTRL_TX_MAX_CMD(5); 573 val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
309 574 writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
310 writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); 575 }
311 576
312 /* Configure wr/rd cache values */ 577 /* Configure wr/rd cache values */
313 writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | 578 writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
@@ -330,11 +595,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
330 writel(EIP197_DxE_THR_CTRL_RESET_PE, 595 writel(EIP197_DxE_THR_CTRL_RESET_PE,
331 EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); 596 EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
332 597
333 if (priv->version == EIP197B || priv->version == EIP197D) { 598 if (priv->flags & SAFEXCEL_HW_EIP197)
334 /* Reset HIA input interface arbiter */ 599 /* Reset HIA input interface arbiter (EIP197 only) */
335 writel(EIP197_HIA_RA_PE_CTRL_RESET, 600 writel(EIP197_HIA_RA_PE_CTRL_RESET,
336 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); 601 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
337 }
338 602
339 /* DMA transfer size to use */ 603 /* DMA transfer size to use */
340 val = EIP197_HIA_DFE_CFG_DIS_DEBUG; 604 val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -357,12 +621,11 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
357 EIP197_PE_IN_xBUF_THRES_MAX(7), 621 EIP197_PE_IN_xBUF_THRES_MAX(7),
358 EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); 622 EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
359 623
360 if (priv->version == EIP197B || priv->version == EIP197D) { 624 if (priv->flags & SAFEXCEL_HW_EIP197)
361 /* enable HIA input interface arbiter and rings */ 625 /* enable HIA input interface arbiter and rings */
362 writel(EIP197_HIA_RA_PE_CTRL_EN | 626 writel(EIP197_HIA_RA_PE_CTRL_EN |
363 GENMASK(priv->config.rings - 1, 0), 627 GENMASK(priv->config.rings - 1, 0),
364 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); 628 EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
365 }
366 629
367 /* Data Store Engine configuration */ 630 /* Data Store Engine configuration */
368 631
@@ -381,10 +644,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
381 EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); 644 EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
382 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); 645 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
383 val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; 646 val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
384 /* FIXME: instability issues can occur for EIP97 but disabling it impact 647 /* FIXME: instability issues can occur for EIP97 but disabling
385 * performances. 648 * it impacts performance.
386 */ 649 */
387 if (priv->version == EIP197B || priv->version == EIP197D) 650 if (priv->flags & SAFEXCEL_HW_EIP197)
388 val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; 651 val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
389 writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); 652 writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
390 653
@@ -400,21 +663,15 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
400 663
401 /* Token & context configuration */ 664 /* Token & context configuration */
402 val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | 665 val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
403 EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX | 666 EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
404 EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX; 667 EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
405 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); 668 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
406 669
407 /* H/W capabilities selection */ 670 /* H/W capabilities selection: just enable everything */
408 val = EIP197_FUNCTION_RSVD; 671 writel(EIP197_FUNCTION_ALL,
409 val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; 672 EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
410 val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; 673 writel(EIP197_FUNCTION_ALL,
411 val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC; 674 EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
412 val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
413 val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
414 val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
415 val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
416 val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
417 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
418 } 675 }
419 676
420 /* Command Descriptor Rings prepare */ 677 /* Command Descriptor Rings prepare */
@@ -479,8 +736,9 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
479 /* Clear any HIA interrupt */ 736 /* Clear any HIA interrupt */
480 writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); 737 writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
481 738
482 if (priv->version == EIP197B || priv->version == EIP197D) { 739 if (priv->flags & SAFEXCEL_HW_EIP197) {
483 eip197_trc_cache_init(priv); 740 eip197_trc_cache_init(priv);
741 priv->flags |= EIP197_TRC_CACHE;
484 742
485 ret = eip197_load_firmwares(priv); 743 ret = eip197_load_firmwares(priv);
486 if (ret) 744 if (ret)
@@ -589,16 +847,32 @@ finalize:
589inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, 847inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
590 struct safexcel_result_desc *rdesc) 848 struct safexcel_result_desc *rdesc)
591{ 849{
592 if (likely(!rdesc->result_data.error_code)) 850 if (likely((!rdesc->descriptor_overflow) &&
851 (!rdesc->buffer_overflow) &&
852 (!rdesc->result_data.error_code)))
593 return 0; 853 return 0;
594 854
595 if (rdesc->result_data.error_code & 0x407f) { 855 if (rdesc->descriptor_overflow)
596 /* Fatal error (bits 0-7, 14) */ 856 dev_err(priv->dev, "Descriptor overflow detected");
857
858 if (rdesc->buffer_overflow)
859 dev_err(priv->dev, "Buffer overflow detected");
860
861 if (rdesc->result_data.error_code & 0x4066) {
862 /* Fatal error (bits 1,2,5,6 & 14) */
597 dev_err(priv->dev, 863 dev_err(priv->dev,
598 "cipher: result: result descriptor error (0x%x)\n", 864 "result descriptor error (%x)",
599 rdesc->result_data.error_code); 865 rdesc->result_data.error_code);
866 return -EIO;
867 } else if (rdesc->result_data.error_code &
868 (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
869 /*
870 * Give priority over authentication fails:
871 * Blocksize, length & overflow errors,
872 * something wrong with the input!
873 */
600 return -EINVAL; 874 return -EINVAL;
601 } else if (rdesc->result_data.error_code == BIT(9)) { 875 } else if (rdesc->result_data.error_code & BIT(9)) {
602 /* Authentication failed */ 876 /* Authentication failed */
603 return -EBADMSG; 877 return -EBADMSG;
604 } 878 }
@@ -711,7 +985,8 @@ handle_results:
711 ndesc = ctx->handle_result(priv, ring, req, 985 ndesc = ctx->handle_result(priv, ring, req,
712 &should_complete, &ret); 986 &should_complete, &ret);
713 if (ndesc < 0) { 987 if (ndesc < 0) {
714 dev_err(priv->dev, "failed to handle result (%d)", ndesc); 988 dev_err(priv->dev, "failed to handle result (%d)\n",
989 ndesc);
715 goto acknowledge; 990 goto acknowledge;
716 } 991 }
717 992
@@ -783,7 +1058,7 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
783 * reinitialized. This should not happen under 1058 * reinitialized. This should not happen under
784 * normal circumstances. 1059 * normal circumstances.
785 */ 1060 */
786 dev_err(priv->dev, "RDR: fatal error."); 1061 dev_err(priv->dev, "RDR: fatal error.\n");
787 } else if (likely(stat & EIP197_xDR_THRESH)) { 1062 } else if (likely(stat & EIP197_xDR_THRESH)) {
788 rc = IRQ_WAKE_THREAD; 1063 rc = IRQ_WAKE_THREAD;
789 } 1064 }
@@ -813,23 +1088,45 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
813 return IRQ_HANDLED; 1088 return IRQ_HANDLED;
814} 1089}
815 1090
816static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, 1091static int safexcel_request_ring_irq(void *pdev, int irqid,
1092 int is_pci_dev,
817 irq_handler_t handler, 1093 irq_handler_t handler,
818 irq_handler_t threaded_handler, 1094 irq_handler_t threaded_handler,
819 struct safexcel_ring_irq_data *ring_irq_priv) 1095 struct safexcel_ring_irq_data *ring_irq_priv)
820{ 1096{
821 int ret, irq = platform_get_irq_byname(pdev, name); 1097 int ret, irq;
1098 struct device *dev;
1099
1100 if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
1101 struct pci_dev *pci_pdev = pdev;
1102
1103 dev = &pci_pdev->dev;
1104 irq = pci_irq_vector(pci_pdev, irqid);
1105 if (irq < 0) {
1106 dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
1107 irqid, irq);
1108 return irq;
1109 }
1110 } else if (IS_ENABLED(CONFIG_OF)) {
1111 struct platform_device *plf_pdev = pdev;
1112 char irq_name[6] = {0}; /* "ringX\0" */
1113
1114 snprintf(irq_name, 6, "ring%d", irqid);
1115 dev = &plf_pdev->dev;
1116 irq = platform_get_irq_byname(plf_pdev, irq_name);
822 1117
823 if (irq < 0) { 1118 if (irq < 0) {
824 dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name); 1119 dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
825 return irq; 1120 irq_name, irq);
1121 return irq;
1122 }
826 } 1123 }
827 1124
828 ret = devm_request_threaded_irq(&pdev->dev, irq, handler, 1125 ret = devm_request_threaded_irq(dev, irq, handler,
829 threaded_handler, IRQF_ONESHOT, 1126 threaded_handler, IRQF_ONESHOT,
830 dev_name(&pdev->dev), ring_irq_priv); 1127 dev_name(dev), ring_irq_priv);
831 if (ret) { 1128 if (ret) {
832 dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); 1129 dev_err(dev, "unable to request IRQ %d\n", irq);
833 return ret; 1130 return ret;
834 } 1131 }
835 1132
@@ -843,6 +1140,9 @@ static struct safexcel_alg_template *safexcel_algs[] = {
843 &safexcel_alg_cbc_des3_ede, 1140 &safexcel_alg_cbc_des3_ede,
844 &safexcel_alg_ecb_aes, 1141 &safexcel_alg_ecb_aes,
845 &safexcel_alg_cbc_aes, 1142 &safexcel_alg_cbc_aes,
1143 &safexcel_alg_cfb_aes,
1144 &safexcel_alg_ofb_aes,
1145 &safexcel_alg_ctr_aes,
846 &safexcel_alg_md5, 1146 &safexcel_alg_md5,
847 &safexcel_alg_sha1, 1147 &safexcel_alg_sha1,
848 &safexcel_alg_sha224, 1148 &safexcel_alg_sha224,
@@ -860,6 +1160,15 @@ static struct safexcel_alg_template *safexcel_algs[] = {
860 &safexcel_alg_authenc_hmac_sha256_cbc_aes, 1160 &safexcel_alg_authenc_hmac_sha256_cbc_aes,
861 &safexcel_alg_authenc_hmac_sha384_cbc_aes, 1161 &safexcel_alg_authenc_hmac_sha384_cbc_aes,
862 &safexcel_alg_authenc_hmac_sha512_cbc_aes, 1162 &safexcel_alg_authenc_hmac_sha512_cbc_aes,
1163 &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
1164 &safexcel_alg_authenc_hmac_sha1_ctr_aes,
1165 &safexcel_alg_authenc_hmac_sha224_ctr_aes,
1166 &safexcel_alg_authenc_hmac_sha256_ctr_aes,
1167 &safexcel_alg_authenc_hmac_sha384_ctr_aes,
1168 &safexcel_alg_authenc_hmac_sha512_ctr_aes,
1169 &safexcel_alg_xts_aes,
1170 &safexcel_alg_gcm,
1171 &safexcel_alg_ccm,
863}; 1172};
864 1173
865static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) 1174static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -869,7 +1178,10 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
869 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { 1178 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
870 safexcel_algs[i]->priv = priv; 1179 safexcel_algs[i]->priv = priv;
871 1180
872 if (!(safexcel_algs[i]->engines & priv->version)) 1181 /* Do we have all required base algorithms available? */
1182 if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1183 safexcel_algs[i]->algo_mask)
1184 /* No, so don't register this ciphersuite */
873 continue; 1185 continue;
874 1186
875 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) 1187 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
@@ -887,7 +1199,10 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
887 1199
888fail: 1200fail:
889 for (j = 0; j < i; j++) { 1201 for (j = 0; j < i; j++) {
890 if (!(safexcel_algs[j]->engines & priv->version)) 1202 /* Do we have all required base algorithms available? */
1203 if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
1204 safexcel_algs[j]->algo_mask)
1205 /* No, so don't unregister this ciphersuite */
891 continue; 1206 continue;
892 1207
893 if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) 1208 if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
@@ -906,7 +1221,10 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
906 int i; 1221 int i;
907 1222
908 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { 1223 for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
909 if (!(safexcel_algs[i]->engines & priv->version)) 1224 /* Do we have all required base algorithms available? */
1225 if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1226 safexcel_algs[i]->algo_mask)
1227 /* No, so don't unregister this ciphersuite */
910 continue; 1228 continue;
911 1229
912 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) 1230 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
@@ -925,22 +1243,20 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
925 val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 1243 val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
926 1244
927 /* Read number of PEs from the engine */ 1245 /* Read number of PEs from the engine */
928 switch (priv->version) { 1246 if (priv->flags & SAFEXCEL_HW_EIP197)
929 case EIP197B: 1247 /* Wider field width for all EIP197 type engines */
930 case EIP197D:
931 mask = EIP197_N_PES_MASK; 1248 mask = EIP197_N_PES_MASK;
932 break; 1249 else
933 default: 1250 /* Narrow field width for EIP97 type engine */
934 mask = EIP97_N_PES_MASK; 1251 mask = EIP97_N_PES_MASK;
935 } 1252
936 priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; 1253 priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
937 1254
1255 priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
1256
938 val = (val & GENMASK(27, 25)) >> 25; 1257 val = (val & GENMASK(27, 25)) >> 25;
939 mask = BIT(val) - 1; 1258 mask = BIT(val) - 1;
940 1259
941 val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
942 priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
943
944 priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); 1260 priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
945 priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; 1261 priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
946 1262
@@ -952,9 +1268,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
952{ 1268{
953 struct safexcel_register_offsets *offsets = &priv->offsets; 1269 struct safexcel_register_offsets *offsets = &priv->offsets;
954 1270
955 switch (priv->version) { 1271 if (priv->flags & SAFEXCEL_HW_EIP197) {
956 case EIP197B:
957 case EIP197D:
958 offsets->hia_aic = EIP197_HIA_AIC_BASE; 1272 offsets->hia_aic = EIP197_HIA_AIC_BASE;
959 offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; 1273 offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
960 offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; 1274 offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
@@ -965,8 +1279,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
965 offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; 1279 offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
966 offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; 1280 offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
967 offsets->pe = EIP197_PE_BASE; 1281 offsets->pe = EIP197_PE_BASE;
968 break; 1282 offsets->global = EIP197_GLOBAL_BASE;
969 case EIP97IES: 1283 } else {
970 offsets->hia_aic = EIP97_HIA_AIC_BASE; 1284 offsets->hia_aic = EIP97_HIA_AIC_BASE;
971 offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; 1285 offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
972 offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; 1286 offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
@@ -977,135 +1291,213 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
977 offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; 1291 offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
978 offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; 1292 offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
979 offsets->pe = EIP97_PE_BASE; 1293 offsets->pe = EIP97_PE_BASE;
980 break; 1294 offsets->global = EIP97_GLOBAL_BASE;
981 } 1295 }
982} 1296}
983 1297
984static int safexcel_probe(struct platform_device *pdev) 1298/*
1299 * Generic part of probe routine, shared by platform and PCI driver
1300 *
1301 * Assumes IO resources have been mapped, private data mem has been allocated,
1302 * clocks have been enabled, device pointer has been assigned etc.
1303 *
1304 */
1305static int safexcel_probe_generic(void *pdev,
1306 struct safexcel_crypto_priv *priv,
1307 int is_pci_dev)
985{ 1308{
986 struct device *dev = &pdev->dev; 1309 struct device *dev = priv->dev;
987 struct resource *res; 1310 u32 peid, version, mask, val, hiaopt;
988 struct safexcel_crypto_priv *priv; 1311 int i, ret, hwctg;
989 int i, ret;
990 1312
991 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 1313 priv->context_pool = dmam_pool_create("safexcel-context", dev,
992 if (!priv) 1314 sizeof(struct safexcel_context_record),
1315 1, 0);
1316 if (!priv->context_pool)
993 return -ENOMEM; 1317 return -ENOMEM;
994 1318
995 priv->dev = dev; 1319 /*
996 priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); 1320 * First try the EIP97 HIA version regs
997 1321 * For the EIP197, this is guaranteed to NOT return any of the test
998 if (priv->version == EIP197B || priv->version == EIP197D) 1322 * values
999 priv->flags |= EIP197_TRC_CACHE; 1323 */
1324 version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
1325
1326 mask = 0; /* do not swap */
1327 if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1328 priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1329 } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
1330 /* read back byte-swapped, so complement byte swap bits */
1331 mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1332 priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1333 } else {
1334 /* So it wasn't an EIP97 ... maybe it's an EIP197? */
1335 version = readl(priv->base + EIP197_HIA_AIC_BASE +
1336 EIP197_HIA_VERSION);
1337 if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
1338 priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1339 priv->flags |= SAFEXCEL_HW_EIP197;
1340 } else if (EIP197_REG_HI16(version) ==
1341 EIP197_HIA_VERSION_BE) {
1342 /* read back byte-swapped, so complement swap bits */
1343 mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
1344 priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1345 priv->flags |= SAFEXCEL_HW_EIP197;
1346 } else {
1347 return -ENODEV;
1348 }
1349 }
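
The detection cascade above hinges on the fixed 16-bit signature in the HIA version register: on a straight read the little-endian signature sits in the low half-word, while a read through a byte-swapping interconnect moves the (byte-reversed) signature into the high half-word, which is what the EIP197_REG_LO16/HI16 comparisons test for. A self-contained illustration with a made-up register value (only the 0x35ca signature is real; the 0x0123 version nibbles are invented, and the REG_* macros here merely mirror the driver's helpers):

#include <stdint.h>
#include <stdio.h>

#define HIA_VERSION_LE	0x35ca		/* signature seen on a straight read */
#define HIA_VERSION_BE	0xca35		/* same signature after a byte swap  */
#define REG_LO16(r)	((r) & 0xffff)			/* mirrors EIP197_REG_LO16() */
#define REG_HI16(r)	(((r) >> 16) & 0xffff)		/* mirrors EIP197_REG_HI16() */

static uint32_t bswap32(uint32_t v)	/* what a byte-swapping bus does */
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
	uint32_t reg = 0x012335ca;		/* hypothetical register value */
	uint32_t swapped = bswap32(reg);	/* 0xca352301                  */

	printf("direct read:  LO16 == 0x35ca? %s\n",
	       REG_LO16(reg) == HIA_VERSION_LE ? "yes -> no byte swap needed" : "no");
	printf("swapped read: HI16 == 0xca35? %s\n",
	       REG_HI16(swapped) == HIA_VERSION_BE ? "yes -> enable byte swapping" : "no");
	return 0;
}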
1000 1350
1351 /* Now initialize the reg offsets based on the probing info so far */
1001 safexcel_init_register_offsets(priv); 1352 safexcel_init_register_offsets(priv);
1002 1353
1003 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1354 /*
1004 priv->base = devm_ioremap_resource(dev, res); 1355 * If the version was read byte-swapped, we need to flip the device
1005 if (IS_ERR(priv->base)) { 1356 * swapping Keep in mind here, though, that what we write will also be
1006 dev_err(dev, "failed to get resource\n"); 1357 * byte-swapped ...
1007 return PTR_ERR(priv->base); 1358 */
1359 if (mask) {
1360 val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1361 val = val ^ (mask >> 24); /* toggle byte swap bits */
1362 writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
1008 } 1363 }
1009 1364
1010 priv->clk = devm_clk_get(&pdev->dev, NULL); 1365 /*
1011 ret = PTR_ERR_OR_ZERO(priv->clk); 1366 * We're not done probing yet! We may fall through to here if no HIA
1012 /* The clock isn't mandatory */ 1367 * was found at all. So, with the endianness presumably correct now and
1013 if (ret != -ENOENT) { 1368 * the offsets setup, *really* probe for the EIP97/EIP197.
1014 if (ret) 1369 */
1015 return ret; 1370 version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
1016 1371 if (((priv->flags & SAFEXCEL_HW_EIP197) &&
1017 ret = clk_prepare_enable(priv->clk); 1372 (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
1018 if (ret) { 1373 ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
1019 dev_err(dev, "unable to enable clk (%d)\n", ret); 1374 (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
1020 return ret; 1375 /*
1021 } 1376 * We did not find the device that matched our initial probing
1377 * (or our initial probing failed) Report appropriate error.
1378 */
1379 return -ENODEV;
1022 } 1380 }
1023 1381
1024 priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); 1382 priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
1025 ret = PTR_ERR_OR_ZERO(priv->reg_clk); 1383 hwctg = version >> 28;
1026 /* The clock isn't mandatory */ 1384 peid = version & 255;
1027 if (ret != -ENOENT) {
1028 if (ret)
1029 goto err_core_clk;
1030 1385
1031 ret = clk_prepare_enable(priv->reg_clk); 1386 /* Detect EIP96 packet engine and version */
1032 if (ret) { 1387 version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
1033 dev_err(dev, "unable to enable reg clk (%d)\n", ret); 1388 if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
1034 goto err_core_clk; 1389 dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
1035 } 1390 return -ENODEV;
1391 }
1392 priv->hwconfig.pever = EIP197_VERSION_MASK(version);
1393
1394 hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
1395
1396 if (priv->flags & SAFEXCEL_HW_EIP197) {
1397 /* EIP197 */
1398 priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1399 EIP197_HWDATAW_MASK;
1400 priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
1401 EIP197_CFSIZE_MASK) +
1402 EIP197_CFSIZE_ADJUST;
1403 priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
1404 EIP197_RFSIZE_MASK) +
1405 EIP197_RFSIZE_ADJUST;
1406 } else {
1407 /* EIP97 */
1408 priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1409 EIP97_HWDATAW_MASK;
1410 priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
1411 EIP97_CFSIZE_MASK;
1412 priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
1413 EIP97_RFSIZE_MASK;
1036 } 1414 }
1037 1415
1038 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 1416 /* Get supported algorithms from EIP96 transform engine */
1039 if (ret) 1417 priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
1040 goto err_reg_clk; 1418 EIP197_PE_EIP96_OPTIONS(0));
1041 1419
1042 priv->context_pool = dmam_pool_create("safexcel-context", dev, 1420 /* Print single info line describing what we just detected */
1043 sizeof(struct safexcel_context_record), 1421 dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
1044 1, 0); 1422 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
1045 if (!priv->context_pool) { 1423 priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
1046 ret = -ENOMEM; 1424 priv->hwconfig.hwrfsize, priv->hwconfig.pever,
1047 goto err_reg_clk; 1425 priv->hwconfig.algo_flags);
1048 }
1049 1426
1050 safexcel_configure(priv); 1427 safexcel_configure(priv);
1051 1428
1429 if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
1430 /*
1431 * Request MSI vectors for global + 1 per ring -
1432 * or just 1 for older dev images
1433 */
1434 struct pci_dev *pci_pdev = pdev;
1435
1436 ret = pci_alloc_irq_vectors(pci_pdev,
1437 priv->config.rings + 1,
1438 priv->config.rings + 1,
1439 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1440 if (ret < 0) {
1441 dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
1442 return ret;
1443 }
1444 }
1445
1446 /* Register the ring IRQ handlers and configure the rings */
1052 priv->ring = devm_kcalloc(dev, priv->config.rings, 1447 priv->ring = devm_kcalloc(dev, priv->config.rings,
1053 sizeof(*priv->ring), 1448 sizeof(*priv->ring),
1054 GFP_KERNEL); 1449 GFP_KERNEL);
1055 if (!priv->ring) { 1450 if (!priv->ring)
1056 ret = -ENOMEM; 1451 return -ENOMEM;
1057 goto err_reg_clk;
1058 }
1059 1452
1060 for (i = 0; i < priv->config.rings; i++) { 1453 for (i = 0; i < priv->config.rings; i++) {
1061 char irq_name[6] = {0}; /* "ringX\0" */ 1454 char wq_name[9] = {0};
1062 char wq_name[9] = {0}; /* "wq_ringX\0" */
1063 int irq; 1455 int irq;
1064 struct safexcel_ring_irq_data *ring_irq; 1456 struct safexcel_ring_irq_data *ring_irq;
1065 1457
1066 ret = safexcel_init_ring_descriptors(priv, 1458 ret = safexcel_init_ring_descriptors(priv,
1067 &priv->ring[i].cdr, 1459 &priv->ring[i].cdr,
1068 &priv->ring[i].rdr); 1460 &priv->ring[i].rdr);
1069 if (ret) 1461 if (ret) {
1070 goto err_reg_clk; 1462 dev_err(dev, "Failed to initialize rings\n");
1463 return ret;
1464 }
1071 1465
1072 priv->ring[i].rdr_req = devm_kcalloc(dev, 1466 priv->ring[i].rdr_req = devm_kcalloc(dev,
1073 EIP197_DEFAULT_RING_SIZE, 1467 EIP197_DEFAULT_RING_SIZE,
1074 sizeof(priv->ring[i].rdr_req), 1468 sizeof(priv->ring[i].rdr_req),
1075 GFP_KERNEL); 1469 GFP_KERNEL);
1076 if (!priv->ring[i].rdr_req) { 1470 if (!priv->ring[i].rdr_req)
1077 ret = -ENOMEM; 1471 return -ENOMEM;
1078 goto err_reg_clk;
1079 }
1080 1472
1081 ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); 1473 ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
1082 if (!ring_irq) { 1474 if (!ring_irq)
1083 ret = -ENOMEM; 1475 return -ENOMEM;
1084 goto err_reg_clk;
1085 }
1086 1476
1087 ring_irq->priv = priv; 1477 ring_irq->priv = priv;
1088 ring_irq->ring = i; 1478 ring_irq->ring = i;
1089 1479
1090 snprintf(irq_name, 6, "ring%d", i); 1480 irq = safexcel_request_ring_irq(pdev,
1091 irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, 1481 EIP197_IRQ_NUMBER(i, is_pci_dev),
1482 is_pci_dev,
1483 safexcel_irq_ring,
1092 safexcel_irq_ring_thread, 1484 safexcel_irq_ring_thread,
1093 ring_irq); 1485 ring_irq);
1094 if (irq < 0) { 1486 if (irq < 0) {
1095 ret = irq; 1487 dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
1096 goto err_reg_clk; 1488 return irq;
1097 } 1489 }
1098 1490
1099 priv->ring[i].work_data.priv = priv; 1491 priv->ring[i].work_data.priv = priv;
1100 priv->ring[i].work_data.ring = i; 1492 priv->ring[i].work_data.ring = i;
1101 INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work); 1493 INIT_WORK(&priv->ring[i].work_data.work,
1494 safexcel_dequeue_work);
1102 1495
1103 snprintf(wq_name, 9, "wq_ring%d", i); 1496 snprintf(wq_name, 9, "wq_ring%d", i);
1104 priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); 1497 priv->ring[i].workqueue =
1105 if (!priv->ring[i].workqueue) { 1498 create_singlethread_workqueue(wq_name);
1106 ret = -ENOMEM; 1499 if (!priv->ring[i].workqueue)
1107 goto err_reg_clk; 1500 return -ENOMEM;
1108 }
1109 1501
1110 priv->ring[i].requests = 0; 1502 priv->ring[i].requests = 0;
1111 priv->ring[i].busy = false; 1503 priv->ring[i].busy = false;
@@ -1117,28 +1509,21 @@ static int safexcel_probe(struct platform_device *pdev)
1117 spin_lock_init(&priv->ring[i].queue_lock); 1509 spin_lock_init(&priv->ring[i].queue_lock);
1118 } 1510 }
1119 1511
1120 platform_set_drvdata(pdev, priv);
1121 atomic_set(&priv->ring_used, 0); 1512 atomic_set(&priv->ring_used, 0);
1122 1513
1123 ret = safexcel_hw_init(priv); 1514 ret = safexcel_hw_init(priv);
1124 if (ret) { 1515 if (ret) {
1125 dev_err(dev, "EIP h/w init failed (%d)\n", ret); 1516 dev_err(dev, "HW init failed (%d)\n", ret);
1126 goto err_reg_clk; 1517 return ret;
1127 } 1518 }
1128 1519
1129 ret = safexcel_register_algorithms(priv); 1520 ret = safexcel_register_algorithms(priv);
1130 if (ret) { 1521 if (ret) {
1131 dev_err(dev, "Failed to register algorithms (%d)\n", ret); 1522 dev_err(dev, "Failed to register algorithms (%d)\n", ret);
1132 goto err_reg_clk; 1523 return ret;
1133 } 1524 }
1134 1525
1135 return 0; 1526 return 0;
1136
1137err_reg_clk:
1138 clk_disable_unprepare(priv->reg_clk);
1139err_core_clk:
1140 clk_disable_unprepare(priv->clk);
1141 return ret;
1142} 1527}
1143 1528
1144static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) 1529static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
@@ -1160,6 +1545,76 @@ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
1160 } 1545 }
1161} 1546}
1162 1547
1548#if IS_ENABLED(CONFIG_OF)
1549/* for Device Tree platform driver */
1550
1551static int safexcel_probe(struct platform_device *pdev)
1552{
1553 struct device *dev = &pdev->dev;
1554 struct safexcel_crypto_priv *priv;
1555 int ret;
1556
1557 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1558 if (!priv)
1559 return -ENOMEM;
1560
1561 priv->dev = dev;
1562 priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
1563
1564 platform_set_drvdata(pdev, priv);
1565
1566 priv->base = devm_platform_ioremap_resource(pdev, 0);
1567 if (IS_ERR(priv->base)) {
1568 dev_err(dev, "failed to get resource\n");
1569 return PTR_ERR(priv->base);
1570 }
1571
1572 priv->clk = devm_clk_get(&pdev->dev, NULL);
1573 ret = PTR_ERR_OR_ZERO(priv->clk);
1574 /* The clock isn't mandatory */
1575 if (ret != -ENOENT) {
1576 if (ret)
1577 return ret;
1578
1579 ret = clk_prepare_enable(priv->clk);
1580 if (ret) {
1581 dev_err(dev, "unable to enable clk (%d)\n", ret);
1582 return ret;
1583 }
1584 }
1585
1586 priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
1587 ret = PTR_ERR_OR_ZERO(priv->reg_clk);
1588 /* The clock isn't mandatory */
1589 if (ret != -ENOENT) {
1590 if (ret)
1591 goto err_core_clk;
1592
1593 ret = clk_prepare_enable(priv->reg_clk);
1594 if (ret) {
1595 dev_err(dev, "unable to enable reg clk (%d)\n", ret);
1596 goto err_core_clk;
1597 }
1598 }
1599
1600 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
1601 if (ret)
1602 goto err_reg_clk;
1603
1604 /* Generic EIP97/EIP197 device probing */
1605 ret = safexcel_probe_generic(pdev, priv, 0);
1606 if (ret)
1607 goto err_reg_clk;
1608
1609 return 0;
1610
1611err_reg_clk:
1612 clk_disable_unprepare(priv->reg_clk);
1613err_core_clk:
1614 clk_disable_unprepare(priv->clk);
1615 return ret;
1616}
1617
1163static int safexcel_remove(struct platform_device *pdev) 1618static int safexcel_remove(struct platform_device *pdev)
1164{ 1619{
1165 struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); 1620 struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
@@ -1179,30 +1634,28 @@ static int safexcel_remove(struct platform_device *pdev)
1179static const struct of_device_id safexcel_of_match_table[] = { 1634static const struct of_device_id safexcel_of_match_table[] = {
1180 { 1635 {
1181 .compatible = "inside-secure,safexcel-eip97ies", 1636 .compatible = "inside-secure,safexcel-eip97ies",
1182 .data = (void *)EIP97IES, 1637 .data = (void *)EIP97IES_MRVL,
1183 }, 1638 },
1184 { 1639 {
1185 .compatible = "inside-secure,safexcel-eip197b", 1640 .compatible = "inside-secure,safexcel-eip197b",
1186 .data = (void *)EIP197B, 1641 .data = (void *)EIP197B_MRVL,
1187 }, 1642 },
1188 { 1643 {
1189 .compatible = "inside-secure,safexcel-eip197d", 1644 .compatible = "inside-secure,safexcel-eip197d",
1190 .data = (void *)EIP197D, 1645 .data = (void *)EIP197D_MRVL,
1191 }, 1646 },
1647 /* For backward compatibility and intended for generic use */
1192 { 1648 {
1193 /* Deprecated. Kept for backward compatibility. */
1194 .compatible = "inside-secure,safexcel-eip97", 1649 .compatible = "inside-secure,safexcel-eip97",
1195 .data = (void *)EIP97IES, 1650 .data = (void *)EIP97IES_MRVL,
1196 }, 1651 },
1197 { 1652 {
1198 /* Deprecated. Kept for backward compatibility. */
1199 .compatible = "inside-secure,safexcel-eip197", 1653 .compatible = "inside-secure,safexcel-eip197",
1200 .data = (void *)EIP197B, 1654 .data = (void *)EIP197B_MRVL,
1201 }, 1655 },
1202 {}, 1656 {},
1203}; 1657};
1204 1658
1205
1206static struct platform_driver crypto_safexcel = { 1659static struct platform_driver crypto_safexcel = {
1207 .probe = safexcel_probe, 1660 .probe = safexcel_probe,
1208 .remove = safexcel_remove, 1661 .remove = safexcel_remove,
@@ -1211,10 +1664,166 @@ static struct platform_driver crypto_safexcel = {
1211 .of_match_table = safexcel_of_match_table, 1664 .of_match_table = safexcel_of_match_table,
1212 }, 1665 },
1213}; 1666};
1214module_platform_driver(crypto_safexcel); 1667#endif
1668
1669#if IS_ENABLED(CONFIG_PCI)
1670/* PCIE devices - i.e. Inside Secure development boards */
1671
1672static int safexcel_pci_probe(struct pci_dev *pdev,
1673 const struct pci_device_id *ent)
1674{
1675 struct device *dev = &pdev->dev;
1676 struct safexcel_crypto_priv *priv;
1677 void __iomem *pciebase;
1678 int rc;
1679 u32 val;
1680
1681 dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
1682 ent->vendor, ent->device, ent->subvendor,
1683 ent->subdevice, ent->driver_data);
1684
1685 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1686 if (!priv)
1687 return -ENOMEM;
1688
1689 priv->dev = dev;
1690 priv->version = (enum safexcel_eip_version)ent->driver_data;
1691
1692 pci_set_drvdata(pdev, priv);
1693
1694 /* enable the device */
1695 rc = pcim_enable_device(pdev);
1696 if (rc) {
1697 dev_err(dev, "Failed to enable PCI device\n");
1698 return rc;
1699 }
1700
1701 /* take ownership of PCI BAR0 */
1702 rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
1703 if (rc) {
1704 dev_err(dev, "Failed to map IO region for BAR0\n");
1705 return rc;
1706 }
1707 priv->base = pcim_iomap_table(pdev)[0];
1708
1709 if (priv->version == EIP197_DEVBRD) {
1710 dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
1711
1712 rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
1713 if (rc) {
1714 dev_err(dev, "Failed to map IO region for BAR4\n");
1715 return rc;
1716 }
1717
1718 pciebase = pcim_iomap_table(pdev)[2];
1719 val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
1720 if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
1721 dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
1722 (val & 0xff));
1723
1724 /* Setup MSI identity map mapping */
1725 writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
1726 pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
1727 writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
1728 pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
1729 writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
1730 pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
1731 writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
1732 pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
1733
1734 /* Enable all device interrupts */
1735 writel(GENMASK(31, 0),
1736 pciebase + EIP197_XLX_USER_INT_ENB_MSK);
1737 } else {
1738 dev_err(dev, "Unrecognised IRQ block identifier %x\n",
1739 val);
1740 return -ENODEV;
1741 }
1742
1743 /* HW reset FPGA dev board */
1744 /* assert reset */
1745 writel(1, priv->base + EIP197_XLX_GPIO_BASE);
1746 wmb(); /* maintain strict ordering for accesses here */
1747 /* deassert reset */
1748 writel(0, priv->base + EIP197_XLX_GPIO_BASE);
1749 wmb(); /* maintain strict ordering for accesses here */
1750 }
1751
1752 /* enable bus mastering */
1753 pci_set_master(pdev);
1754
1755 /* Generic EIP97/EIP197 device probing */
1756 rc = safexcel_probe_generic(pdev, priv, 1);
1757 return rc;
1758}
1759
1760static void safexcel_pci_remove(struct pci_dev *pdev)
1761{
1762 struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
1763 int i;
1764
1765 safexcel_unregister_algorithms(priv);
1766
1767 for (i = 0; i < priv->config.rings; i++)
1768 destroy_workqueue(priv->ring[i].workqueue);
1769
1770 safexcel_hw_reset_rings(priv);
1771}
1772
1773static const struct pci_device_id safexcel_pci_ids[] = {
1774 {
1775 PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
1776 0x16ae, 0xc522),
1777 .driver_data = EIP197_DEVBRD,
1778 },
1779 {},
1780};
1781
1782MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
1783
1784static struct pci_driver safexcel_pci_driver = {
1785 .name = "crypto-safexcel",
1786 .id_table = safexcel_pci_ids,
1787 .probe = safexcel_pci_probe,
1788 .remove = safexcel_pci_remove,
1789};
1790#endif
1791
1792static int __init safexcel_init(void)
1793{
1794 int rc;
1795
1796#if IS_ENABLED(CONFIG_OF)
1797 /* Register platform driver */
1798 platform_driver_register(&crypto_safexcel);
1799#endif
1800
1801#if IS_ENABLED(CONFIG_PCI)
1802 /* Register PCI driver */
1803 rc = pci_register_driver(&safexcel_pci_driver);
1804#endif
1805
1806 return 0;
1807}
1808
1809static void __exit safexcel_exit(void)
1810{
1811#if IS_ENABLED(CONFIG_OF)
1812 /* Unregister platform driver */
1813 platform_driver_unregister(&crypto_safexcel);
1814#endif
1815
1816#if IS_ENABLED(CONFIG_PCI)
1817 /* Unregister PCI driver if successfully registered before */
1818 pci_unregister_driver(&safexcel_pci_driver);
1819#endif
1820}
1821
1822module_init(safexcel_init);
1823module_exit(safexcel_exit);
1215 1824
1216MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>"); 1825MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
1217MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>"); 1826MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
1218MODULE_AUTHOR("Igal Liberman <igall@marvell.com>"); 1827MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
1219MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197"); 1828MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
1220MODULE_LICENSE("GPL v2"); 1829MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index e0c202f33674..930cc48a6f85 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -14,14 +14,23 @@
14#include <crypto/sha.h> 14#include <crypto/sha.h>
15#include <crypto/skcipher.h> 15#include <crypto/skcipher.h>
16 16
17#define EIP197_HIA_VERSION_LE 0xca35 17#define EIP197_HIA_VERSION_BE 0xca35
18#define EIP197_HIA_VERSION_BE 0x35ca 18#define EIP197_HIA_VERSION_LE 0x35ca
19#define EIP97_VERSION_LE 0x9e61
20#define EIP197_VERSION_LE 0x3ac5
21#define EIP96_VERSION_LE 0x9f60
22#define EIP197_REG_LO16(reg) (reg & 0xffff)
23#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
24#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
25#define EIP197_VERSION_SWAP(reg) (((reg & 0xf0) << 4) | \
26 ((reg >> 4) & 0xf0) | \
27 ((reg >> 12) & 0xf))
19 28
20/* Static configuration */ 29/* Static configuration */
21#define EIP197_DEFAULT_RING_SIZE 400 30#define EIP197_DEFAULT_RING_SIZE 400
22#define EIP197_MAX_TOKENS 8 31#define EIP197_MAX_TOKENS 18
23#define EIP197_MAX_RINGS 4 32#define EIP197_MAX_RINGS 4
24#define EIP197_FETCH_COUNT 1 33#define EIP197_FETCH_DEPTH 2
25#define EIP197_MAX_BATCH_SZ 64 34#define EIP197_MAX_BATCH_SZ 64
26 35
27#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ 36#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
@@ -38,6 +47,27 @@
38 char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \ 47 char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \
39 struct type##_request *name = (void *)__##name##_desc 48 struct type##_request *name = (void *)__##name##_desc
40 49
50/* Xilinx dev board base offsets */
51#define EIP197_XLX_GPIO_BASE 0x200000
52#define EIP197_XLX_IRQ_BLOCK_ID_ADDR 0x2000
53#define EIP197_XLX_IRQ_BLOCK_ID_VALUE 0x1fc2
54#define EIP197_XLX_USER_INT_ENB_MSK 0x2004
55#define EIP197_XLX_USER_INT_ENB_SET 0x2008
56#define EIP197_XLX_USER_INT_ENB_CLEAR 0x200c
57#define EIP197_XLX_USER_INT_BLOCK 0x2040
58#define EIP197_XLX_USER_INT_PEND 0x2048
59#define EIP197_XLX_USER_VECT_LUT0_ADDR 0x2080
60#define EIP197_XLX_USER_VECT_LUT0_IDENT 0x03020100
61#define EIP197_XLX_USER_VECT_LUT1_ADDR 0x2084
62#define EIP197_XLX_USER_VECT_LUT1_IDENT 0x07060504
63#define EIP197_XLX_USER_VECT_LUT2_ADDR 0x2088
64#define EIP197_XLX_USER_VECT_LUT2_IDENT 0x0b0a0908
65#define EIP197_XLX_USER_VECT_LUT3_ADDR 0x208c
66#define EIP197_XLX_USER_VECT_LUT3_IDENT 0x0f0e0d0c
67
68/* Helper defines for probe function */
69#define EIP197_IRQ_NUMBER(i, is_pci) (i + is_pci)
70
41/* Register base offsets */ 71/* Register base offsets */
42#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) 72#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
43#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) 73#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
@@ -49,6 +79,7 @@
49#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) 79#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
50#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) 80#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
51#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) 81#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
82#define EIP197_GLOBAL(priv) ((priv)->base + (priv)->offsets.global)
52 83
53/* EIP197 base offsets */ 84/* EIP197 base offsets */
54#define EIP197_HIA_AIC_BASE 0x90000 85#define EIP197_HIA_AIC_BASE 0x90000
@@ -61,6 +92,7 @@
61#define EIP197_HIA_DSE_THR_BASE 0x8d040 92#define EIP197_HIA_DSE_THR_BASE 0x8d040
62#define EIP197_HIA_GEN_CFG_BASE 0xf0000 93#define EIP197_HIA_GEN_CFG_BASE 0xf0000
63#define EIP197_PE_BASE 0xa0000 94#define EIP197_PE_BASE 0xa0000
95#define EIP197_GLOBAL_BASE 0xf0000
64 96
65/* EIP97 base offsets */ 97/* EIP97 base offsets */
66#define EIP97_HIA_AIC_BASE 0x0 98#define EIP97_HIA_AIC_BASE 0x0
@@ -73,6 +105,7 @@
73#define EIP97_HIA_DSE_THR_BASE 0xf600 105#define EIP97_HIA_DSE_THR_BASE 0xf600
74#define EIP97_HIA_GEN_CFG_BASE 0x10000 106#define EIP97_HIA_GEN_CFG_BASE 0x10000
75#define EIP97_PE_BASE 0x10000 107#define EIP97_PE_BASE 0x10000
108#define EIP97_GLOBAL_BASE 0x10000
76 109
77/* CDR/RDR register offsets */ 110/* CDR/RDR register offsets */
78#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) 111#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
@@ -115,16 +148,22 @@
115#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) 148#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n)))
116#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) 149#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n)))
117#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) 150#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n)))
151#define EIP197_PE_ICE_PUTF_CTRL(n) (0x0d00 + (0x2000 * (n)))
118#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) 152#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
119#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) 153#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
154#define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n)))
120#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) 155#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
121#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) 156#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
122#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) 157#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
123#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) 158#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
124#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) 159#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
160#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
161#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
162#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
125#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) 163#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
126#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) 164#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
127#define EIP197_MST_CTRL 0xfff4 165#define EIP197_MST_CTRL 0xfff4
166#define EIP197_VERSION 0xfffc
128 167
129/* EIP197-specific registers, no indirection */ 168/* EIP197-specific registers, no indirection */
130#define EIP197_CLASSIFICATION_RAMS 0xe0000 169#define EIP197_CLASSIFICATION_RAMS 0xe0000
@@ -139,6 +178,12 @@
139#define EIP197_TRC_ECCADMINSTAT 0xf0838 178#define EIP197_TRC_ECCADMINSTAT 0xf0838
140#define EIP197_TRC_ECCDATASTAT 0xf083c 179#define EIP197_TRC_ECCDATASTAT 0xf083c
141#define EIP197_TRC_ECCDATA 0xf0840 180#define EIP197_TRC_ECCDATA 0xf0840
181#define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n)))
182#define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n)))
183#define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n)))
184#define EIP197_FLUE_OFFSETS 0xf6808
185#define EIP197_FLUE_ARC4_OFFSET 0xf680c
186#define EIP197_FLUE_IFC_LUT(n) (0xf6820 + (4 * (n)))
142#define EIP197_CS_RAM_CTRL 0xf7ff0 187#define EIP197_CS_RAM_CTRL 0xf7ff0
143 188
144/* EIP197_HIA_xDR_DESC_SIZE */ 189/* EIP197_HIA_xDR_DESC_SIZE */
@@ -186,6 +231,19 @@
186#define EIP197_N_PES_OFFSET 4 231#define EIP197_N_PES_OFFSET 4
187#define EIP197_N_PES_MASK GENMASK(4, 0) 232#define EIP197_N_PES_MASK GENMASK(4, 0)
188#define EIP97_N_PES_MASK GENMASK(2, 0) 233#define EIP97_N_PES_MASK GENMASK(2, 0)
234#define EIP197_HWDATAW_OFFSET 25
235#define EIP197_HWDATAW_MASK GENMASK(3, 0)
236#define EIP97_HWDATAW_MASK GENMASK(2, 0)
237#define EIP197_CFSIZE_OFFSET 9
238#define EIP197_CFSIZE_ADJUST 4
239#define EIP97_CFSIZE_OFFSET 8
240#define EIP197_CFSIZE_MASK GENMASK(3, 0)
241#define EIP97_CFSIZE_MASK GENMASK(4, 0)
242#define EIP197_RFSIZE_OFFSET 12
243#define EIP197_RFSIZE_ADJUST 4
244#define EIP97_RFSIZE_OFFSET 12
245#define EIP197_RFSIZE_MASK GENMASK(3, 0)
246#define EIP97_RFSIZE_MASK GENMASK(4, 0)
189 247
190/* EIP197_HIA_AIC_R_ENABLE_CTRL */ 248/* EIP197_HIA_AIC_R_ENABLE_CTRL */
191#define EIP197_CDR_IRQ(n) BIT((n) * 2) 249#define EIP197_CDR_IRQ(n) BIT((n) * 2)
@@ -207,6 +265,11 @@
207#define EIP197_DxE_THR_CTRL_EN BIT(30) 265#define EIP197_DxE_THR_CTRL_EN BIT(30)
208#define EIP197_DxE_THR_CTRL_RESET_PE BIT(31) 266#define EIP197_DxE_THR_CTRL_RESET_PE BIT(31)
209 267
268/* EIP197_PE_ICE_PUE/FPP_CTRL */
269#define EIP197_PE_ICE_UENG_START_OFFSET(n) ((n) << 16)
270#define EIP197_PE_ICE_UENG_INIT_ALIGN_MASK 0x7ff0
271#define EIP197_PE_ICE_UENG_DEBUG_RESET BIT(3)
272
210/* EIP197_HIA_AIC_G_ENABLED_STAT */ 273/* EIP197_HIA_AIC_G_ENABLED_STAT */
211#define EIP197_G_IRQ_DFE(n) BIT((n) << 1) 274#define EIP197_G_IRQ_DFE(n) BIT((n) << 1)
212#define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) 275#define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1)
@@ -223,6 +286,7 @@
223#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) 286#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20)
224#define EIP197_MST_CTRL_BYTE_SWAP BIT(24) 287#define EIP197_MST_CTRL_BYTE_SWAP BIT(24)
225#define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) 288#define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25)
289#define EIP197_MST_CTRL_BYTE_SWAP_BITS GENMASK(25, 24)
226 290
227/* EIP197_PE_IN_DBUF/TBUF_THRES */ 291/* EIP197_PE_IN_DBUF/TBUF_THRES */
228#define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) 292#define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8)
@@ -252,45 +316,20 @@
252 316
253/* EIP197_PE_EIP96_TOKEN_CTRL */ 317/* EIP197_PE_EIP96_TOKEN_CTRL */
254#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) 318#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16)
255#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19) 319#define EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT BIT(17)
256#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20) 320#define EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT BIT(22)
257 321
258/* EIP197_PE_EIP96_FUNCTION_EN */ 322/* EIP197_PE_EIP96_FUNCTION_EN */
259#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) 323#define EIP197_FUNCTION_ALL 0xffffffff
260#define EIP197_PROTOCOL_HASH_ONLY BIT(0)
261#define EIP197_PROTOCOL_ENCRYPT_ONLY BIT(1)
262#define EIP197_PROTOCOL_HASH_ENCRYPT BIT(2)
263#define EIP197_PROTOCOL_HASH_DECRYPT BIT(3)
264#define EIP197_PROTOCOL_ENCRYPT_HASH BIT(4)
265#define EIP197_PROTOCOL_DECRYPT_HASH BIT(5)
266#define EIP197_ALG_ARC4 BIT(7)
267#define EIP197_ALG_AES_ECB BIT(8)
268#define EIP197_ALG_AES_CBC BIT(9)
269#define EIP197_ALG_AES_CTR_ICM BIT(10)
270#define EIP197_ALG_AES_OFB BIT(11)
271#define EIP197_ALG_AES_CFB BIT(12)
272#define EIP197_ALG_DES_ECB BIT(13)
273#define EIP197_ALG_DES_CBC BIT(14)
274#define EIP197_ALG_DES_OFB BIT(16)
275#define EIP197_ALG_DES_CFB BIT(17)
276#define EIP197_ALG_3DES_ECB BIT(18)
277#define EIP197_ALG_3DES_CBC BIT(19)
278#define EIP197_ALG_3DES_OFB BIT(21)
279#define EIP197_ALG_3DES_CFB BIT(22)
280#define EIP197_ALG_MD5 BIT(24)
281#define EIP197_ALG_HMAC_MD5 BIT(25)
282#define EIP197_ALG_SHA1 BIT(26)
283#define EIP197_ALG_HMAC_SHA1 BIT(27)
284#define EIP197_ALG_SHA2 BIT(28)
285#define EIP197_ALG_HMAC_SHA2 BIT(29)
286#define EIP197_ALG_AES_XCBC_MAC BIT(30)
287#define EIP197_ALG_GCM_HASH BIT(31)
288 324
289/* EIP197_PE_EIP96_CONTEXT_CTRL */ 325/* EIP197_PE_EIP96_CONTEXT_CTRL */
290#define EIP197_CONTEXT_SIZE(n) (n) 326#define EIP197_CONTEXT_SIZE(n) (n)
291#define EIP197_ADDRESS_MODE BIT(8) 327#define EIP197_ADDRESS_MODE BIT(8)
292#define EIP197_CONTROL_MODE BIT(9) 328#define EIP197_CONTROL_MODE BIT(9)
293 329
330/* EIP197_FLUE_CONFIG */
331#define EIP197_FLUE_CONFIG_MAGIC 0xc7000004
332
294/* Context Control */ 333/* Context Control */
295struct safexcel_context_record { 334struct safexcel_context_record {
296 u32 control0; 335 u32 control0;
@@ -320,6 +359,7 @@ struct safexcel_context_record {
320#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) 359#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
321#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) 360#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
322#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) 361#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
362#define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21)
323#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) 363#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
324#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23) 364#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
325#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) 365#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
@@ -327,12 +367,21 @@ struct safexcel_context_record {
327#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) 367#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
328#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) 368#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23)
329#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) 369#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23)
370#define CONTEXT_CONTROL_CRYPTO_ALG_GHASH (0x4 << 23)
371#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23)
372#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23)
373#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23)
330#define CONTEXT_CONTROL_INV_FR (0x5 << 24) 374#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
331#define CONTEXT_CONTROL_INV_TR (0x6 << 24) 375#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
332 376
333/* control1 */ 377/* control1 */
334#define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) 378#define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0)
335#define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) 379#define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0)
380#define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0)
381#define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0)
382#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0)
383#define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0)
384#define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17))
336#define CONTEXT_CONTROL_IV0 BIT(5) 385#define CONTEXT_CONTROL_IV0 BIT(5)
337#define CONTEXT_CONTROL_IV1 BIT(6) 386#define CONTEXT_CONTROL_IV1 BIT(6)
338#define CONTEXT_CONTROL_IV2 BIT(7) 387#define CONTEXT_CONTROL_IV2 BIT(7)
@@ -342,6 +391,9 @@ struct safexcel_context_record {
342#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) 391#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12)
343#define CONTEXT_CONTROL_HASH_STORE BIT(19) 392#define CONTEXT_CONTROL_HASH_STORE BIT(19)
344 393
394#define EIP197_XCM_MODE_GCM 1
395#define EIP197_XCM_MODE_CCM 2
396
345/* The hash counter given to the engine in the context has a granularity of 397/* The hash counter given to the engine in the context has a granularity of
346 * 64 bits. 398 * 64 bits.
347 */ 399 */
@@ -352,6 +404,8 @@ struct safexcel_context_record {
352#define EIP197_TRC_ENABLE_1 BIT(5) 404#define EIP197_TRC_ENABLE_1 BIT(5)
353#define EIP197_TRC_ENABLE_2 BIT(6) 405#define EIP197_TRC_ENABLE_2 BIT(6)
354#define EIP197_TRC_ENABLE_MASK GENMASK(6, 4) 406#define EIP197_TRC_ENABLE_MASK GENMASK(6, 4)
407#define EIP197_CS_BANKSEL_MASK GENMASK(14, 12)
408#define EIP197_CS_BANKSEL_OFS 12
355 409
356/* EIP197_TRC_PARAMS */ 410/* EIP197_TRC_PARAMS */
357#define EIP197_TRC_PARAMS_SW_RESET BIT(0) 411#define EIP197_TRC_PARAMS_SW_RESET BIT(0)
@@ -369,19 +423,11 @@ struct safexcel_context_record {
369#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) 423#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
370 424
371/* Cache helpers */ 425/* Cache helpers */
372#define EIP197B_CS_RC_MAX 52 426#define EIP197_CS_TRC_REC_WC 64
373#define EIP197D_CS_RC_MAX 96
374#define EIP197_CS_RC_SIZE (4 * sizeof(u32)) 427#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
375#define EIP197_CS_RC_NEXT(x) (x) 428#define EIP197_CS_RC_NEXT(x) (x)
376#define EIP197_CS_RC_PREV(x) ((x) << 10) 429#define EIP197_CS_RC_PREV(x) ((x) << 10)
377#define EIP197_RC_NULL 0x3ff 430#define EIP197_RC_NULL 0x3ff
378#define EIP197B_CS_TRC_REC_WC 59
379#define EIP197D_CS_TRC_REC_WC 64
380#define EIP197B_CS_TRC_LG_REC_WC 73
381#define EIP197D_CS_TRC_LG_REC_WC 80
382#define EIP197B_CS_HT_WC 64
383#define EIP197D_CS_HT_WC 256
384
385 431
386/* Result data */ 432/* Result data */
387struct result_data_desc { 433struct result_data_desc {
@@ -423,6 +469,14 @@ struct safexcel_result_desc {
423 struct result_data_desc result_data; 469 struct result_data_desc result_data;
424} __packed; 470} __packed;
425 471
472/*
473 * The EIP(1)97 only needs to fetch the descriptor part of
474 * the result descriptor, not the result token part!
475 */
476#define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\
477 sizeof(struct result_data_desc)) /\
478 sizeof(u32))
479
426struct safexcel_token { 480struct safexcel_token {
427 u32 packet_length:17; 481 u32 packet_length:17;
428 u8 stat:2; 482 u8 stat:2;
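The EIP197_RD64_FETCH_SIZE definition above expresses "result descriptor minus its trailing result token, in 32-bit words". A standalone sketch of that arithmetic with simplified stand-in structs; the layouts below are not the driver's real descriptors, only the formula is taken from the header.

#include <stdio.h>
#include <stdint.h>

struct fake_result_data {		/* stands in for struct result_data_desc */
	uint32_t words[4];
};

struct fake_result_desc {		/* stands in for struct safexcel_result_desc */
	uint32_t ctrl[4];
	uint64_t data_ptr;
	struct fake_result_data result_data;
};

#define RD64_FETCH_SIZE ((sizeof(struct fake_result_desc) - \
			  sizeof(struct fake_result_data)) / sizeof(uint32_t))

int main(void)
{
	/* only the leading part of the descriptor needs to be fetched */
	printf("fetch %zu of %zu words\n", (size_t)RD64_FETCH_SIZE,
	       sizeof(struct fake_result_desc) / sizeof(uint32_t));
	return 0;
}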
@@ -442,6 +496,7 @@ struct safexcel_token {
442#define EIP197_TOKEN_OPCODE_INSERT 0x2 496#define EIP197_TOKEN_OPCODE_INSERT 0x2
443#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT 497#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
444#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 498#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
499#define EIP197_TOKEN_OPCODE_INSERT_REMRES 0xa
445#define EIP197_TOKEN_OPCODE_VERIFY 0xd 500#define EIP197_TOKEN_OPCODE_VERIFY 0xd
446#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe 501#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe
447#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) 502#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
@@ -455,10 +510,11 @@ static inline void eip197_noop_token(struct safexcel_token *token)
455/* Instructions */ 510/* Instructions */
456#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c 511#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c
457#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 512#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14
513#define EIP197_TOKEN_INS_ORIGIN_TOKEN 0x1b
458#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) 514#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5)
459#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) 515#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5)
460#define EIP197_TOKEN_INS_TYPE_HASH BIT(6) 516#define EIP197_TOKEN_INS_TYPE_HASH BIT(6)
461#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) 517#define EIP197_TOKEN_INS_TYPE_CRYPTO BIT(7)
462#define EIP197_TOKEN_INS_LAST BIT(8) 518#define EIP197_TOKEN_INS_LAST BIT(8)
463 519
464/* Processing Engine Control Data */ 520/* Processing Engine Control Data */
@@ -509,6 +565,11 @@ struct safexcel_command_desc {
509 * Internal structures & functions 565 * Internal structures & functions
510 */ 566 */
511 567
568#define EIP197_FW_TERMINAL_NOPS 2
569#define EIP197_FW_START_POLLCNT 16
570#define EIP197_FW_PUE_READY 0x14
571#define EIP197_FW_FPP_READY 0x18
572
512enum eip197_fw { 573enum eip197_fw {
513 FW_IFPP = 0, 574 FW_IFPP = 0,
514 FW_IPUE, 575 FW_IPUE,
@@ -581,10 +642,42 @@ struct safexcel_ring {
581 struct crypto_async_request *backlog; 642 struct crypto_async_request *backlog;
582}; 643};
583 644
645/* EIP integration context flags */
584enum safexcel_eip_version { 646enum safexcel_eip_version {
585 EIP97IES = BIT(0), 647 /* Platform (EIP integration context) specifier */
586 EIP197B = BIT(1), 648 EIP97IES_MRVL,
587 EIP197D = BIT(2), 649 EIP197B_MRVL,
650 EIP197D_MRVL,
651 EIP197_DEVBRD
652};
653
654/* Priority we use for advertising our algorithms */
655#define SAFEXCEL_CRA_PRIORITY 300
656
657/* EIP algorithm presence flags */
658enum safexcel_eip_algorithms {
659 SAFEXCEL_ALG_BC0 = BIT(5),
660 SAFEXCEL_ALG_SM4 = BIT(6),
661 SAFEXCEL_ALG_SM3 = BIT(7),
662 SAFEXCEL_ALG_CHACHA20 = BIT(8),
663 SAFEXCEL_ALG_POLY1305 = BIT(9),
664 SAFEXCEL_SEQMASK_256 = BIT(10),
665 SAFEXCEL_SEQMASK_384 = BIT(11),
666 SAFEXCEL_ALG_AES = BIT(12),
667 SAFEXCEL_ALG_AES_XFB = BIT(13),
668 SAFEXCEL_ALG_DES = BIT(15),
669 SAFEXCEL_ALG_DES_XFB = BIT(16),
670 SAFEXCEL_ALG_ARC4 = BIT(18),
671 SAFEXCEL_ALG_AES_XTS = BIT(20),
672 SAFEXCEL_ALG_WIRELESS = BIT(21),
673 SAFEXCEL_ALG_MD5 = BIT(22),
674 SAFEXCEL_ALG_SHA1 = BIT(23),
675 SAFEXCEL_ALG_SHA2_256 = BIT(25),
676 SAFEXCEL_ALG_SHA2_512 = BIT(26),
677 SAFEXCEL_ALG_XCBC_MAC = BIT(27),
678 SAFEXCEL_ALG_CBC_MAC_ALL = BIT(29),
679 SAFEXCEL_ALG_GHASH = BIT(30),
680 SAFEXCEL_ALG_SHA3 = BIT(31),
588}; 681};
589 682
590struct safexcel_register_offsets { 683struct safexcel_register_offsets {
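The per-template algo_mask introduced further down can be checked against the probed algo_flags to decide which algorithms an engine should expose. A standalone sketch of that presence check; only the flag names and bit positions come from the enum above, the template list and loop are made up.

#include <stdio.h>
#include <stdint.h>

#define SAFEXCEL_ALG_AES	(1u << 12)
#define SAFEXCEL_ALG_AES_XTS	(1u << 20)
#define SAFEXCEL_ALG_SHA2_256	(1u << 25)

struct fake_alg_template {
	const char *name;
	uint32_t algo_mask;	/* what the algorithm requires */
};

int main(void)
{
	/* pretend the engine advertised AES and SHA2-256, but not XTS */
	uint32_t algo_flags = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256;
	struct fake_alg_template algs[] = {
		{ "cbc(aes)",     SAFEXCEL_ALG_AES },
		{ "xts(aes)",     SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS },
		{ "hmac(sha256)", SAFEXCEL_ALG_SHA2_256 },
	};

	for (size_t i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		printf("%-14s %s\n", algs[i].name,
		       (algs[i].algo_mask & algo_flags) == algs[i].algo_mask ?
		       "register" : "skip");
	return 0;
}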
@@ -598,10 +691,22 @@ struct safexcel_register_offsets {
598 u32 hia_dse_thr; 691 u32 hia_dse_thr;
599 u32 hia_gen_cfg; 692 u32 hia_gen_cfg;
600 u32 pe; 693 u32 pe;
694 u32 global;
601}; 695};
602 696
603enum safexcel_flags { 697enum safexcel_flags {
604 EIP197_TRC_CACHE = BIT(0), 698 EIP197_TRC_CACHE = BIT(0),
699 SAFEXCEL_HW_EIP197 = BIT(1),
700};
701
702struct safexcel_hwconfig {
703 enum safexcel_eip_algorithms algo_flags;
704 int hwver;
705 int hiaver;
706 int pever;
707 int hwdataw;
708 int hwcfsize;
709 int hwrfsize;
605}; 710};
606 711
607struct safexcel_crypto_priv { 712struct safexcel_crypto_priv {
@@ -613,6 +718,7 @@ struct safexcel_crypto_priv {
613 718
614 enum safexcel_eip_version version; 719 enum safexcel_eip_version version;
615 struct safexcel_register_offsets offsets; 720 struct safexcel_register_offsets offsets;
721 struct safexcel_hwconfig hwconfig;
616 u32 flags; 722 u32 flags;
617 723
618 /* context DMA pool */ 724 /* context DMA pool */
@@ -637,14 +743,16 @@ struct safexcel_context {
637 bool exit_inv; 743 bool exit_inv;
638}; 744};
639 745
746#define HASH_CACHE_SIZE SHA512_BLOCK_SIZE
747
640struct safexcel_ahash_export_state { 748struct safexcel_ahash_export_state {
641 u64 len[2]; 749 u64 len;
642 u64 processed[2]; 750 u64 processed;
643 751
644 u32 digest; 752 u32 digest;
645 753
646 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; 754 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
647 u8 cache[SHA512_BLOCK_SIZE << 1]; 755 u8 cache[HASH_CACHE_SIZE];
648}; 756};
649 757
650/* 758/*
@@ -655,7 +763,7 @@ struct safexcel_ahash_export_state {
655struct safexcel_alg_template { 763struct safexcel_alg_template {
656 struct safexcel_crypto_priv *priv; 764 struct safexcel_crypto_priv *priv;
657 enum safexcel_alg_type type; 765 enum safexcel_alg_type type;
658 u32 engines; 766 enum safexcel_eip_algorithms algo_mask;
659 union { 767 union {
660 struct skcipher_alg skcipher; 768 struct skcipher_alg skcipher;
661 struct aead_alg aead; 769 struct aead_alg aead;
@@ -716,6 +824,9 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
716extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; 824extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
717extern struct safexcel_alg_template safexcel_alg_ecb_aes; 825extern struct safexcel_alg_template safexcel_alg_ecb_aes;
718extern struct safexcel_alg_template safexcel_alg_cbc_aes; 826extern struct safexcel_alg_template safexcel_alg_cbc_aes;
827extern struct safexcel_alg_template safexcel_alg_cfb_aes;
828extern struct safexcel_alg_template safexcel_alg_ofb_aes;
829extern struct safexcel_alg_template safexcel_alg_ctr_aes;
719extern struct safexcel_alg_template safexcel_alg_md5; 830extern struct safexcel_alg_template safexcel_alg_md5;
720extern struct safexcel_alg_template safexcel_alg_sha1; 831extern struct safexcel_alg_template safexcel_alg_sha1;
721extern struct safexcel_alg_template safexcel_alg_sha224; 832extern struct safexcel_alg_template safexcel_alg_sha224;
@@ -733,5 +844,14 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
733extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; 844extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
734extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; 845extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
735extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; 846extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
847extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
848extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes;
849extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes;
850extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes;
851extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes;
852extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
853extern struct safexcel_alg_template safexcel_alg_xts_aes;
854extern struct safexcel_alg_template safexcel_alg_gcm;
855extern struct safexcel_alg_template safexcel_alg_ccm;
736 856
737#endif 857#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 8cdbdbe35681..ef51f8c2b473 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -12,8 +12,12 @@
12#include <crypto/aead.h> 12#include <crypto/aead.h>
13#include <crypto/aes.h> 13#include <crypto/aes.h>
14#include <crypto/authenc.h> 14#include <crypto/authenc.h>
15#include <crypto/des.h> 15#include <crypto/ctr.h>
16#include <crypto/internal/des.h>
17#include <crypto/gcm.h>
18#include <crypto/ghash.h>
16#include <crypto/sha.h> 19#include <crypto/sha.h>
20#include <crypto/xts.h>
17#include <crypto/skcipher.h> 21#include <crypto/skcipher.h>
18#include <crypto/internal/aead.h> 22#include <crypto/internal/aead.h>
19#include <crypto/internal/skcipher.h> 23#include <crypto/internal/skcipher.h>
@@ -38,15 +42,19 @@ struct safexcel_cipher_ctx {
38 u32 mode; 42 u32 mode;
39 enum safexcel_cipher_alg alg; 43 enum safexcel_cipher_alg alg;
40 bool aead; 44 bool aead;
45 int xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
41 46
42 __le32 key[8]; 47 __le32 key[16];
43 unsigned int key_len; 48 u32 nonce;
49 unsigned int key_len, xts;
44 50
45 /* All the below is AEAD specific */ 51 /* All the below is AEAD specific */
46 u32 hash_alg; 52 u32 hash_alg;
47 u32 state_sz; 53 u32 state_sz;
48 u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; 54 u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
49 u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; 55 u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
56
57 struct crypto_cipher *hkaes;
50}; 58};
51 59
52struct safexcel_cipher_req { 60struct safexcel_cipher_req {
@@ -54,16 +62,47 @@ struct safexcel_cipher_req {
54 /* Number of result descriptors associated to the request */ 62 /* Number of result descriptors associated to the request */
55 unsigned int rdescs; 63 unsigned int rdescs;
56 bool needs_inv; 64 bool needs_inv;
65 int nr_src, nr_dst;
57}; 66};
58 67
59static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, 68static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
60 struct safexcel_command_desc *cdesc, 69 struct safexcel_command_desc *cdesc)
61 u32 length)
62{ 70{
63 struct safexcel_token *token; 71 u32 block_sz = 0;
64 u32 offset = 0, block_sz = 0; 72
73 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
74 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
75
76 /* 32 bit nonce */
77 cdesc->control_data.token[0] = ctx->nonce;
78 /* 64 bit IV part */
79 memcpy(&cdesc->control_data.token[1], iv, 8);
80 /* 32 bit counter, start at 1 (big endian!) */
81 cdesc->control_data.token[3] = cpu_to_be32(1);
82
83 return;
84 } else if (ctx->xcm == EIP197_XCM_MODE_GCM) {
85 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
86
87 /* 96 bit IV part */
88 memcpy(&cdesc->control_data.token[0], iv, 12);
89 /* 32 bit counter, start at 1 (big endian!) */
90 cdesc->control_data.token[3] = cpu_to_be32(1);
91
92 return;
93 } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
94 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
65 95
66 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { 96 /* Variable length IV part */
97 memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
98 /* Start variable length counter at 0 */
99 memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
100 0, iv[0] + 1);
101
102 return;
103 }
104
105 if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
67 switch (ctx->alg) { 106 switch (ctx->alg) {
68 case SAFEXCEL_DES: 107 case SAFEXCEL_DES:
69 block_sz = DES_BLOCK_SIZE; 108 block_sz = DES_BLOCK_SIZE;
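The new safexcel_cipher_token() writes the rfc3686 CTR IV as four 32-bit token words: the nonce taken from the key, the 64-bit per-request IV, and a big-endian block counter starting at 1. A standalone sketch of that layout; the helper below is made up, only the word layout mirrors the hunk above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void build_ctr_iv_token(uint32_t token[4], uint32_t nonce,
			       const uint8_t iv[8])
{
	static const uint8_t one_be[4] = { 0, 0, 0, 1 };

	token[0] = nonce;		/* 32-bit nonce from the key */
	memcpy(&token[1], iv, 8);	/* 64-bit IV from the request */
	memcpy(&token[3], one_be, 4);	/* 32-bit counter, big endian, = 1 */
}

int main(void)
{
	uint32_t token[4];
	const uint8_t iv[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	build_ctr_iv_token(token, 0xdeadbeef, iv);
	for (int i = 0; i < 4; i++)
		printf("token[%d] = 0x%08x\n", i, (unsigned)token[i]);
	return 0;
}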
@@ -78,39 +117,28 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
78 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; 117 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
79 break; 118 break;
80 } 119 }
81
82 offset = block_sz / sizeof(u32);
83 memcpy(cdesc->control_data.token, iv, block_sz); 120 memcpy(cdesc->control_data.token, iv, block_sz);
84 } 121 }
122}
123
124static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
125 struct safexcel_command_desc *cdesc,
126 u32 length)
127{
128 struct safexcel_token *token;
129
130 safexcel_cipher_token(ctx, iv, cdesc);
85 131
86 token = (struct safexcel_token *)(cdesc->control_data.token + offset); 132 /* skip over worst case IV of 4 dwords, no need to be exact */
133 token = (struct safexcel_token *)(cdesc->control_data.token + 4);
87 134
88 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; 135 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
89 token[0].packet_length = length; 136 token[0].packet_length = length;
90 token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET | 137 token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
91 EIP197_TOKEN_STAT_LAST_HASH; 138 EIP197_TOKEN_STAT_LAST_HASH;
92 token[0].instructions = EIP197_TOKEN_INS_LAST | 139 token[0].instructions = EIP197_TOKEN_INS_LAST |
93 EIP197_TOKEN_INS_TYPE_CRYTO | 140 EIP197_TOKEN_INS_TYPE_CRYPTO |
94 EIP197_TOKEN_INS_TYPE_OUTPUT; 141 EIP197_TOKEN_INS_TYPE_OUTPUT;
95
96 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
97 u32 last = (EIP197_MAX_TOKENS - 1) - offset;
98
99 token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
100 token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
101 EIP197_TOKEN_EXEC_IF_SUCCESSFUL|
102 EIP197_TOKEN_CTX_OFFSET(0x2);
103 token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
104 EIP197_TOKEN_STAT_LAST_PACKET;
105 token[last].instructions =
106 EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
107 EIP197_TOKEN_INS_ORIGIN_IV0;
108
109 /* Store the updated IV values back in the internal context
110 * registers.
111 */
112 cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
113 }
114} 142}
115 143
116static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, 144static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -119,53 +147,123 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
119 u32 cryptlen, u32 assoclen, u32 digestsize) 147 u32 cryptlen, u32 assoclen, u32 digestsize)
120{ 148{
121 struct safexcel_token *token; 149 struct safexcel_token *token;
122 unsigned offset = 0;
123 150
124 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { 151 safexcel_cipher_token(ctx, iv, cdesc);
125 offset = AES_BLOCK_SIZE / sizeof(u32);
126 memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
127 152
128 cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; 153 if (direction == SAFEXCEL_ENCRYPT) {
154 /* align end of instruction sequence to end of token */
155 token = (struct safexcel_token *)(cdesc->control_data.token +
156 EIP197_MAX_TOKENS - 13);
157
158 token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
159 token[12].packet_length = digestsize;
160 token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
161 EIP197_TOKEN_STAT_LAST_PACKET;
162 token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
163 EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
164 } else {
165 cryptlen -= digestsize;
166
167 /* align end of instruction sequence to end of token */
168 token = (struct safexcel_token *)(cdesc->control_data.token +
169 EIP197_MAX_TOKENS - 14);
170
171 token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
172 token[12].packet_length = digestsize;
173 token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
174 EIP197_TOKEN_STAT_LAST_PACKET;
175 token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
176
177 token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY;
178 token[13].packet_length = digestsize |
179 EIP197_TOKEN_HASH_RESULT_VERIFY;
180 token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
181 EIP197_TOKEN_STAT_LAST_PACKET;
182 token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
129 } 183 }
130 184
131 token = (struct safexcel_token *)(cdesc->control_data.token + offset); 185 token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
186 token[6].packet_length = assoclen;
187
188 if (likely(cryptlen)) {
189 token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
190
191 token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
192 token[10].packet_length = cryptlen;
193 token[10].stat = EIP197_TOKEN_STAT_LAST_HASH;
194 token[10].instructions = EIP197_TOKEN_INS_LAST |
195 EIP197_TOKEN_INS_TYPE_CRYPTO |
196 EIP197_TOKEN_INS_TYPE_HASH |
197 EIP197_TOKEN_INS_TYPE_OUTPUT;
198 } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
199 token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
200 token[6].instructions = EIP197_TOKEN_INS_LAST |
201 EIP197_TOKEN_INS_TYPE_HASH;
202 }
132 203
133 if (direction == SAFEXCEL_DECRYPT) 204 if (!ctx->xcm)
134 cryptlen -= digestsize; 205 return;
135 206
136 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; 207 token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
137 token[0].packet_length = assoclen; 208 token[8].packet_length = 0;
138 token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH | 209 token[8].instructions = AES_BLOCK_SIZE;
139 EIP197_TOKEN_INS_TYPE_OUTPUT;
140 210
141 token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION; 211 token[9].opcode = EIP197_TOKEN_OPCODE_INSERT;
142 token[1].packet_length = cryptlen; 212 token[9].packet_length = AES_BLOCK_SIZE;
143 token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; 213 token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
144 token[1].instructions = EIP197_TOKEN_INS_LAST | 214 EIP197_TOKEN_INS_TYPE_CRYPTO;
145 EIP197_TOKEN_INS_TYPE_CRYTO |
146 EIP197_TOKEN_INS_TYPE_HASH |
147 EIP197_TOKEN_INS_TYPE_OUTPUT;
148 215
149 if (direction == SAFEXCEL_ENCRYPT) { 216 if (ctx->xcm == EIP197_XCM_MODE_GCM) {
150 token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; 217 token[6].instructions = EIP197_TOKEN_INS_LAST |
151 token[2].packet_length = digestsize; 218 EIP197_TOKEN_INS_TYPE_HASH;
152 token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
153 EIP197_TOKEN_STAT_LAST_PACKET;
154 token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
155 EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
156 } else { 219 } else {
157 token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; 220 u8 *cbcmaciv = (u8 *)&token[1];
158 token[2].packet_length = digestsize; 221 u32 *aadlen = (u32 *)&token[5];
159 token[2].stat = EIP197_TOKEN_STAT_LAST_HASH | 222
160 EIP197_TOKEN_STAT_LAST_PACKET; 223 /* Construct IV block B0 for the CBC-MAC */
161 token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; 224 token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
162 225 token[0].packet_length = AES_BLOCK_SIZE +
163 token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY; 226 ((assoclen > 0) << 1);
164 token[3].packet_length = digestsize | 227 token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
165 EIP197_TOKEN_HASH_RESULT_VERIFY; 228 EIP197_TOKEN_INS_TYPE_HASH;
166 token[3].stat = EIP197_TOKEN_STAT_LAST_HASH | 229 /* Variable length IV part */
167 EIP197_TOKEN_STAT_LAST_PACKET; 230 memcpy(cbcmaciv, iv, 15 - iv[0]);
168 token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; 231 /* fixup flags byte */
232 cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
233 /* Clear upper bytes of variable message length to 0 */
234 memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
235 /* insert lower 2 bytes of message length */
236 cbcmaciv[14] = cryptlen >> 8;
237 cbcmaciv[15] = cryptlen & 255;
238
239 if (assoclen) {
240 *aadlen = cpu_to_le32(cpu_to_be16(assoclen));
241 assoclen += 2;
242 }
243
244 token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
245
246 /* Align AAD data towards hash engine */
247 token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
248 assoclen &= 15;
249 token[7].packet_length = assoclen ? 16 - assoclen : 0;
250
251 if (likely(cryptlen)) {
252 token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
253
254 /* Align crypto data towards hash engine */
255 token[10].stat = 0;
256
257 token[11].opcode = EIP197_TOKEN_OPCODE_INSERT;
258 cryptlen &= 15;
259 token[11].packet_length = cryptlen ? 16 - cryptlen : 0;
260 token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
261 token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH;
262 } else {
263 token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
264 token[7].instructions = EIP197_TOKEN_INS_LAST |
265 EIP197_TOKEN_INS_TYPE_HASH;
266 }
169 } 267 }
170} 268}
171 269
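The CCM branch above assembles the CBC-MAC B0 block in the token: a flags byte carrying the Adata bit, the encoded tag length and the encoded length-field size, followed by the nonce and the big-endian message length. A standalone sketch of that RFC 3610 layout; the helper name, parameters and sample values are made up.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void build_ccm_b0(uint8_t b0[16], const uint8_t *nonce,
			 unsigned int noncelen,	/* 15 - L */
			 unsigned int taglen,	/* M: even, 4..16 */
			 int have_aad, uint64_t msglen)
{
	unsigned int L = 15 - noncelen;
	unsigned int i;

	memset(b0, 0, 16);
	b0[0] = (have_aad ? 1 << 6 : 0) |	/* Adata */
		(((taglen - 2) / 2) << 3) |	/* M' */
		(L - 1);			/* L' */
	memcpy(&b0[1], nonce, noncelen);
	for (i = 0; i < L; i++)			/* big-endian l(m) */
		b0[15 - i] = (uint8_t)(msglen >> (8 * i));
}

int main(void)
{
	uint8_t nonce[13] = { 0 }, b0[16];

	build_ccm_b0(b0, nonce, sizeof(nonce), 16, 1, 256);
	for (int i = 0; i < 16; i++)
		printf("%02x%c", (unsigned)b0[i], i == 15 ? '\n' : ' ');
	return 0;
}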
@@ -178,7 +276,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
178 struct crypto_aes_ctx aes; 276 struct crypto_aes_ctx aes;
179 int ret, i; 277 int ret, i;
180 278
181 ret = crypto_aes_expand_key(&aes, key, len); 279 ret = aes_expandkey(&aes, key, len);
182 if (ret) { 280 if (ret) {
183 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 281 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
184 return ret; 282 return ret;
@@ -202,22 +300,49 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
202 return 0; 300 return 0;
203} 301}
204 302
205static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, 303static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
206 unsigned int len) 304 unsigned int len)
207{ 305{
208 struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); 306 struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
209 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 307 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
210 struct safexcel_ahash_export_state istate, ostate; 308 struct safexcel_ahash_export_state istate, ostate;
211 struct safexcel_crypto_priv *priv = ctx->priv; 309 struct safexcel_crypto_priv *priv = ctx->priv;
212 struct crypto_authenc_keys keys; 310 struct crypto_authenc_keys keys;
311 struct crypto_aes_ctx aes;
312 int err = -EINVAL;
213 313
214 if (crypto_authenc_extractkeys(&keys, key, len) != 0) 314 if (crypto_authenc_extractkeys(&keys, key, len) != 0)
215 goto badkey; 315 goto badkey;
216 316
217 if (keys.enckeylen > sizeof(ctx->key)) 317 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
218 goto badkey; 318 /* Minimum keysize is minimum AES key size + nonce size */
319 if (keys.enckeylen < (AES_MIN_KEY_SIZE +
320 CTR_RFC3686_NONCE_SIZE))
321 goto badkey;
322 /* last 4 bytes of key are the nonce! */
323 ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
324 CTR_RFC3686_NONCE_SIZE);
325 /* exclude the nonce here */
326 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
327 }
219 328
220 /* Encryption key */ 329 /* Encryption key */
330 switch (ctx->alg) {
331 case SAFEXCEL_3DES:
332 err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
333 if (unlikely(err))
334 goto badkey_expflags;
335 break;
336 case SAFEXCEL_AES:
337 err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
338 if (unlikely(err))
339 goto badkey;
340 break;
341 default:
342 dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
343 goto badkey;
344 }
345
221 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && 346 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
222 memcmp(ctx->key, keys.enckey, keys.enckeylen)) 347 memcmp(ctx->key, keys.enckey, keys.enckeylen))
223 ctx->base.needs_inv = true; 348 ctx->base.needs_inv = true;
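The new aead setkey treats the last four bytes of the supplied encryption key as the RFC 3686 nonce and shortens the AES key accordingly. A standalone sketch of that split; the helper and the sample key blob are made up.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4

static int split_rfc3686_key(const uint8_t *enckey, unsigned int enckeylen,
			     uint32_t *nonce, unsigned int *aes_keylen)
{
	if (enckeylen < 16 + CTR_RFC3686_NONCE_SIZE)	/* AES-128 minimum */
		return -1;
	/* last 4 bytes of the blob are the per-association nonce */
	memcpy(nonce, enckey + enckeylen - CTR_RFC3686_NONCE_SIZE,
	       CTR_RFC3686_NONCE_SIZE);
	*aes_keylen = enckeylen - CTR_RFC3686_NONCE_SIZE;
	return 0;
}

int main(void)
{
	uint8_t blob[20] = { [16] = 0xde, 0xad, 0xbe, 0xef };	/* 16B key + nonce */
	uint32_t nonce;
	unsigned int keylen;

	if (!split_rfc3686_key(blob, sizeof(blob), &nonce, &keylen))
		printf("aes keylen=%u nonce=0x%08x\n", keylen, (unsigned)nonce);
	return 0;
}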
@@ -274,8 +399,9 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
274 399
275badkey: 400badkey:
276 crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 401 crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
402badkey_expflags:
277 memzero_explicit(&keys, sizeof(keys)); 403 memzero_explicit(&keys, sizeof(keys));
278 return -EINVAL; 404 return err;
279} 405}
280 406
281static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, 407static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
@@ -284,59 +410,78 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
284 struct safexcel_command_desc *cdesc) 410 struct safexcel_command_desc *cdesc)
285{ 411{
286 struct safexcel_crypto_priv *priv = ctx->priv; 412 struct safexcel_crypto_priv *priv = ctx->priv;
287 int ctrl_size; 413 int ctrl_size = ctx->key_len / sizeof(u32);
414
415 cdesc->control_data.control1 = ctx->mode;
288 416
289 if (ctx->aead) { 417 if (ctx->aead) {
418 /* Take in account the ipad+opad digests */
419 if (ctx->xcm) {
420 ctrl_size += ctx->state_sz / sizeof(u32);
421 cdesc->control_data.control0 =
422 CONTEXT_CONTROL_KEY_EN |
423 CONTEXT_CONTROL_DIGEST_XCM |
424 ctx->hash_alg |
425 CONTEXT_CONTROL_SIZE(ctrl_size);
426 } else {
427 ctrl_size += ctx->state_sz / sizeof(u32) * 2;
428 cdesc->control_data.control0 =
429 CONTEXT_CONTROL_KEY_EN |
430 CONTEXT_CONTROL_DIGEST_HMAC |
431 ctx->hash_alg |
432 CONTEXT_CONTROL_SIZE(ctrl_size);
433 }
290 if (sreq->direction == SAFEXCEL_ENCRYPT) 434 if (sreq->direction == SAFEXCEL_ENCRYPT)
291 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT; 435 cdesc->control_data.control0 |=
436 (ctx->xcm == EIP197_XCM_MODE_CCM) ?
437 CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT :
438 CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
439
292 else 440 else
293 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN; 441 cdesc->control_data.control0 |=
442 (ctx->xcm == EIP197_XCM_MODE_CCM) ?
443 CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN :
444 CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
294 } else { 445 } else {
295 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; 446 if (sreq->direction == SAFEXCEL_ENCRYPT)
296 447 cdesc->control_data.control0 =
297 /* The decryption control type is a combination of the 448 CONTEXT_CONTROL_TYPE_CRYPTO_OUT |
298 * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all 449 CONTEXT_CONTROL_KEY_EN |
299 * types. 450 CONTEXT_CONTROL_SIZE(ctrl_size);
300 */ 451 else
301 if (sreq->direction == SAFEXCEL_DECRYPT) 452 cdesc->control_data.control0 =
302 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN; 453 CONTEXT_CONTROL_TYPE_CRYPTO_IN |
454 CONTEXT_CONTROL_KEY_EN |
455 CONTEXT_CONTROL_SIZE(ctrl_size);
303 } 456 }
304 457
305 cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
306 cdesc->control_data.control1 |= ctx->mode;
307
308 if (ctx->aead)
309 cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
310 ctx->hash_alg;
311
312 if (ctx->alg == SAFEXCEL_DES) { 458 if (ctx->alg == SAFEXCEL_DES) {
313 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES; 459 cdesc->control_data.control0 |=
460 CONTEXT_CONTROL_CRYPTO_ALG_DES;
314 } else if (ctx->alg == SAFEXCEL_3DES) { 461 } else if (ctx->alg == SAFEXCEL_3DES) {
315 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES; 462 cdesc->control_data.control0 |=
463 CONTEXT_CONTROL_CRYPTO_ALG_3DES;
316 } else if (ctx->alg == SAFEXCEL_AES) { 464 } else if (ctx->alg == SAFEXCEL_AES) {
317 switch (ctx->key_len) { 465 switch (ctx->key_len >> ctx->xts) {
318 case AES_KEYSIZE_128: 466 case AES_KEYSIZE_128:
319 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; 467 cdesc->control_data.control0 |=
468 CONTEXT_CONTROL_CRYPTO_ALG_AES128;
320 break; 469 break;
321 case AES_KEYSIZE_192: 470 case AES_KEYSIZE_192:
322 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; 471 cdesc->control_data.control0 |=
472 CONTEXT_CONTROL_CRYPTO_ALG_AES192;
323 break; 473 break;
324 case AES_KEYSIZE_256: 474 case AES_KEYSIZE_256:
325 cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; 475 cdesc->control_data.control0 |=
476 CONTEXT_CONTROL_CRYPTO_ALG_AES256;
326 break; 477 break;
327 default: 478 default:
328 dev_err(priv->dev, "aes keysize not supported: %u\n", 479 dev_err(priv->dev, "aes keysize not supported: %u\n",
329 ctx->key_len); 480 ctx->key_len >> ctx->xts);
330 return -EINVAL; 481 return -EINVAL;
331 } 482 }
332 } 483 }
333 484
334 ctrl_size = ctx->key_len / sizeof(u32);
335 if (ctx->aead)
336 /* Take in account the ipad+opad digests */
337 ctrl_size += ctx->state_sz / sizeof(u32) * 2;
338 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
339
340 return 0; 485 return 0;
341} 486}
342 487
@@ -348,6 +493,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
348 struct safexcel_cipher_req *sreq, 493 struct safexcel_cipher_req *sreq,
349 bool *should_complete, int *ret) 494 bool *should_complete, int *ret)
350{ 495{
496 struct skcipher_request *areq = skcipher_request_cast(async);
497 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
498 struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
351 struct safexcel_result_desc *rdesc; 499 struct safexcel_result_desc *rdesc;
352 int ndesc = 0; 500 int ndesc = 0;
353 501
@@ -374,10 +522,22 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
374 safexcel_complete(priv, ring); 522 safexcel_complete(priv, ring);
375 523
376 if (src == dst) { 524 if (src == dst) {
377 dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL); 525 dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
378 } else { 526 } else {
379 dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE); 527 dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
380 dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE); 528 dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
529 }
530
531 /*
532 * Update IV in req from last crypto output word for CBC modes
533 */
534 if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
535 (sreq->direction == SAFEXCEL_ENCRYPT)) {
536 /* For encrypt take the last output word */
537 sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv,
538 crypto_skcipher_ivsize(skcipher),
539 (cryptlen -
540 crypto_skcipher_ivsize(skcipher)));
381 } 541 }
382 542
383 *should_complete = true; 543 *should_complete = true;
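The result handler now recovers the "output IV" for CBC encrypt by copying the last ciphertext block back into the request IV. A flat-buffer sketch of the same rule; scatterlist handling is omitted and the names are made up.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BLK 16	/* AES block size */

static void cbc_save_output_iv(uint8_t iv[BLK],
			       const uint8_t *ct, size_t ctlen)
{
	/* assumes ctlen is a non-zero multiple of the block size */
	memcpy(iv, ct + ctlen - BLK, BLK);
}

int main(void)
{
	uint8_t ct[32] = { 0 }, iv[BLK];

	memset(ct + 16, 0xab, 16);	/* pretend last ciphertext block */
	cbc_save_output_iv(iv, ct, sizeof(ct));
	printf("next iv starts with 0x%02x\n", iv[0]);
	return 0;
}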
@@ -392,53 +552,105 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
392 unsigned int digestsize, u8 *iv, int *commands, 552 unsigned int digestsize, u8 *iv, int *commands,
393 int *results) 553 int *results)
394{ 554{
555 struct skcipher_request *areq = skcipher_request_cast(base);
556 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
395 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); 557 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
396 struct safexcel_crypto_priv *priv = ctx->priv; 558 struct safexcel_crypto_priv *priv = ctx->priv;
397 struct safexcel_command_desc *cdesc; 559 struct safexcel_command_desc *cdesc;
560 struct safexcel_command_desc *first_cdesc = NULL;
398 struct safexcel_result_desc *rdesc, *first_rdesc = NULL; 561 struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
399 struct scatterlist *sg; 562 struct scatterlist *sg;
400 unsigned int totlen = cryptlen + assoclen; 563 unsigned int totlen;
401 int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; 564 unsigned int totlen_src = cryptlen + assoclen;
402 int i, ret = 0; 565 unsigned int totlen_dst = totlen_src;
566 int n_cdesc = 0, n_rdesc = 0;
567 int queued, i, ret = 0;
568 bool first = true;
569
570 sreq->nr_src = sg_nents_for_len(src, totlen_src);
571
572 if (ctx->aead) {
573 /*
574 * AEAD has auth tag appended to output for encrypt and
575 * removed from the output for decrypt!
576 */
577 if (sreq->direction == SAFEXCEL_DECRYPT)
578 totlen_dst -= digestsize;
579 else
580 totlen_dst += digestsize;
581
582 memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
583 ctx->ipad, ctx->state_sz);
584 if (!ctx->xcm)
585 memcpy(ctx->base.ctxr->data + (ctx->key_len +
586 ctx->state_sz) / sizeof(u32), ctx->opad,
587 ctx->state_sz);
588 } else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
589 (sreq->direction == SAFEXCEL_DECRYPT)) {
590 /*
591 * Save IV from last crypto input word for CBC modes in decrypt
592 * direction. Need to do this first in case of inplace operation
593 * as it will be overwritten.
594 */
595 sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv,
596 crypto_skcipher_ivsize(skcipher),
597 (totlen_src -
598 crypto_skcipher_ivsize(skcipher)));
599 }
600
601 sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
602
603 /*
604 * Remember actual input length, source buffer length may be
605 * updated in case of inline operation below.
606 */
607 totlen = totlen_src;
608 queued = totlen_src;
403 609
404 if (src == dst) { 610 if (src == dst) {
405 nr_src = dma_map_sg(priv->dev, src, sg_nents(src), 611 sreq->nr_src = max(sreq->nr_src, sreq->nr_dst);
406 DMA_BIDIRECTIONAL); 612 sreq->nr_dst = sreq->nr_src;
407 nr_dst = nr_src; 613 if (unlikely((totlen_src || totlen_dst) &&
408 if (!nr_src) 614 (sreq->nr_src <= 0))) {
615 dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
616 max(totlen_src, totlen_dst));
409 return -EINVAL; 617 return -EINVAL;
618 }
619 dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
410 } else { 620 } else {
411 nr_src = dma_map_sg(priv->dev, src, sg_nents(src), 621 if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
412 DMA_TO_DEVICE); 622 dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
413 if (!nr_src) 623 totlen_src);
414 return -EINVAL; 624 return -EINVAL;
625 }
626 dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
415 627
416 nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst), 628 if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
417 DMA_FROM_DEVICE); 629 dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
418 if (!nr_dst) { 630 totlen_dst);
419 dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); 631 dma_unmap_sg(priv->dev, src, sreq->nr_src,
632 DMA_TO_DEVICE);
420 return -EINVAL; 633 return -EINVAL;
421 } 634 }
635 dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
422 } 636 }
423 637
424 memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); 638 memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
425 639
426 if (ctx->aead) { 640 /* The EIP cannot deal with zero length input packets! */
427 memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32), 641 if (totlen == 0)
428 ctx->ipad, ctx->state_sz); 642 totlen = 1;
429 memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
430 ctx->opad, ctx->state_sz);
431 }
432 643
433 /* command descriptors */ 644 /* command descriptors */
434 for_each_sg(src, sg, nr_src, i) { 645 for_each_sg(src, sg, sreq->nr_src, i) {
435 int len = sg_dma_len(sg); 646 int len = sg_dma_len(sg);
436 647
437 /* Do not overflow the request */ 648 /* Do not overflow the request */
438 if (queued - len < 0) 649 if (queued - len < 0)
439 len = queued; 650 len = queued;
440 651
441 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), 652 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
653 !(queued - len),
442 sg_dma_address(sg), len, totlen, 654 sg_dma_address(sg), len, totlen,
443 ctx->base.ctxr_dma); 655 ctx->base.ctxr_dma);
444 if (IS_ERR(cdesc)) { 656 if (IS_ERR(cdesc)) {
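The send path above now sizes the destination separately from the source for AEAD: encrypt appends the authentication tag to the output, decrypt strips it. A trivial standalone sketch of that arithmetic; the function name and sample sizes are made up.

#include <stdio.h>

static unsigned int aead_dst_len(unsigned int cryptlen, unsigned int assoclen,
				 unsigned int digestsize, int encrypt)
{
	unsigned int len = cryptlen + assoclen;

	return encrypt ? len + digestsize : len - digestsize;
}

int main(void)
{
	/* encrypt: 64B plaintext + 16B AAD -> 96B output incl. 16B tag */
	printf("enc dst = %u\n", aead_dst_len(64, 16, 16, 1));
	/* decrypt: cryptlen includes the tag, which is stripped again */
	printf("dec dst = %u\n", aead_dst_len(80, 16, 16, 0));
	return 0;
}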
@@ -449,14 +661,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
449 n_cdesc++; 661 n_cdesc++;
450 662
451 if (n_cdesc == 1) { 663 if (n_cdesc == 1) {
452 safexcel_context_control(ctx, base, sreq, cdesc); 664 first_cdesc = cdesc;
453 if (ctx->aead)
454 safexcel_aead_token(ctx, iv, cdesc,
455 sreq->direction, cryptlen,
456 assoclen, digestsize);
457 else
458 safexcel_skcipher_token(ctx, iv, cdesc,
459 cryptlen);
460 } 665 }
461 666
462 queued -= len; 667 queued -= len;
@@ -464,23 +669,83 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
464 break; 669 break;
465 } 670 }
466 671
672 if (unlikely(!n_cdesc)) {
673 /*
674 * Special case: zero length input buffer.
675 * The engine always needs the 1st command descriptor, however!
676 */
677 first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
678 ctx->base.ctxr_dma);
679 n_cdesc = 1;
680 }
681
682 /* Add context control words and token to first command descriptor */
683 safexcel_context_control(ctx, base, sreq, first_cdesc);
684 if (ctx->aead)
685 safexcel_aead_token(ctx, iv, first_cdesc,
686 sreq->direction, cryptlen,
687 assoclen, digestsize);
688 else
689 safexcel_skcipher_token(ctx, iv, first_cdesc,
690 cryptlen);
691
467 /* result descriptors */ 692 /* result descriptors */
468 for_each_sg(dst, sg, nr_dst, i) { 693 for_each_sg(dst, sg, sreq->nr_dst, i) {
469 bool first = !i, last = sg_is_last(sg); 694 bool last = (i == sreq->nr_dst - 1);
470 u32 len = sg_dma_len(sg); 695 u32 len = sg_dma_len(sg);
471 696
472 rdesc = safexcel_add_rdesc(priv, ring, first, last, 697 /* only allow the part of the buffer we know we need */
473 sg_dma_address(sg), len); 698 if (len > totlen_dst)
699 len = totlen_dst;
700 if (unlikely(!len))
701 break;
702 totlen_dst -= len;
703
704 /* skip over AAD space in buffer - not written */
705 if (assoclen) {
706 if (assoclen >= len) {
707 assoclen -= len;
708 continue;
709 }
710 rdesc = safexcel_add_rdesc(priv, ring, first, last,
711 sg_dma_address(sg) +
712 assoclen,
713 len - assoclen);
714 assoclen = 0;
715 } else {
716 rdesc = safexcel_add_rdesc(priv, ring, first, last,
717 sg_dma_address(sg),
718 len);
719 }
474 if (IS_ERR(rdesc)) { 720 if (IS_ERR(rdesc)) {
475 /* No space left in the result descriptor ring */ 721 /* No space left in the result descriptor ring */
476 ret = PTR_ERR(rdesc); 722 ret = PTR_ERR(rdesc);
477 goto rdesc_rollback; 723 goto rdesc_rollback;
478 } 724 }
479 if (first) 725 if (first) {
480 first_rdesc = rdesc; 726 first_rdesc = rdesc;
727 first = false;
728 }
481 n_rdesc++; 729 n_rdesc++;
482 } 730 }
483 731
732 if (unlikely(first)) {
733 /*
734 * Special case: AEAD decrypt with only AAD data.
735 * In this case there is NO output data from the engine,
736 * but the engine still needs a result descriptor!
737 * Create a dummy one just for catching the result token.
738 */
739 rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
740 if (IS_ERR(rdesc)) {
741 /* No space left in the result descriptor ring */
742 ret = PTR_ERR(rdesc);
743 goto rdesc_rollback;
744 }
745 first_rdesc = rdesc;
746 n_rdesc = 1;
747 }
748
484 safexcel_rdr_req_set(priv, ring, first_rdesc, base); 749 safexcel_rdr_req_set(priv, ring, first_rdesc, base);
485 750
486 *commands = n_cdesc; 751 *commands = n_cdesc;
@@ -495,10 +760,10 @@ cdesc_rollback:
495 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); 760 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
496 761
497 if (src == dst) { 762 if (src == dst) {
498 dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL); 763 dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
499 } else { 764 } else {
500 dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); 765 dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
501 dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE); 766 dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
502 } 767 }
503 768
504 return ret; 769 return ret;
@@ -570,7 +835,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
570{ 835{
571 struct skcipher_request *req = skcipher_request_cast(async); 836 struct skcipher_request *req = skcipher_request_cast(async);
572 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); 837 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
573 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
574 int err; 838 int err;
575 839
576 if (sreq->needs_inv) { 840 if (sreq->needs_inv) {
@@ -581,24 +845,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
581 err = safexcel_handle_req_result(priv, ring, async, req->src, 845 err = safexcel_handle_req_result(priv, ring, async, req->src,
582 req->dst, req->cryptlen, sreq, 846 req->dst, req->cryptlen, sreq,
583 should_complete, ret); 847 should_complete, ret);
584
585 if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
586 u32 block_sz = 0;
587
588 switch (ctx->alg) {
589 case SAFEXCEL_DES:
590 block_sz = DES_BLOCK_SIZE;
591 break;
592 case SAFEXCEL_3DES:
593 block_sz = DES3_EDE_BLOCK_SIZE;
594 break;
595 case SAFEXCEL_AES:
596 block_sz = AES_BLOCK_SIZE;
597 break;
598 }
599
600 memcpy(req->iv, ctx->base.ctxr->data, block_sz);
601 }
602 } 848 }
603 849
604 return err; 850 return err;
@@ -656,12 +902,22 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
656 902
657 BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); 903 BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
658 904
659 if (sreq->needs_inv) 905 if (sreq->needs_inv) {
660 ret = safexcel_cipher_send_inv(async, ring, commands, results); 906 ret = safexcel_cipher_send_inv(async, ring, commands, results);
661 else 907 } else {
908 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
909 u8 input_iv[AES_BLOCK_SIZE];
910
911 /*
912 * Save input IV in case of CBC decrypt mode
913 * Will be overwritten with output IV prior to use!
914 */
915 memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
916
662 ret = safexcel_send_req(async, ring, sreq, req->src, 917 ret = safexcel_send_req(async, ring, sreq, req->src,
663 req->dst, req->cryptlen, 0, 0, req->iv, 918 req->dst, req->cryptlen, 0, 0, input_iv,
664 commands, results); 919 commands, results);
920 }
665 921
666 sreq->rdescs = *results; 922 sreq->rdescs = *results;
667 return ret; 923 return ret;
@@ -756,8 +1012,7 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
756 1012
757static int safexcel_queue_req(struct crypto_async_request *base, 1013static int safexcel_queue_req(struct crypto_async_request *base,
758 struct safexcel_cipher_req *sreq, 1014 struct safexcel_cipher_req *sreq,
759 enum safexcel_cipher_direction dir, u32 mode, 1015 enum safexcel_cipher_direction dir)
760 enum safexcel_cipher_alg alg)
761{ 1016{
762 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); 1017 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
763 struct safexcel_crypto_priv *priv = ctx->priv; 1018 struct safexcel_crypto_priv *priv = ctx->priv;
@@ -765,8 +1020,6 @@ static int safexcel_queue_req(struct crypto_async_request *base,
765 1020
766 sreq->needs_inv = false; 1021 sreq->needs_inv = false;
767 sreq->direction = dir; 1022 sreq->direction = dir;
768 ctx->alg = alg;
769 ctx->mode = mode;
770 1023
771 if (ctx->base.ctxr) { 1024 if (ctx->base.ctxr) {
772 if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) { 1025 if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
@@ -794,18 +1047,16 @@ static int safexcel_queue_req(struct crypto_async_request *base,
794 return ret; 1047 return ret;
795} 1048}
796 1049
797static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) 1050static int safexcel_encrypt(struct skcipher_request *req)
798{ 1051{
799 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1052 return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
800 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, 1053 SAFEXCEL_ENCRYPT);
801 SAFEXCEL_AES);
802} 1054}
803 1055
804static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) 1056static int safexcel_decrypt(struct skcipher_request *req)
805{ 1057{
806 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1058 return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
807 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, 1059 SAFEXCEL_DECRYPT);
808 SAFEXCEL_AES);
809} 1060}
810 1061
811static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) 1062static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -879,104 +1130,234 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
879 } 1130 }
880} 1131}
881 1132
1133static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
1134{
1135 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1136
1137 safexcel_skcipher_cra_init(tfm);
1138 ctx->alg = SAFEXCEL_AES;
1139 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
1140 return 0;
1141}
1142
882struct safexcel_alg_template safexcel_alg_ecb_aes = { 1143struct safexcel_alg_template safexcel_alg_ecb_aes = {
883 .type = SAFEXCEL_ALG_TYPE_SKCIPHER, 1144 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
884 .engines = EIP97IES | EIP197B | EIP197D, 1145 .algo_mask = SAFEXCEL_ALG_AES,
885 .alg.skcipher = { 1146 .alg.skcipher = {
886 .setkey = safexcel_skcipher_aes_setkey, 1147 .setkey = safexcel_skcipher_aes_setkey,
887 .encrypt = safexcel_ecb_aes_encrypt, 1148 .encrypt = safexcel_encrypt,
888 .decrypt = safexcel_ecb_aes_decrypt, 1149 .decrypt = safexcel_decrypt,
889 .min_keysize = AES_MIN_KEY_SIZE, 1150 .min_keysize = AES_MIN_KEY_SIZE,
890 .max_keysize = AES_MAX_KEY_SIZE, 1151 .max_keysize = AES_MAX_KEY_SIZE,
891 .base = { 1152 .base = {
892 .cra_name = "ecb(aes)", 1153 .cra_name = "ecb(aes)",
893 .cra_driver_name = "safexcel-ecb-aes", 1154 .cra_driver_name = "safexcel-ecb-aes",
894 .cra_priority = 300, 1155 .cra_priority = SAFEXCEL_CRA_PRIORITY,
895 .cra_flags = CRYPTO_ALG_ASYNC | 1156 .cra_flags = CRYPTO_ALG_ASYNC |
896 CRYPTO_ALG_KERN_DRIVER_ONLY, 1157 CRYPTO_ALG_KERN_DRIVER_ONLY,
897 .cra_blocksize = AES_BLOCK_SIZE, 1158 .cra_blocksize = AES_BLOCK_SIZE,
898 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 1159 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
899 .cra_alignmask = 0, 1160 .cra_alignmask = 0,
900 .cra_init = safexcel_skcipher_cra_init, 1161 .cra_init = safexcel_skcipher_aes_ecb_cra_init,
901 .cra_exit = safexcel_skcipher_cra_exit, 1162 .cra_exit = safexcel_skcipher_cra_exit,
902 .cra_module = THIS_MODULE, 1163 .cra_module = THIS_MODULE,
903 }, 1164 },
904 }, 1165 },
905}; 1166};
906 1167
907static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) 1168static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
908{ 1169{
909 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1170 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
910 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
911 SAFEXCEL_AES);
912}
913 1171
914static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) 1172 safexcel_skcipher_cra_init(tfm);
915{ 1173 ctx->alg = SAFEXCEL_AES;
916 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1174 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
917 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, 1175 return 0;
918 SAFEXCEL_AES);
919} 1176}
920 1177
921struct safexcel_alg_template safexcel_alg_cbc_aes = { 1178struct safexcel_alg_template safexcel_alg_cbc_aes = {
922 .type = SAFEXCEL_ALG_TYPE_SKCIPHER, 1179 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
923 .engines = EIP97IES | EIP197B | EIP197D, 1180 .algo_mask = SAFEXCEL_ALG_AES,
924 .alg.skcipher = { 1181 .alg.skcipher = {
925 .setkey = safexcel_skcipher_aes_setkey, 1182 .setkey = safexcel_skcipher_aes_setkey,
926 .encrypt = safexcel_cbc_aes_encrypt, 1183 .encrypt = safexcel_encrypt,
927 .decrypt = safexcel_cbc_aes_decrypt, 1184 .decrypt = safexcel_decrypt,
928 .min_keysize = AES_MIN_KEY_SIZE, 1185 .min_keysize = AES_MIN_KEY_SIZE,
929 .max_keysize = AES_MAX_KEY_SIZE, 1186 .max_keysize = AES_MAX_KEY_SIZE,
930 .ivsize = AES_BLOCK_SIZE, 1187 .ivsize = AES_BLOCK_SIZE,
931 .base = { 1188 .base = {
932 .cra_name = "cbc(aes)", 1189 .cra_name = "cbc(aes)",
933 .cra_driver_name = "safexcel-cbc-aes", 1190 .cra_driver_name = "safexcel-cbc-aes",
934 .cra_priority = 300, 1191 .cra_priority = SAFEXCEL_CRA_PRIORITY,
935 .cra_flags = CRYPTO_ALG_ASYNC | 1192 .cra_flags = CRYPTO_ALG_ASYNC |
936 CRYPTO_ALG_KERN_DRIVER_ONLY, 1193 CRYPTO_ALG_KERN_DRIVER_ONLY,
937 .cra_blocksize = AES_BLOCK_SIZE, 1194 .cra_blocksize = AES_BLOCK_SIZE,
938 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 1195 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
939 .cra_alignmask = 0, 1196 .cra_alignmask = 0,
940 .cra_init = safexcel_skcipher_cra_init, 1197 .cra_init = safexcel_skcipher_aes_cbc_cra_init,
941 .cra_exit = safexcel_skcipher_cra_exit, 1198 .cra_exit = safexcel_skcipher_cra_exit,
942 .cra_module = THIS_MODULE, 1199 .cra_module = THIS_MODULE,
943 }, 1200 },
944 }, 1201 },
945}; 1202};
946 1203
947static int safexcel_cbc_des_encrypt(struct skcipher_request *req) 1204static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
948{ 1205{
949 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1206 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
950 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, 1207
951 SAFEXCEL_DES); 1208 safexcel_skcipher_cra_init(tfm);
1209 ctx->alg = SAFEXCEL_AES;
1210 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
1211 return 0;
952} 1212}
953 1213
954static int safexcel_cbc_des_decrypt(struct skcipher_request *req) 1214struct safexcel_alg_template safexcel_alg_cfb_aes = {
1215 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
1216 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
1217 .alg.skcipher = {
1218 .setkey = safexcel_skcipher_aes_setkey,
1219 .encrypt = safexcel_encrypt,
1220 .decrypt = safexcel_decrypt,
1221 .min_keysize = AES_MIN_KEY_SIZE,
1222 .max_keysize = AES_MAX_KEY_SIZE,
1223 .ivsize = AES_BLOCK_SIZE,
1224 .base = {
1225 .cra_name = "cfb(aes)",
1226 .cra_driver_name = "safexcel-cfb-aes",
1227 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1228 .cra_flags = CRYPTO_ALG_ASYNC |
1229 CRYPTO_ALG_KERN_DRIVER_ONLY,
1230 .cra_blocksize = 1,
1231 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1232 .cra_alignmask = 0,
1233 .cra_init = safexcel_skcipher_aes_cfb_cra_init,
1234 .cra_exit = safexcel_skcipher_cra_exit,
1235 .cra_module = THIS_MODULE,
1236 },
1237 },
1238};
1239
1240static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
955{ 1241{
956 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1242 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
957 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, 1243
958 SAFEXCEL_DES); 1244 safexcel_skcipher_cra_init(tfm);
1245 ctx->alg = SAFEXCEL_AES;
1246 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
1247 return 0;
959} 1248}
960 1249
961static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, 1250struct safexcel_alg_template safexcel_alg_ofb_aes = {
962 unsigned int len) 1251 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
1252 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
1253 .alg.skcipher = {
1254 .setkey = safexcel_skcipher_aes_setkey,
1255 .encrypt = safexcel_encrypt,
1256 .decrypt = safexcel_decrypt,
1257 .min_keysize = AES_MIN_KEY_SIZE,
1258 .max_keysize = AES_MAX_KEY_SIZE,
1259 .ivsize = AES_BLOCK_SIZE,
1260 .base = {
1261 .cra_name = "ofb(aes)",
1262 .cra_driver_name = "safexcel-ofb-aes",
1263 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1264 .cra_flags = CRYPTO_ALG_ASYNC |
1265 CRYPTO_ALG_KERN_DRIVER_ONLY,
1266 .cra_blocksize = 1,
1267 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1268 .cra_alignmask = 0,
1269 .cra_init = safexcel_skcipher_aes_ofb_cra_init,
1270 .cra_exit = safexcel_skcipher_cra_exit,
1271 .cra_module = THIS_MODULE,
1272 },
1273 },
1274};
1275
1276static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
1277 const u8 *key, unsigned int len)
963{ 1278{
964 struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); 1279 struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
965 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 1280 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
966 u32 tmp[DES_EXPKEY_WORDS]; 1281 struct safexcel_crypto_priv *priv = ctx->priv;
967 int ret; 1282 struct crypto_aes_ctx aes;
1283 int ret, i;
1284 unsigned int keylen;
968 1285
969 if (len != DES_KEY_SIZE) { 1286 /* last 4 bytes of key are the nonce! */
1287 ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
1288 /* exclude the nonce here */
1289 keylen = len - CTR_RFC3686_NONCE_SIZE;
1290 ret = aes_expandkey(&aes, key, keylen);
1291 if (ret) {
970 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1292 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
971 return -EINVAL; 1293 return ret;
972 } 1294 }
973 1295
974 ret = des_ekey(tmp, key); 1296 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
975 if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 1297 for (i = 0; i < keylen / sizeof(u32); i++) {
976 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 1298 if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
977 return -EINVAL; 1299 ctx->base.needs_inv = true;
1300 break;
1301 }
1302 }
978 } 1303 }
979 1304
1305 for (i = 0; i < keylen / sizeof(u32); i++)
1306 ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
1307
1308 ctx->key_len = keylen;
1309
1310 memzero_explicit(&aes, sizeof(aes));
1311 return 0;
1312}
1313
1314static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
1315{
1316 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1317
1318 safexcel_skcipher_cra_init(tfm);
1319 ctx->alg = SAFEXCEL_AES;
1320 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
1321 return 0;
1322}
1323
1324struct safexcel_alg_template safexcel_alg_ctr_aes = {
1325 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
1326 .algo_mask = SAFEXCEL_ALG_AES,
1327 .alg.skcipher = {
1328 .setkey = safexcel_skcipher_aesctr_setkey,
1329 .encrypt = safexcel_encrypt,
1330 .decrypt = safexcel_decrypt,
1331 /* Add nonce size */
1332 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1333 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1334 .ivsize = CTR_RFC3686_IV_SIZE,
1335 .base = {
1336 .cra_name = "rfc3686(ctr(aes))",
1337 .cra_driver_name = "safexcel-ctr-aes",
1338 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1339 .cra_flags = CRYPTO_ALG_ASYNC |
1340 CRYPTO_ALG_KERN_DRIVER_ONLY,
1341 .cra_blocksize = 1,
1342 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1343 .cra_alignmask = 0,
1344 .cra_init = safexcel_skcipher_aes_ctr_cra_init,
1345 .cra_exit = safexcel_skcipher_cra_exit,
1346 .cra_module = THIS_MODULE,
1347 },
1348 },
1349};
1350
1351static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
1352 unsigned int len)
1353{
1354 struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
1355 int ret;
1356
1357 ret = verify_skcipher_des_key(ctfm, key);
1358 if (ret)
1359 return ret;
1360
 980	/* if context exists and key changed, need to invalidate it */	1361	/* if context exists and key changed, need to invalidate it */
981 if (ctx->base.ctxr_dma) 1362 if (ctx->base.ctxr_dma)
982 if (memcmp(ctx->key, key, len)) 1363 if (memcmp(ctx->key, key, len))
@@ -988,94 +1369,85 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
988 return 0; 1369 return 0;
989} 1370}
990 1371
1372static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
1373{
1374 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1375
1376 safexcel_skcipher_cra_init(tfm);
1377 ctx->alg = SAFEXCEL_DES;
1378 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
1379 return 0;
1380}
1381
991struct safexcel_alg_template safexcel_alg_cbc_des = { 1382struct safexcel_alg_template safexcel_alg_cbc_des = {
992 .type = SAFEXCEL_ALG_TYPE_SKCIPHER, 1383 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
993 .engines = EIP97IES | EIP197B | EIP197D, 1384 .algo_mask = SAFEXCEL_ALG_DES,
994 .alg.skcipher = { 1385 .alg.skcipher = {
995 .setkey = safexcel_des_setkey, 1386 .setkey = safexcel_des_setkey,
996 .encrypt = safexcel_cbc_des_encrypt, 1387 .encrypt = safexcel_encrypt,
997 .decrypt = safexcel_cbc_des_decrypt, 1388 .decrypt = safexcel_decrypt,
998 .min_keysize = DES_KEY_SIZE, 1389 .min_keysize = DES_KEY_SIZE,
999 .max_keysize = DES_KEY_SIZE, 1390 .max_keysize = DES_KEY_SIZE,
1000 .ivsize = DES_BLOCK_SIZE, 1391 .ivsize = DES_BLOCK_SIZE,
1001 .base = { 1392 .base = {
1002 .cra_name = "cbc(des)", 1393 .cra_name = "cbc(des)",
1003 .cra_driver_name = "safexcel-cbc-des", 1394 .cra_driver_name = "safexcel-cbc-des",
1004 .cra_priority = 300, 1395 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1005 .cra_flags = CRYPTO_ALG_ASYNC | 1396 .cra_flags = CRYPTO_ALG_ASYNC |
1006 CRYPTO_ALG_KERN_DRIVER_ONLY, 1397 CRYPTO_ALG_KERN_DRIVER_ONLY,
1007 .cra_blocksize = DES_BLOCK_SIZE, 1398 .cra_blocksize = DES_BLOCK_SIZE,
1008 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 1399 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1009 .cra_alignmask = 0, 1400 .cra_alignmask = 0,
1010 .cra_init = safexcel_skcipher_cra_init, 1401 .cra_init = safexcel_skcipher_des_cbc_cra_init,
1011 .cra_exit = safexcel_skcipher_cra_exit, 1402 .cra_exit = safexcel_skcipher_cra_exit,
1012 .cra_module = THIS_MODULE, 1403 .cra_module = THIS_MODULE,
1013 }, 1404 },
1014 }, 1405 },
1015}; 1406};
1016 1407
1017static int safexcel_ecb_des_encrypt(struct skcipher_request *req) 1408static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
1018{ 1409{
1019 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1410 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1020 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
1021 SAFEXCEL_DES);
1022}
1023 1411
1024static int safexcel_ecb_des_decrypt(struct skcipher_request *req) 1412 safexcel_skcipher_cra_init(tfm);
1025{ 1413 ctx->alg = SAFEXCEL_DES;
1026 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1414 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
1027 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, 1415 return 0;
1028 SAFEXCEL_DES);
1029} 1416}
1030 1417
1031struct safexcel_alg_template safexcel_alg_ecb_des = { 1418struct safexcel_alg_template safexcel_alg_ecb_des = {
1032 .type = SAFEXCEL_ALG_TYPE_SKCIPHER, 1419 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
1033 .engines = EIP97IES | EIP197B | EIP197D, 1420 .algo_mask = SAFEXCEL_ALG_DES,
1034 .alg.skcipher = { 1421 .alg.skcipher = {
1035 .setkey = safexcel_des_setkey, 1422 .setkey = safexcel_des_setkey,
1036 .encrypt = safexcel_ecb_des_encrypt, 1423 .encrypt = safexcel_encrypt,
1037 .decrypt = safexcel_ecb_des_decrypt, 1424 .decrypt = safexcel_decrypt,
1038 .min_keysize = DES_KEY_SIZE, 1425 .min_keysize = DES_KEY_SIZE,
1039 .max_keysize = DES_KEY_SIZE, 1426 .max_keysize = DES_KEY_SIZE,
1040 .ivsize = DES_BLOCK_SIZE,
1041 .base = { 1427 .base = {
1042 .cra_name = "ecb(des)", 1428 .cra_name = "ecb(des)",
1043 .cra_driver_name = "safexcel-ecb-des", 1429 .cra_driver_name = "safexcel-ecb-des",
1044 .cra_priority = 300, 1430 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1045 .cra_flags = CRYPTO_ALG_ASYNC | 1431 .cra_flags = CRYPTO_ALG_ASYNC |
1046 CRYPTO_ALG_KERN_DRIVER_ONLY, 1432 CRYPTO_ALG_KERN_DRIVER_ONLY,
1047 .cra_blocksize = DES_BLOCK_SIZE, 1433 .cra_blocksize = DES_BLOCK_SIZE,
1048 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 1434 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1049 .cra_alignmask = 0, 1435 .cra_alignmask = 0,
1050 .cra_init = safexcel_skcipher_cra_init, 1436 .cra_init = safexcel_skcipher_des_ecb_cra_init,
1051 .cra_exit = safexcel_skcipher_cra_exit, 1437 .cra_exit = safexcel_skcipher_cra_exit,
1052 .cra_module = THIS_MODULE, 1438 .cra_module = THIS_MODULE,
1053 }, 1439 },
1054 }, 1440 },
1055}; 1441};
1056 1442
1057static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
1058{
1059 return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
1060 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
1061 SAFEXCEL_3DES);
1062}
1063
1064static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
1065{
1066 return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
1067 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
1068 SAFEXCEL_3DES);
1069}
1070
1071static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, 1443static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
1072 const u8 *key, unsigned int len) 1444 const u8 *key, unsigned int len)
1073{ 1445{
1074 struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); 1446 struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
1075 int err; 1447 int err;
1076 1448
1077 err = des3_verify_key(ctfm, key); 1449 err = verify_skcipher_des3_key(ctfm, key);
1078 if (unlikely(err)) 1450 if (err)
1079 return err; 1451 return err;
1080 1452
1081	/* if context exists and key changed, need to invalidate it */	1453	/* if context exists and key changed, need to invalidate it */
@@ -1091,66 +1463,71 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
1091 return 0; 1463 return 0;
1092} 1464}
1093 1465
1466static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
1467{
1468 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1469
1470 safexcel_skcipher_cra_init(tfm);
1471 ctx->alg = SAFEXCEL_3DES;
1472 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
1473 return 0;
1474}
1475
1094struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { 1476struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
1095 .type = SAFEXCEL_ALG_TYPE_SKCIPHER, 1477 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
1096 .engines = EIP97IES | EIP197B | EIP197D, 1478 .algo_mask = SAFEXCEL_ALG_DES,
1097 .alg.skcipher = { 1479 .alg.skcipher = {
1098 .setkey = safexcel_des3_ede_setkey, 1480 .setkey = safexcel_des3_ede_setkey,
1099 .encrypt = safexcel_cbc_des3_ede_encrypt, 1481 .encrypt = safexcel_encrypt,
1100 .decrypt = safexcel_cbc_des3_ede_decrypt, 1482 .decrypt = safexcel_decrypt,
1101 .min_keysize = DES3_EDE_KEY_SIZE, 1483 .min_keysize = DES3_EDE_KEY_SIZE,
1102 .max_keysize = DES3_EDE_KEY_SIZE, 1484 .max_keysize = DES3_EDE_KEY_SIZE,
1103 .ivsize = DES3_EDE_BLOCK_SIZE, 1485 .ivsize = DES3_EDE_BLOCK_SIZE,
1104 .base = { 1486 .base = {
1105 .cra_name = "cbc(des3_ede)", 1487 .cra_name = "cbc(des3_ede)",
1106 .cra_driver_name = "safexcel-cbc-des3_ede", 1488 .cra_driver_name = "safexcel-cbc-des3_ede",
1107 .cra_priority = 300, 1489 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1108 .cra_flags = CRYPTO_ALG_ASYNC | 1490 .cra_flags = CRYPTO_ALG_ASYNC |
1109 CRYPTO_ALG_KERN_DRIVER_ONLY, 1491 CRYPTO_ALG_KERN_DRIVER_ONLY,
1110 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1492 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1111 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 1493 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1112 .cra_alignmask = 0, 1494 .cra_alignmask = 0,
1113 .cra_init = safexcel_skcipher_cra_init, 1495 .cra_init = safexcel_skcipher_des3_cbc_cra_init,
1114 .cra_exit = safexcel_skcipher_cra_exit, 1496 .cra_exit = safexcel_skcipher_cra_exit,
1115 .cra_module = THIS_MODULE, 1497 .cra_module = THIS_MODULE,
1116 }, 1498 },
1117 }, 1499 },
1118}; 1500};
1119 1501
1120static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req) 1502static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
1121{ 1503{
1122 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1504 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1123 SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
1124 SAFEXCEL_3DES);
1125}
1126 1505
1127static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req) 1506 safexcel_skcipher_cra_init(tfm);
1128{ 1507 ctx->alg = SAFEXCEL_3DES;
1129 return safexcel_queue_req(&req->base, skcipher_request_ctx(req), 1508 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
1130 SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, 1509 return 0;
1131 SAFEXCEL_3DES);
1132} 1510}
1133 1511
1134struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { 1512struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
1135 .type = SAFEXCEL_ALG_TYPE_SKCIPHER, 1513 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
1136 .engines = EIP97IES | EIP197B | EIP197D, 1514 .algo_mask = SAFEXCEL_ALG_DES,
1137 .alg.skcipher = { 1515 .alg.skcipher = {
1138 .setkey = safexcel_des3_ede_setkey, 1516 .setkey = safexcel_des3_ede_setkey,
1139 .encrypt = safexcel_ecb_des3_ede_encrypt, 1517 .encrypt = safexcel_encrypt,
1140 .decrypt = safexcel_ecb_des3_ede_decrypt, 1518 .decrypt = safexcel_decrypt,
1141 .min_keysize = DES3_EDE_KEY_SIZE, 1519 .min_keysize = DES3_EDE_KEY_SIZE,
1142 .max_keysize = DES3_EDE_KEY_SIZE, 1520 .max_keysize = DES3_EDE_KEY_SIZE,
1143 .ivsize = DES3_EDE_BLOCK_SIZE,
1144 .base = { 1521 .base = {
1145 .cra_name = "ecb(des3_ede)", 1522 .cra_name = "ecb(des3_ede)",
1146 .cra_driver_name = "safexcel-ecb-des3_ede", 1523 .cra_driver_name = "safexcel-ecb-des3_ede",
1147 .cra_priority = 300, 1524 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1148 .cra_flags = CRYPTO_ALG_ASYNC | 1525 .cra_flags = CRYPTO_ALG_ASYNC |
1149 CRYPTO_ALG_KERN_DRIVER_ONLY, 1526 CRYPTO_ALG_KERN_DRIVER_ONLY,
1150 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1527 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1151 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), 1528 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1152 .cra_alignmask = 0, 1529 .cra_alignmask = 0,
1153 .cra_init = safexcel_skcipher_cra_init, 1530 .cra_init = safexcel_skcipher_des3_ecb_cra_init,
1154 .cra_exit = safexcel_skcipher_cra_exit, 1531 .cra_exit = safexcel_skcipher_cra_exit,
1155 .cra_module = THIS_MODULE, 1532 .cra_module = THIS_MODULE,
1156 }, 1533 },
@@ -1161,16 +1538,14 @@ static int safexcel_aead_encrypt(struct aead_request *req)
1161{ 1538{
1162 struct safexcel_cipher_req *creq = aead_request_ctx(req); 1539 struct safexcel_cipher_req *creq = aead_request_ctx(req);
1163 1540
1164 return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, 1541 return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
1165 CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
1166} 1542}
1167 1543
1168static int safexcel_aead_decrypt(struct aead_request *req) 1544static int safexcel_aead_decrypt(struct aead_request *req)
1169{ 1545{
1170 struct safexcel_cipher_req *creq = aead_request_ctx(req); 1546 struct safexcel_cipher_req *creq = aead_request_ctx(req);
1171 1547
1172 return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, 1548 return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
1173 CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
1174} 1549}
1175 1550
1176static int safexcel_aead_cra_init(struct crypto_tfm *tfm) 1551static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
@@ -1185,6 +1560,8 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
1185 1560
1186 ctx->priv = tmpl->priv; 1561 ctx->priv = tmpl->priv;
1187 1562
1563 ctx->alg = SAFEXCEL_AES; /* default */
1564 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
1188 ctx->aead = true; 1565 ctx->aead = true;
1189 ctx->base.send = safexcel_aead_send; 1566 ctx->base.send = safexcel_aead_send;
1190 ctx->base.handle_result = safexcel_aead_handle_result; 1567 ctx->base.handle_result = safexcel_aead_handle_result;
@@ -1203,9 +1580,9 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
1203 1580
1204struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { 1581struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
1205 .type = SAFEXCEL_ALG_TYPE_AEAD, 1582 .type = SAFEXCEL_ALG_TYPE_AEAD,
1206 .engines = EIP97IES | EIP197B | EIP197D, 1583 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1,
1207 .alg.aead = { 1584 .alg.aead = {
1208 .setkey = safexcel_aead_aes_setkey, 1585 .setkey = safexcel_aead_setkey,
1209 .encrypt = safexcel_aead_encrypt, 1586 .encrypt = safexcel_aead_encrypt,
1210 .decrypt = safexcel_aead_decrypt, 1587 .decrypt = safexcel_aead_decrypt,
1211 .ivsize = AES_BLOCK_SIZE, 1588 .ivsize = AES_BLOCK_SIZE,
@@ -1213,7 +1590,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
1213 .base = { 1590 .base = {
1214 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1591 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1215 .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", 1592 .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
1216 .cra_priority = 300, 1593 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1217 .cra_flags = CRYPTO_ALG_ASYNC | 1594 .cra_flags = CRYPTO_ALG_ASYNC |
1218 CRYPTO_ALG_KERN_DRIVER_ONLY, 1595 CRYPTO_ALG_KERN_DRIVER_ONLY,
1219 .cra_blocksize = AES_BLOCK_SIZE, 1596 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1238,9 +1615,9 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
1238 1615
1239struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { 1616struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
1240 .type = SAFEXCEL_ALG_TYPE_AEAD, 1617 .type = SAFEXCEL_ALG_TYPE_AEAD,
1241 .engines = EIP97IES | EIP197B | EIP197D, 1618 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
1242 .alg.aead = { 1619 .alg.aead = {
1243 .setkey = safexcel_aead_aes_setkey, 1620 .setkey = safexcel_aead_setkey,
1244 .encrypt = safexcel_aead_encrypt, 1621 .encrypt = safexcel_aead_encrypt,
1245 .decrypt = safexcel_aead_decrypt, 1622 .decrypt = safexcel_aead_decrypt,
1246 .ivsize = AES_BLOCK_SIZE, 1623 .ivsize = AES_BLOCK_SIZE,
@@ -1248,7 +1625,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
1248 .base = { 1625 .base = {
1249 .cra_name = "authenc(hmac(sha256),cbc(aes))", 1626 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1250 .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", 1627 .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
1251 .cra_priority = 300, 1628 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1252 .cra_flags = CRYPTO_ALG_ASYNC | 1629 .cra_flags = CRYPTO_ALG_ASYNC |
1253 CRYPTO_ALG_KERN_DRIVER_ONLY, 1630 CRYPTO_ALG_KERN_DRIVER_ONLY,
1254 .cra_blocksize = AES_BLOCK_SIZE, 1631 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1273,9 +1650,9 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
1273 1650
1274struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { 1651struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
1275 .type = SAFEXCEL_ALG_TYPE_AEAD, 1652 .type = SAFEXCEL_ALG_TYPE_AEAD,
1276 .engines = EIP97IES | EIP197B | EIP197D, 1653 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
1277 .alg.aead = { 1654 .alg.aead = {
1278 .setkey = safexcel_aead_aes_setkey, 1655 .setkey = safexcel_aead_setkey,
1279 .encrypt = safexcel_aead_encrypt, 1656 .encrypt = safexcel_aead_encrypt,
1280 .decrypt = safexcel_aead_decrypt, 1657 .decrypt = safexcel_aead_decrypt,
1281 .ivsize = AES_BLOCK_SIZE, 1658 .ivsize = AES_BLOCK_SIZE,
@@ -1283,7 +1660,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
1283 .base = { 1660 .base = {
1284 .cra_name = "authenc(hmac(sha224),cbc(aes))", 1661 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1285 .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", 1662 .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
1286 .cra_priority = 300, 1663 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1287 .cra_flags = CRYPTO_ALG_ASYNC | 1664 .cra_flags = CRYPTO_ALG_ASYNC |
1288 CRYPTO_ALG_KERN_DRIVER_ONLY, 1665 CRYPTO_ALG_KERN_DRIVER_ONLY,
1289 .cra_blocksize = AES_BLOCK_SIZE, 1666 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1308,9 +1685,9 @@ static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
1308 1685
1309struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { 1686struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
1310 .type = SAFEXCEL_ALG_TYPE_AEAD, 1687 .type = SAFEXCEL_ALG_TYPE_AEAD,
1311 .engines = EIP97IES | EIP197B | EIP197D, 1688 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
1312 .alg.aead = { 1689 .alg.aead = {
1313 .setkey = safexcel_aead_aes_setkey, 1690 .setkey = safexcel_aead_setkey,
1314 .encrypt = safexcel_aead_encrypt, 1691 .encrypt = safexcel_aead_encrypt,
1315 .decrypt = safexcel_aead_decrypt, 1692 .decrypt = safexcel_aead_decrypt,
1316 .ivsize = AES_BLOCK_SIZE, 1693 .ivsize = AES_BLOCK_SIZE,
@@ -1318,7 +1695,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
1318 .base = { 1695 .base = {
1319 .cra_name = "authenc(hmac(sha512),cbc(aes))", 1696 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1320 .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", 1697 .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
1321 .cra_priority = 300, 1698 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1322 .cra_flags = CRYPTO_ALG_ASYNC | 1699 .cra_flags = CRYPTO_ALG_ASYNC |
1323 CRYPTO_ALG_KERN_DRIVER_ONLY, 1700 CRYPTO_ALG_KERN_DRIVER_ONLY,
1324 .cra_blocksize = AES_BLOCK_SIZE, 1701 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1343,9 +1720,9 @@ static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
1343 1720
1344struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { 1721struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
1345 .type = SAFEXCEL_ALG_TYPE_AEAD, 1722 .type = SAFEXCEL_ALG_TYPE_AEAD,
1346 .engines = EIP97IES | EIP197B | EIP197D, 1723 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
1347 .alg.aead = { 1724 .alg.aead = {
1348 .setkey = safexcel_aead_aes_setkey, 1725 .setkey = safexcel_aead_setkey,
1349 .encrypt = safexcel_aead_encrypt, 1726 .encrypt = safexcel_aead_encrypt,
1350 .decrypt = safexcel_aead_decrypt, 1727 .decrypt = safexcel_aead_decrypt,
1351 .ivsize = AES_BLOCK_SIZE, 1728 .ivsize = AES_BLOCK_SIZE,
@@ -1353,7 +1730,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
1353 .base = { 1730 .base = {
1354 .cra_name = "authenc(hmac(sha384),cbc(aes))", 1731 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1355 .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", 1732 .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
1356 .cra_priority = 300, 1733 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1357 .cra_flags = CRYPTO_ALG_ASYNC | 1734 .cra_flags = CRYPTO_ALG_ASYNC |
1358 CRYPTO_ALG_KERN_DRIVER_ONLY, 1735 CRYPTO_ALG_KERN_DRIVER_ONLY,
1359 .cra_blocksize = AES_BLOCK_SIZE, 1736 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1365,3 +1742,564 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
1365 }, 1742 },
1366 }, 1743 },
1367}; 1744};
1745
1746static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
1747{
1748 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1749
1750 safexcel_aead_sha1_cra_init(tfm);
1751 ctx->alg = SAFEXCEL_3DES; /* override default */
1752 return 0;
1753}
1754
1755struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
1756 .type = SAFEXCEL_ALG_TYPE_AEAD,
1757 .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
1758 .alg.aead = {
1759 .setkey = safexcel_aead_setkey,
1760 .encrypt = safexcel_aead_encrypt,
1761 .decrypt = safexcel_aead_decrypt,
1762 .ivsize = DES3_EDE_BLOCK_SIZE,
1763 .maxauthsize = SHA1_DIGEST_SIZE,
1764 .base = {
1765 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1766 .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede",
1767 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1768 .cra_flags = CRYPTO_ALG_ASYNC |
1769 CRYPTO_ALG_KERN_DRIVER_ONLY,
1770 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1771 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1772 .cra_alignmask = 0,
1773 .cra_init = safexcel_aead_sha1_des3_cra_init,
1774 .cra_exit = safexcel_aead_cra_exit,
1775 .cra_module = THIS_MODULE,
1776 },
1777 },
1778};
1779
1780static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
1781{
1782 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1783
1784 safexcel_aead_sha1_cra_init(tfm);
1785 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
1786 return 0;
1787}
1788
1789struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = {
1790 .type = SAFEXCEL_ALG_TYPE_AEAD,
1791 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1,
1792 .alg.aead = {
1793 .setkey = safexcel_aead_setkey,
1794 .encrypt = safexcel_aead_encrypt,
1795 .decrypt = safexcel_aead_decrypt,
1796 .ivsize = CTR_RFC3686_IV_SIZE,
1797 .maxauthsize = SHA1_DIGEST_SIZE,
1798 .base = {
1799 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
1800 .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes",
1801 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1802 .cra_flags = CRYPTO_ALG_ASYNC |
1803 CRYPTO_ALG_KERN_DRIVER_ONLY,
1804 .cra_blocksize = 1,
1805 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1806 .cra_alignmask = 0,
1807 .cra_init = safexcel_aead_sha1_ctr_cra_init,
1808 .cra_exit = safexcel_aead_cra_exit,
1809 .cra_module = THIS_MODULE,
1810 },
1811 },
1812};
1813
1814static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm)
1815{
1816 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1817
1818 safexcel_aead_sha256_cra_init(tfm);
1819 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
1820 return 0;
1821}
1822
1823struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = {
1824 .type = SAFEXCEL_ALG_TYPE_AEAD,
1825 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
1826 .alg.aead = {
1827 .setkey = safexcel_aead_setkey,
1828 .encrypt = safexcel_aead_encrypt,
1829 .decrypt = safexcel_aead_decrypt,
1830 .ivsize = CTR_RFC3686_IV_SIZE,
1831 .maxauthsize = SHA256_DIGEST_SIZE,
1832 .base = {
1833 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
1834 .cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes",
1835 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1836 .cra_flags = CRYPTO_ALG_ASYNC |
1837 CRYPTO_ALG_KERN_DRIVER_ONLY,
1838 .cra_blocksize = 1,
1839 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1840 .cra_alignmask = 0,
1841 .cra_init = safexcel_aead_sha256_ctr_cra_init,
1842 .cra_exit = safexcel_aead_cra_exit,
1843 .cra_module = THIS_MODULE,
1844 },
1845 },
1846};
1847
1848static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm)
1849{
1850 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1851
1852 safexcel_aead_sha224_cra_init(tfm);
1853 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
1854 return 0;
1855}
1856
1857struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = {
1858 .type = SAFEXCEL_ALG_TYPE_AEAD,
1859 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
1860 .alg.aead = {
1861 .setkey = safexcel_aead_setkey,
1862 .encrypt = safexcel_aead_encrypt,
1863 .decrypt = safexcel_aead_decrypt,
1864 .ivsize = CTR_RFC3686_IV_SIZE,
1865 .maxauthsize = SHA224_DIGEST_SIZE,
1866 .base = {
1867 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
1868 .cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes",
1869 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1870 .cra_flags = CRYPTO_ALG_ASYNC |
1871 CRYPTO_ALG_KERN_DRIVER_ONLY,
1872 .cra_blocksize = 1,
1873 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1874 .cra_alignmask = 0,
1875 .cra_init = safexcel_aead_sha224_ctr_cra_init,
1876 .cra_exit = safexcel_aead_cra_exit,
1877 .cra_module = THIS_MODULE,
1878 },
1879 },
1880};
1881
1882static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm)
1883{
1884 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1885
1886 safexcel_aead_sha512_cra_init(tfm);
1887 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
1888 return 0;
1889}
1890
1891struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = {
1892 .type = SAFEXCEL_ALG_TYPE_AEAD,
1893 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
1894 .alg.aead = {
1895 .setkey = safexcel_aead_setkey,
1896 .encrypt = safexcel_aead_encrypt,
1897 .decrypt = safexcel_aead_decrypt,
1898 .ivsize = CTR_RFC3686_IV_SIZE,
1899 .maxauthsize = SHA512_DIGEST_SIZE,
1900 .base = {
1901 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
1902 .cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes",
1903 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1904 .cra_flags = CRYPTO_ALG_ASYNC |
1905 CRYPTO_ALG_KERN_DRIVER_ONLY,
1906 .cra_blocksize = 1,
1907 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1908 .cra_alignmask = 0,
1909 .cra_init = safexcel_aead_sha512_ctr_cra_init,
1910 .cra_exit = safexcel_aead_cra_exit,
1911 .cra_module = THIS_MODULE,
1912 },
1913 },
1914};
1915
1916static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm)
1917{
1918 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1919
1920 safexcel_aead_sha384_cra_init(tfm);
1921 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
1922 return 0;
1923}
1924
1925struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = {
1926 .type = SAFEXCEL_ALG_TYPE_AEAD,
1927 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
1928 .alg.aead = {
1929 .setkey = safexcel_aead_setkey,
1930 .encrypt = safexcel_aead_encrypt,
1931 .decrypt = safexcel_aead_decrypt,
1932 .ivsize = CTR_RFC3686_IV_SIZE,
1933 .maxauthsize = SHA384_DIGEST_SIZE,
1934 .base = {
1935 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
1936 .cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes",
1937 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1938 .cra_flags = CRYPTO_ALG_ASYNC |
1939 CRYPTO_ALG_KERN_DRIVER_ONLY,
1940 .cra_blocksize = 1,
1941 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
1942 .cra_alignmask = 0,
1943 .cra_init = safexcel_aead_sha384_ctr_cra_init,
1944 .cra_exit = safexcel_aead_cra_exit,
1945 .cra_module = THIS_MODULE,
1946 },
1947 },
1948};
1949
1950static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
1951 const u8 *key, unsigned int len)
1952{
1953 struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
1954 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
1955 struct safexcel_crypto_priv *priv = ctx->priv;
1956 struct crypto_aes_ctx aes;
1957 int ret, i;
1958 unsigned int keylen;
1959
1960 /* Check for illegal XTS keys */
1961 ret = xts_verify_key(ctfm, key, len);
1962 if (ret)
1963 return ret;
1964
1965 /* Only half of the key data is cipher key */
1966 keylen = (len >> 1);
1967 ret = aes_expandkey(&aes, key, keylen);
1968 if (ret) {
1969 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1970 return ret;
1971 }
1972
1973 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
1974 for (i = 0; i < keylen / sizeof(u32); i++) {
1975 if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
1976 ctx->base.needs_inv = true;
1977 break;
1978 }
1979 }
1980 }
1981
1982 for (i = 0; i < keylen / sizeof(u32); i++)
1983 ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
1984
1985 /* The other half is the tweak key */
1986 ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
1987 if (ret) {
1988 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1989 return ret;
1990 }
1991
1992 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
1993 for (i = 0; i < keylen / sizeof(u32); i++) {
1994 if (ctx->key[i + keylen / sizeof(u32)] !=
1995 cpu_to_le32(aes.key_enc[i])) {
1996 ctx->base.needs_inv = true;
1997 break;
1998 }
1999 }
2000 }
2001
2002 for (i = 0; i < keylen / sizeof(u32); i++)
2003 ctx->key[i + keylen / sizeof(u32)] =
2004 cpu_to_le32(aes.key_enc[i]);
2005
2006 ctx->key_len = keylen << 1;
2007
2008 memzero_explicit(&aes, sizeof(aes));
2009 return 0;
2010}
2011
2012static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
2013{
2014 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
2015
2016 safexcel_skcipher_cra_init(tfm);
2017 ctx->alg = SAFEXCEL_AES;
2018 ctx->xts = 1;
2019 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
2020 return 0;
2021}
2022
2023static int safexcel_encrypt_xts(struct skcipher_request *req)
2024{
2025 if (req->cryptlen < XTS_BLOCK_SIZE)
2026 return -EINVAL;
2027 return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
2028 SAFEXCEL_ENCRYPT);
2029}
2030
2031static int safexcel_decrypt_xts(struct skcipher_request *req)
2032{
2033 if (req->cryptlen < XTS_BLOCK_SIZE)
2034 return -EINVAL;
2035 return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
2036 SAFEXCEL_DECRYPT);
2037}
2038
2039struct safexcel_alg_template safexcel_alg_xts_aes = {
2040 .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
2041 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS,
2042 .alg.skcipher = {
2043 .setkey = safexcel_skcipher_aesxts_setkey,
2044 .encrypt = safexcel_encrypt_xts,
2045 .decrypt = safexcel_decrypt_xts,
2046 /* XTS actually uses 2 AES keys glued together */
2047 .min_keysize = AES_MIN_KEY_SIZE * 2,
2048 .max_keysize = AES_MAX_KEY_SIZE * 2,
2049 .ivsize = XTS_BLOCK_SIZE,
2050 .base = {
2051 .cra_name = "xts(aes)",
2052 .cra_driver_name = "safexcel-xts-aes",
2053 .cra_priority = SAFEXCEL_CRA_PRIORITY,
2054 .cra_flags = CRYPTO_ALG_ASYNC |
2055 CRYPTO_ALG_KERN_DRIVER_ONLY,
2056 .cra_blocksize = XTS_BLOCK_SIZE,
2057 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
2058 .cra_alignmask = 0,
2059 .cra_init = safexcel_skcipher_aes_xts_cra_init,
2060 .cra_exit = safexcel_skcipher_cra_exit,
2061 .cra_module = THIS_MODULE,
2062 },
2063 },
2064};
2065
2066static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
2067 unsigned int len)
2068{
2069 struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
2070 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
2071 struct safexcel_crypto_priv *priv = ctx->priv;
2072 struct crypto_aes_ctx aes;
2073 u32 hashkey[AES_BLOCK_SIZE >> 2];
2074 int ret, i;
2075
2076 ret = aes_expandkey(&aes, key, len);
2077 if (ret) {
2078 crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2079 memzero_explicit(&aes, sizeof(aes));
2080 return ret;
2081 }
2082
2083 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
2084 for (i = 0; i < len / sizeof(u32); i++) {
2085 if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
2086 ctx->base.needs_inv = true;
2087 break;
2088 }
2089 }
2090 }
2091
2092 for (i = 0; i < len / sizeof(u32); i++)
2093 ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
2094
2095 ctx->key_len = len;
2096
2097 /* Compute hash key by encrypting zeroes with cipher key */
2098 crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
2099 crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
2100 CRYPTO_TFM_REQ_MASK);
2101 ret = crypto_cipher_setkey(ctx->hkaes, key, len);
2102 crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
2103 CRYPTO_TFM_RES_MASK);
2104 if (ret)
2105 return ret;
2106
2107 memset(hashkey, 0, AES_BLOCK_SIZE);
2108 crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
2109
2110 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
2111 for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
2112 if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) {
2113 ctx->base.needs_inv = true;
2114 break;
2115 }
2116 }
2117 }
2118
2119 for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
2120 ctx->ipad[i] = cpu_to_be32(hashkey[i]);
2121
2122 memzero_explicit(hashkey, AES_BLOCK_SIZE);
2123 memzero_explicit(&aes, sizeof(aes));
2124 return 0;
2125}
2126
2127static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
2128{
2129 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
2130
2131 safexcel_aead_cra_init(tfm);
2132 ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH;
2133 ctx->state_sz = GHASH_BLOCK_SIZE;
2134 ctx->xcm = EIP197_XCM_MODE_GCM;
2135 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
2136
2137 ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
2138 if (IS_ERR(ctx->hkaes))
2139 return PTR_ERR(ctx->hkaes);
2140
2141 return 0;
2142}
2143
2144static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
2145{
2146 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
2147
2148 crypto_free_cipher(ctx->hkaes);
2149 safexcel_aead_cra_exit(tfm);
2150}
2151
2152static int safexcel_aead_gcm_setauthsize(struct crypto_aead *tfm,
2153 unsigned int authsize)
2154{
2155 return crypto_gcm_check_authsize(authsize);
2156}
2157
2158struct safexcel_alg_template safexcel_alg_gcm = {
2159 .type = SAFEXCEL_ALG_TYPE_AEAD,
2160 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
2161 .alg.aead = {
2162 .setkey = safexcel_aead_gcm_setkey,
2163 .setauthsize = safexcel_aead_gcm_setauthsize,
2164 .encrypt = safexcel_aead_encrypt,
2165 .decrypt = safexcel_aead_decrypt,
2166 .ivsize = GCM_AES_IV_SIZE,
2167 .maxauthsize = GHASH_DIGEST_SIZE,
2168 .base = {
2169 .cra_name = "gcm(aes)",
2170 .cra_driver_name = "safexcel-gcm-aes",
2171 .cra_priority = SAFEXCEL_CRA_PRIORITY,
2172 .cra_flags = CRYPTO_ALG_ASYNC |
2173 CRYPTO_ALG_KERN_DRIVER_ONLY,
2174 .cra_blocksize = 1,
2175 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
2176 .cra_alignmask = 0,
2177 .cra_init = safexcel_aead_gcm_cra_init,
2178 .cra_exit = safexcel_aead_gcm_cra_exit,
2179 .cra_module = THIS_MODULE,
2180 },
2181 },
2182};
2183
2184static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
2185 unsigned int len)
2186{
2187 struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
2188 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
2189 struct safexcel_crypto_priv *priv = ctx->priv;
2190 struct crypto_aes_ctx aes;
2191 int ret, i;
2192
2193 ret = aes_expandkey(&aes, key, len);
2194 if (ret) {
2195 crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2196 memzero_explicit(&aes, sizeof(aes));
2197 return ret;
2198 }
2199
2200 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
2201 for (i = 0; i < len / sizeof(u32); i++) {
2202 if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
2203 ctx->base.needs_inv = true;
2204 break;
2205 }
2206 }
2207 }
2208
2209 for (i = 0; i < len / sizeof(u32); i++) {
2210 ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
2211 ctx->ipad[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
2212 cpu_to_be32(aes.key_enc[i]);
2213 }
2214
2215 ctx->key_len = len;
2216 ctx->state_sz = 2 * AES_BLOCK_SIZE + len;
2217
2218 if (len == AES_KEYSIZE_192)
2219 ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
2220 else if (len == AES_KEYSIZE_256)
2221 ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
2222 else
2223 ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
2224
2225 memzero_explicit(&aes, sizeof(aes));
2226 return 0;
2227}
2228
2229static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
2230{
2231 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
2232
2233 safexcel_aead_cra_init(tfm);
2234 ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
2235 ctx->state_sz = 3 * AES_BLOCK_SIZE;
2236 ctx->xcm = EIP197_XCM_MODE_CCM;
2237 ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
2238 return 0;
2239}
2240
2241static int safexcel_aead_ccm_setauthsize(struct crypto_aead *tfm,
2242 unsigned int authsize)
2243{
2244 /* Borrowed from crypto/ccm.c */
2245 switch (authsize) {
2246 case 4:
2247 case 6:
2248 case 8:
2249 case 10:
2250 case 12:
2251 case 14:
2252 case 16:
2253 break;
2254 default:
2255 return -EINVAL;
2256 }
2257
2258 return 0;
2259}
2260
2261static int safexcel_ccm_encrypt(struct aead_request *req)
2262{
2263 struct safexcel_cipher_req *creq = aead_request_ctx(req);
2264
2265 if (req->iv[0] < 1 || req->iv[0] > 7)
2266 return -EINVAL;
2267
2268 return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
2269}
2270
2271static int safexcel_ccm_decrypt(struct aead_request *req)
2272{
2273 struct safexcel_cipher_req *creq = aead_request_ctx(req);
2274
2275 if (req->iv[0] < 1 || req->iv[0] > 7)
2276 return -EINVAL;
2277
2278 return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
2279}
2280
2281struct safexcel_alg_template safexcel_alg_ccm = {
2282 .type = SAFEXCEL_ALG_TYPE_AEAD,
2283 .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
2284 .alg.aead = {
2285 .setkey = safexcel_aead_ccm_setkey,
2286 .setauthsize = safexcel_aead_ccm_setauthsize,
2287 .encrypt = safexcel_ccm_encrypt,
2288 .decrypt = safexcel_ccm_decrypt,
2289 .ivsize = AES_BLOCK_SIZE,
2290 .maxauthsize = AES_BLOCK_SIZE,
2291 .base = {
2292 .cra_name = "ccm(aes)",
2293 .cra_driver_name = "safexcel-ccm-aes",
2294 .cra_priority = SAFEXCEL_CRA_PRIORITY,
2295 .cra_flags = CRYPTO_ALG_ASYNC |
2296 CRYPTO_ALG_KERN_DRIVER_ONLY,
2297 .cra_blocksize = 1,
2298 .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
2299 .cra_alignmask = 0,
2300 .cra_init = safexcel_aead_ccm_cra_init,
2301 .cra_exit = safexcel_aead_cra_exit,
2302 .cra_module = THIS_MODULE,
2303 },
2304 },
2305};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index a80a5e757b1f..2effb6d21e8b 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -29,33 +29,31 @@ struct safexcel_ahash_req {
29 bool finish; 29 bool finish;
30 bool hmac; 30 bool hmac;
31 bool needs_inv; 31 bool needs_inv;
32 bool hmac_zlen;
33 bool len_is_le;
32 34
33 int nents; 35 int nents;
34 dma_addr_t result_dma; 36 dma_addr_t result_dma;
35 37
36 u32 digest; 38 u32 digest;
37 39
38 u8 state_sz; /* expected sate size, only set once */ 40 u8 state_sz; /* expected state size, only set once */
41 u8 block_sz; /* block size, only set once */
39 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); 42 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
40 43
41 u64 len[2]; 44 u64 len;
42 u64 processed[2]; 45 u64 processed;
43 46
44 u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); 47 u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
45 dma_addr_t cache_dma; 48 dma_addr_t cache_dma;
46 unsigned int cache_sz; 49 unsigned int cache_sz;
47 50
48 u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); 51 u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
49}; 52};
50 53
51static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) 54static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
52{ 55{
53 u64 len, processed; 56 return req->len - req->processed;
54
55 len = (0xffffffff * req->len[1]) + req->len[0];
56 processed = (0xffffffff * req->processed[1]) + req->processed[0];
57
58 return len - processed;
59} 57}
60 58
61static void safexcel_hash_token(struct safexcel_command_desc *cdesc, 59static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
@@ -79,75 +77,104 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
79 77
80static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, 78static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
81 struct safexcel_ahash_req *req, 79 struct safexcel_ahash_req *req,
82 struct safexcel_command_desc *cdesc, 80 struct safexcel_command_desc *cdesc)
83 unsigned int digestsize)
84{ 81{
85 struct safexcel_crypto_priv *priv = ctx->priv; 82 struct safexcel_crypto_priv *priv = ctx->priv;
86 int i; 83 u64 count = 0;
87 84
88 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
89 cdesc->control_data.control0 |= ctx->alg; 85 cdesc->control_data.control0 |= ctx->alg;
90 cdesc->control_data.control0 |= req->digest; 86
91 87 /*
92 if (!req->finish) 88 * Copy the input digest if needed, and setup the context
93 cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; 89 * fields. Do this now as we need it to setup the first command
94 90 * descriptor.
95 if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { 91 */
96 if (req->processed[0] || req->processed[1]) { 92 if (!req->processed) {
97 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) 93 /* First - and possibly only - block of basic hash only */
98 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5); 94 if (req->finish) {
99 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) 95 cdesc->control_data.control0 |=
100 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); 96 CONTEXT_CONTROL_TYPE_HASH_OUT |
101 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 || 97 CONTEXT_CONTROL_RESTART_HASH |
102		    ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)	 98				/* ensure it's not 0! */
103 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9); 99 CONTEXT_CONTROL_SIZE(1);
104 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
105 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
106 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
107
108 cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
109 } else { 100 } else {
110 cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; 101 cdesc->control_data.control0 |=
102 CONTEXT_CONTROL_TYPE_HASH_OUT |
103 CONTEXT_CONTROL_RESTART_HASH |
104 CONTEXT_CONTROL_NO_FINISH_HASH |
105				/* ensure it's not 0! */
106 CONTEXT_CONTROL_SIZE(1);
111 } 107 }
108 return;
109 }
112 110
113 /* 111 /* Hash continuation or HMAC, setup (inner) digest from state */
114 * Copy the input digest if needed, and setup the context 112 memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
115 * fields. Do this now as we need it to setup the first command 113
116 * descriptor. 114 if (req->finish) {
117 */ 115 /* Compute digest count for hash/HMAC finish operations */
118 if (req->processed[0] || req->processed[1]) { 116 if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
119 for (i = 0; i < digestsize / sizeof(u32); i++) 117 req->hmac_zlen || (req->processed != req->block_sz)) {
120 ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]); 118 count = req->processed / EIP197_COUNTER_BLOCK_SIZE;
121 119
122 if (req->finish) { 120 /* This is a hardware limitation, as the
123 u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; 121 * counter must fit into an u32. This represents
124 count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * 122 * a fairly big amount of input data, so we
125 req->processed[1]); 123 * shouldn't see this.
126 124 */
127 /* This is a haredware limitation, as the 125 if (unlikely(count & 0xffffffff00000000ULL)) {
128 * counter must fit into an u32. This represents 126 dev_warn(priv->dev,
129 * a farily big amount of input data, so we 127 "Input data is too big\n");
130 * shouldn't see this. 128 return;
131 */
132 if (unlikely(count & 0xffff0000)) {
133 dev_warn(priv->dev,
134 "Input data is too big\n");
135 return;
136 }
137
138 ctx->base.ctxr->data[i] = cpu_to_le32(count);
139 } 129 }
140 } 130 }
141 } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
142 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
143 131
144 memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz); 132 if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
145 memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32), 133 /* Special case: zero length HMAC */
146 ctx->opad, req->state_sz); 134 req->hmac_zlen ||
135 /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
136 (req->processed != req->block_sz)) {
137 /* Basic hash continue operation, need digest + cnt */
138 cdesc->control_data.control0 |=
139 CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
140 CONTEXT_CONTROL_TYPE_HASH_OUT |
141 CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
142 /* For zero-len HMAC, don't finalize, already padded! */
143 if (req->hmac_zlen)
144 cdesc->control_data.control0 |=
145 CONTEXT_CONTROL_NO_FINISH_HASH;
146 cdesc->control_data.control1 |=
147 CONTEXT_CONTROL_DIGEST_CNT;
148 ctx->base.ctxr->data[req->state_sz >> 2] =
149 cpu_to_le32(count);
150 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
151
152 /* Clear zero-length HMAC flag for next operation! */
153 req->hmac_zlen = false;
154 } else { /* HMAC */
155 /* Need outer digest for HMAC finalization */
156 memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
157 ctx->opad, req->state_sz);
158
159 /* Single pass HMAC - no digest count */
160 cdesc->control_data.control0 |=
161 CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
162 CONTEXT_CONTROL_TYPE_HASH_OUT |
163 CONTEXT_CONTROL_DIGEST_HMAC;
164 }
165 } else { /* Hash continuation, do not finish yet */
166 cdesc->control_data.control0 |=
167 CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
168 CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
169 CONTEXT_CONTROL_TYPE_HASH_OUT |
170 CONTEXT_CONTROL_NO_FINISH_HASH;
147 } 171 }
148} 172}
149 173
150static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, 174static int safexcel_ahash_enqueue(struct ahash_request *areq);
175
176static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
177 int ring,
151 struct crypto_async_request *async, 178 struct crypto_async_request *async,
152 bool *should_complete, int *ret) 179 bool *should_complete, int *ret)
153{ 180{
@@ -155,6 +182,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
155 struct ahash_request *areq = ahash_request_cast(async); 182 struct ahash_request *areq = ahash_request_cast(async);
156 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 183 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
157 struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); 184 struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
185 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
158 u64 cache_len; 186 u64 cache_len;
159 187
160 *ret = 0; 188 *ret = 0;
@@ -188,9 +216,31 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
188 sreq->cache_sz = 0; 216 sreq->cache_sz = 0;
189 } 217 }
190 218
191 if (sreq->finish) 219 if (sreq->finish) {
220 if (sreq->hmac &&
221 (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
222 /* Faking HMAC using hash - need to do outer hash */
223 memcpy(sreq->cache, sreq->state,
224 crypto_ahash_digestsize(ahash));
225
226 memcpy(sreq->state, ctx->opad, sreq->state_sz);
227
228 sreq->len = sreq->block_sz +
229 crypto_ahash_digestsize(ahash);
230 sreq->processed = sreq->block_sz;
231 sreq->hmac = 0;
232
233 ctx->base.needs_inv = true;
234 areq->nbytes = 0;
235 safexcel_ahash_enqueue(areq);
236
237 *should_complete = false; /* Not done yet */
238 return 1;
239 }
240
192 memcpy(areq->result, sreq->state, 241 memcpy(areq->result, sreq->state,
193 crypto_ahash_digestsize(ahash)); 242 crypto_ahash_digestsize(ahash));
243 }
194 244
195 cache_len = safexcel_queued_len(sreq); 245 cache_len = safexcel_queued_len(sreq);
196 if (cache_len) 246 if (cache_len)
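When the hash engine cannot continue an HMAC in hardware, the result handler above fakes it: the inner hash result is copied into the cache, the state is reloaded with the opad precompute, and the request is re-enqueued so the outer hash runs as a second pass before completion. A toy, self-contained illustration of that two-pass structure; the 32-bit "hash" below is a stand-in, not a real digest, and the precompute values are made up:

#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit "hash" standing in for the engine; only the two-pass
 * flow is the point, not the math.
 */
static uint32_t toy_hash(const uint8_t *buf, size_t len, uint32_t state)
{
	while (len--)
		state = state * 31 + *buf++;
	return state;
}

int main(void)
{
	const uint8_t msg[] = "message";
	uint32_t ipad_state = 0x11111111;	/* pretend H(key ^ ipad) */
	uint32_t opad_state = 0x22222222;	/* pretend H(key ^ opad) */

	/* Pass 1: inner hash, seeded with the ipad precompute */
	uint32_t inner = toy_hash(msg, sizeof(msg) - 1, ipad_state);

	/* Pass 2: outer hash over the inner digest, seeded with the opad
	 * precompute - this is the extra request the driver re-enqueues.
	 */
	uint32_t outer = toy_hash((const uint8_t *)&inner, sizeof(inner),
				  opad_state);

	printf("hmac-style result: %08x\n", (unsigned int)outer);
	return 0;
}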
@@ -205,7 +255,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
205 int *commands, int *results) 255 int *commands, int *results)
206{ 256{
207 struct ahash_request *areq = ahash_request_cast(async); 257 struct ahash_request *areq = ahash_request_cast(async);
208 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
209 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 258 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
210 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); 259 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
211 struct safexcel_crypto_priv *priv = ctx->priv; 260 struct safexcel_crypto_priv *priv = ctx->priv;
@@ -213,33 +262,25 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
213 struct safexcel_result_desc *rdesc; 262 struct safexcel_result_desc *rdesc;
214 struct scatterlist *sg; 263 struct scatterlist *sg;
215 int i, extra = 0, n_cdesc = 0, ret = 0; 264 int i, extra = 0, n_cdesc = 0, ret = 0;
216 u64 queued, len, cache_len, cache_max; 265 u64 queued, len, cache_len;
217
218 cache_max = crypto_ahash_blocksize(ahash);
219 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
220 cache_max <<= 1;
221 266
222 queued = len = safexcel_queued_len(req); 267 queued = len = safexcel_queued_len(req);
223 if (queued <= cache_max) 268 if (queued <= HASH_CACHE_SIZE)
224 cache_len = queued; 269 cache_len = queued;
225 else 270 else
226 cache_len = queued - areq->nbytes; 271 cache_len = queued - areq->nbytes;
227 272
228 if (!req->last_req) { 273 if (!req->finish && !req->last_req) {
229 /* If this is not the last request and the queued data does not 274 /* If this is not the last request and the queued data does not
230 * fit into full blocks, cache it for the next send() call. 275 * fit into full cache blocks, cache it for the next send call.
231 */ 276 */
232 extra = queued & (crypto_ahash_blocksize(ahash) - 1); 277 extra = queued & (HASH_CACHE_SIZE - 1);
233
234 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
235 extra < crypto_ahash_blocksize(ahash))
236 extra += crypto_ahash_blocksize(ahash);
237 278
238 /* If this is not the last request and the queued data 279 /* If this is not the last request and the queued data
239 * is a multiple of a block, cache the last one for now. 280 * is a multiple of a block, cache the last one for now.
240 */ 281 */
241 if (!extra) 282 if (!extra)
242 extra = crypto_ahash_blocksize(ahash); 283 extra = HASH_CACHE_SIZE;
243 284
244 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 285 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
245 req->cache_next, extra, 286 req->cache_next, extra,
@@ -247,6 +288,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
247 288
248 queued -= extra; 289 queued -= extra;
249 len -= extra; 290 len -= extra;
291
292 if (!queued) {
293 *commands = 0;
294 *results = 0;
295 return 0;
296 }
250 } 297 }
251 298
252 /* Add a command descriptor for the cached data, if any */ 299 /* Add a command descriptor for the cached data, if any */
@@ -272,8 +319,14 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
272 goto send_command; 319 goto send_command;
273 } 320 }
274 321
322 /* Skip descriptor generation for zero-length requests */
323 if (!areq->nbytes)
324 goto send_command;
325
275 /* Now handle the current ahash request buffer(s) */ 326 /* Now handle the current ahash request buffer(s) */
276 req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src), 327 req->nents = dma_map_sg(priv->dev, areq->src,
328 sg_nents_for_len(areq->src,
329 areq->nbytes),
277 DMA_TO_DEVICE); 330 DMA_TO_DEVICE);
278 if (!req->nents) { 331 if (!req->nents) {
279 ret = -ENOMEM; 332 ret = -ENOMEM;
@@ -288,7 +341,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
288 sglen = queued; 341 sglen = queued;
289 342
290 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, 343 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
291 !(queued - sglen), sg_dma_address(sg), 344 !(queued - sglen),
345 sg_dma_address(sg),
292 sglen, len, ctx->base.ctxr_dma); 346 sglen, len, ctx->base.ctxr_dma);
293 if (IS_ERR(cdesc)) { 347 if (IS_ERR(cdesc)) {
294 ret = PTR_ERR(cdesc); 348 ret = PTR_ERR(cdesc);
@@ -306,7 +360,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
306 360
307send_command: 361send_command:
308 /* Setup the context options */ 362 /* Setup the context options */
309 safexcel_context_control(ctx, req, first_cdesc, req->state_sz); 363 safexcel_context_control(ctx, req, first_cdesc);
310 364
311 /* Add the token */ 365 /* Add the token */
312 safexcel_hash_token(first_cdesc, len, req->state_sz); 366 safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -328,9 +382,7 @@ send_command:
328 382
329 safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); 383 safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
330 384
331 req->processed[0] += len; 385 req->processed += len;
332 if (req->processed[0] < len)
333 req->processed[1]++;
334 386
335 *commands = n_cdesc; 387 *commands = n_cdesc;
336 *results = 1; 388 *results = 1;
@@ -355,27 +407,6 @@ unmap_cache:
355 return ret; 407 return ret;
356} 408}
357 409
358static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
359{
360 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
361 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
362 unsigned int state_w_sz = req->state_sz / sizeof(u32);
363 u64 processed;
364 int i;
365
366 processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
367 processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
368
369 for (i = 0; i < state_w_sz; i++)
370 if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
371 return true;
372
373 if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
374 return true;
375
376 return false;
377}
378
379static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, 410static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
380 int ring, 411 int ring,
381 struct crypto_async_request *async, 412 struct crypto_async_request *async,
@@ -523,30 +554,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
523/* safexcel_ahash_cache: cache data until at least one request can be sent to 554/* safexcel_ahash_cache: cache data until at least one request can be sent to
524 * the engine, aka. when there is at least 1 block size in the pipe. 555 * the engine, aka. when there is at least 1 block size in the pipe.
525 */ 556 */
526static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max) 557static int safexcel_ahash_cache(struct ahash_request *areq)
527{ 558{
528 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 559 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
529 u64 queued, cache_len; 560 u64 cache_len;
530 561
531 /* queued: everything accepted by the driver which will be handled by
532 * the next send() calls.
533 * tot sz handled by update() - tot sz handled by send()
534 */
535 queued = safexcel_queued_len(req);
536 /* cache_len: everything accepted by the driver but not sent yet, 562 /* cache_len: everything accepted by the driver but not sent yet,
537 * tot sz handled by update() - last req sz - tot sz handled by send() 563 * tot sz handled by update() - last req sz - tot sz handled by send()
538 */ 564 */
539 cache_len = queued - areq->nbytes; 565 cache_len = safexcel_queued_len(req);
540 566
541 /* 567 /*
542 * In case there isn't enough bytes to proceed (less than a 568 * In case there isn't enough bytes to proceed (less than a
543 * block size), cache the data until we have enough. 569 * block size), cache the data until we have enough.
544 */ 570 */
545 if (cache_len + areq->nbytes <= cache_max) { 571 if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
546 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 572 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
547 req->cache + cache_len, 573 req->cache + cache_len,
548 areq->nbytes, 0); 574 areq->nbytes, 0);
549 return areq->nbytes; 575 return 0;
550 } 576 }
551 577
552 /* We couldn't cache all the data */ 578 /* We couldn't cache all the data */
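The rewritten cache helper above now measures against the fixed HASH_CACHE_SIZE rather than a per-algorithm (or doubled-for-HMAC) block size: if the bytes already cached plus the new request still fit in one cache block they are simply buffered, otherwise the caller must send them to the engine. A compact sketch of that decision, with a 64-byte cache assumed for the example:

#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 64	/* assumed: one hash block */

struct hash_cache {
	unsigned char buf[CACHE_SIZE];
	size_t len;
};

/* Buffer the new bytes if everything still fits in one cache block;
 * return 0 when cached, -1 when the caller must submit to the engine.
 */
static int cache_update(struct hash_cache *c, const void *data, size_t n)
{
	if (c->len + n <= CACHE_SIZE) {
		memcpy(c->buf + c->len, data, n);
		c->len += n;
		return 0;
	}
	return -1;	/* not enough room: go process the excess */
}

int main(void)
{
	struct hash_cache c = { .len = 0 };
	char chunk[32] = { 0 };

	printf("%d\n", cache_update(&c, chunk, sizeof(chunk)));	/* 0: cached */
	printf("%d\n", cache_update(&c, chunk, sizeof(chunk)));	/* 0: cached */
	printf("%d\n", cache_update(&c, chunk, sizeof(chunk)));	/* -1: full  */
	return 0;
}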
@@ -564,14 +590,25 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
564 590
565 if (ctx->base.ctxr) { 591 if (ctx->base.ctxr) {
566 if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && 592 if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
567 (req->processed[0] || req->processed[1]) && 593 req->processed &&
568 req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) 594 (/* invalidate for basic hash continuation finish */
569 /* We're still setting needs_inv here, even though it is 595 (req->finish &&
596 (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
597 /* invalidate if (i)digest changed */
598 memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
599 /* invalidate for HMAC continuation finish */
600 (req->finish && (req->processed != req->block_sz)) ||
601 /* invalidate for HMAC finish with odigest changed */
602 (req->finish &&
603 memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
604 ctx->opad, req->state_sz))))
605 /*
606 * We're still setting needs_inv here, even though it is
570 * cleared right away, because the needs_inv flag can be 607 * cleared right away, because the needs_inv flag can be
571 * set in other functions and we want to keep the same 608 * set in other functions and we want to keep the same
572 * logic. 609 * logic.
573 */ 610 */
574 ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); 611 ctx->base.needs_inv = true;
575 612
576 if (ctx->base.needs_inv) { 613 if (ctx->base.needs_inv) {
577 ctx->base.needs_inv = false; 614 ctx->base.needs_inv = false;
@@ -601,35 +638,23 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
601static int safexcel_ahash_update(struct ahash_request *areq) 638static int safexcel_ahash_update(struct ahash_request *areq)
602{ 639{
603 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 640 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
604 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 641 int ret;
605 u32 cache_max;
606 642
607 /* If the request is 0 length, do nothing */ 643 /* If the request is 0 length, do nothing */
608 if (!areq->nbytes) 644 if (!areq->nbytes)
609 return 0; 645 return 0;
610 646
611 req->len[0] += areq->nbytes; 647 /* Add request to the cache if it fits */
612 if (req->len[0] < areq->nbytes) 648 ret = safexcel_ahash_cache(areq);
613 req->len[1]++;
614
615 cache_max = crypto_ahash_blocksize(ahash);
616 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
617 cache_max <<= 1;
618 649
619 safexcel_ahash_cache(areq, cache_max); 650 /* Update total request length */
651 req->len += areq->nbytes;
620 652
621 /* 653 /* If not all data could fit into the cache, go process the excess.
622 * We're not doing partial updates when performing an hmac request. 654 * Also go process immediately for an HMAC IV precompute, which
623 * Everything will be handled by the final() call. 655 * will never be finished at all, but needs to be processed anyway.
624 */ 656 */
625 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 657 if ((ret && !req->finish) || req->last_req)
626 return 0;
627
628 if (req->hmac)
629 return safexcel_ahash_enqueue(areq);
630
631 if (!req->last_req &&
632 safexcel_queued_len(req) > cache_max)
633 return safexcel_ahash_enqueue(areq); 658 return safexcel_ahash_enqueue(areq);
634 659
635 return 0; 660 return 0;
@@ -640,11 +665,14 @@ static int safexcel_ahash_final(struct ahash_request *areq)
640 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 665 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
641 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); 666 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
642 667
643 req->last_req = true;
644 req->finish = true; 668 req->finish = true;
645 669
646 /* If we have an overall 0 length request */ 670 if (unlikely(!req->len && !areq->nbytes)) {
647 if (!req->len[0] && !req->len[1] && !areq->nbytes) { 671 /*
672 * If we have an overall 0 length *hash* request:
673 * The HW cannot do 0 length hash, so we provide the correct
674 * result directly here.
675 */
648 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) 676 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
649 memcpy(areq->result, md5_zero_message_hash, 677 memcpy(areq->result, md5_zero_message_hash,
650 MD5_DIGEST_SIZE); 678 MD5_DIGEST_SIZE);
@@ -665,6 +693,43 @@ static int safexcel_ahash_final(struct ahash_request *areq)
665 SHA512_DIGEST_SIZE); 693 SHA512_DIGEST_SIZE);
666 694
667 return 0; 695 return 0;
696 } else if (unlikely(req->hmac &&
697 (req->len == req->block_sz) &&
698 !areq->nbytes)) {
699 /*
700 * If we have an overall 0 length *HMAC* request:
701 * For HMAC, we need to finalize the inner digest
702 * and then perform the outer hash.
703 */
704
705 /* generate pad block in the cache */
706 /* start with a hash block of all zeroes */
707 memset(req->cache, 0, req->block_sz);
708 /* set the first byte to 0x80 to 'append a 1 bit' */
709 req->cache[0] = 0x80;
710 /* add the length in bits in the last 2 bytes */
711 if (req->len_is_le) {
712 /* Little endian length word (e.g. MD5) */
713 req->cache[req->block_sz-8] = (req->block_sz << 3) &
714 255;
715 req->cache[req->block_sz-7] = (req->block_sz >> 5);
716 } else {
717 /* Big endian length word (e.g. any SHA) */
718 req->cache[req->block_sz-2] = (req->block_sz >> 5);
719 req->cache[req->block_sz-1] = (req->block_sz << 3) &
720 255;
721 }
722
723 req->len += req->block_sz; /* plus 1 hash block */
724
725 /* Set special zero-length HMAC flag */
726 req->hmac_zlen = true;
727
728 /* Finalize HMAC */
729 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
730 } else if (req->hmac) {
731 /* Finalize HMAC */
732 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
668 } 733 }
669 734
670 return safexcel_ahash_enqueue(areq); 735 return safexcel_ahash_enqueue(areq);
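The zero-length HMAC branch above builds a Merkle-Damgård padding block by hand: all zeroes, a leading 0x80 byte (the appended 1 bit), and the message length in bits at the end of the block, little-endian for MD5 and big-endian for the SHA family. The length is req->block_sz * 8 because the only data ever hashed is the key^ipad block the init routine already accounted for. A standalone sketch of that pad construction, assuming a 64-byte block:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the single padding block used to finish a zero-length HMAC:
 * the "message" is one already-processed key^ipad block, so the bit
 * length is block_sz * 8.
 */
static void build_pad_block(uint8_t *blk, unsigned int block_sz, int le)
{
	uint64_t bitlen = (uint64_t)block_sz * 8;

	memset(blk, 0, block_sz);
	blk[0] = 0x80;				/* append a single 1 bit */

	if (le) {				/* MD5: little-endian length */
		blk[block_sz - 8] = bitlen & 0xff;
		blk[block_sz - 7] = (bitlen >> 8) & 0xff;
	} else {				/* SHA-*: big-endian length */
		blk[block_sz - 2] = (bitlen >> 8) & 0xff;
		blk[block_sz - 1] = bitlen & 0xff;
	}
}

int main(void)
{
	uint8_t blk[64];

	build_pad_block(blk, sizeof(blk), 0);
	/* 64-byte block -> 512 bits -> 0x0200 big-endian in the last bytes */
	printf("%02x %02x\n", blk[62], blk[63]);
	return 0;
}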
@@ -674,7 +739,6 @@ static int safexcel_ahash_finup(struct ahash_request *areq)
674{ 739{
675 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 740 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
676 741
677 req->last_req = true;
678 req->finish = true; 742 req->finish = true;
679 743
680 safexcel_ahash_update(areq); 744 safexcel_ahash_update(areq);
@@ -683,52 +747,36 @@ static int safexcel_ahash_finup(struct ahash_request *areq)
683 747
684static int safexcel_ahash_export(struct ahash_request *areq, void *out) 748static int safexcel_ahash_export(struct ahash_request *areq, void *out)
685{ 749{
686 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
687 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 750 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
688 struct safexcel_ahash_export_state *export = out; 751 struct safexcel_ahash_export_state *export = out;
689 u32 cache_sz;
690 752
691 cache_sz = crypto_ahash_blocksize(ahash); 753 export->len = req->len;
692 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 754 export->processed = req->processed;
693 cache_sz <<= 1;
694
695 export->len[0] = req->len[0];
696 export->len[1] = req->len[1];
697 export->processed[0] = req->processed[0];
698 export->processed[1] = req->processed[1];
699 755
700 export->digest = req->digest; 756 export->digest = req->digest;
701 757
702 memcpy(export->state, req->state, req->state_sz); 758 memcpy(export->state, req->state, req->state_sz);
703 memcpy(export->cache, req->cache, cache_sz); 759 memcpy(export->cache, req->cache, HASH_CACHE_SIZE);
704 760
705 return 0; 761 return 0;
706} 762}
707 763
708static int safexcel_ahash_import(struct ahash_request *areq, const void *in) 764static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
709{ 765{
710 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
711 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 766 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
712 const struct safexcel_ahash_export_state *export = in; 767 const struct safexcel_ahash_export_state *export = in;
713 u32 cache_sz;
714 int ret; 768 int ret;
715 769
716 ret = crypto_ahash_init(areq); 770 ret = crypto_ahash_init(areq);
717 if (ret) 771 if (ret)
718 return ret; 772 return ret;
719 773
720 cache_sz = crypto_ahash_blocksize(ahash); 774 req->len = export->len;
721 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) 775 req->processed = export->processed;
722 cache_sz <<= 1;
723
724 req->len[0] = export->len[0];
725 req->len[1] = export->len[1];
726 req->processed[0] = export->processed[0];
727 req->processed[1] = export->processed[1];
728 776
729 req->digest = export->digest; 777 req->digest = export->digest;
730 778
731 memcpy(req->cache, export->cache, cache_sz); 779 memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
732 memcpy(req->state, export->state, req->state_sz); 780 memcpy(req->state, export->state, req->state_sz);
733 781
734 return 0; 782 return 0;
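With len and processed collapsed to single u64 counters and the cache pinned at HASH_CACHE_SIZE, export() and import() above reduce to copying a flat snapshot. A rough userspace illustration of the round-trip; the struct layout below is invented for the example and is not the driver's export state:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define STATE_SZ 64	/* assumed upper bound on the digest state */
#define CACHE_SZ 64	/* assumed HASH_CACHE_SIZE */

struct hash_req {
	uint64_t len, processed;
	uint8_t state[STATE_SZ];
	uint8_t cache[CACHE_SZ];
};

struct hash_snapshot {
	uint64_t len, processed;
	uint8_t state[STATE_SZ];
	uint8_t cache[CACHE_SZ];
};

static void hash_export(const struct hash_req *r, struct hash_snapshot *s)
{
	s->len = r->len;
	s->processed = r->processed;
	memcpy(s->state, r->state, STATE_SZ);
	memcpy(s->cache, r->cache, CACHE_SZ);
}

static void hash_import(struct hash_req *r, const struct hash_snapshot *s)
{
	r->len = s->len;
	r->processed = s->processed;
	memcpy(r->state, s->state, STATE_SZ);
	memcpy(r->cache, s->cache, CACHE_SZ);
}

int main(void)
{
	struct hash_req a = { .len = 128, .processed = 64 }, b = { 0 };
	struct hash_snapshot snap;

	hash_export(&a, &snap);
	hash_import(&b, &snap);
	assert(b.len == 128 && b.processed == 64);
	return 0;
}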
@@ -757,15 +805,10 @@ static int safexcel_sha1_init(struct ahash_request *areq)
757 805
758 memset(req, 0, sizeof(*req)); 806 memset(req, 0, sizeof(*req));
759 807
760 req->state[0] = SHA1_H0;
761 req->state[1] = SHA1_H1;
762 req->state[2] = SHA1_H2;
763 req->state[3] = SHA1_H3;
764 req->state[4] = SHA1_H4;
765
766 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; 808 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
767 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 809 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
768 req->state_sz = SHA1_DIGEST_SIZE; 810 req->state_sz = SHA1_DIGEST_SIZE;
811 req->block_sz = SHA1_BLOCK_SIZE;
769 812
770 return 0; 813 return 0;
771} 814}
@@ -802,7 +845,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
802 845
803struct safexcel_alg_template safexcel_alg_sha1 = { 846struct safexcel_alg_template safexcel_alg_sha1 = {
804 .type = SAFEXCEL_ALG_TYPE_AHASH, 847 .type = SAFEXCEL_ALG_TYPE_AHASH,
805 .engines = EIP97IES | EIP197B | EIP197D, 848 .algo_mask = SAFEXCEL_ALG_SHA1,
806 .alg.ahash = { 849 .alg.ahash = {
807 .init = safexcel_sha1_init, 850 .init = safexcel_sha1_init,
808 .update = safexcel_ahash_update, 851 .update = safexcel_ahash_update,
@@ -817,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
817 .base = { 860 .base = {
818 .cra_name = "sha1", 861 .cra_name = "sha1",
819 .cra_driver_name = "safexcel-sha1", 862 .cra_driver_name = "safexcel-sha1",
820 .cra_priority = 300, 863 .cra_priority = SAFEXCEL_CRA_PRIORITY,
821 .cra_flags = CRYPTO_ALG_ASYNC | 864 .cra_flags = CRYPTO_ALG_ASYNC |
822 CRYPTO_ALG_KERN_DRIVER_ONLY, 865 CRYPTO_ALG_KERN_DRIVER_ONLY,
823 .cra_blocksize = SHA1_BLOCK_SIZE, 866 .cra_blocksize = SHA1_BLOCK_SIZE,
@@ -832,10 +875,23 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
832 875
833static int safexcel_hmac_sha1_init(struct ahash_request *areq) 876static int safexcel_hmac_sha1_init(struct ahash_request *areq)
834{ 877{
878 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
835 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 879 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
836 880
837 safexcel_sha1_init(areq); 881 memset(req, 0, sizeof(*req));
838 req->digest = CONTEXT_CONTROL_DIGEST_HMAC; 882
883 /* Start from ipad precompute */
884 memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
885 /* Already processed the key^ipad part now! */
886 req->len = SHA1_BLOCK_SIZE;
887 req->processed = SHA1_BLOCK_SIZE;
888
889 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
890 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
891 req->state_sz = SHA1_DIGEST_SIZE;
892 req->block_sz = SHA1_BLOCK_SIZE;
893 req->hmac = true;
894
839 return 0; 895 return 0;
840} 896}
841 897
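This HMAC init, and the matching sha224/sha256/sha384/sha512/md5 variants further down, all switch to the same scheme: seed the running state with the H(key^ipad) precompute and account for that one block as already hashed, so the engine treats the request as an ordinary precomputed-digest hash. A trimmed-down sketch of the pattern, with illustrative field names rather than the driver's struct:

#include <stdint.h>
#include <string.h>

#define DIGEST_SZ 20	/* SHA-1 digest size, for the example */
#define BLOCK_SZ  64	/* SHA-1 block size */

struct hmac_req {
	uint8_t  state[DIGEST_SZ];
	uint64_t len, processed;
	int      hmac;
};

/* Start an HMAC request from the ipad precompute and pretend the
 * key^ipad block has already gone through the engine.
 */
static void hmac_init(struct hmac_req *r, const uint8_t ipad[DIGEST_SZ])
{
	memset(r, 0, sizeof(*r));
	memcpy(r->state, ipad, DIGEST_SZ);
	r->len = BLOCK_SZ;
	r->processed = BLOCK_SZ;
	r->hmac = 1;
}

int main(void)
{
	struct hmac_req r;
	uint8_t ipad[DIGEST_SZ] = { 0 };	/* would come from setkey() */

	hmac_init(&r, ipad);
	return r.hmac ? 0 : 1;
}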
@@ -1004,21 +1060,16 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
1004 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 1060 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1005 struct safexcel_crypto_priv *priv = ctx->priv; 1061 struct safexcel_crypto_priv *priv = ctx->priv;
1006 struct safexcel_ahash_export_state istate, ostate; 1062 struct safexcel_ahash_export_state istate, ostate;
1007 int ret, i; 1063 int ret;
1008 1064
1009 ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate); 1065 ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
1010 if (ret) 1066 if (ret)
1011 return ret; 1067 return ret;
1012 1068
1013 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) { 1069 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
1014 for (i = 0; i < state_sz / sizeof(u32); i++) { 1070 (memcmp(ctx->ipad, istate.state, state_sz) ||
1015 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 1071 memcmp(ctx->opad, ostate.state, state_sz)))
1016 ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 1072 ctx->base.needs_inv = true;
1017 ctx->base.needs_inv = true;
1018 break;
1019 }
1020 }
1021 }
1022 1073
1023 memcpy(ctx->ipad, &istate.state, state_sz); 1074 memcpy(ctx->ipad, &istate.state, state_sz);
1024 memcpy(ctx->opad, &ostate.state, state_sz); 1075 memcpy(ctx->opad, &ostate.state, state_sz);
@@ -1035,7 +1086,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
1035 1086
1036struct safexcel_alg_template safexcel_alg_hmac_sha1 = { 1087struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
1037 .type = SAFEXCEL_ALG_TYPE_AHASH, 1088 .type = SAFEXCEL_ALG_TYPE_AHASH,
1038 .engines = EIP97IES | EIP197B | EIP197D, 1089 .algo_mask = SAFEXCEL_ALG_SHA1,
1039 .alg.ahash = { 1090 .alg.ahash = {
1040 .init = safexcel_hmac_sha1_init, 1091 .init = safexcel_hmac_sha1_init,
1041 .update = safexcel_ahash_update, 1092 .update = safexcel_ahash_update,
@@ -1051,7 +1102,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
1051 .base = { 1102 .base = {
1052 .cra_name = "hmac(sha1)", 1103 .cra_name = "hmac(sha1)",
1053 .cra_driver_name = "safexcel-hmac-sha1", 1104 .cra_driver_name = "safexcel-hmac-sha1",
1054 .cra_priority = 300, 1105 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1055 .cra_flags = CRYPTO_ALG_ASYNC | 1106 .cra_flags = CRYPTO_ALG_ASYNC |
1056 CRYPTO_ALG_KERN_DRIVER_ONLY, 1107 CRYPTO_ALG_KERN_DRIVER_ONLY,
1057 .cra_blocksize = SHA1_BLOCK_SIZE, 1108 .cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1071,18 +1122,10 @@ static int safexcel_sha256_init(struct ahash_request *areq)
1071 1122
1072 memset(req, 0, sizeof(*req)); 1123 memset(req, 0, sizeof(*req));
1073 1124
1074 req->state[0] = SHA256_H0;
1075 req->state[1] = SHA256_H1;
1076 req->state[2] = SHA256_H2;
1077 req->state[3] = SHA256_H3;
1078 req->state[4] = SHA256_H4;
1079 req->state[5] = SHA256_H5;
1080 req->state[6] = SHA256_H6;
1081 req->state[7] = SHA256_H7;
1082
1083 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; 1125 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1084 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1126 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1085 req->state_sz = SHA256_DIGEST_SIZE; 1127 req->state_sz = SHA256_DIGEST_SIZE;
1128 req->block_sz = SHA256_BLOCK_SIZE;
1086 1129
1087 return 0; 1130 return 0;
1088} 1131}
@@ -1099,7 +1142,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq)
1099 1142
1100struct safexcel_alg_template safexcel_alg_sha256 = { 1143struct safexcel_alg_template safexcel_alg_sha256 = {
1101 .type = SAFEXCEL_ALG_TYPE_AHASH, 1144 .type = SAFEXCEL_ALG_TYPE_AHASH,
1102 .engines = EIP97IES | EIP197B | EIP197D, 1145 .algo_mask = SAFEXCEL_ALG_SHA2_256,
1103 .alg.ahash = { 1146 .alg.ahash = {
1104 .init = safexcel_sha256_init, 1147 .init = safexcel_sha256_init,
1105 .update = safexcel_ahash_update, 1148 .update = safexcel_ahash_update,
@@ -1114,7 +1157,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = {
1114 .base = { 1157 .base = {
1115 .cra_name = "sha256", 1158 .cra_name = "sha256",
1116 .cra_driver_name = "safexcel-sha256", 1159 .cra_driver_name = "safexcel-sha256",
1117 .cra_priority = 300, 1160 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1118 .cra_flags = CRYPTO_ALG_ASYNC | 1161 .cra_flags = CRYPTO_ALG_ASYNC |
1119 CRYPTO_ALG_KERN_DRIVER_ONLY, 1162 CRYPTO_ALG_KERN_DRIVER_ONLY,
1120 .cra_blocksize = SHA256_BLOCK_SIZE, 1163 .cra_blocksize = SHA256_BLOCK_SIZE,
@@ -1134,18 +1177,10 @@ static int safexcel_sha224_init(struct ahash_request *areq)
1134 1177
1135 memset(req, 0, sizeof(*req)); 1178 memset(req, 0, sizeof(*req));
1136 1179
1137 req->state[0] = SHA224_H0;
1138 req->state[1] = SHA224_H1;
1139 req->state[2] = SHA224_H2;
1140 req->state[3] = SHA224_H3;
1141 req->state[4] = SHA224_H4;
1142 req->state[5] = SHA224_H5;
1143 req->state[6] = SHA224_H6;
1144 req->state[7] = SHA224_H7;
1145
1146 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; 1180 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1147 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1181 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1148 req->state_sz = SHA256_DIGEST_SIZE; 1182 req->state_sz = SHA256_DIGEST_SIZE;
1183 req->block_sz = SHA256_BLOCK_SIZE;
1149 1184
1150 return 0; 1185 return 0;
1151} 1186}
@@ -1162,7 +1197,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq)
1162 1197
1163struct safexcel_alg_template safexcel_alg_sha224 = { 1198struct safexcel_alg_template safexcel_alg_sha224 = {
1164 .type = SAFEXCEL_ALG_TYPE_AHASH, 1199 .type = SAFEXCEL_ALG_TYPE_AHASH,
1165 .engines = EIP97IES | EIP197B | EIP197D, 1200 .algo_mask = SAFEXCEL_ALG_SHA2_256,
1166 .alg.ahash = { 1201 .alg.ahash = {
1167 .init = safexcel_sha224_init, 1202 .init = safexcel_sha224_init,
1168 .update = safexcel_ahash_update, 1203 .update = safexcel_ahash_update,
@@ -1177,7 +1212,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = {
1177 .base = { 1212 .base = {
1178 .cra_name = "sha224", 1213 .cra_name = "sha224",
1179 .cra_driver_name = "safexcel-sha224", 1214 .cra_driver_name = "safexcel-sha224",
1180 .cra_priority = 300, 1215 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1181 .cra_flags = CRYPTO_ALG_ASYNC | 1216 .cra_flags = CRYPTO_ALG_ASYNC |
1182 CRYPTO_ALG_KERN_DRIVER_ONLY, 1217 CRYPTO_ALG_KERN_DRIVER_ONLY,
1183 .cra_blocksize = SHA224_BLOCK_SIZE, 1218 .cra_blocksize = SHA224_BLOCK_SIZE,
@@ -1199,10 +1234,23 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
1199 1234
1200static int safexcel_hmac_sha224_init(struct ahash_request *areq) 1235static int safexcel_hmac_sha224_init(struct ahash_request *areq)
1201{ 1236{
1237 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1202 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 1238 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1203 1239
1204 safexcel_sha224_init(areq); 1240 memset(req, 0, sizeof(*req));
1205 req->digest = CONTEXT_CONTROL_DIGEST_HMAC; 1241
1242 /* Start from ipad precompute */
1243 memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
1244 /* Already processed the key^ipad part now! */
1245 req->len = SHA256_BLOCK_SIZE;
1246 req->processed = SHA256_BLOCK_SIZE;
1247
1248 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1249 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1250 req->state_sz = SHA256_DIGEST_SIZE;
1251 req->block_sz = SHA256_BLOCK_SIZE;
1252 req->hmac = true;
1253
1206 return 0; 1254 return 0;
1207} 1255}
1208 1256
@@ -1218,7 +1266,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
1218 1266
1219struct safexcel_alg_template safexcel_alg_hmac_sha224 = { 1267struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
1220 .type = SAFEXCEL_ALG_TYPE_AHASH, 1268 .type = SAFEXCEL_ALG_TYPE_AHASH,
1221 .engines = EIP97IES | EIP197B | EIP197D, 1269 .algo_mask = SAFEXCEL_ALG_SHA2_256,
1222 .alg.ahash = { 1270 .alg.ahash = {
1223 .init = safexcel_hmac_sha224_init, 1271 .init = safexcel_hmac_sha224_init,
1224 .update = safexcel_ahash_update, 1272 .update = safexcel_ahash_update,
@@ -1234,7 +1282,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
1234 .base = { 1282 .base = {
1235 .cra_name = "hmac(sha224)", 1283 .cra_name = "hmac(sha224)",
1236 .cra_driver_name = "safexcel-hmac-sha224", 1284 .cra_driver_name = "safexcel-hmac-sha224",
1237 .cra_priority = 300, 1285 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1238 .cra_flags = CRYPTO_ALG_ASYNC | 1286 .cra_flags = CRYPTO_ALG_ASYNC |
1239 CRYPTO_ALG_KERN_DRIVER_ONLY, 1287 CRYPTO_ALG_KERN_DRIVER_ONLY,
1240 .cra_blocksize = SHA224_BLOCK_SIZE, 1288 .cra_blocksize = SHA224_BLOCK_SIZE,
@@ -1256,10 +1304,23 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1256 1304
1257static int safexcel_hmac_sha256_init(struct ahash_request *areq) 1305static int safexcel_hmac_sha256_init(struct ahash_request *areq)
1258{ 1306{
1307 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1259 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 1308 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1260 1309
1261 safexcel_sha256_init(areq); 1310 memset(req, 0, sizeof(*req));
1262 req->digest = CONTEXT_CONTROL_DIGEST_HMAC; 1311
1312 /* Start from ipad precompute */
1313 memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
1314 /* Already processed the key^ipad part now! */
1315 req->len = SHA256_BLOCK_SIZE;
1316 req->processed = SHA256_BLOCK_SIZE;
1317
1318 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1319 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1320 req->state_sz = SHA256_DIGEST_SIZE;
1321 req->block_sz = SHA256_BLOCK_SIZE;
1322 req->hmac = true;
1323
1263 return 0; 1324 return 0;
1264} 1325}
1265 1326
@@ -1275,7 +1336,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
1275 1336
1276struct safexcel_alg_template safexcel_alg_hmac_sha256 = { 1337struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
1277 .type = SAFEXCEL_ALG_TYPE_AHASH, 1338 .type = SAFEXCEL_ALG_TYPE_AHASH,
1278 .engines = EIP97IES | EIP197B | EIP197D, 1339 .algo_mask = SAFEXCEL_ALG_SHA2_256,
1279 .alg.ahash = { 1340 .alg.ahash = {
1280 .init = safexcel_hmac_sha256_init, 1341 .init = safexcel_hmac_sha256_init,
1281 .update = safexcel_ahash_update, 1342 .update = safexcel_ahash_update,
@@ -1291,7 +1352,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
1291 .base = { 1352 .base = {
1292 .cra_name = "hmac(sha256)", 1353 .cra_name = "hmac(sha256)",
1293 .cra_driver_name = "safexcel-hmac-sha256", 1354 .cra_driver_name = "safexcel-hmac-sha256",
1294 .cra_priority = 300, 1355 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1295 .cra_flags = CRYPTO_ALG_ASYNC | 1356 .cra_flags = CRYPTO_ALG_ASYNC |
1296 CRYPTO_ALG_KERN_DRIVER_ONLY, 1357 CRYPTO_ALG_KERN_DRIVER_ONLY,
1297 .cra_blocksize = SHA256_BLOCK_SIZE, 1358 .cra_blocksize = SHA256_BLOCK_SIZE,
@@ -1311,26 +1372,10 @@ static int safexcel_sha512_init(struct ahash_request *areq)
1311 1372
1312 memset(req, 0, sizeof(*req)); 1373 memset(req, 0, sizeof(*req));
1313 1374
1314 req->state[0] = lower_32_bits(SHA512_H0);
1315 req->state[1] = upper_32_bits(SHA512_H0);
1316 req->state[2] = lower_32_bits(SHA512_H1);
1317 req->state[3] = upper_32_bits(SHA512_H1);
1318 req->state[4] = lower_32_bits(SHA512_H2);
1319 req->state[5] = upper_32_bits(SHA512_H2);
1320 req->state[6] = lower_32_bits(SHA512_H3);
1321 req->state[7] = upper_32_bits(SHA512_H3);
1322 req->state[8] = lower_32_bits(SHA512_H4);
1323 req->state[9] = upper_32_bits(SHA512_H4);
1324 req->state[10] = lower_32_bits(SHA512_H5);
1325 req->state[11] = upper_32_bits(SHA512_H5);
1326 req->state[12] = lower_32_bits(SHA512_H6);
1327 req->state[13] = upper_32_bits(SHA512_H6);
1328 req->state[14] = lower_32_bits(SHA512_H7);
1329 req->state[15] = upper_32_bits(SHA512_H7);
1330
1331 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; 1375 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
1332 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1376 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1333 req->state_sz = SHA512_DIGEST_SIZE; 1377 req->state_sz = SHA512_DIGEST_SIZE;
1378 req->block_sz = SHA512_BLOCK_SIZE;
1334 1379
1335 return 0; 1380 return 0;
1336} 1381}
@@ -1347,7 +1392,7 @@ static int safexcel_sha512_digest(struct ahash_request *areq)
1347 1392
1348struct safexcel_alg_template safexcel_alg_sha512 = { 1393struct safexcel_alg_template safexcel_alg_sha512 = {
1349 .type = SAFEXCEL_ALG_TYPE_AHASH, 1394 .type = SAFEXCEL_ALG_TYPE_AHASH,
1350 .engines = EIP97IES | EIP197B | EIP197D, 1395 .algo_mask = SAFEXCEL_ALG_SHA2_512,
1351 .alg.ahash = { 1396 .alg.ahash = {
1352 .init = safexcel_sha512_init, 1397 .init = safexcel_sha512_init,
1353 .update = safexcel_ahash_update, 1398 .update = safexcel_ahash_update,
@@ -1362,7 +1407,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = {
1362 .base = { 1407 .base = {
1363 .cra_name = "sha512", 1408 .cra_name = "sha512",
1364 .cra_driver_name = "safexcel-sha512", 1409 .cra_driver_name = "safexcel-sha512",
1365 .cra_priority = 300, 1410 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1366 .cra_flags = CRYPTO_ALG_ASYNC | 1411 .cra_flags = CRYPTO_ALG_ASYNC |
1367 CRYPTO_ALG_KERN_DRIVER_ONLY, 1412 CRYPTO_ALG_KERN_DRIVER_ONLY,
1368 .cra_blocksize = SHA512_BLOCK_SIZE, 1413 .cra_blocksize = SHA512_BLOCK_SIZE,
@@ -1382,26 +1427,10 @@ static int safexcel_sha384_init(struct ahash_request *areq)
1382 1427
1383 memset(req, 0, sizeof(*req)); 1428 memset(req, 0, sizeof(*req));
1384 1429
1385 req->state[0] = lower_32_bits(SHA384_H0);
1386 req->state[1] = upper_32_bits(SHA384_H0);
1387 req->state[2] = lower_32_bits(SHA384_H1);
1388 req->state[3] = upper_32_bits(SHA384_H1);
1389 req->state[4] = lower_32_bits(SHA384_H2);
1390 req->state[5] = upper_32_bits(SHA384_H2);
1391 req->state[6] = lower_32_bits(SHA384_H3);
1392 req->state[7] = upper_32_bits(SHA384_H3);
1393 req->state[8] = lower_32_bits(SHA384_H4);
1394 req->state[9] = upper_32_bits(SHA384_H4);
1395 req->state[10] = lower_32_bits(SHA384_H5);
1396 req->state[11] = upper_32_bits(SHA384_H5);
1397 req->state[12] = lower_32_bits(SHA384_H6);
1398 req->state[13] = upper_32_bits(SHA384_H6);
1399 req->state[14] = lower_32_bits(SHA384_H7);
1400 req->state[15] = upper_32_bits(SHA384_H7);
1401
1402 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; 1430 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
1403 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1431 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1404 req->state_sz = SHA512_DIGEST_SIZE; 1432 req->state_sz = SHA512_DIGEST_SIZE;
1433 req->block_sz = SHA512_BLOCK_SIZE;
1405 1434
1406 return 0; 1435 return 0;
1407} 1436}
@@ -1418,7 +1447,7 @@ static int safexcel_sha384_digest(struct ahash_request *areq)
1418 1447
1419struct safexcel_alg_template safexcel_alg_sha384 = { 1448struct safexcel_alg_template safexcel_alg_sha384 = {
1420 .type = SAFEXCEL_ALG_TYPE_AHASH, 1449 .type = SAFEXCEL_ALG_TYPE_AHASH,
1421 .engines = EIP97IES | EIP197B | EIP197D, 1450 .algo_mask = SAFEXCEL_ALG_SHA2_512,
1422 .alg.ahash = { 1451 .alg.ahash = {
1423 .init = safexcel_sha384_init, 1452 .init = safexcel_sha384_init,
1424 .update = safexcel_ahash_update, 1453 .update = safexcel_ahash_update,
@@ -1433,7 +1462,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = {
1433 .base = { 1462 .base = {
1434 .cra_name = "sha384", 1463 .cra_name = "sha384",
1435 .cra_driver_name = "safexcel-sha384", 1464 .cra_driver_name = "safexcel-sha384",
1436 .cra_priority = 300, 1465 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1437 .cra_flags = CRYPTO_ALG_ASYNC | 1466 .cra_flags = CRYPTO_ALG_ASYNC |
1438 CRYPTO_ALG_KERN_DRIVER_ONLY, 1467 CRYPTO_ALG_KERN_DRIVER_ONLY,
1439 .cra_blocksize = SHA384_BLOCK_SIZE, 1468 .cra_blocksize = SHA384_BLOCK_SIZE,
@@ -1455,10 +1484,23 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
1455 1484
1456static int safexcel_hmac_sha512_init(struct ahash_request *areq) 1485static int safexcel_hmac_sha512_init(struct ahash_request *areq)
1457{ 1486{
1487 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1458 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 1488 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1459 1489
1460 safexcel_sha512_init(areq); 1490 memset(req, 0, sizeof(*req));
1461 req->digest = CONTEXT_CONTROL_DIGEST_HMAC; 1491
1492 /* Start from ipad precompute */
1493 memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
1494 /* Already processed the key^ipad part now! */
1495 req->len = SHA512_BLOCK_SIZE;
1496 req->processed = SHA512_BLOCK_SIZE;
1497
1498 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
1499 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1500 req->state_sz = SHA512_DIGEST_SIZE;
1501 req->block_sz = SHA512_BLOCK_SIZE;
1502 req->hmac = true;
1503
1462 return 0; 1504 return 0;
1463} 1505}
1464 1506
@@ -1474,7 +1516,7 @@ static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
1474 1516
1475struct safexcel_alg_template safexcel_alg_hmac_sha512 = { 1517struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
1476 .type = SAFEXCEL_ALG_TYPE_AHASH, 1518 .type = SAFEXCEL_ALG_TYPE_AHASH,
1477 .engines = EIP97IES | EIP197B | EIP197D, 1519 .algo_mask = SAFEXCEL_ALG_SHA2_512,
1478 .alg.ahash = { 1520 .alg.ahash = {
1479 .init = safexcel_hmac_sha512_init, 1521 .init = safexcel_hmac_sha512_init,
1480 .update = safexcel_ahash_update, 1522 .update = safexcel_ahash_update,
@@ -1490,7 +1532,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
1490 .base = { 1532 .base = {
1491 .cra_name = "hmac(sha512)", 1533 .cra_name = "hmac(sha512)",
1492 .cra_driver_name = "safexcel-hmac-sha512", 1534 .cra_driver_name = "safexcel-hmac-sha512",
1493 .cra_priority = 300, 1535 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1494 .cra_flags = CRYPTO_ALG_ASYNC | 1536 .cra_flags = CRYPTO_ALG_ASYNC |
1495 CRYPTO_ALG_KERN_DRIVER_ONLY, 1537 CRYPTO_ALG_KERN_DRIVER_ONLY,
1496 .cra_blocksize = SHA512_BLOCK_SIZE, 1538 .cra_blocksize = SHA512_BLOCK_SIZE,
@@ -1512,10 +1554,23 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
1512 1554
1513static int safexcel_hmac_sha384_init(struct ahash_request *areq) 1555static int safexcel_hmac_sha384_init(struct ahash_request *areq)
1514{ 1556{
1557 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1515 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 1558 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1516 1559
1517 safexcel_sha384_init(areq); 1560 memset(req, 0, sizeof(*req));
1518 req->digest = CONTEXT_CONTROL_DIGEST_HMAC; 1561
1562 /* Start from ipad precompute */
1563 memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
1564 /* Already processed the key^ipad part now! */
1565 req->len = SHA512_BLOCK_SIZE;
1566 req->processed = SHA512_BLOCK_SIZE;
1567
1568 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
1569 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1570 req->state_sz = SHA512_DIGEST_SIZE;
1571 req->block_sz = SHA512_BLOCK_SIZE;
1572 req->hmac = true;
1573
1519 return 0; 1574 return 0;
1520} 1575}
1521 1576
@@ -1531,7 +1586,7 @@ static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
1531 1586
1532struct safexcel_alg_template safexcel_alg_hmac_sha384 = { 1587struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
1533 .type = SAFEXCEL_ALG_TYPE_AHASH, 1588 .type = SAFEXCEL_ALG_TYPE_AHASH,
1534 .engines = EIP97IES | EIP197B | EIP197D, 1589 .algo_mask = SAFEXCEL_ALG_SHA2_512,
1535 .alg.ahash = { 1590 .alg.ahash = {
1536 .init = safexcel_hmac_sha384_init, 1591 .init = safexcel_hmac_sha384_init,
1537 .update = safexcel_ahash_update, 1592 .update = safexcel_ahash_update,
@@ -1547,7 +1602,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
1547 .base = { 1602 .base = {
1548 .cra_name = "hmac(sha384)", 1603 .cra_name = "hmac(sha384)",
1549 .cra_driver_name = "safexcel-hmac-sha384", 1604 .cra_driver_name = "safexcel-hmac-sha384",
1550 .cra_priority = 300, 1605 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1551 .cra_flags = CRYPTO_ALG_ASYNC | 1606 .cra_flags = CRYPTO_ALG_ASYNC |
1552 CRYPTO_ALG_KERN_DRIVER_ONLY, 1607 CRYPTO_ALG_KERN_DRIVER_ONLY,
1553 .cra_blocksize = SHA384_BLOCK_SIZE, 1608 .cra_blocksize = SHA384_BLOCK_SIZE,
@@ -1567,14 +1622,10 @@ static int safexcel_md5_init(struct ahash_request *areq)
1567 1622
1568 memset(req, 0, sizeof(*req)); 1623 memset(req, 0, sizeof(*req));
1569 1624
1570 req->state[0] = MD5_H0;
1571 req->state[1] = MD5_H1;
1572 req->state[2] = MD5_H2;
1573 req->state[3] = MD5_H3;
1574
1575 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; 1625 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
1576 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1626 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1577 req->state_sz = MD5_DIGEST_SIZE; 1627 req->state_sz = MD5_DIGEST_SIZE;
1628 req->block_sz = MD5_HMAC_BLOCK_SIZE;
1578 1629
1579 return 0; 1630 return 0;
1580} 1631}
@@ -1591,7 +1642,7 @@ static int safexcel_md5_digest(struct ahash_request *areq)
1591 1642
1592struct safexcel_alg_template safexcel_alg_md5 = { 1643struct safexcel_alg_template safexcel_alg_md5 = {
1593 .type = SAFEXCEL_ALG_TYPE_AHASH, 1644 .type = SAFEXCEL_ALG_TYPE_AHASH,
1594 .engines = EIP97IES | EIP197B | EIP197D, 1645 .algo_mask = SAFEXCEL_ALG_MD5,
1595 .alg.ahash = { 1646 .alg.ahash = {
1596 .init = safexcel_md5_init, 1647 .init = safexcel_md5_init,
1597 .update = safexcel_ahash_update, 1648 .update = safexcel_ahash_update,
@@ -1606,7 +1657,7 @@ struct safexcel_alg_template safexcel_alg_md5 = {
1606 .base = { 1657 .base = {
1607 .cra_name = "md5", 1658 .cra_name = "md5",
1608 .cra_driver_name = "safexcel-md5", 1659 .cra_driver_name = "safexcel-md5",
1609 .cra_priority = 300, 1660 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1610 .cra_flags = CRYPTO_ALG_ASYNC | 1661 .cra_flags = CRYPTO_ALG_ASYNC |
1611 CRYPTO_ALG_KERN_DRIVER_ONLY, 1662 CRYPTO_ALG_KERN_DRIVER_ONLY,
1612 .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 1663 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
@@ -1621,10 +1672,24 @@ struct safexcel_alg_template safexcel_alg_md5 = {
1621 1672
1622static int safexcel_hmac_md5_init(struct ahash_request *areq) 1673static int safexcel_hmac_md5_init(struct ahash_request *areq)
1623{ 1674{
1675 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1624 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 1676 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1625 1677
1626 safexcel_md5_init(areq); 1678 memset(req, 0, sizeof(*req));
1627 req->digest = CONTEXT_CONTROL_DIGEST_HMAC; 1679
1680 /* Start from ipad precompute */
1681 memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
1682 /* Already processed the key^ipad part now! */
1683 req->len = MD5_HMAC_BLOCK_SIZE;
1684 req->processed = MD5_HMAC_BLOCK_SIZE;
1685
1686 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
1687 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1688 req->state_sz = MD5_DIGEST_SIZE;
1689 req->block_sz = MD5_HMAC_BLOCK_SIZE;
1690 req->len_is_le = true; /* MD5 is little endian! ... */
1691 req->hmac = true;
1692
1628 return 0; 1693 return 0;
1629} 1694}
1630 1695
@@ -1647,7 +1712,7 @@ static int safexcel_hmac_md5_digest(struct ahash_request *areq)
1647 1712
1648struct safexcel_alg_template safexcel_alg_hmac_md5 = { 1713struct safexcel_alg_template safexcel_alg_hmac_md5 = {
1649 .type = SAFEXCEL_ALG_TYPE_AHASH, 1714 .type = SAFEXCEL_ALG_TYPE_AHASH,
1650 .engines = EIP97IES | EIP197B | EIP197D, 1715 .algo_mask = SAFEXCEL_ALG_MD5,
1651 .alg.ahash = { 1716 .alg.ahash = {
1652 .init = safexcel_hmac_md5_init, 1717 .init = safexcel_hmac_md5_init,
1653 .update = safexcel_ahash_update, 1718 .update = safexcel_ahash_update,
@@ -1663,7 +1728,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
1663 .base = { 1728 .base = {
1664 .cra_name = "hmac(md5)", 1729 .cra_name = "hmac(md5)",
1665 .cra_driver_name = "safexcel-hmac-md5", 1730 .cra_driver_name = "safexcel-hmac-md5",
1666 .cra_priority = 300, 1731 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1667 .cra_flags = CRYPTO_ALG_ASYNC | 1732 .cra_flags = CRYPTO_ALG_ASYNC |
1668 CRYPTO_ALG_KERN_DRIVER_ONLY, 1733 CRYPTO_ALG_KERN_DRIVER_ONLY,
1669 .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 1734 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index 142bc3f5c45c..0f269b89cfd4 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -137,7 +137,13 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
137 struct safexcel_token *token = 137 struct safexcel_token *token =
138 (struct safexcel_token *)cdesc->control_data.token; 138 (struct safexcel_token *)cdesc->control_data.token;
139 139
140 cdesc->control_data.packet_length = full_data_len; 140 /*
141 * Note that the length here MUST be >0 or else the EIP(1)97
142 * may hang. Newer EIP197 firmware actually incorporates this
143 * fix already, but that doesn't help the EIP97 and we may
144 * also be running older firmware.
145 */
146 cdesc->control_data.packet_length = full_data_len ?: 1;
141 cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | 147 cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
142 EIP197_OPTION_64BIT_CTX | 148 EIP197_OPTION_64BIT_CTX |
143 EIP197_OPTION_CTX_CTRL_IN_CMD; 149 EIP197_OPTION_CTX_CTRL_IN_CMD;
@@ -145,7 +151,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
145 (lower_32_bits(context) & GENMASK(31, 2)) >> 2; 151 (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
146 cdesc->control_data.context_hi = upper_32_bits(context); 152 cdesc->control_data.context_hi = upper_32_bits(context);
147 153
148 if (priv->version == EIP197B || priv->version == EIP197D) 154 if (priv->version == EIP197B_MRVL ||
155 priv->version == EIP197D_MRVL)
149 cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; 156 cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
150 157
151 /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ 158 /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
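The packet_length change in safexcel_add_cdesc() leans on the GCC a ?: b shorthand, which evaluates to a when a is non-zero and to b otherwise, so a zero-length descriptor is quietly promoted to length 1 and the EIP97 (or an EIP197 with older firmware) never sees the zero that could hang it. A tiny demonstration of the operator:

#include <stdio.h>

int main(void)
{
	unsigned int full_data_len = 0;

	/* GNU "elvis" shorthand: a ?: b is a ? a : b with a evaluated once,
	 * so a zero length becomes 1.
	 */
	unsigned int packet_length = full_data_len ?: 1;

	printf("%u\n", packet_length);	/* prints 1 */
	return 0;
}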
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index acedafe3fa98..9181523ba760 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -17,7 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18 18
19#include <crypto/ctr.h> 19#include <crypto/ctr.h>
20#include <crypto/des.h> 20#include <crypto/internal/des.h>
21#include <crypto/aes.h> 21#include <crypto/aes.h>
22#include <crypto/hmac.h> 22#include <crypto/hmac.h>
23#include <crypto/sha.h> 23#include <crypto/sha.h>
@@ -756,10 +756,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
756 } 756 }
757 cipher_cfg |= keylen_cfg; 757 cipher_cfg |= keylen_cfg;
758 } else { 758 } else {
759 u32 tmp[DES_EXPKEY_WORDS]; 759 crypto_des_verify_key(tfm, key);
760 if (des_ekey(tmp, key) == 0) {
761 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
762 }
763 } 760 }
764 /* write cfg word to cryptinfo */ 761 /* write cfg word to cryptinfo */
765 *(u32*)cinfo = cpu_to_be32(cipher_cfg); 762 *(u32*)cinfo = cpu_to_be32(cipher_cfg);
@@ -851,14 +848,8 @@ out:
851static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 848static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
852 unsigned int key_len) 849 unsigned int key_len)
853{ 850{
854 u32 flags = crypto_ablkcipher_get_flags(tfm); 851 return verify_ablkcipher_des3_key(tfm, key) ?:
855 int err; 852 ablk_setkey(tfm, key, key_len);
856
857 err = __des3_verify_key(&flags, key);
858 if (unlikely(err))
859 crypto_ablkcipher_set_flags(tfm, flags);
860
861 return ablk_setkey(tfm, key, key_len);
862} 853}
863 854
864static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 855static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -1181,7 +1172,6 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1181 unsigned int keylen) 1172 unsigned int keylen)
1182{ 1173{
1183 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1174 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1184 u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
1185 struct crypto_authenc_keys keys; 1175 struct crypto_authenc_keys keys;
1186 int err; 1176 int err;
1187 1177
@@ -1193,12 +1183,8 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1193 if (keys.authkeylen > sizeof(ctx->authkey)) 1183 if (keys.authkeylen > sizeof(ctx->authkey))
1194 goto badkey; 1184 goto badkey;
1195 1185
1196 if (keys.enckeylen != DES3_EDE_KEY_SIZE) 1186 err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
1197 goto badkey; 1187 if (err)
1198
1199 flags = crypto_aead_get_flags(tfm);
1200 err = __des3_verify_key(&flags, keys.enckey);
1201 if (unlikely(err))
1202 goto badkey; 1188 goto badkey;
1203 1189
1204 memcpy(ctx->authkey, keys.authkey, keys.authkeylen); 1190 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
@@ -1209,7 +1195,6 @@ static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1209 memzero_explicit(&keys, sizeof(keys)); 1195 memzero_explicit(&keys, sizeof(keys));
1210 return aead_setup(tfm, crypto_aead_authsize(tfm)); 1196 return aead_setup(tfm, crypto_aead_authsize(tfm));
1211badkey: 1197badkey:
1212 crypto_aead_set_flags(tfm, flags);
1213 memzero_explicit(&keys, sizeof(keys)); 1198 memzero_explicit(&keys, sizeof(keys));
1214 return err; 1199 return err;
1215} 1200}
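The ixp4xx hunks replace the open-coded DES weak-key test and the 3DES key checks with the shared helpers from <crypto/internal/des.h> (crypto_des_verify_key(), verify_ablkcipher_des3_key(), verify_aead_des3_key()), usually chained as "verify ?: setkey". A generic, self-contained sketch of that error-chaining pattern, using stand-in functions rather than the kernel API:

#include <stdio.h>

/* Stand-ins for a key-verification helper and the real setkey; only
 * the "stop on the first non-zero result" chaining is the point.
 */
static int verify_key(const unsigned char *key, unsigned int len)
{
	(void)key;
	return len == 24 ? 0 : -22;	/* -EINVAL for a wrong-size 3DES key */
}

static int do_setkey(const unsigned char *key, unsigned int len)
{
	(void)key;
	(void)len;
	return 0;
}

static int setkey(const unsigned char *key, unsigned int len)
{
	/* verify ?: setkey - return the error, or fall through to setkey */
	return verify_key(key, len) ?: do_setkey(key, len);
}

int main(void)
{
	unsigned char key[24] = { 0 };

	printf("%d\n", setkey(key, sizeof(key)));	/* 0   */
	printf("%d\n", setkey(key, 8));			/* -22 */
	return 0;
}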
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index f4321f3c0777..84ceddfee76b 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <crypto/aes.h> 12#include <crypto/aes.h>
13#include <crypto/des.h> 13#include <crypto/internal/des.h>
14 14
15#include "cesa.h" 15#include "cesa.h"
16 16
@@ -254,7 +254,7 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
254 int ret; 254 int ret;
255 int i; 255 int i;
256 256
257 ret = crypto_aes_expand_key(&ctx->aes, key, len); 257 ret = aes_expandkey(&ctx->aes, key, len);
258 if (ret) { 258 if (ret) {
259 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 259 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
260 return ret; 260 return ret;
@@ -272,21 +272,12 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
272static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, 272static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
273 unsigned int len) 273 unsigned int len)
274{ 274{
275 struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 275 struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
276 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); 276 int err;
277 u32 tmp[DES_EXPKEY_WORDS];
278 int ret;
279
280 if (len != DES_KEY_SIZE) {
281 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
282 return -EINVAL;
283 }
284 277
285 ret = des_ekey(tmp, key); 278 err = verify_skcipher_des_key(cipher, key);
286 if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 279 if (err)
287 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 280 return err;
288 return -EINVAL;
289 }
290 281
291 memcpy(ctx->key, key, DES_KEY_SIZE); 282 memcpy(ctx->key, key, DES_KEY_SIZE);
292 283
@@ -299,8 +290,8 @@ static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
299 struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); 290 struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
300 int err; 291 int err;
301 292
302 err = des3_verify_key(cipher, key); 293 err = verify_skcipher_des3_key(cipher, key);
303 if (unlikely(err)) 294 if (err)
304 return err; 295 return err;
305 296
306 memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); 297 memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 0f0ac851f4eb..a2b35fb0fb89 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -1148,8 +1148,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
1148 } 1148 }
1149 1149
1150 /* Set the memory region to 0 to avoid any leak. */ 1150 /* Set the memory region to 0 to avoid any leak. */
1151 memset(keydup, 0, keylen); 1151 kzfree(keydup);
1152 kfree(keydup);
1153 1152
1154 if (ret) 1153 if (ret)
1155 return ret; 1154 return ret;
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index b7477ee32ca0..90c9644fb8a8 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -23,7 +23,7 @@
23 23
24#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) 24#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)
25 25
26/* AES-CBC/ECB/CTR command token */ 26/* AES-CBC/ECB/CTR/OFB/CFB command token */
27#define AES_CMD0 cpu_to_le32(0x05000000) 27#define AES_CMD0 cpu_to_le32(0x05000000)
28#define AES_CMD1 cpu_to_le32(0x2d060000) 28#define AES_CMD1 cpu_to_le32(0x2d060000)
29#define AES_CMD2 cpu_to_le32(0xe4a63806) 29#define AES_CMD2 cpu_to_le32(0xe4a63806)
@@ -50,6 +50,8 @@
50/* AES transform information word 1 fields */ 50/* AES transform information word 1 fields */
51#define AES_TFM_ECB cpu_to_le32(0x0 << 0) 51#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
52#define AES_TFM_CBC cpu_to_le32(0x1 << 0) 52#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
53#define AES_TFM_OFB cpu_to_le32(0x4 << 0)
54#define AES_TFM_CFB128 cpu_to_le32(0x5 << 0)
53#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */ 55#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */
54#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */ 56#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */
55#define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */ 57#define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */
@@ -58,13 +60,15 @@
58#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) 60#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)
59 61
60/* AES flags */ 62/* AES flags */
61#define AES_FLAGS_CIPHER_MSK GENMASK(2, 0) 63#define AES_FLAGS_CIPHER_MSK GENMASK(4, 0)
62#define AES_FLAGS_ECB BIT(0) 64#define AES_FLAGS_ECB BIT(0)
63#define AES_FLAGS_CBC BIT(1) 65#define AES_FLAGS_CBC BIT(1)
64#define AES_FLAGS_CTR BIT(2) 66#define AES_FLAGS_CTR BIT(2)
65#define AES_FLAGS_GCM BIT(3) 67#define AES_FLAGS_OFB BIT(3)
66#define AES_FLAGS_ENCRYPT BIT(4) 68#define AES_FLAGS_CFB128 BIT(4)
67#define AES_FLAGS_BUSY BIT(5) 69#define AES_FLAGS_GCM BIT(5)
70#define AES_FLAGS_ENCRYPT BIT(6)
71#define AES_FLAGS_BUSY BIT(7)
68 72
69#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) 73#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))
70 74
@@ -101,6 +105,7 @@ struct mtk_aes_reqctx {
101struct mtk_aes_base_ctx { 105struct mtk_aes_base_ctx {
102 struct mtk_cryp *cryp; 106 struct mtk_cryp *cryp;
103 u32 keylen; 107 u32 keylen;
108 __le32 key[12];
104 __le32 keymode; 109 __le32 keymode;
105 110
106 mtk_aes_fn start; 111 mtk_aes_fn start;
@@ -405,7 +410,7 @@ exit:
405 return mtk_aes_complete(cryp, aes, -EINVAL); 410 return mtk_aes_complete(cryp, aes, -EINVAL);
406} 411}
407 412
408/* Initialize transform information of CBC/ECB/CTR mode */ 413/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
409static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, 414static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
410 size_t len) 415 size_t len)
411{ 416{
@@ -434,7 +439,12 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
434 case AES_FLAGS_CTR: 439 case AES_FLAGS_CTR:
435 info->tfm[1] = AES_TFM_CTR_LOAD; 440 info->tfm[1] = AES_TFM_CTR_LOAD;
436 goto ctr; 441 goto ctr;
437 442 case AES_FLAGS_OFB:
443 info->tfm[1] = AES_TFM_OFB;
444 break;
445 case AES_FLAGS_CFB128:
446 info->tfm[1] = AES_TFM_CFB128;
447 break;
438 default: 448 default:
439 /* Should not happen... */ 449 /* Should not happen... */
440 return; 450 return;
@@ -525,6 +535,8 @@ static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
525 backlog->complete(backlog, -EINPROGRESS); 535 backlog->complete(backlog, -EINPROGRESS);
526 536
527 ctx = crypto_tfm_ctx(areq->tfm); 537 ctx = crypto_tfm_ctx(areq->tfm);
538 /* Write key into state buffer */
539 memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));
528 540
529 aes->areq = areq; 541 aes->areq = areq;
530 aes->ctx = ctx; 542 aes->ctx = ctx;
@@ -644,21 +656,26 @@ static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
644 } 656 }
645 657
646 ctx->keylen = SIZE_IN_WORDS(keylen); 658 ctx->keylen = SIZE_IN_WORDS(keylen);
647 mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); 659 mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
648 660
649 return 0; 661 return 0;
650} 662}
651 663
652static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode) 664static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
653{ 665{
654 struct mtk_aes_base_ctx *ctx; 666 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
667 struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
655 struct mtk_aes_reqctx *rctx; 668 struct mtk_aes_reqctx *rctx;
669 struct mtk_cryp *cryp;
670
671 cryp = mtk_aes_find_dev(ctx);
672 if (!cryp)
673 return -ENODEV;
656 674
657 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
658 rctx = ablkcipher_request_ctx(req); 675 rctx = ablkcipher_request_ctx(req);
659 rctx->mode = mode; 676 rctx->mode = mode;
660 677
661 return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT), 678 return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
662 &req->base); 679 &req->base);
663} 680}
664 681
@@ -692,16 +709,29 @@ static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
692 return mtk_aes_crypt(req, AES_FLAGS_CTR); 709 return mtk_aes_crypt(req, AES_FLAGS_CTR);
693} 710}
694 711
712static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
713{
714 return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
715}
716
717static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
718{
719 return mtk_aes_crypt(req, AES_FLAGS_OFB);
720}
721
722static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
723{
724 return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
725}
726
727static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
728{
729 return mtk_aes_crypt(req, AES_FLAGS_CFB128);
730}
731
695static int mtk_aes_cra_init(struct crypto_tfm *tfm) 732static int mtk_aes_cra_init(struct crypto_tfm *tfm)
696{ 733{
697 struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); 734 struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
698 struct mtk_cryp *cryp = NULL;
699
700 cryp = mtk_aes_find_dev(&ctx->base);
701 if (!cryp) {
702 pr_err("can't find crypto device\n");
703 return -ENODEV;
704 }
705 735
706 tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); 736 tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
707 ctx->base.start = mtk_aes_start; 737 ctx->base.start = mtk_aes_start;
@@ -711,13 +741,6 @@ static int mtk_aes_cra_init(struct crypto_tfm *tfm)
711static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm) 741static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
712{ 742{
713 struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm); 743 struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
714 struct mtk_cryp *cryp = NULL;
715
716 cryp = mtk_aes_find_dev(&ctx->base);
717 if (!cryp) {
718 pr_err("can't find crypto device\n");
719 return -ENODEV;
720 }
721 744
722 tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); 745 tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
723 ctx->base.start = mtk_aes_ctr_start; 746 ctx->base.start = mtk_aes_ctr_start;
@@ -787,6 +810,48 @@ static struct crypto_alg aes_algs[] = {
787 .decrypt = mtk_aes_ctr_decrypt, 810 .decrypt = mtk_aes_ctr_decrypt,
788 } 811 }
789}, 812},
813{
814 .cra_name = "ofb(aes)",
815 .cra_driver_name = "ofb-aes-mtk",
816 .cra_priority = 400,
817 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
818 CRYPTO_ALG_ASYNC,
819 .cra_init = mtk_aes_cra_init,
820 .cra_blocksize = 1,
821 .cra_ctxsize = sizeof(struct mtk_aes_ctx),
822 .cra_alignmask = 0xf,
823 .cra_type = &crypto_ablkcipher_type,
824 .cra_module = THIS_MODULE,
825 .cra_u.ablkcipher = {
826 .min_keysize = AES_MIN_KEY_SIZE,
827 .max_keysize = AES_MAX_KEY_SIZE,
828 .ivsize = AES_BLOCK_SIZE,
829 .setkey = mtk_aes_setkey,
830 .encrypt = mtk_aes_ofb_encrypt,
831 .decrypt = mtk_aes_ofb_decrypt,
832 }
833},
834{
835 .cra_name = "cfb(aes)",
836 .cra_driver_name = "cfb-aes-mtk",
837 .cra_priority = 400,
838 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
839 CRYPTO_ALG_ASYNC,
840 .cra_init = mtk_aes_cra_init,
841 .cra_blocksize = 1,
842 .cra_ctxsize = sizeof(struct mtk_aes_ctx),
843 .cra_alignmask = 0xf,
844 .cra_type = &crypto_ablkcipher_type,
845 .cra_module = THIS_MODULE,
846 .cra_u.ablkcipher = {
847 .min_keysize = AES_MIN_KEY_SIZE,
848 .max_keysize = AES_MAX_KEY_SIZE,
849 .ivsize = AES_BLOCK_SIZE,
850 .setkey = mtk_aes_setkey,
851 .encrypt = mtk_aes_cfb_encrypt,
852 .decrypt = mtk_aes_cfb_decrypt,
853 }
854},
790}; 855};
791 856
792static inline struct mtk_aes_gcm_ctx * 857static inline struct mtk_aes_gcm_ctx *
@@ -905,14 +970,11 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
905 aes->resume = mtk_aes_transfer_complete; 970 aes->resume = mtk_aes_transfer_complete;
906 /* Compute total process length. */ 971 /* Compute total process length. */
907 aes->total = len + gctx->authsize; 972 aes->total = len + gctx->authsize;
908 /* Compute text length. */
909 gctx->textlen = req->cryptlen;
910 /* Hardware will append authenticated tag to output buffer */ 973 /* Hardware will append authenticated tag to output buffer */
911 scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); 974 scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
912 } else { 975 } else {
913 aes->resume = mtk_aes_gcm_tag_verify; 976 aes->resume = mtk_aes_gcm_tag_verify;
914 aes->total = len; 977 aes->total = len;
915 gctx->textlen = req->cryptlen - gctx->authsize;
916 } 978 }
917 979
918 return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); 980 return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
@@ -923,6 +985,15 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
923 struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 985 struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
924 struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 986 struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
925 struct mtk_aes_reqctx *rctx = aead_request_ctx(req); 987 struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
988 struct mtk_cryp *cryp;
989 bool enc = !!(mode & AES_FLAGS_ENCRYPT);
990
991 cryp = mtk_aes_find_dev(ctx);
992 if (!cryp)
993 return -ENODEV;
994
995 /* Compute text length. */
996 gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
926 997
927 /* Empty messages are not supported yet */ 998 /* Empty messages are not supported yet */
928 if (!gctx->textlen && !req->assoclen) 999 if (!gctx->textlen && !req->assoclen)
@@ -930,8 +1001,7 @@ static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
930 1001
931 rctx->mode = AES_FLAGS_GCM | mode; 1002 rctx->mode = AES_FLAGS_GCM | mode;
932 1003
933 return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), 1004 return mtk_aes_handle_queue(cryp, enc, &req->base);
934 &req->base);
935} 1005}
936 1006
937/* 1007/*
@@ -1003,10 +1073,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
1003 if (err) 1073 if (err)
1004 goto out; 1074 goto out;
1005 1075
1006 /* Write key into state buffer */ 1076 mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
1007 mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); 1077 mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
1008 /* Write key(H) into state buffer */
1009 mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
1010 AES_BLOCK_SIZE); 1078 AES_BLOCK_SIZE);
1011out: 1079out:
1012 kzfree(data); 1080 kzfree(data);
@@ -1046,13 +1114,6 @@ static int mtk_aes_gcm_decrypt(struct aead_request *req)
1046static int mtk_aes_gcm_init(struct crypto_aead *aead) 1114static int mtk_aes_gcm_init(struct crypto_aead *aead)
1047{ 1115{
1048 struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); 1116 struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
1049 struct mtk_cryp *cryp = NULL;
1050
1051 cryp = mtk_aes_find_dev(&ctx->base);
1052 if (!cryp) {
1053 pr_err("can't find crypto device\n");
1054 return -ENODEV;
1055 }
1056 1117
1057 ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, 1118 ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
1058 CRYPTO_ALG_ASYNC); 1119 CRYPTO_ALG_ASYNC);
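The mtk-aes changes above register new "ofb(aes)" and "cfb(aes)" ablkcipher algorithms and defer the device lookup to request time. As a hedged, illustrative sketch only (not part of the patch; the function and buffer names are hypothetical), another kernel user could reach these transforms through the generic skcipher API, which selects the mediatek implementation when its priority wins:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>

static int example_ofb_encrypt(const u8 *key, unsigned int keylen,
			       u8 *iv, struct scatterlist *src,
			       struct scatterlist *dst, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ofb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* Wait synchronously for the async hardware completion. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}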
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 125318a88cd4..7e3ad085b5bd 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -481,7 +481,6 @@ err_cleanup:
481 481
482static int mtk_crypto_probe(struct platform_device *pdev) 482static int mtk_crypto_probe(struct platform_device *pdev)
483{ 483{
484 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
485 struct mtk_cryp *cryp; 484 struct mtk_cryp *cryp;
486 int i, err; 485 int i, err;
487 486
@@ -489,16 +488,14 @@ static int mtk_crypto_probe(struct platform_device *pdev)
489 if (!cryp) 488 if (!cryp)
490 return -ENOMEM; 489 return -ENOMEM;
491 490
492 cryp->base = devm_ioremap_resource(&pdev->dev, res); 491 cryp->base = devm_platform_ioremap_resource(pdev, 0);
493 if (IS_ERR(cryp->base)) 492 if (IS_ERR(cryp->base))
494 return PTR_ERR(cryp->base); 493 return PTR_ERR(cryp->base);
495 494
496 for (i = 0; i < MTK_IRQ_NUM; i++) { 495 for (i = 0; i < MTK_IRQ_NUM; i++) {
497 cryp->irq[i] = platform_get_irq(pdev, i); 496 cryp->irq[i] = platform_get_irq(pdev, i);
498 if (cryp->irq[i] < 0) { 497 if (cryp->irq[i] < 0)
499 dev_err(cryp->dev, "no IRQ:%d resource info\n", i);
500 return cryp->irq[i]; 498 return cryp->irq[i];
501 }
502 } 499 }
503 500
504 cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); 501 cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
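The probe simplification above follows a tree-wide pattern also used by several drivers later in this diff: devm_platform_ioremap_resource() wraps the platform_get_resource()/devm_ioremap_resource() pair. A minimal sketch of the resulting shape, with a hypothetical probe function:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Replaces the two-step platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * plus devm_ioremap_resource(&pdev->dev, res) sequence with one call.
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... the rest of probe would use 'base' ... */
	return 0;
}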
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index f03b0f06fb2f..9e9f48bb7f85 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -778,7 +778,9 @@ static int mtk_sha_finup(struct ahash_request *req)
778 ctx->flags |= SHA_FLAGS_FINUP; 778 ctx->flags |= SHA_FLAGS_FINUP;
779 779
780 err1 = mtk_sha_update(req); 780 err1 = mtk_sha_update(req);
781 if (err1 == -EINPROGRESS || err1 == -EBUSY) 781 if (err1 == -EINPROGRESS ||
782 (err1 == -EBUSY && (ahash_request_flags(req) &
783 CRYPTO_TFM_REQ_MAY_BACKLOG)))
782 return err1; 784 return err1;
783 /* 785 /*
784 * final() has to be always called to cleanup resources 786 * final() has to be always called to cleanup resources
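The finup fix above depends on a return-code convention of the crypto request API: -EINPROGRESS always means the request was taken over asynchronously, while -EBUSY only means "queued on the backlog, a completion will follow" when the submitter allowed backlogging. A small sketch of that check (helper name hypothetical, not driver code):

#include <crypto/internal/hash.h>
#include <linux/crypto.h>

static bool example_request_is_pending(struct ahash_request *req, int err)
{
	/* Only trust -EBUSY as "still in flight" if backlogging was allowed. */
	return err == -EINPROGRESS ||
	       (err == -EBUSY &&
		(ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG));
}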
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f1fa637cb029..bf8d2197bc11 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -994,16 +994,12 @@ static int mxs_dcp_probe(struct platform_device *pdev)
994 } 994 }
995 995
996 dcp_vmi_irq = platform_get_irq(pdev, 0); 996 dcp_vmi_irq = platform_get_irq(pdev, 0);
997 if (dcp_vmi_irq < 0) { 997 if (dcp_vmi_irq < 0)
998 dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
999 return dcp_vmi_irq; 998 return dcp_vmi_irq;
1000 }
1001 999
1002 dcp_irq = platform_get_irq(pdev, 1); 1000 dcp_irq = platform_get_irq(pdev, 1);
1003 if (dcp_irq < 0) { 1001 if (dcp_irq < 0)
1004 dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
1005 return dcp_irq; 1002 return dcp_irq;
1006 }
1007 1003
1008 sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL); 1004 sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
1009 if (!sdcp) 1005 if (!sdcp)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 760e72a5893b..dc15b06e96ab 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -17,7 +17,7 @@
17#include <crypto/md5.h> 17#include <crypto/md5.h>
18#include <crypto/sha.h> 18#include <crypto/sha.h>
19#include <crypto/aes.h> 19#include <crypto/aes.h>
20#include <crypto/des.h> 20#include <crypto/internal/des.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
@@ -760,21 +760,13 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
760 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 760 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
761 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 761 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
762 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 762 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
763 u32 tmp[DES_EXPKEY_WORDS];
764 int err; 763 int err;
765 764
766 ctx->enc_type = n2alg->enc_type; 765 err = verify_ablkcipher_des_key(cipher, key);
767 766 if (err)
768 if (keylen != DES_KEY_SIZE) { 767 return err;
769 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
770 return -EINVAL;
771 }
772 768
773 err = des_ekey(tmp, key); 769 ctx->enc_type = n2alg->enc_type;
774 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
775 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
776 return -EINVAL;
777 }
778 770
779 ctx->key_len = keylen; 771 ctx->key_len = keylen;
780 memcpy(ctx->key.des, key, keylen); 772 memcpy(ctx->key.des, key, keylen);
@@ -787,15 +779,11 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
787 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 779 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
788 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 780 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
789 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 781 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
790 u32 flags;
791 int err; 782 int err;
792 783
793 flags = crypto_ablkcipher_get_flags(cipher); 784 err = verify_ablkcipher_des3_key(cipher, key);
794 err = __des3_verify_key(&flags, key); 785 if (err)
795 if (unlikely(err)) {
796 crypto_ablkcipher_set_flags(cipher, flags);
797 return err; 786 return err;
798 }
799 787
800 ctx->enc_type = n2alg->enc_type; 788 ctx->enc_type = n2alg->enc_type;
801 789
@@ -1295,20 +1283,20 @@ struct n2_hash_tmpl {
1295 u8 hmac_type; 1283 u8 hmac_type;
1296}; 1284};
1297 1285
1298static const u32 md5_init[MD5_HASH_WORDS] = { 1286static const u32 n2_md5_init[MD5_HASH_WORDS] = {
1299 cpu_to_le32(MD5_H0), 1287 cpu_to_le32(MD5_H0),
1300 cpu_to_le32(MD5_H1), 1288 cpu_to_le32(MD5_H1),
1301 cpu_to_le32(MD5_H2), 1289 cpu_to_le32(MD5_H2),
1302 cpu_to_le32(MD5_H3), 1290 cpu_to_le32(MD5_H3),
1303}; 1291};
1304static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { 1292static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1305 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 1293 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1306}; 1294};
1307static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { 1295static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1308 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 1296 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1309 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 1297 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1310}; 1298};
1311static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { 1299static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1312 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 1300 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1313 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 1301 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1314}; 1302};
@@ -1316,7 +1304,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
1316static const struct n2_hash_tmpl hash_tmpls[] = { 1304static const struct n2_hash_tmpl hash_tmpls[] = {
1317 { .name = "md5", 1305 { .name = "md5",
1318 .hash_zero = md5_zero_message_hash, 1306 .hash_zero = md5_zero_message_hash,
1319 .hash_init = md5_init, 1307 .hash_init = n2_md5_init,
1320 .auth_type = AUTH_TYPE_MD5, 1308 .auth_type = AUTH_TYPE_MD5,
1321 .hmac_type = AUTH_TYPE_HMAC_MD5, 1309 .hmac_type = AUTH_TYPE_HMAC_MD5,
1322 .hw_op_hashsz = MD5_DIGEST_SIZE, 1310 .hw_op_hashsz = MD5_DIGEST_SIZE,
@@ -1324,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
1324 .block_size = MD5_HMAC_BLOCK_SIZE }, 1312 .block_size = MD5_HMAC_BLOCK_SIZE },
1325 { .name = "sha1", 1313 { .name = "sha1",
1326 .hash_zero = sha1_zero_message_hash, 1314 .hash_zero = sha1_zero_message_hash,
1327 .hash_init = sha1_init, 1315 .hash_init = n2_sha1_init,
1328 .auth_type = AUTH_TYPE_SHA1, 1316 .auth_type = AUTH_TYPE_SHA1,
1329 .hmac_type = AUTH_TYPE_HMAC_SHA1, 1317 .hmac_type = AUTH_TYPE_HMAC_SHA1,
1330 .hw_op_hashsz = SHA1_DIGEST_SIZE, 1318 .hw_op_hashsz = SHA1_DIGEST_SIZE,
@@ -1332,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
1332 .block_size = SHA1_BLOCK_SIZE }, 1320 .block_size = SHA1_BLOCK_SIZE },
1333 { .name = "sha256", 1321 { .name = "sha256",
1334 .hash_zero = sha256_zero_message_hash, 1322 .hash_zero = sha256_zero_message_hash,
1335 .hash_init = sha256_init, 1323 .hash_init = n2_sha256_init,
1336 .auth_type = AUTH_TYPE_SHA256, 1324 .auth_type = AUTH_TYPE_SHA256,
1337 .hmac_type = AUTH_TYPE_HMAC_SHA256, 1325 .hmac_type = AUTH_TYPE_HMAC_SHA256,
1338 .hw_op_hashsz = SHA256_DIGEST_SIZE, 1326 .hw_op_hashsz = SHA256_DIGEST_SIZE,
@@ -1340,7 +1328,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
1340 .block_size = SHA256_BLOCK_SIZE }, 1328 .block_size = SHA256_BLOCK_SIZE },
1341 { .name = "sha224", 1329 { .name = "sha224",
1342 .hash_zero = sha224_zero_message_hash, 1330 .hash_zero = sha224_zero_message_hash,
1343 .hash_init = sha224_init, 1331 .hash_init = n2_sha224_init,
1344 .auth_type = AUTH_TYPE_SHA256, 1332 .auth_type = AUTH_TYPE_SHA256,
1345 .hmac_type = AUTH_TYPE_RESERVED, 1333 .hmac_type = AUTH_TYPE_RESERVED,
1346 .hw_op_hashsz = SHA256_DIGEST_SIZE, 1334 .hw_op_hashsz = SHA256_DIGEST_SIZE,
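The n2 setkey changes above, like the omap, picoxcell, qce, rockchip, stm32, sun4i, talitos and ux500 conversions later in this diff, replace open-coded des_ekey()/__des3_verify_key() checks with the helpers from <crypto/internal/des.h>, which enforce key length and the weak-key policy and set the tfm flags themselves. A minimal sketch of the resulting setkey shape (the context struct and field names are hypothetical):

#include <crypto/internal/des.h>
#include <linux/crypto.h>
#include <linux/string.h>

struct example_des_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
	unsigned int keylen;
};

static int example_des_setkey(struct crypto_ablkcipher *cipher,
			      const u8 *key, unsigned int keylen)
{
	struct example_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	int err;

	/* Rejects wrong lengths and, policy permitting, weak keys. */
	err = verify_ablkcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

For triple DES the same shape applies with verify_ablkcipher_des3_key().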
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index e78ff5c65ed6..c037a2403b82 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -1020,6 +1020,7 @@ static __init int nx842_powernv_init(void)
1020 ret = nx842_powernv_probe_vas(dn); 1020 ret = nx842_powernv_probe_vas(dn);
1021 if (ret) { 1021 if (ret) {
1022 nx842_delete_coprocs(); 1022 nx842_delete_coprocs();
1023 of_node_put(dn);
1023 return ret; 1024 return ret;
1024 } 1025 }
1025 } 1026 }
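The one-line nx842 fix above balances the reference that the OF iterator holds on the current node when the loop is left early. A hedged sketch of the general pattern (compatible string and probe helper are hypothetical):

#include <linux/of.h>

static int example_probe_one(struct device_node *dn)
{
	return 0;	/* placeholder for per-node setup */
}

static int example_probe_all(void)
{
	struct device_node *dn;
	int ret;

	for_each_compatible_node(dn, NULL, "example,coproc") {
		ret = example_probe_one(dn);
		if (ret) {
			/* The iterator took a reference on dn; drop it
			 * before breaking out of the loop. */
			of_node_put(dn);
			return ret;
		}
	}
	return 0;
}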
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index c6b5a3be02be..7ecca168f8c4 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -9,9 +9,6 @@
9#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" 9#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
10#define NX_VERSION "1.0" 10#define NX_VERSION "1.0"
11 11
12static const char nx_driver_string[] = NX_STRING;
13static const char nx_driver_version[] = NX_VERSION;
14
15/* a scatterlist in the format PHYP is expecting */ 12/* a scatterlist in the format PHYP is expecting */
16struct nx_sg { 13struct nx_sg {
17 u64 addr; 14 u64 addr;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 45a4647f7030..2f53fbb74100 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1180,7 +1180,6 @@ static int omap_aes_probe(struct platform_device *pdev)
1180 1180
1181 irq = platform_get_irq(pdev, 0); 1181 irq = platform_get_irq(pdev, 0);
1182 if (irq < 0) { 1182 if (irq < 0) {
1183 dev_err(dev, "can't get IRQ resource\n");
1184 err = irq; 1183 err = irq;
1185 goto err_irq; 1184 goto err_irq;
1186 } 1185 }
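The dropped dev_err() here, and in the omap-des, omap-sham, mxs-dcp, sahara and stm32 hunks in this diff, is redundant because platform_get_irq() now prints its own error message on failure, so callers only need to propagate the code. A minimal sketch (helper name hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_request_irq(struct platform_device *pdev,
			       irq_handler_t handler, void *data)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* platform_get_irq() already logged why */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), data);
}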
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 1ee69a979677..b19d7e5d55ec 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -33,7 +33,7 @@
33#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <crypto/scatterwalk.h> 35#include <crypto/scatterwalk.h>
36#include <crypto/des.h> 36#include <crypto/internal/des.h>
37#include <crypto/algapi.h> 37#include <crypto/algapi.h>
38#include <crypto/engine.h> 38#include <crypto/engine.h>
39 39
@@ -650,20 +650,13 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
650 unsigned int keylen) 650 unsigned int keylen)
651{ 651{
652 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); 652 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
653 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 653 int err;
654 654
655 pr_debug("enter, keylen: %d\n", keylen); 655 pr_debug("enter, keylen: %d\n", keylen);
656 656
657 /* Do we need to test against weak key? */ 657 err = verify_ablkcipher_des_key(cipher, key);
658 if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { 658 if (err)
659 u32 tmp[DES_EXPKEY_WORDS]; 659 return err;
660 int ret = des_ekey(tmp, key);
661
662 if (!ret) {
663 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
664 return -EINVAL;
665 }
666 }
667 660
668 memcpy(ctx->key, key, keylen); 661 memcpy(ctx->key, key, keylen);
669 ctx->keylen = keylen; 662 ctx->keylen = keylen;
@@ -672,20 +665,16 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
672} 665}
673 666
674static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 667static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
675 unsigned int keylen) 668 unsigned int keylen)
676{ 669{
677 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); 670 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
678 u32 flags;
679 int err; 671 int err;
680 672
681 pr_debug("enter, keylen: %d\n", keylen); 673 pr_debug("enter, keylen: %d\n", keylen);
682 674
683 flags = crypto_ablkcipher_get_flags(cipher); 675 err = verify_ablkcipher_des3_key(cipher, key);
684 err = __des3_verify_key(&flags, key); 676 if (err)
685 if (unlikely(err)) {
686 crypto_ablkcipher_set_flags(cipher, flags);
687 return err; 677 return err;
688 }
689 678
690 memcpy(ctx->key, key, keylen); 679 memcpy(ctx->key, key, keylen);
691 ctx->keylen = keylen; 680 ctx->keylen = keylen;
@@ -1049,7 +1038,6 @@ static int omap_des_probe(struct platform_device *pdev)
1049 1038
1050 irq = platform_get_irq(pdev, 0); 1039 irq = platform_get_irq(pdev, 0);
1051 if (irq < 0) { 1040 if (irq < 0) {
1052 dev_err(dev, "can't get IRQ resource: %d\n", irq);
1053 err = irq; 1041 err = irq;
1054 goto err_irq; 1042 goto err_irq;
1055 } 1043 }
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index e8e2907bd9f4..ac80bc6af093 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1989,7 +1989,6 @@ static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
1989 /* Get the IRQ */ 1989 /* Get the IRQ */
1990 dd->irq = platform_get_irq(pdev, 0); 1990 dd->irq = platform_get_irq(pdev, 0);
1991 if (dd->irq < 0) { 1991 if (dd->irq < 0) {
1992 dev_err(dev, "no IRQ resource info\n");
1993 err = dd->irq; 1992 err = dd->irq;
1994 goto err; 1993 goto err;
1995 } 1994 }
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index ad020133da19..8a0661250078 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -145,7 +145,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
145 ctx->cword.encrypt.keygen = 1; 145 ctx->cword.encrypt.keygen = 1;
146 ctx->cword.decrypt.keygen = 1; 146 ctx->cword.decrypt.keygen = 1;
147 147
148 if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) { 148 if (aes_expandkey(&gen_aes, in_key, key_len)) {
149 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 149 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
150 return -EINVAL; 150 return -EINVAL;
151 } 151 }
@@ -300,7 +300,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
300 return iv; 300 return iv;
301} 301}
302 302
303static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 303static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
304{ 304{
305 struct aes_ctx *ctx = aes_ctx(tfm); 305 struct aes_ctx *ctx = aes_ctx(tfm);
306 306
@@ -309,7 +309,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
309 padlock_store_cword(&ctx->cword.encrypt); 309 padlock_store_cword(&ctx->cword.encrypt);
310} 310}
311 311
312static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 312static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
313{ 313{
314 struct aes_ctx *ctx = aes_ctx(tfm); 314 struct aes_ctx *ctx = aes_ctx(tfm);
315 315
@@ -332,8 +332,8 @@ static struct crypto_alg aes_alg = {
332 .cia_min_keysize = AES_MIN_KEY_SIZE, 332 .cia_min_keysize = AES_MIN_KEY_SIZE,
333 .cia_max_keysize = AES_MAX_KEY_SIZE, 333 .cia_max_keysize = AES_MAX_KEY_SIZE,
334 .cia_setkey = aes_set_key, 334 .cia_setkey = aes_set_key,
335 .cia_encrypt = aes_encrypt, 335 .cia_encrypt = padlock_aes_encrypt,
336 .cia_decrypt = aes_decrypt, 336 .cia_decrypt = padlock_aes_decrypt,
337 } 337 }
338 } 338 }
339}; 339};
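The padlock renames above avoid clashing with the new AES library in lib/crypto, which exports aes_expandkey(), aes_encrypt() and aes_decrypt() as global symbols; the driver now also uses aes_expandkey() for its software key schedule. A hedged sketch of that single-block library API (function and buffer names beyond the library calls are hypothetical):

#include <crypto/aes.h>
#include <linux/string.h>

static int example_aes_one_block(const u8 *key, unsigned int keylen,
				 const u8 in[AES_BLOCK_SIZE],
				 u8 out[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_expandkey(&ctx, key, keylen);
	if (err)
		return err;	/* invalid key length */

	aes_encrypt(&ctx, out, in);	/* generic single-block helper */

	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}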
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index b985cb85c9bc..3cbefb41b099 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -6,7 +6,7 @@
6#include <crypto/aes.h> 6#include <crypto/aes.h>
7#include <crypto/algapi.h> 7#include <crypto/algapi.h>
8#include <crypto/authenc.h> 8#include <crypto/authenc.h>
9#include <crypto/des.h> 9#include <crypto/internal/des.h>
10#include <crypto/md5.h> 10#include <crypto/md5.h>
11#include <crypto/sha.h> 11#include <crypto/sha.h>
12#include <crypto/internal/skcipher.h> 12#include <crypto/internal/skcipher.h>
@@ -736,16 +736,12 @@ static void spacc_aead_cra_exit(struct crypto_aead *tfm)
736static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 736static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
737 unsigned int len) 737 unsigned int len)
738{ 738{
739 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 739 struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
740 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 740 int err;
741 u32 tmp[DES_EXPKEY_WORDS];
742 741
743 if (unlikely(!des_ekey(tmp, key)) && 742 err = verify_ablkcipher_des_key(cipher, key);
744 (crypto_ablkcipher_get_flags(cipher) & 743 if (err)
745 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 744 return err;
746 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
747 return -EINVAL;
748 }
749 745
750 memcpy(ctx->key, key, len); 746 memcpy(ctx->key, key, len);
751 ctx->key_len = len; 747 ctx->key_len = len;
@@ -761,15 +757,11 @@ static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
761 unsigned int len) 757 unsigned int len)
762{ 758{
763 struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher); 759 struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
764 u32 flags;
765 int err; 760 int err;
766 761
767 flags = crypto_ablkcipher_get_flags(cipher); 762 err = verify_ablkcipher_des3_key(cipher, key);
768 err = __des3_verify_key(&flags, key); 763 if (err)
769 if (unlikely(err)) {
770 crypto_ablkcipher_set_flags(cipher, flags);
771 return err; 764 return err;
772 }
773 765
774 memcpy(ctx->key, key, len); 766 memcpy(ctx->key, key, len);
775 ctx->key_len = len; 767 ctx->key_len = len;
@@ -1624,7 +1616,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
1624static int spacc_probe(struct platform_device *pdev) 1616static int spacc_probe(struct platform_device *pdev)
1625{ 1617{
1626 int i, err, ret; 1618 int i, err, ret;
1627 struct resource *mem, *irq; 1619 struct resource *irq;
1628 struct device_node *np = pdev->dev.of_node; 1620 struct device_node *np = pdev->dev.of_node;
1629 struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), 1621 struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
1630 GFP_KERNEL); 1622 GFP_KERNEL);
@@ -1653,8 +1645,7 @@ static int spacc_probe(struct platform_device *pdev)
1653 1645
1654 engine->name = dev_name(&pdev->dev); 1646 engine->name = dev_name(&pdev->dev);
1655 1647
1656 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1648 engine->regs = devm_platform_ioremap_resource(pdev, 0);
1657 engine->regs = devm_ioremap_resource(&pdev->dev, mem);
1658 if (IS_ERR(engine->regs)) 1649 if (IS_ERR(engine->regs))
1659 return PTR_ERR(engine->regs); 1650 return PTR_ERR(engine->regs);
1660 1651
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5c4c0a253129..d78f8d5c89c3 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ struct service_hndl {
95 95
96static inline int get_current_node(void) 96static inline int get_current_node(void)
97{ 97{
98 return topology_physical_package_id(smp_processor_id()); 98 return topology_physical_package_id(raw_smp_processor_id());
99} 99}
100 100
101int adf_service_register(struct service_hndl *service); 101int adf_service_register(struct service_hndl *service);
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index a976210ba41c..7a98bf5cc967 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -7,7 +7,7 @@
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <linux/types.h> 8#include <linux/types.h>
9#include <crypto/aes.h> 9#include <crypto/aes.h>
10#include <crypto/des.h> 10#include <crypto/internal/des.h>
11#include <crypto/internal/skcipher.h> 11#include <crypto/internal/skcipher.h>
12 12
13#include "cipher.h" 13#include "cipher.h"
@@ -154,27 +154,17 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
154{ 154{
155 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk); 155 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
156 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 156 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
157 unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
158 int ret; 157 int ret;
159 158
160 if (!key || !keylen) 159 if (!key || !keylen)
161 return -EINVAL; 160 return -EINVAL;
162 161
163 if (IS_AES(flags)) { 162 switch (keylen) {
164 switch (keylen) { 163 case AES_KEYSIZE_128:
165 case AES_KEYSIZE_128: 164 case AES_KEYSIZE_256:
166 case AES_KEYSIZE_256: 165 break;
167 break; 166 default:
168 default: 167 goto fallback;
169 goto fallback;
170 }
171 } else if (IS_DES(flags)) {
172 u32 tmp[DES_EXPKEY_WORDS];
173
174 ret = des_ekey(tmp, key);
175 if (!ret && (crypto_ablkcipher_get_flags(ablk) &
176 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))
177 goto weakkey;
178 } 168 }
179 169
180 ctx->enc_keylen = keylen; 170 ctx->enc_keylen = keylen;
@@ -185,24 +175,32 @@ fallback:
185 if (!ret) 175 if (!ret)
186 ctx->enc_keylen = keylen; 176 ctx->enc_keylen = keylen;
187 return ret; 177 return ret;
188weakkey: 178}
189 crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY); 179
190 return -EINVAL; 180static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
181 unsigned int keylen)
182{
183 struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
184 int err;
185
186 err = verify_ablkcipher_des_key(ablk, key);
187 if (err)
188 return err;
189
190 ctx->enc_keylen = keylen;
191 memcpy(ctx->enc_key, key, keylen);
192 return 0;
191} 193}
192 194
193static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key, 195static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
194 unsigned int keylen) 196 unsigned int keylen)
195{ 197{
196 struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk); 198 struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
197 u32 flags;
198 int err; 199 int err;
199 200
200 flags = crypto_ablkcipher_get_flags(ablk); 201 err = verify_ablkcipher_des3_key(ablk, key);
201 err = __des3_verify_key(&flags, key); 202 if (err)
202 if (unlikely(err)) {
203 crypto_ablkcipher_set_flags(ablk, flags);
204 return err; 203 return err;
205 }
206 204
207 ctx->enc_keylen = keylen; 205 ctx->enc_keylen = keylen;
208 memcpy(ctx->enc_key, key, keylen); 206 memcpy(ctx->enc_key, key, keylen);
@@ -374,8 +372,9 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
374 alg->cra_ablkcipher.ivsize = def->ivsize; 372 alg->cra_ablkcipher.ivsize = def->ivsize;
375 alg->cra_ablkcipher.min_keysize = def->min_keysize; 373 alg->cra_ablkcipher.min_keysize = def->min_keysize;
376 alg->cra_ablkcipher.max_keysize = def->max_keysize; 374 alg->cra_ablkcipher.max_keysize = def->max_keysize;
377 alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? 375 alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
378 qce_des3_setkey : qce_ablkcipher_setkey; 376 IS_DES(def->flags) ? qce_des_setkey :
377 qce_ablkcipher_setkey;
379 alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; 378 alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
380 alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; 379 alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
381 380
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index ef1d74e8ddb2..08d4ce3bfddf 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -167,7 +167,6 @@ static int qce_crypto_probe(struct platform_device *pdev)
167{ 167{
168 struct device *dev = &pdev->dev; 168 struct device *dev = &pdev->dev;
169 struct qce_device *qce; 169 struct qce_device *qce;
170 struct resource *res;
171 int ret; 170 int ret;
172 171
173 qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL); 172 qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
@@ -177,8 +176,7 @@ static int qce_crypto_probe(struct platform_device *pdev)
177 qce->dev = dev; 176 qce->dev = dev;
178 platform_set_drvdata(pdev, qce); 177 platform_set_drvdata(pdev, qce);
179 178
180 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 179 qce->base = devm_platform_ioremap_resource(pdev, 0);
181 qce->base = devm_ioremap_resource(&pdev->dev, res);
182 if (IS_ERR(qce->base)) 180 if (IS_ERR(qce->base))
183 return PTR_ERR(qce->base); 181 return PTR_ERR(qce->base);
184 182
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index e54249ccc009..4730f84b646d 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -153,7 +153,6 @@ static struct rng_alg qcom_rng_alg = {
153 153
154static int qcom_rng_probe(struct platform_device *pdev) 154static int qcom_rng_probe(struct platform_device *pdev)
155{ 155{
156 struct resource *res;
157 struct qcom_rng *rng; 156 struct qcom_rng *rng;
158 int ret; 157 int ret;
159 158
@@ -164,8 +163,7 @@ static int qcom_rng_probe(struct platform_device *pdev)
164 platform_set_drvdata(pdev, rng); 163 platform_set_drvdata(pdev, rng);
165 mutex_init(&rng->lock); 164 mutex_init(&rng->lock);
166 165
167 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 166 rng->base = devm_platform_ioremap_resource(pdev, 0);
168 rng->base = devm_ioremap_resource(&pdev->dev, res);
169 if (IS_ERR(rng->base)) 167 if (IS_ERR(rng->base))
170 return PTR_ERR(rng->base); 168 return PTR_ERR(rng->base);
171 169
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 8d7e2545e65a..e5714ef24bf2 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -311,7 +311,6 @@ MODULE_DEVICE_TABLE(of, crypto_of_id_table);
311 311
312static int rk_crypto_probe(struct platform_device *pdev) 312static int rk_crypto_probe(struct platform_device *pdev)
313{ 313{
314 struct resource *res;
315 struct device *dev = &pdev->dev; 314 struct device *dev = &pdev->dev;
316 struct rk_crypto_info *crypto_info; 315 struct rk_crypto_info *crypto_info;
317 int err = 0; 316 int err = 0;
@@ -339,8 +338,7 @@ static int rk_crypto_probe(struct platform_device *pdev)
339 338
340 spin_lock_init(&crypto_info->lock); 339 spin_lock_init(&crypto_info->lock);
341 340
342 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 341 crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
343 crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
344 if (IS_ERR(crypto_info->reg)) { 342 if (IS_ERR(crypto_info->reg)) {
345 err = PTR_ERR(crypto_info->reg); 343 err = PTR_ERR(crypto_info->reg);
346 goto err_crypto; 344 goto err_crypto;
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 54ee5b3ed9db..18e2b3f29336 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -3,7 +3,7 @@
3#define __RK3288_CRYPTO_H__ 3#define __RK3288_CRYPTO_H__
4 4
5#include <crypto/aes.h> 5#include <crypto/aes.h>
6#include <crypto/des.h> 6#include <crypto/internal/des.h>
7#include <crypto/algapi.h> 7#include <crypto/algapi.h>
8#include <linux/interrupt.h> 8#include <linux/interrupt.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 96078aaa2098..d0f4b2d18059 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -46,15 +46,12 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
46static int rk_des_setkey(struct crypto_ablkcipher *cipher, 46static int rk_des_setkey(struct crypto_ablkcipher *cipher,
47 const u8 *key, unsigned int keylen) 47 const u8 *key, unsigned int keylen)
48{ 48{
49 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 49 struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
50 struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 50 int err;
51 u32 tmp[DES_EXPKEY_WORDS];
52 51
53 if (!des_ekey(tmp, key) && 52 err = verify_ablkcipher_des_key(cipher, key);
54 (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 53 if (err)
55 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 54 return err;
56 return -EINVAL;
57 }
58 55
59 ctx->keylen = keylen; 56 ctx->keylen = keylen;
60 memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); 57 memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
@@ -65,15 +62,11 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
65 const u8 *key, unsigned int keylen) 62 const u8 *key, unsigned int keylen)
66{ 63{
67 struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); 64 struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
68 u32 flags;
69 int err; 65 int err;
70 66
71 flags = crypto_ablkcipher_get_flags(cipher); 67 err = verify_ablkcipher_des3_key(cipher, key);
72 err = __des3_verify_key(&flags, key); 68 if (err)
73 if (unlikely(err)) {
74 crypto_ablkcipher_set_flags(cipher, flags);
75 return err; 69 return err;
76 }
77 70
78 ctx->keylen = keylen; 71 ctx->keylen = keylen;
79 memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); 72 memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 9ef25230c199..010f1bb20dad 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2056,9 +2056,12 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
2056 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); 2056 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
2057 struct s5p_aes_dev *dev = ctx->dev; 2057 struct s5p_aes_dev *dev = ctx->dev;
2058 2058
2059 if (!req->nbytes)
2060 return 0;
2061
2059 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) && 2062 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
2060 ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) { 2063 ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
2061 dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); 2064 dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
2062 return -EINVAL; 2065 return -EINVAL;
2063 } 2066 }
2064 2067
@@ -2170,7 +2173,7 @@ static struct crypto_alg algs[] = {
2170 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 2173 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2171 CRYPTO_ALG_ASYNC | 2174 CRYPTO_ALG_ASYNC |
2172 CRYPTO_ALG_KERN_DRIVER_ONLY, 2175 CRYPTO_ALG_KERN_DRIVER_ONLY,
2173 .cra_blocksize = AES_BLOCK_SIZE, 2176 .cra_blocksize = 1,
2174 .cra_ctxsize = sizeof(struct s5p_aes_ctx), 2177 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
2175 .cra_alignmask = 0x0f, 2178 .cra_alignmask = 0x0f,
2176 .cra_type = &crypto_ablkcipher_type, 2179 .cra_type = &crypto_ablkcipher_type,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index b0b8e3d48aef..8ac8ec6decd5 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1403,10 +1403,8 @@ static int sahara_probe(struct platform_device *pdev)
1403 1403
1404 /* Get the IRQ */ 1404 /* Get the IRQ */
1405 irq = platform_get_irq(pdev, 0); 1405 irq = platform_get_irq(pdev, 0);
1406 if (irq < 0) { 1406 if (irq < 0)
1407 dev_err(&pdev->dev, "failed to get irq resource\n");
1408 return irq; 1407 return irq;
1409 }
1410 1408
1411 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler, 1409 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1412 0, dev_name(&pdev->dev), dev); 1410 0, dev_name(&pdev->dev), dev);
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index d6576280fc9b..1aba9372cd23 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -25,7 +25,7 @@ config CRYPTO_DEV_STM32_CRYP
25 depends on ARCH_STM32 25 depends on ARCH_STM32
26 select CRYPTO_HASH 26 select CRYPTO_HASH
27 select CRYPTO_ENGINE 27 select CRYPTO_ENGINE
28 select CRYPTO_DES 28 select CRYPTO_LIB_DES
29 help 29 help
30 This enables support for the CRYP (AES/DES/TDES) hw accelerator which 30 This enables support for the CRYP (AES/DES/TDES) hw accelerator which
31 can be found on STMicroelectronics STM32 SOC. 31 can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 440c9f1bd006..9e11c3480353 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -255,7 +255,6 @@ static int stm32_crc_probe(struct platform_device *pdev)
255{ 255{
256 struct device *dev = &pdev->dev; 256 struct device *dev = &pdev->dev;
257 struct stm32_crc *crc; 257 struct stm32_crc *crc;
258 struct resource *res;
259 int ret; 258 int ret;
260 259
261 crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); 260 crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
@@ -264,8 +263,7 @@ static int stm32_crc_probe(struct platform_device *pdev)
264 263
265 crc->dev = dev; 264 crc->dev = dev;
266 265
267 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 266 crc->regs = devm_platform_ioremap_resource(pdev, 0);
268 crc->regs = devm_ioremap_resource(dev, res);
269 if (IS_ERR(crc->regs)) { 267 if (IS_ERR(crc->regs)) {
270 dev_err(dev, "Cannot map CRC IO\n"); 268 dev_err(dev, "Cannot map CRC IO\n");
271 return PTR_ERR(crc->regs); 269 return PTR_ERR(crc->regs);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 98ae02826e8f..ba5ea6434f9c 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -15,7 +15,7 @@
15#include <linux/reset.h> 15#include <linux/reset.h>
16 16
17#include <crypto/aes.h> 17#include <crypto/aes.h>
18#include <crypto/des.h> 18#include <crypto/internal/des.h>
19#include <crypto/engine.h> 19#include <crypto/engine.h>
20#include <crypto/scatterwalk.h> 20#include <crypto/scatterwalk.h>
21#include <crypto/internal/aead.h> 21#include <crypto/internal/aead.h>
@@ -767,35 +767,15 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
767static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 767static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
768 unsigned int keylen) 768 unsigned int keylen)
769{ 769{
770 u32 tmp[DES_EXPKEY_WORDS]; 770 return verify_ablkcipher_des_key(tfm, key) ?:
771 771 stm32_cryp_setkey(tfm, key, keylen);
772 if (keylen != DES_KEY_SIZE)
773 return -EINVAL;
774
775 if ((crypto_ablkcipher_get_flags(tfm) &
776 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
777 unlikely(!des_ekey(tmp, key))) {
778 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
779 return -EINVAL;
780 }
781
782 return stm32_cryp_setkey(tfm, key, keylen);
783} 772}
784 773
785static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 774static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
786 unsigned int keylen) 775 unsigned int keylen)
787{ 776{
788 u32 flags; 777 return verify_ablkcipher_des3_key(tfm, key) ?:
789 int err; 778 stm32_cryp_setkey(tfm, key, keylen);
790
791 flags = crypto_ablkcipher_get_flags(tfm);
792 err = __des3_verify_key(&flags, key);
793 if (unlikely(err)) {
794 crypto_ablkcipher_set_flags(tfm, flags);
795 return err;
796 }
797
798 return stm32_cryp_setkey(tfm, key, keylen);
799} 779}
800 780
801static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, 781static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1955,7 +1935,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
1955{ 1935{
1956 struct device *dev = &pdev->dev; 1936 struct device *dev = &pdev->dev;
1957 struct stm32_cryp *cryp; 1937 struct stm32_cryp *cryp;
1958 struct resource *res;
1959 struct reset_control *rst; 1938 struct reset_control *rst;
1960 int irq, ret; 1939 int irq, ret;
1961 1940
@@ -1969,16 +1948,13 @@ static int stm32_cryp_probe(struct platform_device *pdev)
1969 1948
1970 cryp->dev = dev; 1949 cryp->dev = dev;
1971 1950
1972 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1951 cryp->regs = devm_platform_ioremap_resource(pdev, 0);
1973 cryp->regs = devm_ioremap_resource(dev, res);
1974 if (IS_ERR(cryp->regs)) 1952 if (IS_ERR(cryp->regs))
1975 return PTR_ERR(cryp->regs); 1953 return PTR_ERR(cryp->regs);
1976 1954
1977 irq = platform_get_irq(pdev, 0); 1955 irq = platform_get_irq(pdev, 0);
1978 if (irq < 0) { 1956 if (irq < 0)
1979 dev_err(dev, "Cannot get IRQ resource\n");
1980 return irq; 1957 return irq;
1981 }
1982 1958
1983 ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, 1959 ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
1984 stm32_cryp_irq_thread, IRQF_ONESHOT, 1960 stm32_cryp_irq_thread, IRQF_ONESHOT,
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 2b70d8796f25..cfc8e0e37bee 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1450,10 +1450,8 @@ static int stm32_hash_probe(struct platform_device *pdev)
1450 return ret; 1450 return ret;
1451 1451
1452 irq = platform_get_irq(pdev, 0); 1452 irq = platform_get_irq(pdev, 0);
1453 if (irq < 0) { 1453 if (irq < 0)
1454 dev_err(dev, "Cannot get IRQ resource\n");
1455 return irq; 1454 return irq;
1456 }
1457 1455
1458 ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, 1456 ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
1459 stm32_hash_irq_thread, IRQF_ONESHOT, 1457 stm32_hash_irq_thread, IRQF_ONESHOT,
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 6f7cbf6c2b55..6536fd4bee65 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -542,25 +542,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
542 unsigned int keylen) 542 unsigned int keylen)
543{ 543{
544 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); 544 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
545 struct sun4i_ss_ctx *ss = op->ss; 545 int err;
546 u32 flags;
547 u32 tmp[DES_EXPKEY_WORDS];
548 int ret;
549
550 if (unlikely(keylen != DES_KEY_SIZE)) {
551 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
552 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
553 return -EINVAL;
554 }
555
556 flags = crypto_skcipher_get_flags(tfm);
557 546
558 ret = des_ekey(tmp, key); 547 err = verify_skcipher_des_key(tfm, key);
559 if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 548 if (err)
560 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); 549 return err;
561 dev_dbg(ss->dev, "Weak key %u\n", keylen);
562 return -EINVAL;
563 }
564 550
565 op->keylen = keylen; 551 op->keylen = keylen;
566 memcpy(op->key, key, keylen); 552 memcpy(op->key, key, keylen);
@@ -578,8 +564,8 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
578 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); 564 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
579 int err; 565 int err;
580 566
581 err = des3_verify_key(tfm, key); 567 err = verify_skcipher_des3_key(tfm, key);
582 if (unlikely(err)) 568 if (err)
583 return err; 569 return err;
584 570
585 op->keylen = keylen; 571 op->keylen = keylen;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 2e8704271f45..9aa6fe081a27 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -225,7 +225,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
225 225
226static int sun4i_ss_probe(struct platform_device *pdev) 226static int sun4i_ss_probe(struct platform_device *pdev)
227{ 227{
228 struct resource *res;
229 u32 v; 228 u32 v;
230 int err, i; 229 int err, i;
231 unsigned long cr; 230 unsigned long cr;
@@ -240,8 +239,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
240 if (!ss) 239 if (!ss)
241 return -ENOMEM; 240 return -ENOMEM;
242 241
243 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 242 ss->base = devm_platform_ioremap_resource(pdev, 0);
244 ss->base = devm_ioremap_resource(&pdev->dev, res);
245 if (IS_ERR(ss->base)) { 243 if (IS_ERR(ss->base)) {
246 dev_err(&pdev->dev, "Cannot request MMIO\n"); 244 dev_err(&pdev->dev, "Cannot request MMIO\n");
247 return PTR_ERR(ss->base); 245 return PTR_ERR(ss->base);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index 8654d48aedc0..35a27a7145f8 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -29,7 +29,7 @@
29#include <crypto/internal/hash.h> 29#include <crypto/internal/hash.h>
30#include <crypto/internal/skcipher.h> 30#include <crypto/internal/skcipher.h>
31#include <crypto/aes.h> 31#include <crypto/aes.h>
32#include <crypto/des.h> 32#include <crypto/internal/des.h>
33#include <crypto/internal/rng.h> 33#include <crypto/internal/rng.h>
34#include <crypto/rng.h> 34#include <crypto/rng.h>
35 35
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c9d686a0e805..cb6c10b1bf36 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -30,7 +30,7 @@
30 30
31#include <crypto/algapi.h> 31#include <crypto/algapi.h>
32#include <crypto/aes.h> 32#include <crypto/aes.h>
33#include <crypto/des.h> 33#include <crypto/internal/des.h>
34#include <crypto/sha.h> 34#include <crypto/sha.h>
35#include <crypto/md5.h> 35#include <crypto/md5.h>
36#include <crypto/internal/aead.h> 36#include <crypto/internal/aead.h>
@@ -925,7 +925,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
925 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 925 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
926 struct device *dev = ctx->dev; 926 struct device *dev = ctx->dev;
927 struct crypto_authenc_keys keys; 927 struct crypto_authenc_keys keys;
928 u32 flags;
929 int err; 928 int err;
930 929
931 err = crypto_authenc_extractkeys(&keys, key, keylen); 930 err = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -936,15 +935,9 @@ static int aead_des3_setkey(struct crypto_aead *authenc,
936 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) 935 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
937 goto badkey; 936 goto badkey;
938 937
939 if (keys.enckeylen != DES3_EDE_KEY_SIZE) 938 err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
940 goto badkey; 939 if (err)
941
942 flags = crypto_aead_get_flags(authenc);
943 err = __des3_verify_key(&flags, keys.enckey);
944 if (unlikely(err)) {
945 crypto_aead_set_flags(authenc, flags);
946 goto out; 940 goto out;
947 }
948 941
949 if (ctx->keylen) 942 if (ctx->keylen)
950 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 943 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
@@ -1517,32 +1510,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1517static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher, 1510static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1518 const u8 *key, unsigned int keylen) 1511 const u8 *key, unsigned int keylen)
1519{ 1512{
1520 u32 tmp[DES_EXPKEY_WORDS]; 1513 return verify_ablkcipher_des_key(cipher, key) ?:
1521 1514 ablkcipher_setkey(cipher, key, keylen);
1522 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1523 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1524 !des_ekey(tmp, key)) {
1525 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1526 return -EINVAL;
1527 }
1528
1529 return ablkcipher_setkey(cipher, key, keylen);
1530} 1515}
1531 1516
1532static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher, 1517static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1533 const u8 *key, unsigned int keylen) 1518 const u8 *key, unsigned int keylen)
1534{ 1519{
1535 u32 flags; 1520 return verify_ablkcipher_des3_key(cipher, key) ?:
1536 int err; 1521 ablkcipher_setkey(cipher, key, keylen);
1537
1538 flags = crypto_ablkcipher_get_flags(cipher);
1539 err = __des3_verify_key(&flags, key);
1540 if (unlikely(err)) {
1541 crypto_ablkcipher_set_flags(cipher, flags);
1542 return err;
1543 }
1544
1545 return ablkcipher_setkey(cipher, key, keylen);
1546} 1522}
1547 1523
1548static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, 1524static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
index 349d34eaac13..b1c6f739f77b 100644
--- a/drivers/crypto/ux500/Kconfig
+++ b/drivers/crypto/ux500/Kconfig
@@ -9,7 +9,7 @@ config CRYPTO_DEV_UX500_CRYP
9 depends on CRYPTO_DEV_UX500 9 depends on CRYPTO_DEV_UX500
10 select CRYPTO_ALGAPI 10 select CRYPTO_ALGAPI
11 select CRYPTO_BLKCIPHER 11 select CRYPTO_BLKCIPHER
12 select CRYPTO_DES 12 select CRYPTO_LIB_DES
13 help 13 help
14 This selects the crypto driver for the UX500_CRYP hardware. It supports 14 This selects the crypto driver for the UX500_CRYP hardware. It supports
15 AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. 15 AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes.
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index bd89504e8167..8da7f87b339b 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -241,12 +241,12 @@ struct cryp_device_data {
241 struct clk *clk; 241 struct clk *clk;
242 struct regulator *pwr_regulator; 242 struct regulator *pwr_regulator;
243 int power_status; 243 int power_status;
244 struct spinlock ctx_lock; 244 spinlock_t ctx_lock;
245 struct cryp_ctx *current_ctx; 245 struct cryp_ctx *current_ctx;
246 struct klist_node list_node; 246 struct klist_node list_node;
247 struct cryp_dma dma; 247 struct cryp_dma dma;
248 bool power_state; 248 bool power_state;
249 struct spinlock power_state_spinlock; 249 spinlock_t power_state_spinlock;
250 bool restore_dev_ctx; 250 bool restore_dev_ctx;
251}; 251};
252 252
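The type change above swaps the raw struct spinlock for the spinlock_t typedef, which is the form the locking API is documented and used with throughout the kernel. A trivial sketch of declaration, initialization and use (struct and field names hypothetical):

#include <linux/spinlock.h>

struct example_device_data {
	spinlock_t ctx_lock;
	void *current_ctx;
};

static void example_init(struct example_device_data *dd)
{
	spin_lock_init(&dd->ctx_lock);
}

static void example_set_ctx(struct example_device_data *dd, void *ctx)
{
	spin_lock(&dd->ctx_lock);
	dd->current_ctx = ctx;
	spin_unlock(&dd->ctx_lock);
}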
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index b4beb54c0dbe..1628ae7a1467 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -29,7 +29,7 @@
29#include <crypto/aes.h> 29#include <crypto/aes.h>
30#include <crypto/algapi.h> 30#include <crypto/algapi.h>
31#include <crypto/ctr.h> 31#include <crypto/ctr.h>
32#include <crypto/des.h> 32#include <crypto/internal/des.h>
33#include <crypto/scatterwalk.h> 33#include <crypto/scatterwalk.h>
34 34
35#include <linux/platform_data/crypto-ux500.h> 35#include <linux/platform_data/crypto-ux500.h>
@@ -528,9 +528,9 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
528 528
529 dev_dbg(ctx->device->dev, "[%s]: ", __func__); 529 dev_dbg(ctx->device->dev, "[%s]: ", __func__);
530 530
531 if (unlikely(!IS_ALIGNED((u32)sg, 4))) { 531 if (unlikely(!IS_ALIGNED((unsigned long)sg, 4))) {
532 dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " 532 dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
533 "aligned! Addr: 0x%08x", __func__, (u32)sg); 533 "aligned! Addr: 0x%08lx", __func__, (unsigned long)sg);
534 return -EFAULT; 534 return -EFAULT;
535 } 535 }
536 536
@@ -763,9 +763,9 @@ static int hw_crypt_noxts(struct cryp_ctx *ctx,
763 763
764 ctx->outlen = ctx->datalen; 764 ctx->outlen = ctx->datalen;
765 765
766 if (unlikely(!IS_ALIGNED((u32)indata, 4))) { 766 if (unlikely(!IS_ALIGNED((unsigned long)indata, 4))) {
767 pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " 767 pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
768 "0x%08x", __func__, (u32)indata); 768 "0x%08lx", __func__, (unsigned long)indata);
769 return -EINVAL; 769 return -EINVAL;
770 } 770 }
771 771
@@ -987,26 +987,13 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
987 const u8 *key, unsigned int keylen) 987 const u8 *key, unsigned int keylen)
988{ 988{
989 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); 989 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
990 u32 *flags = &cipher->base.crt_flags; 990 int err;
991 u32 tmp[DES_EXPKEY_WORDS];
992 int ret;
993 991
994 pr_debug(DEV_DBG_NAME " [%s]", __func__); 992 pr_debug(DEV_DBG_NAME " [%s]", __func__);
995 if (keylen != DES_KEY_SIZE) {
996 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
997 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
998 __func__);
999 return -EINVAL;
1000 }
1001 993
1002 ret = des_ekey(tmp, key); 994 err = verify_ablkcipher_des_key(cipher, key);
1003 if (unlikely(ret == 0) && 995 if (err)
1004 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 996 return err;
1005 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
1006 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
1007 __func__);
1008 return -EINVAL;
1009 }
1010 997
1011 memcpy(ctx->key, key, keylen); 998 memcpy(ctx->key, key, keylen);
1012 ctx->keylen = keylen; 999 ctx->keylen = keylen;
@@ -1019,17 +1006,13 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1019 const u8 *key, unsigned int keylen) 1006 const u8 *key, unsigned int keylen)
1020{ 1007{
1021 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1008 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1022 u32 flags;
1023 int err; 1009 int err;
1024 1010
1025 pr_debug(DEV_DBG_NAME " [%s]", __func__); 1011 pr_debug(DEV_DBG_NAME " [%s]", __func__);
1026 1012
1027 flags = crypto_ablkcipher_get_flags(cipher); 1013 err = verify_ablkcipher_des3_key(cipher, key);
1028 err = __des3_verify_key(&flags, key); 1014 if (err)
1029 if (unlikely(err)) {
1030 crypto_ablkcipher_set_flags(cipher, flags);
1031 return err; 1015 return err;
1032 }
1033 1016
1034 memcpy(ctx->key, key, keylen); 1017 memcpy(ctx->key, key, keylen);
1035 ctx->keylen = keylen; 1018 ctx->keylen = keylen;
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
index ab2bd00c1c36..7c9bcc15125f 100644
--- a/drivers/crypto/ux500/hash/hash_alg.h
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -366,10 +366,10 @@ struct hash_device_data {
366 phys_addr_t phybase; 366 phys_addr_t phybase;
367 struct klist_node list_node; 367 struct klist_node list_node;
368 struct device *dev; 368 struct device *dev;
369 struct spinlock ctx_lock; 369 spinlock_t ctx_lock;
370 struct hash_ctx *current_ctx; 370 struct hash_ctx *current_ctx;
371 bool power_state; 371 bool power_state;
372 struct spinlock power_state_lock; 372 spinlock_t power_state_lock;
373 struct regulator *regulator; 373 struct regulator *regulator;
374 struct clk *clk; 374 struct clk *clk;
375 bool restore_dev_state; 375 bool restore_dev_state;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index f1ebc3dfa21e..c172a6953477 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -806,7 +806,7 @@ static int hash_process_data(struct hash_device_data *device_data,
806 * HW peripheral, otherwise we first copy data 806 * HW peripheral, otherwise we first copy data
807 * to a local buffer 807 * to a local buffer
808 */ 808 */
809 if ((0 == (((u32)data_buffer) % 4)) && 809 if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
810 (0 == *index)) 810 (0 == *index))
811 hash_processblock(device_data, 811 hash_processblock(device_data,
812 (const u32 *)data_buffer, 812 (const u32 *)data_buffer,
@@ -864,7 +864,8 @@ static int hash_dma_final(struct ahash_request *req)
864 if (ret) 864 if (ret)
865 return ret; 865 return ret;
866 866
867 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); 867 dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
868 (unsigned long)ctx);
868 869
869 if (req_ctx->updated) { 870 if (req_ctx->updated) {
870 ret = hash_resume_state(device_data, &device_data->state); 871 ret = hash_resume_state(device_data, &device_data->state);
@@ -969,7 +970,8 @@ static int hash_hw_final(struct ahash_request *req)
969 if (ret) 970 if (ret)
970 return ret; 971 return ret;
971 972
972 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); 973 dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
974 (unsigned long)ctx);
973 975
974 if (req_ctx->updated) { 976 if (req_ctx->updated) {
975 ret = hash_resume_state(device_data, &device_data->state); 977 ret = hash_resume_state(device_data, &device_data->state);
@@ -1272,8 +1274,8 @@ void hash_get_digest(struct hash_device_data *device_data,
1272 else 1274 else
1273 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); 1275 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1274 1276
1275 dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n", 1277 dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
1276 __func__, (u32) digest); 1278 __func__, (unsigned long)digest);
1277 1279
1278 /* Copy result into digest array */ 1280 /* Copy result into digest array */
1279 for (count = 0; count < loop_ctr; count++) { 1281 for (count = 0; count < loop_ctr; count++) {
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 10f266d462d6..42d19205166b 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -129,13 +129,11 @@ static int virtio_crypto_alg_ablkcipher_init_session(
129 * Avoid to do DMA from the stack, switch to using 129 * Avoid to do DMA from the stack, switch to using
130 * dynamically-allocated for the key 130 * dynamically-allocated for the key
131 */ 131 */
132 uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC); 132 uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);
133 133
134 if (!cipher_key) 134 if (!cipher_key)
135 return -ENOMEM; 135 return -ENOMEM;
136 136
137 memcpy(cipher_key, key, keylen);
138
139 spin_lock(&vcrypto->ctrl_lock); 137 spin_lock(&vcrypto->ctrl_lock);
140 /* Pad ctrl header */ 138 /* Pad ctrl header */
141 vcrypto->ctrl.header.opcode = 139 vcrypto->ctrl.header.opcode =
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 49f7258045fa..d59e736882f6 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -84,7 +84,7 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
84 u8 tweak[AES_BLOCK_SIZE]; 84 u8 tweak[AES_BLOCK_SIZE];
85 int ret; 85 int ret;
86 86
87 if (!crypto_simd_usable()) { 87 if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
88 struct skcipher_request *subreq = skcipher_request_ctx(req); 88 struct skcipher_request *subreq = skcipher_request_ctx(req);
89 89
90 *subreq = *req; 90 *subreq = *req;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index b16219e5dac9..350bc3061656 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -16,7 +16,7 @@ config CIFS
16 select CRYPTO_GCM 16 select CRYPTO_GCM
17 select CRYPTO_ECB 17 select CRYPTO_ECB
18 select CRYPTO_AES 18 select CRYPTO_AES
19 select CRYPTO_DES 19 select CRYPTO_LIB_DES
20 select KEYS 20 select KEYS
21 help 21 help
22 This is the client VFS module for the SMB3 family of NAS protocols, 22 This is the client VFS module for the SMB3 family of NAS protocols,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3289b566463f..4e2f74894e9b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1601,7 +1601,6 @@ MODULE_DESCRIPTION
1601 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and " 1601 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1602 "also older servers complying with the SNIA CIFS Specification)"); 1602 "also older servers complying with the SNIA CIFS Specification)");
1603MODULE_VERSION(CIFS_VERSION); 1603MODULE_VERSION(CIFS_VERSION);
1604MODULE_SOFTDEP("pre: des");
1605MODULE_SOFTDEP("pre: ecb"); 1604MODULE_SOFTDEP("pre: ecb");
1606MODULE_SOFTDEP("pre: hmac"); 1605MODULE_SOFTDEP("pre: hmac");
1607MODULE_SOFTDEP("pre: md4"); 1606MODULE_SOFTDEP("pre: md4");
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 2b6d87bfdf8e..39a938443e3e 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -11,13 +11,14 @@
11 11
12*/ 12*/
13 13
14#include <linux/crypto.h>
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/fips.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/random.h> 20#include <linux/random.h>
21#include <crypto/des.h>
21#include "cifs_fs_sb.h" 22#include "cifs_fs_sb.h"
22#include "cifs_unicode.h" 23#include "cifs_unicode.h"
23#include "cifspdu.h" 24#include "cifspdu.h"
@@ -58,19 +59,18 @@ static int
58smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) 59smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
59{ 60{
60 unsigned char key2[8]; 61 unsigned char key2[8];
61 struct crypto_cipher *tfm_des; 62 struct des_ctx ctx;
62 63
63 str_to_key(key, key2); 64 str_to_key(key, key2);
64 65
65 tfm_des = crypto_alloc_cipher("des", 0, 0); 66 if (fips_enabled) {
66 if (IS_ERR(tfm_des)) { 67 cifs_dbg(VFS, "FIPS compliance enabled: DES not permitted\n");
67 cifs_dbg(VFS, "could not allocate des crypto API\n"); 68 return -ENOENT;
68 return PTR_ERR(tfm_des);
69 } 69 }
70 70
71 crypto_cipher_setkey(tfm_des, key2, 8); 71 des_expand_key(&ctx, key2, DES_KEY_SIZE);
72 crypto_cipher_encrypt_one(tfm_des, out, in); 72 des_encrypt(&ctx, out, in);
73 crypto_free_cipher(tfm_des); 73 memzero_explicit(&ctx, sizeof(ctx));
74 74
75 return 0; 75 return 0;
76} 76}
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 6f4536d70b8e..adff14fcb8e4 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -3,3 +3,5 @@
3# asm headers that all architectures except um should have 3# asm headers that all architectures except um should have
4# (This file is not included when SRCARCH=um since UML borrows several 4# (This file is not included when SRCARCH=um since UML borrows several
5# asm headers from the host architecutre.) 5# asm headers from the host architecutre.)
6
7mandatory-y += simd.h
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 0fdb542c70cd..2090729701ab 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -29,12 +29,62 @@ struct crypto_aes_ctx {
29}; 29};
30 30
31extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; 31extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
32extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned;
33extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; 32extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
34extern const u32 crypto_il_tab[4][256] ____cacheline_aligned; 33
34/*
35 * validate key length for AES algorithms
36 */
37static inline int aes_check_keylen(unsigned int keylen)
38{
39 switch (keylen) {
40 case AES_KEYSIZE_128:
41 case AES_KEYSIZE_192:
42 case AES_KEYSIZE_256:
43 break;
44 default:
45 return -EINVAL;
46 }
47
48 return 0;
49}
35 50
36int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 51int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
37 unsigned int key_len); 52 unsigned int key_len);
38int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, 53
39 unsigned int key_len); 54/**
55 * aes_expandkey - Expands the AES key as described in FIPS-197
56 * @ctx: The location where the computed key will be stored.
57 * @in_key: The supplied key.
58 * @key_len: The length of the supplied key.
59 *
60 * Returns 0 on success. The function fails only if an invalid key size (or
61 * pointer) is supplied.
62 * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes
63 * key schedule plus a 16 bytes key which is used before the first round).
64 * The decryption key is prepared for the "Equivalent Inverse Cipher" as
65 * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
66 * for the initial combination, the second slot for the first round and so on.
67 */
68int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
69 unsigned int key_len);
70
71/**
72 * aes_encrypt - Encrypt a single AES block
73 * @ctx: Context struct containing the key schedule
74 * @out: Buffer to store the ciphertext
75 * @in: Buffer containing the plaintext
76 */
77void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
78
79/**
80 * aes_decrypt - Decrypt a single AES block
81 * @ctx: Context struct containing the key schedule
82 * @out: Buffer to store the plaintext
83 * @in: Buffer containing the ciphertext
84 */
85void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
86
87extern const u8 crypto_aes_sbox[];
88extern const u8 crypto_aes_inv_sbox[];
89
40#endif 90#endif
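For reference, a minimal sketch (not part of this series) of how a single-block user might call the AES library interface declared above; the helper name and buffers are hypothetical:

#include <linux/types.h>
#include <linux/string.h>
#include <crypto/aes.h>

/* Hypothetical helper: validate and expand a key, encrypt one block with
 * the library API, and wipe the expanded schedule before returning. */
static int example_aes_encrypt_block(const u8 *key, unsigned int keylen,
				     const u8 in[AES_BLOCK_SIZE],
				     u8 out[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	err = aes_expandkey(&ctx, key, keylen);
	if (err)
		return err;

	aes_encrypt(&ctx, out, in);
	memzero_explicit(&ctx, sizeof(ctx));

	return 0;
}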
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index dc1106af95c3..e5bd302f2c49 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -409,10 +409,8 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
409 409
410static inline void crypto_yield(u32 flags) 410static inline void crypto_yield(u32 flags)
411{ 411{
412#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
413 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) 412 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
414 cond_resched(); 413 cond_resched();
415#endif
416} 414}
417 415
418int crypto_register_notifier(struct notifier_block *nb); 416int crypto_register_notifier(struct notifier_block *nb);
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index 06984a26c8cf..a1c66d1001af 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -8,8 +8,58 @@
8#ifndef _CRYPTO_CTR_H 8#ifndef _CRYPTO_CTR_H
9#define _CRYPTO_CTR_H 9#define _CRYPTO_CTR_H
10 10
11#include <crypto/algapi.h>
12#include <crypto/internal/skcipher.h>
13#include <linux/string.h>
14#include <linux/types.h>
15
11#define CTR_RFC3686_NONCE_SIZE 4 16#define CTR_RFC3686_NONCE_SIZE 4
12#define CTR_RFC3686_IV_SIZE 8 17#define CTR_RFC3686_IV_SIZE 8
13#define CTR_RFC3686_BLOCK_SIZE 16 18#define CTR_RFC3686_BLOCK_SIZE 16
14 19
20static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
21 void (*fn)(struct crypto_skcipher *,
22 const u8 *, u8 *))
23{
24 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
25 int blocksize = crypto_skcipher_chunksize(tfm);
26 u8 buf[MAX_CIPHER_BLOCKSIZE];
27 struct skcipher_walk walk;
28 int err;
29
30 /* avoid integer division due to variable blocksize parameter */
31 if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
32 return -EINVAL;
33
34 err = skcipher_walk_virt(&walk, req, false);
35
36 while (walk.nbytes > 0) {
37 u8 *dst = walk.dst.virt.addr;
38 u8 *src = walk.src.virt.addr;
39 int nbytes = walk.nbytes;
40 int tail = 0;
41
42 if (nbytes < walk.total) {
43 tail = walk.nbytes & (blocksize - 1);
44 nbytes -= tail;
45 }
46
47 do {
48 int bsize = min(nbytes, blocksize);
49
50 fn(tfm, walk.iv, buf);
51
52 crypto_xor_cpy(dst, src, buf, bsize);
53 crypto_inc(walk.iv, blocksize);
54
55 dst += bsize;
56 src += bsize;
57 nbytes -= bsize;
58 } while (nbytes > 0);
59
60 err = skcipher_walk_done(&walk, tail);
61 }
62 return err;
63}
64
15#endif /* _CRYPTO_CTR_H */ 65#endif /* _CRYPTO_CTR_H */
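As a hedged sketch of how the new crypto_ctr_encrypt_walk() helper is meant to be wired up, a driver with only a single-block AES primitive could use it roughly as follows; the wrapper and request handler names are hypothetical:

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/skcipher.h>

/* Encrypt one counter block; crypto_ctr_encrypt_walk() XORs the result
 * into the data and advances the counter itself. */
static void example_ctr_encrypt_one(struct crypto_skcipher *tfm,
				    const u8 *src, u8 *dst)
{
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	aes_encrypt(ctx, dst, src);
}

static int example_ctr_encrypt(struct skcipher_request *req)
{
	return crypto_ctr_encrypt_walk(req, example_ctr_encrypt_one);
}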
diff --git a/include/crypto/des.h b/include/crypto/des.h
index 72c7c8e5a5a7..7812b4331ae4 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -6,10 +6,7 @@
6#ifndef __CRYPTO_DES_H 6#ifndef __CRYPTO_DES_H
7#define __CRYPTO_DES_H 7#define __CRYPTO_DES_H
8 8
9#include <crypto/skcipher.h> 9#include <linux/types.h>
10#include <linux/compiler.h>
11#include <linux/fips.h>
12#include <linux/string.h>
13 10
14#define DES_KEY_SIZE 8 11#define DES_KEY_SIZE 8
15#define DES_EXPKEY_WORDS 32 12#define DES_EXPKEY_WORDS 32
@@ -19,48 +16,42 @@
19#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) 16#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
20#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE 17#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
21 18
22static inline int __des3_verify_key(u32 *flags, const u8 *key) 19struct des_ctx {
23{ 20 u32 expkey[DES_EXPKEY_WORDS];
24 int err = -EINVAL; 21};
25 u32 K[6];
26 22
27 memcpy(K, key, DES3_EDE_KEY_SIZE); 23struct des3_ede_ctx {
24 u32 expkey[DES3_EDE_EXPKEY_WORDS];
25};
28 26
29 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 27void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src);
30 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && 28void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src);
31 (fips_enabled ||
32 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
33 goto bad;
34 29
35 if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) 30void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src);
36 goto bad; 31void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src);
37 32
38 err = 0; 33/**
39 34 * des_expand_key - Expand a DES input key into a key schedule
40out: 35 * @ctx: the key schedule
41 memzero_explicit(K, DES3_EDE_KEY_SIZE); 36 * @key: buffer containing the input key
42 37 * @len: size of the buffer contents
43 return err; 38 *
44 39 * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
45bad: 40 * the key is accepted but has been found to be weak.
46 *flags |= CRYPTO_TFM_RES_WEAK_KEY; 41 */
47 goto out; 42int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen);
48} 43
49 44/**
50static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key) 45 * des3_ede_expand_key - Expand a triple DES input key into a key schedule
51{ 46 * @ctx: the key schedule
52 u32 flags; 47 * @key: buffer containing the input key
53 int err; 48 * @len: size of the buffer contents
54 49 *
55 flags = crypto_skcipher_get_flags(tfm); 50 * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
56 err = __des3_verify_key(&flags, key); 51 * the key is accepted but has been found to be weak. Note that weak keys will
57 crypto_skcipher_set_flags(tfm, flags); 52 * be rejected (and -EINVAL will be returned) when running in FIPS mode.
58 return err; 53 */
59} 54int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key,
60 55 unsigned int keylen);
61extern unsigned long des_ekey(u32 *pe, const u8 *k);
62
63extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
64 unsigned int keylen);
65 56
66#endif /* __CRYPTO_DES_H */ 57#endif /* __CRYPTO_DES_H */
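A hedged sketch of the new DES library interface in use, assuming a caller that wants to tolerate weak keys outside FIPS mode; the function name is hypothetical and the key buffer is expected to be DES3_EDE_KEY_SIZE bytes:

#include <linux/errno.h>
#include <crypto/des.h>

static int example_des3_expand(struct des3_ede_ctx *ctx, const u8 *key,
			       unsigned int keylen)
{
	int err;

	err = des3_ede_expand_key(ctx, key, keylen);
	if (err == -ENOKEY)
		err = 0;	/* weak key accepted; FIPS mode returns -EINVAL instead */

	return err;
}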
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index c50e057ea17e..9d7eff04f224 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -1,8 +1,63 @@
1#ifndef _CRYPTO_GCM_H 1#ifndef _CRYPTO_GCM_H
2#define _CRYPTO_GCM_H 2#define _CRYPTO_GCM_H
3 3
4#include <linux/errno.h>
5
4#define GCM_AES_IV_SIZE 12 6#define GCM_AES_IV_SIZE 12
5#define GCM_RFC4106_IV_SIZE 8 7#define GCM_RFC4106_IV_SIZE 8
6#define GCM_RFC4543_IV_SIZE 8 8#define GCM_RFC4543_IV_SIZE 8
7 9
10/*
11 * validate authentication tag for GCM
12 */
13static inline int crypto_gcm_check_authsize(unsigned int authsize)
14{
15 switch (authsize) {
16 case 4:
17 case 8:
18 case 12:
19 case 13:
20 case 14:
21 case 15:
22 case 16:
23 break;
24 default:
25 return -EINVAL;
26 }
27
28 return 0;
29}
30
31/*
32 * validate authentication tag for RFC4106
33 */
34static inline int crypto_rfc4106_check_authsize(unsigned int authsize)
35{
36 switch (authsize) {
37 case 8:
38 case 12:
39 case 16:
40 break;
41 default:
42 return -EINVAL;
43 }
44
45 return 0;
46}
47
48/*
49 * validate assoclen for RFC4106/RFC4543
50 */
51static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
52{
53 switch (assoclen) {
54 case 16:
55 case 20:
56 break;
57 default:
58 return -EINVAL;
59 }
60
61 return 0;
62}
8#endif 63#endif
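A hedged sketch of how an AEAD implementation might use the new GCM validation helpers; the callback name is hypothetical:

#include <crypto/aead.h>
#include <crypto/gcm.h>

/* ->setauthsize() simply defers to the shared validation helper. */
static int example_gcm_setauthsize(struct crypto_aead *tfm,
				   unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}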
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index 9136301062a5..f832c9f2aca3 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Common values for GHASH algorithms 3 * Common values for the GHASH hash function
4 */ 4 */
5 5
6#ifndef __CRYPTO_GHASH_H__ 6#ifndef __CRYPTO_GHASH_H__
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
index 8c602b187c58..fd54074332f5 100644
--- a/include/crypto/internal/cryptouser.h
+++ b/include/crypto/internal/cryptouser.h
@@ -1,14 +1,15 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#include <linux/cryptouser.h>
2#include <net/netlink.h> 3#include <net/netlink.h>
3 4
4extern struct sock *crypto_nlsk;
5
6struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); 5struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
7 6
8#ifdef CONFIG_CRYPTO_STATS 7#ifdef CONFIG_CRYPTO_STATS
9int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); 8int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
10#else 9#else
11static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) 10static inline int crypto_reportstat(struct sk_buff *in_skb,
11 struct nlmsghdr *in_nlh,
12 struct nlattr **attrs)
12{ 13{
13 return -ENOTSUPP; 14 return -ENOTSUPP;
14} 15}
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
new file mode 100644
index 000000000000..81ea1a425e9c
--- /dev/null
+++ b/include/crypto/internal/des.h
@@ -0,0 +1,152 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * DES & Triple DES EDE key verification helpers
4 */
5
6#ifndef __CRYPTO_INTERNAL_DES_H
7#define __CRYPTO_INTERNAL_DES_H
8
9#include <linux/crypto.h>
10#include <linux/fips.h>
11#include <crypto/des.h>
12#include <crypto/aead.h>
13#include <crypto/skcipher.h>
14
15/**
16 * crypto_des_verify_key - Check whether a DES key is weak
17 * @tfm: the crypto algo
18 * @key: the key buffer
19 *
20 * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
21 * keys. Otherwise, 0 is returned.
22 *
23 * It is the job of the caller to ensure that the size of the key equals
24 * DES_KEY_SIZE.
25 */
26static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
27{
28 struct des_ctx tmp;
29 int err;
30
31 err = des_expand_key(&tmp, key, DES_KEY_SIZE);
32 if (err == -ENOKEY) {
33 if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
34 err = -EINVAL;
35 else
36 err = 0;
37 }
38
39 if (err)
40 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
41
42 memzero_explicit(&tmp, sizeof(tmp));
43 return err;
44}
45
46/*
47 * RFC2451:
48 *
49 * For DES-EDE3, there is no known need to reject weak or
50 * complementation keys. Any weakness is obviated by the use of
51 * multiple keys.
52 *
53 * However, if the first two or last two independent 64-bit keys are
54 * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
55 * same as DES. Implementers MUST reject keys that exhibit this
56 * property.
57 *
58 */
59static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len,
60 bool check_weak)
61{
62 int ret = fips_enabled ? -EINVAL : -ENOKEY;
63 u32 K[6];
64
65 memcpy(K, key, DES3_EDE_KEY_SIZE);
66
67 if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
68 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
69 (fips_enabled || check_weak))
70 goto bad;
71
72 if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
73 goto bad;
74
75 ret = 0;
76bad:
77 memzero_explicit(K, DES3_EDE_KEY_SIZE);
78
79 return ret;
80}
81
82/**
83 * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak
84 * @tfm: the crypto algo
85 * @key: the key buffer
86 *
87 * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak
88 * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some
89 * keys are rejected in FIPS mode even if weak keys are permitted by the TFM
90 * flags.
91 *
92 * It is the job of the caller to ensure that the size of the key equals
93 * DES3_EDE_KEY_SIZE.
94 */
95static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm,
96 const u8 *key)
97{
98 int err;
99
100 err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
101 crypto_tfm_get_flags(tfm) &
102 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
103 if (err)
104 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
105 return err;
106}
107
108static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm,
109 const u8 *key)
110{
111 return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key);
112}
113
114static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
115 const u8 *key)
116{
117 return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key);
118}
119
120static inline int verify_ablkcipher_des_key(struct crypto_ablkcipher *tfm,
121 const u8 *key)
122{
123 return crypto_des_verify_key(crypto_ablkcipher_tfm(tfm), key);
124}
125
126static inline int verify_ablkcipher_des3_key(struct crypto_ablkcipher *tfm,
127 const u8 *key)
128{
129 return crypto_des3_ede_verify_key(crypto_ablkcipher_tfm(tfm), key);
130}
131
132static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
133 int keylen)
134{
135 if (keylen != DES_KEY_SIZE) {
136 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
137 return -EINVAL;
138 }
139 return crypto_des_verify_key(crypto_aead_tfm(tfm), key);
140}
141
142static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key,
143 int keylen)
144{
145 if (keylen != DES3_EDE_KEY_SIZE) {
146 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
147 return -EINVAL;
148 }
149 return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key);
150}
151
152#endif /* __CRYPTO_INTERNAL_DES_H */
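The driver conversions elsewhere in this diff follow a common setkey pattern; here is a hedged sketch with a hypothetical context structure:

#include <linux/string.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

struct example_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
	unsigned int keylen;
};

/* Reject weak or malformed 3DES keys via the shared helper, then stash
 * the key in the transform context. */
static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}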
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index d68faa5759ad..734b6f7081b8 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -148,6 +148,11 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
148 struct aead_request *req, bool atomic); 148 struct aead_request *req, bool atomic);
149void skcipher_walk_complete(struct skcipher_walk *walk, int err); 149void skcipher_walk_complete(struct skcipher_walk *walk, int err);
150 150
151static inline void skcipher_walk_abort(struct skcipher_walk *walk)
152{
153 skcipher_walk_done(walk, -ECANCELED);
154}
155
151static inline void ablkcipher_request_complete(struct ablkcipher_request *req, 156static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
152 int err) 157 int err)
153{ 158{
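A hedged sketch of the intended use of skcipher_walk_abort(): bail out of a walk when a later step fails so the walk state is unwound with -ECANCELED. example_hw_crypt() is a hypothetical hardware call, not an API from this series.

#include <linux/errno.h>
#include <crypto/internal/skcipher.h>

int example_hw_crypt(const u8 *src, u8 *dst, unsigned int len); /* hypothetical */

static int example_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		if (example_hw_crypt(walk.src.virt.addr, walk.dst.virt.addr,
				     walk.nbytes)) {
			skcipher_walk_abort(&walk);
			return -EIO;
		}
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}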
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
deleted file mode 100644
index 5cefddb1991f..000000000000
--- a/include/crypto/morus1280_glue.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * The MORUS-1280 Authenticated-Encryption Algorithm
4 * Common glue skeleton -- header file
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#ifndef _CRYPTO_MORUS1280_GLUE_H
11#define _CRYPTO_MORUS1280_GLUE_H
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <crypto/algapi.h>
16#include <crypto/aead.h>
17#include <crypto/morus_common.h>
18
19#define MORUS1280_WORD_SIZE 8
20#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE)
21
22struct morus1280_block {
23 u8 bytes[MORUS1280_BLOCK_SIZE];
24};
25
26struct morus1280_glue_ops {
27 void (*init)(void *state, const void *key, const void *iv);
28 void (*ad)(void *state, const void *data, unsigned int length);
29 void (*enc)(void *state, const void *src, void *dst, unsigned int length);
30 void (*dec)(void *state, const void *src, void *dst, unsigned int length);
31 void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length);
32 void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length);
33 void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);
34};
35
36struct morus1280_ctx {
37 const struct morus1280_glue_ops *ops;
38 struct morus1280_block key;
39};
40
41void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
42 const struct morus1280_glue_ops *ops);
43int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
44 unsigned int keylen);
45int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
46 unsigned int authsize);
47int crypto_morus1280_glue_encrypt(struct aead_request *req);
48int crypto_morus1280_glue_decrypt(struct aead_request *req);
49
50#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \
51 static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
52 .init = crypto_morus1280_##id##_init, \
53 .ad = crypto_morus1280_##id##_ad, \
54 .enc = crypto_morus1280_##id##_enc, \
55 .enc_tail = crypto_morus1280_##id##_enc_tail, \
56 .dec = crypto_morus1280_##id##_dec, \
57 .dec_tail = crypto_morus1280_##id##_dec_tail, \
58 .final = crypto_morus1280_##id##_final, \
59 }; \
60 \
61 static int crypto_morus1280_##id##_init_tfm(struct crypto_aead *tfm) \
62 { \
63 crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_##id##_ops); \
64 return 0; \
65 } \
66 \
67 static void crypto_morus1280_##id##_exit_tfm(struct crypto_aead *tfm) \
68 { \
69 } \
70 \
71 static struct aead_alg crypto_morus1280_##id##_alg = { \
72 .setkey = crypto_morus1280_glue_setkey, \
73 .setauthsize = crypto_morus1280_glue_setauthsize, \
74 .encrypt = crypto_morus1280_glue_encrypt, \
75 .decrypt = crypto_morus1280_glue_decrypt, \
76 .init = crypto_morus1280_##id##_init_tfm, \
77 .exit = crypto_morus1280_##id##_exit_tfm, \
78 \
79 .ivsize = MORUS_NONCE_SIZE, \
80 .maxauthsize = MORUS_MAX_AUTH_SIZE, \
81 .chunksize = MORUS1280_BLOCK_SIZE, \
82 \
83 .base = { \
84 .cra_flags = CRYPTO_ALG_INTERNAL, \
85 .cra_blocksize = 1, \
86 .cra_ctxsize = sizeof(struct morus1280_ctx), \
87 .cra_alignmask = 0, \
88 .cra_priority = priority, \
89 \
90 .cra_name = "__morus1280", \
91 .cra_driver_name = "__"driver_name, \
92 \
93 .cra_module = THIS_MODULE, \
94 } \
95 }
96
97#endif /* _CRYPTO_MORUS1280_GLUE_H */
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
deleted file mode 100644
index 0ee6266cb26c..000000000000
--- a/include/crypto/morus640_glue.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * The MORUS-640 Authenticated-Encryption Algorithm
4 * Common glue skeleton -- header file
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#ifndef _CRYPTO_MORUS640_GLUE_H
11#define _CRYPTO_MORUS640_GLUE_H
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <crypto/algapi.h>
16#include <crypto/aead.h>
17#include <crypto/morus_common.h>
18
19#define MORUS640_WORD_SIZE 4
20#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE)
21
22struct morus640_block {
23 u8 bytes[MORUS640_BLOCK_SIZE];
24};
25
26struct morus640_glue_ops {
27 void (*init)(void *state, const void *key, const void *iv);
28 void (*ad)(void *state, const void *data, unsigned int length);
29 void (*enc)(void *state, const void *src, void *dst, unsigned int length);
30 void (*dec)(void *state, const void *src, void *dst, unsigned int length);
31 void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length);
32 void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length);
33 void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);
34};
35
36struct morus640_ctx {
37 const struct morus640_glue_ops *ops;
38 struct morus640_block key;
39};
40
41void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
42 const struct morus640_glue_ops *ops);
43int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
44 unsigned int keylen);
45int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
46 unsigned int authsize);
47int crypto_morus640_glue_encrypt(struct aead_request *req);
48int crypto_morus640_glue_decrypt(struct aead_request *req);
49
50#define MORUS640_DECLARE_ALG(id, driver_name, priority) \
51 static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
52 .init = crypto_morus640_##id##_init, \
53 .ad = crypto_morus640_##id##_ad, \
54 .enc = crypto_morus640_##id##_enc, \
55 .enc_tail = crypto_morus640_##id##_enc_tail, \
56 .dec = crypto_morus640_##id##_dec, \
57 .dec_tail = crypto_morus640_##id##_dec_tail, \
58 .final = crypto_morus640_##id##_final, \
59 }; \
60 \
61 static int crypto_morus640_##id##_init_tfm(struct crypto_aead *tfm) \
62 { \
63 crypto_morus640_glue_init_ops(tfm, &crypto_morus640_##id##_ops); \
64 return 0; \
65 } \
66 \
67 static void crypto_morus640_##id##_exit_tfm(struct crypto_aead *tfm) \
68 { \
69 } \
70 \
71 static struct aead_alg crypto_morus640_##id##_alg = {\
72 .setkey = crypto_morus640_glue_setkey, \
73 .setauthsize = crypto_morus640_glue_setauthsize, \
74 .encrypt = crypto_morus640_glue_encrypt, \
75 .decrypt = crypto_morus640_glue_decrypt, \
76 .init = crypto_morus640_##id##_init_tfm, \
77 .exit = crypto_morus640_##id##_exit_tfm, \
78 \
79 .ivsize = MORUS_NONCE_SIZE, \
80 .maxauthsize = MORUS_MAX_AUTH_SIZE, \
81 .chunksize = MORUS640_BLOCK_SIZE, \
82 \
83 .base = { \
84 .cra_flags = CRYPTO_ALG_INTERNAL, \
85 .cra_blocksize = 1, \
86 .cra_ctxsize = sizeof(struct morus640_ctx), \
87 .cra_alignmask = 0, \
88 .cra_priority = priority, \
89 \
90 .cra_name = "__morus640", \
91 .cra_driver_name = "__"driver_name, \
92 \
93 .cra_module = THIS_MODULE, \
94 } \
95 }
96
97#endif /* _CRYPTO_MORUS640_GLUE_H */
diff --git a/include/crypto/morus_common.h b/include/crypto/morus_common.h
deleted file mode 100644
index 969510a9a56c..000000000000
--- a/include/crypto/morus_common.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * The MORUS Authenticated-Encryption Algorithm
4 * Common definitions
5 *
6 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
7 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
8 */
9
10#ifndef _CRYPTO_MORUS_COMMON_H
11#define _CRYPTO_MORUS_COMMON_H
12
13#define MORUS_BLOCK_WORDS 4
14#define MORUS_STATE_BLOCKS 5
15#define MORUS_NONCE_SIZE 16
16#define MORUS_MAX_AUTH_SIZE 16
17
18#endif /* _CRYPTO_MORUS_COMMON_H */
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 8a46202b1857..5c2132c71900 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -112,4 +112,51 @@ extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
112 112
113extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, 113extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
114 unsigned int len, u8 *hash); 114 unsigned int len, u8 *hash);
115
116/*
117 * Stand-alone implementation of the SHA256 algorithm. It is designed to
118 * have as little dependencies as possible so it can be used in the
119 * kexec_file purgatory. In other cases you should generally use the
120 * hash APIs from include/crypto/hash.h. Especially when hashing large
121 * amounts of data as those APIs may be hw-accelerated.
122 *
123 * For details see lib/crypto/sha256.c
124 */
125
126static inline int sha256_init(struct sha256_state *sctx)
127{
128 sctx->state[0] = SHA256_H0;
129 sctx->state[1] = SHA256_H1;
130 sctx->state[2] = SHA256_H2;
131 sctx->state[3] = SHA256_H3;
132 sctx->state[4] = SHA256_H4;
133 sctx->state[5] = SHA256_H5;
134 sctx->state[6] = SHA256_H6;
135 sctx->state[7] = SHA256_H7;
136 sctx->count = 0;
137
138 return 0;
139}
140extern int sha256_update(struct sha256_state *sctx, const u8 *input,
141 unsigned int length);
142extern int sha256_final(struct sha256_state *sctx, u8 *hash);
143
144static inline int sha224_init(struct sha256_state *sctx)
145{
146 sctx->state[0] = SHA224_H0;
147 sctx->state[1] = SHA224_H1;
148 sctx->state[2] = SHA224_H2;
149 sctx->state[3] = SHA224_H3;
150 sctx->state[4] = SHA224_H4;
151 sctx->state[5] = SHA224_H5;
152 sctx->state[6] = SHA224_H6;
153 sctx->state[7] = SHA224_H7;
154 sctx->count = 0;
155
156 return 0;
157}
158extern int sha224_update(struct sha256_state *sctx, const u8 *input,
159 unsigned int length);
160extern int sha224_final(struct sha256_state *sctx, u8 *hash);
161
115#endif 162#endif
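A hedged sketch of the stand-alone SHA-256 interface declared above, computing a digest over a flat buffer; the wrapper name is hypothetical:

#include <crypto/sha.h>

static void example_sha256(const u8 *data, unsigned int len,
			   u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);
}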
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 63c14f2dc7bd..20fd1f7468af 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -5,6 +5,9 @@
5 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> 5 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
6 */ 6 */
7 7
8#ifndef _CRYPTO_SHA1_BASE_H
9#define _CRYPTO_SHA1_BASE_H
10
8#include <crypto/internal/hash.h> 11#include <crypto/internal/hash.h>
9#include <crypto/sha.h> 12#include <crypto/sha.h>
10#include <linux/crypto.h> 13#include <linux/crypto.h>
@@ -101,3 +104,5 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
101 *sctx = (struct sha1_state){}; 104 *sctx = (struct sha1_state){};
102 return 0; 105 return 0;
103} 106}
107
108#endif /* _CRYPTO_SHA1_BASE_H */
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index 59159bc944f5..cea60cff80bd 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -5,6 +5,9 @@
5 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> 5 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
6 */ 6 */
7 7
8#ifndef _CRYPTO_SHA256_BASE_H
9#define _CRYPTO_SHA256_BASE_H
10
8#include <crypto/internal/hash.h> 11#include <crypto/internal/hash.h>
9#include <crypto/sha.h> 12#include <crypto/sha.h>
10#include <linux/crypto.h> 13#include <linux/crypto.h>
@@ -19,34 +22,14 @@ static inline int sha224_base_init(struct shash_desc *desc)
19{ 22{
20 struct sha256_state *sctx = shash_desc_ctx(desc); 23 struct sha256_state *sctx = shash_desc_ctx(desc);
21 24
22 sctx->state[0] = SHA224_H0; 25 return sha224_init(sctx);
23 sctx->state[1] = SHA224_H1;
24 sctx->state[2] = SHA224_H2;
25 sctx->state[3] = SHA224_H3;
26 sctx->state[4] = SHA224_H4;
27 sctx->state[5] = SHA224_H5;
28 sctx->state[6] = SHA224_H6;
29 sctx->state[7] = SHA224_H7;
30 sctx->count = 0;
31
32 return 0;
33} 26}
34 27
35static inline int sha256_base_init(struct shash_desc *desc) 28static inline int sha256_base_init(struct shash_desc *desc)
36{ 29{
37 struct sha256_state *sctx = shash_desc_ctx(desc); 30 struct sha256_state *sctx = shash_desc_ctx(desc);
38 31
39 sctx->state[0] = SHA256_H0; 32 return sha256_init(sctx);
40 sctx->state[1] = SHA256_H1;
41 sctx->state[2] = SHA256_H2;
42 sctx->state[3] = SHA256_H3;
43 sctx->state[4] = SHA256_H4;
44 sctx->state[5] = SHA256_H5;
45 sctx->state[6] = SHA256_H6;
46 sctx->state[7] = SHA256_H7;
47 sctx->count = 0;
48
49 return 0;
50} 33}
51 34
52static inline int sha256_base_do_update(struct shash_desc *desc, 35static inline int sha256_base_do_update(struct shash_desc *desc,
@@ -123,3 +106,5 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
123 *sctx = (struct sha256_state){}; 106 *sctx = (struct sha256_state){};
124 return 0; 107 return 0;
125} 108}
109
110#endif /* _CRYPTO_SHA256_BASE_H */
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index 099be8027f3f..fb19c77494dc 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -5,6 +5,9 @@
5 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> 5 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
6 */ 6 */
7 7
8#ifndef _CRYPTO_SHA512_BASE_H
9#define _CRYPTO_SHA512_BASE_H
10
8#include <crypto/internal/hash.h> 11#include <crypto/internal/hash.h>
9#include <crypto/sha.h> 12#include <crypto/sha.h>
10#include <linux/crypto.h> 13#include <linux/crypto.h>
@@ -126,3 +129,5 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
126 *sctx = (struct sha512_state){}; 129 *sctx = (struct sha512_state){};
127 return 0; 130 return 0;
128} 131}
132
133#endif /* _CRYPTO_SHA512_BASE_H */
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 31891b0dc7e3..1cbf9aa1fe52 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -6,6 +6,9 @@
6 * Written by Gilad Ben-Yossef <gilad@benyossef.com> 6 * Written by Gilad Ben-Yossef <gilad@benyossef.com>
7 */ 7 */
8 8
9#ifndef _CRYPTO_SM3_BASE_H
10#define _CRYPTO_SM3_BASE_H
11
9#include <crypto/internal/hash.h> 12#include <crypto/internal/hash.h>
10#include <crypto/sm3.h> 13#include <crypto/sm3.h>
11#include <linux/crypto.h> 14#include <linux/crypto.h>
@@ -104,3 +107,5 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
104 *sctx = (struct sm3_state){}; 107 *sctx = (struct sm3_state){};
105 return 0; 108 return 0;
106} 109}
110
111#endif /* _CRYPTO_SM3_BASE_H */
diff --git a/include/linux/fips.h b/include/linux/fips.h
index afeeece92302..c6961e932fef 100644
--- a/include/linux/fips.h
+++ b/include/linux/fips.h
@@ -4,8 +4,15 @@
4 4
5#ifdef CONFIG_CRYPTO_FIPS 5#ifdef CONFIG_CRYPTO_FIPS
6extern int fips_enabled; 6extern int fips_enabled;
7extern struct atomic_notifier_head fips_fail_notif_chain;
8
9void fips_fail_notify(void);
10
7#else 11#else
8#define fips_enabled 0 12#define fips_enabled 0
13
14static inline void fips_fail_notify(void) {}
15
9#endif 16#endif
10 17
11#endif 18#endif
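A hedged sketch of where fips_fail_notify() could fit, assuming a self-test failure path; selftest_failed is a hypothetical flag and whether a given caller should invoke the notifier directly depends on the rest of the series:

	if (fips_enabled && selftest_failed)
		fips_fail_notify();	/* wake listeners on fips_fail_notif_chain */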
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 56f09e36f770..23717eeaad23 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -12,7 +12,6 @@
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/timer.h>
16#include <linux/notifier.h> 15#include <linux/notifier.h>
17#include <linux/kobject.h> 16#include <linux/kobject.h>
18 17
@@ -36,6 +35,7 @@ struct padata_priv {
36 struct parallel_data *pd; 35 struct parallel_data *pd;
37 int cb_cpu; 36 int cb_cpu;
38 int cpu; 37 int cpu;
38 unsigned int seq_nr;
39 int info; 39 int info;
40 void (*parallel)(struct padata_priv *padata); 40 void (*parallel)(struct padata_priv *padata);
41 void (*serial)(struct padata_priv *padata); 41 void (*serial)(struct padata_priv *padata);
@@ -73,20 +73,14 @@ struct padata_serial_queue {
73 * @serial: List to wait for serialization after reordering. 73 * @serial: List to wait for serialization after reordering.
74 * @pwork: work struct for parallelization. 74 * @pwork: work struct for parallelization.
75 * @swork: work struct for serialization. 75 * @swork: work struct for serialization.
76 * @pd: Backpointer to the internal control structure.
77 * @work: work struct for parallelization. 76 * @work: work struct for parallelization.
78 * @reorder_work: work struct for reordering.
79 * @num_obj: Number of objects that are processed by this cpu. 77 * @num_obj: Number of objects that are processed by this cpu.
80 * @cpu_index: Index of the cpu.
81 */ 78 */
82struct padata_parallel_queue { 79struct padata_parallel_queue {
83 struct padata_list parallel; 80 struct padata_list parallel;
84 struct padata_list reorder; 81 struct padata_list reorder;
85 struct parallel_data *pd;
86 struct work_struct work; 82 struct work_struct work;
87 struct work_struct reorder_work;
88 atomic_t num_obj; 83 atomic_t num_obj;
89 int cpu_index;
90}; 84};
91 85
92/** 86/**
@@ -110,10 +104,11 @@ struct padata_cpumask {
110 * @reorder_objects: Number of objects waiting in the reorder queues. 104 * @reorder_objects: Number of objects waiting in the reorder queues.
111 * @refcnt: Number of objects holding a reference on this parallel_data. 105 * @refcnt: Number of objects holding a reference on this parallel_data.
112 * @max_seq_nr: Maximal used sequence number. 106 * @max_seq_nr: Maximal used sequence number.
107 * @processed: Number of already processed objects.
108 * @cpu: Next CPU to be processed.
113 * @cpumask: The cpumasks in use for parallel and serial workers. 109 * @cpumask: The cpumasks in use for parallel and serial workers.
110 * @reorder_work: work struct for reordering.
114 * @lock: Reorder lock. 111 * @lock: Reorder lock.
115 * @processed: Number of already processed objects.
116 * @timer: Reorder timer.
117 */ 112 */
118struct parallel_data { 113struct parallel_data {
119 struct padata_instance *pinst; 114 struct padata_instance *pinst;
@@ -122,17 +117,19 @@ struct parallel_data {
122 atomic_t reorder_objects; 117 atomic_t reorder_objects;
123 atomic_t refcnt; 118 atomic_t refcnt;
124 atomic_t seq_nr; 119 atomic_t seq_nr;
120 unsigned int processed;
121 int cpu;
125 struct padata_cpumask cpumask; 122 struct padata_cpumask cpumask;
123 struct work_struct reorder_work;
126 spinlock_t lock ____cacheline_aligned; 124 spinlock_t lock ____cacheline_aligned;
127 unsigned int processed;
128 struct timer_list timer;
129}; 125};
130 126
131/** 127/**
132 * struct padata_instance - The overall control structure. 128 * struct padata_instance - The overall control structure.
133 * 129 *
134 * @cpu_notifier: cpu hotplug notifier. 130 * @cpu_notifier: cpu hotplug notifier.
135 * @wq: The workqueue in use. 131 * @parallel_wq: The workqueue used for parallel work.
132 * @serial_wq: The workqueue used for serial work.
136 * @pd: The internal control structure. 133 * @pd: The internal control structure.
137 * @cpumask: User supplied cpumasks for parallel and serial works. 134 * @cpumask: User supplied cpumasks for parallel and serial works.
138 * @cpumask_change_notifier: Notifiers chain for user-defined notify 135 * @cpumask_change_notifier: Notifiers chain for user-defined notify
@@ -144,7 +141,8 @@ struct parallel_data {
144 */ 141 */
145struct padata_instance { 142struct padata_instance {
146 struct hlist_node node; 143 struct hlist_node node;
147 struct workqueue_struct *wq; 144 struct workqueue_struct *parallel_wq;
145 struct workqueue_struct *serial_wq;
148 struct parallel_data *pd; 146 struct parallel_data *pd;
149 struct padata_cpumask cpumask; 147 struct padata_cpumask cpumask;
150 struct blocking_notifier_head cpumask_change_notifier; 148 struct blocking_notifier_head cpumask_change_notifier;
@@ -156,11 +154,10 @@ struct padata_instance {
156#define PADATA_INVALID 4 154#define PADATA_INVALID 4
157}; 155};
158 156
159extern struct padata_instance *padata_alloc_possible( 157extern struct padata_instance *padata_alloc_possible(const char *name);
160 struct workqueue_struct *wq);
161extern void padata_free(struct padata_instance *pinst); 158extern void padata_free(struct padata_instance *pinst);
162extern int padata_do_parallel(struct padata_instance *pinst, 159extern int padata_do_parallel(struct padata_instance *pinst,
163 struct padata_priv *padata, int cb_cpu); 160 struct padata_priv *padata, int *cb_cpu);
164extern void padata_do_serial(struct padata_priv *padata); 161extern void padata_do_serial(struct padata_priv *padata);
165extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, 162extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
166 cpumask_var_t cpumask); 163 cpumask_var_t cpumask);
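A hedged sketch of the updated padata_do_parallel() calling convention, in which the callback CPU is passed by reference so a fallback can be chosen and reported back; the submit helper is hypothetical:

#include <linux/padata.h>

static int example_submit(struct padata_instance *pinst,
			  struct padata_priv *padata, int requested_cpu)
{
	int cb_cpu = requested_cpu;
	int err;

	err = padata_do_parallel(pinst, padata, &cb_cpu);
	/* On success, cb_cpu names the CPU that will run the serial
	 * callback; it may differ from requested_cpu. */

	return err;
}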
diff --git a/include/linux/sha256.h b/include/linux/sha256.h
deleted file mode 100644
index 26972b9e92db..000000000000
--- a/include/linux/sha256.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2014 Red Hat Inc.
4 *
5 * Author: Vivek Goyal <vgoyal@redhat.com>
6 */
7
8#ifndef SHA256_H
9#define SHA256_H
10
11#include <linux/types.h>
12#include <crypto/sha.h>
13
14/*
15 * Stand-alone implementation of the SHA256 algorithm. It is designed to
16 * have as little dependencies as possible so it can be used in the
17 * kexec_file purgatory. In other cases you should use the implementation in
18 * crypto/.
19 *
20 * For details see lib/sha256.c
21 */
22
23extern int sha256_init(struct sha256_state *sctx);
24extern int sha256_update(struct sha256_state *sctx, const u8 *input,
25 unsigned int length);
26extern int sha256_final(struct sha256_state *sctx, u8 *hash);
27
28#endif /* SHA256_H */
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
index fd4a6e6ec831..672df7fbf6c1 100644
--- a/include/linux/timeriomem-rng.h
+++ b/include/linux/timeriomem-rng.h
@@ -5,6 +5,9 @@
5 * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk> 5 * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk>
6 */ 6 */
7 7
8#ifndef _LINUX_TIMERIOMEM_RNG_H
9#define _LINUX_TIMERIOMEM_RNG_H
10
8struct timeriomem_rng_data { 11struct timeriomem_rng_data {
9 void __iomem *address; 12 void __iomem *address;
10 13
@@ -14,3 +17,5 @@ struct timeriomem_rng_data {
14 /* bits of entropy per 1024 bits read */ 17 /* bits of entropy per 1024 bits read */
15 unsigned int quality; 18 unsigned int quality;
16}; 19};
20
21#endif /* _LINUX_TIMERIOMEM_RNG_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b7c585b5ec1c..4261d1c6e87b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -435,6 +435,10 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
435 435
436extern void destroy_workqueue(struct workqueue_struct *wq); 436extern void destroy_workqueue(struct workqueue_struct *wq);
437 437
438struct workqueue_attrs *alloc_workqueue_attrs(void);
439void free_workqueue_attrs(struct workqueue_attrs *attrs);
440int apply_workqueue_attrs(struct workqueue_struct *wq,
441 const struct workqueue_attrs *attrs);
438int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); 442int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
439 443
440extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 444extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
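A hedged sketch of the newly exported workqueue attrs API, assuming callers of apply_workqueue_attrs() must hold CPU hotplug read exclusion; the helper name is hypothetical:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/workqueue.h>

static int example_apply_cpumask(struct workqueue_struct *wq,
				 const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	cpumask_copy(attrs->cpumask, mask);

	get_online_cpus();	/* assumption: hotplug read lock required here */
	err = apply_workqueue_attrs(wq, attrs);
	put_online_cpus();

	free_workqueue_attrs(attrs);

	return err;
}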
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ab40d7afdc54..f3f3189f5389 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -170,6 +170,9 @@ struct net {
170#ifdef CONFIG_XDP_SOCKETS 170#ifdef CONFIG_XDP_SOCKETS
171 struct netns_xdp xdp; 171 struct netns_xdp xdp;
172#endif 172#endif
173#if IS_ENABLED(CONFIG_CRYPTO_USER)
174 struct sock *crypto_nlsk;
175#endif
173 struct sock *diag_nlsk; 176 struct sock *diag_nlsk;
174 atomic_t fnhe_genid; 177 atomic_t fnhe_genid;
175} __randomize_layout; 178} __randomize_layout;
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 4dc1603919ce..5730c67f0617 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -19,6 +19,9 @@
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 20 */
21 21
22#ifndef _UAPI_LINUX_CRYPTOUSER_H
23#define _UAPI_LINUX_CRYPTOUSER_H
24
22#include <linux/types.h> 25#include <linux/types.h>
23 26
24/* Netlink configuration messages. */ 27/* Netlink configuration messages. */
@@ -198,3 +201,5 @@ struct crypto_report_acomp {
198 201
199#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ 202#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
200 sizeof(struct crypto_report_blkcipher)) 203 sizeof(struct crypto_report_blkcipher))
204
205#endif /* _UAPI_LINUX_CRYPTOUSER_H */
diff --git a/kernel/padata.c b/kernel/padata.c
index 15a8ad63f4ff..c3fec1413295 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,18 +46,13 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
46 return target_cpu; 46 return target_cpu;
47} 47}
48 48
49static int padata_cpu_hash(struct parallel_data *pd) 49static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
50{ 50{
51 unsigned int seq_nr;
52 int cpu_index;
53
54 /* 51 /*
55 * Hash the sequence numbers to the cpus by taking 52 * Hash the sequence numbers to the cpus by taking
56 * seq_nr mod. number of cpus in use. 53 * seq_nr mod. number of cpus in use.
57 */ 54 */
58 55 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
59 seq_nr = atomic_inc_return(&pd->seq_nr);
60 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
61 56
62 return padata_index_to_cpu(pd, cpu_index); 57 return padata_index_to_cpu(pd, cpu_index);
63} 58}
@@ -94,17 +89,19 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
94 * 89 *
95 * @pinst: padata instance 90 * @pinst: padata instance
96 * @padata: object to be parallelized 91 * @padata: object to be parallelized
97 * @cb_cpu: cpu the serialization callback function will run on, 92 * @cb_cpu: pointer to the CPU that the serialization callback function should
98 * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). 93 * run on. If it's not in the serial cpumask of @pinst
94 * (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
95 * none found, returns -EINVAL.
99 * 96 *
100 * The parallelization callback function will run with BHs off. 97 * The parallelization callback function will run with BHs off.
101 * Note: Every object which is parallelized by padata_do_parallel 98 * Note: Every object which is parallelized by padata_do_parallel
102 * must be seen by padata_do_serial. 99 * must be seen by padata_do_serial.
103 */ 100 */
104int padata_do_parallel(struct padata_instance *pinst, 101int padata_do_parallel(struct padata_instance *pinst,
105 struct padata_priv *padata, int cb_cpu) 102 struct padata_priv *padata, int *cb_cpu)
106{ 103{
107 int target_cpu, err; 104 int i, cpu, cpu_index, target_cpu, err;
108 struct padata_parallel_queue *queue; 105 struct padata_parallel_queue *queue;
109 struct parallel_data *pd; 106 struct parallel_data *pd;
110 107
@@ -116,8 +113,19 @@ int padata_do_parallel(struct padata_instance *pinst,
116 if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) 113 if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
117 goto out; 114 goto out;
118 115
119 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) 116 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
120 goto out; 117 if (!cpumask_weight(pd->cpumask.cbcpu))
118 goto out;
119
120 /* Select an alternate fallback CPU and notify the caller. */
121 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
122
123 cpu = cpumask_first(pd->cpumask.cbcpu);
124 for (i = 0; i < cpu_index; i++)
125 cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
126
127 *cb_cpu = cpu;
128 }
121 129
122 err = -EBUSY; 130 err = -EBUSY;
123 if ((pinst->flags & PADATA_RESET)) 131 if ((pinst->flags & PADATA_RESET))
@@ -129,9 +137,10 @@ int padata_do_parallel(struct padata_instance *pinst,
129 err = 0; 137 err = 0;
130 atomic_inc(&pd->refcnt); 138 atomic_inc(&pd->refcnt);
131 padata->pd = pd; 139 padata->pd = pd;
132 padata->cb_cpu = cb_cpu; 140 padata->cb_cpu = *cb_cpu;
133 141
134 target_cpu = padata_cpu_hash(pd); 142 padata->seq_nr = atomic_inc_return(&pd->seq_nr);
143 target_cpu = padata_cpu_hash(pd, padata->seq_nr);
135 padata->cpu = target_cpu; 144 padata->cpu = target_cpu;
136 queue = per_cpu_ptr(pd->pqueue, target_cpu); 145 queue = per_cpu_ptr(pd->pqueue, target_cpu);
137 146
@@ -139,7 +148,7 @@ int padata_do_parallel(struct padata_instance *pinst,
139 list_add_tail(&padata->list, &queue->parallel.list); 148 list_add_tail(&padata->list, &queue->parallel.list);
140 spin_unlock(&queue->parallel.lock); 149 spin_unlock(&queue->parallel.lock);
141 150
142 queue_work_on(target_cpu, pinst->wq, &queue->work); 151 queue_work(pinst->parallel_wq, &queue->work);
143 152
144out: 153out:
145 rcu_read_unlock_bh(); 154 rcu_read_unlock_bh();
@@ -149,63 +158,53 @@ out:
149EXPORT_SYMBOL(padata_do_parallel); 158EXPORT_SYMBOL(padata_do_parallel);
150 159
151/* 160/*
152 * padata_get_next - Get the next object that needs serialization. 161 * padata_find_next - Find the next object that needs serialization.
153 * 162 *
154 * Return values are: 163 * Return values are:
155 * 164 *
156 * A pointer to the control struct of the next object that needs 165 * A pointer to the control struct of the next object that needs
157 * serialization, if present in one of the percpu reorder queues. 166 * serialization, if present in one of the percpu reorder queues.
158 * 167 *
159 * -EINPROGRESS, if the next object that needs serialization will 168 * NULL, if the next object that needs serialization will
160 * be parallel processed by another cpu and is not yet present in 169 * be parallel processed by another cpu and is not yet present in
161 * the cpu's reorder queue. 170 * the cpu's reorder queue.
162 *
163 * -ENODATA, if this cpu has to do the parallel processing for
164 * the next object.
165 */ 171 */
166static struct padata_priv *padata_get_next(struct parallel_data *pd) 172static struct padata_priv *padata_find_next(struct parallel_data *pd,
173 bool remove_object)
167{ 174{
168 int cpu, num_cpus;
169 unsigned int next_nr, next_index;
170 struct padata_parallel_queue *next_queue; 175 struct padata_parallel_queue *next_queue;
171 struct padata_priv *padata; 176 struct padata_priv *padata;
172 struct padata_list *reorder; 177 struct padata_list *reorder;
178 int cpu = pd->cpu;
173 179
174 num_cpus = cpumask_weight(pd->cpumask.pcpu);
175
176 /*
177 * Calculate the percpu reorder queue and the sequence
178 * number of the next object.
179 */
180 next_nr = pd->processed;
181 next_index = next_nr % num_cpus;
182 cpu = padata_index_to_cpu(pd, next_index);
183 next_queue = per_cpu_ptr(pd->pqueue, cpu); 180 next_queue = per_cpu_ptr(pd->pqueue, cpu);
184
185 reorder = &next_queue->reorder; 181 reorder = &next_queue->reorder;
186 182
187 spin_lock(&reorder->lock); 183 spin_lock(&reorder->lock);
188 if (!list_empty(&reorder->list)) { 184 if (list_empty(&reorder->list)) {
189 padata = list_entry(reorder->list.next, 185 spin_unlock(&reorder->lock);
190 struct padata_priv, list); 186 return NULL;
191 187 }
192 list_del_init(&padata->list);
193 atomic_dec(&pd->reorder_objects);
194 188
195 pd->processed++; 189 padata = list_entry(reorder->list.next, struct padata_priv, list);
196 190
191 /*
192 * Checks the rare case where two or more parallel jobs have hashed to
193 * the same CPU and one of the later ones finishes first.
194 */
195 if (padata->seq_nr != pd->processed) {
197 spin_unlock(&reorder->lock); 196 spin_unlock(&reorder->lock);
198 goto out; 197 return NULL;
199 } 198 }
200 spin_unlock(&reorder->lock);
201 199
202 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { 200 if (remove_object) {
203 padata = ERR_PTR(-ENODATA); 201 list_del_init(&padata->list);
204 goto out; 202 atomic_dec(&pd->reorder_objects);
203 ++pd->processed;
204 pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
205 } 205 }
206 206
207 padata = ERR_PTR(-EINPROGRESS); 207 spin_unlock(&reorder->lock);
208out:
209 return padata; 208 return padata;
210} 209}
211 210
@@ -215,6 +214,7 @@ static void padata_reorder(struct parallel_data *pd)
215 struct padata_priv *padata; 214 struct padata_priv *padata;
216 struct padata_serial_queue *squeue; 215 struct padata_serial_queue *squeue;
217 struct padata_instance *pinst = pd->pinst; 216 struct padata_instance *pinst = pd->pinst;
217 struct padata_parallel_queue *next_queue;
218 218
219 /* 219 /*
220 * We need to ensure that only one cpu can work on dequeueing of 220 * We need to ensure that only one cpu can work on dequeueing of
@@ -230,27 +230,16 @@ static void padata_reorder(struct parallel_data *pd)
230 return; 230 return;
231 231
232 while (1) { 232 while (1) {
233 padata = padata_get_next(pd); 233 padata = padata_find_next(pd, true);
234 234
235 /* 235 /*
236 * If the next object that needs serialization is parallel 236 * If the next object that needs serialization is parallel
 237		 * processed by another cpu and is still on its way to the	 237		 * processed by another cpu and is still on its way to the
238 * cpu's reorder queue, nothing to do for now. 238 * cpu's reorder queue, nothing to do for now.
239 */ 239 */
240 if (PTR_ERR(padata) == -EINPROGRESS) 240 if (!padata)
241 break; 241 break;
242 242
243 /*
244 * This cpu has to do the parallel processing of the next
245 * object. It's waiting in the cpu's parallelization queue,
246 * so exit immediately.
247 */
248 if (PTR_ERR(padata) == -ENODATA) {
249 del_timer(&pd->timer);
250 spin_unlock_bh(&pd->lock);
251 return;
252 }
253
254 cb_cpu = padata->cb_cpu; 243 cb_cpu = padata->cb_cpu;
255 squeue = per_cpu_ptr(pd->squeue, cb_cpu); 244 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
256 245
@@ -258,77 +247,37 @@ static void padata_reorder(struct parallel_data *pd)
258 list_add_tail(&padata->list, &squeue->serial.list); 247 list_add_tail(&padata->list, &squeue->serial.list);
259 spin_unlock(&squeue->serial.lock); 248 spin_unlock(&squeue->serial.lock);
260 249
261 queue_work_on(cb_cpu, pinst->wq, &squeue->work); 250 queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
262 } 251 }
263 252
264 spin_unlock_bh(&pd->lock); 253 spin_unlock_bh(&pd->lock);
265 254
266 /* 255 /*
 267	 * The next object that needs serialization might have arrived at	 256	 * The next object that needs serialization might have arrived at
268 * the reorder queues in the meantime, we will be called again 257 * the reorder queues in the meantime.
269 * from the timer function if no one else cares for it.
270 * 258 *
271 * Ensure reorder_objects is read after pd->lock is dropped so we see 259 * Ensure reorder queue is read after pd->lock is dropped so we see
272 * an increment from another task in padata_do_serial. Pairs with 260 * new objects from another task in padata_do_serial. Pairs with
273 * smp_mb__after_atomic in padata_do_serial. 261 * smp_mb__after_atomic in padata_do_serial.
274 */ 262 */
275 smp_mb(); 263 smp_mb();
276 if (atomic_read(&pd->reorder_objects)
277 && !(pinst->flags & PADATA_RESET))
278 mod_timer(&pd->timer, jiffies + HZ);
279 else
280 del_timer(&pd->timer);
281 264
282 return; 265 next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
266 if (!list_empty(&next_queue->reorder.list) &&
267 padata_find_next(pd, false))
268 queue_work(pinst->serial_wq, &pd->reorder_work);
283} 269}
284 270
285static void invoke_padata_reorder(struct work_struct *work) 271static void invoke_padata_reorder(struct work_struct *work)
286{ 272{
287 struct padata_parallel_queue *pqueue;
288 struct parallel_data *pd; 273 struct parallel_data *pd;
289 274
290 local_bh_disable(); 275 local_bh_disable();
291 pqueue = container_of(work, struct padata_parallel_queue, reorder_work); 276 pd = container_of(work, struct parallel_data, reorder_work);
292 pd = pqueue->pd;
293 padata_reorder(pd); 277 padata_reorder(pd);
294 local_bh_enable(); 278 local_bh_enable();
295} 279}
296 280
297static void padata_reorder_timer(struct timer_list *t)
298{
299 struct parallel_data *pd = from_timer(pd, t, timer);
300 unsigned int weight;
301 int target_cpu, cpu;
302
303 cpu = get_cpu();
304
305 /* We don't lock pd here to not interfere with parallel processing
306 * padata_reorder() calls on other CPUs. We just need any CPU out of
307 * the cpumask.pcpu set. It would be nice if it's the right one but
308 * it doesn't matter if we're off to the next one by using an outdated
309 * pd->processed value.
310 */
311 weight = cpumask_weight(pd->cpumask.pcpu);
312 target_cpu = padata_index_to_cpu(pd, pd->processed % weight);
313
314 /* ensure to call the reorder callback on the correct CPU */
315 if (cpu != target_cpu) {
316 struct padata_parallel_queue *pqueue;
317 struct padata_instance *pinst;
318
319 /* The timer function is serialized wrt itself -- no locking
320 * needed.
321 */
322 pinst = pd->pinst;
323 pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
324 queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
325 } else {
326 padata_reorder(pd);
327 }
328
329 put_cpu();
330}
331
332static void padata_serial_worker(struct work_struct *serial_work) 281static void padata_serial_worker(struct work_struct *serial_work)
333{ 282{
334 struct padata_serial_queue *squeue; 283 struct padata_serial_queue *squeue;
@@ -367,47 +316,28 @@ static void padata_serial_worker(struct work_struct *serial_work)
367 */ 316 */
368void padata_do_serial(struct padata_priv *padata) 317void padata_do_serial(struct padata_priv *padata)
369{ 318{
370 int cpu; 319 struct parallel_data *pd = padata->pd;
371 struct padata_parallel_queue *pqueue; 320 struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
372 struct parallel_data *pd; 321 padata->cpu);
373 int reorder_via_wq = 0; 322 struct padata_priv *cur;
374
375 pd = padata->pd;
376
377 cpu = get_cpu();
378
379 /* We need to run on the same CPU padata_do_parallel(.., padata, ..)
380 * was called on -- or, at least, enqueue the padata object into the
381 * correct per-cpu queue.
382 */
383 if (cpu != padata->cpu) {
384 reorder_via_wq = 1;
385 cpu = padata->cpu;
386 }
387
388 pqueue = per_cpu_ptr(pd->pqueue, cpu);
389 323
390 spin_lock(&pqueue->reorder.lock); 324 spin_lock(&pqueue->reorder.lock);
325 /* Sort in ascending order of sequence number. */
326 list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
327 if (cur->seq_nr < padata->seq_nr)
328 break;
329 list_add(&padata->list, &cur->list);
391 atomic_inc(&pd->reorder_objects); 330 atomic_inc(&pd->reorder_objects);
392 list_add_tail(&padata->list, &pqueue->reorder.list);
393 spin_unlock(&pqueue->reorder.lock); 331 spin_unlock(&pqueue->reorder.lock);
394 332
395 /* 333 /*
396 * Ensure the atomic_inc of reorder_objects above is ordered correctly 334 * Ensure the addition to the reorder list is ordered correctly
397 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb 335 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
398 * in padata_reorder. 336 * in padata_reorder.
399 */ 337 */
400 smp_mb__after_atomic(); 338 smp_mb__after_atomic();
401 339
402 put_cpu(); 340 padata_reorder(pd);
403
404 /* If we're running on the wrong CPU, call padata_reorder() via a
405 * kernel worker.
406 */
407 if (reorder_via_wq)
408 queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
409 else
410 padata_reorder(pd);
411} 341}
412EXPORT_SYMBOL(padata_do_serial); 342EXPORT_SYMBOL(padata_do_serial);
413 343
@@ -415,17 +345,36 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
415 const struct cpumask *pcpumask, 345 const struct cpumask *pcpumask,
416 const struct cpumask *cbcpumask) 346 const struct cpumask *cbcpumask)
417{ 347{
418 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) 348 struct workqueue_attrs *attrs;
419 return -ENOMEM; 349 int err = -ENOMEM;
420 350
351 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
352 goto out;
421 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); 353 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
422 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
423 free_cpumask_var(pd->cpumask.pcpu);
424 return -ENOMEM;
425 }
426 354
355 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
356 goto free_pcpu_mask;
427 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); 357 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
358
359 attrs = alloc_workqueue_attrs();
360 if (!attrs)
361 goto free_cbcpu_mask;
362
363 /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
364 cpumask_copy(attrs->cpumask, pd->cpumask.pcpu);
365 err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs);
366 free_workqueue_attrs(attrs);
367 if (err < 0)
368 goto free_cbcpu_mask;
369
428 return 0; 370 return 0;
371
372free_cbcpu_mask:
373 free_cpumask_var(pd->cpumask.cbcpu);
374free_pcpu_mask:
375 free_cpumask_var(pd->cpumask.pcpu);
376out:
377 return err;
429} 378}
430 379
431static void __padata_list_init(struct padata_list *pd_list) 380static void __padata_list_init(struct padata_list *pd_list)
@@ -451,26 +400,15 @@ static void padata_init_squeues(struct parallel_data *pd)
451/* Initialize all percpu queues used by parallel workers */ 400/* Initialize all percpu queues used by parallel workers */
452static void padata_init_pqueues(struct parallel_data *pd) 401static void padata_init_pqueues(struct parallel_data *pd)
453{ 402{
454 int cpu_index, cpu; 403 int cpu;
455 struct padata_parallel_queue *pqueue; 404 struct padata_parallel_queue *pqueue;
456 405
457 cpu_index = 0; 406 for_each_cpu(cpu, pd->cpumask.pcpu) {
458 for_each_possible_cpu(cpu) {
459 pqueue = per_cpu_ptr(pd->pqueue, cpu); 407 pqueue = per_cpu_ptr(pd->pqueue, cpu);
460 408
461 if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
462 pqueue->cpu_index = -1;
463 continue;
464 }
465
466 pqueue->pd = pd;
467 pqueue->cpu_index = cpu_index;
468 cpu_index++;
469
470 __padata_list_init(&pqueue->reorder); 409 __padata_list_init(&pqueue->reorder);
471 __padata_list_init(&pqueue->parallel); 410 __padata_list_init(&pqueue->parallel);
472 INIT_WORK(&pqueue->work, padata_parallel_worker); 411 INIT_WORK(&pqueue->work, padata_parallel_worker);
473 INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
474 atomic_set(&pqueue->num_obj, 0); 412 atomic_set(&pqueue->num_obj, 0);
475 } 413 }
476} 414}
@@ -493,17 +431,19 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
493 pd->squeue = alloc_percpu(struct padata_serial_queue); 431 pd->squeue = alloc_percpu(struct padata_serial_queue);
494 if (!pd->squeue) 432 if (!pd->squeue)
495 goto err_free_pqueue; 433 goto err_free_pqueue;
434
435 pd->pinst = pinst;
496 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) 436 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
497 goto err_free_squeue; 437 goto err_free_squeue;
498 438
499 padata_init_pqueues(pd); 439 padata_init_pqueues(pd);
500 padata_init_squeues(pd); 440 padata_init_squeues(pd);
501 timer_setup(&pd->timer, padata_reorder_timer, 0);
502 atomic_set(&pd->seq_nr, -1); 441 atomic_set(&pd->seq_nr, -1);
503 atomic_set(&pd->reorder_objects, 0); 442 atomic_set(&pd->reorder_objects, 0);
504 atomic_set(&pd->refcnt, 0); 443 atomic_set(&pd->refcnt, 0);
505 pd->pinst = pinst;
506 spin_lock_init(&pd->lock); 444 spin_lock_init(&pd->lock);
445 pd->cpu = cpumask_first(pd->cpumask.pcpu);
446 INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
507 447
508 return pd; 448 return pd;
509 449
@@ -538,8 +478,6 @@ static void padata_flush_queues(struct parallel_data *pd)
538 flush_work(&pqueue->work); 478 flush_work(&pqueue->work);
539 } 479 }
540 480
541 del_timer_sync(&pd->timer);
542
543 if (atomic_read(&pd->reorder_objects)) 481 if (atomic_read(&pd->reorder_objects))
544 padata_reorder(pd); 482 padata_reorder(pd);
545 483
@@ -883,6 +821,8 @@ static void __padata_free(struct padata_instance *pinst)
883 padata_free_pd(pinst->pd); 821 padata_free_pd(pinst->pd);
884 free_cpumask_var(pinst->cpumask.pcpu); 822 free_cpumask_var(pinst->cpumask.pcpu);
885 free_cpumask_var(pinst->cpumask.cbcpu); 823 free_cpumask_var(pinst->cpumask.cbcpu);
824 destroy_workqueue(pinst->serial_wq);
825 destroy_workqueue(pinst->parallel_wq);
886 kfree(pinst); 826 kfree(pinst);
887} 827}
888 828
@@ -1016,13 +956,11 @@ static struct kobj_type padata_attr_type = {
1016 * padata_alloc - allocate and initialize a padata instance and specify 956 * padata_alloc - allocate and initialize a padata instance and specify
1017 * cpumasks for serial and parallel workers. 957 * cpumasks for serial and parallel workers.
1018 * 958 *
1019 * @wq: workqueue to use for the allocated padata instance 959 * @name: used to identify the instance
1020 * @pcpumask: cpumask that will be used for padata parallelization 960 * @pcpumask: cpumask that will be used for padata parallelization
1021 * @cbcpumask: cpumask that will be used for padata serialization 961 * @cbcpumask: cpumask that will be used for padata serialization
1022 *
1023 * Must be called from a cpus_read_lock() protected region
1024 */ 962 */
1025static struct padata_instance *padata_alloc(struct workqueue_struct *wq, 963static struct padata_instance *padata_alloc(const char *name,
1026 const struct cpumask *pcpumask, 964 const struct cpumask *pcpumask,
1027 const struct cpumask *cbcpumask) 965 const struct cpumask *cbcpumask)
1028{ 966{
@@ -1033,11 +971,23 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
1033 if (!pinst) 971 if (!pinst)
1034 goto err; 972 goto err;
1035 973
1036 if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) 974 pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
975 name);
976 if (!pinst->parallel_wq)
1037 goto err_free_inst; 977 goto err_free_inst;
978
979 get_online_cpus();
980
981 pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
982 WQ_CPU_INTENSIVE, 1, name);
983 if (!pinst->serial_wq)
984 goto err_put_cpus;
985
986 if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
987 goto err_free_serial_wq;
1038 if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { 988 if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
1039 free_cpumask_var(pinst->cpumask.pcpu); 989 free_cpumask_var(pinst->cpumask.pcpu);
1040 goto err_free_inst; 990 goto err_free_serial_wq;
1041 } 991 }
1042 if (!padata_validate_cpumask(pinst, pcpumask) || 992 if (!padata_validate_cpumask(pinst, pcpumask) ||
1043 !padata_validate_cpumask(pinst, cbcpumask)) 993 !padata_validate_cpumask(pinst, cbcpumask))
@@ -1049,8 +999,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
1049 999
1050 rcu_assign_pointer(pinst->pd, pd); 1000 rcu_assign_pointer(pinst->pd, pd);
1051 1001
1052 pinst->wq = wq;
1053
1054 cpumask_copy(pinst->cpumask.pcpu, pcpumask); 1002 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
1055 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); 1003 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
1056 1004
@@ -1063,11 +1011,19 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
1063#ifdef CONFIG_HOTPLUG_CPU 1011#ifdef CONFIG_HOTPLUG_CPU
1064 cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node); 1012 cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
1065#endif 1013#endif
1014
1015 put_online_cpus();
1016
1066 return pinst; 1017 return pinst;
1067 1018
1068err_free_masks: 1019err_free_masks:
1069 free_cpumask_var(pinst->cpumask.pcpu); 1020 free_cpumask_var(pinst->cpumask.pcpu);
1070 free_cpumask_var(pinst->cpumask.cbcpu); 1021 free_cpumask_var(pinst->cpumask.cbcpu);
1022err_free_serial_wq:
1023 destroy_workqueue(pinst->serial_wq);
1024err_put_cpus:
1025 put_online_cpus();
1026 destroy_workqueue(pinst->parallel_wq);
1071err_free_inst: 1027err_free_inst:
1072 kfree(pinst); 1028 kfree(pinst);
1073err: 1029err:
@@ -1079,14 +1035,11 @@ err:
1079 * Use the cpu_possible_mask for serial and 1035 * Use the cpu_possible_mask for serial and
1080 * parallel workers. 1036 * parallel workers.
1081 * 1037 *
1082 * @wq: workqueue to use for the allocated padata instance 1038 * @name: used to identify the instance
1083 *
1084 * Must be called from a cpus_read_lock() protected region
1085 */ 1039 */
1086struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) 1040struct padata_instance *padata_alloc_possible(const char *name)
1087{ 1041{
1088 lockdep_assert_cpus_held(); 1042 return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
1089 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
1090} 1043}
1091EXPORT_SYMBOL(padata_alloc_possible); 1044EXPORT_SYMBOL(padata_alloc_possible);
1092 1045
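For reference, a minimal allocation sketch reflecting the reworked API (the instance name and module hooks are hypothetical, not from this commit): callers now pass a name rather than a workqueue, the parallel and serial workqueues are created internally, and the call no longer needs to be wrapped in cpus_read_lock()/get_online_cpus(). padata_start(), padata_stop() and padata_free() are the pre-existing helpers, unchanged by this series.

#include <linux/module.h>
#include <linux/padata.h>

static struct padata_instance *my_pinst;

static int __init my_init(void)
{
	/* "%s_parallel" and "%s_serial" workqueues are allocated internally. */
	my_pinst = padata_alloc_possible("my_instance");
	if (!my_pinst)
		return -ENOMEM;

	return padata_start(my_pinst);
}

static void __exit my_exit(void)
{
	padata_stop(my_pinst);
	padata_free(my_pinst);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");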
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 601d61150b65..bc2e09a8ea61 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3329,7 +3329,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
3329 * 3329 *
3330 * Undo alloc_workqueue_attrs(). 3330 * Undo alloc_workqueue_attrs().
3331 */ 3331 */
3332static void free_workqueue_attrs(struct workqueue_attrs *attrs) 3332void free_workqueue_attrs(struct workqueue_attrs *attrs)
3333{ 3333{
3334 if (attrs) { 3334 if (attrs) {
3335 free_cpumask_var(attrs->cpumask); 3335 free_cpumask_var(attrs->cpumask);
@@ -3345,7 +3345,7 @@ static void free_workqueue_attrs(struct workqueue_attrs *attrs)
3345 * 3345 *
3346 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3346 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3347 */ 3347 */
3348static struct workqueue_attrs *alloc_workqueue_attrs(void) 3348struct workqueue_attrs *alloc_workqueue_attrs(void)
3349{ 3349{
3350 struct workqueue_attrs *attrs; 3350 struct workqueue_attrs *attrs;
3351 3351
@@ -4030,16 +4030,20 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4030 * 4030 *
4031 * Performs GFP_KERNEL allocations. 4031 * Performs GFP_KERNEL allocations.
4032 * 4032 *
4033 * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
4034 *
4033 * Return: 0 on success and -errno on failure. 4035 * Return: 0 on success and -errno on failure.
4034 */ 4036 */
4035static int apply_workqueue_attrs(struct workqueue_struct *wq, 4037int apply_workqueue_attrs(struct workqueue_struct *wq,
4036 const struct workqueue_attrs *attrs) 4038 const struct workqueue_attrs *attrs)
4037{ 4039{
4038 int ret; 4040 int ret;
4039 4041
4040 apply_wqattrs_lock(); 4042 lockdep_assert_cpus_held();
4043
4044 mutex_lock(&wq_pool_mutex);
4041 ret = apply_workqueue_attrs_locked(wq, attrs); 4045 ret = apply_workqueue_attrs_locked(wq, attrs);
4042 apply_wqattrs_unlock(); 4046 mutex_unlock(&wq_pool_mutex);
4043 4047
4044 return ret; 4048 return ret;
4045} 4049}
@@ -4152,16 +4156,21 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4152 mutex_unlock(&wq->mutex); 4156 mutex_unlock(&wq->mutex);
4153 } 4157 }
4154 return 0; 4158 return 0;
4155 } else if (wq->flags & __WQ_ORDERED) { 4159 }
4160
4161 get_online_cpus();
4162 if (wq->flags & __WQ_ORDERED) {
4156 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 4163 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4157 /* there should only be single pwq for ordering guarantee */ 4164 /* there should only be single pwq for ordering guarantee */
4158 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 4165 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4159 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 4166 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4160 "ordering guarantee broken for workqueue %s\n", wq->name); 4167 "ordering guarantee broken for workqueue %s\n", wq->name);
4161 return ret;
4162 } else { 4168 } else {
4163 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 4169 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4164 } 4170 }
4171 put_online_cpus();
4172
4173 return ret;
4165} 4174}
4166 4175
4167static int wq_clamp_max_active(int max_active, unsigned int flags, 4176static int wq_clamp_max_active(int max_active, unsigned int flags,
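A minimal sketch of the calling convention for the newly unconfined helpers (the wrapper name is a placeholder): per the added lockdep_assert_cpus_held(), the caller must hold the CPU hotplug read lock, e.g. via get_online_cpus(), across apply_workqueue_attrs(). Here it is taken locally, whereas padata takes it once in padata_alloc() and calls apply_workqueue_attrs() from padata_setup_cpumasks() underneath it.

#include <linux/cpu.h>
#include <linux/workqueue.h>

/* Restrict an unbound workqueue's workers to a caller-chosen cpumask. */
static int my_bind_wq(struct workqueue_struct *wq, const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	cpumask_copy(attrs->cpumask, mask);

	get_online_cpus();			/* CPU hotplug read exclusion */
	err = apply_workqueue_attrs(wq, attrs);
	put_online_cpus();

	free_workqueue_attrs(attrs);
	return err;
}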
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 88195c34932d..cbe0b6a6450d 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -1,4 +1,13 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2 2
3obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
4libaes-y := aes.o
5
3obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o 6obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
4libarc4-y := arc4.o 7libarc4-y := arc4.o
8
9obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
10libdes-y := des.o
11
12obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
13libsha256-y := sha256.o
diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c
new file mode 100644
index 000000000000..827fe89922ff
--- /dev/null
+++ b/lib/crypto/aes.c
@@ -0,0 +1,356 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2017-2019 Linaro Ltd <ard.biesheuvel@linaro.org>
4 */
5
6#include <crypto/aes.h>
7#include <linux/crypto.h>
8#include <linux/module.h>
9#include <asm/unaligned.h>
10
11/*
12 * Emit the sbox as volatile const to prevent the compiler from doing
13 * constant folding on sbox references involving fixed indexes.
14 */
15static volatile const u8 __cacheline_aligned aes_sbox[] = {
16 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
17 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
18 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
19 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
20 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
21 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
22 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
23 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
24 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
25 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
26 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
27 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
28 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
29 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
30 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
31 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
32 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
33 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
34 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
35 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
36 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
37 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
38 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
39 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
40 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
41 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
42 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
43 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
44 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
45 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
46 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
47 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
48};
49
50static volatile const u8 __cacheline_aligned aes_inv_sbox[] = {
51 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
52 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
53 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
54 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
55 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
56 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
57 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
58 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
59 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
60 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
61 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
62 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
63 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
64 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
65 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
66 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
67 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
68 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
69 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
70 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
71 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
72 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
73 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
74 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
75 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
76 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
77 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
78 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
79 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
80 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
81 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
82 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
83};
84
85extern const u8 crypto_aes_sbox[256] __alias(aes_sbox);
86extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox);
87
88EXPORT_SYMBOL(crypto_aes_sbox);
89EXPORT_SYMBOL(crypto_aes_inv_sbox);
90
91static u32 mul_by_x(u32 w)
92{
93 u32 x = w & 0x7f7f7f7f;
94 u32 y = w & 0x80808080;
95
96 /* multiply by polynomial 'x' (0b10) in GF(2^8) */
97 return (x << 1) ^ (y >> 7) * 0x1b;
98}
99
100static u32 mul_by_x2(u32 w)
101{
102 u32 x = w & 0x3f3f3f3f;
103 u32 y = w & 0x80808080;
104 u32 z = w & 0x40404040;
105
106 /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */
107 return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
108}
109
110static u32 mix_columns(u32 x)
111{
112 /*
113 * Perform the following matrix multiplication in GF(2^8)
114 *
115 * | 0x2 0x3 0x1 0x1 | | x[0] |
116 * | 0x1 0x2 0x3 0x1 | | x[1] |
117 * | 0x1 0x1 0x2 0x3 | x | x[2] |
118 * | 0x3 0x1 0x1 0x2 | | x[3] |
119 */
120 u32 y = mul_by_x(x) ^ ror32(x, 16);
121
122 return y ^ ror32(x ^ y, 8);
123}
124
125static u32 inv_mix_columns(u32 x)
126{
127 /*
128 * Perform the following matrix multiplication in GF(2^8)
129 *
130 * | 0xe 0xb 0xd 0x9 | | x[0] |
131 * | 0x9 0xe 0xb 0xd | | x[1] |
132 * | 0xd 0x9 0xe 0xb | x | x[2] |
133 * | 0xb 0xd 0x9 0xe | | x[3] |
134 *
135 * which can conveniently be reduced to
136 *
137 * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] |
138 * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] |
139 * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] |
140 * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] |
141 */
142 u32 y = mul_by_x2(x);
143
144 return mix_columns(x ^ y ^ ror32(y, 16));
145}
146
147static __always_inline u32 subshift(u32 in[], int pos)
148{
149 return (aes_sbox[in[pos] & 0xff]) ^
150 (aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
151 (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
152 (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
153}
154
155static __always_inline u32 inv_subshift(u32 in[], int pos)
156{
157 return (aes_inv_sbox[in[pos] & 0xff]) ^
158 (aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
159 (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
160 (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
161}
162
163static u32 subw(u32 in)
164{
165 return (aes_sbox[in & 0xff]) ^
166 (aes_sbox[(in >> 8) & 0xff] << 8) ^
167 (aes_sbox[(in >> 16) & 0xff] << 16) ^
168 (aes_sbox[(in >> 24) & 0xff] << 24);
169}
170
171/**
172 * aes_expandkey - Expands the AES key as described in FIPS-197
173 * @ctx: The location where the computed key will be stored.
174 * @in_key: The supplied key.
175 * @key_len: The length of the supplied key.
176 *
177 * Returns 0 on success. The function fails only if an invalid key size (or
178 * pointer) is supplied.
 179 * The expanded key size is 240 bytes (max of 14 rounds with a unique 16-byte
 180 * key schedule plus a 16-byte key which is used before the first round).
181 * The decryption key is prepared for the "Equivalent Inverse Cipher" as
182 * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
183 * for the initial combination, the second slot for the first round and so on.
184 */
185int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
186 unsigned int key_len)
187{
188 u32 kwords = key_len / sizeof(u32);
189 u32 rc, i, j;
190 int err;
191
192 err = aes_check_keylen(key_len);
193 if (err)
194 return err;
195
196 ctx->key_length = key_len;
197
198 for (i = 0; i < kwords; i++)
199 ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
200
201 for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
202 u32 *rki = ctx->key_enc + (i * kwords);
203 u32 *rko = rki + kwords;
204
205 rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
206 rko[1] = rko[0] ^ rki[1];
207 rko[2] = rko[1] ^ rki[2];
208 rko[3] = rko[2] ^ rki[3];
209
210 if (key_len == AES_KEYSIZE_192) {
211 if (i >= 7)
212 break;
213 rko[4] = rko[3] ^ rki[4];
214 rko[5] = rko[4] ^ rki[5];
215 } else if (key_len == AES_KEYSIZE_256) {
216 if (i >= 6)
217 break;
218 rko[4] = subw(rko[3]) ^ rki[4];
219 rko[5] = rko[4] ^ rki[5];
220 rko[6] = rko[5] ^ rki[6];
221 rko[7] = rko[6] ^ rki[7];
222 }
223 }
224
225 /*
226 * Generate the decryption keys for the Equivalent Inverse Cipher.
227 * This involves reversing the order of the round keys, and applying
228 * the Inverse Mix Columns transformation to all but the first and
229 * the last one.
230 */
231 ctx->key_dec[0] = ctx->key_enc[key_len + 24];
232 ctx->key_dec[1] = ctx->key_enc[key_len + 25];
233 ctx->key_dec[2] = ctx->key_enc[key_len + 26];
234 ctx->key_dec[3] = ctx->key_enc[key_len + 27];
235
236 for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
237 ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]);
238 ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
239 ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
240 ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
241 }
242
243 ctx->key_dec[i] = ctx->key_enc[0];
244 ctx->key_dec[i + 1] = ctx->key_enc[1];
245 ctx->key_dec[i + 2] = ctx->key_enc[2];
246 ctx->key_dec[i + 3] = ctx->key_enc[3];
247
248 return 0;
249}
250EXPORT_SYMBOL(aes_expandkey);
251
252/**
253 * aes_encrypt - Encrypt a single AES block
254 * @ctx: Context struct containing the key schedule
255 * @out: Buffer to store the ciphertext
256 * @in: Buffer containing the plaintext
257 */
258void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
259{
260 const u32 *rkp = ctx->key_enc + 4;
261 int rounds = 6 + ctx->key_length / 4;
262 u32 st0[4], st1[4];
263 int round;
264
265 st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
266 st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
267 st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
268 st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
269
270 /*
271 * Force the compiler to emit data independent Sbox references,
272 * by xoring the input with Sbox values that are known to add up
273 * to zero. This pulls the entire Sbox into the D-cache before any
274 * data dependent lookups are done.
275 */
276 st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
277 st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
278 st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
279 st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
280
281 for (round = 0;; round += 2, rkp += 8) {
282 st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
283 st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
284 st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
285 st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
286
287 if (round == rounds - 2)
288 break;
289
290 st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
291 st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
292 st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
293 st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
294 }
295
296 put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
297 put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
298 put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
299 put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
300}
301EXPORT_SYMBOL(aes_encrypt);
302
303/**
304 * aes_decrypt - Decrypt a single AES block
305 * @ctx: Context struct containing the key schedule
306 * @out: Buffer to store the plaintext
307 * @in: Buffer containing the ciphertext
308 */
309void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
310{
311 const u32 *rkp = ctx->key_dec + 4;
312 int rounds = 6 + ctx->key_length / 4;
313 u32 st0[4], st1[4];
314 int round;
315
316 st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
317 st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
318 st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
319 st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
320
321 /*
322 * Force the compiler to emit data independent Sbox references,
323 * by xoring the input with Sbox values that are known to add up
324 * to zero. This pulls the entire Sbox into the D-cache before any
325 * data dependent lookups are done.
326 */
327 st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
328 st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
329 st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
330 st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
331
332 for (round = 0;; round += 2, rkp += 8) {
333 st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
334 st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
335 st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
336 st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
337
338 if (round == rounds - 2)
339 break;
340
341 st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
342 st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
343 st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
344 st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
345 }
346
347 put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
348 put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
349 put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
350 put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
351}
352EXPORT_SYMBOL(aes_decrypt);
353
354MODULE_DESCRIPTION("Generic AES library");
355MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
356MODULE_LICENSE("GPL v2");
diff --git a/lib/crypto/des.c b/lib/crypto/des.c
new file mode 100644
index 000000000000..ef5bb8822aba
--- /dev/null
+++ b/lib/crypto/des.c
@@ -0,0 +1,902 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Cryptographic API.
4 *
5 * DES & Triple DES EDE Cipher Algorithms.
6 *
7 * Copyright (c) 2005 Dag Arne Osvik <da@osvik.no>
8 */
9
10#include <linux/bitops.h>
11#include <linux/compiler.h>
12#include <linux/crypto.h>
13#include <linux/errno.h>
14#include <linux/fips.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/types.h>
19
20#include <asm/unaligned.h>
21
22#include <crypto/des.h>
23#include <crypto/internal/des.h>
24
25#define ROL(x, r) ((x) = rol32((x), (r)))
26#define ROR(x, r) ((x) = ror32((x), (r)))
27
28/* Lookup tables for key expansion */
29
30static const u8 pc1[256] = {
31 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14,
32 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54,
33 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16,
34 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56,
35 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c,
36 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c,
37 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e,
38 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e,
39 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34,
40 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74,
41 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36,
42 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76,
43 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c,
44 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c,
45 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e,
46 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e,
47 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94,
48 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4,
49 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96,
50 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6,
51 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c,
52 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc,
53 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e,
54 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde,
55 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4,
56 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4,
57 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6,
58 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6,
59 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc,
60 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc,
61 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe,
62 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe
63};
64
65static const u8 rs[256] = {
66 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82,
67 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86,
68 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a,
69 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e,
70 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92,
71 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96,
72 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a,
73 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e,
74 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2,
75 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6,
76 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa,
77 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae,
78 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2,
79 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6,
80 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba,
81 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe,
82 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2,
83 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6,
84 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca,
85 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce,
86 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2,
87 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6,
88 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda,
89 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde,
90 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2,
91 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6,
92 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea,
93 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee,
94 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2,
95 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6,
96 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa,
97 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe
98};
99
100static const u32 pc2[1024] = {
101 0x00000000, 0x00000000, 0x00000000, 0x00000000,
102 0x00040000, 0x00000000, 0x04000000, 0x00100000,
103 0x00400000, 0x00000008, 0x00000800, 0x40000000,
104 0x00440000, 0x00000008, 0x04000800, 0x40100000,
105 0x00000400, 0x00000020, 0x08000000, 0x00000100,
106 0x00040400, 0x00000020, 0x0c000000, 0x00100100,
107 0x00400400, 0x00000028, 0x08000800, 0x40000100,
108 0x00440400, 0x00000028, 0x0c000800, 0x40100100,
109 0x80000000, 0x00000010, 0x00000000, 0x00800000,
110 0x80040000, 0x00000010, 0x04000000, 0x00900000,
111 0x80400000, 0x00000018, 0x00000800, 0x40800000,
112 0x80440000, 0x00000018, 0x04000800, 0x40900000,
113 0x80000400, 0x00000030, 0x08000000, 0x00800100,
114 0x80040400, 0x00000030, 0x0c000000, 0x00900100,
115 0x80400400, 0x00000038, 0x08000800, 0x40800100,
116 0x80440400, 0x00000038, 0x0c000800, 0x40900100,
117 0x10000000, 0x00000000, 0x00200000, 0x00001000,
118 0x10040000, 0x00000000, 0x04200000, 0x00101000,
119 0x10400000, 0x00000008, 0x00200800, 0x40001000,
120 0x10440000, 0x00000008, 0x04200800, 0x40101000,
121 0x10000400, 0x00000020, 0x08200000, 0x00001100,
122 0x10040400, 0x00000020, 0x0c200000, 0x00101100,
123 0x10400400, 0x00000028, 0x08200800, 0x40001100,
124 0x10440400, 0x00000028, 0x0c200800, 0x40101100,
125 0x90000000, 0x00000010, 0x00200000, 0x00801000,
126 0x90040000, 0x00000010, 0x04200000, 0x00901000,
127 0x90400000, 0x00000018, 0x00200800, 0x40801000,
128 0x90440000, 0x00000018, 0x04200800, 0x40901000,
129 0x90000400, 0x00000030, 0x08200000, 0x00801100,
130 0x90040400, 0x00000030, 0x0c200000, 0x00901100,
131 0x90400400, 0x00000038, 0x08200800, 0x40801100,
132 0x90440400, 0x00000038, 0x0c200800, 0x40901100,
133 0x00000200, 0x00080000, 0x00000000, 0x00000004,
134 0x00040200, 0x00080000, 0x04000000, 0x00100004,
135 0x00400200, 0x00080008, 0x00000800, 0x40000004,
136 0x00440200, 0x00080008, 0x04000800, 0x40100004,
137 0x00000600, 0x00080020, 0x08000000, 0x00000104,
138 0x00040600, 0x00080020, 0x0c000000, 0x00100104,
139 0x00400600, 0x00080028, 0x08000800, 0x40000104,
140 0x00440600, 0x00080028, 0x0c000800, 0x40100104,
141 0x80000200, 0x00080010, 0x00000000, 0x00800004,
142 0x80040200, 0x00080010, 0x04000000, 0x00900004,
143 0x80400200, 0x00080018, 0x00000800, 0x40800004,
144 0x80440200, 0x00080018, 0x04000800, 0x40900004,
145 0x80000600, 0x00080030, 0x08000000, 0x00800104,
146 0x80040600, 0x00080030, 0x0c000000, 0x00900104,
147 0x80400600, 0x00080038, 0x08000800, 0x40800104,
148 0x80440600, 0x00080038, 0x0c000800, 0x40900104,
149 0x10000200, 0x00080000, 0x00200000, 0x00001004,
150 0x10040200, 0x00080000, 0x04200000, 0x00101004,
151 0x10400200, 0x00080008, 0x00200800, 0x40001004,
152 0x10440200, 0x00080008, 0x04200800, 0x40101004,
153 0x10000600, 0x00080020, 0x08200000, 0x00001104,
154 0x10040600, 0x00080020, 0x0c200000, 0x00101104,
155 0x10400600, 0x00080028, 0x08200800, 0x40001104,
156 0x10440600, 0x00080028, 0x0c200800, 0x40101104,
157 0x90000200, 0x00080010, 0x00200000, 0x00801004,
158 0x90040200, 0x00080010, 0x04200000, 0x00901004,
159 0x90400200, 0x00080018, 0x00200800, 0x40801004,
160 0x90440200, 0x00080018, 0x04200800, 0x40901004,
161 0x90000600, 0x00080030, 0x08200000, 0x00801104,
162 0x90040600, 0x00080030, 0x0c200000, 0x00901104,
163 0x90400600, 0x00080038, 0x08200800, 0x40801104,
164 0x90440600, 0x00080038, 0x0c200800, 0x40901104,
165 0x00000002, 0x00002000, 0x20000000, 0x00000001,
166 0x00040002, 0x00002000, 0x24000000, 0x00100001,
167 0x00400002, 0x00002008, 0x20000800, 0x40000001,
168 0x00440002, 0x00002008, 0x24000800, 0x40100001,
169 0x00000402, 0x00002020, 0x28000000, 0x00000101,
170 0x00040402, 0x00002020, 0x2c000000, 0x00100101,
171 0x00400402, 0x00002028, 0x28000800, 0x40000101,
172 0x00440402, 0x00002028, 0x2c000800, 0x40100101,
173 0x80000002, 0x00002010, 0x20000000, 0x00800001,
174 0x80040002, 0x00002010, 0x24000000, 0x00900001,
175 0x80400002, 0x00002018, 0x20000800, 0x40800001,
176 0x80440002, 0x00002018, 0x24000800, 0x40900001,
177 0x80000402, 0x00002030, 0x28000000, 0x00800101,
178 0x80040402, 0x00002030, 0x2c000000, 0x00900101,
179 0x80400402, 0x00002038, 0x28000800, 0x40800101,
180 0x80440402, 0x00002038, 0x2c000800, 0x40900101,
181 0x10000002, 0x00002000, 0x20200000, 0x00001001,
182 0x10040002, 0x00002000, 0x24200000, 0x00101001,
183 0x10400002, 0x00002008, 0x20200800, 0x40001001,
184 0x10440002, 0x00002008, 0x24200800, 0x40101001,
185 0x10000402, 0x00002020, 0x28200000, 0x00001101,
186 0x10040402, 0x00002020, 0x2c200000, 0x00101101,
187 0x10400402, 0x00002028, 0x28200800, 0x40001101,
188 0x10440402, 0x00002028, 0x2c200800, 0x40101101,
189 0x90000002, 0x00002010, 0x20200000, 0x00801001,
190 0x90040002, 0x00002010, 0x24200000, 0x00901001,
191 0x90400002, 0x00002018, 0x20200800, 0x40801001,
192 0x90440002, 0x00002018, 0x24200800, 0x40901001,
193 0x90000402, 0x00002030, 0x28200000, 0x00801101,
194 0x90040402, 0x00002030, 0x2c200000, 0x00901101,
195 0x90400402, 0x00002038, 0x28200800, 0x40801101,
196 0x90440402, 0x00002038, 0x2c200800, 0x40901101,
197 0x00000202, 0x00082000, 0x20000000, 0x00000005,
198 0x00040202, 0x00082000, 0x24000000, 0x00100005,
199 0x00400202, 0x00082008, 0x20000800, 0x40000005,
200 0x00440202, 0x00082008, 0x24000800, 0x40100005,
201 0x00000602, 0x00082020, 0x28000000, 0x00000105,
202 0x00040602, 0x00082020, 0x2c000000, 0x00100105,
203 0x00400602, 0x00082028, 0x28000800, 0x40000105,
204 0x00440602, 0x00082028, 0x2c000800, 0x40100105,
205 0x80000202, 0x00082010, 0x20000000, 0x00800005,
206 0x80040202, 0x00082010, 0x24000000, 0x00900005,
207 0x80400202, 0x00082018, 0x20000800, 0x40800005,
208 0x80440202, 0x00082018, 0x24000800, 0x40900005,
209 0x80000602, 0x00082030, 0x28000000, 0x00800105,
210 0x80040602, 0x00082030, 0x2c000000, 0x00900105,
211 0x80400602, 0x00082038, 0x28000800, 0x40800105,
212 0x80440602, 0x00082038, 0x2c000800, 0x40900105,
213 0x10000202, 0x00082000, 0x20200000, 0x00001005,
214 0x10040202, 0x00082000, 0x24200000, 0x00101005,
215 0x10400202, 0x00082008, 0x20200800, 0x40001005,
216 0x10440202, 0x00082008, 0x24200800, 0x40101005,
217 0x10000602, 0x00082020, 0x28200000, 0x00001105,
218 0x10040602, 0x00082020, 0x2c200000, 0x00101105,
219 0x10400602, 0x00082028, 0x28200800, 0x40001105,
220 0x10440602, 0x00082028, 0x2c200800, 0x40101105,
221 0x90000202, 0x00082010, 0x20200000, 0x00801005,
222 0x90040202, 0x00082010, 0x24200000, 0x00901005,
223 0x90400202, 0x00082018, 0x20200800, 0x40801005,
224 0x90440202, 0x00082018, 0x24200800, 0x40901005,
225 0x90000602, 0x00082030, 0x28200000, 0x00801105,
226 0x90040602, 0x00082030, 0x2c200000, 0x00901105,
227 0x90400602, 0x00082038, 0x28200800, 0x40801105,
228 0x90440602, 0x00082038, 0x2c200800, 0x40901105,
229
230 0x00000000, 0x00000000, 0x00000000, 0x00000000,
231 0x00000000, 0x00000008, 0x00080000, 0x10000000,
232 0x02000000, 0x00000000, 0x00000080, 0x00001000,
233 0x02000000, 0x00000008, 0x00080080, 0x10001000,
234 0x00004000, 0x00000000, 0x00000040, 0x00040000,
235 0x00004000, 0x00000008, 0x00080040, 0x10040000,
236 0x02004000, 0x00000000, 0x000000c0, 0x00041000,
237 0x02004000, 0x00000008, 0x000800c0, 0x10041000,
238 0x00020000, 0x00008000, 0x08000000, 0x00200000,
239 0x00020000, 0x00008008, 0x08080000, 0x10200000,
240 0x02020000, 0x00008000, 0x08000080, 0x00201000,
241 0x02020000, 0x00008008, 0x08080080, 0x10201000,
242 0x00024000, 0x00008000, 0x08000040, 0x00240000,
243 0x00024000, 0x00008008, 0x08080040, 0x10240000,
244 0x02024000, 0x00008000, 0x080000c0, 0x00241000,
245 0x02024000, 0x00008008, 0x080800c0, 0x10241000,
246 0x00000000, 0x01000000, 0x00002000, 0x00000020,
247 0x00000000, 0x01000008, 0x00082000, 0x10000020,
248 0x02000000, 0x01000000, 0x00002080, 0x00001020,
249 0x02000000, 0x01000008, 0x00082080, 0x10001020,
250 0x00004000, 0x01000000, 0x00002040, 0x00040020,
251 0x00004000, 0x01000008, 0x00082040, 0x10040020,
252 0x02004000, 0x01000000, 0x000020c0, 0x00041020,
253 0x02004000, 0x01000008, 0x000820c0, 0x10041020,
254 0x00020000, 0x01008000, 0x08002000, 0x00200020,
255 0x00020000, 0x01008008, 0x08082000, 0x10200020,
256 0x02020000, 0x01008000, 0x08002080, 0x00201020,
257 0x02020000, 0x01008008, 0x08082080, 0x10201020,
258 0x00024000, 0x01008000, 0x08002040, 0x00240020,
259 0x00024000, 0x01008008, 0x08082040, 0x10240020,
260 0x02024000, 0x01008000, 0x080020c0, 0x00241020,
261 0x02024000, 0x01008008, 0x080820c0, 0x10241020,
262 0x00000400, 0x04000000, 0x00100000, 0x00000004,
263 0x00000400, 0x04000008, 0x00180000, 0x10000004,
264 0x02000400, 0x04000000, 0x00100080, 0x00001004,
265 0x02000400, 0x04000008, 0x00180080, 0x10001004,
266 0x00004400, 0x04000000, 0x00100040, 0x00040004,
267 0x00004400, 0x04000008, 0x00180040, 0x10040004,
268 0x02004400, 0x04000000, 0x001000c0, 0x00041004,
269 0x02004400, 0x04000008, 0x001800c0, 0x10041004,
270 0x00020400, 0x04008000, 0x08100000, 0x00200004,
271 0x00020400, 0x04008008, 0x08180000, 0x10200004,
272 0x02020400, 0x04008000, 0x08100080, 0x00201004,
273 0x02020400, 0x04008008, 0x08180080, 0x10201004,
274 0x00024400, 0x04008000, 0x08100040, 0x00240004,
275 0x00024400, 0x04008008, 0x08180040, 0x10240004,
276 0x02024400, 0x04008000, 0x081000c0, 0x00241004,
277 0x02024400, 0x04008008, 0x081800c0, 0x10241004,
278 0x00000400, 0x05000000, 0x00102000, 0x00000024,
279 0x00000400, 0x05000008, 0x00182000, 0x10000024,
280 0x02000400, 0x05000000, 0x00102080, 0x00001024,
281 0x02000400, 0x05000008, 0x00182080, 0x10001024,
282 0x00004400, 0x05000000, 0x00102040, 0x00040024,
283 0x00004400, 0x05000008, 0x00182040, 0x10040024,
284 0x02004400, 0x05000000, 0x001020c0, 0x00041024,
285 0x02004400, 0x05000008, 0x001820c0, 0x10041024,
286 0x00020400, 0x05008000, 0x08102000, 0x00200024,
287 0x00020400, 0x05008008, 0x08182000, 0x10200024,
288 0x02020400, 0x05008000, 0x08102080, 0x00201024,
289 0x02020400, 0x05008008, 0x08182080, 0x10201024,
290 0x00024400, 0x05008000, 0x08102040, 0x00240024,
291 0x00024400, 0x05008008, 0x08182040, 0x10240024,
292 0x02024400, 0x05008000, 0x081020c0, 0x00241024,
293 0x02024400, 0x05008008, 0x081820c0, 0x10241024,
294 0x00000800, 0x00010000, 0x20000000, 0x00000010,
295 0x00000800, 0x00010008, 0x20080000, 0x10000010,
296 0x02000800, 0x00010000, 0x20000080, 0x00001010,
297 0x02000800, 0x00010008, 0x20080080, 0x10001010,
298 0x00004800, 0x00010000, 0x20000040, 0x00040010,
299 0x00004800, 0x00010008, 0x20080040, 0x10040010,
300 0x02004800, 0x00010000, 0x200000c0, 0x00041010,
301 0x02004800, 0x00010008, 0x200800c0, 0x10041010,
302 0x00020800, 0x00018000, 0x28000000, 0x00200010,
303 0x00020800, 0x00018008, 0x28080000, 0x10200010,
304 0x02020800, 0x00018000, 0x28000080, 0x00201010,
305 0x02020800, 0x00018008, 0x28080080, 0x10201010,
306 0x00024800, 0x00018000, 0x28000040, 0x00240010,
307 0x00024800, 0x00018008, 0x28080040, 0x10240010,
308 0x02024800, 0x00018000, 0x280000c0, 0x00241010,
309 0x02024800, 0x00018008, 0x280800c0, 0x10241010,
310 0x00000800, 0x01010000, 0x20002000, 0x00000030,
311 0x00000800, 0x01010008, 0x20082000, 0x10000030,
312 0x02000800, 0x01010000, 0x20002080, 0x00001030,
313 0x02000800, 0x01010008, 0x20082080, 0x10001030,
314 0x00004800, 0x01010000, 0x20002040, 0x00040030,
315 0x00004800, 0x01010008, 0x20082040, 0x10040030,
316 0x02004800, 0x01010000, 0x200020c0, 0x00041030,
317 0x02004800, 0x01010008, 0x200820c0, 0x10041030,
318 0x00020800, 0x01018000, 0x28002000, 0x00200030,
319 0x00020800, 0x01018008, 0x28082000, 0x10200030,
320 0x02020800, 0x01018000, 0x28002080, 0x00201030,
321 0x02020800, 0x01018008, 0x28082080, 0x10201030,
322 0x00024800, 0x01018000, 0x28002040, 0x00240030,
323 0x00024800, 0x01018008, 0x28082040, 0x10240030,
324 0x02024800, 0x01018000, 0x280020c0, 0x00241030,
325 0x02024800, 0x01018008, 0x280820c0, 0x10241030,
326 0x00000c00, 0x04010000, 0x20100000, 0x00000014,
327 0x00000c00, 0x04010008, 0x20180000, 0x10000014,
328 0x02000c00, 0x04010000, 0x20100080, 0x00001014,
329 0x02000c00, 0x04010008, 0x20180080, 0x10001014,
330 0x00004c00, 0x04010000, 0x20100040, 0x00040014,
331 0x00004c00, 0x04010008, 0x20180040, 0x10040014,
332 0x02004c00, 0x04010000, 0x201000c0, 0x00041014,
333 0x02004c00, 0x04010008, 0x201800c0, 0x10041014,
334 0x00020c00, 0x04018000, 0x28100000, 0x00200014,
335 0x00020c00, 0x04018008, 0x28180000, 0x10200014,
336 0x02020c00, 0x04018000, 0x28100080, 0x00201014,
337 0x02020c00, 0x04018008, 0x28180080, 0x10201014,
338 0x00024c00, 0x04018000, 0x28100040, 0x00240014,
339 0x00024c00, 0x04018008, 0x28180040, 0x10240014,
340 0x02024c00, 0x04018000, 0x281000c0, 0x00241014,
341 0x02024c00, 0x04018008, 0x281800c0, 0x10241014,
342 0x00000c00, 0x05010000, 0x20102000, 0x00000034,
343 0x00000c00, 0x05010008, 0x20182000, 0x10000034,
344 0x02000c00, 0x05010000, 0x20102080, 0x00001034,
345 0x02000c00, 0x05010008, 0x20182080, 0x10001034,
346 0x00004c00, 0x05010000, 0x20102040, 0x00040034,
347 0x00004c00, 0x05010008, 0x20182040, 0x10040034,
348 0x02004c00, 0x05010000, 0x201020c0, 0x00041034,
349 0x02004c00, 0x05010008, 0x201820c0, 0x10041034,
350 0x00020c00, 0x05018000, 0x28102000, 0x00200034,
351 0x00020c00, 0x05018008, 0x28182000, 0x10200034,
352 0x02020c00, 0x05018000, 0x28102080, 0x00201034,
353 0x02020c00, 0x05018008, 0x28182080, 0x10201034,
354 0x00024c00, 0x05018000, 0x28102040, 0x00240034,
355 0x00024c00, 0x05018008, 0x28182040, 0x10240034,
356 0x02024c00, 0x05018000, 0x281020c0, 0x00241034,
357 0x02024c00, 0x05018008, 0x281820c0, 0x10241034
358};
359
360/* S-box lookup tables */
361
362static const u32 S1[64] = {
363 0x01010400, 0x00000000, 0x00010000, 0x01010404,
364 0x01010004, 0x00010404, 0x00000004, 0x00010000,
365 0x00000400, 0x01010400, 0x01010404, 0x00000400,
366 0x01000404, 0x01010004, 0x01000000, 0x00000004,
367 0x00000404, 0x01000400, 0x01000400, 0x00010400,
368 0x00010400, 0x01010000, 0x01010000, 0x01000404,
369 0x00010004, 0x01000004, 0x01000004, 0x00010004,
370 0x00000000, 0x00000404, 0x00010404, 0x01000000,
371 0x00010000, 0x01010404, 0x00000004, 0x01010000,
372 0x01010400, 0x01000000, 0x01000000, 0x00000400,
373 0x01010004, 0x00010000, 0x00010400, 0x01000004,
374 0x00000400, 0x00000004, 0x01000404, 0x00010404,
375 0x01010404, 0x00010004, 0x01010000, 0x01000404,
376 0x01000004, 0x00000404, 0x00010404, 0x01010400,
377 0x00000404, 0x01000400, 0x01000400, 0x00000000,
378 0x00010004, 0x00010400, 0x00000000, 0x01010004
379};
380
381static const u32 S2[64] = {
382 0x80108020, 0x80008000, 0x00008000, 0x00108020,
383 0x00100000, 0x00000020, 0x80100020, 0x80008020,
384 0x80000020, 0x80108020, 0x80108000, 0x80000000,
385 0x80008000, 0x00100000, 0x00000020, 0x80100020,
386 0x00108000, 0x00100020, 0x80008020, 0x00000000,
387 0x80000000, 0x00008000, 0x00108020, 0x80100000,
388 0x00100020, 0x80000020, 0x00000000, 0x00108000,
389 0x00008020, 0x80108000, 0x80100000, 0x00008020,
390 0x00000000, 0x00108020, 0x80100020, 0x00100000,
391 0x80008020, 0x80100000, 0x80108000, 0x00008000,
392 0x80100000, 0x80008000, 0x00000020, 0x80108020,
393 0x00108020, 0x00000020, 0x00008000, 0x80000000,
394 0x00008020, 0x80108000, 0x00100000, 0x80000020,
395 0x00100020, 0x80008020, 0x80000020, 0x00100020,
396 0x00108000, 0x00000000, 0x80008000, 0x00008020,
397 0x80000000, 0x80100020, 0x80108020, 0x00108000
398};
399
400static const u32 S3[64] = {
401 0x00000208, 0x08020200, 0x00000000, 0x08020008,
402 0x08000200, 0x00000000, 0x00020208, 0x08000200,
403 0x00020008, 0x08000008, 0x08000008, 0x00020000,
404 0x08020208, 0x00020008, 0x08020000, 0x00000208,
405 0x08000000, 0x00000008, 0x08020200, 0x00000200,
406 0x00020200, 0x08020000, 0x08020008, 0x00020208,
407 0x08000208, 0x00020200, 0x00020000, 0x08000208,
408 0x00000008, 0x08020208, 0x00000200, 0x08000000,
409 0x08020200, 0x08000000, 0x00020008, 0x00000208,
410 0x00020000, 0x08020200, 0x08000200, 0x00000000,
411 0x00000200, 0x00020008, 0x08020208, 0x08000200,
412 0x08000008, 0x00000200, 0x00000000, 0x08020008,
413 0x08000208, 0x00020000, 0x08000000, 0x08020208,
414 0x00000008, 0x00020208, 0x00020200, 0x08000008,
415 0x08020000, 0x08000208, 0x00000208, 0x08020000,
416 0x00020208, 0x00000008, 0x08020008, 0x00020200
417};
418
419static const u32 S4[64] = {
420 0x00802001, 0x00002081, 0x00002081, 0x00000080,
421 0x00802080, 0x00800081, 0x00800001, 0x00002001,
422 0x00000000, 0x00802000, 0x00802000, 0x00802081,
423 0x00000081, 0x00000000, 0x00800080, 0x00800001,
424 0x00000001, 0x00002000, 0x00800000, 0x00802001,
425 0x00000080, 0x00800000, 0x00002001, 0x00002080,
426 0x00800081, 0x00000001, 0x00002080, 0x00800080,
427 0x00002000, 0x00802080, 0x00802081, 0x00000081,
428 0x00800080, 0x00800001, 0x00802000, 0x00802081,
429 0x00000081, 0x00000000, 0x00000000, 0x00802000,
430 0x00002080, 0x00800080, 0x00800081, 0x00000001,
431 0x00802001, 0x00002081, 0x00002081, 0x00000080,
432 0x00802081, 0x00000081, 0x00000001, 0x00002000,
433 0x00800001, 0x00002001, 0x00802080, 0x00800081,
434 0x00002001, 0x00002080, 0x00800000, 0x00802001,
435 0x00000080, 0x00800000, 0x00002000, 0x00802080
436};
437
438static const u32 S5[64] = {
439 0x00000100, 0x02080100, 0x02080000, 0x42000100,
440 0x00080000, 0x00000100, 0x40000000, 0x02080000,
441 0x40080100, 0x00080000, 0x02000100, 0x40080100,
442 0x42000100, 0x42080000, 0x00080100, 0x40000000,
443 0x02000000, 0x40080000, 0x40080000, 0x00000000,
444 0x40000100, 0x42080100, 0x42080100, 0x02000100,
445 0x42080000, 0x40000100, 0x00000000, 0x42000000,
446 0x02080100, 0x02000000, 0x42000000, 0x00080100,
447 0x00080000, 0x42000100, 0x00000100, 0x02000000,
448 0x40000000, 0x02080000, 0x42000100, 0x40080100,
449 0x02000100, 0x40000000, 0x42080000, 0x02080100,
450 0x40080100, 0x00000100, 0x02000000, 0x42080000,
451 0x42080100, 0x00080100, 0x42000000, 0x42080100,
452 0x02080000, 0x00000000, 0x40080000, 0x42000000,
453 0x00080100, 0x02000100, 0x40000100, 0x00080000,
454 0x00000000, 0x40080000, 0x02080100, 0x40000100
455};
456
457static const u32 S6[64] = {
458 0x20000010, 0x20400000, 0x00004000, 0x20404010,
459 0x20400000, 0x00000010, 0x20404010, 0x00400000,
460 0x20004000, 0x00404010, 0x00400000, 0x20000010,
461 0x00400010, 0x20004000, 0x20000000, 0x00004010,
462 0x00000000, 0x00400010, 0x20004010, 0x00004000,
463 0x00404000, 0x20004010, 0x00000010, 0x20400010,
464 0x20400010, 0x00000000, 0x00404010, 0x20404000,
465 0x00004010, 0x00404000, 0x20404000, 0x20000000,
466 0x20004000, 0x00000010, 0x20400010, 0x00404000,
467 0x20404010, 0x00400000, 0x00004010, 0x20000010,
468 0x00400000, 0x20004000, 0x20000000, 0x00004010,
469 0x20000010, 0x20404010, 0x00404000, 0x20400000,
470 0x00404010, 0x20404000, 0x00000000, 0x20400010,
471 0x00000010, 0x00004000, 0x20400000, 0x00404010,
472 0x00004000, 0x00400010, 0x20004010, 0x00000000,
473 0x20404000, 0x20000000, 0x00400010, 0x20004010
474};
475
476static const u32 S7[64] = {
477 0x00200000, 0x04200002, 0x04000802, 0x00000000,
478 0x00000800, 0x04000802, 0x00200802, 0x04200800,
479 0x04200802, 0x00200000, 0x00000000, 0x04000002,
480 0x00000002, 0x04000000, 0x04200002, 0x00000802,
481 0x04000800, 0x00200802, 0x00200002, 0x04000800,
482 0x04000002, 0x04200000, 0x04200800, 0x00200002,
483 0x04200000, 0x00000800, 0x00000802, 0x04200802,
484 0x00200800, 0x00000002, 0x04000000, 0x00200800,
485 0x04000000, 0x00200800, 0x00200000, 0x04000802,
486 0x04000802, 0x04200002, 0x04200002, 0x00000002,
487 0x00200002, 0x04000000, 0x04000800, 0x00200000,
488 0x04200800, 0x00000802, 0x00200802, 0x04200800,
489 0x00000802, 0x04000002, 0x04200802, 0x04200000,
490 0x00200800, 0x00000000, 0x00000002, 0x04200802,
491 0x00000000, 0x00200802, 0x04200000, 0x00000800,
492 0x04000002, 0x04000800, 0x00000800, 0x00200002
493};
494
495static const u32 S8[64] = {
496 0x10001040, 0x00001000, 0x00040000, 0x10041040,
497 0x10000000, 0x10001040, 0x00000040, 0x10000000,
498 0x00040040, 0x10040000, 0x10041040, 0x00041000,
499 0x10041000, 0x00041040, 0x00001000, 0x00000040,
500 0x10040000, 0x10000040, 0x10001000, 0x00001040,
501 0x00041000, 0x00040040, 0x10040040, 0x10041000,
502 0x00001040, 0x00000000, 0x00000000, 0x10040040,
503 0x10000040, 0x10001000, 0x00041040, 0x00040000,
504 0x00041040, 0x00040000, 0x10041000, 0x00001000,
505 0x00000040, 0x10040040, 0x00001000, 0x00041040,
506 0x10001000, 0x00000040, 0x10000040, 0x10040000,
507 0x10040040, 0x10000000, 0x00040000, 0x10001040,
508 0x00000000, 0x10041040, 0x00040040, 0x10000040,
509 0x10040000, 0x10001000, 0x10001040, 0x00000000,
510 0x10041040, 0x00041000, 0x00041000, 0x00001040,
511 0x00001040, 0x00040040, 0x10000000, 0x10041000
512};
513
514/* Encryption components: IP, FP, and round function */
515
516#define IP(L, R, T) \
517 ROL(R, 4); \
518 T = L; \
519 L ^= R; \
520 L &= 0xf0f0f0f0; \
521 R ^= L; \
522 L ^= T; \
523 ROL(R, 12); \
524 T = L; \
525 L ^= R; \
526 L &= 0xffff0000; \
527 R ^= L; \
528 L ^= T; \
529 ROR(R, 14); \
530 T = L; \
531 L ^= R; \
532 L &= 0xcccccccc; \
533 R ^= L; \
534 L ^= T; \
535 ROL(R, 6); \
536 T = L; \
537 L ^= R; \
538 L &= 0xff00ff00; \
539 R ^= L; \
540 L ^= T; \
541 ROR(R, 7); \
542 T = L; \
543 L ^= R; \
544 L &= 0xaaaaaaaa; \
545 R ^= L; \
546 L ^= T; \
547 ROL(L, 1);
548
549#define FP(L, R, T) \
550 ROR(L, 1); \
551 T = L; \
552 L ^= R; \
553 L &= 0xaaaaaaaa; \
554 R ^= L; \
555 L ^= T; \
556 ROL(R, 7); \
557 T = L; \
558 L ^= R; \
559 L &= 0xff00ff00; \
560 R ^= L; \
561 L ^= T; \
562 ROR(R, 6); \
563 T = L; \
564 L ^= R; \
565 L &= 0xcccccccc; \
566 R ^= L; \
567 L ^= T; \
568 ROL(R, 14); \
569 T = L; \
570 L ^= R; \
571 L &= 0xffff0000; \
572 R ^= L; \
573 L ^= T; \
574 ROR(R, 12); \
575 T = L; \
576 L ^= R; \
577 L &= 0xf0f0f0f0; \
578 R ^= L; \
579 L ^= T; \
580 ROR(R, 4);
581
582#define ROUND(L, R, A, B, K, d) \
583 B = K[0]; A = K[1]; K += d; \
584 B ^= R; A ^= R; \
585 B &= 0x3f3f3f3f; ROR(A, 4); \
586 L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \
587 L ^= S6[0xff & (B >> 8)]; B >>= 16; \
588 L ^= S7[0xff & A]; \
589 L ^= S5[0xff & (A >> 8)]; A >>= 16; \
590 L ^= S4[0xff & B]; \
591 L ^= S2[0xff & (B >> 8)]; \
592 L ^= S3[0xff & A]; \
593 L ^= S1[0xff & (A >> 8)];
594
595/*
596 * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved
597 * tables of 128 elements. One set is for C_i and the other for D_i, while
598 * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i.
599 *
600 * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i
601 * or D_i in bits 7-1 (bit 0 being the least significant).
602 */
603
604#define T1(x) pt[2 * (x) + 0]
605#define T2(x) pt[2 * (x) + 1]
606#define T3(x) pt[2 * (x) + 2]
607#define T4(x) pt[2 * (x) + 3]
608
609#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
610
611/*
612 * Encryption key expansion
613 *
614 * RFC2451: Weak key checks SHOULD be performed.
615 *
616 * FIPS 74:
617 *
618 * Keys having duals are keys which produce all zeros, all ones, or
619 * alternating zero-one patterns in the C and D registers after Permuted
620 * Choice 1 has operated on the key.
621 *
622 */
623static unsigned long des_ekey(u32 *pe, const u8 *k)
624{
625 /* K&R: long is at least 32 bits */
626 unsigned long a, b, c, d, w;
627 const u32 *pt = pc2;
628
629 d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
630 c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
631 b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
632 a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
633
634 pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
635 pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
636 pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
637 pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
638 pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
639 pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
640 pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
641 pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
642 pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
643 pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
644 pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
645 pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
646 pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
647 pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
648 pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
649 pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
650
651 /* Check if first half is weak */
652 w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
653
654 /* Skip to next table set */
655 pt += 512;
656
657 d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
658 c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
659 b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
660 a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
661
662 /* Check if second half is weak */
663 w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
664
665 pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
666 pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
667 pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
668 pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
669 pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
670 pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
671 pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
672 pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
673 pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
674 pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
675 pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
676 pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
677 pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
678 pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
679 pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
680 pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
681
682 /* Fixup: 2413 5768 -> 1357 2468 */
683 for (d = 0; d < 16; ++d) {
684 a = pe[2 * d];
685 b = pe[2 * d + 1];
686 c = a ^ b;
687 c &= 0xffff0000;
688 a ^= c;
689 b ^= c;
690 ROL(b, 18);
691 pe[2 * d] = a;
692 pe[2 * d + 1] = b;
693 }
694
695 /* Zero if weak key */
696 return w;
697}
698
699int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen)
700{
701 if (keylen != DES_KEY_SIZE)
702 return -EINVAL;
703
704 return des_ekey(ctx->expkey, key) ? 0 : -ENOKEY;
705}
706EXPORT_SYMBOL_GPL(des_expand_key);
707
708/*
709 * Decryption key expansion
710 *
711 * No weak key checking is performed, as this is only used by triple DES
712 *
713 */
714static void dkey(u32 *pe, const u8 *k)
715{
716 /* K&R: long is at least 32 bits */
717 unsigned long a, b, c, d;
718 const u32 *pt = pc2;
719
720 d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
721 c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
722 b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
723 a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
724
725 pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
726 pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
727 pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
728 pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
729 pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
730 pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
731 pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
732 pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
733 pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
734 pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
735 pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
736 pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
737 pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
738 pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
739 pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
740 pe[15 * 2] = DES_PC2(b, c, d, a);
741
742 /* Skip to next table set */
743 pt += 512;
744
745 d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
746 c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
747 b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
748 a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
749
750 pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
751 pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
752 pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
753 pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
754 pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
755 pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
756 pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
757 pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
758 pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
759 pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
760 pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
761 pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
762 pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
763 pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
764 pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
765 pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
766
767 /* Fixup: 2413 5768 -> 1357 2468 */
768 for (d = 0; d < 16; ++d) {
769 a = pe[2 * d];
770 b = pe[2 * d + 1];
771 c = a ^ b;
772 c &= 0xffff0000;
773 a ^= c;
774 b ^= c;
775 ROL(b, 18);
776 pe[2 * d] = a;
777 pe[2 * d + 1] = b;
778 }
779}
780
781void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
782{
783 const u32 *K = ctx->expkey;
784 u32 L, R, A, B;
785 int i;
786
787 L = get_unaligned_le32(src);
788 R = get_unaligned_le32(src + 4);
789
790 IP(L, R, A);
791 for (i = 0; i < 8; i++) {
792 ROUND(L, R, A, B, K, 2);
793 ROUND(R, L, A, B, K, 2);
794 }
795 FP(R, L, A);
796
797 put_unaligned_le32(R, dst);
798 put_unaligned_le32(L, dst + 4);
799}
800EXPORT_SYMBOL_GPL(des_encrypt);
801
802void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
803{
804 const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
805 u32 L, R, A, B;
806 int i;
807
808 L = get_unaligned_le32(src);
809 R = get_unaligned_le32(src + 4);
810
811 IP(L, R, A);
812 for (i = 0; i < 8; i++) {
813 ROUND(L, R, A, B, K, -2);
814 ROUND(R, L, A, B, K, -2);
815 }
816 FP(R, L, A);
817
818 put_unaligned_le32(R, dst);
819 put_unaligned_le32(L, dst + 4);
820}
821EXPORT_SYMBOL_GPL(des_decrypt);
822
823int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key,
824 unsigned int keylen)
825{
826 u32 *pe = ctx->expkey;
827 int err;
828
829 if (keylen != DES3_EDE_KEY_SIZE)
830 return -EINVAL;
831
832 err = des3_ede_verify_key(key, keylen, true);
833 if (err && err != -ENOKEY)
834 return err;
835
836 des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
837 dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
838 des_ekey(pe, key);
839
840 return err;
841}
842EXPORT_SYMBOL_GPL(des3_ede_expand_key);
843
844void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
845{
846 const u32 *K = dctx->expkey;
847 u32 L, R, A, B;
848 int i;
849
850 L = get_unaligned_le32(src);
851 R = get_unaligned_le32(src + 4);
852
853 IP(L, R, A);
854 for (i = 0; i < 8; i++) {
855 ROUND(L, R, A, B, K, 2);
856 ROUND(R, L, A, B, K, 2);
857 }
858 for (i = 0; i < 8; i++) {
859 ROUND(R, L, A, B, K, 2);
860 ROUND(L, R, A, B, K, 2);
861 }
862 for (i = 0; i < 8; i++) {
863 ROUND(L, R, A, B, K, 2);
864 ROUND(R, L, A, B, K, 2);
865 }
866 FP(R, L, A);
867
868 put_unaligned_le32(R, dst);
869 put_unaligned_le32(L, dst + 4);
870}
871EXPORT_SYMBOL_GPL(des3_ede_encrypt);
872
873void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
874{
875 const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
876 u32 L, R, A, B;
877 int i;
878
879 L = get_unaligned_le32(src);
880 R = get_unaligned_le32(src + 4);
881
882 IP(L, R, A);
883 for (i = 0; i < 8; i++) {
884 ROUND(L, R, A, B, K, -2);
885 ROUND(R, L, A, B, K, -2);
886 }
887 for (i = 0; i < 8; i++) {
888 ROUND(R, L, A, B, K, -2);
889 ROUND(L, R, A, B, K, -2);
890 }
891 for (i = 0; i < 8; i++) {
892 ROUND(L, R, A, B, K, -2);
893 ROUND(R, L, A, B, K, -2);
894 }
895 FP(R, L, A);
896
897 put_unaligned_le32(R, dst);
898 put_unaligned_le32(L, dst + 4);
899}
900EXPORT_SYMBOL_GPL(des3_ede_decrypt);
901
902MODULE_LICENSE("GPL");
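
A minimal usage sketch of the DES library helpers exported above (illustration only, not part of the diff). It assumes struct des_ctx and the prototypes live in the library header, written here as <crypto/internal/des.h>, and that memzero_explicit() comes from <linux/string.h>; DES_KEY_SIZE and the -ENOKEY weak-key behaviour are taken from the listing itself.

#include <linux/types.h>
#include <linux/string.h>		/* memzero_explicit() */
#include <crypto/internal/des.h>	/* assumed home of struct des_ctx */

static int demo_des_one_block(const u8 *key, const u8 *in, u8 *out)
{
	struct des_ctx ctx;
	int err;

	/* des_expand_key() returns -ENOKEY for weak keys (see des_ekey() above) */
	err = des_expand_key(&ctx, key, DES_KEY_SIZE);
	if (err)
		return err;

	des_encrypt(&ctx, out, in);		/* one 8-byte block */
	des_decrypt(&ctx, out, out);		/* round-trips back to the plaintext */
	memzero_explicit(&ctx, sizeof(ctx));	/* scrub the expanded key */
	return 0;
}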
diff --git a/lib/sha256.c b/lib/crypto/sha256.c
index d9af148d4349..66cb04b0cf4e 100644
--- a/lib/sha256.c
+++ b/lib/crypto/sha256.c
@@ -12,9 +12,11 @@
12 */ 12 */
13 13
14#include <linux/bitops.h> 14#include <linux/bitops.h>
15#include <linux/sha256.h> 15#include <linux/export.h>
16#include <linux/module.h>
16#include <linux/string.h> 17#include <linux/string.h>
17#include <asm/byteorder.h> 18#include <crypto/sha.h>
19#include <asm/unaligned.h>
18 20
19static inline u32 Ch(u32 x, u32 y, u32 z) 21static inline u32 Ch(u32 x, u32 y, u32 z)
20{ 22{
@@ -33,7 +35,7 @@ static inline u32 Maj(u32 x, u32 y, u32 z)
33 35
34static inline void LOAD_OP(int I, u32 *W, const u8 *input) 36static inline void LOAD_OP(int I, u32 *W, const u8 *input)
35{ 37{
36 W[I] = __be32_to_cpu(((__be32 *)(input))[I]); 38 W[I] = get_unaligned_be32((__u32 *)input + I);
37} 39}
38 40
39static inline void BLEND_OP(int I, u32 *W) 41static inline void BLEND_OP(int I, u32 *W)
@@ -92,131 +94,116 @@ static void sha256_transform(u32 *state, const u8 *input)
92 t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14]; 94 t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
93 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; 95 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
94 t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15]; 96 t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
95 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 97 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
96 98
97 t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16]; 99 t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
98 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; 100 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
99 t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17]; 101 t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
100 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; 102 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
101 t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18]; 103 t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
102 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; 104 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
103 t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19]; 105 t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
104 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; 106 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
105 t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20]; 107 t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
106 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; 108 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
107 t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; 109 t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
108 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; 110 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
109 t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22]; 111 t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
110 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; 112 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
111 t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23]; 113 t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
112 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 114 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
113 115
114 t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24]; 116 t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
115 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; 117 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
116 t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25]; 118 t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
117 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; 119 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
118 t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26]; 120 t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
119 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; 121 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
120 t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27]; 122 t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
121 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; 123 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
122 t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28]; 124 t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
123 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; 125 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
124 t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; 126 t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
125 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; 127 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
126 t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30]; 128 t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
127 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; 129 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
128 t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31]; 130 t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
129 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 131 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
130 132
131 t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32]; 133 t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
132 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; 134 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
133 t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33]; 135 t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
134 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; 136 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
135 t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34]; 137 t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
136 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; 138 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
137 t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35]; 139 t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
138 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; 140 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
139 t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36]; 141 t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
140 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; 142 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
141 t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; 143 t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
142 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; 144 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
143 t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38]; 145 t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
144 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; 146 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
145 t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39]; 147 t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
146 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 148 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
147 149
148 t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40]; 150 t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
149 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; 151 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
150 t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41]; 152 t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
151 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; 153 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
152 t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42]; 154 t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
153 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; 155 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
154 t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43]; 156 t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
155 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; 157 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
156 t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44]; 158 t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
157 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; 159 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
158 t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; 160 t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
159 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; 161 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
160 t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46]; 162 t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
161 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; 163 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
162 t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47]; 164 t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
163 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 165 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
164 166
165 t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48]; 167 t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
166 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; 168 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
167 t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49]; 169 t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
168 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; 170 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
169 t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50]; 171 t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
170 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; 172 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
171 t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51]; 173 t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
172 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; 174 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
173 t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52]; 175 t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
174 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; 176 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
175 t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53]; 177 t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
176 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; 178 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
177 t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54]; 179 t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
178 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; 180 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
179 t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55]; 181 t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
180 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 182 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
181 183
182 t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56]; 184 t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
183 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; 185 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
184 t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57]; 186 t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
185 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; 187 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
186 t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58]; 188 t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
187 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; 189 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
188 t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59]; 190 t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
189 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; 191 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
190 t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60]; 192 t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
191 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; 193 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
192 t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; 194 t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
193 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; 195 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
194 t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62]; 196 t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
195 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; 197 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
196 t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63]; 198 t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
197 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; 199 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
198 200
199 state[0] += a; state[1] += b; state[2] += c; state[3] += d; 201 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
200 state[4] += e; state[5] += f; state[6] += g; state[7] += h; 202 state[4] += e; state[5] += f; state[6] += g; state[7] += h;
201 203
202 /* clear any sensitive info... */ 204 /* clear any sensitive info... */
203 a = b = c = d = e = f = g = h = t1 = t2 = 0; 205 a = b = c = d = e = f = g = h = t1 = t2 = 0;
204 memset(W, 0, 64 * sizeof(u32)); 206 memzero_explicit(W, 64 * sizeof(u32));
205}
206
207int sha256_init(struct sha256_state *sctx)
208{
209 sctx->state[0] = SHA256_H0;
210 sctx->state[1] = SHA256_H1;
211 sctx->state[2] = SHA256_H2;
212 sctx->state[3] = SHA256_H3;
213 sctx->state[4] = SHA256_H4;
214 sctx->state[5] = SHA256_H5;
215 sctx->state[6] = SHA256_H6;
216 sctx->state[7] = SHA256_H7;
217 sctx->count = 0;
218
219 return 0;
220} 207}
221 208
222int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) 209int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
@@ -248,8 +235,15 @@ int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
248 235
249 return 0; 236 return 0;
250} 237}
238EXPORT_SYMBOL(sha256_update);
251 239
252int sha256_final(struct sha256_state *sctx, u8 *out) 240int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
241{
242 return sha256_update(sctx, data, len);
243}
244EXPORT_SYMBOL(sha224_update);
245
246static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
253{ 247{
254 __be32 *dst = (__be32 *)out; 248 __be32 *dst = (__be32 *)out;
255 __be64 bits; 249 __be64 bits;
@@ -269,11 +263,25 @@ int sha256_final(struct sha256_state *sctx, u8 *out)
269 sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); 263 sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
270 264
271 /* Store state in digest */ 265 /* Store state in digest */
272 for (i = 0; i < 8; i++) 266 for (i = 0; i < digest_words; i++)
273 dst[i] = cpu_to_be32(sctx->state[i]); 267 put_unaligned_be32(sctx->state[i], &dst[i]);
274 268
275 /* Zeroize sensitive information. */ 269 /* Zeroize sensitive information. */
276 memset(sctx, 0, sizeof(*sctx)); 270 memset(sctx, 0, sizeof(*sctx));
277 271
278 return 0; 272 return 0;
279} 273}
274
275int sha256_final(struct sha256_state *sctx, u8 *out)
276{
277 return __sha256_final(sctx, out, 8);
278}
279EXPORT_SYMBOL(sha256_final);
280
281int sha224_final(struct sha256_state *sctx, u8 *out)
282{
283 return __sha256_final(sctx, out, 7);
284}
285EXPORT_SYMBOL(sha224_final);
286
287MODULE_LICENSE("GPL");
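
A minimal sketch of driving the relocated SHA-256 library (illustration only, not part of the diff): sha256_update() and sha256_final() are exported above, while struct sha256_state, sha256_init() and SHA256_DIGEST_SIZE are assumed to come from <crypto/sha.h>, which the file now includes.

#include <linux/types.h>
#include <crypto/sha.h>		/* assumed: struct sha256_state, sha256_init() */

static void demo_sha256(const u8 *data, unsigned int len,
			u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);			/* assumed init helper */
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);		/* also zeroizes sctx (see above) */
}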
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 3bb6260d8f42..2dceaca27489 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -639,30 +639,12 @@ do { \
639 ************** MIPS ***************** 639 ************** MIPS *****************
640 ***************************************/ 640 ***************************************/
641#if defined(__mips__) && W_TYPE_SIZE == 32 641#if defined(__mips__) && W_TYPE_SIZE == 32
642#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
643#define umul_ppmm(w1, w0, u, v) \ 642#define umul_ppmm(w1, w0, u, v) \
644do { \ 643do { \
645 UDItype __ll = (UDItype)(u) * (v); \ 644 UDItype __ll = (UDItype)(u) * (v); \
646 w1 = __ll >> 32; \ 645 w1 = __ll >> 32; \
647 w0 = __ll; \ 646 w0 = __ll; \
648} while (0) 647} while (0)
649#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
650#define umul_ppmm(w1, w0, u, v) \
651 __asm__ ("multu %2,%3" \
652 : "=l" ((USItype)(w0)), \
653 "=h" ((USItype)(w1)) \
654 : "d" ((USItype)(u)), \
655 "d" ((USItype)(v)))
656#else
657#define umul_ppmm(w1, w0, u, v) \
658 __asm__ ("multu %2,%3\n" \
659 "mflo %0\n" \
660 "mfhi %1" \
661 : "=d" ((USItype)(w0)), \
662 "=d" ((USItype)(w1)) \
663 : "d" ((USItype)(u)), \
664 "d" ((USItype)(v)))
665#endif
666#define UMUL_TIME 10 648#define UMUL_TIME 10
667#define UDIV_TIME 100 649#define UDIV_TIME 100
668#endif /* __mips__ */ 650#endif /* __mips__ */
@@ -687,7 +669,7 @@ do { \
687 : "d" ((UDItype)(u)), \ 669 : "d" ((UDItype)(u)), \
688 "d" ((UDItype)(v))); \ 670 "d" ((UDItype)(v))); \
689} while (0) 671} while (0)
690#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) 672#else
691#define umul_ppmm(w1, w0, u, v) \ 673#define umul_ppmm(w1, w0, u, v) \
692do { \ 674do { \
693 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ 675 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
@@ -695,22 +677,6 @@ do { \
695 w1 = __ll >> 64; \ 677 w1 = __ll >> 64; \
696 w0 = __ll; \ 678 w0 = __ll; \
697} while (0) 679} while (0)
698#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
699#define umul_ppmm(w1, w0, u, v) \
700 __asm__ ("dmultu %2,%3" \
701 : "=l" ((UDItype)(w0)), \
702 "=h" ((UDItype)(w1)) \
703 : "d" ((UDItype)(u)), \
704 "d" ((UDItype)(v)))
705#else
706#define umul_ppmm(w1, w0, u, v) \
707 __asm__ ("dmultu %2,%3\n" \
708 "mflo %0\n" \
709 "mfhi %1" \
710 : "=d" ((UDItype)(w0)), \
711 "=d" ((UDItype)(w1)) \
712 : "d" ((UDItype)(u)), \
713 "d" ((UDItype)(v)))
714#endif 680#endif
715#define UMUL_TIME 20 681#define UMUL_TIME 20
716#define UDIV_TIME 140 682#define UDIV_TIME 140
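
For reference, the generic umul_ppmm() form kept above simply does a widening multiply and splits the product into high and low words; a standalone sketch of the 32-bit case (illustration only, with plain C99 types standing in for the USItype/UDItype typedefs):

#include <stdint.h>

static void umul_ppmm_demo(uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
	uint64_t ll = (uint64_t)u * v;	/* widening 32x32 -> 64 multiply */

	*w1 = ll >> 32;			/* high word (what the removed asm read via mfhi) */
	*w0 = (uint32_t)ll;		/* low word (what it read via mflo) */
}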
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 2efac049ad4c..3803135c88ff 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -10,7 +10,8 @@ menuconfig BT
10 select CRC16 10 select CRC16
11 select CRYPTO 11 select CRYPTO
12 select CRYPTO_BLKCIPHER 12 select CRYPTO_BLKCIPHER
13 select CRYPTO_AES 13 select CRYPTO_LIB_AES
14 imply CRYPTO_AES
14 select CRYPTO_CMAC 15 select CRYPTO_CMAC
15 select CRYPTO_ECB 16 select CRYPTO_ECB
16 select CRYPTO_SHA256 17 select CRYPTO_SHA256
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6c2b4e6e87ba..26e8cfad22b8 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
23#include <linux/debugfs.h> 23#include <linux/debugfs.h>
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/crypto.h> 25#include <linux/crypto.h>
26#include <crypto/aes.h>
26#include <crypto/algapi.h> 27#include <crypto/algapi.h>
27#include <crypto/b128ops.h> 28#include <crypto/b128ops.h>
28#include <crypto/hash.h> 29#include <crypto/hash.h>
@@ -88,7 +89,6 @@ struct smp_dev {
88 u8 local_rand[16]; 89 u8 local_rand[16];
89 bool debug_key; 90 bool debug_key;
90 91
91 struct crypto_cipher *tfm_aes;
92 struct crypto_shash *tfm_cmac; 92 struct crypto_shash *tfm_cmac;
93 struct crypto_kpp *tfm_ecdh; 93 struct crypto_kpp *tfm_ecdh;
94}; 94};
@@ -127,7 +127,6 @@ struct smp_chan {
127 u8 dhkey[32]; 127 u8 dhkey[32];
128 u8 mackey[16]; 128 u8 mackey[16];
129 129
130 struct crypto_cipher *tfm_aes;
131 struct crypto_shash *tfm_cmac; 130 struct crypto_shash *tfm_cmac;
132 struct crypto_kpp *tfm_ecdh; 131 struct crypto_kpp *tfm_ecdh;
133}; 132};
@@ -377,22 +376,18 @@ static int smp_h7(struct crypto_shash *tfm_cmac, const u8 w[16],
377 * s1 and ah. 376 * s1 and ah.
378 */ 377 */
379 378
380static int smp_e(struct crypto_cipher *tfm, const u8 *k, u8 *r) 379static int smp_e(const u8 *k, u8 *r)
381{ 380{
381 struct crypto_aes_ctx ctx;
382 uint8_t tmp[16], data[16]; 382 uint8_t tmp[16], data[16];
383 int err; 383 int err;
384 384
385 SMP_DBG("k %16phN r %16phN", k, r); 385 SMP_DBG("k %16phN r %16phN", k, r);
386 386
387 if (!tfm) {
388 BT_ERR("tfm %p", tfm);
389 return -EINVAL;
390 }
391
392 /* The most significant octet of key corresponds to k[0] */ 387 /* The most significant octet of key corresponds to k[0] */
393 swap_buf(k, tmp, 16); 388 swap_buf(k, tmp, 16);
394 389
395 err = crypto_cipher_setkey(tfm, tmp, 16); 390 err = aes_expandkey(&ctx, tmp, 16);
396 if (err) { 391 if (err) {
397 BT_ERR("cipher setkey failed: %d", err); 392 BT_ERR("cipher setkey failed: %d", err);
398 return err; 393 return err;
@@ -401,17 +396,18 @@ static int smp_e(struct crypto_cipher *tfm, const u8 *k, u8 *r)
401 /* Most significant octet of plaintextData corresponds to data[0] */ 396 /* Most significant octet of plaintextData corresponds to data[0] */
402 swap_buf(r, data, 16); 397 swap_buf(r, data, 16);
403 398
404 crypto_cipher_encrypt_one(tfm, data, data); 399 aes_encrypt(&ctx, data, data);
405 400
406 /* Most significant octet of encryptedData corresponds to data[0] */ 401 /* Most significant octet of encryptedData corresponds to data[0] */
407 swap_buf(data, r, 16); 402 swap_buf(data, r, 16);
408 403
409 SMP_DBG("r %16phN", r); 404 SMP_DBG("r %16phN", r);
410 405
406 memzero_explicit(&ctx, sizeof (ctx));
411 return err; 407 return err;
412} 408}
413 409
414static int smp_c1(struct crypto_cipher *tfm_aes, const u8 k[16], 410static int smp_c1(const u8 k[16],
415 const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat, 411 const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat,
416 const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16]) 412 const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16])
417{ 413{
@@ -436,7 +432,7 @@ static int smp_c1(struct crypto_cipher *tfm_aes, const u8 k[16],
436 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); 432 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
437 433
438 /* res = e(k, res) */ 434 /* res = e(k, res) */
439 err = smp_e(tfm_aes, k, res); 435 err = smp_e(k, res);
440 if (err) { 436 if (err) {
441 BT_ERR("Encrypt data error"); 437 BT_ERR("Encrypt data error");
442 return err; 438 return err;
@@ -453,14 +449,14 @@ static int smp_c1(struct crypto_cipher *tfm_aes, const u8 k[16],
453 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); 449 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
454 450
455 /* res = e(k, res) */ 451 /* res = e(k, res) */
456 err = smp_e(tfm_aes, k, res); 452 err = smp_e(k, res);
457 if (err) 453 if (err)
458 BT_ERR("Encrypt data error"); 454 BT_ERR("Encrypt data error");
459 455
460 return err; 456 return err;
461} 457}
462 458
463static int smp_s1(struct crypto_cipher *tfm_aes, const u8 k[16], 459static int smp_s1(const u8 k[16],
464 const u8 r1[16], const u8 r2[16], u8 _r[16]) 460 const u8 r1[16], const u8 r2[16], u8 _r[16])
465{ 461{
466 int err; 462 int err;
@@ -469,15 +465,14 @@ static int smp_s1(struct crypto_cipher *tfm_aes, const u8 k[16],
469 memcpy(_r, r2, 8); 465 memcpy(_r, r2, 8);
470 memcpy(_r + 8, r1, 8); 466 memcpy(_r + 8, r1, 8);
471 467
472 err = smp_e(tfm_aes, k, _r); 468 err = smp_e(k, _r);
473 if (err) 469 if (err)
474 BT_ERR("Encrypt data error"); 470 BT_ERR("Encrypt data error");
475 471
476 return err; 472 return err;
477} 473}
478 474
479static int smp_ah(struct crypto_cipher *tfm, const u8 irk[16], 475static int smp_ah(const u8 irk[16], const u8 r[3], u8 res[3])
480 const u8 r[3], u8 res[3])
481{ 476{
482 u8 _res[16]; 477 u8 _res[16];
483 int err; 478 int err;
@@ -486,7 +481,7 @@ static int smp_ah(struct crypto_cipher *tfm, const u8 irk[16],
486 memcpy(_res, r, 3); 481 memcpy(_res, r, 3);
487 memset(_res + 3, 0, 13); 482 memset(_res + 3, 0, 13);
488 483
489 err = smp_e(tfm, irk, _res); 484 err = smp_e(irk, _res);
490 if (err) { 485 if (err) {
491 BT_ERR("Encrypt error"); 486 BT_ERR("Encrypt error");
492 return err; 487 return err;
@@ -518,7 +513,7 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
518 513
519 BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); 514 BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
520 515
521 err = smp_ah(smp->tfm_aes, irk, &bdaddr->b[3], hash); 516 err = smp_ah(irk, &bdaddr->b[3], hash);
522 if (err) 517 if (err)
523 return false; 518 return false;
524 519
@@ -541,7 +536,7 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
541 rpa->b[5] &= 0x3f; /* Clear two most significant bits */ 536 rpa->b[5] &= 0x3f; /* Clear two most significant bits */
542 rpa->b[5] |= 0x40; /* Set second most significant bit */ 537 rpa->b[5] |= 0x40; /* Set second most significant bit */
543 538
544 err = smp_ah(smp->tfm_aes, irk, &rpa->b[3], rpa->b); 539 err = smp_ah(irk, &rpa->b[3], rpa->b);
545 if (err < 0) 540 if (err < 0)
546 return err; 541 return err;
547 542
@@ -768,7 +763,6 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
768 kzfree(smp->slave_csrk); 763 kzfree(smp->slave_csrk);
769 kzfree(smp->link_key); 764 kzfree(smp->link_key);
770 765
771 crypto_free_cipher(smp->tfm_aes);
772 crypto_free_shash(smp->tfm_cmac); 766 crypto_free_shash(smp->tfm_cmac);
773 crypto_free_kpp(smp->tfm_ecdh); 767 crypto_free_kpp(smp->tfm_ecdh);
774 768
@@ -957,7 +951,7 @@ static u8 smp_confirm(struct smp_chan *smp)
957 951
958 BT_DBG("conn %p", conn); 952 BT_DBG("conn %p", conn);
959 953
960 ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp, 954 ret = smp_c1(smp->tk, smp->prnd, smp->preq, smp->prsp,
961 conn->hcon->init_addr_type, &conn->hcon->init_addr, 955 conn->hcon->init_addr_type, &conn->hcon->init_addr,
962 conn->hcon->resp_addr_type, &conn->hcon->resp_addr, 956 conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
963 cp.confirm_val); 957 cp.confirm_val);
@@ -983,12 +977,9 @@ static u8 smp_random(struct smp_chan *smp)
983 u8 confirm[16]; 977 u8 confirm[16];
984 int ret; 978 int ret;
985 979
986 if (IS_ERR_OR_NULL(smp->tfm_aes))
987 return SMP_UNSPECIFIED;
988
989 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 980 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
990 981
991 ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp, 982 ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
992 hcon->init_addr_type, &hcon->init_addr, 983 hcon->init_addr_type, &hcon->init_addr,
993 hcon->resp_addr_type, &hcon->resp_addr, confirm); 984 hcon->resp_addr_type, &hcon->resp_addr, confirm);
994 if (ret) 985 if (ret)
@@ -1005,7 +996,7 @@ static u8 smp_random(struct smp_chan *smp)
1005 __le64 rand = 0; 996 __le64 rand = 0;
1006 __le16 ediv = 0; 997 __le16 ediv = 0;
1007 998
1008 smp_s1(smp->tfm_aes, smp->tk, smp->rrnd, smp->prnd, stk); 999 smp_s1(smp->tk, smp->rrnd, smp->prnd, stk);
1009 1000
1010 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) 1001 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
1011 return SMP_UNSPECIFIED; 1002 return SMP_UNSPECIFIED;
@@ -1021,7 +1012,7 @@ static u8 smp_random(struct smp_chan *smp)
1021 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), 1012 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
1022 smp->prnd); 1013 smp->prnd);
1023 1014
1024 smp_s1(smp->tfm_aes, smp->tk, smp->prnd, smp->rrnd, stk); 1015 smp_s1(smp->tk, smp->prnd, smp->rrnd, stk);
1025 1016
1026 if (hcon->pending_sec_level == BT_SECURITY_HIGH) 1017 if (hcon->pending_sec_level == BT_SECURITY_HIGH)
1027 auth = 1; 1018 auth = 1;
@@ -1389,16 +1380,10 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
1389 if (!smp) 1380 if (!smp)
1390 return NULL; 1381 return NULL;
1391 1382
1392 smp->tfm_aes = crypto_alloc_cipher("aes", 0, 0);
1393 if (IS_ERR(smp->tfm_aes)) {
1394 BT_ERR("Unable to create AES crypto context");
1395 goto zfree_smp;
1396 }
1397
1398 smp->tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); 1383 smp->tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
1399 if (IS_ERR(smp->tfm_cmac)) { 1384 if (IS_ERR(smp->tfm_cmac)) {
1400 BT_ERR("Unable to create CMAC crypto context"); 1385 BT_ERR("Unable to create CMAC crypto context");
1401 goto free_cipher; 1386 goto zfree_smp;
1402 } 1387 }
1403 1388
1404 smp->tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); 1389 smp->tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0);
@@ -1420,8 +1405,6 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
1420 1405
1421free_shash: 1406free_shash:
1422 crypto_free_shash(smp->tfm_cmac); 1407 crypto_free_shash(smp->tfm_cmac);
1423free_cipher:
1424 crypto_free_cipher(smp->tfm_aes);
1425zfree_smp: 1408zfree_smp:
1426 kzfree(smp); 1409 kzfree(smp);
1427 return NULL; 1410 return NULL;
@@ -3232,7 +3215,6 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
3232{ 3215{
3233 struct l2cap_chan *chan; 3216 struct l2cap_chan *chan;
3234 struct smp_dev *smp; 3217 struct smp_dev *smp;
3235 struct crypto_cipher *tfm_aes;
3236 struct crypto_shash *tfm_cmac; 3218 struct crypto_shash *tfm_cmac;
3237 struct crypto_kpp *tfm_ecdh; 3219 struct crypto_kpp *tfm_ecdh;
3238 3220
@@ -3245,17 +3227,9 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
3245 if (!smp) 3227 if (!smp)
3246 return ERR_PTR(-ENOMEM); 3228 return ERR_PTR(-ENOMEM);
3247 3229
3248 tfm_aes = crypto_alloc_cipher("aes", 0, 0);
3249 if (IS_ERR(tfm_aes)) {
3250 BT_ERR("Unable to create AES crypto context");
3251 kzfree(smp);
3252 return ERR_CAST(tfm_aes);
3253 }
3254
3255 tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); 3230 tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
3256 if (IS_ERR(tfm_cmac)) { 3231 if (IS_ERR(tfm_cmac)) {
3257 BT_ERR("Unable to create CMAC crypto context"); 3232 BT_ERR("Unable to create CMAC crypto context");
3258 crypto_free_cipher(tfm_aes);
3259 kzfree(smp); 3233 kzfree(smp);
3260 return ERR_CAST(tfm_cmac); 3234 return ERR_CAST(tfm_cmac);
3261 } 3235 }
@@ -3264,13 +3238,11 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
3264 if (IS_ERR(tfm_ecdh)) { 3238 if (IS_ERR(tfm_ecdh)) {
3265 BT_ERR("Unable to create ECDH crypto context"); 3239 BT_ERR("Unable to create ECDH crypto context");
3266 crypto_free_shash(tfm_cmac); 3240 crypto_free_shash(tfm_cmac);
3267 crypto_free_cipher(tfm_aes);
3268 kzfree(smp); 3241 kzfree(smp);
3269 return ERR_CAST(tfm_ecdh); 3242 return ERR_CAST(tfm_ecdh);
3270 } 3243 }
3271 3244
3272 smp->local_oob = false; 3245 smp->local_oob = false;
3273 smp->tfm_aes = tfm_aes;
3274 smp->tfm_cmac = tfm_cmac; 3246 smp->tfm_cmac = tfm_cmac;
3275 smp->tfm_ecdh = tfm_ecdh; 3247 smp->tfm_ecdh = tfm_ecdh;
3276 3248
@@ -3278,7 +3250,6 @@ create_chan:
3278 chan = l2cap_chan_create(); 3250 chan = l2cap_chan_create();
3279 if (!chan) { 3251 if (!chan) {
3280 if (smp) { 3252 if (smp) {
3281 crypto_free_cipher(smp->tfm_aes);
3282 crypto_free_shash(smp->tfm_cmac); 3253 crypto_free_shash(smp->tfm_cmac);
3283 crypto_free_kpp(smp->tfm_ecdh); 3254 crypto_free_kpp(smp->tfm_ecdh);
3284 kzfree(smp); 3255 kzfree(smp);
@@ -3326,7 +3297,6 @@ static void smp_del_chan(struct l2cap_chan *chan)
3326 smp = chan->data; 3297 smp = chan->data;
3327 if (smp) { 3298 if (smp) {
3328 chan->data = NULL; 3299 chan->data = NULL;
3329 crypto_free_cipher(smp->tfm_aes);
3330 crypto_free_shash(smp->tfm_cmac); 3300 crypto_free_shash(smp->tfm_cmac);
3331 crypto_free_kpp(smp->tfm_ecdh); 3301 crypto_free_kpp(smp->tfm_ecdh);
3332 kzfree(smp); 3302 kzfree(smp);
@@ -3582,7 +3552,7 @@ static int __init test_debug_key(struct crypto_kpp *tfm_ecdh)
3582 return 0; 3552 return 0;
3583} 3553}
3584 3554
3585static int __init test_ah(struct crypto_cipher *tfm_aes) 3555static int __init test_ah(void)
3586{ 3556{
3587 const u8 irk[16] = { 3557 const u8 irk[16] = {
3588 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, 3558 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
@@ -3592,7 +3562,7 @@ static int __init test_ah(struct crypto_cipher *tfm_aes)
3592 u8 res[3]; 3562 u8 res[3];
3593 int err; 3563 int err;
3594 3564
3595 err = smp_ah(tfm_aes, irk, r, res); 3565 err = smp_ah(irk, r, res);
3596 if (err) 3566 if (err)
3597 return err; 3567 return err;
3598 3568
@@ -3602,7 +3572,7 @@ static int __init test_ah(struct crypto_cipher *tfm_aes)
3602 return 0; 3572 return 0;
3603} 3573}
3604 3574
3605static int __init test_c1(struct crypto_cipher *tfm_aes) 3575static int __init test_c1(void)
3606{ 3576{
3607 const u8 k[16] = { 3577 const u8 k[16] = {
3608 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 3578 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3622,7 +3592,7 @@ static int __init test_c1(struct crypto_cipher *tfm_aes)
3622 u8 res[16]; 3592 u8 res[16];
3623 int err; 3593 int err;
3624 3594
3625 err = smp_c1(tfm_aes, k, r, preq, pres, _iat, &ia, _rat, &ra, res); 3595 err = smp_c1(k, r, preq, pres, _iat, &ia, _rat, &ra, res);
3626 if (err) 3596 if (err)
3627 return err; 3597 return err;
3628 3598
@@ -3632,7 +3602,7 @@ static int __init test_c1(struct crypto_cipher *tfm_aes)
3632 return 0; 3602 return 0;
3633} 3603}
3634 3604
3635static int __init test_s1(struct crypto_cipher *tfm_aes) 3605static int __init test_s1(void)
3636{ 3606{
3637 const u8 k[16] = { 3607 const u8 k[16] = {
3638 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 3608 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3647,7 +3617,7 @@ static int __init test_s1(struct crypto_cipher *tfm_aes)
3647 u8 res[16]; 3617 u8 res[16];
3648 int err; 3618 int err;
3649 3619
3650 err = smp_s1(tfm_aes, k, r1, r2, res); 3620 err = smp_s1(k, r1, r2, res);
3651 if (err) 3621 if (err)
3652 return err; 3622 return err;
3653 3623
@@ -3828,8 +3798,7 @@ static const struct file_operations test_smp_fops = {
3828 .llseek = default_llseek, 3798 .llseek = default_llseek,
3829}; 3799};
3830 3800
3831static int __init run_selftests(struct crypto_cipher *tfm_aes, 3801static int __init run_selftests(struct crypto_shash *tfm_cmac,
3832 struct crypto_shash *tfm_cmac,
3833 struct crypto_kpp *tfm_ecdh) 3802 struct crypto_kpp *tfm_ecdh)
3834{ 3803{
3835 ktime_t calltime, delta, rettime; 3804 ktime_t calltime, delta, rettime;
@@ -3844,19 +3813,19 @@ static int __init run_selftests(struct crypto_cipher *tfm_aes,
3844 goto done; 3813 goto done;
3845 } 3814 }
3846 3815
3847 err = test_ah(tfm_aes); 3816 err = test_ah();
3848 if (err) { 3817 if (err) {
3849 BT_ERR("smp_ah test failed"); 3818 BT_ERR("smp_ah test failed");
3850 goto done; 3819 goto done;
3851 } 3820 }
3852 3821
3853 err = test_c1(tfm_aes); 3822 err = test_c1();
3854 if (err) { 3823 if (err) {
3855 BT_ERR("smp_c1 test failed"); 3824 BT_ERR("smp_c1 test failed");
3856 goto done; 3825 goto done;
3857 } 3826 }
3858 3827
3859 err = test_s1(tfm_aes); 3828 err = test_s1();
3860 if (err) { 3829 if (err) {
3861 BT_ERR("smp_s1 test failed"); 3830 BT_ERR("smp_s1 test failed");
3862 goto done; 3831 goto done;
@@ -3913,21 +3882,13 @@ done:
3913 3882
3914int __init bt_selftest_smp(void) 3883int __init bt_selftest_smp(void)
3915{ 3884{
3916 struct crypto_cipher *tfm_aes;
3917 struct crypto_shash *tfm_cmac; 3885 struct crypto_shash *tfm_cmac;
3918 struct crypto_kpp *tfm_ecdh; 3886 struct crypto_kpp *tfm_ecdh;
3919 int err; 3887 int err;
3920 3888
3921 tfm_aes = crypto_alloc_cipher("aes", 0, 0);
3922 if (IS_ERR(tfm_aes)) {
3923 BT_ERR("Unable to create AES crypto context");
3924 return PTR_ERR(tfm_aes);
3925 }
3926
3927 tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); 3889 tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
3928 if (IS_ERR(tfm_cmac)) { 3890 if (IS_ERR(tfm_cmac)) {
3929 BT_ERR("Unable to create CMAC crypto context"); 3891 BT_ERR("Unable to create CMAC crypto context");
3930 crypto_free_cipher(tfm_aes);
3931 return PTR_ERR(tfm_cmac); 3892 return PTR_ERR(tfm_cmac);
3932 } 3893 }
3933 3894
@@ -3935,14 +3896,12 @@ int __init bt_selftest_smp(void)
3935 if (IS_ERR(tfm_ecdh)) { 3896 if (IS_ERR(tfm_ecdh)) {
3936 BT_ERR("Unable to create ECDH crypto context"); 3897 BT_ERR("Unable to create ECDH crypto context");
3937 crypto_free_shash(tfm_cmac); 3898 crypto_free_shash(tfm_cmac);
3938 crypto_free_cipher(tfm_aes);
3939 return PTR_ERR(tfm_ecdh); 3899 return PTR_ERR(tfm_ecdh);
3940 } 3900 }
3941 3901
3942 err = run_selftests(tfm_aes, tfm_cmac, tfm_ecdh); 3902 err = run_selftests(tfm_cmac, tfm_ecdh);
3943 3903
3944 crypto_free_shash(tfm_cmac); 3904 crypto_free_shash(tfm_cmac);
3945 crypto_free_cipher(tfm_aes);
3946 crypto_free_kpp(tfm_ecdh); 3905 crypto_free_kpp(tfm_ecdh);
3947 3906
3948 return err; 3907 return err;
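
The smp_e() rewrite above is the pattern used throughout this series: drop the crypto_cipher transform and use the AES library's on-stack context instead. A standalone sketch of that single-block pattern (illustration only; aes_expandkey(), aes_encrypt() and struct crypto_aes_ctx are the <crypto/aes.h> interfaces visible in the hunks above):

#include <crypto/aes.h>
#include <linux/string.h>	/* memzero_explicit() */

static int demo_aes_one_block(const u8 key[16], u8 block[16])
{
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_expandkey(&ctx, key, 16);	/* replaces crypto_cipher_setkey() */
	if (err)
		return err;

	aes_encrypt(&ctx, block, block);	/* replaces crypto_cipher_encrypt_one() */
	memzero_explicit(&ctx, sizeof(ctx));	/* scrub the round keys, as smp_e() does */
	return 0;
}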
diff --git a/tools/crypto/getstat.c b/tools/crypto/getstat.c
deleted file mode 100644
index 9e8ff76420fa..000000000000
--- a/tools/crypto/getstat.c
+++ /dev/null
@@ -1,294 +0,0 @@
1/* Heavily copied from libkcapi 2015 - 2017, Stephan Mueller <smueller@chronox.de> */
2#include <errno.h>
3#include <linux/cryptouser.h>
4#include <linux/netlink.h>
5#include <linux/rtnetlink.h>
6#include <sys/types.h>
7#include <sys/socket.h>
8#include <stdlib.h>
9#include <stdio.h>
10#include <string.h>
11#include <time.h>
12#include <unistd.h>
13
14#define CR_RTA(x) ((struct rtattr *)(((char *)(x)) + NLMSG_ALIGN(sizeof(struct crypto_user_alg))))
15
16static int get_stat(const char *drivername)
17{
18 struct {
19 struct nlmsghdr n;
20 struct crypto_user_alg cru;
21 } req;
22 struct sockaddr_nl nl;
23 int sd = 0, ret;
24 socklen_t addr_len;
25 struct iovec iov;
26 struct msghdr msg;
27 char buf[4096];
28 struct nlmsghdr *res_n = (struct nlmsghdr *)buf;
29 struct crypto_user_alg *cru_res = NULL;
30 int res_len = 0;
31 struct rtattr *tb[CRYPTOCFGA_MAX + 1];
32 struct rtattr *rta;
33 struct nlmsgerr *errmsg;
34
35 memset(&req, 0, sizeof(req));
36 memset(&buf, 0, sizeof(buf));
37 memset(&msg, 0, sizeof(msg));
38
39 req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
40 req.n.nlmsg_flags = NLM_F_REQUEST;
41 req.n.nlmsg_type = CRYPTO_MSG_GETSTAT;
42 req.n.nlmsg_seq = time(NULL);
43
44 strncpy(req.cru.cru_driver_name, drivername, strlen(drivername));
45
46 sd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
47 if (sd < 0) {
48 fprintf(stderr, "Netlink error: cannot open netlink socket");
49 return -errno;
50 }
51 memset(&nl, 0, sizeof(nl));
52 nl.nl_family = AF_NETLINK;
53 if (bind(sd, (struct sockaddr *)&nl, sizeof(nl)) < 0) {
54 ret = -errno;
55 fprintf(stderr, "Netlink error: cannot bind netlink socket");
56 goto out;
57 }
58
59 /* sanity check that netlink socket was successfully opened */
60 addr_len = sizeof(nl);
61 if (getsockname(sd, (struct sockaddr *)&nl, &addr_len) < 0) {
62 ret = -errno;
63 printf("Netlink error: cannot getsockname");
64 goto out;
65 }
66 if (addr_len != sizeof(nl)) {
67 ret = -errno;
68 printf("Netlink error: wrong address length %d", addr_len);
69 goto out;
70 }
71 if (nl.nl_family != AF_NETLINK) {
72 ret = -errno;
73 printf("Netlink error: wrong address family %d",
74 nl.nl_family);
75 goto out;
76 }
77
78 memset(&nl, 0, sizeof(nl));
79 nl.nl_family = AF_NETLINK;
80 iov.iov_base = (void *)&req.n;
81 iov.iov_len = req.n.nlmsg_len;
82 msg.msg_name = &nl;
83 msg.msg_namelen = sizeof(nl);
84 msg.msg_iov = &iov;
85 msg.msg_iovlen = 1;
86 if (sendmsg(sd, &msg, 0) < 0) {
87 ret = -errno;
88 printf("Netlink error: sendmsg failed");
89 goto out;
90 }
91 memset(buf, 0, sizeof(buf));
92 iov.iov_base = buf;
93 while (1) {
94 iov.iov_len = sizeof(buf);
95 ret = recvmsg(sd, &msg, 0);
96 if (ret < 0) {
97 if (errno == EINTR || errno == EAGAIN)
98 continue;
99 ret = -errno;
100 printf("Netlink error: netlink receive error");
101 goto out;
102 }
103 if (ret == 0) {
104 ret = -errno;
105 printf("Netlink error: no data");
106 goto out;
107 }
108 if (ret > sizeof(buf)) {
109 ret = -errno;
110 printf("Netlink error: received too much data");
111 goto out;
112 }
113 break;
114 }
115
116 ret = -EFAULT;
117 res_len = res_n->nlmsg_len;
118 if (res_n->nlmsg_type == NLMSG_ERROR) {
119 errmsg = NLMSG_DATA(res_n);
120 fprintf(stderr, "Fail with %d\n", errmsg->error);
121 ret = errmsg->error;
122 goto out;
123 }
124
125 if (res_n->nlmsg_type == CRYPTO_MSG_GETSTAT) {
126 cru_res = NLMSG_DATA(res_n);
127 res_len -= NLMSG_SPACE(sizeof(*cru_res));
128 }
129 if (res_len < 0) {
130 printf("Netlink error: nlmsg len %d\n", res_len);
131 goto out;
132 }
133
134 if (!cru_res) {
135 ret = -EFAULT;
136 printf("Netlink error: no cru_res\n");
137 goto out;
138 }
139
140 rta = CR_RTA(cru_res);
141 memset(tb, 0, sizeof(struct rtattr *) * (CRYPTOCFGA_MAX + 1));
142 while (RTA_OK(rta, res_len)) {
143 if ((rta->rta_type <= CRYPTOCFGA_MAX) && (!tb[rta->rta_type]))
144 tb[rta->rta_type] = rta;
145 rta = RTA_NEXT(rta, res_len);
146 }
147 if (res_len) {
148 printf("Netlink error: unprocessed data %d",
149 res_len);
150 goto out;
151 }
152
153 if (tb[CRYPTOCFGA_STAT_HASH]) {
154 struct rtattr *rta = tb[CRYPTOCFGA_STAT_HASH];
155 struct crypto_stat_hash *rhash =
156 (struct crypto_stat_hash *)RTA_DATA(rta);
157 printf("%s\tHash\n\tHash: %llu bytes: %llu\n\tErrors: %llu\n",
158 drivername,
159 rhash->stat_hash_cnt, rhash->stat_hash_tlen,
160 rhash->stat_err_cnt);
161 } else if (tb[CRYPTOCFGA_STAT_COMPRESS]) {
162 struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS];
163 struct crypto_stat_compress *rblk =
164 (struct crypto_stat_compress *)RTA_DATA(rta);
165 printf("%s\tCompress\n\tCompress: %llu bytes: %llu\n\tDecompress: %llu bytes: %llu\n\tErrors: %llu\n",
166 drivername,
167 rblk->stat_compress_cnt, rblk->stat_compress_tlen,
168 rblk->stat_decompress_cnt, rblk->stat_decompress_tlen,
169 rblk->stat_err_cnt);
170 } else if (tb[CRYPTOCFGA_STAT_ACOMP]) {
171 struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP];
172 struct crypto_stat_compress *rcomp =
173 (struct crypto_stat_compress *)RTA_DATA(rta);
174 printf("%s\tACompress\n\tCompress: %llu bytes: %llu\n\tDecompress: %llu bytes: %llu\n\tErrors: %llu\n",
175 drivername,
176 rcomp->stat_compress_cnt, rcomp->stat_compress_tlen,
177 rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen,
178 rcomp->stat_err_cnt);
179 } else if (tb[CRYPTOCFGA_STAT_AEAD]) {
180 struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD];
181 struct crypto_stat_aead *raead =
182 (struct crypto_stat_aead *)RTA_DATA(rta);
183 printf("%s\tAEAD\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tErrors: %llu\n",
184 drivername,
185 raead->stat_encrypt_cnt, raead->stat_encrypt_tlen,
186 raead->stat_decrypt_cnt, raead->stat_decrypt_tlen,
187 raead->stat_err_cnt);
188 } else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) {
189 struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER];
190 struct crypto_stat_cipher *rblk =
191 (struct crypto_stat_cipher *)RTA_DATA(rta);
192 printf("%s\tCipher\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tErrors: %llu\n",
193 drivername,
194 rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
195 rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
196 rblk->stat_err_cnt);
197 } else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) {
198 struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER];
199 struct crypto_stat_akcipher *rblk =
200 (struct crypto_stat_akcipher *)RTA_DATA(rta);
201 printf("%s\tAkcipher\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tSign: %llu\n\tVerify: %llu\n\tErrors: %llu\n",
202 drivername,
203 rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
204 rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
205 rblk->stat_sign_cnt, rblk->stat_verify_cnt,
206 rblk->stat_err_cnt);
207 } else if (tb[CRYPTOCFGA_STAT_CIPHER]) {
208 struct rtattr *rta = tb[CRYPTOCFGA_STAT_CIPHER];
209 struct crypto_stat_cipher *rblk =
210 (struct crypto_stat_cipher *)RTA_DATA(rta);
211 printf("%s\tcipher\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tErrors: %llu\n",
212 drivername,
213 rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
214 rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
215 rblk->stat_err_cnt);
216 } else if (tb[CRYPTOCFGA_STAT_RNG]) {
217 struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG];
218 struct crypto_stat_rng *rrng =
219 (struct crypto_stat_rng *)RTA_DATA(rta);
220 printf("%s\tRNG\n\tSeed: %llu\n\tGenerate: %llu bytes: %llu\n\tErrors: %llu\n",
221 drivername,
222 rrng->stat_seed_cnt,
223 rrng->stat_generate_cnt, rrng->stat_generate_tlen,
224 rrng->stat_err_cnt);
225 } else if (tb[CRYPTOCFGA_STAT_KPP]) {
226 struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP];
227 struct crypto_stat_kpp *rkpp =
228 (struct crypto_stat_kpp *)RTA_DATA(rta);
229 printf("%s\tKPP\n\tSetsecret: %llu\n\tGenerate public key: %llu\n\tCompute_shared_secret: %llu\n\tErrors: %llu\n",
230 drivername,
231 rkpp->stat_setsecret_cnt,
232 rkpp->stat_generate_public_key_cnt,
233 rkpp->stat_compute_shared_secret_cnt,
234 rkpp->stat_err_cnt);
235 } else {
236 fprintf(stderr, "%s is of an unknown algorithm\n", drivername);
237 }
238 ret = 0;
239out:
240 close(sd);
241 return ret;
242}
243
244int main(int argc, const char *argv[])
245{
246 char buf[4096];
247 FILE *procfd;
248 int i, lastspace;
249 int ret;
250
251 procfd = fopen("/proc/crypto", "r");
252 if (!procfd) {
253 ret = errno;
254 fprintf(stderr, "Cannot open /proc/crypto %s\n", strerror(errno));
255 return ret;
256 }
257 if (argc > 1) {
258 if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
259 printf("Usage: %s [-h|--help] display this help\n", argv[0]);
260 printf("Usage: %s display all crypto statistics\n", argv[0]);
261 printf("Usage: %s drivername1 drivername2 ... = display crypto statistics about drivername1 ...\n", argv[0]);
262 return 0;
263 }
264 for (i = 1; i < argc; i++) {
265 ret = get_stat(argv[i]);
266 if (ret) {
267 fprintf(stderr, "Failed with %s\n", strerror(-ret));
268 return ret;
269 }
270 }
271 return 0;
272 }
273
274 while (fgets(buf, sizeof(buf), procfd)) {
275 if (!strncmp(buf, "driver", 6)) {
276 lastspace = 0;
277 i = 0;
278 while (i < strlen(buf)) {
279 i++;
280 if (buf[i] == ' ')
281 lastspace = i;
282 }
283 buf[strlen(buf) - 1] = '\0';
284 ret = get_stat(buf + lastspace + 1);
285 if (ret) {
286 fprintf(stderr, "Failed with %s\n", strerror(-ret));
287 goto out;
288 }
289 }
290 }
291out:
292 fclose(procfd);
293 return ret;
294}