author    Linus Torvalds <torvalds@linux-foundation.org>  2018-10-25 19:43:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-10-25 19:43:35 -0400
commit    62606c224d72a98c35d21a849f95cccf95b0a252 (patch)
tree      6f6f3466451edf9baa2ea8b5f9fc558aa555c69a
parent    24ed334f33666f2ae929ccc08f72e7e72e353c64 (diff)
parent    a1c6fd4308d37f072e939a2782f24214115fc7e8 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Remove VLA usage
   - Add cryptostat user-space interface
   - Add notifier for new crypto algorithms

  Algorithms:
   - Add OFB mode
   - Remove speck

  Drivers:
   - Remove x86/sha*-mb as they are buggy
   - Remove pcbc(aes) from x86/aesni
   - Improve performance of arm/ghash-ce by up to 85%
   - Implement CTS-CBC in arm64/aes-blk, faster by up to 50%
   - Remove PMULL based arm64/crc32 driver
   - Use PMULL in arm64/crct10dif
   - Add aes-ctr support in s5p-sss
   - Add caam/qi2 driver

  Others:
   - Pick better transform if one becomes available in crc-t10dif"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
  crypto: chelsio - Update ntx queue received from cxgb4
  crypto: ccree - avoid implicit enum conversion
  crypto: caam - add SPDX license identifier to all files
  crypto: caam/qi - simplify CGR allocation, freeing
  crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
  crypto: arm64/aes-blk - ensure XTS mask is always loaded
  crypto: testmgr - fix sizeof() on COMP_BUF_SIZE
  crypto: chtls - remove set but not used variable 'csk'
  crypto: axis - fix platform_no_drv_owner.cocci warnings
  crypto: x86/aes-ni - fix build error following fpu template removal
  crypto: arm64/aes - fix handling sub-block CTS-CBC inputs
  crypto: caam/qi2 - avoid double export
  crypto: mxs-dcp - Fix AES issues
  crypto: mxs-dcp - Fix SHA null hashes and output length
  crypto: mxs-dcp - Implement sha import/export
  crypto: aegis/generic - fix for big endian systems
  crypto: morus/generic - fix for big endian systems
  crypto: lrw - fix rebase error after out of bounds fix
  crypto: cavium/nitrox - use pci_alloc_irq_vectors() while enabling MSI-X.
  crypto: cavium/nitrox - NITROX command queue changes.
  ...
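One of the additions summarized above is the OFB block-cipher mode. In-kernel users reach it through the regular skcipher API by asking for the "ofb(aes)" template; a minimal sketch follows, assuming CONFIG_CRYPTO_OFB is enabled. The key, IV and buffer handling are illustrative and error unwinding is trimmed:

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>

    /* Encrypt one buffer in place with ofb(aes) (sketch, not from this merge). */
    static int ofb_aes_encrypt_once(u8 *buf, unsigned int len,
                                    const u8 key[32], u8 iv[16])
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("ofb(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, 32);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            sg_init_one(&sg, buf, len);     /* buf must not live on the stack */
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }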
-rw-r--r--Documentation/filesystems/fscrypt.rst10
-rw-r--r--MAINTAINERS8
-rw-r--r--arch/arm/crypto/Kconfig7
-rw-r--r--arch/arm/crypto/Makefile2
-rw-r--r--arch/arm/crypto/chacha20-neon-core.S277
-rw-r--r--arch/arm/crypto/crc32-ce-glue.c2
-rw-r--r--arch/arm/crypto/ghash-ce-core.S108
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c38
-rw-r--r--arch/arm/crypto/speck-neon-core.S434
-rw-r--r--arch/arm/crypto/speck-neon-glue.c288
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--arch/arm64/crypto/Kconfig11
-rw-r--r--arch/arm64/crypto/Makefile6
-rw-r--r--arch/arm64/crypto/aes-ce.S5
-rw-r--r--arch/arm64/crypto/aes-glue.c217
-rw-r--r--arch/arm64/crypto/aes-modes.S416
-rw-r--r--arch/arm64/crypto/aes-neon.S6
-rw-r--r--arch/arm64/crypto/crc32-ce-core.S287
-rw-r--r--arch/arm64/crypto/crc32-ce-glue.c244
-rw-r--r--arch/arm64/crypto/crct10dif-ce-core.S314
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c14
-rw-r--r--arch/arm64/crypto/speck-neon-core.S352
-rw-r--r--arch/arm64/crypto/speck-neon-glue.c282
-rw-r--r--arch/m68k/configs/amiga_defconfig2
-rw-r--r--arch/m68k/configs/apollo_defconfig2
-rw-r--r--arch/m68k/configs/atari_defconfig2
-rw-r--r--arch/m68k/configs/bvme6000_defconfig2
-rw-r--r--arch/m68k/configs/hp300_defconfig2
-rw-r--r--arch/m68k/configs/mac_defconfig2
-rw-r--r--arch/m68k/configs/multi_defconfig2
-rw-r--r--arch/m68k/configs/mvme147_defconfig2
-rw-r--r--arch/m68k/configs/mvme16x_defconfig2
-rw-r--r--arch/m68k/configs/q40_defconfig2
-rw-r--r--arch/m68k/configs/sun3_defconfig2
-rw-r--r--arch/m68k/configs/sun3x_defconfig2
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/performance_defconfig1
-rw-r--r--arch/s390/crypto/aes_s390.c48
-rw-r--r--arch/s390/defconfig1
-rw-r--r--arch/x86/crypto/Makefile5
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c47
-rw-r--r--arch/x86/crypto/fpu.c207
-rw-r--r--arch/x86/crypto/sha1-mb/Makefile14
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c1011
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h134
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr.h110
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S287
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S304
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c64
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S209
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_x8_avx2.S492
-rw-r--r--arch/x86/crypto/sha256-mb/Makefile14
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c1013
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h134
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr.h108
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S304
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S307
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c65
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S214
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_x8_avx2.S598
-rw-r--r--arch/x86/crypto/sha512-mb/Makefile12
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c1047
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h128
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr.h104
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S281
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S297
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c69
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S224
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_x4_avx2.S531
-rw-r--r--crypto/Kconfig101
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/aegis.h20
-rw-r--r--crypto/ahash.c25
-rw-r--r--crypto/algapi.c17
-rw-r--r--crypto/algboss.c2
-rw-r--r--crypto/algif_aead.c12
-rw-r--r--crypto/algif_hash.c2
-rw-r--r--crypto/authenc.c8
-rw-r--r--crypto/authencesn.c8
-rw-r--r--crypto/ccm.c9
-rw-r--r--crypto/chacha20_generic.c7
-rw-r--r--crypto/cryptd.c32
-rw-r--r--crypto/crypto_null.c11
-rw-r--r--crypto/crypto_user_base.c (renamed from crypto/crypto_user.c)9
-rw-r--r--crypto/crypto_user_stat.c463
-rw-r--r--crypto/echainiv.c4
-rw-r--r--crypto/gcm.c8
-rw-r--r--crypto/internal.h8
-rw-r--r--crypto/lrw.c339
-rw-r--r--crypto/mcryptd.c675
-rw-r--r--crypto/morus1280.c7
-rw-r--r--crypto/morus640.c16
-rw-r--r--crypto/ofb.c225
-rw-r--r--crypto/rng.c1
-rw-r--r--crypto/rsa-pkcs1pad.c9
-rw-r--r--crypto/seqiv.c4
-rw-r--r--crypto/shash.c33
-rw-r--r--crypto/skcipher.c24
-rw-r--r--crypto/speck.c307
-rw-r--r--crypto/tcrypt.c27
-rw-r--r--crypto/tcrypt.h1
-rw-r--r--crypto/testmgr.c42
-rw-r--r--crypto/testmgr.h863
-rw-r--r--crypto/xcbc.c8
-rw-r--r--crypto/xts.c269
-rw-r--r--drivers/block/cryptoloop.c22
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c5
-rw-r--r--drivers/char/hw_random/core.c4
-rw-r--r--drivers/char/random.c24
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/atmel-aes.c5
-rw-r--r--drivers/crypto/atmel-authenc.h13
-rw-r--r--drivers/crypto/atmel-ecc.c11
-rw-r--r--drivers/crypto/atmel-ecc.h14
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes.c5
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c20
-rw-r--r--drivers/crypto/caam/Kconfig57
-rw-r--r--drivers/crypto/caam/Makefile10
-rw-r--r--drivers/crypto/caam/caamalg.c728
-rw-r--r--drivers/crypto/caam/caamalg_desc.c143
-rw-r--r--drivers/crypto/caam/caamalg_desc.h28
-rw-r--r--drivers/crypto/caam/caamalg_qi.c627
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c5165
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h223
-rw-r--r--drivers/crypto/caam/caamhash.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.h21
-rw-r--r--drivers/crypto/caam/caampkc.c1
-rw-r--r--drivers/crypto/caam/caamrng.c1
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.c1
-rw-r--r--drivers/crypto/caam/dpseci.c426
-rw-r--r--drivers/crypto/caam/dpseci.h333
-rw-r--r--drivers/crypto/caam/dpseci_cmd.h149
-rw-r--r--drivers/crypto/caam/error.c79
-rw-r--r--drivers/crypto/caam/error.h6
-rw-r--r--drivers/crypto/caam/jr.c1
-rw-r--r--drivers/crypto/caam/qi.c43
-rw-r--r--drivers/crypto/caam/qi.h3
-rw-r--r--drivers/crypto/caam/regs.h30
-rw-r--r--drivers/crypto/caam/sg_sw_qm.h29
-rw-r--r--drivers/crypto/caam/sg_sw_qm2.h30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c20
-rw-r--r--drivers/crypto/cavium/nitrox/Makefile3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_common.h19
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h111
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c115
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h162
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c71
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.h23
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.h10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c98
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c203
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c49
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_sriov.c151
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h2
-rw-r--r--drivers/crypto/ccp/psp-dev.c47
-rw-r--r--drivers/crypto/ccp/sp-platform.c53
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c30
-rw-r--r--drivers/crypto/chelsio/chcr_core.c2
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c3
-rw-r--r--drivers/crypto/mxs-dcp.c142
-rw-r--r--drivers/crypto/omap-aes.c17
-rw-r--r--drivers/crypto/omap-aes.h2
-rw-r--r--drivers/crypto/picoxcell_crypto.c21
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c60
-rw-r--r--drivers/crypto/qce/ablkcipher.c13
-rw-r--r--drivers/crypto/qce/cipher.h2
-rw-r--r--drivers/crypto/s5p-sss.c113
-rw-r--r--drivers/crypto/sahara.c31
-rw-r--r--drivers/crypto/vmx/aes_cbc.c22
-rw-r--r--drivers/crypto/vmx/aes_ctr.c18
-rw-r--r--drivers/crypto/vmx/aes_xts.c18
-rw-r--r--drivers/md/dm-integrity.c23
-rw-r--r--drivers/md/dm-verity-fec.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c20
-rw-r--r--drivers/net/ppp/ppp_mppe.c27
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c58
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c34
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c28
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c34
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c26
-rw-r--r--drivers/usb/wusbcore/crypto.c16
-rw-r--r--fs/crypto/fscrypt_private.h4
-rw-r--r--fs/crypto/keyinfo.c10
-rw-r--r--include/crypto/acompress.h38
-rw-r--r--include/crypto/aead.h51
-rw-r--r--include/crypto/akcipher.h76
-rw-r--r--include/crypto/algapi.h14
-rw-r--r--include/crypto/cbc.h2
-rw-r--r--include/crypto/chacha20.h3
-rw-r--r--include/crypto/hash.h38
-rw-r--r--include/crypto/internal/cryptouser.h8
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/kpp.h51
-rw-r--r--include/crypto/mcryptd.h114
-rw-r--r--include/crypto/morus1280_glue.h2
-rw-r--r--include/crypto/morus640_glue.h2
-rw-r--r--include/crypto/null.h2
-rw-r--r--include/crypto/rng.h29
-rw-r--r--include/crypto/skcipher.h118
-rw-r--r--include/crypto/speck.h62
-rw-r--r--include/linux/compiler_types.h1
-rw-r--r--include/linux/cpufeature.h2
-rw-r--r--include/linux/crc-t10dif.h1
-rw-r--r--include/linux/crypto.h110
-rw-r--r--include/linux/fsl/mc.h6
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/sunrpc/gss_krb5.h30
-rw-r--r--include/soc/fsl/dpaa2-fd.h242
-rw-r--r--include/soc/fsl/dpaa2-global.h15
-rw-r--r--include/soc/fsl/dpaa2-io.h4
-rw-r--r--include/uapi/linux/cryptouser.h52
-rw-r--r--include/uapi/linux/fs.h4
-rw-r--r--lib/chacha20.c6
-rw-r--r--lib/crc-t10dif.c57
-rw-r--r--net/ceph/crypto.c12
-rw-r--r--net/ceph/crypto.h2
-rw-r--r--net/mac802154/llsec.c16
-rw-r--r--net/mac802154/llsec.h2
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/rxkad.c44
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c87
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c53
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c18
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c20
-rw-r--r--tools/crypto/getstat.c294
234 files changed, 11921 insertions, 15829 deletions
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 48b424de85bb..cfbc18f0d9c9 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported:
191 191
192- AES-256-XTS for contents and AES-256-CTS-CBC for filenames 192- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
193- AES-128-CBC for contents and AES-128-CTS-CBC for filenames 193- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
194- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
195 194
196It is strongly recommended to use AES-256-XTS for contents encryption. 195It is strongly recommended to use AES-256-XTS for contents encryption.
197AES-128-CBC was added only for low-powered embedded devices with 196AES-128-CBC was added only for low-powered embedded devices with
198crypto accelerators such as CAAM or CESA that do not support XTS. 197crypto accelerators such as CAAM or CESA that do not support XTS.
199 198
200Similarly, Speck128/256 support was only added for older or low-end
201CPUs which cannot do AES fast enough -- especially ARM CPUs which have
202NEON instructions but not the Cryptography Extensions -- and for which
203it would not otherwise be feasible to use encryption at all. It is
204not recommended to use Speck on CPUs that have AES instructions.
205Speck support is only available if it has been enabled in the crypto
206API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
207acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
208
209New encryption modes can be added relatively easily, without changes 199New encryption modes can be added relatively easily, without changes
210to individual filesystems. However, authenticated encryption (AE) 200to individual filesystems. However, authenticated encryption (AE)
211modes are not currently supported because of the difficulty of dealing 201modes are not currently supported because of the difficulty of dealing
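For context, the recommended pairing kept by this documentation hunk (AES-256-XTS for contents, AES-256-CTS-CBC for filenames) is what a v1 fscrypt policy selects via FS_IOC_SET_ENCRYPTION_POLICY. A user-space sketch, assuming the v1 struct fscrypt_policy and mode constants from <linux/fs.h>; the key descriptor is made up and the matching master key must already be present in the keyring:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>

    int set_aes256_policy(const char *dir)
    {
            struct fscrypt_policy policy = {
                    .version = 0,                 /* v1 policy */
                    .contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
                    .filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
                    .flags = FS_POLICY_FLAGS_PAD_32,
            };
            int fd, ret;

            /* 8-byte descriptor; must match a key added to the keyring as
             * "fscrypt:<descriptor>" (the value here is illustrative). */
            memcpy(policy.master_key_descriptor,
                   "\x01\x23\x45\x67\x89\xab\xcd\xef", FS_KEY_DESCRIPTOR_SIZE);

            fd = open(dir, O_RDONLY);
            if (fd < 0)
                    return -1;
            ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
            close(fd);
            return ret;
    }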
diff --git a/MAINTAINERS b/MAINTAINERS
index 99540a5a4e19..8f22f6af3782 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7578,14 +7578,6 @@ S: Supported
7578F: drivers/infiniband/hw/i40iw/ 7578F: drivers/infiniband/hw/i40iw/
7579F: include/uapi/rdma/i40iw-abi.h 7579F: include/uapi/rdma/i40iw-abi.h
7580 7580
7581INTEL SHA MULTIBUFFER DRIVER
7582M: Megha Dey <megha.dey@linux.intel.com>
7583R: Tim Chen <tim.c.chen@linux.intel.com>
7584L: linux-crypto@vger.kernel.org
7585S: Supported
7586F: arch/x86/crypto/sha*-mb/
7587F: crypto/mcryptd.c
7588
7589INTEL TELEMETRY DRIVER 7581INTEL TELEMETRY DRIVER
7590M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com> 7582M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
7591L: platform-driver-x86@vger.kernel.org 7583L: platform-driver-x86@vger.kernel.org
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 925d1364727a..ef0c7feea6e2 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -99,6 +99,7 @@ config CRYPTO_GHASH_ARM_CE
99 depends on KERNEL_MODE_NEON 99 depends on KERNEL_MODE_NEON
100 select CRYPTO_HASH 100 select CRYPTO_HASH
101 select CRYPTO_CRYPTD 101 select CRYPTO_CRYPTD
102 select CRYPTO_GF128MUL
102 help 103 help
103 Use an implementation of GHASH (used by the GCM AEAD chaining mode) 104 Use an implementation of GHASH (used by the GCM AEAD chaining mode)
104 that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64) 105 that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
@@ -121,10 +122,4 @@ config CRYPTO_CHACHA20_NEON
121 select CRYPTO_BLKCIPHER 122 select CRYPTO_BLKCIPHER
122 select CRYPTO_CHACHA20 123 select CRYPTO_CHACHA20
123 124
124config CRYPTO_SPECK_NEON
125 tristate "NEON accelerated Speck cipher algorithms"
126 depends on KERNEL_MODE_NEON
127 select CRYPTO_BLKCIPHER
128 select CRYPTO_SPECK
129
130endif 125endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8de542c48ade..bd5bceef0605 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
10obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o 10obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
11obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o 11obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
12obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o 12obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
13obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
14 13
15ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o 14ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
16ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o 15ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
54crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o 53crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
55crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o 54crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
56chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o 55chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
57speck-neon-y := speck-neon-core.o speck-neon-glue.o
58 56
59ifdef REGENERATE_ARM_CRYPTO 57ifdef REGENERATE_ARM_CRYPTO
60quiet_cmd_perl = PERL $@ 58quiet_cmd_perl = PERL $@
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S
index 451a849ad518..50e7b9896818 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha20-neon-core.S
@@ -18,6 +18,34 @@
18 * (at your option) any later version. 18 * (at your option) any later version.
19 */ 19 */
20 20
21 /*
22 * NEON doesn't have a rotate instruction. The alternatives are, more or less:
23 *
24 * (a) vshl.u32 + vsri.u32 (needs temporary register)
25 * (b) vshl.u32 + vshr.u32 + vorr (needs temporary register)
26 * (c) vrev32.16 (16-bit rotations only)
27 * (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only,
28 * needs index vector)
29 *
30 * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit
31 * rotations, the only choices are (a) and (b). We use (a) since it takes
32 * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53.
33 *
34 * For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
35 * and doesn't need a temporary register.
36 *
37 * For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence
38 * is twice as fast as (a), even when doing (a) on multiple registers
39 * simultaneously to eliminate the stall between vshl and vsri. Also, it
40 * parallelizes better when temporary registers are scarce.
41 *
42 * A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as
43 * (a), so the need to load the rotation table actually makes the vtbl method
44 * slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it
45 * seems to be a good compromise to get a more significant speed boost on some
46 * CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7.
47 */
48
21#include <linux/linkage.h> 49#include <linux/linkage.h>
22 50
23 .text 51 .text
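The comment block added in the hunk above weighs the available NEON rotate idioms against the rotation amounts ChaCha20 actually needs. As a reference point (not part of the patch), a scalar C rendering of the quarter-round, with the NEON choice noted beside each rotation:

    #include <stdint.h>

    #define ROTL32(v, n)    (((v) << (n)) | ((v) >> (32 - (n))))

    /* One ChaCha20 quarter-round on state words a, b, c, d. */
    static void chacha20_quarterround(uint32_t x[16], int a, int b, int c, int d)
    {
            x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 16); /* vrev32.16       */
            x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 12); /* vshl.u32 + vsri */
            x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d],  8); /* vtbl.8 + vtbl.8 */
            x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b],  7); /* vshl.u32 + vsri */
    }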
@@ -46,7 +74,9 @@ ENTRY(chacha20_block_xor_neon)
46 vmov q10, q2 74 vmov q10, q2
47 vmov q11, q3 75 vmov q11, q3
48 76
77 adr ip, .Lrol8_table
49 mov r3, #10 78 mov r3, #10
79 vld1.8 {d10}, [ip, :64]
50 80
51.Ldoubleround: 81.Ldoubleround:
52 // x0 += x1, x3 = rotl32(x3 ^ x0, 16) 82 // x0 += x1, x3 = rotl32(x3 ^ x0, 16)
@@ -62,9 +92,9 @@ ENTRY(chacha20_block_xor_neon)
62 92
63 // x0 += x1, x3 = rotl32(x3 ^ x0, 8) 93 // x0 += x1, x3 = rotl32(x3 ^ x0, 8)
64 vadd.i32 q0, q0, q1 94 vadd.i32 q0, q0, q1
65 veor q4, q3, q0 95 veor q3, q3, q0
66 vshl.u32 q3, q4, #8 96 vtbl.8 d6, {d6}, d10
67 vsri.u32 q3, q4, #24 97 vtbl.8 d7, {d7}, d10
68 98
69 // x2 += x3, x1 = rotl32(x1 ^ x2, 7) 99 // x2 += x3, x1 = rotl32(x1 ^ x2, 7)
70 vadd.i32 q2, q2, q3 100 vadd.i32 q2, q2, q3
@@ -92,9 +122,9 @@ ENTRY(chacha20_block_xor_neon)
92 122
93 // x0 += x1, x3 = rotl32(x3 ^ x0, 8) 123 // x0 += x1, x3 = rotl32(x3 ^ x0, 8)
94 vadd.i32 q0, q0, q1 124 vadd.i32 q0, q0, q1
95 veor q4, q3, q0 125 veor q3, q3, q0
96 vshl.u32 q3, q4, #8 126 vtbl.8 d6, {d6}, d10
97 vsri.u32 q3, q4, #24 127 vtbl.8 d7, {d7}, d10
98 128
99 // x2 += x3, x1 = rotl32(x1 ^ x2, 7) 129 // x2 += x3, x1 = rotl32(x1 ^ x2, 7)
100 vadd.i32 q2, q2, q3 130 vadd.i32 q2, q2, q3
@@ -139,13 +169,17 @@ ENTRY(chacha20_block_xor_neon)
139 bx lr 169 bx lr
140ENDPROC(chacha20_block_xor_neon) 170ENDPROC(chacha20_block_xor_neon)
141 171
172 .align 4
173.Lctrinc: .word 0, 1, 2, 3
174.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6
175
142 .align 5 176 .align 5
143ENTRY(chacha20_4block_xor_neon) 177ENTRY(chacha20_4block_xor_neon)
144 push {r4-r6, lr} 178 push {r4-r5}
145 mov ip, sp // preserve the stack pointer 179 mov r4, sp // preserve the stack pointer
146 sub r3, sp, #0x20 // allocate a 32 byte buffer 180 sub ip, sp, #0x20 // allocate a 32 byte buffer
147 bic r3, r3, #0x1f // aligned to 32 bytes 181 bic ip, ip, #0x1f // aligned to 32 bytes
148 mov sp, r3 182 mov sp, ip
149 183
150 // r0: Input state matrix, s 184 // r0: Input state matrix, s
151 // r1: 4 data blocks output, o 185 // r1: 4 data blocks output, o
@@ -155,25 +189,24 @@ ENTRY(chacha20_4block_xor_neon)
155 // This function encrypts four consecutive ChaCha20 blocks by loading 189 // This function encrypts four consecutive ChaCha20 blocks by loading
156 // the state matrix in NEON registers four times. The algorithm performs 190 // the state matrix in NEON registers four times. The algorithm performs
157 // each operation on the corresponding word of each state matrix, hence 191 // each operation on the corresponding word of each state matrix, hence
158 // requires no word shuffling. For final XORing step we transpose the 192 // requires no word shuffling. The words are re-interleaved before the
159 // matrix by interleaving 32- and then 64-bit words, which allows us to 193 // final addition of the original state and the XORing step.
160 // do XOR in NEON registers.
161 // 194 //
162 195
163 // x0..15[0-3] = s0..3[0..3] 196 // x0..15[0-3] = s0..15[0-3]
164 add r3, r0, #0x20 197 add ip, r0, #0x20
165 vld1.32 {q0-q1}, [r0] 198 vld1.32 {q0-q1}, [r0]
166 vld1.32 {q2-q3}, [r3] 199 vld1.32 {q2-q3}, [ip]
167 200
168 adr r3, CTRINC 201 adr r5, .Lctrinc
169 vdup.32 q15, d7[1] 202 vdup.32 q15, d7[1]
170 vdup.32 q14, d7[0] 203 vdup.32 q14, d7[0]
171 vld1.32 {q11}, [r3, :128] 204 vld1.32 {q4}, [r5, :128]
172 vdup.32 q13, d6[1] 205 vdup.32 q13, d6[1]
173 vdup.32 q12, d6[0] 206 vdup.32 q12, d6[0]
174 vadd.i32 q12, q12, q11 // x12 += counter values 0-3
175 vdup.32 q11, d5[1] 207 vdup.32 q11, d5[1]
176 vdup.32 q10, d5[0] 208 vdup.32 q10, d5[0]
209 vadd.u32 q12, q12, q4 // x12 += counter values 0-3
177 vdup.32 q9, d4[1] 210 vdup.32 q9, d4[1]
178 vdup.32 q8, d4[0] 211 vdup.32 q8, d4[0]
179 vdup.32 q7, d3[1] 212 vdup.32 q7, d3[1]
@@ -185,9 +218,13 @@ ENTRY(chacha20_4block_xor_neon)
185 vdup.32 q1, d0[1] 218 vdup.32 q1, d0[1]
186 vdup.32 q0, d0[0] 219 vdup.32 q0, d0[0]
187 220
221 adr ip, .Lrol8_table
188 mov r3, #10 222 mov r3, #10
223 b 1f
189 224
190.Ldoubleround4: 225.Ldoubleround4:
226 vld1.32 {q8-q9}, [sp, :256]
2271:
191 // x0 += x4, x12 = rotl32(x12 ^ x0, 16) 228 // x0 += x4, x12 = rotl32(x12 ^ x0, 16)
192 // x1 += x5, x13 = rotl32(x13 ^ x1, 16) 229 // x1 += x5, x13 = rotl32(x13 ^ x1, 16)
193 // x2 += x6, x14 = rotl32(x14 ^ x2, 16) 230 // x2 += x6, x14 = rotl32(x14 ^ x2, 16)
@@ -236,24 +273,25 @@ ENTRY(chacha20_4block_xor_neon)
236 // x1 += x5, x13 = rotl32(x13 ^ x1, 8) 273 // x1 += x5, x13 = rotl32(x13 ^ x1, 8)
237 // x2 += x6, x14 = rotl32(x14 ^ x2, 8) 274 // x2 += x6, x14 = rotl32(x14 ^ x2, 8)
238 // x3 += x7, x15 = rotl32(x15 ^ x3, 8) 275 // x3 += x7, x15 = rotl32(x15 ^ x3, 8)
276 vld1.8 {d16}, [ip, :64]
239 vadd.i32 q0, q0, q4 277 vadd.i32 q0, q0, q4
240 vadd.i32 q1, q1, q5 278 vadd.i32 q1, q1, q5
241 vadd.i32 q2, q2, q6 279 vadd.i32 q2, q2, q6
242 vadd.i32 q3, q3, q7 280 vadd.i32 q3, q3, q7
243 281
244 veor q8, q12, q0 282 veor q12, q12, q0
245 veor q9, q13, q1 283 veor q13, q13, q1
246 vshl.u32 q12, q8, #8 284 veor q14, q14, q2
247 vshl.u32 q13, q9, #8 285 veor q15, q15, q3
248 vsri.u32 q12, q8, #24
249 vsri.u32 q13, q9, #24
250 286
251 veor q8, q14, q2 287 vtbl.8 d24, {d24}, d16
252 veor q9, q15, q3 288 vtbl.8 d25, {d25}, d16
253 vshl.u32 q14, q8, #8 289 vtbl.8 d26, {d26}, d16
254 vshl.u32 q15, q9, #8 290 vtbl.8 d27, {d27}, d16
255 vsri.u32 q14, q8, #24 291 vtbl.8 d28, {d28}, d16
256 vsri.u32 q15, q9, #24 292 vtbl.8 d29, {d29}, d16
293 vtbl.8 d30, {d30}, d16
294 vtbl.8 d31, {d31}, d16
257 295
258 vld1.32 {q8-q9}, [sp, :256] 296 vld1.32 {q8-q9}, [sp, :256]
259 297
@@ -332,24 +370,25 @@ ENTRY(chacha20_4block_xor_neon)
332 // x1 += x6, x12 = rotl32(x12 ^ x1, 8) 370 // x1 += x6, x12 = rotl32(x12 ^ x1, 8)
333 // x2 += x7, x13 = rotl32(x13 ^ x2, 8) 371 // x2 += x7, x13 = rotl32(x13 ^ x2, 8)
334 // x3 += x4, x14 = rotl32(x14 ^ x3, 8) 372 // x3 += x4, x14 = rotl32(x14 ^ x3, 8)
373 vld1.8 {d16}, [ip, :64]
335 vadd.i32 q0, q0, q5 374 vadd.i32 q0, q0, q5
336 vadd.i32 q1, q1, q6 375 vadd.i32 q1, q1, q6
337 vadd.i32 q2, q2, q7 376 vadd.i32 q2, q2, q7
338 vadd.i32 q3, q3, q4 377 vadd.i32 q3, q3, q4
339 378
340 veor q8, q15, q0 379 veor q15, q15, q0
341 veor q9, q12, q1 380 veor q12, q12, q1
342 vshl.u32 q15, q8, #8 381 veor q13, q13, q2
343 vshl.u32 q12, q9, #8 382 veor q14, q14, q3
344 vsri.u32 q15, q8, #24
345 vsri.u32 q12, q9, #24
346 383
347 veor q8, q13, q2 384 vtbl.8 d30, {d30}, d16
348 veor q9, q14, q3 385 vtbl.8 d31, {d31}, d16
349 vshl.u32 q13, q8, #8 386 vtbl.8 d24, {d24}, d16
350 vshl.u32 q14, q9, #8 387 vtbl.8 d25, {d25}, d16
351 vsri.u32 q13, q8, #24 388 vtbl.8 d26, {d26}, d16
352 vsri.u32 q14, q9, #24 389 vtbl.8 d27, {d27}, d16
390 vtbl.8 d28, {d28}, d16
391 vtbl.8 d29, {d29}, d16
353 392
354 vld1.32 {q8-q9}, [sp, :256] 393 vld1.32 {q8-q9}, [sp, :256]
355 394
@@ -379,104 +418,76 @@ ENTRY(chacha20_4block_xor_neon)
379 vsri.u32 q6, q9, #25 418 vsri.u32 q6, q9, #25
380 419
381 subs r3, r3, #1 420 subs r3, r3, #1
382 beq 0f 421 bne .Ldoubleround4
383 422
384 vld1.32 {q8-q9}, [sp, :256] 423 // x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
385 b .Ldoubleround4 424 // x8..9[0-3] are on the stack.
386 425
387 // x0[0-3] += s0[0] 426 // Re-interleave the words in the first two rows of each block (x0..7).
388 // x1[0-3] += s0[1] 427 // Also add the counter values 0-3 to x12[0-3].
389 // x2[0-3] += s0[2] 428 vld1.32 {q8}, [r5, :128] // load counter values 0-3
390 // x3[0-3] += s0[3] 429 vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
3910: ldmia r0!, {r3-r6} 430 vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
392 vdup.32 q8, r3 431 vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
393 vdup.32 q9, r4 432 vzip.32 q6, q7 // => (6 7 6 7) (6 7 6 7)
394 vadd.i32 q0, q0, q8 433 vadd.u32 q12, q8 // x12 += counter values 0-3
395 vadd.i32 q1, q1, q9
396 vdup.32 q8, r5
397 vdup.32 q9, r6
398 vadd.i32 q2, q2, q8
399 vadd.i32 q3, q3, q9
400
401 // x4[0-3] += s1[0]
402 // x5[0-3] += s1[1]
403 // x6[0-3] += s1[2]
404 // x7[0-3] += s1[3]
405 ldmia r0!, {r3-r6}
406 vdup.32 q8, r3
407 vdup.32 q9, r4
408 vadd.i32 q4, q4, q8
409 vadd.i32 q5, q5, q9
410 vdup.32 q8, r5
411 vdup.32 q9, r6
412 vadd.i32 q6, q6, q8
413 vadd.i32 q7, q7, q9
414
415 // interleave 32-bit words in state n, n+1
416 vzip.32 q0, q1
417 vzip.32 q2, q3
418 vzip.32 q4, q5
419 vzip.32 q6, q7
420
421 // interleave 64-bit words in state n, n+2
422 vswp d1, d4 434 vswp d1, d4
423 vswp d3, d6 435 vswp d3, d6
436 vld1.32 {q8-q9}, [r0]! // load s0..7
424 vswp d9, d12 437 vswp d9, d12
425 vswp d11, d14 438 vswp d11, d14
426 439
427 // xor with corresponding input, write to output 440 // Swap q1 and q4 so that we'll free up consecutive registers (q0-q1)
441 // after XORing the first 32 bytes.
442 vswp q1, q4
443
444 // First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7)
445
446 // x0..3[0-3] += s0..3[0-3] (add orig state to 1st row of each block)
447 vadd.u32 q0, q0, q8
448 vadd.u32 q2, q2, q8
449 vadd.u32 q4, q4, q8
450 vadd.u32 q3, q3, q8
451
452 // x4..7[0-3] += s4..7[0-3] (add orig state to 2nd row of each block)
453 vadd.u32 q1, q1, q9
454 vadd.u32 q6, q6, q9
455 vadd.u32 q5, q5, q9
456 vadd.u32 q7, q7, q9
457
458 // XOR first 32 bytes using keystream from first two rows of first block
428 vld1.8 {q8-q9}, [r2]! 459 vld1.8 {q8-q9}, [r2]!
429 veor q8, q8, q0 460 veor q8, q8, q0
430 veor q9, q9, q4 461 veor q9, q9, q1
431 vst1.8 {q8-q9}, [r1]! 462 vst1.8 {q8-q9}, [r1]!
432 463
464 // Re-interleave the words in the last two rows of each block (x8..15).
433 vld1.32 {q8-q9}, [sp, :256] 465 vld1.32 {q8-q9}, [sp, :256]
434 466 vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
435 // x8[0-3] += s2[0] 467 vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
436 // x9[0-3] += s2[1] 468 vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
437 // x10[0-3] += s2[2] 469 vzip.32 q10, q11 // => (10 11 10 11) (10 11 10 11)
438 // x11[0-3] += s2[3] 470 vld1.32 {q0-q1}, [r0] // load s8..15
439 ldmia r0!, {r3-r6}
440 vdup.32 q0, r3
441 vdup.32 q4, r4
442 vadd.i32 q8, q8, q0
443 vadd.i32 q9, q9, q4
444 vdup.32 q0, r5
445 vdup.32 q4, r6
446 vadd.i32 q10, q10, q0
447 vadd.i32 q11, q11, q4
448
449 // x12[0-3] += s3[0]
450 // x13[0-3] += s3[1]
451 // x14[0-3] += s3[2]
452 // x15[0-3] += s3[3]
453 ldmia r0!, {r3-r6}
454 vdup.32 q0, r3
455 vdup.32 q4, r4
456 adr r3, CTRINC
457 vadd.i32 q12, q12, q0
458 vld1.32 {q0}, [r3, :128]
459 vadd.i32 q13, q13, q4
460 vadd.i32 q12, q12, q0 // x12 += counter values 0-3
461
462 vdup.32 q0, r5
463 vdup.32 q4, r6
464 vadd.i32 q14, q14, q0
465 vadd.i32 q15, q15, q4
466
467 // interleave 32-bit words in state n, n+1
468 vzip.32 q8, q9
469 vzip.32 q10, q11
470 vzip.32 q12, q13
471 vzip.32 q14, q15
472
473 // interleave 64-bit words in state n, n+2
474 vswp d17, d20
475 vswp d19, d22
476 vswp d25, d28 471 vswp d25, d28
477 vswp d27, d30 472 vswp d27, d30
473 vswp d17, d20
474 vswp d19, d22
475
476 // Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15)
477
478 // x8..11[0-3] += s8..11[0-3] (add orig state to 3rd row of each block)
479 vadd.u32 q8, q8, q0
480 vadd.u32 q10, q10, q0
481 vadd.u32 q9, q9, q0
482 vadd.u32 q11, q11, q0
483
484 // x12..15[0-3] += s12..15[0-3] (add orig state to 4th row of each block)
485 vadd.u32 q12, q12, q1
486 vadd.u32 q14, q14, q1
487 vadd.u32 q13, q13, q1
488 vadd.u32 q15, q15, q1
478 489
479 vmov q4, q1 490 // XOR the rest of the data with the keystream
480 491
481 vld1.8 {q0-q1}, [r2]! 492 vld1.8 {q0-q1}, [r2]!
482 veor q0, q0, q8 493 veor q0, q0, q8
@@ -509,13 +520,11 @@ ENTRY(chacha20_4block_xor_neon)
509 vst1.8 {q0-q1}, [r1]! 520 vst1.8 {q0-q1}, [r1]!
510 521
511 vld1.8 {q0-q1}, [r2] 522 vld1.8 {q0-q1}, [r2]
523 mov sp, r4 // restore original stack pointer
512 veor q0, q0, q11 524 veor q0, q0, q11
513 veor q1, q1, q15 525 veor q1, q1, q15
514 vst1.8 {q0-q1}, [r1] 526 vst1.8 {q0-q1}, [r1]
515 527
516 mov sp, ip 528 pop {r4-r5}
517 pop {r4-r6, pc} 529 bx lr
518ENDPROC(chacha20_4block_xor_neon) 530ENDPROC(chacha20_4block_xor_neon)
519
520 .align 4
521CTRINC: .word 0, 1, 2, 3
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 96e62ec105d0..cd9e93b46c2d 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -236,7 +236,7 @@ static void __exit crc32_pmull_mod_exit(void)
236 ARRAY_SIZE(crc32_pmull_algs)); 236 ARRAY_SIZE(crc32_pmull_algs));
237} 237}
238 238
239static const struct cpu_feature crc32_cpu_feature[] = { 239static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = {
240 { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } 240 { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
241}; 241};
242MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); 242MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
index 2f78c10b1881..406009afa9cf 100644
--- a/arch/arm/crypto/ghash-ce-core.S
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -63,6 +63,33 @@
63 k48 .req d31 63 k48 .req d31
64 SHASH2_p64 .req d31 64 SHASH2_p64 .req d31
65 65
66 HH .req q10
67 HH3 .req q11
68 HH4 .req q12
69 HH34 .req q13
70
71 HH_L .req d20
72 HH_H .req d21
73 HH3_L .req d22
74 HH3_H .req d23
75 HH4_L .req d24
76 HH4_H .req d25
77 HH34_L .req d26
78 HH34_H .req d27
79 SHASH2_H .req d29
80
81 XL2 .req q5
82 XM2 .req q6
83 XH2 .req q7
84 T3 .req q8
85
86 XL2_L .req d10
87 XL2_H .req d11
88 XM2_L .req d12
89 XM2_H .req d13
90 T3_L .req d16
91 T3_H .req d17
92
66 .text 93 .text
67 .fpu crypto-neon-fp-armv8 94 .fpu crypto-neon-fp-armv8
68 95
@@ -175,12 +202,77 @@
175 beq 0f 202 beq 0f
176 vld1.64 {T1}, [ip] 203 vld1.64 {T1}, [ip]
177 teq r0, #0 204 teq r0, #0
178 b 1f 205 b 3f
206
2070: .ifc \pn, p64
208 tst r0, #3 // skip until #blocks is a
209 bne 2f // round multiple of 4
210
211 vld1.8 {XL2-XM2}, [r2]!
2121: vld1.8 {T3-T2}, [r2]!
213 vrev64.8 XL2, XL2
214 vrev64.8 XM2, XM2
215
216 subs r0, r0, #4
217
218 vext.8 T1, XL2, XL2, #8
219 veor XL2_H, XL2_H, XL_L
220 veor XL, XL, T1
221
222 vrev64.8 T3, T3
223 vrev64.8 T1, T2
224
225 vmull.p64 XH, HH4_H, XL_H // a1 * b1
226 veor XL2_H, XL2_H, XL_H
227 vmull.p64 XL, HH4_L, XL_L // a0 * b0
228 vmull.p64 XM, HH34_H, XL2_H // (a1 + a0)(b1 + b0)
229
230 vmull.p64 XH2, HH3_H, XM2_L // a1 * b1
231 veor XM2_L, XM2_L, XM2_H
232 vmull.p64 XL2, HH3_L, XM2_H // a0 * b0
233 vmull.p64 XM2, HH34_L, XM2_L // (a1 + a0)(b1 + b0)
234
235 veor XH, XH, XH2
236 veor XL, XL, XL2
237 veor XM, XM, XM2
238
239 vmull.p64 XH2, HH_H, T3_L // a1 * b1
240 veor T3_L, T3_L, T3_H
241 vmull.p64 XL2, HH_L, T3_H // a0 * b0
242 vmull.p64 XM2, SHASH2_H, T3_L // (a1 + a0)(b1 + b0)
243
244 veor XH, XH, XH2
245 veor XL, XL, XL2
246 veor XM, XM, XM2
247
248 vmull.p64 XH2, SHASH_H, T1_L // a1 * b1
249 veor T1_L, T1_L, T1_H
250 vmull.p64 XL2, SHASH_L, T1_H // a0 * b0
251 vmull.p64 XM2, SHASH2_p64, T1_L // (a1 + a0)(b1 + b0)
252
253 veor XH, XH, XH2
254 veor XL, XL, XL2
255 veor XM, XM, XM2
179 256
1800: vld1.64 {T1}, [r2]! 257 beq 4f
258
259 vld1.8 {XL2-XM2}, [r2]!
260
261 veor T1, XL, XH
262 veor XM, XM, T1
263
264 __pmull_reduce_p64
265
266 veor T1, T1, XH
267 veor XL, XL, T1
268
269 b 1b
270 .endif
271
2722: vld1.64 {T1}, [r2]!
181 subs r0, r0, #1 273 subs r0, r0, #1
182 274
1831: /* multiply XL by SHASH in GF(2^128) */ 2753: /* multiply XL by SHASH in GF(2^128) */
184#ifndef CONFIG_CPU_BIG_ENDIAN 276#ifndef CONFIG_CPU_BIG_ENDIAN
185 vrev64.8 T1, T1 277 vrev64.8 T1, T1
186#endif 278#endif
@@ -193,7 +285,7 @@
193 __pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0 285 __pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0
194 __pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0) 286 __pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0)
195 287
196 veor T1, XL, XH 2884: veor T1, XL, XH
197 veor XM, XM, T1 289 veor XM, XM, T1
198 290
199 __pmull_reduce_\pn 291 __pmull_reduce_\pn
@@ -212,8 +304,14 @@
212 * struct ghash_key const *k, const char *head) 304 * struct ghash_key const *k, const char *head)
213 */ 305 */
214ENTRY(pmull_ghash_update_p64) 306ENTRY(pmull_ghash_update_p64)
215 vld1.64 {SHASH}, [r3] 307 vld1.64 {SHASH}, [r3]!
308 vld1.64 {HH}, [r3]!
309 vld1.64 {HH3-HH4}, [r3]
310
216 veor SHASH2_p64, SHASH_L, SHASH_H 311 veor SHASH2_p64, SHASH_L, SHASH_H
312 veor SHASH2_H, HH_L, HH_H
313 veor HH34_L, HH3_L, HH3_H
314 veor HH34_H, HH4_L, HH4_H
217 315
218 vmov.i8 MASK, #0xe1 316 vmov.i8 MASK, #0xe1
219 vshl.u64 MASK, MASK, #57 317 vshl.u64 MASK, MASK, #57
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 8930fc4e7c22..b7d30b6cf49c 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions. 2 * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
3 * 3 *
4 * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org> 4 * Copyright (C) 2015 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -28,8 +28,10 @@ MODULE_ALIAS_CRYPTO("ghash");
28#define GHASH_DIGEST_SIZE 16 28#define GHASH_DIGEST_SIZE 16
29 29
30struct ghash_key { 30struct ghash_key {
31 u64 a; 31 u64 h[2];
32 u64 b; 32 u64 h2[2];
33 u64 h3[2];
34 u64 h4[2];
33}; 35};
34 36
35struct ghash_desc_ctx { 37struct ghash_desc_ctx {
@@ -117,26 +119,40 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
117 return 0; 119 return 0;
118} 120}
119 121
122static void ghash_reflect(u64 h[], const be128 *k)
123{
124 u64 carry = be64_to_cpu(k->a) >> 63;
125
126 h[0] = (be64_to_cpu(k->b) << 1) | carry;
127 h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
128
129 if (carry)
130 h[1] ^= 0xc200000000000000UL;
131}
132
120static int ghash_setkey(struct crypto_shash *tfm, 133static int ghash_setkey(struct crypto_shash *tfm,
121 const u8 *inkey, unsigned int keylen) 134 const u8 *inkey, unsigned int keylen)
122{ 135{
123 struct ghash_key *key = crypto_shash_ctx(tfm); 136 struct ghash_key *key = crypto_shash_ctx(tfm);
124 u64 a, b; 137 be128 h, k;
125 138
126 if (keylen != GHASH_BLOCK_SIZE) { 139 if (keylen != GHASH_BLOCK_SIZE) {
127 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 140 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
128 return -EINVAL; 141 return -EINVAL;
129 } 142 }
130 143
131 /* perform multiplication by 'x' in GF(2^128) */ 144 memcpy(&k, inkey, GHASH_BLOCK_SIZE);
132 b = get_unaligned_be64(inkey); 145 ghash_reflect(key->h, &k);
133 a = get_unaligned_be64(inkey + 8); 146
147 h = k;
148 gf128mul_lle(&h, &k);
149 ghash_reflect(key->h2, &h);
134 150
135 key->a = (a << 1) | (b >> 63); 151 gf128mul_lle(&h, &k);
136 key->b = (b << 1) | (a >> 63); 152 ghash_reflect(key->h3, &h);
137 153
138 if (b >> 63) 154 gf128mul_lle(&h, &k);
139 key->b ^= 0xc200000000000000UL; 155 ghash_reflect(key->h4, &h);
140 156
141 return 0; 157 return 0;
142} 158}
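The setkey change above precomputes H, H^2, H^3 and H^4 with gf128mul_lle so the vmull.p64 core can fold four ciphertext blocks per loop iteration with a single reduction — the source of the "up to 85%" GHASH speedup cited in the merge summary. The identity it relies on, sketched with hypothetical gf128_add()/gf128_mul() helpers standing in for GF(2^128) arithmetic:

    /* Sequential GHASH:  X' = ((((X ^ C1)*H ^ C2)*H ^ C3)*H ^ C4)*H
     * Aggregated form:   X' = (X ^ C1)*H^4 ^ C2*H^3 ^ C3*H^2 ^ C4*H
     * Same result, but only one reduction pass per four blocks.
     * gf128_add/gf128_mul are illustrative stand-ins, not kernel APIs. */
    static be128 ghash_fold4(be128 X, const be128 C[4], const be128 *H,
                             const be128 *H2, const be128 *H3, const be128 *H4)
    {
            be128 acc = gf128_mul(gf128_add(X, C[0]), *H4);

            acc = gf128_add(acc, gf128_mul(C[1], *H3));
            acc = gf128_add(acc, gf128_mul(C[2], *H2));
            acc = gf128_add(acc, gf128_mul(C[3], *H));
            return acc;
    }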
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
deleted file mode 100644
index 57caa742016e..000000000000
--- a/arch/arm/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,434 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Author: Eric Biggers <ebiggers@google.com>
8 */
9
10#include <linux/linkage.h>
11
12 .text
13 .fpu neon
14
15 // arguments
16 ROUND_KEYS .req r0 // const {u64,u32} *round_keys
17 NROUNDS .req r1 // int nrounds
18 DST .req r2 // void *dst
19 SRC .req r3 // const void *src
20 NBYTES .req r4 // unsigned int nbytes
21 TWEAK .req r5 // void *tweak
22
23 // registers which hold the data being encrypted/decrypted
24 X0 .req q0
25 X0_L .req d0
26 X0_H .req d1
27 Y0 .req q1
28 Y0_H .req d3
29 X1 .req q2
30 X1_L .req d4
31 X1_H .req d5
32 Y1 .req q3
33 Y1_H .req d7
34 X2 .req q4
35 X2_L .req d8
36 X2_H .req d9
37 Y2 .req q5
38 Y2_H .req d11
39 X3 .req q6
40 X3_L .req d12
41 X3_H .req d13
42 Y3 .req q7
43 Y3_H .req d15
44
45 // the round key, duplicated in all lanes
46 ROUND_KEY .req q8
47 ROUND_KEY_L .req d16
48 ROUND_KEY_H .req d17
49
50 // index vector for vtbl-based 8-bit rotates
51 ROTATE_TABLE .req d18
52
53 // multiplication table for updating XTS tweaks
54 GF128MUL_TABLE .req d19
55 GF64MUL_TABLE .req d19
56
57 // current XTS tweak value(s)
58 TWEAKV .req q10
59 TWEAKV_L .req d20
60 TWEAKV_H .req d21
61
62 TMP0 .req q12
63 TMP0_L .req d24
64 TMP0_H .req d25
65 TMP1 .req q13
66 TMP2 .req q14
67 TMP3 .req q15
68
69 .align 4
70.Lror64_8_table:
71 .byte 1, 2, 3, 4, 5, 6, 7, 0
72.Lror32_8_table:
73 .byte 1, 2, 3, 0, 5, 6, 7, 4
74.Lrol64_8_table:
75 .byte 7, 0, 1, 2, 3, 4, 5, 6
76.Lrol32_8_table:
77 .byte 3, 0, 1, 2, 7, 4, 5, 6
78.Lgf128mul_table:
79 .byte 0, 0x87
80 .fill 14
81.Lgf64mul_table:
82 .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
83 .fill 12
84
85/*
86 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
87 *
88 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
89 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
90 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
91 *
92 * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
93 * the vtbl approach is faster on some processors and the same speed on others.
94 */
95.macro _speck_round_128bytes n
96
97 // x = ror(x, 8)
98 vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
99 vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
100 vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
101 vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
102 vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
103 vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
104 vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
105 vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
106
107 // x += y
108 vadd.u\n X0, Y0
109 vadd.u\n X1, Y1
110 vadd.u\n X2, Y2
111 vadd.u\n X3, Y3
112
113 // x ^= k
114 veor X0, ROUND_KEY
115 veor X1, ROUND_KEY
116 veor X2, ROUND_KEY
117 veor X3, ROUND_KEY
118
119 // y = rol(y, 3)
120 vshl.u\n TMP0, Y0, #3
121 vshl.u\n TMP1, Y1, #3
122 vshl.u\n TMP2, Y2, #3
123 vshl.u\n TMP3, Y3, #3
124 vsri.u\n TMP0, Y0, #(\n - 3)
125 vsri.u\n TMP1, Y1, #(\n - 3)
126 vsri.u\n TMP2, Y2, #(\n - 3)
127 vsri.u\n TMP3, Y3, #(\n - 3)
128
129 // y ^= x
130 veor Y0, TMP0, X0
131 veor Y1, TMP1, X1
132 veor Y2, TMP2, X2
133 veor Y3, TMP3, X3
134.endm
135
136/*
137 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
138 *
139 * This is the inverse of _speck_round_128bytes().
140 */
141.macro _speck_unround_128bytes n
142
143 // y ^= x
144 veor TMP0, Y0, X0
145 veor TMP1, Y1, X1
146 veor TMP2, Y2, X2
147 veor TMP3, Y3, X3
148
149 // y = ror(y, 3)
150 vshr.u\n Y0, TMP0, #3
151 vshr.u\n Y1, TMP1, #3
152 vshr.u\n Y2, TMP2, #3
153 vshr.u\n Y3, TMP3, #3
154 vsli.u\n Y0, TMP0, #(\n - 3)
155 vsli.u\n Y1, TMP1, #(\n - 3)
156 vsli.u\n Y2, TMP2, #(\n - 3)
157 vsli.u\n Y3, TMP3, #(\n - 3)
158
159 // x ^= k
160 veor X0, ROUND_KEY
161 veor X1, ROUND_KEY
162 veor X2, ROUND_KEY
163 veor X3, ROUND_KEY
164
165 // x -= y
166 vsub.u\n X0, Y0
167 vsub.u\n X1, Y1
168 vsub.u\n X2, Y2
169 vsub.u\n X3, Y3
170
171 // x = rol(x, 8);
172 vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
173 vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
174 vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
175 vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
176 vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
177 vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
178 vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
179 vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
180.endm
181
182.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
183
184 // Load the next source block
185 vld1.8 {\dst_reg}, [SRC]!
186
187 // Save the current tweak in the tweak buffer
188 vst1.8 {TWEAKV}, [\tweak_buf:128]!
189
190 // XOR the next source block with the current tweak
191 veor \dst_reg, TWEAKV
192
193 /*
194 * Calculate the next tweak by multiplying the current one by x,
195 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
196 */
197 vshr.u64 \tmp, TWEAKV, #63
198 vshl.u64 TWEAKV, #1
199 veor TWEAKV_H, \tmp\()_L
200 vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
201 veor TWEAKV_L, \tmp\()_H
202.endm
203
204.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
205
206 // Load the next two source blocks
207 vld1.8 {\dst_reg}, [SRC]!
208
209 // Save the current two tweaks in the tweak buffer
210 vst1.8 {TWEAKV}, [\tweak_buf:128]!
211
212 // XOR the next two source blocks with the current two tweaks
213 veor \dst_reg, TWEAKV
214
215 /*
216 * Calculate the next two tweaks by multiplying the current ones by x^2,
217 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
218 */
219 vshr.u64 \tmp, TWEAKV, #62
220 vshl.u64 TWEAKV, #2
221 vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
222 vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
223 veor TWEAKV, \tmp
224.endm
225
226/*
227 * _speck_xts_crypt() - Speck-XTS encryption/decryption
228 *
229 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
230 * using Speck-XTS, specifically the variant with a block size of '2n' and round
231 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
232 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
233 * nonzero multiple of 128.
234 */
235.macro _speck_xts_crypt n, decrypting
236 push {r4-r7}
237 mov r7, sp
238
239 /*
240 * The first four parameters were passed in registers r0-r3. Load the
241 * additional parameters, which were passed on the stack.
242 */
243 ldr NBYTES, [sp, #16]
244 ldr TWEAK, [sp, #20]
245
246 /*
247 * If decrypting, modify the ROUND_KEYS parameter to point to the last
248 * round key rather than the first, since for decryption the round keys
249 * are used in reverse order.
250 */
251.if \decrypting
252.if \n == 64
253 add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
254 sub ROUND_KEYS, #8
255.else
256 add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
257 sub ROUND_KEYS, #4
258.endif
259.endif
260
261 // Load the index vector for vtbl-based 8-bit rotates
262.if \decrypting
263 ldr r12, =.Lrol\n\()_8_table
264.else
265 ldr r12, =.Lror\n\()_8_table
266.endif
267 vld1.8 {ROTATE_TABLE}, [r12:64]
268
269 // One-time XTS preparation
270
271 /*
272 * Allocate stack space to store 128 bytes worth of tweaks. For
273 * performance, this space is aligned to a 16-byte boundary so that we
274 * can use the load/store instructions that declare 16-byte alignment.
275 * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
276 */
277 sub r12, sp, #128
278 bic r12, #0xf
279 mov sp, r12
280
281.if \n == 64
282 // Load first tweak
283 vld1.8 {TWEAKV}, [TWEAK]
284
285 // Load GF(2^128) multiplication table
286 ldr r12, =.Lgf128mul_table
287 vld1.8 {GF128MUL_TABLE}, [r12:64]
288.else
289 // Load first tweak
290 vld1.8 {TWEAKV_L}, [TWEAK]
291
292 // Load GF(2^64) multiplication table
293 ldr r12, =.Lgf64mul_table
294 vld1.8 {GF64MUL_TABLE}, [r12:64]
295
296 // Calculate second tweak, packing it together with the first
297 vshr.u64 TMP0_L, TWEAKV_L, #63
298 vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
299 vshl.u64 TWEAKV_H, TWEAKV_L, #1
300 veor TWEAKV_H, TMP0_L
301.endif
302
303.Lnext_128bytes_\@:
304
305 /*
306 * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
307 * values, and save the tweaks on the stack for later. Then
308 * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
309 * that the X[0-3] registers contain only the second halves of blocks,
310 * and the Y[0-3] registers contain only the first halves of blocks.
311 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
312 */
313 mov r12, sp
314.if \n == 64
315 _xts128_precrypt_one X0, r12, TMP0
316 _xts128_precrypt_one Y0, r12, TMP0
317 _xts128_precrypt_one X1, r12, TMP0
318 _xts128_precrypt_one Y1, r12, TMP0
319 _xts128_precrypt_one X2, r12, TMP0
320 _xts128_precrypt_one Y2, r12, TMP0
321 _xts128_precrypt_one X3, r12, TMP0
322 _xts128_precrypt_one Y3, r12, TMP0
323 vswp X0_L, Y0_H
324 vswp X1_L, Y1_H
325 vswp X2_L, Y2_H
326 vswp X3_L, Y3_H
327.else
328 _xts64_precrypt_two X0, r12, TMP0
329 _xts64_precrypt_two Y0, r12, TMP0
330 _xts64_precrypt_two X1, r12, TMP0
331 _xts64_precrypt_two Y1, r12, TMP0
332 _xts64_precrypt_two X2, r12, TMP0
333 _xts64_precrypt_two Y2, r12, TMP0
334 _xts64_precrypt_two X3, r12, TMP0
335 _xts64_precrypt_two Y3, r12, TMP0
336 vuzp.32 Y0, X0
337 vuzp.32 Y1, X1
338 vuzp.32 Y2, X2
339 vuzp.32 Y3, X3
340.endif
341
342 // Do the cipher rounds
343
344 mov r12, ROUND_KEYS
345 mov r6, NROUNDS
346
347.Lnext_round_\@:
348.if \decrypting
349.if \n == 64
350 vld1.64 ROUND_KEY_L, [r12]
351 sub r12, #8
352 vmov ROUND_KEY_H, ROUND_KEY_L
353.else
354 vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
355 sub r12, #4
356.endif
357 _speck_unround_128bytes \n
358.else
359.if \n == 64
360 vld1.64 ROUND_KEY_L, [r12]!
361 vmov ROUND_KEY_H, ROUND_KEY_L
362.else
363 vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
364.endif
365 _speck_round_128bytes \n
366.endif
367 subs r6, r6, #1
368 bne .Lnext_round_\@
369
370 // Re-interleave the 'x' and 'y' elements of each block
371.if \n == 64
372 vswp X0_L, Y0_H
373 vswp X1_L, Y1_H
374 vswp X2_L, Y2_H
375 vswp X3_L, Y3_H
376.else
377 vzip.32 Y0, X0
378 vzip.32 Y1, X1
379 vzip.32 Y2, X2
380 vzip.32 Y3, X3
381.endif
382
383 // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
384 mov r12, sp
385 vld1.8 {TMP0, TMP1}, [r12:128]!
386 vld1.8 {TMP2, TMP3}, [r12:128]!
387 veor X0, TMP0
388 veor Y0, TMP1
389 veor X1, TMP2
390 veor Y1, TMP3
391 vld1.8 {TMP0, TMP1}, [r12:128]!
392 vld1.8 {TMP2, TMP3}, [r12:128]!
393 veor X2, TMP0
394 veor Y2, TMP1
395 veor X3, TMP2
396 veor Y3, TMP3
397
398 // Store the ciphertext in the destination buffer
399 vst1.8 {X0, Y0}, [DST]!
400 vst1.8 {X1, Y1}, [DST]!
401 vst1.8 {X2, Y2}, [DST]!
402 vst1.8 {X3, Y3}, [DST]!
403
404 // Continue if there are more 128-byte chunks remaining, else return
405 subs NBYTES, #128
406 bne .Lnext_128bytes_\@
407
408 // Store the next tweak
409.if \n == 64
410 vst1.8 {TWEAKV}, [TWEAK]
411.else
412 vst1.8 {TWEAKV_L}, [TWEAK]
413.endif
414
415 mov sp, r7
416 pop {r4-r7}
417 bx lr
418.endm
419
420ENTRY(speck128_xts_encrypt_neon)
421 _speck_xts_crypt n=64, decrypting=0
422ENDPROC(speck128_xts_encrypt_neon)
423
424ENTRY(speck128_xts_decrypt_neon)
425 _speck_xts_crypt n=64, decrypting=1
426ENDPROC(speck128_xts_decrypt_neon)
427
428ENTRY(speck64_xts_encrypt_neon)
429 _speck_xts_crypt n=32, decrypting=0
430ENDPROC(speck64_xts_encrypt_neon)
431
432ENTRY(speck64_xts_decrypt_neon)
433 _speck_xts_crypt n=32, decrypting=1
434ENDPROC(speck64_xts_decrypt_neon)
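The deleted _xts128_precrypt_one macro above advances the XTS tweak by multiplying it by x in GF(2^128), reducing modulo x^128 + x^7 + x^2 + x + 1 via the 0x87 table entry. The scalar equivalent — essentially what the generic gf128mul_x_ble helper does; the struct and function names here are illustrative:

    #include <stdint.h>

    struct xts_tweak { uint64_t lo, hi; };  /* little-endian 64-bit halves */

    static void xts_tweak_mul_x(struct xts_tweak *t)
    {
            uint64_t carry = t->hi >> 63;   /* bit shifted out of x^127 */

            t->hi = (t->hi << 1) | (t->lo >> 63);
            t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);  /* fold back x^7+x^2+x+1 */
    }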
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
deleted file mode 100644
index f012c3ea998f..000000000000
--- a/arch/arm/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,288 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
8 * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
9 * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
10 * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
11 * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
12 * OCB and PMAC"), represented as 0x1B.
13 */
14
15#include <asm/hwcap.h>
16#include <asm/neon.h>
17#include <asm/simd.h>
18#include <crypto/algapi.h>
19#include <crypto/gf128mul.h>
20#include <crypto/internal/skcipher.h>
21#include <crypto/speck.h>
22#include <crypto/xts.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25
26/* The assembly functions only handle multiples of 128 bytes */
27#define SPECK_NEON_CHUNK_SIZE 128
28
29/* Speck128 */
30
31struct speck128_xts_tfm_ctx {
32 struct speck128_tfm_ctx main_key;
33 struct speck128_tfm_ctx tweak_key;
34};
35
36asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
37 void *dst, const void *src,
38 unsigned int nbytes, void *tweak);
39
40asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
41 void *dst, const void *src,
42 unsigned int nbytes, void *tweak);
43
44typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
45 u8 *, const u8 *);
46typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
47 const void *, unsigned int, void *);
48
49static __always_inline int
50__speck128_xts_crypt(struct skcipher_request *req,
51 speck128_crypt_one_t crypt_one,
52 speck128_xts_crypt_many_t crypt_many)
53{
54 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
55 const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
56 struct skcipher_walk walk;
57 le128 tweak;
58 int err;
59
60 err = skcipher_walk_virt(&walk, req, true);
61
62 crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
63
64 while (walk.nbytes > 0) {
65 unsigned int nbytes = walk.nbytes;
66 u8 *dst = walk.dst.virt.addr;
67 const u8 *src = walk.src.virt.addr;
68
69 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
70 unsigned int count;
71
72 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
73 kernel_neon_begin();
74 (*crypt_many)(ctx->main_key.round_keys,
75 ctx->main_key.nrounds,
76 dst, src, count, &tweak);
77 kernel_neon_end();
78 dst += count;
79 src += count;
80 nbytes -= count;
81 }
82
83 /* Handle any remainder with generic code */
84 while (nbytes >= sizeof(tweak)) {
85 le128_xor((le128 *)dst, (const le128 *)src, &tweak);
86 (*crypt_one)(&ctx->main_key, dst, dst);
87 le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
88 gf128mul_x_ble(&tweak, &tweak);
89
90 dst += sizeof(tweak);
91 src += sizeof(tweak);
92 nbytes -= sizeof(tweak);
93 }
94 err = skcipher_walk_done(&walk, nbytes);
95 }
96
97 return err;
98}
99
100static int speck128_xts_encrypt(struct skcipher_request *req)
101{
102 return __speck128_xts_crypt(req, crypto_speck128_encrypt,
103 speck128_xts_encrypt_neon);
104}
105
106static int speck128_xts_decrypt(struct skcipher_request *req)
107{
108 return __speck128_xts_crypt(req, crypto_speck128_decrypt,
109 speck128_xts_decrypt_neon);
110}
111
112static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
113 unsigned int keylen)
114{
115 struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
116 int err;
117
118 err = xts_verify_key(tfm, key, keylen);
119 if (err)
120 return err;
121
122 keylen /= 2;
123
124 err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
125 if (err)
126 return err;
127
128 return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
129}
130
131/* Speck64 */
132
133struct speck64_xts_tfm_ctx {
134 struct speck64_tfm_ctx main_key;
135 struct speck64_tfm_ctx tweak_key;
136};
137
138asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
139 void *dst, const void *src,
140 unsigned int nbytes, void *tweak);
141
142asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
143 void *dst, const void *src,
144 unsigned int nbytes, void *tweak);
145
146typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
147 u8 *, const u8 *);
148typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
149 const void *, unsigned int, void *);
150
151static __always_inline int
152__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
153 speck64_xts_crypt_many_t crypt_many)
154{
155 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
156 const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
157 struct skcipher_walk walk;
158 __le64 tweak;
159 int err;
160
161 err = skcipher_walk_virt(&walk, req, true);
162
163 crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
164
165 while (walk.nbytes > 0) {
166 unsigned int nbytes = walk.nbytes;
167 u8 *dst = walk.dst.virt.addr;
168 const u8 *src = walk.src.virt.addr;
169
170 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
171 unsigned int count;
172
173 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
174 kernel_neon_begin();
175 (*crypt_many)(ctx->main_key.round_keys,
176 ctx->main_key.nrounds,
177 dst, src, count, &tweak);
178 kernel_neon_end();
179 dst += count;
180 src += count;
181 nbytes -= count;
182 }
183
184 /* Handle any remainder with generic code */
185 while (nbytes >= sizeof(tweak)) {
186 *(__le64 *)dst = *(__le64 *)src ^ tweak;
187 (*crypt_one)(&ctx->main_key, dst, dst);
188 *(__le64 *)dst ^= tweak;
189 tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
190 ((tweak & cpu_to_le64(1ULL << 63)) ?
191 0x1B : 0));
192 dst += sizeof(tweak);
193 src += sizeof(tweak);
194 nbytes -= sizeof(tweak);
195 }
196 err = skcipher_walk_done(&walk, nbytes);
197 }
198
199 return err;
200}
201
202static int speck64_xts_encrypt(struct skcipher_request *req)
203{
204 return __speck64_xts_crypt(req, crypto_speck64_encrypt,
205 speck64_xts_encrypt_neon);
206}
207
208static int speck64_xts_decrypt(struct skcipher_request *req)
209{
210 return __speck64_xts_crypt(req, crypto_speck64_decrypt,
211 speck64_xts_decrypt_neon);
212}
213
214static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
215 unsigned int keylen)
216{
217 struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
218 int err;
219
220 err = xts_verify_key(tfm, key, keylen);
221 if (err)
222 return err;
223
224 keylen /= 2;
225
226 err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
227 if (err)
228 return err;
229
230 return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
231}
232
233static struct skcipher_alg speck_algs[] = {
234 {
235 .base.cra_name = "xts(speck128)",
236 .base.cra_driver_name = "xts-speck128-neon",
237 .base.cra_priority = 300,
238 .base.cra_blocksize = SPECK128_BLOCK_SIZE,
239 .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
240 .base.cra_alignmask = 7,
241 .base.cra_module = THIS_MODULE,
242 .min_keysize = 2 * SPECK128_128_KEY_SIZE,
243 .max_keysize = 2 * SPECK128_256_KEY_SIZE,
244 .ivsize = SPECK128_BLOCK_SIZE,
245 .walksize = SPECK_NEON_CHUNK_SIZE,
246 .setkey = speck128_xts_setkey,
247 .encrypt = speck128_xts_encrypt,
248 .decrypt = speck128_xts_decrypt,
249 }, {
250 .base.cra_name = "xts(speck64)",
251 .base.cra_driver_name = "xts-speck64-neon",
252 .base.cra_priority = 300,
253 .base.cra_blocksize = SPECK64_BLOCK_SIZE,
254 .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
255 .base.cra_alignmask = 7,
256 .base.cra_module = THIS_MODULE,
257 .min_keysize = 2 * SPECK64_96_KEY_SIZE,
258 .max_keysize = 2 * SPECK64_128_KEY_SIZE,
259 .ivsize = SPECK64_BLOCK_SIZE,
260 .walksize = SPECK_NEON_CHUNK_SIZE,
261 .setkey = speck64_xts_setkey,
262 .encrypt = speck64_xts_encrypt,
263 .decrypt = speck64_xts_decrypt,
264 }
265};
266
267static int __init speck_neon_module_init(void)
268{
269 if (!(elf_hwcap & HWCAP_NEON))
270 return -ENODEV;
271 return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
272}
273
274static void __exit speck_neon_module_exit(void)
275{
276 crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
277}
278
279module_init(speck_neon_module_init);
280module_exit(speck_neon_module_exit);
281
282MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
283MODULE_LICENSE("GPL");
284MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
285MODULE_ALIAS_CRYPTO("xts(speck128)");
286MODULE_ALIAS_CRYPTO("xts-speck128-neon");
287MODULE_ALIAS_CRYPTO("xts(speck64)");
288MODULE_ALIAS_CRYPTO("xts-speck64-neon");
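
The generic remainder path in __speck64_xts_crypt() above advances the XTS tweak one 8-byte block at a time. As a minimal host-side sketch of that update (endianness conversions omitted; gf64_double is an illustrative name, not something in the driver), the step reduces to a shift plus a conditional xor with 0x1B, the reduction constant for a 64-bit block size, whereas the Speck128 variant uses the usual 0x87 of GF(2^128):

#include <stdint.h>

/*
 * Illustrative scalar version of the tweak update in the remainder
 * loop of __speck64_xts_crypt(): multiply the running tweak by x in
 * GF(2^64).  The cpu_to_le64()/le64_to_cpu() handling of the kernel
 * code is omitted here.
 */
static uint64_t gf64_double(uint64_t tweak)
{
	uint64_t carry = tweak >> 63;

	return (tweak << 1) ^ (carry ? 0x1B : 0);
}

Each 8-byte block is xored with the tweak before and after the single-block cipher call, the usual XEX arrangement that the NEON routine applies to whole SPECK_NEON_CHUNK_SIZE chunks at a time.
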
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index db8d364f8476..3d165b4cdd2a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -698,6 +698,7 @@ CONFIG_MEMTEST=y
698CONFIG_SECURITY=y 698CONFIG_SECURITY=y
699CONFIG_CRYPTO_ECHAINIV=y 699CONFIG_CRYPTO_ECHAINIV=y
700CONFIG_CRYPTO_ANSI_CPRNG=y 700CONFIG_CRYPTO_ANSI_CPRNG=y
701CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=y
701CONFIG_ARM64_CRYPTO=y 702CONFIG_ARM64_CRYPTO=y
702CONFIG_CRYPTO_SHA1_ARM64_CE=y 703CONFIG_CRYPTO_SHA1_ARM64_CE=y
703CONFIG_CRYPTO_SHA2_ARM64_CE=y 704CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -706,7 +707,6 @@ CONFIG_CRYPTO_SHA3_ARM64=m
706CONFIG_CRYPTO_SM3_ARM64_CE=m 707CONFIG_CRYPTO_SM3_ARM64_CE=m
707CONFIG_CRYPTO_GHASH_ARM64_CE=y 708CONFIG_CRYPTO_GHASH_ARM64_CE=y
708CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m 709CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
709CONFIG_CRYPTO_CRC32_ARM64_CE=m
710CONFIG_CRYPTO_AES_ARM64_CE_CCM=y 710CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
711CONFIG_CRYPTO_AES_ARM64_CE_BLK=y 711CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
712CONFIG_CRYPTO_CHACHA20_NEON=m 712CONFIG_CRYPTO_CHACHA20_NEON=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index e3fdb0fd6f70..a5606823ed4d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -66,11 +66,6 @@ config CRYPTO_CRCT10DIF_ARM64_CE
66 depends on KERNEL_MODE_NEON && CRC_T10DIF 66 depends on KERNEL_MODE_NEON && CRC_T10DIF
67 select CRYPTO_HASH 67 select CRYPTO_HASH
68 68
69config CRYPTO_CRC32_ARM64_CE
70 tristate "CRC32 and CRC32C digest algorithms using ARMv8 extensions"
71 depends on CRC32
72 select CRYPTO_HASH
73
74config CRYPTO_AES_ARM64 69config CRYPTO_AES_ARM64
75 tristate "AES core cipher using scalar instructions" 70 tristate "AES core cipher using scalar instructions"
76 select CRYPTO_AES 71 select CRYPTO_AES
@@ -119,10 +114,4 @@ config CRYPTO_AES_ARM64_BS
119 select CRYPTO_AES_ARM64 114 select CRYPTO_AES_ARM64
120 select CRYPTO_SIMD 115 select CRYPTO_SIMD
121 116
122config CRYPTO_SPECK_NEON
123 tristate "NEON accelerated Speck cipher algorithms"
124 depends on KERNEL_MODE_NEON
125 select CRYPTO_BLKCIPHER
126 select CRYPTO_SPECK
127
128endif 117endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index bcafd016618e..f476fede09ba 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -32,9 +32,6 @@ ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
32obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o 32obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o
33crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o 33crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
34 34
35obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
36crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
37
38obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o 35obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
39aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o 36aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
40 37
@@ -56,9 +53,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
56obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o 53obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
57chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o 54chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
58 55
59obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
60speck-neon-y := speck-neon-core.o speck-neon-glue.o
61
62obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o 56obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
63aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o 57aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
64 58
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 623e74ed1c67..143070510809 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -17,6 +17,11 @@
17 17
18 .arch armv8-a+crypto 18 .arch armv8-a+crypto
19 19
20 xtsmask .req v16
21
22 .macro xts_reload_mask, tmp
23 .endm
24
20 /* preload all round keys */ 25 /* preload all round keys */
21 .macro load_round_keys, rounds, rk 26 .macro load_round_keys, rounds, rk
22 cmp \rounds, #12 27 cmp \rounds, #12
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index adcb83eb683c..1e676625ef33 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -15,6 +15,7 @@
15#include <crypto/internal/hash.h> 15#include <crypto/internal/hash.h>
16#include <crypto/internal/simd.h> 16#include <crypto/internal/simd.h>
17#include <crypto/internal/skcipher.h> 17#include <crypto/internal/skcipher.h>
18#include <crypto/scatterwalk.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/cpufeature.h> 20#include <linux/cpufeature.h>
20#include <crypto/xts.h> 21#include <crypto/xts.h>
@@ -31,6 +32,8 @@
31#define aes_ecb_decrypt ce_aes_ecb_decrypt 32#define aes_ecb_decrypt ce_aes_ecb_decrypt
32#define aes_cbc_encrypt ce_aes_cbc_encrypt 33#define aes_cbc_encrypt ce_aes_cbc_encrypt
33#define aes_cbc_decrypt ce_aes_cbc_decrypt 34#define aes_cbc_decrypt ce_aes_cbc_decrypt
35#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt
36#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt
34#define aes_ctr_encrypt ce_aes_ctr_encrypt 37#define aes_ctr_encrypt ce_aes_ctr_encrypt
35#define aes_xts_encrypt ce_aes_xts_encrypt 38#define aes_xts_encrypt ce_aes_xts_encrypt
36#define aes_xts_decrypt ce_aes_xts_decrypt 39#define aes_xts_decrypt ce_aes_xts_decrypt
@@ -45,6 +48,8 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
45#define aes_ecb_decrypt neon_aes_ecb_decrypt 48#define aes_ecb_decrypt neon_aes_ecb_decrypt
46#define aes_cbc_encrypt neon_aes_cbc_encrypt 49#define aes_cbc_encrypt neon_aes_cbc_encrypt
47#define aes_cbc_decrypt neon_aes_cbc_decrypt 50#define aes_cbc_decrypt neon_aes_cbc_decrypt
51#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt
52#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt
48#define aes_ctr_encrypt neon_aes_ctr_encrypt 53#define aes_ctr_encrypt neon_aes_ctr_encrypt
49#define aes_xts_encrypt neon_aes_xts_encrypt 54#define aes_xts_encrypt neon_aes_xts_encrypt
50#define aes_xts_decrypt neon_aes_xts_decrypt 55#define aes_xts_decrypt neon_aes_xts_decrypt
@@ -63,30 +68,41 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
63MODULE_LICENSE("GPL v2"); 68MODULE_LICENSE("GPL v2");
64 69
65/* defined in aes-modes.S */ 70/* defined in aes-modes.S */
66asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], 71asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
67 int rounds, int blocks); 72 int rounds, int blocks);
68asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], 73asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
69 int rounds, int blocks); 74 int rounds, int blocks);
70 75
71asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], 76asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
72 int rounds, int blocks, u8 iv[]); 77 int rounds, int blocks, u8 iv[]);
73asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], 78asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
74 int rounds, int blocks, u8 iv[]); 79 int rounds, int blocks, u8 iv[]);
75 80
76asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], 81asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
82 int rounds, int bytes, u8 const iv[]);
83asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
84 int rounds, int bytes, u8 const iv[]);
85
86asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
77 int rounds, int blocks, u8 ctr[]); 87 int rounds, int blocks, u8 ctr[]);
78 88
79asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], 89asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
80 int rounds, int blocks, u8 const rk2[], u8 iv[], 90 int rounds, int blocks, u32 const rk2[], u8 iv[],
81 int first); 91 int first);
82asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], 92asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
83 int rounds, int blocks, u8 const rk2[], u8 iv[], 93 int rounds, int blocks, u32 const rk2[], u8 iv[],
84 int first); 94 int first);
85 95
86asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds, 96asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
87 int blocks, u8 dg[], int enc_before, 97 int blocks, u8 dg[], int enc_before,
88 int enc_after); 98 int enc_after);
89 99
100struct cts_cbc_req_ctx {
101 struct scatterlist sg_src[2];
102 struct scatterlist sg_dst[2];
103 struct skcipher_request subreq;
104};
105
90struct crypto_aes_xts_ctx { 106struct crypto_aes_xts_ctx {
91 struct crypto_aes_ctx key1; 107 struct crypto_aes_ctx key1;
92 struct crypto_aes_ctx __aligned(8) key2; 108 struct crypto_aes_ctx __aligned(8) key2;
@@ -142,7 +158,7 @@ static int ecb_encrypt(struct skcipher_request *req)
142 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 158 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
143 kernel_neon_begin(); 159 kernel_neon_begin();
144 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 160 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
145 (u8 *)ctx->key_enc, rounds, blocks); 161 ctx->key_enc, rounds, blocks);
146 kernel_neon_end(); 162 kernel_neon_end();
147 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 163 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
148 } 164 }
@@ -162,7 +178,7 @@ static int ecb_decrypt(struct skcipher_request *req)
162 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 178 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
163 kernel_neon_begin(); 179 kernel_neon_begin();
164 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 180 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
165 (u8 *)ctx->key_dec, rounds, blocks); 181 ctx->key_dec, rounds, blocks);
166 kernel_neon_end(); 182 kernel_neon_end();
167 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 183 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
168 } 184 }
@@ -182,7 +198,7 @@ static int cbc_encrypt(struct skcipher_request *req)
182 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 198 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
183 kernel_neon_begin(); 199 kernel_neon_begin();
184 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 200 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
185 (u8 *)ctx->key_enc, rounds, blocks, walk.iv); 201 ctx->key_enc, rounds, blocks, walk.iv);
186 kernel_neon_end(); 202 kernel_neon_end();
187 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 203 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
188 } 204 }
@@ -202,13 +218,149 @@ static int cbc_decrypt(struct skcipher_request *req)
202 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 218 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
203 kernel_neon_begin(); 219 kernel_neon_begin();
204 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 220 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
205 (u8 *)ctx->key_dec, rounds, blocks, walk.iv); 221 ctx->key_dec, rounds, blocks, walk.iv);
206 kernel_neon_end(); 222 kernel_neon_end();
207 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 223 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
208 } 224 }
209 return err; 225 return err;
210} 226}
211 227
228static int cts_cbc_init_tfm(struct crypto_skcipher *tfm)
229{
230 crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx));
231 return 0;
232}
233
234static int cts_cbc_encrypt(struct skcipher_request *req)
235{
236 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
237 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
238 struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
239 int err, rounds = 6 + ctx->key_length / 4;
240 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
241 struct scatterlist *src = req->src, *dst = req->dst;
242 struct skcipher_walk walk;
243
244 skcipher_request_set_tfm(&rctx->subreq, tfm);
245
246 if (req->cryptlen <= AES_BLOCK_SIZE) {
247 if (req->cryptlen < AES_BLOCK_SIZE)
248 return -EINVAL;
249 cbc_blocks = 1;
250 }
251
252 if (cbc_blocks > 0) {
253 unsigned int blocks;
254
255 skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
256 cbc_blocks * AES_BLOCK_SIZE,
257 req->iv);
258
259 err = skcipher_walk_virt(&walk, &rctx->subreq, false);
260
261 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
262 kernel_neon_begin();
263 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
264 ctx->key_enc, rounds, blocks, walk.iv);
265 kernel_neon_end();
266 err = skcipher_walk_done(&walk,
267 walk.nbytes % AES_BLOCK_SIZE);
268 }
269 if (err)
270 return err;
271
272 if (req->cryptlen == AES_BLOCK_SIZE)
273 return 0;
274
275 dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
276 rctx->subreq.cryptlen);
277 if (req->dst != req->src)
278 dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
279 rctx->subreq.cryptlen);
280 }
281
282 /* handle ciphertext stealing */
283 skcipher_request_set_crypt(&rctx->subreq, src, dst,
284 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
285 req->iv);
286
287 err = skcipher_walk_virt(&walk, &rctx->subreq, false);
288 if (err)
289 return err;
290
291 kernel_neon_begin();
292 aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
293 ctx->key_enc, rounds, walk.nbytes, walk.iv);
294 kernel_neon_end();
295
296 return skcipher_walk_done(&walk, 0);
297}
298
299static int cts_cbc_decrypt(struct skcipher_request *req)
300{
301 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
302 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
303 struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
304 int err, rounds = 6 + ctx->key_length / 4;
305 int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
306 struct scatterlist *src = req->src, *dst = req->dst;
307 struct skcipher_walk walk;
308
309 skcipher_request_set_tfm(&rctx->subreq, tfm);
310
311 if (req->cryptlen <= AES_BLOCK_SIZE) {
312 if (req->cryptlen < AES_BLOCK_SIZE)
313 return -EINVAL;
314 cbc_blocks = 1;
315 }
316
317 if (cbc_blocks > 0) {
318 unsigned int blocks;
319
320 skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
321 cbc_blocks * AES_BLOCK_SIZE,
322 req->iv);
323
324 err = skcipher_walk_virt(&walk, &rctx->subreq, false);
325
326 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
327 kernel_neon_begin();
328 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
329 ctx->key_dec, rounds, blocks, walk.iv);
330 kernel_neon_end();
331 err = skcipher_walk_done(&walk,
332 walk.nbytes % AES_BLOCK_SIZE);
333 }
334 if (err)
335 return err;
336
337 if (req->cryptlen == AES_BLOCK_SIZE)
338 return 0;
339
340 dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
341 rctx->subreq.cryptlen);
342 if (req->dst != req->src)
343 dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
344 rctx->subreq.cryptlen);
345 }
346
347 /* handle ciphertext stealing */
348 skcipher_request_set_crypt(&rctx->subreq, src, dst,
349 req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
350 req->iv);
351
352 err = skcipher_walk_virt(&walk, &rctx->subreq, false);
353 if (err)
354 return err;
355
356 kernel_neon_begin();
357 aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
358 ctx->key_dec, rounds, walk.nbytes, walk.iv);
359 kernel_neon_end();
360
361 return skcipher_walk_done(&walk, 0);
362}
363
212static int ctr_encrypt(struct skcipher_request *req) 364static int ctr_encrypt(struct skcipher_request *req)
213{ 365{
214 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 366 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -222,7 +374,7 @@ static int ctr_encrypt(struct skcipher_request *req)
222 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 374 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
223 kernel_neon_begin(); 375 kernel_neon_begin();
224 aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 376 aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
225 (u8 *)ctx->key_enc, rounds, blocks, walk.iv); 377 ctx->key_enc, rounds, blocks, walk.iv);
226 kernel_neon_end(); 378 kernel_neon_end();
227 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 379 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
228 } 380 }
@@ -238,7 +390,7 @@ static int ctr_encrypt(struct skcipher_request *req)
238 blocks = -1; 390 blocks = -1;
239 391
240 kernel_neon_begin(); 392 kernel_neon_begin();
241 aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds, 393 aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds,
242 blocks, walk.iv); 394 blocks, walk.iv);
243 kernel_neon_end(); 395 kernel_neon_end();
244 crypto_xor_cpy(tdst, tsrc, tail, nbytes); 396 crypto_xor_cpy(tdst, tsrc, tail, nbytes);
@@ -272,8 +424,8 @@ static int xts_encrypt(struct skcipher_request *req)
272 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 424 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
273 kernel_neon_begin(); 425 kernel_neon_begin();
274 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 426 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
275 (u8 *)ctx->key1.key_enc, rounds, blocks, 427 ctx->key1.key_enc, rounds, blocks,
276 (u8 *)ctx->key2.key_enc, walk.iv, first); 428 ctx->key2.key_enc, walk.iv, first);
277 kernel_neon_end(); 429 kernel_neon_end();
278 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 430 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
279 } 431 }
@@ -294,8 +446,8 @@ static int xts_decrypt(struct skcipher_request *req)
294 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 446 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
295 kernel_neon_begin(); 447 kernel_neon_begin();
296 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 448 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
297 (u8 *)ctx->key1.key_dec, rounds, blocks, 449 ctx->key1.key_dec, rounds, blocks,
298 (u8 *)ctx->key2.key_enc, walk.iv, first); 450 ctx->key2.key_enc, walk.iv, first);
299 kernel_neon_end(); 451 kernel_neon_end();
300 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 452 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
301 } 453 }
@@ -336,6 +488,24 @@ static struct skcipher_alg aes_algs[] = { {
336 .decrypt = cbc_decrypt, 488 .decrypt = cbc_decrypt,
337}, { 489}, {
338 .base = { 490 .base = {
491 .cra_name = "__cts(cbc(aes))",
492 .cra_driver_name = "__cts-cbc-aes-" MODE,
493 .cra_priority = PRIO,
494 .cra_flags = CRYPTO_ALG_INTERNAL,
495 .cra_blocksize = AES_BLOCK_SIZE,
496 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
497 .cra_module = THIS_MODULE,
498 },
499 .min_keysize = AES_MIN_KEY_SIZE,
500 .max_keysize = AES_MAX_KEY_SIZE,
501 .ivsize = AES_BLOCK_SIZE,
502 .walksize = 2 * AES_BLOCK_SIZE,
503 .setkey = skcipher_aes_setkey,
504 .encrypt = cts_cbc_encrypt,
505 .decrypt = cts_cbc_decrypt,
506 .init = cts_cbc_init_tfm,
507}, {
508 .base = {
339 .cra_name = "__ctr(aes)", 509 .cra_name = "__ctr(aes)",
340 .cra_driver_name = "__ctr-aes-" MODE, 510 .cra_driver_name = "__ctr-aes-" MODE,
341 .cra_priority = PRIO, 511 .cra_priority = PRIO,
@@ -412,7 +582,6 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
412{ 582{
413 struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); 583 struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
414 be128 *consts = (be128 *)ctx->consts; 584 be128 *consts = (be128 *)ctx->consts;
415 u8 *rk = (u8 *)ctx->key.key_enc;
416 int rounds = 6 + key_len / 4; 585 int rounds = 6 + key_len / 4;
417 int err; 586 int err;
418 587
@@ -422,7 +591,8 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
422 591
423 /* encrypt the zero vector */ 592 /* encrypt the zero vector */
424 kernel_neon_begin(); 593 kernel_neon_begin();
425 aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1); 594 aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
595 rounds, 1);
426 kernel_neon_end(); 596 kernel_neon_end();
427 597
428 cmac_gf128_mul_by_x(consts, consts); 598 cmac_gf128_mul_by_x(consts, consts);
@@ -441,7 +611,6 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
441 }; 611 };
442 612
443 struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); 613 struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
444 u8 *rk = (u8 *)ctx->key.key_enc;
445 int rounds = 6 + key_len / 4; 614 int rounds = 6 + key_len / 4;
446 u8 key[AES_BLOCK_SIZE]; 615 u8 key[AES_BLOCK_SIZE];
447 int err; 616 int err;
@@ -451,8 +620,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
451 return err; 620 return err;
452 621
453 kernel_neon_begin(); 622 kernel_neon_begin();
454 aes_ecb_encrypt(key, ks[0], rk, rounds, 1); 623 aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
455 aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2); 624 aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
456 kernel_neon_end(); 625 kernel_neon_end();
457 626
458 return cbcmac_setkey(tfm, key, sizeof(key)); 627 return cbcmac_setkey(tfm, key, sizeof(key));
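
The new cts_cbc_encrypt()/cts_cbc_decrypt() helpers split each request so that all but the last two (possibly partial) blocks go through the ordinary CBC path, and only the final portion is handed to the new aes_cbc_cts_* routines. A small user-space sketch of that length arithmetic (DIV_ROUND_UP redefined locally; requests shorter than one block are assumed to have been rejected already, as in the code above):

#include <stdio.h>

#define AES_BLOCK_SIZE	16
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int lens[] = { 16, 17, 31, 32, 33, 64, 100 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int cryptlen = lens[i];
		int cbc_blocks = DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE) - 2;

		/* a single full block is treated as plain CBC */
		if (cryptlen <= AES_BLOCK_SIZE)
			cbc_blocks = 1;

		printf("cryptlen %3u: %d CBC block(s), %u byte(s) for the CTS tail\n",
		       cryptlen, cbc_blocks > 0 ? cbc_blocks : 0,
		       cbc_blocks > 0 ? cryptlen - cbc_blocks * AES_BLOCK_SIZE
				      : cryptlen);
	}
	return 0;
}

Only the last two blocks ever reach the ciphertext-stealing routines, which is what allows the final partial block to steal the ciphertext it needs from the preceding full block.
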
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 483a7130cf0e..67700045a0e0 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -14,12 +14,12 @@
14 .align 4 14 .align 4
15 15
16aes_encrypt_block4x: 16aes_encrypt_block4x:
17 encrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 17 encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
18 ret 18 ret
19ENDPROC(aes_encrypt_block4x) 19ENDPROC(aes_encrypt_block4x)
20 20
21aes_decrypt_block4x: 21aes_decrypt_block4x:
22 decrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 22 decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
23 ret 23 ret
24ENDPROC(aes_decrypt_block4x) 24ENDPROC(aes_decrypt_block4x)
25 25
@@ -31,71 +31,57 @@ ENDPROC(aes_decrypt_block4x)
31 */ 31 */
32 32
33AES_ENTRY(aes_ecb_encrypt) 33AES_ENTRY(aes_ecb_encrypt)
34 frame_push 5 34 stp x29, x30, [sp, #-16]!
35 mov x29, sp
35 36
36 mov x19, x0 37 enc_prepare w3, x2, x5
37 mov x20, x1
38 mov x21, x2
39 mov x22, x3
40 mov x23, x4
41
42.Lecbencrestart:
43 enc_prepare w22, x21, x5
44 38
45.LecbencloopNx: 39.LecbencloopNx:
46 subs w23, w23, #4 40 subs w4, w4, #4
47 bmi .Lecbenc1x 41 bmi .Lecbenc1x
48 ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ 42 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
49 bl aes_encrypt_block4x 43 bl aes_encrypt_block4x
50 st1 {v0.16b-v3.16b}, [x19], #64 44 st1 {v0.16b-v3.16b}, [x0], #64
51 cond_yield_neon .Lecbencrestart
52 b .LecbencloopNx 45 b .LecbencloopNx
53.Lecbenc1x: 46.Lecbenc1x:
54 adds w23, w23, #4 47 adds w4, w4, #4
55 beq .Lecbencout 48 beq .Lecbencout
56.Lecbencloop: 49.Lecbencloop:
57 ld1 {v0.16b}, [x20], #16 /* get next pt block */ 50 ld1 {v0.16b}, [x1], #16 /* get next pt block */
58 encrypt_block v0, w22, x21, x5, w6 51 encrypt_block v0, w3, x2, x5, w6
59 st1 {v0.16b}, [x19], #16 52 st1 {v0.16b}, [x0], #16
60 subs w23, w23, #1 53 subs w4, w4, #1
61 bne .Lecbencloop 54 bne .Lecbencloop
62.Lecbencout: 55.Lecbencout:
63 frame_pop 56 ldp x29, x30, [sp], #16
64 ret 57 ret
65AES_ENDPROC(aes_ecb_encrypt) 58AES_ENDPROC(aes_ecb_encrypt)
66 59
67 60
68AES_ENTRY(aes_ecb_decrypt) 61AES_ENTRY(aes_ecb_decrypt)
69 frame_push 5 62 stp x29, x30, [sp, #-16]!
63 mov x29, sp
70 64
71 mov x19, x0 65 dec_prepare w3, x2, x5
72 mov x20, x1
73 mov x21, x2
74 mov x22, x3
75 mov x23, x4
76
77.Lecbdecrestart:
78 dec_prepare w22, x21, x5
79 66
80.LecbdecloopNx: 67.LecbdecloopNx:
81 subs w23, w23, #4 68 subs w4, w4, #4
82 bmi .Lecbdec1x 69 bmi .Lecbdec1x
83 ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ 70 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
84 bl aes_decrypt_block4x 71 bl aes_decrypt_block4x
85 st1 {v0.16b-v3.16b}, [x19], #64 72 st1 {v0.16b-v3.16b}, [x0], #64
86 cond_yield_neon .Lecbdecrestart
87 b .LecbdecloopNx 73 b .LecbdecloopNx
88.Lecbdec1x: 74.Lecbdec1x:
89 adds w23, w23, #4 75 adds w4, w4, #4
90 beq .Lecbdecout 76 beq .Lecbdecout
91.Lecbdecloop: 77.Lecbdecloop:
92 ld1 {v0.16b}, [x20], #16 /* get next ct block */ 78 ld1 {v0.16b}, [x1], #16 /* get next ct block */
93 decrypt_block v0, w22, x21, x5, w6 79 decrypt_block v0, w3, x2, x5, w6
94 st1 {v0.16b}, [x19], #16 80 st1 {v0.16b}, [x0], #16
95 subs w23, w23, #1 81 subs w4, w4, #1
96 bne .Lecbdecloop 82 bne .Lecbdecloop
97.Lecbdecout: 83.Lecbdecout:
98 frame_pop 84 ldp x29, x30, [sp], #16
99 ret 85 ret
100AES_ENDPROC(aes_ecb_decrypt) 86AES_ENDPROC(aes_ecb_decrypt)
101 87
@@ -108,162 +94,211 @@ AES_ENDPROC(aes_ecb_decrypt)
108 */ 94 */
109 95
110AES_ENTRY(aes_cbc_encrypt) 96AES_ENTRY(aes_cbc_encrypt)
111 frame_push 6 97 ld1 {v4.16b}, [x5] /* get iv */
112 98 enc_prepare w3, x2, x6
113 mov x19, x0
114 mov x20, x1
115 mov x21, x2
116 mov x22, x3
117 mov x23, x4
118 mov x24, x5
119
120.Lcbcencrestart:
121 ld1 {v4.16b}, [x24] /* get iv */
122 enc_prepare w22, x21, x6
123 99
124.Lcbcencloop4x: 100.Lcbcencloop4x:
125 subs w23, w23, #4 101 subs w4, w4, #4
126 bmi .Lcbcenc1x 102 bmi .Lcbcenc1x
127 ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ 103 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
128 eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */ 104 eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
129 encrypt_block v0, w22, x21, x6, w7 105 encrypt_block v0, w3, x2, x6, w7
130 eor v1.16b, v1.16b, v0.16b 106 eor v1.16b, v1.16b, v0.16b
131 encrypt_block v1, w22, x21, x6, w7 107 encrypt_block v1, w3, x2, x6, w7
132 eor v2.16b, v2.16b, v1.16b 108 eor v2.16b, v2.16b, v1.16b
133 encrypt_block v2, w22, x21, x6, w7 109 encrypt_block v2, w3, x2, x6, w7
134 eor v3.16b, v3.16b, v2.16b 110 eor v3.16b, v3.16b, v2.16b
135 encrypt_block v3, w22, x21, x6, w7 111 encrypt_block v3, w3, x2, x6, w7
136 st1 {v0.16b-v3.16b}, [x19], #64 112 st1 {v0.16b-v3.16b}, [x0], #64
137 mov v4.16b, v3.16b 113 mov v4.16b, v3.16b
138 st1 {v4.16b}, [x24] /* return iv */
139 cond_yield_neon .Lcbcencrestart
140 b .Lcbcencloop4x 114 b .Lcbcencloop4x
141.Lcbcenc1x: 115.Lcbcenc1x:
142 adds w23, w23, #4 116 adds w4, w4, #4
143 beq .Lcbcencout 117 beq .Lcbcencout
144.Lcbcencloop: 118.Lcbcencloop:
145 ld1 {v0.16b}, [x20], #16 /* get next pt block */ 119 ld1 {v0.16b}, [x1], #16 /* get next pt block */
146 eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */ 120 eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
147 encrypt_block v4, w22, x21, x6, w7 121 encrypt_block v4, w3, x2, x6, w7
148 st1 {v4.16b}, [x19], #16 122 st1 {v4.16b}, [x0], #16
149 subs w23, w23, #1 123 subs w4, w4, #1
150 bne .Lcbcencloop 124 bne .Lcbcencloop
151.Lcbcencout: 125.Lcbcencout:
152 st1 {v4.16b}, [x24] /* return iv */ 126 st1 {v4.16b}, [x5] /* return iv */
153 frame_pop
154 ret 127 ret
155AES_ENDPROC(aes_cbc_encrypt) 128AES_ENDPROC(aes_cbc_encrypt)
156 129
157 130
158AES_ENTRY(aes_cbc_decrypt) 131AES_ENTRY(aes_cbc_decrypt)
159 frame_push 6 132 stp x29, x30, [sp, #-16]!
160 133 mov x29, sp
161 mov x19, x0
162 mov x20, x1
163 mov x21, x2
164 mov x22, x3
165 mov x23, x4
166 mov x24, x5
167 134
168.Lcbcdecrestart: 135 ld1 {v7.16b}, [x5] /* get iv */
169 ld1 {v7.16b}, [x24] /* get iv */ 136 dec_prepare w3, x2, x6
170 dec_prepare w22, x21, x6
171 137
172.LcbcdecloopNx: 138.LcbcdecloopNx:
173 subs w23, w23, #4 139 subs w4, w4, #4
174 bmi .Lcbcdec1x 140 bmi .Lcbcdec1x
175 ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ 141 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
176 mov v4.16b, v0.16b 142 mov v4.16b, v0.16b
177 mov v5.16b, v1.16b 143 mov v5.16b, v1.16b
178 mov v6.16b, v2.16b 144 mov v6.16b, v2.16b
179 bl aes_decrypt_block4x 145 bl aes_decrypt_block4x
180 sub x20, x20, #16 146 sub x1, x1, #16
181 eor v0.16b, v0.16b, v7.16b 147 eor v0.16b, v0.16b, v7.16b
182 eor v1.16b, v1.16b, v4.16b 148 eor v1.16b, v1.16b, v4.16b
183 ld1 {v7.16b}, [x20], #16 /* reload 1 ct block */ 149 ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
184 eor v2.16b, v2.16b, v5.16b 150 eor v2.16b, v2.16b, v5.16b
185 eor v3.16b, v3.16b, v6.16b 151 eor v3.16b, v3.16b, v6.16b
186 st1 {v0.16b-v3.16b}, [x19], #64 152 st1 {v0.16b-v3.16b}, [x0], #64
187 st1 {v7.16b}, [x24] /* return iv */
188 cond_yield_neon .Lcbcdecrestart
189 b .LcbcdecloopNx 153 b .LcbcdecloopNx
190.Lcbcdec1x: 154.Lcbcdec1x:
191 adds w23, w23, #4 155 adds w4, w4, #4
192 beq .Lcbcdecout 156 beq .Lcbcdecout
193.Lcbcdecloop: 157.Lcbcdecloop:
194 ld1 {v1.16b}, [x20], #16 /* get next ct block */ 158 ld1 {v1.16b}, [x1], #16 /* get next ct block */
195 mov v0.16b, v1.16b /* ...and copy to v0 */ 159 mov v0.16b, v1.16b /* ...and copy to v0 */
196 decrypt_block v0, w22, x21, x6, w7 160 decrypt_block v0, w3, x2, x6, w7
197 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ 161 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
198 mov v7.16b, v1.16b /* ct is next iv */ 162 mov v7.16b, v1.16b /* ct is next iv */
199 st1 {v0.16b}, [x19], #16 163 st1 {v0.16b}, [x0], #16
200 subs w23, w23, #1 164 subs w4, w4, #1
201 bne .Lcbcdecloop 165 bne .Lcbcdecloop
202.Lcbcdecout: 166.Lcbcdecout:
203 st1 {v7.16b}, [x24] /* return iv */ 167 st1 {v7.16b}, [x5] /* return iv */
204 frame_pop 168 ldp x29, x30, [sp], #16
205 ret 169 ret
206AES_ENDPROC(aes_cbc_decrypt) 170AES_ENDPROC(aes_cbc_decrypt)
207 171
208 172
209 /* 173 /*
174 * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
175 * int rounds, int bytes, u8 const iv[])
176 * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
177 * int rounds, int bytes, u8 const iv[])
178 */
179
180AES_ENTRY(aes_cbc_cts_encrypt)
181 adr_l x8, .Lcts_permute_table
182 sub x4, x4, #16
183 add x9, x8, #32
184 add x8, x8, x4
185 sub x9, x9, x4
186 ld1 {v3.16b}, [x8]
187 ld1 {v4.16b}, [x9]
188
189 ld1 {v0.16b}, [x1], x4 /* overlapping loads */
190 ld1 {v1.16b}, [x1]
191
192 ld1 {v5.16b}, [x5] /* get iv */
193 enc_prepare w3, x2, x6
194
195 eor v0.16b, v0.16b, v5.16b /* xor with iv */
196 tbl v1.16b, {v1.16b}, v4.16b
197 encrypt_block v0, w3, x2, x6, w7
198
199 eor v1.16b, v1.16b, v0.16b
200 tbl v0.16b, {v0.16b}, v3.16b
201 encrypt_block v1, w3, x2, x6, w7
202
203 add x4, x0, x4
204 st1 {v0.16b}, [x4] /* overlapping stores */
205 st1 {v1.16b}, [x0]
206 ret
207AES_ENDPROC(aes_cbc_cts_encrypt)
208
209AES_ENTRY(aes_cbc_cts_decrypt)
210 adr_l x8, .Lcts_permute_table
211 sub x4, x4, #16
212 add x9, x8, #32
213 add x8, x8, x4
214 sub x9, x9, x4
215 ld1 {v3.16b}, [x8]
216 ld1 {v4.16b}, [x9]
217
218 ld1 {v0.16b}, [x1], x4 /* overlapping loads */
219 ld1 {v1.16b}, [x1]
220
221 ld1 {v5.16b}, [x5] /* get iv */
222 dec_prepare w3, x2, x6
223
224 tbl v2.16b, {v1.16b}, v4.16b
225 decrypt_block v0, w3, x2, x6, w7
226 eor v2.16b, v2.16b, v0.16b
227
228 tbx v0.16b, {v1.16b}, v4.16b
229 tbl v2.16b, {v2.16b}, v3.16b
230 decrypt_block v0, w3, x2, x6, w7
231 eor v0.16b, v0.16b, v5.16b /* xor with iv */
232
233 add x4, x0, x4
234 st1 {v2.16b}, [x4] /* overlapping stores */
235 st1 {v0.16b}, [x0]
236 ret
237AES_ENDPROC(aes_cbc_cts_decrypt)
238
239 .section ".rodata", "a"
240 .align 6
241.Lcts_permute_table:
242 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
243 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
244 .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
245 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
246 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
247 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
248 .previous
249
250
251 /*
210 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 252 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
211 * int blocks, u8 ctr[]) 253 * int blocks, u8 ctr[])
212 */ 254 */
213 255
214AES_ENTRY(aes_ctr_encrypt) 256AES_ENTRY(aes_ctr_encrypt)
215 frame_push 6 257 stp x29, x30, [sp, #-16]!
258 mov x29, sp
216 259
217 mov x19, x0 260 enc_prepare w3, x2, x6
218 mov x20, x1 261 ld1 {v4.16b}, [x5]
219 mov x21, x2
220 mov x22, x3
221 mov x23, x4
222 mov x24, x5
223
224.Lctrrestart:
225 enc_prepare w22, x21, x6
226 ld1 {v4.16b}, [x24]
227 262
228 umov x6, v4.d[1] /* keep swabbed ctr in reg */ 263 umov x6, v4.d[1] /* keep swabbed ctr in reg */
229 rev x6, x6 264 rev x6, x6
265 cmn w6, w4 /* 32 bit overflow? */
266 bcs .Lctrloop
230.LctrloopNx: 267.LctrloopNx:
231 subs w23, w23, #4 268 subs w4, w4, #4
232 bmi .Lctr1x 269 bmi .Lctr1x
233 cmn w6, #4 /* 32 bit overflow? */ 270 add w7, w6, #1
234 bcs .Lctr1x
235 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
236 dup v7.4s, w6
237 mov v0.16b, v4.16b 271 mov v0.16b, v4.16b
238 add v7.4s, v7.4s, v8.4s 272 add w8, w6, #2
239 mov v1.16b, v4.16b 273 mov v1.16b, v4.16b
240 rev32 v8.16b, v7.16b 274 add w9, w6, #3
241 mov v2.16b, v4.16b 275 mov v2.16b, v4.16b
276 rev w7, w7
242 mov v3.16b, v4.16b 277 mov v3.16b, v4.16b
243 mov v1.s[3], v8.s[0] 278 rev w8, w8
244 mov v2.s[3], v8.s[1] 279 mov v1.s[3], w7
245 mov v3.s[3], v8.s[2] 280 rev w9, w9
246 ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */ 281 mov v2.s[3], w8
282 mov v3.s[3], w9
283 ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
247 bl aes_encrypt_block4x 284 bl aes_encrypt_block4x
248 eor v0.16b, v5.16b, v0.16b 285 eor v0.16b, v5.16b, v0.16b
249 ld1 {v5.16b}, [x20], #16 /* get 1 input block */ 286 ld1 {v5.16b}, [x1], #16 /* get 1 input block */
250 eor v1.16b, v6.16b, v1.16b 287 eor v1.16b, v6.16b, v1.16b
251 eor v2.16b, v7.16b, v2.16b 288 eor v2.16b, v7.16b, v2.16b
252 eor v3.16b, v5.16b, v3.16b 289 eor v3.16b, v5.16b, v3.16b
253 st1 {v0.16b-v3.16b}, [x19], #64 290 st1 {v0.16b-v3.16b}, [x0], #64
254 add x6, x6, #4 291 add x6, x6, #4
255 rev x7, x6 292 rev x7, x6
256 ins v4.d[1], x7 293 ins v4.d[1], x7
257 cbz w23, .Lctrout 294 cbz w4, .Lctrout
258 st1 {v4.16b}, [x24] /* return next CTR value */
259 cond_yield_neon .Lctrrestart
260 b .LctrloopNx 295 b .LctrloopNx
261.Lctr1x: 296.Lctr1x:
262 adds w23, w23, #4 297 adds w4, w4, #4
263 beq .Lctrout 298 beq .Lctrout
264.Lctrloop: 299.Lctrloop:
265 mov v0.16b, v4.16b 300 mov v0.16b, v4.16b
266 encrypt_block v0, w22, x21, x8, w7 301 encrypt_block v0, w3, x2, x8, w7
267 302
268 adds x6, x6, #1 /* increment BE ctr */ 303 adds x6, x6, #1 /* increment BE ctr */
269 rev x7, x6 304 rev x7, x6
@@ -271,22 +306,22 @@ AES_ENTRY(aes_ctr_encrypt)
271 bcs .Lctrcarry /* overflow? */ 306 bcs .Lctrcarry /* overflow? */
272 307
273.Lctrcarrydone: 308.Lctrcarrydone:
274 subs w23, w23, #1 309 subs w4, w4, #1
275 bmi .Lctrtailblock /* blocks <0 means tail block */ 310 bmi .Lctrtailblock /* blocks <0 means tail block */
276 ld1 {v3.16b}, [x20], #16 311 ld1 {v3.16b}, [x1], #16
277 eor v3.16b, v0.16b, v3.16b 312 eor v3.16b, v0.16b, v3.16b
278 st1 {v3.16b}, [x19], #16 313 st1 {v3.16b}, [x0], #16
279 bne .Lctrloop 314 bne .Lctrloop
280 315
281.Lctrout: 316.Lctrout:
282 st1 {v4.16b}, [x24] /* return next CTR value */ 317 st1 {v4.16b}, [x5] /* return next CTR value */
283.Lctrret: 318 ldp x29, x30, [sp], #16
284 frame_pop
285 ret 319 ret
286 320
287.Lctrtailblock: 321.Lctrtailblock:
288 st1 {v0.16b}, [x19] 322 st1 {v0.16b}, [x0]
289 b .Lctrret 323 ldp x29, x30, [sp], #16
324 ret
290 325
291.Lctrcarry: 326.Lctrcarry:
292 umov x7, v4.d[0] /* load upper word of ctr */ 327 umov x7, v4.d[0] /* load upper word of ctr */
@@ -296,7 +331,6 @@ AES_ENTRY(aes_ctr_encrypt)
296 ins v4.d[0], x7 331 ins v4.d[0], x7
297 b .Lctrcarrydone 332 b .Lctrcarrydone
298AES_ENDPROC(aes_ctr_encrypt) 333AES_ENDPROC(aes_ctr_encrypt)
299 .ltorg
300 334
301 335
302 /* 336 /*
@@ -306,150 +340,132 @@ AES_ENDPROC(aes_ctr_encrypt)
306 * int blocks, u8 const rk2[], u8 iv[], int first) 340 * int blocks, u8 const rk2[], u8 iv[], int first)
307 */ 341 */
308 342
309 .macro next_tweak, out, in, const, tmp 343 .macro next_tweak, out, in, tmp
310 sshr \tmp\().2d, \in\().2d, #63 344 sshr \tmp\().2d, \in\().2d, #63
311 and \tmp\().16b, \tmp\().16b, \const\().16b 345 and \tmp\().16b, \tmp\().16b, xtsmask.16b
312 add \out\().2d, \in\().2d, \in\().2d 346 add \out\().2d, \in\().2d, \in\().2d
313 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 347 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
314 eor \out\().16b, \out\().16b, \tmp\().16b 348 eor \out\().16b, \out\().16b, \tmp\().16b
315 .endm 349 .endm
316 350
317.Lxts_mul_x: 351 .macro xts_load_mask, tmp
318CPU_LE( .quad 1, 0x87 ) 352 movi xtsmask.2s, #0x1
319CPU_BE( .quad 0x87, 1 ) 353 movi \tmp\().2s, #0x87
354 uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
355 .endm
320 356
321AES_ENTRY(aes_xts_encrypt) 357AES_ENTRY(aes_xts_encrypt)
322 frame_push 6 358 stp x29, x30, [sp, #-16]!
359 mov x29, sp
323 360
324 mov x19, x0 361 ld1 {v4.16b}, [x6]
325 mov x20, x1 362 xts_load_mask v8
326 mov x21, x2
327 mov x22, x3
328 mov x23, x4
329 mov x24, x6
330
331 ld1 {v4.16b}, [x24]
332 cbz w7, .Lxtsencnotfirst 363 cbz w7, .Lxtsencnotfirst
333 364
334 enc_prepare w3, x5, x8 365 enc_prepare w3, x5, x8
335 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ 366 encrypt_block v4, w3, x5, x8, w7 /* first tweak */
336 enc_switch_key w3, x2, x8 367 enc_switch_key w3, x2, x8
337 ldr q7, .Lxts_mul_x
338 b .LxtsencNx 368 b .LxtsencNx
339 369
340.Lxtsencrestart:
341 ld1 {v4.16b}, [x24]
342.Lxtsencnotfirst: 370.Lxtsencnotfirst:
343 enc_prepare w22, x21, x8 371 enc_prepare w3, x2, x8
344.LxtsencloopNx: 372.LxtsencloopNx:
345 ldr q7, .Lxts_mul_x 373 next_tweak v4, v4, v8
346 next_tweak v4, v4, v7, v8
347.LxtsencNx: 374.LxtsencNx:
348 subs w23, w23, #4 375 subs w4, w4, #4
349 bmi .Lxtsenc1x 376 bmi .Lxtsenc1x
350 ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ 377 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
351 next_tweak v5, v4, v7, v8 378 next_tweak v5, v4, v8
352 eor v0.16b, v0.16b, v4.16b 379 eor v0.16b, v0.16b, v4.16b
353 next_tweak v6, v5, v7, v8 380 next_tweak v6, v5, v8
354 eor v1.16b, v1.16b, v5.16b 381 eor v1.16b, v1.16b, v5.16b
355 eor v2.16b, v2.16b, v6.16b 382 eor v2.16b, v2.16b, v6.16b
356 next_tweak v7, v6, v7, v8 383 next_tweak v7, v6, v8
357 eor v3.16b, v3.16b, v7.16b 384 eor v3.16b, v3.16b, v7.16b
358 bl aes_encrypt_block4x 385 bl aes_encrypt_block4x
359 eor v3.16b, v3.16b, v7.16b 386 eor v3.16b, v3.16b, v7.16b
360 eor v0.16b, v0.16b, v4.16b 387 eor v0.16b, v0.16b, v4.16b
361 eor v1.16b, v1.16b, v5.16b 388 eor v1.16b, v1.16b, v5.16b
362 eor v2.16b, v2.16b, v6.16b 389 eor v2.16b, v2.16b, v6.16b
363 st1 {v0.16b-v3.16b}, [x19], #64 390 st1 {v0.16b-v3.16b}, [x0], #64
364 mov v4.16b, v7.16b 391 mov v4.16b, v7.16b
365 cbz w23, .Lxtsencout 392 cbz w4, .Lxtsencout
366 st1 {v4.16b}, [x24] 393 xts_reload_mask v8
367 cond_yield_neon .Lxtsencrestart
368 b .LxtsencloopNx 394 b .LxtsencloopNx
369.Lxtsenc1x: 395.Lxtsenc1x:
370 adds w23, w23, #4 396 adds w4, w4, #4
371 beq .Lxtsencout 397 beq .Lxtsencout
372.Lxtsencloop: 398.Lxtsencloop:
373 ld1 {v1.16b}, [x20], #16 399 ld1 {v1.16b}, [x1], #16
374 eor v0.16b, v1.16b, v4.16b 400 eor v0.16b, v1.16b, v4.16b
375 encrypt_block v0, w22, x21, x8, w7 401 encrypt_block v0, w3, x2, x8, w7
376 eor v0.16b, v0.16b, v4.16b 402 eor v0.16b, v0.16b, v4.16b
377 st1 {v0.16b}, [x19], #16 403 st1 {v0.16b}, [x0], #16
378 subs w23, w23, #1 404 subs w4, w4, #1
379 beq .Lxtsencout 405 beq .Lxtsencout
380 next_tweak v4, v4, v7, v8 406 next_tweak v4, v4, v8
381 b .Lxtsencloop 407 b .Lxtsencloop
382.Lxtsencout: 408.Lxtsencout:
383 st1 {v4.16b}, [x24] 409 st1 {v4.16b}, [x6]
384 frame_pop 410 ldp x29, x30, [sp], #16
385 ret 411 ret
386AES_ENDPROC(aes_xts_encrypt) 412AES_ENDPROC(aes_xts_encrypt)
387 413
388 414
389AES_ENTRY(aes_xts_decrypt) 415AES_ENTRY(aes_xts_decrypt)
390 frame_push 6 416 stp x29, x30, [sp, #-16]!
417 mov x29, sp
391 418
392 mov x19, x0 419 ld1 {v4.16b}, [x6]
393 mov x20, x1 420 xts_load_mask v8
394 mov x21, x2
395 mov x22, x3
396 mov x23, x4
397 mov x24, x6
398
399 ld1 {v4.16b}, [x24]
400 cbz w7, .Lxtsdecnotfirst 421 cbz w7, .Lxtsdecnotfirst
401 422
402 enc_prepare w3, x5, x8 423 enc_prepare w3, x5, x8
403 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ 424 encrypt_block v4, w3, x5, x8, w7 /* first tweak */
404 dec_prepare w3, x2, x8 425 dec_prepare w3, x2, x8
405 ldr q7, .Lxts_mul_x
406 b .LxtsdecNx 426 b .LxtsdecNx
407 427
408.Lxtsdecrestart:
409 ld1 {v4.16b}, [x24]
410.Lxtsdecnotfirst: 428.Lxtsdecnotfirst:
411 dec_prepare w22, x21, x8 429 dec_prepare w3, x2, x8
412.LxtsdecloopNx: 430.LxtsdecloopNx:
413 ldr q7, .Lxts_mul_x 431 next_tweak v4, v4, v8
414 next_tweak v4, v4, v7, v8
415.LxtsdecNx: 432.LxtsdecNx:
416 subs w23, w23, #4 433 subs w4, w4, #4
417 bmi .Lxtsdec1x 434 bmi .Lxtsdec1x
418 ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ 435 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
419 next_tweak v5, v4, v7, v8 436 next_tweak v5, v4, v8
420 eor v0.16b, v0.16b, v4.16b 437 eor v0.16b, v0.16b, v4.16b
421 next_tweak v6, v5, v7, v8 438 next_tweak v6, v5, v8
422 eor v1.16b, v1.16b, v5.16b 439 eor v1.16b, v1.16b, v5.16b
423 eor v2.16b, v2.16b, v6.16b 440 eor v2.16b, v2.16b, v6.16b
424 next_tweak v7, v6, v7, v8 441 next_tweak v7, v6, v8
425 eor v3.16b, v3.16b, v7.16b 442 eor v3.16b, v3.16b, v7.16b
426 bl aes_decrypt_block4x 443 bl aes_decrypt_block4x
427 eor v3.16b, v3.16b, v7.16b 444 eor v3.16b, v3.16b, v7.16b
428 eor v0.16b, v0.16b, v4.16b 445 eor v0.16b, v0.16b, v4.16b
429 eor v1.16b, v1.16b, v5.16b 446 eor v1.16b, v1.16b, v5.16b
430 eor v2.16b, v2.16b, v6.16b 447 eor v2.16b, v2.16b, v6.16b
431 st1 {v0.16b-v3.16b}, [x19], #64 448 st1 {v0.16b-v3.16b}, [x0], #64
432 mov v4.16b, v7.16b 449 mov v4.16b, v7.16b
433 cbz w23, .Lxtsdecout 450 cbz w4, .Lxtsdecout
434 st1 {v4.16b}, [x24] 451 xts_reload_mask v8
435 cond_yield_neon .Lxtsdecrestart
436 b .LxtsdecloopNx 452 b .LxtsdecloopNx
437.Lxtsdec1x: 453.Lxtsdec1x:
438 adds w23, w23, #4 454 adds w4, w4, #4
439 beq .Lxtsdecout 455 beq .Lxtsdecout
440.Lxtsdecloop: 456.Lxtsdecloop:
441 ld1 {v1.16b}, [x20], #16 457 ld1 {v1.16b}, [x1], #16
442 eor v0.16b, v1.16b, v4.16b 458 eor v0.16b, v1.16b, v4.16b
443 decrypt_block v0, w22, x21, x8, w7 459 decrypt_block v0, w3, x2, x8, w7
444 eor v0.16b, v0.16b, v4.16b 460 eor v0.16b, v0.16b, v4.16b
445 st1 {v0.16b}, [x19], #16 461 st1 {v0.16b}, [x0], #16
446 subs w23, w23, #1 462 subs w4, w4, #1
447 beq .Lxtsdecout 463 beq .Lxtsdecout
448 next_tweak v4, v4, v7, v8 464 next_tweak v4, v4, v8
449 b .Lxtsdecloop 465 b .Lxtsdecloop
450.Lxtsdecout: 466.Lxtsdecout:
451 st1 {v4.16b}, [x24] 467 st1 {v4.16b}, [x6]
452 frame_pop 468 ldp x29, x30, [sp], #16
453 ret 469 ret
454AES_ENDPROC(aes_xts_decrypt) 470AES_ENDPROC(aes_xts_decrypt)
455 471
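
The reworked next_tweak macro keeps the tweak-derivation constant in a dedicated register (xtsmask) instead of reloading the .Lxts_mul_x literal from memory on every pass; xts_load_mask materialises effectively the same 128-bit constant, low quadword 1 and high quadword 0x87. In C terms the per-block update is the standard multiply-by-x in GF(2^128); a scalar sketch (little-endian 64-bit limbs assumed, xts_next_tweak is an illustrative name):

#include <stdint.h>

/*
 * Scalar sketch of what next_tweak computes: multiply the 128-bit
 * XTS tweak by x in GF(2^128), folding the carry out of the top bit
 * back in as 0x87.  t[0] holds the low 64 bits, t[1] the high 64.
 */
static void xts_next_tweak(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}

The xts_reload_mask v8 call after each four-block pass is a no-op for the Crypto Extensions build (xtsmask is v16 there, per aes-ce.S above) but rebuilds the constant in the plain NEON build, where xtsmask aliases v7 and the four-block path reuses v7 as a tweak register.
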
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 1c7b45b7268e..29100f692e8a 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -14,6 +14,12 @@
14#define AES_ENTRY(func) ENTRY(neon_ ## func) 14#define AES_ENTRY(func) ENTRY(neon_ ## func)
15#define AES_ENDPROC(func) ENDPROC(neon_ ## func) 15#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
16 16
17 xtsmask .req v7
18
19 .macro xts_reload_mask, tmp
20 xts_load_mask \tmp
21 .endm
22
17 /* multiply by polynomial 'x' in GF(2^8) */ 23 /* multiply by polynomial 'x' in GF(2^8) */
18 .macro mul_by_x, out, in, temp, const 24 .macro mul_by_x, out, in, temp, const
19 sshr \temp, \in, #7 25 sshr \temp, \in, #7
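
The context lines above end at the mul_by_x macro, which multiplies each byte lane by the polynomial 'x' in AES's GF(2^8). A scalar sketch of the same operation (gf256_mul_by_x is an illustrative name; the 0x1b constant is the standard AES field polynomial rather than something visible in this hunk):

#include <stdint.h>

/* Multiply a byte by x in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1. */
static uint8_t gf256_mul_by_x(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0));
}
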
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
deleted file mode 100644
index 8061bf0f9c66..000000000000
--- a/arch/arm64/crypto/crc32-ce-core.S
+++ /dev/null
@@ -1,287 +0,0 @@
1/*
2 * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
3 *
4 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/* GPL HEADER START
12 *
13 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 only,
17 * as published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License version 2 for more details (a copy is included
23 * in the LICENSE file that accompanied this code).
24 *
25 * You should have received a copy of the GNU General Public License
26 * version 2 along with this program; If not, see http://www.gnu.org/licenses
27 *
28 * Please visit http://www.xyratex.com/contact if you need additional
29 * information or have any questions.
30 *
31 * GPL HEADER END
32 */
33
34/*
35 * Copyright 2012 Xyratex Technology Limited
36 *
37 * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
38 * calculation.
39 * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
40 * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
41 * at:
42 * http://www.intel.com/products/processor/manuals/
43 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
44 * Volume 2B: Instruction Set Reference, N-Z
45 *
46 * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
47 * Alexander Boyko <Alexander_Boyko@xyratex.com>
48 */
49
50#include <linux/linkage.h>
51#include <asm/assembler.h>
52
53 .section ".rodata", "a"
54 .align 6
55 .cpu generic+crypto+crc
56
57.Lcrc32_constants:
58 /*
59 * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
60 * #define CONSTANT_R1 0x154442bd4LL
61 *
62 * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
63 * #define CONSTANT_R2 0x1c6e41596LL
64 */
65 .octa 0x00000001c6e415960000000154442bd4
66
67 /*
68 * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
69 * #define CONSTANT_R3 0x1751997d0LL
70 *
71 * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
72 * #define CONSTANT_R4 0x0ccaa009eLL
73 */
74 .octa 0x00000000ccaa009e00000001751997d0
75
76 /*
77 * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
78 * #define CONSTANT_R5 0x163cd6124LL
79 */
80 .quad 0x0000000163cd6124
81 .quad 0x00000000FFFFFFFF
82
83 /*
84 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
85 *
86 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
87 * = 0x1F7011641LL
88 * #define CONSTANT_RU 0x1F7011641LL
89 */
90 .octa 0x00000001F701164100000001DB710641
91
92.Lcrc32c_constants:
93 .octa 0x000000009e4addf800000000740eef02
94 .octa 0x000000014cd00bd600000000f20c0dfe
95 .quad 0x00000000dd45aab8
96 .quad 0x00000000FFFFFFFF
97 .octa 0x00000000dea713f10000000105ec76f0
98
99 vCONSTANT .req v0
100 dCONSTANT .req d0
101 qCONSTANT .req q0
102
103 BUF .req x19
104 LEN .req x20
105 CRC .req x21
106 CONST .req x22
107
108 vzr .req v9
109
110 /**
111 * Calculate crc32
112 * BUF - buffer
113 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
114 * CRC - initial crc32
115 * return %eax crc32
116 * uint crc32_pmull_le(unsigned char const *buffer,
117 * size_t len, uint crc32)
118 */
119 .text
120ENTRY(crc32_pmull_le)
121 adr_l x3, .Lcrc32_constants
122 b 0f
123
124ENTRY(crc32c_pmull_le)
125 adr_l x3, .Lcrc32c_constants
126
1270: frame_push 4, 64
128
129 mov BUF, x0
130 mov LEN, x1
131 mov CRC, x2
132 mov CONST, x3
133
134 bic LEN, LEN, #15
135 ld1 {v1.16b-v4.16b}, [BUF], #0x40
136 movi vzr.16b, #0
137 fmov dCONSTANT, CRC
138 eor v1.16b, v1.16b, vCONSTANT.16b
139 sub LEN, LEN, #0x40
140 cmp LEN, #0x40
141 b.lt less_64
142
143 ldr qCONSTANT, [CONST]
144
145loop_64: /* 64 bytes Full cache line folding */
146 sub LEN, LEN, #0x40
147
148 pmull2 v5.1q, v1.2d, vCONSTANT.2d
149 pmull2 v6.1q, v2.2d, vCONSTANT.2d
150 pmull2 v7.1q, v3.2d, vCONSTANT.2d
151 pmull2 v8.1q, v4.2d, vCONSTANT.2d
152
153 pmull v1.1q, v1.1d, vCONSTANT.1d
154 pmull v2.1q, v2.1d, vCONSTANT.1d
155 pmull v3.1q, v3.1d, vCONSTANT.1d
156 pmull v4.1q, v4.1d, vCONSTANT.1d
157
158 eor v1.16b, v1.16b, v5.16b
159 ld1 {v5.16b}, [BUF], #0x10
160 eor v2.16b, v2.16b, v6.16b
161 ld1 {v6.16b}, [BUF], #0x10
162 eor v3.16b, v3.16b, v7.16b
163 ld1 {v7.16b}, [BUF], #0x10
164 eor v4.16b, v4.16b, v8.16b
165 ld1 {v8.16b}, [BUF], #0x10
166
167 eor v1.16b, v1.16b, v5.16b
168 eor v2.16b, v2.16b, v6.16b
169 eor v3.16b, v3.16b, v7.16b
170 eor v4.16b, v4.16b, v8.16b
171
172 cmp LEN, #0x40
173 b.lt less_64
174
175 if_will_cond_yield_neon
176 stp q1, q2, [sp, #.Lframe_local_offset]
177 stp q3, q4, [sp, #.Lframe_local_offset + 32]
178 do_cond_yield_neon
179 ldp q1, q2, [sp, #.Lframe_local_offset]
180 ldp q3, q4, [sp, #.Lframe_local_offset + 32]
181 ldr qCONSTANT, [CONST]
182 movi vzr.16b, #0
183 endif_yield_neon
184 b loop_64
185
186less_64: /* Folding cache line into 128bit */
187 ldr qCONSTANT, [CONST, #16]
188
189 pmull2 v5.1q, v1.2d, vCONSTANT.2d
190 pmull v1.1q, v1.1d, vCONSTANT.1d
191 eor v1.16b, v1.16b, v5.16b
192 eor v1.16b, v1.16b, v2.16b
193
194 pmull2 v5.1q, v1.2d, vCONSTANT.2d
195 pmull v1.1q, v1.1d, vCONSTANT.1d
196 eor v1.16b, v1.16b, v5.16b
197 eor v1.16b, v1.16b, v3.16b
198
199 pmull2 v5.1q, v1.2d, vCONSTANT.2d
200 pmull v1.1q, v1.1d, vCONSTANT.1d
201 eor v1.16b, v1.16b, v5.16b
202 eor v1.16b, v1.16b, v4.16b
203
204 cbz LEN, fold_64
205
206loop_16: /* Folding rest buffer into 128bit */
207 subs LEN, LEN, #0x10
208
209 ld1 {v2.16b}, [BUF], #0x10
210 pmull2 v5.1q, v1.2d, vCONSTANT.2d
211 pmull v1.1q, v1.1d, vCONSTANT.1d
212 eor v1.16b, v1.16b, v5.16b
213 eor v1.16b, v1.16b, v2.16b
214
215 b.ne loop_16
216
217fold_64:
218 /* perform the last 64 bit fold, also adds 32 zeroes
219 * to the input stream */
220 ext v2.16b, v1.16b, v1.16b, #8
221 pmull2 v2.1q, v2.2d, vCONSTANT.2d
222 ext v1.16b, v1.16b, vzr.16b, #8
223 eor v1.16b, v1.16b, v2.16b
224
225 /* final 32-bit fold */
226 ldr dCONSTANT, [CONST, #32]
227 ldr d3, [CONST, #40]
228
229 ext v2.16b, v1.16b, vzr.16b, #4
230 and v1.16b, v1.16b, v3.16b
231 pmull v1.1q, v1.1d, vCONSTANT.1d
232 eor v1.16b, v1.16b, v2.16b
233
234 /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
235 ldr qCONSTANT, [CONST, #48]
236
237 and v2.16b, v1.16b, v3.16b
238 ext v2.16b, vzr.16b, v2.16b, #8
239 pmull2 v2.1q, v2.2d, vCONSTANT.2d
240 and v2.16b, v2.16b, v3.16b
241 pmull v2.1q, v2.1d, vCONSTANT.1d
242 eor v1.16b, v1.16b, v2.16b
243 mov w0, v1.s[1]
244
245 frame_pop
246 ret
247ENDPROC(crc32_pmull_le)
248ENDPROC(crc32c_pmull_le)
249
250 .macro __crc32, c
2510: subs x2, x2, #16
252 b.mi 8f
253 ldp x3, x4, [x1], #16
254CPU_BE( rev x3, x3 )
255CPU_BE( rev x4, x4 )
256 crc32\c\()x w0, w0, x3
257 crc32\c\()x w0, w0, x4
258 b.ne 0b
259 ret
260
2618: tbz x2, #3, 4f
262 ldr x3, [x1], #8
263CPU_BE( rev x3, x3 )
264 crc32\c\()x w0, w0, x3
2654: tbz x2, #2, 2f
266 ldr w3, [x1], #4
267CPU_BE( rev w3, w3 )
268 crc32\c\()w w0, w0, w3
2692: tbz x2, #1, 1f
270 ldrh w3, [x1], #2
271CPU_BE( rev16 w3, w3 )
272 crc32\c\()h w0, w0, w3
2731: tbz x2, #0, 0f
274 ldrb w3, [x1]
275 crc32\c\()b w0, w0, w3
2760: ret
277 .endm
278
279 .align 5
280ENTRY(crc32_armv8_le)
281 __crc32
282ENDPROC(crc32_armv8_le)
283
284 .align 5
285ENTRY(crc32c_armv8_le)
286 __crc32 c
287ENDPROC(crc32c_armv8_le)
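
For reference, the deleted __crc32 macro folds 16 bytes per iteration with the ARMv8 CRC32 instructions and then drains the 0-15 byte tail by testing individual bits of the remaining length, exactly as its tbz ladder does. A user-space sketch of the same structure, assuming a toolchain that provides <arm_acle.h> and is invoked with -march=armv8-a+crc (crc32_armv8_le_sketch is an illustrative name; a little-endian host is assumed, the assembly byte-swaps on big-endian kernels):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arm_acle.h>

static uint32_t crc32_armv8_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	/* main loop: two 8-byte CRC32 steps per 16-byte chunk */
	while (len >= 16) {
		uint64_t a, b;

		memcpy(&a, p, 8);
		memcpy(&b, p + 8, 8);
		crc = __crc32d(crc, a);
		crc = __crc32d(crc, b);
		p += 16;
		len -= 16;
	}
	/* tail: consume 8/4/2/1 bytes based on the low bits of len */
	if (len & 8) {
		uint64_t a;

		memcpy(&a, p, 8);
		crc = __crc32d(crc, a);
		p += 8;
	}
	if (len & 4) {
		uint32_t a;

		memcpy(&a, p, 4);
		crc = __crc32w(crc, a);
		p += 4;
	}
	if (len & 2) {
		uint16_t a;

		memcpy(&a, p, 2);
		crc = __crc32h(crc, a);
		p += 2;
	}
	if (len & 1)
		crc = __crc32b(crc, *p);
	return crc;
}
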
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
deleted file mode 100644
index 34b4e3d46aab..000000000000
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ /dev/null
@@ -1,244 +0,0 @@
1/*
2 * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions
3 *
4 * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/cpufeature.h>
12#include <linux/crc32.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/string.h>
17
18#include <crypto/internal/hash.h>
19
20#include <asm/hwcap.h>
21#include <asm/neon.h>
22#include <asm/simd.h>
23#include <asm/unaligned.h>
24
25#define PMULL_MIN_LEN 64L /* minimum size of buffer
26 * for crc32_pmull_le_16 */
27#define SCALE_F 16L /* size of NEON register */
28
29asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc);
30asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len);
31
32asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc);
33asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len);
34
35static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len);
36static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len);
37
38static int crc32_pmull_cra_init(struct crypto_tfm *tfm)
39{
40 u32 *key = crypto_tfm_ctx(tfm);
41
42 *key = 0;
43 return 0;
44}
45
46static int crc32c_pmull_cra_init(struct crypto_tfm *tfm)
47{
48 u32 *key = crypto_tfm_ctx(tfm);
49
50 *key = ~0;
51 return 0;
52}
53
54static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key,
55 unsigned int keylen)
56{
57 u32 *mctx = crypto_shash_ctx(hash);
58
59 if (keylen != sizeof(u32)) {
60 crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
61 return -EINVAL;
62 }
63 *mctx = le32_to_cpup((__le32 *)key);
64 return 0;
65}
66
67static int crc32_pmull_init(struct shash_desc *desc)
68{
69 u32 *mctx = crypto_shash_ctx(desc->tfm);
70 u32 *crc = shash_desc_ctx(desc);
71
72 *crc = *mctx;
73 return 0;
74}
75
76static int crc32_update(struct shash_desc *desc, const u8 *data,
77 unsigned int length)
78{
79 u32 *crc = shash_desc_ctx(desc);
80
81 *crc = crc32_armv8_le(*crc, data, length);
82 return 0;
83}
84
85static int crc32c_update(struct shash_desc *desc, const u8 *data,
86 unsigned int length)
87{
88 u32 *crc = shash_desc_ctx(desc);
89
90 *crc = crc32c_armv8_le(*crc, data, length);
91 return 0;
92}
93
94static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
95 unsigned int length)
96{
97 u32 *crc = shash_desc_ctx(desc);
98 unsigned int l;
99
100 if ((u64)data % SCALE_F) {
101 l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
102
103 *crc = fallback_crc32(*crc, data, l);
104
105 data += l;
106 length -= l;
107 }
108
109 if (length >= PMULL_MIN_LEN && may_use_simd()) {
110 l = round_down(length, SCALE_F);
111
112 kernel_neon_begin();
113 *crc = crc32_pmull_le(data, l, *crc);
114 kernel_neon_end();
115
116 data += l;
117 length -= l;
118 }
119
120 if (length > 0)
121 *crc = fallback_crc32(*crc, data, length);
122
123 return 0;
124}
125
126static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
127 unsigned int length)
128{
129 u32 *crc = shash_desc_ctx(desc);
130 unsigned int l;
131
132 if ((u64)data % SCALE_F) {
133 l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
134
135 *crc = fallback_crc32c(*crc, data, l);
136
137 data += l;
138 length -= l;
139 }
140
141 if (length >= PMULL_MIN_LEN && may_use_simd()) {
142 l = round_down(length, SCALE_F);
143
144 kernel_neon_begin();
145 *crc = crc32c_pmull_le(data, l, *crc);
146 kernel_neon_end();
147
148 data += l;
149 length -= l;
150 }
151
152 if (length > 0) {
153 *crc = fallback_crc32c(*crc, data, length);
154 }
155
156 return 0;
157}
158
159static int crc32_pmull_final(struct shash_desc *desc, u8 *out)
160{
161 u32 *crc = shash_desc_ctx(desc);
162
163 put_unaligned_le32(*crc, out);
164 return 0;
165}
166
167static int crc32c_pmull_final(struct shash_desc *desc, u8 *out)
168{
169 u32 *crc = shash_desc_ctx(desc);
170
171 put_unaligned_le32(~*crc, out);
172 return 0;
173}
174
175static struct shash_alg crc32_pmull_algs[] = { {
176 .setkey = crc32_pmull_setkey,
177 .init = crc32_pmull_init,
178 .update = crc32_update,
179 .final = crc32_pmull_final,
180 .descsize = sizeof(u32),
181 .digestsize = sizeof(u32),
182
183 .base.cra_ctxsize = sizeof(u32),
184 .base.cra_init = crc32_pmull_cra_init,
185 .base.cra_name = "crc32",
186 .base.cra_driver_name = "crc32-arm64-ce",
187 .base.cra_priority = 200,
188 .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
189 .base.cra_blocksize = 1,
190 .base.cra_module = THIS_MODULE,
191}, {
192 .setkey = crc32_pmull_setkey,
193 .init = crc32_pmull_init,
194 .update = crc32c_update,
195 .final = crc32c_pmull_final,
196 .descsize = sizeof(u32),
197 .digestsize = sizeof(u32),
198
199 .base.cra_ctxsize = sizeof(u32),
200 .base.cra_init = crc32c_pmull_cra_init,
201 .base.cra_name = "crc32c",
202 .base.cra_driver_name = "crc32c-arm64-ce",
203 .base.cra_priority = 200,
204 .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
205 .base.cra_blocksize = 1,
206 .base.cra_module = THIS_MODULE,
207} };
208
209static int __init crc32_pmull_mod_init(void)
210{
211 if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
212 crc32_pmull_algs[0].update = crc32_pmull_update;
213 crc32_pmull_algs[1].update = crc32c_pmull_update;
214
215 if (elf_hwcap & HWCAP_CRC32) {
216 fallback_crc32 = crc32_armv8_le;
217 fallback_crc32c = crc32c_armv8_le;
218 } else {
219 fallback_crc32 = crc32_le;
220 fallback_crc32c = __crc32c_le;
221 }
222 } else if (!(elf_hwcap & HWCAP_CRC32)) {
223 return -ENODEV;
224 }
225 return crypto_register_shashes(crc32_pmull_algs,
226 ARRAY_SIZE(crc32_pmull_algs));
227}
228
229static void __exit crc32_pmull_mod_exit(void)
230{
231 crypto_unregister_shashes(crc32_pmull_algs,
232 ARRAY_SIZE(crc32_pmull_algs));
233}
234
235static const struct cpu_feature crc32_cpu_feature[] = {
236 { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
237};
238MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
239
240module_init(crc32_pmull_mod_init);
241module_exit(crc32_pmull_mod_exit);
242
243MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
244MODULE_LICENSE("GPL v2");
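
The deleted glue's update path follows a head/bulk/tail pattern, visible in crc32_pmull_update() and crc32c_pmull_update() above: align the pointer with the fallback, fold the 16-byte-aligned bulk with PMULL only when NEON may be used, then finish the tail with the fallback again. A minimal standalone C sketch of that pattern; bulk_crc32() and scalar_crc32() are placeholder names standing in for crc32_pmull_le() and the fallback chosen at module init, not kernel APIs.

    #include <stdint.h>
    #include <stddef.h>

    #define SCALE_F        16   /* NEON register width in bytes */
    #define PMULL_MIN_LEN  64   /* below this, PMULL setup cost dominates */

    /* Placeholder primitives for the PMULL routine and the scalar fallback. */
    uint32_t bulk_crc32(const uint8_t *p, size_t len, uint32_t crc);
    uint32_t scalar_crc32(uint32_t crc, const uint8_t *p, size_t len);

    static uint32_t crc32_update_sketch(uint32_t crc, const uint8_t *data,
                                        size_t length, int simd_usable)
    {
        /* 1. Bring the pointer up to 16-byte alignment with the fallback. */
        if ((uintptr_t)data % SCALE_F) {
            size_t l = SCALE_F - ((uintptr_t)data % SCALE_F);

            if (l > length)
                l = length;
            crc = scalar_crc32(crc, data, l);
            data += l;
            length -= l;
        }

        /* 2. Fold the aligned bulk with PMULL, a whole number of 16-byte
         *    blocks at a time, only if SIMD may be used in this context. */
        if (length >= PMULL_MIN_LEN && simd_usable) {
            size_t l = length - (length % SCALE_F);

            crc = bulk_crc32(data, l, crc);
            data += l;
            length -= l;
        }

        /* 3. Finish any remaining tail with the fallback. */
        if (length > 0)
            crc = scalar_crc32(crc, data, length);

        return crc;
    }

Because the PMULL path only ever sees aligned runs of at least 64 bytes, the fallback selected at init (the ARMv8 CRC32 instructions when HWCAP_CRC32 is set, otherwise the generic crc32_le/__crc32c_le) handles all short and misaligned work.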
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index 663ea71cdb38..9e82e8e8ed05 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -80,7 +80,186 @@
80 80
81 vzr .req v13 81 vzr .req v13
82 82
83ENTRY(crc_t10dif_pmull) 83 ad .req v14
84 bd .req v10
85
86 k00_16 .req v15
87 k32_48 .req v16
88
89 t3 .req v17
90 t4 .req v18
91 t5 .req v19
92 t6 .req v20
93 t7 .req v21
94 t8 .req v22
95 t9 .req v23
96
97 perm1 .req v24
98 perm2 .req v25
99 perm3 .req v26
100 perm4 .req v27
101
102 bd1 .req v28
103 bd2 .req v29
104 bd3 .req v30
105 bd4 .req v31
106
107 .macro __pmull_init_p64
108 .endm
109
110 .macro __pmull_pre_p64, bd
111 .endm
112
113 .macro __pmull_init_p8
114 // k00_16 := 0x0000000000000000_000000000000ffff
115 // k32_48 := 0x00000000ffffffff_0000ffffffffffff
116 movi k32_48.2d, #0xffffffff
117 mov k32_48.h[2], k32_48.h[0]
118 ushr k00_16.2d, k32_48.2d, #32
119
120 // prepare the permutation vectors
121 mov_q x5, 0x080f0e0d0c0b0a09
122 movi perm4.8b, #8
123 dup perm1.2d, x5
124 eor perm1.16b, perm1.16b, perm4.16b
125 ushr perm2.2d, perm1.2d, #8
126 ushr perm3.2d, perm1.2d, #16
127 ushr perm4.2d, perm1.2d, #24
128 sli perm2.2d, perm1.2d, #56
129 sli perm3.2d, perm1.2d, #48
130 sli perm4.2d, perm1.2d, #40
131 .endm
132
133 .macro __pmull_pre_p8, bd
134 tbl bd1.16b, {\bd\().16b}, perm1.16b
135 tbl bd2.16b, {\bd\().16b}, perm2.16b
136 tbl bd3.16b, {\bd\().16b}, perm3.16b
137 tbl bd4.16b, {\bd\().16b}, perm4.16b
138 .endm
139
140__pmull_p8_core:
141.L__pmull_p8_core:
142 ext t4.8b, ad.8b, ad.8b, #1 // A1
143 ext t5.8b, ad.8b, ad.8b, #2 // A2
144 ext t6.8b, ad.8b, ad.8b, #3 // A3
145
146 pmull t4.8h, t4.8b, bd.8b // F = A1*B
147 pmull t8.8h, ad.8b, bd1.8b // E = A*B1
148 pmull t5.8h, t5.8b, bd.8b // H = A2*B
149 pmull t7.8h, ad.8b, bd2.8b // G = A*B2
150 pmull t6.8h, t6.8b, bd.8b // J = A3*B
151 pmull t9.8h, ad.8b, bd3.8b // I = A*B3
152 pmull t3.8h, ad.8b, bd4.8b // K = A*B4
153 b 0f
154
155.L__pmull_p8_core2:
156 tbl t4.16b, {ad.16b}, perm1.16b // A1
157 tbl t5.16b, {ad.16b}, perm2.16b // A2
158 tbl t6.16b, {ad.16b}, perm3.16b // A3
159
160 pmull2 t4.8h, t4.16b, bd.16b // F = A1*B
161 pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
162 pmull2 t5.8h, t5.16b, bd.16b // H = A2*B
163 pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
164 pmull2 t6.8h, t6.16b, bd.16b // J = A3*B
165 pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
166 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
167
1680: eor t4.16b, t4.16b, t8.16b // L = E + F
169 eor t5.16b, t5.16b, t7.16b // M = G + H
170 eor t6.16b, t6.16b, t9.16b // N = I + J
171
172 uzp1 t8.2d, t4.2d, t5.2d
173 uzp2 t4.2d, t4.2d, t5.2d
174 uzp1 t7.2d, t6.2d, t3.2d
175 uzp2 t6.2d, t6.2d, t3.2d
176
177 // t4 = (L) (P0 + P1) << 8
178 // t5 = (M) (P2 + P3) << 16
179 eor t8.16b, t8.16b, t4.16b
180 and t4.16b, t4.16b, k32_48.16b
181
182 // t6 = (N) (P4 + P5) << 24
183 // t7 = (K) (P6 + P7) << 32
184 eor t7.16b, t7.16b, t6.16b
185 and t6.16b, t6.16b, k00_16.16b
186
187 eor t8.16b, t8.16b, t4.16b
188 eor t7.16b, t7.16b, t6.16b
189
190 zip2 t5.2d, t8.2d, t4.2d
191 zip1 t4.2d, t8.2d, t4.2d
192 zip2 t3.2d, t7.2d, t6.2d
193 zip1 t6.2d, t7.2d, t6.2d
194
195 ext t4.16b, t4.16b, t4.16b, #15
196 ext t5.16b, t5.16b, t5.16b, #14
197 ext t6.16b, t6.16b, t6.16b, #13
198 ext t3.16b, t3.16b, t3.16b, #12
199
200 eor t4.16b, t4.16b, t5.16b
201 eor t6.16b, t6.16b, t3.16b
202 ret
203ENDPROC(__pmull_p8_core)
204
205 .macro __pmull_p8, rq, ad, bd, i
206 .ifnc \bd, v10
207 .err
208 .endif
209 mov ad.16b, \ad\().16b
210 .ifb \i
211 pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B
212 .else
213 pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B
214 .endif
215
216 bl .L__pmull_p8_core\i
217
218 eor \rq\().16b, \rq\().16b, t4.16b
219 eor \rq\().16b, \rq\().16b, t6.16b
220 .endm
221
222 .macro fold64, p, reg1, reg2
223 ldp q11, q12, [arg2], #0x20
224
225 __pmull_\p v8, \reg1, v10, 2
226 __pmull_\p \reg1, \reg1, v10
227
228CPU_LE( rev64 v11.16b, v11.16b )
229CPU_LE( rev64 v12.16b, v12.16b )
230
231 __pmull_\p v9, \reg2, v10, 2
232 __pmull_\p \reg2, \reg2, v10
233
234CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
235CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
236
237 eor \reg1\().16b, \reg1\().16b, v8.16b
238 eor \reg2\().16b, \reg2\().16b, v9.16b
239 eor \reg1\().16b, \reg1\().16b, v11.16b
240 eor \reg2\().16b, \reg2\().16b, v12.16b
241 .endm
242
243 .macro fold16, p, reg, rk
244 __pmull_\p v8, \reg, v10
245 __pmull_\p \reg, \reg, v10, 2
246 .ifnb \rk
247 ldr_l q10, \rk, x8
248 __pmull_pre_\p v10
249 .endif
250 eor v7.16b, v7.16b, v8.16b
251 eor v7.16b, v7.16b, \reg\().16b
252 .endm
253
254 .macro __pmull_p64, rd, rn, rm, n
255 .ifb \n
256 pmull \rd\().1q, \rn\().1d, \rm\().1d
257 .else
258 pmull2 \rd\().1q, \rn\().2d, \rm\().2d
259 .endif
260 .endm
261
262 .macro crc_t10dif_pmull, p
84 frame_push 3, 128 263 frame_push 3, 128
85 264
86 mov arg1_low32, w0 265 mov arg1_low32, w0
@@ -89,6 +268,8 @@ ENTRY(crc_t10dif_pmull)
89 268
90 movi vzr.16b, #0 // init zero register 269 movi vzr.16b, #0 // init zero register
91 270
271 __pmull_init_\p
272
92 // adjust the 16-bit initial_crc value, scale it to 32 bits 273 // adjust the 16-bit initial_crc value, scale it to 32 bits
93 lsl arg1_low32, arg1_low32, #16 274 lsl arg1_low32, arg1_low32, #16
94 275
@@ -96,7 +277,7 @@ ENTRY(crc_t10dif_pmull)
96 cmp arg3, #256 277 cmp arg3, #256
97 278
98 // for sizes less than 128, we can't fold 64B at a time... 279 // for sizes less than 128, we can't fold 64B at a time...
99 b.lt _less_than_128 280 b.lt .L_less_than_128_\@
100 281
101 // load the initial crc value 282 // load the initial crc value
102 // crc value does not need to be byte-reflected, but it needs 283 // crc value does not need to be byte-reflected, but it needs
@@ -137,6 +318,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
137 ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 318 ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4
138 // type of pmull instruction 319 // type of pmull instruction
139 // will determine which constant to use 320 // will determine which constant to use
321 __pmull_pre_\p v10
140 322
141 // 323 //
142 // we subtract 256 instead of 128 to save one instruction from the loop 324 // we subtract 256 instead of 128 to save one instruction from the loop
@@ -147,41 +329,19 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
147 // buffer. The _fold_64_B_loop will fold 64B at a time 329 // buffer. The _fold_64_B_loop will fold 64B at a time
148 // until we have 64+y Bytes of buffer 330 // until we have 64+y Bytes of buffer
149 331
150
151 // fold 64B at a time. This section of the code folds 4 vector 332 // fold 64B at a time. This section of the code folds 4 vector
152 // registers in parallel 333 // registers in parallel
153_fold_64_B_loop: 334.L_fold_64_B_loop_\@:
154 335
155 .macro fold64, reg1, reg2 336 fold64 \p, v0, v1
156 ldp q11, q12, [arg2], #0x20 337 fold64 \p, v2, v3
157 338 fold64 \p, v4, v5
158 pmull2 v8.1q, \reg1\().2d, v10.2d 339 fold64 \p, v6, v7
159 pmull \reg1\().1q, \reg1\().1d, v10.1d
160
161CPU_LE( rev64 v11.16b, v11.16b )
162CPU_LE( rev64 v12.16b, v12.16b )
163
164 pmull2 v9.1q, \reg2\().2d, v10.2d
165 pmull \reg2\().1q, \reg2\().1d, v10.1d
166
167CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
168CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
169
170 eor \reg1\().16b, \reg1\().16b, v8.16b
171 eor \reg2\().16b, \reg2\().16b, v9.16b
172 eor \reg1\().16b, \reg1\().16b, v11.16b
173 eor \reg2\().16b, \reg2\().16b, v12.16b
174 .endm
175
176 fold64 v0, v1
177 fold64 v2, v3
178 fold64 v4, v5
179 fold64 v6, v7
180 340
181 subs arg3, arg3, #128 341 subs arg3, arg3, #128
182 342
183 // check if there is another 64B in the buffer to be able to fold 343 // check if there is another 64B in the buffer to be able to fold
184 b.lt _fold_64_B_end 344 b.lt .L_fold_64_B_end_\@
185 345
186 if_will_cond_yield_neon 346 if_will_cond_yield_neon
187 stp q0, q1, [sp, #.Lframe_local_offset] 347 stp q0, q1, [sp, #.Lframe_local_offset]
@@ -195,11 +355,13 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
195 ldp q6, q7, [sp, #.Lframe_local_offset + 96] 355 ldp q6, q7, [sp, #.Lframe_local_offset + 96]
196 ldr_l q10, rk3, x8 356 ldr_l q10, rk3, x8
197 movi vzr.16b, #0 // init zero register 357 movi vzr.16b, #0 // init zero register
358 __pmull_init_\p
359 __pmull_pre_\p v10
198 endif_yield_neon 360 endif_yield_neon
199 361
200 b _fold_64_B_loop 362 b .L_fold_64_B_loop_\@
201 363
202_fold_64_B_end: 364.L_fold_64_B_end_\@:
203 // at this point, the buffer pointer is pointing at the last y Bytes 365 // at this point, the buffer pointer is pointing at the last y Bytes
204 // of the buffer the 64B of folded data is in 4 of the vector 366 // of the buffer the 64B of folded data is in 4 of the vector
205 // registers: v0, v1, v2, v3 367 // registers: v0, v1, v2, v3
@@ -208,38 +370,29 @@ _fold_64_B_end:
208 // constants 370 // constants
209 371
210 ldr_l q10, rk9, x8 372 ldr_l q10, rk9, x8
373 __pmull_pre_\p v10
211 374
212 .macro fold16, reg, rk 375 fold16 \p, v0, rk11
213 pmull v8.1q, \reg\().1d, v10.1d 376 fold16 \p, v1, rk13
214 pmull2 \reg\().1q, \reg\().2d, v10.2d 377 fold16 \p, v2, rk15
215 .ifnb \rk 378 fold16 \p, v3, rk17
216 ldr_l q10, \rk, x8 379 fold16 \p, v4, rk19
217 .endif 380 fold16 \p, v5, rk1
218 eor v7.16b, v7.16b, v8.16b 381 fold16 \p, v6
219 eor v7.16b, v7.16b, \reg\().16b
220 .endm
221
222 fold16 v0, rk11
223 fold16 v1, rk13
224 fold16 v2, rk15
225 fold16 v3, rk17
226 fold16 v4, rk19
227 fold16 v5, rk1
228 fold16 v6
229 382
230 // instead of 64, we add 48 to the loop counter to save 1 instruction 383 // instead of 64, we add 48 to the loop counter to save 1 instruction
231 // from the loop instead of a cmp instruction, we use the negative 384 // from the loop instead of a cmp instruction, we use the negative
232 // flag with the jl instruction 385 // flag with the jl instruction
233 adds arg3, arg3, #(128-16) 386 adds arg3, arg3, #(128-16)
234 b.lt _final_reduction_for_128 387 b.lt .L_final_reduction_for_128_\@
235 388
236 // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 389 // now we have 16+y bytes left to reduce. 16 Bytes is in register v7
237 // and the rest is in memory. We can fold 16 bytes at a time if y>=16 390 // and the rest is in memory. We can fold 16 bytes at a time if y>=16
238 // continue folding 16B at a time 391 // continue folding 16B at a time
239 392
240_16B_reduction_loop: 393.L_16B_reduction_loop_\@:
241 pmull v8.1q, v7.1d, v10.1d 394 __pmull_\p v8, v7, v10
242 pmull2 v7.1q, v7.2d, v10.2d 395 __pmull_\p v7, v7, v10, 2
243 eor v7.16b, v7.16b, v8.16b 396 eor v7.16b, v7.16b, v8.16b
244 397
245 ldr q0, [arg2], #16 398 ldr q0, [arg2], #16
@@ -251,22 +404,22 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
251 // instead of a cmp instruction, we utilize the flags with the 404 // instead of a cmp instruction, we utilize the flags with the
252 // jge instruction equivalent of: cmp arg3, 16-16 405 // jge instruction equivalent of: cmp arg3, 16-16
253 // check if there is any more 16B in the buffer to be able to fold 406 // check if there is any more 16B in the buffer to be able to fold
254 b.ge _16B_reduction_loop 407 b.ge .L_16B_reduction_loop_\@
255 408
256 // now we have 16+z bytes left to reduce, where 0<= z < 16. 409 // now we have 16+z bytes left to reduce, where 0<= z < 16.
257 // first, we reduce the data in the xmm7 register 410 // first, we reduce the data in the xmm7 register
258 411
259_final_reduction_for_128: 412.L_final_reduction_for_128_\@:
260 // check if any more data to fold. If not, compute the CRC of 413 // check if any more data to fold. If not, compute the CRC of
261 // the final 128 bits 414 // the final 128 bits
262 adds arg3, arg3, #16 415 adds arg3, arg3, #16
263 b.eq _128_done 416 b.eq .L_128_done_\@
264 417
265 // here we are getting data that is less than 16 bytes. 418 // here we are getting data that is less than 16 bytes.
266 // since we know that there was data before the pointer, we can 419 // since we know that there was data before the pointer, we can
267 // offset the input pointer before the actual point, to receive 420 // offset the input pointer before the actual point, to receive
268 // exactly 16 bytes. after that the registers need to be adjusted. 421 // exactly 16 bytes. after that the registers need to be adjusted.
269_get_last_two_regs: 422.L_get_last_two_regs_\@:
270 add arg2, arg2, arg3 423 add arg2, arg2, arg3
271 ldr q1, [arg2, #-16] 424 ldr q1, [arg2, #-16]
272CPU_LE( rev64 v1.16b, v1.16b ) 425CPU_LE( rev64 v1.16b, v1.16b )
@@ -291,47 +444,48 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
291 bsl v0.16b, v2.16b, v1.16b 444 bsl v0.16b, v2.16b, v1.16b
292 445
293 // fold 16 Bytes 446 // fold 16 Bytes
294 pmull v8.1q, v7.1d, v10.1d 447 __pmull_\p v8, v7, v10
295 pmull2 v7.1q, v7.2d, v10.2d 448 __pmull_\p v7, v7, v10, 2
296 eor v7.16b, v7.16b, v8.16b 449 eor v7.16b, v7.16b, v8.16b
297 eor v7.16b, v7.16b, v0.16b 450 eor v7.16b, v7.16b, v0.16b
298 451
299_128_done: 452.L_128_done_\@:
300 // compute crc of a 128-bit value 453 // compute crc of a 128-bit value
301 ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 454 ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10
455 __pmull_pre_\p v10
302 456
303 // 64b fold 457 // 64b fold
304 ext v0.16b, vzr.16b, v7.16b, #8 458 ext v0.16b, vzr.16b, v7.16b, #8
305 mov v7.d[0], v7.d[1] 459 mov v7.d[0], v7.d[1]
306 pmull v7.1q, v7.1d, v10.1d 460 __pmull_\p v7, v7, v10
307 eor v7.16b, v7.16b, v0.16b 461 eor v7.16b, v7.16b, v0.16b
308 462
309 // 32b fold 463 // 32b fold
310 ext v0.16b, v7.16b, vzr.16b, #4 464 ext v0.16b, v7.16b, vzr.16b, #4
311 mov v7.s[3], vzr.s[0] 465 mov v7.s[3], vzr.s[0]
312 pmull2 v0.1q, v0.2d, v10.2d 466 __pmull_\p v0, v0, v10, 2
313 eor v7.16b, v7.16b, v0.16b 467 eor v7.16b, v7.16b, v0.16b
314 468
315 // barrett reduction 469 // barrett reduction
316_barrett:
317 ldr_l q10, rk7, x8 470 ldr_l q10, rk7, x8
471 __pmull_pre_\p v10
318 mov v0.d[0], v7.d[1] 472 mov v0.d[0], v7.d[1]
319 473
320 pmull v0.1q, v0.1d, v10.1d 474 __pmull_\p v0, v0, v10
321 ext v0.16b, vzr.16b, v0.16b, #12 475 ext v0.16b, vzr.16b, v0.16b, #12
322 pmull2 v0.1q, v0.2d, v10.2d 476 __pmull_\p v0, v0, v10, 2
323 ext v0.16b, vzr.16b, v0.16b, #12 477 ext v0.16b, vzr.16b, v0.16b, #12
324 eor v7.16b, v7.16b, v0.16b 478 eor v7.16b, v7.16b, v0.16b
325 mov w0, v7.s[1] 479 mov w0, v7.s[1]
326 480
327_cleanup: 481.L_cleanup_\@:
328 // scale the result back to 16 bits 482 // scale the result back to 16 bits
329 lsr x0, x0, #16 483 lsr x0, x0, #16
330 frame_pop 484 frame_pop
331 ret 485 ret
332 486
333_less_than_128: 487.L_less_than_128_\@:
334 cbz arg3, _cleanup 488 cbz arg3, .L_cleanup_\@
335 489
336 movi v0.16b, #0 490 movi v0.16b, #0
337 mov v0.s[3], arg1_low32 // get the initial crc value 491 mov v0.s[3], arg1_low32 // get the initial crc value
@@ -342,20 +496,21 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
342 eor v7.16b, v7.16b, v0.16b // xor the initial crc value 496 eor v7.16b, v7.16b, v0.16b // xor the initial crc value
343 497
344 cmp arg3, #16 498 cmp arg3, #16
345 b.eq _128_done // exactly 16 left 499 b.eq .L_128_done_\@ // exactly 16 left
346 b.lt _less_than_16_left 500 b.lt .L_less_than_16_left_\@
347 501
348 ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 502 ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10
503 __pmull_pre_\p v10
349 504
350 // update the counter. subtract 32 instead of 16 to save one 505 // update the counter. subtract 32 instead of 16 to save one
351 // instruction from the loop 506 // instruction from the loop
352 subs arg3, arg3, #32 507 subs arg3, arg3, #32
353 b.ge _16B_reduction_loop 508 b.ge .L_16B_reduction_loop_\@
354 509
355 add arg3, arg3, #16 510 add arg3, arg3, #16
356 b _get_last_two_regs 511 b .L_get_last_two_regs_\@
357 512
358_less_than_16_left: 513.L_less_than_16_left_\@:
359 // shl r9, 4 514 // shl r9, 4
360 adr_l x0, tbl_shf_table + 16 515 adr_l x0, tbl_shf_table + 16
361 sub x0, x0, arg3 516 sub x0, x0, arg3
@@ -363,8 +518,17 @@ _less_than_16_left:
363 movi v9.16b, #0x80 518 movi v9.16b, #0x80
364 eor v0.16b, v0.16b, v9.16b 519 eor v0.16b, v0.16b, v9.16b
365 tbl v7.16b, {v7.16b}, v0.16b 520 tbl v7.16b, {v7.16b}, v0.16b
366 b _128_done 521 b .L_128_done_\@
367ENDPROC(crc_t10dif_pmull) 522 .endm
523
524ENTRY(crc_t10dif_pmull_p8)
525 crc_t10dif_pmull p8
526ENDPROC(crc_t10dif_pmull_p8)
527
528 .align 5
529ENTRY(crc_t10dif_pmull_p64)
530 crc_t10dif_pmull p64
531ENDPROC(crc_t10dif_pmull_p64)
368 532
369// precomputed constants 533// precomputed constants
370// these constants are precomputed from the poly: 534// these constants are precomputed from the poly:
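
The new p8 path added above emulates the 64-bit polynomial multiply on cores that lack the 64-bit pmull (Crypto Extensions) instruction, composing it from 8-bit by 8-bit pmulls over byte-rotated copies of the operands (the perm1..perm4 and bd1..bd4 registers). As a reference for what one full-width multiply computes, here is a plain C model of a 64x64 to 128-bit carryless multiply; it is only an illustration of the operation, not the kernel's implementation.

    #include <stdint.h>

    /* Bitwise model of a 64x64 -> 128-bit carryless (polynomial) multiply,
     * i.e. the result a single 64-bit PMULL instruction produces.
     * res[0] receives the low 64 bits, res[1] the high 64 bits. */
    static void clmul64(uint64_t a, uint64_t b, uint64_t res[2])
    {
        res[0] = 0;
        res[1] = 0;
        for (unsigned int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                res[0] ^= a << i;
                if (i)
                    res[1] ^= a >> (64 - i);
            }
        }
    }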
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index 96f0cae4a022..b461d62023f2 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -22,7 +22,10 @@
22 22
23#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U 23#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
24 24
25asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len); 25asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len);
26asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len);
27
28static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len);
26 29
27static int crct10dif_init(struct shash_desc *desc) 30static int crct10dif_init(struct shash_desc *desc)
28{ 31{
@@ -85,6 +88,11 @@ static struct shash_alg crc_t10dif_alg = {
85 88
86static int __init crc_t10dif_mod_init(void) 89static int __init crc_t10dif_mod_init(void)
87{ 90{
91 if (elf_hwcap & HWCAP_PMULL)
92 crc_t10dif_pmull = crc_t10dif_pmull_p64;
93 else
94 crc_t10dif_pmull = crc_t10dif_pmull_p8;
95
88 return crypto_register_shash(&crc_t10dif_alg); 96 return crypto_register_shash(&crc_t10dif_alg);
89} 97}
90 98
@@ -93,8 +101,10 @@ static void __exit crc_t10dif_mod_exit(void)
93 crypto_unregister_shash(&crc_t10dif_alg); 101 crypto_unregister_shash(&crc_t10dif_alg);
94} 102}
95 103
96module_cpu_feature_match(PMULL, crc_t10dif_mod_init); 104module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
97module_exit(crc_t10dif_mod_exit); 105module_exit(crc_t10dif_mod_exit);
98 106
99MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 107MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
100MODULE_LICENSE("GPL v2"); 108MODULE_LICENSE("GPL v2");
109MODULE_ALIAS_CRYPTO("crct10dif");
110MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce");
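
With the module now matched on ASIMD rather than PMULL, the glue picks the assembly entry point once at init time. A hedged sketch of the same function-pointer dispatch in isolation; the two externs are placeholders for the p64/p8 routines declared above.

    #include <stdint.h>

    typedef uint16_t (*crc_t10dif_fn)(uint16_t init, const uint8_t *buf,
                                      uint64_t len);

    /* Placeholders for the two assembly entry points. */
    uint16_t crc_t10dif_p64(uint16_t init, const uint8_t *buf, uint64_t len);
    uint16_t crc_t10dif_p8(uint16_t init, const uint8_t *buf, uint64_t len);

    static crc_t10dif_fn crc_t10dif_impl;

    /* Prefer the 64-bit PMULL variant when the CPU advertises it,
     * otherwise fall back to the 8-bit PMULL emulation. */
    static void select_impl(int have_pmull)
    {
        crc_t10dif_impl = have_pmull ? crc_t10dif_p64 : crc_t10dif_p8;
    }

The shash hooks then simply call through the pointer, so the selection costs nothing per update.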
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
deleted file mode 100644
index b14463438b09..000000000000
--- a/arch/arm64/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,352 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Author: Eric Biggers <ebiggers@google.com>
8 */
9
10#include <linux/linkage.h>
11
12 .text
13
14 // arguments
15 ROUND_KEYS .req x0 // const {u64,u32} *round_keys
16 NROUNDS .req w1 // int nrounds
17 NROUNDS_X .req x1
18 DST .req x2 // void *dst
19 SRC .req x3 // const void *src
20 NBYTES .req w4 // unsigned int nbytes
21 TWEAK .req x5 // void *tweak
22
23 // registers which hold the data being encrypted/decrypted
24 // (underscores avoid a naming collision with ARM64 registers x0-x3)
25 X_0 .req v0
26 Y_0 .req v1
27 X_1 .req v2
28 Y_1 .req v3
29 X_2 .req v4
30 Y_2 .req v5
31 X_3 .req v6
32 Y_3 .req v7
33
34 // the round key, duplicated in all lanes
35 ROUND_KEY .req v8
36
37 // index vector for tbl-based 8-bit rotates
38 ROTATE_TABLE .req v9
39 ROTATE_TABLE_Q .req q9
40
41 // temporary registers
42 TMP0 .req v10
43 TMP1 .req v11
44 TMP2 .req v12
45 TMP3 .req v13
46
47 // multiplication table for updating XTS tweaks
48 GFMUL_TABLE .req v14
49 GFMUL_TABLE_Q .req q14
50
51 // next XTS tweak value(s)
52 TWEAKV_NEXT .req v15
53
54 // XTS tweaks for the blocks currently being encrypted/decrypted
55 TWEAKV0 .req v16
56 TWEAKV1 .req v17
57 TWEAKV2 .req v18
58 TWEAKV3 .req v19
59 TWEAKV4 .req v20
60 TWEAKV5 .req v21
61 TWEAKV6 .req v22
62 TWEAKV7 .req v23
63
64 .align 4
65.Lror64_8_table:
66 .octa 0x080f0e0d0c0b0a090007060504030201
67.Lror32_8_table:
68 .octa 0x0c0f0e0d080b0a090407060500030201
69.Lrol64_8_table:
70 .octa 0x0e0d0c0b0a09080f0605040302010007
71.Lrol32_8_table:
72 .octa 0x0e0d0c0f0a09080b0605040702010003
73.Lgf128mul_table:
74 .octa 0x00000000000000870000000000000001
75.Lgf64mul_table:
76 .octa 0x0000000000000000000000002d361b00
77
78/*
79 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
80 *
81 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
82 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
83 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
84 * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
85 */
86.macro _speck_round_128bytes n, lanes
87
88 // x = ror(x, 8)
89 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
90 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
91 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
92 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
93
94 // x += y
95 add X_0.\lanes, X_0.\lanes, Y_0.\lanes
96 add X_1.\lanes, X_1.\lanes, Y_1.\lanes
97 add X_2.\lanes, X_2.\lanes, Y_2.\lanes
98 add X_3.\lanes, X_3.\lanes, Y_3.\lanes
99
100 // x ^= k
101 eor X_0.16b, X_0.16b, ROUND_KEY.16b
102 eor X_1.16b, X_1.16b, ROUND_KEY.16b
103 eor X_2.16b, X_2.16b, ROUND_KEY.16b
104 eor X_3.16b, X_3.16b, ROUND_KEY.16b
105
106 // y = rol(y, 3)
107 shl TMP0.\lanes, Y_0.\lanes, #3
108 shl TMP1.\lanes, Y_1.\lanes, #3
109 shl TMP2.\lanes, Y_2.\lanes, #3
110 shl TMP3.\lanes, Y_3.\lanes, #3
111 sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
112 sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
113 sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
114 sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
115
116 // y ^= x
117 eor Y_0.16b, TMP0.16b, X_0.16b
118 eor Y_1.16b, TMP1.16b, X_1.16b
119 eor Y_2.16b, TMP2.16b, X_2.16b
120 eor Y_3.16b, TMP3.16b, X_3.16b
121.endm
122
123/*
124 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
125 *
126 * This is the inverse of _speck_round_128bytes().
127 */
128.macro _speck_unround_128bytes n, lanes
129
130 // y ^= x
131 eor TMP0.16b, Y_0.16b, X_0.16b
132 eor TMP1.16b, Y_1.16b, X_1.16b
133 eor TMP2.16b, Y_2.16b, X_2.16b
134 eor TMP3.16b, Y_3.16b, X_3.16b
135
136 // y = ror(y, 3)
137 ushr Y_0.\lanes, TMP0.\lanes, #3
138 ushr Y_1.\lanes, TMP1.\lanes, #3
139 ushr Y_2.\lanes, TMP2.\lanes, #3
140 ushr Y_3.\lanes, TMP3.\lanes, #3
141 sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
142 sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
143 sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
144 sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
145
146 // x ^= k
147 eor X_0.16b, X_0.16b, ROUND_KEY.16b
148 eor X_1.16b, X_1.16b, ROUND_KEY.16b
149 eor X_2.16b, X_2.16b, ROUND_KEY.16b
150 eor X_3.16b, X_3.16b, ROUND_KEY.16b
151
152 // x -= y
153 sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
154 sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
155 sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
156 sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
157
158 // x = rol(x, 8)
159 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
160 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
161 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
162 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
163.endm
164
165.macro _next_xts_tweak next, cur, tmp, n
166.if \n == 64
167 /*
168 * Calculate the next tweak by multiplying the current one by x,
169 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
170 */
171 sshr \tmp\().2d, \cur\().2d, #63
172 and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
173 shl \next\().2d, \cur\().2d, #1
174 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
175 eor \next\().16b, \next\().16b, \tmp\().16b
176.else
177 /*
178 * Calculate the next two tweaks by multiplying the current ones by x^2,
179 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
180 */
181 ushr \tmp\().2d, \cur\().2d, #62
182 shl \next\().2d, \cur\().2d, #2
183 tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
184 eor \next\().16b, \next\().16b, \tmp\().16b
185.endif
186.endm
187
188/*
189 * _speck_xts_crypt() - Speck-XTS encryption/decryption
190 *
191 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
192 * using Speck-XTS, specifically the variant with a block size of '2n' and round
193 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
194 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
195 * nonzero multiple of 128.
196 */
197.macro _speck_xts_crypt n, lanes, decrypting
198
199 /*
200 * If decrypting, modify the ROUND_KEYS parameter to point to the last
201 * round key rather than the first, since for decryption the round keys
202 * are used in reverse order.
203 */
204.if \decrypting
205 mov NROUNDS, NROUNDS /* zero the high 32 bits */
206.if \n == 64
207 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
208 sub ROUND_KEYS, ROUND_KEYS, #8
209.else
210 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
211 sub ROUND_KEYS, ROUND_KEYS, #4
212.endif
213.endif
214
215 // Load the index vector for tbl-based 8-bit rotates
216.if \decrypting
217 ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
218.else
219 ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
220.endif
221
222 // One-time XTS preparation
223.if \n == 64
224 // Load first tweak
225 ld1 {TWEAKV0.16b}, [TWEAK]
226
227 // Load GF(2^128) multiplication table
228 ldr GFMUL_TABLE_Q, .Lgf128mul_table
229.else
230 // Load first tweak
231 ld1 {TWEAKV0.8b}, [TWEAK]
232
233 // Load GF(2^64) multiplication table
234 ldr GFMUL_TABLE_Q, .Lgf64mul_table
235
236 // Calculate second tweak, packing it together with the first
237 ushr TMP0.2d, TWEAKV0.2d, #63
238 shl TMP1.2d, TWEAKV0.2d, #1
239 tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
240 eor TMP0.8b, TMP0.8b, TMP1.8b
241 mov TWEAKV0.d[1], TMP0.d[0]
242.endif
243
244.Lnext_128bytes_\@:
245
246 // Calculate XTS tweaks for next 128 bytes
247 _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
248 _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
249 _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
250 _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
251 _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
252 _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
253 _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
254 _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
255
256 // Load the next source blocks into {X,Y}[0-3]
257 ld1 {X_0.16b-Y_1.16b}, [SRC], #64
258 ld1 {X_2.16b-Y_3.16b}, [SRC], #64
259
260 // XOR the source blocks with their XTS tweaks
261 eor TMP0.16b, X_0.16b, TWEAKV0.16b
262 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
263 eor TMP1.16b, X_1.16b, TWEAKV2.16b
264 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
265 eor TMP2.16b, X_2.16b, TWEAKV4.16b
266 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
267 eor TMP3.16b, X_3.16b, TWEAKV6.16b
268 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
269
270 /*
271 * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
272 * that the X[0-3] registers contain only the second halves of blocks,
273 * and the Y[0-3] registers contain only the first halves of blocks.
274 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
275 */
276 uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
277 uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
278 uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
279 uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
280 uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
281 uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
282 uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
283 uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
284
285 // Do the cipher rounds
286 mov x6, ROUND_KEYS
287 mov w7, NROUNDS
288.Lnext_round_\@:
289.if \decrypting
290 ld1r {ROUND_KEY.\lanes}, [x6]
291 sub x6, x6, #( \n / 8 )
292 _speck_unround_128bytes \n, \lanes
293.else
294 ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
295 _speck_round_128bytes \n, \lanes
296.endif
297 subs w7, w7, #1
298 bne .Lnext_round_\@
299
300 // Re-interleave the 'x' and 'y' elements of each block
301 zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
302 zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
303 zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
304 zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
305 zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
306 zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
307 zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
308 zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
309
310 // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
311 eor X_0.16b, TMP0.16b, TWEAKV0.16b
312 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
313 eor X_1.16b, TMP1.16b, TWEAKV2.16b
314 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
315 eor X_2.16b, TMP2.16b, TWEAKV4.16b
316 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
317 eor X_3.16b, TMP3.16b, TWEAKV6.16b
318 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
319 mov TWEAKV0.16b, TWEAKV_NEXT.16b
320
321 // Store the ciphertext in the destination buffer
322 st1 {X_0.16b-Y_1.16b}, [DST], #64
323 st1 {X_2.16b-Y_3.16b}, [DST], #64
324
325 // Continue if there are more 128-byte chunks remaining
326 subs NBYTES, NBYTES, #128
327 bne .Lnext_128bytes_\@
328
329 // Store the next tweak and return
330.if \n == 64
331 st1 {TWEAKV_NEXT.16b}, [TWEAK]
332.else
333 st1 {TWEAKV_NEXT.8b}, [TWEAK]
334.endif
335 ret
336.endm
337
338ENTRY(speck128_xts_encrypt_neon)
339 _speck_xts_crypt n=64, lanes=2d, decrypting=0
340ENDPROC(speck128_xts_encrypt_neon)
341
342ENTRY(speck128_xts_decrypt_neon)
343 _speck_xts_crypt n=64, lanes=2d, decrypting=1
344ENDPROC(speck128_xts_decrypt_neon)
345
346ENTRY(speck64_xts_encrypt_neon)
347 _speck_xts_crypt n=32, lanes=4s, decrypting=0
348ENDPROC(speck64_xts_encrypt_neon)
349
350ENTRY(speck64_xts_decrypt_neon)
351 _speck_xts_crypt n=32, lanes=4s, decrypting=1
352ENDPROC(speck64_xts_decrypt_neon)
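
Each loop iteration of the removed code applies the round from _speck_round_128bytes to eight 128-bit blocks (or sixteen 64-bit blocks) in parallel. Per lane, the round and its inverse reduce to the scalar C below for Speck128 (64-bit words; Speck64 performs the same steps on 32-bit words). This is a sketch derived from the macro comments above, not code taken from the kernel sources.

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned int n)
    {
        return (x >> n) | (x << (64 - n));
    }

    static inline uint64_t rol64(uint64_t x, unsigned int n)
    {
        return (x << n) | (x >> (64 - n));
    }

    /* One Speck128 encryption round with round key k. */
    static void speck128_round(uint64_t *x, uint64_t *y, uint64_t k)
    {
        *x = ror64(*x, 8);
        *x += *y;
        *x ^= k;
        *y = rol64(*y, 3);
        *y ^= *x;
    }

    /* Exact inverse of speck128_round(). */
    static void speck128_unround(uint64_t *x, uint64_t *y, uint64_t k)
    {
        *y ^= *x;
        *y = ror64(*y, 3);
        *x ^= k;
        *x -= *y;
        *x = rol64(*x, 8);
    }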
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
deleted file mode 100644
index 6e233aeb4ff4..000000000000
--- a/arch/arm64/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,282 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 * (64-bit version; based on the 32-bit version)
5 *
6 * Copyright (c) 2018 Google, Inc
7 */
8
9#include <asm/hwcap.h>
10#include <asm/neon.h>
11#include <asm/simd.h>
12#include <crypto/algapi.h>
13#include <crypto/gf128mul.h>
14#include <crypto/internal/skcipher.h>
15#include <crypto/speck.h>
16#include <crypto/xts.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19
20/* The assembly functions only handle multiples of 128 bytes */
21#define SPECK_NEON_CHUNK_SIZE 128
22
23/* Speck128 */
24
25struct speck128_xts_tfm_ctx {
26 struct speck128_tfm_ctx main_key;
27 struct speck128_tfm_ctx tweak_key;
28};
29
30asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
31 void *dst, const void *src,
32 unsigned int nbytes, void *tweak);
33
34asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
35 void *dst, const void *src,
36 unsigned int nbytes, void *tweak);
37
38typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
39 u8 *, const u8 *);
40typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
41 const void *, unsigned int, void *);
42
43static __always_inline int
44__speck128_xts_crypt(struct skcipher_request *req,
45 speck128_crypt_one_t crypt_one,
46 speck128_xts_crypt_many_t crypt_many)
47{
48 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
49 const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
50 struct skcipher_walk walk;
51 le128 tweak;
52 int err;
53
54 err = skcipher_walk_virt(&walk, req, true);
55
56 crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
57
58 while (walk.nbytes > 0) {
59 unsigned int nbytes = walk.nbytes;
60 u8 *dst = walk.dst.virt.addr;
61 const u8 *src = walk.src.virt.addr;
62
63 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
64 unsigned int count;
65
66 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
67 kernel_neon_begin();
68 (*crypt_many)(ctx->main_key.round_keys,
69 ctx->main_key.nrounds,
70 dst, src, count, &tweak);
71 kernel_neon_end();
72 dst += count;
73 src += count;
74 nbytes -= count;
75 }
76
77 /* Handle any remainder with generic code */
78 while (nbytes >= sizeof(tweak)) {
79 le128_xor((le128 *)dst, (const le128 *)src, &tweak);
80 (*crypt_one)(&ctx->main_key, dst, dst);
81 le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
82 gf128mul_x_ble(&tweak, &tweak);
83
84 dst += sizeof(tweak);
85 src += sizeof(tweak);
86 nbytes -= sizeof(tweak);
87 }
88 err = skcipher_walk_done(&walk, nbytes);
89 }
90
91 return err;
92}
93
94static int speck128_xts_encrypt(struct skcipher_request *req)
95{
96 return __speck128_xts_crypt(req, crypto_speck128_encrypt,
97 speck128_xts_encrypt_neon);
98}
99
100static int speck128_xts_decrypt(struct skcipher_request *req)
101{
102 return __speck128_xts_crypt(req, crypto_speck128_decrypt,
103 speck128_xts_decrypt_neon);
104}
105
106static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
107 unsigned int keylen)
108{
109 struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
110 int err;
111
112 err = xts_verify_key(tfm, key, keylen);
113 if (err)
114 return err;
115
116 keylen /= 2;
117
118 err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
119 if (err)
120 return err;
121
122 return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
123}
124
125/* Speck64 */
126
127struct speck64_xts_tfm_ctx {
128 struct speck64_tfm_ctx main_key;
129 struct speck64_tfm_ctx tweak_key;
130};
131
132asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
133 void *dst, const void *src,
134 unsigned int nbytes, void *tweak);
135
136asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
137 void *dst, const void *src,
138 unsigned int nbytes, void *tweak);
139
140typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
141 u8 *, const u8 *);
142typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
143 const void *, unsigned int, void *);
144
145static __always_inline int
146__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
147 speck64_xts_crypt_many_t crypt_many)
148{
149 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
150 const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
151 struct skcipher_walk walk;
152 __le64 tweak;
153 int err;
154
155 err = skcipher_walk_virt(&walk, req, true);
156
157 crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
158
159 while (walk.nbytes > 0) {
160 unsigned int nbytes = walk.nbytes;
161 u8 *dst = walk.dst.virt.addr;
162 const u8 *src = walk.src.virt.addr;
163
164 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
165 unsigned int count;
166
167 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
168 kernel_neon_begin();
169 (*crypt_many)(ctx->main_key.round_keys,
170 ctx->main_key.nrounds,
171 dst, src, count, &tweak);
172 kernel_neon_end();
173 dst += count;
174 src += count;
175 nbytes -= count;
176 }
177
178 /* Handle any remainder with generic code */
179 while (nbytes >= sizeof(tweak)) {
180 *(__le64 *)dst = *(__le64 *)src ^ tweak;
181 (*crypt_one)(&ctx->main_key, dst, dst);
182 *(__le64 *)dst ^= tweak;
183 tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
184 ((tweak & cpu_to_le64(1ULL << 63)) ?
185 0x1B : 0));
186 dst += sizeof(tweak);
187 src += sizeof(tweak);
188 nbytes -= sizeof(tweak);
189 }
190 err = skcipher_walk_done(&walk, nbytes);
191 }
192
193 return err;
194}
195
196static int speck64_xts_encrypt(struct skcipher_request *req)
197{
198 return __speck64_xts_crypt(req, crypto_speck64_encrypt,
199 speck64_xts_encrypt_neon);
200}
201
202static int speck64_xts_decrypt(struct skcipher_request *req)
203{
204 return __speck64_xts_crypt(req, crypto_speck64_decrypt,
205 speck64_xts_decrypt_neon);
206}
207
208static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
209 unsigned int keylen)
210{
211 struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
212 int err;
213
214 err = xts_verify_key(tfm, key, keylen);
215 if (err)
216 return err;
217
218 keylen /= 2;
219
220 err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
221 if (err)
222 return err;
223
224 return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
225}
226
227static struct skcipher_alg speck_algs[] = {
228 {
229 .base.cra_name = "xts(speck128)",
230 .base.cra_driver_name = "xts-speck128-neon",
231 .base.cra_priority = 300,
232 .base.cra_blocksize = SPECK128_BLOCK_SIZE,
233 .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
234 .base.cra_alignmask = 7,
235 .base.cra_module = THIS_MODULE,
236 .min_keysize = 2 * SPECK128_128_KEY_SIZE,
237 .max_keysize = 2 * SPECK128_256_KEY_SIZE,
238 .ivsize = SPECK128_BLOCK_SIZE,
239 .walksize = SPECK_NEON_CHUNK_SIZE,
240 .setkey = speck128_xts_setkey,
241 .encrypt = speck128_xts_encrypt,
242 .decrypt = speck128_xts_decrypt,
243 }, {
244 .base.cra_name = "xts(speck64)",
245 .base.cra_driver_name = "xts-speck64-neon",
246 .base.cra_priority = 300,
247 .base.cra_blocksize = SPECK64_BLOCK_SIZE,
248 .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
249 .base.cra_alignmask = 7,
250 .base.cra_module = THIS_MODULE,
251 .min_keysize = 2 * SPECK64_96_KEY_SIZE,
252 .max_keysize = 2 * SPECK64_128_KEY_SIZE,
253 .ivsize = SPECK64_BLOCK_SIZE,
254 .walksize = SPECK_NEON_CHUNK_SIZE,
255 .setkey = speck64_xts_setkey,
256 .encrypt = speck64_xts_encrypt,
257 .decrypt = speck64_xts_decrypt,
258 }
259};
260
261static int __init speck_neon_module_init(void)
262{
263 if (!(elf_hwcap & HWCAP_ASIMD))
264 return -ENODEV;
265 return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
266}
267
268static void __exit speck_neon_module_exit(void)
269{
270 crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
271}
272
273module_init(speck_neon_module_init);
274module_exit(speck_neon_module_exit);
275
276MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
277MODULE_LICENSE("GPL");
278MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
279MODULE_ALIAS_CRYPTO("xts(speck128)");
280MODULE_ALIAS_CRYPTO("xts-speck128-neon");
281MODULE_ALIAS_CRYPTO("xts(speck64)");
282MODULE_ALIAS_CRYPTO("xts-speck64-neon");
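
Both the removed assembly (_next_xts_tweak) and the C remainder loop (via gf128mul_x_ble) advance the XTS tweak by multiplying it by x modulo x^128 + x^7 + x^2 + x + 1. On two little-endian 64-bit halves that is a one-bit shift plus a conditional XOR with 0x87, as in this minimal sketch.

    #include <stdint.h>

    /* t = t * x mod (x^128 + x^7 + x^2 + x + 1), with the tweak stored as
     * two little-endian 64-bit halves: t[0] = low, t[1] = high.
     * The constant 0x87 encodes the reduction terms x^7 + x^2 + x + 1. */
    static void xts_tweak_mul_x(uint64_t t[2])
    {
        uint64_t carry = t[1] >> 63;

        t[1] = (t[1] << 1) | (t[0] >> 63);
        t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
    }

The Speck64-XTS variant does the analogous update in GF(2^64), multiplying two packed tweaks by x^2 per step, which is what the .Lgf64mul_table constant above supports.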
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 1d5483f6e457..85904b73e261 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m
621CONFIG_CRYPTO_MANAGER=y 621CONFIG_CRYPTO_MANAGER=y
622CONFIG_CRYPTO_USER=m 622CONFIG_CRYPTO_USER=m
623CONFIG_CRYPTO_CRYPTD=m 623CONFIG_CRYPTO_CRYPTD=m
624CONFIG_CRYPTO_MCRYPTD=m
625CONFIG_CRYPTO_TEST=m 624CONFIG_CRYPTO_TEST=m
626CONFIG_CRYPTO_CHACHA20POLY1305=m 625CONFIG_CRYPTO_CHACHA20POLY1305=m
627CONFIG_CRYPTO_AEGIS128=m 626CONFIG_CRYPTO_AEGIS128=m
@@ -657,7 +656,6 @@ CONFIG_CRYPTO_SALSA20=m
657CONFIG_CRYPTO_SEED=m 656CONFIG_CRYPTO_SEED=m
658CONFIG_CRYPTO_SERPENT=m 657CONFIG_CRYPTO_SERPENT=m
659CONFIG_CRYPTO_SM4=m 658CONFIG_CRYPTO_SM4=m
660CONFIG_CRYPTO_SPECK=m
661CONFIG_CRYPTO_TEA=m 659CONFIG_CRYPTO_TEA=m
662CONFIG_CRYPTO_TWOFISH=m 660CONFIG_CRYPTO_TWOFISH=m
663CONFIG_CRYPTO_LZO=m 661CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 52a0af127951..9b3818bbb68b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m
578CONFIG_CRYPTO_MANAGER=y 578CONFIG_CRYPTO_MANAGER=y
579CONFIG_CRYPTO_USER=m 579CONFIG_CRYPTO_USER=m
580CONFIG_CRYPTO_CRYPTD=m 580CONFIG_CRYPTO_CRYPTD=m
581CONFIG_CRYPTO_MCRYPTD=m
582CONFIG_CRYPTO_TEST=m 581CONFIG_CRYPTO_TEST=m
583CONFIG_CRYPTO_CHACHA20POLY1305=m 582CONFIG_CRYPTO_CHACHA20POLY1305=m
584CONFIG_CRYPTO_AEGIS128=m 583CONFIG_CRYPTO_AEGIS128=m
@@ -614,7 +613,6 @@ CONFIG_CRYPTO_SALSA20=m
614CONFIG_CRYPTO_SEED=m 613CONFIG_CRYPTO_SEED=m
615CONFIG_CRYPTO_SERPENT=m 614CONFIG_CRYPTO_SERPENT=m
616CONFIG_CRYPTO_SM4=m 615CONFIG_CRYPTO_SM4=m
617CONFIG_CRYPTO_SPECK=m
618CONFIG_CRYPTO_TEA=m 616CONFIG_CRYPTO_TEA=m
619CONFIG_CRYPTO_TWOFISH=m 617CONFIG_CRYPTO_TWOFISH=m
620CONFIG_CRYPTO_LZO=m 618CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index b3103e51268a..769677809945 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m
599CONFIG_CRYPTO_MANAGER=y 599CONFIG_CRYPTO_MANAGER=y
600CONFIG_CRYPTO_USER=m 600CONFIG_CRYPTO_USER=m
601CONFIG_CRYPTO_CRYPTD=m 601CONFIG_CRYPTO_CRYPTD=m
602CONFIG_CRYPTO_MCRYPTD=m
603CONFIG_CRYPTO_TEST=m 602CONFIG_CRYPTO_TEST=m
604CONFIG_CRYPTO_CHACHA20POLY1305=m 603CONFIG_CRYPTO_CHACHA20POLY1305=m
605CONFIG_CRYPTO_AEGIS128=m 604CONFIG_CRYPTO_AEGIS128=m
@@ -635,7 +634,6 @@ CONFIG_CRYPTO_SALSA20=m
635CONFIG_CRYPTO_SEED=m 634CONFIG_CRYPTO_SEED=m
636CONFIG_CRYPTO_SERPENT=m 635CONFIG_CRYPTO_SERPENT=m
637CONFIG_CRYPTO_SM4=m 636CONFIG_CRYPTO_SM4=m
638CONFIG_CRYPTO_SPECK=m
639CONFIG_CRYPTO_TEA=m 637CONFIG_CRYPTO_TEA=m
640CONFIG_CRYPTO_TWOFISH=m 638CONFIG_CRYPTO_TWOFISH=m
641CONFIG_CRYPTO_LZO=m 639CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fb7d651a4cab..7dd264ddf2ea 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
570CONFIG_CRYPTO_MANAGER=y 570CONFIG_CRYPTO_MANAGER=y
571CONFIG_CRYPTO_USER=m 571CONFIG_CRYPTO_USER=m
572CONFIG_CRYPTO_CRYPTD=m 572CONFIG_CRYPTO_CRYPTD=m
573CONFIG_CRYPTO_MCRYPTD=m
574CONFIG_CRYPTO_TEST=m 573CONFIG_CRYPTO_TEST=m
575CONFIG_CRYPTO_CHACHA20POLY1305=m 574CONFIG_CRYPTO_CHACHA20POLY1305=m
576CONFIG_CRYPTO_AEGIS128=m 575CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
606CONFIG_CRYPTO_SEED=m 605CONFIG_CRYPTO_SEED=m
607CONFIG_CRYPTO_SERPENT=m 606CONFIG_CRYPTO_SERPENT=m
608CONFIG_CRYPTO_SM4=m 607CONFIG_CRYPTO_SM4=m
609CONFIG_CRYPTO_SPECK=m
610CONFIG_CRYPTO_TEA=m 608CONFIG_CRYPTO_TEA=m
611CONFIG_CRYPTO_TWOFISH=m 609CONFIG_CRYPTO_TWOFISH=m
612CONFIG_CRYPTO_LZO=m 610CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6b37f5537c39..515f7439c755 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m
580CONFIG_CRYPTO_MANAGER=y 580CONFIG_CRYPTO_MANAGER=y
581CONFIG_CRYPTO_USER=m 581CONFIG_CRYPTO_USER=m
582CONFIG_CRYPTO_CRYPTD=m 582CONFIG_CRYPTO_CRYPTD=m
583CONFIG_CRYPTO_MCRYPTD=m
584CONFIG_CRYPTO_TEST=m 583CONFIG_CRYPTO_TEST=m
585CONFIG_CRYPTO_CHACHA20POLY1305=m 584CONFIG_CRYPTO_CHACHA20POLY1305=m
586CONFIG_CRYPTO_AEGIS128=m 585CONFIG_CRYPTO_AEGIS128=m
@@ -616,7 +615,6 @@ CONFIG_CRYPTO_SALSA20=m
616CONFIG_CRYPTO_SEED=m 615CONFIG_CRYPTO_SEED=m
617CONFIG_CRYPTO_SERPENT=m 616CONFIG_CRYPTO_SERPENT=m
618CONFIG_CRYPTO_SM4=m 617CONFIG_CRYPTO_SM4=m
619CONFIG_CRYPTO_SPECK=m
620CONFIG_CRYPTO_TEA=m 618CONFIG_CRYPTO_TEA=m
621CONFIG_CRYPTO_TWOFISH=m 619CONFIG_CRYPTO_TWOFISH=m
622CONFIG_CRYPTO_LZO=m 620CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index c717bf879449..8e1038ceb407 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m
602CONFIG_CRYPTO_MANAGER=y 602CONFIG_CRYPTO_MANAGER=y
603CONFIG_CRYPTO_USER=m 603CONFIG_CRYPTO_USER=m
604CONFIG_CRYPTO_CRYPTD=m 604CONFIG_CRYPTO_CRYPTD=m
605CONFIG_CRYPTO_MCRYPTD=m
606CONFIG_CRYPTO_TEST=m 605CONFIG_CRYPTO_TEST=m
607CONFIG_CRYPTO_CHACHA20POLY1305=m 606CONFIG_CRYPTO_CHACHA20POLY1305=m
608CONFIG_CRYPTO_AEGIS128=m 607CONFIG_CRYPTO_AEGIS128=m
@@ -638,7 +637,6 @@ CONFIG_CRYPTO_SALSA20=m
638CONFIG_CRYPTO_SEED=m 637CONFIG_CRYPTO_SEED=m
639CONFIG_CRYPTO_SERPENT=m 638CONFIG_CRYPTO_SERPENT=m
640CONFIG_CRYPTO_SM4=m 639CONFIG_CRYPTO_SM4=m
641CONFIG_CRYPTO_SPECK=m
642CONFIG_CRYPTO_TEA=m 640CONFIG_CRYPTO_TEA=m
643CONFIG_CRYPTO_TWOFISH=m 641CONFIG_CRYPTO_TWOFISH=m
644CONFIG_CRYPTO_LZO=m 642CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 226c994ce794..62c8aaa15cc7 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m
684CONFIG_CRYPTO_MANAGER=y 684CONFIG_CRYPTO_MANAGER=y
685CONFIG_CRYPTO_USER=m 685CONFIG_CRYPTO_USER=m
686CONFIG_CRYPTO_CRYPTD=m 686CONFIG_CRYPTO_CRYPTD=m
687CONFIG_CRYPTO_MCRYPTD=m
688CONFIG_CRYPTO_TEST=m 687CONFIG_CRYPTO_TEST=m
689CONFIG_CRYPTO_CHACHA20POLY1305=m 688CONFIG_CRYPTO_CHACHA20POLY1305=m
690CONFIG_CRYPTO_AEGIS128=m 689CONFIG_CRYPTO_AEGIS128=m
@@ -720,7 +719,6 @@ CONFIG_CRYPTO_SALSA20=m
720CONFIG_CRYPTO_SEED=m 719CONFIG_CRYPTO_SEED=m
721CONFIG_CRYPTO_SERPENT=m 720CONFIG_CRYPTO_SERPENT=m
722CONFIG_CRYPTO_SM4=m 721CONFIG_CRYPTO_SM4=m
723CONFIG_CRYPTO_SPECK=m
724CONFIG_CRYPTO_TEA=m 722CONFIG_CRYPTO_TEA=m
725CONFIG_CRYPTO_TWOFISH=m 723CONFIG_CRYPTO_TWOFISH=m
726CONFIG_CRYPTO_LZO=m 724CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b383327fd77a..733973f91297 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
570CONFIG_CRYPTO_MANAGER=y 570CONFIG_CRYPTO_MANAGER=y
571CONFIG_CRYPTO_USER=m 571CONFIG_CRYPTO_USER=m
572CONFIG_CRYPTO_CRYPTD=m 572CONFIG_CRYPTO_CRYPTD=m
573CONFIG_CRYPTO_MCRYPTD=m
574CONFIG_CRYPTO_TEST=m 573CONFIG_CRYPTO_TEST=m
575CONFIG_CRYPTO_CHACHA20POLY1305=m 574CONFIG_CRYPTO_CHACHA20POLY1305=m
576CONFIG_CRYPTO_AEGIS128=m 575CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
606CONFIG_CRYPTO_SEED=m 605CONFIG_CRYPTO_SEED=m
607CONFIG_CRYPTO_SERPENT=m 606CONFIG_CRYPTO_SERPENT=m
608CONFIG_CRYPTO_SM4=m 607CONFIG_CRYPTO_SM4=m
609CONFIG_CRYPTO_SPECK=m
610CONFIG_CRYPTO_TEA=m 608CONFIG_CRYPTO_TEA=m
611CONFIG_CRYPTO_TWOFISH=m 609CONFIG_CRYPTO_TWOFISH=m
612CONFIG_CRYPTO_LZO=m 610CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9783d3deb9e9..fee30cc9ac16 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
570CONFIG_CRYPTO_MANAGER=y 570CONFIG_CRYPTO_MANAGER=y
571CONFIG_CRYPTO_USER=m 571CONFIG_CRYPTO_USER=m
572CONFIG_CRYPTO_CRYPTD=m 572CONFIG_CRYPTO_CRYPTD=m
573CONFIG_CRYPTO_MCRYPTD=m
574CONFIG_CRYPTO_TEST=m 573CONFIG_CRYPTO_TEST=m
575CONFIG_CRYPTO_CHACHA20POLY1305=m 574CONFIG_CRYPTO_CHACHA20POLY1305=m
576CONFIG_CRYPTO_AEGIS128=m 575CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
606CONFIG_CRYPTO_SEED=m 605CONFIG_CRYPTO_SEED=m
607CONFIG_CRYPTO_SERPENT=m 606CONFIG_CRYPTO_SERPENT=m
608CONFIG_CRYPTO_SM4=m 607CONFIG_CRYPTO_SM4=m
609CONFIG_CRYPTO_SPECK=m
610CONFIG_CRYPTO_TEA=m 608CONFIG_CRYPTO_TEA=m
611CONFIG_CRYPTO_TWOFISH=m 609CONFIG_CRYPTO_TWOFISH=m
612CONFIG_CRYPTO_LZO=m 610CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index a35d10ee10cb..eebf9c9088e7 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m
593CONFIG_CRYPTO_MANAGER=y 593CONFIG_CRYPTO_MANAGER=y
594CONFIG_CRYPTO_USER=m 594CONFIG_CRYPTO_USER=m
595CONFIG_CRYPTO_CRYPTD=m 595CONFIG_CRYPTO_CRYPTD=m
596CONFIG_CRYPTO_MCRYPTD=m
597CONFIG_CRYPTO_TEST=m 596CONFIG_CRYPTO_TEST=m
598CONFIG_CRYPTO_CHACHA20POLY1305=m 597CONFIG_CRYPTO_CHACHA20POLY1305=m
599CONFIG_CRYPTO_AEGIS128=m 598CONFIG_CRYPTO_AEGIS128=m
@@ -629,7 +628,6 @@ CONFIG_CRYPTO_SALSA20=m
629CONFIG_CRYPTO_SEED=m 628CONFIG_CRYPTO_SEED=m
630CONFIG_CRYPTO_SERPENT=m 629CONFIG_CRYPTO_SERPENT=m
631CONFIG_CRYPTO_SM4=m 630CONFIG_CRYPTO_SM4=m
632CONFIG_CRYPTO_SPECK=m
633CONFIG_CRYPTO_TEA=m 631CONFIG_CRYPTO_TEA=m
634CONFIG_CRYPTO_TWOFISH=m 632CONFIG_CRYPTO_TWOFISH=m
635CONFIG_CRYPTO_LZO=m 633CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 573bf922d448..dabc54318c09 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m
571CONFIG_CRYPTO_MANAGER=y 571CONFIG_CRYPTO_MANAGER=y
572CONFIG_CRYPTO_USER=m 572CONFIG_CRYPTO_USER=m
573CONFIG_CRYPTO_CRYPTD=m 573CONFIG_CRYPTO_CRYPTD=m
574CONFIG_CRYPTO_MCRYPTD=m
575CONFIG_CRYPTO_TEST=m 574CONFIG_CRYPTO_TEST=m
576CONFIG_CRYPTO_CHACHA20POLY1305=m 575CONFIG_CRYPTO_CHACHA20POLY1305=m
577CONFIG_CRYPTO_AEGIS128=m 576CONFIG_CRYPTO_AEGIS128=m
@@ -607,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
607CONFIG_CRYPTO_SEED=m 606CONFIG_CRYPTO_SEED=m
608CONFIG_CRYPTO_SERPENT=m 607CONFIG_CRYPTO_SERPENT=m
609CONFIG_CRYPTO_SM4=m 608CONFIG_CRYPTO_SM4=m
610CONFIG_CRYPTO_SPECK=m
611CONFIG_CRYPTO_TEA=m 609CONFIG_CRYPTO_TEA=m
612CONFIG_CRYPTO_TWOFISH=m 610CONFIG_CRYPTO_TWOFISH=m
613CONFIG_CRYPTO_LZO=m 611CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index efb27a7fcc55..0d9a5c2a311a 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m
572CONFIG_CRYPTO_MANAGER=y 572CONFIG_CRYPTO_MANAGER=y
573CONFIG_CRYPTO_USER=m 573CONFIG_CRYPTO_USER=m
574CONFIG_CRYPTO_CRYPTD=m 574CONFIG_CRYPTO_CRYPTD=m
575CONFIG_CRYPTO_MCRYPTD=m
576CONFIG_CRYPTO_TEST=m 575CONFIG_CRYPTO_TEST=m
577CONFIG_CRYPTO_CHACHA20POLY1305=m 576CONFIG_CRYPTO_CHACHA20POLY1305=m
578CONFIG_CRYPTO_AEGIS128=m 577CONFIG_CRYPTO_AEGIS128=m
@@ -608,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m
608CONFIG_CRYPTO_SEED=m 607CONFIG_CRYPTO_SEED=m
609CONFIG_CRYPTO_SERPENT=m 608CONFIG_CRYPTO_SERPENT=m
610CONFIG_CRYPTO_SM4=m 609CONFIG_CRYPTO_SM4=m
611CONFIG_CRYPTO_SPECK=m
612CONFIG_CRYPTO_TEA=m 610CONFIG_CRYPTO_TEA=m
613CONFIG_CRYPTO_TWOFISH=m 611CONFIG_CRYPTO_TWOFISH=m
614CONFIG_CRYPTO_LZO=m 612CONFIG_CRYPTO_LZO=m
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 941d8cc6c9f5..259d1698ac50 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m
668# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 668# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
669CONFIG_CRYPTO_PCRYPT=m 669CONFIG_CRYPTO_PCRYPT=m
670CONFIG_CRYPTO_CRYPTD=m 670CONFIG_CRYPTO_CRYPTD=m
671CONFIG_CRYPTO_MCRYPTD=m
672CONFIG_CRYPTO_TEST=m 671CONFIG_CRYPTO_TEST=m
673CONFIG_CRYPTO_CHACHA20POLY1305=m 672CONFIG_CRYPTO_CHACHA20POLY1305=m
674CONFIG_CRYPTO_LRW=m 673CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index eb6f75f24208..37fd60c20e22 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m
610# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 610# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
611CONFIG_CRYPTO_PCRYPT=m 611CONFIG_CRYPTO_PCRYPT=m
612CONFIG_CRYPTO_CRYPTD=m 612CONFIG_CRYPTO_CRYPTD=m
613CONFIG_CRYPTO_MCRYPTD=m
614CONFIG_CRYPTO_TEST=m 613CONFIG_CRYPTO_TEST=m
615CONFIG_CRYPTO_CHACHA20POLY1305=m 614CONFIG_CRYPTO_CHACHA20POLY1305=m
616CONFIG_CRYPTO_LRW=m 615CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c54cb26eb7f5..812d9498d97b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -44,7 +44,7 @@ struct s390_aes_ctx {
44 int key_len; 44 int key_len;
45 unsigned long fc; 45 unsigned long fc;
46 union { 46 union {
47 struct crypto_skcipher *blk; 47 struct crypto_sync_skcipher *blk;
48 struct crypto_cipher *cip; 48 struct crypto_cipher *cip;
49 } fallback; 49 } fallback;
50}; 50};
@@ -54,7 +54,7 @@ struct s390_xts_ctx {
54 u8 pcc_key[32]; 54 u8 pcc_key[32];
55 int key_len; 55 int key_len;
56 unsigned long fc; 56 unsigned long fc;
57 struct crypto_skcipher *fallback; 57 struct crypto_sync_skcipher *fallback;
58}; 58};
59 59
60struct gcm_sg_walk { 60struct gcm_sg_walk {
@@ -184,14 +184,15 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
184 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 184 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
185 unsigned int ret; 185 unsigned int ret;
186 186
187 crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK); 187 crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
188 crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags & 188 CRYPTO_TFM_REQ_MASK);
189 crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
189 CRYPTO_TFM_REQ_MASK); 190 CRYPTO_TFM_REQ_MASK);
190 191
191 ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len); 192 ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
192 193
193 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 194 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
194 tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) & 195 tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
195 CRYPTO_TFM_RES_MASK; 196 CRYPTO_TFM_RES_MASK;
196 197
197 return ret; 198 return ret;
@@ -204,9 +205,9 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
204 unsigned int ret; 205 unsigned int ret;
205 struct crypto_blkcipher *tfm = desc->tfm; 206 struct crypto_blkcipher *tfm = desc->tfm;
206 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm); 207 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
207 SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); 208 SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
208 209
209 skcipher_request_set_tfm(req, sctx->fallback.blk); 210 skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
210 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 211 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
211 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 212 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
212 213
@@ -223,9 +224,9 @@ static int fallback_blk_enc(struct blkcipher_desc *desc,
223 unsigned int ret; 224 unsigned int ret;
224 struct crypto_blkcipher *tfm = desc->tfm; 225 struct crypto_blkcipher *tfm = desc->tfm;
225 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm); 226 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
226 SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); 227 SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
227 228
228 skcipher_request_set_tfm(req, sctx->fallback.blk); 229 skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
229 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 230 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
230 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 231 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
231 232
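
The hunks above convert the s390 AES fallback from crypto_skcipher to the new crypto_sync_skcipher type, whose on-stack request macro has a fixed size and so needs no variable-length array. A condensed sketch of the resulting request pattern, with error handling and the encrypt/decrypt split omitted; the API identifiers are the ones visible in the diff, while the wrapper function itself is hypothetical.

    #include <crypto/skcipher.h>

    static int fallback_crypt_sketch(struct crypto_sync_skcipher *fallback,
                                     struct scatterlist *dst,
                                     struct scatterlist *src,
                                     unsigned int nbytes, void *iv, u32 flags)
    {
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, fallback);
        int ret;

        skcipher_request_set_sync_tfm(req, fallback);
        skcipher_request_set_callback(req, flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, iv);

        ret = crypto_skcipher_encrypt(req);   /* or crypto_skcipher_decrypt() */
        skcipher_request_zero(req);
        return ret;
    }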
@@ -306,8 +307,7 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
306 const char *name = tfm->__crt_alg->cra_name; 307 const char *name = tfm->__crt_alg->cra_name;
307 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 308 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
308 309
309 sctx->fallback.blk = crypto_alloc_skcipher(name, 0, 310 sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
310 CRYPTO_ALG_ASYNC |
311 CRYPTO_ALG_NEED_FALLBACK); 311 CRYPTO_ALG_NEED_FALLBACK);
312 312
313 if (IS_ERR(sctx->fallback.blk)) { 313 if (IS_ERR(sctx->fallback.blk)) {
@@ -323,7 +323,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
323{ 323{
324 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 324 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
325 325
326 crypto_free_skcipher(sctx->fallback.blk); 326 crypto_free_sync_skcipher(sctx->fallback.blk);
327} 327}
328 328
329static struct crypto_alg ecb_aes_alg = { 329static struct crypto_alg ecb_aes_alg = {
@@ -453,14 +453,15 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
453 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); 453 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
454 unsigned int ret; 454 unsigned int ret;
455 455
456 crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK); 456 crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
457 crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags & 457 CRYPTO_TFM_REQ_MASK);
458 crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
458 CRYPTO_TFM_REQ_MASK); 459 CRYPTO_TFM_REQ_MASK);
459 460
460 ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len); 461 ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
461 462
462 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 463 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
463 tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) & 464 tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
464 CRYPTO_TFM_RES_MASK; 465 CRYPTO_TFM_RES_MASK;
465 466
466 return ret; 467 return ret;
@@ -472,10 +473,10 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc,
472{ 473{
473 struct crypto_blkcipher *tfm = desc->tfm; 474 struct crypto_blkcipher *tfm = desc->tfm;
474 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm); 475 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
475 SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); 476 SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
476 unsigned int ret; 477 unsigned int ret;
477 478
478 skcipher_request_set_tfm(req, xts_ctx->fallback); 479 skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
479 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 480 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
480 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 481 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
481 482
@@ -491,10 +492,10 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc,
491{ 492{
492 struct crypto_blkcipher *tfm = desc->tfm; 493 struct crypto_blkcipher *tfm = desc->tfm;
493 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm); 494 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
494 SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); 495 SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
495 unsigned int ret; 496 unsigned int ret;
496 497
497 skcipher_request_set_tfm(req, xts_ctx->fallback); 498 skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
498 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 499 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
499 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 500 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
500 501
@@ -611,8 +612,7 @@ static int xts_fallback_init(struct crypto_tfm *tfm)
611 const char *name = tfm->__crt_alg->cra_name; 612 const char *name = tfm->__crt_alg->cra_name;
612 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); 613 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
613 614
614 xts_ctx->fallback = crypto_alloc_skcipher(name, 0, 615 xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
615 CRYPTO_ALG_ASYNC |
616 CRYPTO_ALG_NEED_FALLBACK); 616 CRYPTO_ALG_NEED_FALLBACK);
617 617
618 if (IS_ERR(xts_ctx->fallback)) { 618 if (IS_ERR(xts_ctx->fallback)) {
@@ -627,7 +627,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
627{ 627{
628 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); 628 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
629 629
630 crypto_free_skcipher(xts_ctx->fallback); 630 crypto_free_sync_skcipher(xts_ctx->fallback);
631} 631}
632 632
633static struct crypto_alg xts_aes_alg = { 633static struct crypto_alg xts_aes_alg = {
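The s390 hunks above are one instance of the new synchronous-skcipher fallback pattern: the fallback tfm becomes a struct crypto_sync_skcipher, which is guaranteed to be synchronous, so its request can safely live on the stack. A minimal sketch of that pattern, using only APIs that appear in the hunks above (the demo_* names are hypothetical and error handling is trimmed):

/* Sketch only: allocate a sync skcipher fallback and run one request on it. */
#include <crypto/internal/skcipher.h>

struct demo_ctx {
	struct crypto_sync_skcipher *fallback;
};

static int demo_init_fallback(struct demo_ctx *ctx, const char *name)
{
	/* No need to mask out CRYPTO_ALG_ASYNC by hand any more:
	 * crypto_alloc_sync_skcipher() only hands back synchronous tfms. */
	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int demo_fallback_encrypt(struct demo_ctx *ctx,
				 struct scatterlist *src,
				 struct scatterlist *dst,
				 unsigned int nbytes, void *iv)
{
	/* The request may live on the stack because the tfm is known to be
	 * synchronous and therefore has a bounded request size. */
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
	int ret;

	skcipher_request_set_sync_tfm(req, ctx->fallback);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return ret;
}

Teardown mirrors allocation with crypto_free_sync_skcipher(), as in fallback_exit_blk() and xts_fallback_exit() above.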
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 20add000dd6d..7cb6a52f727d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_DEFLATE=m
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a450ad573dcb..a4b0007a54e1 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -60,9 +60,6 @@ endif
 ifeq ($(avx2_supported),yes)
 	obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
 	obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
-	obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
-	obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
-	obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
 
 	obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o
 endif
@@ -106,7 +103,7 @@ ifeq ($(avx2_supported),yes)
 	morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o
 endif
 
-aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index acbe7e8336d8..661f7daf43da 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -102,9 +102,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-int crypto_fpu_init(void);
-void crypto_fpu_exit(void);
-
 #define AVX_GEN2_OPTSIZE 640
 #define AVX_GEN4_OPTSIZE 4096
 
@@ -817,7 +814,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 	/* Linearize assoc, if not already linear */
 	if (req->src->length >= assoclen && req->src->length &&
 	    (!PageHighMem(sg_page(req->src)) ||
-	     req->src->offset + req->src->length < PAGE_SIZE)) {
+	     req->src->offset + req->src->length <= PAGE_SIZE)) {
 		scatterwalk_start(&assoc_sg_walk, req->src);
 		assoc = scatterwalk_map(&assoc_sg_walk);
 	} else {
@@ -1253,22 +1250,6 @@ static struct skcipher_alg aesni_skciphers[] = {
 static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
 
-static struct {
-	const char *algname;
-	const char *drvname;
-	const char *basename;
-	struct simd_skcipher_alg *simd;
-} aesni_simd_skciphers2[] = {
-#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
-    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
-	{
-		.algname	= "pcbc(aes)",
-		.drvname	= "pcbc-aes-aesni",
-		.basename	= "fpu(pcbc(__aes-aesni))",
-	},
-#endif
-};
-
 #ifdef CONFIG_X86_64
 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
 				  unsigned int key_len)
@@ -1422,10 +1403,6 @@ static void aesni_free_simds(void)
 	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
 		    aesni_simd_skciphers[i]; i++)
 		simd_skcipher_free(aesni_simd_skciphers[i]);
-
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
-		if (aesni_simd_skciphers2[i].simd)
-			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }
 
 static int __init aesni_init(void)
@@ -1469,13 +1446,9 @@ static int __init aesni_init(void)
 #endif
 #endif
 
-	err = crypto_fpu_init();
-	if (err)
-		return err;
-
 	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 	if (err)
-		goto fpu_exit;
+		return err;
 
 	err = crypto_register_skciphers(aesni_skciphers,
 					ARRAY_SIZE(aesni_skciphers));
@@ -1499,18 +1472,6 @@ static int __init aesni_init(void)
 		aesni_simd_skciphers[i] = simd;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
-		algname = aesni_simd_skciphers2[i].algname;
-		drvname = aesni_simd_skciphers2[i].drvname;
-		basename = aesni_simd_skciphers2[i].basename;
-		simd = simd_skcipher_create_compat(algname, drvname, basename);
-		err = PTR_ERR(simd);
-		if (IS_ERR(simd))
-			continue;
-
-		aesni_simd_skciphers2[i].simd = simd;
-	}
-
 	return 0;
 
 unregister_simds:
@@ -1521,8 +1482,6 @@ unregister_skciphers:
 				    ARRAY_SIZE(aesni_skciphers));
 unregister_algs:
 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-fpu_exit:
-	crypto_fpu_exit();
 	return err;
 }
 
@@ -1533,8 +1492,6 @@ static void __exit aesni_exit(void)
 	crypto_unregister_skciphers(aesni_skciphers,
 				    ARRAY_SIZE(aesni_skciphers));
 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-
-	crypto_fpu_exit();
 }
 
 late_initcall(aesni_init);
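The gcmaes_crypt_by_sg() hunk above relaxes the single-page test from '<' to '<=' PAGE_SIZE. A tiny sketch of the boundary case that motivates the change (the helper name is hypothetical, not part of the driver):

/* Sketch: a scatterlist entry whose data ends exactly at the page boundary
 * still fits within a single page, so offset + length == PAGE_SIZE must be
 * accepted. */
static bool demo_fits_in_one_page(unsigned int offset, unsigned int length)
{
	return offset + length <= PAGE_SIZE;
}

/* Example: offset = 0, length = PAGE_SIZE fits in one page, yet the old
 * "< PAGE_SIZE" test would have forced the copy/kmalloc fallback path. */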
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
deleted file mode 100644
index 406680476c52..000000000000
--- a/arch/x86/crypto/fpu.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * FPU: Wrapper for blkcipher touching fpu
3 *
4 * Copyright (c) Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 */
13
14#include <crypto/internal/skcipher.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <asm/fpu/api.h>
21
22struct crypto_fpu_ctx {
23 struct crypto_skcipher *child;
24};
25
26static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key,
27 unsigned int keylen)
28{
29 struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent);
30 struct crypto_skcipher *child = ctx->child;
31 int err;
32
33 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
34 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
35 CRYPTO_TFM_REQ_MASK);
36 err = crypto_skcipher_setkey(child, key, keylen);
37 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
38 CRYPTO_TFM_RES_MASK);
39 return err;
40}
41
42static int crypto_fpu_encrypt(struct skcipher_request *req)
43{
44 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
45 struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
46 struct crypto_skcipher *child = ctx->child;
47 SKCIPHER_REQUEST_ON_STACK(subreq, child);
48 int err;
49
50 skcipher_request_set_tfm(subreq, child);
51 skcipher_request_set_callback(subreq, 0, NULL, NULL);
52 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
53 req->iv);
54
55 kernel_fpu_begin();
56 err = crypto_skcipher_encrypt(subreq);
57 kernel_fpu_end();
58
59 skcipher_request_zero(subreq);
60 return err;
61}
62
63static int crypto_fpu_decrypt(struct skcipher_request *req)
64{
65 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
66 struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
67 struct crypto_skcipher *child = ctx->child;
68 SKCIPHER_REQUEST_ON_STACK(subreq, child);
69 int err;
70
71 skcipher_request_set_tfm(subreq, child);
72 skcipher_request_set_callback(subreq, 0, NULL, NULL);
73 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
74 req->iv);
75
76 kernel_fpu_begin();
77 err = crypto_skcipher_decrypt(subreq);
78 kernel_fpu_end();
79
80 skcipher_request_zero(subreq);
81 return err;
82}
83
84static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm)
85{
86 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
87 struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
88 struct crypto_skcipher_spawn *spawn;
89 struct crypto_skcipher *cipher;
90
91 spawn = skcipher_instance_ctx(inst);
92 cipher = crypto_spawn_skcipher(spawn);
93 if (IS_ERR(cipher))
94 return PTR_ERR(cipher);
95
96 ctx->child = cipher;
97
98 return 0;
99}
100
101static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm)
102{
103 struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
104
105 crypto_free_skcipher(ctx->child);
106}
107
108static void crypto_fpu_free(struct skcipher_instance *inst)
109{
110 crypto_drop_skcipher(skcipher_instance_ctx(inst));
111 kfree(inst);
112}
113
114static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb)
115{
116 struct crypto_skcipher_spawn *spawn;
117 struct skcipher_instance *inst;
118 struct crypto_attr_type *algt;
119 struct skcipher_alg *alg;
120 const char *cipher_name;
121 int err;
122
123 algt = crypto_get_attr_type(tb);
124 if (IS_ERR(algt))
125 return PTR_ERR(algt);
126
127 if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) &
128 algt->mask)
129 return -EINVAL;
130
131 if (!(algt->mask & CRYPTO_ALG_INTERNAL))
132 return -EINVAL;
133
134 cipher_name = crypto_attr_alg_name(tb[1]);
135 if (IS_ERR(cipher_name))
136 return PTR_ERR(cipher_name);
137
138 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
139 if (!inst)
140 return -ENOMEM;
141
142 spawn = skcipher_instance_ctx(inst);
143
144 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
145 err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL,
146 CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
147 if (err)
148 goto out_free_inst;
149
150 alg = crypto_skcipher_spawn_alg(spawn);
151
152 err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu",
153 &alg->base);
154 if (err)
155 goto out_drop_skcipher;
156
157 inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL;
158 inst->alg.base.cra_priority = alg->base.cra_priority;
159 inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
160 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
161
162 inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
163 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
164 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
165
166 inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
167
168 inst->alg.init = crypto_fpu_init_tfm;
169 inst->alg.exit = crypto_fpu_exit_tfm;
170
171 inst->alg.setkey = crypto_fpu_setkey;
172 inst->alg.encrypt = crypto_fpu_encrypt;
173 inst->alg.decrypt = crypto_fpu_decrypt;
174
175 inst->free = crypto_fpu_free;
176
177 err = skcipher_register_instance(tmpl, inst);
178 if (err)
179 goto out_drop_skcipher;
180
181out:
182 return err;
183
184out_drop_skcipher:
185 crypto_drop_skcipher(spawn);
186out_free_inst:
187 kfree(inst);
188 goto out;
189}
190
191static struct crypto_template crypto_fpu_tmpl = {
192 .name = "fpu",
193 .create = crypto_fpu_create,
194 .module = THIS_MODULE,
195};
196
197int __init crypto_fpu_init(void)
198{
199 return crypto_register_template(&crypto_fpu_tmpl);
200}
201
202void crypto_fpu_exit(void)
203{
204 crypto_unregister_template(&crypto_fpu_tmpl);
205}
206
207MODULE_ALIAS_CRYPTO("fpu");
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
deleted file mode 100644
index 815ded3ba90e..000000000000
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Arch-specific CryptoAPI modules.
4#
5
6OBJECT_FILES_NON_STANDARD := y
7
8avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
9 $(comma)4)$(comma)%ymm2,yes,no)
10ifeq ($(avx2_supported),yes)
11 obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o
12 sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \
13 sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o
14endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
deleted file mode 100644
index b93805664c1d..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ /dev/null
@@ -1,1011 +0,0 @@
1/*
2 * Multi buffer SHA1 algorithm Glue Code
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Tim Chen <tim.c.chen@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2014 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <crypto/internal/hash.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/mm.h>
60#include <linux/cryptohash.h>
61#include <linux/types.h>
62#include <linux/list.h>
63#include <crypto/scatterwalk.h>
64#include <crypto/sha.h>
65#include <crypto/mcryptd.h>
66#include <crypto/crypto_wq.h>
67#include <asm/byteorder.h>
68#include <linux/hardirq.h>
69#include <asm/fpu/api.h>
70#include "sha1_mb_ctx.h"
71
72#define FLUSH_INTERVAL 1000 /* in usec */
73
74static struct mcryptd_alg_state sha1_mb_alg_state;
75
76struct sha1_mb_ctx {
77 struct mcryptd_ahash *mcryptd_tfm;
78};
79
80static inline struct mcryptd_hash_request_ctx
81 *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
82{
83 struct ahash_request *areq;
84
85 areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
86 return container_of(areq, struct mcryptd_hash_request_ctx, areq);
87}
88
89static inline struct ahash_request
90 *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
91{
92 return container_of((void *) ctx, struct ahash_request, __ctx);
93}
94
95static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
96 struct ahash_request *areq)
97{
98 rctx->flag = HASH_UPDATE;
99}
100
101static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
102static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
103 (struct sha1_mb_mgr *state, struct job_sha1 *job);
104static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
105 (struct sha1_mb_mgr *state);
106static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
107 (struct sha1_mb_mgr *state);
108
109static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
110 uint64_t total_len)
111{
112 uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
113
114 memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
115 padblock[i] = 0x80;
116
117 i += ((SHA1_BLOCK_SIZE - 1) &
118 (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
119 + 1 + SHA1_PADLENGTHFIELD_SIZE;
120
121#if SHA1_PADLENGTHFIELD_SIZE == 16
122 *((uint64_t *) &padblock[i - 16]) = 0;
123#endif
124
125 *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
126
127 /* Number of extra blocks to hash */
128 return i >> SHA1_LOG2_BLOCK_SIZE;
129}
130
131static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
132 struct sha1_hash_ctx *ctx)
133{
134 while (ctx) {
135 if (ctx->status & HASH_CTX_STS_COMPLETE) {
136 /* Clear PROCESSING bit */
137 ctx->status = HASH_CTX_STS_COMPLETE;
138 return ctx;
139 }
140
141 /*
142 * If the extra blocks are empty, begin hashing what remains
143 * in the user's buffer.
144 */
145 if (ctx->partial_block_buffer_length == 0 &&
146 ctx->incoming_buffer_length) {
147
148 const void *buffer = ctx->incoming_buffer;
149 uint32_t len = ctx->incoming_buffer_length;
150 uint32_t copy_len;
151
152 /*
153 * Only entire blocks can be hashed.
154 * Copy remainder to extra blocks buffer.
155 */
156 copy_len = len & (SHA1_BLOCK_SIZE-1);
157
158 if (copy_len) {
159 len -= copy_len;
160 memcpy(ctx->partial_block_buffer,
161 ((const char *) buffer + len),
162 copy_len);
163 ctx->partial_block_buffer_length = copy_len;
164 }
165
166 ctx->incoming_buffer_length = 0;
167
168 /* len should be a multiple of the block size now */
169 assert((len % SHA1_BLOCK_SIZE) == 0);
170
171 /* Set len to the number of blocks to be hashed */
172 len >>= SHA1_LOG2_BLOCK_SIZE;
173
174 if (len) {
175
176 ctx->job.buffer = (uint8_t *) buffer;
177 ctx->job.len = len;
178 ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
179 &ctx->job);
180 continue;
181 }
182 }
183
184 /*
185 * If the extra blocks are not empty, then we are
186 * either on the last block(s) or we need more
187 * user input before continuing.
188 */
189 if (ctx->status & HASH_CTX_STS_LAST) {
190
191 uint8_t *buf = ctx->partial_block_buffer;
192 uint32_t n_extra_blocks =
193 sha1_pad(buf, ctx->total_length);
194
195 ctx->status = (HASH_CTX_STS_PROCESSING |
196 HASH_CTX_STS_COMPLETE);
197 ctx->job.buffer = buf;
198 ctx->job.len = (uint32_t) n_extra_blocks;
199 ctx = (struct sha1_hash_ctx *)
200 sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
201 continue;
202 }
203
204 ctx->status = HASH_CTX_STS_IDLE;
205 return ctx;
206 }
207
208 return NULL;
209}
210
211static struct sha1_hash_ctx
212 *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
213{
214 /*
215 * If get_comp_job returns NULL, there are no jobs complete.
216 * If get_comp_job returns a job, verify that it is safe to return to
217 * the user.
218 * If it is not ready, resubmit the job to finish processing.
219 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
220 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
221 * still need processing.
222 */
223 struct sha1_hash_ctx *ctx;
224
225 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
226 return sha1_ctx_mgr_resubmit(mgr, ctx);
227}
228
229static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
230{
231 sha1_job_mgr_init(&mgr->mgr);
232}
233
234static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
235 struct sha1_hash_ctx *ctx,
236 const void *buffer,
237 uint32_t len,
238 int flags)
239{
240 if (flags & ~(HASH_UPDATE | HASH_LAST)) {
241 /* User should not pass anything other than UPDATE or LAST */
242 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
243 return ctx;
244 }
245
246 if (ctx->status & HASH_CTX_STS_PROCESSING) {
247 /* Cannot submit to a currently processing job. */
248 ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
249 return ctx;
250 }
251
252 if (ctx->status & HASH_CTX_STS_COMPLETE) {
253 /* Cannot update a finished job. */
254 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
255 return ctx;
256 }
257
258 /*
259 * If we made it here, there were no errors during this call to
260 * submit
261 */
262 ctx->error = HASH_CTX_ERROR_NONE;
263
264 /* Store buffer ptr info from user */
265 ctx->incoming_buffer = buffer;
266 ctx->incoming_buffer_length = len;
267
268 /*
269 * Store the user's request flags and mark this ctx as currently
270 * being processed.
271 */
272 ctx->status = (flags & HASH_LAST) ?
273 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
274 HASH_CTX_STS_PROCESSING;
275
276 /* Advance byte counter */
277 ctx->total_length += len;
278
279 /*
280 * If there is anything currently buffered in the extra blocks,
281 * append to it until it contains a whole block.
282 * Or if the user's buffer contains less than a whole block,
283 * append as much as possible to the extra block.
284 */
285 if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
286 /*
287 * Compute how many bytes to copy from user buffer into
288 * extra block
289 */
290 uint32_t copy_len = SHA1_BLOCK_SIZE -
291 ctx->partial_block_buffer_length;
292 if (len < copy_len)
293 copy_len = len;
294
295 if (copy_len) {
296 /* Copy and update relevant pointers and counters */
297 memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
298 buffer, copy_len);
299
300 ctx->partial_block_buffer_length += copy_len;
301 ctx->incoming_buffer = (const void *)
302 ((const char *)buffer + copy_len);
303 ctx->incoming_buffer_length = len - copy_len;
304 }
305
306 /*
307 * The extra block should never contain more than 1 block
308 * here
309 */
310 assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
311
312 /*
313 * If the extra block buffer contains exactly 1 block, it can
314 * be hashed.
315 */
316 if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
317 ctx->partial_block_buffer_length = 0;
318
319 ctx->job.buffer = ctx->partial_block_buffer;
320 ctx->job.len = 1;
321 ctx = (struct sha1_hash_ctx *)
322 sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
323 }
324 }
325
326 return sha1_ctx_mgr_resubmit(mgr, ctx);
327}
328
329static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
330{
331 struct sha1_hash_ctx *ctx;
332
333 while (1) {
334 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);
335
336 /* If flush returned 0, there are no more jobs in flight. */
337 if (!ctx)
338 return NULL;
339
340 /*
341 * If flush returned a job, resubmit the job to finish
342 * processing.
343 */
344 ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
345
346 /*
347 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
348 * returned. Otherwise, all jobs currently being managed by the
349 * sha1_ctx_mgr still need processing. Loop.
350 */
351 if (ctx)
352 return ctx;
353 }
354}
355
356static int sha1_mb_init(struct ahash_request *areq)
357{
358 struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
359
360 hash_ctx_init(sctx);
361 sctx->job.result_digest[0] = SHA1_H0;
362 sctx->job.result_digest[1] = SHA1_H1;
363 sctx->job.result_digest[2] = SHA1_H2;
364 sctx->job.result_digest[3] = SHA1_H3;
365 sctx->job.result_digest[4] = SHA1_H4;
366 sctx->total_length = 0;
367 sctx->partial_block_buffer_length = 0;
368 sctx->status = HASH_CTX_STS_IDLE;
369
370 return 0;
371}
372
373static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
374{
375 int i;
376 struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
377 __be32 *dst = (__be32 *) rctx->out;
378
379 for (i = 0; i < 5; ++i)
380 dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
381
382 return 0;
383}
384
385static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
386 struct mcryptd_alg_cstate *cstate, bool flush)
387{
388 int flag = HASH_UPDATE;
389 int nbytes, err = 0;
390 struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
391 struct sha1_hash_ctx *sha_ctx;
392
393 /* more work ? */
394 while (!(rctx->flag & HASH_DONE)) {
395 nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
396 if (nbytes < 0) {
397 err = nbytes;
398 goto out;
399 }
400 /* check if the walk is done */
401 if (crypto_ahash_walk_last(&rctx->walk)) {
402 rctx->flag |= HASH_DONE;
403 if (rctx->flag & HASH_FINAL)
404 flag |= HASH_LAST;
405
406 }
407 sha_ctx = (struct sha1_hash_ctx *)
408 ahash_request_ctx(&rctx->areq);
409 kernel_fpu_begin();
410 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
411 rctx->walk.data, nbytes, flag);
412 if (!sha_ctx) {
413 if (flush)
414 sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
415 }
416 kernel_fpu_end();
417 if (sha_ctx)
418 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
419 else {
420 rctx = NULL;
421 goto out;
422 }
423 }
424
425 /* copy the results */
426 if (rctx->flag & HASH_FINAL)
427 sha1_mb_set_results(rctx);
428
429out:
430 *ret_rctx = rctx;
431 return err;
432}
433
434static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
435 struct mcryptd_alg_cstate *cstate,
436 int err)
437{
438 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
439 struct sha1_hash_ctx *sha_ctx;
440 struct mcryptd_hash_request_ctx *req_ctx;
441 int ret;
442
443 /* remove from work list */
444 spin_lock(&cstate->work_lock);
445 list_del(&rctx->waiter);
446 spin_unlock(&cstate->work_lock);
447
448 if (irqs_disabled())
449 rctx->complete(&req->base, err);
450 else {
451 local_bh_disable();
452 rctx->complete(&req->base, err);
453 local_bh_enable();
454 }
455
456 /* check to see if there are other jobs that are done */
457 sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
458 while (sha_ctx) {
459 req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
460 ret = sha_finish_walk(&req_ctx, cstate, false);
461 if (req_ctx) {
462 spin_lock(&cstate->work_lock);
463 list_del(&req_ctx->waiter);
464 spin_unlock(&cstate->work_lock);
465
466 req = cast_mcryptd_ctx_to_req(req_ctx);
467 if (irqs_disabled())
468 req_ctx->complete(&req->base, ret);
469 else {
470 local_bh_disable();
471 req_ctx->complete(&req->base, ret);
472 local_bh_enable();
473 }
474 }
475 sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
476 }
477
478 return 0;
479}
480
481static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
482 struct mcryptd_alg_cstate *cstate)
483{
484 unsigned long next_flush;
485 unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
486
487 /* initialize tag */
488 rctx->tag.arrival = jiffies; /* tag the arrival time */
489 rctx->tag.seq_num = cstate->next_seq_num++;
490 next_flush = rctx->tag.arrival + delay;
491 rctx->tag.expire = next_flush;
492
493 spin_lock(&cstate->work_lock);
494 list_add_tail(&rctx->waiter, &cstate->work_list);
495 spin_unlock(&cstate->work_lock);
496
497 mcryptd_arm_flusher(cstate, delay);
498}
499
500static int sha1_mb_update(struct ahash_request *areq)
501{
502 struct mcryptd_hash_request_ctx *rctx =
503 container_of(areq, struct mcryptd_hash_request_ctx, areq);
504 struct mcryptd_alg_cstate *cstate =
505 this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
506
507 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
508 struct sha1_hash_ctx *sha_ctx;
509 int ret = 0, nbytes;
510
511
512 /* sanity check */
513 if (rctx->tag.cpu != smp_processor_id()) {
514 pr_err("mcryptd error: cpu clash\n");
515 goto done;
516 }
517
518 /* need to init context */
519 req_ctx_init(rctx, areq);
520
521 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
522
523 if (nbytes < 0) {
524 ret = nbytes;
525 goto done;
526 }
527
528 if (crypto_ahash_walk_last(&rctx->walk))
529 rctx->flag |= HASH_DONE;
530
531 /* submit */
532 sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
533 sha1_mb_add_list(rctx, cstate);
534 kernel_fpu_begin();
535 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
536 nbytes, HASH_UPDATE);
537 kernel_fpu_end();
538
539 /* check if anything is returned */
540 if (!sha_ctx)
541 return -EINPROGRESS;
542
543 if (sha_ctx->error) {
544 ret = sha_ctx->error;
545 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
546 goto done;
547 }
548
549 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
550 ret = sha_finish_walk(&rctx, cstate, false);
551
552 if (!rctx)
553 return -EINPROGRESS;
554done:
555 sha_complete_job(rctx, cstate, ret);
556 return ret;
557}
558
559static int sha1_mb_finup(struct ahash_request *areq)
560{
561 struct mcryptd_hash_request_ctx *rctx =
562 container_of(areq, struct mcryptd_hash_request_ctx, areq);
563 struct mcryptd_alg_cstate *cstate =
564 this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
565
566 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
567 struct sha1_hash_ctx *sha_ctx;
568 int ret = 0, flag = HASH_UPDATE, nbytes;
569
570 /* sanity check */
571 if (rctx->tag.cpu != smp_processor_id()) {
572 pr_err("mcryptd error: cpu clash\n");
573 goto done;
574 }
575
576 /* need to init context */
577 req_ctx_init(rctx, areq);
578
579 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
580
581 if (nbytes < 0) {
582 ret = nbytes;
583 goto done;
584 }
585
586 if (crypto_ahash_walk_last(&rctx->walk)) {
587 rctx->flag |= HASH_DONE;
588 flag = HASH_LAST;
589 }
590
591 /* submit */
592 rctx->flag |= HASH_FINAL;
593 sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
594 sha1_mb_add_list(rctx, cstate);
595
596 kernel_fpu_begin();
597 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
598 nbytes, flag);
599 kernel_fpu_end();
600
601 /* check if anything is returned */
602 if (!sha_ctx)
603 return -EINPROGRESS;
604
605 if (sha_ctx->error) {
606 ret = sha_ctx->error;
607 goto done;
608 }
609
610 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
611 ret = sha_finish_walk(&rctx, cstate, false);
612 if (!rctx)
613 return -EINPROGRESS;
614done:
615 sha_complete_job(rctx, cstate, ret);
616 return ret;
617}
618
619static int sha1_mb_final(struct ahash_request *areq)
620{
621 struct mcryptd_hash_request_ctx *rctx =
622 container_of(areq, struct mcryptd_hash_request_ctx, areq);
623 struct mcryptd_alg_cstate *cstate =
624 this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
625
626 struct sha1_hash_ctx *sha_ctx;
627 int ret = 0;
628 u8 data;
629
630 /* sanity check */
631 if (rctx->tag.cpu != smp_processor_id()) {
632 pr_err("mcryptd error: cpu clash\n");
633 goto done;
634 }
635
636 /* need to init context */
637 req_ctx_init(rctx, areq);
638
639 rctx->flag |= HASH_DONE | HASH_FINAL;
640
641 sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
642 /* flag HASH_FINAL and 0 data size */
643 sha1_mb_add_list(rctx, cstate);
644 kernel_fpu_begin();
645 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
646 HASH_LAST);
647 kernel_fpu_end();
648
649 /* check if anything is returned */
650 if (!sha_ctx)
651 return -EINPROGRESS;
652
653 if (sha_ctx->error) {
654 ret = sha_ctx->error;
655 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
656 goto done;
657 }
658
659 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
660 ret = sha_finish_walk(&rctx, cstate, false);
661 if (!rctx)
662 return -EINPROGRESS;
663done:
664 sha_complete_job(rctx, cstate, ret);
665 return ret;
666}
667
668static int sha1_mb_export(struct ahash_request *areq, void *out)
669{
670 struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
671
672 memcpy(out, sctx, sizeof(*sctx));
673
674 return 0;
675}
676
677static int sha1_mb_import(struct ahash_request *areq, const void *in)
678{
679 struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
680
681 memcpy(sctx, in, sizeof(*sctx));
682
683 return 0;
684}
685
686static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
687{
688 struct mcryptd_ahash *mcryptd_tfm;
689 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
690 struct mcryptd_hash_ctx *mctx;
691
692 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
693 CRYPTO_ALG_INTERNAL,
694 CRYPTO_ALG_INTERNAL);
695 if (IS_ERR(mcryptd_tfm))
696 return PTR_ERR(mcryptd_tfm);
697 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
698 mctx->alg_state = &sha1_mb_alg_state;
699 ctx->mcryptd_tfm = mcryptd_tfm;
700 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
701 sizeof(struct ahash_request) +
702 crypto_ahash_reqsize(&mcryptd_tfm->base));
703
704 return 0;
705}
706
707static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
708{
709 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
710
711 mcryptd_free_ahash(ctx->mcryptd_tfm);
712}
713
714static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
715{
716 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
717 sizeof(struct ahash_request) +
718 sizeof(struct sha1_hash_ctx));
719
720 return 0;
721}
722
723static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
724{
725 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
726
727 mcryptd_free_ahash(ctx->mcryptd_tfm);
728}
729
730static struct ahash_alg sha1_mb_areq_alg = {
731 .init = sha1_mb_init,
732 .update = sha1_mb_update,
733 .final = sha1_mb_final,
734 .finup = sha1_mb_finup,
735 .export = sha1_mb_export,
736 .import = sha1_mb_import,
737 .halg = {
738 .digestsize = SHA1_DIGEST_SIZE,
739 .statesize = sizeof(struct sha1_hash_ctx),
740 .base = {
741 .cra_name = "__sha1-mb",
742 .cra_driver_name = "__intel_sha1-mb",
743 .cra_priority = 100,
744 /*
745 * use ASYNC flag as some buffers in multi-buffer
746 * algo may not have completed before hashing thread
747 * sleep
748 */
749 .cra_flags = CRYPTO_ALG_ASYNC |
750 CRYPTO_ALG_INTERNAL,
751 .cra_blocksize = SHA1_BLOCK_SIZE,
752 .cra_module = THIS_MODULE,
753 .cra_list = LIST_HEAD_INIT
754 (sha1_mb_areq_alg.halg.base.cra_list),
755 .cra_init = sha1_mb_areq_init_tfm,
756 .cra_exit = sha1_mb_areq_exit_tfm,
757 .cra_ctxsize = sizeof(struct sha1_hash_ctx),
758 }
759 }
760};
761
762static int sha1_mb_async_init(struct ahash_request *req)
763{
764 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
765 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
766 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
767 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
768
769 memcpy(mcryptd_req, req, sizeof(*req));
770 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
771 return crypto_ahash_init(mcryptd_req);
772}
773
774static int sha1_mb_async_update(struct ahash_request *req)
775{
776 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
777
778 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
779 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
780 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
781
782 memcpy(mcryptd_req, req, sizeof(*req));
783 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
784 return crypto_ahash_update(mcryptd_req);
785}
786
787static int sha1_mb_async_finup(struct ahash_request *req)
788{
789 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
790
791 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
792 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
793 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
794
795 memcpy(mcryptd_req, req, sizeof(*req));
796 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
797 return crypto_ahash_finup(mcryptd_req);
798}
799
800static int sha1_mb_async_final(struct ahash_request *req)
801{
802 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
803
804 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
805 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
806 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
807
808 memcpy(mcryptd_req, req, sizeof(*req));
809 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
810 return crypto_ahash_final(mcryptd_req);
811}
812
813static int sha1_mb_async_digest(struct ahash_request *req)
814{
815 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
816 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
817 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
818 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
819
820 memcpy(mcryptd_req, req, sizeof(*req));
821 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
822 return crypto_ahash_digest(mcryptd_req);
823}
824
825static int sha1_mb_async_export(struct ahash_request *req, void *out)
826{
827 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
828 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
829 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
830 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
831
832 memcpy(mcryptd_req, req, sizeof(*req));
833 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
834 return crypto_ahash_export(mcryptd_req, out);
835}
836
837static int sha1_mb_async_import(struct ahash_request *req, const void *in)
838{
839 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
840 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
841 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
842 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
843 struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
844 struct mcryptd_hash_request_ctx *rctx;
845 struct ahash_request *areq;
846
847 memcpy(mcryptd_req, req, sizeof(*req));
848 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
849 rctx = ahash_request_ctx(mcryptd_req);
850 areq = &rctx->areq;
851
852 ahash_request_set_tfm(areq, child);
853 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
854 rctx->complete, req);
855
856 return crypto_ahash_import(mcryptd_req, in);
857}
858
859static struct ahash_alg sha1_mb_async_alg = {
860 .init = sha1_mb_async_init,
861 .update = sha1_mb_async_update,
862 .final = sha1_mb_async_final,
863 .finup = sha1_mb_async_finup,
864 .digest = sha1_mb_async_digest,
865 .export = sha1_mb_async_export,
866 .import = sha1_mb_async_import,
867 .halg = {
868 .digestsize = SHA1_DIGEST_SIZE,
869 .statesize = sizeof(struct sha1_hash_ctx),
870 .base = {
871 .cra_name = "sha1",
872 .cra_driver_name = "sha1_mb",
873 /*
874 * Low priority, since with few concurrent hash requests
875 * this is extremely slow due to the flush delay. Users
876 * whose workloads would benefit from this can request
877 * it explicitly by driver name, or can increase its
878 * priority at runtime using NETLINK_CRYPTO.
879 */
880 .cra_priority = 50,
881 .cra_flags = CRYPTO_ALG_ASYNC,
882 .cra_blocksize = SHA1_BLOCK_SIZE,
883 .cra_module = THIS_MODULE,
884 .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
885 .cra_init = sha1_mb_async_init_tfm,
886 .cra_exit = sha1_mb_async_exit_tfm,
887 .cra_ctxsize = sizeof(struct sha1_mb_ctx),
888 .cra_alignmask = 0,
889 },
890 },
891};
892
893static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
894{
895 struct mcryptd_hash_request_ctx *rctx;
896 unsigned long cur_time;
897 unsigned long next_flush = 0;
898 struct sha1_hash_ctx *sha_ctx;
899
900
901 cur_time = jiffies;
902
903 while (!list_empty(&cstate->work_list)) {
904 rctx = list_entry(cstate->work_list.next,
905 struct mcryptd_hash_request_ctx, waiter);
906 if (time_before(cur_time, rctx->tag.expire))
907 break;
908 kernel_fpu_begin();
909 sha_ctx = (struct sha1_hash_ctx *)
910 sha1_ctx_mgr_flush(cstate->mgr);
911 kernel_fpu_end();
912 if (!sha_ctx) {
913 pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
914 break;
915 }
916 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
917 sha_finish_walk(&rctx, cstate, true);
918 sha_complete_job(rctx, cstate, 0);
919 }
920
921 if (!list_empty(&cstate->work_list)) {
922 rctx = list_entry(cstate->work_list.next,
923 struct mcryptd_hash_request_ctx, waiter);
924 /* get the hash context and then flush time */
925 next_flush = rctx->tag.expire;
926 mcryptd_arm_flusher(cstate, get_delay(next_flush));
927 }
928 return next_flush;
929}
930
931static int __init sha1_mb_mod_init(void)
932{
933
934 int cpu;
935 int err;
936 struct mcryptd_alg_cstate *cpu_state;
937
938 /* check for dependent cpu features */
939 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
940 !boot_cpu_has(X86_FEATURE_BMI2))
941 return -ENODEV;
942
943 /* initialize multibuffer structures */
944 sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);
945
946 sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
947 sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
948 sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
949 sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;
950
951 if (!sha1_mb_alg_state.alg_cstate)
952 return -ENOMEM;
953 for_each_possible_cpu(cpu) {
954 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
955 cpu_state->next_flush = 0;
956 cpu_state->next_seq_num = 0;
957 cpu_state->flusher_engaged = false;
958 INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
959 cpu_state->cpu = cpu;
960 cpu_state->alg_state = &sha1_mb_alg_state;
961 cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
962 GFP_KERNEL);
963 if (!cpu_state->mgr)
964 goto err2;
965 sha1_ctx_mgr_init(cpu_state->mgr);
966 INIT_LIST_HEAD(&cpu_state->work_list);
967 spin_lock_init(&cpu_state->work_lock);
968 }
969 sha1_mb_alg_state.flusher = &sha1_mb_flusher;
970
971 err = crypto_register_ahash(&sha1_mb_areq_alg);
972 if (err)
973 goto err2;
974 err = crypto_register_ahash(&sha1_mb_async_alg);
975 if (err)
976 goto err1;
977
978
979 return 0;
980err1:
981 crypto_unregister_ahash(&sha1_mb_areq_alg);
982err2:
983 for_each_possible_cpu(cpu) {
984 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
985 kfree(cpu_state->mgr);
986 }
987 free_percpu(sha1_mb_alg_state.alg_cstate);
988 return -ENODEV;
989}
990
991static void __exit sha1_mb_mod_fini(void)
992{
993 int cpu;
994 struct mcryptd_alg_cstate *cpu_state;
995
996 crypto_unregister_ahash(&sha1_mb_async_alg);
997 crypto_unregister_ahash(&sha1_mb_areq_alg);
998 for_each_possible_cpu(cpu) {
999 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
1000 kfree(cpu_state->mgr);
1001 }
1002 free_percpu(sha1_mb_alg_state.alg_cstate);
1003}
1004
1005module_init(sha1_mb_mod_init);
1006module_exit(sha1_mb_mod_fini);
1007
1008MODULE_LICENSE("GPL");
1009MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
1010
1011MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
deleted file mode 100644
index 9454bd16f9f8..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Header file for multi buffer SHA context
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Tim Chen <tim.c.chen@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2014 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef _SHA_MB_CTX_INTERNAL_H
55#define _SHA_MB_CTX_INTERNAL_H
56
57#include "sha1_mb_mgr.h"
58
59#define HASH_UPDATE 0x00
60#define HASH_LAST 0x01
61#define HASH_DONE 0x02
62#define HASH_FINAL 0x04
63
64#define HASH_CTX_STS_IDLE 0x00
65#define HASH_CTX_STS_PROCESSING 0x01
66#define HASH_CTX_STS_LAST 0x02
67#define HASH_CTX_STS_COMPLETE 0x04
68
69enum hash_ctx_error {
70 HASH_CTX_ERROR_NONE = 0,
71 HASH_CTX_ERROR_INVALID_FLAGS = -1,
72 HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
73 HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
74
75#ifdef HASH_CTX_DEBUG
76 HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
77#endif
78};
79
80
81#define hash_ctx_user_data(ctx) ((ctx)->user_data)
82#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
83#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
84#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
85#define hash_ctx_status(ctx) ((ctx)->status)
86#define hash_ctx_error(ctx) ((ctx)->error)
87#define hash_ctx_init(ctx) \
88 do { \
89 (ctx)->error = HASH_CTX_ERROR_NONE; \
90 (ctx)->status = HASH_CTX_STS_COMPLETE; \
91 } while (0)
92
93
94/* Hash Constants and Typedefs */
95#define SHA1_DIGEST_LENGTH 5
96#define SHA1_LOG2_BLOCK_SIZE 6
97
98#define SHA1_PADLENGTHFIELD_SIZE 8
99
100#ifdef SHA_MB_DEBUG
101#define assert(expr) \
102do { \
103 if (unlikely(!(expr))) { \
104 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
105 #expr, __FILE__, __func__, __LINE__); \
106 } \
107} while (0)
108#else
109#define assert(expr) do {} while (0)
110#endif
111
112struct sha1_ctx_mgr {
113 struct sha1_mb_mgr mgr;
114};
115
116/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */
117
118struct sha1_hash_ctx {
119 /* Must be at struct offset 0 */
120 struct job_sha1 job;
121 /* status flag */
122 int status;
123 /* error flag */
124 int error;
125
126 uint64_t total_length;
127 const void *incoming_buffer;
128 uint32_t incoming_buffer_length;
129 uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2];
130 uint32_t partial_block_buffer_length;
131 void *user_data;
132};
133
134#endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
deleted file mode 100644
index 08ad1a9acfd7..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 * Header file for multi buffer SHA1 algorithm manager
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * James Guilford <james.guilford@intel.com>
22 * Tim Chen <tim.c.chen@linux.intel.com>
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2014 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54#ifndef __SHA_MB_MGR_H
55#define __SHA_MB_MGR_H
56
57
58#include <linux/types.h>
59
60#define NUM_SHA1_DIGEST_WORDS 5
61
62enum job_sts { STS_UNKNOWN = 0,
63 STS_BEING_PROCESSED = 1,
64 STS_COMPLETED = 2,
65 STS_INTERNAL_ERROR = 3,
66 STS_ERROR = 4
67};
68
69struct job_sha1 {
70 u8 *buffer;
71 u32 len;
72 u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32);
73 enum job_sts status;
74 void *user_data;
75};
76
77/* SHA1 out-of-order scheduler */
78
79/* typedef uint32_t sha1_digest_array[5][8]; */
80
81struct sha1_args_x8 {
82 uint32_t digest[5][8];
83 uint8_t *data_ptr[8];
84};
85
86struct sha1_lane_data {
87 struct job_sha1 *job_in_lane;
88};
89
90struct sha1_mb_mgr {
91 struct sha1_args_x8 args;
92
93 uint32_t lens[8];
94
95 /* each byte is index (0...7) of unused lanes */
96 uint64_t unused_lanes;
97 /* byte 4 is set to FF as a flag */
98 struct sha1_lane_data ldata[8];
99};
100
101
102#define SHA1_MB_MGR_NUM_LANES_AVX2 8
103
104void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state);
105struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state,
106 struct job_sha1 *job);
107struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state);
108struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state);
109
110#endif
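
The interface above implies a simple driver pattern: jobs are submitted until the eight lanes fill up, at which point the submit call itself starts returning finished jobs, and any stragglers are drained with the flush call. A minimal C sketch of that pattern, using only the four functions and two structures declared above (the hash_many() wrapper itself is illustrative, not part of the tree):

#include <stddef.h>
#include "sha1_mb_mgr.h"

static void hash_many(struct sha1_mb_mgr *mgr, struct job_sha1 *jobs, size_t njobs)
{
	struct job_sha1 *done;
	size_t i;

	sha1_mb_mgr_init_avx2(mgr);

	for (i = 0; i < njobs; i++) {
		/* Returns NULL while a free lane remains; otherwise it runs
		 * the x8 kernel and hands back whichever job finished. */
		done = sha1_mb_mgr_submit_avx2(mgr, &jobs[i]);
		if (done && done->status == STS_COMPLETED)
			;	/* done->result_digest[] holds the updated state */
	}

	/* Drain the lanes that are still in flight. */
	while ((done = sha1_mb_mgr_flush_avx2(mgr)) != NULL)
		;	/* consume done->result_digest[] */
}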
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
deleted file mode 100644
index 86688c6e7a25..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,287 +0,0 @@
1/*
2 * Header file for multi buffer SHA1 algorithm data structure
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * James Guilford <james.guilford@intel.com>
22 * Tim Chen <tim.c.chen@linux.intel.com>
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2014 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54
55# Macros for defining data structures
56
57# Usage example
58
59#START_FIELDS # JOB_AES
60### name size align
61#FIELD _plaintext, 8, 8 # pointer to plaintext
62#FIELD _ciphertext, 8, 8 # pointer to ciphertext
63#FIELD _IV, 16, 8 # IV
64#FIELD _keys, 8, 8 # pointer to keys
65#FIELD _len, 4, 4 # length in bytes
66#FIELD _status, 4, 4 # status enumeration
67#FIELD _user_data, 8, 8 # pointer to user data
68#UNION _union, size1, align1, \
69# size2, align2, \
70# size3, align3, \
71# ...
72#END_FIELDS
73#%assign _JOB_AES_size _FIELD_OFFSET
74#%assign _JOB_AES_align _STRUCT_ALIGN
75
76#########################################################################
77
78# Alternate "struc-like" syntax:
79# STRUCT job_aes2
80# RES_Q .plaintext, 1
81# RES_Q .ciphertext, 1
82# RES_DQ .IV, 1
83# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
84# RES_U .union, size1, align1, \
85# size2, align2, \
86# ...
87# ENDSTRUCT
88# # Following only needed if nesting
89# %assign job_aes2_size _FIELD_OFFSET
90# %assign job_aes2_align _STRUCT_ALIGN
91#
92# RES_* macros take a name, a count and an optional alignment.
93 # The count is in terms of the base size of the macro, and the
94# default alignment is the base size.
95# The macros are:
96# Macro Base size
97# RES_B 1
98# RES_W 2
99# RES_D 4
100# RES_Q 8
101# RES_DQ 16
102# RES_Y 32
103# RES_Z 64
104#
105 # RES_U defines a union. Its arguments are a name and two or more
106# pairs of "size, alignment"
107#
108# The two assigns are only needed if this structure is being nested
109# within another. Even if the assigns are not done, one can still use
110# STRUCT_NAME_size as the size of the structure.
111#
112# Note that for nesting, you still need to assign to STRUCT_NAME_size.
113#
114# The differences between this and using "struc" directly are that each
115# type is implicitly aligned to its natural length (although this can be
116# over-ridden with an explicit third parameter), and that the structure
117# is padded at the end to its overall alignment.
118#
119
120#########################################################################
121
122#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_
123#define _SHA1_MB_MGR_DATASTRUCT_ASM_
124
125## START_FIELDS
126.macro START_FIELDS
127 _FIELD_OFFSET = 0
128 _STRUCT_ALIGN = 0
129.endm
130
131## FIELD name size align
132.macro FIELD name size align
133 _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
134 \name = _FIELD_OFFSET
135 _FIELD_OFFSET = _FIELD_OFFSET + (\size)
136.if (\align > _STRUCT_ALIGN)
137 _STRUCT_ALIGN = \align
138.endif
139.endm
140
141## END_FIELDS
142.macro END_FIELDS
143 _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
144.endm
145
146########################################################################
147
148.macro STRUCT p1
149START_FIELDS
150.struc \p1
151.endm
152
153.macro ENDSTRUCT
154 tmp = _FIELD_OFFSET
155 END_FIELDS
156 tmp = (_FIELD_OFFSET - %%tmp)
157.if (tmp > 0)
158 .lcomm tmp
159.endif
160.endstruc
161.endm
162
163## RES_int name size align
164.macro RES_int p1 p2 p3
165 name = \p1
166 size = \p2
167 align = .\p3
168
169 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
170.align align
171.lcomm name size
172 _FIELD_OFFSET = _FIELD_OFFSET + (size)
173.if (align > _STRUCT_ALIGN)
174 _STRUCT_ALIGN = align
175.endif
176.endm
177
178
179
180# macro RES_B name, size [, align]
181.macro RES_B _name, _size, _align=1
182RES_int _name _size _align
183.endm
184
185# macro RES_W name, size [, align]
186.macro RES_W _name, _size, _align=2
187RES_int _name 2*(_size) _align
188.endm
189
190# macro RES_D name, size [, align]
191.macro RES_D _name, _size, _align=4
192RES_int _name 4*(_size) _align
193.endm
194
195# macro RES_Q name, size [, align]
196.macro RES_Q _name, _size, _align=8
197RES_int _name 8*(_size) _align
198.endm
199
200# macro RES_DQ name, size [, align]
201.macro RES_DQ _name, _size, _align=16
202RES_int _name 16*(_size) _align
203.endm
204
205# macro RES_Y name, size [, align]
206.macro RES_Y _name, _size, _align=32
207RES_int _name 32*(_size) _align
208.endm
209
210# macro RES_Z name, size [, align]
211.macro RES_Z _name, _size, _align=64
212RES_int _name 64*(_size) _align
213.endm
214
215
216#endif
217
218########################################################################
219#### Define constants
220########################################################################
221
222########################################################################
223#### Define SHA1 Out Of Order Data Structures
224########################################################################
225
226START_FIELDS # LANE_DATA
227### name size align
228FIELD _job_in_lane, 8, 8 # pointer to job object
229END_FIELDS
230
231_LANE_DATA_size = _FIELD_OFFSET
232_LANE_DATA_align = _STRUCT_ALIGN
233
234########################################################################
235
236START_FIELDS # SHA1_ARGS_X8
237### name size align
238FIELD _digest, 4*5*8, 16 # transposed digest
239FIELD _data_ptr, 8*8, 8 # array of pointers to data
240END_FIELDS
241
242_SHA1_ARGS_X4_size = _FIELD_OFFSET
243_SHA1_ARGS_X4_align = _STRUCT_ALIGN
244_SHA1_ARGS_X8_size = _FIELD_OFFSET
245_SHA1_ARGS_X8_align = _STRUCT_ALIGN
246
247########################################################################
248
249START_FIELDS # MB_MGR
250### name size align
251FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align
252FIELD _lens, 4*8, 8
253FIELD _unused_lanes, 8, 8
254FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
255END_FIELDS
256
257_MB_MGR_size = _FIELD_OFFSET
258_MB_MGR_align = _STRUCT_ALIGN
259
260_args_digest = _args + _digest
261_args_data_ptr = _args + _data_ptr
262
263
264########################################################################
265#### Define constants
266########################################################################
267
268#define STS_UNKNOWN 0
269#define STS_BEING_PROCESSED 1
270#define STS_COMPLETED 2
271
272########################################################################
273#### Define JOB_SHA1 structure
274########################################################################
275
276START_FIELDS # JOB_SHA1
277
278### name size align
279FIELD _buffer, 8, 8 # pointer to buffer
280FIELD _len, 4, 4 # length in bytes
281FIELD _result_digest, 5*4, 32 # Digest (output)
282FIELD _status, 4, 4
283FIELD _user_data, 8, 8
284END_FIELDS
285
286_JOB_SHA1_size = _FIELD_OFFSET
287_JOB_SHA1_align = _STRUCT_ALIGN
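
The FIELD/END_FIELDS macros reproduce by hand what a C compiler does for struct layout: round the running offset up to each field's alignment, remember the largest alignment seen, and pad the final size to it. A small user-space C sketch of the same arithmetic (the field() helper and main() are illustrative only), which reproduces the JOB_SHA1 offsets defined just above:

#include <stdio.h>

static unsigned long field_offset, struct_align;

/* Mirror of the FIELD macro: align, record, advance, track max alignment. */
static unsigned long field(unsigned long size, unsigned long align)
{
	unsigned long off;

	field_offset = (field_offset + align - 1) & ~(align - 1);
	off = field_offset;
	field_offset += size;
	if (align > struct_align)
		struct_align = align;
	return off;
}

int main(void)
{
	printf("_buffer        %lu\n", field(8, 8));	/* 0  */
	printf("_len           %lu\n", field(4, 4));	/* 8  */
	printf("_result_digest %lu\n", field(5 * 4, 32));	/* 32 */
	printf("_status        %lu\n", field(4, 4));	/* 52 */
	printf("_user_data     %lu\n", field(8, 8));	/* 56 */

	/* END_FIELDS: pad the total size to the largest alignment seen. */
	field_offset = (field_offset + struct_align - 1) & ~(struct_align - 1);
	printf("_JOB_SHA1_size %lu\n", field_offset);	/* 64 */
	return 0;
}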
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7cfba738f104..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,304 +0,0 @@
1/*
2 * Flush routine for SHA1 multibuffer
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * James Guilford <james.guilford@intel.com>
22 * Tim Chen <tim.c.chen@linux.intel.com>
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2014 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha1_mb_mgr_datastruct.S"
57
58
59.extern sha1_x8_avx2
60
61# LINUX register definitions
62#define arg1 %rdi
63#define arg2 %rsi
64
65# Common definitions
66#define state arg1
67#define job arg2
68#define len2 arg2
69
70# idx must be a register not clobbered by sha1_x8_avx2
71#define idx %r8
72#define DWORD_idx %r8d
73
74#define unused_lanes %rbx
75#define lane_data %rbx
76#define tmp2 %rbx
77#define tmp2_w %ebx
78
79#define job_rax %rax
80#define tmp1 %rax
81#define size_offset %rax
82#define tmp %rax
83#define start_offset %rax
84
85#define tmp3 %arg1
86
87#define extra_blocks %arg2
88#define p %arg2
89
90.macro LABEL prefix n
91\prefix\n\():
92.endm
93
94.macro JNE_SKIP i
95jne skip_\i
96.endm
97
98.altmacro
99.macro SET_OFFSET _offset
100offset = \_offset
101.endm
102.noaltmacro
103
104# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
105 # arg 1 : rdi : state
106ENTRY(sha1_mb_mgr_flush_avx2)
107 FRAME_BEGIN
108 push %rbx
109
110 # If bit (32+3) is set, then all lanes are empty
111 mov _unused_lanes(state), unused_lanes
112 bt $32+3, unused_lanes
113 jc return_null
114
115 # find a lane with a non-null job
116 xor idx, idx
117 offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
118 cmpq $0, offset(state)
119 cmovne one(%rip), idx
120 offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
121 cmpq $0, offset(state)
122 cmovne two(%rip), idx
123 offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
124 cmpq $0, offset(state)
125 cmovne three(%rip), idx
126 offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
127 cmpq $0, offset(state)
128 cmovne four(%rip), idx
129 offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
130 cmpq $0, offset(state)
131 cmovne five(%rip), idx
132 offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
133 cmpq $0, offset(state)
134 cmovne six(%rip), idx
135 offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
136 cmpq $0, offset(state)
137 cmovne seven(%rip), idx
138
139 # copy idx to empty lanes
140copy_lane_data:
141 offset = (_args + _data_ptr)
142 mov offset(state,idx,8), tmp
143
144 I = 0
145.rep 8
146 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
147 cmpq $0, offset(state)
148.altmacro
149 JNE_SKIP %I
150 offset = (_args + _data_ptr + 8*I)
151 mov tmp, offset(state)
152 offset = (_lens + 4*I)
153 movl $0xFFFFFFFF, offset(state)
154LABEL skip_ %I
155 I = (I+1)
156.noaltmacro
157.endr
158
159 # Find min length
160 vmovdqu _lens+0*16(state), %xmm0
161 vmovdqu _lens+1*16(state), %xmm1
162
163 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
164 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
165 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
166 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
167 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
168
169 vmovd %xmm2, DWORD_idx
170 mov idx, len2
171 and $0xF, idx
172 shr $4, len2
173 jz len_is_0
174
175 vpand clear_low_nibble(%rip), %xmm2, %xmm2
176 vpshufd $0, %xmm2, %xmm2
177
178 vpsubd %xmm2, %xmm0, %xmm0
179 vpsubd %xmm2, %xmm1, %xmm1
180
181 vmovdqu %xmm0, _lens+0*16(state)
182 vmovdqu %xmm1, _lens+1*16(state)
183
184 # "state" and "args" are the same address, arg1
185 # len is arg2
186 call sha1_x8_avx2
187 # state and idx are intact
188
189
190len_is_0:
191 # process completed job "idx"
192 imul $_LANE_DATA_size, idx, lane_data
193 lea _ldata(state, lane_data), lane_data
194
195 mov _job_in_lane(lane_data), job_rax
196 movq $0, _job_in_lane(lane_data)
197 movl $STS_COMPLETED, _status(job_rax)
198 mov _unused_lanes(state), unused_lanes
199 shl $4, unused_lanes
200 or idx, unused_lanes
201 mov unused_lanes, _unused_lanes(state)
202
203 movl $0xFFFFFFFF, _lens(state, idx, 4)
204
205 vmovd _args_digest(state , idx, 4) , %xmm0
206 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
207 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
208 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
209 movl _args_digest+4*32(state, idx, 4), tmp2_w
210
211 vmovdqu %xmm0, _result_digest(job_rax)
212 offset = (_result_digest + 1*16)
213 mov tmp2_w, offset(job_rax)
214
215return:
216 pop %rbx
217 FRAME_END
218 ret
219
220return_null:
221 xor job_rax, job_rax
222 jmp return
223ENDPROC(sha1_mb_mgr_flush_avx2)
224
225
226#################################################################
227
228.align 16
229ENTRY(sha1_mb_mgr_get_comp_job_avx2)
230 push %rbx
231
232 ## if bit 32+3 is set, then all lanes are empty
233 mov _unused_lanes(state), unused_lanes
234 bt $(32+3), unused_lanes
235 jc .return_null
236
237 # Find min length
238 vmovdqu _lens(state), %xmm0
239 vmovdqu _lens+1*16(state), %xmm1
240
241 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
242 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
243 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
244 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
245 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
246
247 vmovd %xmm2, DWORD_idx
248 test $~0xF, idx
249 jnz .return_null
250
251 # process completed job "idx"
252 imul $_LANE_DATA_size, idx, lane_data
253 lea _ldata(state, lane_data), lane_data
254
255 mov _job_in_lane(lane_data), job_rax
256 movq $0, _job_in_lane(lane_data)
257 movl $STS_COMPLETED, _status(job_rax)
258 mov _unused_lanes(state), unused_lanes
259 shl $4, unused_lanes
260 or idx, unused_lanes
261 mov unused_lanes, _unused_lanes(state)
262
263 movl $0xFFFFFFFF, _lens(state, idx, 4)
264
265 vmovd _args_digest(state, idx, 4), %xmm0
266 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
267 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
268 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
269 movl _args_digest+4*32(state, idx, 4), tmp2_w
270
271 vmovdqu %xmm0, _result_digest(job_rax)
272 movl tmp2_w, _result_digest+1*16(job_rax)
273
274 pop %rbx
275
276 ret
277
278.return_null:
279 xor job_rax, job_rax
280 pop %rbx
281 ret
282ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
283
284.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
285.align 16
286clear_low_nibble:
287.octa 0x000000000000000000000000FFFFFFF0
288
289.section .rodata.cst8, "aM", @progbits, 8
290.align 8
291one:
292.quad 1
293two:
294.quad 2
295three:
296.quad 3
297four:
298.quad 4
299five:
300.quad 5
301six:
302.quad 6
303seven:
304.quad 7
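
Both routines above lean on one encoding trick: each _lens slot holds (remaining_blocks << 4) | lane, and idle lanes are parked at 0xFFFFFFFF, so a single unsigned minimum over the eight slots yields the shortest lane and its index at once; that is all the vpminud/vpalignr tree computes. A scalar C equivalent (illustrative only, not the in-tree code):

#include <stdint.h>

static uint32_t min_len_lane(const uint32_t lens[8])
{
	uint32_t m = lens[0];
	int i;

	for (i = 1; i < 8; i++)
		if (lens[i] < m)
			m = lens[i];

	/*
	 * Each entry is (blocks << 4) | lane, and idle lanes hold 0xFFFFFFFF,
	 * so the minimum carries both pieces of information:
	 *   low nibble = index of the shortest lane,
	 *   upper bits = number of whole blocks it still has to hash.
	 * The caller recovers them with: lane = m & 0xF; blocks = m >> 4;
	 */
	return m;
}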
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
deleted file mode 100644
index d2add0d35f43..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Initialization code for multi buffer SHA1 algorithm for AVX2
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Tim Chen <tim.c.chen@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2014 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include "sha1_mb_mgr.h"
55
56void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
57{
58 unsigned int j;
59 state->unused_lanes = 0xF76543210ULL;
60 for (j = 0; j < 8; j++) {
61 state->lens[j] = 0xFFFFFFFF;
62 state->ldata[j].job_in_lane = NULL;
63 }
64}
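
The 0xF76543210 initializer treats unused_lanes as a small stack of nibbles: the low nibble is the next free lane, and the ninth nibble, 0xF, is a sentinel that starts out in bits 32..35, which is what the bt $(32+3) test in the flush and get_comp_job routines checks for. A C sketch of the push/pop convention the assembly uses (helper names are illustrative):

#include <stdint.h>

static unsigned int pop_lane(uint64_t *unused_lanes)
{
	unsigned int lane = *unused_lanes & 0xF;	/* next free lane index */

	*unused_lanes >>= 4;				/* pop it off the stack */
	return lane;
}

static void push_lane(uint64_t *unused_lanes, unsigned int lane)
{
	*unused_lanes = (*unused_lanes << 4) | lane;	/* lane is free again */
}

/*
 * With all eight lanes busy only the sentinel remains (value 0xF), which is
 * the "cmp $0xF" test in the submit path; with all lanes free the sentinel
 * sits back at bits 32..35, which is the "bt $(32+3)" test in flush.
 */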
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
deleted file mode 100644
index 7a93b1c0d69a..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * Buffer submit code for multi buffer SHA1 algorithm
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * James Guilford <james.guilford@intel.com>
22 * Tim Chen <tim.c.chen@linux.intel.com>
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2014 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54
55#include <linux/linkage.h>
56#include <asm/frame.h>
57#include "sha1_mb_mgr_datastruct.S"
58
59
60.extern sha1_x8_avx2
61
62# LINUX register definitions
63arg1 = %rdi
64arg2 = %rsi
65size_offset = %rcx
66tmp2 = %rcx
67extra_blocks = %rdx
68
69# Common definitions
70#define state arg1
71#define job %rsi
72#define len2 arg2
73#define p2 arg2
74
75 # idx must be a register not clobbered by sha1_x8_avx2
76idx = %r8
77DWORD_idx = %r8d
78last_len = %r8
79
80p = %r11
81start_offset = %r11
82
83unused_lanes = %rbx
84BYTE_unused_lanes = %bl
85
86job_rax = %rax
87len = %rax
88DWORD_len = %eax
89
90lane = %r12
91tmp3 = %r12
92
93tmp = %r9
94DWORD_tmp = %r9d
95
96lane_data = %r10
97
98 # JOB* sha1_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
99 # arg 1 : rdi : state
100 # arg 2 : rsi : job
101ENTRY(sha1_mb_mgr_submit_avx2)
102 FRAME_BEGIN
103 push %rbx
104 push %r12
105
106 mov _unused_lanes(state), unused_lanes
107 mov unused_lanes, lane
108 and $0xF, lane
109 shr $4, unused_lanes
110 imul $_LANE_DATA_size, lane, lane_data
111 movl $STS_BEING_PROCESSED, _status(job)
112 lea _ldata(state, lane_data), lane_data
113 mov unused_lanes, _unused_lanes(state)
114 movl _len(job), DWORD_len
115
116 mov job, _job_in_lane(lane_data)
117 shl $4, len
118 or lane, len
119
120 movl DWORD_len, _lens(state , lane, 4)
121
122 # Load digest words from result_digest
123 vmovdqu _result_digest(job), %xmm0
124 mov _result_digest+1*16(job), DWORD_tmp
125 vmovd %xmm0, _args_digest(state, lane, 4)
126 vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
127 vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
128 vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
129 movl DWORD_tmp, _args_digest+4*32(state , lane, 4)
130
131 mov _buffer(job), p
132 mov p, _args_data_ptr(state, lane, 8)
133
134 cmp $0xF, unused_lanes
135 jne return_null
136
137start_loop:
138 # Find min length
139 vmovdqa _lens(state), %xmm0
140 vmovdqa _lens+1*16(state), %xmm1
141
142 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
143 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
144 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
145 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
146 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
147
148 vmovd %xmm2, DWORD_idx
149 mov idx, len2
150 and $0xF, idx
151 shr $4, len2
152 jz len_is_0
153
154 vpand clear_low_nibble(%rip), %xmm2, %xmm2
155 vpshufd $0, %xmm2, %xmm2
156
157 vpsubd %xmm2, %xmm0, %xmm0
158 vpsubd %xmm2, %xmm1, %xmm1
159
160 vmovdqa %xmm0, _lens + 0*16(state)
161 vmovdqa %xmm1, _lens + 1*16(state)
162
163
164 # "state" and "args" are the same address, arg1
165 # len is arg2
166 call sha1_x8_avx2
167
168 # state and idx are intact
169
170len_is_0:
171 # process completed job "idx"
172 imul $_LANE_DATA_size, idx, lane_data
173 lea _ldata(state, lane_data), lane_data
174
175 mov _job_in_lane(lane_data), job_rax
176 mov _unused_lanes(state), unused_lanes
177 movq $0, _job_in_lane(lane_data)
178 movl $STS_COMPLETED, _status(job_rax)
179 shl $4, unused_lanes
180 or idx, unused_lanes
181 mov unused_lanes, _unused_lanes(state)
182
183 movl $0xFFFFFFFF, _lens(state, idx, 4)
184
185 vmovd _args_digest(state, idx, 4), %xmm0
186 vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
187 vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
188 vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
189 movl _args_digest+4*32(state, idx, 4), DWORD_tmp
190
191 vmovdqu %xmm0, _result_digest(job_rax)
192 movl DWORD_tmp, _result_digest+1*16(job_rax)
193
194return:
195 pop %r12
196 pop %rbx
197 FRAME_END
198 ret
199
200return_null:
201 xor job_rax, job_rax
202 jmp return
203
204ENDPROC(sha1_mb_mgr_submit_avx2)
205
206.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
207.align 16
208clear_low_nibble:
209 .octa 0x000000000000000000000000FFFFFFF0
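
Put together, the submit path above is: pop a free lane, drop the job's length, buffer pointer and transposed digest into that lane, and only once the free-lane stack is down to the 0xF sentinel fall through to the shared x8 kernel. A C-level sketch of the lane-fill part, assuming the declarations from sha1_mb_mgr.h and the pop_lane() helper sketched after the init routine above (submit_fill_lane() is an illustrative name, not the in-tree symbol):

static int submit_fill_lane(struct sha1_mb_mgr *state, struct job_sha1 *job)
{
	unsigned int lane = pop_lane(&state->unused_lanes);
	int w;

	job->status = STS_BEING_PROCESSED;
	state->ldata[lane].job_in_lane = job;
	state->lens[lane] = (job->len << 4) | lane;	/* blocks << 4 | lane */
	state->args.data_ptr[lane] = job->buffer;

	/* digests are stored transposed: word w of lane l is digest[w][l] */
	for (w = 0; w < NUM_SHA1_DIGEST_WORDS; w++)
		state->args.digest[w][lane] = job->result_digest[w];

	/*
	 * Only when the free-lane stack is down to the 0xF sentinel does the
	 * routine fall through to the vpminud reduction, call sha1_x8_avx2
	 * for the shortest lane's block count, and retire that lane's job.
	 */
	return state->unused_lanes == 0xF;
}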
diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
deleted file mode 100644
index 20f77aa633de..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+++ /dev/null
@@ -1,492 +0,0 @@
1/*
2 * Multi-buffer SHA1 algorithm hash compute routine
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * James Guilford <james.guilford@intel.com>
22 * Tim Chen <tim.c.chen@linux.intel.com>
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2014 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54
55#include <linux/linkage.h>
56#include "sha1_mb_mgr_datastruct.S"
57
58## code to compute eight-lane SHA1 using 256-bit AVX2 registers
59## outer calling routine takes care of save and restore of XMM registers
60
61## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15# ymm0-15
62##
63## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
64## Linux preserves: rdi rbp r8
65##
66## clobbers ymm0-15
67
68
69# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
70# "transpose" data in {r0...r7} using temps {t0...t1}
71# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
72# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
73# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
74# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
75# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
76# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
77# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
78# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
79# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
80#
81# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
82# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
83# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
84# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
85# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
86# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
87# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
88# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
89# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
90#
91
92.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
93 # process top half (r0..r3) {a...d}
94 vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
95 vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
96 vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
97 vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
98 vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
99 vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
100 vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
101 vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
102
103 # use r2 in place of t0
104 # process bottom half (r4..r7) {e...h}
105 vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
106 vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
107 vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
108 vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
109 vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
110 vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
111 vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
112 vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
113
114 vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
115 vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
116 vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
117 vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
118 vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
119 vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
120 vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
121 vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
122
123.endm
124##
125## Magic functions defined in FIPS 180-1
126##
127# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D)))
128.macro MAGIC_F0 regF regB regC regD regT
129 vpxor \regD, \regC, \regF
130 vpand \regB, \regF, \regF
131 vpxor \regD, \regF, \regF
132.endm
133
134# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D)
135.macro MAGIC_F1 regF regB regC regD regT
136 vpxor \regC, \regD, \regF
137 vpxor \regB, \regF, \regF
138.endm
139
140# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D))
141.macro MAGIC_F2 regF regB regC regD regT
142 vpor \regC, \regB, \regF
143 vpand \regC, \regB, \regT
144 vpand \regD, \regF, \regF
145 vpor \regT, \regF, \regF
146.endm
147
148# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D)
149.macro MAGIC_F3 regF regB regC regD regT
150 MAGIC_F1 \regF,\regB,\regC,\regD,\regT
151.endm
152
153# PROLD reg, imm, tmp
154.macro PROLD reg imm tmp
155 vpsrld $(32-\imm), \reg, \tmp
156 vpslld $\imm, \reg, \reg
157 vpor \tmp, \reg, \reg
158.endm
159
160.macro PROLD_nd reg imm tmp src
161 vpsrld $(32-\imm), \src, \tmp
162 vpslld $\imm, \src, \reg
163 vpor \tmp, \reg, \reg
164.endm
165
166.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC
167 vpaddd \immCNT, \regE, \regE
168 vpaddd \memW*32(%rsp), \regE, \regE
169 PROLD_nd \regT, 5, \regF, \regA
170 vpaddd \regT, \regE, \regE
171 \MAGIC \regF, \regB, \regC, \regD, \regT
172 PROLD \regB, 30, \regT
173 vpaddd \regF, \regE, \regE
174.endm
175
176.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC
177 vpaddd \immCNT, \regE, \regE
178 offset = ((\memW - 14) & 15) * 32
179 vmovdqu offset(%rsp), W14
180 vpxor W14, W16, W16
181 offset = ((\memW - 8) & 15) * 32
182 vpxor offset(%rsp), W16, W16
183 offset = ((\memW - 3) & 15) * 32
184 vpxor offset(%rsp), W16, W16
185 vpsrld $(32-1), W16, \regF
186 vpslld $1, W16, W16
187 vpor W16, \regF, \regF
188
189 ROTATE_W
190
191 offset = ((\memW - 0) & 15) * 32
192 vmovdqu \regF, offset(%rsp)
193 vpaddd \regF, \regE, \regE
194 PROLD_nd \regT, 5, \regF, \regA
195 vpaddd \regT, \regE, \regE
196 \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D)
197 PROLD \regB,30, \regT
198 vpaddd \regF, \regE, \regE
199.endm
200
201########################################################################
202########################################################################
203########################################################################
204
205## FRAMESZ plus pushes must be an odd multiple of 8
206YMM_SAVE = (15-15)*32
207FRAMESZ = 32*16 + YMM_SAVE
208_YMM = FRAMESZ - YMM_SAVE
209
210#define VMOVPS vmovups
211
212IDX = %rax
213inp0 = %r9
214inp1 = %r10
215inp2 = %r11
216inp3 = %r12
217inp4 = %r13
218inp5 = %r14
219inp6 = %r15
220inp7 = %rcx
221arg1 = %rdi
222arg2 = %rsi
223RSP_SAVE = %rdx
224
225# ymm0 A
226# ymm1 B
227# ymm2 C
228# ymm3 D
229# ymm4 E
230# ymm5 F AA
231# ymm6 T0 BB
232# ymm7 T1 CC
233# ymm8 T2 DD
234# ymm9 T3 EE
235# ymm10 T4 TMP
236# ymm11 T5 FUN
237# ymm12 T6 K
238# ymm13 T7 W14
239# ymm14 T8 W15
240# ymm15 T9 W16
241
242
243A = %ymm0
244B = %ymm1
245C = %ymm2
246D = %ymm3
247E = %ymm4
248F = %ymm5
249T0 = %ymm6
250T1 = %ymm7
251T2 = %ymm8
252T3 = %ymm9
253T4 = %ymm10
254T5 = %ymm11
255T6 = %ymm12
256T7 = %ymm13
257T8 = %ymm14
258T9 = %ymm15
259
260AA = %ymm5
261BB = %ymm6
262CC = %ymm7
263DD = %ymm8
264EE = %ymm9
265TMP = %ymm10
266FUN = %ymm11
267K = %ymm12
268W14 = %ymm13
269W15 = %ymm14
270W16 = %ymm15
271
272.macro ROTATE_ARGS
273 TMP_ = E
274 E = D
275 D = C
276 C = B
277 B = A
278 A = TMP_
279.endm
280
281.macro ROTATE_W
282TMP_ = W16
283W16 = W15
284W15 = W14
285W14 = TMP_
286.endm
287
288# 8 streams x 5 32bit words per digest x 4 bytes per word
289#define DIGEST_SIZE (8*5*4)
290
291.align 32
292
293# void sha1_x8_avx2(void **input_data, UINT128 *digest, UINT32 size)
294# arg 1 : pointer to array[8] of pointer to input data
295# arg 2 : size (in blocks) ;; assumed to be >= 1
296#
297ENTRY(sha1_x8_avx2)
298
299 # save callee-saved clobbered registers to comply with C function ABI
300 push %r12
301 push %r13
302 push %r14
303 push %r15
304
305 #save rsp
306 mov %rsp, RSP_SAVE
307 sub $FRAMESZ, %rsp
308
309 #align rsp to 32 Bytes
310 and $~0x1F, %rsp
311
312 ## Initialize digests
313 vmovdqu 0*32(arg1), A
314 vmovdqu 1*32(arg1), B
315 vmovdqu 2*32(arg1), C
316 vmovdqu 3*32(arg1), D
317 vmovdqu 4*32(arg1), E
318
319 ## transpose input onto stack
320 mov _data_ptr+0*8(arg1),inp0
321 mov _data_ptr+1*8(arg1),inp1
322 mov _data_ptr+2*8(arg1),inp2
323 mov _data_ptr+3*8(arg1),inp3
324 mov _data_ptr+4*8(arg1),inp4
325 mov _data_ptr+5*8(arg1),inp5
326 mov _data_ptr+6*8(arg1),inp6
327 mov _data_ptr+7*8(arg1),inp7
328
329 xor IDX, IDX
330lloop:
331 vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F
332 I=0
333.rep 2
334 VMOVPS (inp0, IDX), T0
335 VMOVPS (inp1, IDX), T1
336 VMOVPS (inp2, IDX), T2
337 VMOVPS (inp3, IDX), T3
338 VMOVPS (inp4, IDX), T4
339 VMOVPS (inp5, IDX), T5
340 VMOVPS (inp6, IDX), T6
341 VMOVPS (inp7, IDX), T7
342
343 TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
344 vpshufb F, T0, T0
345 vmovdqu T0, (I*8)*32(%rsp)
346 vpshufb F, T1, T1
347 vmovdqu T1, (I*8+1)*32(%rsp)
348 vpshufb F, T2, T2
349 vmovdqu T2, (I*8+2)*32(%rsp)
350 vpshufb F, T3, T3
351 vmovdqu T3, (I*8+3)*32(%rsp)
352 vpshufb F, T4, T4
353 vmovdqu T4, (I*8+4)*32(%rsp)
354 vpshufb F, T5, T5
355 vmovdqu T5, (I*8+5)*32(%rsp)
356 vpshufb F, T6, T6
357 vmovdqu T6, (I*8+6)*32(%rsp)
358 vpshufb F, T7, T7
359 vmovdqu T7, (I*8+7)*32(%rsp)
360 add $32, IDX
361 I = (I+1)
362.endr
363 # save old digests
364 vmovdqu A,AA
365 vmovdqu B,BB
366 vmovdqu C,CC
367 vmovdqu D,DD
368 vmovdqu E,EE
369
370##
371## perform 0-79 steps
372##
373 vmovdqu K00_19(%rip), K
374## do rounds 0...15
375 I = 0
376.rep 16
377 SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
378 ROTATE_ARGS
379 I = (I+1)
380.endr
381
382## do rounds 16...19
383 vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16
384 vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15
385.rep 4
386 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
387 ROTATE_ARGS
388 I = (I+1)
389.endr
390
391## do rounds 20...39
392 vmovdqu K20_39(%rip), K
393.rep 20
394 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
395 ROTATE_ARGS
396 I = (I+1)
397.endr
398
399## do rounds 40...59
400 vmovdqu K40_59(%rip), K
401.rep 20
402 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
403 ROTATE_ARGS
404 I = (I+1)
405.endr
406
407## do rounds 60...79
408 vmovdqu K60_79(%rip), K
409.rep 20
410 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
411 ROTATE_ARGS
412 I = (I+1)
413.endr
414
415 vpaddd AA,A,A
416 vpaddd BB,B,B
417 vpaddd CC,C,C
418 vpaddd DD,D,D
419 vpaddd EE,E,E
420
421 sub $1, arg2
422 jne lloop
423
424 # write out digests
425 vmovdqu A, 0*32(arg1)
426 vmovdqu B, 1*32(arg1)
427 vmovdqu C, 2*32(arg1)
428 vmovdqu D, 3*32(arg1)
429 vmovdqu E, 4*32(arg1)
430
431 # update input pointers
432 add IDX, inp0
433 add IDX, inp1
434 add IDX, inp2
435 add IDX, inp3
436 add IDX, inp4
437 add IDX, inp5
438 add IDX, inp6
439 add IDX, inp7
440 mov inp0, _data_ptr (arg1)
441 mov inp1, _data_ptr + 1*8(arg1)
442 mov inp2, _data_ptr + 2*8(arg1)
443 mov inp3, _data_ptr + 3*8(arg1)
444 mov inp4, _data_ptr + 4*8(arg1)
445 mov inp5, _data_ptr + 5*8(arg1)
446 mov inp6, _data_ptr + 6*8(arg1)
447 mov inp7, _data_ptr + 7*8(arg1)
448
449 ################
450 ## Postamble
451
452 mov RSP_SAVE, %rsp
453
454 # restore callee-saved clobbered registers
455 pop %r15
456 pop %r14
457 pop %r13
458 pop %r12
459
460 ret
461ENDPROC(sha1_x8_avx2)
462
463
464.section .rodata.cst32.K00_19, "aM", @progbits, 32
465.align 32
466K00_19:
467.octa 0x5A8279995A8279995A8279995A827999
468.octa 0x5A8279995A8279995A8279995A827999
469
470.section .rodata.cst32.K20_39, "aM", @progbits, 32
471.align 32
472K20_39:
473.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
474.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
475
476.section .rodata.cst32.K40_59, "aM", @progbits, 32
477.align 32
478K40_59:
479.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
480.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
481
482.section .rodata.cst32.K60_79, "aM", @progbits, 32
483.align 32
484K60_79:
485.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
486.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
487
488.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
489.align 32
490PSHUFFLE_BYTE_FLIP_MASK:
491.octa 0x0c0d0e0f08090a0b0405060700010203
492.octa 0x0c0d0e0f08090a0b0405060700010203
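
The MAGIC_F* macros above are the round functions defined in FIPS 180-1, simply evaluated on eight message schedules at once in YMM registers, and K00_19..K60_79 are the usual round constants broadcast across the lanes. For reference, the scalar forms the macros vectorize (a plain C rendering, not taken from this file):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int n)	/* PROLD */
{
	return (x << n) | (x >> (32 - n));
}

static inline uint32_t f0(uint32_t b, uint32_t c, uint32_t d)	/* rounds  0-19 */
{
	return d ^ (b & (c ^ d));			/* MAGIC_F0 */
}

static inline uint32_t f1(uint32_t b, uint32_t c, uint32_t d)	/* rounds 20-39 and 60-79 */
{
	return b ^ c ^ d;				/* MAGIC_F1 == MAGIC_F3 */
}

static inline uint32_t f2(uint32_t b, uint32_t c, uint32_t d)	/* rounds 40-59 */
{
	return (b & c) | (d & (b | c));			/* MAGIC_F2 */
}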
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
deleted file mode 100644
index 53ad6e7db747..000000000000
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Arch-specific CryptoAPI modules.
4#
5
6OBJECT_FILES_NON_STANDARD := y
7
8avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
9 $(comma)4)$(comma)%ymm2,yes,no)
10ifeq ($(avx2_supported),yes)
11 obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
12 sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
13 sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
14endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
deleted file mode 100644
index 97c5fc43e115..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ /dev/null
@@ -1,1013 +0,0 @@
1/*
2 * Multi buffer SHA256 algorithm Glue Code
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <crypto/internal/hash.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/mm.h>
60#include <linux/cryptohash.h>
61#include <linux/types.h>
62#include <linux/list.h>
63#include <crypto/scatterwalk.h>
64#include <crypto/sha.h>
65#include <crypto/mcryptd.h>
66#include <crypto/crypto_wq.h>
67#include <asm/byteorder.h>
68#include <linux/hardirq.h>
69#include <asm/fpu/api.h>
70#include "sha256_mb_ctx.h"
71
72#define FLUSH_INTERVAL 1000 /* in usec */
73
74static struct mcryptd_alg_state sha256_mb_alg_state;
75
76struct sha256_mb_ctx {
77 struct mcryptd_ahash *mcryptd_tfm;
78};
79
80static inline struct mcryptd_hash_request_ctx
81 *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
82{
83 struct ahash_request *areq;
84
85 areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
86 return container_of(areq, struct mcryptd_hash_request_ctx, areq);
87}
88
89static inline struct ahash_request
90 *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
91{
92 return container_of((void *) ctx, struct ahash_request, __ctx);
93}
94
95static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
96 struct ahash_request *areq)
97{
98 rctx->flag = HASH_UPDATE;
99}
100
101static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
102static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
103 (struct sha256_mb_mgr *state, struct job_sha256 *job);
104static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
105 (struct sha256_mb_mgr *state);
106static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
107 (struct sha256_mb_mgr *state);
108
109inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
110 uint64_t total_len)
111{
112 uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
113
114 memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
115 padblock[i] = 0x80;
116
117 i += ((SHA256_BLOCK_SIZE - 1) &
118 (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
119 + 1 + SHA256_PADLENGTHFIELD_SIZE;
120
121#if SHA256_PADLENGTHFIELD_SIZE == 16
122 *((uint64_t *) &padblock[i - 16]) = 0;
123#endif
124
125 *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
126
127 /* Number of extra blocks to hash */
128 return i >> SHA256_LOG2_BLOCK_SIZE;
129}
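/*
 * Worked example of the arithmetic above, assuming the companion header
 * (not shown here) defines SHA256_PADLENGTHFIELD_SIZE as 8:
 *
 *   total_len = 150: i starts at 150 % 64 = 22; after the 0x80 byte the
 *   rounding term places the 8-byte big-endian bit length at the end of
 *   the block, so i becomes 64 and 64 >> SHA256_LOG2_BLOCK_SIZE = 1
 *   extra block is hashed.
 *
 *   total_len = 120: i starts at 56; the 0x80 byte plus the length field
 *   no longer fit in the current block, so i becomes 128 and two extra
 *   blocks are hashed.
 */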
130
131static struct sha256_hash_ctx
132 *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
133 struct sha256_hash_ctx *ctx)
134{
135 while (ctx) {
136 if (ctx->status & HASH_CTX_STS_COMPLETE) {
137 /* Clear PROCESSING bit */
138 ctx->status = HASH_CTX_STS_COMPLETE;
139 return ctx;
140 }
141
142 /*
143 * If the extra blocks are empty, begin hashing what remains
144 * in the user's buffer.
145 */
146 if (ctx->partial_block_buffer_length == 0 &&
147 ctx->incoming_buffer_length) {
148
149 const void *buffer = ctx->incoming_buffer;
150 uint32_t len = ctx->incoming_buffer_length;
151 uint32_t copy_len;
152
153 /*
154 * Only entire blocks can be hashed.
155 * Copy remainder to extra blocks buffer.
156 */
157 copy_len = len & (SHA256_BLOCK_SIZE-1);
158
159 if (copy_len) {
160 len -= copy_len;
161 memcpy(ctx->partial_block_buffer,
162 ((const char *) buffer + len),
163 copy_len);
164 ctx->partial_block_buffer_length = copy_len;
165 }
166
167 ctx->incoming_buffer_length = 0;
168
169 /* len should be a multiple of the block size now */
170 assert((len % SHA256_BLOCK_SIZE) == 0);
171
172 /* Set len to the number of blocks to be hashed */
173 len >>= SHA256_LOG2_BLOCK_SIZE;
174
175 if (len) {
176
177 ctx->job.buffer = (uint8_t *) buffer;
178 ctx->job.len = len;
179 ctx = (struct sha256_hash_ctx *)
180 sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
181 continue;
182 }
183 }
184
185 /*
186 * If the extra blocks are not empty, then we are
187 * either on the last block(s) or we need more
188 * user input before continuing.
189 */
190 if (ctx->status & HASH_CTX_STS_LAST) {
191
192 uint8_t *buf = ctx->partial_block_buffer;
193 uint32_t n_extra_blocks =
194 sha256_pad(buf, ctx->total_length);
195
196 ctx->status = (HASH_CTX_STS_PROCESSING |
197 HASH_CTX_STS_COMPLETE);
198 ctx->job.buffer = buf;
199 ctx->job.len = (uint32_t) n_extra_blocks;
200 ctx = (struct sha256_hash_ctx *)
201 sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
202 continue;
203 }
204
205 ctx->status = HASH_CTX_STS_IDLE;
206 return ctx;
207 }
208
209 return NULL;
210}
211
212static struct sha256_hash_ctx
213 *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
214{
215 /*
216 * If get_comp_job returns NULL, there are no jobs complete.
217 * If get_comp_job returns a job, verify that it is safe to return to
218 * the user. If it is not ready, resubmit the job to finish processing.
219 * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
220 * returned. Otherwise, all jobs currently being managed by the
221 * hash_ctx_mgr still need processing.
222 */
223 struct sha256_hash_ctx *ctx;
224
225 ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
226 return sha256_ctx_mgr_resubmit(mgr, ctx);
227}
228
229static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
230{
231 sha256_job_mgr_init(&mgr->mgr);
232}
233
234static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
235 struct sha256_hash_ctx *ctx,
236 const void *buffer,
237 uint32_t len,
238 int flags)
239{
240 if (flags & ~(HASH_UPDATE | HASH_LAST)) {
241 /* User should not pass anything other than UPDATE or LAST */
242 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
243 return ctx;
244 }
245
246 if (ctx->status & HASH_CTX_STS_PROCESSING) {
247 /* Cannot submit to a currently processing job. */
248 ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
249 return ctx;
250 }
251
252 if (ctx->status & HASH_CTX_STS_COMPLETE) {
253 /* Cannot update a finished job. */
254 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
255 return ctx;
256 }
257
258 /* If we made it here, there was no error during this call to submit */
259 ctx->error = HASH_CTX_ERROR_NONE;
260
261 /* Store buffer ptr info from user */
262 ctx->incoming_buffer = buffer;
263 ctx->incoming_buffer_length = len;
264
265 /*
266 * Store the user's request flags and mark this ctx as currently
267 * being processed.
268 */
269 ctx->status = (flags & HASH_LAST) ?
270 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
271 HASH_CTX_STS_PROCESSING;
272
273 /* Advance byte counter */
274 ctx->total_length += len;
275
276 /*
277 * If there is anything currently buffered in the extra blocks,
278 * append to it until it contains a whole block.
279 * Or if the user's buffer contains less than a whole block,
280 * append as much as possible to the extra block.
281 */
282 if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
283 /*
284 * Compute how many bytes to copy from user buffer into
285 * extra block
286 */
287 uint32_t copy_len = SHA256_BLOCK_SIZE -
288 ctx->partial_block_buffer_length;
289 if (len < copy_len)
290 copy_len = len;
291
292 if (copy_len) {
293 /* Copy and update relevant pointers and counters */
294 memcpy(
295 &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
296 buffer, copy_len);
297
298 ctx->partial_block_buffer_length += copy_len;
299 ctx->incoming_buffer = (const void *)
300 ((const char *)buffer + copy_len);
301 ctx->incoming_buffer_length = len - copy_len;
302 }
303
304 /* The extra block should never contain more than 1 block */
305 assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
306
307 /*
308 * If the extra block buffer contains exactly 1 block,
309 * it can be hashed.
310 */
311 if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
312 ctx->partial_block_buffer_length = 0;
313
314 ctx->job.buffer = ctx->partial_block_buffer;
315 ctx->job.len = 1;
316 ctx = (struct sha256_hash_ctx *)
317 sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
318 }
319 }
320
321 return sha256_ctx_mgr_resubmit(mgr, ctx);
322}
323
324static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
325{
326 struct sha256_hash_ctx *ctx;
327
328 while (1) {
329 ctx = (struct sha256_hash_ctx *)
330 sha256_job_mgr_flush(&mgr->mgr);
331
332 /* If flush returned 0, there are no more jobs in flight. */
333 if (!ctx)
334 return NULL;
335
336 /*
337 * If flush returned a job, resubmit the job to finish
338 * processing.
339 */
340 ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
341
342 /*
343 * If sha256_ctx_mgr_resubmit returned a job, it is ready to
344 * be returned. Otherwise, all jobs currently being managed by
345 * the sha256_ctx_mgr still need processing. Loop.
346 */
347 if (ctx)
348 return ctx;
349 }
350}
351
352static int sha256_mb_init(struct ahash_request *areq)
353{
354 struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
355
356 hash_ctx_init(sctx);
357 sctx->job.result_digest[0] = SHA256_H0;
358 sctx->job.result_digest[1] = SHA256_H1;
359 sctx->job.result_digest[2] = SHA256_H2;
360 sctx->job.result_digest[3] = SHA256_H3;
361 sctx->job.result_digest[4] = SHA256_H4;
362 sctx->job.result_digest[5] = SHA256_H5;
363 sctx->job.result_digest[6] = SHA256_H6;
364 sctx->job.result_digest[7] = SHA256_H7;
365 sctx->total_length = 0;
366 sctx->partial_block_buffer_length = 0;
367 sctx->status = HASH_CTX_STS_IDLE;
368
369 return 0;
370}
371
372static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
373{
374 int i;
375 struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
376 __be32 *dst = (__be32 *) rctx->out;
377
378 for (i = 0; i < 8; ++i)
379 dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
380
381 return 0;
382}
383
384static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
385 struct mcryptd_alg_cstate *cstate, bool flush)
386{
387 int flag = HASH_UPDATE;
388 int nbytes, err = 0;
389 struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
390 struct sha256_hash_ctx *sha_ctx;
391
392 /* more work ? */
393 while (!(rctx->flag & HASH_DONE)) {
394 nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
395 if (nbytes < 0) {
396 err = nbytes;
397 goto out;
398 }
399 /* check if the walk is done */
400 if (crypto_ahash_walk_last(&rctx->walk)) {
401 rctx->flag |= HASH_DONE;
402 if (rctx->flag & HASH_FINAL)
403 flag |= HASH_LAST;
404
405 }
406 sha_ctx = (struct sha256_hash_ctx *)
407 ahash_request_ctx(&rctx->areq);
408 kernel_fpu_begin();
409 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
410 rctx->walk.data, nbytes, flag);
411 if (!sha_ctx) {
412 if (flush)
413 sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
414 }
415 kernel_fpu_end();
416 if (sha_ctx)
417 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
418 else {
419 rctx = NULL;
420 goto out;
421 }
422 }
423
424 /* copy the results */
425 if (rctx->flag & HASH_FINAL)
426 sha256_mb_set_results(rctx);
427
428out:
429 *ret_rctx = rctx;
430 return err;
431}
432
433static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
434 struct mcryptd_alg_cstate *cstate,
435 int err)
436{
437 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
438 struct sha256_hash_ctx *sha_ctx;
439 struct mcryptd_hash_request_ctx *req_ctx;
440 int ret;
441
442 /* remove from work list */
443 spin_lock(&cstate->work_lock);
444 list_del(&rctx->waiter);
445 spin_unlock(&cstate->work_lock);
446
447 if (irqs_disabled())
448 rctx->complete(&req->base, err);
449 else {
450 local_bh_disable();
451 rctx->complete(&req->base, err);
452 local_bh_enable();
453 }
454
455 /* check to see if there are other jobs that are done */
456 sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
457 while (sha_ctx) {
458 req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
459 ret = sha_finish_walk(&req_ctx, cstate, false);
460 if (req_ctx) {
461 spin_lock(&cstate->work_lock);
462 list_del(&req_ctx->waiter);
463 spin_unlock(&cstate->work_lock);
464
465 req = cast_mcryptd_ctx_to_req(req_ctx);
466 if (irqs_disabled())
467 req_ctx->complete(&req->base, ret);
468 else {
469 local_bh_disable();
470 req_ctx->complete(&req->base, ret);
471 local_bh_enable();
472 }
473 }
474 sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
475 }
476
477 return 0;
478}
479
480static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
481 struct mcryptd_alg_cstate *cstate)
482{
483 unsigned long next_flush;
484 unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
485
486 /* initialize tag */
487 rctx->tag.arrival = jiffies; /* tag the arrival time */
488 rctx->tag.seq_num = cstate->next_seq_num++;
489 next_flush = rctx->tag.arrival + delay;
490 rctx->tag.expire = next_flush;
491
492 spin_lock(&cstate->work_lock);
493 list_add_tail(&rctx->waiter, &cstate->work_list);
494 spin_unlock(&cstate->work_lock);
495
496 mcryptd_arm_flusher(cstate, delay);
497}
498
499static int sha256_mb_update(struct ahash_request *areq)
500{
501 struct mcryptd_hash_request_ctx *rctx =
502 container_of(areq, struct mcryptd_hash_request_ctx, areq);
503 struct mcryptd_alg_cstate *cstate =
504 this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
505
506 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
507 struct sha256_hash_ctx *sha_ctx;
508 int ret = 0, nbytes;
509
510 /* sanity check */
511 if (rctx->tag.cpu != smp_processor_id()) {
512 pr_err("mcryptd error: cpu clash\n");
513 goto done;
514 }
515
516 /* need to init context */
517 req_ctx_init(rctx, areq);
518
519 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
520
521 if (nbytes < 0) {
522 ret = nbytes;
523 goto done;
524 }
525
526 if (crypto_ahash_walk_last(&rctx->walk))
527 rctx->flag |= HASH_DONE;
528
529 /* submit */
530 sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
531 sha256_mb_add_list(rctx, cstate);
532 kernel_fpu_begin();
533 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
534 nbytes, HASH_UPDATE);
535 kernel_fpu_end();
536
537 /* check if anything is returned */
538 if (!sha_ctx)
539 return -EINPROGRESS;
540
541 if (sha_ctx->error) {
542 ret = sha_ctx->error;
543 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
544 goto done;
545 }
546
547 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
548 ret = sha_finish_walk(&rctx, cstate, false);
549
550 if (!rctx)
551 return -EINPROGRESS;
552done:
553 sha_complete_job(rctx, cstate, ret);
554 return ret;
555}
556
557static int sha256_mb_finup(struct ahash_request *areq)
558{
559 struct mcryptd_hash_request_ctx *rctx =
560 container_of(areq, struct mcryptd_hash_request_ctx, areq);
561 struct mcryptd_alg_cstate *cstate =
562 this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
563
564 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
565 struct sha256_hash_ctx *sha_ctx;
566 int ret = 0, flag = HASH_UPDATE, nbytes;
567
568 /* sanity check */
569 if (rctx->tag.cpu != smp_processor_id()) {
570 pr_err("mcryptd error: cpu clash\n");
571 goto done;
572 }
573
574 /* need to init context */
575 req_ctx_init(rctx, areq);
576
577 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
578
579 if (nbytes < 0) {
580 ret = nbytes;
581 goto done;
582 }
583
584 if (crypto_ahash_walk_last(&rctx->walk)) {
585 rctx->flag |= HASH_DONE;
586 flag = HASH_LAST;
587 }
588
589 /* submit */
590 rctx->flag |= HASH_FINAL;
591 sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
592 sha256_mb_add_list(rctx, cstate);
593
594 kernel_fpu_begin();
595 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
596 nbytes, flag);
597 kernel_fpu_end();
598
599 /* check if anything is returned */
600 if (!sha_ctx)
601 return -EINPROGRESS;
602
603 if (sha_ctx->error) {
604 ret = sha_ctx->error;
605 goto done;
606 }
607
608 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
609 ret = sha_finish_walk(&rctx, cstate, false);
610 if (!rctx)
611 return -EINPROGRESS;
612done:
613 sha_complete_job(rctx, cstate, ret);
614 return ret;
615}
616
617static int sha256_mb_final(struct ahash_request *areq)
618{
619 struct mcryptd_hash_request_ctx *rctx =
620 container_of(areq, struct mcryptd_hash_request_ctx,
621 areq);
622 struct mcryptd_alg_cstate *cstate =
623 this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
624
625 struct sha256_hash_ctx *sha_ctx;
626 int ret = 0;
627 u8 data;
628
629 /* sanity check */
630 if (rctx->tag.cpu != smp_processor_id()) {
631 pr_err("mcryptd error: cpu clash\n");
632 goto done;
633 }
634
635 /* need to init context */
636 req_ctx_init(rctx, areq);
637
638 rctx->flag |= HASH_DONE | HASH_FINAL;
639
640 sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
641 /* flag HASH_FINAL and 0 data size */
642 sha256_mb_add_list(rctx, cstate);
643 kernel_fpu_begin();
644 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
645 HASH_LAST);
646 kernel_fpu_end();
647
648 /* check if anything is returned */
649 if (!sha_ctx)
650 return -EINPROGRESS;
651
652 if (sha_ctx->error) {
653 ret = sha_ctx->error;
654 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
655 goto done;
656 }
657
658 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
659 ret = sha_finish_walk(&rctx, cstate, false);
660 if (!rctx)
661 return -EINPROGRESS;
662done:
663 sha_complete_job(rctx, cstate, ret);
664 return ret;
665}
666
667static int sha256_mb_export(struct ahash_request *areq, void *out)
668{
669 struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
670
671 memcpy(out, sctx, sizeof(*sctx));
672
673 return 0;
674}
675
676static int sha256_mb_import(struct ahash_request *areq, const void *in)
677{
678 struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
679
680 memcpy(sctx, in, sizeof(*sctx));
681
682 return 0;
683}
684
685static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
686{
687 struct mcryptd_ahash *mcryptd_tfm;
688 struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
689 struct mcryptd_hash_ctx *mctx;
690
691 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
692 CRYPTO_ALG_INTERNAL,
693 CRYPTO_ALG_INTERNAL);
694 if (IS_ERR(mcryptd_tfm))
695 return PTR_ERR(mcryptd_tfm);
696 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
697 mctx->alg_state = &sha256_mb_alg_state;
698 ctx->mcryptd_tfm = mcryptd_tfm;
699 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
700 sizeof(struct ahash_request) +
701 crypto_ahash_reqsize(&mcryptd_tfm->base));
702
703 return 0;
704}
705
706static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
707{
708 struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
709
710 mcryptd_free_ahash(ctx->mcryptd_tfm);
711}
712
713static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
714{
715 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
716 sizeof(struct ahash_request) +
717 sizeof(struct sha256_hash_ctx));
718
719 return 0;
720}
721
722static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
723{
724 struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
725
726 mcryptd_free_ahash(ctx->mcryptd_tfm);
727}
728
729static struct ahash_alg sha256_mb_areq_alg = {
730 .init = sha256_mb_init,
731 .update = sha256_mb_update,
732 .final = sha256_mb_final,
733 .finup = sha256_mb_finup,
734 .export = sha256_mb_export,
735 .import = sha256_mb_import,
736 .halg = {
737 .digestsize = SHA256_DIGEST_SIZE,
738 .statesize = sizeof(struct sha256_hash_ctx),
739 .base = {
740 .cra_name = "__sha256-mb",
741 .cra_driver_name = "__intel_sha256-mb",
742 .cra_priority = 100,
743 /*
744				 * use ASYNC flag as some buffers in the multi-buffer
745				 * algo may not have completed before the hashing
746				 * thread sleeps
747 */
748 .cra_flags = CRYPTO_ALG_ASYNC |
749 CRYPTO_ALG_INTERNAL,
750 .cra_blocksize = SHA256_BLOCK_SIZE,
751 .cra_module = THIS_MODULE,
752 .cra_list = LIST_HEAD_INIT
753 (sha256_mb_areq_alg.halg.base.cra_list),
754 .cra_init = sha256_mb_areq_init_tfm,
755 .cra_exit = sha256_mb_areq_exit_tfm,
756 .cra_ctxsize = sizeof(struct sha256_hash_ctx),
757 }
758 }
759};
760
761static int sha256_mb_async_init(struct ahash_request *req)
762{
763 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
764 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
765 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
766 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
767
768 memcpy(mcryptd_req, req, sizeof(*req));
769 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
770 return crypto_ahash_init(mcryptd_req);
771}
772
773static int sha256_mb_async_update(struct ahash_request *req)
774{
775 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
776
777 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
778 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
779 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
780
781 memcpy(mcryptd_req, req, sizeof(*req));
782 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
783 return crypto_ahash_update(mcryptd_req);
784}
785
786static int sha256_mb_async_finup(struct ahash_request *req)
787{
788 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
789
790 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
791 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
792 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
793
794 memcpy(mcryptd_req, req, sizeof(*req));
795 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
796 return crypto_ahash_finup(mcryptd_req);
797}
798
799static int sha256_mb_async_final(struct ahash_request *req)
800{
801 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
802
803 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
804 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
805 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
806
807 memcpy(mcryptd_req, req, sizeof(*req));
808 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
809 return crypto_ahash_final(mcryptd_req);
810}
811
812static int sha256_mb_async_digest(struct ahash_request *req)
813{
814 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
815 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
816 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
817 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
818
819 memcpy(mcryptd_req, req, sizeof(*req));
820 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
821 return crypto_ahash_digest(mcryptd_req);
822}
823
824static int sha256_mb_async_export(struct ahash_request *req, void *out)
825{
826 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
827 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
828 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
829 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
830
831 memcpy(mcryptd_req, req, sizeof(*req));
832 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
833 return crypto_ahash_export(mcryptd_req, out);
834}
835
836static int sha256_mb_async_import(struct ahash_request *req, const void *in)
837{
838 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
839 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
840 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
841 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
842 struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
843 struct mcryptd_hash_request_ctx *rctx;
844 struct ahash_request *areq;
845
846 memcpy(mcryptd_req, req, sizeof(*req));
847 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
848 rctx = ahash_request_ctx(mcryptd_req);
849 areq = &rctx->areq;
850
851 ahash_request_set_tfm(areq, child);
852 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
853 rctx->complete, req);
854
855 return crypto_ahash_import(mcryptd_req, in);
856}
857
858static struct ahash_alg sha256_mb_async_alg = {
859 .init = sha256_mb_async_init,
860 .update = sha256_mb_async_update,
861 .final = sha256_mb_async_final,
862 .finup = sha256_mb_async_finup,
863 .export = sha256_mb_async_export,
864 .import = sha256_mb_async_import,
865 .digest = sha256_mb_async_digest,
866 .halg = {
867 .digestsize = SHA256_DIGEST_SIZE,
868 .statesize = sizeof(struct sha256_hash_ctx),
869 .base = {
870 .cra_name = "sha256",
871 .cra_driver_name = "sha256_mb",
872 /*
873 * Low priority, since with few concurrent hash requests
874 * this is extremely slow due to the flush delay. Users
875 * whose workloads would benefit from this can request
876 * it explicitly by driver name, or can increase its
877 * priority at runtime using NETLINK_CRYPTO.
878 */
879 .cra_priority = 50,
880 .cra_flags = CRYPTO_ALG_ASYNC,
881 .cra_blocksize = SHA256_BLOCK_SIZE,
882 .cra_module = THIS_MODULE,
883 .cra_list = LIST_HEAD_INIT
884 (sha256_mb_async_alg.halg.base.cra_list),
885 .cra_init = sha256_mb_async_init_tfm,
886 .cra_exit = sha256_mb_async_exit_tfm,
887 .cra_ctxsize = sizeof(struct sha256_mb_ctx),
888 .cra_alignmask = 0,
889 },
890 },
891};
892
893static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
894{
895 struct mcryptd_hash_request_ctx *rctx;
896 unsigned long cur_time;
897 unsigned long next_flush = 0;
898 struct sha256_hash_ctx *sha_ctx;
899
900
901 cur_time = jiffies;
902
903 while (!list_empty(&cstate->work_list)) {
904 rctx = list_entry(cstate->work_list.next,
905 struct mcryptd_hash_request_ctx, waiter);
906 if (time_before(cur_time, rctx->tag.expire))
907 break;
908 kernel_fpu_begin();
909 sha_ctx = (struct sha256_hash_ctx *)
910 sha256_ctx_mgr_flush(cstate->mgr);
911 kernel_fpu_end();
912 if (!sha_ctx) {
913 pr_err("sha256_mb error: nothing got"
914 " flushed for non-empty list\n");
915 break;
916 }
917 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
918 sha_finish_walk(&rctx, cstate, true);
919 sha_complete_job(rctx, cstate, 0);
920 }
921
922 if (!list_empty(&cstate->work_list)) {
923 rctx = list_entry(cstate->work_list.next,
924 struct mcryptd_hash_request_ctx, waiter);
925 /* get the hash context and then flush time */
926 next_flush = rctx->tag.expire;
927 mcryptd_arm_flusher(cstate, get_delay(next_flush));
928 }
929 return next_flush;
930}
931
932static int __init sha256_mb_mod_init(void)
933{
934
935 int cpu;
936 int err;
937 struct mcryptd_alg_cstate *cpu_state;
938
939 /* check for dependent cpu features */
940 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
941 !boot_cpu_has(X86_FEATURE_BMI2))
942 return -ENODEV;
943
944 /* initialize multibuffer structures */
945 sha256_mb_alg_state.alg_cstate = alloc_percpu
946 (struct mcryptd_alg_cstate);
947
948 sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
949 sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
950 sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
951 sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
952
953 if (!sha256_mb_alg_state.alg_cstate)
954 return -ENOMEM;
955 for_each_possible_cpu(cpu) {
956 cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
957 cpu_state->next_flush = 0;
958 cpu_state->next_seq_num = 0;
959 cpu_state->flusher_engaged = false;
960 INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
961 cpu_state->cpu = cpu;
962 cpu_state->alg_state = &sha256_mb_alg_state;
963 cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
964 GFP_KERNEL);
965 if (!cpu_state->mgr)
966 goto err2;
967 sha256_ctx_mgr_init(cpu_state->mgr);
968 INIT_LIST_HEAD(&cpu_state->work_list);
969 spin_lock_init(&cpu_state->work_lock);
970 }
971 sha256_mb_alg_state.flusher = &sha256_mb_flusher;
972
973 err = crypto_register_ahash(&sha256_mb_areq_alg);
974 if (err)
975 goto err2;
976 err = crypto_register_ahash(&sha256_mb_async_alg);
977 if (err)
978 goto err1;
979
980
981 return 0;
982err1:
983 crypto_unregister_ahash(&sha256_mb_areq_alg);
984err2:
985 for_each_possible_cpu(cpu) {
986 cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
987 kfree(cpu_state->mgr);
988 }
989 free_percpu(sha256_mb_alg_state.alg_cstate);
990 return -ENODEV;
991}
992
993static void __exit sha256_mb_mod_fini(void)
994{
995 int cpu;
996 struct mcryptd_alg_cstate *cpu_state;
997
998 crypto_unregister_ahash(&sha256_mb_async_alg);
999 crypto_unregister_ahash(&sha256_mb_areq_alg);
1000 for_each_possible_cpu(cpu) {
1001 cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
1002 kfree(cpu_state->mgr);
1003 }
1004 free_percpu(sha256_mb_alg_state.alg_cstate);
1005}
1006
1007module_init(sha256_mb_mod_init);
1008module_exit(sha256_mb_mod_fini);
1009
1010MODULE_LICENSE("GPL");
1011MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
1012
1013MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
deleted file mode 100644
index 7c432543dc7f..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Header file for multi buffer SHA256 context
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef _SHA_MB_CTX_INTERNAL_H
55#define _SHA_MB_CTX_INTERNAL_H
56
57#include "sha256_mb_mgr.h"
58
59#define HASH_UPDATE 0x00
60#define HASH_LAST 0x01
61#define HASH_DONE 0x02
62#define HASH_FINAL 0x04
63
64#define HASH_CTX_STS_IDLE 0x00
65#define HASH_CTX_STS_PROCESSING 0x01
66#define HASH_CTX_STS_LAST 0x02
67#define HASH_CTX_STS_COMPLETE 0x04
68
69enum hash_ctx_error {
70 HASH_CTX_ERROR_NONE = 0,
71 HASH_CTX_ERROR_INVALID_FLAGS = -1,
72 HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
73 HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
74
75#ifdef HASH_CTX_DEBUG
76 HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
77#endif
78};
79
80
81#define hash_ctx_user_data(ctx) ((ctx)->user_data)
82#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
83#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
84#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
85#define hash_ctx_status(ctx) ((ctx)->status)
86#define hash_ctx_error(ctx) ((ctx)->error)
87#define hash_ctx_init(ctx) \
88 do { \
89 (ctx)->error = HASH_CTX_ERROR_NONE; \
90 (ctx)->status = HASH_CTX_STS_COMPLETE; \
91 } while (0)
92
93
94/* Hash Constants and Typedefs */
95#define SHA256_DIGEST_LENGTH 8
96#define SHA256_LOG2_BLOCK_SIZE 6
97
98#define SHA256_PADLENGTHFIELD_SIZE 8
99
100#ifdef SHA_MB_DEBUG
101#define assert(expr) \
102do { \
103 if (unlikely(!(expr))) { \
104 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
105 #expr, __FILE__, __func__, __LINE__); \
106 } \
107} while (0)
108#else
109#define assert(expr) do {} while (0)
110#endif
111
112struct sha256_ctx_mgr {
113 struct sha256_mb_mgr mgr;
114};
115
116/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
117
118struct sha256_hash_ctx {
119 /* Must be at struct offset 0 */
120 struct job_sha256 job;
121 /* status flag */
122 int status;
123 /* error flag */
124 int error;
125
126 uint64_t total_length;
127 const void *incoming_buffer;
128 uint32_t incoming_buffer_length;
129 uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
130 uint32_t partial_block_buffer_length;
131 void *user_data;
132};
133
134#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
deleted file mode 100644
index b01ae408c56d..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Header file for multi buffer SHA256 algorithm manager
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53#ifndef __SHA_MB_MGR_H
54#define __SHA_MB_MGR_H
55
56#include <linux/types.h>
57
58#define NUM_SHA256_DIGEST_WORDS 8
59
60enum job_sts { STS_UNKNOWN = 0,
61 STS_BEING_PROCESSED = 1,
62 STS_COMPLETED = 2,
63 STS_INTERNAL_ERROR = 3,
64 STS_ERROR = 4
65};
66
67struct job_sha256 {
68 u8 *buffer;
69 u32 len;
70 u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
71 enum job_sts status;
72 void *user_data;
73};
74
75/* SHA256 out-of-order scheduler */
76
77/* typedef uint32_t sha8_digest_array[8][8]; */
78
79struct sha256_args_x8 {
80 uint32_t digest[8][8];
81 uint8_t *data_ptr[8];
82};
83
84struct sha256_lane_data {
85 struct job_sha256 *job_in_lane;
86};
87
88struct sha256_mb_mgr {
89 struct sha256_args_x8 args;
90
91 uint32_t lens[8];
92
93 /* each byte is index (0...7) of unused lanes */
94 uint64_t unused_lanes;
95 /* byte 4 is set to FF as a flag */
96 struct sha256_lane_data ldata[8];
97};
98
99
100#define SHA256_MB_MGR_NUM_LANES_AVX2 8
101
102void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
103struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
104 struct job_sha256 *job);
105struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
106struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
107
108#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
deleted file mode 100644
index 5c377bac21d0..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,304 +0,0 @@
1/*
2 * Header file for multi buffer SHA256 algorithm data structure
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54# Macros for defining data structures
55
56# Usage example
57
58#START_FIELDS # JOB_AES
59### name size align
60#FIELD _plaintext, 8, 8 # pointer to plaintext
61#FIELD _ciphertext, 8, 8 # pointer to ciphertext
62#FIELD _IV, 16, 8 # IV
63#FIELD _keys, 8, 8 # pointer to keys
64#FIELD _len, 4, 4 # length in bytes
65#FIELD _status, 4, 4 # status enumeration
66#FIELD _user_data, 8, 8 # pointer to user data
67#UNION _union, size1, align1, \
68# size2, align2, \
69# size3, align3, \
70# ...
71#END_FIELDS
72#%assign _JOB_AES_size _FIELD_OFFSET
73#%assign _JOB_AES_align _STRUCT_ALIGN
74
75#########################################################################
76
77# Alternate "struc-like" syntax:
78# STRUCT job_aes2
79# RES_Q .plaintext, 1
80# RES_Q .ciphertext, 1
81# RES_DQ .IV, 1
82# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
83# RES_U .union, size1, align1, \
84# size2, align2, \
85# ...
86# ENDSTRUCT
87# # Following only needed if nesting
88# %assign job_aes2_size _FIELD_OFFSET
89# %assign job_aes2_align _STRUCT_ALIGN
90#
91# RES_* macros take a name, a count and an optional alignment.
92# The count is in terms of the base size of the macro, and the
93# default alignment is the base size.
94# The macros are:
95# Macro Base size
96# RES_B 1
97# RES_W 2
98# RES_D 4
99# RES_Q 8
100# RES_DQ 16
101# RES_Y 32
102# RES_Z 64
103#
104# RES_U defines a union. Its arguments are a name and two or more
105# pairs of "size, alignment"
106#
107# The two assigns are only needed if this structure is being nested
108# within another. Even if the assigns are not done, one can still use
109# STRUCT_NAME_size as the size of the structure.
110#
111# Note that for nesting, you still need to assign to STRUCT_NAME_size.
112#
113# The differences between this and using "struc" directly are that each
114# type is implicitly aligned to its natural length (although this can be
115# over-ridden with an explicit third parameter), and that the structure
116# is padded at the end to its overall alignment.
117#
118
119#########################################################################
120
121#ifndef _DATASTRUCT_ASM_
122#define _DATASTRUCT_ASM_
123
124#define SZ8 8*SHA256_DIGEST_WORD_SIZE
125#define ROUNDS 64*SZ8
126#define PTR_SZ 8
127#define SHA256_DIGEST_WORD_SIZE 4
128#define MAX_SHA256_LANES 8
129#define SHA256_DIGEST_WORDS 8
130#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
131#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
132#define SHA256_BLK_SZ 64
133
134# START_FIELDS
135.macro START_FIELDS
136 _FIELD_OFFSET = 0
137 _STRUCT_ALIGN = 0
138.endm
139
140# FIELD name size align
141.macro FIELD name size align
142 _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
143 \name = _FIELD_OFFSET
144 _FIELD_OFFSET = _FIELD_OFFSET + (\size)
145.if (\align > _STRUCT_ALIGN)
146 _STRUCT_ALIGN = \align
147.endif
148.endm
149
150# END_FIELDS
151.macro END_FIELDS
152 _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
153.endm
154
155########################################################################
156
157.macro STRUCT p1
158START_FIELDS
159.struc \p1
160.endm
161
162.macro ENDSTRUCT
163 tmp = _FIELD_OFFSET
164 END_FIELDS
165	tmp = (_FIELD_OFFSET - tmp)
166.if (tmp > 0)
167 .lcomm tmp
168.endif
169.endstruc
170.endm
171
172## RES_int name size align
173.macro RES_int p1 p2 p3
174 name = \p1
175 size = \p2
176 align = .\p3
177
178 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
179.align align
180.lcomm name size
181 _FIELD_OFFSET = _FIELD_OFFSET + (size)
182.if (align > _STRUCT_ALIGN)
183 _STRUCT_ALIGN = align
184.endif
185.endm
186
187# macro RES_B name, size [, align]
188.macro RES_B _name, _size, _align=1
189RES_int _name _size _align
190.endm
191
192# macro RES_W name, size [, align]
193.macro RES_W _name, _size, _align=2
194RES_int _name 2*(_size) _align
195.endm
196
197# macro RES_D name, size [, align]
198.macro RES_D _name, _size, _align=4
199RES_int _name 4*(_size) _align
200.endm
201
202# macro RES_Q name, size [, align]
203.macro RES_Q _name, _size, _align=8
204RES_int _name 8*(_size) _align
205.endm
206
207# macro RES_DQ name, size [, align]
208.macro RES_DQ _name, _size, _align=16
209RES_int _name 16*(_size) _align
210.endm
211
212# macro RES_Y name, size [, align]
213.macro RES_Y _name, _size, _align=32
214RES_int _name 32*(_size) _align
215.endm
216
217# macro RES_Z name, size [, align]
218.macro RES_Z _name, _size, _align=64
219RES_int _name 64*(_size) _align
220.endm
221
222#endif
223
224
225########################################################################
226#### Define SHA256 Out Of Order Data Structures
227########################################################################
228
229START_FIELDS # LANE_DATA
230### name size align
231FIELD _job_in_lane, 8, 8 # pointer to job object
232END_FIELDS
233
234 _LANE_DATA_size = _FIELD_OFFSET
235 _LANE_DATA_align = _STRUCT_ALIGN
236
237########################################################################
238
239START_FIELDS # SHA256_ARGS_X4
240### name size align
241FIELD _digest, 4*8*8, 4 # transposed digest
242FIELD _data_ptr, 8*8, 8 # array of pointers to data
243END_FIELDS
244
245 _SHA256_ARGS_X4_size = _FIELD_OFFSET
246 _SHA256_ARGS_X4_align = _STRUCT_ALIGN
247 _SHA256_ARGS_X8_size = _FIELD_OFFSET
248 _SHA256_ARGS_X8_align = _STRUCT_ALIGN
249
250#######################################################################
251
252START_FIELDS # MB_MGR
253### name size align
254FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
255FIELD _lens, 4*8, 8
256FIELD _unused_lanes, 8, 8
257FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
258END_FIELDS
259
260 _MB_MGR_size = _FIELD_OFFSET
261 _MB_MGR_align = _STRUCT_ALIGN
262
263_args_digest = _args + _digest
264_args_data_ptr = _args + _data_ptr
265
266#######################################################################
267
268START_FIELDS #STACK_FRAME
269### name size align
270FIELD _data, 16*SZ8, 1 # transposed digest
271FIELD _digest, 8*SZ8, 1 # array of pointers to data
272FIELD _ytmp, 4*SZ8, 1
273FIELD _rsp, 8, 1
274END_FIELDS
275
276 _STACK_FRAME_size = _FIELD_OFFSET
277 _STACK_FRAME_align = _STRUCT_ALIGN
278
279#######################################################################
280
281########################################################################
282#### Define constants
283########################################################################
284
285#define STS_UNKNOWN 0
286#define STS_BEING_PROCESSED 1
287#define STS_COMPLETED 2
288
289########################################################################
290#### Define JOB_SHA256 structure
291########################################################################
292
293START_FIELDS # JOB_SHA256
294
295### name size align
296FIELD _buffer, 8, 8 # pointer to buffer
297FIELD _len, 8, 8 # length in bytes
298FIELD _result_digest, 8*4, 32 # Digest (output)
299FIELD _status, 4, 4
300FIELD _user_data, 8, 8
301END_FIELDS
302
303 _JOB_SHA256_size = _FIELD_OFFSET
304 _JOB_SHA256_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
deleted file mode 100644
index d2364c55bbde..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,307 +0,0 @@
1/*
2 * Flush routine for SHA256 multibuffer
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53#include <linux/linkage.h>
54#include <asm/frame.h>
55#include "sha256_mb_mgr_datastruct.S"
56
57.extern sha256_x8_avx2
58
59#LINUX register definitions
60#define arg1 %rdi
61#define arg2 %rsi
62
63# Common register definitions
64#define state arg1
65#define job arg2
66#define len2 arg2
67
68# idx must be a register not clobbered by sha256_x8_avx2
69#define idx %r8
70#define DWORD_idx %r8d
71
72#define unused_lanes %rbx
73#define lane_data %rbx
74#define tmp2 %rbx
75#define tmp2_w %ebx
76
77#define job_rax %rax
78#define tmp1 %rax
79#define size_offset %rax
80#define tmp %rax
81#define start_offset %rax
82
83#define tmp3 %arg1
84
85#define extra_blocks %arg2
86#define p %arg2
87
88.macro LABEL prefix n
89\prefix\n\():
90.endm
91
92.macro JNE_SKIP i
93jne skip_\i
94.endm
95
96.altmacro
97.macro SET_OFFSET _offset
98offset = \_offset
99.endm
100.noaltmacro
101
102# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
103# arg 1 : rcx : state
104ENTRY(sha256_mb_mgr_flush_avx2)
105 FRAME_BEGIN
106 push %rbx
107
108 # If bit (32+3) is set, then all lanes are empty
109 mov _unused_lanes(state), unused_lanes
110 bt $32+3, unused_lanes
111 jc return_null
112
113 # find a lane with a non-null job
114 xor idx, idx
115 offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
116 cmpq $0, offset(state)
117 cmovne one(%rip), idx
118 offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
119 cmpq $0, offset(state)
120 cmovne two(%rip), idx
121 offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
122 cmpq $0, offset(state)
123 cmovne three(%rip), idx
124 offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
125 cmpq $0, offset(state)
126 cmovne four(%rip), idx
127 offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
128 cmpq $0, offset(state)
129 cmovne five(%rip), idx
130 offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
131 cmpq $0, offset(state)
132 cmovne six(%rip), idx
133 offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
134 cmpq $0, offset(state)
135 cmovne seven(%rip), idx
136
137 # copy idx to empty lanes
138copy_lane_data:
139 offset = (_args + _data_ptr)
140 mov offset(state,idx,8), tmp
141
142 I = 0
143.rep 8
144 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
145 cmpq $0, offset(state)
146.altmacro
147 JNE_SKIP %I
148 offset = (_args + _data_ptr + 8*I)
149 mov tmp, offset(state)
150 offset = (_lens + 4*I)
151 movl $0xFFFFFFFF, offset(state)
152LABEL skip_ %I
153 I = (I+1)
154.noaltmacro
155.endr
156
157 # Find min length
158 vmovdqu _lens+0*16(state), %xmm0
159 vmovdqu _lens+1*16(state), %xmm1
160
161 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
162 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
163 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
164 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
165 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
166
167 vmovd %xmm2, DWORD_idx
168 mov idx, len2
169 and $0xF, idx
170 shr $4, len2
171 jz len_is_0
172
173 vpand clear_low_nibble(%rip), %xmm2, %xmm2
174 vpshufd $0, %xmm2, %xmm2
175
176 vpsubd %xmm2, %xmm0, %xmm0
177 vpsubd %xmm2, %xmm1, %xmm1
178
179 vmovdqu %xmm0, _lens+0*16(state)
180 vmovdqu %xmm1, _lens+1*16(state)
181
182 # "state" and "args" are the same address, arg1
183 # len is arg2
184 call sha256_x8_avx2
185 # state and idx are intact
186
187len_is_0:
188 # process completed job "idx"
189 imul $_LANE_DATA_size, idx, lane_data
190 lea _ldata(state, lane_data), lane_data
191
192 mov _job_in_lane(lane_data), job_rax
193 movq $0, _job_in_lane(lane_data)
194 movl $STS_COMPLETED, _status(job_rax)
195 mov _unused_lanes(state), unused_lanes
196 shl $4, unused_lanes
197 or idx, unused_lanes
198
199 mov unused_lanes, _unused_lanes(state)
200 movl $0xFFFFFFFF, _lens(state,idx,4)
201
202 vmovd _args_digest(state , idx, 4) , %xmm0
203 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
204 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
205 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
206 vmovd _args_digest+4*32(state, idx, 4), %xmm1
207 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
208 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
209 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
210
211 vmovdqu %xmm0, _result_digest(job_rax)
212 offset = (_result_digest + 1*16)
213 vmovdqu %xmm1, offset(job_rax)
214
215return:
216 pop %rbx
217 FRAME_END
218 ret
219
220return_null:
221 xor job_rax, job_rax
222 jmp return
223ENDPROC(sha256_mb_mgr_flush_avx2)
224
225##############################################################################
226
227.align 16
228ENTRY(sha256_mb_mgr_get_comp_job_avx2)
229 push %rbx
230
231 ## if bit 32+3 is set, then all lanes are empty
232 mov _unused_lanes(state), unused_lanes
233 bt $(32+3), unused_lanes
234 jc .return_null
235
236 # Find min length
237 vmovdqu _lens(state), %xmm0
238 vmovdqu _lens+1*16(state), %xmm1
239
240 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
241 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
242 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
243 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
244 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
245
246 vmovd %xmm2, DWORD_idx
247 test $~0xF, idx
248 jnz .return_null
249
250 # process completed job "idx"
251 imul $_LANE_DATA_size, idx, lane_data
252 lea _ldata(state, lane_data), lane_data
253
254 mov _job_in_lane(lane_data), job_rax
255 movq $0, _job_in_lane(lane_data)
256 movl $STS_COMPLETED, _status(job_rax)
257 mov _unused_lanes(state), unused_lanes
258 shl $4, unused_lanes
259 or idx, unused_lanes
260 mov unused_lanes, _unused_lanes(state)
261
262 movl $0xFFFFFFFF, _lens(state, idx, 4)
263
264 vmovd _args_digest(state, idx, 4), %xmm0
265 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
266 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
267 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
268 vmovd _args_digest+4*32(state, idx, 4), %xmm1
269 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
270 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
271 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
272
273 vmovdqu %xmm0, _result_digest(job_rax)
274 offset = (_result_digest + 1*16)
275 vmovdqu %xmm1, offset(job_rax)
276
277 pop %rbx
278
279 ret
280
281.return_null:
282 xor job_rax, job_rax
283 pop %rbx
284 ret
285ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
286
287.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
288.align 16
289clear_low_nibble:
290.octa 0x000000000000000000000000FFFFFFF0
291
292.section .rodata.cst8, "aM", @progbits, 8
293.align 8
294one:
295.quad 1
296two:
297.quad 2
298three:
299.quad 3
300four:
301.quad 4
302five:
303.quad 5
304six:
305.quad 6
306seven:
307.quad 7
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
deleted file mode 100644
index b0c498371e67..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * Initialization code for multi buffer SHA256 algorithm for AVX2
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include "sha256_mb_mgr.h"
55
56void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
57{
58 unsigned int j;
59
60 state->unused_lanes = 0xF76543210ULL;
61 for (j = 0; j < 8; j++) {
62 state->lens[j] = 0xFFFFFFFF;
63 state->ldata[j].job_in_lane = NULL;
64 }
65}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
deleted file mode 100644
index b36ae7454084..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,214 +0,0 @@
1/*
2 * Buffer submit code for multi buffer SHA256 algorithm
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha256_mb_mgr_datastruct.S"
57
58.extern sha256_x8_avx2
59
60# LINUX register definitions
61arg1 = %rdi
62arg2 = %rsi
63size_offset = %rcx
64tmp2 = %rcx
65extra_blocks = %rdx
66
67# Common definitions
68#define state arg1
69#define job %rsi
70#define len2 arg2
71#define p2 arg2
72
73# idx must be a register not clobbered by sha256_x8_avx2
74idx = %r8
75DWORD_idx = %r8d
76last_len = %r8
77
78p = %r11
79start_offset = %r11
80
81unused_lanes = %rbx
82BYTE_unused_lanes = %bl
83
84job_rax = %rax
85len = %rax
86DWORD_len = %eax
87
88lane = %r12
89tmp3 = %r12
90
91tmp = %r9
92DWORD_tmp = %r9d
93
94lane_data = %r10
95
96# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
97# arg 1 : rcx : state
98# arg 2 : rdx : job
99ENTRY(sha256_mb_mgr_submit_avx2)
100 FRAME_BEGIN
101 push %rbx
102 push %r12
103
104 mov _unused_lanes(state), unused_lanes
105 mov unused_lanes, lane
106 and $0xF, lane
107 shr $4, unused_lanes
108 imul $_LANE_DATA_size, lane, lane_data
109 movl $STS_BEING_PROCESSED, _status(job)
110 lea _ldata(state, lane_data), lane_data
111 mov unused_lanes, _unused_lanes(state)
112 movl _len(job), DWORD_len
113
114 mov job, _job_in_lane(lane_data)
115 shl $4, len
116 or lane, len
117
118 movl DWORD_len, _lens(state , lane, 4)
119
120 # Load digest words from result_digest
121 vmovdqu _result_digest(job), %xmm0
122 vmovdqu _result_digest+1*16(job), %xmm1
123 vmovd %xmm0, _args_digest(state, lane, 4)
124 vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
125 vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
126 vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
127 vmovd %xmm1, _args_digest+4*32(state , lane, 4)
128
129 vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4)
130 vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4)
131 vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4)
132
133 mov _buffer(job), p
134 mov p, _args_data_ptr(state, lane, 8)
135
136 cmp $0xF, unused_lanes
137 jne return_null
138
139start_loop:
140 # Find min length
141 vmovdqa _lens(state), %xmm0
142 vmovdqa _lens+1*16(state), %xmm1
143
144 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
145 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
146 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
147 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
148 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
149
150 vmovd %xmm2, DWORD_idx
151 mov idx, len2
152 and $0xF, idx
153 shr $4, len2
154 jz len_is_0
155
156 vpand clear_low_nibble(%rip), %xmm2, %xmm2
157 vpshufd $0, %xmm2, %xmm2
158
159 vpsubd %xmm2, %xmm0, %xmm0
160 vpsubd %xmm2, %xmm1, %xmm1
161
162 vmovdqa %xmm0, _lens + 0*16(state)
163 vmovdqa %xmm1, _lens + 1*16(state)
164
165 # "state" and "args" are the same address, arg1
166 # len is arg2
167 call sha256_x8_avx2
168
169 # state and idx are intact
170
171len_is_0:
172 # process completed job "idx"
173 imul $_LANE_DATA_size, idx, lane_data
174 lea _ldata(state, lane_data), lane_data
175
176 mov _job_in_lane(lane_data), job_rax
177 mov _unused_lanes(state), unused_lanes
178 movq $0, _job_in_lane(lane_data)
179 movl $STS_COMPLETED, _status(job_rax)
180 shl $4, unused_lanes
181 or idx, unused_lanes
182 mov unused_lanes, _unused_lanes(state)
183
184 movl $0xFFFFFFFF, _lens(state,idx,4)
185
186 vmovd _args_digest(state, idx, 4), %xmm0
187 vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
188 vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
189 vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
190 vmovd _args_digest+4*32(state, idx, 4), %xmm1
191
192 vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
193 vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
194 vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1
195
196 vmovdqu %xmm0, _result_digest(job_rax)
197 vmovdqu %xmm1, _result_digest+1*16(job_rax)
198
199return:
200 pop %r12
201 pop %rbx
202 FRAME_END
203 ret
204
205return_null:
206 xor job_rax, job_rax
207 jmp return
208
209ENDPROC(sha256_mb_mgr_submit_avx2)
210
211.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
212.align 16
213clear_low_nibble:
214 .octa 0x000000000000000000000000FFFFFFF0
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
deleted file mode 100644
index 1687c80c5995..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+++ /dev/null
@@ -1,598 +0,0 @@
1/*
2 * Multi-buffer SHA256 algorithm hash compute routine
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include "sha256_mb_mgr_datastruct.S"
56
57## code to compute oct SHA256 using SSE-256
58## outer calling routine takes care of save and restore of XMM registers
59## Logic designed/laid out by JDG
60
61## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
62## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
63## Linux preserves: rdi rbp r8
64##
65## clobbers %ymm0-15
66
67arg1 = %rdi
68arg2 = %rsi
69reg3 = %rcx
70reg4 = %rdx
71
72# Common definitions
73STATE = arg1
74INP_SIZE = arg2
75
76IDX = %rax
77ROUND = %rbx
78TBL = reg3
79
80inp0 = %r9
81inp1 = %r10
82inp2 = %r11
83inp3 = %r12
84inp4 = %r13
85inp5 = %r14
86inp6 = %r15
87inp7 = reg4
88
89a = %ymm0
90b = %ymm1
91c = %ymm2
92d = %ymm3
93e = %ymm4
94f = %ymm5
95g = %ymm6
96h = %ymm7
97
98T1 = %ymm8
99
100a0 = %ymm12
101a1 = %ymm13
102a2 = %ymm14
103TMP = %ymm15
104TMP0 = %ymm6
105TMP1 = %ymm7
106
107TT0 = %ymm8
108TT1 = %ymm9
109TT2 = %ymm10
110TT3 = %ymm11
111TT4 = %ymm12
112TT5 = %ymm13
113TT6 = %ymm14
114TT7 = %ymm15
115
116# Define stack usage
117
118# Assume stack aligned to 32 bytes before call
119# Therefore FRAMESZ mod 32 must be 32-8 = 24
120
121#define FRAMESZ 0x388
122
123#define VMOVPS vmovups
124
125# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
126# "transpose" data in {r0...r7} using temps {t0...t1}
127# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
128# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
129# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
130# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
131# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
132# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
133# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
134# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
135# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
136#
137# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
138# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
139# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
140# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
141# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
142# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
143# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
144# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
145# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
146#
147
148.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
149 # process top half (r0..r3) {a...d}
150 vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
151 vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
152 vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
153 vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
154 vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
155 vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
156 vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
157 vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
158
159 # use r2 in place of t0
160 # process bottom half (r4..r7) {e...h}
161 vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
162 vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
163 vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
164 vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
165 vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
166 vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
167 vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
168 vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
169
170 vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
171 vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
172 vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
173 vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
174 vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
175 vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
176 vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
177 vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
178
179.endm
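For reference, the TRANSPOSE8 macro above amounts to an 8x8 transpose of 32-bit words held in the eight ymm registers. A scalar C sketch of the same permutation (transpose8_ref is an illustrative name, not part of this source):

#include <stdint.h>

/* Swap rows and columns of an 8x8 matrix of dwords -- the effect
 * TRANSPOSE8 has when r0..r7 are viewed as matrix rows. */
static void transpose8_ref(uint32_t m[8][8])
{
	for (int r = 0; r < 8; r++) {
		for (int c = r + 1; c < 8; c++) {
			uint32_t tmp = m[r][c];

			m[r][c] = m[c][r];
			m[c][r] = tmp;
		}
	}
}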
180
181.macro ROTATE_ARGS
182TMP_ = h
183h = g
184g = f
185f = e
186e = d
187d = c
188c = b
189b = a
190a = TMP_
191.endm
192
193.macro _PRORD reg imm tmp
194 vpslld $(32-\imm),\reg,\tmp
195 vpsrld $\imm,\reg, \reg
196 vpor \tmp,\reg, \reg
197.endm
198
199# PRORD_nd reg, imm, tmp, src
200.macro _PRORD_nd reg imm tmp src
201 vpslld $(32-\imm), \src, \tmp
202 vpsrld $\imm, \src, \reg
203 vpor \tmp, \reg, \reg
204.endm
205
206# PRORD dst/src, amt
207.macro PRORD reg imm
208 _PRORD \reg,\imm,TMP
209.endm
210
211# PRORD_nd dst, src, amt
212.macro PRORD_nd reg tmp imm
213 _PRORD_nd \reg, \imm, TMP, \tmp
214.endm
215
216# arguments passed implicitly in preprocessor symbols i, a...h
217.macro ROUND_00_15 _T1 i
218 PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
219
220 vpxor g, f, a2 # ch: a2 = f^g
221 vpand e,a2, a2 # ch: a2 = (f^g)&e
222 vpxor g, a2, a2 # a2 = ch
223
224 PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
225
226 vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
227 vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
228 vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
229 PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
230 vpaddd a2, h, h # h = h + ch
231 PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
232 vpaddd \_T1,h, h # h = h + ch + W + K
233 vpxor a1, a0, a0 # a0 = sigma1
234 PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
235 vpxor c, a, \_T1 # maj: T1 = a^c
236 add $SZ8, ROUND # ROUND++
237 vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
238 vpaddd a0, h, h
239 vpaddd h, d, d
240 vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
241 PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
242 vpxor a1, a2, a2 # a2 = sig0
243 vpand c, a, a1 # maj: a1 = a&c
244 vpor \_T1, a1, a1 # a1 = maj
245 vpaddd a1, h, h # h = h + ch + W + K + maj
246 vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
247 ROTATE_ARGS
248.endm
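As a reading aid, a scalar per-lane equivalent of the update performed by ROUND_00_15, written in FIPS 180-4 notation; the names here are illustrative, the macro computes the same sums for eight lanes at once, and ROTATE_ARGS does the register renaming:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* One SHA-256 round on state s[0..7] = {a..h} with schedule word w
 * and round constant k. */
static void sha256_round(uint32_t s[8], uint32_t w, uint32_t k)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
	uint32_t S1 = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
	uint32_t S0 = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
	uint32_t ch = g ^ (e & (f ^ g));	/* == (e & f) ^ (~e & g) */
	uint32_t maj = ((a ^ c) & b) | (a & c);	/* == majority(a, b, c) */
	uint32_t t1 = h + S1 + ch + k + w;
	uint32_t t2 = S0 + maj;

	s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
	s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
}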
249
250# arguments passed implicitly in preprocessor symbols i, a...h
251.macro ROUND_16_XX _T1 i
252 vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
253 vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
254 vmovdqu \_T1, a0
255 PRORD \_T1,11
256 vmovdqu a1, a2
257 PRORD a1,2
258 vpxor a0, \_T1, \_T1
259 PRORD \_T1, 7
260 vpxor a2, a1, a1
261 PRORD a1, 17
262 vpsrld $3, a0, a0
263 vpxor a0, \_T1, \_T1
264 vpsrld $10, a2, a2
265 vpxor a2, a1, a1
266 vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
267 vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
268 vpaddd a1, \_T1, \_T1
269
270 ROUND_00_15 \_T1,\i
271.endm
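Similarly, a scalar sketch of the message-schedule step that ROUND_16_XX vectorizes; schedule_w and w[] are illustrative names, and the macro keeps the last 16 schedule words for all eight lanes on the stack:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* W[i] for i >= 16, as in FIPS 180-4. */
static uint32_t schedule_w(const uint32_t w[], int i)
{
	uint32_t s0 = rotr32(w[i - 15], 7) ^ rotr32(w[i - 15], 18) ^
		      (w[i - 15] >> 3);
	uint32_t s1 = rotr32(w[i - 2], 17) ^ rotr32(w[i - 2], 19) ^
		      (w[i - 2] >> 10);

	return w[i - 16] + s0 + w[i - 7] + s1;
}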
272
273# SHA256_ARGS:
274# UINT256 digest[8]; // transposed digest rows, 8 lanes x 32 bits each
275# UINT8 *data_ptr[8];
276
277# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
278# arg 1 : STATE : pointer to SHA256_ARGS structure (digests and data pointers)
279# arg 2 : INP_SIZE : size of input in blocks
280 # general registers preserved in outer calling routine
281 # outer calling routine saves all the XMM registers
282 # save rsp, allocate 32-byte aligned for local variables
283ENTRY(sha256_x8_avx2)
284
285 # save callee-saved clobbered registers to comply with C function ABI
286 push %r12
287 push %r13
288 push %r14
289 push %r15
290
291 mov %rsp, IDX
292 sub $FRAMESZ, %rsp
293 and $~0x1F, %rsp
294 mov IDX, _rsp(%rsp)
295
296 # Load the pre-transposed incoming digest.
297 vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
298 vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
299 vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
300 vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
301 vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
302 vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
303 vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
304 vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
305
306 lea K256_8(%rip),TBL
307
308	# load the address of each of the 8 message lanes
309 # getting ready to transpose input onto stack
310 mov _args_data_ptr+0*PTR_SZ(STATE),inp0
311 mov _args_data_ptr+1*PTR_SZ(STATE),inp1
312 mov _args_data_ptr+2*PTR_SZ(STATE),inp2
313 mov _args_data_ptr+3*PTR_SZ(STATE),inp3
314 mov _args_data_ptr+4*PTR_SZ(STATE),inp4
315 mov _args_data_ptr+5*PTR_SZ(STATE),inp5
316 mov _args_data_ptr+6*PTR_SZ(STATE),inp6
317 mov _args_data_ptr+7*PTR_SZ(STATE),inp7
318
319 xor IDX, IDX
320lloop:
321 xor ROUND, ROUND
322
323 # save old digest
324 vmovdqu a, _digest(%rsp)
325 vmovdqu b, _digest+1*SZ8(%rsp)
326 vmovdqu c, _digest+2*SZ8(%rsp)
327 vmovdqu d, _digest+3*SZ8(%rsp)
328 vmovdqu e, _digest+4*SZ8(%rsp)
329 vmovdqu f, _digest+5*SZ8(%rsp)
330 vmovdqu g, _digest+6*SZ8(%rsp)
331 vmovdqu h, _digest+7*SZ8(%rsp)
332 i = 0
333.rep 2
334 VMOVPS i*32(inp0, IDX), TT0
335 VMOVPS i*32(inp1, IDX), TT1
336 VMOVPS i*32(inp2, IDX), TT2
337 VMOVPS i*32(inp3, IDX), TT3
338 VMOVPS i*32(inp4, IDX), TT4
339 VMOVPS i*32(inp5, IDX), TT5
340 VMOVPS i*32(inp6, IDX), TT6
341 VMOVPS i*32(inp7, IDX), TT7
342 vmovdqu g, _ytmp(%rsp)
343 vmovdqu h, _ytmp+1*SZ8(%rsp)
344 TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
345 vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
346 vmovdqu _ytmp(%rsp), g
347 vpshufb TMP1, TT0, TT0
348 vpshufb TMP1, TT1, TT1
349 vpshufb TMP1, TT2, TT2
350 vpshufb TMP1, TT3, TT3
351 vpshufb TMP1, TT4, TT4
352 vpshufb TMP1, TT5, TT5
353 vpshufb TMP1, TT6, TT6
354 vpshufb TMP1, TT7, TT7
355 vmovdqu _ytmp+1*SZ8(%rsp), h
356 vmovdqu TT4, _ytmp(%rsp)
357 vmovdqu TT5, _ytmp+1*SZ8(%rsp)
358 vmovdqu TT6, _ytmp+2*SZ8(%rsp)
359 vmovdqu TT7, _ytmp+3*SZ8(%rsp)
360 ROUND_00_15 TT0,(i*8+0)
361 vmovdqu _ytmp(%rsp), TT0
362 ROUND_00_15 TT1,(i*8+1)
363 vmovdqu _ytmp+1*SZ8(%rsp), TT1
364 ROUND_00_15 TT2,(i*8+2)
365 vmovdqu _ytmp+2*SZ8(%rsp), TT2
366 ROUND_00_15 TT3,(i*8+3)
367 vmovdqu _ytmp+3*SZ8(%rsp), TT3
368 ROUND_00_15 TT0,(i*8+4)
369 ROUND_00_15 TT1,(i*8+5)
370 ROUND_00_15 TT2,(i*8+6)
371 ROUND_00_15 TT3,(i*8+7)
372 i = (i+1)
373.endr
374 add $64, IDX
375 i = (i*8)
376
377 jmp Lrounds_16_xx
378.align 16
379Lrounds_16_xx:
380.rep 16
381 ROUND_16_XX T1, i
382 i = (i+1)
383.endr
384
385 cmp $ROUNDS,ROUND
386 jb Lrounds_16_xx
387
388 # add old digest
389 vpaddd _digest+0*SZ8(%rsp), a, a
390 vpaddd _digest+1*SZ8(%rsp), b, b
391 vpaddd _digest+2*SZ8(%rsp), c, c
392 vpaddd _digest+3*SZ8(%rsp), d, d
393 vpaddd _digest+4*SZ8(%rsp), e, e
394 vpaddd _digest+5*SZ8(%rsp), f, f
395 vpaddd _digest+6*SZ8(%rsp), g, g
396 vpaddd _digest+7*SZ8(%rsp), h, h
397
398 sub $1, INP_SIZE # unit is blocks
399 jne lloop
400
401 # write back to memory (state object) the transposed digest
402 vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
403 vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
404 vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
405 vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
406 vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
407 vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
408 vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
409 vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
410
411 # update input pointers
412 add IDX, inp0
413 mov inp0, _args_data_ptr+0*8(STATE)
414 add IDX, inp1
415 mov inp1, _args_data_ptr+1*8(STATE)
416 add IDX, inp2
417 mov inp2, _args_data_ptr+2*8(STATE)
418 add IDX, inp3
419 mov inp3, _args_data_ptr+3*8(STATE)
420 add IDX, inp4
421 mov inp4, _args_data_ptr+4*8(STATE)
422 add IDX, inp5
423 mov inp5, _args_data_ptr+5*8(STATE)
424 add IDX, inp6
425 mov inp6, _args_data_ptr+6*8(STATE)
426 add IDX, inp7
427 mov inp7, _args_data_ptr+7*8(STATE)
428
429 # Postamble
430 mov _rsp(%rsp), %rsp
431
432 # restore callee-saved clobbered registers
433 pop %r15
434 pop %r14
435 pop %r13
436 pop %r12
437
438 ret
439ENDPROC(sha256_x8_avx2)
440
441.section .rodata.K256_8, "a", @progbits
442.align 64
443K256_8:
444 .octa 0x428a2f98428a2f98428a2f98428a2f98
445 .octa 0x428a2f98428a2f98428a2f98428a2f98
446 .octa 0x71374491713744917137449171374491
447 .octa 0x71374491713744917137449171374491
448 .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
449 .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
450 .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
451 .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
452 .octa 0x3956c25b3956c25b3956c25b3956c25b
453 .octa 0x3956c25b3956c25b3956c25b3956c25b
454 .octa 0x59f111f159f111f159f111f159f111f1
455 .octa 0x59f111f159f111f159f111f159f111f1
456 .octa 0x923f82a4923f82a4923f82a4923f82a4
457 .octa 0x923f82a4923f82a4923f82a4923f82a4
458 .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
459 .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
460 .octa 0xd807aa98d807aa98d807aa98d807aa98
461 .octa 0xd807aa98d807aa98d807aa98d807aa98
462 .octa 0x12835b0112835b0112835b0112835b01
463 .octa 0x12835b0112835b0112835b0112835b01
464 .octa 0x243185be243185be243185be243185be
465 .octa 0x243185be243185be243185be243185be
466 .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
467 .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
468 .octa 0x72be5d7472be5d7472be5d7472be5d74
469 .octa 0x72be5d7472be5d7472be5d7472be5d74
470 .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
471 .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
472 .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
473 .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
474 .octa 0xc19bf174c19bf174c19bf174c19bf174
475 .octa 0xc19bf174c19bf174c19bf174c19bf174
476 .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
477 .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
478 .octa 0xefbe4786efbe4786efbe4786efbe4786
479 .octa 0xefbe4786efbe4786efbe4786efbe4786
480 .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
481 .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
482 .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
483 .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
484 .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
485 .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
486 .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
487 .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
488 .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
489 .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
490 .octa 0x76f988da76f988da76f988da76f988da
491 .octa 0x76f988da76f988da76f988da76f988da
492 .octa 0x983e5152983e5152983e5152983e5152
493 .octa 0x983e5152983e5152983e5152983e5152
494 .octa 0xa831c66da831c66da831c66da831c66d
495 .octa 0xa831c66da831c66da831c66da831c66d
496 .octa 0xb00327c8b00327c8b00327c8b00327c8
497 .octa 0xb00327c8b00327c8b00327c8b00327c8
498 .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
499 .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
500 .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
501 .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
502 .octa 0xd5a79147d5a79147d5a79147d5a79147
503 .octa 0xd5a79147d5a79147d5a79147d5a79147
504 .octa 0x06ca635106ca635106ca635106ca6351
505 .octa 0x06ca635106ca635106ca635106ca6351
506 .octa 0x14292967142929671429296714292967
507 .octa 0x14292967142929671429296714292967
508 .octa 0x27b70a8527b70a8527b70a8527b70a85
509 .octa 0x27b70a8527b70a8527b70a8527b70a85
510 .octa 0x2e1b21382e1b21382e1b21382e1b2138
511 .octa 0x2e1b21382e1b21382e1b21382e1b2138
512 .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
513 .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
514 .octa 0x53380d1353380d1353380d1353380d13
515 .octa 0x53380d1353380d1353380d1353380d13
516 .octa 0x650a7354650a7354650a7354650a7354
517 .octa 0x650a7354650a7354650a7354650a7354
518 .octa 0x766a0abb766a0abb766a0abb766a0abb
519 .octa 0x766a0abb766a0abb766a0abb766a0abb
520 .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
521 .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
522 .octa 0x92722c8592722c8592722c8592722c85
523 .octa 0x92722c8592722c8592722c8592722c85
524 .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
525 .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
526 .octa 0xa81a664ba81a664ba81a664ba81a664b
527 .octa 0xa81a664ba81a664ba81a664ba81a664b
528 .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
529 .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
530 .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
531 .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
532 .octa 0xd192e819d192e819d192e819d192e819
533 .octa 0xd192e819d192e819d192e819d192e819
534 .octa 0xd6990624d6990624d6990624d6990624
535 .octa 0xd6990624d6990624d6990624d6990624
536 .octa 0xf40e3585f40e3585f40e3585f40e3585
537 .octa 0xf40e3585f40e3585f40e3585f40e3585
538 .octa 0x106aa070106aa070106aa070106aa070
539 .octa 0x106aa070106aa070106aa070106aa070
540 .octa 0x19a4c11619a4c11619a4c11619a4c116
541 .octa 0x19a4c11619a4c11619a4c11619a4c116
542 .octa 0x1e376c081e376c081e376c081e376c08
543 .octa 0x1e376c081e376c081e376c081e376c08
544 .octa 0x2748774c2748774c2748774c2748774c
545 .octa 0x2748774c2748774c2748774c2748774c
546 .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
547 .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
548 .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
549 .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
550 .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
551 .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
552 .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
553 .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
554 .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
555 .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
556 .octa 0x748f82ee748f82ee748f82ee748f82ee
557 .octa 0x748f82ee748f82ee748f82ee748f82ee
558 .octa 0x78a5636f78a5636f78a5636f78a5636f
559 .octa 0x78a5636f78a5636f78a5636f78a5636f
560 .octa 0x84c8781484c8781484c8781484c87814
561 .octa 0x84c8781484c8781484c8781484c87814
562 .octa 0x8cc702088cc702088cc702088cc70208
563 .octa 0x8cc702088cc702088cc702088cc70208
564 .octa 0x90befffa90befffa90befffa90befffa
565 .octa 0x90befffa90befffa90befffa90befffa
566 .octa 0xa4506ceba4506ceba4506ceba4506ceb
567 .octa 0xa4506ceba4506ceba4506ceba4506ceb
568 .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
569 .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
570 .octa 0xc67178f2c67178f2c67178f2c67178f2
571 .octa 0xc67178f2c67178f2c67178f2c67178f2
572
573.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
574.align 32
575PSHUFFLE_BYTE_FLIP_MASK:
576.octa 0x0c0d0e0f08090a0b0405060700010203
577.octa 0x0c0d0e0f08090a0b0405060700010203
578
579.section .rodata.cst256.K256, "aM", @progbits, 256
580.align 64
581.global K256
582K256:
583 .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
584 .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
585 .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
586 .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
587 .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
588 .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
589 .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
590 .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
591 .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
592 .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
593 .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
594 .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
595 .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
596 .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
597 .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
598 .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
deleted file mode 100644
index 90f1ef69152e..000000000000
--- a/arch/x86/crypto/sha512-mb/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
1# SPDX-License-Identifier: GPL-2.0
2#
3# Arch-specific CryptoAPI modules.
4#
5
6avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
7 $(comma)4)$(comma)%ymm2,yes,no)
8ifeq ($(avx2_supported),yes)
9 obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
10 sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
11 sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
12endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
deleted file mode 100644
index 26b85678012d..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ /dev/null
@@ -1,1047 +0,0 @@
1/*
2 * Multi buffer SHA512 algorithm Glue Code
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <crypto/internal/hash.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/mm.h>
60#include <linux/cryptohash.h>
61#include <linux/types.h>
62#include <linux/list.h>
63#include <crypto/scatterwalk.h>
64#include <crypto/sha.h>
65#include <crypto/mcryptd.h>
66#include <crypto/crypto_wq.h>
67#include <asm/byteorder.h>
68#include <linux/hardirq.h>
69#include <asm/fpu/api.h>
70#include "sha512_mb_ctx.h"
71
72#define FLUSH_INTERVAL 1000 /* in usec */
73
74static struct mcryptd_alg_state sha512_mb_alg_state;
75
76struct sha512_mb_ctx {
77 struct mcryptd_ahash *mcryptd_tfm;
78};
79
80static inline struct mcryptd_hash_request_ctx
81 *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
82{
83 struct ahash_request *areq;
84
85 areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
86 return container_of(areq, struct mcryptd_hash_request_ctx, areq);
87}
88
89static inline struct ahash_request
90 *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
91{
92 return container_of((void *) ctx, struct ahash_request, __ctx);
93}
94
95static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
96 struct ahash_request *areq)
97{
98 rctx->flag = HASH_UPDATE;
99}
100
101static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
102static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
103 (struct sha512_mb_mgr *state,
104 struct job_sha512 *job);
105static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
106 (struct sha512_mb_mgr *state);
107static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
108 (struct sha512_mb_mgr *state);
109
110inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
111 uint64_t total_len)
112{
113 uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
114
115 memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
116 padblock[i] = 0x80;
117
118 i += ((SHA512_BLOCK_SIZE - 1) &
119 (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
120 + 1 + SHA512_PADLENGTHFIELD_SIZE;
121
122#if SHA512_PADLENGTHFIELD_SIZE == 16
123 *((uint64_t *) &padblock[i - 16]) = 0;
124#endif
125
126 *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
127
128 /* Number of extra blocks to hash */
129 return i >> SHA512_LOG2_BLOCK_SIZE;
130}
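A small worked illustration of the block count this helper returns, assuming the same constants (SHA512_BLOCK_SIZE = 128, SHA512_PADLENGTHFIELD_SIZE = 16); extra_blocks is an explanatory name, not a function in this driver:

#include <stdint.h>

static uint32_t extra_blocks(uint64_t total_len)
{
	uint32_t i = total_len & (128 - 1);	/* residual bytes */

	/* room needed for the 0x80 byte and the 16-byte length field */
	i += ((128 - 1) & (0 - (total_len + 16 + 1))) + 1 + 16;

	/* e.g. 72 residual bytes -> 1 extra block, 120 -> 2 extra blocks */
	return i >> 7;
}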
131
132static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
133 (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
134{
135 while (ctx) {
136 if (ctx->status & HASH_CTX_STS_COMPLETE) {
137 /* Clear PROCESSING bit */
138 ctx->status = HASH_CTX_STS_COMPLETE;
139 return ctx;
140 }
141
142 /*
143 * If the extra blocks are empty, begin hashing what remains
144 * in the user's buffer.
145 */
146 if (ctx->partial_block_buffer_length == 0 &&
147 ctx->incoming_buffer_length) {
148
149 const void *buffer = ctx->incoming_buffer;
150 uint32_t len = ctx->incoming_buffer_length;
151 uint32_t copy_len;
152
153 /*
154 * Only entire blocks can be hashed.
155 * Copy remainder to extra blocks buffer.
156 */
157 copy_len = len & (SHA512_BLOCK_SIZE-1);
158
159 if (copy_len) {
160 len -= copy_len;
161 memcpy(ctx->partial_block_buffer,
162 ((const char *) buffer + len),
163 copy_len);
164 ctx->partial_block_buffer_length = copy_len;
165 }
166
167 ctx->incoming_buffer_length = 0;
168
169 /* len should be a multiple of the block size now */
170 assert((len % SHA512_BLOCK_SIZE) == 0);
171
172 /* Set len to the number of blocks to be hashed */
173 len >>= SHA512_LOG2_BLOCK_SIZE;
174
175 if (len) {
176
177 ctx->job.buffer = (uint8_t *) buffer;
178 ctx->job.len = len;
179 ctx = (struct sha512_hash_ctx *)
180 sha512_job_mgr_submit(&mgr->mgr,
181 &ctx->job);
182 continue;
183 }
184 }
185
186 /*
187 * If the extra blocks are not empty, then we are
188 * either on the last block(s) or we need more
189 * user input before continuing.
190 */
191 if (ctx->status & HASH_CTX_STS_LAST) {
192
193 uint8_t *buf = ctx->partial_block_buffer;
194 uint32_t n_extra_blocks =
195 sha512_pad(buf, ctx->total_length);
196
197 ctx->status = (HASH_CTX_STS_PROCESSING |
198 HASH_CTX_STS_COMPLETE);
199 ctx->job.buffer = buf;
200 ctx->job.len = (uint32_t) n_extra_blocks;
201 ctx = (struct sha512_hash_ctx *)
202 sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
203 continue;
204 }
205
206 if (ctx)
207 ctx->status = HASH_CTX_STS_IDLE;
208 return ctx;
209 }
210
211 return NULL;
212}
213
214static struct sha512_hash_ctx
215 *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
216{
217 /*
218 * If get_comp_job returns NULL, there are no jobs complete.
219 * If get_comp_job returns a job, verify that it is safe to return to
220 * the user.
221 * If it is not ready, resubmit the job to finish processing.
222 * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
223 * returned.
224 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
225 * still need processing.
226 */
227 struct sha512_ctx_mgr *mgr;
228 struct sha512_hash_ctx *ctx;
229 unsigned long flags;
230
231 mgr = cstate->mgr;
232 spin_lock_irqsave(&cstate->work_lock, flags);
233 ctx = (struct sha512_hash_ctx *)
234 sha512_job_mgr_get_comp_job(&mgr->mgr);
235 ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
236 spin_unlock_irqrestore(&cstate->work_lock, flags);
237 return ctx;
238}
239
240static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
241{
242 sha512_job_mgr_init(&mgr->mgr);
243}
244
245static struct sha512_hash_ctx
246 *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
247 struct sha512_hash_ctx *ctx,
248 const void *buffer,
249 uint32_t len,
250 int flags)
251{
252 struct sha512_ctx_mgr *mgr;
253 unsigned long irqflags;
254
255 mgr = cstate->mgr;
256 spin_lock_irqsave(&cstate->work_lock, irqflags);
257 if (flags & ~(HASH_UPDATE | HASH_LAST)) {
258 /* User should not pass anything other than UPDATE or LAST */
259 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
260 goto unlock;
261 }
262
263 if (ctx->status & HASH_CTX_STS_PROCESSING) {
264 /* Cannot submit to a currently processing job. */
265 ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
266 goto unlock;
267 }
268
269 if (ctx->status & HASH_CTX_STS_COMPLETE) {
270 /* Cannot update a finished job. */
271 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
272 goto unlock;
273 }
274
275 /*
276 * If we made it here, there were no errors during this call to
277 * submit
278 */
279 ctx->error = HASH_CTX_ERROR_NONE;
280
281 /* Store buffer ptr info from user */
282 ctx->incoming_buffer = buffer;
283 ctx->incoming_buffer_length = len;
284
285 /*
286 * Store the user's request flags and mark this ctx as currently being
287 * processed.
288 */
289 ctx->status = (flags & HASH_LAST) ?
290 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
291 HASH_CTX_STS_PROCESSING;
292
293 /* Advance byte counter */
294 ctx->total_length += len;
295
296 /*
297 * If there is anything currently buffered in the extra blocks,
298 * append to it until it contains a whole block.
299 * Or if the user's buffer contains less than a whole block,
300 * append as much as possible to the extra block.
301 */
302 if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
303 /* Compute how many bytes to copy from user buffer into extra
304 * block
305 */
306 uint32_t copy_len = SHA512_BLOCK_SIZE -
307 ctx->partial_block_buffer_length;
308 if (len < copy_len)
309 copy_len = len;
310
311 if (copy_len) {
312 /* Copy and update relevant pointers and counters */
313 memcpy
314 (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
315 buffer, copy_len);
316
317 ctx->partial_block_buffer_length += copy_len;
318 ctx->incoming_buffer = (const void *)
319 ((const char *)buffer + copy_len);
320 ctx->incoming_buffer_length = len - copy_len;
321 }
322
323 /* The extra block should never contain more than 1 block
324 * here
325 */
326 assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
327
328 /* If the extra block buffer contains exactly 1 block, it can
329 * be hashed.
330 */
331 if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
332 ctx->partial_block_buffer_length = 0;
333
334 ctx->job.buffer = ctx->partial_block_buffer;
335 ctx->job.len = 1;
336 ctx = (struct sha512_hash_ctx *)
337 sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
338 }
339 }
340
341 ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
342unlock:
343 spin_unlock_irqrestore(&cstate->work_lock, irqflags);
344 return ctx;
345}
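A minimal sketch of the partial-block rule applied above, with the job-manager submission and locking omitted; append_partial and the shortened parameter names are illustrative:

#include <stdint.h>
#include <string.h>

/* At most one 128-byte SHA-512 block is ever buffered; once it fills,
 * it is submitted as a single-block job and the buffer is emptied,
 * while any remaining whole blocks are hashed straight from the
 * caller's buffer. */
static void append_partial(uint8_t part[128], uint32_t *part_len,
			   const uint8_t **buf, uint32_t *len)
{
	uint32_t copy_len = 128 - *part_len;

	if (*len < copy_len)
		copy_len = *len;

	memcpy(&part[*part_len], *buf, copy_len);
	*part_len += copy_len;
	*buf += copy_len;
	*len -= copy_len;

	if (*part_len == 128)
		*part_len = 0;	/* full block: hand it to the job manager */
}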
346
347static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
348{
349 struct sha512_ctx_mgr *mgr;
350 struct sha512_hash_ctx *ctx;
351 unsigned long flags;
352
353 mgr = cstate->mgr;
354 spin_lock_irqsave(&cstate->work_lock, flags);
355 while (1) {
356 ctx = (struct sha512_hash_ctx *)
357 sha512_job_mgr_flush(&mgr->mgr);
358
359		/* If flush returned NULL, there are no more jobs in flight. */
360 if (!ctx)
361 break;
362
363 /*
364 * If flush returned a job, resubmit the job to finish
365 * processing.
366 */
367 ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
368
369 /*
370 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
371 * be returned. Otherwise, all jobs currently being managed by
372 * the sha512_ctx_mgr still need processing. Loop.
373 */
374 if (ctx)
375 break;
376 }
377 spin_unlock_irqrestore(&cstate->work_lock, flags);
378 return ctx;
379}
380
381static int sha512_mb_init(struct ahash_request *areq)
382{
383 struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
384
385 hash_ctx_init(sctx);
386 sctx->job.result_digest[0] = SHA512_H0;
387 sctx->job.result_digest[1] = SHA512_H1;
388 sctx->job.result_digest[2] = SHA512_H2;
389 sctx->job.result_digest[3] = SHA512_H3;
390 sctx->job.result_digest[4] = SHA512_H4;
391 sctx->job.result_digest[5] = SHA512_H5;
392 sctx->job.result_digest[6] = SHA512_H6;
393 sctx->job.result_digest[7] = SHA512_H7;
394 sctx->total_length = 0;
395 sctx->partial_block_buffer_length = 0;
396 sctx->status = HASH_CTX_STS_IDLE;
397
398 return 0;
399}
400
401static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
402{
403 int i;
404 struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
405 __be64 *dst = (__be64 *) rctx->out;
406
407 for (i = 0; i < 8; ++i)
408 dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
409
410 return 0;
411}
412
413static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
414 struct mcryptd_alg_cstate *cstate, bool flush)
415{
416 int flag = HASH_UPDATE;
417 int nbytes, err = 0;
418 struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
419 struct sha512_hash_ctx *sha_ctx;
420
421 /* more work ? */
422 while (!(rctx->flag & HASH_DONE)) {
423 nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
424 if (nbytes < 0) {
425 err = nbytes;
426 goto out;
427 }
428 /* check if the walk is done */
429 if (crypto_ahash_walk_last(&rctx->walk)) {
430 rctx->flag |= HASH_DONE;
431 if (rctx->flag & HASH_FINAL)
432 flag |= HASH_LAST;
433
434 }
435 sha_ctx = (struct sha512_hash_ctx *)
436 ahash_request_ctx(&rctx->areq);
437 kernel_fpu_begin();
438 sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
439 rctx->walk.data, nbytes, flag);
440 if (!sha_ctx) {
441 if (flush)
442 sha_ctx = sha512_ctx_mgr_flush(cstate);
443 }
444 kernel_fpu_end();
445 if (sha_ctx)
446 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
447 else {
448 rctx = NULL;
449 goto out;
450 }
451 }
452
453 /* copy the results */
454 if (rctx->flag & HASH_FINAL)
455 sha512_mb_set_results(rctx);
456
457out:
458 *ret_rctx = rctx;
459 return err;
460}
461
462static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
463 struct mcryptd_alg_cstate *cstate,
464 int err)
465{
466 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
467 struct sha512_hash_ctx *sha_ctx;
468 struct mcryptd_hash_request_ctx *req_ctx;
469 int ret;
470 unsigned long flags;
471
472 /* remove from work list */
473 spin_lock_irqsave(&cstate->work_lock, flags);
474 list_del(&rctx->waiter);
475 spin_unlock_irqrestore(&cstate->work_lock, flags);
476
477 if (irqs_disabled())
478 rctx->complete(&req->base, err);
479 else {
480 local_bh_disable();
481 rctx->complete(&req->base, err);
482 local_bh_enable();
483 }
484
485 /* check to see if there are other jobs that are done */
486 sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
487 while (sha_ctx) {
488 req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
489 ret = sha_finish_walk(&req_ctx, cstate, false);
490 if (req_ctx) {
491 spin_lock_irqsave(&cstate->work_lock, flags);
492 list_del(&req_ctx->waiter);
493 spin_unlock_irqrestore(&cstate->work_lock, flags);
494
495 req = cast_mcryptd_ctx_to_req(req_ctx);
496 if (irqs_disabled())
497 req_ctx->complete(&req->base, ret);
498 else {
499 local_bh_disable();
500 req_ctx->complete(&req->base, ret);
501 local_bh_enable();
502 }
503 }
504 sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
505 }
506
507 return 0;
508}
509
510static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
511 struct mcryptd_alg_cstate *cstate)
512{
513 unsigned long next_flush;
514 unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
515 unsigned long flags;
516
517 /* initialize tag */
518 rctx->tag.arrival = jiffies; /* tag the arrival time */
519 rctx->tag.seq_num = cstate->next_seq_num++;
520 next_flush = rctx->tag.arrival + delay;
521 rctx->tag.expire = next_flush;
522
523 spin_lock_irqsave(&cstate->work_lock, flags);
524 list_add_tail(&rctx->waiter, &cstate->work_list);
525 spin_unlock_irqrestore(&cstate->work_lock, flags);
526
527 mcryptd_arm_flusher(cstate, delay);
528}
529
530static int sha512_mb_update(struct ahash_request *areq)
531{
532 struct mcryptd_hash_request_ctx *rctx =
533 container_of(areq, struct mcryptd_hash_request_ctx,
534 areq);
535 struct mcryptd_alg_cstate *cstate =
536 this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
537
538 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
539 struct sha512_hash_ctx *sha_ctx;
540 int ret = 0, nbytes;
541
542
543 /* sanity check */
544 if (rctx->tag.cpu != smp_processor_id()) {
545 pr_err("mcryptd error: cpu clash\n");
546 goto done;
547 }
548
549 /* need to init context */
550 req_ctx_init(rctx, areq);
551
552 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
553
554 if (nbytes < 0) {
555 ret = nbytes;
556 goto done;
557 }
558
559 if (crypto_ahash_walk_last(&rctx->walk))
560 rctx->flag |= HASH_DONE;
561
562 /* submit */
563 sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
564 sha512_mb_add_list(rctx, cstate);
565 kernel_fpu_begin();
566 sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
567 nbytes, HASH_UPDATE);
568 kernel_fpu_end();
569
570 /* check if anything is returned */
571 if (!sha_ctx)
572 return -EINPROGRESS;
573
574 if (sha_ctx->error) {
575 ret = sha_ctx->error;
576 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
577 goto done;
578 }
579
580 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
581 ret = sha_finish_walk(&rctx, cstate, false);
582
583 if (!rctx)
584 return -EINPROGRESS;
585done:
586 sha_complete_job(rctx, cstate, ret);
587 return ret;
588}
589
590static int sha512_mb_finup(struct ahash_request *areq)
591{
592 struct mcryptd_hash_request_ctx *rctx =
593 container_of(areq, struct mcryptd_hash_request_ctx,
594 areq);
595 struct mcryptd_alg_cstate *cstate =
596 this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
597
598 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
599 struct sha512_hash_ctx *sha_ctx;
600 int ret = 0, flag = HASH_UPDATE, nbytes;
601
602 /* sanity check */
603 if (rctx->tag.cpu != smp_processor_id()) {
604 pr_err("mcryptd error: cpu clash\n");
605 goto done;
606 }
607
608 /* need to init context */
609 req_ctx_init(rctx, areq);
610
611 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
612
613 if (nbytes < 0) {
614 ret = nbytes;
615 goto done;
616 }
617
618 if (crypto_ahash_walk_last(&rctx->walk)) {
619 rctx->flag |= HASH_DONE;
620 flag = HASH_LAST;
621 }
622
623 /* submit */
624 rctx->flag |= HASH_FINAL;
625 sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
626 sha512_mb_add_list(rctx, cstate);
627
628 kernel_fpu_begin();
629 sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
630 nbytes, flag);
631 kernel_fpu_end();
632
633 /* check if anything is returned */
634 if (!sha_ctx)
635 return -EINPROGRESS;
636
637 if (sha_ctx->error) {
638 ret = sha_ctx->error;
639 goto done;
640 }
641
642 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
643 ret = sha_finish_walk(&rctx, cstate, false);
644 if (!rctx)
645 return -EINPROGRESS;
646done:
647 sha_complete_job(rctx, cstate, ret);
648 return ret;
649}
650
651static int sha512_mb_final(struct ahash_request *areq)
652{
653 struct mcryptd_hash_request_ctx *rctx =
654 container_of(areq, struct mcryptd_hash_request_ctx,
655 areq);
656 struct mcryptd_alg_cstate *cstate =
657 this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
658
659 struct sha512_hash_ctx *sha_ctx;
660 int ret = 0;
661 u8 data;
662
663 /* sanity check */
664 if (rctx->tag.cpu != smp_processor_id()) {
665 pr_err("mcryptd error: cpu clash\n");
666 goto done;
667 }
668
669 /* need to init context */
670 req_ctx_init(rctx, areq);
671
672 rctx->flag |= HASH_DONE | HASH_FINAL;
673
674 sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
675 /* flag HASH_FINAL and 0 data size */
676 sha512_mb_add_list(rctx, cstate);
677 kernel_fpu_begin();
678 sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
679 kernel_fpu_end();
680
681 /* check if anything is returned */
682 if (!sha_ctx)
683 return -EINPROGRESS;
684
685 if (sha_ctx->error) {
686 ret = sha_ctx->error;
687 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
688 goto done;
689 }
690
691 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
692 ret = sha_finish_walk(&rctx, cstate, false);
693 if (!rctx)
694 return -EINPROGRESS;
695done:
696 sha_complete_job(rctx, cstate, ret);
697 return ret;
698}
699
700static int sha512_mb_export(struct ahash_request *areq, void *out)
701{
702 struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
703
704 memcpy(out, sctx, sizeof(*sctx));
705
706 return 0;
707}
708
709static int sha512_mb_import(struct ahash_request *areq, const void *in)
710{
711 struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
712
713 memcpy(sctx, in, sizeof(*sctx));
714
715 return 0;
716}
717
718static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
719{
720 struct mcryptd_ahash *mcryptd_tfm;
721 struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
722 struct mcryptd_hash_ctx *mctx;
723
724 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
725 CRYPTO_ALG_INTERNAL,
726 CRYPTO_ALG_INTERNAL);
727 if (IS_ERR(mcryptd_tfm))
728 return PTR_ERR(mcryptd_tfm);
729 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
730 mctx->alg_state = &sha512_mb_alg_state;
731 ctx->mcryptd_tfm = mcryptd_tfm;
732 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
733 sizeof(struct ahash_request) +
734 crypto_ahash_reqsize(&mcryptd_tfm->base));
735
736 return 0;
737}
738
739static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
740{
741 struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
742
743 mcryptd_free_ahash(ctx->mcryptd_tfm);
744}
745
746static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
747{
748 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
749 sizeof(struct ahash_request) +
750 sizeof(struct sha512_hash_ctx));
751
752 return 0;
753}
754
755static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
756{
757 struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
758
759 mcryptd_free_ahash(ctx->mcryptd_tfm);
760}
761
762static struct ahash_alg sha512_mb_areq_alg = {
763 .init = sha512_mb_init,
764 .update = sha512_mb_update,
765 .final = sha512_mb_final,
766 .finup = sha512_mb_finup,
767 .export = sha512_mb_export,
768 .import = sha512_mb_import,
769 .halg = {
770 .digestsize = SHA512_DIGEST_SIZE,
771 .statesize = sizeof(struct sha512_hash_ctx),
772 .base = {
773 .cra_name = "__sha512-mb",
774 .cra_driver_name = "__intel_sha512-mb",
775 .cra_priority = 100,
776 /*
777 * use ASYNC flag as some buffers in multi-buffer
778 * algo may not have completed before hashing thread
779 * sleep
780 */
781 .cra_flags = CRYPTO_ALG_ASYNC |
782 CRYPTO_ALG_INTERNAL,
783 .cra_blocksize = SHA512_BLOCK_SIZE,
784 .cra_module = THIS_MODULE,
785 .cra_list = LIST_HEAD_INIT
786 (sha512_mb_areq_alg.halg.base.cra_list),
787 .cra_init = sha512_mb_areq_init_tfm,
788 .cra_exit = sha512_mb_areq_exit_tfm,
789 .cra_ctxsize = sizeof(struct sha512_hash_ctx),
790 }
791 }
792};
793
794static int sha512_mb_async_init(struct ahash_request *req)
795{
796 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
797 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
798 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
799 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
800
801 memcpy(mcryptd_req, req, sizeof(*req));
802 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
803 return crypto_ahash_init(mcryptd_req);
804}
805
806static int sha512_mb_async_update(struct ahash_request *req)
807{
808 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
809
810 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
811 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
812 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
813
814 memcpy(mcryptd_req, req, sizeof(*req));
815 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
816 return crypto_ahash_update(mcryptd_req);
817}
818
819static int sha512_mb_async_finup(struct ahash_request *req)
820{
821 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
822
823 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
824 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
825 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
826
827 memcpy(mcryptd_req, req, sizeof(*req));
828 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
829 return crypto_ahash_finup(mcryptd_req);
830}
831
832static int sha512_mb_async_final(struct ahash_request *req)
833{
834 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
835
836 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
837 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
838 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
839
840 memcpy(mcryptd_req, req, sizeof(*req));
841 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
842 return crypto_ahash_final(mcryptd_req);
843}
844
845static int sha512_mb_async_digest(struct ahash_request *req)
846{
847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
848 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
849 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
850 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
851
852 memcpy(mcryptd_req, req, sizeof(*req));
853 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
854 return crypto_ahash_digest(mcryptd_req);
855}
856
857static int sha512_mb_async_export(struct ahash_request *req, void *out)
858{
859 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
860 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
861 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
862 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
863
864 memcpy(mcryptd_req, req, sizeof(*req));
865 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
866 return crypto_ahash_export(mcryptd_req, out);
867}
868
869static int sha512_mb_async_import(struct ahash_request *req, const void *in)
870{
871 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
872 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
873 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
874 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
875 struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
876 struct mcryptd_hash_request_ctx *rctx;
877 struct ahash_request *areq;
878
879 memcpy(mcryptd_req, req, sizeof(*req));
880 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
881 rctx = ahash_request_ctx(mcryptd_req);
882
883 areq = &rctx->areq;
884
885 ahash_request_set_tfm(areq, child);
886 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
887 rctx->complete, req);
888
889 return crypto_ahash_import(mcryptd_req, in);
890}
891
892static struct ahash_alg sha512_mb_async_alg = {
893 .init = sha512_mb_async_init,
894 .update = sha512_mb_async_update,
895 .final = sha512_mb_async_final,
896 .finup = sha512_mb_async_finup,
897 .digest = sha512_mb_async_digest,
898 .export = sha512_mb_async_export,
899 .import = sha512_mb_async_import,
900 .halg = {
901 .digestsize = SHA512_DIGEST_SIZE,
902 .statesize = sizeof(struct sha512_hash_ctx),
903 .base = {
904 .cra_name = "sha512",
905 .cra_driver_name = "sha512_mb",
906 /*
907 * Low priority, since with few concurrent hash requests
908 * this is extremely slow due to the flush delay. Users
909 * whose workloads would benefit from this can request
910 * it explicitly by driver name, or can increase its
911 * priority at runtime using NETLINK_CRYPTO.
912 */
913 .cra_priority = 50,
914 .cra_flags = CRYPTO_ALG_ASYNC,
915 .cra_blocksize = SHA512_BLOCK_SIZE,
916 .cra_module = THIS_MODULE,
917 .cra_list = LIST_HEAD_INIT
918 (sha512_mb_async_alg.halg.base.cra_list),
919 .cra_init = sha512_mb_async_init_tfm,
920 .cra_exit = sha512_mb_async_exit_tfm,
921 .cra_ctxsize = sizeof(struct sha512_mb_ctx),
922 .cra_alignmask = 0,
923 },
924 },
925};
926
927static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
928{
929 struct mcryptd_hash_request_ctx *rctx;
930 unsigned long cur_time;
931 unsigned long next_flush = 0;
932 struct sha512_hash_ctx *sha_ctx;
933
934
935 cur_time = jiffies;
936
937 while (!list_empty(&cstate->work_list)) {
938 rctx = list_entry(cstate->work_list.next,
939 struct mcryptd_hash_request_ctx, waiter);
940		if (time_before(cur_time, rctx->tag.expire))
941 break;
942 kernel_fpu_begin();
943 sha_ctx = (struct sha512_hash_ctx *)
944 sha512_ctx_mgr_flush(cstate);
945 kernel_fpu_end();
946 if (!sha_ctx) {
947 pr_err("sha512_mb error: nothing got flushed for"
948 " non-empty list\n");
949 break;
950 }
951 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
952 sha_finish_walk(&rctx, cstate, true);
953 sha_complete_job(rctx, cstate, 0);
954 }
955
956 if (!list_empty(&cstate->work_list)) {
957 rctx = list_entry(cstate->work_list.next,
958 struct mcryptd_hash_request_ctx, waiter);
959 /* get the hash context and then flush time */
960 next_flush = rctx->tag.expire;
961 mcryptd_arm_flusher(cstate, get_delay(next_flush));
962 }
963 return next_flush;
964}
965
966static int __init sha512_mb_mod_init(void)
967{
968
969 int cpu;
970 int err;
971 struct mcryptd_alg_cstate *cpu_state;
972
973 /* check for dependent cpu features */
974 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
975 !boot_cpu_has(X86_FEATURE_BMI2))
976 return -ENODEV;
977
978 /* initialize multibuffer structures */
979 sha512_mb_alg_state.alg_cstate =
980 alloc_percpu(struct mcryptd_alg_cstate);
981
982 sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
983 sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
984 sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
985 sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
986
987 if (!sha512_mb_alg_state.alg_cstate)
988 return -ENOMEM;
989 for_each_possible_cpu(cpu) {
990 cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
991 cpu_state->next_flush = 0;
992 cpu_state->next_seq_num = 0;
993 cpu_state->flusher_engaged = false;
994 INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
995 cpu_state->cpu = cpu;
996 cpu_state->alg_state = &sha512_mb_alg_state;
997 cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
998 GFP_KERNEL);
999 if (!cpu_state->mgr)
1000 goto err2;
1001 sha512_ctx_mgr_init(cpu_state->mgr);
1002 INIT_LIST_HEAD(&cpu_state->work_list);
1003 spin_lock_init(&cpu_state->work_lock);
1004 }
1005 sha512_mb_alg_state.flusher = &sha512_mb_flusher;
1006
1007 err = crypto_register_ahash(&sha512_mb_areq_alg);
1008 if (err)
1009 goto err2;
1010 err = crypto_register_ahash(&sha512_mb_async_alg);
1011 if (err)
1012 goto err1;
1013
1014
1015 return 0;
1016err1:
1017 crypto_unregister_ahash(&sha512_mb_areq_alg);
1018err2:
1019 for_each_possible_cpu(cpu) {
1020 cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
1021 kfree(cpu_state->mgr);
1022 }
1023 free_percpu(sha512_mb_alg_state.alg_cstate);
1024 return -ENODEV;
1025}
1026
1027static void __exit sha512_mb_mod_fini(void)
1028{
1029 int cpu;
1030 struct mcryptd_alg_cstate *cpu_state;
1031
1032 crypto_unregister_ahash(&sha512_mb_async_alg);
1033 crypto_unregister_ahash(&sha512_mb_areq_alg);
1034 for_each_possible_cpu(cpu) {
1035 cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
1036 kfree(cpu_state->mgr);
1037 }
1038 free_percpu(sha512_mb_alg_state.alg_cstate);
1039}
1040
1041module_init(sha512_mb_mod_init);
1042module_exit(sha512_mb_mod_fini);
1043
1044MODULE_LICENSE("GPL");
1045MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
1046
1047MODULE_ALIAS("sha512");
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
deleted file mode 100644
index e5c465bd821e..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Header file for multi buffer SHA512 context
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef _SHA_MB_CTX_INTERNAL_H
55#define _SHA_MB_CTX_INTERNAL_H
56
57#include "sha512_mb_mgr.h"
58
59#define HASH_UPDATE 0x00
60#define HASH_LAST 0x01
61#define HASH_DONE 0x02
62#define HASH_FINAL 0x04
63
64#define HASH_CTX_STS_IDLE 0x00
65#define HASH_CTX_STS_PROCESSING 0x01
66#define HASH_CTX_STS_LAST 0x02
67#define HASH_CTX_STS_COMPLETE 0x04
68
69enum hash_ctx_error {
70 HASH_CTX_ERROR_NONE = 0,
71 HASH_CTX_ERROR_INVALID_FLAGS = -1,
72 HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
73 HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
74};
75
76#define hash_ctx_user_data(ctx) ((ctx)->user_data)
77#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
78#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
79#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
80#define hash_ctx_status(ctx) ((ctx)->status)
81#define hash_ctx_error(ctx) ((ctx)->error)
82#define hash_ctx_init(ctx) \
83 do { \
84 (ctx)->error = HASH_CTX_ERROR_NONE; \
85 (ctx)->status = HASH_CTX_STS_COMPLETE; \
86 } while (0)
87
88/* Hash Constants and Typedefs */
89#define SHA512_DIGEST_LENGTH 8
90#define SHA512_LOG2_BLOCK_SIZE 7
91
92#define SHA512_PADLENGTHFIELD_SIZE 16
93
94#ifdef SHA_MB_DEBUG
95#define assert(expr) \
96do { \
97 if (unlikely(!(expr))) { \
98 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
99 #expr, __FILE__, __func__, __LINE__); \
100 } \
101} while (0)
102#else
103#define assert(expr) do {} while (0)
104#endif
105
106struct sha512_ctx_mgr {
107 struct sha512_mb_mgr mgr;
108};
109
110/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
111
112struct sha512_hash_ctx {
113 /* Must be at struct offset 0 */
114 struct job_sha512 job;
115 /* status flag */
116 int status;
117 /* error flag */
118 int error;
119
120 uint64_t total_length;
121 const void *incoming_buffer;
122 uint32_t incoming_buffer_length;
123 uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
124 uint32_t partial_block_buffer_length;
125 void *user_data;
126};
127
128#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
deleted file mode 100644
index 178f17eef382..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Header file for multi buffer SHA512 algorithm manager
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef __SHA_MB_MGR_H
55#define __SHA_MB_MGR_H
56
57#include <linux/types.h>
58
59#define NUM_SHA512_DIGEST_WORDS 8
60
61enum job_sts {STS_UNKNOWN = 0,
62 STS_BEING_PROCESSED = 1,
63 STS_COMPLETED = 2,
64 STS_INTERNAL_ERROR = 3,
65 STS_ERROR = 4
66};
67
68struct job_sha512 {
69 u8 *buffer;
70 u64 len;
71 u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
72 enum job_sts status;
73 void *user_data;
74};
75
76struct sha512_args_x4 {
77 uint64_t digest[8][4];
78 uint8_t *data_ptr[4];
79};
80
81struct sha512_lane_data {
82 struct job_sha512 *job_in_lane;
83};
84
85struct sha512_mb_mgr {
86 struct sha512_args_x4 args;
87
88 uint64_t lens[4];
89
90 /* each byte is index (0...7) of unused lanes */
91 uint64_t unused_lanes;
92 /* byte 4 is set to FF as a flag */
93 struct sha512_lane_data ldata[4];
94};
95
96#define SHA512_MB_MGR_NUM_LANES_AVX2 4
97
98void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
99struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
100 struct job_sha512 *job);
101struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
102struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
103
104#endif
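
The unused_lanes field described in the comments above is a small stack of free lane indices packed one per byte, with 0xFF as the bottom-of-stack sentinel; the flush and submit routines removed below pop and push it with shr/shl+or. A short stand-alone sketch of that encoding (the helpers are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

static unsigned pop_lane(uint64_t *unused_lanes)
{
        unsigned lane = *unused_lanes & 0xFF;         /* movzb %bl, lane */
        *unused_lanes >>= 8;                          /* shr $8          */
        return lane;
}

static void push_lane(uint64_t *unused_lanes, unsigned lane)
{
        *unused_lanes = (*unused_lanes << 8) | lane;  /* shl $8; or idx  */
}

int main(void)
{
        uint64_t unused_lanes = 0xFF03020100ULL;      /* init value: lanes 0..3 free */

        unsigned first  = pop_lane(&unused_lanes);
        unsigned second = pop_lane(&unused_lanes);
        push_lane(&unused_lanes, first);

        printf("popped %u and %u, stack now %#llx\n", first, second,
               (unsigned long long)unused_lanes);
        /* All lanes busy  <=> unused_lanes == 0xFF (only the sentinel left),
         * which is what submit's "cmp $0xFF" tests.  All lanes free <=> the
         * sentinel is still in byte 4, which is what flush's "bt $32+7" tests. */
        return 0;
}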
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
deleted file mode 100644
index cf2636d4c9ba..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,281 +0,0 @@
1/*
 2 * Header file for multi buffer SHA512 algorithm data structure
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54# Macros for defining data structures
55
56# Usage example
57
58#START_FIELDS # JOB_AES
59### name size align
60#FIELD _plaintext, 8, 8 # pointer to plaintext
61#FIELD _ciphertext, 8, 8 # pointer to ciphertext
62#FIELD _IV, 16, 8 # IV
63#FIELD _keys, 8, 8 # pointer to keys
64#FIELD _len, 4, 4 # length in bytes
65#FIELD _status, 4, 4 # status enumeration
66#FIELD _user_data, 8, 8 # pointer to user data
67#UNION _union, size1, align1, \
68# size2, align2, \
69# size3, align3, \
70# ...
71#END_FIELDS
72#%assign _JOB_AES_size _FIELD_OFFSET
73#%assign _JOB_AES_align _STRUCT_ALIGN
74
75#########################################################################
76
77# Alternate "struc-like" syntax:
78# STRUCT job_aes2
79# RES_Q .plaintext, 1
80# RES_Q .ciphertext, 1
81# RES_DQ .IV, 1
82# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
83# RES_U .union, size1, align1, \
84# size2, align2, \
85# ...
86# ENDSTRUCT
87# # Following only needed if nesting
88# %assign job_aes2_size _FIELD_OFFSET
89# %assign job_aes2_align _STRUCT_ALIGN
90#
91# RES_* macros take a name, a count and an optional alignment.
 92# The count is in terms of the base size of the macro, and the
93# default alignment is the base size.
94# The macros are:
95# Macro Base size
96# RES_B 1
97# RES_W 2
98# RES_D 4
99# RES_Q 8
100# RES_DQ 16
101# RES_Y 32
102# RES_Z 64
103#
 104# RES_U defines a union. Its arguments are a name and two or more
105# pairs of "size, alignment"
106#
107# The two assigns are only needed if this structure is being nested
108# within another. Even if the assigns are not done, one can still use
109# STRUCT_NAME_size as the size of the structure.
110#
111# Note that for nesting, you still need to assign to STRUCT_NAME_size.
112#
113# The differences between this and using "struc" directly are that each
114# type is implicitly aligned to its natural length (although this can be
115# over-ridden with an explicit third parameter), and that the structure
116# is padded at the end to its overall alignment.
117#
118
119#########################################################################
120
121#ifndef _DATASTRUCT_ASM_
122#define _DATASTRUCT_ASM_
123
124#define PTR_SZ 8
125#define SHA512_DIGEST_WORD_SIZE 8
126#define SHA512_MB_MGR_NUM_LANES_AVX2 4
127#define NUM_SHA512_DIGEST_WORDS 8
128#define SZ4 4*SHA512_DIGEST_WORD_SIZE
129#define ROUNDS 80*SZ4
130#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
131
132# START_FIELDS
133.macro START_FIELDS
134 _FIELD_OFFSET = 0
135 _STRUCT_ALIGN = 0
136.endm
137
138# FIELD name size align
139.macro FIELD name size align
140 _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
141 \name = _FIELD_OFFSET
142 _FIELD_OFFSET = _FIELD_OFFSET + (\size)
143.if (\align > _STRUCT_ALIGN)
144 _STRUCT_ALIGN = \align
145.endif
146.endm
147
148# END_FIELDS
149.macro END_FIELDS
150 _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
151.endm
152
153.macro STRUCT p1
154START_FIELDS
155.struc \p1
156.endm
157
158.macro ENDSTRUCT
159 tmp = _FIELD_OFFSET
160 END_FIELDS
161 tmp = (_FIELD_OFFSET - ##tmp)
162.if (tmp > 0)
163 .lcomm tmp
164.endm
165
166## RES_int name size align
167.macro RES_int p1 p2 p3
168 name = \p1
169 size = \p2
170 align = .\p3
171
172 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
173.align align
174.lcomm name size
175 _FIELD_OFFSET = _FIELD_OFFSET + (size)
176.if (align > _STRUCT_ALIGN)
177 _STRUCT_ALIGN = align
178.endif
179.endm
180
181# macro RES_B name, size [, align]
182.macro RES_B _name, _size, _align=1
183RES_int _name _size _align
184.endm
185
186# macro RES_W name, size [, align]
187.macro RES_W _name, _size, _align=2
188RES_int _name 2*(_size) _align
189.endm
190
191# macro RES_D name, size [, align]
192.macro RES_D _name, _size, _align=4
193RES_int _name 4*(_size) _align
194.endm
195
196# macro RES_Q name, size [, align]
197.macro RES_Q _name, _size, _align=8
198RES_int _name 8*(_size) _align
199.endm
200
201# macro RES_DQ name, size [, align]
202.macro RES_DQ _name, _size, _align=16
203RES_int _name 16*(_size) _align
204.endm
205
206# macro RES_Y name, size [, align]
207.macro RES_Y _name, _size, _align=32
208RES_int _name 32*(_size) _align
209.endm
210
211# macro RES_Z name, size [, align]
212.macro RES_Z _name, _size, _align=64
213RES_int _name 64*(_size) _align
214.endm
215
216#endif
217
218###################################################################
219### Define SHA512 Out Of Order Data Structures
220###################################################################
221
222START_FIELDS # LANE_DATA
223### name size align
224FIELD _job_in_lane, 8, 8 # pointer to job object
225END_FIELDS
226
227 _LANE_DATA_size = _FIELD_OFFSET
228 _LANE_DATA_align = _STRUCT_ALIGN
229
230####################################################################
231
232START_FIELDS # SHA512_ARGS_X4
233### name size align
234FIELD _digest, 8*8*4, 4 # transposed digest
235FIELD _data_ptr, 8*4, 8 # array of pointers to data
236END_FIELDS
237
238 _SHA512_ARGS_X4_size = _FIELD_OFFSET
239 _SHA512_ARGS_X4_align = _STRUCT_ALIGN
240
241#####################################################################
242
243START_FIELDS # MB_MGR
244### name size align
245FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
246FIELD _lens, 8*4, 8
247FIELD _unused_lanes, 8, 8
248FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
249END_FIELDS
250
251 _MB_MGR_size = _FIELD_OFFSET
252 _MB_MGR_align = _STRUCT_ALIGN
253
254_args_digest = _args + _digest
255_args_data_ptr = _args + _data_ptr
256
257#######################################################################
258
259#######################################################################
260#### Define constants
261#######################################################################
262
263#define STS_UNKNOWN 0
264#define STS_BEING_PROCESSED 1
265#define STS_COMPLETED 2
266
267#######################################################################
268#### Define JOB_SHA512 structure
269#######################################################################
270
271START_FIELDS # JOB_SHA512
272### name size align
273FIELD _buffer, 8, 8 # pointer to buffer
274FIELD _len, 8, 8 # length in bytes
275FIELD _result_digest, 8*8, 32 # Digest (output)
276FIELD _status, 4, 4
277FIELD _user_data, 8, 8
278END_FIELDS
279
280 _JOB_SHA512_size = _FIELD_OFFSET
281 _JOB_SHA512_align = _STRUCT_ALIGN
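
The START_FIELDS/FIELD/END_FIELDS machinery above is just offset arithmetic: round the running offset up to each field's alignment, remember the largest alignment seen, and round the final size up to it. A plain C restatement of that arithmetic, worked through for the JOB_SHA512 layout (the helper function is illustrative, but the printed offsets are the ones the macros assign):

#include <stdio.h>

static unsigned long offset, struct_align;

static unsigned long field(unsigned long size, unsigned long align)
{
        unsigned long off = (offset + align - 1) & ~(align - 1);  /* round up */

        offset = off + size;
        if (align > struct_align)
                struct_align = align;
        return off;
}

int main(void)
{
        unsigned long buffer        = field(8, 8);       /*   0 */
        unsigned long len           = field(8, 8);       /*   8 */
        unsigned long result_digest = field(8 * 8, 32);  /*  32: rounded up from 16 */
        unsigned long status        = field(4, 4);       /*  96 */
        unsigned long user_data     = field(8, 8);       /* 104 */
        unsigned long job_size = (offset + struct_align - 1) & ~(struct_align - 1);

        printf("_buffer=%lu _len=%lu _result_digest=%lu _status=%lu "
               "_user_data=%lu _JOB_SHA512_size=%lu\n",
               buffer, len, result_digest, status, user_data, job_size);
        return 0;
}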
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7c629caebc05..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,297 +0,0 @@
1/*
2 * Flush routine for SHA512 multibuffer
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha512_mb_mgr_datastruct.S"
57
58.extern sha512_x4_avx2
59
60# LINUX register definitions
61#define arg1 %rdi
62#define arg2 %rsi
63
64# idx needs to be other than arg1, arg2, rbx, r12
65#define idx %rdx
66
67# Common definitions
68#define state arg1
69#define job arg2
70#define len2 arg2
71
72#define unused_lanes %rbx
73#define lane_data %rbx
74#define tmp2 %rbx
75
76#define job_rax %rax
77#define tmp1 %rax
78#define size_offset %rax
79#define tmp %rax
80#define start_offset %rax
81
82#define tmp3 arg1
83
84#define extra_blocks arg2
85#define p arg2
86
87#define tmp4 %r8
88#define lens0 %r8
89
90#define lens1 %r9
91#define lens2 %r10
92#define lens3 %r11
93
94.macro LABEL prefix n
95\prefix\n\():
96.endm
97
98.macro JNE_SKIP i
99jne skip_\i
100.endm
101
102.altmacro
103.macro SET_OFFSET _offset
104offset = \_offset
105.endm
106.noaltmacro
107
108# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
 109# arg 1 : rdi : state
110ENTRY(sha512_mb_mgr_flush_avx2)
111 FRAME_BEGIN
112 push %rbx
113
 114 # If bit (32+7) is set, then all lanes are empty
115 mov _unused_lanes(state), unused_lanes
116 bt $32+7, unused_lanes
117 jc return_null
118
119 # find a lane with a non-null job
120 xor idx, idx
121 offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
122 cmpq $0, offset(state)
123 cmovne one(%rip), idx
124 offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
125 cmpq $0, offset(state)
126 cmovne two(%rip), idx
127 offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
128 cmpq $0, offset(state)
129 cmovne three(%rip), idx
130
131 # copy idx to empty lanes
132copy_lane_data:
133 offset = (_args + _data_ptr)
134 mov offset(state,idx,8), tmp
135
136 I = 0
137.rep 4
138 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
139 cmpq $0, offset(state)
140.altmacro
141 JNE_SKIP %I
142 offset = (_args + _data_ptr + 8*I)
143 mov tmp, offset(state)
144 offset = (_lens + 8*I +4)
145 movl $0xFFFFFFFF, offset(state)
146LABEL skip_ %I
147 I = (I+1)
148.noaltmacro
149.endr
150
151 # Find min length
152 mov _lens + 0*8(state),lens0
153 mov lens0,idx
154 mov _lens + 1*8(state),lens1
155 cmp idx,lens1
156 cmovb lens1,idx
157 mov _lens + 2*8(state),lens2
158 cmp idx,lens2
159 cmovb lens2,idx
160 mov _lens + 3*8(state),lens3
161 cmp idx,lens3
162 cmovb lens3,idx
163 mov idx,len2
164 and $0xF,idx
165 and $~0xFF,len2
166 jz len_is_0
167
168 sub len2, lens0
169 sub len2, lens1
170 sub len2, lens2
171 sub len2, lens3
172 shr $32,len2
173 mov lens0, _lens + 0*8(state)
174 mov lens1, _lens + 1*8(state)
175 mov lens2, _lens + 2*8(state)
176 mov lens3, _lens + 3*8(state)
177
178 # "state" and "args" are the same address, arg1
179 # len is arg2
180 call sha512_x4_avx2
181 # state and idx are intact
182
183len_is_0:
184 # process completed job "idx"
185 imul $_LANE_DATA_size, idx, lane_data
186 lea _ldata(state, lane_data), lane_data
187
188 mov _job_in_lane(lane_data), job_rax
189 movq $0, _job_in_lane(lane_data)
190 movl $STS_COMPLETED, _status(job_rax)
191 mov _unused_lanes(state), unused_lanes
192 shl $8, unused_lanes
193 or idx, unused_lanes
194 mov unused_lanes, _unused_lanes(state)
195
196 movl $0xFFFFFFFF, _lens+4(state, idx, 8)
197
198 vmovq _args_digest+0*32(state, idx, 8), %xmm0
199 vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
200 vmovq _args_digest+2*32(state, idx, 8), %xmm1
201 vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
202 vmovq _args_digest+4*32(state, idx, 8), %xmm2
203 vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
204 vmovq _args_digest+6*32(state, idx, 8), %xmm3
205 vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
206
207 vmovdqu %xmm0, _result_digest(job_rax)
208 vmovdqu %xmm1, _result_digest+1*16(job_rax)
209 vmovdqu %xmm2, _result_digest+2*16(job_rax)
210 vmovdqu %xmm3, _result_digest+3*16(job_rax)
211
212return:
213 pop %rbx
214 FRAME_END
215 ret
216
217return_null:
218 xor job_rax, job_rax
219 jmp return
220ENDPROC(sha512_mb_mgr_flush_avx2)
221.align 16
222
223ENTRY(sha512_mb_mgr_get_comp_job_avx2)
224 push %rbx
225
226 mov _unused_lanes(state), unused_lanes
227 bt $(32+7), unused_lanes
228 jc .return_null
229
230 # Find min length
231 mov _lens(state),lens0
232 mov lens0,idx
233 mov _lens+1*8(state),lens1
234 cmp idx,lens1
235 cmovb lens1,idx
236 mov _lens+2*8(state),lens2
237 cmp idx,lens2
238 cmovb lens2,idx
239 mov _lens+3*8(state),lens3
240 cmp idx,lens3
241 cmovb lens3,idx
242 test $~0xF,idx
243 jnz .return_null
244 and $0xF,idx
245
246 #process completed job "idx"
247 imul $_LANE_DATA_size, idx, lane_data
248 lea _ldata(state, lane_data), lane_data
249
250 mov _job_in_lane(lane_data), job_rax
251 movq $0, _job_in_lane(lane_data)
252 movl $STS_COMPLETED, _status(job_rax)
253 mov _unused_lanes(state), unused_lanes
254 shl $8, unused_lanes
255 or idx, unused_lanes
256 mov unused_lanes, _unused_lanes(state)
257
258 movl $0xFFFFFFFF, _lens+4(state, idx, 8)
259
260 vmovq _args_digest(state, idx, 8), %xmm0
261 vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
262 vmovq _args_digest+2*32(state, idx, 8), %xmm1
263 vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
264 vmovq _args_digest+4*32(state, idx, 8), %xmm2
265 vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
266 vmovq _args_digest+6*32(state, idx, 8), %xmm3
267 vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
268
269 vmovdqu %xmm0, _result_digest+0*16(job_rax)
270 vmovdqu %xmm1, _result_digest+1*16(job_rax)
271 vmovdqu %xmm2, _result_digest+2*16(job_rax)
272 vmovdqu %xmm3, _result_digest+3*16(job_rax)
273
274 pop %rbx
275
276 ret
277
278.return_null:
279 xor job_rax, job_rax
280 pop %rbx
281 ret
282ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
283
284.section .rodata.cst8.one, "aM", @progbits, 8
285.align 8
286one:
287.quad 1
288
289.section .rodata.cst8.two, "aM", @progbits, 8
290.align 8
291two:
292.quad 2
293
294.section .rodata.cst8.three, "aM", @progbits, 8
295.align 8
296three:
297.quad 3
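
Both the flush routine above and the submit routine below rely on the same packing of the _lens entries: the lane index sits in the low bits and the outstanding block count in the high 32 bits, so an ordinary unsigned 64-bit minimum over the four values picks the least-loaded lane and carries its index along for free; 0xFFFFFFFF in the high half keeps idle lanes from ever winning. A compact sketch of that selection (names are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define EMPTY_LEN 0xFFFFFFFFu             /* sentinel stored for an idle lane */

static uint64_t make_len(unsigned lane, uint32_t blocks)
{
        return ((uint64_t)blocks << 32) | lane;
}

int main(void)
{
        uint64_t lens[4] = {
                make_len(0, 7),
                make_len(1, 3),
                make_len(2, EMPTY_LEN),   /* lane 2 has no job */
                make_len(3, 5),
        };
        uint64_t min = lens[0];

        for (int i = 1; i < 4; i++)       /* the asm does this with cmp/cmovb */
                if (lens[i] < min)
                        min = lens[i];

        unsigned lane   = (unsigned)(min & 0xF);  /* and $0xF, idx       */
        uint32_t blocks = (uint32_t)(min >> 32);  /* and $~0xFF; shr $32 */

        /* The asm then subtracts "blocks" from every lane and calls
         * sha512_x4_avx2 for that many blocks; lane "lane" finishes first. */
        printf("run the x4 kernel for %u blocks; lane %u completes\n", blocks, lane);
        return 0;
}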
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
deleted file mode 100644
index d08805032f01..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
 2 * Initialization code for multi buffer SHA512 algorithm for AVX2
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include "sha512_mb_mgr.h"
55
56void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
57{
58 unsigned int j;
59
60 /* initially all lanes are unused */
61 state->lens[0] = 0xFFFFFFFF00000000;
62 state->lens[1] = 0xFFFFFFFF00000001;
63 state->lens[2] = 0xFFFFFFFF00000002;
64 state->lens[3] = 0xFFFFFFFF00000003;
65
66 state->unused_lanes = 0xFF03020100;
67 for (j = 0; j < 4; j++)
68 state->ldata[j].job_in_lane = NULL;
69}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
deleted file mode 100644
index 4ba709ba78e5..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,224 +0,0 @@
1/*
2 * Buffer submit code for multi buffer SHA512 algorithm
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha512_mb_mgr_datastruct.S"
57
58.extern sha512_x4_avx2
59
60#define arg1 %rdi
61#define arg2 %rsi
62
63#define idx %rdx
64#define last_len %rdx
65
66#define size_offset %rcx
67#define tmp2 %rcx
68
69# Common definitions
70#define state arg1
71#define job arg2
72#define len2 arg2
73#define p2 arg2
74
75#define p %r11
76#define start_offset %r11
77
78#define unused_lanes %rbx
79
80#define job_rax %rax
81#define len %rax
82
83#define lane %r12
84#define tmp3 %r12
85#define lens3 %r12
86
87#define extra_blocks %r8
88#define lens0 %r8
89
90#define tmp %r9
91#define lens1 %r9
92
93#define lane_data %r10
94#define lens2 %r10
95
96#define DWORD_len %eax
97
98# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
 99# arg 1 : rdi : state
 100# arg 2 : rsi : job
101ENTRY(sha512_mb_mgr_submit_avx2)
102 FRAME_BEGIN
103 push %rbx
104 push %r12
105
106 mov _unused_lanes(state), unused_lanes
107 movzb %bl,lane
108 shr $8, unused_lanes
109 imul $_LANE_DATA_size, lane,lane_data
110 movl $STS_BEING_PROCESSED, _status(job)
111 lea _ldata(state, lane_data), lane_data
112 mov unused_lanes, _unused_lanes(state)
113 movl _len(job), DWORD_len
114
115 mov job, _job_in_lane(lane_data)
116 movl DWORD_len,_lens+4(state , lane, 8)
117
118 # Load digest words from result_digest
119 vmovdqu _result_digest+0*16(job), %xmm0
120 vmovdqu _result_digest+1*16(job), %xmm1
121 vmovdqu _result_digest+2*16(job), %xmm2
122 vmovdqu _result_digest+3*16(job), %xmm3
123
124 vmovq %xmm0, _args_digest(state, lane, 8)
125 vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
126 vmovq %xmm1, _args_digest+2*32(state , lane, 8)
127 vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
128 vmovq %xmm2, _args_digest+4*32(state , lane, 8)
129 vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
130 vmovq %xmm3, _args_digest+6*32(state , lane, 8)
131 vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
132
133 mov _buffer(job), p
134 mov p, _args_data_ptr(state, lane, 8)
135
136 cmp $0xFF, unused_lanes
137 jne return_null
138
139start_loop:
140
141 # Find min length
142 mov _lens+0*8(state),lens0
143 mov lens0,idx
144 mov _lens+1*8(state),lens1
145 cmp idx,lens1
146 cmovb lens1, idx
147 mov _lens+2*8(state),lens2
148 cmp idx,lens2
149 cmovb lens2,idx
150 mov _lens+3*8(state),lens3
151 cmp idx,lens3
152 cmovb lens3,idx
153 mov idx,len2
154 and $0xF,idx
155 and $~0xFF,len2
156 jz len_is_0
157
158 sub len2,lens0
159 sub len2,lens1
160 sub len2,lens2
161 sub len2,lens3
162 shr $32,len2
163 mov lens0, _lens + 0*8(state)
164 mov lens1, _lens + 1*8(state)
165 mov lens2, _lens + 2*8(state)
166 mov lens3, _lens + 3*8(state)
167
168 # "state" and "args" are the same address, arg1
169 # len is arg2
170 call sha512_x4_avx2
171 # state and idx are intact
172
173len_is_0:
174
175 # process completed job "idx"
176 imul $_LANE_DATA_size, idx, lane_data
177 lea _ldata(state, lane_data), lane_data
178
179 mov _job_in_lane(lane_data), job_rax
180 mov _unused_lanes(state), unused_lanes
181 movq $0, _job_in_lane(lane_data)
182 movl $STS_COMPLETED, _status(job_rax)
183 shl $8, unused_lanes
184 or idx, unused_lanes
185 mov unused_lanes, _unused_lanes(state)
186
187 movl $0xFFFFFFFF,_lens+4(state,idx,8)
188 vmovq _args_digest+0*32(state , idx, 8), %xmm0
189 vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
190 vmovq _args_digest+2*32(state , idx, 8), %xmm1
191 vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
192 vmovq _args_digest+4*32(state , idx, 8), %xmm2
193 vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
194 vmovq _args_digest+6*32(state , idx, 8), %xmm3
195 vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
196
197 vmovdqu %xmm0, _result_digest + 0*16(job_rax)
198 vmovdqu %xmm1, _result_digest + 1*16(job_rax)
199 vmovdqu %xmm2, _result_digest + 2*16(job_rax)
200 vmovdqu %xmm3, _result_digest + 3*16(job_rax)
201
202return:
203 pop %r12
204 pop %rbx
205 FRAME_END
206 ret
207
208return_null:
209 xor job_rax, job_rax
210 jmp return
211ENDPROC(sha512_mb_mgr_submit_avx2)
212
213/* UNUSED?
214.section .rodata.cst16, "aM", @progbits, 16
215.align 16
216H0: .int 0x6a09e667
217H1: .int 0xbb67ae85
218H2: .int 0x3c6ef372
219H3: .int 0xa54ff53a
220H4: .int 0x510e527f
221H5: .int 0x9b05688c
222H6: .int 0x1f83d9ab
223H7: .int 0x5be0cd19
224*/
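
The submit path above only fires the SIMD kernel once every lane is occupied: it pops a free lane, records the job and its length, and bails out early (returning NULL to the caller) while unused_lanes still holds free entries; only the submit that fills the last lane falls through into the same min-length loop as flush. A schematic C version of that decision, reusing the byte-stack and lens encodings sketched earlier (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct job { uint32_t len_blocks; int status; };

struct mgr {
        uint64_t    unused_lanes;   /* byte stack of free lane indices, 0xFF sentinel */
        uint64_t    lens[4];        /* (outstanding blocks << 32) | lane index        */
        struct job *ldata[4];
};

/* Returns 1 when the caller should now run the 4-lane kernel (every lane
 * has a job), 0 when the job was merely parked in a free lane. */
static int submit(struct mgr *m, struct job *job)
{
        unsigned lane = m->unused_lanes & 0xFF;            /* pop a free lane */
        m->unused_lanes >>= 8;

        job->status    = 1;                                /* cf. STS_BEING_PROCESSED */
        m->ldata[lane] = job;
        m->lens[lane]  = ((uint64_t)job->len_blocks << 32) | lane;

        return m->unused_lanes == 0xFF;                    /* only the sentinel left */
}

int main(void)
{
        struct mgr m = { .unused_lanes = 0xFF03020100ULL };
        struct job jobs[4] = { {7, 0}, {3, 0}, {9, 0}, {5, 0} };

        for (int i = 0; i < 4; i++)
                printf("submit job %d -> %s\n", i,
                       submit(&m, &jobs[i]) ? "all lanes full, run sha512_x4"
                                            : "parked, return NULL to caller");
        return 0;
}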
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
deleted file mode 100644
index e22e907643a6..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+++ /dev/null
@@ -1,531 +0,0 @@
1/*
2 * Multi-buffer SHA512 algorithm hash compute routine
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54# code to compute quad SHA512 using AVX2
55# use YMMs to tackle the larger digest size
56# outer calling routine takes care of save and restore of XMM registers
57# Logic designed/laid out by JDG
58
59# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
60# Stack must be aligned to 32 bytes before call
 61# Linux clobbers: rax rbx rsi r8 r9 r10 r11 r12
62# Linux preserves: rcx rdx rdi rbp r13 r14 r15
63# clobbers ymm0-15
64
65#include <linux/linkage.h>
66#include "sha512_mb_mgr_datastruct.S"
67
68arg1 = %rdi
69arg2 = %rsi
70
71# Common definitions
72STATE = arg1
73INP_SIZE = arg2
74
75IDX = %rax
76ROUND = %rbx
77TBL = %r8
78
79inp0 = %r9
80inp1 = %r10
81inp2 = %r11
82inp3 = %r12
83
84a = %ymm0
85b = %ymm1
86c = %ymm2
87d = %ymm3
88e = %ymm4
89f = %ymm5
90g = %ymm6
91h = %ymm7
92
93a0 = %ymm8
94a1 = %ymm9
95a2 = %ymm10
96
97TT0 = %ymm14
98TT1 = %ymm13
99TT2 = %ymm12
100TT3 = %ymm11
101TT4 = %ymm10
102TT5 = %ymm9
103
104T1 = %ymm14
105TMP = %ymm15
106
107# Define stack usage
108STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
109
110#define VMOVPD vmovupd
111_digest = SZ4*16
112
113# transpose r0, r1, r2, r3, t0, t1
114# "transpose" data in {r0..r3} using temps {t0..t3}
115# Input looks like: {r0 r1 r2 r3}
116# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
117# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
118# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
119# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
120#
121# output looks like: {t0 r1 r0 r3}
122# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
123# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
124# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
125# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
126
127.macro TRANSPOSE r0 r1 r2 r3 t0 t1
128 vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
129 vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
130 vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
131 vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
132
133 vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
134 vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
135 vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
136 vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
137.endm
138
139.macro ROTATE_ARGS
140TMP_ = h
141h = g
142g = f
143f = e
144e = d
145d = c
146c = b
147b = a
148a = TMP_
149.endm
150
151# PRORQ reg, imm, tmp
152# packed-rotate-right-double
153# does a rotate by doing two shifts and an or
154.macro _PRORQ reg imm tmp
155 vpsllq $(64-\imm),\reg,\tmp
156 vpsrlq $\imm,\reg, \reg
157 vpor \tmp,\reg, \reg
158.endm
159
160# non-destructive
161# PRORQ_nd reg, imm, tmp, src
162.macro _PRORQ_nd reg imm tmp src
163 vpsllq $(64-\imm), \src, \tmp
164 vpsrlq $\imm, \src, \reg
165 vpor \tmp, \reg, \reg
166.endm
167
168# PRORQ dst/src, amt
169.macro PRORQ reg imm
170 _PRORQ \reg, \imm, TMP
171.endm
172
173# PRORQ_nd dst, src, amt
174.macro PRORQ_nd reg tmp imm
175 _PRORQ_nd \reg, \imm, TMP, \tmp
176.endm
177
178#; arguments passed implicitly in preprocessor symbols i, a...h
179.macro ROUND_00_15 _T1 i
180 PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
181
182 vpxor g, f, a2 # ch: a2 = f^g
183 vpand e,a2, a2 # ch: a2 = (f^g)&e
184 vpxor g, a2, a2 # a2 = ch
185
 186	PRORQ_nd	a1,e,41		# sig1: a1 = (e >> 41)
187
188 offset = SZ4*(\i & 0xf)
189 vmovdqu \_T1,offset(%rsp)
190 vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
191 vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
 192	PRORQ	a0, 14		# sig1: a0 = (e >> 14) ^ (e >> 18)
193 vpaddq a2, h, h # h = h + ch
 194	PRORQ_nd	a2,a,6		# sig0: a2 = (a >> 6)
195 vpaddq \_T1,h, h # h = h + ch + W + K
196 vpxor a1, a0, a0 # a0 = sigma1
197 vmovdqu a,\_T1
 198	PRORQ_nd	a1,a,39		# sig0: a1 = (a >> 39)
199 vpxor c, \_T1, \_T1 # maj: T1 = a^c
200 add $SZ4, ROUND # ROUND++
201 vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
202 vpaddq a0, h, h
203 vpaddq h, d, d
204 vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
 205	PRORQ	a2,28		# sig0: a2 = (a >> 28) ^ (a >> 34)
206 vpxor a1, a2, a2 # a2 = sig0
207 vpand c, a, a1 # maj: a1 = a&c
208 vpor \_T1, a1, a1 # a1 = maj
209 vpaddq a1, h, h # h = h + ch + W + K + maj
210 vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
211 ROTATE_ARGS
212.endm
213
214
215#; arguments passed implicitly in preprocessor symbols i, a...h
216.macro ROUND_16_XX _T1 i
217 vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
218 vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
219 vmovdqu \_T1, a0
220 PRORQ \_T1,7
221 vmovdqu a1, a2
222 PRORQ a1,42
223 vpxor a0, \_T1, \_T1
224 PRORQ \_T1, 1
225 vpxor a2, a1, a1
226 PRORQ a1, 19
227 vpsrlq $7, a0, a0
228 vpxor a0, \_T1, \_T1
229 vpsrlq $6, a2, a2
230 vpxor a2, a1, a1
231 vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
232 vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
233 vpaddq a1, \_T1, \_T1
234
235 ROUND_00_15 \_T1,\i
236.endm
237
238
239# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
 240# arg 1 : STATE : pointer to the SHA512 args/state structure (transposed digests and data pointers)
241# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
242ENTRY(sha512_x4_avx2)
243 # general registers preserved in outer calling routine
244 # outer calling routine saves all the XMM registers
245 # save callee-saved clobbered registers to comply with C function ABI
246 push %r12
247 push %r13
248 push %r14
249 push %r15
250
251 sub $STACK_SPACE1, %rsp
252
253 # Load the pre-transposed incoming digest.
254 vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
255 vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
256 vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
257 vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
258 vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
259 vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
260 vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
261 vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
262
263 lea K512_4(%rip),TBL
264
265 # load the address of each of the 4 message lanes
266 # getting ready to transpose input onto stack
267 mov _data_ptr+0*PTR_SZ(STATE),inp0
268 mov _data_ptr+1*PTR_SZ(STATE),inp1
269 mov _data_ptr+2*PTR_SZ(STATE),inp2
270 mov _data_ptr+3*PTR_SZ(STATE),inp3
271
272 xor IDX, IDX
273lloop:
274 xor ROUND, ROUND
275
276 # save old digest
277 vmovdqu a, _digest(%rsp)
278 vmovdqu b, _digest+1*SZ4(%rsp)
279 vmovdqu c, _digest+2*SZ4(%rsp)
280 vmovdqu d, _digest+3*SZ4(%rsp)
281 vmovdqu e, _digest+4*SZ4(%rsp)
282 vmovdqu f, _digest+5*SZ4(%rsp)
283 vmovdqu g, _digest+6*SZ4(%rsp)
284 vmovdqu h, _digest+7*SZ4(%rsp)
285 i = 0
286.rep 4
287 vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
288 VMOVPD i*32(inp0, IDX), TT2
289 VMOVPD i*32(inp1, IDX), TT1
290 VMOVPD i*32(inp2, IDX), TT4
291 VMOVPD i*32(inp3, IDX), TT3
292 TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
293 vpshufb TMP, TT0, TT0
294 vpshufb TMP, TT1, TT1
295 vpshufb TMP, TT2, TT2
296 vpshufb TMP, TT3, TT3
297 ROUND_00_15 TT0,(i*4+0)
298 ROUND_00_15 TT1,(i*4+1)
299 ROUND_00_15 TT2,(i*4+2)
300 ROUND_00_15 TT3,(i*4+3)
301 i = (i+1)
302.endr
303 add $128, IDX
304
305 i = (i*4)
306
307 jmp Lrounds_16_xx
308.align 16
309Lrounds_16_xx:
310.rep 16
311 ROUND_16_XX T1, i
312 i = (i+1)
313.endr
314 cmp $0xa00,ROUND
315 jb Lrounds_16_xx
316
317 # add old digest
318 vpaddq _digest(%rsp), a, a
319 vpaddq _digest+1*SZ4(%rsp), b, b
320 vpaddq _digest+2*SZ4(%rsp), c, c
321 vpaddq _digest+3*SZ4(%rsp), d, d
322 vpaddq _digest+4*SZ4(%rsp), e, e
323 vpaddq _digest+5*SZ4(%rsp), f, f
324 vpaddq _digest+6*SZ4(%rsp), g, g
325 vpaddq _digest+7*SZ4(%rsp), h, h
326
327 sub $1, INP_SIZE # unit is blocks
328 jne lloop
329
330 # write back to memory (state object) the transposed digest
331 vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
332 vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
333 vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
334 vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
335 vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
336 vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
337 vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
338 vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
339
340 # update input data pointers
341 add IDX, inp0
342 mov inp0, _data_ptr+0*PTR_SZ(STATE)
343 add IDX, inp1
344 mov inp1, _data_ptr+1*PTR_SZ(STATE)
345 add IDX, inp2
346 mov inp2, _data_ptr+2*PTR_SZ(STATE)
347 add IDX, inp3
348 mov inp3, _data_ptr+3*PTR_SZ(STATE)
349
350 #;;;;;;;;;;;;;;;
351 #; Postamble
352 add $STACK_SPACE1, %rsp
353 # restore callee-saved clobbered registers
354
355 pop %r15
356 pop %r14
357 pop %r13
358 pop %r12
359
360 # outer calling routine restores XMM and other GP registers
361 ret
362ENDPROC(sha512_x4_avx2)
363
364.section .rodata.K512_4, "a", @progbits
365.align 64
366K512_4:
367 .octa 0x428a2f98d728ae22428a2f98d728ae22,\
368 0x428a2f98d728ae22428a2f98d728ae22
369 .octa 0x7137449123ef65cd7137449123ef65cd,\
370 0x7137449123ef65cd7137449123ef65cd
371 .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
372 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
373 .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
374 0xe9b5dba58189dbbce9b5dba58189dbbc
375 .octa 0x3956c25bf348b5383956c25bf348b538,\
376 0x3956c25bf348b5383956c25bf348b538
377 .octa 0x59f111f1b605d01959f111f1b605d019,\
378 0x59f111f1b605d01959f111f1b605d019
379 .octa 0x923f82a4af194f9b923f82a4af194f9b,\
380 0x923f82a4af194f9b923f82a4af194f9b
381 .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
382 0xab1c5ed5da6d8118ab1c5ed5da6d8118
383 .octa 0xd807aa98a3030242d807aa98a3030242,\
384 0xd807aa98a3030242d807aa98a3030242
385 .octa 0x12835b0145706fbe12835b0145706fbe,\
386 0x12835b0145706fbe12835b0145706fbe
387 .octa 0x243185be4ee4b28c243185be4ee4b28c,\
388 0x243185be4ee4b28c243185be4ee4b28c
389 .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
390 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
391 .octa 0x72be5d74f27b896f72be5d74f27b896f,\
392 0x72be5d74f27b896f72be5d74f27b896f
393 .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
394 0x80deb1fe3b1696b180deb1fe3b1696b1
395 .octa 0x9bdc06a725c712359bdc06a725c71235,\
396 0x9bdc06a725c712359bdc06a725c71235
397 .octa 0xc19bf174cf692694c19bf174cf692694,\
398 0xc19bf174cf692694c19bf174cf692694
399 .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
400 0xe49b69c19ef14ad2e49b69c19ef14ad2
401 .octa 0xefbe4786384f25e3efbe4786384f25e3,\
402 0xefbe4786384f25e3efbe4786384f25e3
403 .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
404 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
405 .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
406 0x240ca1cc77ac9c65240ca1cc77ac9c65
407 .octa 0x2de92c6f592b02752de92c6f592b0275,\
408 0x2de92c6f592b02752de92c6f592b0275
409 .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
410 0x4a7484aa6ea6e4834a7484aa6ea6e483
411 .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
412 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
413 .octa 0x76f988da831153b576f988da831153b5,\
414 0x76f988da831153b576f988da831153b5
415 .octa 0x983e5152ee66dfab983e5152ee66dfab,\
416 0x983e5152ee66dfab983e5152ee66dfab
417 .octa 0xa831c66d2db43210a831c66d2db43210,\
418 0xa831c66d2db43210a831c66d2db43210
419 .octa 0xb00327c898fb213fb00327c898fb213f,\
420 0xb00327c898fb213fb00327c898fb213f
421 .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
422 0xbf597fc7beef0ee4bf597fc7beef0ee4
423 .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
424 0xc6e00bf33da88fc2c6e00bf33da88fc2
425 .octa 0xd5a79147930aa725d5a79147930aa725,\
426 0xd5a79147930aa725d5a79147930aa725
427 .octa 0x06ca6351e003826f06ca6351e003826f,\
428 0x06ca6351e003826f06ca6351e003826f
429 .octa 0x142929670a0e6e70142929670a0e6e70,\
430 0x142929670a0e6e70142929670a0e6e70
431 .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
432 0x27b70a8546d22ffc27b70a8546d22ffc
433 .octa 0x2e1b21385c26c9262e1b21385c26c926,\
434 0x2e1b21385c26c9262e1b21385c26c926
435 .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
436 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
437 .octa 0x53380d139d95b3df53380d139d95b3df,\
438 0x53380d139d95b3df53380d139d95b3df
439 .octa 0x650a73548baf63de650a73548baf63de,\
440 0x650a73548baf63de650a73548baf63de
441 .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
442 0x766a0abb3c77b2a8766a0abb3c77b2a8
443 .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
444 0x81c2c92e47edaee681c2c92e47edaee6
445 .octa 0x92722c851482353b92722c851482353b,\
446 0x92722c851482353b92722c851482353b
447 .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
448 0xa2bfe8a14cf10364a2bfe8a14cf10364
449 .octa 0xa81a664bbc423001a81a664bbc423001,\
450 0xa81a664bbc423001a81a664bbc423001
451 .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
452 0xc24b8b70d0f89791c24b8b70d0f89791
453 .octa 0xc76c51a30654be30c76c51a30654be30,\
454 0xc76c51a30654be30c76c51a30654be30
455 .octa 0xd192e819d6ef5218d192e819d6ef5218,\
456 0xd192e819d6ef5218d192e819d6ef5218
457 .octa 0xd69906245565a910d69906245565a910,\
458 0xd69906245565a910d69906245565a910
459 .octa 0xf40e35855771202af40e35855771202a,\
460 0xf40e35855771202af40e35855771202a
461 .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
462 0x106aa07032bbd1b8106aa07032bbd1b8
463 .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
464 0x19a4c116b8d2d0c819a4c116b8d2d0c8
465 .octa 0x1e376c085141ab531e376c085141ab53,\
466 0x1e376c085141ab531e376c085141ab53
467 .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
468 0x2748774cdf8eeb992748774cdf8eeb99
469 .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
470 0x34b0bcb5e19b48a834b0bcb5e19b48a8
471 .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
472 0x391c0cb3c5c95a63391c0cb3c5c95a63
473 .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
474 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
475 .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
476 0x5b9cca4f7763e3735b9cca4f7763e373
477 .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
478 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
479 .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
480 0x748f82ee5defb2fc748f82ee5defb2fc
481 .octa 0x78a5636f43172f6078a5636f43172f60,\
482 0x78a5636f43172f6078a5636f43172f60
483 .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
484 0x84c87814a1f0ab7284c87814a1f0ab72
485 .octa 0x8cc702081a6439ec8cc702081a6439ec,\
486 0x8cc702081a6439ec8cc702081a6439ec
487 .octa 0x90befffa23631e2890befffa23631e28,\
488 0x90befffa23631e2890befffa23631e28
489 .octa 0xa4506cebde82bde9a4506cebde82bde9,\
490 0xa4506cebde82bde9a4506cebde82bde9
491 .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
492 0xbef9a3f7b2c67915bef9a3f7b2c67915
493 .octa 0xc67178f2e372532bc67178f2e372532b,\
494 0xc67178f2e372532bc67178f2e372532b
495 .octa 0xca273eceea26619cca273eceea26619c,\
496 0xca273eceea26619cca273eceea26619c
497 .octa 0xd186b8c721c0c207d186b8c721c0c207,\
498 0xd186b8c721c0c207d186b8c721c0c207
499 .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
500 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
501 .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
502 0xf57d4f7fee6ed178f57d4f7fee6ed178
503 .octa 0x06f067aa72176fba06f067aa72176fba,\
504 0x06f067aa72176fba06f067aa72176fba
505 .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
506 0x0a637dc5a2c898a60a637dc5a2c898a6
507 .octa 0x113f9804bef90dae113f9804bef90dae,\
508 0x113f9804bef90dae113f9804bef90dae
509 .octa 0x1b710b35131c471b1b710b35131c471b,\
510 0x1b710b35131c471b1b710b35131c471b
511 .octa 0x28db77f523047d8428db77f523047d84,\
512 0x28db77f523047d8428db77f523047d84
513 .octa 0x32caab7b40c7249332caab7b40c72493,\
514 0x32caab7b40c7249332caab7b40c72493
515 .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
516 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
517 .octa 0x431d67c49c100d4c431d67c49c100d4c,\
518 0x431d67c49c100d4c431d67c49c100d4c
519 .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
520 0x4cc5d4becb3e42b64cc5d4becb3e42b6
521 .octa 0x597f299cfc657e2a597f299cfc657e2a,\
522 0x597f299cfc657e2a597f299cfc657e2a
523 .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
524 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
525 .octa 0x6c44198c4a4758176c44198c4a475817,\
526 0x6c44198c4a4758176c44198c4a475817
527
528.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
529.align 32
530PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
531 .octa 0x18191a1b1c1d1e1f1011121314151617
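
sha512_x4_avx2 vectorizes across buffers rather than within one: each YMM register holds the same working variable (a..h) for all four lanes, which is why the inputs are byte-swapped and transposed first and why K512_4 stores every round constant four times. A scalar C sketch of one round applied across four lanes, using the standard SHA-512 round functions, to make that data layout concrete (not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define LANES 4

static inline uint64_t ror64(uint64_t x, unsigned r)
{
        return (x >> r) | (x << (64 - r));
}

/* One SHA-512 round for all four lanes.  Each of a..h is indexed by lane,
 * mirroring how the AVX2 code packs the same working variable for four
 * messages into one YMM register.  w[] is the per-lane message word and k
 * the shared round constant. */
static void round_x4(uint64_t a[LANES], uint64_t b[LANES], uint64_t c[LANES],
                     uint64_t d[LANES], uint64_t e[LANES], uint64_t f[LANES],
                     uint64_t g[LANES], uint64_t h[LANES],
                     const uint64_t w[LANES], uint64_t k)
{
        for (int l = 0; l < LANES; l++) {   /* the vector unit does all 4 at once */
                uint64_t s1  = ror64(e[l], 14) ^ ror64(e[l], 18) ^ ror64(e[l], 41);
                uint64_t ch  = (e[l] & f[l]) ^ (~e[l] & g[l]);
                uint64_t t1  = h[l] + s1 + ch + k + w[l];
                uint64_t s0  = ror64(a[l], 28) ^ ror64(a[l], 34) ^ ror64(a[l], 39);
                uint64_t maj = (a[l] & b[l]) ^ (a[l] & c[l]) ^ (b[l] & c[l]);
                uint64_t t2  = s0 + maj;

                h[l] = g[l]; g[l] = f[l]; f[l] = e[l]; e[l] = d[l] + t1;
                d[l] = c[l]; c[l] = b[l]; b[l] = a[l]; a[l] = t1 + t2;
        }
}

int main(void)
{
        uint64_t a[LANES] = {0}, b[LANES] = {0}, c[LANES] = {0}, d[LANES] = {0};
        uint64_t e[LANES] = {0}, f[LANES] = {0}, g[LANES] = {0}, h[LANES] = {0};
        uint64_t w[LANES] = {0};

        round_x4(a, b, c, d, e, f, g, h, w, 0x428a2f98d728ae22ULL); /* K512[0] */
        printf("lane 0 after one round: a=%016llx e=%016llx\n",
               (unsigned long long)a[0], (unsigned long long)e[0]);
        return 0;
}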
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f3e40ac56d93..f7a235db56aa 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -213,20 +213,6 @@ config CRYPTO_CRYPTD
213 converts an arbitrary synchronous software crypto algorithm 213 converts an arbitrary synchronous software crypto algorithm
214 into an asynchronous algorithm that executes in a kernel thread. 214 into an asynchronous algorithm that executes in a kernel thread.
215 215
216config CRYPTO_MCRYPTD
217 tristate "Software async multi-buffer crypto daemon"
218 select CRYPTO_BLKCIPHER
219 select CRYPTO_HASH
220 select CRYPTO_MANAGER
221 select CRYPTO_WORKQUEUE
222 help
223 This is a generic software asynchronous crypto daemon that
224 provides the kernel thread to assist multi-buffer crypto
225 algorithms for submitting jobs and flushing jobs in multi-buffer
226 crypto algorithms. Multi-buffer crypto algorithms are executed
227 in the context of this kernel thread and drivers can post
228 their crypto request asynchronously to be processed by this daemon.
229
230config CRYPTO_AUTHENC 216config CRYPTO_AUTHENC
231 tristate "Authenc support" 217 tristate "Authenc support"
232 select CRYPTO_AEAD 218 select CRYPTO_AEAD
@@ -470,6 +456,18 @@ config CRYPTO_LRW
470 The first 128, 192 or 256 bits in the key are used for AES and the 456 The first 128, 192 or 256 bits in the key are used for AES and the
471 rest is used to tie each cipher block to its logical position. 457 rest is used to tie each cipher block to its logical position.
472 458
459config CRYPTO_OFB
460 tristate "OFB support"
461 select CRYPTO_BLKCIPHER
462 select CRYPTO_MANAGER
463 help
464 OFB: the Output Feedback mode makes a block cipher into a synchronous
465 stream cipher. It generates keystream blocks, which are then XORed
466 with the plaintext blocks to get the ciphertext. Flipping a bit in the
467 ciphertext produces a flipped bit in the plaintext at the same
468 location. This property allows many error correcting codes to function
469 normally even when applied before encryption.
470
473config CRYPTO_PCBC 471config CRYPTO_PCBC
474 tristate "PCBC support" 472 tristate "PCBC support"
475 select CRYPTO_BLKCIPHER 473 select CRYPTO_BLKCIPHER
@@ -848,54 +846,6 @@ config CRYPTO_SHA1_PPC_SPE
848 SHA-1 secure hash standard (DFIPS 180-4) implemented 846 SHA-1 secure hash standard (DFIPS 180-4) implemented
849 using powerpc SPE SIMD instruction set. 847 using powerpc SPE SIMD instruction set.
850 848
851config CRYPTO_SHA1_MB
852 tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
853 depends on X86 && 64BIT
854 select CRYPTO_SHA1
855 select CRYPTO_HASH
856 select CRYPTO_MCRYPTD
857 help
858 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
859 using multi-buffer technique. This algorithm computes on
860 multiple data lanes concurrently with SIMD instructions for
861 better throughput. It should not be enabled by default but
 862 used when there is a significant amount of work to keep the
 863 data lanes filled to get a performance benefit. If the data
864 lanes remain unfilled, a flush operation will be initiated to
865 process the crypto jobs, adding a slight latency.
866
867config CRYPTO_SHA256_MB
868 tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
869 depends on X86 && 64BIT
870 select CRYPTO_SHA256
871 select CRYPTO_HASH
872 select CRYPTO_MCRYPTD
873 help
874 SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
875 using multi-buffer technique. This algorithm computes on
876 multiple data lanes concurrently with SIMD instructions for
877 better throughput. It should not be enabled by default but
 878 used when there is a significant amount of work to keep the
 879 data lanes filled to get a performance benefit. If the data
880 lanes remain unfilled, a flush operation will be initiated to
881 process the crypto jobs, adding a slight latency.
882
883config CRYPTO_SHA512_MB
884 tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
885 depends on X86 && 64BIT
886 select CRYPTO_SHA512
887 select CRYPTO_HASH
888 select CRYPTO_MCRYPTD
889 help
890 SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
891 using multi-buffer technique. This algorithm computes on
892 multiple data lanes concurrently with SIMD instructions for
893 better throughput. It should not be enabled by default but
 894 used when there is a significant amount of work to keep the
 895 data lanes filled to get a performance benefit. If the data
896 lanes remain unfilled, a flush operation will be initiated to
897 process the crypto jobs, adding a slight latency.
898
899config CRYPTO_SHA256 849config CRYPTO_SHA256
900 tristate "SHA224 and SHA256 digest algorithm" 850 tristate "SHA224 and SHA256 digest algorithm"
901 select CRYPTO_HASH 851 select CRYPTO_HASH
@@ -1133,7 +1083,7 @@ config CRYPTO_AES_NI_INTEL
1133 1083
1134 In addition to AES cipher algorithm support, the acceleration 1084 In addition to AES cipher algorithm support, the acceleration
1135 for some popular block cipher mode is supported too, including 1085 for some popular block cipher mode is supported too, including
1136 ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional 1086 ECB, CBC, LRW, XTS. The 64 bit version has additional
1137 acceleration for CTR. 1087 acceleration for CTR.
1138 1088
1139config CRYPTO_AES_SPARC64 1089config CRYPTO_AES_SPARC64
@@ -1590,20 +1540,6 @@ config CRYPTO_SM4
1590 1540
1591 If unsure, say N. 1541 If unsure, say N.
1592 1542
1593config CRYPTO_SPECK
1594 tristate "Speck cipher algorithm"
1595 select CRYPTO_ALGAPI
1596 help
1597 Speck is a lightweight block cipher that is tuned for optimal
1598 performance in software (rather than hardware).
1599
1600 Speck may not be as secure as AES, and should only be used on systems
1601 where AES is not fast enough.
1602
1603 See also: <https://eprint.iacr.org/2013/404.pdf>
1604
1605 If unsure, say N.
1606
1607config CRYPTO_TEA 1543config CRYPTO_TEA
1608 tristate "TEA, XTEA and XETA cipher algorithms" 1544 tristate "TEA, XTEA and XETA cipher algorithms"
1609 select CRYPTO_ALGAPI 1545 select CRYPTO_ALGAPI
@@ -1875,6 +1811,17 @@ config CRYPTO_USER_API_AEAD
 1875 This option enables the user-space interface for AEAD 1811 This option enables the user-space interface for AEAD
1876 cipher algorithms. 1812 cipher algorithms.
1877 1813
1814config CRYPTO_STATS
1815 bool "Crypto usage statistics for User-space"
1816 help
1817 This option enables the gathering of crypto stats.
1818 This will collect:
 1819 - encrypt/decrypt size and numbers of symmetric operations
1820 - compress/decompress size and numbers of compress operations
1821 - size and numbers of hash operations
1822 - encrypt/decrypt/sign/verify numbers for asymmetric operations
1823 - generate/seed numbers for rng operations
1824
1878config CRYPTO_HASH_INFO 1825config CRYPTO_HASH_INFO
1879 bool 1826 bool
1880 1827
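
For the CRYPTO_OFB option added above: OFB turns the block cipher into a keystream generator by repeatedly encrypting the IV (O_0 = IV, O_i = E_K(O_{i-1})) and XORing the keystream with the data, so the same routine both encrypts and decrypts. A toy stand-alone illustration; block_encrypt below is a throwaway placeholder, not a real cipher, and the kernel template wraps a real one such as aes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16   /* block size of the underlying cipher */

/* Placeholder "block cipher": NOT secure, only here so the feedback
 * structure can be demonstrated end to end. */
static void block_encrypt(uint8_t out[BS], const uint8_t in[BS], const uint8_t key[BS])
{
        for (int i = 0; i < BS; i++)
                out[i] = (uint8_t)(in[i] ^ key[i] ^ (i * 37));
}

/* OFB: O_0 = IV, O_i = E_K(O_{i-1}), C_i = P_i XOR O_i.  The same routine
 * decrypts, because XORing the keystream twice cancels out. */
static void ofb_crypt(uint8_t *buf, size_t len, const uint8_t key[BS], const uint8_t iv[BS])
{
        uint8_t feedback[BS];

        memcpy(feedback, iv, BS);
        for (size_t off = 0; off < len; off += BS) {
                size_t n = len - off < BS ? len - off : BS;

                block_encrypt(feedback, feedback, key);  /* next keystream block */
                for (size_t i = 0; i < n; i++)
                        buf[off + i] ^= feedback[i];
        }
}

int main(void)
{
        uint8_t key[BS] = "0123456789abcde", iv[BS] = "fedcba987654321";
        uint8_t msg[] = "flip one ciphertext bit, flip one plaintext bit";

        ofb_crypt(msg, sizeof(msg) - 1, key, iv);        /* encrypt in place */
        ofb_crypt(msg, sizeof(msg) - 1, key, iv);        /* decrypt again    */
        printf("%s\n", msg);
        return 0;
}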
diff --git a/crypto/Makefile b/crypto/Makefile
index 6d1d40eeb964..5c207c76abf7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -54,6 +54,7 @@ cryptomgr-y := algboss.o testmgr.o
54 54
55obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o 55obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
56obj-$(CONFIG_CRYPTO_USER) += crypto_user.o 56obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
57crypto_user-y := crypto_user_base.o crypto_user_stat.o
57obj-$(CONFIG_CRYPTO_CMAC) += cmac.o 58obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
58obj-$(CONFIG_CRYPTO_HMAC) += hmac.o 59obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
59obj-$(CONFIG_CRYPTO_VMAC) += vmac.o 60obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
@@ -93,7 +94,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
93obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o 94obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
94obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o 95obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
95obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o 96obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
96obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
97obj-$(CONFIG_CRYPTO_DES) += des_generic.o 97obj-$(CONFIG_CRYPTO_DES) += des_generic.o
98obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o 98obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
99obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o 99obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
@@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
115obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o 115obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
116obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o 116obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
117obj-$(CONFIG_CRYPTO_SEED) += seed.o 117obj-$(CONFIG_CRYPTO_SEED) += seed.o
118obj-$(CONFIG_CRYPTO_SPECK) += speck.o
119obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o 118obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
120obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o 119obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
121obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o 120obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -143,6 +142,7 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
143obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o 142obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
144obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o 143obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
145obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o 144obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
145obj-$(CONFIG_CRYPTO_OFB) += ofb.o
146 146
147ecdh_generic-y := ecc.o 147ecdh_generic-y := ecc.o
148ecdh_generic-y += ecdh.o 148ecdh_generic-y += ecdh.o
diff --git a/crypto/aegis.h b/crypto/aegis.h
index f1c6900ddb80..405e025fc906 100644
--- a/crypto/aegis.h
+++ b/crypto/aegis.h
@@ -21,7 +21,7 @@
21 21
22union aegis_block { 22union aegis_block {
23 __le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)]; 23 __le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)];
24 u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)]; 24 __le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)];
25 u8 bytes[AEGIS_BLOCK_SIZE]; 25 u8 bytes[AEGIS_BLOCK_SIZE];
26}; 26};
27 27
@@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst,
57 const union aegis_block *src, 57 const union aegis_block *src,
58 const union aegis_block *key) 58 const union aegis_block *key)
59{ 59{
60 u32 *d = dst->words32;
61 const u8 *s = src->bytes; 60 const u8 *s = src->bytes;
62 const u32 *k = key->words32;
63 const u32 *t0 = crypto_ft_tab[0]; 61 const u32 *t0 = crypto_ft_tab[0];
64 const u32 *t1 = crypto_ft_tab[1]; 62 const u32 *t1 = crypto_ft_tab[1];
65 const u32 *t2 = crypto_ft_tab[2]; 63 const u32 *t2 = crypto_ft_tab[2];
66 const u32 *t3 = crypto_ft_tab[3]; 64 const u32 *t3 = crypto_ft_tab[3];
67 u32 d0, d1, d2, d3; 65 u32 d0, d1, d2, d3;
68 66
69 d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0]; 67 d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]];
70 d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1]; 68 d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]];
71 d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2]; 69 d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]];
72 d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3]; 70 d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]];
73 71
74 d[0] = d0; 72 dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0];
75 d[1] = d1; 73 dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1];
76 d[2] = d2; 74 dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2];
77 d[3] = d3; 75 dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3];
78} 76}
79 77
80#endif /* _CRYPTO_AEGIS_H */ 78#endif /* _CRYPTO_AEGIS_H */
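
The aegis.h change above is an endianness fix: the AES forward-table lookups yield CPU-endian words, so converting them with cpu_to_le32() before XORing in the (now __le32) key words keeps the in-memory state identical on little- and big-endian hosts. A stand-alone sketch of the property being relied on, namely that XOR commutes with the byte-order conversion (the names below are local to the sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Stands in for cpu_to_le32()/le32_to_cpu(): byte-swap on big-endian hosts,
 * identity on little-endian ones. */
static uint32_t swap_if_be(uint32_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

int main(void)
{
	uint32_t d = 0x01020304;                /* table lookup result (CPU order) */
	uint32_t k_le = swap_if_be(0xa0b0c0d0); /* key word stored as __le32 */

	/* XORing the converted word with the little-endian key word gives the
	 * same bytes as converting the XOR of the CPU-order values. */
	assert((swap_if_be(d) ^ k_le) == swap_if_be(d ^ swap_if_be(k_le)));
	return 0;
}
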
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a64c143165b1..e21667b4e10a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -364,24 +364,35 @@ static int crypto_ahash_op(struct ahash_request *req,
364 364
365int crypto_ahash_final(struct ahash_request *req) 365int crypto_ahash_final(struct ahash_request *req)
366{ 366{
367 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); 367 int ret;
368
369 ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
370 crypto_stat_ahash_final(req, ret);
371 return ret;
368} 372}
369EXPORT_SYMBOL_GPL(crypto_ahash_final); 373EXPORT_SYMBOL_GPL(crypto_ahash_final);
370 374
371int crypto_ahash_finup(struct ahash_request *req) 375int crypto_ahash_finup(struct ahash_request *req)
372{ 376{
373 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); 377 int ret;
378
379 ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
380 crypto_stat_ahash_final(req, ret);
381 return ret;
374} 382}
375EXPORT_SYMBOL_GPL(crypto_ahash_finup); 383EXPORT_SYMBOL_GPL(crypto_ahash_finup);
376 384
377int crypto_ahash_digest(struct ahash_request *req) 385int crypto_ahash_digest(struct ahash_request *req)
378{ 386{
379 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 387 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
388 int ret;
380 389
381 if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 390 if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
382 return -ENOKEY; 391 ret = -ENOKEY;
383 392 else
384 return crypto_ahash_op(req, tfm->digest); 393 ret = crypto_ahash_op(req, tfm->digest);
394 crypto_stat_ahash_final(req, ret);
395 return ret;
385} 396}
386EXPORT_SYMBOL_GPL(crypto_ahash_digest); 397EXPORT_SYMBOL_GPL(crypto_ahash_digest);
387 398
@@ -550,8 +561,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
550{ 561{
551 struct crypto_alg *base = &alg->halg.base; 562 struct crypto_alg *base = &alg->halg.base;
552 563
553 if (alg->halg.digestsize > PAGE_SIZE / 8 || 564 if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
554 alg->halg.statesize > PAGE_SIZE / 8 || 565 alg->halg.statesize > HASH_MAX_STATESIZE ||
555 alg->halg.statesize == 0) 566 alg->halg.statesize == 0)
556 return -EINVAL; 567 return -EINVAL;
557 568
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c0755cf4f53f..2545c5f89c4c 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -57,9 +57,14 @@ static int crypto_check_alg(struct crypto_alg *alg)
57 if (alg->cra_alignmask & (alg->cra_alignmask + 1)) 57 if (alg->cra_alignmask & (alg->cra_alignmask + 1))
58 return -EINVAL; 58 return -EINVAL;
59 59
60 if (alg->cra_blocksize > PAGE_SIZE / 8) 60 /* General maximums for all algs. */
61 if (alg->cra_alignmask > MAX_ALGAPI_ALIGNMASK)
61 return -EINVAL; 62 return -EINVAL;
62 63
64 if (alg->cra_blocksize > MAX_ALGAPI_BLOCKSIZE)
65 return -EINVAL;
66
67 /* Lower maximums for specific alg types. */
63 if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == 68 if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
64 CRYPTO_ALG_TYPE_CIPHER) { 69 CRYPTO_ALG_TYPE_CIPHER) {
65 if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK) 70 if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK)
@@ -253,6 +258,14 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
253 list_add(&alg->cra_list, &crypto_alg_list); 258 list_add(&alg->cra_list, &crypto_alg_list);
254 list_add(&larval->alg.cra_list, &crypto_alg_list); 259 list_add(&larval->alg.cra_list, &crypto_alg_list);
255 260
261 atomic_set(&alg->encrypt_cnt, 0);
262 atomic_set(&alg->decrypt_cnt, 0);
263 atomic64_set(&alg->encrypt_tlen, 0);
264 atomic64_set(&alg->decrypt_tlen, 0);
265 atomic_set(&alg->verify_cnt, 0);
266 atomic_set(&alg->cipher_err_cnt, 0);
267 atomic_set(&alg->sign_cnt, 0);
268
256out: 269out:
257 return larval; 270 return larval;
258 271
@@ -367,6 +380,8 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
367 380
368 err = wait_for_completion_killable(&larval->completion); 381 err = wait_for_completion_killable(&larval->completion);
369 WARN_ON(err); 382 WARN_ON(err);
383 if (!err)
384 crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
370 385
371out: 386out:
372 crypto_larval_kill(&larval->alg); 387 crypto_larval_kill(&larval->alg);
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 5e6df2a087fa..527b44d0af21 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -274,6 +274,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
274 return cryptomgr_schedule_probe(data); 274 return cryptomgr_schedule_probe(data);
275 case CRYPTO_MSG_ALG_REGISTER: 275 case CRYPTO_MSG_ALG_REGISTER:
276 return cryptomgr_schedule_test(data); 276 return cryptomgr_schedule_test(data);
277 case CRYPTO_MSG_ALG_LOADED:
278 break;
277 } 279 }
278 280
279 return NOTIFY_DONE; 281 return NOTIFY_DONE;
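
Together with the algapi.c hunk above, which raises CRYPTO_MSG_ALG_LOADED once a larval algorithm has passed its self-test, this lets other kernel code react to finished registrations through the crypto notifier chain; cryptomgr itself simply ignores the new message. A hedged sketch of a consumer, assuming the notifier declarations and the message enum are exported outside crypto/internal.h by this series (not taken from the patch itself):

#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int demo_alg_loaded(struct notifier_block *nb, unsigned long msg,
			   void *data)
{
	if (msg == CRYPTO_MSG_ALG_LOADED)
		pr_info("crypto: a newly registered algorithm passed its self-test\n");
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_alg_loaded,
};

static int __init demo_init(void)
{
	return crypto_register_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	crypto_unregister_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
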
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index c40a8c7ee8ae..eb100a04ce9f 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,7 @@
42 42
43struct aead_tfm { 43struct aead_tfm {
44 struct crypto_aead *aead; 44 struct crypto_aead *aead;
45 struct crypto_skcipher *null_tfm; 45 struct crypto_sync_skcipher *null_tfm;
46}; 46};
47 47
48static inline bool aead_sufficient_data(struct sock *sk) 48static inline bool aead_sufficient_data(struct sock *sk)
@@ -75,13 +75,13 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
75 return af_alg_sendmsg(sock, msg, size, ivsize); 75 return af_alg_sendmsg(sock, msg, size, ivsize);
76} 76}
77 77
78static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm, 78static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
79 struct scatterlist *src, 79 struct scatterlist *src,
80 struct scatterlist *dst, unsigned int len) 80 struct scatterlist *dst, unsigned int len)
81{ 81{
82 SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); 82 SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
83 83
84 skcipher_request_set_tfm(skreq, null_tfm); 84 skcipher_request_set_sync_tfm(skreq, null_tfm);
85 skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, 85 skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
86 NULL, NULL); 86 NULL, NULL);
87 skcipher_request_set_crypt(skreq, src, dst, len, NULL); 87 skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -99,7 +99,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
99 struct af_alg_ctx *ctx = ask->private; 99 struct af_alg_ctx *ctx = ask->private;
100 struct aead_tfm *aeadc = pask->private; 100 struct aead_tfm *aeadc = pask->private;
101 struct crypto_aead *tfm = aeadc->aead; 101 struct crypto_aead *tfm = aeadc->aead;
102 struct crypto_skcipher *null_tfm = aeadc->null_tfm; 102 struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
103 unsigned int i, as = crypto_aead_authsize(tfm); 103 unsigned int i, as = crypto_aead_authsize(tfm);
104 struct af_alg_async_req *areq; 104 struct af_alg_async_req *areq;
105 struct af_alg_tsgl *tsgl, *tmp; 105 struct af_alg_tsgl *tsgl, *tmp;
@@ -478,7 +478,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
478{ 478{
479 struct aead_tfm *tfm; 479 struct aead_tfm *tfm;
480 struct crypto_aead *aead; 480 struct crypto_aead *aead;
481 struct crypto_skcipher *null_tfm; 481 struct crypto_sync_skcipher *null_tfm;
482 482
483 tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); 483 tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
484 if (!tfm) 484 if (!tfm)
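
The algif_aead hunks show the VLA-removal idiom used throughout this pull request: SYNC_SKCIPHER_REQUEST_ON_STACK() needs a compile-time bound on the request size, which only the new crypto_sync_skcipher wrapper (guaranteed synchronous, hence without a large async context) can provide. A minimal sketch of the pattern in isolation, copying one scatterlist to another through ecb(cipher_null) much as crypto_aead_copy_sgl() does; error handling is trimmed and the function name is local to the sketch:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Copy 'len' bytes from src to dst via the null cipher, using the on-stack
 * synchronous request that replaces the old variable-length array. */
static int demo_copy_sgl(struct scatterlist *src, struct scatterlist *dst,
			 unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, NULL);
		err = crypto_skcipher_encrypt(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}
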
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index bfcf595fd8f9..d0cde541beb6 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
239 struct alg_sock *ask = alg_sk(sk); 239 struct alg_sock *ask = alg_sk(sk);
240 struct hash_ctx *ctx = ask->private; 240 struct hash_ctx *ctx = ask->private;
241 struct ahash_request *req = &ctx->req; 241 struct ahash_request *req = &ctx->req;
242 char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1]; 242 char state[HASH_MAX_STATESIZE];
243 struct sock *sk2; 243 struct sock *sk2;
244 struct alg_sock *ask2; 244 struct alg_sock *ask2;
245 struct hash_ctx *ctx2; 245 struct hash_ctx *ctx2;
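
The one-line change in hash_accept() works because of the ahash_prepare_alg() bounds added earlier in this series: no ahash can register with a statesize above HASH_MAX_STATESIZE, so a fixed-size stack buffer is always large enough for export/import and the variable-length array goes away. A minimal sketch of that export/import round trip, assuming two initialised requests on the same algorithm (the function name is local to the sketch):

#include <crypto/hash.h>

/* Snapshot the partial hash state of 'req' and resume it on 'req2'.
 * HASH_MAX_STATESIZE is the registration-time bound, so the buffer below
 * cannot be too small for any ahash the kernel accepted. */
static int demo_clone_hash_state(struct ahash_request *req,
				 struct ahash_request *req2)
{
	char state[HASH_MAX_STATESIZE];
	int err;

	err = crypto_ahash_export(req, state);
	if (err)
		return err;

	return crypto_ahash_import(req2, state);
}
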
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4fa8d40d947b..37f54d1b2f66 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -33,7 +33,7 @@ struct authenc_instance_ctx {
33struct crypto_authenc_ctx { 33struct crypto_authenc_ctx {
34 struct crypto_ahash *auth; 34 struct crypto_ahash *auth;
35 struct crypto_skcipher *enc; 35 struct crypto_skcipher *enc;
36 struct crypto_skcipher *null; 36 struct crypto_sync_skcipher *null;
37}; 37};
38 38
39struct authenc_request_ctx { 39struct authenc_request_ctx {
@@ -185,9 +185,9 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
185{ 185{
186 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 186 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
187 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 187 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
188 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); 188 SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
189 189
190 skcipher_request_set_tfm(skreq, ctx->null); 190 skcipher_request_set_sync_tfm(skreq, ctx->null);
191 skcipher_request_set_callback(skreq, aead_request_flags(req), 191 skcipher_request_set_callback(skreq, aead_request_flags(req),
192 NULL, NULL); 192 NULL, NULL);
193 skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, 193 skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
@@ -318,7 +318,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
318 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 318 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
319 struct crypto_ahash *auth; 319 struct crypto_ahash *auth;
320 struct crypto_skcipher *enc; 320 struct crypto_skcipher *enc;
321 struct crypto_skcipher *null; 321 struct crypto_sync_skcipher *null;
322 int err; 322 int err;
323 323
324 auth = crypto_spawn_ahash(&ictx->auth); 324 auth = crypto_spawn_ahash(&ictx->auth);
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 50b804747e20..80a25cc04aec 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -36,7 +36,7 @@ struct crypto_authenc_esn_ctx {
36 unsigned int reqoff; 36 unsigned int reqoff;
37 struct crypto_ahash *auth; 37 struct crypto_ahash *auth;
38 struct crypto_skcipher *enc; 38 struct crypto_skcipher *enc;
39 struct crypto_skcipher *null; 39 struct crypto_sync_skcipher *null;
40}; 40};
41 41
42struct authenc_esn_request_ctx { 42struct authenc_esn_request_ctx {
@@ -183,9 +183,9 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
183{ 183{
184 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); 184 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
185 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 185 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
186 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); 186 SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
187 187
188 skcipher_request_set_tfm(skreq, ctx->null); 188 skcipher_request_set_sync_tfm(skreq, ctx->null);
189 skcipher_request_set_callback(skreq, aead_request_flags(req), 189 skcipher_request_set_callback(skreq, aead_request_flags(req),
190 NULL, NULL); 190 NULL, NULL);
191 skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL); 191 skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
@@ -341,7 +341,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
341 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); 341 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
342 struct crypto_ahash *auth; 342 struct crypto_ahash *auth;
343 struct crypto_skcipher *enc; 343 struct crypto_skcipher *enc;
344 struct crypto_skcipher *null; 344 struct crypto_sync_skcipher *null;
345 int err; 345 int err;
346 346
347 auth = crypto_spawn_ahash(&ictx->auth); 347 auth = crypto_spawn_ahash(&ictx->auth);
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 0a083342ec8c..b242fd0d3262 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -50,7 +50,10 @@ struct crypto_ccm_req_priv_ctx {
50 u32 flags; 50 u32 flags;
51 struct scatterlist src[3]; 51 struct scatterlist src[3];
52 struct scatterlist dst[3]; 52 struct scatterlist dst[3];
53 struct skcipher_request skreq; 53 union {
54 struct ahash_request ahreq;
55 struct skcipher_request skreq;
56 };
54}; 57};
55 58
56struct cbcmac_tfm_ctx { 59struct cbcmac_tfm_ctx {
@@ -181,7 +184,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
181 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); 184 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
182 struct crypto_aead *aead = crypto_aead_reqtfm(req); 185 struct crypto_aead *aead = crypto_aead_reqtfm(req);
183 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 186 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
184 AHASH_REQUEST_ON_STACK(ahreq, ctx->mac); 187 struct ahash_request *ahreq = &pctx->ahreq;
185 unsigned int assoclen = req->assoclen; 188 unsigned int assoclen = req->assoclen;
186 struct scatterlist sg[3]; 189 struct scatterlist sg[3];
187 u8 *odata = pctx->odata; 190 u8 *odata = pctx->odata;
@@ -427,7 +430,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
427 crypto_aead_set_reqsize( 430 crypto_aead_set_reqsize(
428 tfm, 431 tfm,
429 align + sizeof(struct crypto_ccm_req_priv_ctx) + 432 align + sizeof(struct crypto_ccm_req_priv_ctx) +
430 crypto_skcipher_reqsize(ctr)); 433 max(crypto_ahash_reqsize(mac), crypto_skcipher_reqsize(ctr)));
431 434
432 return 0; 435 return 0;
433 436
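
Folding the CBC-MAC request into a union with the CTR request is safe because CCM never has both in flight at once, so the per-request context only needs room for the larger of the two; that is exactly what the max() in crypto_ccm_init_tfm() reserves, and it removes the AHASH_REQUEST_ON_STACK VLA. An illustrative fragment of the sizing rule (the struct names are invented for the sketch):

#include <assert.h>

/* Illustrative only: a union is as large as its largest member, so reserving
 * max(ahash reqsize, skcipher reqsize) behind crypto_ccm_req_priv_ctx covers
 * whichever request type is active. */
union demo_req {
	struct { char buf[96]; } mac;	/* stands in for the ahash request */
	struct { char buf[64]; } ctr;	/* stands in for the skcipher request */
};

int main(void)
{
	assert(sizeof(union demo_req) == 96);
	return 0;
}
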
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index e451c3cb6a56..3ae96587caf9 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -18,20 +18,21 @@
18static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, 18static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
19 unsigned int bytes) 19 unsigned int bytes)
20{ 20{
21 u32 stream[CHACHA20_BLOCK_WORDS]; 21 /* aligned to potentially speed up crypto_xor() */
22 u8 stream[CHACHA20_BLOCK_SIZE] __aligned(sizeof(long));
22 23
23 if (dst != src) 24 if (dst != src)
24 memcpy(dst, src, bytes); 25 memcpy(dst, src, bytes);
25 26
26 while (bytes >= CHACHA20_BLOCK_SIZE) { 27 while (bytes >= CHACHA20_BLOCK_SIZE) {
27 chacha20_block(state, stream); 28 chacha20_block(state, stream);
28 crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE); 29 crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
29 bytes -= CHACHA20_BLOCK_SIZE; 30 bytes -= CHACHA20_BLOCK_SIZE;
30 dst += CHACHA20_BLOCK_SIZE; 31 dst += CHACHA20_BLOCK_SIZE;
31 } 32 }
32 if (bytes) { 33 if (bytes) {
33 chacha20_block(state, stream); 34 chacha20_block(state, stream);
34 crypto_xor(dst, (const u8 *)stream, bytes); 35 crypto_xor(dst, stream, bytes);
35 } 36 }
36} 37}
37 38
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index addca7bae33f..7118fb5efbaa 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -76,7 +76,7 @@ struct cryptd_blkcipher_request_ctx {
76 76
77struct cryptd_skcipher_ctx { 77struct cryptd_skcipher_ctx {
78 atomic_t refcnt; 78 atomic_t refcnt;
79 struct crypto_skcipher *child; 79 struct crypto_sync_skcipher *child;
80}; 80};
81 81
82struct cryptd_skcipher_request_ctx { 82struct cryptd_skcipher_request_ctx {
@@ -449,14 +449,16 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
449 const u8 *key, unsigned int keylen) 449 const u8 *key, unsigned int keylen)
450{ 450{
451 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); 451 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
452 struct crypto_skcipher *child = ctx->child; 452 struct crypto_sync_skcipher *child = ctx->child;
453 int err; 453 int err;
454 454
455 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 455 crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
456 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & 456 crypto_sync_skcipher_set_flags(child,
457 crypto_skcipher_get_flags(parent) &
457 CRYPTO_TFM_REQ_MASK); 458 CRYPTO_TFM_REQ_MASK);
458 err = crypto_skcipher_setkey(child, key, keylen); 459 err = crypto_sync_skcipher_setkey(child, key, keylen);
459 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & 460 crypto_skcipher_set_flags(parent,
461 crypto_sync_skcipher_get_flags(child) &
460 CRYPTO_TFM_RES_MASK); 462 CRYPTO_TFM_RES_MASK);
461 return err; 463 return err;
462} 464}
@@ -483,13 +485,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
483 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 485 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
484 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 486 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
485 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 487 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
486 struct crypto_skcipher *child = ctx->child; 488 struct crypto_sync_skcipher *child = ctx->child;
487 SKCIPHER_REQUEST_ON_STACK(subreq, child); 489 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
488 490
489 if (unlikely(err == -EINPROGRESS)) 491 if (unlikely(err == -EINPROGRESS))
490 goto out; 492 goto out;
491 493
492 skcipher_request_set_tfm(subreq, child); 494 skcipher_request_set_sync_tfm(subreq, child);
493 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, 495 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
494 NULL, NULL); 496 NULL, NULL);
495 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 497 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -511,13 +513,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
511 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); 513 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
512 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 514 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
513 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 515 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
514 struct crypto_skcipher *child = ctx->child; 516 struct crypto_sync_skcipher *child = ctx->child;
515 SKCIPHER_REQUEST_ON_STACK(subreq, child); 517 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
516 518
517 if (unlikely(err == -EINPROGRESS)) 519 if (unlikely(err == -EINPROGRESS))
518 goto out; 520 goto out;
519 521
520 skcipher_request_set_tfm(subreq, child); 522 skcipher_request_set_sync_tfm(subreq, child);
521 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, 523 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
522 NULL, NULL); 524 NULL, NULL);
523 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 525 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -568,7 +570,7 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
568 if (IS_ERR(cipher)) 570 if (IS_ERR(cipher))
569 return PTR_ERR(cipher); 571 return PTR_ERR(cipher);
570 572
571 ctx->child = cipher; 573 ctx->child = (struct crypto_sync_skcipher *)cipher;
572 crypto_skcipher_set_reqsize( 574 crypto_skcipher_set_reqsize(
573 tfm, sizeof(struct cryptd_skcipher_request_ctx)); 575 tfm, sizeof(struct cryptd_skcipher_request_ctx));
574 return 0; 576 return 0;
@@ -578,7 +580,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
578{ 580{
579 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 581 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
580 582
581 crypto_free_skcipher(ctx->child); 583 crypto_free_sync_skcipher(ctx->child);
582} 584}
583 585
584static void cryptd_skcipher_free(struct skcipher_instance *inst) 586static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ -1243,7 +1245,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
1243{ 1245{
1244 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); 1246 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1245 1247
1246 return ctx->child; 1248 return &ctx->child->base;
1247} 1249}
1248EXPORT_SYMBOL_GPL(cryptd_skcipher_child); 1250EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
1249 1251
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 0959b268966c..0bae59922a80 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
26#include <linux/string.h> 26#include <linux/string.h>
27 27
28static DEFINE_MUTEX(crypto_default_null_skcipher_lock); 28static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
29static struct crypto_skcipher *crypto_default_null_skcipher; 29static struct crypto_sync_skcipher *crypto_default_null_skcipher;
30static int crypto_default_null_skcipher_refcnt; 30static int crypto_default_null_skcipher_refcnt;
31 31
32static int null_compress(struct crypto_tfm *tfm, const u8 *src, 32static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -152,16 +152,15 @@ MODULE_ALIAS_CRYPTO("compress_null");
152MODULE_ALIAS_CRYPTO("digest_null"); 152MODULE_ALIAS_CRYPTO("digest_null");
153MODULE_ALIAS_CRYPTO("cipher_null"); 153MODULE_ALIAS_CRYPTO("cipher_null");
154 154
155struct crypto_skcipher *crypto_get_default_null_skcipher(void) 155struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
156{ 156{
157 struct crypto_skcipher *tfm; 157 struct crypto_sync_skcipher *tfm;
158 158
159 mutex_lock(&crypto_default_null_skcipher_lock); 159 mutex_lock(&crypto_default_null_skcipher_lock);
160 tfm = crypto_default_null_skcipher; 160 tfm = crypto_default_null_skcipher;
161 161
162 if (!tfm) { 162 if (!tfm) {
163 tfm = crypto_alloc_skcipher("ecb(cipher_null)", 163 tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
164 0, CRYPTO_ALG_ASYNC);
165 if (IS_ERR(tfm)) 164 if (IS_ERR(tfm))
166 goto unlock; 165 goto unlock;
167 166
@@ -181,7 +180,7 @@ void crypto_put_default_null_skcipher(void)
181{ 180{
182 mutex_lock(&crypto_default_null_skcipher_lock); 181 mutex_lock(&crypto_default_null_skcipher_lock);
183 if (!--crypto_default_null_skcipher_refcnt) { 182 if (!--crypto_default_null_skcipher_refcnt) {
184 crypto_free_skcipher(crypto_default_null_skcipher); 183 crypto_free_sync_skcipher(crypto_default_null_skcipher);
185 crypto_default_null_skcipher = NULL; 184 crypto_default_null_skcipher = NULL;
186 } 185 }
187 mutex_unlock(&crypto_default_null_skcipher_lock); 186 mutex_unlock(&crypto_default_null_skcipher_lock);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user_base.c
index 0e89b5457cab..e41f6cc33fff 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user_base.c
@@ -29,6 +29,7 @@
29#include <crypto/internal/rng.h> 29#include <crypto/internal/rng.h>
30#include <crypto/akcipher.h> 30#include <crypto/akcipher.h>
31#include <crypto/kpp.h> 31#include <crypto/kpp.h>
32#include <crypto/internal/cryptouser.h>
32 33
33#include "internal.h" 34#include "internal.h"
34 35
@@ -37,7 +38,7 @@
37static DEFINE_MUTEX(crypto_cfg_mutex); 38static DEFINE_MUTEX(crypto_cfg_mutex);
38 39
39/* The crypto netlink socket */ 40/* The crypto netlink socket */
40static struct sock *crypto_nlsk; 41struct sock *crypto_nlsk;
41 42
42struct crypto_dump_info { 43struct crypto_dump_info {
43 struct sk_buff *in_skb; 44 struct sk_buff *in_skb;
@@ -46,7 +47,7 @@ struct crypto_dump_info {
46 u16 nlmsg_flags; 47 u16 nlmsg_flags;
47}; 48};
48 49
49static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) 50struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
50{ 51{
51 struct crypto_alg *q, *alg = NULL; 52 struct crypto_alg *q, *alg = NULL;
52 53
@@ -461,6 +462,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
461 [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 462 [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
462 [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 463 [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
463 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0, 464 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
465 [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
464}; 466};
465 467
466static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = { 468static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
@@ -481,6 +483,9 @@ static const struct crypto_link {
481 .dump = crypto_dump_report, 483 .dump = crypto_dump_report,
482 .done = crypto_dump_report_done}, 484 .done = crypto_dump_report_done},
483 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng }, 485 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
486 [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat,
487 .dump = crypto_dump_reportstat,
488 .done = crypto_dump_reportstat_done},
484}; 489};
485 490
486static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, 491static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
new file mode 100644
index 000000000000..021ad06bbb62
--- /dev/null
+++ b/crypto/crypto_user_stat.c
@@ -0,0 +1,463 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Crypto user configuration API.
4 *
5 * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
6 *
7 */
8
9#include <linux/crypto.h>
10#include <linux/cryptouser.h>
11#include <linux/sched.h>
12#include <net/netlink.h>
13#include <crypto/internal/skcipher.h>
14#include <crypto/internal/rng.h>
15#include <crypto/akcipher.h>
16#include <crypto/kpp.h>
17#include <crypto/internal/cryptouser.h>
18
19#include "internal.h"
20
21#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
22
23static DEFINE_MUTEX(crypto_cfg_mutex);
24
25extern struct sock *crypto_nlsk;
26
27struct crypto_dump_info {
28 struct sk_buff *in_skb;
29 struct sk_buff *out_skb;
30 u32 nlmsg_seq;
31 u16 nlmsg_flags;
32};
33
34static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
35{
36 struct crypto_stat raead;
37 u64 v64;
38 u32 v32;
39
40 strncpy(raead.type, "aead", sizeof(raead.type));
41
42 v32 = atomic_read(&alg->encrypt_cnt);
43 raead.stat_encrypt_cnt = v32;
44 v64 = atomic64_read(&alg->encrypt_tlen);
45 raead.stat_encrypt_tlen = v64;
46 v32 = atomic_read(&alg->decrypt_cnt);
47 raead.stat_decrypt_cnt = v32;
48 v64 = atomic64_read(&alg->decrypt_tlen);
49 raead.stat_decrypt_tlen = v64;
50 v32 = atomic_read(&alg->aead_err_cnt);
51 raead.stat_aead_err_cnt = v32;
52
53 if (nla_put(skb, CRYPTOCFGA_STAT_AEAD,
54 sizeof(struct crypto_stat), &raead))
55 goto nla_put_failure;
56 return 0;
57
58nla_put_failure:
59 return -EMSGSIZE;
60}
61
62static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
63{
64 struct crypto_stat rcipher;
65 u64 v64;
66 u32 v32;
67
68 strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
69
70 v32 = atomic_read(&alg->encrypt_cnt);
71 rcipher.stat_encrypt_cnt = v32;
72 v64 = atomic64_read(&alg->encrypt_tlen);
73 rcipher.stat_encrypt_tlen = v64;
74 v32 = atomic_read(&alg->decrypt_cnt);
75 rcipher.stat_decrypt_cnt = v32;
76 v64 = atomic64_read(&alg->decrypt_tlen);
77 rcipher.stat_decrypt_tlen = v64;
78 v32 = atomic_read(&alg->cipher_err_cnt);
79 rcipher.stat_cipher_err_cnt = v32;
80
81 if (nla_put(skb, CRYPTOCFGA_STAT_CIPHER,
82 sizeof(struct crypto_stat), &rcipher))
83 goto nla_put_failure;
84 return 0;
85
86nla_put_failure:
87 return -EMSGSIZE;
88}
89
90static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
91{
92 struct crypto_stat rcomp;
93 u64 v64;
94 u32 v32;
95
96 strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
97 v32 = atomic_read(&alg->compress_cnt);
98 rcomp.stat_compress_cnt = v32;
99 v64 = atomic64_read(&alg->compress_tlen);
100 rcomp.stat_compress_tlen = v64;
101 v32 = atomic_read(&alg->decompress_cnt);
102 rcomp.stat_decompress_cnt = v32;
103 v64 = atomic64_read(&alg->decompress_tlen);
104 rcomp.stat_decompress_tlen = v64;
105 v32 = atomic_read(&alg->cipher_err_cnt);
106 rcomp.stat_compress_err_cnt = v32;
107
108 if (nla_put(skb, CRYPTOCFGA_STAT_COMPRESS,
109 sizeof(struct crypto_stat), &rcomp))
110 goto nla_put_failure;
111 return 0;
112
113nla_put_failure:
114 return -EMSGSIZE;
115}
116
117static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
118{
119 struct crypto_stat racomp;
120 u64 v64;
121 u32 v32;
122
123 strlcpy(racomp.type, "acomp", sizeof(racomp.type));
124 v32 = atomic_read(&alg->compress_cnt);
125 racomp.stat_compress_cnt = v32;
126 v64 = atomic64_read(&alg->compress_tlen);
127 racomp.stat_compress_tlen = v64;
128 v32 = atomic_read(&alg->decompress_cnt);
129 racomp.stat_decompress_cnt = v32;
130 v64 = atomic64_read(&alg->decompress_tlen);
131 racomp.stat_decompress_tlen = v64;
132 v32 = atomic_read(&alg->cipher_err_cnt);
133 racomp.stat_compress_err_cnt = v32;
134
135 if (nla_put(skb, CRYPTOCFGA_STAT_ACOMP,
136 sizeof(struct crypto_stat), &racomp))
137 goto nla_put_failure;
138 return 0;
139
140nla_put_failure:
141 return -EMSGSIZE;
142}
143
144static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
145{
146 struct crypto_stat rakcipher;
147 u64 v64;
148 u32 v32;
149
150 strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
151 v32 = atomic_read(&alg->encrypt_cnt);
152 rakcipher.stat_encrypt_cnt = v32;
153 v64 = atomic64_read(&alg->encrypt_tlen);
154 rakcipher.stat_encrypt_tlen = v64;
155 v32 = atomic_read(&alg->decrypt_cnt);
156 rakcipher.stat_decrypt_cnt = v32;
157 v64 = atomic64_read(&alg->decrypt_tlen);
158 rakcipher.stat_decrypt_tlen = v64;
159 v32 = atomic_read(&alg->sign_cnt);
160 rakcipher.stat_sign_cnt = v32;
161 v32 = atomic_read(&alg->verify_cnt);
162 rakcipher.stat_verify_cnt = v32;
163 v32 = atomic_read(&alg->akcipher_err_cnt);
164 rakcipher.stat_akcipher_err_cnt = v32;
165
166 if (nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
167 sizeof(struct crypto_stat), &rakcipher))
168 goto nla_put_failure;
169 return 0;
170
171nla_put_failure:
172 return -EMSGSIZE;
173}
174
175static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
176{
177 struct crypto_stat rkpp;
178 u32 v;
179
180 strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
181
182 v = atomic_read(&alg->setsecret_cnt);
183 rkpp.stat_setsecret_cnt = v;
184 v = atomic_read(&alg->generate_public_key_cnt);
185 rkpp.stat_generate_public_key_cnt = v;
186 v = atomic_read(&alg->compute_shared_secret_cnt);
187 rkpp.stat_compute_shared_secret_cnt = v;
188 v = atomic_read(&alg->kpp_err_cnt);
189 rkpp.stat_kpp_err_cnt = v;
190
191 if (nla_put(skb, CRYPTOCFGA_STAT_KPP,
192 sizeof(struct crypto_stat), &rkpp))
193 goto nla_put_failure;
194 return 0;
195
196nla_put_failure:
197 return -EMSGSIZE;
198}
199
200static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
201{
202 struct crypto_stat rhash;
203 u64 v64;
204 u32 v32;
205
206 strncpy(rhash.type, "ahash", sizeof(rhash.type));
207
208 v32 = atomic_read(&alg->hash_cnt);
209 rhash.stat_hash_cnt = v32;
210 v64 = atomic64_read(&alg->hash_tlen);
211 rhash.stat_hash_tlen = v64;
212 v32 = atomic_read(&alg->hash_err_cnt);
213 rhash.stat_hash_err_cnt = v32;
214
215 if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
216 sizeof(struct crypto_stat), &rhash))
217 goto nla_put_failure;
218 return 0;
219
220nla_put_failure:
221 return -EMSGSIZE;
222}
223
224static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
225{
226 struct crypto_stat rhash;
227 u64 v64;
228 u32 v32;
229
230 strncpy(rhash.type, "shash", sizeof(rhash.type));
231
232 v32 = atomic_read(&alg->hash_cnt);
233 rhash.stat_hash_cnt = v32;
234 v64 = atomic64_read(&alg->hash_tlen);
235 rhash.stat_hash_tlen = v64;
236 v32 = atomic_read(&alg->hash_err_cnt);
237 rhash.stat_hash_err_cnt = v32;
238
239 if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
240 sizeof(struct crypto_stat), &rhash))
241 goto nla_put_failure;
242 return 0;
243
244nla_put_failure:
245 return -EMSGSIZE;
246}
247
248static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
249{
250 struct crypto_stat rrng;
251 u64 v64;
252 u32 v32;
253
254 strncpy(rrng.type, "rng", sizeof(rrng.type));
255
256 v32 = atomic_read(&alg->generate_cnt);
257 rrng.stat_generate_cnt = v32;
258 v64 = atomic64_read(&alg->generate_tlen);
259 rrng.stat_generate_tlen = v64;
260 v32 = atomic_read(&alg->seed_cnt);
261 rrng.stat_seed_cnt = v32;
262 v32 = atomic_read(&alg->hash_err_cnt);
263 rrng.stat_rng_err_cnt = v32;
264
265 if (nla_put(skb, CRYPTOCFGA_STAT_RNG,
266 sizeof(struct crypto_stat), &rrng))
267 goto nla_put_failure;
268 return 0;
269
270nla_put_failure:
271 return -EMSGSIZE;
272}
273
274static int crypto_reportstat_one(struct crypto_alg *alg,
275 struct crypto_user_alg *ualg,
276 struct sk_buff *skb)
277{
278 strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
279 strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
280 sizeof(ualg->cru_driver_name));
281 strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
282 sizeof(ualg->cru_module_name));
283
284 ualg->cru_type = 0;
285 ualg->cru_mask = 0;
286 ualg->cru_flags = alg->cra_flags;
287 ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
288
289 if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
290 goto nla_put_failure;
291 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
292 struct crypto_stat rl;
293
294 strlcpy(rl.type, "larval", sizeof(rl.type));
295 if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
296 sizeof(struct crypto_stat), &rl))
297 goto nla_put_failure;
298 goto out;
299 }
300
301 switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
302 case CRYPTO_ALG_TYPE_AEAD:
303 if (crypto_report_aead(skb, alg))
304 goto nla_put_failure;
305 break;
306 case CRYPTO_ALG_TYPE_SKCIPHER:
307 if (crypto_report_cipher(skb, alg))
308 goto nla_put_failure;
309 break;
310 case CRYPTO_ALG_TYPE_BLKCIPHER:
311 if (crypto_report_cipher(skb, alg))
312 goto nla_put_failure;
313 break;
314 case CRYPTO_ALG_TYPE_CIPHER:
315 if (crypto_report_cipher(skb, alg))
316 goto nla_put_failure;
317 break;
318 case CRYPTO_ALG_TYPE_COMPRESS:
319 if (crypto_report_comp(skb, alg))
320 goto nla_put_failure;
321 break;
322 case CRYPTO_ALG_TYPE_ACOMPRESS:
323 if (crypto_report_acomp(skb, alg))
324 goto nla_put_failure;
325 break;
326 case CRYPTO_ALG_TYPE_SCOMPRESS:
327 if (crypto_report_acomp(skb, alg))
328 goto nla_put_failure;
329 break;
330 case CRYPTO_ALG_TYPE_AKCIPHER:
331 if (crypto_report_akcipher(skb, alg))
332 goto nla_put_failure;
333 break;
334 case CRYPTO_ALG_TYPE_KPP:
335 if (crypto_report_kpp(skb, alg))
336 goto nla_put_failure;
337 break;
338 case CRYPTO_ALG_TYPE_AHASH:
339 if (crypto_report_ahash(skb, alg))
340 goto nla_put_failure;
341 break;
342 case CRYPTO_ALG_TYPE_HASH:
343 if (crypto_report_shash(skb, alg))
344 goto nla_put_failure;
345 break;
346 case CRYPTO_ALG_TYPE_RNG:
347 if (crypto_report_rng(skb, alg))
348 goto nla_put_failure;
349 break;
350 default:
351 pr_err("ERROR: Unhandled alg %d in %s\n",
352 alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
353 __func__);
354 }
355
356out:
357 return 0;
358
359nla_put_failure:
360 return -EMSGSIZE;
361}
362
363static int crypto_reportstat_alg(struct crypto_alg *alg,
364 struct crypto_dump_info *info)
365{
366 struct sk_buff *in_skb = info->in_skb;
367 struct sk_buff *skb = info->out_skb;
368 struct nlmsghdr *nlh;
369 struct crypto_user_alg *ualg;
370 int err = 0;
371
372 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
373 CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
374 if (!nlh) {
375 err = -EMSGSIZE;
376 goto out;
377 }
378
379 ualg = nlmsg_data(nlh);
380
381 err = crypto_reportstat_one(alg, ualg, skb);
382 if (err) {
383 nlmsg_cancel(skb, nlh);
384 goto out;
385 }
386
387 nlmsg_end(skb, nlh);
388
389out:
390 return err;
391}
392
393int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
394 struct nlattr **attrs)
395{
396 struct crypto_user_alg *p = nlmsg_data(in_nlh);
397 struct crypto_alg *alg;
398 struct sk_buff *skb;
399 struct crypto_dump_info info;
400 int err;
401
402 if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
403 return -EINVAL;
404
405 alg = crypto_alg_match(p, 0);
406 if (!alg)
407 return -ENOENT;
408
409 err = -ENOMEM;
410 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
411 if (!skb)
412 goto drop_alg;
413
414 info.in_skb = in_skb;
415 info.out_skb = skb;
416 info.nlmsg_seq = in_nlh->nlmsg_seq;
417 info.nlmsg_flags = 0;
418
419 err = crypto_reportstat_alg(alg, &info);
420
421drop_alg:
422 crypto_mod_put(alg);
423
424 if (err)
425 return err;
426
427 return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
428}
429
430int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb)
431{
432 struct crypto_alg *alg;
433 struct crypto_dump_info info;
434 int err;
435
436 if (cb->args[0])
437 goto out;
438
439 cb->args[0] = 1;
440
441 info.in_skb = cb->skb;
442 info.out_skb = skb;
443 info.nlmsg_seq = cb->nlh->nlmsg_seq;
444 info.nlmsg_flags = NLM_F_MULTI;
445
446 list_for_each_entry(alg, &crypto_alg_list, cra_list) {
447 err = crypto_reportstat_alg(alg, &info);
448 if (err)
449 goto out_err;
450 }
451
452out:
453 return skb->len;
454out_err:
455 return err;
456}
457
458int crypto_dump_reportstat_done(struct netlink_callback *cb)
459{
460 return 0;
461}
462
463MODULE_LICENSE("GPL");
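
The new file plugs into the same netlink plumbing as the existing crypto_user configuration commands, so the counters are fetched by sending a CRYPTO_MSG_GETSTAT request that names an algorithm and parsing the CRYPTOCFGA_STAT_* attribute in the reply. A hedged user-space sketch, assuming the uapi additions to <linux/cryptouser.h> made by this series; reply parsing is left out:

#include <linux/cryptouser.h>
#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Ask the kernel for the statistics of one algorithm, e.g. "sha256". */
static int request_stats(const char *alg_name)
{
	struct {
		struct nlmsghdr nlh;
		struct crypto_user_alg cru;
	} req;
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
	int fd, err;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
	req.nlh.nlmsg_type = CRYPTO_MSG_GETSTAT;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.cru.cru_name, alg_name, sizeof(req.cru.cru_name) - 1);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return -1;

	err = sendto(fd, &req, req.nlh.nlmsg_len, 0,
		     (struct sockaddr *)&dst, sizeof(dst));
	/* ... recv() the reply and walk its CRYPTOCFGA_STAT_* attribute ... */
	close(fd);
	return err < 0 ? -1 : 0;
}
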
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 45819e6015bf..77e607fdbfb7 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -47,9 +47,9 @@ static int echainiv_encrypt(struct aead_request *req)
47 info = req->iv; 47 info = req->iv;
48 48
49 if (req->src != req->dst) { 49 if (req->src != req->dst) {
50 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); 50 SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
51 51
52 skcipher_request_set_tfm(nreq, ctx->sknull); 52 skcipher_request_set_sync_tfm(nreq, ctx->sknull);
53 skcipher_request_set_callback(nreq, req->base.flags, 53 skcipher_request_set_callback(nreq, req->base.flags,
54 NULL, NULL); 54 NULL, NULL);
55 skcipher_request_set_crypt(nreq, req->src, req->dst, 55 skcipher_request_set_crypt(nreq, req->src, req->dst,
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 0ad879e1f9b2..e438492db2ca 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
50 50
51struct crypto_rfc4543_ctx { 51struct crypto_rfc4543_ctx {
52 struct crypto_aead *child; 52 struct crypto_aead *child;
53 struct crypto_skcipher *null; 53 struct crypto_sync_skcipher *null;
54 u8 nonce[4]; 54 u8 nonce[4];
55}; 55};
56 56
@@ -1067,9 +1067,9 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
1067 unsigned int authsize = crypto_aead_authsize(aead); 1067 unsigned int authsize = crypto_aead_authsize(aead);
1068 unsigned int nbytes = req->assoclen + req->cryptlen - 1068 unsigned int nbytes = req->assoclen + req->cryptlen -
1069 (enc ? 0 : authsize); 1069 (enc ? 0 : authsize);
1070 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); 1070 SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
1071 1071
1072 skcipher_request_set_tfm(nreq, ctx->null); 1072 skcipher_request_set_sync_tfm(nreq, ctx->null);
1073 skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); 1073 skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
1074 skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); 1074 skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
1075 1075
@@ -1093,7 +1093,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
1093 struct crypto_aead_spawn *spawn = &ictx->aead; 1093 struct crypto_aead_spawn *spawn = &ictx->aead;
1094 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); 1094 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
1095 struct crypto_aead *aead; 1095 struct crypto_aead *aead;
1096 struct crypto_skcipher *null; 1096 struct crypto_sync_skcipher *null;
1097 unsigned long align; 1097 unsigned long align;
1098 int err = 0; 1098 int err = 0;
1099 1099
diff --git a/crypto/internal.h b/crypto/internal.h
index 9a3f39939fba..ef769b5e8ad3 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -26,12 +26,6 @@
26#include <linux/rwsem.h> 26#include <linux/rwsem.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29/* Crypto notification events. */
30enum {
31 CRYPTO_MSG_ALG_REQUEST,
32 CRYPTO_MSG_ALG_REGISTER,
33};
34
35struct crypto_instance; 29struct crypto_instance;
36struct crypto_template; 30struct crypto_template;
37 31
@@ -90,8 +84,6 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
90void *crypto_alloc_tfm(const char *alg_name, 84void *crypto_alloc_tfm(const char *alg_name,
91 const struct crypto_type *frontend, u32 type, u32 mask); 85 const struct crypto_type *frontend, u32 type, u32 mask);
92 86
93int crypto_register_notifier(struct notifier_block *nb);
94int crypto_unregister_notifier(struct notifier_block *nb);
95int crypto_probing_notify(unsigned long val, void *v); 87int crypto_probing_notify(unsigned long val, void *v);
96 88
97unsigned int crypto_alg_extsize(struct crypto_alg *alg); 89unsigned int crypto_alg_extsize(struct crypto_alg *alg);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 393a782679c7..0430ccd08728 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -29,8 +29,6 @@
29#include <crypto/b128ops.h> 29#include <crypto/b128ops.h>
30#include <crypto/gf128mul.h> 30#include <crypto/gf128mul.h>
31 31
32#define LRW_BUFFER_SIZE 128u
33
34#define LRW_BLOCK_SIZE 16 32#define LRW_BLOCK_SIZE 16
35 33
36struct priv { 34struct priv {
@@ -56,19 +54,7 @@ struct priv {
56}; 54};
57 55
58struct rctx { 56struct rctx {
59 be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
60
61 be128 t; 57 be128 t;
62
63 be128 *ext;
64
65 struct scatterlist srcbuf[2];
66 struct scatterlist dstbuf[2];
67 struct scatterlist *src;
68 struct scatterlist *dst;
69
70 unsigned int left;
71
72 struct skcipher_request subreq; 58 struct skcipher_request subreq;
73}; 59};
74 60
@@ -120,112 +106,68 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
120 return 0; 106 return 0;
121} 107}
122 108
123static inline void inc(be128 *iv) 109/*
124{ 110 * Returns the number of trailing '1' bits in the words of the counter, which is
125 be64_add_cpu(&iv->b, 1); 111 * represented by 4 32-bit words, arranged from least to most significant.
126 if (!iv->b) 112 * At the same time, increments the counter by one.
127 be64_add_cpu(&iv->a, 1); 113 *
128} 114 * For example:
129 115 *
130/* this returns the number of consequative 1 bits starting 116 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
131 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */ 117 * int i = next_index(&counter);
132static inline int get_index128(be128 *block) 118 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
119 */
120static int next_index(u32 *counter)
133{ 121{
134 int x; 122 int i, res = 0;
135 __be32 *p = (__be32 *) block;
136 123
137 for (p += 3, x = 0; x < 128; p--, x += 32) { 124 for (i = 0; i < 4; i++) {
138 u32 val = be32_to_cpup(p); 125 if (counter[i] + 1 != 0)
126 return res + ffz(counter[i]++);
139 127
140 if (!~val) 128 counter[i] = 0;
141 continue; 129 res += 32;
142
143 return x + ffz(val);
144 } 130 }
145 131
146 return x; 132 /*
133 * If we get here, then x == 128 and we are incrementing the counter
134 * from all ones to all zeros. This means we must return index 127, i.e.
135 * the one corresponding to key2*{ 1,...,1 }.
136 */
137 return 127;
147} 138}
148 139
149static int post_crypt(struct skcipher_request *req) 140/*
141 * We compute the tweak masks twice (both before and after the ECB encryption or
142 * decryption) to avoid having to allocate a temporary buffer and/or make
 143 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
144 * just doing the next_index() calls again.
145 */
146static int xor_tweak(struct skcipher_request *req, bool second_pass)
150{ 147{
151 struct rctx *rctx = skcipher_request_ctx(req);
152 be128 *buf = rctx->ext ?: rctx->buf;
153 struct skcipher_request *subreq;
154 const int bs = LRW_BLOCK_SIZE; 148 const int bs = LRW_BLOCK_SIZE;
155 struct skcipher_walk w;
156 struct scatterlist *sg;
157 unsigned offset;
158 int err;
159
160 subreq = &rctx->subreq;
161 err = skcipher_walk_virt(&w, subreq, false);
162
163 while (w.nbytes) {
164 unsigned int avail = w.nbytes;
165 be128 *wdst;
166
167 wdst = w.dst.virt.addr;
168
169 do {
170 be128_xor(wdst, buf++, wdst);
171 wdst++;
172 } while ((avail -= bs) >= bs);
173
174 err = skcipher_walk_done(&w, avail);
175 }
176
177 rctx->left -= subreq->cryptlen;
178
179 if (err || !rctx->left)
180 goto out;
181
182 rctx->dst = rctx->dstbuf;
183
184 scatterwalk_done(&w.out, 0, 1);
185 sg = w.out.sg;
186 offset = w.out.offset;
187
188 if (rctx->dst != sg) {
189 rctx->dst[0] = *sg;
190 sg_unmark_end(rctx->dst);
191 scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
192 }
193 rctx->dst[0].length -= offset - sg->offset;
194 rctx->dst[0].offset = offset;
195
196out:
197 return err;
198}
199
200static int pre_crypt(struct skcipher_request *req)
201{
202 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 149 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
203 struct rctx *rctx = skcipher_request_ctx(req);
204 struct priv *ctx = crypto_skcipher_ctx(tfm); 150 struct priv *ctx = crypto_skcipher_ctx(tfm);
205 be128 *buf = rctx->ext ?: rctx->buf; 151 struct rctx *rctx = skcipher_request_ctx(req);
206 struct skcipher_request *subreq; 152 be128 t = rctx->t;
207 const int bs = LRW_BLOCK_SIZE;
208 struct skcipher_walk w; 153 struct skcipher_walk w;
209 struct scatterlist *sg; 154 __be32 *iv;
210 unsigned cryptlen; 155 u32 counter[4];
211 unsigned offset;
212 be128 *iv;
213 bool more;
214 int err; 156 int err;
215 157
216 subreq = &rctx->subreq; 158 if (second_pass) {
217 skcipher_request_set_tfm(subreq, tfm); 159 req = &rctx->subreq;
218 160 /* set to our TFM to enforce correct alignment: */
219 cryptlen = subreq->cryptlen; 161 skcipher_request_set_tfm(req, tfm);
220 more = rctx->left > cryptlen; 162 }
221 if (!more)
222 cryptlen = rctx->left;
223 163
224 skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, 164 err = skcipher_walk_virt(&w, req, false);
225 cryptlen, req->iv); 165 iv = (__be32 *)w.iv;
226 166
227 err = skcipher_walk_virt(&w, subreq, false); 167 counter[0] = be32_to_cpu(iv[3]);
228 iv = w.iv; 168 counter[1] = be32_to_cpu(iv[2]);
169 counter[2] = be32_to_cpu(iv[1]);
170 counter[3] = be32_to_cpu(iv[0]);
229 171
230 while (w.nbytes) { 172 while (w.nbytes) {
231 unsigned int avail = w.nbytes; 173 unsigned int avail = w.nbytes;
@@ -236,188 +178,85 @@ static int pre_crypt(struct skcipher_request *req)
236 wdst = w.dst.virt.addr; 178 wdst = w.dst.virt.addr;
237 179
238 do { 180 do {
239 *buf++ = rctx->t; 181 be128_xor(wdst++, &t, wsrc++);
240 be128_xor(wdst++, &rctx->t, wsrc++);
241 182
242 /* T <- I*Key2, using the optimization 183 /* T <- I*Key2, using the optimization
243 * discussed in the specification */ 184 * discussed in the specification */
244 be128_xor(&rctx->t, &rctx->t, 185 be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
245 &ctx->mulinc[get_index128(iv)]);
246 inc(iv);
247 } while ((avail -= bs) >= bs); 186 } while ((avail -= bs) >= bs);
248 187
249 err = skcipher_walk_done(&w, avail); 188 if (second_pass && w.nbytes == w.total) {
250 } 189 iv[0] = cpu_to_be32(counter[3]);
251 190 iv[1] = cpu_to_be32(counter[2]);
252 skcipher_request_set_tfm(subreq, ctx->child); 191 iv[2] = cpu_to_be32(counter[1]);
253 skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, 192 iv[3] = cpu_to_be32(counter[0]);
254 cryptlen, NULL); 193 }
255
256 if (err || !more)
257 goto out;
258
259 rctx->src = rctx->srcbuf;
260
261 scatterwalk_done(&w.in, 0, 1);
262 sg = w.in.sg;
263 offset = w.in.offset;
264 194
265 if (rctx->src != sg) { 195 err = skcipher_walk_done(&w, avail);
266 rctx->src[0] = *sg;
267 sg_unmark_end(rctx->src);
268 scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
269 } 196 }
270 rctx->src[0].length -= offset - sg->offset;
271 rctx->src[0].offset = offset;
272 197
273out:
274 return err; 198 return err;
275} 199}
276 200
277static int init_crypt(struct skcipher_request *req, crypto_completion_t done) 201static int xor_tweak_pre(struct skcipher_request *req)
278{ 202{
279 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); 203 return xor_tweak(req, false);
280 struct rctx *rctx = skcipher_request_ctx(req);
281 struct skcipher_request *subreq;
282 gfp_t gfp;
283
284 subreq = &rctx->subreq;
285 skcipher_request_set_callback(subreq, req->base.flags, done, req);
286
287 gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
288 GFP_ATOMIC;
289 rctx->ext = NULL;
290
291 subreq->cryptlen = LRW_BUFFER_SIZE;
292 if (req->cryptlen > LRW_BUFFER_SIZE) {
293 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
294
295 rctx->ext = kmalloc(n, gfp);
296 if (rctx->ext)
297 subreq->cryptlen = n;
298 }
299
300 rctx->src = req->src;
301 rctx->dst = req->dst;
302 rctx->left = req->cryptlen;
303
304 /* calculate first value of T */
305 memcpy(&rctx->t, req->iv, sizeof(rctx->t));
306
307 /* T <- I*Key2 */
308 gf128mul_64k_bbe(&rctx->t, ctx->table);
309
310 return 0;
311} 204}
312 205
313static void exit_crypt(struct skcipher_request *req) 206static int xor_tweak_post(struct skcipher_request *req)
314{ 207{
315 struct rctx *rctx = skcipher_request_ctx(req); 208 return xor_tweak(req, true);
316
317 rctx->left = 0;
318
319 if (rctx->ext)
320 kzfree(rctx->ext);
321} 209}
322 210
323static int do_encrypt(struct skcipher_request *req, int err) 211static void crypt_done(struct crypto_async_request *areq, int err)
324{
325 struct rctx *rctx = skcipher_request_ctx(req);
326 struct skcipher_request *subreq;
327
328 subreq = &rctx->subreq;
329
330 while (!err && rctx->left) {
331 err = pre_crypt(req) ?:
332 crypto_skcipher_encrypt(subreq) ?:
333 post_crypt(req);
334
335 if (err == -EINPROGRESS || err == -EBUSY)
336 return err;
337 }
338
339 exit_crypt(req);
340 return err;
341}
342
343static void encrypt_done(struct crypto_async_request *areq, int err)
344{ 212{
345 struct skcipher_request *req = areq->data; 213 struct skcipher_request *req = areq->data;
346 struct skcipher_request *subreq;
347 struct rctx *rctx;
348 214
349 rctx = skcipher_request_ctx(req); 215 if (!err)
216 err = xor_tweak_post(req);
350 217
351 if (err == -EINPROGRESS) {
352 if (rctx->left != req->cryptlen)
353 return;
354 goto out;
355 }
356
357 subreq = &rctx->subreq;
358 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
359
360 err = do_encrypt(req, err ?: post_crypt(req));
361 if (rctx->left)
362 return;
363
364out:
365 skcipher_request_complete(req, err); 218 skcipher_request_complete(req, err);
366} 219}
367 220
368static int encrypt(struct skcipher_request *req) 221static void init_crypt(struct skcipher_request *req)
369{
370 return do_encrypt(req, init_crypt(req, encrypt_done));
371}
372
373static int do_decrypt(struct skcipher_request *req, int err)
374{ 222{
223 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
375 struct rctx *rctx = skcipher_request_ctx(req); 224 struct rctx *rctx = skcipher_request_ctx(req);
376 struct skcipher_request *subreq; 225 struct skcipher_request *subreq = &rctx->subreq;
377
378 subreq = &rctx->subreq;
379 226
380 while (!err && rctx->left) { 227 skcipher_request_set_tfm(subreq, ctx->child);
381 err = pre_crypt(req) ?: 228 skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
382 crypto_skcipher_decrypt(subreq) ?: 229 /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
383 post_crypt(req); 230 skcipher_request_set_crypt(subreq, req->dst, req->dst,
231 req->cryptlen, req->iv);
384 232
385 if (err == -EINPROGRESS || err == -EBUSY) 233 /* calculate first value of T */
386 return err; 234 memcpy(&rctx->t, req->iv, sizeof(rctx->t));
387 }
388 235
389 exit_crypt(req); 236 /* T <- I*Key2 */
390 return err; 237 gf128mul_64k_bbe(&rctx->t, ctx->table);
391} 238}
392 239
393static void decrypt_done(struct crypto_async_request *areq, int err) 240static int encrypt(struct skcipher_request *req)
394{ 241{
395 struct skcipher_request *req = areq->data; 242 struct rctx *rctx = skcipher_request_ctx(req);
396 struct skcipher_request *subreq; 243 struct skcipher_request *subreq = &rctx->subreq;
397 struct rctx *rctx;
398
399 rctx = skcipher_request_ctx(req);
400
401 if (err == -EINPROGRESS) {
402 if (rctx->left != req->cryptlen)
403 return;
404 goto out;
405 }
406
407 subreq = &rctx->subreq;
408 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
409
410 err = do_decrypt(req, err ?: post_crypt(req));
411 if (rctx->left)
412 return;
413 244
414out: 245 init_crypt(req);
415 skcipher_request_complete(req, err); 246 return xor_tweak_pre(req) ?:
247 crypto_skcipher_encrypt(subreq) ?:
248 xor_tweak_post(req);
416} 249}
417 250
418static int decrypt(struct skcipher_request *req) 251static int decrypt(struct skcipher_request *req)
419{ 252{
420 return do_decrypt(req, init_crypt(req, decrypt_done)); 253 struct rctx *rctx = skcipher_request_ctx(req);
254 struct skcipher_request *subreq = &rctx->subreq;
255
256 init_crypt(req);
257 return xor_tweak_pre(req) ?:
258 crypto_skcipher_decrypt(subreq) ?:
259 xor_tweak_post(req);
421} 260}
422 261
423static int init_tfm(struct crypto_skcipher *tfm) 262static int init_tfm(struct crypto_skcipher *tfm)
@@ -543,7 +382,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
543 inst->alg.base.cra_priority = alg->base.cra_priority; 382 inst->alg.base.cra_priority = alg->base.cra_priority;
544 inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; 383 inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
545 inst->alg.base.cra_alignmask = alg->base.cra_alignmask | 384 inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
546 (__alignof__(u64) - 1); 385 (__alignof__(__be32) - 1);
547 386
548 inst->alg.ivsize = LRW_BLOCK_SIZE; 387 inst->alg.ivsize = LRW_BLOCK_SIZE;
549 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + 388 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
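
The rewritten lrw.c replaces the big-endian 128-bit increment with a small CPU-endian counter so that next_index() can advance the counter and, at the same time, report which precomputed multiple of key2 to fold into the tweak; xor_tweak() then runs the same pass before and after the inner ecb() request instead of buffering tweaked blocks. A stand-alone illustration of the helper's contract, mirroring the worked example in the new comment (demo_next_index() is a local reimplementation, not the kernel function):

#include <assert.h>
#include <stdint.h>

/* counter[] is least-significant word first; the return value is the number
 * of trailing one bits before the increment, i.e. the index of the
 * precomputed key2 multiple that the tweak update needs. */
static int demo_next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0) {
			uint32_t old = counter[i]++;

			/* count trailing ones of the pre-increment value
			 * (what ffz() computes in the kernel version) */
			while (old & 1) {
				old >>= 1;
				res++;
			}
			return res;
		}
		counter[i] = 0;	/* word rolled over, carry into the next one */
		res += 32;
	}
	return 127;	/* counter wrapped from all ones to all zeros */
}

int main(void)
{
	uint32_t counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
	int i = demo_next_index(counter);

	assert(i == 33);
	assert(counter[0] == 0 && counter[1] == 2 &&
	       counter[2] == 0 && counter[3] == 0);
	return 0;
}
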
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
deleted file mode 100644
index f14152147ce8..000000000000
--- a/crypto/mcryptd.c
+++ /dev/null
@@ -1,675 +0,0 @@
1/*
2 * Software multibuffer async crypto daemon.
3 *
4 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
5 *
6 * Adapted from crypto daemon.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 */
14
15#include <crypto/algapi.h>
16#include <crypto/internal/hash.h>
17#include <crypto/internal/aead.h>
18#include <crypto/mcryptd.h>
19#include <crypto/crypto_wq.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/scatterlist.h>
26#include <linux/sched.h>
27#include <linux/sched/stat.h>
28#include <linux/slab.h>
29
30#define MCRYPTD_MAX_CPU_QLEN 100
31#define MCRYPTD_BATCH 9
32
33static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
34 unsigned int tail);
35
36struct mcryptd_flush_list {
37 struct list_head list;
38 struct mutex lock;
39};
40
41static struct mcryptd_flush_list __percpu *mcryptd_flist;
42
43struct hashd_instance_ctx {
44 struct crypto_ahash_spawn spawn;
45 struct mcryptd_queue *queue;
46};
47
48static void mcryptd_queue_worker(struct work_struct *work);
49
50void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
51{
52 struct mcryptd_flush_list *flist;
53
54 if (!cstate->flusher_engaged) {
55 /* put the flusher on the flush list */
56 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
57 mutex_lock(&flist->lock);
58 list_add_tail(&cstate->flush_list, &flist->list);
59 cstate->flusher_engaged = true;
60 cstate->next_flush = jiffies + delay;
61 queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
62 &cstate->flush, delay);
63 mutex_unlock(&flist->lock);
64 }
65}
66EXPORT_SYMBOL(mcryptd_arm_flusher);
67
68static int mcryptd_init_queue(struct mcryptd_queue *queue,
69 unsigned int max_cpu_qlen)
70{
71 int cpu;
72 struct mcryptd_cpu_queue *cpu_queue;
73
74 queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
75 pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
76 if (!queue->cpu_queue)
77 return -ENOMEM;
78 for_each_possible_cpu(cpu) {
79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
80 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
81 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
82 INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
83 spin_lock_init(&cpu_queue->q_lock);
84 }
85 return 0;
86}
87
88static void mcryptd_fini_queue(struct mcryptd_queue *queue)
89{
90 int cpu;
91 struct mcryptd_cpu_queue *cpu_queue;
92
93 for_each_possible_cpu(cpu) {
94 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
95 BUG_ON(cpu_queue->queue.qlen);
96 }
97 free_percpu(queue->cpu_queue);
98}
99
100static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
101 struct crypto_async_request *request,
102 struct mcryptd_hash_request_ctx *rctx)
103{
104 int cpu, err;
105 struct mcryptd_cpu_queue *cpu_queue;
106
107 cpu_queue = raw_cpu_ptr(queue->cpu_queue);
108 spin_lock(&cpu_queue->q_lock);
109 cpu = smp_processor_id();
110 rctx->tag.cpu = smp_processor_id();
111
112 err = crypto_enqueue_request(&cpu_queue->queue, request);
113 pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
114 cpu, cpu_queue, request);
115 spin_unlock(&cpu_queue->q_lock);
116 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
117
118 return err;
119}
120
121/*
122 * Try to opportunistically flush the partially completed jobs if
123 * the crypto daemon is the only task running.
124 */
125static void mcryptd_opportunistic_flush(void)
126{
127 struct mcryptd_flush_list *flist;
128 struct mcryptd_alg_cstate *cstate;
129
130 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
131 while (single_task_running()) {
132 mutex_lock(&flist->lock);
133 cstate = list_first_entry_or_null(&flist->list,
134 struct mcryptd_alg_cstate, flush_list);
135 if (!cstate || !cstate->flusher_engaged) {
136 mutex_unlock(&flist->lock);
137 return;
138 }
139 list_del(&cstate->flush_list);
140 cstate->flusher_engaged = false;
141 mutex_unlock(&flist->lock);
142 cstate->alg_state->flusher(cstate);
143 }
144}
145
146/*
147 * Called in workqueue context: do one real piece of crypto work (via
148 * req->complete) and reschedule itself if there is more work to
149 * do.
150 */
151static void mcryptd_queue_worker(struct work_struct *work)
152{
153 struct mcryptd_cpu_queue *cpu_queue;
154 struct crypto_async_request *req, *backlog;
155 int i;
156
157 /*
158 * Need to loop through more than once for multi-buffer to
159 * be effective.
160 */
161
162 cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
163 i = 0;
164 while (i < MCRYPTD_BATCH || single_task_running()) {
165
166 spin_lock_bh(&cpu_queue->q_lock);
167 backlog = crypto_get_backlog(&cpu_queue->queue);
168 req = crypto_dequeue_request(&cpu_queue->queue);
169 spin_unlock_bh(&cpu_queue->q_lock);
170
171 if (!req) {
172 mcryptd_opportunistic_flush();
173 return;
174 }
175
176 if (backlog)
177 backlog->complete(backlog, -EINPROGRESS);
178 req->complete(req, 0);
179 if (!cpu_queue->queue.qlen)
180 return;
181 ++i;
182 }
183 if (cpu_queue->queue.qlen)
184 queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
185}
186
187void mcryptd_flusher(struct work_struct *__work)
188{
189 struct mcryptd_alg_cstate *alg_cpu_state;
190 struct mcryptd_alg_state *alg_state;
191 struct mcryptd_flush_list *flist;
192 int cpu;
193
194 cpu = smp_processor_id();
195 alg_cpu_state = container_of(to_delayed_work(__work),
196 struct mcryptd_alg_cstate, flush);
197 alg_state = alg_cpu_state->alg_state;
198 if (alg_cpu_state->cpu != cpu)
199 pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
200 cpu, alg_cpu_state->cpu);
201
202 if (alg_cpu_state->flusher_engaged) {
203 flist = per_cpu_ptr(mcryptd_flist, cpu);
204 mutex_lock(&flist->lock);
205 list_del(&alg_cpu_state->flush_list);
206 alg_cpu_state->flusher_engaged = false;
207 mutex_unlock(&flist->lock);
208 alg_state->flusher(alg_cpu_state);
209 }
210}
211EXPORT_SYMBOL_GPL(mcryptd_flusher);
212
213static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
214{
215 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
216 struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
217
218 return ictx->queue;
219}
220
221static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
222 unsigned int tail)
223{
224 char *p;
225 struct crypto_instance *inst;
226 int err;
227
228 p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
229 if (!p)
230 return ERR_PTR(-ENOMEM);
231
232 inst = (void *)(p + head);
233
234 err = -ENAMETOOLONG;
235 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
236 "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
237 goto out_free_inst;
238
239 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
240
241 inst->alg.cra_priority = alg->cra_priority + 50;
242 inst->alg.cra_blocksize = alg->cra_blocksize;
243 inst->alg.cra_alignmask = alg->cra_alignmask;
244
245out:
246 return p;
247
248out_free_inst:
249 kfree(p);
250 p = ERR_PTR(err);
251 goto out;
252}
253
254static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
255 u32 *mask)
256{
257 struct crypto_attr_type *algt;
258
259 algt = crypto_get_attr_type(tb);
260 if (IS_ERR(algt))
261 return false;
262
263 *type |= algt->type & CRYPTO_ALG_INTERNAL;
264 *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
265
266 if (*type & *mask & CRYPTO_ALG_INTERNAL)
267 return true;
268 else
269 return false;
270}
271
272static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
273{
274 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
275 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
276 struct crypto_ahash_spawn *spawn = &ictx->spawn;
277 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
278 struct crypto_ahash *hash;
279
280 hash = crypto_spawn_ahash(spawn);
281 if (IS_ERR(hash))
282 return PTR_ERR(hash);
283
284 ctx->child = hash;
285 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
286 sizeof(struct mcryptd_hash_request_ctx) +
287 crypto_ahash_reqsize(hash));
288 return 0;
289}
290
291static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
292{
293 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
294
295 crypto_free_ahash(ctx->child);
296}
297
298static int mcryptd_hash_setkey(struct crypto_ahash *parent,
299 const u8 *key, unsigned int keylen)
300{
301 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
302 struct crypto_ahash *child = ctx->child;
303 int err;
304
305 crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
306 crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
307 CRYPTO_TFM_REQ_MASK);
308 err = crypto_ahash_setkey(child, key, keylen);
309 crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
310 CRYPTO_TFM_RES_MASK);
311 return err;
312}
313
314static int mcryptd_hash_enqueue(struct ahash_request *req,
315 crypto_completion_t complete)
316{
317 int ret;
318
319 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
320 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
321 struct mcryptd_queue *queue =
322 mcryptd_get_queue(crypto_ahash_tfm(tfm));
323
324 rctx->complete = req->base.complete;
325 req->base.complete = complete;
326
327 ret = mcryptd_enqueue_request(queue, &req->base, rctx);
328
329 return ret;
330}
331
332static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
333{
334 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
335 struct crypto_ahash *child = ctx->child;
336 struct ahash_request *req = ahash_request_cast(req_async);
337 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
338 struct ahash_request *desc = &rctx->areq;
339
340 if (unlikely(err == -EINPROGRESS))
341 goto out;
342
343 ahash_request_set_tfm(desc, child);
344 ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
345 rctx->complete, req_async);
346
347 rctx->out = req->result;
348 err = crypto_ahash_init(desc);
349
350out:
351 local_bh_disable();
352 rctx->complete(&req->base, err);
353 local_bh_enable();
354}
355
356static int mcryptd_hash_init_enqueue(struct ahash_request *req)
357{
358 return mcryptd_hash_enqueue(req, mcryptd_hash_init);
359}
360
361static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
362{
363 struct ahash_request *req = ahash_request_cast(req_async);
364 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
365
366 if (unlikely(err == -EINPROGRESS))
367 goto out;
368
369 rctx->out = req->result;
370 err = crypto_ahash_update(&rctx->areq);
371 if (err) {
372 req->base.complete = rctx->complete;
373 goto out;
374 }
375
376 return;
377out:
378 local_bh_disable();
379 rctx->complete(&req->base, err);
380 local_bh_enable();
381}
382
383static int mcryptd_hash_update_enqueue(struct ahash_request *req)
384{
385 return mcryptd_hash_enqueue(req, mcryptd_hash_update);
386}
387
388static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
389{
390 struct ahash_request *req = ahash_request_cast(req_async);
391 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
392
393 if (unlikely(err == -EINPROGRESS))
394 goto out;
395
396 rctx->out = req->result;
397 err = crypto_ahash_final(&rctx->areq);
398 if (err) {
399 req->base.complete = rctx->complete;
400 goto out;
401 }
402
403 return;
404out:
405 local_bh_disable();
406 rctx->complete(&req->base, err);
407 local_bh_enable();
408}
409
410static int mcryptd_hash_final_enqueue(struct ahash_request *req)
411{
412 return mcryptd_hash_enqueue(req, mcryptd_hash_final);
413}
414
415static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
416{
417 struct ahash_request *req = ahash_request_cast(req_async);
418 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
419
420 if (unlikely(err == -EINPROGRESS))
421 goto out;
422 rctx->out = req->result;
423 err = crypto_ahash_finup(&rctx->areq);
424
425 if (err) {
426 req->base.complete = rctx->complete;
427 goto out;
428 }
429
430 return;
431out:
432 local_bh_disable();
433 rctx->complete(&req->base, err);
434 local_bh_enable();
435}
436
437static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
438{
439 return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
440}
441
442static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
443{
444 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
445 struct crypto_ahash *child = ctx->child;
446 struct ahash_request *req = ahash_request_cast(req_async);
447 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
448 struct ahash_request *desc = &rctx->areq;
449
450 if (unlikely(err == -EINPROGRESS))
451 goto out;
452
453 ahash_request_set_tfm(desc, child);
454 ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
455 rctx->complete, req_async);
456
457 rctx->out = req->result;
458 err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
459
460out:
461 local_bh_disable();
462 rctx->complete(&req->base, err);
463 local_bh_enable();
464}
465
466static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
467{
468 return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
469}
470
471static int mcryptd_hash_export(struct ahash_request *req, void *out)
472{
473 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
474
475 return crypto_ahash_export(&rctx->areq, out);
476}
477
478static int mcryptd_hash_import(struct ahash_request *req, const void *in)
479{
480 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
481
482 return crypto_ahash_import(&rctx->areq, in);
483}
484
485static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
486 struct mcryptd_queue *queue)
487{
488 struct hashd_instance_ctx *ctx;
489 struct ahash_instance *inst;
490 struct hash_alg_common *halg;
491 struct crypto_alg *alg;
492 u32 type = 0;
493 u32 mask = 0;
494 int err;
495
496 if (!mcryptd_check_internal(tb, &type, &mask))
497 return -EINVAL;
498
499 halg = ahash_attr_alg(tb[1], type, mask);
500 if (IS_ERR(halg))
501 return PTR_ERR(halg);
502
503 alg = &halg->base;
504 pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
505 inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
506 sizeof(*ctx));
507 err = PTR_ERR(inst);
508 if (IS_ERR(inst))
509 goto out_put_alg;
510
511 ctx = ahash_instance_ctx(inst);
512 ctx->queue = queue;
513
514 err = crypto_init_ahash_spawn(&ctx->spawn, halg,
515 ahash_crypto_instance(inst));
516 if (err)
517 goto out_free_inst;
518
519 inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
520 (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
521 CRYPTO_ALG_OPTIONAL_KEY));
522
523 inst->alg.halg.digestsize = halg->digestsize;
524 inst->alg.halg.statesize = halg->statesize;
525 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
526
527 inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
528 inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
529
530 inst->alg.init = mcryptd_hash_init_enqueue;
531 inst->alg.update = mcryptd_hash_update_enqueue;
532 inst->alg.final = mcryptd_hash_final_enqueue;
533 inst->alg.finup = mcryptd_hash_finup_enqueue;
534 inst->alg.export = mcryptd_hash_export;
535 inst->alg.import = mcryptd_hash_import;
536 if (crypto_hash_alg_has_setkey(halg))
537 inst->alg.setkey = mcryptd_hash_setkey;
538 inst->alg.digest = mcryptd_hash_digest_enqueue;
539
540 err = ahash_register_instance(tmpl, inst);
541 if (err) {
542 crypto_drop_ahash(&ctx->spawn);
543out_free_inst:
544 kfree(inst);
545 }
546
547out_put_alg:
548 crypto_mod_put(alg);
549 return err;
550}
551
552static struct mcryptd_queue mqueue;
553
554static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
555{
556 struct crypto_attr_type *algt;
557
558 algt = crypto_get_attr_type(tb);
559 if (IS_ERR(algt))
560 return PTR_ERR(algt);
561
562 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
563 case CRYPTO_ALG_TYPE_DIGEST:
564 return mcryptd_create_hash(tmpl, tb, &mqueue);
565 break;
566 }
567
568 return -EINVAL;
569}
570
571static void mcryptd_free(struct crypto_instance *inst)
572{
573 struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
574 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
575
576 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
577 case CRYPTO_ALG_TYPE_AHASH:
578 crypto_drop_ahash(&hctx->spawn);
579 kfree(ahash_instance(inst));
580 return;
581 default:
582 crypto_drop_spawn(&ctx->spawn);
583 kfree(inst);
584 }
585}
586
587static struct crypto_template mcryptd_tmpl = {
588 .name = "mcryptd",
589 .create = mcryptd_create,
590 .free = mcryptd_free,
591 .module = THIS_MODULE,
592};
593
594struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
595 u32 type, u32 mask)
596{
597 char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
598 struct crypto_ahash *tfm;
599
600 if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
601 "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
602 return ERR_PTR(-EINVAL);
603 tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
604 if (IS_ERR(tfm))
605 return ERR_CAST(tfm);
606 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
607 crypto_free_ahash(tfm);
608 return ERR_PTR(-EINVAL);
609 }
610
611 return __mcryptd_ahash_cast(tfm);
612}
613EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
614
615struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
616{
617 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
618
619 return ctx->child;
620}
621EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
622
623struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
624{
625 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
626 return &rctx->areq;
627}
628EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
629
630void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
631{
632 crypto_free_ahash(&tfm->base);
633}
634EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
635
636static int __init mcryptd_init(void)
637{
638 int err, cpu;
639 struct mcryptd_flush_list *flist;
640
641 mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
642 for_each_possible_cpu(cpu) {
643 flist = per_cpu_ptr(mcryptd_flist, cpu);
644 INIT_LIST_HEAD(&flist->list);
645 mutex_init(&flist->lock);
646 }
647
648 err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
649 if (err) {
650 free_percpu(mcryptd_flist);
651 return err;
652 }
653
654 err = crypto_register_template(&mcryptd_tmpl);
655 if (err) {
656 mcryptd_fini_queue(&mqueue);
657 free_percpu(mcryptd_flist);
658 }
659
660 return err;
661}
662
663static void __exit mcryptd_exit(void)
664{
665 mcryptd_fini_queue(&mqueue);
666 crypto_unregister_template(&mcryptd_tmpl);
667 free_percpu(mcryptd_flist);
668}
669
670subsys_initcall(mcryptd_init);
671module_exit(mcryptd_exit);
672
673MODULE_LICENSE("GPL");
674MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
675MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index d057cf5ac4a8..3889c188f266 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -385,14 +385,11 @@ static void crypto_morus1280_final(struct morus1280_state *state,
385 struct morus1280_block *tag_xor, 385 struct morus1280_block *tag_xor,
386 u64 assoclen, u64 cryptlen) 386 u64 assoclen, u64 cryptlen)
387{ 387{
388 u64 assocbits = assoclen * 8;
389 u64 cryptbits = cryptlen * 8;
390
391 struct morus1280_block tmp; 388 struct morus1280_block tmp;
392 unsigned int i; 389 unsigned int i;
393 390
394 tmp.words[0] = cpu_to_le64(assocbits); 391 tmp.words[0] = assoclen * 8;
395 tmp.words[1] = cpu_to_le64(cryptbits); 392 tmp.words[1] = cryptlen * 8;
396 tmp.words[2] = 0; 393 tmp.words[2] = 0;
397 tmp.words[3] = 0; 394 tmp.words[3] = 0;
398 395
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1ca76e54281b..da06ec2f6a80 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -384,21 +384,13 @@ static void crypto_morus640_final(struct morus640_state *state,
384 struct morus640_block *tag_xor, 384 struct morus640_block *tag_xor,
385 u64 assoclen, u64 cryptlen) 385 u64 assoclen, u64 cryptlen)
386{ 386{
387 u64 assocbits = assoclen * 8;
388 u64 cryptbits = cryptlen * 8;
389
390 u32 assocbits_lo = (u32)assocbits;
391 u32 assocbits_hi = (u32)(assocbits >> 32);
392 u32 cryptbits_lo = (u32)cryptbits;
393 u32 cryptbits_hi = (u32)(cryptbits >> 32);
394
395 struct morus640_block tmp; 387 struct morus640_block tmp;
396 unsigned int i; 388 unsigned int i;
397 389
398 tmp.words[0] = cpu_to_le32(assocbits_lo); 390 tmp.words[0] = lower_32_bits(assoclen * 8);
399 tmp.words[1] = cpu_to_le32(assocbits_hi); 391 tmp.words[1] = upper_32_bits(assoclen * 8);
400 tmp.words[2] = cpu_to_le32(cryptbits_lo); 392 tmp.words[2] = lower_32_bits(cryptlen * 8);
401 tmp.words[3] = cpu_to_le32(cryptbits_hi); 393 tmp.words[3] = upper_32_bits(cryptlen * 8);
402 394
403 for (i = 0; i < MORUS_BLOCK_WORDS; i++) 395 for (i = 0; i < MORUS_BLOCK_WORDS; i++)
404 state->s[4].words[i] ^= state->s[0].words[i]; 396 state->s[4].words[i] ^= state->s[0].words[i];
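
Both morus hunks above drop the explicit cpu_to_le*() conversions and keep the length words in native order, using the kernel's lower_32_bits()/upper_32_bits() helpers in the morus640 case. A small userspace sketch of those helpers' semantics (the macros are re-implemented here only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-implementations of the kernel helpers: a purely
 * arithmetic split of the 64-bit value, no byte swapping involved. */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffULL))
#define upper_32_bits(n)	((uint32_t)((uint64_t)(n) >> 32))

int main(void)
{
	uint64_t cryptbits = 0x0123456789abcdefULL;	/* e.g. cryptlen * 8 */

	printf("lo=0x%08x hi=0x%08x\n",
	       lower_32_bits(cryptbits), upper_32_bits(cryptbits));
	/* prints lo=0x89abcdef hi=0x01234567 */
	return 0;
}
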
diff --git a/crypto/ofb.c b/crypto/ofb.c
new file mode 100644
index 000000000000..886631708c5e
--- /dev/null
+++ b/crypto/ofb.c
@@ -0,0 +1,225 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * OFB: Output FeedBack mode
5 *
6 * Copyright (C) 2018 ARM Limited or its affiliates.
7 * All rights reserved.
8 *
9 * Based loosely on public domain code gleaned from libtomcrypt
10 * (https://github.com/libtom/libtomcrypt).
11 */
12
13#include <crypto/algapi.h>
14#include <crypto/internal/skcipher.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/scatterlist.h>
20#include <linux/slab.h>
21
22struct crypto_ofb_ctx {
23 struct crypto_cipher *child;
24 int cnt;
25};
26
27
28static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
29 unsigned int keylen)
30{
31 struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
32 struct crypto_cipher *child = ctx->child;
33 int err;
34
35 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
36 crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
37 CRYPTO_TFM_REQ_MASK);
38 err = crypto_cipher_setkey(child, key, keylen);
39 crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
40 CRYPTO_TFM_RES_MASK);
41 return err;
42}
43
44static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
45 struct skcipher_walk *walk,
46 struct crypto_cipher *tfm)
47{
48 int bsize = crypto_cipher_blocksize(tfm);
49 int nbytes = walk->nbytes;
50
51 u8 *src = walk->src.virt.addr;
52 u8 *dst = walk->dst.virt.addr;
53 u8 *iv = walk->iv;
54
55 do {
56 if (ctx->cnt == bsize) {
57 if (nbytes < bsize)
58 break;
59 crypto_cipher_encrypt_one(tfm, iv, iv);
60 ctx->cnt = 0;
61 }
62 *dst = *src ^ iv[ctx->cnt];
63 src++;
64 dst++;
65 ctx->cnt++;
66 } while (--nbytes);
67 return nbytes;
68}
69
70static int crypto_ofb_encrypt(struct skcipher_request *req)
71{
72 struct skcipher_walk walk;
73 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
74 unsigned int bsize;
75 struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
76 struct crypto_cipher *child = ctx->child;
77 int ret = 0;
78
79 bsize = crypto_cipher_blocksize(child);
80 ctx->cnt = bsize;
81
82 ret = skcipher_walk_virt(&walk, req, false);
83
84 while (walk.nbytes) {
85 ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
86 ret = skcipher_walk_done(&walk, ret);
87 }
88
89 return ret;
90}
91
92/* OFB encrypt and decrypt are identical */
93static int crypto_ofb_decrypt(struct skcipher_request *req)
94{
95 return crypto_ofb_encrypt(req);
96}
97
98static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
99{
100 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
101 struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
102 struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
103 struct crypto_cipher *cipher;
104
105 cipher = crypto_spawn_cipher(spawn);
106 if (IS_ERR(cipher))
107 return PTR_ERR(cipher);
108
109 ctx->child = cipher;
110 return 0;
111}
112
113static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm)
114{
115 struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
116
117 crypto_free_cipher(ctx->child);
118}
119
120static void crypto_ofb_free(struct skcipher_instance *inst)
121{
122 crypto_drop_skcipher(skcipher_instance_ctx(inst));
123 kfree(inst);
124}
125
126static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
127{
128 struct skcipher_instance *inst;
129 struct crypto_attr_type *algt;
130 struct crypto_spawn *spawn;
131 struct crypto_alg *alg;
132 u32 mask;
133 int err;
134
135 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
136 if (err)
137 return err;
138
139 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
140 if (!inst)
141 return -ENOMEM;
142
143 algt = crypto_get_attr_type(tb);
144 err = PTR_ERR(algt);
145 if (IS_ERR(algt))
146 goto err_free_inst;
147
148 mask = CRYPTO_ALG_TYPE_MASK |
149 crypto_requires_off(algt->type, algt->mask,
150 CRYPTO_ALG_NEED_FALLBACK);
151
152 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
153 err = PTR_ERR(alg);
154 if (IS_ERR(alg))
155 goto err_free_inst;
156
157 spawn = skcipher_instance_ctx(inst);
158 err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
159 CRYPTO_ALG_TYPE_MASK);
160 crypto_mod_put(alg);
161 if (err)
162 goto err_free_inst;
163
164 err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg);
165 if (err)
166 goto err_drop_spawn;
167
168 inst->alg.base.cra_priority = alg->cra_priority;
169 inst->alg.base.cra_blocksize = alg->cra_blocksize;
170 inst->alg.base.cra_alignmask = alg->cra_alignmask;
171
172 /* We access the data as u32s when xoring. */
173 inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
174
175 inst->alg.ivsize = alg->cra_blocksize;
176 inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
177 inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
178
179 inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx);
180
181 inst->alg.init = crypto_ofb_init_tfm;
182 inst->alg.exit = crypto_ofb_exit_tfm;
183
184 inst->alg.setkey = crypto_ofb_setkey;
185 inst->alg.encrypt = crypto_ofb_encrypt;
186 inst->alg.decrypt = crypto_ofb_decrypt;
187
188 inst->free = crypto_ofb_free;
189
190 err = skcipher_register_instance(tmpl, inst);
191 if (err)
192 goto err_drop_spawn;
193
194out:
195 return err;
196
197err_drop_spawn:
198 crypto_drop_spawn(spawn);
199err_free_inst:
200 kfree(inst);
201 goto out;
202}
203
204static struct crypto_template crypto_ofb_tmpl = {
205 .name = "ofb",
206 .create = crypto_ofb_create,
207 .module = THIS_MODULE,
208};
209
210static int __init crypto_ofb_module_init(void)
211{
212 return crypto_register_template(&crypto_ofb_tmpl);
213}
214
215static void __exit crypto_ofb_module_exit(void)
216{
217 crypto_unregister_template(&crypto_ofb_tmpl);
218}
219
220module_init(crypto_ofb_module_init);
221module_exit(crypto_ofb_module_exit);
222
223MODULE_LICENSE("GPL");
224MODULE_DESCRIPTION("OFB block cipher algorithm");
225MODULE_ALIAS_CRYPTO("ofb");
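
For context on the new template above: OFB turns a block cipher into a stream cipher by repeatedly encrypting the IV to produce a keystream that is XORed into the data, which is why crypto_ofb_decrypt() simply calls crypto_ofb_encrypt(). A minimal standalone sketch of that recurrence (block_encrypt() is a hypothetical single-block cipher callback, not a kernel API):

#include <stddef.h>
#include <stdint.h>

#define OFB_BLOCK_SIZE	16	/* e.g. the AES block size */

typedef void (*block_encrypt_t)(uint8_t out[OFB_BLOCK_SIZE],
				const uint8_t in[OFB_BLOCK_SIZE]);

/* Encrypt or decrypt len bytes; both directions are the same operation. */
static void ofb_crypt(block_encrypt_t block_encrypt,
		      uint8_t iv[OFB_BLOCK_SIZE],
		      uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t used = OFB_BLOCK_SIZE;	/* force a fresh keystream block */
	size_t i;

	for (i = 0; i < len; i++) {
		if (used == OFB_BLOCK_SIZE) {
			/* the IV buffer becomes the next keystream block */
			block_encrypt(iv, iv);
			used = 0;
		}
		dst[i] = src[i] ^ iv[used++];
	}
}
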
diff --git a/crypto/rng.c b/crypto/rng.c
index b4a618668161..547f16ecbfb0 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -50,6 +50,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
50 } 50 }
51 51
52 err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); 52 err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
53 crypto_stat_rng_seed(tfm, err);
53out: 54out:
54 kzfree(buf); 55 kzfree(buf);
55 return err; 56 return err;
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 9893dbfc1af4..812476e46821 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -261,15 +261,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
261 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, 261 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
262 ctx->key_size - 1 - req->src_len, req->src); 262 ctx->key_size - 1 - req->src_len, req->src);
263 263
264 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
265 if (!req_ctx->out_buf) {
266 kfree(req_ctx->in_buf);
267 return -ENOMEM;
268 }
269
270 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
271 ctx->key_size, NULL);
272
273 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 264 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
274 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 265 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
275 pkcs1pad_encrypt_sign_complete_cb, req); 266 pkcs1pad_encrypt_sign_complete_cb, req);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 39dbf2f7e5f5..64a412be255e 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -73,9 +73,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
73 info = req->iv; 73 info = req->iv;
74 74
75 if (req->src != req->dst) { 75 if (req->src != req->dst) {
76 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); 76 SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
77 77
78 skcipher_request_set_tfm(nreq, ctx->sknull); 78 skcipher_request_set_sync_tfm(nreq, ctx->sknull);
79 skcipher_request_set_callback(nreq, req->base.flags, 79 skcipher_request_set_callback(nreq, req->base.flags,
80 NULL, NULL); 80 NULL, NULL);
81 skcipher_request_set_crypt(nreq, req->src, req->dst, 81 skcipher_request_set_crypt(nreq, req->src, req->dst,
diff --git a/crypto/shash.c b/crypto/shash.c
index 5d732c6bb4b2..d21f04d70dce 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -73,13 +73,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
73} 73}
74EXPORT_SYMBOL_GPL(crypto_shash_setkey); 74EXPORT_SYMBOL_GPL(crypto_shash_setkey);
75 75
76static inline unsigned int shash_align_buffer_size(unsigned len,
77 unsigned long mask)
78{
79 typedef u8 __aligned_largest u8_aligned;
80 return len + (mask & ~(__alignof__(u8_aligned) - 1));
81}
82
83static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, 76static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
84 unsigned int len) 77 unsigned int len)
85{ 78{
@@ -88,11 +81,17 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
88 unsigned long alignmask = crypto_shash_alignmask(tfm); 81 unsigned long alignmask = crypto_shash_alignmask(tfm);
89 unsigned int unaligned_len = alignmask + 1 - 82 unsigned int unaligned_len = alignmask + 1 -
90 ((unsigned long)data & alignmask); 83 ((unsigned long)data & alignmask);
91 u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)] 84 /*
92 __aligned_largest; 85 * We cannot count on __aligned() working for large values:
86 * https://patchwork.kernel.org/patch/9507697/
87 */
88 u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2];
93 u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); 89 u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
94 int err; 90 int err;
95 91
92 if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
93 return -EINVAL;
94
96 if (unaligned_len > len) 95 if (unaligned_len > len)
97 unaligned_len = len; 96 unaligned_len = len;
98 97
@@ -124,11 +123,17 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
124 unsigned long alignmask = crypto_shash_alignmask(tfm); 123 unsigned long alignmask = crypto_shash_alignmask(tfm);
125 struct shash_alg *shash = crypto_shash_alg(tfm); 124 struct shash_alg *shash = crypto_shash_alg(tfm);
126 unsigned int ds = crypto_shash_digestsize(tfm); 125 unsigned int ds = crypto_shash_digestsize(tfm);
127 u8 ubuf[shash_align_buffer_size(ds, alignmask)] 126 /*
128 __aligned_largest; 127 * We cannot count on __aligned() working for large values:
128 * https://patchwork.kernel.org/patch/9507697/
129 */
130 u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE];
129 u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); 131 u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
130 int err; 132 int err;
131 133
134 if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
135 return -EINVAL;
136
132 err = shash->final(desc, buf); 137 err = shash->final(desc, buf);
133 if (err) 138 if (err)
134 goto out; 139 goto out;
@@ -458,9 +463,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
458{ 463{
459 struct crypto_alg *base = &alg->base; 464 struct crypto_alg *base = &alg->base;
460 465
461 if (alg->digestsize > PAGE_SIZE / 8 || 466 if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
462 alg->descsize > PAGE_SIZE / 8 || 467 alg->descsize > HASH_MAX_DESCSIZE ||
463 alg->statesize > PAGE_SIZE / 8) 468 alg->statesize > HASH_MAX_STATESIZE)
464 return -EINVAL; 469 return -EINVAL;
465 470
466 base->cra_type = &crypto_shash_type; 471 base->cra_type = &crypto_shash_type;
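
The shash hunks above replace alignmask-sized variable-length arrays with worst-case fixed buffers, then use PTR_ALIGN() to pick an aligned pointer inside the buffer and WARN_ON() to guard the bound. A userspace sketch of that over-allocate-and-align pattern (PTR_ALIGN re-implemented and the sizes chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-implementation of the kernel's PTR_ALIGN(): round a
 * pointer up to the next multiple of 'a', where 'a' is a power of two. */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1)))

#define MAX_ALIGNMASK	63	/* illustrative worst-case alignmask */
#define MAX_DIGESTSIZE	64	/* illustrative worst-case digest size */

int main(void)
{
	/* Fixed worst-case buffer instead of a VLA sized at run time. */
	uint8_t ubuf[MAX_ALIGNMASK + MAX_DIGESTSIZE];
	unsigned long alignmask = 31;	/* this algorithm's alignmask */
	uint8_t *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);

	/* buf is 32-byte aligned and at least MAX_DIGESTSIZE bytes remain. */
	printf("ubuf=%p buf=%p\n", (void *)ubuf, (void *)buf);
	return 0;
}
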
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0bd8c6caa498..4caab81d2d02 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -949,6 +949,30 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
949} 949}
950EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); 950EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
951 951
952struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
953 const char *alg_name, u32 type, u32 mask)
954{
955 struct crypto_skcipher *tfm;
956
957 /* Only sync algorithms allowed. */
958 mask |= CRYPTO_ALG_ASYNC;
959
960 tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
961
962 /*
963 * Make sure we do not allocate something that might get used with
964 * an on-stack request: check the request size.
965 */
966 if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
967 MAX_SYNC_SKCIPHER_REQSIZE)) {
968 crypto_free_skcipher(tfm);
969 return ERR_PTR(-EINVAL);
970 }
971
972 return (struct crypto_sync_skcipher *)tfm;
973}
974EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
975
952int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) 976int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
953{ 977{
954 return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, 978 return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
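
The crypto_alloc_sync_skcipher() addition above pairs with the seqiv.c change earlier in this diff: callers that keep requests on the stack must use the sync variant so the request size is bounded by MAX_SYNC_SKCIPHER_REQSIZE. A hedged kernel-style usage sketch follows; the algorithm name, key handling through ->base and the error handling are illustrative assumptions, not taken from this patch:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sync_skcipher_demo(struct scatterlist *sg, unsigned int len,
			      u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	/* Only synchronous implementations may back an on-stack request. */
	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* struct crypto_sync_skcipher wraps a regular skcipher as ->base. */
	err = crypto_skcipher_setkey(&tfm->base, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}
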
diff --git a/crypto/speck.c b/crypto/speck.c
deleted file mode 100644
index 58aa9f7f91f7..000000000000
--- a/crypto/speck.c
+++ /dev/null
@@ -1,307 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Speck: a lightweight block cipher
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Speck has 10 variants, including 5 block sizes. For now we only implement
8 * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
9 * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
10 * and a key size of K bits. The Speck128 variants are believed to be the most
11 * secure variants, and they use the same block size and key sizes as AES. The
12 * Speck64 variants are less secure, but on 32-bit processors are usually
13 * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
14 * secure and/or not as well suited for implementation on either 32-bit or
15 * 64-bit processors, so are omitted.
16 *
17 * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
18 * https://eprint.iacr.org/2013/404.pdf
19 *
20 * In a correspondence, the Speck designers have also clarified that the words
21 * should be interpreted in little-endian format, and the words should be
22 * ordered such that the first word of each block is 'y' rather than 'x', and
23 * the first key word (rather than the last) becomes the first round key.
24 */
25
26#include <asm/unaligned.h>
27#include <crypto/speck.h>
28#include <linux/bitops.h>
29#include <linux/crypto.h>
30#include <linux/init.h>
31#include <linux/module.h>
32
33/* Speck128 */
34
35static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
36{
37 *x = ror64(*x, 8);
38 *x += *y;
39 *x ^= k;
40 *y = rol64(*y, 3);
41 *y ^= *x;
42}
43
44static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
45{
46 *y ^= *x;
47 *y = ror64(*y, 3);
48 *x ^= k;
49 *x -= *y;
50 *x = rol64(*x, 8);
51}
52
53void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
54 u8 *out, const u8 *in)
55{
56 u64 y = get_unaligned_le64(in);
57 u64 x = get_unaligned_le64(in + 8);
58 int i;
59
60 for (i = 0; i < ctx->nrounds; i++)
61 speck128_round(&x, &y, ctx->round_keys[i]);
62
63 put_unaligned_le64(y, out);
64 put_unaligned_le64(x, out + 8);
65}
66EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
67
68static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
69{
70 crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
71}
72
73void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
74 u8 *out, const u8 *in)
75{
76 u64 y = get_unaligned_le64(in);
77 u64 x = get_unaligned_le64(in + 8);
78 int i;
79
80 for (i = ctx->nrounds - 1; i >= 0; i--)
81 speck128_unround(&x, &y, ctx->round_keys[i]);
82
83 put_unaligned_le64(y, out);
84 put_unaligned_le64(x, out + 8);
85}
86EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
87
88static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
89{
90 crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
91}
92
93int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
94 unsigned int keylen)
95{
96 u64 l[3];
97 u64 k;
98 int i;
99
100 switch (keylen) {
101 case SPECK128_128_KEY_SIZE:
102 k = get_unaligned_le64(key);
103 l[0] = get_unaligned_le64(key + 8);
104 ctx->nrounds = SPECK128_128_NROUNDS;
105 for (i = 0; i < ctx->nrounds; i++) {
106 ctx->round_keys[i] = k;
107 speck128_round(&l[0], &k, i);
108 }
109 break;
110 case SPECK128_192_KEY_SIZE:
111 k = get_unaligned_le64(key);
112 l[0] = get_unaligned_le64(key + 8);
113 l[1] = get_unaligned_le64(key + 16);
114 ctx->nrounds = SPECK128_192_NROUNDS;
115 for (i = 0; i < ctx->nrounds; i++) {
116 ctx->round_keys[i] = k;
117 speck128_round(&l[i % 2], &k, i);
118 }
119 break;
120 case SPECK128_256_KEY_SIZE:
121 k = get_unaligned_le64(key);
122 l[0] = get_unaligned_le64(key + 8);
123 l[1] = get_unaligned_le64(key + 16);
124 l[2] = get_unaligned_le64(key + 24);
125 ctx->nrounds = SPECK128_256_NROUNDS;
126 for (i = 0; i < ctx->nrounds; i++) {
127 ctx->round_keys[i] = k;
128 speck128_round(&l[i % 3], &k, i);
129 }
130 break;
131 default:
132 return -EINVAL;
133 }
134
135 return 0;
136}
137EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
138
139static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
140 unsigned int keylen)
141{
142 return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
143}
144
145/* Speck64 */
146
147static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
148{
149 *x = ror32(*x, 8);
150 *x += *y;
151 *x ^= k;
152 *y = rol32(*y, 3);
153 *y ^= *x;
154}
155
156static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
157{
158 *y ^= *x;
159 *y = ror32(*y, 3);
160 *x ^= k;
161 *x -= *y;
162 *x = rol32(*x, 8);
163}
164
165void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
166 u8 *out, const u8 *in)
167{
168 u32 y = get_unaligned_le32(in);
169 u32 x = get_unaligned_le32(in + 4);
170 int i;
171
172 for (i = 0; i < ctx->nrounds; i++)
173 speck64_round(&x, &y, ctx->round_keys[i]);
174
175 put_unaligned_le32(y, out);
176 put_unaligned_le32(x, out + 4);
177}
178EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
179
180static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
181{
182 crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
183}
184
185void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
186 u8 *out, const u8 *in)
187{
188 u32 y = get_unaligned_le32(in);
189 u32 x = get_unaligned_le32(in + 4);
190 int i;
191
192 for (i = ctx->nrounds - 1; i >= 0; i--)
193 speck64_unround(&x, &y, ctx->round_keys[i]);
194
195 put_unaligned_le32(y, out);
196 put_unaligned_le32(x, out + 4);
197}
198EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
199
200static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
201{
202 crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
203}
204
205int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
206 unsigned int keylen)
207{
208 u32 l[3];
209 u32 k;
210 int i;
211
212 switch (keylen) {
213 case SPECK64_96_KEY_SIZE:
214 k = get_unaligned_le32(key);
215 l[0] = get_unaligned_le32(key + 4);
216 l[1] = get_unaligned_le32(key + 8);
217 ctx->nrounds = SPECK64_96_NROUNDS;
218 for (i = 0; i < ctx->nrounds; i++) {
219 ctx->round_keys[i] = k;
220 speck64_round(&l[i % 2], &k, i);
221 }
222 break;
223 case SPECK64_128_KEY_SIZE:
224 k = get_unaligned_le32(key);
225 l[0] = get_unaligned_le32(key + 4);
226 l[1] = get_unaligned_le32(key + 8);
227 l[2] = get_unaligned_le32(key + 12);
228 ctx->nrounds = SPECK64_128_NROUNDS;
229 for (i = 0; i < ctx->nrounds; i++) {
230 ctx->round_keys[i] = k;
231 speck64_round(&l[i % 3], &k, i);
232 }
233 break;
234 default:
235 return -EINVAL;
236 }
237
238 return 0;
239}
240EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
241
242static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
243 unsigned int keylen)
244{
245 return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
246}
247
248/* Algorithm definitions */
249
250static struct crypto_alg speck_algs[] = {
251 {
252 .cra_name = "speck128",
253 .cra_driver_name = "speck128-generic",
254 .cra_priority = 100,
255 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
256 .cra_blocksize = SPECK128_BLOCK_SIZE,
257 .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
258 .cra_module = THIS_MODULE,
259 .cra_u = {
260 .cipher = {
261 .cia_min_keysize = SPECK128_128_KEY_SIZE,
262 .cia_max_keysize = SPECK128_256_KEY_SIZE,
263 .cia_setkey = speck128_setkey,
264 .cia_encrypt = speck128_encrypt,
265 .cia_decrypt = speck128_decrypt
266 }
267 }
268 }, {
269 .cra_name = "speck64",
270 .cra_driver_name = "speck64-generic",
271 .cra_priority = 100,
272 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
273 .cra_blocksize = SPECK64_BLOCK_SIZE,
274 .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
275 .cra_module = THIS_MODULE,
276 .cra_u = {
277 .cipher = {
278 .cia_min_keysize = SPECK64_96_KEY_SIZE,
279 .cia_max_keysize = SPECK64_128_KEY_SIZE,
280 .cia_setkey = speck64_setkey,
281 .cia_encrypt = speck64_encrypt,
282 .cia_decrypt = speck64_decrypt
283 }
284 }
285 }
286};
287
288static int __init speck_module_init(void)
289{
290 return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
291}
292
293static void __exit speck_module_exit(void)
294{
295 crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
296}
297
298module_init(speck_module_init);
299module_exit(speck_module_exit);
300
301MODULE_DESCRIPTION("Speck block cipher (generic)");
302MODULE_LICENSE("GPL");
303MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
304MODULE_ALIAS_CRYPTO("speck128");
305MODULE_ALIAS_CRYPTO("speck128-generic");
306MODULE_ALIAS_CRYPTO("speck64");
307MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bdde95e8d369..c20c9f5c18f2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,8 +76,7 @@ static char *check[] = {
76 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 76 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
77 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", 77 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
78 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", 78 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
79 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512", 79 "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", NULL
80 NULL
81}; 80};
82 81
83static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; 82static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
@@ -1103,6 +1102,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
1103 break; 1102 break;
1104 } 1103 }
1105 1104
1105 if (speed[i].klen)
1106 crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
1107
1106 pr_info("test%3u " 1108 pr_info("test%3u "
1107 "(%5u byte blocks,%5u bytes per update,%4u updates): ", 1109 "(%5u byte blocks,%5u bytes per update,%4u updates): ",
1108 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); 1110 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -1733,6 +1735,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
1733 ret += tcrypt_test("xts(aes)"); 1735 ret += tcrypt_test("xts(aes)");
1734 ret += tcrypt_test("ctr(aes)"); 1736 ret += tcrypt_test("ctr(aes)");
1735 ret += tcrypt_test("rfc3686(ctr(aes))"); 1737 ret += tcrypt_test("rfc3686(ctr(aes))");
1738 ret += tcrypt_test("ofb(aes)");
1736 break; 1739 break;
1737 1740
1738 case 11: 1741 case 11:
@@ -1878,10 +1881,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
1878 ret += tcrypt_test("ecb(seed)"); 1881 ret += tcrypt_test("ecb(seed)");
1879 break; 1882 break;
1880 1883
1881 case 44:
1882 ret += tcrypt_test("zlib");
1883 break;
1884
1885 case 45: 1884 case 45:
1886 ret += tcrypt_test("rfc4309(ccm(aes))"); 1885 ret += tcrypt_test("rfc4309(ccm(aes))");
1887 break; 1886 break;
@@ -2033,6 +2032,8 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
2033 break; 2032 break;
2034 case 191: 2033 case 191:
2035 ret += tcrypt_test("ecb(sm4)"); 2034 ret += tcrypt_test("ecb(sm4)");
2035 ret += tcrypt_test("cbc(sm4)");
2036 ret += tcrypt_test("ctr(sm4)");
2036 break; 2037 break;
2037 case 200: 2038 case 200:
2038 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 2039 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -2282,6 +2283,20 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
2282 num_mb); 2283 num_mb);
2283 break; 2284 break;
2284 2285
2286 case 218:
2287 test_cipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
2288 speed_template_16);
2289 test_cipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
2290 speed_template_16);
2291 test_cipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
2292 speed_template_16);
2293 test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
2294 speed_template_16);
2295 test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
2296 speed_template_16);
2297 test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
2298 speed_template_16);
2299 break;
2285 case 300: 2300 case 300:
2286 if (alg) { 2301 if (alg) {
2287 test_hash_speed(alg, sec, generic_hash_speed_template); 2302 test_hash_speed(alg, sec, generic_hash_speed_template);
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index f0bfee1bb293..d09ea8b10b4f 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -51,6 +51,7 @@ static struct cipher_speed_template des3_speed_template[] = {
51 * Cipher speed tests 51 * Cipher speed tests
52 */ 52 */
53static u8 speed_template_8[] = {8, 0}; 53static u8 speed_template_8[] = {8, 0};
54static u8 speed_template_16[] = {16, 0};
54static u8 speed_template_24[] = {24, 0}; 55static u8 speed_template_24[] = {24, 0};
55static u8 speed_template_8_16[] = {8, 16, 0}; 56static u8 speed_template_8_16[] = {8, 16, 0};
56static u8 speed_template_8_32[] = {8, 32, 0}; 57static u8 speed_template_8_32[] = {8, 32, 0};
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a1d42245082a..b1f79c6bf409 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm,
1400 int ilen; 1400 int ilen;
1401 unsigned int dlen = COMP_BUF_SIZE; 1401 unsigned int dlen = COMP_BUF_SIZE;
1402 1402
1403 memset(output, 0, sizeof(COMP_BUF_SIZE)); 1403 memset(output, 0, COMP_BUF_SIZE);
1404 memset(decomp_output, 0, sizeof(COMP_BUF_SIZE)); 1404 memset(decomp_output, 0, COMP_BUF_SIZE);
1405 1405
1406 ilen = ctemplate[i].inlen; 1406 ilen = ctemplate[i].inlen;
1407 ret = crypto_comp_compress(tfm, ctemplate[i].input, 1407 ret = crypto_comp_compress(tfm, ctemplate[i].input,
@@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm,
1445 int ilen; 1445 int ilen;
1446 unsigned int dlen = COMP_BUF_SIZE; 1446 unsigned int dlen = COMP_BUF_SIZE;
1447 1447
1448 memset(decomp_output, 0, sizeof(COMP_BUF_SIZE)); 1448 memset(decomp_output, 0, COMP_BUF_SIZE);
1449 1449
1450 ilen = dtemplate[i].inlen; 1450 ilen = dtemplate[i].inlen;
1451 ret = crypto_comp_decompress(tfm, dtemplate[i].input, 1451 ret = crypto_comp_decompress(tfm, dtemplate[i].input,
@@ -2662,6 +2662,12 @@ static const struct alg_test_desc alg_test_descs[] = {
2662 .cipher = __VECS(serpent_cbc_tv_template) 2662 .cipher = __VECS(serpent_cbc_tv_template)
2663 }, 2663 },
2664 }, { 2664 }, {
2665 .alg = "cbc(sm4)",
2666 .test = alg_test_skcipher,
2667 .suite = {
2668 .cipher = __VECS(sm4_cbc_tv_template)
2669 }
2670 }, {
2665 .alg = "cbc(twofish)", 2671 .alg = "cbc(twofish)",
2666 .test = alg_test_skcipher, 2672 .test = alg_test_skcipher,
2667 .suite = { 2673 .suite = {
@@ -2785,6 +2791,12 @@ static const struct alg_test_desc alg_test_descs[] = {
2785 .cipher = __VECS(serpent_ctr_tv_template) 2791 .cipher = __VECS(serpent_ctr_tv_template)
2786 } 2792 }
2787 }, { 2793 }, {
2794 .alg = "ctr(sm4)",
2795 .test = alg_test_skcipher,
2796 .suite = {
2797 .cipher = __VECS(sm4_ctr_tv_template)
2798 }
2799 }, {
2788 .alg = "ctr(twofish)", 2800 .alg = "ctr(twofish)",
2789 .test = alg_test_skcipher, 2801 .test = alg_test_skcipher,
2790 .suite = { 2802 .suite = {
@@ -3038,18 +3050,6 @@ static const struct alg_test_desc alg_test_descs[] = {
3038 .cipher = __VECS(sm4_tv_template) 3050 .cipher = __VECS(sm4_tv_template)
3039 } 3051 }
3040 }, { 3052 }, {
3041 .alg = "ecb(speck128)",
3042 .test = alg_test_skcipher,
3043 .suite = {
3044 .cipher = __VECS(speck128_tv_template)
3045 }
3046 }, {
3047 .alg = "ecb(speck64)",
3048 .test = alg_test_skcipher,
3049 .suite = {
3050 .cipher = __VECS(speck64_tv_template)
3051 }
3052 }, {
3053 .alg = "ecb(tea)", 3053 .alg = "ecb(tea)",
3054 .test = alg_test_skcipher, 3054 .test = alg_test_skcipher,
3055 .suite = { 3055 .suite = {
@@ -3577,18 +3577,6 @@ static const struct alg_test_desc alg_test_descs[] = {
3577 .cipher = __VECS(serpent_xts_tv_template) 3577 .cipher = __VECS(serpent_xts_tv_template)
3578 } 3578 }
3579 }, { 3579 }, {
3580 .alg = "xts(speck128)",
3581 .test = alg_test_skcipher,
3582 .suite = {
3583 .cipher = __VECS(speck128_xts_tv_template)
3584 }
3585 }, {
3586 .alg = "xts(speck64)",
3587 .test = alg_test_skcipher,
3588 .suite = {
3589 .cipher = __VECS(speck64_xts_tv_template)
3590 }
3591 }, {
3592 .alg = "xts(twofish)", 3580 .alg = "xts(twofish)",
3593 .test = alg_test_skcipher, 3581 .test = alg_test_skcipher,
3594 .suite = { 3582 .suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 173111c70746..1fe7b97ba03f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -24,8 +24,6 @@
24#ifndef _CRYPTO_TESTMGR_H 24#ifndef _CRYPTO_TESTMGR_H
25#define _CRYPTO_TESTMGR_H 25#define _CRYPTO_TESTMGR_H
26 26
27#include <linux/netlink.h>
28
29#define MAX_DIGEST_SIZE 64 27#define MAX_DIGEST_SIZE 64
30#define MAX_TAP 8 28#define MAX_TAP 8
31 29
@@ -10133,12 +10131,13 @@ static const struct cipher_testvec serpent_xts_tv_template[] = {
10133}; 10131};
10134 10132
10135/* 10133/*
10136 * SM4 test vector taken from the draft RFC 10134 * SM4 test vectors taken from the "The SM4 Blockcipher Algorithm And Its
10137 * https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016 10135 * Modes Of Operations" draft RFC
10136 * https://datatracker.ietf.org/doc/draft-ribose-cfrg-sm4
10138 */ 10137 */
10139 10138
10140static const struct cipher_testvec sm4_tv_template[] = { 10139static const struct cipher_testvec sm4_tv_template[] = {
10141 { /* SM4 Appendix A: Example Calculations. Example 1. */ 10140 { /* GB/T 32907-2016 Example 1. */
10142 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" 10141 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
10143 "\xFE\xDC\xBA\x98\x76\x54\x32\x10", 10142 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
10144 .klen = 16, 10143 .klen = 16,
@@ -10147,10 +10146,7 @@ static const struct cipher_testvec sm4_tv_template[] = {
10147 .ctext = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E" 10146 .ctext = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
10148 "\x86\xB3\xE9\x4F\x53\x6E\x42\x46", 10147 "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
10149 .len = 16, 10148 .len = 16,
10150 }, { /* 10149 }, { /* Last 10 iterations of GB/T 32907-2016 Example 2. */
10151 * SM4 Appendix A: Example Calculations.
10152 * Last 10 iterations of Example 2.
10153 */
10154 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" 10150 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
10155 "\xFE\xDC\xBA\x98\x76\x54\x32\x10", 10151 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
10156 .klen = 16, 10152 .klen = 16,
@@ -10195,744 +10191,116 @@ static const struct cipher_testvec sm4_tv_template[] = {
10195 "\x59\x52\x98\xc7\xc6\xfd\x27\x1f" 10191 "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
10196 "\x4\x2\xf8\x4\xc3\x3d\x3f\x66", 10192 "\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
10197 .len = 160 10193 .len = 160
10194 }, { /* A.2.1.1 SM4-ECB Example 1 */
10195 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
10196 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
10197 .klen = 16,
10198 .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
10199 "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
10200 "\xee\xee\xee\xee\xff\xff\xff\xff"
10201 "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
10202 .ctext = "\x5e\xc8\x14\x3d\xe5\x09\xcf\xf7"
10203 "\xb5\x17\x9f\x8f\x47\x4b\x86\x19"
10204 "\x2f\x1d\x30\x5a\x7f\xb1\x7d\xf9"
10205 "\x85\xf8\x1c\x84\x82\x19\x23\x04",
10206 .len = 32,
10207 }, { /* A.2.1.2 SM4-ECB Example 2 */
10208 .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
10209 "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
10210 .klen = 16,
10211 .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
10212 "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
10213 "\xee\xee\xee\xee\xff\xff\xff\xff"
10214 "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
10215 .ctext = "\xC5\x87\x68\x97\xE4\xA5\x9B\xBB"
10216 "\xA7\x2A\x10\xC8\x38\x72\x24\x5B"
10217 "\x12\xDD\x90\xBC\x2D\x20\x06\x92"
10218 "\xB5\x29\xA4\x15\x5A\xC9\xE6\x00",
10219 .len = 32,
10198 } 10220 }
10199}; 10221};
10200 10222
10201/* 10223static const struct cipher_testvec sm4_cbc_tv_template[] = {
10202 * Speck test vectors taken from the original paper: 10224 { /* A.2.2.1 SM4-CBC Example 1 */
10203 * "The Simon and Speck Families of Lightweight Block Ciphers" 10225 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
10204 * https://eprint.iacr.org/2013/404.pdf 10226 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
10205 *
10206 * Note that the paper does not make byte and word order clear. But it was
10207 * confirmed with the authors that the intended orders are little endian byte
10208 * order and (y, x) word order. Equivalently, the printed test vectors, when
10209 * looking at only the bytes (ignoring the whitespace that divides them into
10210 * words), are backwards: the left-most byte is actually the one with the
10211 * highest memory address, while the right-most byte is actually the one with
10212 * the lowest memory address.
10213 */
10214
10215static const struct cipher_testvec speck128_tv_template[] = {
10216 { /* Speck128/128 */
10217 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
10218 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
10219 .klen = 16, 10227 .klen = 16,
10220 .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74" 10228 .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
10221 "\x20\x65\x71\x75\x69\x76\x61\x6c", 10229 "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
10222 .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" 10230 "\xee\xee\xee\xee\xff\xff\xff\xff"
10223 "\x65\x32\x78\x79\x51\x98\x5d\xa6", 10231 "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
10224 .len = 16, 10232 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
10225 }, { /* Speck128/192 */ 10233 "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
10226 .key = "\x00\x01\x02\x03\x04\x05\x06\x07" 10234 .ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48"
10227 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" 10235 "\x31\x2A\xAE\xB2\x04\x02\x44\xCB"
10228 "\x10\x11\x12\x13\x14\x15\x16\x17", 10236 "\x4C\xB7\x01\x69\x51\x90\x92\x26"
10229 .klen = 24, 10237 "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D",
10230 .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
10231 "\x68\x69\x65\x66\x20\x48\x61\x72",
10232 .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
10233 "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
10234 .len = 16,
10235 }, { /* Speck128/256 */
10236 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
10237 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10238 "\x10\x11\x12\x13\x14\x15\x16\x17"
10239 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
10240 .klen = 32,
10241 .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
10242 "\x49\x6e\x20\x74\x68\x6f\x73\x65",
10243 .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
10244 "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
10245 .len = 16,
10246 },
10247};
10248
10249/*
10250 * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
10251 * ciphertext recomputed with Speck128 as the cipher
10252 */
10253static const struct cipher_testvec speck128_xts_tv_template[] = {
10254 {
10255 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
10256 "\x00\x00\x00\x00\x00\x00\x00\x00"
10257 "\x00\x00\x00\x00\x00\x00\x00\x00"
10258 "\x00\x00\x00\x00\x00\x00\x00\x00",
10259 .klen = 32,
10260 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10261 "\x00\x00\x00\x00\x00\x00\x00\x00",
10262 .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
10263 "\x00\x00\x00\x00\x00\x00\x00\x00"
10264 "\x00\x00\x00\x00\x00\x00\x00\x00"
10265 "\x00\x00\x00\x00\x00\x00\x00\x00",
10266 .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
10267 "\x3b\x99\x4a\x64\x74\x77\xac\xed"
10268 "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
10269 "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
10270 .len = 32,
10271 }, {
10272 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
10273 "\x11\x11\x11\x11\x11\x11\x11\x11"
10274 "\x22\x22\x22\x22\x22\x22\x22\x22"
10275 "\x22\x22\x22\x22\x22\x22\x22\x22",
10276 .klen = 32,
10277 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
10278 "\x00\x00\x00\x00\x00\x00\x00\x00",
10279 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
10280 "\x44\x44\x44\x44\x44\x44\x44\x44"
10281 "\x44\x44\x44\x44\x44\x44\x44\x44"
10282 "\x44\x44\x44\x44\x44\x44\x44\x44",
10283 .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
10284 "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
10285 "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
10286 "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
10287 .len = 32, 10238 .len = 32,
10288 }, { 10239 }, { /* A.2.2.2 SM4-CBC Example 2 */
10289 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" 10240 .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
10290 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" 10241 "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
10291 "\x22\x22\x22\x22\x22\x22\x22\x22" 10242 .klen = 16,
10292 "\x22\x22\x22\x22\x22\x22\x22\x22", 10243 .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
10293 .klen = 32, 10244 "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
10294 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" 10245 "\xee\xee\xee\xee\xff\xff\xff\xff"
10295 "\x00\x00\x00\x00\x00\x00\x00\x00", 10246 "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
10296 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" 10247 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
10297 "\x44\x44\x44\x44\x44\x44\x44\x44" 10248 "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
10298 "\x44\x44\x44\x44\x44\x44\x44\x44" 10249 .ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98"
10299 "\x44\x44\x44\x44\x44\x44\x44\x44", 10250 "\x85\x72\x15\x58\x7b\x7b\xb5\x9a"
10300 .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55" 10251 "\x91\xf2\xc1\x47\x91\x1a\x41\x44"
10301 "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" 10252 "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38",
10302 "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
10303 "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
10304 .len = 32, 10253 .len = 32,
10305 }, {
10306 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10307 "\x23\x53\x60\x28\x74\x71\x35\x26"
10308 "\x31\x41\x59\x26\x53\x58\x97\x93"
10309 "\x23\x84\x62\x64\x33\x83\x27\x95",
10310 .klen = 32,
10311 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10312 "\x00\x00\x00\x00\x00\x00\x00\x00",
10313 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10314 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10315 "\x10\x11\x12\x13\x14\x15\x16\x17"
10316 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10317 "\x20\x21\x22\x23\x24\x25\x26\x27"
10318 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10319 "\x30\x31\x32\x33\x34\x35\x36\x37"
10320 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10321 "\x40\x41\x42\x43\x44\x45\x46\x47"
10322 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10323 "\x50\x51\x52\x53\x54\x55\x56\x57"
10324 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10325 "\x60\x61\x62\x63\x64\x65\x66\x67"
10326 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10327 "\x70\x71\x72\x73\x74\x75\x76\x77"
10328 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10329 "\x80\x81\x82\x83\x84\x85\x86\x87"
10330 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10331 "\x90\x91\x92\x93\x94\x95\x96\x97"
10332 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10333 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10334 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10335 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10336 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10337 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10338 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10339 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10340 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10341 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10342 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10343 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10344 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10345 "\x00\x01\x02\x03\x04\x05\x06\x07"
10346 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10347 "\x10\x11\x12\x13\x14\x15\x16\x17"
10348 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10349 "\x20\x21\x22\x23\x24\x25\x26\x27"
10350 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10351 "\x30\x31\x32\x33\x34\x35\x36\x37"
10352 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10353 "\x40\x41\x42\x43\x44\x45\x46\x47"
10354 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10355 "\x50\x51\x52\x53\x54\x55\x56\x57"
10356 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10357 "\x60\x61\x62\x63\x64\x65\x66\x67"
10358 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10359 "\x70\x71\x72\x73\x74\x75\x76\x77"
10360 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10361 "\x80\x81\x82\x83\x84\x85\x86\x87"
10362 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10363 "\x90\x91\x92\x93\x94\x95\x96\x97"
10364 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10365 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10366 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10367 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10368 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10369 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10370 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10371 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10372 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10373 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10374 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10375 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10376 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10377 .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
10378 "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
10379 "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
10380 "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
10381 "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
10382 "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
10383 "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
10384 "\x19\xc5\x58\x84\x63\xb9\x12\x68"
10385 "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
10386 "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
10387 "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
10388 "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
10389 "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
10390 "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
10391 "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
10392 "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
10393 "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
10394 "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
10395 "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
10396 "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
10397 "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
10398 "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
10399 "\xa5\x20\xac\x24\x1c\x73\x59\x73"
10400 "\x58\x61\x3a\x87\x58\xb3\x20\x56"
10401 "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
10402 "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
10403 "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
10404 "\x09\x35\x71\x50\x65\xac\x92\xe3"
10405 "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
10406 "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
10407 "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
10408 "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
10409 "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
10410 "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
10411 "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
10412 "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
10413 "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
10414 "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
10415 "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
10416 "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
10417 "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
10418 "\x87\x30\xac\xd5\xea\x73\x49\x10"
10419 "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
10420 "\x66\x02\x35\x3d\x60\x06\x36\x4f"
10421 "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
10422 "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
10423 "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
10424 "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
10425 "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
10426 "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
10427 "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
10428 "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
10429 "\x51\x66\x02\x69\x04\x97\x36\xd4"
10430 "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
10431 "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
10432 "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
10433 "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
10434 "\x03\xe7\x05\x39\xf5\x05\x26\xee"
10435 "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
10436 "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
10437 "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
10438 "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
10439 "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
10440 "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
10441 .len = 512,
10442 }, {
10443 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10444 "\x23\x53\x60\x28\x74\x71\x35\x26"
10445 "\x62\x49\x77\x57\x24\x70\x93\x69"
10446 "\x99\x59\x57\x49\x66\x96\x76\x27"
10447 "\x31\x41\x59\x26\x53\x58\x97\x93"
10448 "\x23\x84\x62\x64\x33\x83\x27\x95"
10449 "\x02\x88\x41\x97\x16\x93\x99\x37"
10450 "\x51\x05\x82\x09\x74\x94\x45\x92",
10451 .klen = 64,
10452 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
10453 "\x00\x00\x00\x00\x00\x00\x00\x00",
10454 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10455 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10456 "\x10\x11\x12\x13\x14\x15\x16\x17"
10457 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10458 "\x20\x21\x22\x23\x24\x25\x26\x27"
10459 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10460 "\x30\x31\x32\x33\x34\x35\x36\x37"
10461 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10462 "\x40\x41\x42\x43\x44\x45\x46\x47"
10463 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10464 "\x50\x51\x52\x53\x54\x55\x56\x57"
10465 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10466 "\x60\x61\x62\x63\x64\x65\x66\x67"
10467 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10468 "\x70\x71\x72\x73\x74\x75\x76\x77"
10469 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10470 "\x80\x81\x82\x83\x84\x85\x86\x87"
10471 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10472 "\x90\x91\x92\x93\x94\x95\x96\x97"
10473 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10474 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10475 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10476 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10477 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10478 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10479 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10480 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10481 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10482 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10483 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10484 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10485 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10486 "\x00\x01\x02\x03\x04\x05\x06\x07"
10487 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10488 "\x10\x11\x12\x13\x14\x15\x16\x17"
10489 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10490 "\x20\x21\x22\x23\x24\x25\x26\x27"
10491 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10492 "\x30\x31\x32\x33\x34\x35\x36\x37"
10493 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10494 "\x40\x41\x42\x43\x44\x45\x46\x47"
10495 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10496 "\x50\x51\x52\x53\x54\x55\x56\x57"
10497 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10498 "\x60\x61\x62\x63\x64\x65\x66\x67"
10499 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10500 "\x70\x71\x72\x73\x74\x75\x76\x77"
10501 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10502 "\x80\x81\x82\x83\x84\x85\x86\x87"
10503 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10504 "\x90\x91\x92\x93\x94\x95\x96\x97"
10505 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10506 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10507 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10508 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10509 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10510 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10511 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10512 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10513 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10514 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10515 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10516 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10517 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10518 .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
10519 "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
10520 "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
10521 "\x92\x99\xde\xd3\x76\xed\xcd\x63"
10522 "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
10523 "\x79\x36\x31\x19\x62\xae\x10\x7e"
10524 "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
10525 "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
10526 "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
10527 "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
10528 "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
10529 "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
10530 "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
10531 "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
10532 "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
10533 "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
10534 "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
10535 "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
10536 "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
10537 "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
10538 "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
10539 "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
10540 "\xac\xa5\xd0\x38\xef\x19\x56\x53"
10541 "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
10542 "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
10543 "\xed\x22\x34\x1c\x5d\xed\x17\x06"
10544 "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
10545 "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
10546 "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
10547 "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
10548 "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
10549 "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
10550 "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
10551 "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
10552 "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
10553 "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
10554 "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
10555 "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
10556 "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
10557 "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
10558 "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
10559 "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
10560 "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
10561 "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
10562 "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
10563 "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
10564 "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
10565 "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
10566 "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
10567 "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
10568 "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
10569 "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
10570 "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
10571 "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
10572 "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
10573 "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
10574 "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
10575 "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
10576 "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
10577 "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
10578 "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
10579 "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
10580 "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
10581 "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
10582 .len = 512,
10583 .also_non_np = 1,
10584 .np = 3,
10585 .tap = { 512 - 20, 4, 16 },
10586 } 10254 }
10587}; 10255};
10588 10256
10589static const struct cipher_testvec speck64_tv_template[] = { 10257static const struct cipher_testvec sm4_ctr_tv_template[] = {
10590 { /* Speck64/96 */ 10258 { /* A.2.5.1 SM4-CTR Example 1 */
10591 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" 10259 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
10592 "\x10\x11\x12\x13", 10260 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
10593 .klen = 12,
10594 .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74",
10595 .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
10596 .len = 8,
10597 }, { /* Speck64/128 */
10598 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
10599 "\x10\x11\x12\x13\x18\x19\x1a\x1b",
10600 .klen = 16, 10261 .klen = 16,
10601 .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b", 10262 .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
10602 .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", 10263 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
10603 .len = 8, 10264 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
10604 }, 10265 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
10605}; 10266 "\xee\xee\xee\xee\xee\xee\xee\xee"
10606 10267 "\xff\xff\xff\xff\xff\xff\xff\xff"
10607/* 10268 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
10608 * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the 10269 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
10609 * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted 10270 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
10610 */ 10271 "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
10611static const struct cipher_testvec speck64_xts_tv_template[] = { 10272 .ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07"
10612 { 10273 "\x91\x36\x4c\x39\x5a\x13\x42\xd1"
10613 .key = "\x00\x00\x00\x00\x00\x00\x00\x00" 10274 "\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd"
10614 "\x00\x00\x00\x00\x00\x00\x00\x00" 10275 "\x07\x4c\xce\x38\x5c\xdd\x70\xc7"
10615 "\x00\x00\x00\x00\x00\x00\x00\x00", 10276 "\xf2\x34\xbc\x0e\x24\xc1\x19\x80"
10616 .klen = 24, 10277 "\xfd\x12\x86\x31\x0c\xe3\x7b\x92"
10617 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" 10278 "\x6e\x02\xfc\xd0\xfa\xa0\xba\xf3"
10618 "\x00\x00\x00\x00\x00\x00\x00\x00", 10279 "\x8b\x29\x33\x85\x1d\x82\x45\x14",
10619 .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" 10280 .len = 64,
10620 "\x00\x00\x00\x00\x00\x00\x00\x00" 10281 }, { /* A.2.5.2 SM4-CTR Example 2 */
10621 "\x00\x00\x00\x00\x00\x00\x00\x00" 10282 .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
10622 "\x00\x00\x00\x00\x00\x00\x00\x00", 10283 "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
10623 .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" 10284 .klen = 16,
10624 "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" 10285 .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
10625 "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" 10286 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
10626 "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", 10287 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
10627 .len = 32, 10288 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
10628 }, { 10289 "\xee\xee\xee\xee\xee\xee\xee\xee"
10629 .key = "\x11\x11\x11\x11\x11\x11\x11\x11" 10290 "\xff\xff\xff\xff\xff\xff\xff\xff"
10630 "\x11\x11\x11\x11\x11\x11\x11\x11" 10291 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
10631 "\x22\x22\x22\x22\x22\x22\x22\x22", 10292 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
10632 .klen = 24, 10293 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
10633 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" 10294 "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
10634 "\x00\x00\x00\x00\x00\x00\x00\x00", 10295 .ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74"
10635 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44" 10296 "\x17\xa0\x85\x12\xee\x16\x0e\x2f"
10636 "\x44\x44\x44\x44\x44\x44\x44\x44" 10297 "\x8f\x66\x15\x21\xcb\xba\xb4\x4c"
10637 "\x44\x44\x44\x44\x44\x44\x44\x44" 10298 "\xc8\x71\x38\x44\x5b\xc2\x9e\x5c"
10638 "\x44\x44\x44\x44\x44\x44\x44\x44", 10299 "\x0a\xe0\x29\x72\x05\xd6\x27\x04"
10639 .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59" 10300 "\x17\x3b\x21\x23\x9b\x88\x7f\x6c"
10640 "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" 10301 "\x8c\xb5\xb8\x00\x91\x7a\x24\x88"
10641 "\xb3\x12\x69\x7e\x36\xeb\x52\xff" 10302 "\x28\x4b\xde\x9e\x16\xea\x29\x06",
10642 "\x62\xdd\xba\x90\xb3\xe1\xee\x99", 10303 .len = 64,
10643 .len = 32,
10644 }, {
10645 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
10646 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
10647 "\x22\x22\x22\x22\x22\x22\x22\x22",
10648 .klen = 24,
10649 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
10650 "\x00\x00\x00\x00\x00\x00\x00\x00",
10651 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
10652 "\x44\x44\x44\x44\x44\x44\x44\x44"
10653 "\x44\x44\x44\x44\x44\x44\x44\x44"
10654 "\x44\x44\x44\x44\x44\x44\x44\x44",
10655 .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
10656 "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
10657 "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
10658 "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
10659 .len = 32,
10660 }, {
10661 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10662 "\x23\x53\x60\x28\x74\x71\x35\x26"
10663 "\x31\x41\x59\x26\x53\x58\x97\x93",
10664 .klen = 24,
10665 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10666 "\x00\x00\x00\x00\x00\x00\x00\x00",
10667 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10668 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10669 "\x10\x11\x12\x13\x14\x15\x16\x17"
10670 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10671 "\x20\x21\x22\x23\x24\x25\x26\x27"
10672 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10673 "\x30\x31\x32\x33\x34\x35\x36\x37"
10674 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10675 "\x40\x41\x42\x43\x44\x45\x46\x47"
10676 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10677 "\x50\x51\x52\x53\x54\x55\x56\x57"
10678 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10679 "\x60\x61\x62\x63\x64\x65\x66\x67"
10680 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10681 "\x70\x71\x72\x73\x74\x75\x76\x77"
10682 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10683 "\x80\x81\x82\x83\x84\x85\x86\x87"
10684 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10685 "\x90\x91\x92\x93\x94\x95\x96\x97"
10686 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10687 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10688 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10689 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10690 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10691 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10692 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10693 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10694 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10695 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10696 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10697 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10698 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10699 "\x00\x01\x02\x03\x04\x05\x06\x07"
10700 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10701 "\x10\x11\x12\x13\x14\x15\x16\x17"
10702 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10703 "\x20\x21\x22\x23\x24\x25\x26\x27"
10704 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10705 "\x30\x31\x32\x33\x34\x35\x36\x37"
10706 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10707 "\x40\x41\x42\x43\x44\x45\x46\x47"
10708 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10709 "\x50\x51\x52\x53\x54\x55\x56\x57"
10710 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10711 "\x60\x61\x62\x63\x64\x65\x66\x67"
10712 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10713 "\x70\x71\x72\x73\x74\x75\x76\x77"
10714 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10715 "\x80\x81\x82\x83\x84\x85\x86\x87"
10716 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10717 "\x90\x91\x92\x93\x94\x95\x96\x97"
10718 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10719 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10720 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10721 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10722 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10723 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10724 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10725 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10726 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10727 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10728 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10729 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10730 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10731 .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
10732 "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
10733 "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
10734 "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
10735 "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
10736 "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
10737 "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
10738 "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
10739 "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
10740 "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
10741 "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
10742 "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
10743 "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
10744 "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
10745 "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
10746 "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
10747 "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
10748 "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
10749 "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
10750 "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
10751 "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
10752 "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
10753 "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
10754 "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
10755 "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
10756 "\x07\xff\xf3\x72\x74\x48\xb5\x40"
10757 "\x50\xb5\xdd\x90\x43\x31\x18\x15"
10758 "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
10759 "\x29\x93\x90\x8b\xda\x07\xf0\x35"
10760 "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
10761 "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
10762 "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
10763 "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
10764 "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
10765 "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
10766 "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
10767 "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
10768 "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
10769 "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
10770 "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
10771 "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
10772 "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
10773 "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
10774 "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
10775 "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
10776 "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
10777 "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
10778 "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
10779 "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
10780 "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
10781 "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
10782 "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
10783 "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
10784 "\x59\xda\xee\x1a\x22\x18\xda\x0d"
10785 "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
10786 "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
10787 "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
10788 "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
10789 "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
10790 "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
10791 "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
10792 "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
10793 "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
10794 "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
10795 .len = 512,
10796 }, {
10797 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10798 "\x23\x53\x60\x28\x74\x71\x35\x26"
10799 "\x62\x49\x77\x57\x24\x70\x93\x69"
10800 "\x99\x59\x57\x49\x66\x96\x76\x27",
10801 .klen = 32,
10802 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
10803 "\x00\x00\x00\x00\x00\x00\x00\x00",
10804 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10805 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10806 "\x10\x11\x12\x13\x14\x15\x16\x17"
10807 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10808 "\x20\x21\x22\x23\x24\x25\x26\x27"
10809 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10810 "\x30\x31\x32\x33\x34\x35\x36\x37"
10811 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10812 "\x40\x41\x42\x43\x44\x45\x46\x47"
10813 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10814 "\x50\x51\x52\x53\x54\x55\x56\x57"
10815 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10816 "\x60\x61\x62\x63\x64\x65\x66\x67"
10817 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10818 "\x70\x71\x72\x73\x74\x75\x76\x77"
10819 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10820 "\x80\x81\x82\x83\x84\x85\x86\x87"
10821 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10822 "\x90\x91\x92\x93\x94\x95\x96\x97"
10823 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10824 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10825 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10826 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10827 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10828 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10829 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10830 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10831 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10832 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10833 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10834 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10835 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10836 "\x00\x01\x02\x03\x04\x05\x06\x07"
10837 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10838 "\x10\x11\x12\x13\x14\x15\x16\x17"
10839 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10840 "\x20\x21\x22\x23\x24\x25\x26\x27"
10841 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10842 "\x30\x31\x32\x33\x34\x35\x36\x37"
10843 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10844 "\x40\x41\x42\x43\x44\x45\x46\x47"
10845 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10846 "\x50\x51\x52\x53\x54\x55\x56\x57"
10847 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10848 "\x60\x61\x62\x63\x64\x65\x66\x67"
10849 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10850 "\x70\x71\x72\x73\x74\x75\x76\x77"
10851 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10852 "\x80\x81\x82\x83\x84\x85\x86\x87"
10853 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10854 "\x90\x91\x92\x93\x94\x95\x96\x97"
10855 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10856 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10857 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10858 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10859 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10860 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10861 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10862 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10863 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10864 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10865 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10866 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10867 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10868 .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
10869 "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
10870 "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
10871 "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
10872 "\x78\x16\xea\x80\xdb\x33\x75\x94"
10873 "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
10874 "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
10875 "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
10876 "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
10877 "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
10878 "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
10879 "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
10880 "\x84\x5e\x46\xed\x20\x89\xb6\x44"
10881 "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
10882 "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
10883 "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
10884 "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
10885 "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
10886 "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
10887 "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
10888 "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
10889 "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
10890 "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
10891 "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
10892 "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
10893 "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
10894 "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
10895 "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
10896 "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
10897 "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
10898 "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
10899 "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
10900 "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
10901 "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
10902 "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
10903 "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
10904 "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
10905 "\x52\x7f\x29\x51\x94\x20\x67\x3c"
10906 "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
10907 "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
10908 "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
10909 "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
10910 "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
10911 "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
10912 "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
10913 "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
10914 "\x65\x53\x0f\x41\x11\xbd\x98\x99"
10915 "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
10916 "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
10917 "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
10918 "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
10919 "\x63\x51\x34\x9d\x96\x12\xae\xce"
10920 "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
10921 "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
10922 "\x54\x27\x74\xbb\x10\x86\x57\x46"
10923 "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
10924 "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
10925 "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
10926 "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
10927 "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
10928 "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
10929 "\x9b\x63\x76\x32\x2f\x19\x72\x10"
10930 "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
10931 "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
10932 .len = 512,
10933 .also_non_np = 1,
10934 .np = 3,
10935 .tap = { 512 - 20, 4, 16 },
10936 } 10304 }
10937}; 10305};
10938 10306
@@ -13883,6 +13251,27 @@ static const struct cipher_testvec aes_lrw_tv_template[] = {
13883 .ctext = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f" 13251 .ctext = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
13884 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5", 13252 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
13885 .len = 16, 13253 .len = 16,
13254 }, { /* Test counter wrap-around, modified from LRW-32-AES 1 */
13255 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
13256 "\x4c\x26\x84\x14\xb5\x68\x01\x85"
13257 "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
13258 "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
13259 .klen = 32,
13260 .iv = "\xff\xff\xff\xff\xff\xff\xff\xff"
13261 "\xff\xff\xff\xff\xff\xff\xff\xff",
13262 .ptext = "\x30\x31\x32\x33\x34\x35\x36\x37"
13263 "\x38\x39\x41\x42\x43\x44\x45\x46"
13264 "\x30\x31\x32\x33\x34\x35\x36\x37"
13265 "\x38\x39\x41\x42\x43\x44\x45\x46"
13266 "\x30\x31\x32\x33\x34\x35\x36\x37"
13267 "\x38\x39\x41\x42\x43\x44\x45\x46",
13268 .ctext = "\x47\x90\x50\xf6\xf4\x8d\x5c\x7f"
13269 "\x84\xc7\x83\x95\x2d\xa2\x02\xc0"
13270 "\xda\x7f\xa3\xc0\x88\x2a\x0a\x50"
13271 "\xfb\xc1\x78\x03\x39\xfe\x1d\xe5"
13272 "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
13273 "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
13274 .len = 48,
13886 }, { 13275 }, {
13887/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */ 13276/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
13888 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c" 13277 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
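
The "counter wrap-around" vector added above exercises the point where the 16-byte LRW block counter, seeded from the IV, rolls over from all-ones back to zero. Conceptually the counter is a 128-bit big-endian integer that advances by one per 16-byte block; the sketch below is illustrative only (the in-tree lrw template computes the tweak incrementally rather than re-deriving it from a stored counter like this) and just shows the roll-over.

	#include <stdio.h>
	#include <string.h>

	/* big-endian increment of a 16-byte counter, wrapping at all-ones */
	static void ctr_inc(unsigned char ctr[16])
	{
		int i;

		for (i = 15; i >= 0; i--)
			if (++ctr[i])	/* stop once a byte does not wrap to zero */
				break;
	}

	int main(void)
	{
		unsigned char ctr[16];
		int i;

		memset(ctr, 0xff, sizeof(ctr));	/* the .iv of the wrap-around vector */
		ctr_inc(ctr);			/* next block: rolls over to all zeroes */

		for (i = 0; i < 16; i++)
			printf("%02x", ctr[i]);
		putchar('\n');
		return 0;
	}
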
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 25c75af50d3f..c055f57fab11 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -57,15 +57,17 @@ struct xcbc_desc_ctx {
57 u8 ctx[]; 57 u8 ctx[];
58}; 58};
59 59
60#define XCBC_BLOCKSIZE 16
61
60static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, 62static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
61 const u8 *inkey, unsigned int keylen) 63 const u8 *inkey, unsigned int keylen)
62{ 64{
63 unsigned long alignmask = crypto_shash_alignmask(parent); 65 unsigned long alignmask = crypto_shash_alignmask(parent);
64 struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); 66 struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
65 int bs = crypto_shash_blocksize(parent);
66 u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); 67 u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
67 int err = 0; 68 int err = 0;
68 u8 key1[bs]; 69 u8 key1[XCBC_BLOCKSIZE];
70 int bs = sizeof(key1);
69 71
70 if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) 72 if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
71 return err; 73 return err;
@@ -212,7 +214,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
212 return PTR_ERR(alg); 214 return PTR_ERR(alg);
213 215
214 switch(alg->cra_blocksize) { 216 switch(alg->cra_blocksize) {
215 case 16: 217 case XCBC_BLOCKSIZE:
216 break; 218 break;
217 default: 219 default:
218 goto out_put_alg; 220 goto out_put_alg;
diff --git a/crypto/xts.c b/crypto/xts.c
index ccf55fbb8bc2..847f54f76789 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -26,8 +26,6 @@
26#include <crypto/b128ops.h> 26#include <crypto/b128ops.h>
27#include <crypto/gf128mul.h> 27#include <crypto/gf128mul.h>
28 28
29#define XTS_BUFFER_SIZE 128u
30
31struct priv { 29struct priv {
32 struct crypto_skcipher *child; 30 struct crypto_skcipher *child;
33 struct crypto_cipher *tweak; 31 struct crypto_cipher *tweak;
@@ -39,19 +37,7 @@ struct xts_instance_ctx {
39}; 37};
40 38
41struct rctx { 39struct rctx {
42 le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
43
44 le128 t; 40 le128 t;
45
46 le128 *ext;
47
48 struct scatterlist srcbuf[2];
49 struct scatterlist dstbuf[2];
50 struct scatterlist *src;
51 struct scatterlist *dst;
52
53 unsigned int left;
54
55 struct skcipher_request subreq; 41 struct skcipher_request subreq;
56}; 42};
57 43
@@ -96,81 +82,27 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
96 return err; 82 return err;
97} 83}
98 84
99static int post_crypt(struct skcipher_request *req) 85/*
86 * We compute the tweak masks twice (both before and after the ECB encryption or
87 * decryption) to avoid having to allocate a temporary buffer and/or make
 88 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
89 * just doing the gf128mul_x_ble() calls again.
90 */
91static int xor_tweak(struct skcipher_request *req, bool second_pass)
100{ 92{
101 struct rctx *rctx = skcipher_request_ctx(req); 93 struct rctx *rctx = skcipher_request_ctx(req);
102 le128 *buf = rctx->ext ?: rctx->buf; 94 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
103 struct skcipher_request *subreq;
104 const int bs = XTS_BLOCK_SIZE; 95 const int bs = XTS_BLOCK_SIZE;
105 struct skcipher_walk w; 96 struct skcipher_walk w;
106 struct scatterlist *sg; 97 le128 t = rctx->t;
107 unsigned offset;
108 int err; 98 int err;
109 99
110 subreq = &rctx->subreq; 100 if (second_pass) {
111 err = skcipher_walk_virt(&w, subreq, false); 101 req = &rctx->subreq;
112 102 /* set to our TFM to enforce correct alignment: */
113 while (w.nbytes) { 103 skcipher_request_set_tfm(req, tfm);
114 unsigned int avail = w.nbytes;
115 le128 *wdst;
116
117 wdst = w.dst.virt.addr;
118
119 do {
120 le128_xor(wdst, buf++, wdst);
121 wdst++;
122 } while ((avail -= bs) >= bs);
123
124 err = skcipher_walk_done(&w, avail);
125 } 104 }
126 105 err = skcipher_walk_virt(&w, req, false);
127 rctx->left -= subreq->cryptlen;
128
129 if (err || !rctx->left)
130 goto out;
131
132 rctx->dst = rctx->dstbuf;
133
134 scatterwalk_done(&w.out, 0, 1);
135 sg = w.out.sg;
136 offset = w.out.offset;
137
138 if (rctx->dst != sg) {
139 rctx->dst[0] = *sg;
140 sg_unmark_end(rctx->dst);
141 scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
142 }
143 rctx->dst[0].length -= offset - sg->offset;
144 rctx->dst[0].offset = offset;
145
146out:
147 return err;
148}
149
150static int pre_crypt(struct skcipher_request *req)
151{
152 struct rctx *rctx = skcipher_request_ctx(req);
153 le128 *buf = rctx->ext ?: rctx->buf;
154 struct skcipher_request *subreq;
155 const int bs = XTS_BLOCK_SIZE;
156 struct skcipher_walk w;
157 struct scatterlist *sg;
158 unsigned cryptlen;
159 unsigned offset;
160 bool more;
161 int err;
162
163 subreq = &rctx->subreq;
164 cryptlen = subreq->cryptlen;
165
166 more = rctx->left > cryptlen;
167 if (!more)
168 cryptlen = rctx->left;
169
170 skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
171 cryptlen, NULL);
172
173 err = skcipher_walk_virt(&w, subreq, false);
174 106
175 while (w.nbytes) { 107 while (w.nbytes) {
176 unsigned int avail = w.nbytes; 108 unsigned int avail = w.nbytes;
@@ -181,180 +113,71 @@ static int pre_crypt(struct skcipher_request *req)
181 wdst = w.dst.virt.addr; 113 wdst = w.dst.virt.addr;
182 114
183 do { 115 do {
184 *buf++ = rctx->t; 116 le128_xor(wdst++, &t, wsrc++);
185 le128_xor(wdst++, &rctx->t, wsrc++); 117 gf128mul_x_ble(&t, &t);
186 gf128mul_x_ble(&rctx->t, &rctx->t);
187 } while ((avail -= bs) >= bs); 118 } while ((avail -= bs) >= bs);
188 119
189 err = skcipher_walk_done(&w, avail); 120 err = skcipher_walk_done(&w, avail);
190 } 121 }
191 122
192 skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
193 cryptlen, NULL);
194
195 if (err || !more)
196 goto out;
197
198 rctx->src = rctx->srcbuf;
199
200 scatterwalk_done(&w.in, 0, 1);
201 sg = w.in.sg;
202 offset = w.in.offset;
203
204 if (rctx->src != sg) {
205 rctx->src[0] = *sg;
206 sg_unmark_end(rctx->src);
207 scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
208 }
209 rctx->src[0].length -= offset - sg->offset;
210 rctx->src[0].offset = offset;
211
212out:
213 return err; 123 return err;
214} 124}
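
As the comment above the new xor_tweak() explains, the second pass regenerates the tweak stream instead of buffering it, and that regeneration costs one GF(2^128) doubling per 16-byte block. A standalone sketch of that doubling (illustrative only; it mirrors what gf128mul_x_ble() does on a low/high pair of 64-bit words, using the x^128 + x^7 + x^2 + x + 1 reduction):

	#include <stdint.h>
	#include <stdio.h>

	struct tweak128 {
		uint64_t lo;	/* least-significant 64 bits */
		uint64_t hi;	/* most-significant 64 bits */
	};

	/* multiply the tweak by x in GF(2^128): shift left, fold the carry back */
	static void tweak_mul_x(struct tweak128 *t)
	{
		uint64_t carry = t->hi >> 63;		/* bit shifted out of the top */

		t->hi = (t->hi << 1) | (t->lo >> 63);
		t->lo = (t->lo << 1) ^ (carry * 0x87);	/* x^7 + x^2 + x + 1 */
	}

	int main(void)
	{
		struct tweak128 t = { .lo = 1, .hi = 0 };	/* start from t = 1 */

		tweak_mul_x(&t);	/* one block's worth of work on the second pass */
		printf("%016llx%016llx\n", (unsigned long long)t.hi,
		       (unsigned long long)t.lo);	/* prints ...0002, i.e. t = x */
		return 0;
	}
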
215 125
216static int init_crypt(struct skcipher_request *req, crypto_completion_t done) 126static int xor_tweak_pre(struct skcipher_request *req)
217{ 127{
218 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); 128 return xor_tweak(req, false);
219 struct rctx *rctx = skcipher_request_ctx(req);
220 struct skcipher_request *subreq;
221 gfp_t gfp;
222
223 subreq = &rctx->subreq;
224 skcipher_request_set_tfm(subreq, ctx->child);
225 skcipher_request_set_callback(subreq, req->base.flags, done, req);
226
227 gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
228 GFP_ATOMIC;
229 rctx->ext = NULL;
230
231 subreq->cryptlen = XTS_BUFFER_SIZE;
232 if (req->cryptlen > XTS_BUFFER_SIZE) {
233 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
234
235 rctx->ext = kmalloc(n, gfp);
236 if (rctx->ext)
237 subreq->cryptlen = n;
238 }
239
240 rctx->src = req->src;
241 rctx->dst = req->dst;
242 rctx->left = req->cryptlen;
243
244 /* calculate first value of T */
245 crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
246
247 return 0;
248} 129}
249 130
250static void exit_crypt(struct skcipher_request *req) 131static int xor_tweak_post(struct skcipher_request *req)
251{ 132{
252 struct rctx *rctx = skcipher_request_ctx(req); 133 return xor_tweak(req, true);
253
254 rctx->left = 0;
255
256 if (rctx->ext)
257 kzfree(rctx->ext);
258} 134}
259 135
260static int do_encrypt(struct skcipher_request *req, int err) 136static void crypt_done(struct crypto_async_request *areq, int err)
261{
262 struct rctx *rctx = skcipher_request_ctx(req);
263 struct skcipher_request *subreq;
264
265 subreq = &rctx->subreq;
266
267 while (!err && rctx->left) {
268 err = pre_crypt(req) ?:
269 crypto_skcipher_encrypt(subreq) ?:
270 post_crypt(req);
271
272 if (err == -EINPROGRESS || err == -EBUSY)
273 return err;
274 }
275
276 exit_crypt(req);
277 return err;
278}
279
280static void encrypt_done(struct crypto_async_request *areq, int err)
281{ 137{
282 struct skcipher_request *req = areq->data; 138 struct skcipher_request *req = areq->data;
283 struct skcipher_request *subreq;
284 struct rctx *rctx;
285
286 rctx = skcipher_request_ctx(req);
287
288 if (err == -EINPROGRESS) {
289 if (rctx->left != req->cryptlen)
290 return;
291 goto out;
292 }
293
294 subreq = &rctx->subreq;
295 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
296 139
297 err = do_encrypt(req, err ?: post_crypt(req)); 140 if (!err)
298 if (rctx->left) 141 err = xor_tweak_post(req);
299 return;
300 142
301out:
302 skcipher_request_complete(req, err); 143 skcipher_request_complete(req, err);
303} 144}
304 145
305static int encrypt(struct skcipher_request *req) 146static void init_crypt(struct skcipher_request *req)
306{
307 return do_encrypt(req, init_crypt(req, encrypt_done));
308}
309
310static int do_decrypt(struct skcipher_request *req, int err)
311{ 147{
148 struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
312 struct rctx *rctx = skcipher_request_ctx(req); 149 struct rctx *rctx = skcipher_request_ctx(req);
313 struct skcipher_request *subreq; 150 struct skcipher_request *subreq = &rctx->subreq;
314
315 subreq = &rctx->subreq;
316 151
317 while (!err && rctx->left) { 152 skcipher_request_set_tfm(subreq, ctx->child);
318 err = pre_crypt(req) ?: 153 skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
319 crypto_skcipher_decrypt(subreq) ?: 154 skcipher_request_set_crypt(subreq, req->dst, req->dst,
320 post_crypt(req); 155 req->cryptlen, NULL);
321
322 if (err == -EINPROGRESS || err == -EBUSY)
323 return err;
324 }
325 156
326 exit_crypt(req); 157 /* calculate first value of T */
327 return err; 158 crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
328} 159}
329 160
330static void decrypt_done(struct crypto_async_request *areq, int err) 161static int encrypt(struct skcipher_request *req)
331{ 162{
332 struct skcipher_request *req = areq->data; 163 struct rctx *rctx = skcipher_request_ctx(req);
333 struct skcipher_request *subreq; 164 struct skcipher_request *subreq = &rctx->subreq;
334 struct rctx *rctx;
335
336 rctx = skcipher_request_ctx(req);
337
338 if (err == -EINPROGRESS) {
339 if (rctx->left != req->cryptlen)
340 return;
341 goto out;
342 }
343
344 subreq = &rctx->subreq;
345 subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
346
347 err = do_decrypt(req, err ?: post_crypt(req));
348 if (rctx->left)
349 return;
350 165
351out: 166 init_crypt(req);
352 skcipher_request_complete(req, err); 167 return xor_tweak_pre(req) ?:
168 crypto_skcipher_encrypt(subreq) ?:
169 xor_tweak_post(req);
353} 170}
354 171
355static int decrypt(struct skcipher_request *req) 172static int decrypt(struct skcipher_request *req)
356{ 173{
357 return do_decrypt(req, init_crypt(req, decrypt_done)); 174 struct rctx *rctx = skcipher_request_ctx(req);
175 struct skcipher_request *subreq = &rctx->subreq;
176
177 init_crypt(req);
178 return xor_tweak_pre(req) ?:
179 crypto_skcipher_decrypt(subreq) ?:
180 xor_tweak_post(req);
358} 181}
359 182
360static int init_tfm(struct crypto_skcipher *tfm) 183static int init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 7033a4beda66..254ee7d54e91 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -45,7 +45,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
45 char cms[LO_NAME_SIZE]; /* cipher-mode string */ 45 char cms[LO_NAME_SIZE]; /* cipher-mode string */
46 char *mode; 46 char *mode;
47 char *cmsp = cms; /* c-m string pointer */ 47 char *cmsp = cms; /* c-m string pointer */
48 struct crypto_skcipher *tfm; 48 struct crypto_sync_skcipher *tfm;
49 49
50 /* encryption breaks for non sector aligned offsets */ 50 /* encryption breaks for non sector aligned offsets */
51 51
@@ -80,13 +80,13 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
80 *cmsp++ = ')'; 80 *cmsp++ = ')';
81 *cmsp = 0; 81 *cmsp = 0;
82 82
83 tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC); 83 tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
84 if (IS_ERR(tfm)) 84 if (IS_ERR(tfm))
85 return PTR_ERR(tfm); 85 return PTR_ERR(tfm);
86 86
87 err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key, 87 err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
88 info->lo_encrypt_key_size); 88 info->lo_encrypt_key_size);
89 89
90 if (err != 0) 90 if (err != 0)
91 goto out_free_tfm; 91 goto out_free_tfm;
92 92
@@ -94,7 +94,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
94 return 0; 94 return 0;
95 95
96 out_free_tfm: 96 out_free_tfm:
97 crypto_free_skcipher(tfm); 97 crypto_free_sync_skcipher(tfm);
98 98
99 out: 99 out:
100 return err; 100 return err;
@@ -109,8 +109,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
109 struct page *loop_page, unsigned loop_off, 109 struct page *loop_page, unsigned loop_off,
110 int size, sector_t IV) 110 int size, sector_t IV)
111{ 111{
112 struct crypto_skcipher *tfm = lo->key_data; 112 struct crypto_sync_skcipher *tfm = lo->key_data;
113 SKCIPHER_REQUEST_ON_STACK(req, tfm); 113 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
114 struct scatterlist sg_out; 114 struct scatterlist sg_out;
115 struct scatterlist sg_in; 115 struct scatterlist sg_in;
116 116
@@ -119,7 +119,7 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
119 unsigned in_offs, out_offs; 119 unsigned in_offs, out_offs;
120 int err; 120 int err;
121 121
122 skcipher_request_set_tfm(req, tfm); 122 skcipher_request_set_sync_tfm(req, tfm);
123 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, 123 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
124 NULL, NULL); 124 NULL, NULL);
125 125
@@ -175,9 +175,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
175static int 175static int
176cryptoloop_release(struct loop_device *lo) 176cryptoloop_release(struct loop_device *lo)
177{ 177{
178 struct crypto_skcipher *tfm = lo->key_data; 178 struct crypto_sync_skcipher *tfm = lo->key_data;
179 if (tfm != NULL) { 179 if (tfm != NULL) {
180 crypto_free_skcipher(tfm); 180 crypto_free_sync_skcipher(tfm);
181 lo->key_data = NULL; 181 lo->key_data = NULL;
182 return 0; 182 return 0;
183 } 183 }
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5d8266c6571f..4552b06fe601 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -188,6 +188,10 @@ struct device_type fsl_mc_bus_dprtc_type = {
188 .name = "fsl_mc_bus_dprtc" 188 .name = "fsl_mc_bus_dprtc"
189}; 189};
190 190
191struct device_type fsl_mc_bus_dpseci_type = {
192 .name = "fsl_mc_bus_dpseci"
193};
194
191static struct device_type *fsl_mc_get_device_type(const char *type) 195static struct device_type *fsl_mc_get_device_type(const char *type)
192{ 196{
193 static const struct { 197 static const struct {
@@ -203,6 +207,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
203 { &fsl_mc_bus_dpmcp_type, "dpmcp" }, 207 { &fsl_mc_bus_dpmcp_type, "dpmcp" },
204 { &fsl_mc_bus_dpmac_type, "dpmac" }, 208 { &fsl_mc_bus_dpmac_type, "dpmac" },
205 { &fsl_mc_bus_dprtc_type, "dprtc" }, 209 { &fsl_mc_bus_dprtc_type, "dprtc" },
210 { &fsl_mc_bus_dpseci_type, "dpseci" },
206 { NULL, NULL } 211 { NULL, NULL }
207 }; 212 };
208 int i; 213 int i;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aaf9e5afaad4..95be7228f327 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -44,10 +44,10 @@ static unsigned short default_quality; /* = 0; default to "off" */
44 44
45module_param(current_quality, ushort, 0644); 45module_param(current_quality, ushort, 0644);
46MODULE_PARM_DESC(current_quality, 46MODULE_PARM_DESC(current_quality,
47 "current hwrng entropy estimation per mill"); 47 "current hwrng entropy estimation per 1024 bits of input");
48module_param(default_quality, ushort, 0644); 48module_param(default_quality, ushort, 0644);
49MODULE_PARM_DESC(default_quality, 49MODULE_PARM_DESC(default_quality,
50 "default entropy content of hwrng per mill"); 50 "default entropy content of hwrng per 1024 bits of input");
51 51
52static void drop_current_rng(void); 52static void drop_current_rng(void);
53static int hwrng_init(struct hwrng *rng); 53static int hwrng_init(struct hwrng *rng);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c75b6cdf0053..2eb70e76ed35 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -433,9 +433,9 @@ static int crng_init_cnt = 0;
433static unsigned long crng_global_init_time = 0; 433static unsigned long crng_global_init_time = 0;
434#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) 434#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
435static void _extract_crng(struct crng_state *crng, 435static void _extract_crng(struct crng_state *crng,
436 __u32 out[CHACHA20_BLOCK_WORDS]); 436 __u8 out[CHACHA20_BLOCK_SIZE]);
437static void _crng_backtrack_protect(struct crng_state *crng, 437static void _crng_backtrack_protect(struct crng_state *crng,
438 __u32 tmp[CHACHA20_BLOCK_WORDS], int used); 438 __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
439static void process_random_ready_list(void); 439static void process_random_ready_list(void);
440static void _get_random_bytes(void *buf, int nbytes); 440static void _get_random_bytes(void *buf, int nbytes);
441 441
@@ -926,7 +926,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
926 unsigned long flags; 926 unsigned long flags;
927 int i, num; 927 int i, num;
928 union { 928 union {
929 __u32 block[CHACHA20_BLOCK_WORDS]; 929 __u8 block[CHACHA20_BLOCK_SIZE];
930 __u32 key[8]; 930 __u32 key[8];
931 } buf; 931 } buf;
932 932
@@ -973,7 +973,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
973} 973}
974 974
975static void _extract_crng(struct crng_state *crng, 975static void _extract_crng(struct crng_state *crng,
976 __u32 out[CHACHA20_BLOCK_WORDS]) 976 __u8 out[CHACHA20_BLOCK_SIZE])
977{ 977{
978 unsigned long v, flags; 978 unsigned long v, flags;
979 979
@@ -990,7 +990,7 @@ static void _extract_crng(struct crng_state *crng,
990 spin_unlock_irqrestore(&crng->lock, flags); 990 spin_unlock_irqrestore(&crng->lock, flags);
991} 991}
992 992
993static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS]) 993static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
994{ 994{
995 struct crng_state *crng = NULL; 995 struct crng_state *crng = NULL;
996 996
@@ -1008,7 +1008,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
1008 * enough) to mutate the CRNG key to provide backtracking protection. 1008 * enough) to mutate the CRNG key to provide backtracking protection.
1009 */ 1009 */
1010static void _crng_backtrack_protect(struct crng_state *crng, 1010static void _crng_backtrack_protect(struct crng_state *crng,
1011 __u32 tmp[CHACHA20_BLOCK_WORDS], int used) 1011 __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
1012{ 1012{
1013 unsigned long flags; 1013 unsigned long flags;
1014 __u32 *s, *d; 1014 __u32 *s, *d;
@@ -1020,14 +1020,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
1020 used = 0; 1020 used = 0;
1021 } 1021 }
1022 spin_lock_irqsave(&crng->lock, flags); 1022 spin_lock_irqsave(&crng->lock, flags);
1023 s = &tmp[used / sizeof(__u32)]; 1023 s = (__u32 *) &tmp[used];
1024 d = &crng->state[4]; 1024 d = &crng->state[4];
1025 for (i=0; i < 8; i++) 1025 for (i=0; i < 8; i++)
1026 *d++ ^= *s++; 1026 *d++ ^= *s++;
1027 spin_unlock_irqrestore(&crng->lock, flags); 1027 spin_unlock_irqrestore(&crng->lock, flags);
1028} 1028}
1029 1029
1030static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used) 1030static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
1031{ 1031{
1032 struct crng_state *crng = NULL; 1032 struct crng_state *crng = NULL;
1033 1033
@@ -1043,7 +1043,7 @@ static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
1043static ssize_t extract_crng_user(void __user *buf, size_t nbytes) 1043static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
1044{ 1044{
1045 ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE; 1045 ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
1046 __u32 tmp[CHACHA20_BLOCK_WORDS]; 1046 __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
1047 int large_request = (nbytes > 256); 1047 int large_request = (nbytes > 256);
1048 1048
1049 while (nbytes) { 1049 while (nbytes) {
@@ -1622,7 +1622,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
1622 */ 1622 */
1623static void _get_random_bytes(void *buf, int nbytes) 1623static void _get_random_bytes(void *buf, int nbytes)
1624{ 1624{
1625 __u32 tmp[CHACHA20_BLOCK_WORDS]; 1625 __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
1626 1626
1627 trace_get_random_bytes(nbytes, _RET_IP_); 1627 trace_get_random_bytes(nbytes, _RET_IP_);
1628 1628
@@ -2248,7 +2248,7 @@ u64 get_random_u64(void)
2248 if (use_lock) 2248 if (use_lock)
2249 read_lock_irqsave(&batched_entropy_reset_lock, flags); 2249 read_lock_irqsave(&batched_entropy_reset_lock, flags);
2250 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { 2250 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2251 extract_crng((__u32 *)batch->entropy_u64); 2251 extract_crng((u8 *)batch->entropy_u64);
2252 batch->position = 0; 2252 batch->position = 0;
2253 } 2253 }
2254 ret = batch->entropy_u64[batch->position++]; 2254 ret = batch->entropy_u64[batch->position++];
@@ -2278,7 +2278,7 @@ u32 get_random_u32(void)
2278 if (use_lock) 2278 if (use_lock)
2279 read_lock_irqsave(&batched_entropy_reset_lock, flags); 2279 read_lock_irqsave(&batched_entropy_reset_lock, flags);
2280 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { 2280 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2281 extract_crng(batch->entropy_u32); 2281 extract_crng((u8 *)batch->entropy_u32);
2282 batch->position = 0; 2282 batch->position = 0;
2283 } 2283 }
2284 ret = batch->entropy_u32[batch->position++]; 2284 ret = batch->entropy_u32[batch->position++];
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f32c8a..8e7e225d2446 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
10obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ 10obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
11obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ 11obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
12obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o 12obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ 13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
14obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 14obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
15obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 15obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
16obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o 16obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 801aeab5ab1e..2b7af44c7b85 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Cryptographic API. 3 * Cryptographic API.
3 * 4 *
@@ -6,10 +7,6 @@
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL 7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com> 8 * Author: Nicolas Royer <nicolas@eukrea.com>
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-aes.c driver. 10 * Some ideas are from omap-aes.c driver.
14 */ 11 */
15 12
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index 2a60d1224143..cbd37a2edada 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * API for Atmel Secure Protocol Layers Improved Performances (SPLIP) 3 * API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
3 * 4 *
@@ -5,18 +6,6 @@
5 * 6 *
6 * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> 7 * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. 9 * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
21 */ 10 */
22 11
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 74f083f45e97..ba00e4563ca0 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Microchip / Atmel ECC (I2C) driver. 3 * Microchip / Atmel ECC (I2C) driver.
3 * 4 *
4 * Copyright (c) 2017, Microchip Technology Inc. 5 * Copyright (c) 2017, Microchip Technology Inc.
5 * Author: Tudor Ambarus <tudor.ambarus@microchip.com> 6 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */ 7 */
17 8
18#include <linux/bitrev.h> 9#include <linux/bitrev.h>
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
index 25232c8abcc2..643a3b947338 100644
--- a/drivers/crypto/atmel-ecc.h
+++ b/drivers/crypto/atmel-ecc.h
@@ -1,19 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (c) 2017, Microchip Technology Inc. 3 * Copyright (c) 2017, Microchip Technology Inc.
3 * Author: Tudor Ambarus <tudor.ambarus@microchip.com> 4 * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 */ 5 */
18 6
19#ifndef __ATMEL_ECC_H__ 7#ifndef __ATMEL_ECC_H__
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8a19df2fba6a..ab0cfe748931 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Cryptographic API. 3 * Cryptographic API.
3 * 4 *
@@ -6,10 +7,6 @@
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL 7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com> 8 * Author: Nicolas Royer <nicolas@eukrea.com>
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-sham.c drivers. 10 * Some ideas are from omap-sham.c drivers.
14 */ 11 */
15 12
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 97b0423efa7f..438e1ffb2ec0 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Cryptographic API. 3 * Cryptographic API.
3 * 4 *
@@ -6,10 +7,6 @@
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL 7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com> 8 * Author: Nicolas Royer <nicolas@eukrea.com>
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-aes.c drivers. 10 * Some ideas are from omap-aes.c drivers.
14 */ 11 */
15 12
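
The six Atmel hunks above make the same mechanical change: the multi-paragraph GPL notice is dropped and a one-line SPDX identifier is added at the top of each file, written as a C++-style comment in .c sources and inside a plain C comment in headers. A minimal sketch of the two resulting header shapes (hypothetical file names, GPL-2.0 assumed as in the hunks):

// SPDX-License-Identifier: GPL-2.0
/*
 * example-driver.c - in a .c file the tag is the very first line, written
 * as a C++-style comment; the copyright/author block that follows is kept.
 */

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * example-driver.h - in a header the tag stays inside a plain C comment.
 */
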
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 7f07a5085e9b..f3442c2bdbdc 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -330,7 +330,7 @@ struct artpec6_cryptotfm_context {
330 size_t key_length; 330 size_t key_length;
331 u32 key_md; 331 u32 key_md;
332 int crypto_type; 332 int crypto_type;
333 struct crypto_skcipher *fallback; 333 struct crypto_sync_skcipher *fallback;
334}; 334};
335 335
336struct artpec6_crypto_aead_hw_ctx { 336struct artpec6_crypto_aead_hw_ctx {
@@ -1199,15 +1199,15 @@ artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
1199 pr_debug("counter %x will overflow (nblks %u), falling back\n", 1199 pr_debug("counter %x will overflow (nblks %u), falling back\n",
1200 counter, counter + nblks); 1200 counter, counter + nblks);
1201 1201
1202 ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key, 1202 ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
1203 ctx->key_length); 1203 ctx->key_length);
1204 if (ret) 1204 if (ret)
1205 return ret; 1205 return ret;
1206 1206
1207 { 1207 {
1208 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 1208 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
1209 1209
1210 skcipher_request_set_tfm(subreq, ctx->fallback); 1210 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
1211 skcipher_request_set_callback(subreq, req->base.flags, 1211 skcipher_request_set_callback(subreq, req->base.flags,
1212 NULL, NULL); 1212 NULL, NULL);
1213 skcipher_request_set_crypt(subreq, req->src, req->dst, 1213 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -1561,10 +1561,9 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
1561{ 1561{
1562 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1562 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1563 1563
1564 ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), 1564 ctx->fallback =
1565 0, 1565 crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
1566 CRYPTO_ALG_ASYNC | 1566 0, CRYPTO_ALG_NEED_FALLBACK);
1567 CRYPTO_ALG_NEED_FALLBACK);
1568 if (IS_ERR(ctx->fallback)) 1567 if (IS_ERR(ctx->fallback))
1569 return PTR_ERR(ctx->fallback); 1568 return PTR_ERR(ctx->fallback);
1570 1569
@@ -1605,7 +1604,7 @@ static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
1605{ 1604{
1606 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1605 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
1607 1606
1608 crypto_free_skcipher(ctx->fallback); 1607 crypto_free_sync_skcipher(ctx->fallback);
1609 artpec6_crypto_aes_exit(tfm); 1608 artpec6_crypto_aes_exit(tfm);
1610} 1609}
1611 1610
@@ -3174,7 +3173,6 @@ static struct platform_driver artpec6_crypto_driver = {
3174 .remove = artpec6_crypto_remove, 3173 .remove = artpec6_crypto_remove,
3175 .driver = { 3174 .driver = {
3176 .name = "artpec6-crypto", 3175 .name = "artpec6-crypto",
3177 .owner = THIS_MODULE,
3178 .of_match_table = artpec6_crypto_of_match, 3176 .of_match_table = artpec6_crypto_of_match,
3179 }, 3177 },
3180}; 3178};
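
The artpec6 conversion above is part of the VLA removal called out in the pull request: SKCIPHER_REQUEST_ON_STACK only has a fixed size when the transform is synchronous, so the ctr(aes) fallback moves to the crypto_sync_skcipher wrapper API (the unrelated .owner line goes away because platform_driver_register() already sets it). A condensed sketch of the sync-fallback pattern, with a hypothetical context struct and only the essential error handling:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct hypothetical_ctx {
        struct crypto_sync_skcipher *fallback;
        u8 key[32];
        unsigned int keylen;
};

static int fallback_init(struct hypothetical_ctx *ctx, const char *alg_name)
{
        /* Allocate a synchronous skcipher so on-stack requests are safe. */
        ctx->fallback = crypto_alloc_sync_skcipher(alg_name, 0,
                                                   CRYPTO_ALG_NEED_FALLBACK);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int fallback_encrypt(struct hypothetical_ctx *ctx,
                            struct scatterlist *src, struct scatterlist *dst,
                            unsigned int nbytes, u8 *iv)
{
        int ret;

        ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->key, ctx->keylen);
        if (ret)
                return ret;

        {
                /* Stack-allocated request sized for a sync tfm - no VLA. */
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, 0, NULL, NULL);
                skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
                ret = crypto_skcipher_encrypt(subreq);
                skcipher_request_zero(subreq);
        }

        return ret;
}

static void fallback_exit(struct hypothetical_ctx *ctx)
{
        crypto_free_sync_skcipher(ctx->fallback);
}
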
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 1eb852765469..c4b1cade55c1 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,7 +1,12 @@
1# SPDX-License-Identifier: GPL-2.0
2config CRYPTO_DEV_FSL_CAAM_COMMON
3 tristate
4
1config CRYPTO_DEV_FSL_CAAM 5config CRYPTO_DEV_FSL_CAAM
2 tristate "Freescale CAAM-Multicore driver backend" 6 tristate "Freescale CAAM-Multicore platform driver backend"
3 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE 7 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
4 select SOC_BUS 8 select SOC_BUS
9 select CRYPTO_DEV_FSL_CAAM_COMMON
5 help 10 help
6 Enables the driver module for Freescale's Cryptographic Accelerator 11 Enables the driver module for Freescale's Cryptographic Accelerator
7 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 12 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +17,16 @@ config CRYPTO_DEV_FSL_CAAM
12 To compile this driver as a module, choose M here: the module 17 To compile this driver as a module, choose M here: the module
13 will be called caam. 18 will be called caam.
14 19
20if CRYPTO_DEV_FSL_CAAM
21
22config CRYPTO_DEV_FSL_CAAM_DEBUG
23 bool "Enable debug output in CAAM driver"
24 help
25 Selecting this will enable printing of various debug
26 information in the CAAM driver.
27
15config CRYPTO_DEV_FSL_CAAM_JR 28config CRYPTO_DEV_FSL_CAAM_JR
16 tristate "Freescale CAAM Job Ring driver backend" 29 tristate "Freescale CAAM Job Ring driver backend"
17 depends on CRYPTO_DEV_FSL_CAAM
18 default y 30 default y
19 help 31 help
20 Enables the driver module for Job Rings which are part of 32 Enables the driver module for Job Rings which are part of
@@ -25,9 +37,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
25 To compile this driver as a module, choose M here: the module 37 To compile this driver as a module, choose M here: the module
26 will be called caam_jr. 38 will be called caam_jr.
27 39
40if CRYPTO_DEV_FSL_CAAM_JR
41
28config CRYPTO_DEV_FSL_CAAM_RINGSIZE 42config CRYPTO_DEV_FSL_CAAM_RINGSIZE
29 int "Job Ring size" 43 int "Job Ring size"
30 depends on CRYPTO_DEV_FSL_CAAM_JR
31 range 2 9 44 range 2 9
32 default "9" 45 default "9"
33 help 46 help
@@ -45,7 +58,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
45 58
46config CRYPTO_DEV_FSL_CAAM_INTC 59config CRYPTO_DEV_FSL_CAAM_INTC
47 bool "Job Ring interrupt coalescing" 60 bool "Job Ring interrupt coalescing"
48 depends on CRYPTO_DEV_FSL_CAAM_JR
49 help 61 help
50 Enable the Job Ring's interrupt coalescing feature. 62 Enable the Job Ring's interrupt coalescing feature.
51 63
@@ -75,7 +87,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
75 87
76config CRYPTO_DEV_FSL_CAAM_CRYPTO_API 88config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
77 tristate "Register algorithm implementations with the Crypto API" 89 tristate "Register algorithm implementations with the Crypto API"
78 depends on CRYPTO_DEV_FSL_CAAM_JR
79 default y 90 default y
80 select CRYPTO_AEAD 91 select CRYPTO_AEAD
81 select CRYPTO_AUTHENC 92 select CRYPTO_AUTHENC
@@ -90,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
90 101
91config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI 102config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
92 tristate "Queue Interface as Crypto API backend" 103 tristate "Queue Interface as Crypto API backend"
93 depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET 104 depends on FSL_DPAA && NET
94 default y 105 default y
95 select CRYPTO_AUTHENC 106 select CRYPTO_AUTHENC
96 select CRYPTO_BLKCIPHER 107 select CRYPTO_BLKCIPHER
@@ -107,7 +118,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
107 118
108config CRYPTO_DEV_FSL_CAAM_AHASH_API 119config CRYPTO_DEV_FSL_CAAM_AHASH_API
109 tristate "Register hash algorithm implementations with Crypto API" 120 tristate "Register hash algorithm implementations with Crypto API"
110 depends on CRYPTO_DEV_FSL_CAAM_JR
111 default y 121 default y
112 select CRYPTO_HASH 122 select CRYPTO_HASH
113 help 123 help
@@ -119,7 +129,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
119 129
120config CRYPTO_DEV_FSL_CAAM_PKC_API 130config CRYPTO_DEV_FSL_CAAM_PKC_API
121 tristate "Register public key cryptography implementations with Crypto API" 131 tristate "Register public key cryptography implementations with Crypto API"
122 depends on CRYPTO_DEV_FSL_CAAM_JR
123 default y 132 default y
124 select CRYPTO_RSA 133 select CRYPTO_RSA
125 help 134 help
@@ -131,7 +140,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
131 140
132config CRYPTO_DEV_FSL_CAAM_RNG_API 141config CRYPTO_DEV_FSL_CAAM_RNG_API
133 tristate "Register caam device for hwrng API" 142 tristate "Register caam device for hwrng API"
134 depends on CRYPTO_DEV_FSL_CAAM_JR
135 default y 143 default y
136 select CRYPTO_RNG 144 select CRYPTO_RNG
137 select HW_RANDOM 145 select HW_RANDOM
@@ -142,13 +150,32 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
142 To compile this as a module, choose M here: the module 150 To compile this as a module, choose M here: the module
143 will be called caamrng. 151 will be called caamrng.
144 152
145config CRYPTO_DEV_FSL_CAAM_DEBUG 153endif # CRYPTO_DEV_FSL_CAAM_JR
146 bool "Enable debug output in CAAM driver" 154
147 depends on CRYPTO_DEV_FSL_CAAM 155endif # CRYPTO_DEV_FSL_CAAM
156
157config CRYPTO_DEV_FSL_DPAA2_CAAM
158 tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
159 depends on FSL_MC_DPIO
160 depends on NETDEVICES
161 select CRYPTO_DEV_FSL_CAAM_COMMON
162 select CRYPTO_BLKCIPHER
163 select CRYPTO_AUTHENC
164 select CRYPTO_AEAD
165 select CRYPTO_HASH
148 help 166 help
149 Selecting this will enable printing of various debug 167 CAAM driver for QorIQ Data Path Acceleration Architecture 2.
150 information in the CAAM driver. 168 It handles DPSECI DPAA2 objects that sit on the Management Complex
169 (MC) fsl-mc bus.
170
171 To compile this as a module, choose M here: the module
172 will be called dpaa2_caam.
151 173
152config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC 174config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
153 def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ 175 def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
154 CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) 176 CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
177 CRYPTO_DEV_FSL_DPAA2_CAAM)
178
179config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
180 def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
181 CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index cb652ee7dfc8..7bbfd06a11ff 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
6 ccflags-y := -DDEBUG 6 ccflags-y := -DDEBUG
7endif 7endif
8 8
9ccflags-y += -DVERSION=\"\"
10
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
9obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o 13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 14obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o 15obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o 16obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
14obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 17obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
18obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
15obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 19obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
16obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o 20obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
17 21
18caam-objs := ctrl.o 22caam-objs := ctrl.o
19caam_jr-objs := jr.o key_gen.o error.o 23caam_jr-objs := jr.o key_gen.o
20caam_pkc-y := caampkc.o pkc_desc.o 24caam_pkc-y := caampkc.o pkc_desc.o
21ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) 25ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
22 ccflags-y += -DCONFIG_CAAM_QI 26 ccflags-y += -DCONFIG_CAAM_QI
23 caam-objs += qi.o 27 caam-objs += qi.o
24endif 28endif
29
30obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
31
32dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ec40f991e6c6..869f092432de 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,8 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * caam - Freescale FSL CAAM support for crypto API 3 * caam - Freescale FSL CAAM support for crypto API
3 * 4 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc. 5 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2016 NXP 6 * Copyright 2016-2018 NXP
6 * 7 *
7 * Based on talitos crypto API driver. 8 * Based on talitos crypto API driver.
8 * 9 *
@@ -81,8 +82,6 @@
81#define debug(format, arg...) 82#define debug(format, arg...)
82#endif 83#endif
83 84
84static struct list_head alg_list;
85
86struct caam_alg_entry { 85struct caam_alg_entry {
87 int class1_alg_type; 86 int class1_alg_type;
88 int class2_alg_type; 87 int class2_alg_type;
@@ -96,17 +95,21 @@ struct caam_aead_alg {
96 bool registered; 95 bool registered;
97}; 96};
98 97
98struct caam_skcipher_alg {
99 struct skcipher_alg skcipher;
100 struct caam_alg_entry caam;
101 bool registered;
102};
103
99/* 104/*
100 * per-session context 105 * per-session context
101 */ 106 */
102struct caam_ctx { 107struct caam_ctx {
103 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 108 u32 sh_desc_enc[DESC_MAX_USED_LEN];
104 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 109 u32 sh_desc_dec[DESC_MAX_USED_LEN];
105 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
106 u8 key[CAAM_MAX_KEY_SIZE]; 110 u8 key[CAAM_MAX_KEY_SIZE];
107 dma_addr_t sh_desc_enc_dma; 111 dma_addr_t sh_desc_enc_dma;
108 dma_addr_t sh_desc_dec_dma; 112 dma_addr_t sh_desc_dec_dma;
109 dma_addr_t sh_desc_givenc_dma;
110 dma_addr_t key_dma; 113 dma_addr_t key_dma;
111 enum dma_data_direction dir; 114 enum dma_data_direction dir;
112 struct device *jrdev; 115 struct device *jrdev;
@@ -648,20 +651,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
648 return rfc4543_set_sh_desc(aead); 651 return rfc4543_set_sh_desc(aead);
649} 652}
650 653
651static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 654static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
652 const u8 *key, unsigned int keylen) 655 unsigned int keylen)
653{ 656{
654 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 657 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
655 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); 658 struct caam_skcipher_alg *alg =
656 const char *alg_name = crypto_tfm_alg_name(tfm); 659 container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
660 skcipher);
657 struct device *jrdev = ctx->jrdev; 661 struct device *jrdev = ctx->jrdev;
658 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 662 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
659 u32 *desc; 663 u32 *desc;
660 u32 ctx1_iv_off = 0; 664 u32 ctx1_iv_off = 0;
661 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 665 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
662 OP_ALG_AAI_CTR_MOD128); 666 OP_ALG_AAI_CTR_MOD128);
663 const bool is_rfc3686 = (ctr_mode && 667 const bool is_rfc3686 = alg->caam.rfc3686;
664 (strstr(alg_name, "rfc3686") != NULL));
665 668
666#ifdef DEBUG 669#ifdef DEBUG
667 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 670 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
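
In the converted setkey above the driver stops sniffing the algorithm name with strstr() to detect rfc3686 wrapping; that information now travels with the algorithm template and is recovered through container_of() on the skcipher_alg. A minimal sketch of the wrapper pattern, with hypothetical type and function names standing in for the caam-specific ones:

#include <crypto/ctr.h>
#include <crypto/skcipher.h>
#include <linux/kernel.h>

struct hypothetical_drv_alg {
        struct skcipher_alg skcipher;   /* the member actually registered */
        bool rfc3686;                   /* driver-private template data   */
};

static int hypothetical_setkey(struct crypto_skcipher *tfm, const u8 *key,
                               unsigned int keylen)
{
        /* Recover the wrapper that embeds the registered skcipher_alg. */
        struct hypothetical_drv_alg *alg =
                container_of(crypto_skcipher_alg(tfm),
                             struct hypothetical_drv_alg, skcipher);

        if (alg->rfc3686) {
                /* rfc3686 keys carry a 4-byte nonce after the raw AES key */
                keylen -= CTR_RFC3686_NONCE_SIZE;
        }

        /* ... program the hardware key material here ... */
        return 0;
}
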
@@ -689,40 +692,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
689 ctx->cdata.key_virt = key; 692 ctx->cdata.key_virt = key;
690 ctx->cdata.key_inline = true; 693 ctx->cdata.key_inline = true;
691 694
692 /* ablkcipher_encrypt shared descriptor */ 695 /* skcipher_encrypt shared descriptor */
693 desc = ctx->sh_desc_enc; 696 desc = ctx->sh_desc_enc;
694 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, 697 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
695 ctx1_iv_off); 698 ctx1_iv_off);
696 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 699 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
697 desc_bytes(desc), ctx->dir); 700 desc_bytes(desc), ctx->dir);
698 701
699 /* ablkcipher_decrypt shared descriptor */ 702 /* skcipher_decrypt shared descriptor */
700 desc = ctx->sh_desc_dec; 703 desc = ctx->sh_desc_dec;
701 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, 704 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
702 ctx1_iv_off); 705 ctx1_iv_off);
703 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 706 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
704 desc_bytes(desc), ctx->dir); 707 desc_bytes(desc), ctx->dir);
705 708
706 /* ablkcipher_givencrypt shared descriptor */
707 desc = ctx->sh_desc_givenc;
708 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
709 ctx1_iv_off);
710 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
711 desc_bytes(desc), ctx->dir);
712
713 return 0; 709 return 0;
714} 710}
715 711
716static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 712static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
717 const u8 *key, unsigned int keylen) 713 unsigned int keylen)
718{ 714{
719 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 715 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
720 struct device *jrdev = ctx->jrdev; 716 struct device *jrdev = ctx->jrdev;
721 u32 *desc; 717 u32 *desc;
722 718
723 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 719 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
724 crypto_ablkcipher_set_flags(ablkcipher, 720 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
725 CRYPTO_TFM_RES_BAD_KEY_LEN);
726 dev_err(jrdev, "key size mismatch\n"); 721 dev_err(jrdev, "key size mismatch\n");
727 return -EINVAL; 722 return -EINVAL;
728 } 723 }
@@ -731,15 +726,15 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
731 ctx->cdata.key_virt = key; 726 ctx->cdata.key_virt = key;
732 ctx->cdata.key_inline = true; 727 ctx->cdata.key_inline = true;
733 728
734 /* xts_ablkcipher_encrypt shared descriptor */ 729 /* xts_skcipher_encrypt shared descriptor */
735 desc = ctx->sh_desc_enc; 730 desc = ctx->sh_desc_enc;
736 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); 731 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
737 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 732 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
738 desc_bytes(desc), ctx->dir); 733 desc_bytes(desc), ctx->dir);
739 734
740 /* xts_ablkcipher_decrypt shared descriptor */ 735 /* xts_skcipher_decrypt shared descriptor */
741 desc = ctx->sh_desc_dec; 736 desc = ctx->sh_desc_dec;
742 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); 737 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
743 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 738 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
744 desc_bytes(desc), ctx->dir); 739 desc_bytes(desc), ctx->dir);
745 740
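
xts_skcipher_setkey() keeps its key-length check through the conversion: XTS consumes two full AES keys back to back, so anything other than 2 x 16 or 2 x 32 bytes is rejected with CRYPTO_TFM_RES_BAD_KEY_LEN before any descriptor is built. The same guard as a standalone sketch (hypothetical function name, no hardware programming):

#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int hypothetical_xts_check_key(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        /* XTS = two concatenated AES keys: 2*128 or 2*256 bits. */
        if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        return 0;
}
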
@@ -765,22 +760,20 @@ struct aead_edesc {
765}; 760};
766 761
767/* 762/*
768 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 763 * skcipher_edesc - s/w-extended skcipher descriptor
769 * @src_nents: number of segments in input s/w scatterlist 764 * @src_nents: number of segments in input s/w scatterlist
770 * @dst_nents: number of segments in output s/w scatterlist 765 * @dst_nents: number of segments in output s/w scatterlist
771 * @iv_dma: dma address of iv for checking continuity and link table 766 * @iv_dma: dma address of iv for checking continuity and link table
772 * @iv_dir: DMA mapping direction for IV
773 * @sec4_sg_bytes: length of dma mapped sec4_sg space 767 * @sec4_sg_bytes: length of dma mapped sec4_sg space
774 * @sec4_sg_dma: bus physical mapped address of h/w link table 768 * @sec4_sg_dma: bus physical mapped address of h/w link table
775 * @sec4_sg: pointer to h/w link table 769 * @sec4_sg: pointer to h/w link table
776 * @hw_desc: the h/w job descriptor followed by any referenced link tables 770 * @hw_desc: the h/w job descriptor followed by any referenced link tables
777 * and IV 771 * and IV
778 */ 772 */
779struct ablkcipher_edesc { 773struct skcipher_edesc {
780 int src_nents; 774 int src_nents;
781 int dst_nents; 775 int dst_nents;
782 dma_addr_t iv_dma; 776 dma_addr_t iv_dma;
783 enum dma_data_direction iv_dir;
784 int sec4_sg_bytes; 777 int sec4_sg_bytes;
785 dma_addr_t sec4_sg_dma; 778 dma_addr_t sec4_sg_dma;
786 struct sec4_sg_entry *sec4_sg; 779 struct sec4_sg_entry *sec4_sg;
@@ -790,8 +783,7 @@ struct ablkcipher_edesc {
790static void caam_unmap(struct device *dev, struct scatterlist *src, 783static void caam_unmap(struct device *dev, struct scatterlist *src,
791 struct scatterlist *dst, int src_nents, 784 struct scatterlist *dst, int src_nents,
792 int dst_nents, 785 int dst_nents,
793 dma_addr_t iv_dma, int ivsize, 786 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
794 enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
795 int sec4_sg_bytes) 787 int sec4_sg_bytes)
796{ 788{
797 if (dst != src) { 789 if (dst != src) {
@@ -803,7 +795,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
803 } 795 }
804 796
805 if (iv_dma) 797 if (iv_dma)
806 dma_unmap_single(dev, iv_dma, ivsize, iv_dir); 798 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
807 if (sec4_sg_bytes) 799 if (sec4_sg_bytes)
808 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 800 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
809 DMA_TO_DEVICE); 801 DMA_TO_DEVICE);
@@ -814,20 +806,19 @@ static void aead_unmap(struct device *dev,
814 struct aead_request *req) 806 struct aead_request *req)
815{ 807{
816 caam_unmap(dev, req->src, req->dst, 808 caam_unmap(dev, req->src, req->dst,
817 edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE, 809 edesc->src_nents, edesc->dst_nents, 0, 0,
818 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 810 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
819} 811}
820 812
821static void ablkcipher_unmap(struct device *dev, 813static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
822 struct ablkcipher_edesc *edesc, 814 struct skcipher_request *req)
823 struct ablkcipher_request *req)
824{ 815{
825 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 816 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
826 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 817 int ivsize = crypto_skcipher_ivsize(skcipher);
827 818
828 caam_unmap(dev, req->src, req->dst, 819 caam_unmap(dev, req->src, req->dst,
829 edesc->src_nents, edesc->dst_nents, 820 edesc->src_nents, edesc->dst_nents,
830 edesc->iv_dma, ivsize, edesc->iv_dir, 821 edesc->iv_dma, ivsize,
831 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 822 edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
832} 823}
833 824
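
Because the givencrypt path is going away, the IV is only ever mapped towards the device, which is why caam_unmap() above loses its iv_dir parameter and hard-codes DMA_TO_DEVICE. The overall unmap shape, sketched with hypothetical names and roughly the same ordering of operations:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void hypothetical_unmap(struct device *dev,
                               struct scatterlist *src, struct scatterlist *dst,
                               int src_nents, int dst_nents,
                               dma_addr_t iv_dma, int ivsize,
                               dma_addr_t sg_table_dma, int sg_table_bytes)
{
        if (dst != src) {
                if (src_nents)
                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else {
                /* in-place request: one bidirectional mapping */
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }

        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
        if (sg_table_bytes)
                dma_unmap_single(dev, sg_table_dma, sg_table_bytes,
                                 DMA_TO_DEVICE);
}
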
@@ -881,87 +872,74 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
881 aead_request_complete(req, err); 872 aead_request_complete(req, err);
882} 873}
883 874
884static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 875static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
885 void *context) 876 void *context)
886{ 877{
887 struct ablkcipher_request *req = context; 878 struct skcipher_request *req = context;
888 struct ablkcipher_edesc *edesc; 879 struct skcipher_edesc *edesc;
889 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 880 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
890 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 881 int ivsize = crypto_skcipher_ivsize(skcipher);
891 882
892#ifdef DEBUG 883#ifdef DEBUG
893 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 884 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
894#endif 885#endif
895 886
896 edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); 887 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
897 888
898 if (err) 889 if (err)
899 caam_jr_strstatus(jrdev, err); 890 caam_jr_strstatus(jrdev, err);
900 891
901#ifdef DEBUG 892#ifdef DEBUG
902 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 893 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
903 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 894 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
904 edesc->src_nents > 1 ? 100 : ivsize, 1); 895 edesc->src_nents > 1 ? 100 : ivsize, 1);
905#endif 896#endif
906 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 897 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
907 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 898 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
908 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 899 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
909 900
910 ablkcipher_unmap(jrdev, edesc, req); 901 skcipher_unmap(jrdev, edesc, req);
911 902
912 /* 903 /*
913 * The crypto API expects us to set the IV (req->info) to the last 904 * The crypto API expects us to set the IV (req->iv) to the last
914 * ciphertext block. This is used e.g. by the CTS mode. 905 * ciphertext block. This is used e.g. by the CTS mode.
915 */ 906 */
916 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, 907 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
917 ivsize, 0); 908 ivsize, 0);
918 909
919 /* In case initial IV was generated, copy it in GIVCIPHER request */
920 if (edesc->iv_dir == DMA_FROM_DEVICE) {
921 u8 *iv;
922 struct skcipher_givcrypt_request *greq;
923
924 greq = container_of(req, struct skcipher_givcrypt_request,
925 creq);
926 iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
927 edesc->sec4_sg_bytes;
928 memcpy(greq->giv, iv, ivsize);
929 }
930
931 kfree(edesc); 910 kfree(edesc);
932 911
933 ablkcipher_request_complete(req, err); 912 skcipher_request_complete(req, err);
934} 913}
935 914
936static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 915static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
937 void *context) 916 void *context)
938{ 917{
939 struct ablkcipher_request *req = context; 918 struct skcipher_request *req = context;
940 struct ablkcipher_edesc *edesc; 919 struct skcipher_edesc *edesc;
941#ifdef DEBUG 920#ifdef DEBUG
942 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 921 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
943 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 922 int ivsize = crypto_skcipher_ivsize(skcipher);
944 923
945 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 924 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
946#endif 925#endif
947 926
948 edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); 927 edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
949 if (err) 928 if (err)
950 caam_jr_strstatus(jrdev, err); 929 caam_jr_strstatus(jrdev, err);
951 930
952#ifdef DEBUG 931#ifdef DEBUG
953 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 932 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
954 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 933 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
955 ivsize, 1);
956#endif 934#endif
957 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 935 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
958 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 936 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
959 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 937 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
960 938
961 ablkcipher_unmap(jrdev, edesc, req); 939 skcipher_unmap(jrdev, edesc, req);
962 kfree(edesc); 940 kfree(edesc);
963 941
964 ablkcipher_request_complete(req, err); 942 skcipher_request_complete(req, err);
965} 943}
966 944
967/* 945/*
@@ -1103,34 +1081,38 @@ static void init_authenc_job(struct aead_request *req,
1103} 1081}
1104 1082
1105/* 1083/*
1106 * Fill in ablkcipher job descriptor 1084 * Fill in skcipher job descriptor
1107 */ 1085 */
1108static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, 1086static void init_skcipher_job(struct skcipher_request *req,
1109 struct ablkcipher_edesc *edesc, 1087 struct skcipher_edesc *edesc,
1110 struct ablkcipher_request *req) 1088 const bool encrypt)
1111{ 1089{
1112 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1090 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1113 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1091 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1092 int ivsize = crypto_skcipher_ivsize(skcipher);
1114 u32 *desc = edesc->hw_desc; 1093 u32 *desc = edesc->hw_desc;
1094 u32 *sh_desc;
1115 u32 out_options = 0; 1095 u32 out_options = 0;
1116 dma_addr_t dst_dma; 1096 dma_addr_t dst_dma, ptr;
1117 int len; 1097 int len;
1118 1098
1119#ifdef DEBUG 1099#ifdef DEBUG
1120 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1100 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1121 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1101 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1122 ivsize, 1); 1102 pr_err("asked=%d, cryptlen%d\n",
1123 pr_err("asked=%d, nbytes%d\n", 1103 (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
1124 (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
1125#endif 1104#endif
1126 caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", 1105 caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
1127 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1106 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1128 edesc->src_nents > 1 ? 100 : req->nbytes, 1); 1107 edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1108
1109 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
1110 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
1129 1111
1130 len = desc_len(sh_desc); 1112 len = desc_len(sh_desc);
1131 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1113 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1132 1114
1133 append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize, 1115 append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
1134 LDST_SGF); 1116 LDST_SGF);
1135 1117
1136 if (likely(req->src == req->dst)) { 1118 if (likely(req->src == req->dst)) {
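
init_skcipher_job() now receives the request plus an encrypt flag and picks the shared descriptor itself, instead of each caller passing a sh_desc/ptr pair. A reduced sketch of just that selection step, with a hypothetical context layout:

#include <linux/types.h>

struct hypothetical_op_ctx {
        u32 *sh_desc_enc;
        u32 *sh_desc_dec;
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
};

static void hypothetical_pick_shared_desc(struct hypothetical_op_ctx *ctx,
                                          bool encrypt,
                                          u32 **sh_desc, dma_addr_t *ptr)
{
        /* One job-init routine; the direction only changes which shared
         * descriptor template and DMA handle the job descriptor points at. */
        *sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
        *ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
}
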
@@ -1145,48 +1127,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1145 out_options = LDST_SGF; 1127 out_options = LDST_SGF;
1146 } 1128 }
1147 } 1129 }
1148 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1130 append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
1149}
1150
1151/*
1152 * Fill in ablkcipher givencrypt job descriptor
1153 */
1154static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
1155 struct ablkcipher_edesc *edesc,
1156 struct ablkcipher_request *req)
1157{
1158 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1159 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1160 u32 *desc = edesc->hw_desc;
1161 u32 in_options;
1162 dma_addr_t dst_dma, src_dma;
1163 int len, sec4_sg_index = 0;
1164
1165#ifdef DEBUG
1166 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
1167 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1168 ivsize, 1);
1169#endif
1170 caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
1171 DUMP_PREFIX_ADDRESS, 16, 4, req->src,
1172 edesc->src_nents > 1 ? 100 : req->nbytes, 1);
1173
1174 len = desc_len(sh_desc);
1175 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1176
1177 if (edesc->src_nents == 1) {
1178 src_dma = sg_dma_address(req->src);
1179 in_options = 0;
1180 } else {
1181 src_dma = edesc->sec4_sg_dma;
1182 sec4_sg_index += edesc->src_nents;
1183 in_options = LDST_SGF;
1184 }
1185 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
1186
1187 dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
1188 sizeof(struct sec4_sg_entry);
1189 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
1190} 1131}
1191 1132
1192/* 1133/*
@@ -1275,7 +1216,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1275 GFP_DMA | flags); 1216 GFP_DMA | flags);
1276 if (!edesc) { 1217 if (!edesc) {
1277 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1218 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1278 0, DMA_NONE, 0, 0); 1219 0, 0, 0);
1279 return ERR_PTR(-ENOMEM); 1220 return ERR_PTR(-ENOMEM);
1280 } 1221 }
1281 1222
@@ -1476,35 +1417,35 @@ static int aead_decrypt(struct aead_request *req)
1476} 1417}
1477 1418
1478/* 1419/*
1479 * allocate and map the ablkcipher extended descriptor for ablkcipher 1420 * allocate and map the skcipher extended descriptor for skcipher
1480 */ 1421 */
1481static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1422static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1482 *req, int desc_bytes) 1423 int desc_bytes)
1483{ 1424{
1484 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1425 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1485 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1426 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1486 struct device *jrdev = ctx->jrdev; 1427 struct device *jrdev = ctx->jrdev;
1487 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1428 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1488 GFP_KERNEL : GFP_ATOMIC; 1429 GFP_KERNEL : GFP_ATOMIC;
1489 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1430 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1490 struct ablkcipher_edesc *edesc; 1431 struct skcipher_edesc *edesc;
1491 dma_addr_t iv_dma; 1432 dma_addr_t iv_dma;
1492 u8 *iv; 1433 u8 *iv;
1493 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1434 int ivsize = crypto_skcipher_ivsize(skcipher);
1494 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; 1435 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1495 1436
1496 src_nents = sg_nents_for_len(req->src, req->nbytes); 1437 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1497 if (unlikely(src_nents < 0)) { 1438 if (unlikely(src_nents < 0)) {
1498 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1439 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1499 req->nbytes); 1440 req->cryptlen);
1500 return ERR_PTR(src_nents); 1441 return ERR_PTR(src_nents);
1501 } 1442 }
1502 1443
1503 if (req->dst != req->src) { 1444 if (req->dst != req->src) {
1504 dst_nents = sg_nents_for_len(req->dst, req->nbytes); 1445 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1505 if (unlikely(dst_nents < 0)) { 1446 if (unlikely(dst_nents < 0)) {
1506 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", 1447 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1507 req->nbytes); 1448 req->cryptlen);
1508 return ERR_PTR(dst_nents); 1449 return ERR_PTR(dst_nents);
1509 } 1450 }
1510 } 1451 }
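
skcipher_edesc_alloc() above sizes both scatterlists with sg_nents_for_len(), which returns a negative errno when the list is shorter than cryptlen; that errno is passed straight back through ERR_PTR(). The guard in isolation (hypothetical helper, in-place requests skip the destination walk):

#include <linux/scatterlist.h>

static int hypothetical_count_nents(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int cryptlen,
                                    int *src_nents, int *dst_nents)
{
        *src_nents = sg_nents_for_len(src, cryptlen);
        if (*src_nents < 0)
                return *src_nents;      /* S/G list shorter than cryptlen */

        if (dst != src) {
                *dst_nents = sg_nents_for_len(dst, cryptlen);
                if (*dst_nents < 0)
                        return *dst_nents;
        } else {
                /* in-place request: no separate destination walk needed */
                *dst_nents = 0;
        }
        return 0;
}
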
@@ -1546,7 +1487,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1546 if (!edesc) { 1487 if (!edesc) {
1547 dev_err(jrdev, "could not allocate extended descriptor\n"); 1488 dev_err(jrdev, "could not allocate extended descriptor\n");
1548 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1489 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1549 0, DMA_NONE, 0, 0); 1490 0, 0, 0);
1550 return ERR_PTR(-ENOMEM); 1491 return ERR_PTR(-ENOMEM);
1551 } 1492 }
1552 1493
@@ -1555,17 +1496,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1555 edesc->sec4_sg_bytes = sec4_sg_bytes; 1496 edesc->sec4_sg_bytes = sec4_sg_bytes;
1556 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc + 1497 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1557 desc_bytes); 1498 desc_bytes);
1558 edesc->iv_dir = DMA_TO_DEVICE;
1559 1499
1560 /* Make sure IV is located in a DMAable area */ 1500 /* Make sure IV is located in a DMAable area */
1561 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; 1501 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
1562 memcpy(iv, req->info, ivsize); 1502 memcpy(iv, req->iv, ivsize);
1563 1503
1564 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); 1504 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
1565 if (dma_mapping_error(jrdev, iv_dma)) { 1505 if (dma_mapping_error(jrdev, iv_dma)) {
1566 dev_err(jrdev, "unable to map IV\n"); 1506 dev_err(jrdev, "unable to map IV\n");
1567 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1507 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1568 0, DMA_NONE, 0, 0); 1508 0, 0, 0);
1569 kfree(edesc); 1509 kfree(edesc);
1570 return ERR_PTR(-ENOMEM); 1510 return ERR_PTR(-ENOMEM);
1571 } 1511 }
@@ -1583,7 +1523,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1583 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1523 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1584 dev_err(jrdev, "unable to map S/G table\n"); 1524 dev_err(jrdev, "unable to map S/G table\n");
1585 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1525 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1586 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1526 iv_dma, ivsize, 0, 0);
1587 kfree(edesc); 1527 kfree(edesc);
1588 return ERR_PTR(-ENOMEM); 1528 return ERR_PTR(-ENOMEM);
1589 } 1529 }
@@ -1591,7 +1531,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1591 edesc->iv_dma = iv_dma; 1531 edesc->iv_dma = iv_dma;
1592 1532
1593#ifdef DEBUG 1533#ifdef DEBUG
1594 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", 1534 print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
1595 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1535 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1596 sec4_sg_bytes, 1); 1536 sec4_sg_bytes, 1);
1597#endif 1537#endif
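
The IV handling in the same allocation path is worth spelling out: req->iv may live on the caller's stack, so it is copied into the tail of the GFP_DMA edesc allocation before being DMA-mapped, and the caller still checks dma_mapping_error() and unwinds as the hunk above does. A sketch of the copy-then-map step with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical layout: [hw descriptor][S/G table][IV] in one allocation. */
static dma_addr_t hypothetical_map_iv(struct device *dev, void *edesc_base,
                                      size_t desc_bytes, size_t sg_bytes,
                                      const u8 *req_iv, unsigned int ivsize)
{
        u8 *iv = (u8 *)edesc_base + desc_bytes + sg_bytes;

        /* Copy into memory that is guaranteed DMA-able before mapping. */
        memcpy(iv, req_iv, ivsize);
        return dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
}
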
@@ -1599,362 +1539,187 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1599 return edesc; 1539 return edesc;
1600} 1540}
1601 1541
1602static int ablkcipher_encrypt(struct ablkcipher_request *req) 1542static int skcipher_encrypt(struct skcipher_request *req)
1603{ 1543{
1604 struct ablkcipher_edesc *edesc; 1544 struct skcipher_edesc *edesc;
1605 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1545 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1606 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1546 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1607 struct device *jrdev = ctx->jrdev; 1547 struct device *jrdev = ctx->jrdev;
1608 u32 *desc; 1548 u32 *desc;
1609 int ret = 0; 1549 int ret = 0;
1610 1550
1611 /* allocate extended descriptor */ 1551 /* allocate extended descriptor */
1612 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1552 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1613 if (IS_ERR(edesc)) 1553 if (IS_ERR(edesc))
1614 return PTR_ERR(edesc); 1554 return PTR_ERR(edesc);
1615 1555
1616 /* Create and submit job descriptor*/ 1556 /* Create and submit job descriptor*/
1617 init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req); 1557 init_skcipher_job(req, edesc, true);
1618#ifdef DEBUG 1558#ifdef DEBUG
1619 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1559 print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
1620 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1560 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1621 desc_bytes(edesc->hw_desc), 1); 1561 desc_bytes(edesc->hw_desc), 1);
1622#endif 1562#endif
1623 desc = edesc->hw_desc; 1563 desc = edesc->hw_desc;
1624 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1564 ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
1625 1565
1626 if (!ret) { 1566 if (!ret) {
1627 ret = -EINPROGRESS; 1567 ret = -EINPROGRESS;
1628 } else { 1568 } else {
1629 ablkcipher_unmap(jrdev, edesc, req); 1569 skcipher_unmap(jrdev, edesc, req);
1630 kfree(edesc); 1570 kfree(edesc);
1631 } 1571 }
1632 1572
1633 return ret; 1573 return ret;
1634} 1574}
1635 1575
1636static int ablkcipher_decrypt(struct ablkcipher_request *req) 1576static int skcipher_decrypt(struct skcipher_request *req)
1637{ 1577{
1638 struct ablkcipher_edesc *edesc; 1578 struct skcipher_edesc *edesc;
1639 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1579 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1640 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1580 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1641 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1581 int ivsize = crypto_skcipher_ivsize(skcipher);
1642 struct device *jrdev = ctx->jrdev; 1582 struct device *jrdev = ctx->jrdev;
1643 u32 *desc; 1583 u32 *desc;
1644 int ret = 0; 1584 int ret = 0;
1645 1585
1646 /* allocate extended descriptor */ 1586 /* allocate extended descriptor */
1647 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1587 edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1648 if (IS_ERR(edesc)) 1588 if (IS_ERR(edesc))
1649 return PTR_ERR(edesc); 1589 return PTR_ERR(edesc);
1650 1590
1651 /* 1591 /*
1652 * The crypto API expects us to set the IV (req->info) to the last 1592 * The crypto API expects us to set the IV (req->iv) to the last
1653 * ciphertext block. 1593 * ciphertext block.
1654 */ 1594 */
1655 scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, 1595 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1656 ivsize, 0); 1596 ivsize, 0);
1657 1597
1658 /* Create and submit job descriptor*/ 1598 /* Create and submit job descriptor*/
1659 init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req); 1599 init_skcipher_job(req, edesc, false);
1660 desc = edesc->hw_desc; 1600 desc = edesc->hw_desc;
1661#ifdef DEBUG 1601#ifdef DEBUG
1662 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1602 print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
1663 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1603 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1664 desc_bytes(edesc->hw_desc), 1); 1604 desc_bytes(edesc->hw_desc), 1);
1665#endif 1605#endif
1666 1606
1667 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1607 ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
1668 if (!ret) { 1608 if (!ret) {
1669 ret = -EINPROGRESS; 1609 ret = -EINPROGRESS;
1670 } else { 1610 } else {
1671 ablkcipher_unmap(jrdev, edesc, req); 1611 skcipher_unmap(jrdev, edesc, req);
1672 kfree(edesc); 1612 kfree(edesc);
1673 } 1613 }
1674 1614
1675 return ret; 1615 return ret;
1676} 1616}
1677 1617
1678/* 1618static struct caam_skcipher_alg driver_algs[] = {
1679 * allocate and map the ablkcipher extended descriptor
1680 * for ablkcipher givencrypt
1681 */
1682static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1683 struct skcipher_givcrypt_request *greq,
1684 int desc_bytes)
1685{
1686 struct ablkcipher_request *req = &greq->creq;
1687 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1688 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1689 struct device *jrdev = ctx->jrdev;
1690 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1691 GFP_KERNEL : GFP_ATOMIC;
1692 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1693 struct ablkcipher_edesc *edesc;
1694 dma_addr_t iv_dma;
1695 u8 *iv;
1696 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1697 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1698
1699 src_nents = sg_nents_for_len(req->src, req->nbytes);
1700 if (unlikely(src_nents < 0)) {
1701 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
1702 req->nbytes);
1703 return ERR_PTR(src_nents);
1704 }
1705
1706 if (likely(req->src == req->dst)) {
1707 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1708 DMA_BIDIRECTIONAL);
1709 if (unlikely(!mapped_src_nents)) {
1710 dev_err(jrdev, "unable to map source\n");
1711 return ERR_PTR(-ENOMEM);
1712 }
1713
1714 dst_nents = src_nents;
1715 mapped_dst_nents = src_nents;
1716 } else {
1717 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
1718 DMA_TO_DEVICE);
1719 if (unlikely(!mapped_src_nents)) {
1720 dev_err(jrdev, "unable to map source\n");
1721 return ERR_PTR(-ENOMEM);
1722 }
1723
1724 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1725 if (unlikely(dst_nents < 0)) {
1726 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
1727 req->nbytes);
1728 return ERR_PTR(dst_nents);
1729 }
1730
1731 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
1732 DMA_FROM_DEVICE);
1733 if (unlikely(!mapped_dst_nents)) {
1734 dev_err(jrdev, "unable to map destination\n");
1735 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1736 return ERR_PTR(-ENOMEM);
1737 }
1738 }
1739
1740 sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1741 dst_sg_idx = sec4_sg_ents;
1742 sec4_sg_ents += 1 + mapped_dst_nents;
1743
1744 /*
1745 * allocate space for base edesc and hw desc commands, link tables, IV
1746 */
1747 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1748 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
1749 GFP_DMA | flags);
1750 if (!edesc) {
1751 dev_err(jrdev, "could not allocate extended descriptor\n");
1752 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1753 0, DMA_NONE, 0, 0);
1754 return ERR_PTR(-ENOMEM);
1755 }
1756
1757 edesc->src_nents = src_nents;
1758 edesc->dst_nents = dst_nents;
1759 edesc->sec4_sg_bytes = sec4_sg_bytes;
1760 edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
1761 desc_bytes);
1762 edesc->iv_dir = DMA_FROM_DEVICE;
1763
1764 /* Make sure IV is located in a DMAable area */
1765 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
1766 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
1767 if (dma_mapping_error(jrdev, iv_dma)) {
1768 dev_err(jrdev, "unable to map IV\n");
1769 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1770 0, DMA_NONE, 0, 0);
1771 kfree(edesc);
1772 return ERR_PTR(-ENOMEM);
1773 }
1774
1775 if (mapped_src_nents > 1)
1776 sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
1777 0);
1778
1779 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
1780 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
1781 dst_sg_idx + 1, 0);
1782
1783 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1784 sec4_sg_bytes, DMA_TO_DEVICE);
1785 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1786 dev_err(jrdev, "unable to map S/G table\n");
1787 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1788 iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
1789 kfree(edesc);
1790 return ERR_PTR(-ENOMEM);
1791 }
1792 edesc->iv_dma = iv_dma;
1793
1794#ifdef DEBUG
1795 print_hex_dump(KERN_ERR,
1796 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
1797 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1798 sec4_sg_bytes, 1);
1799#endif
1800
1801 return edesc;
1802}
1803
1804static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1805{
1806 struct ablkcipher_request *req = &creq->creq;
1807 struct ablkcipher_edesc *edesc;
1808 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1809 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1810 struct device *jrdev = ctx->jrdev;
1811 u32 *desc;
1812 int ret = 0;
1813
1814 /* allocate extended descriptor */
1815 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1816 if (IS_ERR(edesc))
1817 return PTR_ERR(edesc);
1818
1819 /* Create and submit job descriptor*/
1820 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
1821 edesc, req);
1822#ifdef DEBUG
1823 print_hex_dump(KERN_ERR,
1824 "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
1825 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1826 desc_bytes(edesc->hw_desc), 1);
1827#endif
1828 desc = edesc->hw_desc;
1829 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1830
1831 if (!ret) {
1832 ret = -EINPROGRESS;
1833 } else {
1834 ablkcipher_unmap(jrdev, edesc, req);
1835 kfree(edesc);
1836 }
1837
1838 return ret;
1839}
1840
1841#define template_aead template_u.aead
1842#define template_ablkcipher template_u.ablkcipher
1843struct caam_alg_template {
1844 char name[CRYPTO_MAX_ALG_NAME];
1845 char driver_name[CRYPTO_MAX_ALG_NAME];
1846 unsigned int blocksize;
1847 u32 type;
1848 union {
1849 struct ablkcipher_alg ablkcipher;
1850 } template_u;
1851 u32 class1_alg_type;
1852 u32 class2_alg_type;
1853};
1854
1855static struct caam_alg_template driver_algs[] = {
1856 /* ablkcipher descriptor */
1857 { 1619 {
1858 .name = "cbc(aes)", 1620 .skcipher = {
1859 .driver_name = "cbc-aes-caam", 1621 .base = {
1860 .blocksize = AES_BLOCK_SIZE, 1622 .cra_name = "cbc(aes)",
1861 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1623 .cra_driver_name = "cbc-aes-caam",
1862 .template_ablkcipher = { 1624 .cra_blocksize = AES_BLOCK_SIZE,
1863 .setkey = ablkcipher_setkey, 1625 },
1864 .encrypt = ablkcipher_encrypt, 1626 .setkey = skcipher_setkey,
1865 .decrypt = ablkcipher_decrypt, 1627 .encrypt = skcipher_encrypt,
1866 .givencrypt = ablkcipher_givencrypt, 1628 .decrypt = skcipher_decrypt,
1867 .geniv = "<built-in>",
1868 .min_keysize = AES_MIN_KEY_SIZE, 1629 .min_keysize = AES_MIN_KEY_SIZE,
1869 .max_keysize = AES_MAX_KEY_SIZE, 1630 .max_keysize = AES_MAX_KEY_SIZE,
1870 .ivsize = AES_BLOCK_SIZE, 1631 .ivsize = AES_BLOCK_SIZE,
1871 }, 1632 },
1872 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1633 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1873 }, 1634 },
1874 { 1635 {
1875 .name = "cbc(des3_ede)", 1636 .skcipher = {
1876 .driver_name = "cbc-3des-caam", 1637 .base = {
1877 .blocksize = DES3_EDE_BLOCK_SIZE, 1638 .cra_name = "cbc(des3_ede)",
1878 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1639 .cra_driver_name = "cbc-3des-caam",
1879 .template_ablkcipher = { 1640 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1880 .setkey = ablkcipher_setkey, 1641 },
1881 .encrypt = ablkcipher_encrypt, 1642 .setkey = skcipher_setkey,
1882 .decrypt = ablkcipher_decrypt, 1643 .encrypt = skcipher_encrypt,
1883 .givencrypt = ablkcipher_givencrypt, 1644 .decrypt = skcipher_decrypt,
1884 .geniv = "<built-in>",
1885 .min_keysize = DES3_EDE_KEY_SIZE, 1645 .min_keysize = DES3_EDE_KEY_SIZE,
1886 .max_keysize = DES3_EDE_KEY_SIZE, 1646 .max_keysize = DES3_EDE_KEY_SIZE,
1887 .ivsize = DES3_EDE_BLOCK_SIZE, 1647 .ivsize = DES3_EDE_BLOCK_SIZE,
1888 }, 1648 },
1889 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1649 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1890 }, 1650 },
1891 { 1651 {
1892 .name = "cbc(des)", 1652 .skcipher = {
1893 .driver_name = "cbc-des-caam", 1653 .base = {
1894 .blocksize = DES_BLOCK_SIZE, 1654 .cra_name = "cbc(des)",
1895 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1655 .cra_driver_name = "cbc-des-caam",
1896 .template_ablkcipher = { 1656 .cra_blocksize = DES_BLOCK_SIZE,
1897 .setkey = ablkcipher_setkey, 1657 },
1898 .encrypt = ablkcipher_encrypt, 1658 .setkey = skcipher_setkey,
1899 .decrypt = ablkcipher_decrypt, 1659 .encrypt = skcipher_encrypt,
1900 .givencrypt = ablkcipher_givencrypt, 1660 .decrypt = skcipher_decrypt,
1901 .geniv = "<built-in>",
1902 .min_keysize = DES_KEY_SIZE, 1661 .min_keysize = DES_KEY_SIZE,
1903 .max_keysize = DES_KEY_SIZE, 1662 .max_keysize = DES_KEY_SIZE,
1904 .ivsize = DES_BLOCK_SIZE, 1663 .ivsize = DES_BLOCK_SIZE,
1905 }, 1664 },
1906 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1665 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1907 }, 1666 },
1908 { 1667 {
1909 .name = "ctr(aes)", 1668 .skcipher = {
1910 .driver_name = "ctr-aes-caam", 1669 .base = {
1911 .blocksize = 1, 1670 .cra_name = "ctr(aes)",
1912 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 1671 .cra_driver_name = "ctr-aes-caam",
1913 .template_ablkcipher = { 1672 .cra_blocksize = 1,
1914 .setkey = ablkcipher_setkey, 1673 },
1915 .encrypt = ablkcipher_encrypt, 1674 .setkey = skcipher_setkey,
1916 .decrypt = ablkcipher_decrypt, 1675 .encrypt = skcipher_encrypt,
1917 .geniv = "chainiv", 1676 .decrypt = skcipher_decrypt,
1918 .min_keysize = AES_MIN_KEY_SIZE, 1677 .min_keysize = AES_MIN_KEY_SIZE,
1919 .max_keysize = AES_MAX_KEY_SIZE, 1678 .max_keysize = AES_MAX_KEY_SIZE,
1920 .ivsize = AES_BLOCK_SIZE, 1679 .ivsize = AES_BLOCK_SIZE,
1921 }, 1680 .chunksize = AES_BLOCK_SIZE,
1922 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 1681 },
1682 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1683 OP_ALG_AAI_CTR_MOD128,
1923 }, 1684 },
1924 { 1685 {
1925 .name = "rfc3686(ctr(aes))", 1686 .skcipher = {
1926 .driver_name = "rfc3686-ctr-aes-caam", 1687 .base = {
1927 .blocksize = 1, 1688 .cra_name = "rfc3686(ctr(aes))",
1928 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1689 .cra_driver_name = "rfc3686-ctr-aes-caam",
1929 .template_ablkcipher = { 1690 .cra_blocksize = 1,
1930 .setkey = ablkcipher_setkey, 1691 },
1931 .encrypt = ablkcipher_encrypt, 1692 .setkey = skcipher_setkey,
1932 .decrypt = ablkcipher_decrypt, 1693 .encrypt = skcipher_encrypt,
1933 .givencrypt = ablkcipher_givencrypt, 1694 .decrypt = skcipher_decrypt,
1934 .geniv = "<built-in>",
1935 .min_keysize = AES_MIN_KEY_SIZE + 1695 .min_keysize = AES_MIN_KEY_SIZE +
1936 CTR_RFC3686_NONCE_SIZE, 1696 CTR_RFC3686_NONCE_SIZE,
1937 .max_keysize = AES_MAX_KEY_SIZE + 1697 .max_keysize = AES_MAX_KEY_SIZE +
1938 CTR_RFC3686_NONCE_SIZE, 1698 CTR_RFC3686_NONCE_SIZE,
1939 .ivsize = CTR_RFC3686_IV_SIZE, 1699 .ivsize = CTR_RFC3686_IV_SIZE,
1940 }, 1700 .chunksize = AES_BLOCK_SIZE,
1941 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 1701 },
1702 .caam = {
1703 .class1_alg_type = OP_ALG_ALGSEL_AES |
1704 OP_ALG_AAI_CTR_MOD128,
1705 .rfc3686 = true,
1706 },
1942 }, 1707 },
1943 { 1708 {
1944 .name = "xts(aes)", 1709 .skcipher = {
1945 .driver_name = "xts-aes-caam", 1710 .base = {
1946 .blocksize = AES_BLOCK_SIZE, 1711 .cra_name = "xts(aes)",
1947 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 1712 .cra_driver_name = "xts-aes-caam",
1948 .template_ablkcipher = { 1713 .cra_blocksize = AES_BLOCK_SIZE,
1949 .setkey = xts_ablkcipher_setkey, 1714 },
1950 .encrypt = ablkcipher_encrypt, 1715 .setkey = xts_skcipher_setkey,
1951 .decrypt = ablkcipher_decrypt, 1716 .encrypt = skcipher_encrypt,
1952 .geniv = "eseqiv", 1717 .decrypt = skcipher_decrypt,
1953 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1718 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1954 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1719 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1955 .ivsize = AES_BLOCK_SIZE, 1720 .ivsize = AES_BLOCK_SIZE,
1956 }, 1721 },
1957 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 1722 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1958 }, 1723 },
1959}; 1724};
1960 1725
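
The caam_alg_template union and the CRYPTO_ALG_TYPE_* bookkeeping above collapse into plain struct skcipher_alg entries, each carrying its CAAM class-1 descriptor type next to it and registered directly with the skcipher API. A minimal self-contained sketch of one such entry and its registration, using hypothetical driver names, stub callbacks, and only generic crypto API calls:

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int hypothetical_setkey(struct crypto_skcipher *t, const u8 *k,
                               unsigned int kl) { return 0; }   /* stub */
static int hypothetical_encrypt(struct skcipher_request *r) { return 0; }
static int hypothetical_decrypt(struct skcipher_request *r) { return 0; }

static struct skcipher_alg hypothetical_cbc_aes = {
        .base = {
                .cra_name        = "cbc(aes)",
                .cra_driver_name = "cbc-aes-hypothetical",
                .cra_blocksize   = AES_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
        },
        .setkey      = hypothetical_setkey,
        .encrypt     = hypothetical_encrypt,
        .decrypt     = hypothetical_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize      = AES_BLOCK_SIZE,
};

static int __init hypothetical_init(void)
{
        /* One call per algorithm; drivers fill in cra_flags such as
         * CRYPTO_ALG_ASYNC before registering. */
        return crypto_register_skcipher(&hypothetical_cbc_aes);
}

static void __exit hypothetical_exit(void)
{
        crypto_unregister_skcipher(&hypothetical_cbc_aes);
}

module_init(hypothetical_init);
module_exit(hypothetical_exit);
MODULE_LICENSE("GPL");
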
@@ -3239,12 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
3239 }, 3004 },
3240}; 3005};
3241 3006
3242struct caam_crypto_alg {
3243 struct crypto_alg crypto_alg;
3244 struct list_head entry;
3245 struct caam_alg_entry caam;
3246};
3247
3248static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 3007static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3249 bool uses_dkp) 3008 bool uses_dkp)
3250{ 3009{
@@ -3276,8 +3035,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3276 ctx->sh_desc_enc_dma = dma_addr; 3035 ctx->sh_desc_enc_dma = dma_addr;
3277 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, 3036 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3278 sh_desc_dec); 3037 sh_desc_dec);
3279 ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
3280 sh_desc_givenc);
3281 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key); 3038 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
3282 3039
3283 /* copy descriptor header template value */ 3040 /* copy descriptor header template value */
@@ -3287,14 +3044,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3287 return 0; 3044 return 0;
3288} 3045}
3289 3046
3290static int caam_cra_init(struct crypto_tfm *tfm) 3047static int caam_cra_init(struct crypto_skcipher *tfm)
3291{ 3048{
3292 struct crypto_alg *alg = tfm->__crt_alg; 3049 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3293 struct caam_crypto_alg *caam_alg = 3050 struct caam_skcipher_alg *caam_alg =
3294 container_of(alg, struct caam_crypto_alg, crypto_alg); 3051 container_of(alg, typeof(*caam_alg), skcipher);
3295 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3296 3052
3297 return caam_init_common(ctx, &caam_alg->caam, false); 3053 return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
3054 false);
3298} 3055}
3299 3056
3300static int caam_aead_init(struct crypto_aead *tfm) 3057static int caam_aead_init(struct crypto_aead *tfm)
@@ -3316,9 +3073,9 @@ static void caam_exit_common(struct caam_ctx *ctx)
3316 caam_jr_free(ctx->jrdev); 3073 caam_jr_free(ctx->jrdev);
3317} 3074}
3318 3075
3319static void caam_cra_exit(struct crypto_tfm *tfm) 3076static void caam_cra_exit(struct crypto_skcipher *tfm)
3320{ 3077{
3321 caam_exit_common(crypto_tfm_ctx(tfm)); 3078 caam_exit_common(crypto_skcipher_ctx(tfm));
3322} 3079}
3323 3080
3324static void caam_aead_exit(struct crypto_aead *tfm) 3081static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3328,8 +3085,6 @@ static void caam_aead_exit(struct crypto_aead *tfm)
3328 3085
3329static void __exit caam_algapi_exit(void) 3086static void __exit caam_algapi_exit(void)
3330{ 3087{
3331
3332 struct caam_crypto_alg *t_alg, *n;
3333 int i; 3088 int i;
3334 3089
3335 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3090 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3339,57 +3094,25 @@ static void __exit caam_algapi_exit(void)
3339 crypto_unregister_aead(&t_alg->aead); 3094 crypto_unregister_aead(&t_alg->aead);
3340 } 3095 }
3341 3096
3342 if (!alg_list.next) 3097 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3343 return; 3098 struct caam_skcipher_alg *t_alg = driver_algs + i;
3344 3099
3345 list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 3100 if (t_alg->registered)
3346 crypto_unregister_alg(&t_alg->crypto_alg); 3101 crypto_unregister_skcipher(&t_alg->skcipher);
3347 list_del(&t_alg->entry);
3348 kfree(t_alg);
3349 } 3102 }
3350} 3103}
3351 3104
3352static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 3105static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3353 *template)
3354{ 3106{
3355 struct caam_crypto_alg *t_alg; 3107 struct skcipher_alg *alg = &t_alg->skcipher;
3356 struct crypto_alg *alg;
3357 3108
3358 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 3109 alg->base.cra_module = THIS_MODULE;
3359 if (!t_alg) { 3110 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3360 pr_err("failed to allocate t_alg\n"); 3111 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3361 return ERR_PTR(-ENOMEM); 3112 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
3362 }
3363
3364 alg = &t_alg->crypto_alg;
3365
3366 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3367 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3368 template->driver_name);
3369 alg->cra_module = THIS_MODULE;
3370 alg->cra_init = caam_cra_init;
3371 alg->cra_exit = caam_cra_exit;
3372 alg->cra_priority = CAAM_CRA_PRIORITY;
3373 alg->cra_blocksize = template->blocksize;
3374 alg->cra_alignmask = 0;
3375 alg->cra_ctxsize = sizeof(struct caam_ctx);
3376 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3377 template->type;
3378 switch (template->type) {
3379 case CRYPTO_ALG_TYPE_GIVCIPHER:
3380 alg->cra_type = &crypto_givcipher_type;
3381 alg->cra_ablkcipher = template->template_ablkcipher;
3382 break;
3383 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3384 alg->cra_type = &crypto_ablkcipher_type;
3385 alg->cra_ablkcipher = template->template_ablkcipher;
3386 break;
3387 }
3388
3389 t_alg->caam.class1_alg_type = template->class1_alg_type;
3390 t_alg->caam.class2_alg_type = template->class2_alg_type;
3391 3113
3392 return t_alg; 3114 alg->init = caam_cra_init;
3115 alg->exit = caam_cra_exit;
3393} 3116}
3394 3117
3395static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 3118static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3441,8 +3164,6 @@ static int __init caam_algapi_init(void)
3441 return -ENODEV; 3164 return -ENODEV;
3442 3165
3443 3166
3444 INIT_LIST_HEAD(&alg_list);
3445
3446 /* 3167 /*
3447 * Register crypto algorithms the device supports. 3168 * Register crypto algorithms the device supports.
3448 * First, detect presence and attributes of DES, AES, and MD blocks. 3169 * First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3458,9 +3179,8 @@ static int __init caam_algapi_init(void)
3458 md_limit = SHA256_DIGEST_SIZE; 3179 md_limit = SHA256_DIGEST_SIZE;
3459 3180
3460 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 3181 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3461 struct caam_crypto_alg *t_alg; 3182 struct caam_skcipher_alg *t_alg = driver_algs + i;
3462 struct caam_alg_template *alg = driver_algs + i; 3183 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
3463 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
3464 3184
3465 /* Skip DES algorithms if not supported by device */ 3185 /* Skip DES algorithms if not supported by device */
3466 if (!des_inst && 3186 if (!des_inst &&
@@ -3477,26 +3197,20 @@ static int __init caam_algapi_init(void)
3477 * on LP devices. 3197 * on LP devices.
3478 */ 3198 */
3479 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 3199 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3480 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == 3200 if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
3481 OP_ALG_AAI_XTS) 3201 OP_ALG_AAI_XTS)
3482 continue; 3202 continue;
3483 3203
3484 t_alg = caam_alg_alloc(alg); 3204 caam_skcipher_alg_init(t_alg);
3485 if (IS_ERR(t_alg)) {
3486 err = PTR_ERR(t_alg);
3487 pr_warn("%s alg allocation failed\n", alg->driver_name);
3488 continue;
3489 }
3490 3205
3491 err = crypto_register_alg(&t_alg->crypto_alg); 3206 err = crypto_register_skcipher(&t_alg->skcipher);
3492 if (err) { 3207 if (err) {
3493 pr_warn("%s alg registration failed\n", 3208 pr_warn("%s alg registration failed\n",
3494 t_alg->crypto_alg.cra_driver_name); 3209 t_alg->skcipher.base.cra_driver_name);
3495 kfree(t_alg);
3496 continue; 3210 continue;
3497 } 3211 }
3498 3212
3499 list_add_tail(&t_alg->entry, &alg_list); 3213 t_alg->registered = true;
3500 registered = true; 3214 registered = true;
3501 } 3215 }
3502 3216
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd84f34..1a6f0da14106 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Shared descriptors for aead, ablkcipher algorithms 3 * Shared descriptors for aead, skcipher algorithms
3 * 4 *
4 * Copyright 2016 NXP 5 * Copyright 2016-2018 NXP
5 */ 6 */
6 7
7#include "compat.h" 8#include "compat.h"
@@ -1212,11 +1213,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
1212} 1213}
1213EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); 1214EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
1214 1215
1215/* 1216/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
1216 * For ablkcipher encrypt and decrypt, read from req->src and 1217static inline void skcipher_append_src_dst(u32 *desc)
1217 * write to req->dst
1218 */
1219static inline void ablkcipher_append_src_dst(u32 *desc)
1220{ 1218{
1221 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 1219 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1222 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 1220 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1224,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
1226} 1224}
1227 1225
1228/** 1226/**
1229 * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor 1227 * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
1230 * @desc: pointer to buffer used for descriptor construction 1228 * @desc: pointer to buffer used for descriptor construction
1231 * @cdata: pointer to block cipher transform definitions 1229 * @cdata: pointer to block cipher transform definitions
1232 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 1230 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1233,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
1235 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 1233 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
1236 * @ctx1_iv_off: IV offset in CONTEXT1 register 1234 * @ctx1_iv_off: IV offset in CONTEXT1 register
1237 */ 1235 */
1238void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, 1236void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
1239 unsigned int ivsize, const bool is_rfc3686, 1237 unsigned int ivsize, const bool is_rfc3686,
1240 const u32 ctx1_iv_off) 1238 const u32 ctx1_iv_off)
1241{ 1239{
1242 u32 *key_jump_cmd; 1240 u32 *key_jump_cmd;
1243 1241
@@ -1280,18 +1278,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
1280 OP_ALG_ENCRYPT); 1278 OP_ALG_ENCRYPT);
1281 1279
1282 /* Perform operation */ 1280 /* Perform operation */
1283 ablkcipher_append_src_dst(desc); 1281 skcipher_append_src_dst(desc);
1284 1282
1285#ifdef DEBUG 1283#ifdef DEBUG
1286 print_hex_dump(KERN_ERR, 1284 print_hex_dump(KERN_ERR,
1287 "ablkcipher enc shdesc@" __stringify(__LINE__)": ", 1285 "skcipher enc shdesc@" __stringify(__LINE__)": ",
1288 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1286 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1289#endif 1287#endif
1290} 1288}
1291EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap); 1289EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
1292 1290
1293/** 1291/**
1294 * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor 1292 * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
1295 * @desc: pointer to buffer used for descriptor construction 1293 * @desc: pointer to buffer used for descriptor construction
1296 * @cdata: pointer to block cipher transform definitions 1294 * @cdata: pointer to block cipher transform definitions
1297 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 1295 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1300,9 +1298,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
1300 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 1298 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
1301 * @ctx1_iv_off: IV offset in CONTEXT1 register 1299 * @ctx1_iv_off: IV offset in CONTEXT1 register
1302 */ 1300 */
1303void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, 1301void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
1304 unsigned int ivsize, const bool is_rfc3686, 1302 unsigned int ivsize, const bool is_rfc3686,
1305 const u32 ctx1_iv_off) 1303 const u32 ctx1_iv_off)
1306{ 1304{
1307 u32 *key_jump_cmd; 1305 u32 *key_jump_cmd;
1308 1306
@@ -1348,105 +1346,23 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
1348 append_dec_op1(desc, cdata->algtype); 1346 append_dec_op1(desc, cdata->algtype);
1349 1347
1350 /* Perform operation */ 1348 /* Perform operation */
1351 ablkcipher_append_src_dst(desc); 1349 skcipher_append_src_dst(desc);
1352
1353#ifdef DEBUG
1354 print_hex_dump(KERN_ERR,
1355 "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
1356 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1357#endif
1358}
1359EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
1360
1361/**
1362 * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
1363 * with HW-generated initialization vector.
1364 * @desc: pointer to buffer used for descriptor construction
1365 * @cdata: pointer to block cipher transform definitions
1366 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
1367 * with OP_ALG_AAI_CBC.
1368 * @ivsize: initialization vector size
1369 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
1370 * @ctx1_iv_off: IV offset in CONTEXT1 register
1371 */
1372void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
1373 unsigned int ivsize, const bool is_rfc3686,
1374 const u32 ctx1_iv_off)
1375{
1376 u32 *key_jump_cmd, geniv;
1377
1378 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
1379 /* Skip if already shared */
1380 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1381 JUMP_COND_SHRD);
1382
1383 /* Load class1 key only */
1384 append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
1385 cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
1386
1387 /* Load Nonce into CONTEXT1 reg */
1388 if (is_rfc3686) {
1389 const u8 *nonce = cdata->key_virt + cdata->keylen;
1390
1391 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
1392 LDST_CLASS_IND_CCB |
1393 LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
1394 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
1395 MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
1396 (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
1397 }
1398 set_jump_tgt_here(desc, key_jump_cmd);
1399
1400 /* Generate IV */
1401 geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
1402 NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
1403 (ivsize << NFIFOENTRY_DLEN_SHIFT);
1404 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
1405 LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
1406 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
1407 append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
1408 MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
1409 (ctx1_iv_off << MOVE_OFFSET_SHIFT));
1410 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
1411
1412 /* Copy generated IV to memory */
1413 append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
1414 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
1415
1416 /* Load Counter into CONTEXT1 reg */
1417 if (is_rfc3686)
1418 append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
1419 LDST_SRCDST_BYTE_CONTEXT |
1420 ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
1421 LDST_OFFSET_SHIFT));
1422
1423 if (ctx1_iv_off)
1424 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
1425 (1 << JUMP_OFFSET_SHIFT));
1426
1427 /* Load operation */
1428 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
1429 OP_ALG_ENCRYPT);
1430
1431 /* Perform operation */
1432 ablkcipher_append_src_dst(desc);
1433 1350
1434#ifdef DEBUG 1351#ifdef DEBUG
1435 print_hex_dump(KERN_ERR, 1352 print_hex_dump(KERN_ERR,
1436 "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ", 1353 "skcipher dec shdesc@" __stringify(__LINE__)": ",
1437 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1354 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1438#endif 1355#endif
1439} 1356}
1440EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap); 1357EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
1441 1358
1442/** 1359/**
1443 * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared 1360 * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
1444 * descriptor
1445 * @desc: pointer to buffer used for descriptor construction 1361 * @desc: pointer to buffer used for descriptor construction
1446 * @cdata: pointer to block cipher transform definitions 1362 * @cdata: pointer to block cipher transform definitions
1447 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. 1363 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
1448 */ 1364 */
1449void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata) 1365void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
1450{ 1366{
1451 __be64 sector_size = cpu_to_be64(512); 1367 __be64 sector_size = cpu_to_be64(512);
1452 u32 *key_jump_cmd; 1368 u32 *key_jump_cmd;
@@ -1481,24 +1397,23 @@ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
1481 OP_ALG_ENCRYPT); 1397 OP_ALG_ENCRYPT);
1482 1398
1483 /* Perform operation */ 1399 /* Perform operation */
1484 ablkcipher_append_src_dst(desc); 1400 skcipher_append_src_dst(desc);
1485 1401
1486#ifdef DEBUG 1402#ifdef DEBUG
1487 print_hex_dump(KERN_ERR, 1403 print_hex_dump(KERN_ERR,
1488 "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ", 1404 "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
1489 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1405 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1490#endif 1406#endif
1491} 1407}
1492EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap); 1408EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
1493 1409
1494/** 1410/**
1495 * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared 1411 * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
1496 * descriptor
1497 * @desc: pointer to buffer used for descriptor construction 1412 * @desc: pointer to buffer used for descriptor construction
1498 * @cdata: pointer to block cipher transform definitions 1413 * @cdata: pointer to block cipher transform definitions
1499 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. 1414 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
1500 */ 1415 */
1501void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata) 1416void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
1502{ 1417{
1503 __be64 sector_size = cpu_to_be64(512); 1418 __be64 sector_size = cpu_to_be64(512);
1504 u32 *key_jump_cmd; 1419 u32 *key_jump_cmd;
@@ -1532,15 +1447,15 @@ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
1532 append_dec_op1(desc, cdata->algtype); 1447 append_dec_op1(desc, cdata->algtype);
1533 1448
1534 /* Perform operation */ 1449 /* Perform operation */
1535 ablkcipher_append_src_dst(desc); 1450 skcipher_append_src_dst(desc);
1536 1451
1537#ifdef DEBUG 1452#ifdef DEBUG
1538 print_hex_dump(KERN_ERR, 1453 print_hex_dump(KERN_ERR,
1539 "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ", 1454 "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
1540 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1455 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1541#endif 1456#endif
1542} 1457}
1543EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap); 1458EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
1544 1459
1545MODULE_LICENSE("GPL"); 1460MODULE_LICENSE("GPL");
1546MODULE_DESCRIPTION("FSL CAAM descriptor support"); 1461MODULE_DESCRIPTION("FSL CAAM descriptor support");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5776ce..1315c8f6f951 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Shared descriptors for aead, ablkcipher algorithms 3 * Shared descriptors for aead, skcipher algorithms
4 * 4 *
5 * Copyright 2016 NXP 5 * Copyright 2016 NXP
6 */ 6 */
@@ -42,10 +42,10 @@
42#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ) 42#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
43#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ) 43#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
44 44
45#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) 45#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
46#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ 46#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
47 20 * CAAM_CMD_SZ) 47 20 * CAAM_CMD_SZ)
48#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ 48#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
49 15 * CAAM_CMD_SZ) 49 15 * CAAM_CMD_SZ)
50 50
51void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 51void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -96,20 +96,16 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
96 unsigned int ivsize, unsigned int icvsize, 96 unsigned int ivsize, unsigned int icvsize,
97 const bool is_qi); 97 const bool is_qi);
98 98
99void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, 99void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
100 unsigned int ivsize, const bool is_rfc3686, 100 unsigned int ivsize, const bool is_rfc3686,
101 const u32 ctx1_iv_off); 101 const u32 ctx1_iv_off);
102 102
103void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata, 103void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
104 unsigned int ivsize, const bool is_rfc3686, 104 unsigned int ivsize, const bool is_rfc3686,
105 const u32 ctx1_iv_off); 105 const u32 ctx1_iv_off);
106 106
107void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata, 107void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
108 unsigned int ivsize, const bool is_rfc3686,
109 const u32 ctx1_iv_off);
110 108
111void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata); 109void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
112
113void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
114 110
115#endif /* _CAAMALG_DESC_H_ */ 111#endif /* _CAAMALG_DESC_H_ */
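To show how the renamed constructors declared above are intended to be called, here is a hedged sketch modelled on the driver's skcipher_setkey() paths. The function name and caller-provided buffers are assumptions, the alginfo field names follow the driver's own struct alginfo from desc_constr.h, and the buffers are assumed to hold at least DESC_SKCIPHER_ENC_LEN / DESC_SKCIPHER_DEC_LEN bytes.

#include "compat.h"
#include "desc_constr.h"
#include "caamalg_desc.h"

/*
 * Build cbc(aes) encrypt/decrypt shared descriptors the way the driver's
 * skcipher_setkey() does: key inlined into the descriptor, IV at offset 0
 * of CONTEXT1, no rfc3686 nonce.
 */
static void example_cbc_aes_shdescs(u32 *sh_desc_enc, u32 *sh_desc_dec,
				    const u8 *key, unsigned int keylen)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	};

	cdata.keylen = keylen;
	cdata.key_virt = key;
	cdata.key_inline = true;

	cnstr_shdsc_skcipher_encap(sh_desc_enc, &cdata, AES_BLOCK_SIZE,
				   false, 0);
	cnstr_shdsc_skcipher_decap(sh_desc_dec, &cdata, AES_BLOCK_SIZE,
				   false, 0);
}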
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d7aa7d7ff102..23c9fc4975f8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,9 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Freescale FSL CAAM support for crypto API over QI backend. 3 * Freescale FSL CAAM support for crypto API over QI backend.
3 * Based on caamalg.c 4 * Based on caamalg.c
4 * 5 *
5 * Copyright 2013-2016 Freescale Semiconductor, Inc. 6 * Copyright 2013-2016 Freescale Semiconductor, Inc.
6 * Copyright 2016-2017 NXP 7 * Copyright 2016-2018 NXP
7 */ 8 */
8 9
9#include "compat.h" 10#include "compat.h"
@@ -43,6 +44,12 @@ struct caam_aead_alg {
43 bool registered; 44 bool registered;
44}; 45};
45 46
47struct caam_skcipher_alg {
48 struct skcipher_alg skcipher;
49 struct caam_alg_entry caam;
50 bool registered;
51};
52
46/* 53/*
47 * per-session context 54 * per-session context
48 */ 55 */
@@ -50,7 +57,6 @@ struct caam_ctx {
50 struct device *jrdev; 57 struct device *jrdev;
51 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 58 u32 sh_desc_enc[DESC_MAX_USED_LEN];
52 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 59 u32 sh_desc_dec[DESC_MAX_USED_LEN];
53 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
54 u8 key[CAAM_MAX_KEY_SIZE]; 60 u8 key[CAAM_MAX_KEY_SIZE];
55 dma_addr_t key_dma; 61 dma_addr_t key_dma;
56 enum dma_data_direction dir; 62 enum dma_data_direction dir;
@@ -589,18 +595,19 @@ static int rfc4543_setkey(struct crypto_aead *aead,
589 return 0; 595 return 0;
590} 596}
591 597
592static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 598static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
593 const u8 *key, unsigned int keylen) 599 unsigned int keylen)
594{ 600{
595 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 601 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
596 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); 602 struct caam_skcipher_alg *alg =
597 const char *alg_name = crypto_tfm_alg_name(tfm); 603 container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
604 skcipher);
598 struct device *jrdev = ctx->jrdev; 605 struct device *jrdev = ctx->jrdev;
599 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 606 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
600 u32 ctx1_iv_off = 0; 607 u32 ctx1_iv_off = 0;
601 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 608 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
602 OP_ALG_AAI_CTR_MOD128); 609 OP_ALG_AAI_CTR_MOD128);
603 const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); 610 const bool is_rfc3686 = alg->caam.rfc3686;
604 int ret = 0; 611 int ret = 0;
605 612
606#ifdef DEBUG 613#ifdef DEBUG
@@ -629,13 +636,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
629 ctx->cdata.key_virt = key; 636 ctx->cdata.key_virt = key;
630 ctx->cdata.key_inline = true; 637 ctx->cdata.key_inline = true;
631 638
632 /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ 639 /* skcipher encrypt, decrypt shared descriptors */
633 cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 640 cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
634 is_rfc3686, ctx1_iv_off); 641 is_rfc3686, ctx1_iv_off);
635 cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 642 cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
636 is_rfc3686, ctx1_iv_off); 643 is_rfc3686, ctx1_iv_off);
637 cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
638 ivsize, is_rfc3686, ctx1_iv_off);
639 644
640 /* Now update the driver contexts with the new shared descriptor */ 645 /* Now update the driver contexts with the new shared descriptor */
641 if (ctx->drv_ctx[ENCRYPT]) { 646 if (ctx->drv_ctx[ENCRYPT]) {
@@ -656,25 +661,16 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
656 } 661 }
657 } 662 }
658 663
659 if (ctx->drv_ctx[GIVENCRYPT]) {
660 ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
661 ctx->sh_desc_givenc);
662 if (ret) {
663 dev_err(jrdev, "driver givenc context update failed\n");
664 goto badkey;
665 }
666 }
667
668 return ret; 664 return ret;
669badkey: 665badkey:
670 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 666 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
671 return -EINVAL; 667 return -EINVAL;
672} 668}
673 669
674static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 670static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
675 const u8 *key, unsigned int keylen) 671 unsigned int keylen)
676{ 672{
677 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 673 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
678 struct device *jrdev = ctx->jrdev; 674 struct device *jrdev = ctx->jrdev;
679 int ret = 0; 675 int ret = 0;
680 676
@@ -687,9 +683,9 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
687 ctx->cdata.key_virt = key; 683 ctx->cdata.key_virt = key;
688 ctx->cdata.key_inline = true; 684 ctx->cdata.key_inline = true;
689 685
690 /* xts ablkcipher encrypt, decrypt shared descriptors */ 686 /* xts skcipher encrypt, decrypt shared descriptors */
691 cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata); 687 cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
692 cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata); 688 cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
693 689
694 /* Now update the driver contexts with the new shared descriptor */ 690 /* Now update the driver contexts with the new shared descriptor */
695 if (ctx->drv_ctx[ENCRYPT]) { 691 if (ctx->drv_ctx[ENCRYPT]) {
@@ -712,7 +708,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
712 708
713 return ret; 709 return ret;
714badkey: 710badkey:
715 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 711 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
716 return -EINVAL; 712 return -EINVAL;
717} 713}
718 714
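The rfc3686 handling above now keys off the .rfc3686 flag in caam_alg_entry instead of parsing the algorithm name, but the key format is unchanged: callers append the 4-byte nonce to the raw AES key, which is why the templates advertise min/max_keysize of the AES key size plus CTR_RFC3686_NONCE_SIZE. A hedged caller-side sketch follows; the example_* name is illustrative.

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/skcipher.h>
#include <linux/errno.h>
#include <linux/string.h>

/*
 * Build the key blob expected by "rfc3686(ctr(aes))": raw AES key followed
 * by the 4-byte nonce. The driver's skcipher_setkey() strips the nonce and
 * loads it into CONTEXT1.
 */
static int example_rfc3686_setkey(struct crypto_skcipher *tfm,
				  const u8 *aes_key, unsigned int aes_keylen,
				  const u8 *nonce)
{
	u8 blob[AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE];
	int err;

	if (aes_keylen > AES_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(blob, aes_key, aes_keylen);
	memcpy(blob + aes_keylen, nonce, CTR_RFC3686_NONCE_SIZE);

	err = crypto_skcipher_setkey(tfm, blob,
				     aes_keylen + CTR_RFC3686_NONCE_SIZE);

	memzero_explicit(blob, sizeof(blob));	/* don't leave key material around */
	return err;
}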
@@ -741,7 +737,7 @@ struct aead_edesc {
741}; 737};
742 738
743/* 739/*
744 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 740 * skcipher_edesc - s/w-extended skcipher descriptor
745 * @src_nents: number of segments in input scatterlist 741 * @src_nents: number of segments in input scatterlist
746 * @dst_nents: number of segments in output scatterlist 742 * @dst_nents: number of segments in output scatterlist
747 * @iv_dma: dma address of iv for checking continuity and link table 743 * @iv_dma: dma address of iv for checking continuity and link table
@@ -750,7 +746,7 @@ struct aead_edesc {
750 * @drv_req: driver-specific request structure 746 * @drv_req: driver-specific request structure
751 * @sgt: the h/w link table, followed by IV 747 * @sgt: the h/w link table, followed by IV
752 */ 748 */
753struct ablkcipher_edesc { 749struct skcipher_edesc {
754 int src_nents; 750 int src_nents;
755 int dst_nents; 751 int dst_nents;
756 dma_addr_t iv_dma; 752 dma_addr_t iv_dma;
@@ -781,10 +777,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
781 777
782 if (type == ENCRYPT) 778 if (type == ENCRYPT)
783 desc = ctx->sh_desc_enc; 779 desc = ctx->sh_desc_enc;
784 else if (type == DECRYPT) 780 else /* (type == DECRYPT) */
785 desc = ctx->sh_desc_dec; 781 desc = ctx->sh_desc_dec;
786 else /* (type == GIVENCRYPT) */
787 desc = ctx->sh_desc_givenc;
788 782
789 cpu = smp_processor_id(); 783 cpu = smp_processor_id();
790 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); 784 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
@@ -803,8 +797,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
803static void caam_unmap(struct device *dev, struct scatterlist *src, 797static void caam_unmap(struct device *dev, struct scatterlist *src,
804 struct scatterlist *dst, int src_nents, 798 struct scatterlist *dst, int src_nents,
805 int dst_nents, dma_addr_t iv_dma, int ivsize, 799 int dst_nents, dma_addr_t iv_dma, int ivsize,
806 enum optype op_type, dma_addr_t qm_sg_dma, 800 dma_addr_t qm_sg_dma, int qm_sg_bytes)
807 int qm_sg_bytes)
808{ 801{
809 if (dst != src) { 802 if (dst != src) {
810 if (src_nents) 803 if (src_nents)
@@ -815,9 +808,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
815 } 808 }
816 809
817 if (iv_dma) 810 if (iv_dma)
818 dma_unmap_single(dev, iv_dma, ivsize, 811 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
819 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
820 DMA_TO_DEVICE);
821 if (qm_sg_bytes) 812 if (qm_sg_bytes)
822 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 813 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
823} 814}
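caam_unmap() above loses its op_type argument because, with the givencrypt path removed, the IV copy is only ever read by the device, so both map and unmap use DMA_TO_DEVICE. A minimal sketch of that matched pair, assuming a generic struct device and an IV already copied into a DMA-able area:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Map an IV copy for device read, hand it to the hardware, then unmap it
 * with the same direction. With only ENCRYPT/DECRYPT left, the direction
 * is fixed, which is what allowed op_type to be dropped from caam_unmap().
 */
static int example_map_iv(struct device *dev, u8 *iv, int ivsize)
{
	dma_addr_t iv_dma;

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iv_dma))
		return -ENOMEM;

	/* ... place iv_dma in the S/G table / job descriptor here ... */

	dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return 0;
}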
@@ -830,21 +821,18 @@ static void aead_unmap(struct device *dev,
830 int ivsize = crypto_aead_ivsize(aead); 821 int ivsize = crypto_aead_ivsize(aead);
831 822
832 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 823 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
833 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, 824 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
834 edesc->qm_sg_dma, edesc->qm_sg_bytes);
835 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 825 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
836} 826}
837 827
838static void ablkcipher_unmap(struct device *dev, 828static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
839 struct ablkcipher_edesc *edesc, 829 struct skcipher_request *req)
840 struct ablkcipher_request *req)
841{ 830{
842 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 831 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
843 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 832 int ivsize = crypto_skcipher_ivsize(skcipher);
844 833
845 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 834 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
846 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, 835 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
847 edesc->qm_sg_dma, edesc->qm_sg_bytes);
848} 836}
849 837
850static void aead_done(struct caam_drv_req *drv_req, u32 status) 838static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -902,9 +890,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
902 int in_len, out_len; 890 int in_len, out_len;
903 struct qm_sg_entry *sg_table, *fd_sgt; 891 struct qm_sg_entry *sg_table, *fd_sgt;
904 struct caam_drv_ctx *drv_ctx; 892 struct caam_drv_ctx *drv_ctx;
905 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
906 893
907 drv_ctx = get_drv_ctx(ctx, op_type); 894 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
908 if (unlikely(IS_ERR_OR_NULL(drv_ctx))) 895 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
909 return (struct aead_edesc *)drv_ctx; 896 return (struct aead_edesc *)drv_ctx;
910 897
@@ -994,7 +981,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
994 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 981 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
995 qm_sg_ents, ivsize); 982 qm_sg_ents, ivsize);
996 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 983 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
997 0, 0, 0, 0); 984 0, 0, 0);
998 qi_cache_free(edesc); 985 qi_cache_free(edesc);
999 return ERR_PTR(-ENOMEM); 986 return ERR_PTR(-ENOMEM);
1000 } 987 }
@@ -1009,7 +996,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1009 if (dma_mapping_error(qidev, iv_dma)) { 996 if (dma_mapping_error(qidev, iv_dma)) {
1010 dev_err(qidev, "unable to map IV\n"); 997 dev_err(qidev, "unable to map IV\n");
1011 caam_unmap(qidev, req->src, req->dst, src_nents, 998 caam_unmap(qidev, req->src, req->dst, src_nents,
1012 dst_nents, 0, 0, 0, 0, 0); 999 dst_nents, 0, 0, 0, 0);
1013 qi_cache_free(edesc); 1000 qi_cache_free(edesc);
1014 return ERR_PTR(-ENOMEM); 1001 return ERR_PTR(-ENOMEM);
1015 } 1002 }
@@ -1028,7 +1015,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1028 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { 1015 if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
1029 dev_err(qidev, "unable to map assoclen\n"); 1016 dev_err(qidev, "unable to map assoclen\n");
1030 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1017 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1031 iv_dma, ivsize, op_type, 0, 0); 1018 iv_dma, ivsize, 0, 0);
1032 qi_cache_free(edesc); 1019 qi_cache_free(edesc);
1033 return ERR_PTR(-ENOMEM); 1020 return ERR_PTR(-ENOMEM);
1034 } 1021 }
@@ -1051,7 +1038,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1051 dev_err(qidev, "unable to map S/G table\n"); 1038 dev_err(qidev, "unable to map S/G table\n");
1052 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1039 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1053 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1040 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1054 iv_dma, ivsize, op_type, 0, 0); 1041 iv_dma, ivsize, 0, 0);
1055 qi_cache_free(edesc); 1042 qi_cache_free(edesc);
1056 return ERR_PTR(-ENOMEM); 1043 return ERR_PTR(-ENOMEM);
1057 } 1044 }
@@ -1138,14 +1125,14 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
1138 return aead_crypt(req, false); 1125 return aead_crypt(req, false);
1139} 1126}
1140 1127
1141static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) 1128static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
1142{ 1129{
1143 struct ablkcipher_edesc *edesc; 1130 struct skcipher_edesc *edesc;
1144 struct ablkcipher_request *req = drv_req->app_ctx; 1131 struct skcipher_request *req = drv_req->app_ctx;
1145 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1132 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1146 struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher); 1133 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
1147 struct device *qidev = caam_ctx->qidev; 1134 struct device *qidev = caam_ctx->qidev;
1148 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1135 int ivsize = crypto_skcipher_ivsize(skcipher);
1149 1136
1150#ifdef DEBUG 1137#ifdef DEBUG
1151 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1138 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
@@ -1158,72 +1145,60 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
1158 1145
1159#ifdef DEBUG 1146#ifdef DEBUG
1160 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", 1147 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
1161 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1148 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1162 edesc->src_nents > 1 ? 100 : ivsize, 1); 1149 edesc->src_nents > 1 ? 100 : ivsize, 1);
1163 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 1150 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
1164 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1151 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1165 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 1152 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1166#endif 1153#endif
1167 1154
1168 ablkcipher_unmap(qidev, edesc, req); 1155 skcipher_unmap(qidev, edesc, req);
1169
1170 /* In case initial IV was generated, copy it in GIVCIPHER request */
1171 if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
1172 u8 *iv;
1173 struct skcipher_givcrypt_request *greq;
1174
1175 greq = container_of(req, struct skcipher_givcrypt_request,
1176 creq);
1177 iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
1178 memcpy(greq->giv, iv, ivsize);
1179 }
1180 1156
1181 /* 1157 /*
1182 * The crypto API expects us to set the IV (req->info) to the last 1158 * The crypto API expects us to set the IV (req->iv) to the last
1183 * ciphertext block. This is used e.g. by the CTS mode. 1159 * ciphertext block. This is used e.g. by the CTS mode.
1184 */ 1160 */
1185 if (edesc->drv_req.drv_ctx->op_type != DECRYPT) 1161 if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
1186 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - 1162 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
1187 ivsize, ivsize, 0); 1163 ivsize, ivsize, 0);
1188 1164
1189 qi_cache_free(edesc); 1165 qi_cache_free(edesc);
1190 ablkcipher_request_complete(req, status); 1166 skcipher_request_complete(req, status);
1191} 1167}
1192 1168
1193static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1169static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
1194 *req, bool encrypt) 1170 bool encrypt)
1195{ 1171{
1196 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1172 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1197 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1173 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1198 struct device *qidev = ctx->qidev; 1174 struct device *qidev = ctx->qidev;
1199 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1175 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1200 GFP_KERNEL : GFP_ATOMIC; 1176 GFP_KERNEL : GFP_ATOMIC;
1201 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1177 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1202 struct ablkcipher_edesc *edesc; 1178 struct skcipher_edesc *edesc;
1203 dma_addr_t iv_dma; 1179 dma_addr_t iv_dma;
1204 u8 *iv; 1180 u8 *iv;
1205 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1181 int ivsize = crypto_skcipher_ivsize(skcipher);
1206 int dst_sg_idx, qm_sg_ents, qm_sg_bytes; 1182 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1207 struct qm_sg_entry *sg_table, *fd_sgt; 1183 struct qm_sg_entry *sg_table, *fd_sgt;
1208 struct caam_drv_ctx *drv_ctx; 1184 struct caam_drv_ctx *drv_ctx;
1209 enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
1210 1185
1211 drv_ctx = get_drv_ctx(ctx, op_type); 1186 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
1212 if (unlikely(IS_ERR_OR_NULL(drv_ctx))) 1187 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1213 return (struct ablkcipher_edesc *)drv_ctx; 1188 return (struct skcipher_edesc *)drv_ctx;
1214 1189
1215 src_nents = sg_nents_for_len(req->src, req->nbytes); 1190 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1216 if (unlikely(src_nents < 0)) { 1191 if (unlikely(src_nents < 0)) {
1217 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 1192 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1218 req->nbytes); 1193 req->cryptlen);
1219 return ERR_PTR(src_nents); 1194 return ERR_PTR(src_nents);
1220 } 1195 }
1221 1196
1222 if (unlikely(req->src != req->dst)) { 1197 if (unlikely(req->src != req->dst)) {
1223 dst_nents = sg_nents_for_len(req->dst, req->nbytes); 1198 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1224 if (unlikely(dst_nents < 0)) { 1199 if (unlikely(dst_nents < 0)) {
1225 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 1200 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1226 req->nbytes); 1201 req->cryptlen);
1227 return ERR_PTR(dst_nents); 1202 return ERR_PTR(dst_nents);
1228 } 1203 }
1229 1204
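The scatterwalk_map_and_copy() above upholds the skcipher API contract that, once a request completes, req->iv holds the IV for a follow-on request (for CBC, the last ciphertext block). A hedged sketch of a caller relying on that to chain two block-aligned CBC chunks with one reusable iv buffer; the example_* name is illustrative and the tfm is assumed to already have its key set.

#include <crypto/skcipher.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Encrypt two consecutive CBC chunks with one transform, reusing iv[]:
 * after the first crypto_wait_req() returns, iv[] already contains the
 * last ciphertext block of chunk one, which is exactly the IV chunk two
 * needs. Both lengths must be multiples of the block size.
 */
static int example_chained_cbc(struct crypto_skcipher *tfm, u8 *iv,
			       u8 *chunk1, unsigned int len1,
			       u8 *chunk2, unsigned int len2)
{
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	sg_init_one(&sg, chunk1, len1);
	skcipher_request_set_crypt(req, &sg, &sg, len1, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (err)
		goto out;

	sg_init_one(&sg, chunk2, len2);
	skcipher_request_set_crypt(req, &sg, &sg, len2, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	return err;
}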
@@ -1255,12 +1230,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1255 1230
1256 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1231 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1257 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1232 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1258 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + 1233 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1259 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1234 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1260 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1235 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1261 qm_sg_ents, ivsize); 1236 qm_sg_ents, ivsize);
1262 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1237 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1263 0, 0, 0, 0); 1238 0, 0, 0);
1264 return ERR_PTR(-ENOMEM); 1239 return ERR_PTR(-ENOMEM);
1265 } 1240 }
1266 1241
@@ -1269,20 +1244,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1269 if (unlikely(!edesc)) { 1244 if (unlikely(!edesc)) {
1270 dev_err(qidev, "could not allocate extended descriptor\n"); 1245 dev_err(qidev, "could not allocate extended descriptor\n");
1271 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1246 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1272 0, 0, 0, 0); 1247 0, 0, 0);
1273 return ERR_PTR(-ENOMEM); 1248 return ERR_PTR(-ENOMEM);
1274 } 1249 }
1275 1250
1276 /* Make sure IV is located in a DMAable area */ 1251 /* Make sure IV is located in a DMAable area */
1277 sg_table = &edesc->sgt[0]; 1252 sg_table = &edesc->sgt[0];
1278 iv = (u8 *)(sg_table + qm_sg_ents); 1253 iv = (u8 *)(sg_table + qm_sg_ents);
1279 memcpy(iv, req->info, ivsize); 1254 memcpy(iv, req->iv, ivsize);
1280 1255
1281 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1256 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1282 if (dma_mapping_error(qidev, iv_dma)) { 1257 if (dma_mapping_error(qidev, iv_dma)) {
1283 dev_err(qidev, "unable to map IV\n"); 1258 dev_err(qidev, "unable to map IV\n");
1284 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1259 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1285 0, 0, 0, 0); 1260 0, 0, 0);
1286 qi_cache_free(edesc); 1261 qi_cache_free(edesc);
1287 return ERR_PTR(-ENOMEM); 1262 return ERR_PTR(-ENOMEM);
1288 } 1263 }
@@ -1292,7 +1267,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1292 edesc->iv_dma = iv_dma; 1267 edesc->iv_dma = iv_dma;
1293 edesc->qm_sg_bytes = qm_sg_bytes; 1268 edesc->qm_sg_bytes = qm_sg_bytes;
1294 edesc->drv_req.app_ctx = req; 1269 edesc->drv_req.app_ctx = req;
1295 edesc->drv_req.cbk = ablkcipher_done; 1270 edesc->drv_req.cbk = skcipher_done;
1296 edesc->drv_req.drv_ctx = drv_ctx; 1271 edesc->drv_req.drv_ctx = drv_ctx;
1297 1272
1298 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1273 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
@@ -1307,7 +1282,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1307 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { 1282 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1308 dev_err(qidev, "unable to map S/G table\n"); 1283 dev_err(qidev, "unable to map S/G table\n");
1309 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1284 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1310 iv_dma, ivsize, op_type, 0, 0); 1285 iv_dma, ivsize, 0, 0);
1311 qi_cache_free(edesc); 1286 qi_cache_free(edesc);
1312 return ERR_PTR(-ENOMEM); 1287 return ERR_PTR(-ENOMEM);
1313 } 1288 }
@@ -1315,348 +1290,172 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1315 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1290 fd_sgt = &edesc->drv_req.fd_sgt[0];
1316 1291
1317 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, 1292 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1318 ivsize + req->nbytes, 0); 1293 ivsize + req->cryptlen, 0);
1319 1294
1320 if (req->src == req->dst) { 1295 if (req->src == req->dst) {
1321 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + 1296 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1322 sizeof(*sg_table), req->nbytes, 0); 1297 sizeof(*sg_table), req->cryptlen, 0);
1323 } else if (mapped_dst_nents > 1) { 1298 } else if (mapped_dst_nents > 1) {
1324 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * 1299 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1325 sizeof(*sg_table), req->nbytes, 0); 1300 sizeof(*sg_table), req->cryptlen, 0);
1326 } else { 1301 } else {
1327 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), 1302 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1328 req->nbytes, 0); 1303 req->cryptlen, 0);
1329 }
1330
1331 return edesc;
1332}
1333
1334static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1335 struct skcipher_givcrypt_request *creq)
1336{
1337 struct ablkcipher_request *req = &creq->creq;
1338 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1339 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1340 struct device *qidev = ctx->qidev;
1341 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1342 GFP_KERNEL : GFP_ATOMIC;
1343 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1344 struct ablkcipher_edesc *edesc;
1345 dma_addr_t iv_dma;
1346 u8 *iv;
1347 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1348 struct qm_sg_entry *sg_table, *fd_sgt;
1349 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1350 struct caam_drv_ctx *drv_ctx;
1351
1352 drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1353 if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
1354 return (struct ablkcipher_edesc *)drv_ctx;
1355
1356 src_nents = sg_nents_for_len(req->src, req->nbytes);
1357 if (unlikely(src_nents < 0)) {
1358 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
1359 req->nbytes);
1360 return ERR_PTR(src_nents);
1361 }
1362
1363 if (unlikely(req->src != req->dst)) {
1364 dst_nents = sg_nents_for_len(req->dst, req->nbytes);
1365 if (unlikely(dst_nents < 0)) {
1366 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
1367 req->nbytes);
1368 return ERR_PTR(dst_nents);
1369 }
1370
1371 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1372 DMA_TO_DEVICE);
1373 if (unlikely(!mapped_src_nents)) {
1374 dev_err(qidev, "unable to map source\n");
1375 return ERR_PTR(-ENOMEM);
1376 }
1377
1378 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
1379 DMA_FROM_DEVICE);
1380 if (unlikely(!mapped_dst_nents)) {
1381 dev_err(qidev, "unable to map destination\n");
1382 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
1383 return ERR_PTR(-ENOMEM);
1384 }
1385 } else {
1386 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
1387 DMA_BIDIRECTIONAL);
1388 if (unlikely(!mapped_src_nents)) {
1389 dev_err(qidev, "unable to map source\n");
1390 return ERR_PTR(-ENOMEM);
1391 }
1392
1393 dst_nents = src_nents;
1394 mapped_dst_nents = src_nents;
1395 }
1396
1397 qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1398 dst_sg_idx = qm_sg_ents;
1399
1400 qm_sg_ents += 1 + mapped_dst_nents;
1401 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1402 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1403 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1404 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1405 qm_sg_ents, ivsize);
1406 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1407 0, 0, 0, 0);
1408 return ERR_PTR(-ENOMEM);
1409 }
1410
1411 /* allocate space for base edesc, link tables and IV */
1412 edesc = qi_cache_alloc(GFP_DMA | flags);
1413 if (!edesc) {
1414 dev_err(qidev, "could not allocate extended descriptor\n");
1415 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1416 0, 0, 0, 0);
1417 return ERR_PTR(-ENOMEM);
1418 }
1419
1420 /* Make sure IV is located in a DMAable area */
1421 sg_table = &edesc->sgt[0];
1422 iv = (u8 *)(sg_table + qm_sg_ents);
1423 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
1424 if (dma_mapping_error(qidev, iv_dma)) {
1425 dev_err(qidev, "unable to map IV\n");
1426 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1427 0, 0, 0, 0);
1428 qi_cache_free(edesc);
1429 return ERR_PTR(-ENOMEM);
1430 }
1431
1432 edesc->src_nents = src_nents;
1433 edesc->dst_nents = dst_nents;
1434 edesc->iv_dma = iv_dma;
1435 edesc->qm_sg_bytes = qm_sg_bytes;
1436 edesc->drv_req.app_ctx = req;
1437 edesc->drv_req.cbk = ablkcipher_done;
1438 edesc->drv_req.drv_ctx = drv_ctx;
1439
1440 if (mapped_src_nents > 1)
1441 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1442
1443 dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1444 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
1445 0);
1446
1447 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1448 DMA_TO_DEVICE);
1449 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
1450 dev_err(qidev, "unable to map S/G table\n");
1451 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1452 iv_dma, ivsize, GIVENCRYPT, 0, 0);
1453 qi_cache_free(edesc);
1454 return ERR_PTR(-ENOMEM);
1455 } 1304 }
1456 1305
1457 fd_sgt = &edesc->drv_req.fd_sgt[0];
1458
1459 if (mapped_src_nents > 1)
1460 dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
1461 0);
1462 else
1463 dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1464 req->nbytes, 0);
1465
1466 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1467 sizeof(*sg_table), ivsize + req->nbytes, 0);
1468
1469 return edesc; 1306 return edesc;
1470} 1307}
1471 1308
1472static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) 1309static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
1473{ 1310{
1474 struct ablkcipher_edesc *edesc; 1311 struct skcipher_edesc *edesc;
1475 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1312 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1476 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1313 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1477 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1314 int ivsize = crypto_skcipher_ivsize(skcipher);
1478 int ret; 1315 int ret;
1479 1316
1480 if (unlikely(caam_congested)) 1317 if (unlikely(caam_congested))
1481 return -EAGAIN; 1318 return -EAGAIN;
1482 1319
1483 /* allocate extended descriptor */ 1320 /* allocate extended descriptor */
1484 edesc = ablkcipher_edesc_alloc(req, encrypt); 1321 edesc = skcipher_edesc_alloc(req, encrypt);
1485 if (IS_ERR(edesc)) 1322 if (IS_ERR(edesc))
1486 return PTR_ERR(edesc); 1323 return PTR_ERR(edesc);
1487 1324
1488 /* 1325 /*
1489 * The crypto API expects us to set the IV (req->info) to the last 1326 * The crypto API expects us to set the IV (req->iv) to the last
1490 * ciphertext block. 1327 * ciphertext block.
1491 */ 1328 */
1492 if (!encrypt) 1329 if (!encrypt)
1493 scatterwalk_map_and_copy(req->info, req->src, req->nbytes - 1330 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
1494 ivsize, ivsize, 0); 1331 ivsize, ivsize, 0);
1495 1332
1496 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1333 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1497 if (!ret) { 1334 if (!ret) {
1498 ret = -EINPROGRESS; 1335 ret = -EINPROGRESS;
1499 } else { 1336 } else {
1500 ablkcipher_unmap(ctx->qidev, edesc, req); 1337 skcipher_unmap(ctx->qidev, edesc, req);
1501 qi_cache_free(edesc); 1338 qi_cache_free(edesc);
1502 } 1339 }
1503 1340
1504 return ret; 1341 return ret;
1505} 1342}
1506 1343
1507static int ablkcipher_encrypt(struct ablkcipher_request *req) 1344static int skcipher_encrypt(struct skcipher_request *req)
1508{ 1345{
1509 return ablkcipher_crypt(req, true); 1346 return skcipher_crypt(req, true);
1510} 1347}
1511 1348
1512static int ablkcipher_decrypt(struct ablkcipher_request *req) 1349static int skcipher_decrypt(struct skcipher_request *req)
1513{ 1350{
1514 return ablkcipher_crypt(req, false); 1351 return skcipher_crypt(req, false);
1515}
1516
1517static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1518{
1519 struct ablkcipher_request *req = &creq->creq;
1520 struct ablkcipher_edesc *edesc;
1521 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1522 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1523 int ret;
1524
1525 if (unlikely(caam_congested))
1526 return -EAGAIN;
1527
1528 /* allocate extended descriptor */
1529 edesc = ablkcipher_giv_edesc_alloc(creq);
1530 if (IS_ERR(edesc))
1531 return PTR_ERR(edesc);
1532
1533 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1534 if (!ret) {
1535 ret = -EINPROGRESS;
1536 } else {
1537 ablkcipher_unmap(ctx->qidev, edesc, req);
1538 qi_cache_free(edesc);
1539 }
1540
1541 return ret;
1542} 1352}
1543 1353
1544#define template_ablkcipher template_u.ablkcipher 1354static struct caam_skcipher_alg driver_algs[] = {
1545struct caam_alg_template {
1546 char name[CRYPTO_MAX_ALG_NAME];
1547 char driver_name[CRYPTO_MAX_ALG_NAME];
1548 unsigned int blocksize;
1549 u32 type;
1550 union {
1551 struct ablkcipher_alg ablkcipher;
1552 } template_u;
1553 u32 class1_alg_type;
1554 u32 class2_alg_type;
1555};
1556
1557static struct caam_alg_template driver_algs[] = {
1558 /* ablkcipher descriptor */
1559 { 1355 {
1560 .name = "cbc(aes)", 1356 .skcipher = {
1561 .driver_name = "cbc-aes-caam-qi", 1357 .base = {
1562 .blocksize = AES_BLOCK_SIZE, 1358 .cra_name = "cbc(aes)",
1563 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1359 .cra_driver_name = "cbc-aes-caam-qi",
1564 .template_ablkcipher = { 1360 .cra_blocksize = AES_BLOCK_SIZE,
1565 .setkey = ablkcipher_setkey, 1361 },
1566 .encrypt = ablkcipher_encrypt, 1362 .setkey = skcipher_setkey,
1567 .decrypt = ablkcipher_decrypt, 1363 .encrypt = skcipher_encrypt,
1568 .givencrypt = ablkcipher_givencrypt, 1364 .decrypt = skcipher_decrypt,
1569 .geniv = "<built-in>",
1570 .min_keysize = AES_MIN_KEY_SIZE, 1365 .min_keysize = AES_MIN_KEY_SIZE,
1571 .max_keysize = AES_MAX_KEY_SIZE, 1366 .max_keysize = AES_MAX_KEY_SIZE,
1572 .ivsize = AES_BLOCK_SIZE, 1367 .ivsize = AES_BLOCK_SIZE,
1573 }, 1368 },
1574 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1369 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1575 }, 1370 },
1576 { 1371 {
1577 .name = "cbc(des3_ede)", 1372 .skcipher = {
1578 .driver_name = "cbc-3des-caam-qi", 1373 .base = {
1579 .blocksize = DES3_EDE_BLOCK_SIZE, 1374 .cra_name = "cbc(des3_ede)",
1580 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1375 .cra_driver_name = "cbc-3des-caam-qi",
1581 .template_ablkcipher = { 1376 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1582 .setkey = ablkcipher_setkey, 1377 },
1583 .encrypt = ablkcipher_encrypt, 1378 .setkey = skcipher_setkey,
1584 .decrypt = ablkcipher_decrypt, 1379 .encrypt = skcipher_encrypt,
1585 .givencrypt = ablkcipher_givencrypt, 1380 .decrypt = skcipher_decrypt,
1586 .geniv = "<built-in>",
1587 .min_keysize = DES3_EDE_KEY_SIZE, 1381 .min_keysize = DES3_EDE_KEY_SIZE,
1588 .max_keysize = DES3_EDE_KEY_SIZE, 1382 .max_keysize = DES3_EDE_KEY_SIZE,
1589 .ivsize = DES3_EDE_BLOCK_SIZE, 1383 .ivsize = DES3_EDE_BLOCK_SIZE,
1590 }, 1384 },
1591 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1385 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1592 }, 1386 },
1593 { 1387 {
1594 .name = "cbc(des)", 1388 .skcipher = {
1595 .driver_name = "cbc-des-caam-qi", 1389 .base = {
1596 .blocksize = DES_BLOCK_SIZE, 1390 .cra_name = "cbc(des)",
1597 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1391 .cra_driver_name = "cbc-des-caam-qi",
1598 .template_ablkcipher = { 1392 .cra_blocksize = DES_BLOCK_SIZE,
1599 .setkey = ablkcipher_setkey, 1393 },
1600 .encrypt = ablkcipher_encrypt, 1394 .setkey = skcipher_setkey,
1601 .decrypt = ablkcipher_decrypt, 1395 .encrypt = skcipher_encrypt,
1602 .givencrypt = ablkcipher_givencrypt, 1396 .decrypt = skcipher_decrypt,
1603 .geniv = "<built-in>",
1604 .min_keysize = DES_KEY_SIZE, 1397 .min_keysize = DES_KEY_SIZE,
1605 .max_keysize = DES_KEY_SIZE, 1398 .max_keysize = DES_KEY_SIZE,
1606 .ivsize = DES_BLOCK_SIZE, 1399 .ivsize = DES_BLOCK_SIZE,
1607 }, 1400 },
1608 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1401 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1609 }, 1402 },
1610 { 1403 {
1611 .name = "ctr(aes)", 1404 .skcipher = {
1612 .driver_name = "ctr-aes-caam-qi", 1405 .base = {
1613 .blocksize = 1, 1406 .cra_name = "ctr(aes)",
1614 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 1407 .cra_driver_name = "ctr-aes-caam-qi",
1615 .template_ablkcipher = { 1408 .cra_blocksize = 1,
1616 .setkey = ablkcipher_setkey, 1409 },
1617 .encrypt = ablkcipher_encrypt, 1410 .setkey = skcipher_setkey,
1618 .decrypt = ablkcipher_decrypt, 1411 .encrypt = skcipher_encrypt,
1619 .geniv = "chainiv", 1412 .decrypt = skcipher_decrypt,
1620 .min_keysize = AES_MIN_KEY_SIZE, 1413 .min_keysize = AES_MIN_KEY_SIZE,
1621 .max_keysize = AES_MAX_KEY_SIZE, 1414 .max_keysize = AES_MAX_KEY_SIZE,
1622 .ivsize = AES_BLOCK_SIZE, 1415 .ivsize = AES_BLOCK_SIZE,
1416 .chunksize = AES_BLOCK_SIZE,
1623 }, 1417 },
1624 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 1418 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1419 OP_ALG_AAI_CTR_MOD128,
1625 }, 1420 },
1626 { 1421 {
1627 .name = "rfc3686(ctr(aes))", 1422 .skcipher = {
1628 .driver_name = "rfc3686-ctr-aes-caam-qi", 1423 .base = {
1629 .blocksize = 1, 1424 .cra_name = "rfc3686(ctr(aes))",
1630 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1425 .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
1631 .template_ablkcipher = { 1426 .cra_blocksize = 1,
1632 .setkey = ablkcipher_setkey, 1427 },
1633 .encrypt = ablkcipher_encrypt, 1428 .setkey = skcipher_setkey,
1634 .decrypt = ablkcipher_decrypt, 1429 .encrypt = skcipher_encrypt,
1635 .givencrypt = ablkcipher_givencrypt, 1430 .decrypt = skcipher_decrypt,
1636 .geniv = "<built-in>",
1637 .min_keysize = AES_MIN_KEY_SIZE + 1431 .min_keysize = AES_MIN_KEY_SIZE +
1638 CTR_RFC3686_NONCE_SIZE, 1432 CTR_RFC3686_NONCE_SIZE,
1639 .max_keysize = AES_MAX_KEY_SIZE + 1433 .max_keysize = AES_MAX_KEY_SIZE +
1640 CTR_RFC3686_NONCE_SIZE, 1434 CTR_RFC3686_NONCE_SIZE,
1641 .ivsize = CTR_RFC3686_IV_SIZE, 1435 .ivsize = CTR_RFC3686_IV_SIZE,
1436 .chunksize = AES_BLOCK_SIZE,
1437 },
1438 .caam = {
1439 .class1_alg_type = OP_ALG_ALGSEL_AES |
1440 OP_ALG_AAI_CTR_MOD128,
1441 .rfc3686 = true,
1642 }, 1442 },
1643 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1644 }, 1443 },
1645 { 1444 {
1646 .name = "xts(aes)", 1445 .skcipher = {
1647 .driver_name = "xts-aes-caam-qi", 1446 .base = {
1648 .blocksize = AES_BLOCK_SIZE, 1447 .cra_name = "xts(aes)",
1649 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 1448 .cra_driver_name = "xts-aes-caam-qi",
1650 .template_ablkcipher = { 1449 .cra_blocksize = AES_BLOCK_SIZE,
1651 .setkey = xts_ablkcipher_setkey, 1450 },
1652 .encrypt = ablkcipher_encrypt, 1451 .setkey = xts_skcipher_setkey,
1653 .decrypt = ablkcipher_decrypt, 1452 .encrypt = skcipher_encrypt,
1654 .geniv = "eseqiv", 1453 .decrypt = skcipher_decrypt,
1655 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1454 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1656 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1455 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1657 .ivsize = AES_BLOCK_SIZE, 1456 .ivsize = AES_BLOCK_SIZE,
1658 }, 1457 },
1659 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 1458 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1660 }, 1459 },
1661}; 1460};
1662 1461
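For readability of the side-by-side hunk above, here is one entry of the converted table in its new skcipher form, consolidated from the right-hand (new) column; a sketch for orientation, not a verbatim excerpt of the patch:

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	/* the cbc(des3_ede), cbc(des), ctr(aes), rfc3686 and xts entries follow the same pattern */
};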
@@ -2528,12 +2327,6 @@ static struct caam_aead_alg driver_aeads[] = {
2528 }, 2327 },
2529}; 2328};
2530 2329
2531struct caam_crypto_alg {
2532 struct list_head entry;
2533 struct crypto_alg crypto_alg;
2534 struct caam_alg_entry caam;
2535};
2536
2537static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2330static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2538 bool uses_dkp) 2331 bool uses_dkp)
2539{ 2332{
@@ -2572,19 +2365,18 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2572 spin_lock_init(&ctx->lock); 2365 spin_lock_init(&ctx->lock);
2573 ctx->drv_ctx[ENCRYPT] = NULL; 2366 ctx->drv_ctx[ENCRYPT] = NULL;
2574 ctx->drv_ctx[DECRYPT] = NULL; 2367 ctx->drv_ctx[DECRYPT] = NULL;
2575 ctx->drv_ctx[GIVENCRYPT] = NULL;
2576 2368
2577 return 0; 2369 return 0;
2578} 2370}
2579 2371
2580static int caam_cra_init(struct crypto_tfm *tfm) 2372static int caam_cra_init(struct crypto_skcipher *tfm)
2581{ 2373{
2582 struct crypto_alg *alg = tfm->__crt_alg; 2374 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
2583 struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg), 2375 struct caam_skcipher_alg *caam_alg =
2584 crypto_alg); 2376 container_of(alg, typeof(*caam_alg), skcipher);
2585 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2586 2377
2587 return caam_init_common(ctx, &caam_alg->caam, false); 2378 return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
2379 false);
2588} 2380}
2589 2381
2590static int caam_aead_init(struct crypto_aead *tfm) 2382static int caam_aead_init(struct crypto_aead *tfm)
@@ -2602,16 +2394,15 @@ static void caam_exit_common(struct caam_ctx *ctx)
2602{ 2394{
2603 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2395 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
2604 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2396 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
2605 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
2606 2397
2607 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); 2398 dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
2608 2399
2609 caam_jr_free(ctx->jrdev); 2400 caam_jr_free(ctx->jrdev);
2610} 2401}
2611 2402
2612static void caam_cra_exit(struct crypto_tfm *tfm) 2403static void caam_cra_exit(struct crypto_skcipher *tfm)
2613{ 2404{
2614 caam_exit_common(crypto_tfm_ctx(tfm)); 2405 caam_exit_common(crypto_skcipher_ctx(tfm));
2615} 2406}
2616 2407
2617static void caam_aead_exit(struct crypto_aead *tfm) 2408static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2410,8 @@ static void caam_aead_exit(struct crypto_aead *tfm)
2619 caam_exit_common(crypto_aead_ctx(tfm)); 2410 caam_exit_common(crypto_aead_ctx(tfm));
2620} 2411}
2621 2412
2622static struct list_head alg_list;
2623static void __exit caam_qi_algapi_exit(void) 2413static void __exit caam_qi_algapi_exit(void)
2624{ 2414{
2625 struct caam_crypto_alg *t_alg, *n;
2626 int i; 2415 int i;
2627 2416
2628 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2417 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2421,25 @@ static void __exit caam_qi_algapi_exit(void)
2632 crypto_unregister_aead(&t_alg->aead); 2421 crypto_unregister_aead(&t_alg->aead);
2633 } 2422 }
2634 2423
2635 if (!alg_list.next) 2424 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2636 return; 2425 struct caam_skcipher_alg *t_alg = driver_algs + i;
2637 2426
2638 list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 2427 if (t_alg->registered)
2639 crypto_unregister_alg(&t_alg->crypto_alg); 2428 crypto_unregister_skcipher(&t_alg->skcipher);
2640 list_del(&t_alg->entry);
2641 kfree(t_alg);
2642 } 2429 }
2643} 2430}
2644 2431
2645static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 2432static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2646 *template)
2647{ 2433{
2648 struct caam_crypto_alg *t_alg; 2434 struct skcipher_alg *alg = &t_alg->skcipher;
2649 struct crypto_alg *alg;
2650
2651 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2652 if (!t_alg)
2653 return ERR_PTR(-ENOMEM);
2654 2435
2655 alg = &t_alg->crypto_alg; 2436 alg->base.cra_module = THIS_MODULE;
2656 2437 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2657 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); 2438 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2658 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 2439 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2659 template->driver_name);
2660 alg->cra_module = THIS_MODULE;
2661 alg->cra_init = caam_cra_init;
2662 alg->cra_exit = caam_cra_exit;
2663 alg->cra_priority = CAAM_CRA_PRIORITY;
2664 alg->cra_blocksize = template->blocksize;
2665 alg->cra_alignmask = 0;
2666 alg->cra_ctxsize = sizeof(struct caam_ctx);
2667 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2668 template->type;
2669 switch (template->type) {
2670 case CRYPTO_ALG_TYPE_GIVCIPHER:
2671 alg->cra_type = &crypto_givcipher_type;
2672 alg->cra_ablkcipher = template->template_ablkcipher;
2673 break;
2674 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2675 alg->cra_type = &crypto_ablkcipher_type;
2676 alg->cra_ablkcipher = template->template_ablkcipher;
2677 break;
2678 }
2679
2680 t_alg->caam.class1_alg_type = template->class1_alg_type;
2681 t_alg->caam.class2_alg_type = template->class2_alg_type;
2682 2440
2683 return t_alg; 2441 alg->init = caam_cra_init;
2442 alg->exit = caam_cra_exit;
2684} 2443}
2685 2444
2686static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 2445static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2734,8 +2493,6 @@ static int __init caam_qi_algapi_init(void)
2734 return -ENODEV; 2493 return -ENODEV;
2735 } 2494 }
2736 2495
2737 INIT_LIST_HEAD(&alg_list);
2738
2739 /* 2496 /*
2740 * Register crypto algorithms the device supports. 2497 * Register crypto algorithms the device supports.
2741 * First, detect presence and attributes of DES, AES, and MD blocks. 2498 * First, detect presence and attributes of DES, AES, and MD blocks.
@@ -2751,9 +2508,8 @@ static int __init caam_qi_algapi_init(void)
2751 md_limit = SHA256_DIGEST_SIZE; 2508 md_limit = SHA256_DIGEST_SIZE;
2752 2509
2753 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2510 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2754 struct caam_crypto_alg *t_alg; 2511 struct caam_skcipher_alg *t_alg = driver_algs + i;
2755 struct caam_alg_template *alg = driver_algs + i; 2512 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
2756 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
2757 2513
2758 /* Skip DES algorithms if not supported by device */ 2514 /* Skip DES algorithms if not supported by device */
2759 if (!des_inst && 2515 if (!des_inst &&
@@ -2765,23 +2521,16 @@ static int __init caam_qi_algapi_init(void)
2765 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 2521 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
2766 continue; 2522 continue;
2767 2523
2768 t_alg = caam_alg_alloc(alg); 2524 caam_skcipher_alg_init(t_alg);
2769 if (IS_ERR(t_alg)) {
2770 err = PTR_ERR(t_alg);
2771 dev_warn(priv->qidev, "%s alg allocation failed\n",
2772 alg->driver_name);
2773 continue;
2774 }
2775 2525
2776 err = crypto_register_alg(&t_alg->crypto_alg); 2526 err = crypto_register_skcipher(&t_alg->skcipher);
2777 if (err) { 2527 if (err) {
2778 dev_warn(priv->qidev, "%s alg registration failed\n", 2528 dev_warn(priv->qidev, "%s alg registration failed\n",
2779 t_alg->crypto_alg.cra_driver_name); 2529 t_alg->skcipher.base.cra_driver_name);
2780 kfree(t_alg);
2781 continue; 2530 continue;
2782 } 2531 }
2783 2532
2784 list_add_tail(&t_alg->entry, &alg_list); 2533 t_alg->registered = true;
2785 registered = true; 2534 registered = true;
2786 } 2535 }
2787 2536
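Consolidating the right-hand column of the hunk above: the new registration path drops the dynamically allocated caam_crypto_alg list in favour of the static driver_algs[] array plus a per-entry registered flag. A sketch of the resulting loop body (the unchanged DES/3DES capability check is elided here, as it falls outside the hunk context):

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* DES/3DES presence check elided (unchanged by the patch) */

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}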
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 000000000000..7d8ac0222fa3
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,5165 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/*
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP
5 */
6
7#include "compat.h"
8#include "regs.h"
9#include "caamalg_qi2.h"
10#include "dpseci_cmd.h"
11#include "desc_constr.h"
12#include "error.h"
13#include "sg_sw_sec4.h"
14#include "sg_sw_qm2.h"
15#include "key_gen.h"
16#include "caamalg_desc.h"
17#include "caamhash_desc.h"
18#include <linux/fsl/mc.h>
19#include <soc/fsl/dpaa2-io.h>
20#include <soc/fsl/dpaa2-fd.h>
21
22#define CAAM_CRA_PRIORITY 2000
23
24/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
25#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
26 SHA512_DIGEST_SIZE * 2)
27
28#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
29bool caam_little_end;
30EXPORT_SYMBOL(caam_little_end);
31bool caam_imx;
32EXPORT_SYMBOL(caam_imx);
33#endif
34
35/*
  36 * This is a cache of buffers, from which users of the CAAM QI driver
  37 * can allocate short buffers. It's faster than doing kmalloc on the hotpath.
  38 * NOTE: A more elegant solution would be to have some headroom in the frames
  39 *	 being processed. This could be added by the dpaa2-eth driver, but it
  40 *	 would pose a problem for userspace application processing, which
  41 *	 cannot know of this limitation. So for now, this will work.
  42 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
43 */
44static struct kmem_cache *qi_cache;
45
46struct caam_alg_entry {
47 struct device *dev;
48 int class1_alg_type;
49 int class2_alg_type;
50 bool rfc3686;
51 bool geniv;
52};
53
54struct caam_aead_alg {
55 struct aead_alg aead;
56 struct caam_alg_entry caam;
57 bool registered;
58};
59
60struct caam_skcipher_alg {
61 struct skcipher_alg skcipher;
62 struct caam_alg_entry caam;
63 bool registered;
64};
65
66/**
67 * caam_ctx - per-session context
68 * @flc: Flow Contexts array
69 * @key: [authentication key], encryption key
70 * @flc_dma: I/O virtual addresses of the Flow Contexts
71 * @key_dma: I/O virtual address of the key
72 * @dir: DMA direction for mapping key and Flow Contexts
73 * @dev: dpseci device
74 * @adata: authentication algorithm details
75 * @cdata: encryption algorithm details
76 * @authsize: authentication tag (a.k.a. ICV / MAC) size
77 */
78struct caam_ctx {
79 struct caam_flc flc[NUM_OP];
80 u8 key[CAAM_MAX_KEY_SIZE];
81 dma_addr_t flc_dma[NUM_OP];
82 dma_addr_t key_dma;
83 enum dma_data_direction dir;
84 struct device *dev;
85 struct alginfo adata;
86 struct alginfo cdata;
87 unsigned int authsize;
88};
89
90static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
91 dma_addr_t iova_addr)
92{
93 phys_addr_t phys_addr;
94
95 phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
96 iova_addr;
97
98 return phys_to_virt(phys_addr);
99}
100
101/*
102 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
103 *
104 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
105 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
106 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
107 * hosting 16 SG entries.
108 *
109 * @flags - flags that would be used for the equivalent kmalloc(..) call
110 *
111 * Returns a pointer to a retrieved buffer on success or NULL on failure.
112 */
113static inline void *qi_cache_zalloc(gfp_t flags)
114{
115 return kmem_cache_zalloc(qi_cache, flags);
116}
117
118/*
119 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
120 *
121 * @obj - buffer previously allocated by qi_cache_zalloc
122 *
 123 * No checking is done; the call is a pass-through to
 124 * kmem_cache_free(...)
125 */
126static inline void qi_cache_free(void *obj)
127{
128 kmem_cache_free(qi_cache, obj);
129}
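The typical hotpath pattern built on these two helpers, as used by the extended-descriptor allocators later in this file (a minimal sketch; edesc and flags come from the calling context):

	/* allocate an extended descriptor from the per-driver memcache */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc))
		return ERR_PTR(-ENOMEM);

	/* ... populate edesc, map DMA, enqueue to the DPSECI object ... */

	/* on error, or once the request completes, return it to the cache */
	qi_cache_free(edesc);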
130
131static struct caam_request *to_caam_req(struct crypto_async_request *areq)
132{
133 switch (crypto_tfm_alg_type(areq->tfm)) {
134 case CRYPTO_ALG_TYPE_SKCIPHER:
135 return skcipher_request_ctx(skcipher_request_cast(areq));
136 case CRYPTO_ALG_TYPE_AEAD:
137 return aead_request_ctx(container_of(areq, struct aead_request,
138 base));
139 case CRYPTO_ALG_TYPE_AHASH:
140 return ahash_request_ctx(ahash_request_cast(areq));
141 default:
142 return ERR_PTR(-EINVAL);
143 }
144}
145
146static void caam_unmap(struct device *dev, struct scatterlist *src,
147 struct scatterlist *dst, int src_nents,
148 int dst_nents, dma_addr_t iv_dma, int ivsize,
149 dma_addr_t qm_sg_dma, int qm_sg_bytes)
150{
151 if (dst != src) {
152 if (src_nents)
153 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
154 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
155 } else {
156 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
157 }
158
159 if (iv_dma)
160 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
161
162 if (qm_sg_bytes)
163 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
164}
165
166static int aead_set_sh_desc(struct crypto_aead *aead)
167{
168 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
169 typeof(*alg), aead);
170 struct caam_ctx *ctx = crypto_aead_ctx(aead);
171 unsigned int ivsize = crypto_aead_ivsize(aead);
172 struct device *dev = ctx->dev;
173 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
174 struct caam_flc *flc;
175 u32 *desc;
176 u32 ctx1_iv_off = 0;
177 u32 *nonce = NULL;
178 unsigned int data_len[2];
179 u32 inl_mask;
180 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
181 OP_ALG_AAI_CTR_MOD128);
182 const bool is_rfc3686 = alg->caam.rfc3686;
183
184 if (!ctx->cdata.keylen || !ctx->authsize)
185 return 0;
186
187 /*
188 * AES-CTR needs to load IV in CONTEXT1 reg
 189	 * at an offset of 128 bits (16 bytes)
190 * CONTEXT1[255:128] = IV
191 */
192 if (ctr_mode)
193 ctx1_iv_off = 16;
194
195 /*
196 * RFC3686 specific:
197 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
198 */
199 if (is_rfc3686) {
200 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
201 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
202 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
203 }
204
205 data_len[0] = ctx->adata.keylen_pad;
206 data_len[1] = ctx->cdata.keylen;
207
208 /* aead_encrypt shared descriptor */
209 if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
210 DESC_QI_AEAD_ENC_LEN) +
211 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
212 DESC_JOB_IO_LEN, data_len, &inl_mask,
213 ARRAY_SIZE(data_len)) < 0)
214 return -EINVAL;
215
216 if (inl_mask & 1)
217 ctx->adata.key_virt = ctx->key;
218 else
219 ctx->adata.key_dma = ctx->key_dma;
220
221 if (inl_mask & 2)
222 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
223 else
224 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
225
226 ctx->adata.key_inline = !!(inl_mask & 1);
227 ctx->cdata.key_inline = !!(inl_mask & 2);
228
229 flc = &ctx->flc[ENCRYPT];
230 desc = flc->sh_desc;
231
232 if (alg->caam.geniv)
233 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
234 ivsize, ctx->authsize, is_rfc3686,
235 nonce, ctx1_iv_off, true,
236 priv->sec_attr.era);
237 else
238 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
239 ivsize, ctx->authsize, is_rfc3686, nonce,
240 ctx1_iv_off, true, priv->sec_attr.era);
241
242 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
243 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
244 sizeof(flc->flc) + desc_bytes(desc),
245 ctx->dir);
246
247 /* aead_decrypt shared descriptor */
248 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
249 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
250 DESC_JOB_IO_LEN, data_len, &inl_mask,
251 ARRAY_SIZE(data_len)) < 0)
252 return -EINVAL;
253
254 if (inl_mask & 1)
255 ctx->adata.key_virt = ctx->key;
256 else
257 ctx->adata.key_dma = ctx->key_dma;
258
259 if (inl_mask & 2)
260 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
261 else
262 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
263
264 ctx->adata.key_inline = !!(inl_mask & 1);
265 ctx->cdata.key_inline = !!(inl_mask & 2);
266
267 flc = &ctx->flc[DECRYPT];
268 desc = flc->sh_desc;
269 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
270 ivsize, ctx->authsize, alg->caam.geniv,
271 is_rfc3686, nonce, ctx1_iv_off, true,
272 priv->sec_attr.era);
273 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
274 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
275 sizeof(flc->flc) + desc_bytes(desc),
276 ctx->dir);
277
278 return 0;
279}
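The inl_mask returned by desc_inline_query() is decoded twice above, once per direction. Factored out, the decode looks like the following; this is a hypothetical helper shown only to make the bit semantics explicit (bit 0 covers the split authentication key, bit 1 the cipher key), not code from the patch:

	static void set_key_refs(struct caam_ctx *ctx, u32 inl_mask)
	{
		ctx->adata.key_inline = !!(inl_mask & 1);
		if (ctx->adata.key_inline)
			ctx->adata.key_virt = ctx->key;
		else
			ctx->adata.key_dma = ctx->key_dma;

		ctx->cdata.key_inline = !!(inl_mask & 2);
		if (ctx->cdata.key_inline)
			ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
		else
			ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
	}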
280
281static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
282{
283 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
284
285 ctx->authsize = authsize;
286 aead_set_sh_desc(authenc);
287
288 return 0;
289}
290
291static int aead_setkey(struct crypto_aead *aead, const u8 *key,
292 unsigned int keylen)
293{
294 struct caam_ctx *ctx = crypto_aead_ctx(aead);
295 struct device *dev = ctx->dev;
296 struct crypto_authenc_keys keys;
297
298 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
299 goto badkey;
300
301 dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
302 keys.authkeylen + keys.enckeylen, keys.enckeylen,
303 keys.authkeylen);
304 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
305 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
306
307 ctx->adata.keylen = keys.authkeylen;
308 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
309 OP_ALG_ALGSEL_MASK);
310
311 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
312 goto badkey;
313
314 memcpy(ctx->key, keys.authkey, keys.authkeylen);
315 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
316 dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
317 keys.enckeylen, ctx->dir);
318 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
319 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
320 ctx->adata.keylen_pad + keys.enckeylen, 1);
321
322 ctx->cdata.keylen = keys.enckeylen;
323
324 memzero_explicit(&keys, sizeof(keys));
325 return aead_set_sh_desc(aead);
326badkey:
327 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
328 memzero_explicit(&keys, sizeof(keys));
329 return -EINVAL;
330}
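After aead_setkey(), ctx->key holds the authentication key padded out to adata.keylen_pad, immediately followed by the encryption key; the shared-descriptor code above addresses the two halves with the offsets below (illustrative sketch, local names are hypothetical):

	const u8   *auth_key    = ctx->key;                          /* padded to adata.keylen_pad */
	const u8   *enc_key     = ctx->key + ctx->adata.keylen_pad;  /* keys.enckeylen bytes */
	dma_addr_t  enc_key_dma = ctx->key_dma + ctx->adata.keylen_pad;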
331
332static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
333 bool encrypt)
334{
335 struct crypto_aead *aead = crypto_aead_reqtfm(req);
336 struct caam_request *req_ctx = aead_request_ctx(req);
337 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
338 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
339 struct caam_ctx *ctx = crypto_aead_ctx(aead);
340 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
341 typeof(*alg), aead);
342 struct device *dev = ctx->dev;
343 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
344 GFP_KERNEL : GFP_ATOMIC;
345 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
346 struct aead_edesc *edesc;
347 dma_addr_t qm_sg_dma, iv_dma = 0;
348 int ivsize = 0;
349 unsigned int authsize = ctx->authsize;
350 int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
351 int in_len, out_len;
352 struct dpaa2_sg_entry *sg_table;
353
354 /* allocate space for base edesc, link tables and IV */
355 edesc = qi_cache_zalloc(GFP_DMA | flags);
356 if (unlikely(!edesc)) {
357 dev_err(dev, "could not allocate extended descriptor\n");
358 return ERR_PTR(-ENOMEM);
359 }
360
361 if (unlikely(req->dst != req->src)) {
362 src_nents = sg_nents_for_len(req->src, req->assoclen +
363 req->cryptlen);
364 if (unlikely(src_nents < 0)) {
365 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
366 req->assoclen + req->cryptlen);
367 qi_cache_free(edesc);
368 return ERR_PTR(src_nents);
369 }
370
371 dst_nents = sg_nents_for_len(req->dst, req->assoclen +
372 req->cryptlen +
373 (encrypt ? authsize :
374 (-authsize)));
375 if (unlikely(dst_nents < 0)) {
376 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
377 req->assoclen + req->cryptlen +
378 (encrypt ? authsize : (-authsize)));
379 qi_cache_free(edesc);
380 return ERR_PTR(dst_nents);
381 }
382
383 if (src_nents) {
384 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
385 DMA_TO_DEVICE);
386 if (unlikely(!mapped_src_nents)) {
387 dev_err(dev, "unable to map source\n");
388 qi_cache_free(edesc);
389 return ERR_PTR(-ENOMEM);
390 }
391 } else {
392 mapped_src_nents = 0;
393 }
394
395 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
396 DMA_FROM_DEVICE);
397 if (unlikely(!mapped_dst_nents)) {
398 dev_err(dev, "unable to map destination\n");
399 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
400 qi_cache_free(edesc);
401 return ERR_PTR(-ENOMEM);
402 }
403 } else {
404 src_nents = sg_nents_for_len(req->src, req->assoclen +
405 req->cryptlen +
406 (encrypt ? authsize : 0));
407 if (unlikely(src_nents < 0)) {
408 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
409 req->assoclen + req->cryptlen +
410 (encrypt ? authsize : 0));
411 qi_cache_free(edesc);
412 return ERR_PTR(src_nents);
413 }
414
415 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
416 DMA_BIDIRECTIONAL);
417 if (unlikely(!mapped_src_nents)) {
418 dev_err(dev, "unable to map source\n");
419 qi_cache_free(edesc);
420 return ERR_PTR(-ENOMEM);
421 }
422 }
423
424 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
425 ivsize = crypto_aead_ivsize(aead);
426
427 /*
428 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
429 * Input is not contiguous.
430 */
431 qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
432 (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
433 sg_table = &edesc->sgt[0];
434 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
435 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
436 CAAM_QI_MEMCACHE_SIZE)) {
437 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
438 qm_sg_nents, ivsize);
439 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
440 0, 0, 0);
441 qi_cache_free(edesc);
442 return ERR_PTR(-ENOMEM);
443 }
444
445 if (ivsize) {
446 u8 *iv = (u8 *)(sg_table + qm_sg_nents);
447
448 /* Make sure IV is located in a DMAable area */
449 memcpy(iv, req->iv, ivsize);
450
451 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
452 if (dma_mapping_error(dev, iv_dma)) {
453 dev_err(dev, "unable to map IV\n");
454 caam_unmap(dev, req->src, req->dst, src_nents,
455 dst_nents, 0, 0, 0, 0);
456 qi_cache_free(edesc);
457 return ERR_PTR(-ENOMEM);
458 }
459 }
460
461 edesc->src_nents = src_nents;
462 edesc->dst_nents = dst_nents;
463 edesc->iv_dma = iv_dma;
464
465 edesc->assoclen = cpu_to_caam32(req->assoclen);
466 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
467 DMA_TO_DEVICE);
468 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
469 dev_err(dev, "unable to map assoclen\n");
470 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
471 iv_dma, ivsize, 0, 0);
472 qi_cache_free(edesc);
473 return ERR_PTR(-ENOMEM);
474 }
475
476 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
477 qm_sg_index++;
478 if (ivsize) {
479 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
480 qm_sg_index++;
481 }
482 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
483 qm_sg_index += mapped_src_nents;
484
485 if (mapped_dst_nents > 1)
486 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
487 qm_sg_index, 0);
488
489 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
490 if (dma_mapping_error(dev, qm_sg_dma)) {
491 dev_err(dev, "unable to map S/G table\n");
492 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
493 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
494 iv_dma, ivsize, 0, 0);
495 qi_cache_free(edesc);
496 return ERR_PTR(-ENOMEM);
497 }
498
499 edesc->qm_sg_dma = qm_sg_dma;
500 edesc->qm_sg_bytes = qm_sg_bytes;
501
502 out_len = req->assoclen + req->cryptlen +
503 (encrypt ? ctx->authsize : (-ctx->authsize));
504 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
505
506 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
507 dpaa2_fl_set_final(in_fle, true);
508 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
509 dpaa2_fl_set_addr(in_fle, qm_sg_dma);
510 dpaa2_fl_set_len(in_fle, in_len);
511
512 if (req->dst == req->src) {
513 if (mapped_src_nents == 1) {
514 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
515 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
516 } else {
517 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
518 dpaa2_fl_set_addr(out_fle, qm_sg_dma +
519 (1 + !!ivsize) * sizeof(*sg_table));
520 }
521 } else if (mapped_dst_nents == 1) {
522 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
523 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
524 } else {
525 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
526 dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
527 sizeof(*sg_table));
528 }
529
530 dpaa2_fl_set_len(out_fle, out_len);
531
532 return edesc;
533}
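The qm_sg table assembled above therefore has the following fixed shape (descriptive sketch; indices as computed in the function):

	/*
	 * sg_table[0]                : edesc->assoclen (4 bytes, mapped separately)
	 * sg_table[1]                : IV, present only when ivsize != 0
	 * sg_table[1 + !!ivsize ..]  : mapped_src_nents entries for req->src
	 * sg_table[qm_sg_index ..]   : mapped_dst_nents entries for req->dst,
	 *                              only when req->dst != req->src and
	 *                              mapped_dst_nents > 1
	 */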
534
535static int gcm_set_sh_desc(struct crypto_aead *aead)
536{
537 struct caam_ctx *ctx = crypto_aead_ctx(aead);
538 struct device *dev = ctx->dev;
539 unsigned int ivsize = crypto_aead_ivsize(aead);
540 struct caam_flc *flc;
541 u32 *desc;
542 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
543 ctx->cdata.keylen;
544
545 if (!ctx->cdata.keylen || !ctx->authsize)
546 return 0;
547
548 /*
549 * AES GCM encrypt shared descriptor
550 * Job Descriptor and Shared Descriptor
551 * must fit into the 64-word Descriptor h/w Buffer
552 */
553 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
554 ctx->cdata.key_inline = true;
555 ctx->cdata.key_virt = ctx->key;
556 } else {
557 ctx->cdata.key_inline = false;
558 ctx->cdata.key_dma = ctx->key_dma;
559 }
560
561 flc = &ctx->flc[ENCRYPT];
562 desc = flc->sh_desc;
563 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
564 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
565 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
566 sizeof(flc->flc) + desc_bytes(desc),
567 ctx->dir);
568
569 /*
570 * Job Descriptor and Shared Descriptors
571 * must all fit into the 64-word Descriptor h/w Buffer
572 */
573 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
574 ctx->cdata.key_inline = true;
575 ctx->cdata.key_virt = ctx->key;
576 } else {
577 ctx->cdata.key_inline = false;
578 ctx->cdata.key_dma = ctx->key_dma;
579 }
580
581 flc = &ctx->flc[DECRYPT];
582 desc = flc->sh_desc;
583 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
584 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
585 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
586 sizeof(flc->flc) + desc_bytes(desc),
587 ctx->dir);
588
589 return 0;
590}
591
592static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
593{
594 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
595
596 ctx->authsize = authsize;
597 gcm_set_sh_desc(authenc);
598
599 return 0;
600}
601
602static int gcm_setkey(struct crypto_aead *aead,
603 const u8 *key, unsigned int keylen)
604{
605 struct caam_ctx *ctx = crypto_aead_ctx(aead);
606 struct device *dev = ctx->dev;
607
608 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
609 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
610
611 memcpy(ctx->key, key, keylen);
612 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
613 ctx->cdata.keylen = keylen;
614
615 return gcm_set_sh_desc(aead);
616}
617
618static int rfc4106_set_sh_desc(struct crypto_aead *aead)
619{
620 struct caam_ctx *ctx = crypto_aead_ctx(aead);
621 struct device *dev = ctx->dev;
622 unsigned int ivsize = crypto_aead_ivsize(aead);
623 struct caam_flc *flc;
624 u32 *desc;
625 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
626 ctx->cdata.keylen;
627
628 if (!ctx->cdata.keylen || !ctx->authsize)
629 return 0;
630
631 ctx->cdata.key_virt = ctx->key;
632
633 /*
634 * RFC4106 encrypt shared descriptor
635 * Job Descriptor and Shared Descriptor
636 * must fit into the 64-word Descriptor h/w Buffer
637 */
638 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
639 ctx->cdata.key_inline = true;
640 } else {
641 ctx->cdata.key_inline = false;
642 ctx->cdata.key_dma = ctx->key_dma;
643 }
644
645 flc = &ctx->flc[ENCRYPT];
646 desc = flc->sh_desc;
647 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
648 true);
649 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
650 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
651 sizeof(flc->flc) + desc_bytes(desc),
652 ctx->dir);
653
654 /*
655 * Job Descriptor and Shared Descriptors
656 * must all fit into the 64-word Descriptor h/w Buffer
657 */
658 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
659 ctx->cdata.key_inline = true;
660 } else {
661 ctx->cdata.key_inline = false;
662 ctx->cdata.key_dma = ctx->key_dma;
663 }
664
665 flc = &ctx->flc[DECRYPT];
666 desc = flc->sh_desc;
667 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
668 true);
669 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
670 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
671 sizeof(flc->flc) + desc_bytes(desc),
672 ctx->dir);
673
674 return 0;
675}
676
677static int rfc4106_setauthsize(struct crypto_aead *authenc,
678 unsigned int authsize)
679{
680 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
681
682 ctx->authsize = authsize;
683 rfc4106_set_sh_desc(authenc);
684
685 return 0;
686}
687
688static int rfc4106_setkey(struct crypto_aead *aead,
689 const u8 *key, unsigned int keylen)
690{
691 struct caam_ctx *ctx = crypto_aead_ctx(aead);
692 struct device *dev = ctx->dev;
693
694 if (keylen < 4)
695 return -EINVAL;
696
697 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
698 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
699
700 memcpy(ctx->key, key, keylen);
701 /*
702 * The last four bytes of the key material are used as the salt value
703 * in the nonce. Update the AES key length.
704 */
705 ctx->cdata.keylen = keylen - 4;
706 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
707 ctx->dir);
708
709 return rfc4106_set_sh_desc(aead);
710}
711
712static int rfc4543_set_sh_desc(struct crypto_aead *aead)
713{
714 struct caam_ctx *ctx = crypto_aead_ctx(aead);
715 struct device *dev = ctx->dev;
716 unsigned int ivsize = crypto_aead_ivsize(aead);
717 struct caam_flc *flc;
718 u32 *desc;
719 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
720 ctx->cdata.keylen;
721
722 if (!ctx->cdata.keylen || !ctx->authsize)
723 return 0;
724
725 ctx->cdata.key_virt = ctx->key;
726
727 /*
728 * RFC4543 encrypt shared descriptor
729 * Job Descriptor and Shared Descriptor
730 * must fit into the 64-word Descriptor h/w Buffer
731 */
732 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
733 ctx->cdata.key_inline = true;
734 } else {
735 ctx->cdata.key_inline = false;
736 ctx->cdata.key_dma = ctx->key_dma;
737 }
738
739 flc = &ctx->flc[ENCRYPT];
740 desc = flc->sh_desc;
741 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
742 true);
743 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
744 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
745 sizeof(flc->flc) + desc_bytes(desc),
746 ctx->dir);
747
748 /*
749 * Job Descriptor and Shared Descriptors
750 * must all fit into the 64-word Descriptor h/w Buffer
751 */
752 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
753 ctx->cdata.key_inline = true;
754 } else {
755 ctx->cdata.key_inline = false;
756 ctx->cdata.key_dma = ctx->key_dma;
757 }
758
759 flc = &ctx->flc[DECRYPT];
760 desc = flc->sh_desc;
761 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
762 true);
763 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
764 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
765 sizeof(flc->flc) + desc_bytes(desc),
766 ctx->dir);
767
768 return 0;
769}
770
771static int rfc4543_setauthsize(struct crypto_aead *authenc,
772 unsigned int authsize)
773{
774 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
775
776 ctx->authsize = authsize;
777 rfc4543_set_sh_desc(authenc);
778
779 return 0;
780}
781
782static int rfc4543_setkey(struct crypto_aead *aead,
783 const u8 *key, unsigned int keylen)
784{
785 struct caam_ctx *ctx = crypto_aead_ctx(aead);
786 struct device *dev = ctx->dev;
787
788 if (keylen < 4)
789 return -EINVAL;
790
791 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
792 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
793
794 memcpy(ctx->key, key, keylen);
795 /*
796 * The last four bytes of the key material are used as the salt value
797 * in the nonce. Update the AES key length.
798 */
799 ctx->cdata.keylen = keylen - 4;
800 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
801 ctx->dir);
802
803 return rfc4543_set_sh_desc(aead);
804}
805
806static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
807 unsigned int keylen)
808{
809 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
810 struct caam_skcipher_alg *alg =
811 container_of(crypto_skcipher_alg(skcipher),
812 struct caam_skcipher_alg, skcipher);
813 struct device *dev = ctx->dev;
814 struct caam_flc *flc;
815 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
816 u32 *desc;
817 u32 ctx1_iv_off = 0;
818 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
819 OP_ALG_AAI_CTR_MOD128);
820 const bool is_rfc3686 = alg->caam.rfc3686;
821
822 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
823 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
824
825 /*
826 * AES-CTR needs to load IV in CONTEXT1 reg
 827	 * at an offset of 128 bits (16 bytes)
828 * CONTEXT1[255:128] = IV
829 */
830 if (ctr_mode)
831 ctx1_iv_off = 16;
832
833 /*
834 * RFC3686 specific:
835 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
836 * | *key = {KEY, NONCE}
837 */
838 if (is_rfc3686) {
839 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
840 keylen -= CTR_RFC3686_NONCE_SIZE;
841 }
842
843 ctx->cdata.keylen = keylen;
844 ctx->cdata.key_virt = key;
845 ctx->cdata.key_inline = true;
846
847 /* skcipher_encrypt shared descriptor */
848 flc = &ctx->flc[ENCRYPT];
849 desc = flc->sh_desc;
850 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
851 ctx1_iv_off);
852 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
853 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
854 sizeof(flc->flc) + desc_bytes(desc),
855 ctx->dir);
856
857 /* skcipher_decrypt shared descriptor */
858 flc = &ctx->flc[DECRYPT];
859 desc = flc->sh_desc;
860 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
861 ctx1_iv_off);
862 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
863 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
864 sizeof(flc->flc) + desc_bytes(desc),
865 ctx->dir);
866
867 return 0;
868}
869
870static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
871 unsigned int keylen)
872{
873 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
874 struct device *dev = ctx->dev;
875 struct caam_flc *flc;
876 u32 *desc;
877
878 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
879 dev_err(dev, "key size mismatch\n");
880 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
881 return -EINVAL;
882 }
883
884 ctx->cdata.keylen = keylen;
885 ctx->cdata.key_virt = key;
886 ctx->cdata.key_inline = true;
887
888 /* xts_skcipher_encrypt shared descriptor */
889 flc = &ctx->flc[ENCRYPT];
890 desc = flc->sh_desc;
891 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
892 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
893 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
894 sizeof(flc->flc) + desc_bytes(desc),
895 ctx->dir);
896
897 /* xts_skcipher_decrypt shared descriptor */
898 flc = &ctx->flc[DECRYPT];
899 desc = flc->sh_desc;
900 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
901 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
902 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
903 sizeof(flc->flc) + desc_bytes(desc),
904 ctx->dir);
905
906 return 0;
907}
908
909static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
910{
911 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
912 struct caam_request *req_ctx = skcipher_request_ctx(req);
913 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
914 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
915 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
916 struct device *dev = ctx->dev;
917 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
918 GFP_KERNEL : GFP_ATOMIC;
919 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
920 struct skcipher_edesc *edesc;
921 dma_addr_t iv_dma;
922 u8 *iv;
923 int ivsize = crypto_skcipher_ivsize(skcipher);
924 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
925 struct dpaa2_sg_entry *sg_table;
926
927 src_nents = sg_nents_for_len(req->src, req->cryptlen);
928 if (unlikely(src_nents < 0)) {
929 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
930 req->cryptlen);
931 return ERR_PTR(src_nents);
932 }
933
934 if (unlikely(req->dst != req->src)) {
935 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
936 if (unlikely(dst_nents < 0)) {
937 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
938 req->cryptlen);
939 return ERR_PTR(dst_nents);
940 }
941
942 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
943 DMA_TO_DEVICE);
944 if (unlikely(!mapped_src_nents)) {
945 dev_err(dev, "unable to map source\n");
946 return ERR_PTR(-ENOMEM);
947 }
948
949 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
950 DMA_FROM_DEVICE);
951 if (unlikely(!mapped_dst_nents)) {
952 dev_err(dev, "unable to map destination\n");
953 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
954 return ERR_PTR(-ENOMEM);
955 }
956 } else {
957 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
958 DMA_BIDIRECTIONAL);
959 if (unlikely(!mapped_src_nents)) {
960 dev_err(dev, "unable to map source\n");
961 return ERR_PTR(-ENOMEM);
962 }
963 }
964
965 qm_sg_ents = 1 + mapped_src_nents;
966 dst_sg_idx = qm_sg_ents;
967
968 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
969 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
970 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
971 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
972 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
973 qm_sg_ents, ivsize);
974 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
975 0, 0, 0);
976 return ERR_PTR(-ENOMEM);
977 }
978
979 /* allocate space for base edesc, link tables and IV */
980 edesc = qi_cache_zalloc(GFP_DMA | flags);
981 if (unlikely(!edesc)) {
982 dev_err(dev, "could not allocate extended descriptor\n");
983 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
984 0, 0, 0);
985 return ERR_PTR(-ENOMEM);
986 }
987
988 /* Make sure IV is located in a DMAable area */
989 sg_table = &edesc->sgt[0];
990 iv = (u8 *)(sg_table + qm_sg_ents);
991 memcpy(iv, req->iv, ivsize);
992
993 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
994 if (dma_mapping_error(dev, iv_dma)) {
995 dev_err(dev, "unable to map IV\n");
996 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
997 0, 0, 0);
998 qi_cache_free(edesc);
999 return ERR_PTR(-ENOMEM);
1000 }
1001
1002 edesc->src_nents = src_nents;
1003 edesc->dst_nents = dst_nents;
1004 edesc->iv_dma = iv_dma;
1005 edesc->qm_sg_bytes = qm_sg_bytes;
1006
1007 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1008 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1009
1010 if (mapped_dst_nents > 1)
1011 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1012 dst_sg_idx, 0);
1013
1014 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1015 DMA_TO_DEVICE);
1016 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1017 dev_err(dev, "unable to map S/G table\n");
1018 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1019 iv_dma, ivsize, 0, 0);
1020 qi_cache_free(edesc);
1021 return ERR_PTR(-ENOMEM);
1022 }
1023
1024 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1025 dpaa2_fl_set_final(in_fle, true);
1026 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1027 dpaa2_fl_set_len(out_fle, req->cryptlen);
1028
1029 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1030 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1031
1032 if (req->src == req->dst) {
1033 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1034 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1035 sizeof(*sg_table));
1036 } else if (mapped_dst_nents > 1) {
1037 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1038 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1039 sizeof(*sg_table));
1040 } else {
1041 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
1042 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
1043 }
1044
1045 return edesc;
1046}
1047
1048static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1049 struct aead_request *req)
1050{
1051 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1052 int ivsize = crypto_aead_ivsize(aead);
1053
1054 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1055 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1056 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1057}
1058
1059static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1060 struct skcipher_request *req)
1061{
1062 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1063 int ivsize = crypto_skcipher_ivsize(skcipher);
1064
1065 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1066 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
1067}
1068
1069static void aead_encrypt_done(void *cbk_ctx, u32 status)
1070{
1071 struct crypto_async_request *areq = cbk_ctx;
1072 struct aead_request *req = container_of(areq, struct aead_request,
1073 base);
1074 struct caam_request *req_ctx = to_caam_req(areq);
1075 struct aead_edesc *edesc = req_ctx->edesc;
1076 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1077 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1078 int ecode = 0;
1079
1080 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1081
1082 if (unlikely(status)) {
1083 caam_qi2_strstatus(ctx->dev, status);
1084 ecode = -EIO;
1085 }
1086
1087 aead_unmap(ctx->dev, edesc, req);
1088 qi_cache_free(edesc);
1089 aead_request_complete(req, ecode);
1090}
1091
1092static void aead_decrypt_done(void *cbk_ctx, u32 status)
1093{
1094 struct crypto_async_request *areq = cbk_ctx;
1095 struct aead_request *req = container_of(areq, struct aead_request,
1096 base);
1097 struct caam_request *req_ctx = to_caam_req(areq);
1098 struct aead_edesc *edesc = req_ctx->edesc;
1099 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1100 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1101 int ecode = 0;
1102
1103 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1104
1105 if (unlikely(status)) {
1106 caam_qi2_strstatus(ctx->dev, status);
1107 /*
1108		 * verify the hardware ICV (auth) check passed, else return -EBADMSG
1109 */
1110 if ((status & JRSTA_CCBERR_ERRID_MASK) ==
1111 JRSTA_CCBERR_ERRID_ICVCHK)
1112 ecode = -EBADMSG;
1113 else
1114 ecode = -EIO;
1115 }
1116
1117 aead_unmap(ctx->dev, edesc, req);
1118 qi_cache_free(edesc);
1119 aead_request_complete(req, ecode);
1120}
1121
1122static int aead_encrypt(struct aead_request *req)
1123{
1124 struct aead_edesc *edesc;
1125 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1126 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1127 struct caam_request *caam_req = aead_request_ctx(req);
1128 int ret;
1129
1130 /* allocate extended descriptor */
1131 edesc = aead_edesc_alloc(req, true);
1132 if (IS_ERR(edesc))
1133 return PTR_ERR(edesc);
1134
1135 caam_req->flc = &ctx->flc[ENCRYPT];
1136 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1137 caam_req->cbk = aead_encrypt_done;
1138 caam_req->ctx = &req->base;
1139 caam_req->edesc = edesc;
1140 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1141 if (ret != -EINPROGRESS &&
1142 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1143 aead_unmap(ctx->dev, edesc, req);
1144 qi_cache_free(edesc);
1145 }
1146
1147 return ret;
1148}
1149
1150static int aead_decrypt(struct aead_request *req)
1151{
1152 struct aead_edesc *edesc;
1153 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1154 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1155 struct caam_request *caam_req = aead_request_ctx(req);
1156 int ret;
1157
1158 /* allocate extended descriptor */
1159 edesc = aead_edesc_alloc(req, false);
1160 if (IS_ERR(edesc))
1161 return PTR_ERR(edesc);
1162
1163 caam_req->flc = &ctx->flc[DECRYPT];
1164 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1165 caam_req->cbk = aead_decrypt_done;
1166 caam_req->ctx = &req->base;
1167 caam_req->edesc = edesc;
1168 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1169 if (ret != -EINPROGRESS &&
1170 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1171 aead_unmap(ctx->dev, edesc, req);
1172 qi_cache_free(edesc);
1173 }
1174
1175 return ret;
1176}
1177
1178static int ipsec_gcm_encrypt(struct aead_request *req)
1179{
1180 if (req->assoclen < 8)
1181 return -EINVAL;
1182
1183 return aead_encrypt(req);
1184}
1185
1186static int ipsec_gcm_decrypt(struct aead_request *req)
1187{
1188 if (req->assoclen < 8)
1189 return -EINVAL;
1190
1191 return aead_decrypt(req);
1192}
1193
1194static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1195{
1196 struct crypto_async_request *areq = cbk_ctx;
1197 struct skcipher_request *req = skcipher_request_cast(areq);
1198 struct caam_request *req_ctx = to_caam_req(areq);
1199 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1200 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1201 struct skcipher_edesc *edesc = req_ctx->edesc;
1202 int ecode = 0;
1203 int ivsize = crypto_skcipher_ivsize(skcipher);
1204
1205 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1206
1207 if (unlikely(status)) {
1208 caam_qi2_strstatus(ctx->dev, status);
1209 ecode = -EIO;
1210 }
1211
1212 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1213 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1214 edesc->src_nents > 1 ? 100 : ivsize, 1);
1215 caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
1216 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1217 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1218
1219 skcipher_unmap(ctx->dev, edesc, req);
1220
1221 /*
1222 * The crypto API expects us to set the IV (req->iv) to the last
1223 * ciphertext block. This is used e.g. by the CTS mode.
1224 */
1225 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
1226 ivsize, 0);
1227
1228 qi_cache_free(edesc);
1229 skcipher_request_complete(req, ecode);
1230}
1231
1232static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1233{
1234 struct crypto_async_request *areq = cbk_ctx;
1235 struct skcipher_request *req = skcipher_request_cast(areq);
1236 struct caam_request *req_ctx = to_caam_req(areq);
1237 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1238 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1239 struct skcipher_edesc *edesc = req_ctx->edesc;
1240 int ecode = 0;
1241 int ivsize = crypto_skcipher_ivsize(skcipher);
1242
1243 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1244
1245 if (unlikely(status)) {
1246 caam_qi2_strstatus(ctx->dev, status);
1247 ecode = -EIO;
1248 }
1249
1250 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1251 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1252 edesc->src_nents > 1 ? 100 : ivsize, 1);
1253 caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
1254 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1255 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1256
1257 skcipher_unmap(ctx->dev, edesc, req);
1258 qi_cache_free(edesc);
1259 skcipher_request_complete(req, ecode);
1260}
1261
1262static int skcipher_encrypt(struct skcipher_request *req)
1263{
1264 struct skcipher_edesc *edesc;
1265 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1266 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1267 struct caam_request *caam_req = skcipher_request_ctx(req);
1268 int ret;
1269
1270 /* allocate extended descriptor */
1271 edesc = skcipher_edesc_alloc(req);
1272 if (IS_ERR(edesc))
1273 return PTR_ERR(edesc);
1274
1275 caam_req->flc = &ctx->flc[ENCRYPT];
1276 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1277 caam_req->cbk = skcipher_encrypt_done;
1278 caam_req->ctx = &req->base;
1279 caam_req->edesc = edesc;
1280 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1281 if (ret != -EINPROGRESS &&
1282 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1283 skcipher_unmap(ctx->dev, edesc, req);
1284 qi_cache_free(edesc);
1285 }
1286
1287 return ret;
1288}
1289
1290static int skcipher_decrypt(struct skcipher_request *req)
1291{
1292 struct skcipher_edesc *edesc;
1293 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1294 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1295 struct caam_request *caam_req = skcipher_request_ctx(req);
1296 int ivsize = crypto_skcipher_ivsize(skcipher);
1297 int ret;
1298
1299 /* allocate extended descriptor */
1300 edesc = skcipher_edesc_alloc(req);
1301 if (IS_ERR(edesc))
1302 return PTR_ERR(edesc);
1303
1304 /*
1305 * The crypto API expects us to set the IV (req->iv) to the last
1306 * ciphertext block.
1307 */
1308 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
1309 ivsize, 0);
1310
1311 caam_req->flc = &ctx->flc[DECRYPT];
1312 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1313 caam_req->cbk = skcipher_decrypt_done;
1314 caam_req->ctx = &req->base;
1315 caam_req->edesc = edesc;
1316 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1317 if (ret != -EINPROGRESS &&
1318 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1319 skcipher_unmap(ctx->dev, edesc, req);
1320 qi_cache_free(edesc);
1321 }
1322
1323 return ret;
1324}
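For context, a kernel user reaches these entry points through the generic skcipher API rather than by calling them directly. A minimal sketch, assuming caller-provided key, iv, src_sg, dst_sg and nbytes, and using the standard crypto wait helpers; this is not part of the patch:

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* "cbc(aes)" picks the highest-priority implementation; requesting
	 * "cbc-aes-caam-qi2" instead would pin this driver's instance.
	 */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);

	/* skcipher_encrypt() above returns -EINPROGRESS; wait for the QI callback */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return err;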
1325
1326static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1327 bool uses_dkp)
1328{
1329 dma_addr_t dma_addr;
1330 int i;
1331
1332 /* copy descriptor header template value */
1333 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1334 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1335
1336 ctx->dev = caam->dev;
1337 ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1338
1339 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1340 offsetof(struct caam_ctx, flc_dma),
1341 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1342 if (dma_mapping_error(ctx->dev, dma_addr)) {
1343 dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1344 return -ENOMEM;
1345 }
1346
1347 for (i = 0; i < NUM_OP; i++)
1348 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1349 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1350
1351 return 0;
1352}
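The key_dma arithmetic above relies on key[] starting immediately after flc[NUM_OP] inside struct caam_ctx; a compile-time assertion of that assumption could look like this (illustrative only, not in the patch, and it would live inside caam_cra_init()):

	/* assumption: no padding between flc[] and key[] in struct caam_ctx */
	BUILD_BUG_ON(offsetof(struct caam_ctx, key) !=
		     NUM_OP * sizeof(((struct caam_ctx *)0)->flc[0]));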
1353
1354static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1355{
1356 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1357 struct caam_skcipher_alg *caam_alg =
1358 container_of(alg, typeof(*caam_alg), skcipher);
1359
1360 crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1361 return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1362}
1363
1364static int caam_cra_init_aead(struct crypto_aead *tfm)
1365{
1366 struct aead_alg *alg = crypto_aead_alg(tfm);
1367 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1368 aead);
1369
1370 crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1371 return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1372 alg->setkey == aead_setkey);
1373}
1374
1375static void caam_exit_common(struct caam_ctx *ctx)
1376{
1377 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1378 offsetof(struct caam_ctx, flc_dma), ctx->dir,
1379 DMA_ATTR_SKIP_CPU_SYNC);
1380}
1381
1382static void caam_cra_exit(struct crypto_skcipher *tfm)
1383{
1384 caam_exit_common(crypto_skcipher_ctx(tfm));
1385}
1386
1387static void caam_cra_exit_aead(struct crypto_aead *tfm)
1388{
1389 caam_exit_common(crypto_aead_ctx(tfm));
1390}
1391
1392static struct caam_skcipher_alg driver_algs[] = {
1393 {
1394 .skcipher = {
1395 .base = {
1396 .cra_name = "cbc(aes)",
1397 .cra_driver_name = "cbc-aes-caam-qi2",
1398 .cra_blocksize = AES_BLOCK_SIZE,
1399 },
1400 .setkey = skcipher_setkey,
1401 .encrypt = skcipher_encrypt,
1402 .decrypt = skcipher_decrypt,
1403 .min_keysize = AES_MIN_KEY_SIZE,
1404 .max_keysize = AES_MAX_KEY_SIZE,
1405 .ivsize = AES_BLOCK_SIZE,
1406 },
1407 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1408 },
1409 {
1410 .skcipher = {
1411 .base = {
1412 .cra_name = "cbc(des3_ede)",
1413 .cra_driver_name = "cbc-3des-caam-qi2",
1414 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1415 },
1416 .setkey = skcipher_setkey,
1417 .encrypt = skcipher_encrypt,
1418 .decrypt = skcipher_decrypt,
1419 .min_keysize = DES3_EDE_KEY_SIZE,
1420 .max_keysize = DES3_EDE_KEY_SIZE,
1421 .ivsize = DES3_EDE_BLOCK_SIZE,
1422 },
1423 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1424 },
1425 {
1426 .skcipher = {
1427 .base = {
1428 .cra_name = "cbc(des)",
1429 .cra_driver_name = "cbc-des-caam-qi2",
1430 .cra_blocksize = DES_BLOCK_SIZE,
1431 },
1432 .setkey = skcipher_setkey,
1433 .encrypt = skcipher_encrypt,
1434 .decrypt = skcipher_decrypt,
1435 .min_keysize = DES_KEY_SIZE,
1436 .max_keysize = DES_KEY_SIZE,
1437 .ivsize = DES_BLOCK_SIZE,
1438 },
1439 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1440 },
1441 {
1442 .skcipher = {
1443 .base = {
1444 .cra_name = "ctr(aes)",
1445 .cra_driver_name = "ctr-aes-caam-qi2",
1446 .cra_blocksize = 1,
1447 },
1448 .setkey = skcipher_setkey,
1449 .encrypt = skcipher_encrypt,
1450 .decrypt = skcipher_decrypt,
1451 .min_keysize = AES_MIN_KEY_SIZE,
1452 .max_keysize = AES_MAX_KEY_SIZE,
1453 .ivsize = AES_BLOCK_SIZE,
1454 .chunksize = AES_BLOCK_SIZE,
1455 },
1456 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1457 OP_ALG_AAI_CTR_MOD128,
1458 },
1459 {
1460 .skcipher = {
1461 .base = {
1462 .cra_name = "rfc3686(ctr(aes))",
1463 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1464 .cra_blocksize = 1,
1465 },
1466 .setkey = skcipher_setkey,
1467 .encrypt = skcipher_encrypt,
1468 .decrypt = skcipher_decrypt,
1469 .min_keysize = AES_MIN_KEY_SIZE +
1470 CTR_RFC3686_NONCE_SIZE,
1471 .max_keysize = AES_MAX_KEY_SIZE +
1472 CTR_RFC3686_NONCE_SIZE,
1473 .ivsize = CTR_RFC3686_IV_SIZE,
1474 .chunksize = AES_BLOCK_SIZE,
1475 },
1476 .caam = {
1477 .class1_alg_type = OP_ALG_ALGSEL_AES |
1478 OP_ALG_AAI_CTR_MOD128,
1479 .rfc3686 = true,
1480 },
1481 },
1482 {
1483 .skcipher = {
1484 .base = {
1485 .cra_name = "xts(aes)",
1486 .cra_driver_name = "xts-aes-caam-qi2",
1487 .cra_blocksize = AES_BLOCK_SIZE,
1488 },
1489 .setkey = xts_skcipher_setkey,
1490 .encrypt = skcipher_encrypt,
1491 .decrypt = skcipher_decrypt,
1492 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1493 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1494 .ivsize = AES_BLOCK_SIZE,
1495 },
1496 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1497 }
1498};
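/*
 * A minimal, illustrative sketch of how a generic kernel consumer might
 * exercise one of the skciphers registered above (e.g. "cbc(aes)", serviced
 * by this driver as "cbc-aes-caam-qi2" when it wins the priority-based
 * algorithm selection).  The function and variable names below are
 * hypothetical and not part of this driver; the buffer must be DMA-able
 * and, for CBC, a multiple of AES_BLOCK_SIZE in length.
 *
 *	#include <crypto/skcipher.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
 *					   u8 *iv, void *buf, unsigned int len)
 *	{
 *		struct crypto_skcipher *tfm;
 *		struct skcipher_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int ret;
 *
 *		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		ret = crypto_skcipher_setkey(tfm, key, keylen);
 *		if (ret)
 *			goto out_free_tfm;
 *
 *		req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			ret = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *
 *		sg_init_one(&sg, buf, len);
 *		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
 *					      CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					      crypto_req_done, &wait);
 *		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *		skcipher_request_free(req);
 *	out_free_tfm:
 *		crypto_free_skcipher(tfm);
 *		return ret;
 *	}
 */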
1499
1500static struct caam_aead_alg driver_aeads[] = {
1501 {
1502 .aead = {
1503 .base = {
1504 .cra_name = "rfc4106(gcm(aes))",
1505 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1506 .cra_blocksize = 1,
1507 },
1508 .setkey = rfc4106_setkey,
1509 .setauthsize = rfc4106_setauthsize,
1510 .encrypt = ipsec_gcm_encrypt,
1511 .decrypt = ipsec_gcm_decrypt,
1512 .ivsize = 8,
1513 .maxauthsize = AES_BLOCK_SIZE,
1514 },
1515 .caam = {
1516 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1517 },
1518 },
1519 {
1520 .aead = {
1521 .base = {
1522 .cra_name = "rfc4543(gcm(aes))",
1523 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1524 .cra_blocksize = 1,
1525 },
1526 .setkey = rfc4543_setkey,
1527 .setauthsize = rfc4543_setauthsize,
1528 .encrypt = ipsec_gcm_encrypt,
1529 .decrypt = ipsec_gcm_decrypt,
1530 .ivsize = 8,
1531 .maxauthsize = AES_BLOCK_SIZE,
1532 },
1533 .caam = {
1534 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1535 },
1536 },
1537 /* Galois Counter Mode */
1538 {
1539 .aead = {
1540 .base = {
1541 .cra_name = "gcm(aes)",
1542 .cra_driver_name = "gcm-aes-caam-qi2",
1543 .cra_blocksize = 1,
1544 },
1545 .setkey = gcm_setkey,
1546 .setauthsize = gcm_setauthsize,
1547 .encrypt = aead_encrypt,
1548 .decrypt = aead_decrypt,
1549 .ivsize = 12,
1550 .maxauthsize = AES_BLOCK_SIZE,
1551 },
1552 .caam = {
1553 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1554 }
1555 },
1556 /* single-pass ipsec_esp descriptor */
1557 {
1558 .aead = {
1559 .base = {
1560 .cra_name = "authenc(hmac(md5),cbc(aes))",
1561 .cra_driver_name = "authenc-hmac-md5-"
1562 "cbc-aes-caam-qi2",
1563 .cra_blocksize = AES_BLOCK_SIZE,
1564 },
1565 .setkey = aead_setkey,
1566 .setauthsize = aead_setauthsize,
1567 .encrypt = aead_encrypt,
1568 .decrypt = aead_decrypt,
1569 .ivsize = AES_BLOCK_SIZE,
1570 .maxauthsize = MD5_DIGEST_SIZE,
1571 },
1572 .caam = {
1573 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1574 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1575 OP_ALG_AAI_HMAC_PRECOMP,
1576 }
1577 },
1578 {
1579 .aead = {
1580 .base = {
1581 .cra_name = "echainiv(authenc(hmac(md5),"
1582 "cbc(aes)))",
1583 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1584 "cbc-aes-caam-qi2",
1585 .cra_blocksize = AES_BLOCK_SIZE,
1586 },
1587 .setkey = aead_setkey,
1588 .setauthsize = aead_setauthsize,
1589 .encrypt = aead_encrypt,
1590 .decrypt = aead_decrypt,
1591 .ivsize = AES_BLOCK_SIZE,
1592 .maxauthsize = MD5_DIGEST_SIZE,
1593 },
1594 .caam = {
1595 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1596 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1597 OP_ALG_AAI_HMAC_PRECOMP,
1598 .geniv = true,
1599 }
1600 },
1601 {
1602 .aead = {
1603 .base = {
1604 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1605 .cra_driver_name = "authenc-hmac-sha1-"
1606 "cbc-aes-caam-qi2",
1607 .cra_blocksize = AES_BLOCK_SIZE,
1608 },
1609 .setkey = aead_setkey,
1610 .setauthsize = aead_setauthsize,
1611 .encrypt = aead_encrypt,
1612 .decrypt = aead_decrypt,
1613 .ivsize = AES_BLOCK_SIZE,
1614 .maxauthsize = SHA1_DIGEST_SIZE,
1615 },
1616 .caam = {
1617 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1618 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1619 OP_ALG_AAI_HMAC_PRECOMP,
1620 }
1621 },
1622 {
1623 .aead = {
1624 .base = {
1625 .cra_name = "echainiv(authenc(hmac(sha1),"
1626 "cbc(aes)))",
1627 .cra_driver_name = "echainiv-authenc-"
1628 "hmac-sha1-cbc-aes-caam-qi2",
1629 .cra_blocksize = AES_BLOCK_SIZE,
1630 },
1631 .setkey = aead_setkey,
1632 .setauthsize = aead_setauthsize,
1633 .encrypt = aead_encrypt,
1634 .decrypt = aead_decrypt,
1635 .ivsize = AES_BLOCK_SIZE,
1636 .maxauthsize = SHA1_DIGEST_SIZE,
1637 },
1638 .caam = {
1639 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1640 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1641 OP_ALG_AAI_HMAC_PRECOMP,
1642 .geniv = true,
1643 },
1644 },
1645 {
1646 .aead = {
1647 .base = {
1648 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1649 .cra_driver_name = "authenc-hmac-sha224-"
1650 "cbc-aes-caam-qi2",
1651 .cra_blocksize = AES_BLOCK_SIZE,
1652 },
1653 .setkey = aead_setkey,
1654 .setauthsize = aead_setauthsize,
1655 .encrypt = aead_encrypt,
1656 .decrypt = aead_decrypt,
1657 .ivsize = AES_BLOCK_SIZE,
1658 .maxauthsize = SHA224_DIGEST_SIZE,
1659 },
1660 .caam = {
1661 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1662 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1663 OP_ALG_AAI_HMAC_PRECOMP,
1664 }
1665 },
1666 {
1667 .aead = {
1668 .base = {
1669 .cra_name = "echainiv(authenc(hmac(sha224),"
1670 "cbc(aes)))",
1671 .cra_driver_name = "echainiv-authenc-"
1672 "hmac-sha224-cbc-aes-caam-qi2",
1673 .cra_blocksize = AES_BLOCK_SIZE,
1674 },
1675 .setkey = aead_setkey,
1676 .setauthsize = aead_setauthsize,
1677 .encrypt = aead_encrypt,
1678 .decrypt = aead_decrypt,
1679 .ivsize = AES_BLOCK_SIZE,
1680 .maxauthsize = SHA224_DIGEST_SIZE,
1681 },
1682 .caam = {
1683 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1684 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1685 OP_ALG_AAI_HMAC_PRECOMP,
1686 .geniv = true,
1687 }
1688 },
1689 {
1690 .aead = {
1691 .base = {
1692 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1693 .cra_driver_name = "authenc-hmac-sha256-"
1694 "cbc-aes-caam-qi2",
1695 .cra_blocksize = AES_BLOCK_SIZE,
1696 },
1697 .setkey = aead_setkey,
1698 .setauthsize = aead_setauthsize,
1699 .encrypt = aead_encrypt,
1700 .decrypt = aead_decrypt,
1701 .ivsize = AES_BLOCK_SIZE,
1702 .maxauthsize = SHA256_DIGEST_SIZE,
1703 },
1704 .caam = {
1705 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1706 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1707 OP_ALG_AAI_HMAC_PRECOMP,
1708 }
1709 },
1710 {
1711 .aead = {
1712 .base = {
1713 .cra_name = "echainiv(authenc(hmac(sha256),"
1714 "cbc(aes)))",
1715 .cra_driver_name = "echainiv-authenc-"
1716 "hmac-sha256-cbc-aes-"
1717 "caam-qi2",
1718 .cra_blocksize = AES_BLOCK_SIZE,
1719 },
1720 .setkey = aead_setkey,
1721 .setauthsize = aead_setauthsize,
1722 .encrypt = aead_encrypt,
1723 .decrypt = aead_decrypt,
1724 .ivsize = AES_BLOCK_SIZE,
1725 .maxauthsize = SHA256_DIGEST_SIZE,
1726 },
1727 .caam = {
1728 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1729 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1730 OP_ALG_AAI_HMAC_PRECOMP,
1731 .geniv = true,
1732 }
1733 },
1734 {
1735 .aead = {
1736 .base = {
1737 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1738 .cra_driver_name = "authenc-hmac-sha384-"
1739 "cbc-aes-caam-qi2",
1740 .cra_blocksize = AES_BLOCK_SIZE,
1741 },
1742 .setkey = aead_setkey,
1743 .setauthsize = aead_setauthsize,
1744 .encrypt = aead_encrypt,
1745 .decrypt = aead_decrypt,
1746 .ivsize = AES_BLOCK_SIZE,
1747 .maxauthsize = SHA384_DIGEST_SIZE,
1748 },
1749 .caam = {
1750 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1751 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1752 OP_ALG_AAI_HMAC_PRECOMP,
1753 }
1754 },
1755 {
1756 .aead = {
1757 .base = {
1758 .cra_name = "echainiv(authenc(hmac(sha384),"
1759 "cbc(aes)))",
1760 .cra_driver_name = "echainiv-authenc-"
1761 "hmac-sha384-cbc-aes-"
1762 "caam-qi2",
1763 .cra_blocksize = AES_BLOCK_SIZE,
1764 },
1765 .setkey = aead_setkey,
1766 .setauthsize = aead_setauthsize,
1767 .encrypt = aead_encrypt,
1768 .decrypt = aead_decrypt,
1769 .ivsize = AES_BLOCK_SIZE,
1770 .maxauthsize = SHA384_DIGEST_SIZE,
1771 },
1772 .caam = {
1773 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1774 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1775 OP_ALG_AAI_HMAC_PRECOMP,
1776 .geniv = true,
1777 }
1778 },
1779 {
1780 .aead = {
1781 .base = {
1782 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1783 .cra_driver_name = "authenc-hmac-sha512-"
1784 "cbc-aes-caam-qi2",
1785 .cra_blocksize = AES_BLOCK_SIZE,
1786 },
1787 .setkey = aead_setkey,
1788 .setauthsize = aead_setauthsize,
1789 .encrypt = aead_encrypt,
1790 .decrypt = aead_decrypt,
1791 .ivsize = AES_BLOCK_SIZE,
1792 .maxauthsize = SHA512_DIGEST_SIZE,
1793 },
1794 .caam = {
1795 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1796 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1797 OP_ALG_AAI_HMAC_PRECOMP,
1798 }
1799 },
1800 {
1801 .aead = {
1802 .base = {
1803 .cra_name = "echainiv(authenc(hmac(sha512),"
1804 "cbc(aes)))",
1805 .cra_driver_name = "echainiv-authenc-"
1806 "hmac-sha512-cbc-aes-"
1807 "caam-qi2",
1808 .cra_blocksize = AES_BLOCK_SIZE,
1809 },
1810 .setkey = aead_setkey,
1811 .setauthsize = aead_setauthsize,
1812 .encrypt = aead_encrypt,
1813 .decrypt = aead_decrypt,
1814 .ivsize = AES_BLOCK_SIZE,
1815 .maxauthsize = SHA512_DIGEST_SIZE,
1816 },
1817 .caam = {
1818 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1819 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1820 OP_ALG_AAI_HMAC_PRECOMP,
1821 .geniv = true,
1822 }
1823 },
1824 {
1825 .aead = {
1826 .base = {
1827 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1828 .cra_driver_name = "authenc-hmac-md5-"
1829 "cbc-des3_ede-caam-qi2",
1830 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1831 },
1832 .setkey = aead_setkey,
1833 .setauthsize = aead_setauthsize,
1834 .encrypt = aead_encrypt,
1835 .decrypt = aead_decrypt,
1836 .ivsize = DES3_EDE_BLOCK_SIZE,
1837 .maxauthsize = MD5_DIGEST_SIZE,
1838 },
1839 .caam = {
1840 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1841 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1842 OP_ALG_AAI_HMAC_PRECOMP,
1843 }
1844 },
1845 {
1846 .aead = {
1847 .base = {
1848 .cra_name = "echainiv(authenc(hmac(md5),"
1849 "cbc(des3_ede)))",
1850 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1851 "cbc-des3_ede-caam-qi2",
1852 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1853 },
1854 .setkey = aead_setkey,
1855 .setauthsize = aead_setauthsize,
1856 .encrypt = aead_encrypt,
1857 .decrypt = aead_decrypt,
1858 .ivsize = DES3_EDE_BLOCK_SIZE,
1859 .maxauthsize = MD5_DIGEST_SIZE,
1860 },
1861 .caam = {
1862 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1863 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1864 OP_ALG_AAI_HMAC_PRECOMP,
1865 .geniv = true,
1866 }
1867 },
1868 {
1869 .aead = {
1870 .base = {
1871 .cra_name = "authenc(hmac(sha1),"
1872 "cbc(des3_ede))",
1873 .cra_driver_name = "authenc-hmac-sha1-"
1874 "cbc-des3_ede-caam-qi2",
1875 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1876 },
1877 .setkey = aead_setkey,
1878 .setauthsize = aead_setauthsize,
1879 .encrypt = aead_encrypt,
1880 .decrypt = aead_decrypt,
1881 .ivsize = DES3_EDE_BLOCK_SIZE,
1882 .maxauthsize = SHA1_DIGEST_SIZE,
1883 },
1884 .caam = {
1885 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1886 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1887 OP_ALG_AAI_HMAC_PRECOMP,
1888 },
1889 },
1890 {
1891 .aead = {
1892 .base = {
1893 .cra_name = "echainiv(authenc(hmac(sha1),"
1894 "cbc(des3_ede)))",
1895 .cra_driver_name = "echainiv-authenc-"
1896 "hmac-sha1-"
1897 "cbc-des3_ede-caam-qi2",
1898 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1899 },
1900 .setkey = aead_setkey,
1901 .setauthsize = aead_setauthsize,
1902 .encrypt = aead_encrypt,
1903 .decrypt = aead_decrypt,
1904 .ivsize = DES3_EDE_BLOCK_SIZE,
1905 .maxauthsize = SHA1_DIGEST_SIZE,
1906 },
1907 .caam = {
1908 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1909 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1910 OP_ALG_AAI_HMAC_PRECOMP,
1911 .geniv = true,
1912 }
1913 },
1914 {
1915 .aead = {
1916 .base = {
1917 .cra_name = "authenc(hmac(sha224),"
1918 "cbc(des3_ede))",
1919 .cra_driver_name = "authenc-hmac-sha224-"
1920 "cbc-des3_ede-caam-qi2",
1921 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1922 },
1923 .setkey = aead_setkey,
1924 .setauthsize = aead_setauthsize,
1925 .encrypt = aead_encrypt,
1926 .decrypt = aead_decrypt,
1927 .ivsize = DES3_EDE_BLOCK_SIZE,
1928 .maxauthsize = SHA224_DIGEST_SIZE,
1929 },
1930 .caam = {
1931 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1932 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1933 OP_ALG_AAI_HMAC_PRECOMP,
1934 },
1935 },
1936 {
1937 .aead = {
1938 .base = {
1939 .cra_name = "echainiv(authenc(hmac(sha224),"
1940 "cbc(des3_ede)))",
1941 .cra_driver_name = "echainiv-authenc-"
1942 "hmac-sha224-"
1943 "cbc-des3_ede-caam-qi2",
1944 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1945 },
1946 .setkey = aead_setkey,
1947 .setauthsize = aead_setauthsize,
1948 .encrypt = aead_encrypt,
1949 .decrypt = aead_decrypt,
1950 .ivsize = DES3_EDE_BLOCK_SIZE,
1951 .maxauthsize = SHA224_DIGEST_SIZE,
1952 },
1953 .caam = {
1954 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1955 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1956 OP_ALG_AAI_HMAC_PRECOMP,
1957 .geniv = true,
1958 }
1959 },
1960 {
1961 .aead = {
1962 .base = {
1963 .cra_name = "authenc(hmac(sha256),"
1964 "cbc(des3_ede))",
1965 .cra_driver_name = "authenc-hmac-sha256-"
1966 "cbc-des3_ede-caam-qi2",
1967 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1968 },
1969 .setkey = aead_setkey,
1970 .setauthsize = aead_setauthsize,
1971 .encrypt = aead_encrypt,
1972 .decrypt = aead_decrypt,
1973 .ivsize = DES3_EDE_BLOCK_SIZE,
1974 .maxauthsize = SHA256_DIGEST_SIZE,
1975 },
1976 .caam = {
1977 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1978 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1979 OP_ALG_AAI_HMAC_PRECOMP,
1980 },
1981 },
1982 {
1983 .aead = {
1984 .base = {
1985 .cra_name = "echainiv(authenc(hmac(sha256),"
1986 "cbc(des3_ede)))",
1987 .cra_driver_name = "echainiv-authenc-"
1988 "hmac-sha256-"
1989 "cbc-des3_ede-caam-qi2",
1990 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1991 },
1992 .setkey = aead_setkey,
1993 .setauthsize = aead_setauthsize,
1994 .encrypt = aead_encrypt,
1995 .decrypt = aead_decrypt,
1996 .ivsize = DES3_EDE_BLOCK_SIZE,
1997 .maxauthsize = SHA256_DIGEST_SIZE,
1998 },
1999 .caam = {
2000 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2001 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2002 OP_ALG_AAI_HMAC_PRECOMP,
2003 .geniv = true,
2004 }
2005 },
2006 {
2007 .aead = {
2008 .base = {
2009 .cra_name = "authenc(hmac(sha384),"
2010 "cbc(des3_ede))",
2011 .cra_driver_name = "authenc-hmac-sha384-"
2012 "cbc-des3_ede-caam-qi2",
2013 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2014 },
2015 .setkey = aead_setkey,
2016 .setauthsize = aead_setauthsize,
2017 .encrypt = aead_encrypt,
2018 .decrypt = aead_decrypt,
2019 .ivsize = DES3_EDE_BLOCK_SIZE,
2020 .maxauthsize = SHA384_DIGEST_SIZE,
2021 },
2022 .caam = {
2023 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2024 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2025 OP_ALG_AAI_HMAC_PRECOMP,
2026 },
2027 },
2028 {
2029 .aead = {
2030 .base = {
2031 .cra_name = "echainiv(authenc(hmac(sha384),"
2032 "cbc(des3_ede)))",
2033 .cra_driver_name = "echainiv-authenc-"
2034 "hmac-sha384-"
2035 "cbc-des3_ede-caam-qi2",
2036 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2037 },
2038 .setkey = aead_setkey,
2039 .setauthsize = aead_setauthsize,
2040 .encrypt = aead_encrypt,
2041 .decrypt = aead_decrypt,
2042 .ivsize = DES3_EDE_BLOCK_SIZE,
2043 .maxauthsize = SHA384_DIGEST_SIZE,
2044 },
2045 .caam = {
2046 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2047 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2048 OP_ALG_AAI_HMAC_PRECOMP,
2049 .geniv = true,
2050 }
2051 },
2052 {
2053 .aead = {
2054 .base = {
2055 .cra_name = "authenc(hmac(sha512),"
2056 "cbc(des3_ede))",
2057 .cra_driver_name = "authenc-hmac-sha512-"
2058 "cbc-des3_ede-caam-qi2",
2059 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2060 },
2061 .setkey = aead_setkey,
2062 .setauthsize = aead_setauthsize,
2063 .encrypt = aead_encrypt,
2064 .decrypt = aead_decrypt,
2065 .ivsize = DES3_EDE_BLOCK_SIZE,
2066 .maxauthsize = SHA512_DIGEST_SIZE,
2067 },
2068 .caam = {
2069 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2070 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2071 OP_ALG_AAI_HMAC_PRECOMP,
2072 },
2073 },
2074 {
2075 .aead = {
2076 .base = {
2077 .cra_name = "echainiv(authenc(hmac(sha512),"
2078 "cbc(des3_ede)))",
2079 .cra_driver_name = "echainiv-authenc-"
2080 "hmac-sha512-"
2081 "cbc-des3_ede-caam-qi2",
2082 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2083 },
2084 .setkey = aead_setkey,
2085 .setauthsize = aead_setauthsize,
2086 .encrypt = aead_encrypt,
2087 .decrypt = aead_decrypt,
2088 .ivsize = DES3_EDE_BLOCK_SIZE,
2089 .maxauthsize = SHA512_DIGEST_SIZE,
2090 },
2091 .caam = {
2092 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2093 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2094 OP_ALG_AAI_HMAC_PRECOMP,
2095 .geniv = true,
2096 }
2097 },
2098 {
2099 .aead = {
2100 .base = {
2101 .cra_name = "authenc(hmac(md5),cbc(des))",
2102 .cra_driver_name = "authenc-hmac-md5-"
2103 "cbc-des-caam-qi2",
2104 .cra_blocksize = DES_BLOCK_SIZE,
2105 },
2106 .setkey = aead_setkey,
2107 .setauthsize = aead_setauthsize,
2108 .encrypt = aead_encrypt,
2109 .decrypt = aead_decrypt,
2110 .ivsize = DES_BLOCK_SIZE,
2111 .maxauthsize = MD5_DIGEST_SIZE,
2112 },
2113 .caam = {
2114 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2115 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2116 OP_ALG_AAI_HMAC_PRECOMP,
2117 },
2118 },
2119 {
2120 .aead = {
2121 .base = {
2122 .cra_name = "echainiv(authenc(hmac(md5),"
2123 "cbc(des)))",
2124 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2125 "cbc-des-caam-qi2",
2126 .cra_blocksize = DES_BLOCK_SIZE,
2127 },
2128 .setkey = aead_setkey,
2129 .setauthsize = aead_setauthsize,
2130 .encrypt = aead_encrypt,
2131 .decrypt = aead_decrypt,
2132 .ivsize = DES_BLOCK_SIZE,
2133 .maxauthsize = MD5_DIGEST_SIZE,
2134 },
2135 .caam = {
2136 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2137 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2138 OP_ALG_AAI_HMAC_PRECOMP,
2139 .geniv = true,
2140 }
2141 },
2142 {
2143 .aead = {
2144 .base = {
2145 .cra_name = "authenc(hmac(sha1),cbc(des))",
2146 .cra_driver_name = "authenc-hmac-sha1-"
2147 "cbc-des-caam-qi2",
2148 .cra_blocksize = DES_BLOCK_SIZE,
2149 },
2150 .setkey = aead_setkey,
2151 .setauthsize = aead_setauthsize,
2152 .encrypt = aead_encrypt,
2153 .decrypt = aead_decrypt,
2154 .ivsize = DES_BLOCK_SIZE,
2155 .maxauthsize = SHA1_DIGEST_SIZE,
2156 },
2157 .caam = {
2158 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2159 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2160 OP_ALG_AAI_HMAC_PRECOMP,
2161 },
2162 },
2163 {
2164 .aead = {
2165 .base = {
2166 .cra_name = "echainiv(authenc(hmac(sha1),"
2167 "cbc(des)))",
2168 .cra_driver_name = "echainiv-authenc-"
2169 "hmac-sha1-cbc-des-caam-qi2",
2170 .cra_blocksize = DES_BLOCK_SIZE,
2171 },
2172 .setkey = aead_setkey,
2173 .setauthsize = aead_setauthsize,
2174 .encrypt = aead_encrypt,
2175 .decrypt = aead_decrypt,
2176 .ivsize = DES_BLOCK_SIZE,
2177 .maxauthsize = SHA1_DIGEST_SIZE,
2178 },
2179 .caam = {
2180 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2181 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2182 OP_ALG_AAI_HMAC_PRECOMP,
2183 .geniv = true,
2184 }
2185 },
2186 {
2187 .aead = {
2188 .base = {
2189 .cra_name = "authenc(hmac(sha224),cbc(des))",
2190 .cra_driver_name = "authenc-hmac-sha224-"
2191 "cbc-des-caam-qi2",
2192 .cra_blocksize = DES_BLOCK_SIZE,
2193 },
2194 .setkey = aead_setkey,
2195 .setauthsize = aead_setauthsize,
2196 .encrypt = aead_encrypt,
2197 .decrypt = aead_decrypt,
2198 .ivsize = DES_BLOCK_SIZE,
2199 .maxauthsize = SHA224_DIGEST_SIZE,
2200 },
2201 .caam = {
2202 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2203 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2204 OP_ALG_AAI_HMAC_PRECOMP,
2205 },
2206 },
2207 {
2208 .aead = {
2209 .base = {
2210 .cra_name = "echainiv(authenc(hmac(sha224),"
2211 "cbc(des)))",
2212 .cra_driver_name = "echainiv-authenc-"
2213 "hmac-sha224-cbc-des-"
2214 "caam-qi2",
2215 .cra_blocksize = DES_BLOCK_SIZE,
2216 },
2217 .setkey = aead_setkey,
2218 .setauthsize = aead_setauthsize,
2219 .encrypt = aead_encrypt,
2220 .decrypt = aead_decrypt,
2221 .ivsize = DES_BLOCK_SIZE,
2222 .maxauthsize = SHA224_DIGEST_SIZE,
2223 },
2224 .caam = {
2225 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2226 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2227 OP_ALG_AAI_HMAC_PRECOMP,
2228 .geniv = true,
2229 }
2230 },
2231 {
2232 .aead = {
2233 .base = {
2234 .cra_name = "authenc(hmac(sha256),cbc(des))",
2235 .cra_driver_name = "authenc-hmac-sha256-"
2236 "cbc-des-caam-qi2",
2237 .cra_blocksize = DES_BLOCK_SIZE,
2238 },
2239 .setkey = aead_setkey,
2240 .setauthsize = aead_setauthsize,
2241 .encrypt = aead_encrypt,
2242 .decrypt = aead_decrypt,
2243 .ivsize = DES_BLOCK_SIZE,
2244 .maxauthsize = SHA256_DIGEST_SIZE,
2245 },
2246 .caam = {
2247 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2248 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2249 OP_ALG_AAI_HMAC_PRECOMP,
2250 },
2251 },
2252 {
2253 .aead = {
2254 .base = {
2255 .cra_name = "echainiv(authenc(hmac(sha256),"
2256 "cbc(des)))",
2257 .cra_driver_name = "echainiv-authenc-"
2258						       "hmac-sha256-cbc-des-"
2259 "caam-qi2",
2260 .cra_blocksize = DES_BLOCK_SIZE,
2261 },
2262 .setkey = aead_setkey,
2263 .setauthsize = aead_setauthsize,
2264 .encrypt = aead_encrypt,
2265 .decrypt = aead_decrypt,
2266 .ivsize = DES_BLOCK_SIZE,
2267 .maxauthsize = SHA256_DIGEST_SIZE,
2268 },
2269 .caam = {
2270 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2271 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2272 OP_ALG_AAI_HMAC_PRECOMP,
2273 .geniv = true,
2274 },
2275 },
2276 {
2277 .aead = {
2278 .base = {
2279 .cra_name = "authenc(hmac(sha384),cbc(des))",
2280 .cra_driver_name = "authenc-hmac-sha384-"
2281 "cbc-des-caam-qi2",
2282 .cra_blocksize = DES_BLOCK_SIZE,
2283 },
2284 .setkey = aead_setkey,
2285 .setauthsize = aead_setauthsize,
2286 .encrypt = aead_encrypt,
2287 .decrypt = aead_decrypt,
2288 .ivsize = DES_BLOCK_SIZE,
2289 .maxauthsize = SHA384_DIGEST_SIZE,
2290 },
2291 .caam = {
2292 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2293 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2294 OP_ALG_AAI_HMAC_PRECOMP,
2295 },
2296 },
2297 {
2298 .aead = {
2299 .base = {
2300 .cra_name = "echainiv(authenc(hmac(sha384),"
2301 "cbc(des)))",
2302 .cra_driver_name = "echainiv-authenc-"
2303 "hmac-sha384-cbc-des-"
2304 "caam-qi2",
2305 .cra_blocksize = DES_BLOCK_SIZE,
2306 },
2307 .setkey = aead_setkey,
2308 .setauthsize = aead_setauthsize,
2309 .encrypt = aead_encrypt,
2310 .decrypt = aead_decrypt,
2311 .ivsize = DES_BLOCK_SIZE,
2312 .maxauthsize = SHA384_DIGEST_SIZE,
2313 },
2314 .caam = {
2315 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2316 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2317 OP_ALG_AAI_HMAC_PRECOMP,
2318 .geniv = true,
2319 }
2320 },
2321 {
2322 .aead = {
2323 .base = {
2324 .cra_name = "authenc(hmac(sha512),cbc(des))",
2325 .cra_driver_name = "authenc-hmac-sha512-"
2326 "cbc-des-caam-qi2",
2327 .cra_blocksize = DES_BLOCK_SIZE,
2328 },
2329 .setkey = aead_setkey,
2330 .setauthsize = aead_setauthsize,
2331 .encrypt = aead_encrypt,
2332 .decrypt = aead_decrypt,
2333 .ivsize = DES_BLOCK_SIZE,
2334 .maxauthsize = SHA512_DIGEST_SIZE,
2335 },
2336 .caam = {
2337 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2338 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2339 OP_ALG_AAI_HMAC_PRECOMP,
2340 }
2341 },
2342 {
2343 .aead = {
2344 .base = {
2345 .cra_name = "echainiv(authenc(hmac(sha512),"
2346 "cbc(des)))",
2347 .cra_driver_name = "echainiv-authenc-"
2348 "hmac-sha512-cbc-des-"
2349 "caam-qi2",
2350 .cra_blocksize = DES_BLOCK_SIZE,
2351 },
2352 .setkey = aead_setkey,
2353 .setauthsize = aead_setauthsize,
2354 .encrypt = aead_encrypt,
2355 .decrypt = aead_decrypt,
2356 .ivsize = DES_BLOCK_SIZE,
2357 .maxauthsize = SHA512_DIGEST_SIZE,
2358 },
2359 .caam = {
2360 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2361 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2362 OP_ALG_AAI_HMAC_PRECOMP,
2363 .geniv = true,
2364 }
2365 },
2366 {
2367 .aead = {
2368 .base = {
2369 .cra_name = "authenc(hmac(md5),"
2370 "rfc3686(ctr(aes)))",
2371 .cra_driver_name = "authenc-hmac-md5-"
2372 "rfc3686-ctr-aes-caam-qi2",
2373 .cra_blocksize = 1,
2374 },
2375 .setkey = aead_setkey,
2376 .setauthsize = aead_setauthsize,
2377 .encrypt = aead_encrypt,
2378 .decrypt = aead_decrypt,
2379 .ivsize = CTR_RFC3686_IV_SIZE,
2380 .maxauthsize = MD5_DIGEST_SIZE,
2381 },
2382 .caam = {
2383 .class1_alg_type = OP_ALG_ALGSEL_AES |
2384 OP_ALG_AAI_CTR_MOD128,
2385 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2386 OP_ALG_AAI_HMAC_PRECOMP,
2387 .rfc3686 = true,
2388 },
2389 },
2390 {
2391 .aead = {
2392 .base = {
2393 .cra_name = "seqiv(authenc("
2394 "hmac(md5),rfc3686(ctr(aes))))",
2395 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2396 "rfc3686-ctr-aes-caam-qi2",
2397 .cra_blocksize = 1,
2398 },
2399 .setkey = aead_setkey,
2400 .setauthsize = aead_setauthsize,
2401 .encrypt = aead_encrypt,
2402 .decrypt = aead_decrypt,
2403 .ivsize = CTR_RFC3686_IV_SIZE,
2404 .maxauthsize = MD5_DIGEST_SIZE,
2405 },
2406 .caam = {
2407 .class1_alg_type = OP_ALG_ALGSEL_AES |
2408 OP_ALG_AAI_CTR_MOD128,
2409 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2410 OP_ALG_AAI_HMAC_PRECOMP,
2411 .rfc3686 = true,
2412 .geniv = true,
2413 },
2414 },
2415 {
2416 .aead = {
2417 .base = {
2418 .cra_name = "authenc(hmac(sha1),"
2419 "rfc3686(ctr(aes)))",
2420 .cra_driver_name = "authenc-hmac-sha1-"
2421 "rfc3686-ctr-aes-caam-qi2",
2422 .cra_blocksize = 1,
2423 },
2424 .setkey = aead_setkey,
2425 .setauthsize = aead_setauthsize,
2426 .encrypt = aead_encrypt,
2427 .decrypt = aead_decrypt,
2428 .ivsize = CTR_RFC3686_IV_SIZE,
2429 .maxauthsize = SHA1_DIGEST_SIZE,
2430 },
2431 .caam = {
2432 .class1_alg_type = OP_ALG_ALGSEL_AES |
2433 OP_ALG_AAI_CTR_MOD128,
2434 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2435 OP_ALG_AAI_HMAC_PRECOMP,
2436 .rfc3686 = true,
2437 },
2438 },
2439 {
2440 .aead = {
2441 .base = {
2442 .cra_name = "seqiv(authenc("
2443 "hmac(sha1),rfc3686(ctr(aes))))",
2444 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2445 "rfc3686-ctr-aes-caam-qi2",
2446 .cra_blocksize = 1,
2447 },
2448 .setkey = aead_setkey,
2449 .setauthsize = aead_setauthsize,
2450 .encrypt = aead_encrypt,
2451 .decrypt = aead_decrypt,
2452 .ivsize = CTR_RFC3686_IV_SIZE,
2453 .maxauthsize = SHA1_DIGEST_SIZE,
2454 },
2455 .caam = {
2456 .class1_alg_type = OP_ALG_ALGSEL_AES |
2457 OP_ALG_AAI_CTR_MOD128,
2458 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2459 OP_ALG_AAI_HMAC_PRECOMP,
2460 .rfc3686 = true,
2461 .geniv = true,
2462 },
2463 },
2464 {
2465 .aead = {
2466 .base = {
2467 .cra_name = "authenc(hmac(sha224),"
2468 "rfc3686(ctr(aes)))",
2469 .cra_driver_name = "authenc-hmac-sha224-"
2470 "rfc3686-ctr-aes-caam-qi2",
2471 .cra_blocksize = 1,
2472 },
2473 .setkey = aead_setkey,
2474 .setauthsize = aead_setauthsize,
2475 .encrypt = aead_encrypt,
2476 .decrypt = aead_decrypt,
2477 .ivsize = CTR_RFC3686_IV_SIZE,
2478 .maxauthsize = SHA224_DIGEST_SIZE,
2479 },
2480 .caam = {
2481 .class1_alg_type = OP_ALG_ALGSEL_AES |
2482 OP_ALG_AAI_CTR_MOD128,
2483 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2484 OP_ALG_AAI_HMAC_PRECOMP,
2485 .rfc3686 = true,
2486 },
2487 },
2488 {
2489 .aead = {
2490 .base = {
2491 .cra_name = "seqiv(authenc("
2492 "hmac(sha224),rfc3686(ctr(aes))))",
2493 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2494 "rfc3686-ctr-aes-caam-qi2",
2495 .cra_blocksize = 1,
2496 },
2497 .setkey = aead_setkey,
2498 .setauthsize = aead_setauthsize,
2499 .encrypt = aead_encrypt,
2500 .decrypt = aead_decrypt,
2501 .ivsize = CTR_RFC3686_IV_SIZE,
2502 .maxauthsize = SHA224_DIGEST_SIZE,
2503 },
2504 .caam = {
2505 .class1_alg_type = OP_ALG_ALGSEL_AES |
2506 OP_ALG_AAI_CTR_MOD128,
2507 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2508 OP_ALG_AAI_HMAC_PRECOMP,
2509 .rfc3686 = true,
2510 .geniv = true,
2511 },
2512 },
2513 {
2514 .aead = {
2515 .base = {
2516 .cra_name = "authenc(hmac(sha256),"
2517 "rfc3686(ctr(aes)))",
2518 .cra_driver_name = "authenc-hmac-sha256-"
2519 "rfc3686-ctr-aes-caam-qi2",
2520 .cra_blocksize = 1,
2521 },
2522 .setkey = aead_setkey,
2523 .setauthsize = aead_setauthsize,
2524 .encrypt = aead_encrypt,
2525 .decrypt = aead_decrypt,
2526 .ivsize = CTR_RFC3686_IV_SIZE,
2527 .maxauthsize = SHA256_DIGEST_SIZE,
2528 },
2529 .caam = {
2530 .class1_alg_type = OP_ALG_ALGSEL_AES |
2531 OP_ALG_AAI_CTR_MOD128,
2532 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2533 OP_ALG_AAI_HMAC_PRECOMP,
2534 .rfc3686 = true,
2535 },
2536 },
2537 {
2538 .aead = {
2539 .base = {
2540 .cra_name = "seqiv(authenc(hmac(sha256),"
2541 "rfc3686(ctr(aes))))",
2542 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2543 "rfc3686-ctr-aes-caam-qi2",
2544 .cra_blocksize = 1,
2545 },
2546 .setkey = aead_setkey,
2547 .setauthsize = aead_setauthsize,
2548 .encrypt = aead_encrypt,
2549 .decrypt = aead_decrypt,
2550 .ivsize = CTR_RFC3686_IV_SIZE,
2551 .maxauthsize = SHA256_DIGEST_SIZE,
2552 },
2553 .caam = {
2554 .class1_alg_type = OP_ALG_ALGSEL_AES |
2555 OP_ALG_AAI_CTR_MOD128,
2556 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2557 OP_ALG_AAI_HMAC_PRECOMP,
2558 .rfc3686 = true,
2559 .geniv = true,
2560 },
2561 },
2562 {
2563 .aead = {
2564 .base = {
2565 .cra_name = "authenc(hmac(sha384),"
2566 "rfc3686(ctr(aes)))",
2567 .cra_driver_name = "authenc-hmac-sha384-"
2568 "rfc3686-ctr-aes-caam-qi2",
2569 .cra_blocksize = 1,
2570 },
2571 .setkey = aead_setkey,
2572 .setauthsize = aead_setauthsize,
2573 .encrypt = aead_encrypt,
2574 .decrypt = aead_decrypt,
2575 .ivsize = CTR_RFC3686_IV_SIZE,
2576 .maxauthsize = SHA384_DIGEST_SIZE,
2577 },
2578 .caam = {
2579 .class1_alg_type = OP_ALG_ALGSEL_AES |
2580 OP_ALG_AAI_CTR_MOD128,
2581 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2582 OP_ALG_AAI_HMAC_PRECOMP,
2583 .rfc3686 = true,
2584 },
2585 },
2586 {
2587 .aead = {
2588 .base = {
2589 .cra_name = "seqiv(authenc(hmac(sha384),"
2590 "rfc3686(ctr(aes))))",
2591 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2592 "rfc3686-ctr-aes-caam-qi2",
2593 .cra_blocksize = 1,
2594 },
2595 .setkey = aead_setkey,
2596 .setauthsize = aead_setauthsize,
2597 .encrypt = aead_encrypt,
2598 .decrypt = aead_decrypt,
2599 .ivsize = CTR_RFC3686_IV_SIZE,
2600 .maxauthsize = SHA384_DIGEST_SIZE,
2601 },
2602 .caam = {
2603 .class1_alg_type = OP_ALG_ALGSEL_AES |
2604 OP_ALG_AAI_CTR_MOD128,
2605 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2606 OP_ALG_AAI_HMAC_PRECOMP,
2607 .rfc3686 = true,
2608 .geniv = true,
2609 },
2610 },
2611 {
2612 .aead = {
2613 .base = {
2614 .cra_name = "authenc(hmac(sha512),"
2615 "rfc3686(ctr(aes)))",
2616 .cra_driver_name = "authenc-hmac-sha512-"
2617 "rfc3686-ctr-aes-caam-qi2",
2618 .cra_blocksize = 1,
2619 },
2620 .setkey = aead_setkey,
2621 .setauthsize = aead_setauthsize,
2622 .encrypt = aead_encrypt,
2623 .decrypt = aead_decrypt,
2624 .ivsize = CTR_RFC3686_IV_SIZE,
2625 .maxauthsize = SHA512_DIGEST_SIZE,
2626 },
2627 .caam = {
2628 .class1_alg_type = OP_ALG_ALGSEL_AES |
2629 OP_ALG_AAI_CTR_MOD128,
2630 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2631 OP_ALG_AAI_HMAC_PRECOMP,
2632 .rfc3686 = true,
2633 },
2634 },
2635 {
2636 .aead = {
2637 .base = {
2638 .cra_name = "seqiv(authenc(hmac(sha512),"
2639 "rfc3686(ctr(aes))))",
2640 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2641 "rfc3686-ctr-aes-caam-qi2",
2642 .cra_blocksize = 1,
2643 },
2644 .setkey = aead_setkey,
2645 .setauthsize = aead_setauthsize,
2646 .encrypt = aead_encrypt,
2647 .decrypt = aead_decrypt,
2648 .ivsize = CTR_RFC3686_IV_SIZE,
2649 .maxauthsize = SHA512_DIGEST_SIZE,
2650 },
2651 .caam = {
2652 .class1_alg_type = OP_ALG_ALGSEL_AES |
2653 OP_ALG_AAI_CTR_MOD128,
2654 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2655 OP_ALG_AAI_HMAC_PRECOMP,
2656 .rfc3686 = true,
2657 .geniv = true,
2658 },
2659 },
2660};
2661
2662static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2663{
2664 struct skcipher_alg *alg = &t_alg->skcipher;
2665
2666 alg->base.cra_module = THIS_MODULE;
2667 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2668 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2669 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2670
2671 alg->init = caam_cra_init_skcipher;
2672 alg->exit = caam_cra_exit;
2673}
2674
2675static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2676{
2677 struct aead_alg *alg = &t_alg->aead;
2678
2679 alg->base.cra_module = THIS_MODULE;
2680 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2681 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2682 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2683
2684 alg->init = caam_cra_init_aead;
2685 alg->exit = caam_cra_exit_aead;
2686}
2687
2688/* max hash key is max split key size */
2689#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
2690
2691#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
2692
2693/* caam context sizes for hashes: running digest + 8 */
2694#define HASH_MSG_LEN 8
2695#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2696
2697enum hash_optype {
2698 UPDATE = 0,
2699 UPDATE_FIRST,
2700 FINALIZE,
2701 DIGEST,
2702 HASH_NUM_OP
2703};
2704
2705/**
2706 * caam_hash_ctx - ahash per-session context
2707 * @flc: Flow Contexts array
2708 * @flc_dma: I/O virtual addresses of the Flow Contexts
2709 * @dev: dpseci device
2710 * @ctx_len: size of Context Register
2711 * @adata: hashing algorithm details
2712 */
2713struct caam_hash_ctx {
2714 struct caam_flc flc[HASH_NUM_OP];
2715 dma_addr_t flc_dma[HASH_NUM_OP];
2716 struct device *dev;
2717 int ctx_len;
2718 struct alginfo adata;
2719};
2720
2721/* ahash state */
2722struct caam_hash_state {
2723 struct caam_request caam_req;
2724 dma_addr_t buf_dma;
2725 dma_addr_t ctx_dma;
2726 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2727 int buflen_0;
2728 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2729 int buflen_1;
2730 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2731 int (*update)(struct ahash_request *req);
2732 int (*final)(struct ahash_request *req);
2733 int (*finup)(struct ahash_request *req);
2734 int current_buf;
2735};
2736
2737struct caam_export_state {
2738 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2739 u8 caam_ctx[MAX_CTX_LEN];
2740 int buflen;
2741 int (*update)(struct ahash_request *req);
2742 int (*final)(struct ahash_request *req);
2743 int (*finup)(struct ahash_request *req);
2744};
2745
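/*
 * buf_0/buf_1 in struct caam_hash_state act as a double buffer for data
 * smaller than a block: the "current" buffer is the one included in the
 * job being built, while the "alternate" buffer collects the tail bytes
 * carried over to the next update.  switch_buf() is called from the
 * completion callbacks to make the alternate buffer current.
 */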
2746static inline void switch_buf(struct caam_hash_state *state)
2747{
2748 state->current_buf ^= 1;
2749}
2750
2751static inline u8 *current_buf(struct caam_hash_state *state)
2752{
2753 return state->current_buf ? state->buf_1 : state->buf_0;
2754}
2755
2756static inline u8 *alt_buf(struct caam_hash_state *state)
2757{
2758 return state->current_buf ? state->buf_0 : state->buf_1;
2759}
2760
2761static inline int *current_buflen(struct caam_hash_state *state)
2762{
2763 return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2764}
2765
2766static inline int *alt_buflen(struct caam_hash_state *state)
2767{
2768 return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2769}
2770
2771/* Map current buffer in state (if length > 0) and put it in link table */
2772static inline int buf_map_to_qm_sg(struct device *dev,
2773 struct dpaa2_sg_entry *qm_sg,
2774 struct caam_hash_state *state)
2775{
2776 int buflen = *current_buflen(state);
2777
2778 if (!buflen)
2779 return 0;
2780
2781 state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2782 DMA_TO_DEVICE);
2783 if (dma_mapping_error(dev, state->buf_dma)) {
2784 dev_err(dev, "unable to map buf\n");
2785 state->buf_dma = 0;
2786 return -ENOMEM;
2787 }
2788
2789 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2790
2791 return 0;
2792}
2793
2794/* Map state->caam_ctx, and add it to link table */
2795static inline int ctx_map_to_qm_sg(struct device *dev,
2796 struct caam_hash_state *state, int ctx_len,
2797 struct dpaa2_sg_entry *qm_sg, u32 flag)
2798{
2799 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2800 if (dma_mapping_error(dev, state->ctx_dma)) {
2801 dev_err(dev, "unable to map ctx\n");
2802 state->ctx_dma = 0;
2803 return -ENOMEM;
2804 }
2805
2806 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2807
2808 return 0;
2809}
2810
2811static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2812{
2813 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2814 int digestsize = crypto_ahash_digestsize(ahash);
2815 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2816 struct caam_flc *flc;
2817 u32 *desc;
2818
2819 /* ahash_update shared descriptor */
2820 flc = &ctx->flc[UPDATE];
2821 desc = flc->sh_desc;
2822 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
2823 ctx->ctx_len, true, priv->sec_attr.era);
2824 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2825 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
2826 desc_bytes(desc), DMA_BIDIRECTIONAL);
2827 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
2828 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2829 1);
2830
2831 /* ahash_update_first shared descriptor */
2832 flc = &ctx->flc[UPDATE_FIRST];
2833 desc = flc->sh_desc;
2834 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
2835 ctx->ctx_len, false, priv->sec_attr.era);
2836 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2837 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
2838 desc_bytes(desc), DMA_BIDIRECTIONAL);
2839 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
2840 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2841 1);
2842
2843 /* ahash_final shared descriptor */
2844 flc = &ctx->flc[FINALIZE];
2845 desc = flc->sh_desc;
2846 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
2847 ctx->ctx_len, true, priv->sec_attr.era);
2848 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2849 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
2850 desc_bytes(desc), DMA_BIDIRECTIONAL);
2851 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
2852 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2853 1);
2854
2855 /* ahash_digest shared descriptor */
2856 flc = &ctx->flc[DIGEST];
2857 desc = flc->sh_desc;
2858 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
2859 ctx->ctx_len, false, priv->sec_attr.era);
2860 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2861 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
2862 desc_bytes(desc), DMA_BIDIRECTIONAL);
2863 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
2864 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2865 1);
2866
2867 return 0;
2868}
2869
2870struct split_key_sh_result {
2871 struct completion completion;
2872 int err;
2873 struct device *dev;
2874};
2875
2876static void split_key_sh_done(void *cbk_ctx, u32 err)
2877{
2878 struct split_key_sh_result *res = cbk_ctx;
2879
2880 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2881
2882 if (err)
2883 caam_qi2_strstatus(res->dev, err);
2884
2885 res->err = err;
2886 complete(&res->completion);
2887}
2888
2889 /* Digest the key if it is longer than the block size */
2890static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
2891 u32 *keylen, u8 *key_out, u32 digestsize)
2892{
2893 struct caam_request *req_ctx;
2894 u32 *desc;
2895 struct split_key_sh_result result;
2896 dma_addr_t src_dma, dst_dma;
2897 struct caam_flc *flc;
2898 dma_addr_t flc_dma;
2899 int ret = -ENOMEM;
2900 struct dpaa2_fl_entry *in_fle, *out_fle;
2901
2902 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
2903 if (!req_ctx)
2904 return -ENOMEM;
2905
2906 in_fle = &req_ctx->fd_flt[1];
2907 out_fle = &req_ctx->fd_flt[0];
2908
2909 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
2910 if (!flc)
2911 goto err_flc;
2912
2913 src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
2914 DMA_TO_DEVICE);
2915 if (dma_mapping_error(ctx->dev, src_dma)) {
2916 dev_err(ctx->dev, "unable to map key input memory\n");
2917 goto err_src_dma;
2918 }
2919 dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
2920 DMA_FROM_DEVICE);
2921 if (dma_mapping_error(ctx->dev, dst_dma)) {
2922 dev_err(ctx->dev, "unable to map key output memory\n");
2923 goto err_dst_dma;
2924 }
2925
2926 desc = flc->sh_desc;
2927
2928 init_sh_desc(desc, 0);
2929
2930 /* descriptor to perform unkeyed hash on key_in */
2931 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
2932 OP_ALG_AS_INITFINAL);
2933 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
2934 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
2935 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
2936 LDST_SRCDST_BYTE_CONTEXT);
2937
2938 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2939 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
2940 desc_bytes(desc), DMA_TO_DEVICE);
2941 if (dma_mapping_error(ctx->dev, flc_dma)) {
2942 dev_err(ctx->dev, "unable to map shared descriptor\n");
2943 goto err_flc_dma;
2944 }
2945
2946 dpaa2_fl_set_final(in_fle, true);
2947 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
2948 dpaa2_fl_set_addr(in_fle, src_dma);
2949 dpaa2_fl_set_len(in_fle, *keylen);
2950 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
2951 dpaa2_fl_set_addr(out_fle, dst_dma);
2952 dpaa2_fl_set_len(out_fle, digestsize);
2953
2954 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
2955 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
2956 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
2957 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2958 1);
2959
2960 result.err = 0;
2961 init_completion(&result.completion);
2962 result.dev = ctx->dev;
2963
2964 req_ctx->flc = flc;
2965 req_ctx->flc_dma = flc_dma;
2966 req_ctx->cbk = split_key_sh_done;
2967 req_ctx->ctx = &result;
2968
2969 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
2970 if (ret == -EINPROGRESS) {
2971 /* in progress */
2972 wait_for_completion(&result.completion);
2973 ret = result.err;
2974 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
2975				     DUMP_PREFIX_ADDRESS, 16, 4, key_out,
2976 digestsize, 1);
2977 }
2978
2979 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
2980 DMA_TO_DEVICE);
2981err_flc_dma:
2982 dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
2983err_dst_dma:
2984 dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
2985err_src_dma:
2986 kfree(flc);
2987err_flc:
2988 kfree(req_ctx);
2989
2990 *keylen = digestsize;
2991
2992 return ret;
2993}
2994
2995static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2996 unsigned int keylen)
2997{
2998 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2999 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3000 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3001 int ret;
3002 u8 *hashed_key = NULL;
3003
3004 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3005
3006 if (keylen > blocksize) {
3007 hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
3008 GFP_KERNEL | GFP_DMA);
3009 if (!hashed_key)
3010 return -ENOMEM;
3011 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
3012 digestsize);
3013 if (ret)
3014 goto bad_free_key;
3015 key = hashed_key;
3016 }
3017
3018 ctx->adata.keylen = keylen;
3019 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3020 OP_ALG_ALGSEL_MASK);
3021 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3022 goto bad_free_key;
3023
3024 ctx->adata.key_virt = key;
3025 ctx->adata.key_inline = true;
3026
3027 ret = ahash_set_sh_desc(ahash);
3028 kfree(hashed_key);
3029 return ret;
3030bad_free_key:
3031 kfree(hashed_key);
3032 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3033 return -EINVAL;
3034}
3035
3036static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3037 struct ahash_request *req, int dst_len)
3038{
3039 struct caam_hash_state *state = ahash_request_ctx(req);
3040
3041 if (edesc->src_nents)
3042 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3043 if (edesc->dst_dma)
3044 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
3045
3046 if (edesc->qm_sg_bytes)
3047 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3048 DMA_TO_DEVICE);
3049
3050 if (state->buf_dma) {
3051 dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3052 DMA_TO_DEVICE);
3053 state->buf_dma = 0;
3054 }
3055}
3056
3057static inline void ahash_unmap_ctx(struct device *dev,
3058 struct ahash_edesc *edesc,
3059 struct ahash_request *req, int dst_len,
3060 u32 flag)
3061{
3062 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3063 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3064 struct caam_hash_state *state = ahash_request_ctx(req);
3065
3066 if (state->ctx_dma) {
3067 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
3068 state->ctx_dma = 0;
3069 }
3070 ahash_unmap(dev, edesc, req, dst_len);
3071}
3072
3073static void ahash_done(void *cbk_ctx, u32 status)
3074{
3075 struct crypto_async_request *areq = cbk_ctx;
3076 struct ahash_request *req = ahash_request_cast(areq);
3077 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3078 struct caam_hash_state *state = ahash_request_ctx(req);
3079 struct ahash_edesc *edesc = state->caam_req.edesc;
3080 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3081 int digestsize = crypto_ahash_digestsize(ahash);
3082 int ecode = 0;
3083
3084 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3085
3086 if (unlikely(status)) {
3087 caam_qi2_strstatus(ctx->dev, status);
3088 ecode = -EIO;
3089 }
3090
3091 ahash_unmap(ctx->dev, edesc, req, digestsize);
3092 qi_cache_free(edesc);
3093
3094 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3095 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3096 ctx->ctx_len, 1);
3097 if (req->result)
3098 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3099 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3100 digestsize, 1);
3101
3102 req->base.complete(&req->base, ecode);
3103}
3104
3105static void ahash_done_bi(void *cbk_ctx, u32 status)
3106{
3107 struct crypto_async_request *areq = cbk_ctx;
3108 struct ahash_request *req = ahash_request_cast(areq);
3109 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3110 struct caam_hash_state *state = ahash_request_ctx(req);
3111 struct ahash_edesc *edesc = state->caam_req.edesc;
3112 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3113 int ecode = 0;
3114
3115 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3116
3117 if (unlikely(status)) {
3118 caam_qi2_strstatus(ctx->dev, status);
3119 ecode = -EIO;
3120 }
3121
3122 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3123 switch_buf(state);
3124 qi_cache_free(edesc);
3125
3126 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3127 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3128 ctx->ctx_len, 1);
3129 if (req->result)
3130 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3131 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3132 crypto_ahash_digestsize(ahash), 1);
3133
3134 req->base.complete(&req->base, ecode);
3135}
3136
3137static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3138{
3139 struct crypto_async_request *areq = cbk_ctx;
3140 struct ahash_request *req = ahash_request_cast(areq);
3141 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3142 struct caam_hash_state *state = ahash_request_ctx(req);
3143 struct ahash_edesc *edesc = state->caam_req.edesc;
3144 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3145 int digestsize = crypto_ahash_digestsize(ahash);
3146 int ecode = 0;
3147
3148 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3149
3150 if (unlikely(status)) {
3151 caam_qi2_strstatus(ctx->dev, status);
3152 ecode = -EIO;
3153 }
3154
3155 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
3156 qi_cache_free(edesc);
3157
3158 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3159 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3160 ctx->ctx_len, 1);
3161 if (req->result)
3162 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3163 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3164 digestsize, 1);
3165
3166 req->base.complete(&req->base, ecode);
3167}
3168
3169static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3170{
3171 struct crypto_async_request *areq = cbk_ctx;
3172 struct ahash_request *req = ahash_request_cast(areq);
3173 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3174 struct caam_hash_state *state = ahash_request_ctx(req);
3175 struct ahash_edesc *edesc = state->caam_req.edesc;
3176 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3177 int ecode = 0;
3178
3179 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3180
3181 if (unlikely(status)) {
3182 caam_qi2_strstatus(ctx->dev, status);
3183 ecode = -EIO;
3184 }
3185
3186 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
3187 switch_buf(state);
3188 qi_cache_free(edesc);
3189
3190 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3191 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3192 ctx->ctx_len, 1);
3193 if (req->result)
3194 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3195 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3196 crypto_ahash_digestsize(ahash), 1);
3197
3198 req->base.complete(&req->base, ecode);
3199}
3200
3201static int ahash_update_ctx(struct ahash_request *req)
3202{
3203 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3204 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3205 struct caam_hash_state *state = ahash_request_ctx(req);
3206 struct caam_request *req_ctx = &state->caam_req;
3207 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3208 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3209 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3210 GFP_KERNEL : GFP_ATOMIC;
3211 u8 *buf = current_buf(state);
3212 int *buflen = current_buflen(state);
3213 u8 *next_buf = alt_buf(state);
3214 int *next_buflen = alt_buflen(state), last_buflen;
3215 int in_len = *buflen + req->nbytes, to_hash;
3216 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3217 struct ahash_edesc *edesc;
3218 int ret = 0;
3219
3220 last_buflen = *next_buflen;
3221 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3222 to_hash = in_len - *next_buflen;
3223
3224 if (to_hash) {
3225 struct dpaa2_sg_entry *sg_table;
3226
3227 src_nents = sg_nents_for_len(req->src,
3228 req->nbytes - (*next_buflen));
3229 if (src_nents < 0) {
3230 dev_err(ctx->dev, "Invalid number of src SG.\n");
3231 return src_nents;
3232 }
3233
3234 if (src_nents) {
3235 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3236 DMA_TO_DEVICE);
3237 if (!mapped_nents) {
3238 dev_err(ctx->dev, "unable to DMA map source\n");
3239 return -ENOMEM;
3240 }
3241 } else {
3242 mapped_nents = 0;
3243 }
3244
3245 /* allocate space for base edesc and link tables */
3246 edesc = qi_cache_zalloc(GFP_DMA | flags);
3247 if (!edesc) {
3248 dma_unmap_sg(ctx->dev, req->src, src_nents,
3249 DMA_TO_DEVICE);
3250 return -ENOMEM;
3251 }
3252
3253 edesc->src_nents = src_nents;
3254 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3255 qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3256 sizeof(*sg_table);
3257 sg_table = &edesc->sgt[0];
3258
3259 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3260 DMA_BIDIRECTIONAL);
3261 if (ret)
3262 goto unmap_ctx;
3263
3264 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3265 if (ret)
3266 goto unmap_ctx;
3267
3268 if (mapped_nents) {
3269 sg_to_qm_sg_last(req->src, mapped_nents,
3270 sg_table + qm_sg_src_index, 0);
3271 if (*next_buflen)
3272 scatterwalk_map_and_copy(next_buf, req->src,
3273 to_hash - *buflen,
3274 *next_buflen, 0);
3275 } else {
3276 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3277 true);
3278 }
3279
3280 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3281 qm_sg_bytes, DMA_TO_DEVICE);
3282 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3283 dev_err(ctx->dev, "unable to map S/G table\n");
3284 ret = -ENOMEM;
3285 goto unmap_ctx;
3286 }
3287 edesc->qm_sg_bytes = qm_sg_bytes;
3288
3289 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3290 dpaa2_fl_set_final(in_fle, true);
3291 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3292 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3293 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3294 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3295 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3296 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3297
3298 req_ctx->flc = &ctx->flc[UPDATE];
3299 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3300 req_ctx->cbk = ahash_done_bi;
3301 req_ctx->ctx = &req->base;
3302 req_ctx->edesc = edesc;
3303
3304 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3305 if (ret != -EINPROGRESS &&
3306 !(ret == -EBUSY &&
3307 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3308 goto unmap_ctx;
3309 } else if (*next_buflen) {
3310 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3311 req->nbytes, 0);
3312 *buflen = *next_buflen;
3313 *next_buflen = last_buflen;
3314 }
3315
3316 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3317 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3318 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3319 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3320 1);
3321
3322 return ret;
3323unmap_ctx:
3324 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3325 qi_cache_free(edesc);
3326 return ret;
3327}
3328
3329static int ahash_final_ctx(struct ahash_request *req)
3330{
3331 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3332 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3333 struct caam_hash_state *state = ahash_request_ctx(req);
3334 struct caam_request *req_ctx = &state->caam_req;
3335 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3336 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3337 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3338 GFP_KERNEL : GFP_ATOMIC;
3339 int buflen = *current_buflen(state);
3340 int qm_sg_bytes, qm_sg_src_index;
3341 int digestsize = crypto_ahash_digestsize(ahash);
3342 struct ahash_edesc *edesc;
3343 struct dpaa2_sg_entry *sg_table;
3344 int ret;
3345
3346 /* allocate space for base edesc and link tables */
3347 edesc = qi_cache_zalloc(GFP_DMA | flags);
3348 if (!edesc)
3349 return -ENOMEM;
3350
3351 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3352 qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3353 sg_table = &edesc->sgt[0];
3354
3355 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3356 DMA_TO_DEVICE);
3357 if (ret)
3358 goto unmap_ctx;
3359
3360 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3361 if (ret)
3362 goto unmap_ctx;
3363
3364 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3365
3366 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3367 DMA_TO_DEVICE);
3368 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3369 dev_err(ctx->dev, "unable to map S/G table\n");
3370 ret = -ENOMEM;
3371 goto unmap_ctx;
3372 }
3373 edesc->qm_sg_bytes = qm_sg_bytes;
3374
3375 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3376 DMA_FROM_DEVICE);
3377 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3378 dev_err(ctx->dev, "unable to map dst\n");
3379 edesc->dst_dma = 0;
3380 ret = -ENOMEM;
3381 goto unmap_ctx;
3382 }
3383
3384 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3385 dpaa2_fl_set_final(in_fle, true);
3386 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3387 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3388 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3389 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3390 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3391 dpaa2_fl_set_len(out_fle, digestsize);
3392
3393 req_ctx->flc = &ctx->flc[FINALIZE];
3394 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3395 req_ctx->cbk = ahash_done_ctx_src;
3396 req_ctx->ctx = &req->base;
3397 req_ctx->edesc = edesc;
3398
3399 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3400 if (ret == -EINPROGRESS ||
3401 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3402 return ret;
3403
3404unmap_ctx:
3405 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3406 qi_cache_free(edesc);
3407 return ret;
3408}
3409
3410static int ahash_finup_ctx(struct ahash_request *req)
3411{
3412 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3413 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3414 struct caam_hash_state *state = ahash_request_ctx(req);
3415 struct caam_request *req_ctx = &state->caam_req;
3416 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3417 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3418 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3419 GFP_KERNEL : GFP_ATOMIC;
3420 int buflen = *current_buflen(state);
3421 int qm_sg_bytes, qm_sg_src_index;
3422 int src_nents, mapped_nents;
3423 int digestsize = crypto_ahash_digestsize(ahash);
3424 struct ahash_edesc *edesc;
3425 struct dpaa2_sg_entry *sg_table;
3426 int ret;
3427
3428 src_nents = sg_nents_for_len(req->src, req->nbytes);
3429 if (src_nents < 0) {
3430 dev_err(ctx->dev, "Invalid number of src SG.\n");
3431 return src_nents;
3432 }
3433
3434 if (src_nents) {
3435 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3436 DMA_TO_DEVICE);
3437 if (!mapped_nents) {
3438 dev_err(ctx->dev, "unable to DMA map source\n");
3439 return -ENOMEM;
3440 }
3441 } else {
3442 mapped_nents = 0;
3443 }
3444
3445 /* allocate space for base edesc and link tables */
3446 edesc = qi_cache_zalloc(GFP_DMA | flags);
3447 if (!edesc) {
3448 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3449 return -ENOMEM;
3450 }
3451
3452 edesc->src_nents = src_nents;
3453 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3454 qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3455 sg_table = &edesc->sgt[0];
3456
3457 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3458 DMA_TO_DEVICE);
3459 if (ret)
3460 goto unmap_ctx;
3461
3462 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3463 if (ret)
3464 goto unmap_ctx;
3465
3466 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3467
3468 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3469 DMA_TO_DEVICE);
3470 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3471 dev_err(ctx->dev, "unable to map S/G table\n");
3472 ret = -ENOMEM;
3473 goto unmap_ctx;
3474 }
3475 edesc->qm_sg_bytes = qm_sg_bytes;
3476
3477 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3478 DMA_FROM_DEVICE);
3479 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3480 dev_err(ctx->dev, "unable to map dst\n");
3481 edesc->dst_dma = 0;
3482 ret = -ENOMEM;
3483 goto unmap_ctx;
3484 }
3485
3486 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3487 dpaa2_fl_set_final(in_fle, true);
3488 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3489 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3490 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3491 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3492 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3493 dpaa2_fl_set_len(out_fle, digestsize);
3494
3495 req_ctx->flc = &ctx->flc[FINALIZE];
3496 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3497 req_ctx->cbk = ahash_done_ctx_src;
3498 req_ctx->ctx = &req->base;
3499 req_ctx->edesc = edesc;
3500
3501 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3502 if (ret == -EINPROGRESS ||
3503 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3504 return ret;
3505
3506unmap_ctx:
3507 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3508 qi_cache_free(edesc);
3509 return ret;
3510}
3511
3512static int ahash_digest(struct ahash_request *req)
3513{
3514 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3515 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3516 struct caam_hash_state *state = ahash_request_ctx(req);
3517 struct caam_request *req_ctx = &state->caam_req;
3518 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3519 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3520 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3521 GFP_KERNEL : GFP_ATOMIC;
3522 int digestsize = crypto_ahash_digestsize(ahash);
3523 int src_nents, mapped_nents;
3524 struct ahash_edesc *edesc;
3525 int ret = -ENOMEM;
3526
3527 state->buf_dma = 0;
3528
3529 src_nents = sg_nents_for_len(req->src, req->nbytes);
3530 if (src_nents < 0) {
3531 dev_err(ctx->dev, "Invalid number of src SG.\n");
3532 return src_nents;
3533 }
3534
3535 if (src_nents) {
3536 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3537 DMA_TO_DEVICE);
3538 if (!mapped_nents) {
3539 dev_err(ctx->dev, "unable to map source for DMA\n");
3540 return ret;
3541 }
3542 } else {
3543 mapped_nents = 0;
3544 }
3545
3546 /* allocate space for base edesc and link tables */
3547 edesc = qi_cache_zalloc(GFP_DMA | flags);
3548 if (!edesc) {
3549 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3550 return ret;
3551 }
3552
3553 edesc->src_nents = src_nents;
3554 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3555
3556 if (mapped_nents > 1) {
3557 int qm_sg_bytes;
3558 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3559
3560 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3561 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3562 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3563 qm_sg_bytes, DMA_TO_DEVICE);
3564 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3565 dev_err(ctx->dev, "unable to map S/G table\n");
3566 goto unmap;
3567 }
3568 edesc->qm_sg_bytes = qm_sg_bytes;
3569 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3570 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3571 } else {
3572 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3573 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3574 }
3575
3576 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3577 DMA_FROM_DEVICE);
3578 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3579 dev_err(ctx->dev, "unable to map dst\n");
3580 edesc->dst_dma = 0;
3581 goto unmap;
3582 }
3583
3584 dpaa2_fl_set_final(in_fle, true);
3585 dpaa2_fl_set_len(in_fle, req->nbytes);
3586 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3587 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3588 dpaa2_fl_set_len(out_fle, digestsize);
3589
3590 req_ctx->flc = &ctx->flc[DIGEST];
3591 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3592 req_ctx->cbk = ahash_done;
3593 req_ctx->ctx = &req->base;
3594 req_ctx->edesc = edesc;
3595 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3596 if (ret == -EINPROGRESS ||
3597 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3598 return ret;
3599
3600unmap:
3601 ahash_unmap(ctx->dev, edesc, req, digestsize);
3602 qi_cache_free(edesc);
3603 return ret;
3604}
3605
3606static int ahash_final_no_ctx(struct ahash_request *req)
3607{
3608 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3609 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3610 struct caam_hash_state *state = ahash_request_ctx(req);
3611 struct caam_request *req_ctx = &state->caam_req;
3612 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3613 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3614 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3615 GFP_KERNEL : GFP_ATOMIC;
3616 u8 *buf = current_buf(state);
3617 int buflen = *current_buflen(state);
3618 int digestsize = crypto_ahash_digestsize(ahash);
3619 struct ahash_edesc *edesc;
3620 int ret = -ENOMEM;
3621
3622 /* allocate space for base edesc and link tables */
3623 edesc = qi_cache_zalloc(GFP_DMA | flags);
3624 if (!edesc)
3625 return ret;
3626
3627 state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
3628 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3629 dev_err(ctx->dev, "unable to map src\n");
3630 goto unmap;
3631 }
3632
3633 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3634 DMA_FROM_DEVICE);
3635 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3636 dev_err(ctx->dev, "unable to map dst\n");
3637 edesc->dst_dma = 0;
3638 goto unmap;
3639 }
3640
3641 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3642 dpaa2_fl_set_final(in_fle, true);
3643 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3644 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3645 dpaa2_fl_set_len(in_fle, buflen);
3646 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3647 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3648 dpaa2_fl_set_len(out_fle, digestsize);
3649
3650 req_ctx->flc = &ctx->flc[DIGEST];
3651 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3652 req_ctx->cbk = ahash_done;
3653 req_ctx->ctx = &req->base;
3654 req_ctx->edesc = edesc;
3655
3656 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3657 if (ret == -EINPROGRESS ||
3658 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3659 return ret;
3660
3661unmap:
3662 ahash_unmap(ctx->dev, edesc, req, digestsize);
3663 qi_cache_free(edesc);
3664 return ret;
3665}
3666
3667static int ahash_update_no_ctx(struct ahash_request *req)
3668{
3669 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3670 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3671 struct caam_hash_state *state = ahash_request_ctx(req);
3672 struct caam_request *req_ctx = &state->caam_req;
3673 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3674 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3675 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3676 GFP_KERNEL : GFP_ATOMIC;
3677 u8 *buf = current_buf(state);
3678 int *buflen = current_buflen(state);
3679 u8 *next_buf = alt_buf(state);
3680 int *next_buflen = alt_buflen(state);
3681 int in_len = *buflen + req->nbytes, to_hash;
3682 int qm_sg_bytes, src_nents, mapped_nents;
3683 struct ahash_edesc *edesc;
3684 int ret = 0;
3685
3686 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3687 to_hash = in_len - *next_buflen;
3688
3689 if (to_hash) {
3690 struct dpaa2_sg_entry *sg_table;
3691
3692 src_nents = sg_nents_for_len(req->src,
3693 req->nbytes - *next_buflen);
3694 if (src_nents < 0) {
3695 dev_err(ctx->dev, "Invalid number of src SG.\n");
3696 return src_nents;
3697 }
3698
3699 if (src_nents) {
3700 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3701 DMA_TO_DEVICE);
3702 if (!mapped_nents) {
3703 dev_err(ctx->dev, "unable to DMA map source\n");
3704 return -ENOMEM;
3705 }
3706 } else {
3707 mapped_nents = 0;
3708 }
3709
3710 /* allocate space for base edesc and link tables */
3711 edesc = qi_cache_zalloc(GFP_DMA | flags);
3712 if (!edesc) {
3713 dma_unmap_sg(ctx->dev, req->src, src_nents,
3714 DMA_TO_DEVICE);
3715 return -ENOMEM;
3716 }
3717
3718 edesc->src_nents = src_nents;
3719 qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3720 sg_table = &edesc->sgt[0];
3721
3722 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3723 if (ret)
3724 goto unmap_ctx;
3725
3726 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3727
3728 if (*next_buflen)
3729 scatterwalk_map_and_copy(next_buf, req->src,
3730 to_hash - *buflen,
3731 *next_buflen, 0);
3732
3733 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3734 qm_sg_bytes, DMA_TO_DEVICE);
3735 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3736 dev_err(ctx->dev, "unable to map S/G table\n");
3737 ret = -ENOMEM;
3738 goto unmap_ctx;
3739 }
3740 edesc->qm_sg_bytes = qm_sg_bytes;
3741
3742 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3743 ctx->ctx_len, DMA_FROM_DEVICE);
3744 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3745 dev_err(ctx->dev, "unable to map ctx\n");
3746 state->ctx_dma = 0;
3747 ret = -ENOMEM;
3748 goto unmap_ctx;
3749 }
3750
3751 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3752 dpaa2_fl_set_final(in_fle, true);
3753 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3754 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3755 dpaa2_fl_set_len(in_fle, to_hash);
3756 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3757 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3758 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3759
3760 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3761 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3762 req_ctx->cbk = ahash_done_ctx_dst;
3763 req_ctx->ctx = &req->base;
3764 req_ctx->edesc = edesc;
3765
3766 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3767 if (ret != -EINPROGRESS &&
3768 !(ret == -EBUSY &&
3769 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3770 goto unmap_ctx;
3771
3772 state->update = ahash_update_ctx;
3773 state->finup = ahash_finup_ctx;
3774 state->final = ahash_final_ctx;
3775 } else if (*next_buflen) {
3776 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3777 req->nbytes, 0);
3778 *buflen = *next_buflen;
3779 *next_buflen = 0;
3780 }
3781
3782 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3783 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3784 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3785 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3786 1);
3787
3788 return ret;
3789unmap_ctx:
3790 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
3791 qi_cache_free(edesc);
3792 return ret;
3793}
3794
3795static int ahash_finup_no_ctx(struct ahash_request *req)
3796{
3797 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3798 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3799 struct caam_hash_state *state = ahash_request_ctx(req);
3800 struct caam_request *req_ctx = &state->caam_req;
3801 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3802 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3803 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3804 GFP_KERNEL : GFP_ATOMIC;
3805 int buflen = *current_buflen(state);
3806 int qm_sg_bytes, src_nents, mapped_nents;
3807 int digestsize = crypto_ahash_digestsize(ahash);
3808 struct ahash_edesc *edesc;
3809 struct dpaa2_sg_entry *sg_table;
3810 int ret;
3811
3812 src_nents = sg_nents_for_len(req->src, req->nbytes);
3813 if (src_nents < 0) {
3814 dev_err(ctx->dev, "Invalid number of src SG.\n");
3815 return src_nents;
3816 }
3817
3818 if (src_nents) {
3819 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3820 DMA_TO_DEVICE);
3821 if (!mapped_nents) {
3822 dev_err(ctx->dev, "unable to DMA map source\n");
3823 return -ENOMEM;
3824 }
3825 } else {
3826 mapped_nents = 0;
3827 }
3828
3829 /* allocate space for base edesc and link tables */
3830 edesc = qi_cache_zalloc(GFP_DMA | flags);
3831 if (!edesc) {
3832 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3833 return -ENOMEM;
3834 }
3835
3836 edesc->src_nents = src_nents;
3837 qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3838 sg_table = &edesc->sgt[0];
3839
3840 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3841 if (ret)
3842 goto unmap;
3843
3844 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3845
3846 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3847 DMA_TO_DEVICE);
3848 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3849 dev_err(ctx->dev, "unable to map S/G table\n");
3850 ret = -ENOMEM;
3851 goto unmap;
3852 }
3853 edesc->qm_sg_bytes = qm_sg_bytes;
3854
3855 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3856 DMA_FROM_DEVICE);
3857 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3858 dev_err(ctx->dev, "unable to map dst\n");
3859 edesc->dst_dma = 0;
3860 ret = -ENOMEM;
3861 goto unmap;
3862 }
3863
3864 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3865 dpaa2_fl_set_final(in_fle, true);
3866 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3867 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3868 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
3869 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3870 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3871 dpaa2_fl_set_len(out_fle, digestsize);
3872
3873 req_ctx->flc = &ctx->flc[DIGEST];
3874 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3875 req_ctx->cbk = ahash_done;
3876 req_ctx->ctx = &req->base;
3877 req_ctx->edesc = edesc;
3878 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3879 if (ret != -EINPROGRESS &&
3880 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3881 goto unmap;
3882
3883 return ret;
3884unmap:
3885 ahash_unmap(ctx->dev, edesc, req, digestsize);
3886 qi_cache_free(edesc);
3887	return ret;
3888}
3889
3890static int ahash_update_first(struct ahash_request *req)
3891{
3892 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3893 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3894 struct caam_hash_state *state = ahash_request_ctx(req);
3895 struct caam_request *req_ctx = &state->caam_req;
3896 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3897 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3898 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3899 GFP_KERNEL : GFP_ATOMIC;
3900 u8 *next_buf = alt_buf(state);
3901 int *next_buflen = alt_buflen(state);
3902 int to_hash;
3903 int src_nents, mapped_nents;
3904 struct ahash_edesc *edesc;
3905 int ret = 0;
3906
3907 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
3908 1);
3909 to_hash = req->nbytes - *next_buflen;
3910
3911 if (to_hash) {
3912 struct dpaa2_sg_entry *sg_table;
3913
3914 src_nents = sg_nents_for_len(req->src,
3915 req->nbytes - (*next_buflen));
3916 if (src_nents < 0) {
3917 dev_err(ctx->dev, "Invalid number of src SG.\n");
3918 return src_nents;
3919 }
3920
3921 if (src_nents) {
3922 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3923 DMA_TO_DEVICE);
3924 if (!mapped_nents) {
3925 dev_err(ctx->dev, "unable to map source for DMA\n");
3926 return -ENOMEM;
3927 }
3928 } else {
3929 mapped_nents = 0;
3930 }
3931
3932 /* allocate space for base edesc and link tables */
3933 edesc = qi_cache_zalloc(GFP_DMA | flags);
3934 if (!edesc) {
3935 dma_unmap_sg(ctx->dev, req->src, src_nents,
3936 DMA_TO_DEVICE);
3937 return -ENOMEM;
3938 }
3939
3940 edesc->src_nents = src_nents;
3941 sg_table = &edesc->sgt[0];
3942
3943 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3944 dpaa2_fl_set_final(in_fle, true);
3945 dpaa2_fl_set_len(in_fle, to_hash);
3946
3947 if (mapped_nents > 1) {
3948 int qm_sg_bytes;
3949
3950 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3951 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3952 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3953 qm_sg_bytes,
3954 DMA_TO_DEVICE);
3955 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3956 dev_err(ctx->dev, "unable to map S/G table\n");
3957 ret = -ENOMEM;
3958 goto unmap_ctx;
3959 }
3960 edesc->qm_sg_bytes = qm_sg_bytes;
3961 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3962 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3963 } else {
3964 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3965 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3966 }
3967
3968 if (*next_buflen)
3969 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
3970 *next_buflen, 0);
3971
3972 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3973 ctx->ctx_len, DMA_FROM_DEVICE);
3974 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3975 dev_err(ctx->dev, "unable to map ctx\n");
3976 state->ctx_dma = 0;
3977 ret = -ENOMEM;
3978 goto unmap_ctx;
3979 }
3980
3981 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3982 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3983 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3984
3985 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3986 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3987 req_ctx->cbk = ahash_done_ctx_dst;
3988 req_ctx->ctx = &req->base;
3989 req_ctx->edesc = edesc;
3990
3991 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3992 if (ret != -EINPROGRESS &&
3993 !(ret == -EBUSY && req->base.flags &
3994 CRYPTO_TFM_REQ_MAY_BACKLOG))
3995 goto unmap_ctx;
3996
3997 state->update = ahash_update_ctx;
3998 state->finup = ahash_finup_ctx;
3999 state->final = ahash_final_ctx;
4000 } else if (*next_buflen) {
4001 state->update = ahash_update_no_ctx;
4002 state->finup = ahash_finup_no_ctx;
4003 state->final = ahash_final_no_ctx;
4004 scatterwalk_map_and_copy(next_buf, req->src, 0,
4005 req->nbytes, 0);
4006 switch_buf(state);
4007 }
4008
4009 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4010 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4011 1);
4012
4013 return ret;
4014unmap_ctx:
4015 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
4016 qi_cache_free(edesc);
4017 return ret;
4018}
4019
4020static int ahash_finup_first(struct ahash_request *req)
4021{
4022 return ahash_digest(req);
4023}
4024
4025static int ahash_init(struct ahash_request *req)
4026{
4027 struct caam_hash_state *state = ahash_request_ctx(req);
4028
4029 state->update = ahash_update_first;
4030 state->finup = ahash_finup_first;
4031 state->final = ahash_final_no_ctx;
4032
4033 state->ctx_dma = 0;
4034 state->current_buf = 0;
4035 state->buf_dma = 0;
4036 state->buflen_0 = 0;
4037 state->buflen_1 = 0;
4038
4039 return 0;
4040}
4041
4042static int ahash_update(struct ahash_request *req)
4043{
4044 struct caam_hash_state *state = ahash_request_ctx(req);
4045
4046 return state->update(req);
4047}
4048
4049static int ahash_finup(struct ahash_request *req)
4050{
4051 struct caam_hash_state *state = ahash_request_ctx(req);
4052
4053 return state->finup(req);
4054}
4055
4056static int ahash_final(struct ahash_request *req)
4057{
4058 struct caam_hash_state *state = ahash_request_ctx(req);
4059
4060 return state->final(req);
4061}
4062
4063static int ahash_export(struct ahash_request *req, void *out)
4064{
4065 struct caam_hash_state *state = ahash_request_ctx(req);
4066 struct caam_export_state *export = out;
4067 int len;
4068 u8 *buf;
4069
4070 if (state->current_buf) {
4071 buf = state->buf_1;
4072 len = state->buflen_1;
4073 } else {
4074 buf = state->buf_0;
4075 len = state->buflen_0;
4076 }
4077
4078 memcpy(export->buf, buf, len);
4079 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4080 export->buflen = len;
4081 export->update = state->update;
4082 export->final = state->final;
4083 export->finup = state->finup;
4084
4085 return 0;
4086}
4087
4088static int ahash_import(struct ahash_request *req, const void *in)
4089{
4090 struct caam_hash_state *state = ahash_request_ctx(req);
4091 const struct caam_export_state *export = in;
4092
4093 memset(state, 0, sizeof(*state));
4094 memcpy(state->buf_0, export->buf, export->buflen);
4095 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4096 state->buflen_0 = export->buflen;
4097 state->update = export->update;
4098 state->final = export->final;
4099 state->finup = export->finup;
4100
4101 return 0;
4102}
4103
4104struct caam_hash_template {
4105 char name[CRYPTO_MAX_ALG_NAME];
4106 char driver_name[CRYPTO_MAX_ALG_NAME];
4107 char hmac_name[CRYPTO_MAX_ALG_NAME];
4108 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4109 unsigned int blocksize;
4110 struct ahash_alg template_ahash;
4111 u32 alg_type;
4112};
4113
4114/* ahash descriptors */
4115static struct caam_hash_template driver_hash[] = {
4116 {
4117 .name = "sha1",
4118 .driver_name = "sha1-caam-qi2",
4119 .hmac_name = "hmac(sha1)",
4120 .hmac_driver_name = "hmac-sha1-caam-qi2",
4121 .blocksize = SHA1_BLOCK_SIZE,
4122 .template_ahash = {
4123 .init = ahash_init,
4124 .update = ahash_update,
4125 .final = ahash_final,
4126 .finup = ahash_finup,
4127 .digest = ahash_digest,
4128 .export = ahash_export,
4129 .import = ahash_import,
4130 .setkey = ahash_setkey,
4131 .halg = {
4132 .digestsize = SHA1_DIGEST_SIZE,
4133 .statesize = sizeof(struct caam_export_state),
4134 },
4135 },
4136 .alg_type = OP_ALG_ALGSEL_SHA1,
4137 }, {
4138 .name = "sha224",
4139 .driver_name = "sha224-caam-qi2",
4140 .hmac_name = "hmac(sha224)",
4141 .hmac_driver_name = "hmac-sha224-caam-qi2",
4142 .blocksize = SHA224_BLOCK_SIZE,
4143 .template_ahash = {
4144 .init = ahash_init,
4145 .update = ahash_update,
4146 .final = ahash_final,
4147 .finup = ahash_finup,
4148 .digest = ahash_digest,
4149 .export = ahash_export,
4150 .import = ahash_import,
4151 .setkey = ahash_setkey,
4152 .halg = {
4153 .digestsize = SHA224_DIGEST_SIZE,
4154 .statesize = sizeof(struct caam_export_state),
4155 },
4156 },
4157 .alg_type = OP_ALG_ALGSEL_SHA224,
4158 }, {
4159 .name = "sha256",
4160 .driver_name = "sha256-caam-qi2",
4161 .hmac_name = "hmac(sha256)",
4162 .hmac_driver_name = "hmac-sha256-caam-qi2",
4163 .blocksize = SHA256_BLOCK_SIZE,
4164 .template_ahash = {
4165 .init = ahash_init,
4166 .update = ahash_update,
4167 .final = ahash_final,
4168 .finup = ahash_finup,
4169 .digest = ahash_digest,
4170 .export = ahash_export,
4171 .import = ahash_import,
4172 .setkey = ahash_setkey,
4173 .halg = {
4174 .digestsize = SHA256_DIGEST_SIZE,
4175 .statesize = sizeof(struct caam_export_state),
4176 },
4177 },
4178 .alg_type = OP_ALG_ALGSEL_SHA256,
4179 }, {
4180 .name = "sha384",
4181 .driver_name = "sha384-caam-qi2",
4182 .hmac_name = "hmac(sha384)",
4183 .hmac_driver_name = "hmac-sha384-caam-qi2",
4184 .blocksize = SHA384_BLOCK_SIZE,
4185 .template_ahash = {
4186 .init = ahash_init,
4187 .update = ahash_update,
4188 .final = ahash_final,
4189 .finup = ahash_finup,
4190 .digest = ahash_digest,
4191 .export = ahash_export,
4192 .import = ahash_import,
4193 .setkey = ahash_setkey,
4194 .halg = {
4195 .digestsize = SHA384_DIGEST_SIZE,
4196 .statesize = sizeof(struct caam_export_state),
4197 },
4198 },
4199 .alg_type = OP_ALG_ALGSEL_SHA384,
4200 }, {
4201 .name = "sha512",
4202 .driver_name = "sha512-caam-qi2",
4203 .hmac_name = "hmac(sha512)",
4204 .hmac_driver_name = "hmac-sha512-caam-qi2",
4205 .blocksize = SHA512_BLOCK_SIZE,
4206 .template_ahash = {
4207 .init = ahash_init,
4208 .update = ahash_update,
4209 .final = ahash_final,
4210 .finup = ahash_finup,
4211 .digest = ahash_digest,
4212 .export = ahash_export,
4213 .import = ahash_import,
4214 .setkey = ahash_setkey,
4215 .halg = {
4216 .digestsize = SHA512_DIGEST_SIZE,
4217 .statesize = sizeof(struct caam_export_state),
4218 },
4219 },
4220 .alg_type = OP_ALG_ALGSEL_SHA512,
4221 }, {
4222 .name = "md5",
4223 .driver_name = "md5-caam-qi2",
4224 .hmac_name = "hmac(md5)",
4225 .hmac_driver_name = "hmac-md5-caam-qi2",
4226 .blocksize = MD5_BLOCK_WORDS * 4,
4227 .template_ahash = {
4228 .init = ahash_init,
4229 .update = ahash_update,
4230 .final = ahash_final,
4231 .finup = ahash_finup,
4232 .digest = ahash_digest,
4233 .export = ahash_export,
4234 .import = ahash_import,
4235 .setkey = ahash_setkey,
4236 .halg = {
4237 .digestsize = MD5_DIGEST_SIZE,
4238 .statesize = sizeof(struct caam_export_state),
4239 },
4240 },
4241 .alg_type = OP_ALG_ALGSEL_MD5,
4242 }
4243};
4244
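For illustration, a minimal in-kernel consumer of the ahash interface these templates expose might look as follows (hedged sketch: "sha256" is the generic algorithm name, whether this driver services it depends on CAAM_CRA_PRIORITY, and the input buffer must be DMA-able, e.g. kmalloc'd):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_sha256(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolve an ahash implementation; caam-qi2 is chosen on priority if present */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* One-shot digest; maps to ahash_digest() above when this driver is selected */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}

The incremental entry points registered above (init/update/final/finup/export/import) are driven the same way through crypto_ahash_init(), crypto_ahash_update() and crypto_ahash_final().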
4245struct caam_hash_alg {
4246 struct list_head entry;
4247 struct device *dev;
4248 int alg_type;
4249 struct ahash_alg ahash_alg;
4250};
4251
4252static int caam_hash_cra_init(struct crypto_tfm *tfm)
4253{
4254 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4255 struct crypto_alg *base = tfm->__crt_alg;
4256 struct hash_alg_common *halg =
4257 container_of(base, struct hash_alg_common, base);
4258 struct ahash_alg *alg =
4259 container_of(halg, struct ahash_alg, halg);
4260 struct caam_hash_alg *caam_hash =
4261 container_of(alg, struct caam_hash_alg, ahash_alg);
4262 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4263 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4264 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4265 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4266 HASH_MSG_LEN + 32,
4267 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4268 HASH_MSG_LEN + 64,
4269 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4270 dma_addr_t dma_addr;
4271 int i;
4272
4273 ctx->dev = caam_hash->dev;
4274
4275 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4276 DMA_BIDIRECTIONAL,
4277 DMA_ATTR_SKIP_CPU_SYNC);
4278 if (dma_mapping_error(ctx->dev, dma_addr)) {
4279 dev_err(ctx->dev, "unable to map shared descriptors\n");
4280 return -ENOMEM;
4281 }
4282
4283 for (i = 0; i < HASH_NUM_OP; i++)
4284 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4285
4286 /* copy descriptor header template value */
4287 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4288
4289 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4290 OP_ALG_ALGSEL_SUBMASK) >>
4291 OP_ALG_ALGSEL_SHIFT];
4292
4293 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4294 sizeof(struct caam_hash_state));
4295
4296 return ahash_set_sh_desc(ahash);
4297}
4298
4299static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4300{
4301 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4302
4303 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4304 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4305}
4306
4307static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4308 struct caam_hash_template *template, bool keyed)
4309{
4310 struct caam_hash_alg *t_alg;
4311 struct ahash_alg *halg;
4312 struct crypto_alg *alg;
4313
4314 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4315 if (!t_alg)
4316 return ERR_PTR(-ENOMEM);
4317
4318 t_alg->ahash_alg = template->template_ahash;
4319 halg = &t_alg->ahash_alg;
4320 alg = &halg->halg.base;
4321
4322 if (keyed) {
4323 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4324 template->hmac_name);
4325 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4326 template->hmac_driver_name);
4327 } else {
4328 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4329 template->name);
4330 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4331 template->driver_name);
4332 t_alg->ahash_alg.setkey = NULL;
4333 }
4334 alg->cra_module = THIS_MODULE;
4335 alg->cra_init = caam_hash_cra_init;
4336 alg->cra_exit = caam_hash_cra_exit;
4337 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4338 alg->cra_priority = CAAM_CRA_PRIORITY;
4339 alg->cra_blocksize = template->blocksize;
4340 alg->cra_alignmask = 0;
4341 alg->cra_flags = CRYPTO_ALG_ASYNC;
4342
4343 t_alg->alg_type = template->alg_type;
4344 t_alg->dev = dev;
4345
4346 return t_alg;
4347}
4348
4349static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4350{
4351 struct dpaa2_caam_priv_per_cpu *ppriv;
4352
4353 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4354 napi_schedule_irqoff(&ppriv->napi);
4355}
4356
4357static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4358{
4359 struct device *dev = priv->dev;
4360 struct dpaa2_io_notification_ctx *nctx;
4361 struct dpaa2_caam_priv_per_cpu *ppriv;
4362 int err, i = 0, cpu;
4363
4364 for_each_online_cpu(cpu) {
4365 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4366 ppriv->priv = priv;
4367 nctx = &ppriv->nctx;
4368 nctx->is_cdan = 0;
4369 nctx->id = ppriv->rsp_fqid;
4370 nctx->desired_cpu = cpu;
4371 nctx->cb = dpaa2_caam_fqdan_cb;
4372
4373 /* Register notification callbacks */
4374 err = dpaa2_io_service_register(NULL, nctx);
4375 if (unlikely(err)) {
4376 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4377 nctx->cb = NULL;
4378 /*
4379 * If no affine DPIO for this core, there's probably
4380 * none available for next cores either. Signal we want
4381 * to retry later, in case the DPIO devices weren't
4382 * probed yet.
4383 */
4384 err = -EPROBE_DEFER;
4385 goto err;
4386 }
4387
4388 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4389 dev);
4390 if (unlikely(!ppriv->store)) {
4391 dev_err(dev, "dpaa2_io_store_create() failed\n");
4392 err = -ENOMEM;
4393 goto err;
4394 }
4395
4396 if (++i == priv->num_pairs)
4397 break;
4398 }
4399
4400 return 0;
4401
4402err:
4403 for_each_online_cpu(cpu) {
4404 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4405 if (!ppriv->nctx.cb)
4406 break;
4407 dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4408 }
4409
4410 for_each_online_cpu(cpu) {
4411 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4412 if (!ppriv->store)
4413 break;
4414 dpaa2_io_store_destroy(ppriv->store);
4415 }
4416
4417 return err;
4418}
4419
4420static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4421{
4422 struct dpaa2_caam_priv_per_cpu *ppriv;
4423 int i = 0, cpu;
4424
4425 for_each_online_cpu(cpu) {
4426 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4427 dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4428 dpaa2_io_store_destroy(ppriv->store);
4429
4430 if (++i == priv->num_pairs)
4431 return;
4432 }
4433}
4434
4435static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4436{
4437 struct dpseci_rx_queue_cfg rx_queue_cfg;
4438 struct device *dev = priv->dev;
4439 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4440 struct dpaa2_caam_priv_per_cpu *ppriv;
4441 int err = 0, i = 0, cpu;
4442
4443 /* Configure Rx queues */
4444 for_each_online_cpu(cpu) {
4445 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4446
4447 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4448 DPSECI_QUEUE_OPT_USER_CTX;
4449 rx_queue_cfg.order_preservation_en = 0;
4450 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4451 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4452 /*
4453 * Rx priority (WQ) doesn't really matter, since we use
4454 * pull mode, i.e. volatile dequeues from specific FQs
4455 */
4456 rx_queue_cfg.dest_cfg.priority = 0;
4457 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4458
4459 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4460 &rx_queue_cfg);
4461 if (err) {
4462 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4463 err);
4464 return err;
4465 }
4466
4467 if (++i == priv->num_pairs)
4468 break;
4469 }
4470
4471 return err;
4472}
4473
4474static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4475{
4476 struct device *dev = priv->dev;
4477
4478 if (!priv->cscn_mem)
4479 return;
4480
4481 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4482 kfree(priv->cscn_mem);
4483}
4484
4485static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4486{
4487 struct device *dev = priv->dev;
4488 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4489
4490 dpaa2_dpseci_congestion_free(priv);
4491 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4492}
4493
4494static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4495 const struct dpaa2_fd *fd)
4496{
4497 struct caam_request *req;
4498 u32 fd_err;
4499
4500 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4501 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4502 return;
4503 }
4504
4505 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4506 if (unlikely(fd_err))
4507 dev_err(priv->dev, "FD error: %08x\n", fd_err);
4508
4509 /*
4510 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4511 * in FD[ERR] or FD[FRC].
4512 */
4513 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4514 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4515 DMA_BIDIRECTIONAL);
4516 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4517}
4518
4519static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4520{
4521 int err;
4522
4523 /* Retry while portal is busy */
4524 do {
4525 err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
4526 ppriv->store);
4527 } while (err == -EBUSY);
4528
4529 if (unlikely(err))
4530		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4531
4532 return err;
4533}
4534
4535static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4536{
4537 struct dpaa2_dq *dq;
4538 int cleaned = 0, is_last;
4539
4540 do {
4541 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4542 if (unlikely(!dq)) {
4543 if (unlikely(!is_last)) {
4544 dev_dbg(ppriv->priv->dev,
4545 "FQ %d returned no valid frames\n",
4546 ppriv->rsp_fqid);
4547 /*
4548 * MUST retry until we get some sort of
4549 * valid response token (be it "empty dequeue"
4550 * or a valid frame).
4551 */
4552 continue;
4553 }
4554 break;
4555 }
4556
4557 /* Process FD */
4558 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4559 cleaned++;
4560 } while (!is_last);
4561
4562 return cleaned;
4563}
4564
4565static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4566{
4567 struct dpaa2_caam_priv_per_cpu *ppriv;
4568 struct dpaa2_caam_priv *priv;
4569 int err, cleaned = 0, store_cleaned;
4570
4571 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4572 priv = ppriv->priv;
4573
4574 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4575 return 0;
4576
4577 do {
4578 store_cleaned = dpaa2_caam_store_consume(ppriv);
4579 cleaned += store_cleaned;
4580
4581 if (store_cleaned == 0 ||
4582 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4583 break;
4584
4585 /* Try to dequeue some more */
4586 err = dpaa2_caam_pull_fq(ppriv);
4587 if (unlikely(err))
4588 break;
4589 } while (1);
4590
4591 if (cleaned < budget) {
4592 napi_complete_done(napi, cleaned);
4593 err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
4594 if (unlikely(err))
4595 dev_err(priv->dev, "Notification rearm failed: %d\n",
4596 err);
4597 }
4598
4599 return cleaned;
4600}
4601
4602static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4603 u16 token)
4604{
4605 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4606 struct device *dev = priv->dev;
4607 int err;
4608
4609 /*
4610 * Congestion group feature supported starting with DPSECI API v5.1
4611 * and only when object has been created with this capability.
4612 */
4613 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4614 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4615 return 0;
4616
4617 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4618 GFP_KERNEL | GFP_DMA);
4619 if (!priv->cscn_mem)
4620 return -ENOMEM;
4621
4622 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4623 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4624 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4625 if (dma_mapping_error(dev, priv->cscn_dma)) {
4626 dev_err(dev, "Error mapping CSCN memory area\n");
4627 err = -ENOMEM;
4628 goto err_dma_map;
4629 }
4630
4631 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4632 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4633 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4634 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4635 cong_notif_cfg.message_iova = priv->cscn_dma;
4636 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4637 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4638 DPSECI_CGN_MODE_COHERENT_WRITE;
4639
4640 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4641 &cong_notif_cfg);
4642 if (err) {
4643 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4644 goto err_set_cong;
4645 }
4646
4647 return 0;
4648
4649err_set_cong:
4650 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4651err_dma_map:
4652 kfree(priv->cscn_mem);
4653
4654 return err;
4655}
4656
4657static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4658{
4659 struct device *dev = &ls_dev->dev;
4660 struct dpaa2_caam_priv *priv;
4661 struct dpaa2_caam_priv_per_cpu *ppriv;
4662 int err, cpu;
4663 u8 i;
4664
4665 priv = dev_get_drvdata(dev);
4666
4667 priv->dev = dev;
4668 priv->dpsec_id = ls_dev->obj_desc.id;
4669
4670	/* Get a handle for the DPSECI this interface is associated with */
4671 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4672 if (err) {
4673 dev_err(dev, "dpseci_open() failed: %d\n", err);
4674 goto err_open;
4675 }
4676
4677 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4678 &priv->minor_ver);
4679 if (err) {
4680 dev_err(dev, "dpseci_get_api_version() failed\n");
4681 goto err_get_vers;
4682 }
4683
4684 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4685
4686 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4687 &priv->dpseci_attr);
4688 if (err) {
4689 dev_err(dev, "dpseci_get_attributes() failed\n");
4690 goto err_get_vers;
4691 }
4692
4693 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4694 &priv->sec_attr);
4695 if (err) {
4696 dev_err(dev, "dpseci_get_sec_attr() failed\n");
4697 goto err_get_vers;
4698 }
4699
4700 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4701 if (err) {
4702 dev_err(dev, "setup_congestion() failed\n");
4703 goto err_get_vers;
4704 }
4705
4706 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4707 priv->dpseci_attr.num_tx_queues);
4708 if (priv->num_pairs > num_online_cpus()) {
4709 dev_warn(dev, "%d queues won't be used\n",
4710 priv->num_pairs - num_online_cpus());
4711 priv->num_pairs = num_online_cpus();
4712 }
4713
4714 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4715 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4716 &priv->rx_queue_attr[i]);
4717 if (err) {
4718 dev_err(dev, "dpseci_get_rx_queue() failed\n");
4719 goto err_get_rx_queue;
4720 }
4721 }
4722
4723 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4724 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4725 &priv->tx_queue_attr[i]);
4726 if (err) {
4727 dev_err(dev, "dpseci_get_tx_queue() failed\n");
4728 goto err_get_rx_queue;
4729 }
4730 }
4731
4732 i = 0;
4733 for_each_online_cpu(cpu) {
4734 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
4735 priv->rx_queue_attr[i].fqid,
4736 priv->tx_queue_attr[i].fqid);
4737
4738 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4739 ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
4740 ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
4741 ppriv->prio = i;
4742
4743 ppriv->net_dev.dev = *dev;
4744 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4745 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4746 DPAA2_CAAM_NAPI_WEIGHT);
4747 if (++i == priv->num_pairs)
4748 break;
4749 }
4750
4751 return 0;
4752
4753err_get_rx_queue:
4754 dpaa2_dpseci_congestion_free(priv);
4755err_get_vers:
4756 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4757err_open:
4758 return err;
4759}
4760
4761static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4762{
4763 struct device *dev = priv->dev;
4764 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4765 struct dpaa2_caam_priv_per_cpu *ppriv;
4766 int i;
4767
4768 for (i = 0; i < priv->num_pairs; i++) {
4769 ppriv = per_cpu_ptr(priv->ppriv, i);
4770 napi_enable(&ppriv->napi);
4771 }
4772
4773 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4774}
4775
4776static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4777{
4778 struct device *dev = priv->dev;
4779 struct dpaa2_caam_priv_per_cpu *ppriv;
4780 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4781 int i, err = 0, enabled;
4782
4783 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4784 if (err) {
4785 dev_err(dev, "dpseci_disable() failed\n");
4786 return err;
4787 }
4788
4789 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4790 if (err) {
4791 dev_err(dev, "dpseci_is_enabled() failed\n");
4792 return err;
4793 }
4794
4795 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
4796
4797 for (i = 0; i < priv->num_pairs; i++) {
4798 ppriv = per_cpu_ptr(priv->ppriv, i);
4799 napi_disable(&ppriv->napi);
4800 netif_napi_del(&ppriv->napi);
4801 }
4802
4803 return 0;
4804}
4805
4806static struct list_head hash_list;
4807
4808static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4809{
4810 struct device *dev;
4811 struct dpaa2_caam_priv *priv;
4812 int i, err = 0;
4813 bool registered = false;
4814
4815 /*
4816 * There is no way to get CAAM endianness - there is no direct register
4817 * space access and MC f/w does not provide this attribute.
4818 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
4819 * property.
4820 */
4821 caam_little_end = true;
4822
4823 caam_imx = false;
4824
4825 dev = &dpseci_dev->dev;
4826
4827 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4828 if (!priv)
4829 return -ENOMEM;
4830
4831 dev_set_drvdata(dev, priv);
4832
4833 priv->domain = iommu_get_domain_for_dev(dev);
4834
4835 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
4836 0, SLAB_CACHE_DMA, NULL);
4837 if (!qi_cache) {
4838 dev_err(dev, "Can't allocate SEC cache\n");
4839 return -ENOMEM;
4840 }
4841
4842 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
4843 if (err) {
4844 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
4845 goto err_dma_mask;
4846 }
4847
4848 /* Obtain a MC portal */
4849 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
4850 if (err) {
4851 if (err == -ENXIO)
4852 err = -EPROBE_DEFER;
4853 else
4854 dev_err(dev, "MC portal allocation failed\n");
4855
4856 goto err_dma_mask;
4857 }
4858
4859 priv->ppriv = alloc_percpu(*priv->ppriv);
4860 if (!priv->ppriv) {
4861 dev_err(dev, "alloc_percpu() failed\n");
4862 err = -ENOMEM;
4863 goto err_alloc_ppriv;
4864 }
4865
4866 /* DPSECI initialization */
4867 err = dpaa2_dpseci_setup(dpseci_dev);
4868 if (err) {
4869 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
4870 goto err_dpseci_setup;
4871 }
4872
4873 /* DPIO */
4874 err = dpaa2_dpseci_dpio_setup(priv);
4875 if (err) {
4876 if (err != -EPROBE_DEFER)
4877 dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
4878 goto err_dpio_setup;
4879 }
4880
4881 /* DPSECI binding to DPIO */
4882 err = dpaa2_dpseci_bind(priv);
4883 if (err) {
4884 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
4885 goto err_bind;
4886 }
4887
4888 /* DPSECI enable */
4889 err = dpaa2_dpseci_enable(priv);
4890 if (err) {
4891 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
4892 goto err_bind;
4893 }
4894
4895 /* register crypto algorithms the device supports */
4896 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4897 struct caam_skcipher_alg *t_alg = driver_algs + i;
4898 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
4899
4900 /* Skip DES algorithms if not supported by device */
4901 if (!priv->sec_attr.des_acc_num &&
4902 (alg_sel == OP_ALG_ALGSEL_3DES ||
4903 alg_sel == OP_ALG_ALGSEL_DES))
4904 continue;
4905
4906 /* Skip AES algorithms if not supported by device */
4907 if (!priv->sec_attr.aes_acc_num &&
4908 alg_sel == OP_ALG_ALGSEL_AES)
4909 continue;
4910
4911 t_alg->caam.dev = dev;
4912 caam_skcipher_alg_init(t_alg);
4913
4914 err = crypto_register_skcipher(&t_alg->skcipher);
4915 if (err) {
4916 dev_warn(dev, "%s alg registration failed: %d\n",
4917 t_alg->skcipher.base.cra_driver_name, err);
4918 continue;
4919 }
4920
4921 t_alg->registered = true;
4922 registered = true;
4923 }
4924
4925 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4926 struct caam_aead_alg *t_alg = driver_aeads + i;
4927 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4928 OP_ALG_ALGSEL_MASK;
4929 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4930 OP_ALG_ALGSEL_MASK;
4931
4932 /* Skip DES algorithms if not supported by device */
4933 if (!priv->sec_attr.des_acc_num &&
4934 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
4935 c1_alg_sel == OP_ALG_ALGSEL_DES))
4936 continue;
4937
4938 /* Skip AES algorithms if not supported by device */
4939 if (!priv->sec_attr.aes_acc_num &&
4940 c1_alg_sel == OP_ALG_ALGSEL_AES)
4941 continue;
4942
4943 /*
4944 * Skip algorithms requiring message digests
4945 * if MD not supported by device.
4946 */
4947 if (!priv->sec_attr.md_acc_num && c2_alg_sel)
4948 continue;
4949
4950 t_alg->caam.dev = dev;
4951 caam_aead_alg_init(t_alg);
4952
4953 err = crypto_register_aead(&t_alg->aead);
4954 if (err) {
4955 dev_warn(dev, "%s alg registration failed: %d\n",
4956 t_alg->aead.base.cra_driver_name, err);
4957 continue;
4958 }
4959
4960 t_alg->registered = true;
4961 registered = true;
4962 }
4963 if (registered)
4964 dev_info(dev, "algorithms registered in /proc/crypto\n");
4965
4966 /* register hash algorithms the device supports */
4967 INIT_LIST_HEAD(&hash_list);
4968
4969 /*
4970 * Skip registration of any hashing algorithms if MD block
4971 * is not present.
4972 */
4973 if (!priv->sec_attr.md_acc_num)
4974 return 0;
4975
4976 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
4977 struct caam_hash_alg *t_alg;
4978 struct caam_hash_template *alg = driver_hash + i;
4979
4980 /* register hmac version */
4981 t_alg = caam_hash_alloc(dev, alg, true);
4982 if (IS_ERR(t_alg)) {
4983 err = PTR_ERR(t_alg);
4984 dev_warn(dev, "%s hash alg allocation failed: %d\n",
4985 alg->driver_name, err);
4986 continue;
4987 }
4988
4989 err = crypto_register_ahash(&t_alg->ahash_alg);
4990 if (err) {
4991 dev_warn(dev, "%s alg registration failed: %d\n",
4992 t_alg->ahash_alg.halg.base.cra_driver_name,
4993 err);
4994 kfree(t_alg);
4995 } else {
4996 list_add_tail(&t_alg->entry, &hash_list);
4997 }
4998
4999 /* register unkeyed version */
5000 t_alg = caam_hash_alloc(dev, alg, false);
5001 if (IS_ERR(t_alg)) {
5002 err = PTR_ERR(t_alg);
5003 dev_warn(dev, "%s alg allocation failed: %d\n",
5004 alg->driver_name, err);
5005 continue;
5006 }
5007
5008 err = crypto_register_ahash(&t_alg->ahash_alg);
5009 if (err) {
5010 dev_warn(dev, "%s alg registration failed: %d\n",
5011 t_alg->ahash_alg.halg.base.cra_driver_name,
5012 err);
5013 kfree(t_alg);
5014 } else {
5015 list_add_tail(&t_alg->entry, &hash_list);
5016 }
5017 }
5018 if (!list_empty(&hash_list))
5019 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5020
5021 return err;
5022
5023err_bind:
5024 dpaa2_dpseci_dpio_free(priv);
5025err_dpio_setup:
5026 dpaa2_dpseci_free(priv);
5027err_dpseci_setup:
5028 free_percpu(priv->ppriv);
5029err_alloc_ppriv:
5030 fsl_mc_portal_free(priv->mc_io);
5031err_dma_mask:
5032 kmem_cache_destroy(qi_cache);
5033
5034 return err;
5035}
5036
5037static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5038{
5039 struct device *dev;
5040 struct dpaa2_caam_priv *priv;
5041 int i;
5042
5043 dev = &ls_dev->dev;
5044 priv = dev_get_drvdata(dev);
5045
5046 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5047 struct caam_aead_alg *t_alg = driver_aeads + i;
5048
5049 if (t_alg->registered)
5050 crypto_unregister_aead(&t_alg->aead);
5051 }
5052
5053 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5054 struct caam_skcipher_alg *t_alg = driver_algs + i;
5055
5056 if (t_alg->registered)
5057 crypto_unregister_skcipher(&t_alg->skcipher);
5058 }
5059
5060 if (hash_list.next) {
5061 struct caam_hash_alg *t_hash_alg, *p;
5062
5063 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5064 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5065 list_del(&t_hash_alg->entry);
5066 kfree(t_hash_alg);
5067 }
5068 }
5069
5070 dpaa2_dpseci_disable(priv);
5071 dpaa2_dpseci_dpio_free(priv);
5072 dpaa2_dpseci_free(priv);
5073 free_percpu(priv->ppriv);
5074 fsl_mc_portal_free(priv->mc_io);
5075 kmem_cache_destroy(qi_cache);
5076
5077 return 0;
5078}
5079
5080int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5081{
5082 struct dpaa2_fd fd;
5083 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5084 int err = 0, i, id;
5085
5086 if (IS_ERR(req))
5087 return PTR_ERR(req);
5088
5089 if (priv->cscn_mem) {
5090 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5091 DPAA2_CSCN_SIZE,
5092 DMA_FROM_DEVICE);
5093 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5094 dev_dbg_ratelimited(dev, "Dropping request\n");
5095 return -EBUSY;
5096 }
5097 }
5098
5099 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5100
5101 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5102 DMA_BIDIRECTIONAL);
5103 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5104 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5105 goto err_out;
5106 }
5107
5108 memset(&fd, 0, sizeof(fd));
5109 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5110 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5111 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5112 dpaa2_fd_set_flc(&fd, req->flc_dma);
5113
5114 /*
5115 * There is no guarantee that preemption is disabled here,
5116 * thus take action.
5117 */
5118 preempt_disable();
5119 id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
5120 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5121 err = dpaa2_io_service_enqueue_fq(NULL,
5122 priv->tx_queue_attr[id].fqid,
5123 &fd);
5124 if (err != -EBUSY)
5125 break;
5126 }
5127 preempt_enable();
5128
5129 if (unlikely(err)) {
5130 dev_err(dev, "Error enqueuing frame: %d\n", err);
5131 goto err_out;
5132 }
5133
5134 return -EINPROGRESS;
5135
5136err_out:
5137 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5138 DMA_BIDIRECTIONAL);
5139 return -EIO;
5140}
5141EXPORT_SYMBOL(dpaa2_caam_enqueue);
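All submission paths above follow the same pattern around this export; a condensed, hypothetical sketch (example_submit and example_done are illustrative names only, mirroring the ahash_done_* callbacks earlier in the file):

/* Prototype matches req->cbk as invoked from dpaa2_caam_process_fd() */
static void example_done(void *cbk_ctx, u32 status)
{
	/* inspect the CAAM status word, unmap/free the edesc, complete the request */
}

static int example_submit(struct device *dev, struct caam_request *req_ctx,
			  struct crypto_async_request *base)
{
	int ret;

	req_ctx->cbk = example_done;
	req_ctx->ctx = base;

	ret = dpaa2_caam_enqueue(dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && (base->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return ret;	/* in flight; example_done() runs from NAPI poll context */

	/* rejected up front: the caller cleans up its mappings and descriptor here */
	return ret;
}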
5142
5143static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5144 {
5145 .vendor = FSL_MC_VENDOR_FREESCALE,
5146 .obj_type = "dpseci",
5147 },
5148 { .vendor = 0x0 }
5149};
5150
5151static struct fsl_mc_driver dpaa2_caam_driver = {
5152 .driver = {
5153 .name = KBUILD_MODNAME,
5154 .owner = THIS_MODULE,
5155 },
5156 .probe = dpaa2_caam_probe,
5157 .remove = dpaa2_caam_remove,
5158 .match_id_table = dpaa2_caam_match_id_table
5159};
5160
5161MODULE_LICENSE("Dual BSD/GPL");
5162MODULE_AUTHOR("Freescale Semiconductor, Inc");
5163MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5164
5165module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 000000000000..9823bdefd029
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,223 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP
5 */
6
7#ifndef _CAAMALG_QI2_H_
8#define _CAAMALG_QI2_H_
9
10#include <soc/fsl/dpaa2-io.h>
11#include <soc/fsl/dpaa2-fd.h>
12#include <linux/threads.h>
13#include "dpseci.h"
14#include "desc_constr.h"
15
16#define DPAA2_CAAM_STORE_SIZE 16
17/* NAPI weight *must* be a multiple of the store size. */
18#define DPAA2_CAAM_NAPI_WEIGHT 64
19
20/* The congestion entrance threshold was chosen so that on LS2088
21 * we support the maximum throughput for the available memory
22 */
23#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
24#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
25
26/**
27 * dpaa2_caam_priv - driver private data
28 * @dpsec_id: DPSECI object unique ID
29 * @major_ver: DPSECI major version
30 * @minor_ver: DPSECI minor version
31 * @dpseci_attr: DPSECI attributes
32 * @sec_attr: SEC engine attributes
33 * @rx_queue_attr: array of Rx queue attributes
34 * @tx_queue_attr: array of Tx queue attributes
35 * @cscn_mem: pointer to memory region containing the congestion SCN
36 * its size is larger than DPAA2_CSCN_SIZE, to accommodate alignment
37 * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
38 * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
39 * @cscn_dma: dma address used by the QMAN to write CSCN messages
40 * @dev: device associated with the DPSECI object
41 * @mc_io: pointer to MC portal's I/O object
42 * @domain: IOMMU domain
43 * @ppriv: per CPU pointers to private data
44 */
45struct dpaa2_caam_priv {
46 int dpsec_id;
47
48 u16 major_ver;
49 u16 minor_ver;
50
51 struct dpseci_attr dpseci_attr;
52 struct dpseci_sec_attr sec_attr;
53 struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
54 struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
55 int num_pairs;
56
57 /* congestion */
58 void *cscn_mem;
59 void *cscn_mem_aligned;
60 dma_addr_t cscn_dma;
61
62 struct device *dev;
63 struct fsl_mc_io *mc_io;
64 struct iommu_domain *domain;
65
66 struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
67};
68
69/**
70 * dpaa2_caam_priv_per_cpu - per CPU private data
71 * @napi: napi structure
72 * @net_dev: netdev used by napi
73 * @req_fqid: (virtual) request (Tx / enqueue) FQID
74 * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
75 * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
76 * @nctx: notification context of response FQ
77 * @store: where dequeued frames are stored
78 * @priv: backpointer to dpaa2_caam_priv
79 */
80struct dpaa2_caam_priv_per_cpu {
81 struct napi_struct napi;
82 struct net_device net_dev;
83 int req_fqid;
84 int rsp_fqid;
85 int prio;
86 struct dpaa2_io_notification_ctx nctx;
87 struct dpaa2_io_store *store;
88 struct dpaa2_caam_priv *priv;
89};
90
91/*
92 * The CAAM QI hardware constructs a job descriptor which points
 93 * to the shared descriptor (as pointed to by context_a of the FQ to CAAM).
 94 * When the job descriptor is executed by deco, the whole job
 95 * descriptor together with the shared descriptor gets loaded into the
 96 * deco buffer, which is 64 words long (each word 32-bit).
97 *
98 * The job descriptor constructed by QI hardware has layout:
99 *
100 * HEADER (1 word)
101 * Shdesc ptr (1 or 2 words)
102 * SEQ_OUT_PTR (1 word)
103 * Out ptr (1 or 2 words)
104 * Out length (1 word)
105 * SEQ_IN_PTR (1 word)
106 * In ptr (1 or 2 words)
107 * In length (1 word)
108 *
109 * The shdesc ptr is used to fetch shared descriptor contents
110 * into deco buffer.
111 *
 112 * Apart from the shdesc contents, the total number of words that
 113 * get loaded in the deco buffer is '8' or '11'. The remaining words
 114 * in the deco buffer can be used for storing the shared descriptor.
115 */
116#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
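A hedged worked example of the MAX_SDLEN formula, assuming CAAM_CMD_SZ is 4 bytes and DESC_JOB_IO_LEN covers the 11-word (64-bit pointer) case laid out above, as defined in desc_constr.h:

	/*
	 * Illustrative arithmetic only:
	 *   CAAM_DESC_BYTES_MAX = 64 words * 4 bytes       = 256 bytes
	 *   DESC_JOB_IO_LEN     = 5 * 4 + 3 * 8 (64-bit)   =  44 bytes (11 words)
	 *   MAX_SDLEN           = (256 - 44) / 4           =  53 command words
	 */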
117
118/* Length of a single buffer in the QI driver memory cache */
119#define CAAM_QI_MEMCACHE_SIZE 512
120
121/*
122 * aead_edesc - s/w-extended aead descriptor
123 * @src_nents: number of segments in input scatterlist
124 * @dst_nents: number of segments in output scatterlist
125 * @iv_dma: dma address of iv for checking continuity and link table
126 * @qm_sg_bytes: length of dma mapped h/w link table
127 * @qm_sg_dma: bus physical mapped address of h/w link table
128 * @assoclen: associated data length, in CAAM endianness
129 * @assoclen_dma: bus physical mapped address of req->assoclen
130 * @sgt: the h/w link table, followed by IV
131 */
132struct aead_edesc {
133 int src_nents;
134 int dst_nents;
135 dma_addr_t iv_dma;
136 int qm_sg_bytes;
137 dma_addr_t qm_sg_dma;
138 unsigned int assoclen;
139 dma_addr_t assoclen_dma;
140 struct dpaa2_sg_entry sgt[0];
141};
142
143/*
144 * skcipher_edesc - s/w-extended skcipher descriptor
145 * @src_nents: number of segments in input scatterlist
146 * @dst_nents: number of segments in output scatterlist
147 * @iv_dma: dma address of iv for checking continuity and link table
148 * @qm_sg_bytes: length of dma mapped qm_sg space
149 * @qm_sg_dma: I/O virtual address of h/w link table
150 * @sgt: the h/w link table, followed by IV
151 */
152struct skcipher_edesc {
153 int src_nents;
154 int dst_nents;
155 dma_addr_t iv_dma;
156 int qm_sg_bytes;
157 dma_addr_t qm_sg_dma;
158 struct dpaa2_sg_entry sgt[0];
159};
160
161/*
162 * ahash_edesc - s/w-extended ahash descriptor
163 * @dst_dma: I/O virtual address of req->result
164 * @qm_sg_dma: I/O virtual address of h/w link table
165 * @src_nents: number of segments in input scatterlist
166 * @qm_sg_bytes: length of dma mapped qm_sg space
167 * @sgt: pointer to h/w link table
168 */
169struct ahash_edesc {
170 dma_addr_t dst_dma;
171 dma_addr_t qm_sg_dma;
172 int src_nents;
173 int qm_sg_bytes;
174 struct dpaa2_sg_entry sgt[0];
175};
176
177/**
178 * caam_flc - Flow Context (FLC)
179 * @flc: Flow Context options
180 * @sh_desc: Shared Descriptor
181 */
182struct caam_flc {
183 u32 flc[16];
184 u32 sh_desc[MAX_SDLEN];
185} ____cacheline_aligned;
186
187enum optype {
188 ENCRYPT = 0,
189 DECRYPT,
190 NUM_OP
191};
192
193/**
 194 * caam_request - the request structure that driver users should fill when
 195 *	submitting a job to the driver
196 * @fd_flt: Frame list table defining input and output
197 * fd_flt[0] - FLE pointing to output buffer
198 * fd_flt[1] - FLE pointing to input buffer
199 * @fd_flt_dma: DMA address for the frame list table
200 * @flc: Flow Context
201 * @flc_dma: I/O virtual address of Flow Context
202 * @cbk: Callback function to invoke when job is completed
 203 * @ctx: arbitrary context attached to the request by the application
204 * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
205 */
206struct caam_request {
207 struct dpaa2_fl_entry fd_flt[2];
208 dma_addr_t fd_flt_dma;
209 struct caam_flc *flc;
210 dma_addr_t flc_dma;
211 void (*cbk)(void *ctx, u32 err);
212 void *ctx;
213 void *edesc;
214};
215
216/**
217 * dpaa2_caam_enqueue() - enqueue a crypto request
218 * @dev: device associated with the DPSECI object
219 * @req: pointer to caam_request
220 */
221int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
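A minimal, hypothetical caller-side sketch of filling a caam_request before handing it to dpaa2_caam_enqueue(); my_done_cb, my_ctx, edesc, flc/flc_dma and dev stand in for whatever the real algorithm glue code already holds:

	static void my_done_cb(void *ctx, u32 err)
	{
		/* err carries the SEC/QI status word; 0 means success */
	}

	req->flc = flc;			/* preformatted flow context */
	req->flc_dma = flc_dma;
	req->cbk = my_done_cb;
	req->ctx = my_ctx;		/* handed back untouched in the callback */
	req->edesc = edesc;		/* the {skcipher,aead}_edesc in flight */

	/* fd_flt[0] points at the output buffer, fd_flt[1] at the input */
	dpaa2_fl_set_final(&req->fd_flt[1], true);
	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);

	ret = dpaa2_caam_enqueue(dev, req);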
222
223#endif /* _CAAMALG_QI2_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab5f09c..46924affa0bd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * caam - Freescale FSL CAAM support for ahash functions of crypto API 3 * caam - Freescale FSL CAAM support for ahash functions of crypto API
3 * 4 *
@@ -62,6 +63,7 @@
62#include "error.h" 63#include "error.h"
63#include "sg_sw_sec4.h" 64#include "sg_sw_sec4.h"
64#include "key_gen.h" 65#include "key_gen.h"
66#include "caamhash_desc.h"
65 67
66#define CAAM_CRA_PRIORITY 3000 68#define CAAM_CRA_PRIORITY 3000
67 69
@@ -71,14 +73,6 @@
71#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 73#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 74#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73 75
74/* length of descriptors text */
75#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
76#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
77#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 76#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
83 CAAM_MAX_HASH_KEY_SIZE) 77 CAAM_MAX_HASH_KEY_SIZE)
84#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 78#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -235,60 +229,6 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
235 return 0; 229 return 0;
236} 230}
237 231
238/*
239 * For ahash update, final and finup (import_ctx = true)
240 * import context, read and write to seqout
241 * For ahash firsts and digest (import_ctx = false)
242 * read and write to seqout
243 */
244static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
245 struct caam_hash_ctx *ctx, bool import_ctx,
246 int era)
247{
248 u32 op = ctx->adata.algtype;
249 u32 *skip_key_load;
250
251 init_sh_desc(desc, HDR_SHARE_SERIAL);
252
253 /* Append key if it has been set; ahash update excluded */
254 if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
255 /* Skip key loading if already shared */
256 skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
257 JUMP_COND_SHRD);
258
259 if (era < 6)
260 append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
261 ctx->adata.keylen, CLASS_2 |
262 KEY_DEST_MDHA_SPLIT | KEY_ENC);
263 else
264 append_proto_dkp(desc, &ctx->adata);
265
266 set_jump_tgt_here(desc, skip_key_load);
267
268 op |= OP_ALG_AAI_HMAC_PRECOMP;
269 }
270
271 /* If needed, import context from software */
272 if (import_ctx)
273 append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
274 LDST_SRCDST_BYTE_CONTEXT);
275
276 /* Class 2 operation */
277 append_operation(desc, op | state | OP_ALG_ENCRYPT);
278
279 /*
280 * Load from buf and/or src and write to req->result or state->context
281 * Calculate remaining bytes to read
282 */
283 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
284 /* Read remaining bytes */
285 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
286 FIFOLD_TYPE_MSG | KEY_VLF);
287 /* Store class2 context bytes */
288 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
289 LDST_SRCDST_BYTE_CONTEXT);
290}
291
292static int ahash_set_sh_desc(struct crypto_ahash *ahash) 232static int ahash_set_sh_desc(struct crypto_ahash *ahash)
293{ 233{
294 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 234 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,8 +241,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
301 241
302 /* ahash_update shared descriptor */ 242 /* ahash_update shared descriptor */
303 desc = ctx->sh_desc_update; 243 desc = ctx->sh_desc_update;
304 ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true, 244 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
305 ctrlpriv->era); 245 ctx->ctx_len, true, ctrlpriv->era);
306 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 246 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
307 desc_bytes(desc), ctx->dir); 247 desc_bytes(desc), ctx->dir);
308#ifdef DEBUG 248#ifdef DEBUG
@@ -313,8 +253,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
313 253
314 /* ahash_update_first shared descriptor */ 254 /* ahash_update_first shared descriptor */
315 desc = ctx->sh_desc_update_first; 255 desc = ctx->sh_desc_update_first;
316 ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false, 256 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
317 ctrlpriv->era); 257 ctx->ctx_len, false, ctrlpriv->era);
318 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 258 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
319 desc_bytes(desc), ctx->dir); 259 desc_bytes(desc), ctx->dir);
320#ifdef DEBUG 260#ifdef DEBUG
@@ -325,8 +265,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
325 265
326 /* ahash_final shared descriptor */ 266 /* ahash_final shared descriptor */
327 desc = ctx->sh_desc_fin; 267 desc = ctx->sh_desc_fin;
328 ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true, 268 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
329 ctrlpriv->era); 269 ctx->ctx_len, true, ctrlpriv->era);
330 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 270 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
331 desc_bytes(desc), ctx->dir); 271 desc_bytes(desc), ctx->dir);
332#ifdef DEBUG 272#ifdef DEBUG
@@ -337,8 +277,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
337 277
338 /* ahash_digest shared descriptor */ 278 /* ahash_digest shared descriptor */
339 desc = ctx->sh_desc_digest; 279 desc = ctx->sh_desc_digest;
340 ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false, 280 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
341 ctrlpriv->era); 281 ctx->ctx_len, false, ctrlpriv->era);
342 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 282 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
343 desc_bytes(desc), ctx->dir); 283 desc_bytes(desc), ctx->dir);
344#ifdef DEBUG 284#ifdef DEBUG
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
new file mode 100644
index 000000000000..a12f7959a2c3
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,80 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/*
3 * Shared descriptors for ahash algorithms
4 *
5 * Copyright 2017 NXP
6 */
7
8#include "compat.h"
9#include "desc_constr.h"
10#include "caamhash_desc.h"
11
12/**
13 * cnstr_shdsc_ahash - ahash shared descriptor
14 * @desc: pointer to buffer used for descriptor construction
15 * @adata: pointer to authentication transform definitions.
16 * A split key is required for SEC Era < 6; the size of the split key
17 * is specified in this case.
18 * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
19 * SHA256, SHA384, SHA512}.
20 * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
21 * @digestsize: algorithm's digest size
22 * @ctx_len: size of Context Register
23 * @import_ctx: true if previous Context Register needs to be restored
24 * must be true for ahash update and final
 25 *	must be false for ahash first and digest
26 * @era: SEC Era
27 */
28void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
29 int digestsize, int ctx_len, bool import_ctx, int era)
30{
31 u32 op = adata->algtype;
32
33 init_sh_desc(desc, HDR_SHARE_SERIAL);
34
35 /* Append key if it has been set; ahash update excluded */
36 if (state != OP_ALG_AS_UPDATE && adata->keylen) {
37 u32 *skip_key_load;
38
39 /* Skip key loading if already shared */
40 skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
41 JUMP_COND_SHRD);
42
43 if (era < 6)
44 append_key_as_imm(desc, adata->key_virt,
45 adata->keylen_pad,
46 adata->keylen, CLASS_2 |
47 KEY_DEST_MDHA_SPLIT | KEY_ENC);
48 else
49 append_proto_dkp(desc, adata);
50
51 set_jump_tgt_here(desc, skip_key_load);
52
53 op |= OP_ALG_AAI_HMAC_PRECOMP;
54 }
55
56 /* If needed, import context from software */
57 if (import_ctx)
58 append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
59 LDST_SRCDST_BYTE_CONTEXT);
60
61 /* Class 2 operation */
62 append_operation(desc, op | state | OP_ALG_ENCRYPT);
63
64 /*
65 * Load from buf and/or src and write to req->result or state->context
66 * Calculate remaining bytes to read
67 */
68 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
69 /* Read remaining bytes */
70 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
71 FIFOLD_TYPE_MSG | KEY_VLF);
72 /* Store class2 context bytes */
73 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
74 LDST_SRCDST_BYTE_CONTEXT);
75}
76EXPORT_SYMBOL(cnstr_shdsc_ahash);
77
78MODULE_LICENSE("Dual BSD/GPL");
79MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
80MODULE_AUTHOR("NXP Semiconductors");
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
new file mode 100644
index 000000000000..631fc1ac312c
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -0,0 +1,21 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Shared descriptors for ahash algorithms
4 *
5 * Copyright 2017 NXP
6 */
7
8#ifndef _CAAMHASH_DESC_H_
9#define _CAAMHASH_DESC_H_
10
11/* length of descriptors text */
12#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
13#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
14#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
15#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
16#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
17
18void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
19 int digestsize, int ctx_len, bool import_ctx, int era);
20
21#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e5533a..4fc209cbbeab 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
1/* 2/*
2 * caam - Freescale FSL CAAM support for Public Key Cryptography 3 * caam - Freescale FSL CAAM support for Public Key Cryptography
3 * 4 *
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4ff019..4318b0aa6fb9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * caam - Freescale FSL CAAM support for hw_random 3 * caam - Freescale FSL CAAM support for hw_random
3 * 4 *
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0cd5098..9604ff7a335e 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
17#include <linux/of_platform.h> 17#include <linux/of_platform.h>
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/iommu.h>
20#include <linux/spinlock.h> 21#include <linux/spinlock.h>
21#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
22#include <linux/in.h> 23#include <linux/in.h>
@@ -39,6 +40,7 @@
39#include <crypto/authenc.h> 40#include <crypto/authenc.h>
40#include <crypto/akcipher.h> 41#include <crypto/akcipher.h>
41#include <crypto/scatterwalk.h> 42#include <crypto/scatterwalk.h>
43#include <crypto/skcipher.h>
42#include <crypto/internal/skcipher.h> 44#include <crypto/internal/skcipher.h>
43#include <crypto/internal/hash.h> 45#include <crypto/internal/hash.h>
44#include <crypto/internal/rsa.h> 46#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f428c1..3fc793193821 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* * CAAM control-plane driver backend 2/* * CAAM control-plane driver backend
2 * Controller-level driver, kernel property detection, initialization 3 * Controller-level driver, kernel property detection, initialization
3 * 4 *
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 000000000000..8a68531ded0b
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,426 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/*
3 * Copyright 2013-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP
5 */
6
7#include <linux/fsl/mc.h>
8#include "dpseci.h"
9#include "dpseci_cmd.h"
10
11/**
12 * dpseci_open() - Open a control session for the specified object
13 * @mc_io: Pointer to MC portal's I/O object
14 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
15 * @dpseci_id: DPSECI unique ID
16 * @token: Returned token; use in subsequent API calls
17 *
18 * This function can be used to open a control session for an already created
19 * object; an object may have been declared statically in the DPL
20 * or created dynamically.
21 * This function returns a unique authentication token, associated with the
22 * specific object ID and the specific MC portal; this token must be used in all
23 * subsequent commands for this specific object.
24 *
25 * Return: '0' on success, error code otherwise
26 */
27int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
28 u16 *token)
29{
30 struct fsl_mc_command cmd = { 0 };
31 struct dpseci_cmd_open *cmd_params;
32 int err;
33
34 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
35 cmd_flags,
36 0);
37 cmd_params = (struct dpseci_cmd_open *)cmd.params;
38 cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
39 err = mc_send_command(mc_io, &cmd);
40 if (err)
41 return err;
42
43 *token = mc_cmd_hdr_read_token(&cmd);
44
45 return 0;
46}
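An illustrative control-session sketch built around the token returned by dpseci_open(); mc_io and dpseci_id are assumed to come from the fsl-mc bus probe, and error handling is elided:

	u16 token;
	struct dpseci_attr attr;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	/* ... use attr.num_rx_queues / attr.num_tx_queues ... */

	dpseci_close(mc_io, 0, token);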
47
48/**
49 * dpseci_close() - Close the control session of the object
50 * @mc_io: Pointer to MC portal's I/O object
51 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
52 * @token: Token of DPSECI object
53 *
54 * After this function is called, no further operations are allowed on the
55 * object without opening a new control session.
56 *
57 * Return: '0' on success, error code otherwise
58 */
59int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
60{
61 struct fsl_mc_command cmd = { 0 };
62
63 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
64 cmd_flags,
65 token);
66 return mc_send_command(mc_io, &cmd);
67}
68
69/**
70 * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
71 * @mc_io: Pointer to MC portal's I/O object
72 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
73 * @token: Token of DPSECI object
74 *
75 * Return: '0' on success, error code otherwise
76 */
77int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
78{
79 struct fsl_mc_command cmd = { 0 };
80
81 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
82 cmd_flags,
83 token);
84 return mc_send_command(mc_io, &cmd);
85}
86
87/**
88 * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
89 * @mc_io: Pointer to MC portal's I/O object
90 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
91 * @token: Token of DPSECI object
92 *
93 * Return: '0' on success, error code otherwise
94 */
95int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
96{
97 struct fsl_mc_command cmd = { 0 };
98
99 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
100 cmd_flags,
101 token);
102
103 return mc_send_command(mc_io, &cmd);
104}
105
106/**
107 * dpseci_is_enabled() - Check if the DPSECI is enabled.
108 * @mc_io: Pointer to MC portal's I/O object
109 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
110 * @token: Token of DPSECI object
111 * @en: Returns '1' if object is enabled; '0' otherwise
112 *
113 * Return: '0' on success, error code otherwise
114 */
115int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
116 int *en)
117{
118 struct fsl_mc_command cmd = { 0 };
119 struct dpseci_rsp_is_enabled *rsp_params;
120 int err;
121
122 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
123 cmd_flags,
124 token);
125 err = mc_send_command(mc_io, &cmd);
126 if (err)
127 return err;
128
129 rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
130 *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
131
132 return 0;
133}
134
135/**
136 * dpseci_get_attributes() - Retrieve DPSECI attributes
137 * @mc_io: Pointer to MC portal's I/O object
138 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
139 * @token: Token of DPSECI object
140 * @attr: Returned object's attributes
141 *
142 * Return: '0' on success, error code otherwise
143 */
144int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
145 struct dpseci_attr *attr)
146{
147 struct fsl_mc_command cmd = { 0 };
148 struct dpseci_rsp_get_attributes *rsp_params;
149 int err;
150
151 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
152 cmd_flags,
153 token);
154 err = mc_send_command(mc_io, &cmd);
155 if (err)
156 return err;
157
158 rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
159 attr->id = le32_to_cpu(rsp_params->id);
160 attr->num_tx_queues = rsp_params->num_tx_queues;
161 attr->num_rx_queues = rsp_params->num_rx_queues;
162 attr->options = le32_to_cpu(rsp_params->options);
163
164 return 0;
165}
166
167/**
168 * dpseci_set_rx_queue() - Set Rx queue configuration
169 * @mc_io: Pointer to MC portal's I/O object
170 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
171 * @token: Token of DPSECI object
172 * @queue: Select the queue relative to number of priorities configured at
173 * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
174 * Rx queues identically.
175 * @cfg: Rx queue configuration
176 *
177 * Return: '0' on success, error code otherwise
178 */
179int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
180 u8 queue, const struct dpseci_rx_queue_cfg *cfg)
181{
182 struct fsl_mc_command cmd = { 0 };
183 struct dpseci_cmd_queue *cmd_params;
184
185 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
186 cmd_flags,
187 token);
188 cmd_params = (struct dpseci_cmd_queue *)cmd.params;
189 cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
190 cmd_params->priority = cfg->dest_cfg.priority;
191 cmd_params->queue = queue;
192 dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
193 cfg->dest_cfg.dest_type);
194 cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
195 cmd_params->options = cpu_to_le32(cfg->options);
196 dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
197 cfg->order_preservation_en);
198
199 return mc_send_command(mc_io, &cmd);
200}
201
202/**
203 * dpseci_get_rx_queue() - Retrieve Rx queue attributes
204 * @mc_io: Pointer to MC portal's I/O object
205 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
206 * @token: Token of DPSECI object
207 * @queue: Select the queue relative to number of priorities configured at
208 * DPSECI creation
209 * @attr: Returned Rx queue attributes
210 *
211 * Return: '0' on success, error code otherwise
212 */
213int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
214 u8 queue, struct dpseci_rx_queue_attr *attr)
215{
216 struct fsl_mc_command cmd = { 0 };
217 struct dpseci_cmd_queue *cmd_params;
218 int err;
219
220 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
221 cmd_flags,
222 token);
223 cmd_params = (struct dpseci_cmd_queue *)cmd.params;
224 cmd_params->queue = queue;
225 err = mc_send_command(mc_io, &cmd);
226 if (err)
227 return err;
228
229 attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
230 attr->dest_cfg.priority = cmd_params->priority;
231 attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
232 DEST_TYPE);
233 attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
234 attr->fqid = le32_to_cpu(cmd_params->fqid);
235 attr->order_preservation_en =
236 dpseci_get_field(cmd_params->order_preservation_en,
237 ORDER_PRESERVATION);
238
239 return 0;
240}
241
242/**
243 * dpseci_get_tx_queue() - Retrieve Tx queue attributes
244 * @mc_io: Pointer to MC portal's I/O object
245 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
246 * @token: Token of DPSECI object
247 * @queue: Select the queue relative to number of priorities configured at
248 * DPSECI creation
249 * @attr: Returned Tx queue attributes
250 *
251 * Return: '0' on success, error code otherwise
252 */
253int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
254 u8 queue, struct dpseci_tx_queue_attr *attr)
255{
256 struct fsl_mc_command cmd = { 0 };
257 struct dpseci_cmd_queue *cmd_params;
258 struct dpseci_rsp_get_tx_queue *rsp_params;
259 int err;
260
261 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
262 cmd_flags,
263 token);
264 cmd_params = (struct dpseci_cmd_queue *)cmd.params;
265 cmd_params->queue = queue;
266 err = mc_send_command(mc_io, &cmd);
267 if (err)
268 return err;
269
270 rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
271 attr->fqid = le32_to_cpu(rsp_params->fqid);
272 attr->priority = rsp_params->priority;
273
274 return 0;
275}
276
277/**
278 * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
279 * @mc_io: Pointer to MC portal's I/O object
280 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
281 * @token: Token of DPSECI object
282 * @attr: Returned SEC attributes
283 *
284 * Return: '0' on success, error code otherwise
285 */
286int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
287 struct dpseci_sec_attr *attr)
288{
289 struct fsl_mc_command cmd = { 0 };
290 struct dpseci_rsp_get_sec_attr *rsp_params;
291 int err;
292
293 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
294 cmd_flags,
295 token);
296 err = mc_send_command(mc_io, &cmd);
297 if (err)
298 return err;
299
300 rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
301 attr->ip_id = le16_to_cpu(rsp_params->ip_id);
302 attr->major_rev = rsp_params->major_rev;
303 attr->minor_rev = rsp_params->minor_rev;
304 attr->era = rsp_params->era;
305 attr->deco_num = rsp_params->deco_num;
306 attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
307 attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
308 attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
309 attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
310 attr->crc_acc_num = rsp_params->crc_acc_num;
311 attr->pk_acc_num = rsp_params->pk_acc_num;
312 attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
313 attr->rng_acc_num = rsp_params->rng_acc_num;
314 attr->md_acc_num = rsp_params->md_acc_num;
315 attr->arc4_acc_num = rsp_params->arc4_acc_num;
316 attr->des_acc_num = rsp_params->des_acc_num;
317 attr->aes_acc_num = rsp_params->aes_acc_num;
318 attr->ccha_acc_num = rsp_params->ccha_acc_num;
319 attr->ptha_acc_num = rsp_params->ptha_acc_num;
320
321 return 0;
322}
323
324/**
325 * dpseci_get_api_version() - Get Data Path SEC Interface API version
326 * @mc_io: Pointer to MC portal's I/O object
327 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
328 * @major_ver: Major version of data path sec API
329 * @minor_ver: Minor version of data path sec API
330 *
331 * Return: '0' on success, error code otherwise
332 */
333int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
334 u16 *major_ver, u16 *minor_ver)
335{
336 struct fsl_mc_command cmd = { 0 };
337 struct dpseci_rsp_get_api_version *rsp_params;
338 int err;
339
340 cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
341 cmd_flags, 0);
342 err = mc_send_command(mc_io, &cmd);
343 if (err)
344 return err;
345
346 rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
347 *major_ver = le16_to_cpu(rsp_params->major);
348 *minor_ver = le16_to_cpu(rsp_params->minor);
349
350 return 0;
351}
352
353/**
354 * dpseci_set_congestion_notification() - Set congestion group
355 * notification configuration
356 * @mc_io: Pointer to MC portal's I/O object
357 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
358 * @token: Token of DPSECI object
359 * @cfg: congestion notification configuration
360 *
361 * Return: '0' on success, error code otherwise
362 */
363int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
364 u16 token, const struct dpseci_congestion_notification_cfg *cfg)
365{
366 struct fsl_mc_command cmd = { 0 };
367 struct dpseci_cmd_congestion_notification *cmd_params;
368
369 cmd.header = mc_encode_cmd_header(
370 DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
371 cmd_flags,
372 token);
373 cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
374 cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
375 cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
376 cmd_params->priority = cfg->dest_cfg.priority;
377 dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
378 cfg->dest_cfg.dest_type);
379 dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
380 cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
381 cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
382 cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
383 cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
384
385 return mc_send_command(mc_io, &cmd);
386}
387
388/**
389 * dpseci_get_congestion_notification() - Get congestion group notification
390 * configuration
391 * @mc_io: Pointer to MC portal's I/O object
392 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
393 * @token: Token of DPSECI object
394 * @cfg: congestion notification configuration
395 *
396 * Return: '0' on success, error code otherwise
397 */
398int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
399 u16 token, struct dpseci_congestion_notification_cfg *cfg)
400{
401 struct fsl_mc_command cmd = { 0 };
402 struct dpseci_cmd_congestion_notification *rsp_params;
403 int err;
404
405 cmd.header = mc_encode_cmd_header(
406 DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
407 cmd_flags,
408 token);
409 err = mc_send_command(mc_io, &cmd);
410 if (err)
411 return err;
412
413 rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
414 cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
415 cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
416 cfg->dest_cfg.priority = rsp_params->priority;
417 cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
418 CGN_DEST_TYPE);
419 cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
420 cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
421 cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
422 cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
423 cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
424
425 return 0;
426}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 000000000000..4550e134d166
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,333 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright 2013-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP
5 */
6#ifndef _DPSECI_H_
7#define _DPSECI_H_
8
9/*
10 * Data Path SEC Interface API
11 * Contains initialization APIs and runtime control APIs for DPSECI
12 */
13
14struct fsl_mc_io;
15
16/**
17 * General DPSECI macros
18 */
19
20/**
21 * Maximum number of Tx/Rx queues per DPSECI object
22 */
23#define DPSECI_MAX_QUEUE_NUM 16
24
25/**
26 * All queues considered; see dpseci_set_rx_queue()
27 */
28#define DPSECI_ALL_QUEUES (u8)(-1)
29
30int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
31 u16 *token);
32
33int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
34
35/**
36 * Enable the Congestion Group support
37 */
38#define DPSECI_OPT_HAS_CG 0x000020
39
40/**
41 * struct dpseci_cfg - Structure representing DPSECI configuration
42 * @options: Any combination of the following flags:
43 * DPSECI_OPT_HAS_CG
44 * @num_tx_queues: num of queues towards the SEC
45 * @num_rx_queues: num of queues back from the SEC
46 * @priorities: Priorities for the SEC hardware processing;
47 * each place in the array is the priority of the tx queue
48 * towards the SEC;
49 * valid priorities are configured with values 1-8;
50 */
51struct dpseci_cfg {
52 u32 options;
53 u8 num_tx_queues;
54 u8 num_rx_queues;
55 u8 priorities[DPSECI_MAX_QUEUE_NUM];
56};
57
58int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
59
60int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
61
62int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
63 int *en);
64
65/**
66 * struct dpseci_attr - Structure representing DPSECI attributes
67 * @id: DPSECI object ID
68 * @num_tx_queues: number of queues towards the SEC
69 * @num_rx_queues: number of queues back from the SEC
70 * @options: any combination of the following flags:
71 * DPSECI_OPT_HAS_CG
72 */
73struct dpseci_attr {
74 int id;
75 u8 num_tx_queues;
76 u8 num_rx_queues;
77 u32 options;
78};
79
80int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
81 struct dpseci_attr *attr);
82
83/**
84 * enum dpseci_dest - DPSECI destination types
85 * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
86 * and does not generate FQDAN notifications; user is expected to dequeue
87 * from the queue based on polling or other user-defined method
88 * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
89 * notifications to the specified DPIO; user is expected to dequeue from
90 * the queue only after notification is received
91 * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
92 * FQDAN notifications, but is connected to the specified DPCON object;
93 * user is expected to dequeue from the DPCON channel
94 */
95enum dpseci_dest {
96 DPSECI_DEST_NONE = 0,
97 DPSECI_DEST_DPIO,
98 DPSECI_DEST_DPCON
99};
100
101/**
102 * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
103 * @dest_type: Destination type
104 * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
105 * @priority: Priority selection within the DPIO or DPCON channel; valid values
106 * are 0-1 or 0-7, depending on the number of priorities in that channel;
107 * not relevant for 'DPSECI_DEST_NONE' option
108 */
109struct dpseci_dest_cfg {
110 enum dpseci_dest dest_type;
111 int dest_id;
112 u8 priority;
113};
114
115/**
116 * DPSECI queue modification options
117 */
118
119/**
120 * Select to modify the user's context associated with the queue
121 */
122#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
123
124/**
125 * Select to modify the queue's destination
126 */
127#define DPSECI_QUEUE_OPT_DEST 0x00000002
128
129/**
130 * Select to modify the queue's order preservation
131 */
132#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
133
134/**
135 * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
136 * @options: Flags representing the suggested modifications to the queue;
137 * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
138 * @order_preservation_en: order preservation configuration for the rx queue
139 * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
140 * @user_ctx: User context value provided in the frame descriptor of each
141 * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
142 * in 'options'
143 * @dest_cfg: Queue destination parameters; valid only if
144 * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
145 */
146struct dpseci_rx_queue_cfg {
147 u32 options;
148 int order_preservation_en;
149 u64 user_ctx;
150 struct dpseci_dest_cfg dest_cfg;
151};
152
153int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
154 u8 queue, const struct dpseci_rx_queue_cfg *cfg);
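A sketch of the DPSECI_ALL_QUEUES usage mentioned above, pointing every Rx queue at one DPIO channel; my_dpio_id and my_user_ctx are hypothetical values a caller would already hold:

	struct dpseci_rx_queue_cfg rx_cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = my_user_ctx,
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = my_dpio_id,
			.priority = 0,
		},
	};

	err = dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);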
155
156/**
157 * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
158 * @user_ctx: User context value provided in the frame descriptor of each
159 * dequeued frame
160 * @order_preservation_en: Status of the order preservation configuration on the
161 * queue
162 * @dest_cfg: Queue destination configuration
163 * @fqid: Virtual FQID value to be used for dequeue operations
164 */
165struct dpseci_rx_queue_attr {
166 u64 user_ctx;
167 int order_preservation_en;
168 struct dpseci_dest_cfg dest_cfg;
169 u32 fqid;
170};
171
172int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
173 u8 queue, struct dpseci_rx_queue_attr *attr);
174
175/**
176 * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
177 * @fqid: Virtual FQID to be used for sending frames to SEC hardware
178 * @priority: SEC hardware processing priority for the queue
179 */
180struct dpseci_tx_queue_attr {
181 u32 fqid;
182 u8 priority;
183};
184
185int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
186 u8 queue, struct dpseci_tx_queue_attr *attr);
187
188/**
189 * struct dpseci_sec_attr - Structure representing attributes of the SEC
190 * hardware accelerator
191 * @ip_id: ID for SEC
192 * @major_rev: Major revision number for SEC
193 * @minor_rev: Minor revision number for SEC
194 * @era: SEC Era
195 * @deco_num: The number of copies of the DECO that are implemented in this
196 * version of SEC
197 * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
198 * version of SEC
199 * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
200 * version of SEC
201 * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
202 * implemented in this version of SEC
203 * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
204 * implemented in this version of SEC
205 * @crc_acc_num: The number of copies of the CRC module that are implemented in
206 * this version of SEC
207 * @pk_acc_num: The number of copies of the Public Key module that are
208 * implemented in this version of SEC
209 * @kasumi_acc_num: The number of copies of the Kasumi module that are
210 * implemented in this version of SEC
211 * @rng_acc_num: The number of copies of the Random Number Generator that are
212 * implemented in this version of SEC
213 * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
214 * implemented in this version of SEC
215 * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
216 * in this version of SEC
217 * @des_acc_num: The number of copies of the DES module that are implemented in
218 * this version of SEC
219 * @aes_acc_num: The number of copies of the AES module that are implemented in
220 * this version of SEC
221 * @ccha_acc_num: The number of copies of the ChaCha20 module that are
222 * implemented in this version of SEC.
223 * @ptha_acc_num: The number of copies of the Poly1305 module that are
224 * implemented in this version of SEC.
 225 */
226struct dpseci_sec_attr {
227 u16 ip_id;
228 u8 major_rev;
229 u8 minor_rev;
230 u8 era;
231 u8 deco_num;
232 u8 zuc_auth_acc_num;
233 u8 zuc_enc_acc_num;
234 u8 snow_f8_acc_num;
235 u8 snow_f9_acc_num;
236 u8 crc_acc_num;
237 u8 pk_acc_num;
238 u8 kasumi_acc_num;
239 u8 rng_acc_num;
240 u8 md_acc_num;
241 u8 arc4_acc_num;
242 u8 des_acc_num;
243 u8 aes_acc_num;
244 u8 ccha_acc_num;
245 u8 ptha_acc_num;
246};
247
248int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
249 struct dpseci_sec_attr *attr);
250
251int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
252 u16 *major_ver, u16 *minor_ver);
253
254/**
255 * enum dpseci_congestion_unit - DPSECI congestion units
256 * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
257 * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
258 */
259enum dpseci_congestion_unit {
260 DPSECI_CONGESTION_UNIT_BYTES = 0,
261 DPSECI_CONGESTION_UNIT_FRAMES
262};
263
264/**
265 * CSCN message is written to message_iova once entering a
266 * congestion state (see 'threshold_entry')
267 */
268#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
269
270/**
271 * CSCN message is written to message_iova once exiting a
272 * congestion state (see 'threshold_exit')
273 */
274#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
275
276/**
277 * CSCN write will attempt to allocate into a cache (coherent write);
278 * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
279 */
280#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
281
282/**
283 * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
284 * DPIO/DPCON's WQ channel once entering a congestion state
285 * (see 'threshold_entry')
286 */
287#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
288
289/**
290 * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
291 * DPIO/DPCON's WQ channel once exiting a congestion state
292 * (see 'threshold_exit')
293 */
294#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
295
296/**
297 * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
298 * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
299 * (if enabled)
300 */
301#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
302
303/**
304 * struct dpseci_congestion_notification_cfg - congestion notification
305 * configuration
306 * @units: units type
 307 * @threshold_entry: above this threshold we enter a congestion state;
 308 *	set it to '0' to disable it
309 * @threshold_exit: below this threshold we exit the congestion state.
310 * @message_ctx: The context that will be part of the CSCN message
311 * @message_iova: I/O virtual address (must be in DMA-able memory),
312 * must be 16B aligned;
 313 * @dest_cfg: CSCN can be sent to either the DPIO or DPCON WQ channel
314 * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
315 * values
316 */
317struct dpseci_congestion_notification_cfg {
318 enum dpseci_congestion_unit units;
319 u32 threshold_entry;
320 u32 threshold_exit;
321 u64 message_ctx;
322 u64 message_iova;
323 struct dpseci_dest_cfg dest_cfg;
324 u16 notification_mode;
325};
326
327int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
328 u16 token, const struct dpseci_congestion_notification_cfg *cfg);
329
330int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
331 u16 token, struct dpseci_congestion_notification_cfg *cfg);
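A sketch tying these flags to the congestion thresholds defined earlier in caamalg_qi2.h; priv and token are assumed to be the usual driver private data and open-session token:

	struct dpseci_congestion_notification_cfg cong_cfg = {
		.units = DPSECI_CONGESTION_UNIT_BYTES,
		.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH,
		.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH,
		.message_iova = priv->cscn_dma,	/* 16B-aligned, DMA-able */
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
				     DPSECI_CGN_MODE_COHERENT_WRITE,
	};

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token, &cong_cfg);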
332
333#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 000000000000..6ab77ead6e3d
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,149 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright 2013-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2018 NXP
5 */
6
7#ifndef _DPSECI_CMD_H_
8#define _DPSECI_CMD_H_
9
10/* DPSECI Version */
11#define DPSECI_VER_MAJOR 5
12#define DPSECI_VER_MINOR 3
13
14#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
15#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
16
17/* Command versioning */
18#define DPSECI_CMD_BASE_VERSION 1
19#define DPSECI_CMD_BASE_VERSION_V2 2
20#define DPSECI_CMD_ID_OFFSET 4
21
22#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
23 DPSECI_CMD_BASE_VERSION)
24
25#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
26 DPSECI_CMD_BASE_VERSION_V2)
27
28/* Command IDs */
29#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
30#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
31#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
32
33#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
34#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
35#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
36#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
37
38#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
39#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
40#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
41#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
42#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
43#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
44
45/* Macros for accessing command fields smaller than 1 byte */
46#define DPSECI_MASK(field) \
47 GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
48 DPSECI_##field##_SHIFT)
49
50#define dpseci_set_field(var, field, val) \
51 ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
52
53#define dpseci_get_field(var, field) \
54 (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
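An illustrative expansion of the field helpers, using the DEST_TYPE field defined below (shift 0, size 4); cmd and rsp are hypothetical command/response structs:

	/*
	 *   dpseci_set_field(cmd->dest_type, DEST_TYPE, DPSECI_DEST_DPIO);
	 *     expands to: cmd->dest_type |= (DPSECI_DEST_DPIO << 0) & GENMASK(3, 0);
	 *
	 *   type = dpseci_get_field(rsp->dest_type, DEST_TYPE);
	 *     expands to: type = (rsp->dest_type & GENMASK(3, 0)) >> 0;
	 */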
55
56struct dpseci_cmd_open {
57 __le32 dpseci_id;
58};
59
60#define DPSECI_ENABLE_SHIFT 0
61#define DPSECI_ENABLE_SIZE 1
62
63struct dpseci_rsp_is_enabled {
64 u8 is_enabled;
65};
66
67struct dpseci_rsp_get_attributes {
68 __le32 id;
69 __le32 pad0;
70 u8 num_tx_queues;
71 u8 num_rx_queues;
72 u8 pad1[6];
73 __le32 options;
74};
75
76#define DPSECI_DEST_TYPE_SHIFT 0
77#define DPSECI_DEST_TYPE_SIZE 4
78
79#define DPSECI_ORDER_PRESERVATION_SHIFT 0
80#define DPSECI_ORDER_PRESERVATION_SIZE 1
81
82struct dpseci_cmd_queue {
83 __le32 dest_id;
84 u8 priority;
85 u8 queue;
86 u8 dest_type;
87 u8 pad;
88 __le64 user_ctx;
89 union {
90 __le32 options;
91 __le32 fqid;
92 };
93 u8 order_preservation_en;
94};
95
96struct dpseci_rsp_get_tx_queue {
97 __le32 pad;
98 __le32 fqid;
99 u8 priority;
100};
101
102struct dpseci_rsp_get_sec_attr {
103 __le16 ip_id;
104 u8 major_rev;
105 u8 minor_rev;
106 u8 era;
107 u8 pad0[3];
108 u8 deco_num;
109 u8 zuc_auth_acc_num;
110 u8 zuc_enc_acc_num;
111 u8 pad1;
112 u8 snow_f8_acc_num;
113 u8 snow_f9_acc_num;
114 u8 crc_acc_num;
115 u8 pad2;
116 u8 pk_acc_num;
117 u8 kasumi_acc_num;
118 u8 rng_acc_num;
119 u8 pad3;
120 u8 md_acc_num;
121 u8 arc4_acc_num;
122 u8 des_acc_num;
123 u8 aes_acc_num;
124 u8 ccha_acc_num;
125 u8 ptha_acc_num;
126};
127
128struct dpseci_rsp_get_api_version {
129 __le16 major;
130 __le16 minor;
131};
132
133#define DPSECI_CGN_DEST_TYPE_SHIFT 0
134#define DPSECI_CGN_DEST_TYPE_SIZE 4
135#define DPSECI_CGN_UNITS_SHIFT 4
136#define DPSECI_CGN_UNITS_SIZE 2
137
138struct dpseci_cmd_congestion_notification {
139 __le32 dest_id;
140 __le16 notification_mode;
141 u8 priority;
142 u8 options;
143 __le64 message_iova;
144 __le64 message_ctx;
145 __le32 threshold_entry;
146 __le32 threshold_exit;
147};
148
149#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 8da88beb1abb..7e8d690f2827 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -108,6 +108,54 @@ static const struct {
108 { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, 108 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
109}; 109};
110 110
111static const struct {
112 u8 value;
113 const char *error_text;
114} qi_error_list[] = {
115 { 0x1F, "Job terminated by FQ or ICID flush" },
116 { 0x20, "FD format error"},
117 { 0x21, "FD command format error"},
118 { 0x23, "FL format error"},
119 { 0x25, "CRJD specified in FD, but not enabled in FLC"},
120 { 0x30, "Max. buffer size too small"},
121 { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
 122 { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
123 { 0x33, "Size over/underflow (allocate mode)"},
124 { 0x34, "Size over/underflow (reuse mode)"},
 125 { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
 126 { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
127 { 0x41, "SBC frame format not supported (allocate mode)"},
128 { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
129 { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
130 { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
131 { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
132 { 0x46, "Annotation length exceeds offset (reuse mode)"},
133 { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
134 { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
 135 { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
136 { 0x51, "Unsupported IF reuse mode"},
137 { 0x52, "Unsupported FL use mode"},
138 { 0x53, "Unsupported RJD use mode"},
139 { 0x54, "Unsupported inline descriptor use mode"},
140 { 0xC0, "Table buffer pool 0 depletion"},
141 { 0xC1, "Table buffer pool 1 depletion"},
142 { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
143 { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
144 { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
145 { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
146 { 0xD0, "FLC read error"},
147 { 0xD1, "FL read error"},
148 { 0xD2, "FL write error"},
149 { 0xD3, "OF SGT write error"},
150 { 0xD4, "PTA read error"},
151 { 0xD5, "PTA write error"},
152 { 0xD6, "OF SGT F-bit write error"},
153 { 0xD7, "ASA write error"},
154 { 0xE1, "FLC[ICR]=0 ICID error"},
155 { 0xE2, "FLC[ICR]=1 ICID error"},
 156 { 0xE4, "Source of ICID flush not trusted (BDI = 0)"},
157};
158
111static const char * const cha_id_list[] = { 159static const char * const cha_id_list[] = {
112 "", 160 "",
113 "AES", 161 "AES",
@@ -236,6 +284,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
236 status, error, idx_str, idx, err_str, err_err_code); 284 status, error, idx_str, idx, err_str, err_err_code);
237} 285}
238 286
287static void report_qi_status(struct device *qidev, const u32 status,
288 const char *error)
289{
290 u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
291 const char *err_str = "unidentified error value 0x";
292 char err_err_code[3] = { 0 };
293 int i;
294
295 for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
296 if (qi_error_list[i].value == err_id)
297 break;
298
299 if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
300 err_str = qi_error_list[i].error_text;
301 else
302 snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
303
304 dev_err(qidev, "%08x: %s: %s%s\n",
305 status, error, err_str, err_err_code);
306}
307
239static void report_jr_status(struct device *jrdev, const u32 status, 308static void report_jr_status(struct device *jrdev, const u32 status,
240 const char *error) 309 const char *error)
241{ 310{
@@ -250,7 +319,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
250 status, error, __func__); 319 status, error, __func__);
251} 320}
252 321
253void caam_jr_strstatus(struct device *jrdev, u32 status) 322void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
254{ 323{
255 static const struct stat_src { 324 static const struct stat_src {
256 void (*report_ssed)(struct device *jrdev, const u32 status, 325 void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
262 { report_ccb_status, "CCB" }, 331 { report_ccb_status, "CCB" },
263 { report_jump_status, "Jump" }, 332 { report_jump_status, "Jump" },
264 { report_deco_status, "DECO" }, 333 { report_deco_status, "DECO" },
265 { NULL, "Queue Manager Interface" }, 334 { report_qi_status, "Queue Manager Interface" },
266 { report_jr_status, "Job Ring" }, 335 { report_jr_status, "Job Ring" },
267 { report_cond_code_status, "Condition Code" }, 336 { report_cond_code_status, "Condition Code" },
268 { NULL, NULL }, 337 { NULL, NULL },
@@ -288,4 +357,8 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
288 else 357 else
289 dev_err(jrdev, "%d: unknown error source\n", ssrc); 358 dev_err(jrdev, "%d: unknown error source\n", ssrc);
290} 359}
291EXPORT_SYMBOL(caam_jr_strstatus); 360EXPORT_SYMBOL(caam_strstatus);
361
362MODULE_LICENSE("GPL");
363MODULE_DESCRIPTION("FSL CAAM error reporting");
364MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 5aa332bac4b0..67ea94079837 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,7 +8,11 @@
8#ifndef CAAM_ERROR_H 8#ifndef CAAM_ERROR_H
9#define CAAM_ERROR_H 9#define CAAM_ERROR_H
10#define CAAM_ERROR_STR_MAX 302 10#define CAAM_ERROR_STR_MAX 302
11void caam_jr_strstatus(struct device *jrdev, u32 status); 11
12void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
13
14#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
15#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
12 16
13void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 17void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
14 int rowsize, int groupsize, struct scatterlist *sg, 18 int rowsize, int groupsize, struct scatterlist *sg,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd72016ffe..d50085a03597 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * CAAM/SEC 4.x transport/backend driver 3 * CAAM/SEC 4.x transport/backend driver
3 * JobR backend functionality 4 * JobR backend functionality
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42c93..b84e6c8b1e13 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -84,13 +84,6 @@ static u64 times_congested;
84#endif 84#endif
85 85
86/* 86/*
87 * CPU from where the module initialised. This is required because QMan driver
88 * requires CGRs to be removed from same CPU from where they were originally
89 * allocated.
90 */
91static int mod_init_cpu;
92
93/*
94 * This is a a cache of buffers, from which the users of CAAM QI driver 87 * This is a a cache of buffers, from which the users of CAAM QI driver
95 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than 88 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
96 * doing malloc on the hotpath. 89 * doing malloc on the hotpath.
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
492} 485}
493EXPORT_SYMBOL(caam_drv_ctx_rel); 486EXPORT_SYMBOL(caam_drv_ctx_rel);
494 487
495int caam_qi_shutdown(struct device *qidev) 488void caam_qi_shutdown(struct device *qidev)
496{ 489{
497 int i, ret; 490 int i;
498 struct caam_qi_priv *priv = dev_get_drvdata(qidev); 491 struct caam_qi_priv *priv = dev_get_drvdata(qidev);
499 const cpumask_t *cpus = qman_affine_cpus(); 492 const cpumask_t *cpus = qman_affine_cpus();
500 struct cpumask old_cpumask = current->cpus_allowed;
501 493
502 for_each_cpu(i, cpus) { 494 for_each_cpu(i, cpus) {
503 struct napi_struct *irqtask; 495 struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
510 dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); 502 dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
511 } 503 }
512 504
513 /* 505 qman_delete_cgr_safe(&priv->cgr);
514 * QMan driver requires CGRs to be deleted from same CPU from where they 506 qman_release_cgrid(priv->cgr.cgrid);
515 * were instantiated. Hence we get the module removal execute from the
516 * same CPU from where it was originally inserted.
517 */
518 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
519
520 ret = qman_delete_cgr(&priv->cgr);
521 if (ret)
522 dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
523 else
524 qman_release_cgrid(priv->cgr.cgrid);
525 507
526 kmem_cache_destroy(qi_cache); 508 kmem_cache_destroy(qi_cache);
527 509
528 /* Now that we're done with the CGRs, restore the cpus allowed mask */
529 set_cpus_allowed_ptr(current, &old_cpumask);
530
531 platform_device_unregister(priv->qi_pdev); 510 platform_device_unregister(priv->qi_pdev);
532 return ret;
533} 511}
534 512
535static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) 513static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
718 struct device *ctrldev = &caam_pdev->dev, *qidev; 696 struct device *ctrldev = &caam_pdev->dev, *qidev;
719 struct caam_drv_private *ctrlpriv; 697 struct caam_drv_private *ctrlpriv;
720 const cpumask_t *cpus = qman_affine_cpus(); 698 const cpumask_t *cpus = qman_affine_cpus();
721 struct cpumask old_cpumask = current->cpus_allowed;
722 static struct platform_device_info qi_pdev_info = { 699 static struct platform_device_info qi_pdev_info = {
723 .name = "caam_qi", 700 .name = "caam_qi",
724 .id = PLATFORM_DEVID_NONE 701 .id = PLATFORM_DEVID_NONE
725 }; 702 };
726 703
727 /*
728 * QMAN requires CGRs to be removed from same CPU+portal from where it
729 * was originally allocated. Hence we need to note down the
730 * initialisation CPU and use the same CPU for module exit.
731 * We select the first CPU to from the list of portal owning CPUs.
732 * Then we pin module init to this CPU.
733 */
734 mod_init_cpu = cpumask_first(cpus);
735 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
736
737 qi_pdev_info.parent = ctrldev; 704 qi_pdev_info.parent = ctrldev;
738 qi_pdev_info.dma_mask = dma_get_mask(ctrldev); 705 qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
739 qi_pdev = platform_device_register_full(&qi_pdev_info); 706 qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
795 return -ENOMEM; 762 return -ENOMEM;
796 } 763 }
797 764
798 /* Done with the CGRs; restore the cpus allowed mask */
799 set_cpus_allowed_ptr(current, &old_cpumask);
800#ifdef CONFIG_DEBUG_FS 765#ifdef CONFIG_DEBUG_FS
801 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, 766 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
802 &times_congested, &caam_fops_u64_ro); 767 &times_congested, &caam_fops_u64_ro);
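A minimal sketch of the teardown pattern that both qi.c hunks above converge on (illustrative only; the function name below is made up, but qman_delete_cgr_safe() and qman_release_cgrid() are the QMan driver interfaces the patch calls). Because the _safe variant runs the deletion on the CPU that owns the CGR internally, neither caam_qi_init() nor caam_qi_shutdown() has to pin the current task with set_cpus_allowed_ptr() any more, which is why the mod_init_cpu bookkeeping disappears entirely.

/* Illustrative only: CGR teardown after this patch. The QMan driver
 * satisfies the "delete from the owning CPU" requirement internally,
 * so the caller no longer saves and restores its cpumask.
 */
static void example_caam_cgr_teardown(struct caam_qi_priv *priv)
{
	qman_delete_cgr_safe(&priv->cgr);	/* executes on the CGR's owning CPU */
	qman_release_cgrid(priv->cgr.cgrid);	/* return the CGR id to the allocator */
}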
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57072..f93c9c7ed430 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -62,7 +62,6 @@ typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
62enum optype { 62enum optype {
63 ENCRYPT, 63 ENCRYPT,
64 DECRYPT, 64 DECRYPT,
65 GIVENCRYPT,
66 NUM_OP 65 NUM_OP
67}; 66};
68 67
@@ -174,7 +173,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
174void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx); 173void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
175 174
176int caam_qi_init(struct platform_device *pdev); 175int caam_qi_init(struct platform_device *pdev);
177int caam_qi_shutdown(struct device *dev); 176void caam_qi_shutdown(struct device *dev);
178 177
179/** 178/**
180 * qi_cache_alloc - Allocate buffers from CAAM-QI cache 179 * qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4fb91ba39c36..457815f965c0 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -70,22 +70,22 @@
70extern bool caam_little_end; 70extern bool caam_little_end;
71extern bool caam_imx; 71extern bool caam_imx;
72 72
73#define caam_to_cpu(len) \ 73#define caam_to_cpu(len) \
74static inline u##len caam##len ## _to_cpu(u##len val) \ 74static inline u##len caam##len ## _to_cpu(u##len val) \
75{ \ 75{ \
76 if (caam_little_end) \ 76 if (caam_little_end) \
77 return le##len ## _to_cpu(val); \ 77 return le##len ## _to_cpu((__force __le##len)val); \
78 else \ 78 else \
79 return be##len ## _to_cpu(val); \ 79 return be##len ## _to_cpu((__force __be##len)val); \
80} 80}
81 81
82#define cpu_to_caam(len) \ 82#define cpu_to_caam(len) \
83static inline u##len cpu_to_caam##len(u##len val) \ 83static inline u##len cpu_to_caam##len(u##len val) \
84{ \ 84{ \
85 if (caam_little_end) \ 85 if (caam_little_end) \
86 return cpu_to_le##len(val); \ 86 return (__force u##len)cpu_to_le##len(val); \
87 else \ 87 else \
88 return cpu_to_be##len(val); \ 88 return (__force u##len)cpu_to_be##len(val); \
89} 89}
90 90
91caam_to_cpu(16) 91caam_to_cpu(16)
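For readers who prefer not to expand the macros mentally, this is what the 32-bit conversion helper looks like after the change (hand-expanded here purely for illustration; the real definition still comes from caam_to_cpu(32) above). The __force casts only tell sparse that the plain u##len value really carries little- or big-endian data; they silence the bitwise-type warnings and generate no code.

static inline u32 caam32_to_cpu(u32 val)
{
	if (caam_little_end)
		return le32_to_cpu((__force __le32)val);	/* register data is little-endian */
	else
		return be32_to_cpu((__force __be32)val);	/* register data is big-endian */
}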
@@ -633,6 +633,8 @@ struct caam_job_ring {
633#define JRSTA_DECOERR_INVSIGN 0x86 633#define JRSTA_DECOERR_INVSIGN 0x86
634#define JRSTA_DECOERR_DSASIGN 0x87 634#define JRSTA_DECOERR_DSASIGN 0x87
635 635
636#define JRSTA_QIERR_ERROR_MASK 0x00ff
637
636#define JRSTA_CCBERR_JUMP 0x08000000 638#define JRSTA_CCBERR_JUMP 0x08000000
637#define JRSTA_CCBERR_INDEX_MASK 0xff00 639#define JRSTA_CCBERR_INDEX_MASK 0xff00
638#define JRSTA_CCBERR_INDEX_SHIFT 8 640#define JRSTA_CCBERR_INDEX_SHIFT 8
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4df745f..b3e1aaaeffea 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1/* 2/*
2 * Copyright 2013-2016 Freescale Semiconductor, Inc. 3 * Copyright 2013-2016 Freescale Semiconductor, Inc.
3 * Copyright 2016-2017 NXP 4 * Copyright 2016-2017 NXP
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Freescale Semiconductor nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") as published by the Free Software
19 * Foundation, either version 2 of that License or (at your option) any
20 * later version.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
23 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
26 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 5 */
33 6
34#ifndef __SG_SW_QM_H 7#ifndef __SG_SW_QM_H
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index b5b4c12179df..c9378402a5f8 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -1,35 +1,7 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1/* 2/*
2 * Copyright 2015-2016 Freescale Semiconductor, Inc. 3 * Copyright 2015-2016 Freescale Semiconductor, Inc.
3 * Copyright 2017 NXP 4 * Copyright 2017 NXP
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the names of the above-listed copyright holders nor the
13 * names of any contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") as published by the Free Software
19 * Foundation, either version 2 of that License or (at your option) any
20 * later version.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */ 5 */
34 6
35#ifndef _SG_SW_QM2_H_ 7#ifndef _SG_SW_QM2_H_
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index b0ba4331944b..ca549c5dc08e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -308,21 +308,11 @@ void do_request_cleanup(struct cpt_vf *cptvf,
308 } 308 }
309 } 309 }
310 310
311 if (info->scatter_components) 311 kzfree(info->scatter_components);
312 kzfree(info->scatter_components); 312 kzfree(info->gather_components);
313 313 kzfree(info->out_buffer);
314 if (info->gather_components) 314 kzfree(info->in_buffer);
315 kzfree(info->gather_components); 315 kzfree((void *)info->completion_addr);
316
317 if (info->out_buffer)
318 kzfree(info->out_buffer);
319
320 if (info->in_buffer)
321 kzfree(info->in_buffer);
322
323 if (info->completion_addr)
324 kzfree((void *)info->completion_addr);
325
326 kzfree(info); 316 kzfree(info);
327} 317}
328 318
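The simplification above works because kzfree(), like kfree(), returns immediately when handed a NULL (or ZERO_SIZE_PTR) pointer, so the per-buffer NULL checks were pure noise. A one-line illustration (the wrapper name is hypothetical):

/* Illustrative only: kzfree() already bails out on NULL/ZERO_SIZE_PTR,
 * so unconditional calls are safe.
 */
static void example_cleanup(void *maybe_null)
{
	kzfree(maybe_null);	/* fine even if maybe_null == NULL */
}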
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index 45b7379e8e30..e12954791673 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -7,3 +7,6 @@ n5pf-objs := nitrox_main.o \
7 nitrox_hal.o \ 7 nitrox_hal.o \
8 nitrox_reqmgr.o \ 8 nitrox_reqmgr.o \
9 nitrox_algs.o 9 nitrox_algs.o
10
11n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
12n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 312f72801af6..863143a8336b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -12,32 +12,15 @@ void crypto_free_context(void *ctx);
12struct nitrox_device *nitrox_get_first_device(void); 12struct nitrox_device *nitrox_get_first_device(void);
13void nitrox_put_device(struct nitrox_device *ndev); 13void nitrox_put_device(struct nitrox_device *ndev);
14 14
15void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
16int nitrox_pf_init_isr(struct nitrox_device *ndev);
17
18int nitrox_common_sw_init(struct nitrox_device *ndev); 15int nitrox_common_sw_init(struct nitrox_device *ndev);
19void nitrox_common_sw_cleanup(struct nitrox_device *ndev); 16void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
20 17
21void pkt_slc_resp_handler(unsigned long data); 18void pkt_slc_resp_tasklet(unsigned long data);
22int nitrox_process_se_request(struct nitrox_device *ndev, 19int nitrox_process_se_request(struct nitrox_device *ndev,
23 struct se_crypto_request *req, 20 struct se_crypto_request *req,
24 completion_t cb, 21 completion_t cb,
25 struct skcipher_request *skreq); 22 struct skcipher_request *skreq);
26void backlog_qflush_work(struct work_struct *work); 23void backlog_qflush_work(struct work_struct *work);
27 24
28void nitrox_config_emu_unit(struct nitrox_device *ndev);
29void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
30void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
31void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
32void nitrox_config_nps_unit(struct nitrox_device *ndev);
33void nitrox_config_pom_unit(struct nitrox_device *ndev);
34void nitrox_config_rand_unit(struct nitrox_device *ndev);
35void nitrox_config_efl_unit(struct nitrox_device *ndev);
36void nitrox_config_bmi_unit(struct nitrox_device *ndev);
37void nitrox_config_bmo_unit(struct nitrox_device *ndev);
38void nitrox_config_lbc_unit(struct nitrox_device *ndev);
39void invalidate_lbc(struct nitrox_device *ndev);
40void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
41void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
42 25
43#endif /* __NITROX_COMMON_H */ 26#endif /* __NITROX_COMMON_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 9dcb7fdbe0a7..1ad27b1a87c5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -7,9 +7,16 @@
7 7
8/* EMU clusters */ 8/* EMU clusters */
9#define NR_CLUSTERS 4 9#define NR_CLUSTERS 4
10/* Maximum cores per cluster,
11 * varies based on partname
12 */
10#define AE_CORES_PER_CLUSTER 20 13#define AE_CORES_PER_CLUSTER 20
11#define SE_CORES_PER_CLUSTER 16 14#define SE_CORES_PER_CLUSTER 16
12 15
16#define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
17#define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
18#define ZIP_MAX_CORES 5
19
13/* BIST registers */ 20/* BIST registers */
14#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000)) 21#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
15#define UCD_BIST_STATUS 0x12C0070 22#define UCD_BIST_STATUS 0x12C0070
@@ -111,6 +118,9 @@
111#define LBC_ELM_VF65_128_INT 0x120C000 118#define LBC_ELM_VF65_128_INT 0x120C000
112#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000 119#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
113 120
121#define RST_BOOT 0x10C1600
122#define FUS_DAT1 0x10C1408
123
114/* PEM registers */ 124/* PEM registers */
115#define PEM0_INT 0x1080428 125#define PEM0_INT 0x1080428
116 126
@@ -1082,4 +1092,105 @@ union lbc_inval_status {
1082 } s; 1092 } s;
1083}; 1093};
1084 1094
1095/**
1096 * struct rst_boot: RST Boot Register
1097 * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
1098 * is disabled
1099 * @jt_tst_mode: JTAG test mode
1100 * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
1101 * 0x1 = 1.8V
1102 * 0x2 = 2.5V
1103 * 0x4 = 3.3V
1104 * All other values are reserved
1105 * @pnr_mul: clock multiplier
1106 * @lboot: last boot cause mask, resets only with PLL_DC_OK
1107 * @rboot: determines whether core 0 remains in reset after
1108 * chip cold or warm or soft reset
1109 * @rboot_pin: read only access to REMOTE_BOOT pin
1110 */
1111union rst_boot {
1112 u64 value;
1113 struct {
1114#if (defined(__BIG_ENDIAN_BITFIELD))
1115 u64 raz_63 : 1;
1116 u64 jtcsrdis : 1;
1117 u64 raz_59_61 : 3;
1118 u64 jt_tst_mode : 1;
1119 u64 raz_40_57 : 18;
1120 u64 io_supply : 3;
1121 u64 raz_30_36 : 7;
1122 u64 pnr_mul : 6;
1123 u64 raz_12_23 : 12;
1124 u64 lboot : 10;
1125 u64 rboot : 1;
1126 u64 rboot_pin : 1;
1127#else
1128 u64 rboot_pin : 1;
1129 u64 rboot : 1;
1130 u64 lboot : 10;
1131 u64 raz_12_23 : 12;
1132 u64 pnr_mul : 6;
1133 u64 raz_30_36 : 7;
1134 u64 io_supply : 3;
1135 u64 raz_40_57 : 18;
1136 u64 jt_tst_mode : 1;
1137 u64 raz_59_61 : 3;
1138 u64 jtcsrdis : 1;
1139 u64 raz_63 : 1;
1140#endif
1141 };
1142};
1143
1144/**
1145 * struct fus_dat1: Fuse Data 1 Register
1146 * @pll_mul: main clock PLL multiplier hardware limit
1147 * @pll_half_dis: main clock PLL control
1148 * @efus_lck: efuse lockdown
1149 * @zip_info: ZIP information
1150 * @bar2_sz_conf: when zero, BAR2 size conforms to
1151 * PCIe specification
1152 * @efus_ign: efuse ignore
1153 * @nozip: ZIP disable
1154 * @pll_alt_matrix: select alternate PLL matrix
1155 * @pll_bwadj_denom: select CLKF denominator for
1156 * BWADJ value
1157 * @chip_id: chip ID
1158 */
1159union fus_dat1 {
1160 u64 value;
1161 struct {
1162#if (defined(__BIG_ENDIAN_BITFIELD))
1163 u64 raz_57_63 : 7;
1164 u64 pll_mul : 3;
1165 u64 pll_half_dis : 1;
1166 u64 raz_43_52 : 10;
1167 u64 efus_lck : 3;
1168 u64 raz_26_39 : 14;
1169 u64 zip_info : 5;
1170 u64 bar2_sz_conf : 1;
1171 u64 efus_ign : 1;
1172 u64 nozip : 1;
1173 u64 raz_11_17 : 7;
1174 u64 pll_alt_matrix : 1;
1175 u64 pll_bwadj_denom : 2;
1176 u64 chip_id : 8;
1177#else
1178 u64 chip_id : 8;
1179 u64 pll_bwadj_denom : 2;
1180 u64 pll_alt_matrix : 1;
1181 u64 raz_11_17 : 7;
1182 u64 nozip : 1;
1183 u64 efus_ign : 1;
1184 u64 bar2_sz_conf : 1;
1185 u64 zip_info : 5;
1186 u64 raz_26_39 : 14;
1187 u64 efus_lck : 3;
1188 u64 raz_43_52 : 10;
1189 u64 pll_half_dis : 1;
1190 u64 pll_mul : 3;
1191 u64 raz_57_63 : 7;
1192#endif
1193 };
1194};
1195
1085#endif /* __NITROX_CSR_H */ 1196#endif /* __NITROX_CSR_H */
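A short sketch of how these register unions are meant to be consumed (the helper name is hypothetical; nitrox_read_csr(), RST_BOOT and union rst_boot are the definitions added above): the raw 64-bit CSR is read into .value, and individual fields are then picked out of the endian-aware anonymous bitfield struct.

/* Illustrative only: read RST_BOOT and extract the clock multiplier. */
static unsigned int example_read_pnr_mul(struct nitrox_device *ndev)
{
	union rst_boot rst;

	rst.value = nitrox_read_csr(ndev, RST_BOOT);	/* raw 64-bit register */
	return rst.pnr_mul;				/* bitfield view of the same bits */
}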
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
new file mode 100644
index 000000000000..5f3cd5fafe04
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -0,0 +1,115 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/seq_file.h>
3#include <linux/debugfs.h>
4
5#include "nitrox_csr.h"
6#include "nitrox_dev.h"
7
8static int firmware_show(struct seq_file *s, void *v)
9{
10 struct nitrox_device *ndev = s->private;
11
12 seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
13 return 0;
14}
15
16static int firmware_open(struct inode *inode, struct file *file)
17{
18 return single_open(file, firmware_show, inode->i_private);
19}
20
21static const struct file_operations firmware_fops = {
22 .owner = THIS_MODULE,
23 .open = firmware_open,
24 .read = seq_read,
25 .llseek = seq_lseek,
26 .release = single_release,
27};
28
29static int device_show(struct seq_file *s, void *v)
30{
31 struct nitrox_device *ndev = s->private;
32
33 seq_printf(s, "NITROX [%d]\n", ndev->idx);
34 seq_printf(s, " Part Name: %s\n", ndev->hw.partname);
35 seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq);
36 seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id);
37 seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
38 seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n",
39 ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores);
40
41 return 0;
42}
43
44static int nitrox_open(struct inode *inode, struct file *file)
45{
46 return single_open(file, device_show, inode->i_private);
47}
48
49static const struct file_operations nitrox_fops = {
50 .owner = THIS_MODULE,
51 .open = nitrox_open,
52 .read = seq_read,
53 .llseek = seq_lseek,
54 .release = single_release,
55};
56
57static int stats_show(struct seq_file *s, void *v)
58{
59 struct nitrox_device *ndev = s->private;
60
61 seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx);
62 seq_printf(s, " Posted: %llu\n",
63 (u64)atomic64_read(&ndev->stats.posted));
64 seq_printf(s, " Completed: %llu\n",
65 (u64)atomic64_read(&ndev->stats.completed));
66 seq_printf(s, " Dropped: %llu\n",
67 (u64)atomic64_read(&ndev->stats.dropped));
68
69 return 0;
70}
71
72static int nitrox_stats_open(struct inode *inode, struct file *file)
73{
74 return single_open(file, stats_show, inode->i_private);
75}
76
77static const struct file_operations nitrox_stats_fops = {
78 .owner = THIS_MODULE,
79 .open = nitrox_stats_open,
80 .read = seq_read,
81 .llseek = seq_lseek,
82 .release = single_release,
83};
84
85void nitrox_debugfs_exit(struct nitrox_device *ndev)
86{
87 debugfs_remove_recursive(ndev->debugfs_dir);
88 ndev->debugfs_dir = NULL;
89}
90
91int nitrox_debugfs_init(struct nitrox_device *ndev)
92{
93 struct dentry *dir, *f;
94
95 dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
96 if (!dir)
97 return -ENOMEM;
98
99 ndev->debugfs_dir = dir;
100 f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
101 if (!f)
102 goto err;
103 f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
104 if (!f)
105 goto err;
106 f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
107 if (!f)
108 goto err;
109
110 return 0;
111
112err:
113 nitrox_debugfs_exit(ndev);
114 return -ENODEV;
115}
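Not something this patch does, but as an illustrative aside: each of the three show/open/fops triples above follows the standard single_open() pattern, which the seq_file header already wraps in a macro. Assuming DEFINE_SHOW_ATTRIBUTE from <linux/seq_file.h> (available since v4.16), the firmware attribute could equivalently be spelled as:

/* Illustrative alternative: expands to firmware_open() plus a
 * firmware_fops wired to firmware_show(), matching the hand-written
 * versions above.
 */
DEFINE_SHOW_ATTRIBUTE(firmware);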
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index af596455b420..283e252385fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -5,92 +5,123 @@
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/interrupt.h> 6#include <linux/interrupt.h>
7#include <linux/pci.h> 7#include <linux/pci.h>
8#include <linux/if.h>
8 9
9#define VERSION_LEN 32 10#define VERSION_LEN 32
10 11
12/**
13 * struct nitrox_cmdq - NITROX command queue
14 * @cmd_qlock: command queue lock
15 * @resp_qlock: response queue lock
16 * @backlog_qlock: backlog queue lock
17 * @ndev: NITROX device
18 * @response_head: submitted request list
19 * @backlog_head: backlog queue
20 * @dbell_csr_addr: doorbell register address for this queue
21 * @compl_cnt_csr_addr: completion count register address of the slc port
22 * @base: command queue base address
23 * @dma: dma address of the base
24 * @pending_count: request pending at device
25 * @backlog_count: backlog request count
26 * @write_idx: next write index for the command
27 * @instr_size: command size
28 * @qno: command queue number
29 * @qsize: command queue size
30 * @unalign_base: unaligned base address
31 * @unalign_dma: unaligned dma address
32 */
11struct nitrox_cmdq { 33struct nitrox_cmdq {
12 /* command queue lock */ 34 spinlock_t cmd_qlock;
13 spinlock_t cmdq_lock; 35 spinlock_t resp_qlock;
14 /* response list lock */ 36 spinlock_t backlog_qlock;
15 spinlock_t response_lock; 37
16 /* backlog list lock */ 38 struct nitrox_device *ndev;
17 spinlock_t backlog_lock;
18
19 /* request submitted to chip, in progress */
20 struct list_head response_head; 39 struct list_head response_head;
21 /* hw queue full, hold in backlog list */
22 struct list_head backlog_head; 40 struct list_head backlog_head;
23 41
24 /* doorbell address */
25 u8 __iomem *dbell_csr_addr; 42 u8 __iomem *dbell_csr_addr;
26 /* base address of the queue */ 43 u8 __iomem *compl_cnt_csr_addr;
27 u8 *head; 44 u8 *base;
45 dma_addr_t dma;
28 46
29 struct nitrox_device *ndev;
30 /* flush pending backlog commands */
31 struct work_struct backlog_qflush; 47 struct work_struct backlog_qflush;
32 48
33 /* requests posted waiting for completion */
34 atomic_t pending_count; 49 atomic_t pending_count;
35 /* requests in backlog queues */
36 atomic_t backlog_count; 50 atomic_t backlog_count;
37 51
38 int write_idx; 52 int write_idx;
39 /* command size 32B/64B */
40 u8 instr_size; 53 u8 instr_size;
41 u8 qno; 54 u8 qno;
42 u32 qsize; 55 u32 qsize;
43 56
44 /* unaligned addresses */ 57 u8 *unalign_base;
45 u8 *head_unaligned; 58 dma_addr_t unalign_dma;
46 dma_addr_t dma_unaligned;
47 /* dma address of the base */
48 dma_addr_t dma;
49}; 59};
50 60
61/**
62 * struct nitrox_hw - NITROX hardware information
63 * @partname: partname ex: CNN55xxx-xxx
64 * @fw_name: firmware version
65 * @freq: NITROX frequency
66 * @vendor_id: vendor ID
67 * @device_id: device ID
68 * @revision_id: revision ID
69 * @se_cores: number of symmetric cores
70 * @ae_cores: number of asymmetric cores
71 * @zip_cores: number of zip cores
72 */
51struct nitrox_hw { 73struct nitrox_hw {
52 /* firmware version */ 74 char partname[IFNAMSIZ * 2];
53 char fw_name[VERSION_LEN]; 75 char fw_name[VERSION_LEN];
54 76
77 int freq;
55 u16 vendor_id; 78 u16 vendor_id;
56 u16 device_id; 79 u16 device_id;
57 u8 revision_id; 80 u8 revision_id;
58 81
59 /* CNN55XX cores */
60 u8 se_cores; 82 u8 se_cores;
61 u8 ae_cores; 83 u8 ae_cores;
62 u8 zip_cores; 84 u8 zip_cores;
63}; 85};
64 86
65#define MAX_MSIX_VECTOR_NAME 20 87struct nitrox_stats {
66/** 88 atomic64_t posted;
67 * vectors for queues (64 AE, 64 SE and 64 ZIP) and 89 atomic64_t completed;
68 * error condition/mailbox. 90 atomic64_t dropped;
69 */
70#define MAX_MSIX_VECTORS 192
71
72struct nitrox_msix {
73 struct msix_entry *entries;
74 char **names;
75 DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
76 u32 nr_entries;
77}; 91};
78 92
79struct bh_data { 93#define IRQ_NAMESZ 32
80 /* slc port completion count address */ 94
81 u8 __iomem *completion_cnt_csr_addr; 95struct nitrox_q_vector {
96 char name[IRQ_NAMESZ];
97 bool valid;
98 int ring;
99 struct tasklet_struct resp_tasklet;
100 union {
101 struct nitrox_cmdq *cmdq;
102 struct nitrox_device *ndev;
103 };
104};
82 105
83 struct nitrox_cmdq *cmdq; 106/*
84 struct tasklet_struct resp_handler; 107 * NITROX Device states
108 */
109enum ndev_state {
110 __NDEV_NOT_READY,
111 __NDEV_READY,
112 __NDEV_IN_RESET,
85}; 113};
86 114
87struct nitrox_bh { 115/* NITROX support modes for VF(s) */
88 struct bh_data *slc; 116enum vf_mode {
117 __NDEV_MODE_PF,
118 __NDEV_MODE_VF16,
119 __NDEV_MODE_VF32,
120 __NDEV_MODE_VF64,
121 __NDEV_MODE_VF128,
89}; 122};
90 123
91/* NITROX-V driver state */ 124#define __NDEV_SRIOV_BIT 0
92#define NITROX_UCODE_LOADED 0
93#define NITROX_READY 1
94 125
95/* command queue size */ 126/* command queue size */
96#define DEFAULT_CMD_QLEN 2048 127#define DEFAULT_CMD_QLEN 2048
@@ -98,7 +129,6 @@ struct nitrox_bh {
98#define CMD_TIMEOUT 2000 129#define CMD_TIMEOUT 2000
99 130
100#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev)) 131#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
101#define PF_MODE 0
102 132
103#define NITROX_CSR_ADDR(ndev, offset) \ 133#define NITROX_CSR_ADDR(ndev, offset) \
104 ((ndev)->bar_addr + (offset)) 134 ((ndev)->bar_addr + (offset))
@@ -108,17 +138,18 @@ struct nitrox_bh {
108 * @list: pointer to linked list of devices 138 * @list: pointer to linked list of devices
109 * @bar_addr: iomap address 139 * @bar_addr: iomap address
110 * @pdev: PCI device information 140 * @pdev: PCI device information
111 * @status: NITROX status 141 * @state: NITROX device state
 142 * @flags: flags to indicate the device features
112 * @timeout: Request timeout in jiffies 143 * @timeout: Request timeout in jiffies
113 * @refcnt: Device usage count 144 * @refcnt: Device usage count
114 * @idx: device index (0..N) 145 * @idx: device index (0..N)
115 * @node: NUMA node id attached 146 * @node: NUMA node id attached
116 * @qlen: Command queue length 147 * @qlen: Command queue length
117 * @nr_queues: Number of command queues 148 * @nr_queues: Number of command queues
149 * @mode: Device mode PF/VF
118 * @ctx_pool: DMA pool for crypto context 150 * @ctx_pool: DMA pool for crypto context
119 * @pkt_cmdqs: SE Command queues 151 * @pkt_inq: Packet input rings
120 * @msix: MSI-X information 152 * @qvec: MSI-X queue vectors information
121 * @bh: post processing work
122 * @hw: hardware information 153 * @hw: hardware information
123 * @debugfs_dir: debugfs directory 154 * @debugfs_dir: debugfs directory
124 */ 155 */
@@ -128,7 +159,8 @@ struct nitrox_device {
128 u8 __iomem *bar_addr; 159 u8 __iomem *bar_addr;
129 struct pci_dev *pdev; 160 struct pci_dev *pdev;
130 161
131 unsigned long status; 162 atomic_t state;
163 unsigned long flags;
132 unsigned long timeout; 164 unsigned long timeout;
133 refcount_t refcnt; 165 refcount_t refcnt;
134 166
@@ -136,13 +168,16 @@ struct nitrox_device {
136 int node; 168 int node;
137 u16 qlen; 169 u16 qlen;
138 u16 nr_queues; 170 u16 nr_queues;
171 int num_vfs;
172 enum vf_mode mode;
139 173
140 struct dma_pool *ctx_pool; 174 struct dma_pool *ctx_pool;
141 struct nitrox_cmdq *pkt_cmdqs; 175 struct nitrox_cmdq *pkt_inq;
142 176
143 struct nitrox_msix msix; 177 struct nitrox_q_vector *qvec;
144 struct nitrox_bh bh; 178 int num_vecs;
145 179
180 struct nitrox_stats stats;
146 struct nitrox_hw hw; 181 struct nitrox_hw hw;
147#if IS_ENABLED(CONFIG_DEBUG_FS) 182#if IS_ENABLED(CONFIG_DEBUG_FS)
148 struct dentry *debugfs_dir; 183 struct dentry *debugfs_dir;
@@ -173,9 +208,22 @@ static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
173 writeq(value, (ndev->bar_addr + offset)); 208 writeq(value, (ndev->bar_addr + offset));
174} 209}
175 210
176static inline int nitrox_ready(struct nitrox_device *ndev) 211static inline bool nitrox_ready(struct nitrox_device *ndev)
177{ 212{
178 return test_bit(NITROX_READY, &ndev->status); 213 return atomic_read(&ndev->state) == __NDEV_READY;
179} 214}
180 215
216#ifdef CONFIG_DEBUG_FS
217int nitrox_debugfs_init(struct nitrox_device *ndev);
218void nitrox_debugfs_exit(struct nitrox_device *ndev);
219#else
220static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
221{
222 return 0;
223}
224
225static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
226{ }
227#endif
228
181#endif /* __NITROX_DEV_H */ 229#endif /* __NITROX_DEV_H */
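A hypothetical caller, just to show the intent of the reworked state field: hot paths now compare the atomic state against __NDEV_READY through nitrox_ready() instead of testing the old NITROX_READY status bit.

/* Illustrative only: gate request submission on device readiness. */
static int example_submit_gate(struct nitrox_device *ndev)
{
	if (!nitrox_ready(ndev))	/* atomic_read(&ndev->state) != __NDEV_READY */
		return -ENODEV;

	/* ... build and post the request ... */
	return 0;
}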
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index ab4ccf2f9e77..a9b82387cf53 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -4,6 +4,8 @@
4#include "nitrox_dev.h" 4#include "nitrox_dev.h"
5#include "nitrox_csr.h" 5#include "nitrox_csr.h"
6 6
7#define PLL_REF_CLK 50
8
7/** 9/**
8 * emu_enable_cores - Enable EMU cluster cores. 10 * emu_enable_cores - Enable EMU cluster cores.
9 * @ndev: N5 device 11 * @ndev: N5 device
@@ -117,7 +119,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
117 int i; 119 int i;
118 120
119 for (i = 0; i < ndev->nr_queues; i++) { 121 for (i = 0; i < ndev->nr_queues; i++) {
120 struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; 122 struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
121 union nps_pkt_in_instr_rsize pkt_in_rsize; 123 union nps_pkt_in_instr_rsize pkt_in_rsize;
122 u64 offset; 124 u64 offset;
123 125
@@ -256,7 +258,7 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
256 /* disable ILK interface */ 258 /* disable ILK interface */
257 core_gbl_vfcfg.value = 0; 259 core_gbl_vfcfg.value = 0;
258 core_gbl_vfcfg.s.ilk_disable = 1; 260 core_gbl_vfcfg.s.ilk_disable = 1;
259 core_gbl_vfcfg.s.cfg = PF_MODE; 261 core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
260 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); 262 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
261 /* config input and solicit ports */ 263 /* config input and solicit ports */
262 nitrox_config_pkt_input_rings(ndev); 264 nitrox_config_pkt_input_rings(ndev);
@@ -400,3 +402,68 @@ void nitrox_config_lbc_unit(struct nitrox_device *ndev)
400 offset = LBC_ELM_VF65_128_INT_ENA_W1S; 402 offset = LBC_ELM_VF65_128_INT_ENA_W1S;
401 nitrox_write_csr(ndev, offset, (~0ULL)); 403 nitrox_write_csr(ndev, offset, (~0ULL));
402} 404}
405
406void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
407{
408 union nps_core_gbl_vfcfg vfcfg;
409
410 vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
411 vfcfg.s.cfg = mode & 0x7;
412
413 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
414}
415
416void nitrox_get_hwinfo(struct nitrox_device *ndev)
417{
418 union emu_fuse_map emu_fuse;
419 union rst_boot rst_boot;
420 union fus_dat1 fus_dat1;
421 unsigned char name[IFNAMSIZ * 2] = {};
422 int i, dead_cores;
423 u64 offset;
424
425 /* get core frequency */
426 offset = RST_BOOT;
427 rst_boot.value = nitrox_read_csr(ndev, offset);
428 ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
429
430 for (i = 0; i < NR_CLUSTERS; i++) {
431 offset = EMU_FUSE_MAPX(i);
432 emu_fuse.value = nitrox_read_csr(ndev, offset);
433 if (emu_fuse.s.valid) {
434 dead_cores = hweight32(emu_fuse.s.ae_fuse);
435 ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
436 dead_cores = hweight16(emu_fuse.s.se_fuse);
437 ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
438 }
439 }
440 /* find zip hardware availability */
441 offset = FUS_DAT1;
442 fus_dat1.value = nitrox_read_csr(ndev, offset);
443 if (!fus_dat1.nozip) {
444 dead_cores = hweight8(fus_dat1.zip_info);
445 ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
446 }
447
448 /* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/
449 if (ndev->hw.ae_cores == AE_MAX_CORES) {
450 switch (ndev->hw.se_cores) {
451 case SE_MAX_CORES:
452 i = snprintf(name, sizeof(name), "CNN5560");
453 break;
454 case 40:
455 i = snprintf(name, sizeof(name), "CNN5560s");
456 break;
457 }
458 } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
459 i = snprintf(name, sizeof(name), "CNN5530");
460 } else {
461 i = snprintf(name, sizeof(name), "CNN5560i");
462 }
463
464 snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
465 ndev->hw.freq, ndev->hw.revision_id);
466
467 /* copy partname */
468 strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
469}
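As a quick worked example of the frequency derivation above (the multiplier value is illustrative, not taken from real silicon): with PLL_REF_CLK defined as 50 and an RST_BOOT pnr_mul field of 14, ndev->hw.freq becomes (14 + 3) * 50 = 850 MHz, which is the number later reported by the debugfs "device" entry and embedded in the generated partname string.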
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
new file mode 100644
index 000000000000..489ee64c119e
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -0,0 +1,23 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NITROX_HAL_H
3#define __NITROX_HAL_H
4
5#include "nitrox_dev.h"
6
7void nitrox_config_emu_unit(struct nitrox_device *ndev);
8void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
9void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
10void nitrox_config_nps_unit(struct nitrox_device *ndev);
11void nitrox_config_pom_unit(struct nitrox_device *ndev);
12void nitrox_config_rand_unit(struct nitrox_device *ndev);
13void nitrox_config_efl_unit(struct nitrox_device *ndev);
14void nitrox_config_bmi_unit(struct nitrox_device *ndev);
15void nitrox_config_bmo_unit(struct nitrox_device *ndev);
16void nitrox_config_lbc_unit(struct nitrox_device *ndev);
17void invalidate_lbc(struct nitrox_device *ndev);
18void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
19void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
20void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
21void nitrox_get_hwinfo(struct nitrox_device *ndev);
22
23#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index ee0d70ba25d5..88a77b8fb3fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -6,9 +6,16 @@
6#include "nitrox_dev.h" 6#include "nitrox_dev.h"
7#include "nitrox_csr.h" 7#include "nitrox_csr.h"
8#include "nitrox_common.h" 8#include "nitrox_common.h"
9#include "nitrox_hal.h"
9 10
11/**
12 * One vector for each type of ring
13 * - NPS packet ring, AQMQ ring and ZQMQ ring
14 */
10#define NR_RING_VECTORS 3 15#define NR_RING_VECTORS 3
11#define NPS_CORE_INT_ACTIVE_ENTRY 192 16/* base entry for packet ring/port */
17#define PKT_RING_MSIX_BASE 0
18#define NON_RING_MSIX_BASE 192
12 19
13/** 20/**
14 * nps_pkt_slc_isr - IRQ handler for NPS solicit port 21 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -17,13 +24,14 @@
17 */ 24 */
18static irqreturn_t nps_pkt_slc_isr(int irq, void *data) 25static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
19{ 26{
20 struct bh_data *slc = data; 27 struct nitrox_q_vector *qvec = data;
21 union nps_pkt_slc_cnts pkt_slc_cnts; 28 union nps_pkt_slc_cnts slc_cnts;
29 struct nitrox_cmdq *cmdq = qvec->cmdq;
22 30
23 pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr); 31 slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
24 /* New packet on SLC output port */ 32 /* New packet on SLC output port */
25 if (pkt_slc_cnts.s.slc_int) 33 if (slc_cnts.s.slc_int)
26 tasklet_hi_schedule(&slc->resp_handler); 34 tasklet_hi_schedule(&qvec->resp_tasklet);
27 35
28 return IRQ_HANDLED; 36 return IRQ_HANDLED;
29} 37}
@@ -190,165 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
190 dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value); 198 dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
191} 199}
192 200
201static void nps_core_int_tasklet(unsigned long data)
202{
203 struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
204 struct nitrox_device *ndev = qvec->ndev;
205
206 /* if pf mode do queue recovery */
207 if (ndev->mode == __NDEV_MODE_PF) {
208 } else {
209 /**
210 * if VF(s) enabled communicate the error information
211 * to VF(s)
212 */
213 }
214}
215
193/** 216/**
194 * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts 217 * nps_core_int_isr - interrupt handler for NITROX errors and
195 * @ndev: NITROX device 218 * mailbox communication
196 */ 219 */
197static void clear_nps_core_int_active(struct nitrox_device *ndev) 220static irqreturn_t nps_core_int_isr(int irq, void *data)
198{ 221{
199 union nps_core_int_active core_int_active; 222 struct nitrox_device *ndev = data;
223 union nps_core_int_active core_int;
200 224
201 core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); 225 core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
202 226
203 if (core_int_active.s.nps_core) 227 if (core_int.s.nps_core)
204 clear_nps_core_err_intr(ndev); 228 clear_nps_core_err_intr(ndev);
205 229
206 if (core_int_active.s.nps_pkt) 230 if (core_int.s.nps_pkt)
207 clear_nps_pkt_err_intr(ndev); 231 clear_nps_pkt_err_intr(ndev);
208 232
209 if (core_int_active.s.pom) 233 if (core_int.s.pom)
210 clear_pom_err_intr(ndev); 234 clear_pom_err_intr(ndev);
211 235
212 if (core_int_active.s.pem) 236 if (core_int.s.pem)
213 clear_pem_err_intr(ndev); 237 clear_pem_err_intr(ndev);
214 238
215 if (core_int_active.s.lbc) 239 if (core_int.s.lbc)
216 clear_lbc_err_intr(ndev); 240 clear_lbc_err_intr(ndev);
217 241
218 if (core_int_active.s.efl) 242 if (core_int.s.efl)
219 clear_efl_err_intr(ndev); 243 clear_efl_err_intr(ndev);
220 244
221 if (core_int_active.s.bmi) 245 if (core_int.s.bmi)
222 clear_bmi_err_intr(ndev); 246 clear_bmi_err_intr(ndev);
223 247
224 /* If more work callback the ISR, set resend */ 248 /* If more work callback the ISR, set resend */
225 core_int_active.s.resend = 1; 249 core_int.s.resend = 1;
226 nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value); 250 nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
227}
228
229static irqreturn_t nps_core_int_isr(int irq, void *data)
230{
231 struct nitrox_device *ndev = data;
232
233 clear_nps_core_int_active(ndev);
234 251
235 return IRQ_HANDLED; 252 return IRQ_HANDLED;
236} 253}
237 254
238static int nitrox_enable_msix(struct nitrox_device *ndev) 255void nitrox_unregister_interrupts(struct nitrox_device *ndev)
239{ 256{
240 struct msix_entry *entries; 257 struct pci_dev *pdev = ndev->pdev;
241 char **names;
242 int i, nr_entries, ret;
243
244 /*
245 * PF MSI-X vectors
246 *
247 * Entry 0: NPS PKT ring 0
248 * Entry 1: AQMQ ring 0
249 * Entry 2: ZQM ring 0
250 * Entry 3: NPS PKT ring 1
251 * Entry 4: AQMQ ring 1
252 * Entry 5: ZQM ring 1
253 * ....
254 * Entry 192: NPS_CORE_INT_ACTIVE
255 */
256 nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
257 entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
258 GFP_KERNEL, ndev->node);
259 if (!entries)
260 return -ENOMEM;
261
262 names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
263 if (!names) {
264 kfree(entries);
265 return -ENOMEM;
266 }
267
268 /* fill entires */
269 for (i = 0; i < (nr_entries - 1); i++)
270 entries[i].entry = i;
271
272 entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
273
274 for (i = 0; i < nr_entries; i++) {
275 *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
276 if (!(*(names + i))) {
277 ret = -ENOMEM;
278 goto msix_fail;
279 }
280 }
281 ndev->msix.entries = entries;
282 ndev->msix.names = names;
283 ndev->msix.nr_entries = nr_entries;
284
285 ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
286 ndev->msix.nr_entries);
287 if (ret) {
288 dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
289 ret);
290 goto msix_fail;
291 }
292 return 0;
293
294msix_fail:
295 for (i = 0; i < nr_entries; i++)
296 kfree(*(names + i));
297
298 kfree(entries);
299 kfree(names);
300 return ret;
301}
302
303static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
304{
305 int i;
306
307 if (!ndev->bh.slc)
308 return;
309
310 for (i = 0; i < ndev->nr_queues; i++) {
311 struct bh_data *bh = &ndev->bh.slc[i];
312
313 tasklet_disable(&bh->resp_handler);
314 tasklet_kill(&bh->resp_handler);
315 }
316 kfree(ndev->bh.slc);
317 ndev->bh.slc = NULL;
318}
319
320static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
321{
322 u32 size;
323 int i; 258 int i;
324 259
325 size = ndev->nr_queues * sizeof(struct bh_data); 260 for (i = 0; i < ndev->num_vecs; i++) {
326 ndev->bh.slc = kzalloc(size, GFP_KERNEL); 261 struct nitrox_q_vector *qvec;
327 if (!ndev->bh.slc) 262 int vec;
328 return -ENOMEM;
329 263
330 for (i = 0; i < ndev->nr_queues; i++) { 264 qvec = ndev->qvec + i;
331 struct bh_data *bh = &ndev->bh.slc[i]; 265 if (!qvec->valid)
332 u64 offset; 266 continue;
333 267
334 offset = NPS_PKT_SLC_CNTSX(i); 268 /* get the vector number */
335 /* pre calculate completion count address */ 269 vec = pci_irq_vector(pdev, i);
336 bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); 270 irq_set_affinity_hint(vec, NULL);
337 bh->cmdq = &ndev->pkt_cmdqs[i]; 271 free_irq(vec, qvec);
338 272
339 tasklet_init(&bh->resp_handler, pkt_slc_resp_handler, 273 tasklet_disable(&qvec->resp_tasklet);
340 (unsigned long)bh); 274 tasklet_kill(&qvec->resp_tasklet);
275 qvec->valid = false;
341 } 276 }
342 277 kfree(ndev->qvec);
343 return 0; 278 pci_free_irq_vectors(pdev);
344} 279}
345 280
346static int nitrox_request_irqs(struct nitrox_device *ndev) 281int nitrox_register_interrupts(struct nitrox_device *ndev)
347{ 282{
348 struct pci_dev *pdev = ndev->pdev; 283 struct pci_dev *pdev = ndev->pdev;
349 struct msix_entry *msix_ent = ndev->msix.entries; 284 struct nitrox_q_vector *qvec;
350 int nr_ring_vectors, i = 0, ring, cpu, ret; 285 int nr_vecs, vec, cpu;
351 char *name; 286 int ret, i;
352 287
353 /* 288 /*
354 * PF MSI-X vectors 289 * PF MSI-X vectors
@@ -357,112 +292,76 @@ static int nitrox_request_irqs(struct nitrox_device *ndev)
357 * Entry 1: AQMQ ring 0 292 * Entry 1: AQMQ ring 0
358 * Entry 2: ZQM ring 0 293 * Entry 2: ZQM ring 0
359 * Entry 3: NPS PKT ring 1 294 * Entry 3: NPS PKT ring 1
295 * Entry 4: AQMQ ring 1
296 * Entry 5: ZQM ring 1
360 * .... 297 * ....
361 * Entry 192: NPS_CORE_INT_ACTIVE 298 * Entry 192: NPS_CORE_INT_ACTIVE
362 */ 299 */
363 nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS; 300 nr_vecs = pci_msix_vec_count(pdev);
364
365 /* request irq for pkt ring/ports only */
366 while (i < nr_ring_vectors) {
367 name = *(ndev->msix.names + i);
368 ring = (i / NR_RING_VECTORS);
369 snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
370 ndev->idx, ring);
371 301
372 ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0, 302 /* Enable MSI-X */
373 name, &ndev->bh.slc[ring]); 303 ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
374 if (ret) { 304 if (ret < 0) {
375 dev_err(&pdev->dev, "failed to get irq %d for %s\n", 305 dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
376 msix_ent[i].vector, name);
377 return ret;
378 }
379 cpu = ring % num_online_cpus();
380 irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
381
382 set_bit(i, ndev->msix.irqs);
383 i += NR_RING_VECTORS;
384 }
385
386 /* Request IRQ for NPS_CORE_INT_ACTIVE */
387 name = *(ndev->msix.names + i);
388 snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
389 ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
390 if (ret) {
391 dev_err(&pdev->dev, "failed to get irq %d for %s\n",
392 msix_ent[i].vector, name);
393 return ret; 306 return ret;
394 } 307 }
395 set_bit(i, ndev->msix.irqs); 308 ndev->num_vecs = nr_vecs;
396 309
397 return 0; 310 ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
398} 311 if (!ndev->qvec) {
399 312 pci_free_irq_vectors(pdev);
400static void nitrox_disable_msix(struct nitrox_device *ndev) 313 return -ENOMEM;
401{
402 struct msix_entry *msix_ent = ndev->msix.entries;
403 char **names = ndev->msix.names;
404 int i = 0, ring, nr_ring_vectors;
405
406 nr_ring_vectors = ndev->msix.nr_entries - 1;
407
408 /* clear pkt ring irqs */
409 while (i < nr_ring_vectors) {
410 if (test_and_clear_bit(i, ndev->msix.irqs)) {
411 ring = (i / NR_RING_VECTORS);
412 irq_set_affinity_hint(msix_ent[i].vector, NULL);
413 free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
414 }
415 i += NR_RING_VECTORS;
416 } 314 }
417 irq_set_affinity_hint(msix_ent[i].vector, NULL);
418 free_irq(msix_ent[i].vector, ndev);
419 clear_bit(i, ndev->msix.irqs);
420 315
421 kfree(ndev->msix.entries); 316 /* request irqs for packet rings/ports */
422 for (i = 0; i < ndev->msix.nr_entries; i++) 317 for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
423 kfree(*(names + i)); 318 qvec = &ndev->qvec[i];
424 319
425 kfree(names); 320 qvec->ring = i / NR_RING_VECTORS;
426 pci_disable_msix(ndev->pdev); 321 if (qvec->ring >= ndev->nr_queues)
427} 322 break;
428
429/**
430 * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
431 * @ndev: NITROX device
432 */
433void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
434{
435 nitrox_disable_msix(ndev);
436 nitrox_cleanup_pkt_slc_bh(ndev);
437}
438 323
439/** 324 snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
440 * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ 325 /* get the vector number */
441 * @ndev: NITROX device 326 vec = pci_irq_vector(pdev, i);
442 * 327 ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
443 * Return: 0 on success, a negative value on failure. 328 if (ret) {
444 */ 329 dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
445int nitrox_pf_init_isr(struct nitrox_device *ndev) 330 qvec->ring);
446{ 331 goto irq_fail;
447 int err; 332 }
333 cpu = qvec->ring % num_online_cpus();
334 irq_set_affinity_hint(vec, get_cpu_mask(cpu));
448 335
449 err = nitrox_setup_pkt_slc_bh(ndev); 336 tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
450 if (err) 337 (unsigned long)qvec);
451 return err; 338 qvec->cmdq = &ndev->pkt_inq[qvec->ring];
339 qvec->valid = true;
340 }
452 341
453 err = nitrox_enable_msix(ndev); 342 /* request irqs for non ring vectors */
454 if (err) 343 i = NON_RING_MSIX_BASE;
455 goto msix_fail; 344 qvec = &ndev->qvec[i];
456 345
457 err = nitrox_request_irqs(ndev); 346 snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
458 if (err) 347 /* get the vector number */
348 vec = pci_irq_vector(pdev, i);
349 ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
350 if (ret) {
351 dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
459 goto irq_fail; 352 goto irq_fail;
353 }
354 cpu = num_online_cpus();
355 irq_set_affinity_hint(vec, get_cpu_mask(cpu));
356
357 tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
358 (unsigned long)qvec);
359 qvec->ndev = ndev;
360 qvec->valid = true;
460 361
461 return 0; 362 return 0;
462 363
463irq_fail: 364irq_fail:
464 nitrox_disable_msix(ndev); 365 nitrox_unregister_interrupts(ndev);
465msix_fail: 366 return ret;
466 nitrox_cleanup_pkt_slc_bh(ndev);
467 return err;
468} 367}
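A minimal sketch (illustrative, error paths trimmed, function and IRQ names made up) of the MSI-X pattern this rework adopts: the PCI core owns the vector table via pci_alloc_irq_vectors(), and vectors are looked up by index with pci_irq_vector() instead of being tracked in a private struct msix_entry array with hand-rolled name buffers.

/* Illustrative only: core-managed MSI-X setup for a single vector. */
static int example_setup_msix(struct pci_dev *pdev, irq_handler_t handler,
			      void *data)
{
	int nvec, vec, ret;

	nvec = pci_msix_vec_count(pdev);	/* vectors advertised by the device */
	if (nvec < 0)
		return nvec;

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	vec = pci_irq_vector(pdev, 0);		/* Linux IRQ number for entry 0 */
	ret = request_irq(vec, handler, 0, "example-msix0", data);
	if (ret)
		pci_free_irq_vectors(pdev);	/* undo allocation on failure */
	return ret;
}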
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
new file mode 100644
index 000000000000..63418a6cc52c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -0,0 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __NITROX_ISR_H
3#define __NITROX_ISR_H
4
5#include "nitrox_dev.h"
6
7int nitrox_register_interrupts(struct nitrox_device *ndev);
8void nitrox_unregister_interrupts(struct nitrox_device *ndev);
9
10#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df07777f..2260efa42308 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,30 +17,27 @@
17 17
18#define CRYPTO_CTX_SIZE 256 18#define CRYPTO_CTX_SIZE 256
19 19
20/* command queue alignments */ 20/* packet input ring alignments */
21#define PKT_IN_ALIGN 16 21#define PKTIN_Q_ALIGN_BYTES 16
22 22
23static int cmdq_common_init(struct nitrox_cmdq *cmdq) 23static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
24{ 24{
25 struct nitrox_device *ndev = cmdq->ndev; 25 struct nitrox_device *ndev = cmdq->ndev;
26 u32 qsize; 26
27 27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
28 qsize = (ndev->qlen) * cmdq->instr_size; 28 cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
29 cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev), 29 &cmdq->unalign_dma,
30 (qsize + PKT_IN_ALIGN), 30 GFP_KERNEL);
31 &cmdq->dma_unaligned, 31 if (!cmdq->unalign_base)
32 GFP_KERNEL);
33 if (!cmdq->head_unaligned)
34 return -ENOMEM; 32 return -ENOMEM;
35 33
36 cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); 34 cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
37 cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); 35 cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
38 cmdq->qsize = (qsize + PKT_IN_ALIGN);
39 cmdq->write_idx = 0; 36 cmdq->write_idx = 0;
40 37
41 spin_lock_init(&cmdq->response_lock); 38 spin_lock_init(&cmdq->cmd_qlock);
42 spin_lock_init(&cmdq->cmdq_lock); 39 spin_lock_init(&cmdq->resp_qlock);
43 spin_lock_init(&cmdq->backlog_lock); 40 spin_lock_init(&cmdq->backlog_qlock);
44 41
45 INIT_LIST_HEAD(&cmdq->response_head); 42 INIT_LIST_HEAD(&cmdq->response_head);
46 INIT_LIST_HEAD(&cmdq->backlog_head); 43 INIT_LIST_HEAD(&cmdq->backlog_head);
@@ -51,68 +48,83 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
51 return 0; 48 return 0;
52} 49}
53 50
54static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq) 51static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
52{
53 cmdq->write_idx = 0;
54 atomic_set(&cmdq->pending_count, 0);
55 atomic_set(&cmdq->backlog_count, 0);
56}
57
58static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
55{ 59{
56 struct nitrox_device *ndev = cmdq->ndev; 60 struct nitrox_device *ndev = cmdq->ndev;
57 61
62 if (!cmdq->unalign_base)
63 return;
64
58 cancel_work_sync(&cmdq->backlog_qflush); 65 cancel_work_sync(&cmdq->backlog_qflush);
59 66
60 dma_free_coherent(DEV(ndev), cmdq->qsize, 67 dma_free_coherent(DEV(ndev), cmdq->qsize,
61 cmdq->head_unaligned, cmdq->dma_unaligned); 68 cmdq->unalign_base, cmdq->unalign_dma);
62 69 nitrox_cmdq_reset(cmdq);
63 atomic_set(&cmdq->pending_count, 0);
64 atomic_set(&cmdq->backlog_count, 0);
65 70
66 cmdq->dbell_csr_addr = NULL; 71 cmdq->dbell_csr_addr = NULL;
67 cmdq->head = NULL; 72 cmdq->compl_cnt_csr_addr = NULL;
73 cmdq->unalign_base = NULL;
74 cmdq->base = NULL;
75 cmdq->unalign_dma = 0;
68 cmdq->dma = 0; 76 cmdq->dma = 0;
69 cmdq->qsize = 0; 77 cmdq->qsize = 0;
70 cmdq->instr_size = 0; 78 cmdq->instr_size = 0;
71} 79}
72 80
73static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev) 81static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
74{ 82{
75 int i; 83 int i;
76 84
77 for (i = 0; i < ndev->nr_queues; i++) { 85 for (i = 0; i < ndev->nr_queues; i++) {
78 struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; 86 struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
79 87
80 cmdq_common_cleanup(cmdq); 88 nitrox_cmdq_cleanup(cmdq);
81 } 89 }
82 kfree(ndev->pkt_cmdqs); 90 kfree(ndev->pkt_inq);
83 ndev->pkt_cmdqs = NULL; 91 ndev->pkt_inq = NULL;
84} 92}
85 93
86static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev) 94static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
87{ 95{
88 int i, err, size; 96 int i, err;
89 97
90 size = ndev->nr_queues * sizeof(struct nitrox_cmdq); 98 ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
91 ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL); 99 sizeof(struct nitrox_cmdq),
92 if (!ndev->pkt_cmdqs) 100 GFP_KERNEL, ndev->node);
101 if (!ndev->pkt_inq)
93 return -ENOMEM; 102 return -ENOMEM;
94 103
95 for (i = 0; i < ndev->nr_queues; i++) { 104 for (i = 0; i < ndev->nr_queues; i++) {
96 struct nitrox_cmdq *cmdq; 105 struct nitrox_cmdq *cmdq;
97 u64 offset; 106 u64 offset;
98 107
99 cmdq = &ndev->pkt_cmdqs[i]; 108 cmdq = &ndev->pkt_inq[i];
100 cmdq->ndev = ndev; 109 cmdq->ndev = ndev;
101 cmdq->qno = i; 110 cmdq->qno = i;
102 cmdq->instr_size = sizeof(struct nps_pkt_instr); 111 cmdq->instr_size = sizeof(struct nps_pkt_instr);
103 112
113 /* packet input ring doorbell address */
104 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); 114 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
105 /* SE ring doorbell address for this queue */
106 cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); 115 cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
116 /* packet solicit port completion count address */
117 offset = NPS_PKT_SLC_CNTSX(i);
118 cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
107 119
108 err = cmdq_common_init(cmdq); 120 err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
109 if (err) 121 if (err)
110 goto pkt_cmdq_fail; 122 goto pktq_fail;
111 } 123 }
112 return 0; 124 return 0;
113 125
114pkt_cmdq_fail: 126pktq_fail:
115 nitrox_cleanup_pkt_cmdqs(ndev); 127 nitrox_free_pktin_queues(ndev);
116 return err; 128 return err;
117} 129}
118 130
@@ -122,7 +134,7 @@ static int create_crypto_dma_pool(struct nitrox_device *ndev)
122 134
123 /* Crypto context pool, 16 byte aligned */ 135 /* Crypto context pool, 16 byte aligned */
124 size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr); 136 size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
125 ndev->ctx_pool = dma_pool_create("crypto-context", 137 ndev->ctx_pool = dma_pool_create("nitrox-context",
126 DEV(ndev), size, 16, 0); 138 DEV(ndev), size, 16, 0);
127 if (!ndev->ctx_pool) 139 if (!ndev->ctx_pool)
128 return -ENOMEM; 140 return -ENOMEM;
@@ -149,7 +161,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
149 void *vaddr; 161 void *vaddr;
150 dma_addr_t dma; 162 dma_addr_t dma;
151 163
152 vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma); 164 vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
153 if (!vaddr) 165 if (!vaddr)
154 return NULL; 166 return NULL;
155 167
@@ -194,7 +206,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
194 if (err) 206 if (err)
195 return err; 207 return err;
196 208
197 err = nitrox_init_pkt_cmdqs(ndev); 209 err = nitrox_alloc_pktin_queues(ndev);
198 if (err) 210 if (err)
199 destroy_crypto_dma_pool(ndev); 211 destroy_crypto_dma_pool(ndev);
200 212
@@ -207,6 +219,6 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
207 */ 219 */
208void nitrox_common_sw_cleanup(struct nitrox_device *ndev) 220void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
209{ 221{
210 nitrox_cleanup_pkt_cmdqs(ndev); 222 nitrox_free_pktin_queues(ndev);
211 destroy_crypto_dma_pool(ndev); 223 destroy_crypto_dma_pool(ndev);
212} 224}
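For reference, an illustrative rendering of the over-allocate-then-align idiom that the new nitrox_cmdq_init() uses (the helper below is hypothetical and drops the bookkeeping of the unaligned pointers that the real code keeps for dma_free_coherent()): one extra align_bytes is allocated so both the DMA handle and the CPU pointer can be rounded up to the required boundary while staying inside the allocation.

/* Illustrative only: aligned coherent ring allocation. */
static int example_aligned_dma_alloc(struct device *dev, size_t qsize,
				     int align_bytes, void **base,
				     dma_addr_t *dma)
{
	void *unalign_base;
	dma_addr_t unalign_dma;

	unalign_base = dma_zalloc_coherent(dev, qsize + align_bytes,
					   &unalign_dma, GFP_KERNEL);
	if (!unalign_base)
		return -ENOMEM;

	*dma = PTR_ALIGN(unalign_dma, align_bytes);	/* aligned bus address */
	*base = unalign_base + (*dma - unalign_dma);	/* matching CPU pointer */
	return 0;
}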
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fee7cb2ce747..6595c95af9f1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -11,13 +11,15 @@
11#include "nitrox_dev.h" 11#include "nitrox_dev.h"
12#include "nitrox_common.h" 12#include "nitrox_common.h"
13#include "nitrox_csr.h" 13#include "nitrox_csr.h"
14#include "nitrox_hal.h"
15#include "nitrox_isr.h"
14 16
15#define CNN55XX_DEV_ID 0x12 17#define CNN55XX_DEV_ID 0x12
16#define MAX_PF_QUEUES 64 18#define MAX_PF_QUEUES 64
17#define UCODE_HLEN 48 19#define UCODE_HLEN 48
18#define SE_GROUP 0 20#define SE_GROUP 0
19 21
20#define DRIVER_VERSION "1.0" 22#define DRIVER_VERSION "1.1"
21#define FW_DIR "cavium/" 23#define FW_DIR "cavium/"
22/* SE microcode */ 24/* SE microcode */
23#define SE_FW FW_DIR "cnn55xx_se.fw" 25#define SE_FW FW_DIR "cnn55xx_se.fw"
@@ -42,6 +44,15 @@ static unsigned int qlen = DEFAULT_CMD_QLEN;
42module_param(qlen, uint, 0644); 44module_param(qlen, uint, 0644);
43MODULE_PARM_DESC(qlen, "Command queue length - default 2048"); 45MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
44 46
47#ifdef CONFIG_PCI_IOV
48int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
49#else
50int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
51{
52 return 0;
53}
54#endif
55
45/** 56/**
46 * struct ucode - Firmware Header 57 * struct ucode - Firmware Header
47 * @id: microcode ID 58 * @id: microcode ID
@@ -136,9 +147,6 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
136 write_to_ucd_unit(ndev, ucode); 147 write_to_ucd_unit(ndev, ucode);
137 release_firmware(fw); 148 release_firmware(fw);
138 149
139 set_bit(NITROX_UCODE_LOADED, &ndev->status);
140 /* barrier to sync with other cpus */
141 smp_mb__after_atomic();
142 return 0; 150 return 0;
143} 151}
144 152
@@ -210,7 +218,7 @@ void nitrox_put_device(struct nitrox_device *ndev)
210 smp_mb__after_atomic(); 218 smp_mb__after_atomic();
211} 219}
212 220
213static int nitrox_reset_device(struct pci_dev *pdev) 221static int nitrox_device_flr(struct pci_dev *pdev)
214{ 222{
215 int pos = 0; 223 int pos = 0;
216 224
@@ -220,15 +228,10 @@ static int nitrox_reset_device(struct pci_dev *pdev)
220 return -ENOMEM; 228 return -ENOMEM;
221 } 229 }
222 230
223 pos = pci_pcie_cap(pdev); 231 /* check flr support */
224 if (!pos) 232 if (pcie_has_flr(pdev))
225 return -ENOTTY; 233 pcie_flr(pdev);
226 234
227 if (!pci_wait_for_pending_transaction(pdev))
228 dev_err(&pdev->dev, "waiting for pending transaction\n");
229
230 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
231 msleep(100);
232 pci_restore_state(pdev); 235 pci_restore_state(pdev);
233 236
234 return 0; 237 return 0;
@@ -242,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
242 if (err) 245 if (err)
243 return err; 246 return err;
244 247
245 err = nitrox_pf_init_isr(ndev); 248 err = nitrox_register_interrupts(ndev);
246 if (err) 249 if (err)
247 nitrox_common_sw_cleanup(ndev); 250 nitrox_common_sw_cleanup(ndev);
248 251
@@ -251,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
251 254
252static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev) 255static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
253{ 256{
254 nitrox_pf_cleanup_isr(ndev); 257 nitrox_unregister_interrupts(ndev);
255 nitrox_common_sw_cleanup(ndev); 258 nitrox_common_sw_cleanup(ndev);
256} 259}
257 260
@@ -284,26 +287,6 @@ static int nitrox_bist_check(struct nitrox_device *ndev)
284 return 0; 287 return 0;
285} 288}
286 289
287static void nitrox_get_hwinfo(struct nitrox_device *ndev)
288{
289 union emu_fuse_map emu_fuse;
290 u64 offset;
291 int i;
292
293 for (i = 0; i < NR_CLUSTERS; i++) {
294 u8 dead_cores;
295
296 offset = EMU_FUSE_MAPX(i);
297 emu_fuse.value = nitrox_read_csr(ndev, offset);
298 if (emu_fuse.s.valid) {
299 dead_cores = hweight32(emu_fuse.s.ae_fuse);
300 ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
301 dead_cores = hweight16(emu_fuse.s.se_fuse);
302 ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
303 }
304 }
305}
306
307static int nitrox_pf_hw_init(struct nitrox_device *ndev) 290static int nitrox_pf_hw_init(struct nitrox_device *ndev)
308{ 291{
309 int err; 292 int err;
@@ -336,135 +319,6 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
336 return 0; 319 return 0;
337} 320}
338 321
339#if IS_ENABLED(CONFIG_DEBUG_FS)
340static int registers_show(struct seq_file *s, void *v)
341{
342 struct nitrox_device *ndev = s->private;
343 u64 offset;
344
345 /* NPS DMA stats */
346 offset = NPS_STATS_PKT_DMA_RD_CNT;
347 seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n",
348 nitrox_read_csr(ndev, offset));
349 offset = NPS_STATS_PKT_DMA_WR_CNT;
350 seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n",
351 nitrox_read_csr(ndev, offset));
352
353 /* BMI/BMO stats */
354 offset = BMI_NPS_PKT_CNT;
355 seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n",
356 nitrox_read_csr(ndev, offset));
357 offset = BMO_NPS_SLC_PKT_CNT;
358 seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n",
359 nitrox_read_csr(ndev, offset));
360
361 return 0;
362}
363
364static int registers_open(struct inode *inode, struct file *file)
365{
366 return single_open(file, registers_show, inode->i_private);
367}
368
369static const struct file_operations register_fops = {
370 .owner = THIS_MODULE,
371 .open = registers_open,
372 .read = seq_read,
373 .llseek = seq_lseek,
374 .release = single_release,
375};
376
377static int firmware_show(struct seq_file *s, void *v)
378{
379 struct nitrox_device *ndev = s->private;
380
381 seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
382 return 0;
383}
384
385static int firmware_open(struct inode *inode, struct file *file)
386{
387 return single_open(file, firmware_show, inode->i_private);
388}
389
390static const struct file_operations firmware_fops = {
391 .owner = THIS_MODULE,
392 .open = firmware_open,
393 .read = seq_read,
394 .llseek = seq_lseek,
395 .release = single_release,
396};
397
398static int nitrox_show(struct seq_file *s, void *v)
399{
400 struct nitrox_device *ndev = s->private;
401
402 seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
403 seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
404 seq_printf(s, " Cores [AE: %u SE: %u]\n",
405 ndev->hw.ae_cores, ndev->hw.se_cores);
406 seq_printf(s, " Number of Queues: %u\n", ndev->nr_queues);
407 seq_printf(s, " Queue length: %u\n", ndev->qlen);
408 seq_printf(s, " Node: %u\n", ndev->node);
409
410 return 0;
411}
412
413static int nitrox_open(struct inode *inode, struct file *file)
414{
415 return single_open(file, nitrox_show, inode->i_private);
416}
417
418static const struct file_operations nitrox_fops = {
419 .owner = THIS_MODULE,
420 .open = nitrox_open,
421 .read = seq_read,
422 .llseek = seq_lseek,
423 .release = single_release,
424};
425
426static void nitrox_debugfs_exit(struct nitrox_device *ndev)
427{
428 debugfs_remove_recursive(ndev->debugfs_dir);
429 ndev->debugfs_dir = NULL;
430}
431
432static int nitrox_debugfs_init(struct nitrox_device *ndev)
433{
434 struct dentry *dir, *f;
435
436 dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
437 if (!dir)
438 return -ENOMEM;
439
440 ndev->debugfs_dir = dir;
441 f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
442 if (!f)
443 goto err;
444 f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
445 if (!f)
446 goto err;
447 f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
448 if (!f)
449 goto err;
450
451 return 0;
452
453err:
454 nitrox_debugfs_exit(ndev);
455 return -ENODEV;
456}
457#else
458static int nitrox_debugfs_init(struct nitrox_device *ndev)
459{
460 return 0;
461}
462
463static void nitrox_debugfs_exit(struct nitrox_device *ndev)
464{
465}
466#endif
467
468/** 322/**
469 * nitrox_probe - NITROX Initialization function. 323 * nitrox_probe - NITROX Initialization function.
470 * @pdev: PCI device information struct 324 * @pdev: PCI device information struct
@@ -487,7 +341,7 @@ static int nitrox_probe(struct pci_dev *pdev,
487 return err; 341 return err;
488 342
489 /* do FLR */ 343 /* do FLR */
490 err = nitrox_reset_device(pdev); 344 err = nitrox_device_flr(pdev);
491 if (err) { 345 if (err) {
492 dev_err(&pdev->dev, "FLR failed\n"); 346 dev_err(&pdev->dev, "FLR failed\n");
493 pci_disable_device(pdev); 347 pci_disable_device(pdev);
@@ -555,7 +409,12 @@ static int nitrox_probe(struct pci_dev *pdev,
555 if (err) 409 if (err)
556 goto pf_hw_fail; 410 goto pf_hw_fail;
557 411
558 set_bit(NITROX_READY, &ndev->status); 412 /* clear the statistics */
413 atomic64_set(&ndev->stats.posted, 0);
414 atomic64_set(&ndev->stats.completed, 0);
415 atomic64_set(&ndev->stats.dropped, 0);
416
417 atomic_set(&ndev->state, __NDEV_READY);
559 /* barrier to sync with other cpus */ 418 /* barrier to sync with other cpus */
560 smp_mb__after_atomic(); 419 smp_mb__after_atomic();
561 420
@@ -567,7 +426,7 @@ static int nitrox_probe(struct pci_dev *pdev,
567 426
568crypto_fail: 427crypto_fail:
569 nitrox_debugfs_exit(ndev); 428 nitrox_debugfs_exit(ndev);
570 clear_bit(NITROX_READY, &ndev->status); 429 atomic_set(&ndev->state, __NDEV_NOT_READY);
571 /* barrier to sync with other cpus */ 430 /* barrier to sync with other cpus */
572 smp_mb__after_atomic(); 431 smp_mb__after_atomic();
573pf_hw_fail: 432pf_hw_fail:
@@ -602,11 +461,16 @@ static void nitrox_remove(struct pci_dev *pdev)
602 dev_info(DEV(ndev), "Removing Device %x:%x\n", 461 dev_info(DEV(ndev), "Removing Device %x:%x\n",
603 ndev->hw.vendor_id, ndev->hw.device_id); 462 ndev->hw.vendor_id, ndev->hw.device_id);
604 463
605 clear_bit(NITROX_READY, &ndev->status); 464 atomic_set(&ndev->state, __NDEV_NOT_READY);
606 /* barrier to sync with other cpus */ 465 /* barrier to sync with other cpus */
607 smp_mb__after_atomic(); 466 smp_mb__after_atomic();
608 467
609 nitrox_remove_from_devlist(ndev); 468 nitrox_remove_from_devlist(ndev);
469
470#ifdef CONFIG_PCI_IOV
471 /* disable SR-IOV */
472 nitrox_sriov_configure(pdev, 0);
473#endif
610 nitrox_crypto_unregister(); 474 nitrox_crypto_unregister();
611 nitrox_debugfs_exit(ndev); 475 nitrox_debugfs_exit(ndev);
612 nitrox_pf_sw_cleanup(ndev); 476 nitrox_pf_sw_cleanup(ndev);
@@ -632,6 +496,9 @@ static struct pci_driver nitrox_driver = {
632 .probe = nitrox_probe, 496 .probe = nitrox_probe,
633 .remove = nitrox_remove, 497 .remove = nitrox_remove,
634 .shutdown = nitrox_shutdown, 498 .shutdown = nitrox_shutdown,
499#ifdef CONFIG_PCI_IOV
500 .sriov_configure = nitrox_sriov_configure,
501#endif
635}; 502};
636 503
637module_pci_driver(nitrox_driver); 504module_pci_driver(nitrox_driver);
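
Note: the nitrox_main.c hunk above replaces a hand-rolled reset (manual PCI_EXP_DEVCTL_BCR_FLR write plus msleep) with the PCI core's FLR helpers. A minimal sketch of that pattern follows; example_reset() is an invented name, while pci_save_state(), pcie_has_flr(), pcie_flr() and pci_restore_state() are the real kernel APIs the hunk uses.

#include <linux/pci.h>

/* Sketch: function-level reset via the PCI core, mirroring nitrox_device_flr() */
static int example_reset(struct pci_dev *pdev)
{
	pci_save_state(pdev);

	/* the core checks whether FLR is advertised and handles the required delays */
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);

	pci_restore_state(pdev);
	return 0;
}
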
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4a362fc22f62..3987cd84c033 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
382{ 382{
383 INIT_LIST_HEAD(&sr->backlog); 383 INIT_LIST_HEAD(&sr->backlog);
384 384
385 spin_lock_bh(&cmdq->backlog_lock); 385 spin_lock_bh(&cmdq->backlog_qlock);
386 list_add_tail(&sr->backlog, &cmdq->backlog_head); 386 list_add_tail(&sr->backlog, &cmdq->backlog_head);
387 atomic_inc(&cmdq->backlog_count); 387 atomic_inc(&cmdq->backlog_count);
388 atomic_set(&sr->status, REQ_BACKLOG); 388 atomic_set(&sr->status, REQ_BACKLOG);
389 spin_unlock_bh(&cmdq->backlog_lock); 389 spin_unlock_bh(&cmdq->backlog_qlock);
390} 390}
391 391
392static inline void response_list_add(struct nitrox_softreq *sr, 392static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
394{ 394{
395 INIT_LIST_HEAD(&sr->response); 395 INIT_LIST_HEAD(&sr->response);
396 396
397 spin_lock_bh(&cmdq->response_lock); 397 spin_lock_bh(&cmdq->resp_qlock);
398 list_add_tail(&sr->response, &cmdq->response_head); 398 list_add_tail(&sr->response, &cmdq->response_head);
399 spin_unlock_bh(&cmdq->response_lock); 399 spin_unlock_bh(&cmdq->resp_qlock);
400} 400}
401 401
402static inline void response_list_del(struct nitrox_softreq *sr, 402static inline void response_list_del(struct nitrox_softreq *sr,
403 struct nitrox_cmdq *cmdq) 403 struct nitrox_cmdq *cmdq)
404{ 404{
405 spin_lock_bh(&cmdq->response_lock); 405 spin_lock_bh(&cmdq->resp_qlock);
406 list_del(&sr->response); 406 list_del(&sr->response);
407 spin_unlock_bh(&cmdq->response_lock); 407 spin_unlock_bh(&cmdq->resp_qlock);
408} 408}
409 409
410static struct nitrox_softreq * 410static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
439 int idx; 439 int idx;
440 u8 *ent; 440 u8 *ent;
441 441
442 spin_lock_bh(&cmdq->cmdq_lock); 442 spin_lock_bh(&cmdq->cmd_qlock);
443 443
444 idx = cmdq->write_idx; 444 idx = cmdq->write_idx;
445 /* copy the instruction */ 445 /* copy the instruction */
446 ent = cmdq->head + (idx * cmdq->instr_size); 446 ent = cmdq->base + (idx * cmdq->instr_size);
447 memcpy(ent, &sr->instr, cmdq->instr_size); 447 memcpy(ent, &sr->instr, cmdq->instr_size);
448 448
449 atomic_set(&sr->status, REQ_POSTED); 449 atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,10 @@ static void post_se_instr(struct nitrox_softreq *sr,
459 459
460 cmdq->write_idx = incr_index(idx, 1, ndev->qlen); 460 cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
461 461
462 spin_unlock_bh(&cmdq->cmdq_lock); 462 spin_unlock_bh(&cmdq->cmd_qlock);
463
464 /* increment the posted command count */
465 atomic64_inc(&ndev->stats.posted);
463} 466}
464 467
465static int post_backlog_cmds(struct nitrox_cmdq *cmdq) 468static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
471 if (!atomic_read(&cmdq->backlog_count)) 474 if (!atomic_read(&cmdq->backlog_count))
472 return 0; 475 return 0;
473 476
474 spin_lock_bh(&cmdq->backlog_lock); 477 spin_lock_bh(&cmdq->backlog_qlock);
475 478
476 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { 479 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
477 struct skcipher_request *skreq; 480 struct skcipher_request *skreq;
@@ -494,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
494 /* backlog requests are posted, wakeup with -EINPROGRESS */ 497 /* backlog requests are posted, wakeup with -EINPROGRESS */
495 skcipher_request_complete(skreq, -EINPROGRESS); 498 skcipher_request_complete(skreq, -EINPROGRESS);
496 } 499 }
497 spin_unlock_bh(&cmdq->backlog_lock); 500 spin_unlock_bh(&cmdq->backlog_qlock);
498 501
499 return ret; 502 return ret;
500} 503}
@@ -508,8 +511,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
508 post_backlog_cmds(cmdq); 511 post_backlog_cmds(cmdq);
509 512
510 if (unlikely(cmdq_full(cmdq, ndev->qlen))) { 513 if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
511 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 514 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
515 /* increment drop count */
516 atomic64_inc(&ndev->stats.dropped);
512 return -ENOSPC; 517 return -ENOSPC;
518 }
513 /* add to backlog list */ 519 /* add to backlog list */
514 backlog_list_add(sr, cmdq); 520 backlog_list_add(sr, cmdq);
515 return -EBUSY; 521 return -EBUSY;
@@ -572,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
572 /* select the queue */ 578 /* select the queue */
573 qno = smp_processor_id() % ndev->nr_queues; 579 qno = smp_processor_id() % ndev->nr_queues;
574 580
575 sr->cmdq = &ndev->pkt_cmdqs[qno]; 581 sr->cmdq = &ndev->pkt_inq[qno];
576 582
577 /* 583 /*
578 * 64-Byte Instruction Format 584 * 64-Byte Instruction Format
@@ -694,6 +700,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
694 READ_ONCE(sr->resp.orh)); 700 READ_ONCE(sr->resp.orh));
695 } 701 }
696 atomic_dec(&cmdq->pending_count); 702 atomic_dec(&cmdq->pending_count);
703 atomic64_inc(&ndev->stats.completed);
697 /* sync with other cpus */ 704 /* sync with other cpus */
698 smp_mb__after_atomic(); 705 smp_mb__after_atomic();
699 /* remove from response list */ 706 /* remove from response list */
@@ -714,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
714} 721}
715 722
716/** 723/**
717 * pkt_slc_resp_handler - post processing of SE responses 724 * pkt_slc_resp_tasklet - post processing of SE responses
718 */ 725 */
719void pkt_slc_resp_handler(unsigned long data) 726void pkt_slc_resp_tasklet(unsigned long data)
720{ 727{
721 struct bh_data *bh = (void *)(uintptr_t)(data); 728 struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
722 struct nitrox_cmdq *cmdq = bh->cmdq; 729 struct nitrox_cmdq *cmdq = qvec->cmdq;
723 union nps_pkt_slc_cnts pkt_slc_cnts; 730 union nps_pkt_slc_cnts slc_cnts;
724 731
725 /* read completion count */ 732 /* read completion count */
726 pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr); 733 slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
727 /* resend the interrupt if more work to do */ 734 /* resend the interrupt if more work to do */
728 pkt_slc_cnts.s.resend = 1; 735 slc_cnts.s.resend = 1;
729 736
730 process_response_list(cmdq); 737 process_response_list(cmdq);
731 738
@@ -733,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
733 * clear the interrupt with resend bit enabled, 740 * clear the interrupt with resend bit enabled,
734 * MSI-X interrupt generates if Completion count > Threshold 741 * MSI-X interrupt generates if Completion count > Threshold
735 */ 742 */
736 writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr); 743 writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
737 /* order the writes */ 744 /* order the writes */
738 mmiowb(); 745 mmiowb();
739 746
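
Note: the nitrox_reqmgr.c hunks above add lockless per-device statistics (posted/dropped on the submit path, completed in the response tasklet). A sketch of that counter pattern with invented names (example_stats, example_post), shown only to illustrate the atomic64 usage:

#include <linux/atomic.h>
#include <linux/types.h>

struct example_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};

static void example_stats_reset(struct example_stats *st)
{
	atomic64_set(&st->posted, 0);
	atomic64_set(&st->completed, 0);
	atomic64_set(&st->dropped, 0);
}

/* hot path: a single atomic increment, no queue lock held */
static void example_post(struct example_stats *st)
{
	atomic64_inc(&st->posted);
}

static u64 example_inflight(struct example_stats *st)
{
	return atomic64_read(&st->posted) - atomic64_read(&st->completed);
}
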
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
new file mode 100644
index 000000000000..30c0aa874583
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -0,0 +1,151 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/pci.h>
3#include <linux/delay.h>
4
5#include "nitrox_dev.h"
6#include "nitrox_hal.h"
7#include "nitrox_common.h"
8#include "nitrox_isr.h"
9
10static inline bool num_vfs_valid(int num_vfs)
11{
12 bool valid = false;
13
14 switch (num_vfs) {
15 case 16:
16 case 32:
17 case 64:
18 case 128:
19 valid = true;
20 break;
21 }
22
23 return valid;
24}
25
26static inline enum vf_mode num_vfs_to_mode(int num_vfs)
27{
28 enum vf_mode mode = 0;
29
30 switch (num_vfs) {
31 case 0:
32 mode = __NDEV_MODE_PF;
33 break;
34 case 16:
35 mode = __NDEV_MODE_VF16;
36 break;
37 case 32:
38 mode = __NDEV_MODE_VF32;
39 break;
40 case 64:
41 mode = __NDEV_MODE_VF64;
42 break;
43 case 128:
44 mode = __NDEV_MODE_VF128;
45 break;
46 }
47
48 return mode;
49}
50
51static void pf_sriov_cleanup(struct nitrox_device *ndev)
52{
53 /* PF has no queues in SR-IOV mode */
54 atomic_set(&ndev->state, __NDEV_NOT_READY);
55 /* unregister crypto algorithms */
56 nitrox_crypto_unregister();
57
58 /* cleanup PF resources */
59 nitrox_unregister_interrupts(ndev);
60 nitrox_common_sw_cleanup(ndev);
61}
62
63static int pf_sriov_init(struct nitrox_device *ndev)
64{
65 int err;
66
67 /* allocate resources for PF */
68 err = nitrox_common_sw_init(ndev);
69 if (err)
70 return err;
71
72 err = nitrox_register_interrupts(ndev);
73 if (err) {
74 nitrox_common_sw_cleanup(ndev);
75 return err;
76 }
77
78 /* configure the packet queues */
79 nitrox_config_pkt_input_rings(ndev);
80 nitrox_config_pkt_solicit_ports(ndev);
81
82 /* set device to ready state */
83 atomic_set(&ndev->state, __NDEV_READY);
84
85 /* register crypto algorithms */
86 return nitrox_crypto_register();
87}
88
89static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
90{
91 struct nitrox_device *ndev = pci_get_drvdata(pdev);
92 int err;
93
94 if (!num_vfs_valid(num_vfs)) {
95 dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
96 return -EINVAL;
97 }
98
99 if (pci_num_vf(pdev) == num_vfs)
100 return num_vfs;
101
102 err = pci_enable_sriov(pdev, num_vfs);
103 if (err) {
104 dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
105 return err;
106 }
107 dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
108
109 ndev->num_vfs = num_vfs;
110 ndev->mode = num_vfs_to_mode(num_vfs);
111 /* set bit in flags */
112 set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
113
114 /* cleanup PF resources */
115 pf_sriov_cleanup(ndev);
116
117 config_nps_core_vfcfg_mode(ndev, ndev->mode);
118
119 return num_vfs;
120}
121
122static int nitrox_sriov_disable(struct pci_dev *pdev)
123{
124 struct nitrox_device *ndev = pci_get_drvdata(pdev);
125
126 if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
127 return 0;
128
129 if (pci_vfs_assigned(pdev)) {
130 dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
131 return -EPERM;
132 }
133 pci_disable_sriov(pdev);
134 /* clear bit in flags */
135 clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
136
137 ndev->num_vfs = 0;
138 ndev->mode = __NDEV_MODE_PF;
139
140 config_nps_core_vfcfg_mode(ndev, ndev->mode);
141
142 return pf_sriov_init(ndev);
143}
144
145int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
146{
147 if (!num_vfs)
148 return nitrox_sriov_disable(pdev);
149
150 return nitrox_sriov_enable(pdev, num_vfs);
151}
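
Note: nitrox_sriov_configure() above is reached through the driver's .sriov_configure hook, which the PCI core exposes to user space as the sriov_numvfs sysfs attribute (e.g. echo 16 > /sys/bus/pci/devices/<BDF>/sriov_numvfs to enable VFs, echo 0 to disable them). A bare-bones sketch of the same hook for a hypothetical driver with no PF/VF resource hand-off:

#include <linux/pci.h>

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int err;

	if (!num_vfs) {
		/* refuse to pull VFs out from under a guest */
		if (pci_vfs_assigned(pdev))
			return -EPERM;
		pci_disable_sriov(pdev);
		return 0;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err)
		return err;

	/* on success the callback returns the number of VFs enabled */
	return num_vfs;
}
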
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 94b5bcf5b628..ca4630b8395f 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -102,7 +102,7 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
102 ctx->u.aes.key_len = key_len / 2; 102 ctx->u.aes.key_len = key_len / 2;
103 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); 103 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
104 104
105 return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); 105 return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
106} 106}
107 107
108static int ccp_aes_xts_crypt(struct ablkcipher_request *req, 108static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
@@ -151,12 +151,13 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
151 (ctx->u.aes.key_len != AES_KEYSIZE_256)) 151 (ctx->u.aes.key_len != AES_KEYSIZE_256))
152 fallback = 1; 152 fallback = 1;
153 if (fallback) { 153 if (fallback) {
154 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher); 154 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
155 ctx->u.aes.tfm_skcipher);
155 156
156 /* Use the fallback to process the request for any 157 /* Use the fallback to process the request for any
157 * unsupported unit sizes or key sizes 158 * unsupported unit sizes or key sizes
158 */ 159 */
159 skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher); 160 skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
160 skcipher_request_set_callback(subreq, req->base.flags, 161 skcipher_request_set_callback(subreq, req->base.flags,
161 NULL, NULL); 162 NULL, NULL);
162 skcipher_request_set_crypt(subreq, req->src, req->dst, 163 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -203,12 +204,12 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
203static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm) 204static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
204{ 205{
205 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 206 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
206 struct crypto_skcipher *fallback_tfm; 207 struct crypto_sync_skcipher *fallback_tfm;
207 208
208 ctx->complete = ccp_aes_xts_complete; 209 ctx->complete = ccp_aes_xts_complete;
209 ctx->u.aes.key_len = 0; 210 ctx->u.aes.key_len = 0;
210 211
211 fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0, 212 fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
212 CRYPTO_ALG_ASYNC | 213 CRYPTO_ALG_ASYNC |
213 CRYPTO_ALG_NEED_FALLBACK); 214 CRYPTO_ALG_NEED_FALLBACK);
214 if (IS_ERR(fallback_tfm)) { 215 if (IS_ERR(fallback_tfm)) {
@@ -226,7 +227,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
226{ 227{
227 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 228 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
228 229
229 crypto_free_skcipher(ctx->u.aes.tfm_skcipher); 230 crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
230} 231}
231 232
232static int ccp_register_aes_xts_alg(struct list_head *head, 233static int ccp_register_aes_xts_alg(struct list_head *head,
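
Note: this ccp hunk is the template for most of the driver changes that follow (chelsio, mxs-dcp, omap-aes, picoxcell): software fallbacks move to the crypto_sync_skcipher type, which is guaranteed synchronous and has a bounded request size, so SYNC_SKCIPHER_REQUEST_ON_STACK() no longer needs a VLA. A self-contained sketch of the full fallback lifecycle, assuming "cbc(aes)" purely as an example algorithm:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static int example_fallback_encrypt(const u8 *key, unsigned int keylen,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int nbytes, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		/* request size is known at compile time, so no VLA here */
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tfm);

		skcipher_request_set_sync_tfm(subreq, tfm);
		skcipher_request_set_callback(subreq, 0, NULL, NULL);
		skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}
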
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b9fd090c46c2..28819e11db96 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -88,7 +88,7 @@ static inline struct ccp_crypto_ahash_alg *
88/***** AES related defines *****/ 88/***** AES related defines *****/
89struct ccp_aes_ctx { 89struct ccp_aes_ctx {
90 /* Fallback cipher for XTS with unsupported unit sizes */ 90 /* Fallback cipher for XTS with unsupported unit sizes */
91 struct crypto_skcipher *tfm_skcipher; 91 struct crypto_sync_skcipher *tfm_skcipher;
92 92
93 /* Cipher used to generate CMAC K1/K2 keys */ 93 /* Cipher used to generate CMAC K1/K2 keys */
94 struct crypto_cipher *tfm_cipher; 94 struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 72790d88236d..d64a78ccc03e 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -31,8 +31,9 @@
31 ((psp_master->api_major) >= _maj && \ 31 ((psp_master->api_major) >= _maj && \
32 (psp_master->api_minor) >= _min) 32 (psp_master->api_minor) >= _min)
33 33
34#define DEVICE_NAME "sev" 34#define DEVICE_NAME "sev"
35#define SEV_FW_FILE "amd/sev.fw" 35#define SEV_FW_FILE "amd/sev.fw"
36#define SEV_FW_NAME_SIZE 64
36 37
37static DEFINE_MUTEX(sev_cmd_mutex); 38static DEFINE_MUTEX(sev_cmd_mutex);
38static struct sev_misc_dev *misc_dev; 39static struct sev_misc_dev *misc_dev;
@@ -423,7 +424,7 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
423static int sev_get_api_version(void) 424static int sev_get_api_version(void)
424{ 425{
425 struct sev_user_data_status *status; 426 struct sev_user_data_status *status;
426 int error, ret; 427 int error = 0, ret;
427 428
428 status = &psp_master->status_cmd_buf; 429 status = &psp_master->status_cmd_buf;
429 ret = sev_platform_status(status, &error); 430 ret = sev_platform_status(status, &error);
@@ -440,6 +441,41 @@ static int sev_get_api_version(void)
440 return 0; 441 return 0;
441} 442}
442 443
444static int sev_get_firmware(struct device *dev,
445 const struct firmware **firmware)
446{
447 char fw_name_specific[SEV_FW_NAME_SIZE];
448 char fw_name_subset[SEV_FW_NAME_SIZE];
449
450 snprintf(fw_name_specific, sizeof(fw_name_specific),
451 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
452 boot_cpu_data.x86, boot_cpu_data.x86_model);
453
454 snprintf(fw_name_subset, sizeof(fw_name_subset),
455 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
456 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
457
458 /* Check for SEV FW for a particular model.
459 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
460 *
461 * or
462 *
463 * Check for SEV FW common to a subset of models.
464 * Ex. amd_sev_fam17h_model0xh.sbin for
465 * Family 17h Model 00h -- Family 17h Model 0Fh
466 *
467 * or
468 *
469 * Fall-back to using generic name: sev.fw
470 */
471 if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
472 (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
473 (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
474 return 0;
475
476 return -ENOENT;
477}
478
443/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ 479/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
444static int sev_update_firmware(struct device *dev) 480static int sev_update_firmware(struct device *dev)
445{ 481{
@@ -449,9 +485,10 @@ static int sev_update_firmware(struct device *dev)
449 struct page *p; 485 struct page *p;
450 u64 data_size; 486 u64 data_size;
451 487
452 ret = request_firmware(&firmware, SEV_FW_FILE, dev); 488 if (sev_get_firmware(dev, &firmware) == -ENOENT) {
453 if (ret < 0) 489 dev_dbg(dev, "No SEV firmware file present\n");
454 return -1; 490 return -1;
491 }
455 492
456 /* 493 /*
457 * SEV FW expects the physical address given to it to be 32 494 * SEV FW expects the physical address given to it to be 32
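
Note: sev_get_firmware() above probes a model-specific image, then a family/model-subset image, then the generic sev.fw, using firmware_request_nowarn() so the misses stay quiet. A sketch of the same "most specific name first" lookup; the firmware names below are invented for the example.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>

static int example_get_firmware(struct device *dev,
				const struct firmware **fw)
{
	static const char * const names[] = {
		"vendor/chip_rev2.bin",	/* exact match, preferred */
		"vendor/chip.bin",	/* generic fallback */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		/* _nowarn keeps the log quiet when a candidate is absent */
		if (firmware_request_nowarn(fw, names[i], dev) >= 0)
			return 0;
	}

	return -ENOENT;
}
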
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 71734f254fd1..b75dc7db2d4a 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -33,8 +33,31 @@ struct sp_platform {
33 unsigned int irq_count; 33 unsigned int irq_count;
34}; 34};
35 35
36static const struct acpi_device_id sp_acpi_match[]; 36static const struct sp_dev_vdata dev_vdata[] = {
37static const struct of_device_id sp_of_match[]; 37 {
38 .bar = 0,
39#ifdef CONFIG_CRYPTO_DEV_SP_CCP
40 .ccp_vdata = &ccpv3_platform,
41#endif
42 },
43};
44
45#ifdef CONFIG_ACPI
46static const struct acpi_device_id sp_acpi_match[] = {
47 { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
48 { },
49};
50MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
51#endif
52
53#ifdef CONFIG_OF
54static const struct of_device_id sp_of_match[] = {
55 { .compatible = "amd,ccp-seattle-v1a",
56 .data = (const void *)&dev_vdata[0] },
57 { },
58};
59MODULE_DEVICE_TABLE(of, sp_of_match);
60#endif
38 61
39static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) 62static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
40{ 63{
@@ -201,32 +224,6 @@ static int sp_platform_resume(struct platform_device *pdev)
201} 224}
202#endif 225#endif
203 226
204static const struct sp_dev_vdata dev_vdata[] = {
205 {
206 .bar = 0,
207#ifdef CONFIG_CRYPTO_DEV_SP_CCP
208 .ccp_vdata = &ccpv3_platform,
209#endif
210 },
211};
212
213#ifdef CONFIG_ACPI
214static const struct acpi_device_id sp_acpi_match[] = {
215 { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
216 { },
217};
218MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
219#endif
220
221#ifdef CONFIG_OF
222static const struct of_device_id sp_of_match[] = {
223 { .compatible = "amd,ccp-seattle-v1a",
224 .data = (const void *)&dev_vdata[0] },
225 { },
226};
227MODULE_DEVICE_TABLE(of, sp_of_match);
228#endif
229
230static struct platform_driver sp_platform_driver = { 227static struct platform_driver sp_platform_driver = {
231 .driver = { 228 .driver = {
232 .name = "ccp", 229 .name = "ccp",
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index a091ae57f902..45985b955d2c 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
449 * @pdesc: pointer HW descriptor struct 449 * @pdesc: pointer HW descriptor struct
450 * @mode: Any one of the modes defined in [CC7x-DESC] 450 * @mode: Any one of the modes defined in [CC7x-DESC]
451 */ 451 */
452static inline void set_cipher_mode(struct cc_hw_desc *pdesc, 452static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
453 enum drv_cipher_mode mode)
454{ 453{
455 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode); 454 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
456} 455}
@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
461 * @pdesc: pointer HW descriptor struct 460 * @pdesc: pointer HW descriptor struct
462 * @mode: Any one of the modes defined in [CC7x-DESC] 461 * @mode: Any one of the modes defined in [CC7x-DESC]
463 */ 462 */
464static inline void set_cipher_config0(struct cc_hw_desc *pdesc, 463static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
465 enum drv_crypto_direction mode)
466{ 464{
467 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode); 465 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
468} 466}
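
Note: the ccree change above widens set_cipher_mode()/set_cipher_config0() to take a plain int, so callers can pass values from either of the two unrelated enums without an implicit enum-to-enum conversion; the descriptor word is still assembled with FIELD_PREP(). A tiny illustration of that packing over an invented register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE	GENMASK(5, 2)
#define EXAMPLE_CONF0	GENMASK(8, 6)

static inline u32 example_pack(int mode, int conf0)
{
	/* FIELD_PREP() shifts each value into its mask's bit position */
	return FIELD_PREP(EXAMPLE_MODE, mode) | FIELD_PREP(EXAMPLE_CONF0, conf0);
}
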
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 010bbf607797..db203f8be429 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -673,7 +673,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
673 return min(srclen, dstlen); 673 return min(srclen, dstlen);
674} 674}
675 675
676static int chcr_cipher_fallback(struct crypto_skcipher *cipher, 676static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
677 u32 flags, 677 u32 flags,
678 struct scatterlist *src, 678 struct scatterlist *src,
679 struct scatterlist *dst, 679 struct scatterlist *dst,
@@ -683,9 +683,9 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
683{ 683{
684 int err; 684 int err;
685 685
686 SKCIPHER_REQUEST_ON_STACK(subreq, cipher); 686 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
687 687
688 skcipher_request_set_tfm(subreq, cipher); 688 skcipher_request_set_sync_tfm(subreq, cipher);
689 skcipher_request_set_callback(subreq, flags, NULL, NULL); 689 skcipher_request_set_callback(subreq, flags, NULL, NULL);
690 skcipher_request_set_crypt(subreq, src, dst, 690 skcipher_request_set_crypt(subreq, src, dst,
691 nbytes, iv); 691 nbytes, iv);
@@ -856,13 +856,14 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
856 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 856 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
857 int err = 0; 857 int err = 0;
858 858
859 crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 859 crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
860 crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & 860 CRYPTO_TFM_REQ_MASK);
861 CRYPTO_TFM_REQ_MASK); 861 crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
862 err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); 862 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
863 err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
863 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 864 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
864 tfm->crt_flags |= 865 tfm->crt_flags |=
865 crypto_skcipher_get_flags(ablkctx->sw_cipher) & 866 crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
866 CRYPTO_TFM_RES_MASK; 867 CRYPTO_TFM_RES_MASK;
867 return err; 868 return err;
868} 869}
@@ -1337,8 +1338,7 @@ static int chcr_device_init(struct chcr_context *ctx)
1337 } 1338 }
1338 ctx->dev = u_ctx->dev; 1339 ctx->dev = u_ctx->dev;
1339 adap = padap(ctx->dev); 1340 adap = padap(ctx->dev);
1340 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, 1341 ntxq = u_ctx->lldi.ntxq;
1341 adap->vres.ncrypto_fc);
1342 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; 1342 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1343 txq_perchan = ntxq / u_ctx->lldi.nchan; 1343 txq_perchan = ntxq / u_ctx->lldi.nchan;
1344 spin_lock(&ctx->dev->lock_chcr_dev); 1344 spin_lock(&ctx->dev->lock_chcr_dev);
@@ -1369,8 +1369,8 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
1369 struct chcr_context *ctx = crypto_tfm_ctx(tfm); 1369 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1370 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1370 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1371 1371
1372 ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0, 1372 ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1373 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 1373 CRYPTO_ALG_NEED_FALLBACK);
1374 if (IS_ERR(ablkctx->sw_cipher)) { 1374 if (IS_ERR(ablkctx->sw_cipher)) {
1375 pr_err("failed to allocate fallback for %s\n", alg->cra_name); 1375 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1376 return PTR_ERR(ablkctx->sw_cipher); 1376 return PTR_ERR(ablkctx->sw_cipher);
@@ -1399,8 +1399,8 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1399 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) 1399 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1400 * cannot be used as fallback in chcr_handle_cipher_response 1400 * cannot be used as fallback in chcr_handle_cipher_response
1401 */ 1401 */
1402 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, 1402 ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1403 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 1403 CRYPTO_ALG_NEED_FALLBACK);
1404 if (IS_ERR(ablkctx->sw_cipher)) { 1404 if (IS_ERR(ablkctx->sw_cipher)) {
1405 pr_err("failed to allocate fallback for %s\n", alg->cra_name); 1405 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1406 return PTR_ERR(ablkctx->sw_cipher); 1406 return PTR_ERR(ablkctx->sw_cipher);
@@ -1415,7 +1415,7 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
1415 struct chcr_context *ctx = crypto_tfm_ctx(tfm); 1415 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1416 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1416 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1417 1417
1418 crypto_free_skcipher(ablkctx->sw_cipher); 1418 crypto_free_sync_skcipher(ablkctx->sw_cipher);
1419 if (ablkctx->aes_generic) 1419 if (ablkctx->aes_generic)
1420 crypto_free_cipher(ablkctx->aes_generic); 1420 crypto_free_cipher(ablkctx->aes_generic);
1421} 1421}
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 62249d4ed373..2c472e3c6aeb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -43,7 +43,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
43static struct cxgb4_uld_info chcr_uld_info = { 43static struct cxgb4_uld_info chcr_uld_info = {
44 .name = DRV_MODULE_NAME, 44 .name = DRV_MODULE_NAME,
45 .nrxq = MAX_ULD_QSETS, 45 .nrxq = MAX_ULD_QSETS,
46 .ntxq = MAX_ULD_QSETS, 46 /* Max ntxq will be derived from fw config file*/
47 .rxq_size = 1024, 47 .rxq_size = 1024,
48 .add = chcr_uld_add, 48 .add = chcr_uld_add,
49 .state_change = chcr_uld_state_change, 49 .state_change = chcr_uld_state_change,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 0d2c70c344f3..d37ef41f9ebe 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -170,7 +170,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
170} 170}
171 171
172struct ablk_ctx { 172struct ablk_ctx {
173 struct crypto_skcipher *sw_cipher; 173 struct crypto_sync_skcipher *sw_cipher;
174 struct crypto_cipher *aes_generic; 174 struct crypto_cipher *aes_generic;
175 __be32 key_ctx_hdr; 175 __be32 key_ctx_hdr;
176 unsigned int enckey_len; 176 unsigned int enckey_len;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 0997e166ea57..20209e29f814 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -234,8 +234,7 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
234 234
235 return; 235 return;
236out: 236out:
237 if (skb) 237 kfree_skb(skb);
238 kfree_skb(skb);
239} 238}
240 239
241static void release_tcp_port(struct sock *sk) 240static void release_tcp_port(struct sock *sk)
@@ -406,12 +405,10 @@ static int wait_for_states(struct sock *sk, unsigned int states)
406 405
407int chtls_disconnect(struct sock *sk, int flags) 406int chtls_disconnect(struct sock *sk, int flags)
408{ 407{
409 struct chtls_sock *csk;
410 struct tcp_sock *tp; 408 struct tcp_sock *tp;
411 int err; 409 int err;
412 410
413 tp = tcp_sk(sk); 411 tp = tcp_sk(sk);
414 csk = rcu_dereference_sk_user_data(sk);
415 chtls_purge_recv_queue(sk); 412 chtls_purge_recv_queue(sk);
416 chtls_purge_receive_queue(sk); 413 chtls_purge_receive_queue(sk);
417 chtls_purge_write_queue(sk); 414 chtls_purge_write_queue(sk);
@@ -1014,7 +1011,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
1014 const struct cpl_pass_accept_req *req, 1011 const struct cpl_pass_accept_req *req,
1015 struct chtls_dev *cdev) 1012 struct chtls_dev *cdev)
1016{ 1013{
1017 const struct tcphdr *tcph;
1018 struct inet_sock *newinet; 1014 struct inet_sock *newinet;
1019 const struct iphdr *iph; 1015 const struct iphdr *iph;
1020 struct net_device *ndev; 1016 struct net_device *ndev;
@@ -1036,7 +1032,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
1036 if (!dst) 1032 if (!dst)
1037 goto free_sk; 1033 goto free_sk;
1038 1034
1039 tcph = (struct tcphdr *)(iph + 1);
1040 n = dst_neigh_lookup(dst, &iph->saddr); 1035 n = dst_neigh_lookup(dst, &iph->saddr);
1041 if (!n) 1036 if (!n)
1042 goto free_sk; 1037 goto free_sk;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f59b044ebd25..f472c51abe56 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -272,8 +272,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
272 for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) 272 for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
273 kfree_skb(cdev->rspq_skb_cache[i]); 273 kfree_skb(cdev->rspq_skb_cache[i]);
274 kfree(cdev->lldi); 274 kfree(cdev->lldi);
275 if (cdev->askb) 275 kfree_skb(cdev->askb);
276 kfree_skb(cdev->askb);
277 kfree(cdev); 276 kfree(cdev);
278} 277}
279 278
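
Note: the two chtls cleanups above rely on kfree_skb() being a no-op when passed NULL, which is why the surrounding if (skb) / if (cdev->askb) guards could be dropped. Illustrative only:

#include <linux/skbuff.h>

static void example_cleanup(struct sk_buff *skb)
{
	kfree_skb(skb);		/* safe even when skb == NULL, no guard needed */
}
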
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 56bd28174f52..4e6ff32f8a7e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -28,9 +28,24 @@
28 28
29#define DCP_MAX_CHANS 4 29#define DCP_MAX_CHANS 4
30#define DCP_BUF_SZ PAGE_SIZE 30#define DCP_BUF_SZ PAGE_SIZE
31#define DCP_SHA_PAY_SZ 64
31 32
32#define DCP_ALIGNMENT 64 33#define DCP_ALIGNMENT 64
33 34
35/*
36 * Null hashes to align with hw behavior on imx6sl and ull
37 * these are flipped for consistency with hw output
38 */
39static const uint8_t sha1_null_hash[] =
40 "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
41 "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
42
43static const uint8_t sha256_null_hash[] =
44 "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
45 "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
46 "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
47 "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
48
34/* DCP DMA descriptor. */ 49/* DCP DMA descriptor. */
35struct dcp_dma_desc { 50struct dcp_dma_desc {
36 uint32_t next_cmd_addr; 51 uint32_t next_cmd_addr;
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
48 uint8_t aes_in_buf[DCP_BUF_SZ]; 63 uint8_t aes_in_buf[DCP_BUF_SZ];
49 uint8_t aes_out_buf[DCP_BUF_SZ]; 64 uint8_t aes_out_buf[DCP_BUF_SZ];
50 uint8_t sha_in_buf[DCP_BUF_SZ]; 65 uint8_t sha_in_buf[DCP_BUF_SZ];
66 uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
51 67
52 uint8_t aes_key[2 * AES_KEYSIZE_128]; 68 uint8_t aes_key[2 * AES_KEYSIZE_128];
53 69
@@ -84,7 +100,7 @@ struct dcp_async_ctx {
84 unsigned int hot:1; 100 unsigned int hot:1;
85 101
86 /* Crypto-specific context */ 102 /* Crypto-specific context */
87 struct crypto_skcipher *fallback; 103 struct crypto_sync_skcipher *fallback;
88 unsigned int key_len; 104 unsigned int key_len;
89 uint8_t key[AES_KEYSIZE_128]; 105 uint8_t key[AES_KEYSIZE_128];
90}; 106};
@@ -99,6 +115,11 @@ struct dcp_sha_req_ctx {
99 unsigned int fini:1; 115 unsigned int fini:1;
100}; 116};
101 117
118struct dcp_export_state {
119 struct dcp_sha_req_ctx req_ctx;
120 struct dcp_async_ctx async_ctx;
121};
122
102/* 123/*
103 * There can even be only one instance of the MXS DCP due to the 124 * There can even be only one instance of the MXS DCP due to the
104 * design of Linux Crypto API. 125 * design of Linux Crypto API.
@@ -209,6 +230,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
209 dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, 230 dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
210 DCP_BUF_SZ, DMA_FROM_DEVICE); 231 DCP_BUF_SZ, DMA_FROM_DEVICE);
211 232
233 if (actx->fill % AES_BLOCK_SIZE) {
234 dev_err(sdcp->dev, "Invalid block size!\n");
235 ret = -EINVAL;
236 goto aes_done_run;
237 }
238
212 /* Fill in the DMA descriptor. */ 239 /* Fill in the DMA descriptor. */
213 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | 240 desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
214 MXS_DCP_CONTROL0_INTERRUPT | 241 MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +265,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
238 265
239 ret = mxs_dcp_start_dma(actx); 266 ret = mxs_dcp_start_dma(actx);
240 267
268aes_done_run:
241 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, 269 dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
242 DMA_TO_DEVICE); 270 DMA_TO_DEVICE);
243 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); 271 dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +292,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
264 292
265 uint8_t *out_tmp, *src_buf, *dst_buf = NULL; 293 uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
266 uint32_t dst_off = 0; 294 uint32_t dst_off = 0;
295 uint32_t last_out_len = 0;
267 296
268 uint8_t *key = sdcp->coh->aes_key; 297 uint8_t *key = sdcp->coh->aes_key;
269 298
270 int ret = 0; 299 int ret = 0;
271 int split = 0; 300 int split = 0;
272 unsigned int i, len, clen, rem = 0; 301 unsigned int i, len, clen, rem = 0, tlen = 0;
273 int init = 0; 302 int init = 0;
303 bool limit_hit = false;
274 304
275 actx->fill = 0; 305 actx->fill = 0;
276 306
@@ -289,6 +319,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
289 for_each_sg(req->src, src, nents, i) { 319 for_each_sg(req->src, src, nents, i) {
290 src_buf = sg_virt(src); 320 src_buf = sg_virt(src);
291 len = sg_dma_len(src); 321 len = sg_dma_len(src);
322 tlen += len;
323 limit_hit = tlen > req->nbytes;
324
325 if (limit_hit)
326 len = req->nbytes - (tlen - len);
292 327
293 do { 328 do {
294 if (actx->fill + len > out_off) 329 if (actx->fill + len > out_off)
@@ -305,13 +340,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
305 * If we filled the buffer or this is the last SG, 340 * If we filled the buffer or this is the last SG,
306 * submit the buffer. 341 * submit the buffer.
307 */ 342 */
308 if (actx->fill == out_off || sg_is_last(src)) { 343 if (actx->fill == out_off || sg_is_last(src) ||
344 limit_hit) {
309 ret = mxs_dcp_run_aes(actx, req, init); 345 ret = mxs_dcp_run_aes(actx, req, init);
310 if (ret) 346 if (ret)
311 return ret; 347 return ret;
312 init = 0; 348 init = 0;
313 349
314 out_tmp = out_buf; 350 out_tmp = out_buf;
351 last_out_len = actx->fill;
315 while (dst && actx->fill) { 352 while (dst && actx->fill) {
316 if (!split) { 353 if (!split) {
317 dst_buf = sg_virt(dst); 354 dst_buf = sg_virt(dst);
@@ -334,6 +371,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
334 } 371 }
335 } 372 }
336 } while (len); 373 } while (len);
374
375 if (limit_hit)
376 break;
377 }
378
379 /* Copy the IV for CBC for chaining */
380 if (!rctx->ecb) {
381 if (rctx->enc)
382 memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
383 AES_BLOCK_SIZE);
384 else
385 memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
386 AES_BLOCK_SIZE);
337 } 387 }
338 388
339 return ret; 389 return ret;
@@ -380,10 +430,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
380{ 430{
381 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 431 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
382 struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm); 432 struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
383 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 433 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
384 int ret; 434 int ret;
385 435
386 skcipher_request_set_tfm(subreq, ctx->fallback); 436 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
387 skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); 437 skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
388 skcipher_request_set_crypt(subreq, req->src, req->dst, 438 skcipher_request_set_crypt(subreq, req->src, req->dst,
389 req->nbytes, req->info); 439 req->nbytes, req->info);
@@ -464,16 +514,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
464 * but is supported by in-kernel software implementation, we use 514 * but is supported by in-kernel software implementation, we use
465 * software fallback. 515 * software fallback.
466 */ 516 */
467 crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); 517 crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
468 crypto_skcipher_set_flags(actx->fallback, 518 crypto_sync_skcipher_set_flags(actx->fallback,
469 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 519 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
470 520
471 ret = crypto_skcipher_setkey(actx->fallback, key, len); 521 ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
472 if (!ret) 522 if (!ret)
473 return 0; 523 return 0;
474 524
475 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; 525 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
476 tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) & 526 tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
477 CRYPTO_TFM_RES_MASK; 527 CRYPTO_TFM_RES_MASK;
478 528
479 return ret; 529 return ret;
@@ -482,11 +532,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
482static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm) 532static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
483{ 533{
484 const char *name = crypto_tfm_alg_name(tfm); 534 const char *name = crypto_tfm_alg_name(tfm);
485 const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
486 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); 535 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
487 struct crypto_skcipher *blk; 536 struct crypto_sync_skcipher *blk;
488 537
489 blk = crypto_alloc_skcipher(name, 0, flags); 538 blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
490 if (IS_ERR(blk)) 539 if (IS_ERR(blk))
491 return PTR_ERR(blk); 540 return PTR_ERR(blk);
492 541
@@ -499,7 +548,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
499{ 548{
500 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); 549 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
501 550
502 crypto_free_skcipher(actx->fallback); 551 crypto_free_sync_skcipher(actx->fallback);
503} 552}
504 553
505/* 554/*
@@ -513,8 +562,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
513 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 562 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
514 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); 563 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
515 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); 564 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
516 struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
517
518 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; 565 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
519 566
520 dma_addr_t digest_phys = 0; 567 dma_addr_t digest_phys = 0;
@@ -536,10 +583,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
536 desc->payload = 0; 583 desc->payload = 0;
537 desc->status = 0; 584 desc->status = 0;
538 585
586 /*
587 * Align driver with hw behavior when generating null hashes
588 */
589 if (rctx->init && rctx->fini && desc->size == 0) {
590 struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
591 const uint8_t *sha_buf =
592 (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
593 sha1_null_hash : sha256_null_hash;
594 memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
595 ret = 0;
596 goto done_run;
597 }
598
539 /* Set HASH_TERM bit for last transfer block. */ 599 /* Set HASH_TERM bit for last transfer block. */
540 if (rctx->fini) { 600 if (rctx->fini) {
541 digest_phys = dma_map_single(sdcp->dev, req->result, 601 digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
542 halg->digestsize, DMA_FROM_DEVICE); 602 DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
543 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; 603 desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
544 desc->payload = digest_phys; 604 desc->payload = digest_phys;
545 } 605 }
@@ -547,9 +607,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
547 ret = mxs_dcp_start_dma(actx); 607 ret = mxs_dcp_start_dma(actx);
548 608
549 if (rctx->fini) 609 if (rctx->fini)
550 dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize, 610 dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
551 DMA_FROM_DEVICE); 611 DMA_FROM_DEVICE);
552 612
613done_run:
553 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE); 614 dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
554 615
555 return ret; 616 return ret;
@@ -567,6 +628,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
567 const int nents = sg_nents(req->src); 628 const int nents = sg_nents(req->src);
568 629
569 uint8_t *in_buf = sdcp->coh->sha_in_buf; 630 uint8_t *in_buf = sdcp->coh->sha_in_buf;
631 uint8_t *out_buf = sdcp->coh->sha_out_buf;
570 632
571 uint8_t *src_buf; 633 uint8_t *src_buf;
572 634
@@ -621,11 +683,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
621 683
622 actx->fill = 0; 684 actx->fill = 0;
623 685
624 /* For some reason, the result is flipped. */ 686 /* For some reason the result is flipped */
625 for (i = 0; i < halg->digestsize / 2; i++) { 687 for (i = 0; i < halg->digestsize; i++)
626 swap(req->result[i], 688 req->result[i] = out_buf[halg->digestsize - i - 1];
627 req->result[halg->digestsize - i - 1]);
628 }
629 } 689 }
630 690
631 return 0; 691 return 0;
@@ -766,14 +826,32 @@ static int dcp_sha_digest(struct ahash_request *req)
766 return dcp_sha_finup(req); 826 return dcp_sha_finup(req);
767} 827}
768 828
769static int dcp_sha_noimport(struct ahash_request *req, const void *in) 829static int dcp_sha_import(struct ahash_request *req, const void *in)
770{ 830{
771 return -ENOSYS; 831 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
832 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
833 struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
834 const struct dcp_export_state *export = in;
835
836 memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
837 memset(actx, 0, sizeof(struct dcp_async_ctx));
838 memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
839 memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
840
841 return 0;
772} 842}
773 843
774static int dcp_sha_noexport(struct ahash_request *req, void *out) 844static int dcp_sha_export(struct ahash_request *req, void *out)
775{ 845{
776 return -ENOSYS; 846 struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
848 struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
849 struct dcp_export_state *export = out;
850
851 memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
852 memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
853
854 return 0;
777} 855}
778 856
779static int dcp_sha_cra_init(struct crypto_tfm *tfm) 857static int dcp_sha_cra_init(struct crypto_tfm *tfm)
@@ -846,10 +924,11 @@ static struct ahash_alg dcp_sha1_alg = {
846 .final = dcp_sha_final, 924 .final = dcp_sha_final,
847 .finup = dcp_sha_finup, 925 .finup = dcp_sha_finup,
848 .digest = dcp_sha_digest, 926 .digest = dcp_sha_digest,
849 .import = dcp_sha_noimport, 927 .import = dcp_sha_import,
850 .export = dcp_sha_noexport, 928 .export = dcp_sha_export,
851 .halg = { 929 .halg = {
852 .digestsize = SHA1_DIGEST_SIZE, 930 .digestsize = SHA1_DIGEST_SIZE,
931 .statesize = sizeof(struct dcp_export_state),
853 .base = { 932 .base = {
854 .cra_name = "sha1", 933 .cra_name = "sha1",
855 .cra_driver_name = "sha1-dcp", 934 .cra_driver_name = "sha1-dcp",
@@ -872,10 +951,11 @@ static struct ahash_alg dcp_sha256_alg = {
872 .final = dcp_sha_final, 951 .final = dcp_sha_final,
873 .finup = dcp_sha_finup, 952 .finup = dcp_sha_finup,
874 .digest = dcp_sha_digest, 953 .digest = dcp_sha_digest,
875 .import = dcp_sha_noimport, 954 .import = dcp_sha_import,
876 .export = dcp_sha_noexport, 955 .export = dcp_sha_export,
877 .halg = { 956 .halg = {
878 .digestsize = SHA256_DIGEST_SIZE, 957 .digestsize = SHA256_DIGEST_SIZE,
958 .statesize = sizeof(struct dcp_export_state),
879 .base = { 959 .base = {
880 .cra_name = "sha256", 960 .cra_name = "sha256",
881 .cra_driver_name = "sha256-dcp", 961 .cra_driver_name = "sha256-dcp",
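
Note: mxs-dcp now implements .import/.export and advertises .statesize, which the ahash API needs so callers can checkpoint a partial hash and resume it later. A caller-side sketch of that round trip (error handling trimmed; the snapshot is assumed to be taken somewhere after crypto_ahash_update()):

#include <crypto/hash.h>
#include <linux/slab.h>

static int example_save_restore(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int err;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* snapshot the partial digest state... */
	err = crypto_ahash_export(req, state);
	if (!err)
		/* ...and later resume exactly where it left off */
		err = crypto_ahash_import(req, state);

	kfree(state);
	return err;
}
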
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9019f6b67986..a553ffddb11b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -522,9 +522,9 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
522 !!(mode & FLAGS_CBC)); 522 !!(mode & FLAGS_CBC));
523 523
524 if (req->nbytes < aes_fallback_sz) { 524 if (req->nbytes < aes_fallback_sz) {
525 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 525 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
526 526
527 skcipher_request_set_tfm(subreq, ctx->fallback); 527 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
528 skcipher_request_set_callback(subreq, req->base.flags, NULL, 528 skcipher_request_set_callback(subreq, req->base.flags, NULL,
529 NULL); 529 NULL);
530 skcipher_request_set_crypt(subreq, req->src, req->dst, 530 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -564,11 +564,11 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
564 memcpy(ctx->key, key, keylen); 564 memcpy(ctx->key, key, keylen);
565 ctx->keylen = keylen; 565 ctx->keylen = keylen;
566 566
567 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); 567 crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
568 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & 568 crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
569 CRYPTO_TFM_REQ_MASK); 569 CRYPTO_TFM_REQ_MASK);
570 570
571 ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); 571 ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
572 if (!ret) 572 if (!ret)
573 return 0; 573 return 0;
574 574
@@ -613,11 +613,10 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
613static int omap_aes_cra_init(struct crypto_tfm *tfm) 613static int omap_aes_cra_init(struct crypto_tfm *tfm)
614{ 614{
615 const char *name = crypto_tfm_alg_name(tfm); 615 const char *name = crypto_tfm_alg_name(tfm);
616 const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
617 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); 616 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
618 struct crypto_skcipher *blk; 617 struct crypto_sync_skcipher *blk;
619 618
620 blk = crypto_alloc_skcipher(name, 0, flags); 619 blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
621 if (IS_ERR(blk)) 620 if (IS_ERR(blk))
622 return PTR_ERR(blk); 621 return PTR_ERR(blk);
623 622
@@ -667,7 +666,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
667 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); 666 struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
668 667
669 if (ctx->fallback) 668 if (ctx->fallback)
670 crypto_free_skcipher(ctx->fallback); 669 crypto_free_sync_skcipher(ctx->fallback);
671 670
672 ctx->fallback = NULL; 671 ctx->fallback = NULL;
673} 672}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index fc3b46a85809..7e02920ef6f8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -101,7 +101,7 @@ struct omap_aes_ctx {
101 int keylen; 101 int keylen;
102 u32 key[AES_KEYSIZE_256 / sizeof(u32)]; 102 u32 key[AES_KEYSIZE_256 / sizeof(u32)];
103 u8 nonce[4]; 103 u8 nonce[4];
104 struct crypto_skcipher *fallback; 104 struct crypto_sync_skcipher *fallback;
105 struct crypto_skcipher *ctr; 105 struct crypto_skcipher *ctr;
106}; 106};
107 107
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 321d5e2ac833..a28f1d18fe01 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
171 * The fallback cipher. If the operation can't be done in hardware, 171 * The fallback cipher. If the operation can't be done in hardware,
172 * fallback to a software version. 172 * fallback to a software version.
173 */ 173 */
174 struct crypto_skcipher *sw_cipher; 174 struct crypto_sync_skcipher *sw_cipher;
175}; 175};
176 176
177/* AEAD cipher context. */ 177/* AEAD cipher context. */
@@ -799,17 +799,17 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
799 * Set the fallback transform to use the same request flags as 799 * Set the fallback transform to use the same request flags as
800 * the hardware transform. 800 * the hardware transform.
801 */ 801 */
802 crypto_skcipher_clear_flags(ctx->sw_cipher, 802 crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
803 CRYPTO_TFM_REQ_MASK); 803 CRYPTO_TFM_REQ_MASK);
804 crypto_skcipher_set_flags(ctx->sw_cipher, 804 crypto_sync_skcipher_set_flags(ctx->sw_cipher,
805 cipher->base.crt_flags & 805 cipher->base.crt_flags &
806 CRYPTO_TFM_REQ_MASK); 806 CRYPTO_TFM_REQ_MASK);
807 807
808 err = crypto_skcipher_setkey(ctx->sw_cipher, key, len); 808 err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
809 809
810 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 810 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
811 tfm->crt_flags |= 811 tfm->crt_flags |=
812 crypto_skcipher_get_flags(ctx->sw_cipher) & 812 crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
813 CRYPTO_TFM_RES_MASK; 813 CRYPTO_TFM_RES_MASK;
814 814
815 if (err) 815 if (err)
@@ -914,7 +914,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
914 struct crypto_tfm *old_tfm = 914 struct crypto_tfm *old_tfm =
915 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); 915 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
916 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); 916 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
917 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher); 917 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
918 int err; 918 int err;
919 919
920 /* 920 /*
@@ -922,7 +922,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
922 * the ciphering has completed, put the old transform back into the 922 * the ciphering has completed, put the old transform back into the
923 * request. 923 * request.
924 */ 924 */
925 skcipher_request_set_tfm(subreq, ctx->sw_cipher); 925 skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
926 skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); 926 skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
927 skcipher_request_set_crypt(subreq, req->src, req->dst, 927 skcipher_request_set_crypt(subreq, req->src, req->dst,
928 req->nbytes, req->info); 928 req->nbytes, req->info);
@@ -1020,9 +1020,8 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
1020 ctx->generic.flags = spacc_alg->type; 1020 ctx->generic.flags = spacc_alg->type;
1021 ctx->generic.engine = engine; 1021 ctx->generic.engine = engine;
1022 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { 1022 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
1023 ctx->sw_cipher = crypto_alloc_skcipher( 1023 ctx->sw_cipher = crypto_alloc_sync_skcipher(
1024 alg->cra_name, 0, CRYPTO_ALG_ASYNC | 1024 alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
1025 CRYPTO_ALG_NEED_FALLBACK);
1026 if (IS_ERR(ctx->sw_cipher)) { 1025 if (IS_ERR(ctx->sw_cipher)) {
1027 dev_warn(engine->dev, "failed to allocate fallback for %s\n", 1026 dev_warn(engine->dev, "failed to allocate fallback for %s\n",
1028 alg->cra_name); 1027 alg->cra_name);
@@ -1041,7 +1040,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
1041{ 1040{
1042 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 1041 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1043 1042
1044 crypto_free_skcipher(ctx->sw_cipher); 1043 crypto_free_sync_skcipher(ctx->sw_cipher);
1045} 1044}
1046 1045
1047static int spacc_ablk_encrypt(struct ablkcipher_request *req) 1046static int spacc_ablk_encrypt(struct ablkcipher_request *req)
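
The picoxcell setkey path also shows how flag propagation looks after the conversion: the parent transform's request flags (CRYPTO_TFM_REQ_MASK) are copied onto the sync fallback before setkey, and the fallback's result flags (CRYPTO_TFM_RES_MASK) are copied back so callers still see conditions such as a rejected key length. A condensed sketch of that step, assuming an ablkcipher-style parent as in the driver (function and parameter names are illustrative):

#include <crypto/skcipher.h>

static int fb_setkey(struct crypto_ablkcipher *parent,
		     struct crypto_sync_skcipher *fallback,
		     const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(parent);
	int err;

	/* Mirror the parent's request flags onto the software fallback. */
	crypto_sync_skcipher_clear_flags(fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(fallback,
				       tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	err = crypto_sync_skcipher_setkey(fallback, key, len);

	/* Reflect the fallback's result flags back to the parent. */
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(fallback) &
			  CRYPTO_TFM_RES_MASK;
	return err;
}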
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1138e41d6805..d2698299896f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
113 struct crypto_shash *hash_tfm; 113 struct crypto_shash *hash_tfm;
114 enum icp_qat_hw_auth_algo qat_hash_alg; 114 enum icp_qat_hw_auth_algo qat_hash_alg;
115 struct qat_crypto_instance *inst; 115 struct qat_crypto_instance *inst;
116 union {
117 struct sha1_state sha1;
118 struct sha256_state sha256;
119 struct sha512_state sha512;
120 };
121 char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
122 char opad[SHA512_BLOCK_SIZE];
116}; 123};
117 124
118struct qat_alg_ablkcipher_ctx { 125struct qat_alg_ablkcipher_ctx {
@@ -148,37 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
148 unsigned int auth_keylen) 155 unsigned int auth_keylen)
149{ 156{
150 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); 157 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
151 struct sha1_state sha1;
152 struct sha256_state sha256;
153 struct sha512_state sha512;
154 int block_size = crypto_shash_blocksize(ctx->hash_tfm); 158 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
155 int digest_size = crypto_shash_digestsize(ctx->hash_tfm); 159 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
156 char ipad[block_size];
157 char opad[block_size];
158 __be32 *hash_state_out; 160 __be32 *hash_state_out;
159 __be64 *hash512_state_out; 161 __be64 *hash512_state_out;
160 int i, offset; 162 int i, offset;
161 163
162 memset(ipad, 0, block_size); 164 memset(ctx->ipad, 0, block_size);
163 memset(opad, 0, block_size); 165 memset(ctx->opad, 0, block_size);
164 shash->tfm = ctx->hash_tfm; 166 shash->tfm = ctx->hash_tfm;
165 shash->flags = 0x0; 167 shash->flags = 0x0;
166 168
167 if (auth_keylen > block_size) { 169 if (auth_keylen > block_size) {
168 int ret = crypto_shash_digest(shash, auth_key, 170 int ret = crypto_shash_digest(shash, auth_key,
169 auth_keylen, ipad); 171 auth_keylen, ctx->ipad);
170 if (ret) 172 if (ret)
171 return ret; 173 return ret;
172 174
173 memcpy(opad, ipad, digest_size); 175 memcpy(ctx->opad, ctx->ipad, digest_size);
174 } else { 176 } else {
175 memcpy(ipad, auth_key, auth_keylen); 177 memcpy(ctx->ipad, auth_key, auth_keylen);
176 memcpy(opad, auth_key, auth_keylen); 178 memcpy(ctx->opad, auth_key, auth_keylen);
177 } 179 }
178 180
179 for (i = 0; i < block_size; i++) { 181 for (i = 0; i < block_size; i++) {
180 char *ipad_ptr = ipad + i; 182 char *ipad_ptr = ctx->ipad + i;
181 char *opad_ptr = opad + i; 183 char *opad_ptr = ctx->opad + i;
182 *ipad_ptr ^= HMAC_IPAD_VALUE; 184 *ipad_ptr ^= HMAC_IPAD_VALUE;
183 *opad_ptr ^= HMAC_OPAD_VALUE; 185 *opad_ptr ^= HMAC_OPAD_VALUE;
184 } 186 }
@@ -186,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
186 if (crypto_shash_init(shash)) 188 if (crypto_shash_init(shash))
187 return -EFAULT; 189 return -EFAULT;
188 190
189 if (crypto_shash_update(shash, ipad, block_size)) 191 if (crypto_shash_update(shash, ctx->ipad, block_size))
190 return -EFAULT; 192 return -EFAULT;
191 193
192 hash_state_out = (__be32 *)hash->sha.state1; 194 hash_state_out = (__be32 *)hash->sha.state1;
@@ -194,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
194 196
195 switch (ctx->qat_hash_alg) { 197 switch (ctx->qat_hash_alg) {
196 case ICP_QAT_HW_AUTH_ALGO_SHA1: 198 case ICP_QAT_HW_AUTH_ALGO_SHA1:
197 if (crypto_shash_export(shash, &sha1)) 199 if (crypto_shash_export(shash, &ctx->sha1))
198 return -EFAULT; 200 return -EFAULT;
199 for (i = 0; i < digest_size >> 2; i++, hash_state_out++) 201 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
200 *hash_state_out = cpu_to_be32(*(sha1.state + i)); 202 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
201 break; 203 break;
202 case ICP_QAT_HW_AUTH_ALGO_SHA256: 204 case ICP_QAT_HW_AUTH_ALGO_SHA256:
203 if (crypto_shash_export(shash, &sha256)) 205 if (crypto_shash_export(shash, &ctx->sha256))
204 return -EFAULT; 206 return -EFAULT;
205 for (i = 0; i < digest_size >> 2; i++, hash_state_out++) 207 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
206 *hash_state_out = cpu_to_be32(*(sha256.state + i)); 208 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
207 break; 209 break;
208 case ICP_QAT_HW_AUTH_ALGO_SHA512: 210 case ICP_QAT_HW_AUTH_ALGO_SHA512:
209 if (crypto_shash_export(shash, &sha512)) 211 if (crypto_shash_export(shash, &ctx->sha512))
210 return -EFAULT; 212 return -EFAULT;
211 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) 213 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
212 *hash512_state_out = cpu_to_be64(*(sha512.state + i)); 214 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
213 break; 215 break;
214 default: 216 default:
215 return -EFAULT; 217 return -EFAULT;
@@ -218,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
218 if (crypto_shash_init(shash)) 220 if (crypto_shash_init(shash))
219 return -EFAULT; 221 return -EFAULT;
220 222
221 if (crypto_shash_update(shash, opad, block_size)) 223 if (crypto_shash_update(shash, ctx->opad, block_size))
222 return -EFAULT; 224 return -EFAULT;
223 225
224 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); 226 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -227,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
227 229
228 switch (ctx->qat_hash_alg) { 230 switch (ctx->qat_hash_alg) {
229 case ICP_QAT_HW_AUTH_ALGO_SHA1: 231 case ICP_QAT_HW_AUTH_ALGO_SHA1:
230 if (crypto_shash_export(shash, &sha1)) 232 if (crypto_shash_export(shash, &ctx->sha1))
231 return -EFAULT; 233 return -EFAULT;
232 for (i = 0; i < digest_size >> 2; i++, hash_state_out++) 234 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
233 *hash_state_out = cpu_to_be32(*(sha1.state + i)); 235 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
234 break; 236 break;
235 case ICP_QAT_HW_AUTH_ALGO_SHA256: 237 case ICP_QAT_HW_AUTH_ALGO_SHA256:
236 if (crypto_shash_export(shash, &sha256)) 238 if (crypto_shash_export(shash, &ctx->sha256))
237 return -EFAULT; 239 return -EFAULT;
238 for (i = 0; i < digest_size >> 2; i++, hash_state_out++) 240 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
239 *hash_state_out = cpu_to_be32(*(sha256.state + i)); 241 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
240 break; 242 break;
241 case ICP_QAT_HW_AUTH_ALGO_SHA512: 243 case ICP_QAT_HW_AUTH_ALGO_SHA512:
242 if (crypto_shash_export(shash, &sha512)) 244 if (crypto_shash_export(shash, &ctx->sha512))
243 return -EFAULT; 245 return -EFAULT;
244 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++) 246 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
245 *hash512_state_out = cpu_to_be64(*(sha512.state + i)); 247 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
246 break; 248 break;
247 default: 249 default:
248 return -EFAULT; 250 return -EFAULT;
249 } 251 }
250 memzero_explicit(ipad, block_size); 252 memzero_explicit(ctx->ipad, block_size);
251 memzero_explicit(opad, block_size); 253 memzero_explicit(ctx->opad, block_size);
252 return 0; 254 return 0;
253} 255}
254 256
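
The qat change removes the variable-length ipad/opad and hash-state arrays by giving them a permanent home in the tfm context, sized for the largest supported block and digest (SHA-512). The precompute itself is the standard HMAC construction: pad or hash the key to one block, XOR with the ipad/opad constants, absorb one block and export the partial state. A reduced sketch of that flow with fixed-size buffers, collapsed to the SHA-256 case and with illustrative names (the real code keeps the union of sha1/sha256/sha512 states shown above):

#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/string.h>

struct hmac_precompute {
	char ipad[SHA512_BLOCK_SIZE];	/* large enough for every supported hash */
	char opad[SHA512_BLOCK_SIZE];
	struct sha256_state sha256;
};

static int precompute_hmac_sha256(struct crypto_shash *tfm,
				  struct hmac_precompute *pc,
				  const u8 *key, unsigned int keylen)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int bs = crypto_shash_blocksize(tfm);
	int ds = crypto_shash_digestsize(tfm);
	int i, ret;

	shash->tfm = tfm;
	shash->flags = 0;

	memset(pc->ipad, 0, bs);
	memset(pc->opad, 0, bs);

	if (keylen > bs) {
		/* Long keys are first hashed down to digest size. */
		ret = crypto_shash_digest(shash, key, keylen, pc->ipad);
		if (ret)
			return ret;
		memcpy(pc->opad, pc->ipad, ds);
	} else {
		memcpy(pc->ipad, key, keylen);
		memcpy(pc->opad, key, keylen);
	}

	for (i = 0; i < bs; i++) {
		pc->ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		pc->opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}

	/* Export the partial state after absorbing the inner padded block. */
	ret = crypto_shash_init(shash);
	if (ret)
		return ret;
	ret = crypto_shash_update(shash, pc->ipad, bs);
	if (ret)
		return ret;
	return crypto_shash_export(shash, &pc->sha256);
}

The opad side is handled identically in the driver, and the pads are wiped with memzero_explicit() once the hardware state words have been written out.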
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ea4d96bf47e8..585e1cab9ae3 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
189 memcpy(ctx->enc_key, key, keylen); 189 memcpy(ctx->enc_key, key, keylen);
190 return 0; 190 return 0;
191fallback: 191fallback:
192 ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); 192 ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
193 if (!ret) 193 if (!ret)
194 ctx->enc_keylen = keylen; 194 ctx->enc_keylen = keylen;
195 return ret; 195 return ret;
@@ -212,9 +212,9 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
212 212
213 if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && 213 if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
214 ctx->enc_keylen != AES_KEYSIZE_256) { 214 ctx->enc_keylen != AES_KEYSIZE_256) {
215 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 215 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
216 216
217 skcipher_request_set_tfm(subreq, ctx->fallback); 217 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
218 skcipher_request_set_callback(subreq, req->base.flags, 218 skcipher_request_set_callback(subreq, req->base.flags,
219 NULL, NULL); 219 NULL, NULL);
220 skcipher_request_set_crypt(subreq, req->src, req->dst, 220 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -245,9 +245,8 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
245 memset(ctx, 0, sizeof(*ctx)); 245 memset(ctx, 0, sizeof(*ctx));
246 tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); 246 tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
247 247
248 ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0, 248 ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
249 CRYPTO_ALG_ASYNC | 249 0, CRYPTO_ALG_NEED_FALLBACK);
250 CRYPTO_ALG_NEED_FALLBACK);
251 return PTR_ERR_OR_ZERO(ctx->fallback); 250 return PTR_ERR_OR_ZERO(ctx->fallback);
252} 251}
253 252
@@ -255,7 +254,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
255{ 254{
256 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 255 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
257 256
258 crypto_free_skcipher(ctx->fallback); 257 crypto_free_sync_skcipher(ctx->fallback);
259} 258}
260 259
261struct qce_ablkcipher_def { 260struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 2b0278bb6e92..ee055bfe98a0 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
22struct qce_cipher_ctx { 22struct qce_cipher_ctx {
23 u8 enc_key[QCE_MAX_KEY_SIZE]; 23 u8 enc_key[QCE_MAX_KEY_SIZE];
24 unsigned int enc_keylen; 24 unsigned int enc_keylen;
25 struct crypto_skcipher *fallback; 25 struct crypto_sync_skcipher *fallback;
26}; 26};
27 27
28/** 28/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index faa282074e5a..0064be0e3941 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -249,8 +249,8 @@ struct s5p_aes_reqctx {
249struct s5p_aes_ctx { 249struct s5p_aes_ctx {
250 struct s5p_aes_dev *dev; 250 struct s5p_aes_dev *dev;
251 251
252 uint8_t aes_key[AES_MAX_KEY_SIZE]; 252 u8 aes_key[AES_MAX_KEY_SIZE];
253 uint8_t nonce[CTR_RFC3686_NONCE_SIZE]; 253 u8 nonce[CTR_RFC3686_NONCE_SIZE];
254 int keylen; 254 int keylen;
255}; 255};
256 256
@@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
475} 475}
476 476
477/* Calls the completion. Cannot be called with dev->lock hold. */ 477/* Calls the completion. Cannot be called with dev->lock hold. */
478static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) 478static void s5p_aes_complete(struct ablkcipher_request *req, int err)
479{ 479{
480 dev->req->base.complete(&dev->req->base, err); 480 req->base.complete(&req->base, err);
481} 481}
482 482
483static void s5p_unset_outdata(struct s5p_aes_dev *dev) 483static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
491} 491}
492 492
493static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src, 493static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
494 struct scatterlist **dst) 494 struct scatterlist **dst)
495{ 495{
496 void *pages; 496 void *pages;
497 int len; 497 int len;
@@ -518,46 +518,28 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
518 518
519static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) 519static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
520{ 520{
521 int err; 521 if (!sg->length)
522 522 return -EINVAL;
523 if (!sg->length) {
524 err = -EINVAL;
525 goto exit;
526 }
527 523
528 err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE); 524 if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
529 if (!err) { 525 return -ENOMEM;
530 err = -ENOMEM;
531 goto exit;
532 }
533 526
534 dev->sg_dst = sg; 527 dev->sg_dst = sg;
535 err = 0;
536 528
537exit: 529 return 0;
538 return err;
539} 530}
540 531
541static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) 532static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
542{ 533{
543 int err; 534 if (!sg->length)
544 535 return -EINVAL;
545 if (!sg->length) {
546 err = -EINVAL;
547 goto exit;
548 }
549 536
550 err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE); 537 if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
551 if (!err) { 538 return -ENOMEM;
552 err = -ENOMEM;
553 goto exit;
554 }
555 539
556 dev->sg_src = sg; 540 dev->sg_src = sg;
557 err = 0;
558 541
559exit: 542 return 0;
560 return err;
561} 543}
562 544
563/* 545/*
@@ -655,14 +637,14 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
655{ 637{
656 struct platform_device *pdev = dev_id; 638 struct platform_device *pdev = dev_id;
657 struct s5p_aes_dev *dev = platform_get_drvdata(pdev); 639 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
640 struct ablkcipher_request *req;
658 int err_dma_tx = 0; 641 int err_dma_tx = 0;
659 int err_dma_rx = 0; 642 int err_dma_rx = 0;
660 int err_dma_hx = 0; 643 int err_dma_hx = 0;
661 bool tx_end = false; 644 bool tx_end = false;
662 bool hx_end = false; 645 bool hx_end = false;
663 unsigned long flags; 646 unsigned long flags;
664 uint32_t status; 647 u32 status, st_bits;
665 u32 st_bits;
666 int err; 648 int err;
667 649
668 spin_lock_irqsave(&dev->lock, flags); 650 spin_lock_irqsave(&dev->lock, flags);
@@ -727,7 +709,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
727 709
728 spin_unlock_irqrestore(&dev->lock, flags); 710 spin_unlock_irqrestore(&dev->lock, flags);
729 711
730 s5p_aes_complete(dev, 0); 712 s5p_aes_complete(dev->req, 0);
731 /* Device is still busy */ 713 /* Device is still busy */
732 tasklet_schedule(&dev->tasklet); 714 tasklet_schedule(&dev->tasklet);
733 } else { 715 } else {
@@ -752,11 +734,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
752error: 734error:
753 s5p_sg_done(dev); 735 s5p_sg_done(dev);
754 dev->busy = false; 736 dev->busy = false;
737 req = dev->req;
755 if (err_dma_hx == 1) 738 if (err_dma_hx == 1)
756 s5p_set_dma_hashdata(dev, dev->hash_sg_iter); 739 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
757 740
758 spin_unlock_irqrestore(&dev->lock, flags); 741 spin_unlock_irqrestore(&dev->lock, flags);
759 s5p_aes_complete(dev, err); 742 s5p_aes_complete(req, err);
760 743
761hash_irq_end: 744hash_irq_end:
762 /* 745 /*
@@ -1830,7 +1813,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
1830}; 1813};
1831 1814
1832static void s5p_set_aes(struct s5p_aes_dev *dev, 1815static void s5p_set_aes(struct s5p_aes_dev *dev,
1833 const uint8_t *key, const uint8_t *iv, 1816 const u8 *key, const u8 *iv, const u8 *ctr,
1834 unsigned int keylen) 1817 unsigned int keylen)
1835{ 1818{
1836 void __iomem *keystart; 1819 void __iomem *keystart;
@@ -1838,6 +1821,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
1838 if (iv) 1821 if (iv)
1839 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); 1822 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
1840 1823
1824 if (ctr)
1825 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
1826
1841 if (keylen == AES_KEYSIZE_256) 1827 if (keylen == AES_KEYSIZE_256)
1842 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); 1828 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
1843 else if (keylen == AES_KEYSIZE_192) 1829 else if (keylen == AES_KEYSIZE_192)
@@ -1887,7 +1873,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1887} 1873}
1888 1874
1889static int s5p_set_outdata_start(struct s5p_aes_dev *dev, 1875static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1890 struct ablkcipher_request *req) 1876 struct ablkcipher_request *req)
1891{ 1877{
1892 struct scatterlist *sg; 1878 struct scatterlist *sg;
1893 int err; 1879 int err;
@@ -1916,11 +1902,12 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1916static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) 1902static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1917{ 1903{
1918 struct ablkcipher_request *req = dev->req; 1904 struct ablkcipher_request *req = dev->req;
1919 uint32_t aes_control; 1905 u32 aes_control;
1920 unsigned long flags; 1906 unsigned long flags;
1921 int err; 1907 int err;
1922 u8 *iv; 1908 u8 *iv, *ctr;
1923 1909
1910 /* This sets bit [13:12] to 00, which selects 128-bit counter */
1924 aes_control = SSS_AES_KEY_CHANGE_MODE; 1911 aes_control = SSS_AES_KEY_CHANGE_MODE;
1925 if (mode & FLAGS_AES_DECRYPT) 1912 if (mode & FLAGS_AES_DECRYPT)
1926 aes_control |= SSS_AES_MODE_DECRYPT; 1913 aes_control |= SSS_AES_MODE_DECRYPT;
@@ -1928,11 +1915,14 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1928 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) { 1915 if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
1929 aes_control |= SSS_AES_CHAIN_MODE_CBC; 1916 aes_control |= SSS_AES_CHAIN_MODE_CBC;
1930 iv = req->info; 1917 iv = req->info;
1918 ctr = NULL;
1931 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) { 1919 } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
1932 aes_control |= SSS_AES_CHAIN_MODE_CTR; 1920 aes_control |= SSS_AES_CHAIN_MODE_CTR;
1933 iv = req->info; 1921 iv = NULL;
1922 ctr = req->info;
1934 } else { 1923 } else {
1935 iv = NULL; /* AES_ECB */ 1924 iv = NULL; /* AES_ECB */
1925 ctr = NULL;
1936 } 1926 }
1937 1927
1938 if (dev->ctx->keylen == AES_KEYSIZE_192) 1928 if (dev->ctx->keylen == AES_KEYSIZE_192)
@@ -1964,7 +1954,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
1964 goto outdata_error; 1954 goto outdata_error;
1965 1955
1966 SSS_AES_WRITE(dev, AES_CONTROL, aes_control); 1956 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
1967 s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen); 1957 s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
1968 1958
1969 s5p_set_dma_indata(dev, dev->sg_src); 1959 s5p_set_dma_indata(dev, dev->sg_src);
1970 s5p_set_dma_outdata(dev, dev->sg_dst); 1960 s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -1983,7 +1973,7 @@ indata_error:
1983 s5p_sg_done(dev); 1973 s5p_sg_done(dev);
1984 dev->busy = false; 1974 dev->busy = false;
1985 spin_unlock_irqrestore(&dev->lock, flags); 1975 spin_unlock_irqrestore(&dev->lock, flags);
1986 s5p_aes_complete(dev, err); 1976 s5p_aes_complete(req, err);
1987} 1977}
1988 1978
1989static void s5p_tasklet_cb(unsigned long data) 1979static void s5p_tasklet_cb(unsigned long data)
@@ -2024,7 +2014,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2024 err = ablkcipher_enqueue_request(&dev->queue, req); 2014 err = ablkcipher_enqueue_request(&dev->queue, req);
2025 if (dev->busy) { 2015 if (dev->busy) {
2026 spin_unlock_irqrestore(&dev->lock, flags); 2016 spin_unlock_irqrestore(&dev->lock, flags);
2027 goto exit; 2017 return err;
2028 } 2018 }
2029 dev->busy = true; 2019 dev->busy = true;
2030 2020
@@ -2032,7 +2022,6 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
2032 2022
2033 tasklet_schedule(&dev->tasklet); 2023 tasklet_schedule(&dev->tasklet);
2034 2024
2035exit:
2036 return err; 2025 return err;
2037} 2026}
2038 2027
@@ -2043,7 +2032,8 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
2043 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); 2032 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
2044 struct s5p_aes_dev *dev = ctx->dev; 2033 struct s5p_aes_dev *dev = ctx->dev;
2045 2034
2046 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { 2035 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
2036 ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
2047 dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); 2037 dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
2048 return -EINVAL; 2038 return -EINVAL;
2049 } 2039 }
@@ -2054,7 +2044,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
2054} 2044}
2055 2045
2056static int s5p_aes_setkey(struct crypto_ablkcipher *cipher, 2046static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
2057 const uint8_t *key, unsigned int keylen) 2047 const u8 *key, unsigned int keylen)
2058{ 2048{
2059 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 2049 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2060 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); 2050 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2090,6 +2080,11 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
2090 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC); 2080 return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
2091} 2081}
2092 2082
2083static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
2084{
2085 return s5p_aes_crypt(req, FLAGS_AES_CTR);
2086}
2087
2093static int s5p_aes_cra_init(struct crypto_tfm *tfm) 2088static int s5p_aes_cra_init(struct crypto_tfm *tfm)
2094{ 2089{
2095 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); 2090 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2144,6 +2139,28 @@ static struct crypto_alg algs[] = {
2144 .decrypt = s5p_aes_cbc_decrypt, 2139 .decrypt = s5p_aes_cbc_decrypt,
2145 } 2140 }
2146 }, 2141 },
2142 {
2143 .cra_name = "ctr(aes)",
2144 .cra_driver_name = "ctr-aes-s5p",
2145 .cra_priority = 100,
2146 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2147 CRYPTO_ALG_ASYNC |
2148 CRYPTO_ALG_KERN_DRIVER_ONLY,
2149 .cra_blocksize = AES_BLOCK_SIZE,
2150 .cra_ctxsize = sizeof(struct s5p_aes_ctx),
2151 .cra_alignmask = 0x0f,
2152 .cra_type = &crypto_ablkcipher_type,
2153 .cra_module = THIS_MODULE,
2154 .cra_init = s5p_aes_cra_init,
2155 .cra_u.ablkcipher = {
2156 .min_keysize = AES_MIN_KEY_SIZE,
2157 .max_keysize = AES_MAX_KEY_SIZE,
2158 .ivsize = AES_BLOCK_SIZE,
2159 .setkey = s5p_aes_setkey,
2160 .encrypt = s5p_aes_ctr_crypt,
2161 .decrypt = s5p_aes_ctr_crypt,
2162 }
2163 },
2147}; 2164};
2148 2165
2149static int s5p_aes_probe(struct platform_device *pdev) 2166static int s5p_aes_probe(struct platform_device *pdev)
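
With this change s5p-sss registers a ctr(aes) implementation ("ctr-aes-s5p", priority 100) next to its ecb and cbc modes, loading req->info into the counter registers rather than the IV registers. From a kernel user's point of view nothing driver-specific is needed: a caller asks for "ctr(aes)" and the crypto API binds whichever implementation wins on priority. A hedged sketch of such a caller (names are illustrative; key, ctr_iv and buf must not live on the stack, since a hardware implementation will DMA-map them through the scatterlist):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt one 16-byte block in place with ctr(aes), waiting synchronously. */
static int ctr_aes_one_block(const u8 *key, u8 *ctr_iv, u8 *buf)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	err = crypto_skcipher_setkey(tfm, key, 16);
	if (err)
		goto out;

	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, ctr_iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}

Because CTR turns AES into a stream cipher, the driver's exact-block-size check is relaxed for this mode, as the s5p_aes_crypt hunk above shows.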
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index e7540a5b8197..bbf166a97ad3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -149,7 +149,7 @@ struct sahara_ctx {
149 /* AES-specific context */ 149 /* AES-specific context */
150 int keylen; 150 int keylen;
151 u8 key[AES_KEYSIZE_128]; 151 u8 key[AES_KEYSIZE_128];
152 struct crypto_skcipher *fallback; 152 struct crypto_sync_skcipher *fallback;
153}; 153};
154 154
155struct sahara_aes_reqctx { 155struct sahara_aes_reqctx {
@@ -621,14 +621,14 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
621 /* 621 /*
622 * The requested key size is not supported by HW, do a fallback. 622 * The requested key size is not supported by HW, do a fallback.
623 */ 623 */
624 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); 624 crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
625 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & 625 crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
626 CRYPTO_TFM_REQ_MASK); 626 CRYPTO_TFM_REQ_MASK);
627 627
628 ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); 628 ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
629 629
630 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; 630 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
631 tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) & 631 tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
632 CRYPTO_TFM_RES_MASK; 632 CRYPTO_TFM_RES_MASK;
633 return ret; 633 return ret;
634} 634}
@@ -666,9 +666,9 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
666 int err; 666 int err;
667 667
668 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 668 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
669 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 669 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
670 670
671 skcipher_request_set_tfm(subreq, ctx->fallback); 671 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
672 skcipher_request_set_callback(subreq, req->base.flags, 672 skcipher_request_set_callback(subreq, req->base.flags,
673 NULL, NULL); 673 NULL, NULL);
674 skcipher_request_set_crypt(subreq, req->src, req->dst, 674 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -688,9 +688,9 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
688 int err; 688 int err;
689 689
690 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 690 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
691 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 691 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
692 692
693 skcipher_request_set_tfm(subreq, ctx->fallback); 693 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
694 skcipher_request_set_callback(subreq, req->base.flags, 694 skcipher_request_set_callback(subreq, req->base.flags,
695 NULL, NULL); 695 NULL, NULL);
696 skcipher_request_set_crypt(subreq, req->src, req->dst, 696 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -710,9 +710,9 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
710 int err; 710 int err;
711 711
712 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 712 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
713 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 713 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
714 714
715 skcipher_request_set_tfm(subreq, ctx->fallback); 715 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
716 skcipher_request_set_callback(subreq, req->base.flags, 716 skcipher_request_set_callback(subreq, req->base.flags,
717 NULL, NULL); 717 NULL, NULL);
718 skcipher_request_set_crypt(subreq, req->src, req->dst, 718 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -732,9 +732,9 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
732 int err; 732 int err;
733 733
734 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 734 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
735 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 735 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
736 736
737 skcipher_request_set_tfm(subreq, ctx->fallback); 737 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
738 skcipher_request_set_callback(subreq, req->base.flags, 738 skcipher_request_set_callback(subreq, req->base.flags,
739 NULL, NULL); 739 NULL, NULL);
740 skcipher_request_set_crypt(subreq, req->src, req->dst, 740 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -752,8 +752,7 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
752 const char *name = crypto_tfm_alg_name(tfm); 752 const char *name = crypto_tfm_alg_name(tfm);
753 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); 753 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
754 754
755 ctx->fallback = crypto_alloc_skcipher(name, 0, 755 ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
756 CRYPTO_ALG_ASYNC |
757 CRYPTO_ALG_NEED_FALLBACK); 756 CRYPTO_ALG_NEED_FALLBACK);
758 if (IS_ERR(ctx->fallback)) { 757 if (IS_ERR(ctx->fallback)) {
759 pr_err("Error allocating fallback algo %s\n", name); 758 pr_err("Error allocating fallback algo %s\n", name);
@@ -769,7 +768,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
769{ 768{
770 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); 769 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
771 770
772 crypto_free_skcipher(ctx->fallback); 771 crypto_free_sync_skcipher(ctx->fallback);
773} 772}
774 773
775static u32 sahara_sha_init_hdr(struct sahara_dev *dev, 774static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index b71895871be3..c5c5ff82b52e 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -32,7 +32,7 @@
32#include "aesp8-ppc.h" 32#include "aesp8-ppc.h"
33 33
34struct p8_aes_cbc_ctx { 34struct p8_aes_cbc_ctx {
35 struct crypto_skcipher *fallback; 35 struct crypto_sync_skcipher *fallback;
36 struct aes_key enc_key; 36 struct aes_key enc_key;
37 struct aes_key dec_key; 37 struct aes_key dec_key;
38}; 38};
@@ -40,11 +40,11 @@ struct p8_aes_cbc_ctx {
40static int p8_aes_cbc_init(struct crypto_tfm *tfm) 40static int p8_aes_cbc_init(struct crypto_tfm *tfm)
41{ 41{
42 const char *alg = crypto_tfm_alg_name(tfm); 42 const char *alg = crypto_tfm_alg_name(tfm);
43 struct crypto_skcipher *fallback; 43 struct crypto_sync_skcipher *fallback;
44 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 44 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
45 45
46 fallback = crypto_alloc_skcipher(alg, 0, 46 fallback = crypto_alloc_sync_skcipher(alg, 0,
47 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 47 CRYPTO_ALG_NEED_FALLBACK);
48 48
49 if (IS_ERR(fallback)) { 49 if (IS_ERR(fallback)) {
50 printk(KERN_ERR 50 printk(KERN_ERR
@@ -53,7 +53,7 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
53 return PTR_ERR(fallback); 53 return PTR_ERR(fallback);
54 } 54 }
55 55
56 crypto_skcipher_set_flags( 56 crypto_sync_skcipher_set_flags(
57 fallback, 57 fallback,
58 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 58 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
59 ctx->fallback = fallback; 59 ctx->fallback = fallback;
@@ -66,7 +66,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
66 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 66 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
67 67
68 if (ctx->fallback) { 68 if (ctx->fallback) {
69 crypto_free_skcipher(ctx->fallback); 69 crypto_free_sync_skcipher(ctx->fallback);
70 ctx->fallback = NULL; 70 ctx->fallback = NULL;
71 } 71 }
72} 72}
@@ -86,7 +86,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
86 pagefault_enable(); 86 pagefault_enable();
87 preempt_enable(); 87 preempt_enable();
88 88
89 ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); 89 ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
90 return ret; 90 return ret;
91} 91}
92 92
@@ -100,8 +100,8 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
100 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 100 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
101 101
102 if (in_interrupt()) { 102 if (in_interrupt()) {
103 SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 103 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
104 skcipher_request_set_tfm(req, ctx->fallback); 104 skcipher_request_set_sync_tfm(req, ctx->fallback);
105 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 105 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
106 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 106 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
107 ret = crypto_skcipher_encrypt(req); 107 ret = crypto_skcipher_encrypt(req);
@@ -139,8 +139,8 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
139 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 139 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
140 140
141 if (in_interrupt()) { 141 if (in_interrupt()) {
142 SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 142 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
143 skcipher_request_set_tfm(req, ctx->fallback); 143 skcipher_request_set_sync_tfm(req, ctx->fallback);
144 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 144 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
145 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 145 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
146 ret = crypto_skcipher_decrypt(req); 146 ret = crypto_skcipher_decrypt(req);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index cd777c75291d..8a2fe092cb8e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -32,18 +32,18 @@
32#include "aesp8-ppc.h" 32#include "aesp8-ppc.h"
33 33
34struct p8_aes_ctr_ctx { 34struct p8_aes_ctr_ctx {
35 struct crypto_skcipher *fallback; 35 struct crypto_sync_skcipher *fallback;
36 struct aes_key enc_key; 36 struct aes_key enc_key;
37}; 37};
38 38
39static int p8_aes_ctr_init(struct crypto_tfm *tfm) 39static int p8_aes_ctr_init(struct crypto_tfm *tfm)
40{ 40{
41 const char *alg = crypto_tfm_alg_name(tfm); 41 const char *alg = crypto_tfm_alg_name(tfm);
42 struct crypto_skcipher *fallback; 42 struct crypto_sync_skcipher *fallback;
43 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 43 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
44 44
45 fallback = crypto_alloc_skcipher(alg, 0, 45 fallback = crypto_alloc_sync_skcipher(alg, 0,
46 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 46 CRYPTO_ALG_NEED_FALLBACK);
47 if (IS_ERR(fallback)) { 47 if (IS_ERR(fallback)) {
48 printk(KERN_ERR 48 printk(KERN_ERR
49 "Failed to allocate transformation for '%s': %ld\n", 49 "Failed to allocate transformation for '%s': %ld\n",
@@ -51,7 +51,7 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
51 return PTR_ERR(fallback); 51 return PTR_ERR(fallback);
52 } 52 }
53 53
54 crypto_skcipher_set_flags( 54 crypto_sync_skcipher_set_flags(
55 fallback, 55 fallback,
56 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 56 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
57 ctx->fallback = fallback; 57 ctx->fallback = fallback;
@@ -64,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
64 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 64 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
65 65
66 if (ctx->fallback) { 66 if (ctx->fallback) {
67 crypto_free_skcipher(ctx->fallback); 67 crypto_free_sync_skcipher(ctx->fallback);
68 ctx->fallback = NULL; 68 ctx->fallback = NULL;
69 } 69 }
70} 70}
@@ -83,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
83 pagefault_enable(); 83 pagefault_enable();
84 preempt_enable(); 84 preempt_enable();
85 85
86 ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); 86 ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
87 return ret; 87 return ret;
88} 88}
89 89
@@ -119,8 +119,8 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
119 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 119 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
120 120
121 if (in_interrupt()) { 121 if (in_interrupt()) {
122 SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 122 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
123 skcipher_request_set_tfm(req, ctx->fallback); 123 skcipher_request_set_sync_tfm(req, ctx->fallback);
124 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 124 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
125 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 125 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
126 ret = crypto_skcipher_encrypt(req); 126 ret = crypto_skcipher_encrypt(req);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index e9954a7d4694..ecd64e5cc5bb 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -33,7 +33,7 @@
33#include "aesp8-ppc.h" 33#include "aesp8-ppc.h"
34 34
35struct p8_aes_xts_ctx { 35struct p8_aes_xts_ctx {
36 struct crypto_skcipher *fallback; 36 struct crypto_sync_skcipher *fallback;
37 struct aes_key enc_key; 37 struct aes_key enc_key;
38 struct aes_key dec_key; 38 struct aes_key dec_key;
39 struct aes_key tweak_key; 39 struct aes_key tweak_key;
@@ -42,11 +42,11 @@ struct p8_aes_xts_ctx {
42static int p8_aes_xts_init(struct crypto_tfm *tfm) 42static int p8_aes_xts_init(struct crypto_tfm *tfm)
43{ 43{
44 const char *alg = crypto_tfm_alg_name(tfm); 44 const char *alg = crypto_tfm_alg_name(tfm);
45 struct crypto_skcipher *fallback; 45 struct crypto_sync_skcipher *fallback;
46 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); 46 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
47 47
48 fallback = crypto_alloc_skcipher(alg, 0, 48 fallback = crypto_alloc_sync_skcipher(alg, 0,
49 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 49 CRYPTO_ALG_NEED_FALLBACK);
50 if (IS_ERR(fallback)) { 50 if (IS_ERR(fallback)) {
51 printk(KERN_ERR 51 printk(KERN_ERR
52 "Failed to allocate transformation for '%s': %ld\n", 52 "Failed to allocate transformation for '%s': %ld\n",
@@ -54,7 +54,7 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
54 return PTR_ERR(fallback); 54 return PTR_ERR(fallback);
55 } 55 }
56 56
57 crypto_skcipher_set_flags( 57 crypto_sync_skcipher_set_flags(
58 fallback, 58 fallback,
59 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 59 crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
60 ctx->fallback = fallback; 60 ctx->fallback = fallback;
@@ -67,7 +67,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
67 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); 67 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
68 68
69 if (ctx->fallback) { 69 if (ctx->fallback) {
70 crypto_free_skcipher(ctx->fallback); 70 crypto_free_sync_skcipher(ctx->fallback);
71 ctx->fallback = NULL; 71 ctx->fallback = NULL;
72 } 72 }
73} 73}
@@ -92,7 +92,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
92 pagefault_enable(); 92 pagefault_enable();
93 preempt_enable(); 93 preempt_enable();
94 94
95 ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); 95 ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
96 return ret; 96 return ret;
97} 97}
98 98
@@ -109,8 +109,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
109 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 109 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
110 110
111 if (in_interrupt()) { 111 if (in_interrupt()) {
112 SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 112 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
113 skcipher_request_set_tfm(req, ctx->fallback); 113 skcipher_request_set_sync_tfm(req, ctx->fallback);
114 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 114 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
115 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 115 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
116 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); 116 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e1fa6baf4e8e..bb3096bf2cc6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -559,7 +559,12 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
559 } 559 }
560 memset(result + size, 0, JOURNAL_MAC_SIZE - size); 560 memset(result + size, 0, JOURNAL_MAC_SIZE - size);
561 } else { 561 } else {
562 __u8 digest[size]; 562 __u8 digest[HASH_MAX_DIGESTSIZE];
563
564 if (WARN_ON(size > sizeof(digest))) {
565 dm_integrity_io_error(ic, "digest_size", -EINVAL);
566 goto err;
567 }
563 r = crypto_shash_final(desc, digest); 568 r = crypto_shash_final(desc, digest);
564 if (unlikely(r)) { 569 if (unlikely(r)) {
565 dm_integrity_io_error(ic, "crypto_shash_final", r); 570 dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -1324,7 +1329,7 @@ static void integrity_metadata(struct work_struct *w)
1324 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1329 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1325 char *checksums; 1330 char *checksums;
1326 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0; 1331 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1327 char checksums_onstack[ic->tag_size + extra_space]; 1332 char checksums_onstack[HASH_MAX_DIGESTSIZE];
1328 unsigned sectors_to_process = dio->range.n_sectors; 1333 unsigned sectors_to_process = dio->range.n_sectors;
1329 sector_t sector = dio->range.logical_sector; 1334 sector_t sector = dio->range.logical_sector;
1330 1335
@@ -1333,8 +1338,14 @@ static void integrity_metadata(struct work_struct *w)
1333 1338
1334 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space, 1339 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1335 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN); 1340 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1336 if (!checksums) 1341 if (!checksums) {
1337 checksums = checksums_onstack; 1342 checksums = checksums_onstack;
1343 if (WARN_ON(extra_space &&
1344 digest_size > sizeof(checksums_onstack))) {
1345 r = -EINVAL;
1346 goto error;
1347 }
1348 }
1338 1349
1339 __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) { 1350 __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
1340 unsigned pos; 1351 unsigned pos;
@@ -1546,7 +1557,7 @@ retry_kmap:
1546 } while (++s < ic->sectors_per_block); 1557 } while (++s < ic->sectors_per_block);
1547#ifdef INTERNAL_VERIFY 1558#ifdef INTERNAL_VERIFY
1548 if (ic->internal_hash) { 1559 if (ic->internal_hash) {
1549 char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; 1560 char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1550 1561
1551 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); 1562 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1552 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { 1563 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -1596,7 +1607,7 @@ retry_kmap:
1596 if (ic->internal_hash) { 1607 if (ic->internal_hash) {
1597 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash); 1608 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1598 if (unlikely(digest_size > ic->tag_size)) { 1609 if (unlikely(digest_size > ic->tag_size)) {
1599 char checksums_onstack[digest_size]; 1610 char checksums_onstack[HASH_MAX_DIGESTSIZE];
1600 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack); 1611 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1601 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size); 1612 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1602 } else 1613 } else
@@ -2023,7 +2034,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2023 unlikely(from_replay) && 2034 unlikely(from_replay) &&
2024#endif 2035#endif
2025 ic->internal_hash) { 2036 ic->internal_hash) {
2026 char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)]; 2037 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2027 2038
2028 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), 2039 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2029 (char *)access_journal_data(ic, i, l), test_tag); 2040 (char *)access_journal_data(ic, i, l), test_tag);
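
dm-integrity (and dm-verity-fec just below) replaces digest-sized VLAs with fixed HASH_MAX_DIGESTSIZE buffers plus an explicit bound check, since the shash is chosen at table-load time and could in principle report a larger digest than the stack buffer. The guard pattern, reduced to its core (the function name is illustrative):

#include <crypto/hash.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int final_digest(struct shash_desc *desc, u8 *out, unsigned int out_len)
{
	u8 digest[HASH_MAX_DIGESTSIZE];
	unsigned int size = crypto_shash_digestsize(desc->tfm);
	int r;

	/* The bound is now checked instead of being baked into a VLA. */
	if (WARN_ON(size > sizeof(digest)))
		return -EINVAL;

	r = crypto_shash_final(desc, digest);
	if (r)
		return r;

	memcpy(out, digest, min(size, out_len));
	memzero_explicit(digest, sizeof(digest));
	return 0;
}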
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 684af08d0747..0ce04e5b4afb 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -212,12 +212,15 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
212 struct dm_verity_fec_io *fio = fec_io(io); 212 struct dm_verity_fec_io *fio = fec_io(io);
213 u64 block, ileaved; 213 u64 block, ileaved;
214 u8 *bbuf, *rs_block; 214 u8 *bbuf, *rs_block;
215 u8 want_digest[v->digest_size]; 215 u8 want_digest[HASH_MAX_DIGESTSIZE];
216 unsigned n, k; 216 unsigned n, k;
217 217
218 if (neras) 218 if (neras)
219 *neras = 0; 219 *neras = 0;
220 220
221 if (WARN_ON(v->digest_size > sizeof(want_digest)))
222 return -EINVAL;
223
221 /* 224 /*
222 * read each of the rsn data blocks that are part of the RS block, and 225 * read each of the rsn data blocks that are part of the RS block, and
223 * interleave contents to available bufs 226 * interleave contents to available bufs
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 267322693ed5..9a6065a3fa46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
520 txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL); 520 txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
521 if (!txq_info) 521 if (!txq_info)
522 return -ENOMEM; 522 return -ENOMEM;
523 if (uld_type == CXGB4_ULD_CRYPTO) {
524 i = min_t(int, adap->vres.ncrypto_fc,
525 num_online_cpus());
526 txq_info->ntxq = rounddown(i, adap->params.nports);
527 if (txq_info->ntxq <= 0) {
528 dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
529 kfree(txq_info);
530 return -EINVAL;
531 }
523 532
524 i = min_t(int, uld_info->ntxq, num_online_cpus()); 533 } else {
525 txq_info->ntxq = roundup(i, adap->params.nports); 534 i = min_t(int, uld_info->ntxq, num_online_cpus());
526 535 txq_info->ntxq = roundup(i, adap->params.nports);
536 }
527 txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq), 537 txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
528 GFP_KERNEL); 538 GFP_KERNEL);
529 if (!txq_info->uldtxq) { 539 if (!txq_info->uldtxq) {
@@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
546 struct cxgb4_lld_info *lli) 556 struct cxgb4_lld_info *lli)
547{ 557{
548 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; 558 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
559 int tx_uld_type = TX_ULD(uld_type);
560 struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
549 561
550 lli->rxq_ids = rxq_info->rspq_id; 562 lli->rxq_ids = rxq_info->rspq_id;
551 lli->nrxq = rxq_info->nrxq; 563 lli->nrxq = rxq_info->nrxq;
552 lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq; 564 lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
553 lli->nciq = rxq_info->nciq; 565 lli->nciq = rxq_info->nciq;
566 lli->ntxq = txq_info->ntxq;
554} 567}
555 568
556int t4_uld_mem_alloc(struct adapter *adap) 569int t4_uld_mem_alloc(struct adapter *adap)
@@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
634 lld->ports = adap->port; 647 lld->ports = adap->port;
635 lld->vr = &adap->vres; 648 lld->vr = &adap->vres;
636 lld->mtus = adap->params.mtus; 649 lld->mtus = adap->params.mtus;
637 lld->ntxq = adap->sge.ofldqsets;
638 lld->nchan = adap->params.nports; 650 lld->nchan = adap->params.nports;
639 lld->nports = adap->params.nports; 651 lld->nports = adap->params.nports;
640 lld->wr_cred = adap->params.ofldq_wr_cred; 652 lld->wr_cred = adap->params.ofldq_wr_cred;
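
The cxgb4 change sizes the crypto ULD's TX queues from the firmware-advertised count (vres.ncrypto_fc) instead of the generic ntxq, and rounds down to a multiple of the port count so each port gets an equal share; because it rounds down rather than up, the result can be zero, hence the new -EINVAL path. For example, with 2 ports, 8 online CPUs and ncrypto_fc = 7, min_t() gives 7 and rounddown(7, 2) yields 6 queues; with ncrypto_fc = 1 the result is 0 and setup fails. The sizing rule in isolation, as a sketch (helper name illustrative):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Number of crypto TX queues, or 0 if none can be evenly distributed. */
static int crypto_ntxq(int ncrypto_fc, int nports)
{
	int i = min_t(int, ncrypto_fc, num_online_cpus());

	/* Spread queues evenly across ports; a partial group is dropped. */
	return rounddown(i, nports);
}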
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index a205750b431b..7ccdc62c6052 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -95,7 +95,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
95 * State for an MPPE (de)compressor. 95 * State for an MPPE (de)compressor.
96 */ 96 */
97struct ppp_mppe_state { 97struct ppp_mppe_state {
98 struct crypto_skcipher *arc4; 98 struct crypto_sync_skcipher *arc4;
99 struct shash_desc *sha1; 99 struct shash_desc *sha1;
100 unsigned char *sha1_digest; 100 unsigned char *sha1_digest;
101 unsigned char master_key[MPPE_MAX_KEY_LEN]; 101 unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,15 +155,15 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
155static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) 155static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
156{ 156{
157 struct scatterlist sg_in[1], sg_out[1]; 157 struct scatterlist sg_in[1], sg_out[1];
158 SKCIPHER_REQUEST_ON_STACK(req, state->arc4); 158 SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
159 159
160 skcipher_request_set_tfm(req, state->arc4); 160 skcipher_request_set_sync_tfm(req, state->arc4);
161 skcipher_request_set_callback(req, 0, NULL, NULL); 161 skcipher_request_set_callback(req, 0, NULL, NULL);
162 162
163 get_new_key_from_sha(state); 163 get_new_key_from_sha(state);
164 if (!initial_key) { 164 if (!initial_key) {
165 crypto_skcipher_setkey(state->arc4, state->sha1_digest, 165 crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
166 state->keylen); 166 state->keylen);
167 sg_init_table(sg_in, 1); 167 sg_init_table(sg_in, 1);
168 sg_init_table(sg_out, 1); 168 sg_init_table(sg_out, 1);
169 setup_sg(sg_in, state->sha1_digest, state->keylen); 169 setup_sg(sg_in, state->sha1_digest, state->keylen);
@@ -181,7 +181,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
181 state->session_key[1] = 0x26; 181 state->session_key[1] = 0x26;
182 state->session_key[2] = 0x9e; 182 state->session_key[2] = 0x9e;
183 } 183 }
184 crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen); 184 crypto_sync_skcipher_setkey(state->arc4, state->session_key,
185 state->keylen);
185 skcipher_request_zero(req); 186 skcipher_request_zero(req);
186} 187}
187 188
@@ -203,7 +204,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
203 goto out; 204 goto out;
204 205
205 206
206 state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 207 state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
207 if (IS_ERR(state->arc4)) { 208 if (IS_ERR(state->arc4)) {
208 state->arc4 = NULL; 209 state->arc4 = NULL;
209 goto out_free; 210 goto out_free;
@@ -250,7 +251,7 @@ out_free:
250 crypto_free_shash(state->sha1->tfm); 251 crypto_free_shash(state->sha1->tfm);
251 kzfree(state->sha1); 252 kzfree(state->sha1);
252 } 253 }
253 crypto_free_skcipher(state->arc4); 254 crypto_free_sync_skcipher(state->arc4);
254 kfree(state); 255 kfree(state);
255out: 256out:
256 return NULL; 257 return NULL;
@@ -266,7 +267,7 @@ static void mppe_free(void *arg)
266 kfree(state->sha1_digest); 267 kfree(state->sha1_digest);
267 crypto_free_shash(state->sha1->tfm); 268 crypto_free_shash(state->sha1->tfm);
268 kzfree(state->sha1); 269 kzfree(state->sha1);
269 crypto_free_skcipher(state->arc4); 270 crypto_free_sync_skcipher(state->arc4);
270 kfree(state); 271 kfree(state);
271 } 272 }
272} 273}
@@ -366,7 +367,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
366 int isize, int osize) 367 int isize, int osize)
367{ 368{
368 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 369 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
369 SKCIPHER_REQUEST_ON_STACK(req, state->arc4); 370 SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
370 int proto; 371 int proto;
371 int err; 372 int err;
372 struct scatterlist sg_in[1], sg_out[1]; 373 struct scatterlist sg_in[1], sg_out[1];
@@ -426,7 +427,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
426 setup_sg(sg_in, ibuf, isize); 427 setup_sg(sg_in, ibuf, isize);
427 setup_sg(sg_out, obuf, osize); 428 setup_sg(sg_out, obuf, osize);
428 429
429 skcipher_request_set_tfm(req, state->arc4); 430 skcipher_request_set_sync_tfm(req, state->arc4);
430 skcipher_request_set_callback(req, 0, NULL, NULL); 431 skcipher_request_set_callback(req, 0, NULL, NULL);
431 skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL); 432 skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
432 err = crypto_skcipher_encrypt(req); 433 err = crypto_skcipher_encrypt(req);
@@ -480,7 +481,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
480 int osize) 481 int osize)
481{ 482{
482 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; 483 struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
483 SKCIPHER_REQUEST_ON_STACK(req, state->arc4); 484 SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
484 unsigned ccount; 485 unsigned ccount;
485 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; 486 int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
486 struct scatterlist sg_in[1], sg_out[1]; 487 struct scatterlist sg_in[1], sg_out[1];
@@ -615,7 +616,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
615 setup_sg(sg_in, ibuf, 1); 616 setup_sg(sg_in, ibuf, 1);
616 setup_sg(sg_out, obuf, 1); 617 setup_sg(sg_out, obuf, 1);
617 618
618 skcipher_request_set_tfm(req, state->arc4); 619 skcipher_request_set_sync_tfm(req, state->arc4);
619 skcipher_request_set_callback(req, 0, NULL, NULL); 620 skcipher_request_set_callback(req, 0, NULL, NULL);
620 skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL); 621 skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
621 if (crypto_skcipher_decrypt(req)) { 622 if (crypto_skcipher_decrypt(req)) {
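The ppp_mppe hunks above are representative of the sync-skcipher conversion applied throughout this pull: the tfm is allocated with crypto_alloc_sync_skcipher(), the on-stack request is declared with SYNC_SKCIPHER_REQUEST_ON_STACK() (bounded by MAX_SYNC_SKCIPHER_REQSIZE instead of a VLA), and the request is bound with skcipher_request_set_sync_tfm(). A minimal sketch of that pattern in isolation, using a made-up demo_arc4_encrypt() helper rather than any function from the patch:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/*
 * Encrypt a buffer in place with a synchronous "ecb(arc4)" transform.
 * Illustrative only: the buffer must be addressable by scatterlists
 * (i.e. not on the stack), and error handling is kept to a minimum.
 */
static int demo_arc4_encrypt(const u8 *key, unsigned int keylen,
			     u8 *buf, unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, NULL);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}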
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 9b17f72349ed..321a92613a7e 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -310,6 +310,37 @@ int dpaa2_io_service_rearm(struct dpaa2_io *d,
310EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm); 310EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
311 311
312/** 312/**
313 * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
314 * @d: the given DPIO service.
315 * @fqid: the given frame queue id.
316 * @s: the dpaa2_io_store object for the result.
317 *
318 * Return 0 for success, or error code for failure.
319 */
320int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
321 struct dpaa2_io_store *s)
322{
323 struct qbman_pull_desc pd;
324 int err;
325
326 qbman_pull_desc_clear(&pd);
327 qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
328 qbman_pull_desc_set_numframes(&pd, (u8)s->max);
329 qbman_pull_desc_set_fq(&pd, fqid);
330
331 d = service_select(d);
332 if (!d)
333 return -ENODEV;
334 s->swp = d->swp;
335 err = qbman_swp_pull(d->swp, &pd);
336 if (err)
337 s->swp = NULL;
338
339 return err;
340}
341EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
342
343/**
313 * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel. 344 * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
314 * @d: the given DPIO service. 345 * @d: the given DPIO service.
315 * @channelid: the given channel id. 346 * @channelid: the given channel id.
@@ -342,6 +373,33 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
342EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel); 373EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
343 374
344/** 375/**
376 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
377 * @d: the given DPIO service.
378 * @fqid: the given frame queue id.
379 * @fd: the frame descriptor which is enqueued.
380 *
381 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
382 * or -ENODEV if there is no dpio service.
383 */
384int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
385 u32 fqid,
386 const struct dpaa2_fd *fd)
387{
388 struct qbman_eq_desc ed;
389
390 d = service_select(d);
391 if (!d)
392 return -ENODEV;
393
394 qbman_eq_desc_clear(&ed);
395 qbman_eq_desc_set_no_orp(&ed, 0);
396 qbman_eq_desc_set_fq(&ed, fqid);
397
398 return qbman_swp_enqueue(d->swp, &ed, fd);
399}
400EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
401
402/**
345 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. 403 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
346 * @d: the given DPIO service. 404 * @d: the given DPIO service.
347 * @qdid: the given queuing destination id. 405 * @qdid: the given queuing destination id.
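The two helpers exported above complement each other: dpaa2_io_service_enqueue_fq() pushes one frame descriptor onto a frame queue, while dpaa2_io_service_pull_fq() issues a pull-dequeue whose results land in a dpaa2_io_store. A hedged sketch of a caller follows; the store depth of 8, the polling loop, and the demo_fq_roundtrip() name are illustrative assumptions rather than requirements of the API (real consumers normally dequeue from a notification callback):

#include <linux/device.h>
#include <soc/fsl/dpaa2-io.h>

static int demo_fq_roundtrip(struct dpaa2_io *io, u32 fqid,
			     const struct dpaa2_fd *fd, struct device *dev)
{
	struct dpaa2_io_store *store;
	int err, is_last = 0, frames = 0;

	err = dpaa2_io_service_enqueue_fq(io, fqid, fd);
	if (err)
		return err;	/* -EBUSY: enqueue ring not ready, retry later */

	store = dpaa2_io_store_create(8, dev);	/* room for 8 dequeue entries */
	if (!store)
		return -ENOMEM;

	err = dpaa2_io_service_pull_fq(io, fqid, store);
	if (!err) {
		/* dpaa2_io_store_next() returns NULL until a result is ready */
		do {
			if (dpaa2_io_store_next(store, &is_last))
				frames++;	/* a real consumer would process the FD here */
		} while (!is_last);
		dev_dbg(dev, "pulled %d frame(s) from FQ %u\n", frames, fqid);
	}

	dpaa2_io_store_destroy(store);
	return err;
}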
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 9f18be14dda6..f38f1f74fcd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -49,9 +49,9 @@ struct rtllib_tkip_data {
49 u32 dot11RSNAStatsTKIPLocalMICFailures; 49 u32 dot11RSNAStatsTKIPLocalMICFailures;
50 50
51 int key_idx; 51 int key_idx;
52 struct crypto_skcipher *rx_tfm_arc4; 52 struct crypto_sync_skcipher *rx_tfm_arc4;
53 struct crypto_shash *rx_tfm_michael; 53 struct crypto_shash *rx_tfm_michael;
54 struct crypto_skcipher *tx_tfm_arc4; 54 struct crypto_sync_skcipher *tx_tfm_arc4;
55 struct crypto_shash *tx_tfm_michael; 55 struct crypto_shash *tx_tfm_michael;
56 /* scratch buffers for virt_to_page() (crypto API) */ 56 /* scratch buffers for virt_to_page() (crypto API) */
57 u8 rx_hdr[16]; 57 u8 rx_hdr[16];
@@ -66,8 +66,7 @@ static void *rtllib_tkip_init(int key_idx)
66 if (priv == NULL) 66 if (priv == NULL)
67 goto fail; 67 goto fail;
68 priv->key_idx = key_idx; 68 priv->key_idx = key_idx;
69 priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, 69 priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
70 CRYPTO_ALG_ASYNC);
71 if (IS_ERR(priv->tx_tfm_arc4)) { 70 if (IS_ERR(priv->tx_tfm_arc4)) {
72 pr_debug("Could not allocate crypto API arc4\n"); 71 pr_debug("Could not allocate crypto API arc4\n");
73 priv->tx_tfm_arc4 = NULL; 72 priv->tx_tfm_arc4 = NULL;
@@ -81,8 +80,7 @@ static void *rtllib_tkip_init(int key_idx)
81 goto fail; 80 goto fail;
82 } 81 }
83 82
84 priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, 83 priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
85 CRYPTO_ALG_ASYNC);
86 if (IS_ERR(priv->rx_tfm_arc4)) { 84 if (IS_ERR(priv->rx_tfm_arc4)) {
87 pr_debug("Could not allocate crypto API arc4\n"); 85 pr_debug("Could not allocate crypto API arc4\n");
88 priv->rx_tfm_arc4 = NULL; 86 priv->rx_tfm_arc4 = NULL;
@@ -100,9 +98,9 @@ static void *rtllib_tkip_init(int key_idx)
100fail: 98fail:
101 if (priv) { 99 if (priv) {
102 crypto_free_shash(priv->tx_tfm_michael); 100 crypto_free_shash(priv->tx_tfm_michael);
103 crypto_free_skcipher(priv->tx_tfm_arc4); 101 crypto_free_sync_skcipher(priv->tx_tfm_arc4);
104 crypto_free_shash(priv->rx_tfm_michael); 102 crypto_free_shash(priv->rx_tfm_michael);
105 crypto_free_skcipher(priv->rx_tfm_arc4); 103 crypto_free_sync_skcipher(priv->rx_tfm_arc4);
106 kfree(priv); 104 kfree(priv);
107 } 105 }
108 106
@@ -116,9 +114,9 @@ static void rtllib_tkip_deinit(void *priv)
116 114
117 if (_priv) { 115 if (_priv) {
118 crypto_free_shash(_priv->tx_tfm_michael); 116 crypto_free_shash(_priv->tx_tfm_michael);
119 crypto_free_skcipher(_priv->tx_tfm_arc4); 117 crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
120 crypto_free_shash(_priv->rx_tfm_michael); 118 crypto_free_shash(_priv->rx_tfm_michael);
121 crypto_free_skcipher(_priv->rx_tfm_arc4); 119 crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
122 } 120 }
123 kfree(priv); 121 kfree(priv);
124} 122}
@@ -337,7 +335,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
337 *pos++ = (tkey->tx_iv32 >> 24) & 0xff; 335 *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
338 336
339 if (!tcb_desc->bHwSec) { 337 if (!tcb_desc->bHwSec) {
340 SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4); 338 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
341 339
342 icv = skb_put(skb, 4); 340 icv = skb_put(skb, 4);
343 crc = ~crc32_le(~0, pos, len); 341 crc = ~crc32_le(~0, pos, len);
@@ -349,8 +347,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
349 sg_init_one(&sg, pos, len+4); 347 sg_init_one(&sg, pos, len+4);
350 348
351 349
352 crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); 350 crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
353 skcipher_request_set_tfm(req, tkey->tx_tfm_arc4); 351 skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
354 skcipher_request_set_callback(req, 0, NULL, NULL); 352 skcipher_request_set_callback(req, 0, NULL, NULL);
355 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); 353 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
356 ret = crypto_skcipher_encrypt(req); 354 ret = crypto_skcipher_encrypt(req);
@@ -420,7 +418,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
420 pos += 8; 418 pos += 8;
421 419
422 if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) { 420 if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
423 SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4); 421 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
424 422
425 if ((iv32 < tkey->rx_iv32 || 423 if ((iv32 < tkey->rx_iv32 ||
426 (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) && 424 (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
@@ -447,8 +445,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
447 445
448 sg_init_one(&sg, pos, plen+4); 446 sg_init_one(&sg, pos, plen+4);
449 447
450 crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); 448 crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
451 skcipher_request_set_tfm(req, tkey->rx_tfm_arc4); 449 skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
452 skcipher_request_set_callback(req, 0, NULL, NULL); 450 skcipher_request_set_callback(req, 0, NULL, NULL);
453 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); 451 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
454 err = crypto_skcipher_decrypt(req); 452 err = crypto_skcipher_decrypt(req);
@@ -664,9 +662,9 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
664 struct rtllib_tkip_data *tkey = priv; 662 struct rtllib_tkip_data *tkey = priv;
665 int keyidx; 663 int keyidx;
666 struct crypto_shash *tfm = tkey->tx_tfm_michael; 664 struct crypto_shash *tfm = tkey->tx_tfm_michael;
667 struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4; 665 struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
668 struct crypto_shash *tfm3 = tkey->rx_tfm_michael; 666 struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
669 struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4; 667 struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
670 668
671 keyidx = tkey->key_idx; 669 keyidx = tkey->key_idx;
672 memset(tkey, 0, sizeof(*tkey)); 670 memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index b3343a5d0fd6..d11ec39171d5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -27,8 +27,8 @@ struct prism2_wep_data {
27 u8 key[WEP_KEY_LEN + 1]; 27 u8 key[WEP_KEY_LEN + 1];
28 u8 key_len; 28 u8 key_len;
29 u8 key_idx; 29 u8 key_idx;
30 struct crypto_skcipher *tx_tfm; 30 struct crypto_sync_skcipher *tx_tfm;
31 struct crypto_skcipher *rx_tfm; 31 struct crypto_sync_skcipher *rx_tfm;
32}; 32};
33 33
34 34
@@ -41,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
41 goto fail; 41 goto fail;
42 priv->key_idx = keyidx; 42 priv->key_idx = keyidx;
43 43
44 priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 44 priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
45 if (IS_ERR(priv->tx_tfm)) { 45 if (IS_ERR(priv->tx_tfm)) {
46 pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n"); 46 pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
47 priv->tx_tfm = NULL; 47 priv->tx_tfm = NULL;
48 goto fail; 48 goto fail;
49 } 49 }
50 priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 50 priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
51 if (IS_ERR(priv->rx_tfm)) { 51 if (IS_ERR(priv->rx_tfm)) {
52 pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n"); 52 pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
53 priv->rx_tfm = NULL; 53 priv->rx_tfm = NULL;
@@ -61,8 +61,8 @@ static void *prism2_wep_init(int keyidx)
61 61
62fail: 62fail:
63 if (priv) { 63 if (priv) {
64 crypto_free_skcipher(priv->tx_tfm); 64 crypto_free_sync_skcipher(priv->tx_tfm);
65 crypto_free_skcipher(priv->rx_tfm); 65 crypto_free_sync_skcipher(priv->rx_tfm);
66 kfree(priv); 66 kfree(priv);
67 } 67 }
68 return NULL; 68 return NULL;
@@ -74,8 +74,8 @@ static void prism2_wep_deinit(void *priv)
74 struct prism2_wep_data *_priv = priv; 74 struct prism2_wep_data *_priv = priv;
75 75
76 if (_priv) { 76 if (_priv) {
77 crypto_free_skcipher(_priv->tx_tfm); 77 crypto_free_sync_skcipher(_priv->tx_tfm);
78 crypto_free_skcipher(_priv->rx_tfm); 78 crypto_free_sync_skcipher(_priv->rx_tfm);
79 } 79 }
80 kfree(priv); 80 kfree(priv);
81} 81}
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
135 memcpy(key + 3, wep->key, wep->key_len); 135 memcpy(key + 3, wep->key, wep->key_len);
136 136
137 if (!tcb_desc->bHwSec) { 137 if (!tcb_desc->bHwSec) {
138 SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm); 138 SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
139 139
140 /* Append little-endian CRC32 and encrypt it to produce ICV */ 140 /* Append little-endian CRC32 and encrypt it to produce ICV */
141 crc = ~crc32_le(~0, pos, len); 141 crc = ~crc32_le(~0, pos, len);
@@ -146,8 +146,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
146 icv[3] = crc >> 24; 146 icv[3] = crc >> 24;
147 147
148 sg_init_one(&sg, pos, len+4); 148 sg_init_one(&sg, pos, len+4);
149 crypto_skcipher_setkey(wep->tx_tfm, key, klen); 149 crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
150 skcipher_request_set_tfm(req, wep->tx_tfm); 150 skcipher_request_set_sync_tfm(req, wep->tx_tfm);
151 skcipher_request_set_callback(req, 0, NULL, NULL); 151 skcipher_request_set_callback(req, 0, NULL, NULL);
152 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); 152 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
153 err = crypto_skcipher_encrypt(req); 153 err = crypto_skcipher_encrypt(req);
@@ -199,11 +199,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
199 plen = skb->len - hdr_len - 8; 199 plen = skb->len - hdr_len - 8;
200 200
201 if (!tcb_desc->bHwSec) { 201 if (!tcb_desc->bHwSec) {
202 SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm); 202 SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
203 203
204 sg_init_one(&sg, pos, plen+4); 204 sg_init_one(&sg, pos, plen+4);
205 crypto_skcipher_setkey(wep->rx_tfm, key, klen); 205 crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
206 skcipher_request_set_tfm(req, wep->rx_tfm); 206 skcipher_request_set_sync_tfm(req, wep->rx_tfm);
207 skcipher_request_set_callback(req, 0, NULL, NULL); 207 skcipher_request_set_callback(req, 0, NULL, NULL);
208 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); 208 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
209 err = crypto_skcipher_decrypt(req); 209 err = crypto_skcipher_decrypt(req);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 1088fa0aee0e..829fa4bd253c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -53,9 +53,9 @@ struct ieee80211_tkip_data {
53 53
54 int key_idx; 54 int key_idx;
55 55
56 struct crypto_skcipher *rx_tfm_arc4; 56 struct crypto_sync_skcipher *rx_tfm_arc4;
57 struct crypto_shash *rx_tfm_michael; 57 struct crypto_shash *rx_tfm_michael;
58 struct crypto_skcipher *tx_tfm_arc4; 58 struct crypto_sync_skcipher *tx_tfm_arc4;
59 struct crypto_shash *tx_tfm_michael; 59 struct crypto_shash *tx_tfm_michael;
60 60
61 /* scratch buffers for virt_to_page() (crypto API) */ 61 /* scratch buffers for virt_to_page() (crypto API) */
@@ -71,8 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
71 goto fail; 71 goto fail;
72 priv->key_idx = key_idx; 72 priv->key_idx = key_idx;
73 73
74 priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, 74 priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
75 CRYPTO_ALG_ASYNC);
76 if (IS_ERR(priv->tx_tfm_arc4)) { 75 if (IS_ERR(priv->tx_tfm_arc4)) {
77 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " 76 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
78 "crypto API arc4\n"); 77 "crypto API arc4\n");
@@ -88,8 +87,7 @@ static void *ieee80211_tkip_init(int key_idx)
88 goto fail; 87 goto fail;
89 } 88 }
90 89
91 priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, 90 priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
92 CRYPTO_ALG_ASYNC);
93 if (IS_ERR(priv->rx_tfm_arc4)) { 91 if (IS_ERR(priv->rx_tfm_arc4)) {
94 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " 92 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
95 "crypto API arc4\n"); 93 "crypto API arc4\n");
@@ -110,9 +108,9 @@ static void *ieee80211_tkip_init(int key_idx)
110fail: 108fail:
111 if (priv) { 109 if (priv) {
112 crypto_free_shash(priv->tx_tfm_michael); 110 crypto_free_shash(priv->tx_tfm_michael);
113 crypto_free_skcipher(priv->tx_tfm_arc4); 111 crypto_free_sync_skcipher(priv->tx_tfm_arc4);
114 crypto_free_shash(priv->rx_tfm_michael); 112 crypto_free_shash(priv->rx_tfm_michael);
115 crypto_free_skcipher(priv->rx_tfm_arc4); 113 crypto_free_sync_skcipher(priv->rx_tfm_arc4);
116 kfree(priv); 114 kfree(priv);
117 } 115 }
118 116
@@ -126,9 +124,9 @@ static void ieee80211_tkip_deinit(void *priv)
126 124
127 if (_priv) { 125 if (_priv) {
128 crypto_free_shash(_priv->tx_tfm_michael); 126 crypto_free_shash(_priv->tx_tfm_michael);
129 crypto_free_skcipher(_priv->tx_tfm_arc4); 127 crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
130 crypto_free_shash(_priv->rx_tfm_michael); 128 crypto_free_shash(_priv->rx_tfm_michael);
131 crypto_free_skcipher(_priv->rx_tfm_arc4); 129 crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
132 } 130 }
133 kfree(priv); 131 kfree(priv);
134} 132}
@@ -340,7 +338,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
340 *pos++ = (tkey->tx_iv32 >> 24) & 0xff; 338 *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
341 339
342 if (!tcb_desc->bHwSec) { 340 if (!tcb_desc->bHwSec) {
343 SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4); 341 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
344 342
345 icv = skb_put(skb, 4); 343 icv = skb_put(skb, 4);
346 crc = ~crc32_le(~0, pos, len); 344 crc = ~crc32_le(~0, pos, len);
@@ -348,9 +346,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
348 icv[1] = crc >> 8; 346 icv[1] = crc >> 8;
349 icv[2] = crc >> 16; 347 icv[2] = crc >> 16;
350 icv[3] = crc >> 24; 348 icv[3] = crc >> 24;
351 crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); 349 crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
352 sg_init_one(&sg, pos, len+4); 350 sg_init_one(&sg, pos, len+4);
353 skcipher_request_set_tfm(req, tkey->tx_tfm_arc4); 351 skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
354 skcipher_request_set_callback(req, 0, NULL, NULL); 352 skcipher_request_set_callback(req, 0, NULL, NULL);
355 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); 353 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
356 ret = crypto_skcipher_encrypt(req); 354 ret = crypto_skcipher_encrypt(req);
@@ -418,7 +416,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
418 pos += 8; 416 pos += 8;
419 417
420 if (!tcb_desc->bHwSec) { 418 if (!tcb_desc->bHwSec) {
421 SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4); 419 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
422 420
423 if (iv32 < tkey->rx_iv32 || 421 if (iv32 < tkey->rx_iv32 ||
424 (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) { 422 (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
@@ -440,10 +438,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
440 438
441 plen = skb->len - hdr_len - 12; 439 plen = skb->len - hdr_len - 12;
442 440
443 crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); 441 crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
444 sg_init_one(&sg, pos, plen+4); 442 sg_init_one(&sg, pos, plen+4);
445 443
446 skcipher_request_set_tfm(req, tkey->rx_tfm_arc4); 444 skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
447 skcipher_request_set_callback(req, 0, NULL, NULL); 445 skcipher_request_set_callback(req, 0, NULL, NULL);
448 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); 446 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
449 447
@@ -663,9 +661,9 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
663 struct ieee80211_tkip_data *tkey = priv; 661 struct ieee80211_tkip_data *tkey = priv;
664 int keyidx; 662 int keyidx;
665 struct crypto_shash *tfm = tkey->tx_tfm_michael; 663 struct crypto_shash *tfm = tkey->tx_tfm_michael;
666 struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4; 664 struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
667 struct crypto_shash *tfm3 = tkey->rx_tfm_michael; 665 struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
668 struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4; 666 struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
669 667
670 keyidx = tkey->key_idx; 668 keyidx = tkey->key_idx;
671 memset(tkey, 0, sizeof(*tkey)); 669 memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index b9f86be9e52b..d4a1bf0caa7a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -32,8 +32,8 @@ struct prism2_wep_data {
32 u8 key[WEP_KEY_LEN + 1]; 32 u8 key[WEP_KEY_LEN + 1];
33 u8 key_len; 33 u8 key_len;
34 u8 key_idx; 34 u8 key_idx;
35 struct crypto_skcipher *tx_tfm; 35 struct crypto_sync_skcipher *tx_tfm;
36 struct crypto_skcipher *rx_tfm; 36 struct crypto_sync_skcipher *rx_tfm;
37}; 37};
38 38
39 39
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
46 return NULL; 46 return NULL;
47 priv->key_idx = keyidx; 47 priv->key_idx = keyidx;
48 48
49 priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 49 priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
50 if (IS_ERR(priv->tx_tfm)) 50 if (IS_ERR(priv->tx_tfm))
51 goto free_priv; 51 goto free_priv;
52 priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); 52 priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
53 if (IS_ERR(priv->rx_tfm)) 53 if (IS_ERR(priv->rx_tfm))
54 goto free_tx; 54 goto free_tx;
55 55
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
58 58
59 return priv; 59 return priv;
60free_tx: 60free_tx:
61 crypto_free_skcipher(priv->tx_tfm); 61 crypto_free_sync_skcipher(priv->tx_tfm);
62free_priv: 62free_priv:
63 kfree(priv); 63 kfree(priv);
64 return NULL; 64 return NULL;
@@ -70,8 +70,8 @@ static void prism2_wep_deinit(void *priv)
70 struct prism2_wep_data *_priv = priv; 70 struct prism2_wep_data *_priv = priv;
71 71
72 if (_priv) { 72 if (_priv) {
73 crypto_free_skcipher(_priv->tx_tfm); 73 crypto_free_sync_skcipher(_priv->tx_tfm);
74 crypto_free_skcipher(_priv->rx_tfm); 74 crypto_free_sync_skcipher(_priv->rx_tfm);
75 } 75 }
76 kfree(priv); 76 kfree(priv);
77} 77}
@@ -128,7 +128,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
128 memcpy(key + 3, wep->key, wep->key_len); 128 memcpy(key + 3, wep->key, wep->key_len);
129 129
130 if (!tcb_desc->bHwSec) { 130 if (!tcb_desc->bHwSec) {
131 SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm); 131 SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
132 132
133 /* Append little-endian CRC32 and encrypt it to produce ICV */ 133 /* Append little-endian CRC32 and encrypt it to produce ICV */
134 crc = ~crc32_le(~0, pos, len); 134 crc = ~crc32_le(~0, pos, len);
@@ -138,10 +138,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
138 icv[2] = crc >> 16; 138 icv[2] = crc >> 16;
139 icv[3] = crc >> 24; 139 icv[3] = crc >> 24;
140 140
141 crypto_skcipher_setkey(wep->tx_tfm, key, klen); 141 crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
142 sg_init_one(&sg, pos, len+4); 142 sg_init_one(&sg, pos, len+4);
143 143
144 skcipher_request_set_tfm(req, wep->tx_tfm); 144 skcipher_request_set_sync_tfm(req, wep->tx_tfm);
145 skcipher_request_set_callback(req, 0, NULL, NULL); 145 skcipher_request_set_callback(req, 0, NULL, NULL);
146 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); 146 skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
147 147
@@ -193,12 +193,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
193 plen = skb->len - hdr_len - 8; 193 plen = skb->len - hdr_len - 8;
194 194
195 if (!tcb_desc->bHwSec) { 195 if (!tcb_desc->bHwSec) {
196 SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm); 196 SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
197 197
198 crypto_skcipher_setkey(wep->rx_tfm, key, klen); 198 crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
199 sg_init_one(&sg, pos, plen+4); 199 sg_init_one(&sg, pos, plen+4);
200 200
201 skcipher_request_set_tfm(req, wep->rx_tfm); 201 skcipher_request_set_sync_tfm(req, wep->rx_tfm);
202 skcipher_request_set_callback(req, 0, NULL, NULL); 202 skcipher_request_set_callback(req, 0, NULL, NULL);
203 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); 203 skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
204 204
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index aff50eb09ca9..68ddee86a886 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -189,7 +189,7 @@ struct wusb_mac_scratch {
189 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's 189 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
190 * what sg[4] is for. Maybe there is a smarter way to do this. 190 * what sg[4] is for. Maybe there is a smarter way to do this.
191 */ 191 */
192static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, 192static int wusb_ccm_mac(struct crypto_sync_skcipher *tfm_cbc,
193 struct crypto_cipher *tfm_aes, 193 struct crypto_cipher *tfm_aes,
194 struct wusb_mac_scratch *scratch, 194 struct wusb_mac_scratch *scratch,
195 void *mic, 195 void *mic,
@@ -198,7 +198,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
198 size_t blen) 198 size_t blen)
199{ 199{
200 int result = 0; 200 int result = 0;
201 SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc); 201 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
202 struct scatterlist sg[4], sg_dst; 202 struct scatterlist sg[4], sg_dst;
203 void *dst_buf; 203 void *dst_buf;
204 size_t dst_size; 204 size_t dst_size;
@@ -224,7 +224,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
224 if (!dst_buf) 224 if (!dst_buf)
225 goto error_dst_buf; 225 goto error_dst_buf;
226 226
227 iv = kzalloc(crypto_skcipher_ivsize(tfm_cbc), GFP_KERNEL); 227 iv = kzalloc(crypto_sync_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
228 if (!iv) 228 if (!iv)
229 goto error_iv; 229 goto error_iv;
230 230
@@ -251,7 +251,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
251 sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0); 251 sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
252 sg_init_one(&sg_dst, dst_buf, dst_size); 252 sg_init_one(&sg_dst, dst_buf, dst_size);
253 253
254 skcipher_request_set_tfm(req, tfm_cbc); 254 skcipher_request_set_sync_tfm(req, tfm_cbc);
255 skcipher_request_set_callback(req, 0, NULL, NULL); 255 skcipher_request_set_callback(req, 0, NULL, NULL);
256 skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv); 256 skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
257 result = crypto_skcipher_encrypt(req); 257 result = crypto_skcipher_encrypt(req);
@@ -298,19 +298,19 @@ ssize_t wusb_prf(void *out, size_t out_size,
298{ 298{
299 ssize_t result, bytes = 0, bitr; 299 ssize_t result, bytes = 0, bitr;
300 struct aes_ccm_nonce n = *_n; 300 struct aes_ccm_nonce n = *_n;
301 struct crypto_skcipher *tfm_cbc; 301 struct crypto_sync_skcipher *tfm_cbc;
302 struct crypto_cipher *tfm_aes; 302 struct crypto_cipher *tfm_aes;
303 struct wusb_mac_scratch *scratch; 303 struct wusb_mac_scratch *scratch;
304 u64 sfn = 0; 304 u64 sfn = 0;
305 __le64 sfn_le; 305 __le64 sfn_le;
306 306
307 tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); 307 tfm_cbc = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
308 if (IS_ERR(tfm_cbc)) { 308 if (IS_ERR(tfm_cbc)) {
309 result = PTR_ERR(tfm_cbc); 309 result = PTR_ERR(tfm_cbc);
310 printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result); 310 printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
311 goto error_alloc_cbc; 311 goto error_alloc_cbc;
312 } 312 }
313 result = crypto_skcipher_setkey(tfm_cbc, key, 16); 313 result = crypto_sync_skcipher_setkey(tfm_cbc, key, 16);
314 if (result < 0) { 314 if (result < 0) {
315 printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result); 315 printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
316 goto error_setkey_cbc; 316 goto error_setkey_cbc;
@@ -351,7 +351,7 @@ error_setkey_aes:
351 crypto_free_cipher(tfm_aes); 351 crypto_free_cipher(tfm_aes);
352error_alloc_aes: 352error_alloc_aes:
353error_setkey_cbc: 353error_setkey_cbc:
354 crypto_free_skcipher(tfm_cbc); 354 crypto_free_sync_skcipher(tfm_cbc);
355error_alloc_cbc: 355error_alloc_cbc:
356 return result; 356 return result;
357} 357}
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 39c20ef26db4..79debfc9cef9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -83,10 +83,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
83 filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) 83 filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
84 return true; 84 return true;
85 85
86 if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
87 filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
88 return true;
89
90 return false; 86 return false;
91} 87}
92 88
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index e997ca51192f..7874c9bb2fc5 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -174,16 +174,6 @@ static struct fscrypt_mode {
174 .cipher_str = "cts(cbc(aes))", 174 .cipher_str = "cts(cbc(aes))",
175 .keysize = 16, 175 .keysize = 16,
176 }, 176 },
177 [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
178 .friendly_name = "Speck128/256-XTS",
179 .cipher_str = "xts(speck128)",
180 .keysize = 64,
181 },
182 [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
183 .friendly_name = "Speck128/256-CTS-CBC",
184 .cipher_str = "cts(cbc(speck128))",
185 .keysize = 32,
186 },
187}; 177};
188 178
189static struct fscrypt_mode * 179static struct fscrypt_mode *
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e328b52425a8..22e6f412c595 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -234,6 +234,34 @@ static inline void acomp_request_set_params(struct acomp_req *req,
234 req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; 234 req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
235} 235}
236 236
237static inline void crypto_stat_compress(struct acomp_req *req, int ret)
238{
239#ifdef CONFIG_CRYPTO_STATS
240 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
241
242 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
243 atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
244 } else {
245 atomic_inc(&tfm->base.__crt_alg->compress_cnt);
246 atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
247 }
248#endif
249}
250
251static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
252{
253#ifdef CONFIG_CRYPTO_STATS
254 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
255
256 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
257 atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
258 } else {
259 atomic_inc(&tfm->base.__crt_alg->decompress_cnt);
260 atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
261 }
262#endif
263}
264
237/** 265/**
238 * crypto_acomp_compress() -- Invoke asynchronous compress operation 266 * crypto_acomp_compress() -- Invoke asynchronous compress operation
239 * 267 *
@@ -246,8 +274,11 @@ static inline void acomp_request_set_params(struct acomp_req *req,
246static inline int crypto_acomp_compress(struct acomp_req *req) 274static inline int crypto_acomp_compress(struct acomp_req *req)
247{ 275{
248 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); 276 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
277 int ret;
249 278
250 return tfm->compress(req); 279 ret = tfm->compress(req);
280 crypto_stat_compress(req, ret);
281 return ret;
251} 282}
252 283
253/** 284/**
@@ -262,8 +293,11 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
262static inline int crypto_acomp_decompress(struct acomp_req *req) 293static inline int crypto_acomp_decompress(struct acomp_req *req)
263{ 294{
264 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); 295 struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
296 int ret;
265 297
266 return tfm->decompress(req); 298 ret = tfm->decompress(req);
299 crypto_stat_decompress(req, ret);
300 return ret;
267} 301}
268 302
269#endif 303#endif
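The same instrumentation shape recurs in the aead, akcipher, hash, kpp and rng headers below: the inline fast path captures the return value, hands it to a crypto_stat_*() helper that compiles to nothing without CONFIG_CRYPTO_STATS, and treats -EINPROGRESS/-EBUSY as successful submissions rather than errors. A stripped-down illustration of that shape, with placeholder counters standing in for the real struct crypto_alg fields:

#include <linux/atomic.h>
#include <linux/errno.h>

/*
 * Count one operation: bump the error counter on real failures, otherwise
 * bump the call counter and add the number of bytes processed.  The
 * op_cnt/op_err_cnt/op_tlen names are placeholders for this sketch only.
 */
static inline void demo_crypto_stat(atomic_t *op_cnt, atomic_t *op_err_cnt,
				    atomic64_t *op_tlen,
				    unsigned int len, int ret)
{
#ifdef CONFIG_CRYPTO_STATS
	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
		atomic_inc(op_err_cnt);
	} else {
		atomic_inc(op_cnt);
		atomic64_add(len, op_tlen);
	}
#endif
}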
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1e26f790b03f..0d765d7bfb82 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -306,6 +306,34 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
306 return __crypto_aead_cast(req->base.tfm); 306 return __crypto_aead_cast(req->base.tfm);
307} 307}
308 308
309static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
310{
311#ifdef CONFIG_CRYPTO_STATS
312 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
313
314 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
315 atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
316 } else {
317 atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
318 atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
319 }
320#endif
321}
322
323static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
324{
325#ifdef CONFIG_CRYPTO_STATS
326 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
327
328 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
329 atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
330 } else {
331 atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
332 atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
333 }
334#endif
335}
336
309/** 337/**
310 * crypto_aead_encrypt() - encrypt plaintext 338 * crypto_aead_encrypt() - encrypt plaintext
311 * @req: reference to the aead_request handle that holds all information 339 * @req: reference to the aead_request handle that holds all information
@@ -328,11 +356,14 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
328static inline int crypto_aead_encrypt(struct aead_request *req) 356static inline int crypto_aead_encrypt(struct aead_request *req)
329{ 357{
330 struct crypto_aead *aead = crypto_aead_reqtfm(req); 358 struct crypto_aead *aead = crypto_aead_reqtfm(req);
359 int ret;
331 360
332 if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 361 if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
333 return -ENOKEY; 362 ret = -ENOKEY;
334 363 else
335 return crypto_aead_alg(aead)->encrypt(req); 364 ret = crypto_aead_alg(aead)->encrypt(req);
365 crypto_stat_aead_encrypt(req, ret);
366 return ret;
336} 367}
337 368
338/** 369/**
@@ -360,14 +391,16 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
360static inline int crypto_aead_decrypt(struct aead_request *req) 391static inline int crypto_aead_decrypt(struct aead_request *req)
361{ 392{
362 struct crypto_aead *aead = crypto_aead_reqtfm(req); 393 struct crypto_aead *aead = crypto_aead_reqtfm(req);
394 int ret;
363 395
364 if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 396 if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
365 return -ENOKEY; 397 ret = -ENOKEY;
366 398 else if (req->cryptlen < crypto_aead_authsize(aead))
367 if (req->cryptlen < crypto_aead_authsize(aead)) 399 ret = -EINVAL;
368 return -EINVAL; 400 else
369 401 ret = crypto_aead_alg(aead)->decrypt(req);
370 return crypto_aead_alg(aead)->decrypt(req); 402 crypto_stat_aead_decrypt(req, ret);
403 return ret;
371} 404}
372 405
373/** 406/**
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index b5e11de4d497..afac71119396 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -271,6 +271,62 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
271 return alg->max_size(tfm); 271 return alg->max_size(tfm);
272} 272}
273 273
274static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
275 int ret)
276{
277#ifdef CONFIG_CRYPTO_STATS
278 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
279
280 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
281 atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
282 } else {
283 atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
284 atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
285 }
286#endif
287}
288
289static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
290 int ret)
291{
292#ifdef CONFIG_CRYPTO_STATS
293 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
294
295 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
296 atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
297 } else {
298 atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
299 atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
300 }
301#endif
302}
303
304static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
305 int ret)
306{
307#ifdef CONFIG_CRYPTO_STATS
308 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
309
310 if (ret && ret != -EINPROGRESS && ret != -EBUSY)
311 atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
312 else
313 atomic_inc(&tfm->base.__crt_alg->sign_cnt);
314#endif
315}
316
317static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
318 int ret)
319{
320#ifdef CONFIG_CRYPTO_STATS
321 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
322
323 if (ret && ret != -EINPROGRESS && ret != -EBUSY)
324 atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
325 else
326 atomic_inc(&tfm->base.__crt_alg->verify_cnt);
327#endif
328}
329
274/** 330/**
275 * crypto_akcipher_encrypt() - Invoke public key encrypt operation 331 * crypto_akcipher_encrypt() - Invoke public key encrypt operation
276 * 332 *
@@ -285,8 +341,11 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
285{ 341{
286 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 342 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
287 struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 343 struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
344 int ret;
288 345
289 return alg->encrypt(req); 346 ret = alg->encrypt(req);
347 crypto_stat_akcipher_encrypt(req, ret);
348 return ret;
290} 349}
291 350
292/** 351/**
@@ -303,8 +362,11 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
303{ 362{
304 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 363 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
305 struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 364 struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
365 int ret;
306 366
307 return alg->decrypt(req); 367 ret = alg->decrypt(req);
368 crypto_stat_akcipher_decrypt(req, ret);
369 return ret;
308} 370}
309 371
310/** 372/**
@@ -321,8 +383,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
321{ 383{
322 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 384 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
323 struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 385 struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
386 int ret;
324 387
325 return alg->sign(req); 388 ret = alg->sign(req);
389 crypto_stat_akcipher_sign(req, ret);
390 return ret;
326} 391}
327 392
328/** 393/**
@@ -339,8 +404,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
339{ 404{
340 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 405 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
341 struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 406 struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
407 int ret;
342 408
343 return alg->verify(req); 409 ret = alg->verify(req);
410 crypto_stat_akcipher_verify(req, ret);
411 return ret;
344} 412}
345 413
346/** 414/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index bd5e8ccf1687..4a5ad10e75f0 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -20,8 +20,10 @@
20/* 20/*
21 * Maximum values for blocksize and alignmask, used to allocate 21 * Maximum values for blocksize and alignmask, used to allocate
22 * static buffers that are big enough for any combination of 22 * static buffers that are big enough for any combination of
23 * ciphers and architectures. 23 * algs and architectures. Ciphers have a lower maximum size.
24 */ 24 */
25#define MAX_ALGAPI_BLOCKSIZE 160
26#define MAX_ALGAPI_ALIGNMASK 63
25#define MAX_CIPHER_BLOCKSIZE 16 27#define MAX_CIPHER_BLOCKSIZE 16
26#define MAX_CIPHER_ALIGNMASK 15 28#define MAX_CIPHER_ALIGNMASK 15
27 29
@@ -425,4 +427,14 @@ static inline void crypto_yield(u32 flags)
425#endif 427#endif
426} 428}
427 429
430int crypto_register_notifier(struct notifier_block *nb);
431int crypto_unregister_notifier(struct notifier_block *nb);
432
433/* Crypto notification events. */
434enum {
435 CRYPTO_MSG_ALG_REQUEST,
436 CRYPTO_MSG_ALG_REGISTER,
437 CRYPTO_MSG_ALG_LOADED,
438};
439
428#endif /* _CRYPTO_ALGAPI_H */ 440#endif /* _CRYPTO_ALGAPI_H */
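The notifier hooks and CRYPTO_MSG_* events added to algapi.h use the kernel's ordinary notifier_block machinery. A hedged sketch of a listener module follows; the callback, the module boilerplate, and in particular the assumption that CRYPTO_MSG_ALG_LOADED carries the freshly registered struct crypto_alg as its payload are illustrative and not guaranteed by this header alone:

#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int demo_alg_notify(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	if (event == CRYPTO_MSG_ALG_LOADED) {
		/* assumption: data points at the struct crypto_alg just loaded */
		struct crypto_alg *alg = data;

		pr_info("crypto alg loaded: %s\n", alg->cra_name);
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_alg_nb = {
	.notifier_call = demo_alg_notify,
};

static int __init demo_notifier_init(void)
{
	return crypto_register_notifier(&demo_alg_nb);
}

static void __exit demo_notifier_exit(void)
{
	crypto_unregister_notifier(&demo_alg_nb);
}

module_init(demo_notifier_init);
module_exit(demo_notifier_exit);
MODULE_LICENSE("GPL");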
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
index f5b8bfc22e6d..3bf28beefa33 100644
--- a/include/crypto/cbc.h
+++ b/include/crypto/cbc.h
@@ -113,7 +113,7 @@ static inline int crypto_cbc_decrypt_inplace(
113 unsigned int bsize = crypto_skcipher_blocksize(tfm); 113 unsigned int bsize = crypto_skcipher_blocksize(tfm);
114 unsigned int nbytes = walk->nbytes; 114 unsigned int nbytes = walk->nbytes;
115 u8 *src = walk->src.virt.addr; 115 u8 *src = walk->src.virt.addr;
116 u8 last_iv[bsize]; 116 u8 last_iv[MAX_CIPHER_BLOCKSIZE];
117 117
118 /* Start of the last block. */ 118 /* Start of the last block. */
119 src += nbytes - (nbytes & (bsize - 1)) - bsize; 119 src += nbytes - (nbytes & (bsize - 1)) - bsize;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index b83d66073db0..f76302d99e2b 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,13 +13,12 @@
13#define CHACHA20_IV_SIZE 16 13#define CHACHA20_IV_SIZE 16
14#define CHACHA20_KEY_SIZE 32 14#define CHACHA20_KEY_SIZE 32
15#define CHACHA20_BLOCK_SIZE 64 15#define CHACHA20_BLOCK_SIZE 64
16#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
17 16
18struct chacha20_ctx { 17struct chacha20_ctx {
19 u32 key[8]; 18 u32 key[8];
20}; 19};
21 20
22void chacha20_block(u32 *state, u32 *stream); 21void chacha20_block(u32 *state, u8 *stream);
23void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); 22void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
24int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, 23int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
25 unsigned int keysize); 24 unsigned int keysize);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 76e432cab75d..bc7796600338 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -151,9 +151,13 @@ struct shash_desc {
151 void *__ctx[] CRYPTO_MINALIGN_ATTR; 151 void *__ctx[] CRYPTO_MINALIGN_ATTR;
152}; 152};
153 153
154#define HASH_MAX_DIGESTSIZE 64
155#define HASH_MAX_DESCSIZE 360
156#define HASH_MAX_STATESIZE 512
157
154#define SHASH_DESC_ON_STACK(shash, ctx) \ 158#define SHASH_DESC_ON_STACK(shash, ctx) \
155 char __##shash##_desc[sizeof(struct shash_desc) + \ 159 char __##shash##_desc[sizeof(struct shash_desc) + \
156 crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ 160 HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
157 struct shash_desc *shash = (struct shash_desc *)__##shash##_desc 161 struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
158 162
159/** 163/**
@@ -408,6 +412,32 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
408int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 412int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
409 unsigned int keylen); 413 unsigned int keylen);
410 414
415static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
416{
417#ifdef CONFIG_CRYPTO_STATS
418 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
419
420 if (ret && ret != -EINPROGRESS && ret != -EBUSY)
421 atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
422 else
423 atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
424#endif
425}
426
427static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
428{
429#ifdef CONFIG_CRYPTO_STATS
430 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
431
432 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
433 atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
434 } else {
435 atomic_inc(&tfm->base.__crt_alg->hash_cnt);
436 atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
437 }
438#endif
439}
440
411/** 441/**
412 * crypto_ahash_finup() - update and finalize message digest 442 * crypto_ahash_finup() - update and finalize message digest
413 * @req: reference to the ahash_request handle that holds all information 443 * @req: reference to the ahash_request handle that holds all information
@@ -522,7 +552,11 @@ static inline int crypto_ahash_init(struct ahash_request *req)
522 */ 552 */
523static inline int crypto_ahash_update(struct ahash_request *req) 553static inline int crypto_ahash_update(struct ahash_request *req)
524{ 554{
525 return crypto_ahash_reqtfm(req)->update(req); 555 int ret;
556
557 ret = crypto_ahash_reqtfm(req)->update(req);
558 crypto_stat_ahash_update(req, ret);
559 return ret;
526} 560}
527 561
528/** 562/**
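With SHASH_DESC_ON_STACK() now sized by the fixed HASH_MAX_DESCSIZE instead of crypto_shash_descsize(), on-stack shash descriptors no longer create variable-length arrays; the calling pattern itself does not change. A minimal sketch of a one-shot digest through the macro, assuming a made-up demo_sha256() wrapper (out must provide SHA256_DIGEST_SIZE bytes):

#include <crypto/hash.h>

static int demo_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* fixed-size buffer: sizeof(struct shash_desc) + HASH_MAX_DESCSIZE */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}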
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000000..8db299c25566
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#include <net/netlink.h>
3
4struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
5
6int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb);
7int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
8int crypto_dump_reportstat_done(struct netlink_callback *cb);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 2bcfb931bc5b..71be24cd59bd 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@
20struct aead_geniv_ctx { 20struct aead_geniv_ctx {
21 spinlock_t lock; 21 spinlock_t lock;
22 struct crypto_aead *child; 22 struct crypto_aead *child;
23 struct crypto_skcipher *sknull; 23 struct crypto_sync_skcipher *sknull;
24 u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); 24 u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
25}; 25};
26 26
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1bde0a6514fa..f517ba6d3a27 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -268,6 +268,42 @@ struct kpp_secret {
268 unsigned short len; 268 unsigned short len;
269}; 269};
270 270
271static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
272{
273#ifdef CONFIG_CRYPTO_STATS
274 if (ret)
275 atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
276 else
277 atomic_inc(&tfm->base.__crt_alg->setsecret_cnt);
278#endif
279}
280
281static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
282 int ret)
283{
284#ifdef CONFIG_CRYPTO_STATS
285 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
286
287 if (ret)
288 atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
289 else
290 atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
291#endif
292}
293
294static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
295 int ret)
296{
297#ifdef CONFIG_CRYPTO_STATS
298 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
299
300 if (ret)
301 atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
302 else
303 atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
304#endif
305}
306
271/** 307/**
272 * crypto_kpp_set_secret() - Invoke kpp operation 308 * crypto_kpp_set_secret() - Invoke kpp operation
273 * 309 *
@@ -287,8 +323,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
287 const void *buffer, unsigned int len) 323 const void *buffer, unsigned int len)
288{ 324{
289 struct kpp_alg *alg = crypto_kpp_alg(tfm); 325 struct kpp_alg *alg = crypto_kpp_alg(tfm);
326 int ret;
290 327
291 return alg->set_secret(tfm, buffer, len); 328 ret = alg->set_secret(tfm, buffer, len);
329 crypto_stat_kpp_set_secret(tfm, ret);
330 return ret;
292} 331}
293 332
294/** 333/**
@@ -308,8 +347,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
308{ 347{
309 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 348 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
310 struct kpp_alg *alg = crypto_kpp_alg(tfm); 349 struct kpp_alg *alg = crypto_kpp_alg(tfm);
350 int ret;
311 351
312 return alg->generate_public_key(req); 352 ret = alg->generate_public_key(req);
353 crypto_stat_kpp_generate_public_key(req, ret);
354 return ret;
313} 355}
314 356
315/** 357/**
@@ -326,8 +368,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
326{ 368{
327 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 369 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
328 struct kpp_alg *alg = crypto_kpp_alg(tfm); 370 struct kpp_alg *alg = crypto_kpp_alg(tfm);
371 int ret;
329 372
330 return alg->compute_shared_secret(req); 373 ret = alg->compute_shared_secret(req);
374 crypto_stat_kpp_compute_shared_secret(req, ret);
375 return ret;
331} 376}
332 377
333/** 378/**
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404fc4b34..000000000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Software async multibuffer crypto daemon headers
4 *
5 * Author:
6 * Tim Chen <tim.c.chen@linux.intel.com>
7 *
8 * Copyright (c) 2014, Intel Corporation.
9 */
10
11#ifndef _CRYPTO_MCRYPT_H
12#define _CRYPTO_MCRYPT_H
13
14#include <linux/crypto.h>
15#include <linux/kernel.h>
16#include <crypto/hash.h>
17
18struct mcryptd_ahash {
19 struct crypto_ahash base;
20};
21
22static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
23 struct crypto_ahash *tfm)
24{
25 return (struct mcryptd_ahash *)tfm;
26}
27
28struct mcryptd_cpu_queue {
29 struct crypto_queue queue;
30 spinlock_t q_lock;
31 struct work_struct work;
32};
33
34struct mcryptd_queue {
35 struct mcryptd_cpu_queue __percpu *cpu_queue;
36};
37
38struct mcryptd_instance_ctx {
39 struct crypto_spawn spawn;
40 struct mcryptd_queue *queue;
41};
42
43struct mcryptd_hash_ctx {
44 struct crypto_ahash *child;
45 struct mcryptd_alg_state *alg_state;
46};
47
48struct mcryptd_tag {
49 /* seq number of request */
50 unsigned seq_num;
51 /* arrival time of request */
52 unsigned long arrival;
53 unsigned long expire;
54 int cpu;
55};
56
57struct mcryptd_hash_request_ctx {
58 struct list_head waiter;
59 crypto_completion_t complete;
60 struct mcryptd_tag tag;
61 struct crypto_hash_walk walk;
62 u8 *out;
63 int flag;
64 struct ahash_request areq;
65};
66
67struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
68 u32 type, u32 mask);
69struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
70struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
71void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
72void mcryptd_flusher(struct work_struct *work);
73
74enum mcryptd_req_type {
75 MCRYPTD_NONE,
76 MCRYPTD_UPDATE,
77 MCRYPTD_FINUP,
78 MCRYPTD_DIGEST,
79 MCRYPTD_FINAL
80};
81
82struct mcryptd_alg_cstate {
83 unsigned long next_flush;
84 unsigned next_seq_num;
85 bool flusher_engaged;
86 struct delayed_work flush;
87 int cpu;
88 struct mcryptd_alg_state *alg_state;
89 void *mgr;
90 spinlock_t work_lock;
91 struct list_head work_list;
92 struct list_head flush_list;
93};
94
95struct mcryptd_alg_state {
96 struct mcryptd_alg_cstate __percpu *alg_cstate;
97 unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
98};
99
100/* return delay in jiffies from current time */
101static inline unsigned long get_delay(unsigned long t)
102{
103 long delay;
104
105 delay = (long) t - (long) jiffies;
106 if (delay <= 0)
107 return 0;
108 else
109 return (unsigned long) delay;
110}
111
112void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
113
114#endif
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index b26dd70efd9a..ba782e10065e 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
82 { \ 82 { \
83 } \ 83 } \
84 \ 84 \
85 struct aead_alg crypto_morus1280_##id##_algs[] = {\ 85 static struct aead_alg crypto_morus1280_##id##_algs[] = {\
86 { \ 86 { \
87 .setkey = crypto_morus1280_glue_setkey, \ 87 .setkey = crypto_morus1280_glue_setkey, \
88 .setauthsize = crypto_morus1280_glue_setauthsize, \ 88 .setauthsize = crypto_morus1280_glue_setauthsize, \
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index 90c8db07e740..27fa790a2362 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
82 { \ 82 { \
83 } \ 83 } \
84 \ 84 \
85 struct aead_alg crypto_morus640_##id##_algs[] = {\ 85 static struct aead_alg crypto_morus640_##id##_algs[] = {\
86 { \ 86 { \
87 .setkey = crypto_morus640_glue_setkey, \ 87 .setkey = crypto_morus640_glue_setkey, \
88 .setauthsize = crypto_morus640_glue_setauthsize, \ 88 .setauthsize = crypto_morus640_glue_setauthsize, \
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 15aeef6e30ef..0ef577cc00e3 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,7 @@
9#define NULL_DIGEST_SIZE 0 9#define NULL_DIGEST_SIZE 0
10#define NULL_IV_SIZE 0 10#define NULL_IV_SIZE 0
11 11
12struct crypto_skcipher *crypto_get_default_null_skcipher(void); 12struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
13void crypto_put_default_null_skcipher(void); 13void crypto_put_default_null_skcipher(void);
14 14
15#endif 15#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index b95ede354a66..6d258f5b68f1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -122,6 +122,29 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
122 crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); 122 crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
123} 123}
124 124
125static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
126{
127#ifdef CONFIG_CRYPTO_STATS
128 if (ret && ret != -EINPROGRESS && ret != -EBUSY)
129 atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
130 else
131 atomic_inc(&tfm->base.__crt_alg->seed_cnt);
132#endif
133}
134
135static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
136 unsigned int dlen, int ret)
137{
138#ifdef CONFIG_CRYPTO_STATS
139 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
140 atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
141 } else {
142 atomic_inc(&tfm->base.__crt_alg->generate_cnt);
143 atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
144 }
145#endif
146}
147
125/** 148/**
126 * crypto_rng_generate() - get random number 149 * crypto_rng_generate() - get random number
127 * @tfm: cipher handle 150 * @tfm: cipher handle
@@ -140,7 +163,11 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
140 const u8 *src, unsigned int slen, 163 const u8 *src, unsigned int slen,
141 u8 *dst, unsigned int dlen) 164 u8 *dst, unsigned int dlen)
142{ 165{
143 return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); 166 int ret;
167
168 ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
169 crypto_stat_rng_generate(tfm, dlen, ret);
170 return ret;
144} 171}
145 172
146/** 173/**
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 2f327f090c3e..925f547cdcfa 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,6 +65,10 @@ struct crypto_skcipher {
65 struct crypto_tfm base; 65 struct crypto_tfm base;
66}; 66};
67 67
68struct crypto_sync_skcipher {
69 struct crypto_skcipher base;
70};
71
68/** 72/**
69 * struct skcipher_alg - symmetric key cipher definition 73 * struct skcipher_alg - symmetric key cipher definition
70 * @min_keysize: Minimum key size supported by the transformation. This is the 74 * @min_keysize: Minimum key size supported by the transformation. This is the
@@ -139,9 +143,17 @@ struct skcipher_alg {
139 struct crypto_alg base; 143 struct crypto_alg base;
140}; 144};
141 145
142#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ 146#define MAX_SYNC_SKCIPHER_REQSIZE 384
147/*
148 * This performs a type-check against the "tfm" argument to make sure
149 * all users have the correct skcipher tfm for doing on-stack requests.
150 */
151#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
143 char __##name##_desc[sizeof(struct skcipher_request) + \ 152 char __##name##_desc[sizeof(struct skcipher_request) + \
144 crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ 153 MAX_SYNC_SKCIPHER_REQSIZE + \
154 (!(sizeof((struct crypto_sync_skcipher *)1 == \
155 (typeof(tfm))1))) \
156 ] CRYPTO_MINALIGN_ATTR; \
145 struct skcipher_request *name = (void *)__##name##_desc 157 struct skcipher_request *name = (void *)__##name##_desc
146 158
147/** 159/**
@@ -197,6 +209,9 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast(
197struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, 209struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
198 u32 type, u32 mask); 210 u32 type, u32 mask);
199 211
212struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
213 u32 type, u32 mask);
214
200static inline struct crypto_tfm *crypto_skcipher_tfm( 215static inline struct crypto_tfm *crypto_skcipher_tfm(
201 struct crypto_skcipher *tfm) 216 struct crypto_skcipher *tfm)
202{ 217{
@@ -212,6 +227,11 @@ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
212 crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm)); 227 crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
213} 228}
214 229
230static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
231{
232 crypto_free_skcipher(&tfm->base);
233}
234
215/** 235/**
216 * crypto_has_skcipher() - Search for the availability of an skcipher. 236 * crypto_has_skcipher() - Search for the availability of an skcipher.
217 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 237 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -280,6 +300,12 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
280 return tfm->ivsize; 300 return tfm->ivsize;
281} 301}
282 302
303static inline unsigned int crypto_sync_skcipher_ivsize(
304 struct crypto_sync_skcipher *tfm)
305{
306 return crypto_skcipher_ivsize(&tfm->base);
307}
308
283static inline unsigned int crypto_skcipher_alg_chunksize( 309static inline unsigned int crypto_skcipher_alg_chunksize(
284 struct skcipher_alg *alg) 310 struct skcipher_alg *alg)
285{ 311{
@@ -356,6 +382,12 @@ static inline unsigned int crypto_skcipher_blocksize(
356 return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); 382 return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
357} 383}
358 384
385static inline unsigned int crypto_sync_skcipher_blocksize(
386 struct crypto_sync_skcipher *tfm)
387{
388 return crypto_skcipher_blocksize(&tfm->base);
389}
390
359static inline unsigned int crypto_skcipher_alignmask( 391static inline unsigned int crypto_skcipher_alignmask(
360 struct crypto_skcipher *tfm) 392 struct crypto_skcipher *tfm)
361{ 393{
@@ -379,6 +411,24 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
379 crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags); 411 crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
380} 412}
381 413
414static inline u32 crypto_sync_skcipher_get_flags(
415 struct crypto_sync_skcipher *tfm)
416{
417 return crypto_skcipher_get_flags(&tfm->base);
418}
419
420static inline void crypto_sync_skcipher_set_flags(
421 struct crypto_sync_skcipher *tfm, u32 flags)
422{
423 crypto_skcipher_set_flags(&tfm->base, flags);
424}
425
426static inline void crypto_sync_skcipher_clear_flags(
427 struct crypto_sync_skcipher *tfm, u32 flags)
428{
429 crypto_skcipher_clear_flags(&tfm->base, flags);
430}
431
382/** 432/**
383 * crypto_skcipher_setkey() - set key for cipher 433 * crypto_skcipher_setkey() - set key for cipher
384 * @tfm: cipher handle 434 * @tfm: cipher handle
@@ -401,6 +451,12 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
401 return tfm->setkey(tfm, key, keylen); 451 return tfm->setkey(tfm, key, keylen);
402} 452}
403 453
454static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
455 const u8 *key, unsigned int keylen)
456{
457 return crypto_skcipher_setkey(&tfm->base, key, keylen);
458}
459
404static inline unsigned int crypto_skcipher_default_keysize( 460static inline unsigned int crypto_skcipher_default_keysize(
405 struct crypto_skcipher *tfm) 461 struct crypto_skcipher *tfm)
406{ 462{
@@ -422,6 +478,40 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
422 return __crypto_skcipher_cast(req->base.tfm); 478 return __crypto_skcipher_cast(req->base.tfm);
423} 479}
424 480
481static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
482 struct skcipher_request *req)
483{
484 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
485
486 return container_of(tfm, struct crypto_sync_skcipher, base);
487}
488
489static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
490 int ret, struct crypto_alg *alg)
491{
492#ifdef CONFIG_CRYPTO_STATS
493 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
494 atomic_inc(&alg->cipher_err_cnt);
495 } else {
496 atomic_inc(&alg->encrypt_cnt);
497 atomic64_add(req->cryptlen, &alg->encrypt_tlen);
498 }
499#endif
500}
501
502static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
503 int ret, struct crypto_alg *alg)
504{
505#ifdef CONFIG_CRYPTO_STATS
506 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
507 atomic_inc(&alg->cipher_err_cnt);
508 } else {
509 atomic_inc(&alg->decrypt_cnt);
510 atomic64_add(req->cryptlen, &alg->decrypt_tlen);
511 }
512#endif
513}
514
425/** 515/**
426 * crypto_skcipher_encrypt() - encrypt plaintext 516 * crypto_skcipher_encrypt() - encrypt plaintext
427 * @req: reference to the skcipher_request handle that holds all information 517 * @req: reference to the skcipher_request handle that holds all information
@@ -436,11 +526,14 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
436static inline int crypto_skcipher_encrypt(struct skcipher_request *req) 526static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
437{ 527{
438 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 528 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
529 int ret;
439 530
440 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 531 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
441 return -ENOKEY; 532 ret = -ENOKEY;
442 533 else
443 return tfm->encrypt(req); 534 ret = tfm->encrypt(req);
535 crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
536 return ret;
444} 537}
445 538
446/** 539/**
@@ -457,11 +550,14 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
457static inline int crypto_skcipher_decrypt(struct skcipher_request *req) 550static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
458{ 551{
459 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 552 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
553 int ret;
460 554
461 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 555 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
462 return -ENOKEY; 556 ret = -ENOKEY;
463 557 else
464 return tfm->decrypt(req); 558 ret = tfm->decrypt(req);
559 crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
560 return ret;
465} 561}
466 562
467/** 563/**
@@ -500,6 +596,12 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req,
500 req->base.tfm = crypto_skcipher_tfm(tfm); 596 req->base.tfm = crypto_skcipher_tfm(tfm);
501} 597}
502 598
599static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
600 struct crypto_sync_skcipher *tfm)
601{
602 skcipher_request_set_tfm(req, &tfm->base);
603}
604
503static inline struct skcipher_request *skcipher_request_cast( 605static inline struct skcipher_request *skcipher_request_cast(
504 struct crypto_async_request *req) 606 struct crypto_async_request *req)
505{ 607{
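
Taken together, the skcipher.h additions give callers that need synchronous, on-stack requests a dedicated type: the tfm is allocated with crypto_alloc_sync_skcipher(), and SYNC_SKCIPHER_REQUEST_ON_STACK() type-checks its argument while using the fixed MAX_SYNC_SKCIPHER_REQSIZE instead of a VLA. A minimal sketch of a converted caller (the helper itself is illustrative, not part of this patch; the net/ceph, mac802154 and rxrpc hunks further down follow the same pattern):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: encrypt a scatterlist in place with a sync tfm. */
static int example_encrypt_in_place(struct crypto_sync_skcipher *tfm,
				    struct scatterlist *sg,
				    unsigned int len, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}
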
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc952d405..000000000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Common values for the Speck algorithm
4 */
5
6#ifndef _CRYPTO_SPECK_H
7#define _CRYPTO_SPECK_H
8
9#include <linux/types.h>
10
11/* Speck128 */
12
13#define SPECK128_BLOCK_SIZE 16
14
15#define SPECK128_128_KEY_SIZE 16
16#define SPECK128_128_NROUNDS 32
17
18#define SPECK128_192_KEY_SIZE 24
19#define SPECK128_192_NROUNDS 33
20
21#define SPECK128_256_KEY_SIZE 32
22#define SPECK128_256_NROUNDS 34
23
24struct speck128_tfm_ctx {
25 u64 round_keys[SPECK128_256_NROUNDS];
26 int nrounds;
27};
28
29void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
30 u8 *out, const u8 *in);
31
32void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
33 u8 *out, const u8 *in);
34
35int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
36 unsigned int keysize);
37
38/* Speck64 */
39
40#define SPECK64_BLOCK_SIZE 8
41
42#define SPECK64_96_KEY_SIZE 12
43#define SPECK64_96_NROUNDS 26
44
45#define SPECK64_128_KEY_SIZE 16
46#define SPECK64_128_NROUNDS 27
47
48struct speck64_tfm_ctx {
49 u32 round_keys[SPECK64_128_NROUNDS];
50 int nrounds;
51};
52
53void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
54 u8 *out, const u8 *in);
55
56void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
57 u8 *out, const u8 *in);
58
59int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
60 unsigned int keysize);
61
62#endif /* _CRYPTO_SPECK_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index db192becfec4..97cfe29b3f0a 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -198,7 +198,6 @@ struct ftrace_likely_data {
198 */ 198 */
199#define __pure __attribute__((pure)) 199#define __pure __attribute__((pure))
200#define __aligned(x) __attribute__((aligned(x))) 200#define __aligned(x) __attribute__((aligned(x)))
201#define __aligned_largest __attribute__((aligned))
202#define __printf(a, b) __attribute__((format(printf, a, b))) 201#define __printf(a, b) __attribute__((format(printf, a, b)))
203#define __scanf(a, b) __attribute__((format(scanf, a, b))) 202#define __scanf(a, b) __attribute__((format(scanf, a, b)))
204#define __maybe_unused __attribute__((unused)) 203#define __maybe_unused __attribute__((unused))
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
45 * 'asm/cpufeature.h' of your favorite architecture. 45 * 'asm/cpufeature.h' of your favorite architecture.
46 */ 46 */
47#define module_cpu_feature_match(x, __initfunc) \ 47#define module_cpu_feature_match(x, __initfunc) \
48static struct cpu_feature const cpu_feature_match_ ## x[] = \ 48static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
49 { { .feature = cpu_feature(x) }, { } }; \ 49 { { .feature = cpu_feature(x) }, { } }; \
50MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ 50MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
51 \ 51 \
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 1fe0cfcdea30..6bb0c0bf357b 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,6 +6,7 @@
6 6
7#define CRC_T10DIF_DIGEST_SIZE 2 7#define CRC_T10DIF_DIGEST_SIZE 2
8#define CRC_T10DIF_BLOCK_SIZE 1 8#define CRC_T10DIF_BLOCK_SIZE 1
9#define CRC_T10DIF_STRING "crct10dif"
9 10
10extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, 11extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
11 size_t len); 12 size_t len);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e8839d3a7559..3634ad6fe202 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -454,6 +454,33 @@ struct compress_alg {
454 * @cra_refcnt: internally used 454 * @cra_refcnt: internally used
455 * @cra_destroy: internally used 455 * @cra_destroy: internally used
456 * 456 *
457 * All following statistics are for this crypto_alg
458 * @encrypt_cnt: number of encrypt requests
459 * @decrypt_cnt: number of decrypt requests
460 * @compress_cnt: number of compress requests
461 * @decompress_cnt: number of decompress requests
462 * @generate_cnt: number of RNG generate requests
463 * @seed_cnt: number of times the rng was seeded
464 * @hash_cnt: number of hash requests
465 * @sign_cnt: number of sign requests
466 * @setsecret_cnt: number of setsecret operations
467 * @generate_public_key_cnt: number of generate_public_key operations
468 * @verify_cnt: number of verify operations
469 * @compute_shared_secret_cnt: number of compute_shared_secret operations
470 * @encrypt_tlen: total data size handled by encrypt requests
471 * @decrypt_tlen: total data size handled by decrypt requests
472 * @compress_tlen: total data size handled by compress requests
473 * @decompress_tlen: total data size handled by decompress requests
474 * @generate_tlen: total data size of generated data by the RNG
475 * @hash_tlen: total data size hashed
476 * @akcipher_err_cnt: number of errors for akcipher requests
477 * @cipher_err_cnt: number of errors for cipher requests
478 * @compress_err_cnt: number of errors for compress requests
479 * @aead_err_cnt: number of errors for aead requests
480 * @hash_err_cnt: number of errors for hash requests
481 * @rng_err_cnt: number of errors for rng requests
482 * @kpp_err_cnt: number of errors for kpp requests
483 *
457 * The struct crypto_alg describes a generic Crypto API algorithm and is common 484 * The struct crypto_alg describes a generic Crypto API algorithm and is common
458 * for all of the transformations. Any variable not documented here shall not 485 * for all of the transformations. Any variable not documented here shall not
459 * be used by a cipher implementation as it is internal to the Crypto API. 486 * be used by a cipher implementation as it is internal to the Crypto API.
@@ -487,6 +514,45 @@ struct crypto_alg {
487 void (*cra_destroy)(struct crypto_alg *alg); 514 void (*cra_destroy)(struct crypto_alg *alg);
488 515
489 struct module *cra_module; 516 struct module *cra_module;
517
518 union {
519 atomic_t encrypt_cnt;
520 atomic_t compress_cnt;
521 atomic_t generate_cnt;
522 atomic_t hash_cnt;
523 atomic_t setsecret_cnt;
524 };
525 union {
526 atomic64_t encrypt_tlen;
527 atomic64_t compress_tlen;
528 atomic64_t generate_tlen;
529 atomic64_t hash_tlen;
530 };
531 union {
532 atomic_t akcipher_err_cnt;
533 atomic_t cipher_err_cnt;
534 atomic_t compress_err_cnt;
535 atomic_t aead_err_cnt;
536 atomic_t hash_err_cnt;
537 atomic_t rng_err_cnt;
538 atomic_t kpp_err_cnt;
539 };
540 union {
541 atomic_t decrypt_cnt;
542 atomic_t decompress_cnt;
543 atomic_t seed_cnt;
544 atomic_t generate_public_key_cnt;
545 };
546 union {
547 atomic64_t decrypt_tlen;
548 atomic64_t decompress_tlen;
549 };
550 union {
551 atomic_t verify_cnt;
552 atomic_t compute_shared_secret_cnt;
553 };
554 atomic_t sign_cnt;
555
490} CRYPTO_MINALIGN_ATTR; 556} CRYPTO_MINALIGN_ATTR;
491 557
492/* 558/*
@@ -907,6 +973,38 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
907 return __crypto_ablkcipher_cast(req->base.tfm); 973 return __crypto_ablkcipher_cast(req->base.tfm);
908} 974}
909 975
976static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
977 int ret)
978{
979#ifdef CONFIG_CRYPTO_STATS
980 struct ablkcipher_tfm *crt =
981 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
982
983 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
984 atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
985 } else {
986 atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
987 atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
988 }
989#endif
990}
991
992static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
993 int ret)
994{
995#ifdef CONFIG_CRYPTO_STATS
996 struct ablkcipher_tfm *crt =
997 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
998
999 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
1000 atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
1001 } else {
1002 atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
1003 atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
1004 }
1005#endif
1006}
1007
910/** 1008/**
911 * crypto_ablkcipher_encrypt() - encrypt plaintext 1009 * crypto_ablkcipher_encrypt() - encrypt plaintext
912 * @req: reference to the ablkcipher_request handle that holds all information 1010 * @req: reference to the ablkcipher_request handle that holds all information
@@ -922,7 +1020,11 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
922{ 1020{
923 struct ablkcipher_tfm *crt = 1021 struct ablkcipher_tfm *crt =
924 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 1022 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
925 return crt->encrypt(req); 1023 int ret;
1024
1025 ret = crt->encrypt(req);
1026 crypto_stat_ablkcipher_encrypt(req, ret);
1027 return ret;
926} 1028}
927 1029
928/** 1030/**
@@ -940,7 +1042,11 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
940{ 1042{
941 struct ablkcipher_tfm *crt = 1043 struct ablkcipher_tfm *crt =
942 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 1044 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
943 return crt->decrypt(req); 1045 int ret;
1046
1047 ret = crt->decrypt(req);
1048 crypto_stat_ablkcipher_decrypt(req, ret);
1049 return ret;
944} 1050}
945 1051
946/** 1052/**
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index f27cb14088a4..5160f06ffbac 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -405,6 +405,7 @@ extern struct device_type fsl_mc_bus_dpcon_type;
405extern struct device_type fsl_mc_bus_dpmcp_type; 405extern struct device_type fsl_mc_bus_dpmcp_type;
406extern struct device_type fsl_mc_bus_dpmac_type; 406extern struct device_type fsl_mc_bus_dpmac_type;
407extern struct device_type fsl_mc_bus_dprtc_type; 407extern struct device_type fsl_mc_bus_dprtc_type;
408extern struct device_type fsl_mc_bus_dpseci_type;
408 409
409static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) 410static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
410{ 411{
@@ -451,6 +452,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
451 return mc_dev->dev.type == &fsl_mc_bus_dprtc_type; 452 return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
452} 453}
453 454
455static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
456{
457 return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
458}
459
454/* 460/*
455 * Data Path Buffer Pool (DPBP) API 461 * Data Path Buffer Pool (DPBP) API
456 * Contains initialization APIs and runtime control APIs for DPBP 462 * Contains initialization APIs and runtime control APIs for DPBP
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..c0b93e0ff0c0 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -33,7 +33,8 @@
33 * and max is a multiple of 4 and >= 32 bytes. 33 * and max is a multiple of 4 and >= 32 bytes.
34 * @priv: Private data, for use by the RNG driver. 34 * @priv: Private data, for use by the RNG driver.
35 * @quality: Estimation of true entropy in RNG's bitstream 35 * @quality: Estimation of true entropy in RNG's bitstream
36 * (per mill). 36 * (in bits of entropy per 1024 bits of input;
37 * valid values: 1 to 1024, or 0 for unknown).
37 */ 38 */
38struct hwrng { 39struct hwrng {
39 const char *name; 40 const char *name;
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 7df625d41e35..f6e8ceafafd8 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
71 const u32 keyed_cksum; /* is it a keyed cksum? */ 71 const u32 keyed_cksum; /* is it a keyed cksum? */
72 const u32 keybytes; /* raw key len, in bytes */ 72 const u32 keybytes; /* raw key len, in bytes */
73 const u32 keylength; /* final key len, in bytes */ 73 const u32 keylength; /* final key len, in bytes */
74 u32 (*encrypt) (struct crypto_skcipher *tfm, 74 u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
75 void *iv, void *in, void *out, 75 void *iv, void *in, void *out,
76 int length); /* encryption function */ 76 int length); /* encryption function */
77 u32 (*decrypt) (struct crypto_skcipher *tfm, 77 u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
78 void *iv, void *in, void *out, 78 void *iv, void *in, void *out,
79 int length); /* decryption function */ 79 int length); /* decryption function */
80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, 80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
98 u32 enctype; 98 u32 enctype;
99 u32 flags; 99 u32 flags;
100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ 100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
101 struct crypto_skcipher *enc; 101 struct crypto_sync_skcipher *enc;
102 struct crypto_skcipher *seq; 102 struct crypto_sync_skcipher *seq;
103 struct crypto_skcipher *acceptor_enc; 103 struct crypto_sync_skcipher *acceptor_enc;
104 struct crypto_skcipher *initiator_enc; 104 struct crypto_sync_skcipher *initiator_enc;
105 struct crypto_skcipher *acceptor_enc_aux; 105 struct crypto_sync_skcipher *acceptor_enc_aux;
106 struct crypto_skcipher *initiator_enc_aux; 106 struct crypto_sync_skcipher *initiator_enc_aux;
107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ 107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
108 u8 cksum[GSS_KRB5_MAX_KEYLEN]; 108 u8 cksum[GSS_KRB5_MAX_KEYLEN];
109 s32 endtime; 109 s32 endtime;
@@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
262 262
263 263
264u32 264u32
265krb5_encrypt(struct crypto_skcipher *key, 265krb5_encrypt(struct crypto_sync_skcipher *key,
266 void *iv, void *in, void *out, int length); 266 void *iv, void *in, void *out, int length);
267 267
268u32 268u32
269krb5_decrypt(struct crypto_skcipher *key, 269krb5_decrypt(struct crypto_sync_skcipher *key,
270 void *iv, void *in, void *out, int length); 270 void *iv, void *in, void *out, int length);
271 271
272int 272int
273gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, 273gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
274 int offset, struct page **pages); 274 int offset, struct page **pages);
275 275
276int 276int
277gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, 277gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
278 int offset); 278 int offset);
279 279
280s32 280s32
281krb5_make_seq_num(struct krb5_ctx *kctx, 281krb5_make_seq_num(struct krb5_ctx *kctx,
282 struct crypto_skcipher *key, 282 struct crypto_sync_skcipher *key,
283 int direction, 283 int direction,
284 u32 seqnum, unsigned char *cksum, unsigned char *buf); 284 u32 seqnum, unsigned char *cksum, unsigned char *buf);
285 285
@@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
320 320
321int 321int
322krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, 322krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
323 struct crypto_skcipher *cipher, 323 struct crypto_sync_skcipher *cipher,
324 unsigned char *cksum); 324 unsigned char *cksum);
325 325
326int 326int
327krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, 327krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
328 struct crypto_skcipher *cipher, 328 struct crypto_sync_skcipher *cipher,
329 s32 seqnum); 329 s32 seqnum);
330void 330void
331gss_krb5_make_confounder(char *p, u32 conflen); 331gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
index 2576abaa7779..90ae8d191f1a 100644
--- a/include/soc/fsl/dpaa2-fd.h
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -66,6 +66,15 @@ struct dpaa2_fd {
66#define SG_BPID_MASK 0x3FFF 66#define SG_BPID_MASK 0x3FFF
67#define SG_FINAL_FLAG_MASK 0x1 67#define SG_FINAL_FLAG_MASK 0x1
68#define SG_FINAL_FLAG_SHIFT 15 68#define SG_FINAL_FLAG_SHIFT 15
69#define FL_SHORT_LEN_FLAG_MASK 0x1
70#define FL_SHORT_LEN_FLAG_SHIFT 14
71#define FL_SHORT_LEN_MASK 0x3FFFF
72#define FL_OFFSET_MASK 0x0FFF
73#define FL_FORMAT_MASK 0x3
74#define FL_FORMAT_SHIFT 12
75#define FL_BPID_MASK 0x3FFF
76#define FL_FINAL_FLAG_MASK 0x1
77#define FL_FINAL_FLAG_SHIFT 15
69 78
70/* Error bits in FD CTRL */ 79/* Error bits in FD CTRL */
71#define FD_CTRL_ERR_MASK 0x000000FF 80#define FD_CTRL_ERR_MASK 0x000000FF
@@ -435,4 +444,237 @@ static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
435 sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT); 444 sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
436} 445}
437 446
447/**
448 * struct dpaa2_fl_entry - structure for frame list entry.
449 * @addr: address in the FLE
450 * @len: length in the FLE
451 * @bpid: buffer pool ID
452 * @format_offset: format, offset, and short-length fields
453 * @frc: frame context
454 * @ctrl: control bits, including pta, pvt1, pvt2, err, etc.
455 * @flc: flow context address
456 */
457struct dpaa2_fl_entry {
458 __le64 addr;
459 __le32 len;
460 __le16 bpid;
461 __le16 format_offset;
462 __le32 frc;
463 __le32 ctrl;
464 __le64 flc;
465};
466
467enum dpaa2_fl_format {
468 dpaa2_fl_single = 0,
469 dpaa2_fl_res,
470 dpaa2_fl_sg
471};
472
473/**
474 * dpaa2_fl_get_addr() - get the addr field of FLE
475 * @fle: the given frame list entry
476 *
477 * Return the address in the frame list entry.
478 */
479static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
480{
481 return (dma_addr_t)le64_to_cpu(fle->addr);
482}
483
484/**
485 * dpaa2_fl_set_addr() - Set the addr field of FLE
486 * @fle: the given frame list entry
487 * @addr: the address to be set in the frame list entry
488 */
489static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
490 dma_addr_t addr)
491{
492 fle->addr = cpu_to_le64(addr);
493}
494
495/**
496 * dpaa2_fl_get_frc() - Get the frame context in the FLE
497 * @fle: the given frame list entry
498 *
499 * Return the frame context field in the frame list entry.
500 */
501static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
502{
503 return le32_to_cpu(fle->frc);
504}
505
506/**
507 * dpaa2_fl_set_frc() - Set the frame context in the FLE
508 * @fle: the given frame list entry
509 * @frc: the frame context to be set in the frame list entry
510 */
511static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
512{
513 fle->frc = cpu_to_le32(frc);
514}
515
516/**
517 * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
518 * @fle: the given frame list entry
519 *
520 * Return the control bits field in the frame list entry.
521 */
522static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
523{
524 return le32_to_cpu(fle->ctrl);
525}
526
527/**
528 * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
529 * @fle: the given frame list entry
530 * @ctrl: the control bits to be set in the frame list entry
531 */
532static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
533{
534 fle->ctrl = cpu_to_le32(ctrl);
535}
536
537/**
538 * dpaa2_fl_get_flc() - Get the flow context in the FLE
539 * @fle: the given frame list entry
540 *
541 * Return the flow context in the frame list entry.
542 */
543static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
544{
545 return (dma_addr_t)le64_to_cpu(fle->flc);
546}
547
548/**
549 * dpaa2_fl_set_flc() - Set the flow context field of FLE
550 * @fle: the given frame list entry
551 * @flc_addr: the flow context address to be set in the frame list entry
552 */
553static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
554 dma_addr_t flc_addr)
555{
556 fle->flc = cpu_to_le64(flc_addr);
557}
558
559static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
560{
561 return !!((le16_to_cpu(fle->format_offset) >>
562 FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
563}
564
565/**
566 * dpaa2_fl_get_len() - Get the length in the FLE
567 * @fle: the given frame list entry
568 *
569 * Return the length field in the frame list entry.
570 */
571static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
572{
573 if (dpaa2_fl_short_len(fle))
574 return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
575
576 return le32_to_cpu(fle->len);
577}
578
579/**
580 * dpaa2_fl_set_len() - Set the length field of FLE
581 * @fle: the given frame list entry
582 * @len: the length to be set in the frame list entry
583 */
584static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
585{
586 fle->len = cpu_to_le32(len);
587}
588
589/**
590 * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
591 * @fle: the given frame list entry
592 *
593 * Return the offset.
594 */
595static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
596{
597 return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
598}
599
600/**
601 * dpaa2_fl_set_offset() - Set the offset field of FLE
602 * @fle: the given frame list entry
603 * @offset: the offset to be set in the frame list entry
604 */
605static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
606{
607 fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
608 fle->format_offset |= cpu_to_le16(offset);
609}
610
611/**
612 * dpaa2_fl_get_format() - Get the format field in the FLE
613 * @fle: the given frame list entry
614 *
615 * Return the format.
616 */
617static inline enum dpaa2_fl_format dpaa2_fl_get_format(const struct dpaa2_fl_entry *fle)
618{
619 return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
620 FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
621}
622
623/**
624 * dpaa2_fl_set_format() - Set the format field of FLE
625 * @fle: the given frame list entry
626 * @format: the format to be set in the frame list entry
627 */
628static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
629 enum dpaa2_fl_format format)
630{
631 fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
632 fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
633}
634
635/**
636 * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
637 * @fle: the given frame list entry
638 *
639 * Return the buffer pool id.
640 */
641static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
642{
643 return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
644}
645
646/**
647 * dpaa2_fl_set_bpid() - Set the bpid field of FLE
648 * @fle: the given frame list entry
649 * @bpid: buffer pool id to be set
650 */
651static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
652{
653 fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
654 fle->bpid |= cpu_to_le16(bpid);
655}
656
657/**
658 * dpaa2_fl_is_final() - Check final bit in FLE
659 * @fle: the given frame list entry
660 *
661 * Return true if the final bit is set in the frame list entry.
662 */
663static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
664{
665 return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
666}
667
668/**
669 * dpaa2_fl_set_final() - Set the final bit in FLE
670 * @fle: the given frame list entry
671 * @final: the final boolean to be set
672 */
673static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
674{
675 fle->format_offset &= cpu_to_le16((~(FL_FINAL_FLAG_MASK <<
676 FL_FINAL_FLAG_SHIFT)) & 0xFFFF);
677 fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
678}
679
438#endif /* __FSL_DPAA2_FD_H */ 680#endif /* __FSL_DPAA2_FD_H */
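
The new dpaa2_fl_entry accessors mirror the existing dpaa2_fd/dpaa2_sg helpers and are used by the new caam/qi2 driver to build frame lists. A minimal sketch of populating a single-buffer entry (the helper and its arguments are illustrative assumptions, not part of this patch):

#include <linux/types.h>
#include <linux/string.h>
#include <soc/fsl/dpaa2-fd.h>

/* Hypothetical helper: describe one contiguous buffer as the final FLE. */
static void example_build_fle(struct dpaa2_fl_entry *fle,
			      dma_addr_t buf, u32 len, u16 bpid)
{
	memset(fle, 0, sizeof(*fle));
	dpaa2_fl_set_format(fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(fle, buf);
	dpaa2_fl_set_len(fle, len);
	dpaa2_fl_set_offset(fle, 0);
	dpaa2_fl_set_bpid(fle, bpid);
	dpaa2_fl_set_final(fle, true);	/* last entry in the list */
}
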
diff --git a/include/soc/fsl/dpaa2-global.h b/include/soc/fsl/dpaa2-global.h
index 9bc0713346a8..2bfc379d3dc9 100644
--- a/include/soc/fsl/dpaa2-global.h
+++ b/include/soc/fsl/dpaa2-global.h
@@ -174,4 +174,19 @@ static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
174 return (const struct dpaa2_fd *)&dq->dq.fd[0]; 174 return (const struct dpaa2_fd *)&dq->dq.fd[0];
175} 175}
176 176
177#define DPAA2_CSCN_SIZE sizeof(struct dpaa2_dq)
178#define DPAA2_CSCN_ALIGN 16
179#define DPAA2_CSCN_STATE_CG BIT(0)
180
181/**
182 * dpaa2_cscn_state_congested() - Check congestion state
183 * @cscn: congestion SCN (delivered to WQ or memory)
184 *
185 * Return true if congested.
186 */
187static inline bool dpaa2_cscn_state_congested(struct dpaa2_dq *cscn)
188{
189 return !!(cscn->scn.state & DPAA2_CSCN_STATE_CG);
190}
191
177#endif /* __FSL_DPAA2_GLOBAL_H */ 192#endif /* __FSL_DPAA2_GLOBAL_H */
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index ab51e40d11db..70997ab2146c 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -97,9 +97,13 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
97int dpaa2_io_service_rearm(struct dpaa2_io *service, 97int dpaa2_io_service_rearm(struct dpaa2_io *service,
98 struct dpaa2_io_notification_ctx *ctx); 98 struct dpaa2_io_notification_ctx *ctx);
99 99
100int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
101 struct dpaa2_io_store *s);
100int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid, 102int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
101 struct dpaa2_io_store *s); 103 struct dpaa2_io_store *s);
102 104
105int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
106 const struct dpaa2_fd *fd);
103int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, 107int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
104 u16 qdbin, const struct dpaa2_fd *fd); 108 u16 qdbin, const struct dpaa2_fd *fd);
105int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid, 109int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 19bf0ca6d635..6dafbc3e4414 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -29,6 +29,7 @@ enum {
29 CRYPTO_MSG_UPDATEALG, 29 CRYPTO_MSG_UPDATEALG,
30 CRYPTO_MSG_GETALG, 30 CRYPTO_MSG_GETALG,
31 CRYPTO_MSG_DELRNG, 31 CRYPTO_MSG_DELRNG,
32 CRYPTO_MSG_GETSTAT,
32 __CRYPTO_MSG_MAX 33 __CRYPTO_MSG_MAX
33}; 34};
34#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) 35#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -50,6 +51,16 @@ enum crypto_attr_type_t {
50 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ 51 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
51 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ 52 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
52 CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */ 53 CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
54 CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */
55 CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */
56 CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */
57 CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */
58 CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */
59 CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */
60 CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */
61 CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */
62 CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */
63 CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */
53 __CRYPTOCFGA_MAX 64 __CRYPTOCFGA_MAX
54 65
55#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) 66#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -65,6 +76,47 @@ struct crypto_user_alg {
65 __u32 cru_flags; 76 __u32 cru_flags;
66}; 77};
67 78
79struct crypto_stat {
80 char type[CRYPTO_MAX_NAME];
81 union {
82 __u32 stat_encrypt_cnt;
83 __u32 stat_compress_cnt;
84 __u32 stat_generate_cnt;
85 __u32 stat_hash_cnt;
86 __u32 stat_setsecret_cnt;
87 };
88 union {
89 __u64 stat_encrypt_tlen;
90 __u64 stat_compress_tlen;
91 __u64 stat_generate_tlen;
92 __u64 stat_hash_tlen;
93 };
94 union {
95 __u32 stat_akcipher_err_cnt;
96 __u32 stat_cipher_err_cnt;
97 __u32 stat_compress_err_cnt;
98 __u32 stat_aead_err_cnt;
99 __u32 stat_hash_err_cnt;
100 __u32 stat_rng_err_cnt;
101 __u32 stat_kpp_err_cnt;
102 };
103 union {
104 __u32 stat_decrypt_cnt;
105 __u32 stat_decompress_cnt;
106 __u32 stat_seed_cnt;
107 __u32 stat_generate_public_key_cnt;
108 };
109 union {
110 __u64 stat_decrypt_tlen;
111 __u64 stat_decompress_tlen;
112 };
113 union {
114 __u32 stat_verify_cnt;
115 __u32 stat_compute_shared_secret_cnt;
116 };
117 __u32 stat_sign_cnt;
118};
119
68struct crypto_report_larval { 120struct crypto_report_larval {
69 char type[CRYPTO_MAX_NAME]; 121 char type[CRYPTO_MAX_NAME];
70}; 122};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 73e01918f996..a441ea1bfe6d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -279,8 +279,8 @@ struct fsxattr {
279#define FS_ENCRYPTION_MODE_AES_256_CTS 4 279#define FS_ENCRYPTION_MODE_AES_256_CTS 4
280#define FS_ENCRYPTION_MODE_AES_128_CBC 5 280#define FS_ENCRYPTION_MODE_AES_128_CBC 5
281#define FS_ENCRYPTION_MODE_AES_128_CTS 6 281#define FS_ENCRYPTION_MODE_AES_128_CTS 6
282#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 282#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
283#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 283#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
284 284
285struct fscrypt_policy { 285struct fscrypt_policy {
286 __u8 version; 286 __u8 version;
diff --git a/lib/chacha20.c b/lib/chacha20.c
index c1cc50fb68c9..d907fec6a9ed 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,9 +16,9 @@
16#include <asm/unaligned.h> 16#include <asm/unaligned.h>
17#include <crypto/chacha20.h> 17#include <crypto/chacha20.h>
18 18
19void chacha20_block(u32 *state, u32 *stream) 19void chacha20_block(u32 *state, u8 *stream)
20{ 20{
21 u32 x[16], *out = stream; 21 u32 x[16];
22 int i; 22 int i;
23 23
24 for (i = 0; i < ARRAY_SIZE(x); i++) 24 for (i = 0; i < ARRAY_SIZE(x); i++)
@@ -67,7 +67,7 @@ void chacha20_block(u32 *state, u32 *stream)
67 } 67 }
68 68
69 for (i = 0; i < ARRAY_SIZE(x); i++) 69 for (i = 0; i < ARRAY_SIZE(x); i++)
70 out[i] = cpu_to_le32(x[i] + state[i]); 70 put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
71 71
72 state[12]++; 72 state[12]++;
73} 73}
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ad33e555805..4d0d47c1ffbd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -14,10 +14,47 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <crypto/hash.h> 16#include <crypto/hash.h>
17#include <crypto/algapi.h>
17#include <linux/static_key.h> 18#include <linux/static_key.h>
19#include <linux/notifier.h>
18 20
19static struct crypto_shash *crct10dif_tfm; 21static struct crypto_shash __rcu *crct10dif_tfm;
20static struct static_key crct10dif_fallback __read_mostly; 22static struct static_key crct10dif_fallback __read_mostly;
23static DEFINE_MUTEX(crc_t10dif_mutex);
24
25static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
26{
27 struct crypto_alg *alg = data;
28 struct crypto_shash *new, *old;
29
30 if (val != CRYPTO_MSG_ALG_LOADED ||
31 static_key_false(&crct10dif_fallback) ||
32 strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
33 return 0;
34
35 mutex_lock(&crc_t10dif_mutex);
36 old = rcu_dereference_protected(crct10dif_tfm,
37 lockdep_is_held(&crc_t10dif_mutex));
38 if (!old) {
39 mutex_unlock(&crc_t10dif_mutex);
40 return 0;
41 }
42 new = crypto_alloc_shash("crct10dif", 0, 0);
43 if (IS_ERR(new)) {
44 mutex_unlock(&crc_t10dif_mutex);
45 return 0;
46 }
47 rcu_assign_pointer(crct10dif_tfm, new);
48 mutex_unlock(&crc_t10dif_mutex);
49
50 synchronize_rcu();
51 crypto_free_shash(old);
52 return 0;
53}
54
55static struct notifier_block crc_t10dif_nb = {
56 .notifier_call = crc_t10dif_rehash,
57};
21 58
22__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) 59__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
23{ 60{
@@ -30,11 +67,14 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
30 if (static_key_false(&crct10dif_fallback)) 67 if (static_key_false(&crct10dif_fallback))
31 return crc_t10dif_generic(crc, buffer, len); 68 return crc_t10dif_generic(crc, buffer, len);
32 69
33 desc.shash.tfm = crct10dif_tfm; 70 rcu_read_lock();
71 desc.shash.tfm = rcu_dereference(crct10dif_tfm);
34 desc.shash.flags = 0; 72 desc.shash.flags = 0;
35 *(__u16 *)desc.ctx = crc; 73 *(__u16 *)desc.ctx = crc;
36 74
37 err = crypto_shash_update(&desc.shash, buffer, len); 75 err = crypto_shash_update(&desc.shash, buffer, len);
76 rcu_read_unlock();
77
38 BUG_ON(err); 78 BUG_ON(err);
39 79
40 return *(__u16 *)desc.ctx; 80 return *(__u16 *)desc.ctx;
@@ -49,6 +89,7 @@ EXPORT_SYMBOL(crc_t10dif);
49 89
50static int __init crc_t10dif_mod_init(void) 90static int __init crc_t10dif_mod_init(void)
51{ 91{
92 crypto_register_notifier(&crc_t10dif_nb);
52 crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); 93 crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
53 if (IS_ERR(crct10dif_tfm)) { 94 if (IS_ERR(crct10dif_tfm)) {
54 static_key_slow_inc(&crct10dif_fallback); 95 static_key_slow_inc(&crct10dif_fallback);
@@ -59,12 +100,24 @@ static int __init crc_t10dif_mod_init(void)
59 100
60static void __exit crc_t10dif_mod_fini(void) 101static void __exit crc_t10dif_mod_fini(void)
61{ 102{
103 crypto_unregister_notifier(&crc_t10dif_nb);
62 crypto_free_shash(crct10dif_tfm); 104 crypto_free_shash(crct10dif_tfm);
63} 105}
64 106
65module_init(crc_t10dif_mod_init); 107module_init(crc_t10dif_mod_init);
66module_exit(crc_t10dif_mod_fini); 108module_exit(crc_t10dif_mod_fini);
67 109
110static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
111{
112 if (static_key_false(&crct10dif_fallback))
113 return sprintf(buffer, "fallback\n");
114
115 return sprintf(buffer, "%s\n",
116 crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
117}
118
119module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
120
68MODULE_DESCRIPTION("T10 DIF CRC calculation"); 121MODULE_DESCRIPTION("T10 DIF CRC calculation");
69MODULE_LICENSE("GPL"); 122MODULE_LICENSE("GPL");
70MODULE_SOFTDEP("pre: crct10dif"); 123MODULE_SOFTDEP("pre: crct10dif");
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 02172c408ff2..5d6724cee38f 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -46,9 +46,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
46 goto fail; 46 goto fail;
47 } 47 }
48 48
49 /* crypto_alloc_skcipher() allocates with GFP_KERNEL */ 49 /* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
50 noio_flag = memalloc_noio_save(); 50 noio_flag = memalloc_noio_save();
51 key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); 51 key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
52 memalloc_noio_restore(noio_flag); 52 memalloc_noio_restore(noio_flag);
53 if (IS_ERR(key->tfm)) { 53 if (IS_ERR(key->tfm)) {
54 ret = PTR_ERR(key->tfm); 54 ret = PTR_ERR(key->tfm);
@@ -56,7 +56,7 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
56 goto fail; 56 goto fail;
57 } 57 }
58 58
59 ret = crypto_skcipher_setkey(key->tfm, key->key, key->len); 59 ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
60 if (ret) 60 if (ret)
61 goto fail; 61 goto fail;
62 62
@@ -136,7 +136,7 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
136 if (key) { 136 if (key) {
137 kfree(key->key); 137 kfree(key->key);
138 key->key = NULL; 138 key->key = NULL;
139 crypto_free_skcipher(key->tfm); 139 crypto_free_sync_skcipher(key->tfm);
140 key->tfm = NULL; 140 key->tfm = NULL;
141 } 141 }
142} 142}
@@ -216,7 +216,7 @@ static void teardown_sgtable(struct sg_table *sgt)
216static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt, 216static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
217 void *buf, int buf_len, int in_len, int *pout_len) 217 void *buf, int buf_len, int in_len, int *pout_len)
218{ 218{
219 SKCIPHER_REQUEST_ON_STACK(req, key->tfm); 219 SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
220 struct sg_table sgt; 220 struct sg_table sgt;
221 struct scatterlist prealloc_sg; 221 struct scatterlist prealloc_sg;
222 char iv[AES_BLOCK_SIZE] __aligned(8); 222 char iv[AES_BLOCK_SIZE] __aligned(8);
@@ -232,7 +232,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
232 return ret; 232 return ret;
233 233
234 memcpy(iv, aes_iv, AES_BLOCK_SIZE); 234 memcpy(iv, aes_iv, AES_BLOCK_SIZE);
235 skcipher_request_set_tfm(req, key->tfm); 235 skcipher_request_set_sync_tfm(req, key->tfm);
236 skcipher_request_set_callback(req, 0, NULL, NULL); 236 skcipher_request_set_callback(req, 0, NULL, NULL);
237 skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv); 237 skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
238 238
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index bb45c7d43739..96ef4d860bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -13,7 +13,7 @@ struct ceph_crypto_key {
13 struct ceph_timespec created; 13 struct ceph_timespec created;
14 int len; 14 int len;
15 void *key; 15 void *key;
16 struct crypto_skcipher *tfm; 16 struct crypto_sync_skcipher *tfm;
17}; 17};
18 18
19int ceph_crypto_key_clone(struct ceph_crypto_key *dst, 19int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 2fb703d70803..7e29f88dbf6a 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -146,18 +146,18 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
146 goto err_tfm; 146 goto err_tfm;
147 } 147 }
148 148
149 key->tfm0 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); 149 key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
150 if (IS_ERR(key->tfm0)) 150 if (IS_ERR(key->tfm0))
151 goto err_tfm; 151 goto err_tfm;
152 152
153 if (crypto_skcipher_setkey(key->tfm0, template->key, 153 if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
154 IEEE802154_LLSEC_KEY_SIZE)) 154 IEEE802154_LLSEC_KEY_SIZE))
155 goto err_tfm0; 155 goto err_tfm0;
156 156
157 return key; 157 return key;
158 158
159err_tfm0: 159err_tfm0:
160 crypto_free_skcipher(key->tfm0); 160 crypto_free_sync_skcipher(key->tfm0);
161err_tfm: 161err_tfm:
162 for (i = 0; i < ARRAY_SIZE(key->tfm); i++) 162 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
163 if (key->tfm[i]) 163 if (key->tfm[i])
@@ -177,7 +177,7 @@ static void llsec_key_release(struct kref *ref)
177 for (i = 0; i < ARRAY_SIZE(key->tfm); i++) 177 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
178 crypto_free_aead(key->tfm[i]); 178 crypto_free_aead(key->tfm[i]);
179 179
180 crypto_free_skcipher(key->tfm0); 180 crypto_free_sync_skcipher(key->tfm0);
181 kzfree(key); 181 kzfree(key);
182} 182}
183 183
@@ -622,7 +622,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
622{ 622{
623 u8 iv[16]; 623 u8 iv[16];
624 struct scatterlist src; 624 struct scatterlist src;
625 SKCIPHER_REQUEST_ON_STACK(req, key->tfm0); 625 SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
626 int err, datalen; 626 int err, datalen;
627 unsigned char *data; 627 unsigned char *data;
628 628
@@ -632,7 +632,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
632 datalen = skb_tail_pointer(skb) - data; 632 datalen = skb_tail_pointer(skb) - data;
633 sg_init_one(&src, data, datalen); 633 sg_init_one(&src, data, datalen);
634 634
635 skcipher_request_set_tfm(req, key->tfm0); 635 skcipher_request_set_sync_tfm(req, key->tfm0);
636 skcipher_request_set_callback(req, 0, NULL, NULL); 636 skcipher_request_set_callback(req, 0, NULL, NULL);
637 skcipher_request_set_crypt(req, &src, &src, datalen, iv); 637 skcipher_request_set_crypt(req, &src, &src, datalen, iv);
638 err = crypto_skcipher_encrypt(req); 638 err = crypto_skcipher_encrypt(req);
@@ -840,7 +840,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
840 unsigned char *data; 840 unsigned char *data;
841 int datalen; 841 int datalen;
842 struct scatterlist src; 842 struct scatterlist src;
843 SKCIPHER_REQUEST_ON_STACK(req, key->tfm0); 843 SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
844 int err; 844 int err;
845 845
846 llsec_geniv(iv, dev_addr, &hdr->sec); 846 llsec_geniv(iv, dev_addr, &hdr->sec);
@@ -849,7 +849,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
849 849
850 sg_init_one(&src, data, datalen); 850 sg_init_one(&src, data, datalen);
851 851
852 skcipher_request_set_tfm(req, key->tfm0); 852 skcipher_request_set_sync_tfm(req, key->tfm0);
853 skcipher_request_set_callback(req, 0, NULL, NULL); 853 skcipher_request_set_callback(req, 0, NULL, NULL);
854 skcipher_request_set_crypt(req, &src, &src, datalen, iv); 854 skcipher_request_set_crypt(req, &src, &src, datalen, iv);
855 855
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
index 6f3b658e3279..8be46d74dc39 100644
--- a/net/mac802154/llsec.h
+++ b/net/mac802154/llsec.h
@@ -29,7 +29,7 @@ struct mac802154_llsec_key {
29 29
30 /* one tfm for each authsize (4/8/16) */ 30 /* one tfm for each authsize (4/8/16) */
31 struct crypto_aead *tfm[3]; 31 struct crypto_aead *tfm[3];
32 struct crypto_skcipher *tfm0; 32 struct crypto_sync_skcipher *tfm0;
33 33
34 struct kref ref; 34 struct kref ref;
35}; 35};
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 0a7c49e8e053..382196e57a26 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -435,7 +435,7 @@ struct rxrpc_connection {
435 struct sk_buff_head rx_queue; /* received conn-level packets */ 435 struct sk_buff_head rx_queue; /* received conn-level packets */
436 const struct rxrpc_security *security; /* applied security module */ 436 const struct rxrpc_security *security; /* applied security module */
437 struct key *server_key; /* security for this service */ 437 struct key *server_key; /* security for this service */
438 struct crypto_skcipher *cipher; /* encryption handle */ 438 struct crypto_sync_skcipher *cipher; /* encryption handle */
439 struct rxrpc_crypt csum_iv; /* packet checksum base */ 439 struct rxrpc_crypt csum_iv; /* packet checksum base */
440 unsigned long flags; 440 unsigned long flags;
441 unsigned long events; 441 unsigned long events;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index cea16838d588..cbef9ea43dec 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -46,7 +46,7 @@ struct rxkad_level2_hdr {
46 * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE 46 * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
47 * packets 47 * packets
48 */ 48 */
49static struct crypto_skcipher *rxkad_ci; 49static struct crypto_sync_skcipher *rxkad_ci;
50static DEFINE_MUTEX(rxkad_ci_mutex); 50static DEFINE_MUTEX(rxkad_ci_mutex);
51 51
52/* 52/*
@@ -54,7 +54,7 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
54 */ 54 */
55static int rxkad_init_connection_security(struct rxrpc_connection *conn) 55static int rxkad_init_connection_security(struct rxrpc_connection *conn)
56{ 56{
57 struct crypto_skcipher *ci; 57 struct crypto_sync_skcipher *ci;
58 struct rxrpc_key_token *token; 58 struct rxrpc_key_token *token;
59 int ret; 59 int ret;
60 60
@@ -63,14 +63,14 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
63 token = conn->params.key->payload.data[0]; 63 token = conn->params.key->payload.data[0];
64 conn->security_ix = token->security_index; 64 conn->security_ix = token->security_index;
65 65
66 ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 66 ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
67 if (IS_ERR(ci)) { 67 if (IS_ERR(ci)) {
68 _debug("no cipher"); 68 _debug("no cipher");
69 ret = PTR_ERR(ci); 69 ret = PTR_ERR(ci);
70 goto error; 70 goto error;
71 } 71 }
72 72
73 if (crypto_skcipher_setkey(ci, token->kad->session_key, 73 if (crypto_sync_skcipher_setkey(ci, token->kad->session_key,
74 sizeof(token->kad->session_key)) < 0) 74 sizeof(token->kad->session_key)) < 0)
75 BUG(); 75 BUG();
76 76
@@ -104,7 +104,7 @@ error:
104static int rxkad_prime_packet_security(struct rxrpc_connection *conn) 104static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
105{ 105{
106 struct rxrpc_key_token *token; 106 struct rxrpc_key_token *token;
107 SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); 107 SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
108 struct scatterlist sg; 108 struct scatterlist sg;
109 struct rxrpc_crypt iv; 109 struct rxrpc_crypt iv;
110 __be32 *tmpbuf; 110 __be32 *tmpbuf;
@@ -128,7 +128,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
128 tmpbuf[3] = htonl(conn->security_ix); 128 tmpbuf[3] = htonl(conn->security_ix);
129 129
130 sg_init_one(&sg, tmpbuf, tmpsize); 130 sg_init_one(&sg, tmpbuf, tmpsize);
131 skcipher_request_set_tfm(req, conn->cipher); 131 skcipher_request_set_sync_tfm(req, conn->cipher);
132 skcipher_request_set_callback(req, 0, NULL, NULL); 132 skcipher_request_set_callback(req, 0, NULL, NULL);
133 skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); 133 skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
134 crypto_skcipher_encrypt(req); 134 crypto_skcipher_encrypt(req);
@@ -167,7 +167,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
167 memset(&iv, 0, sizeof(iv)); 167 memset(&iv, 0, sizeof(iv));
168 168
169 sg_init_one(&sg, sechdr, 8); 169 sg_init_one(&sg, sechdr, 8);
170 skcipher_request_set_tfm(req, call->conn->cipher); 170 skcipher_request_set_sync_tfm(req, call->conn->cipher);
171 skcipher_request_set_callback(req, 0, NULL, NULL); 171 skcipher_request_set_callback(req, 0, NULL, NULL);
172 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); 172 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
173 crypto_skcipher_encrypt(req); 173 crypto_skcipher_encrypt(req);
@@ -212,7 +212,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
212 memcpy(&iv, token->kad->session_key, sizeof(iv)); 212 memcpy(&iv, token->kad->session_key, sizeof(iv));
213 213
214 sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); 214 sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
215 skcipher_request_set_tfm(req, call->conn->cipher); 215 skcipher_request_set_sync_tfm(req, call->conn->cipher);
216 skcipher_request_set_callback(req, 0, NULL, NULL); 216 skcipher_request_set_callback(req, 0, NULL, NULL);
217 skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x); 217 skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
218 crypto_skcipher_encrypt(req); 218 crypto_skcipher_encrypt(req);
@@ -250,7 +250,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
250 void *sechdr) 250 void *sechdr)
251{ 251{
252 struct rxrpc_skb_priv *sp; 252 struct rxrpc_skb_priv *sp;
253 SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); 253 SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
254 struct rxrpc_crypt iv; 254 struct rxrpc_crypt iv;
255 struct scatterlist sg; 255 struct scatterlist sg;
256 u32 x, y; 256 u32 x, y;
@@ -279,7 +279,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
279 call->crypto_buf[1] = htonl(x); 279 call->crypto_buf[1] = htonl(x);
280 280
281 sg_init_one(&sg, call->crypto_buf, 8); 281 sg_init_one(&sg, call->crypto_buf, 8);
282 skcipher_request_set_tfm(req, call->conn->cipher); 282 skcipher_request_set_sync_tfm(req, call->conn->cipher);
283 skcipher_request_set_callback(req, 0, NULL, NULL); 283 skcipher_request_set_callback(req, 0, NULL, NULL);
284 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); 284 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
285 crypto_skcipher_encrypt(req); 285 crypto_skcipher_encrypt(req);
@@ -352,7 +352,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
352 /* start the decryption afresh */ 352 /* start the decryption afresh */
353 memset(&iv, 0, sizeof(iv)); 353 memset(&iv, 0, sizeof(iv));
354 354
355 skcipher_request_set_tfm(req, call->conn->cipher); 355 skcipher_request_set_sync_tfm(req, call->conn->cipher);
356 skcipher_request_set_callback(req, 0, NULL, NULL); 356 skcipher_request_set_callback(req, 0, NULL, NULL);
357 skcipher_request_set_crypt(req, sg, sg, 8, iv.x); 357 skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
358 crypto_skcipher_decrypt(req); 358 crypto_skcipher_decrypt(req);
@@ -450,7 +450,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
450 token = call->conn->params.key->payload.data[0]; 450 token = call->conn->params.key->payload.data[0];
451 memcpy(&iv, token->kad->session_key, sizeof(iv)); 451 memcpy(&iv, token->kad->session_key, sizeof(iv));
452 452
453 skcipher_request_set_tfm(req, call->conn->cipher); 453 skcipher_request_set_sync_tfm(req, call->conn->cipher);
454 skcipher_request_set_callback(req, 0, NULL, NULL); 454 skcipher_request_set_callback(req, 0, NULL, NULL);
455 skcipher_request_set_crypt(req, sg, sg, len, iv.x); 455 skcipher_request_set_crypt(req, sg, sg, len, iv.x);
456 crypto_skcipher_decrypt(req); 456 crypto_skcipher_decrypt(req);
@@ -506,7 +506,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
506 unsigned int offset, unsigned int len, 506 unsigned int offset, unsigned int len,
507 rxrpc_seq_t seq, u16 expected_cksum) 507 rxrpc_seq_t seq, u16 expected_cksum)
508{ 508{
509 SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); 509 SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
510 struct rxrpc_crypt iv; 510 struct rxrpc_crypt iv;
511 struct scatterlist sg; 511 struct scatterlist sg;
512 bool aborted; 512 bool aborted;
@@ -529,7 +529,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
529 call->crypto_buf[1] = htonl(x); 529 call->crypto_buf[1] = htonl(x);
530 530
531 sg_init_one(&sg, call->crypto_buf, 8); 531 sg_init_one(&sg, call->crypto_buf, 8);
532 skcipher_request_set_tfm(req, call->conn->cipher); 532 skcipher_request_set_sync_tfm(req, call->conn->cipher);
533 skcipher_request_set_callback(req, 0, NULL, NULL); 533 skcipher_request_set_callback(req, 0, NULL, NULL);
534 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); 534 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
535 crypto_skcipher_encrypt(req); 535 crypto_skcipher_encrypt(req);
@@ -755,7 +755,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
755 struct rxkad_response *resp, 755 struct rxkad_response *resp,
756 const struct rxkad_key *s2) 756 const struct rxkad_key *s2)
757{ 757{
758 SKCIPHER_REQUEST_ON_STACK(req, conn->cipher); 758 SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
759 struct rxrpc_crypt iv; 759 struct rxrpc_crypt iv;
760 struct scatterlist sg[1]; 760 struct scatterlist sg[1];
761 761
@@ -764,7 +764,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
764 764
765 sg_init_table(sg, 1); 765 sg_init_table(sg, 1);
766 sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted)); 766 sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
767 skcipher_request_set_tfm(req, conn->cipher); 767 skcipher_request_set_sync_tfm(req, conn->cipher);
768 skcipher_request_set_callback(req, 0, NULL, NULL); 768 skcipher_request_set_callback(req, 0, NULL, NULL);
769 skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); 769 skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
770 crypto_skcipher_encrypt(req); 770 crypto_skcipher_encrypt(req);
@@ -1021,7 +1021,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
1021 struct rxkad_response *resp, 1021 struct rxkad_response *resp,
1022 const struct rxrpc_crypt *session_key) 1022 const struct rxrpc_crypt *session_key)
1023{ 1023{
1024 SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci); 1024 SYNC_SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
1025 struct scatterlist sg[1]; 1025 struct scatterlist sg[1];
1026 struct rxrpc_crypt iv; 1026 struct rxrpc_crypt iv;
1027 1027
@@ -1031,7 +1031,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
1031 ASSERT(rxkad_ci != NULL); 1031 ASSERT(rxkad_ci != NULL);
1032 1032
1033 mutex_lock(&rxkad_ci_mutex); 1033 mutex_lock(&rxkad_ci_mutex);
1034 if (crypto_skcipher_setkey(rxkad_ci, session_key->x, 1034 if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x,
1035 sizeof(*session_key)) < 0) 1035 sizeof(*session_key)) < 0)
1036 BUG(); 1036 BUG();
1037 1037
@@ -1039,7 +1039,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
1039 1039
1040 sg_init_table(sg, 1); 1040 sg_init_table(sg, 1);
1041 sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted)); 1041 sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
1042 skcipher_request_set_tfm(req, rxkad_ci); 1042 skcipher_request_set_sync_tfm(req, rxkad_ci);
1043 skcipher_request_set_callback(req, 0, NULL, NULL); 1043 skcipher_request_set_callback(req, 0, NULL, NULL);
1044 skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); 1044 skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
1045 crypto_skcipher_decrypt(req); 1045 crypto_skcipher_decrypt(req);
@@ -1218,7 +1218,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
1218 _enter(""); 1218 _enter("");
1219 1219
1220 if (conn->cipher) 1220 if (conn->cipher)
1221 crypto_free_skcipher(conn->cipher); 1221 crypto_free_sync_skcipher(conn->cipher);
1222} 1222}
1223 1223
1224/* 1224/*
@@ -1228,7 +1228,7 @@ static int rxkad_init(void)
1228{ 1228{
1229 /* pin the cipher we need so that the crypto layer doesn't invoke 1229 /* pin the cipher we need so that the crypto layer doesn't invoke
1230 * keventd to go get it */ 1230 * keventd to go get it */
1231 rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 1231 rxkad_ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
1232 return PTR_ERR_OR_ZERO(rxkad_ci); 1232 return PTR_ERR_OR_ZERO(rxkad_ci);
1233} 1233}
1234 1234
@@ -1238,7 +1238,7 @@ static int rxkad_init(void)
1238static void rxkad_exit(void) 1238static void rxkad_exit(void)
1239{ 1239{
1240 if (rxkad_ci) 1240 if (rxkad_ci)
1241 crypto_free_skcipher(rxkad_ci); 1241 crypto_free_sync_skcipher(rxkad_ci);
1242} 1242}
1243 1243
1244/* 1244/*
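
The rxkad.c hunks above all apply one mechanical conversion: the connection cipher becomes a struct crypto_sync_skcipher, allocated without the CRYPTO_ALG_ASYNC mask because crypto_alloc_sync_skcipher() only returns synchronous implementations with a bounded request size, which is what lets the ON_STACK request macro avoid a variable-length array. A minimal sketch of that before/after pattern (not taken from the patch itself; the example_* identifiers are hypothetical):

/*
 * Hypothetical illustration of the crypto_skcipher -> crypto_sync_skcipher
 * conversion pattern used throughout this series; example_cipher,
 * example_init, example_encrypt and example_exit do not appear in the patch.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static struct crypto_sync_skcipher *example_cipher;

static int example_init(void)
{
	/* was: crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC) */
	example_cipher = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
	return PTR_ERR_OR_ZERO(example_cipher);
}

static void example_encrypt(struct scatterlist *sg, unsigned int len, u8 *iv)
{
	/* was: SKCIPHER_REQUEST_ON_STACK(req, example_cipher) */
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, example_cipher);

	/* was: skcipher_request_set_tfm(req, example_cipher) */
	skcipher_request_set_sync_tfm(req, example_cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);
	crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
}

static void example_exit(void)
{
	/* was: crypto_free_skcipher(example_cipher) */
	crypto_free_sync_skcipher(example_cipher);
}
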
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0220e1ca5280..4f43383971ba 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -53,7 +53,7 @@
53 53
54u32 54u32
55krb5_encrypt( 55krb5_encrypt(
56 struct crypto_skcipher *tfm, 56 struct crypto_sync_skcipher *tfm,
57 void * iv, 57 void * iv,
58 void * in, 58 void * in,
59 void * out, 59 void * out,
@@ -62,24 +62,24 @@ krb5_encrypt(
62 u32 ret = -EINVAL; 62 u32 ret = -EINVAL;
63 struct scatterlist sg[1]; 63 struct scatterlist sg[1];
64 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; 64 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
65 SKCIPHER_REQUEST_ON_STACK(req, tfm); 65 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
66 66
67 if (length % crypto_skcipher_blocksize(tfm) != 0) 67 if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
68 goto out; 68 goto out;
69 69
70 if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 70 if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
71 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", 71 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
72 crypto_skcipher_ivsize(tfm)); 72 crypto_sync_skcipher_ivsize(tfm));
73 goto out; 73 goto out;
74 } 74 }
75 75
76 if (iv) 76 if (iv)
77 memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm)); 77 memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
78 78
79 memcpy(out, in, length); 79 memcpy(out, in, length);
80 sg_init_one(sg, out, length); 80 sg_init_one(sg, out, length);
81 81
82 skcipher_request_set_tfm(req, tfm); 82 skcipher_request_set_sync_tfm(req, tfm);
83 skcipher_request_set_callback(req, 0, NULL, NULL); 83 skcipher_request_set_callback(req, 0, NULL, NULL);
84 skcipher_request_set_crypt(req, sg, sg, length, local_iv); 84 skcipher_request_set_crypt(req, sg, sg, length, local_iv);
85 85
@@ -92,7 +92,7 @@ out:
92 92
93u32 93u32
94krb5_decrypt( 94krb5_decrypt(
95 struct crypto_skcipher *tfm, 95 struct crypto_sync_skcipher *tfm,
96 void * iv, 96 void * iv,
97 void * in, 97 void * in,
98 void * out, 98 void * out,
@@ -101,23 +101,23 @@ krb5_decrypt(
101 u32 ret = -EINVAL; 101 u32 ret = -EINVAL;
102 struct scatterlist sg[1]; 102 struct scatterlist sg[1];
103 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; 103 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
104 SKCIPHER_REQUEST_ON_STACK(req, tfm); 104 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
105 105
106 if (length % crypto_skcipher_blocksize(tfm) != 0) 106 if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
107 goto out; 107 goto out;
108 108
109 if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 109 if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
110 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", 110 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
111 crypto_skcipher_ivsize(tfm)); 111 crypto_sync_skcipher_ivsize(tfm));
112 goto out; 112 goto out;
113 } 113 }
114 if (iv) 114 if (iv)
115 memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm)); 115 memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
116 116
117 memcpy(out, in, length); 117 memcpy(out, in, length);
118 sg_init_one(sg, out, length); 118 sg_init_one(sg, out, length);
119 119
120 skcipher_request_set_tfm(req, tfm); 120 skcipher_request_set_sync_tfm(req, tfm);
121 skcipher_request_set_callback(req, 0, NULL, NULL); 121 skcipher_request_set_callback(req, 0, NULL, NULL);
122 skcipher_request_set_crypt(req, sg, sg, length, local_iv); 122 skcipher_request_set_crypt(req, sg, sg, length, local_iv);
123 123
@@ -466,7 +466,8 @@ encryptor(struct scatterlist *sg, void *data)
466{ 466{
467 struct encryptor_desc *desc = data; 467 struct encryptor_desc *desc = data;
468 struct xdr_buf *outbuf = desc->outbuf; 468 struct xdr_buf *outbuf = desc->outbuf;
469 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req); 469 struct crypto_sync_skcipher *tfm =
470 crypto_sync_skcipher_reqtfm(desc->req);
470 struct page *in_page; 471 struct page *in_page;
471 int thislen = desc->fraglen + sg->length; 472 int thislen = desc->fraglen + sg->length;
472 int fraglen, ret; 473 int fraglen, ret;
@@ -492,7 +493,7 @@ encryptor(struct scatterlist *sg, void *data)
492 desc->fraglen += sg->length; 493 desc->fraglen += sg->length;
493 desc->pos += sg->length; 494 desc->pos += sg->length;
494 495
495 fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1); 496 fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
496 thislen -= fraglen; 497 thislen -= fraglen;
497 498
498 if (thislen == 0) 499 if (thislen == 0)
@@ -526,16 +527,16 @@ encryptor(struct scatterlist *sg, void *data)
526} 527}
527 528
528int 529int
529gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf, 530gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
530 int offset, struct page **pages) 531 int offset, struct page **pages)
531{ 532{
532 int ret; 533 int ret;
533 struct encryptor_desc desc; 534 struct encryptor_desc desc;
534 SKCIPHER_REQUEST_ON_STACK(req, tfm); 535 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
535 536
536 BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0); 537 BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
537 538
538 skcipher_request_set_tfm(req, tfm); 539 skcipher_request_set_sync_tfm(req, tfm);
539 skcipher_request_set_callback(req, 0, NULL, NULL); 540 skcipher_request_set_callback(req, 0, NULL, NULL);
540 541
541 memset(desc.iv, 0, sizeof(desc.iv)); 542 memset(desc.iv, 0, sizeof(desc.iv));
@@ -567,7 +568,8 @@ decryptor(struct scatterlist *sg, void *data)
567{ 568{
568 struct decryptor_desc *desc = data; 569 struct decryptor_desc *desc = data;
569 int thislen = desc->fraglen + sg->length; 570 int thislen = desc->fraglen + sg->length;
570 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req); 571 struct crypto_sync_skcipher *tfm =
572 crypto_sync_skcipher_reqtfm(desc->req);
571 int fraglen, ret; 573 int fraglen, ret;
572 574
573 /* Worst case is 4 fragments: head, end of page 1, start 575 /* Worst case is 4 fragments: head, end of page 1, start
@@ -578,7 +580,7 @@ decryptor(struct scatterlist *sg, void *data)
578 desc->fragno++; 580 desc->fragno++;
579 desc->fraglen += sg->length; 581 desc->fraglen += sg->length;
580 582
581 fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1); 583 fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
582 thislen -= fraglen; 584 thislen -= fraglen;
583 585
584 if (thislen == 0) 586 if (thislen == 0)
@@ -608,17 +610,17 @@ decryptor(struct scatterlist *sg, void *data)
608} 610}
609 611
610int 612int
611gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf, 613gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
612 int offset) 614 int offset)
613{ 615{
614 int ret; 616 int ret;
615 struct decryptor_desc desc; 617 struct decryptor_desc desc;
616 SKCIPHER_REQUEST_ON_STACK(req, tfm); 618 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
617 619
618 /* XXXJBF: */ 620 /* XXXJBF: */
619 BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0); 621 BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
620 622
621 skcipher_request_set_tfm(req, tfm); 623 skcipher_request_set_sync_tfm(req, tfm);
622 skcipher_request_set_callback(req, 0, NULL, NULL); 624 skcipher_request_set_callback(req, 0, NULL, NULL);
623 625
624 memset(desc.iv, 0, sizeof(desc.iv)); 626 memset(desc.iv, 0, sizeof(desc.iv));
@@ -672,12 +674,12 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
672} 674}
673 675
674static u32 676static u32
675gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf, 677gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
676 u32 offset, u8 *iv, struct page **pages, int encrypt) 678 u32 offset, u8 *iv, struct page **pages, int encrypt)
677{ 679{
678 u32 ret; 680 u32 ret;
679 struct scatterlist sg[1]; 681 struct scatterlist sg[1];
680 SKCIPHER_REQUEST_ON_STACK(req, cipher); 682 SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
681 u8 *data; 683 u8 *data;
682 struct page **save_pages; 684 struct page **save_pages;
683 u32 len = buf->len - offset; 685 u32 len = buf->len - offset;
@@ -706,7 +708,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
706 708
707 sg_init_one(sg, data, len); 709 sg_init_one(sg, data, len);
708 710
709 skcipher_request_set_tfm(req, cipher); 711 skcipher_request_set_sync_tfm(req, cipher);
710 skcipher_request_set_callback(req, 0, NULL, NULL); 712 skcipher_request_set_callback(req, 0, NULL, NULL);
711 skcipher_request_set_crypt(req, sg, sg, len, iv); 713 skcipher_request_set_crypt(req, sg, sg, len, iv);
712 714
@@ -735,7 +737,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
735 struct xdr_netobj hmac; 737 struct xdr_netobj hmac;
736 u8 *cksumkey; 738 u8 *cksumkey;
737 u8 *ecptr; 739 u8 *ecptr;
738 struct crypto_skcipher *cipher, *aux_cipher; 740 struct crypto_sync_skcipher *cipher, *aux_cipher;
739 int blocksize; 741 int blocksize;
740 struct page **save_pages; 742 struct page **save_pages;
741 int nblocks, nbytes; 743 int nblocks, nbytes;
@@ -754,7 +756,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
754 cksumkey = kctx->acceptor_integ; 756 cksumkey = kctx->acceptor_integ;
755 usage = KG_USAGE_ACCEPTOR_SEAL; 757 usage = KG_USAGE_ACCEPTOR_SEAL;
756 } 758 }
757 blocksize = crypto_skcipher_blocksize(cipher); 759 blocksize = crypto_sync_skcipher_blocksize(cipher);
758 760
759 /* hide the gss token header and insert the confounder */ 761 /* hide the gss token header and insert the confounder */
760 offset += GSS_KRB5_TOK_HDR_LEN; 762 offset += GSS_KRB5_TOK_HDR_LEN;
@@ -807,7 +809,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
807 memset(desc.iv, 0, sizeof(desc.iv)); 809 memset(desc.iv, 0, sizeof(desc.iv));
808 810
809 if (cbcbytes) { 811 if (cbcbytes) {
810 SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 812 SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
811 813
812 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; 814 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
813 desc.fragno = 0; 815 desc.fragno = 0;
@@ -816,7 +818,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
816 desc.outbuf = buf; 818 desc.outbuf = buf;
817 desc.req = req; 819 desc.req = req;
818 820
819 skcipher_request_set_tfm(req, aux_cipher); 821 skcipher_request_set_sync_tfm(req, aux_cipher);
820 skcipher_request_set_callback(req, 0, NULL, NULL); 822 skcipher_request_set_callback(req, 0, NULL, NULL);
821 823
822 sg_init_table(desc.infrags, 4); 824 sg_init_table(desc.infrags, 4);
@@ -855,7 +857,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
855 struct xdr_buf subbuf; 857 struct xdr_buf subbuf;
856 u32 ret = 0; 858 u32 ret = 0;
857 u8 *cksum_key; 859 u8 *cksum_key;
858 struct crypto_skcipher *cipher, *aux_cipher; 860 struct crypto_sync_skcipher *cipher, *aux_cipher;
859 struct xdr_netobj our_hmac_obj; 861 struct xdr_netobj our_hmac_obj;
860 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 862 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
861 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 863 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -874,7 +876,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
874 cksum_key = kctx->initiator_integ; 876 cksum_key = kctx->initiator_integ;
875 usage = KG_USAGE_INITIATOR_SEAL; 877 usage = KG_USAGE_INITIATOR_SEAL;
876 } 878 }
877 blocksize = crypto_skcipher_blocksize(cipher); 879 blocksize = crypto_sync_skcipher_blocksize(cipher);
878 880
879 881
880 /* create a segment skipping the header and leaving out the checksum */ 882 /* create a segment skipping the header and leaving out the checksum */
@@ -891,13 +893,13 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
891 memset(desc.iv, 0, sizeof(desc.iv)); 893 memset(desc.iv, 0, sizeof(desc.iv));
892 894
893 if (cbcbytes) { 895 if (cbcbytes) {
894 SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 896 SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
895 897
896 desc.fragno = 0; 898 desc.fragno = 0;
897 desc.fraglen = 0; 899 desc.fraglen = 0;
898 desc.req = req; 900 desc.req = req;
899 901
900 skcipher_request_set_tfm(req, aux_cipher); 902 skcipher_request_set_sync_tfm(req, aux_cipher);
901 skcipher_request_set_callback(req, 0, NULL, NULL); 903 skcipher_request_set_callback(req, 0, NULL, NULL);
902 904
903 sg_init_table(desc.frags, 4); 905 sg_init_table(desc.frags, 4);
@@ -946,7 +948,8 @@ out_err:
946 * Set the key of the given cipher. 948 * Set the key of the given cipher.
947 */ 949 */
948int 950int
949krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher, 951krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
952 struct crypto_sync_skcipher *cipher,
950 unsigned char *cksum) 953 unsigned char *cksum)
951{ 954{
952 struct crypto_shash *hmac; 955 struct crypto_shash *hmac;
@@ -994,7 +997,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
994 if (err) 997 if (err)
995 goto out_err; 998 goto out_err;
996 999
997 err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); 1000 err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
998 if (err) 1001 if (err)
999 goto out_err; 1002 goto out_err;
1000 1003
@@ -1012,7 +1015,8 @@ out_err:
1012 * Set the key of cipher kctx->enc. 1015 * Set the key of cipher kctx->enc.
1013 */ 1016 */
1014int 1017int
1015krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher, 1018krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
1019 struct crypto_sync_skcipher *cipher,
1016 s32 seqnum) 1020 s32 seqnum)
1017{ 1021{
1018 struct crypto_shash *hmac; 1022 struct crypto_shash *hmac;
@@ -1069,7 +1073,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
1069 if (err) 1073 if (err)
1070 goto out_err; 1074 goto out_err;
1071 1075
1072 err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); 1076 err = crypto_sync_skcipher_setkey(cipher, Kcrypt,
1077 kctx->gk5e->keylength);
1073 if (err) 1078 if (err)
1074 goto out_err; 1079 goto out_err;
1075 1080
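
After this conversion, krb5_encrypt()/krb5_decrypt() take a struct crypto_sync_skcipher, and the block-size and IV-size checks go through the crypto_sync_skcipher_* accessors. A rough caller sketch, assuming the declarations in <linux/sunrpc/gss_krb5.h>; the function name and parameters here are hypothetical and only illustrate the allocate, key, call, free sequence a user of these helpers now needs:

/* Hypothetical caller of krb5_encrypt(); not part of the patch. */
#include <linux/err.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/gss_krb5.h>
#include <crypto/skcipher.h>

static u32 example_seal(const char *encrypt_name, const u8 *key,
			unsigned int keylen, void *iv, void *in,
			void *out, int length)
{
	struct crypto_sync_skcipher *tfm;
	u32 ret;

	tfm = crypto_alloc_sync_skcipher(encrypt_name, 0, 0);
	if (IS_ERR(tfm))
		return GSS_S_FAILURE;

	if (crypto_sync_skcipher_setkey(tfm, key, keylen)) {
		crypto_free_sync_skcipher(tfm);
		return GSS_S_FAILURE;
	}

	ret = krb5_encrypt(tfm, iv, in, out, length);
	crypto_free_sync_skcipher(tfm);
	return ret;
}
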
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index f7fe2d2b851f..550fdf18d3b3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -147,7 +147,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
147 size_t blocksize, keybytes, keylength, n; 147 size_t blocksize, keybytes, keylength, n;
148 unsigned char *inblockdata, *outblockdata, *rawkey; 148 unsigned char *inblockdata, *outblockdata, *rawkey;
149 struct xdr_netobj inblock, outblock; 149 struct xdr_netobj inblock, outblock;
150 struct crypto_skcipher *cipher; 150 struct crypto_sync_skcipher *cipher;
151 u32 ret = EINVAL; 151 u32 ret = EINVAL;
152 152
153 blocksize = gk5e->blocksize; 153 blocksize = gk5e->blocksize;
@@ -157,11 +157,10 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
157 if ((inkey->len != keylength) || (outkey->len != keylength)) 157 if ((inkey->len != keylength) || (outkey->len != keylength))
158 goto err_return; 158 goto err_return;
159 159
160 cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0, 160 cipher = crypto_alloc_sync_skcipher(gk5e->encrypt_name, 0, 0);
161 CRYPTO_ALG_ASYNC);
162 if (IS_ERR(cipher)) 161 if (IS_ERR(cipher))
163 goto err_return; 162 goto err_return;
164 if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len)) 163 if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
165 goto err_return; 164 goto err_return;
166 165
167 /* allocate and set up buffers */ 166 /* allocate and set up buffers */
@@ -238,7 +237,7 @@ err_free_in:
238 memset(inblockdata, 0, blocksize); 237 memset(inblockdata, 0, blocksize);
239 kfree(inblockdata); 238 kfree(inblockdata);
240err_free_cipher: 239err_free_cipher:
241 crypto_free_skcipher(cipher); 240 crypto_free_sync_skcipher(cipher);
242err_return: 241err_return:
243 return ret; 242 return ret;
244} 243}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9..7f0424dfa8f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -218,7 +218,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
218 218
219static inline const void * 219static inline const void *
220get_key(const void *p, const void *end, 220get_key(const void *p, const void *end,
221 struct krb5_ctx *ctx, struct crypto_skcipher **res) 221 struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
222{ 222{
223 struct xdr_netobj key; 223 struct xdr_netobj key;
224 int alg; 224 int alg;
@@ -246,15 +246,14 @@ get_key(const void *p, const void *end,
246 if (IS_ERR(p)) 246 if (IS_ERR(p))
247 goto out_err; 247 goto out_err;
248 248
249 *res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0, 249 *res = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
250 CRYPTO_ALG_ASYNC);
251 if (IS_ERR(*res)) { 250 if (IS_ERR(*res)) {
252 printk(KERN_WARNING "gss_kerberos_mech: unable to initialize " 251 printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
253 "crypto algorithm %s\n", ctx->gk5e->encrypt_name); 252 "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
254 *res = NULL; 253 *res = NULL;
255 goto out_err_free_key; 254 goto out_err_free_key;
256 } 255 }
257 if (crypto_skcipher_setkey(*res, key.data, key.len)) { 256 if (crypto_sync_skcipher_setkey(*res, key.data, key.len)) {
258 printk(KERN_WARNING "gss_kerberos_mech: error setting key for " 257 printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
259 "crypto algorithm %s\n", ctx->gk5e->encrypt_name); 258 "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
260 goto out_err_free_tfm; 259 goto out_err_free_tfm;
@@ -264,7 +263,7 @@ get_key(const void *p, const void *end,
264 return p; 263 return p;
265 264
266out_err_free_tfm: 265out_err_free_tfm:
267 crypto_free_skcipher(*res); 266 crypto_free_sync_skcipher(*res);
268out_err_free_key: 267out_err_free_key:
269 kfree(key.data); 268 kfree(key.data);
270 p = ERR_PTR(-EINVAL); 269 p = ERR_PTR(-EINVAL);
@@ -336,30 +335,30 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
336 return 0; 335 return 0;
337 336
338out_err_free_key2: 337out_err_free_key2:
339 crypto_free_skcipher(ctx->seq); 338 crypto_free_sync_skcipher(ctx->seq);
340out_err_free_key1: 339out_err_free_key1:
341 crypto_free_skcipher(ctx->enc); 340 crypto_free_sync_skcipher(ctx->enc);
342out_err_free_mech: 341out_err_free_mech:
343 kfree(ctx->mech_used.data); 342 kfree(ctx->mech_used.data);
344out_err: 343out_err:
345 return PTR_ERR(p); 344 return PTR_ERR(p);
346} 345}
347 346
348static struct crypto_skcipher * 347static struct crypto_sync_skcipher *
349context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) 348context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
350{ 349{
351 struct crypto_skcipher *cp; 350 struct crypto_sync_skcipher *cp;
352 351
353 cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC); 352 cp = crypto_alloc_sync_skcipher(cname, 0, 0);
354 if (IS_ERR(cp)) { 353 if (IS_ERR(cp)) {
355 dprintk("gss_kerberos_mech: unable to initialize " 354 dprintk("gss_kerberos_mech: unable to initialize "
356 "crypto algorithm %s\n", cname); 355 "crypto algorithm %s\n", cname);
357 return NULL; 356 return NULL;
358 } 357 }
359 if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) { 358 if (crypto_sync_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
360 dprintk("gss_kerberos_mech: error setting key for " 359 dprintk("gss_kerberos_mech: error setting key for "
361 "crypto algorithm %s\n", cname); 360 "crypto algorithm %s\n", cname);
362 crypto_free_skcipher(cp); 361 crypto_free_sync_skcipher(cp);
363 return NULL; 362 return NULL;
364 } 363 }
365 return cp; 364 return cp;
@@ -413,9 +412,9 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
413 return 0; 412 return 0;
414 413
415out_free_enc: 414out_free_enc:
416 crypto_free_skcipher(ctx->enc); 415 crypto_free_sync_skcipher(ctx->enc);
417out_free_seq: 416out_free_seq:
418 crypto_free_skcipher(ctx->seq); 417 crypto_free_sync_skcipher(ctx->seq);
419out_err: 418out_err:
420 return -EINVAL; 419 return -EINVAL;
421} 420}
@@ -469,17 +468,15 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
469 /* 468 /*
470 * allocate hash, and skciphers for data and seqnum encryption 469 * allocate hash, and skciphers for data and seqnum encryption
471 */ 470 */
472 ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0, 471 ctx->enc = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
473 CRYPTO_ALG_ASYNC);
474 if (IS_ERR(ctx->enc)) { 472 if (IS_ERR(ctx->enc)) {
475 err = PTR_ERR(ctx->enc); 473 err = PTR_ERR(ctx->enc);
476 goto out_err_free_hmac; 474 goto out_err_free_hmac;
477 } 475 }
478 476
479 ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0, 477 ctx->seq = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
480 CRYPTO_ALG_ASYNC);
481 if (IS_ERR(ctx->seq)) { 478 if (IS_ERR(ctx->seq)) {
482 crypto_free_skcipher(ctx->enc); 479 crypto_free_sync_skcipher(ctx->enc);
483 err = PTR_ERR(ctx->seq); 480 err = PTR_ERR(ctx->seq);
484 goto out_err_free_hmac; 481 goto out_err_free_hmac;
485 } 482 }
@@ -591,7 +588,7 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
591 context_v2_alloc_cipher(ctx, "cbc(aes)", 588 context_v2_alloc_cipher(ctx, "cbc(aes)",
592 ctx->acceptor_seal); 589 ctx->acceptor_seal);
593 if (ctx->acceptor_enc_aux == NULL) { 590 if (ctx->acceptor_enc_aux == NULL) {
594 crypto_free_skcipher(ctx->initiator_enc_aux); 591 crypto_free_sync_skcipher(ctx->initiator_enc_aux);
595 goto out_free_acceptor_enc; 592 goto out_free_acceptor_enc;
596 } 593 }
597 } 594 }
@@ -599,9 +596,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
599 return 0; 596 return 0;
600 597
601out_free_acceptor_enc: 598out_free_acceptor_enc:
602 crypto_free_skcipher(ctx->acceptor_enc); 599 crypto_free_sync_skcipher(ctx->acceptor_enc);
603out_free_initiator_enc: 600out_free_initiator_enc:
604 crypto_free_skcipher(ctx->initiator_enc); 601 crypto_free_sync_skcipher(ctx->initiator_enc);
605out_err: 602out_err:
606 return -EINVAL; 603 return -EINVAL;
607} 604}
@@ -713,12 +710,12 @@ static void
713gss_delete_sec_context_kerberos(void *internal_ctx) { 710gss_delete_sec_context_kerberos(void *internal_ctx) {
714 struct krb5_ctx *kctx = internal_ctx; 711 struct krb5_ctx *kctx = internal_ctx;
715 712
716 crypto_free_skcipher(kctx->seq); 713 crypto_free_sync_skcipher(kctx->seq);
717 crypto_free_skcipher(kctx->enc); 714 crypto_free_sync_skcipher(kctx->enc);
718 crypto_free_skcipher(kctx->acceptor_enc); 715 crypto_free_sync_skcipher(kctx->acceptor_enc);
719 crypto_free_skcipher(kctx->initiator_enc); 716 crypto_free_sync_skcipher(kctx->initiator_enc);
720 crypto_free_skcipher(kctx->acceptor_enc_aux); 717 crypto_free_sync_skcipher(kctx->acceptor_enc_aux);
721 crypto_free_skcipher(kctx->initiator_enc_aux); 718 crypto_free_sync_skcipher(kctx->initiator_enc_aux);
722 kfree(kctx->mech_used.data); 719 kfree(kctx->mech_used.data);
723 kfree(kctx); 720 kfree(kctx);
724} 721}
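
Because the side-by-side rendering is dense, this is roughly how the small context_v2_alloc_cipher() helper reads after the hunk above, reconstructed from the right-hand (new) column; it is the allocate-then-setkey-or-free pattern the rest of the kerberos context setup relies on:

static struct crypto_sync_skcipher *
context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
{
	struct crypto_sync_skcipher *cp;

	/* sync-only allocation: no CRYPTO_ALG_ASYNC mask needed any more */
	cp = crypto_alloc_sync_skcipher(cname, 0, 0);
	if (IS_ERR(cp)) {
		dprintk("gss_kerberos_mech: unable to initialize "
			"crypto algorithm %s\n", cname);
		return NULL;
	}
	if (crypto_sync_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
		dprintk("gss_kerberos_mech: error setting key for "
			"crypto algorithm %s\n", cname);
		crypto_free_sync_skcipher(cp);
		return NULL;
	}
	return cp;
}
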
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c8b9082f4a9d..fb6656295204 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,13 +43,12 @@ static s32
43krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, 43krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
44 unsigned char *cksum, unsigned char *buf) 44 unsigned char *cksum, unsigned char *buf)
45{ 45{
46 struct crypto_skcipher *cipher; 46 struct crypto_sync_skcipher *cipher;
47 unsigned char plain[8]; 47 unsigned char plain[8];
48 s32 code; 48 s32 code;
49 49
50 dprintk("RPC: %s:\n", __func__); 50 dprintk("RPC: %s:\n", __func__);
51 cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 51 cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
52 CRYPTO_ALG_ASYNC);
53 if (IS_ERR(cipher)) 52 if (IS_ERR(cipher))
54 return PTR_ERR(cipher); 53 return PTR_ERR(cipher);
55 54
@@ -68,12 +67,12 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
68 67
69 code = krb5_encrypt(cipher, cksum, plain, buf, 8); 68 code = krb5_encrypt(cipher, cksum, plain, buf, 8);
70out: 69out:
71 crypto_free_skcipher(cipher); 70 crypto_free_sync_skcipher(cipher);
72 return code; 71 return code;
73} 72}
74s32 73s32
75krb5_make_seq_num(struct krb5_ctx *kctx, 74krb5_make_seq_num(struct krb5_ctx *kctx,
76 struct crypto_skcipher *key, 75 struct crypto_sync_skcipher *key,
77 int direction, 76 int direction,
78 u32 seqnum, 77 u32 seqnum,
79 unsigned char *cksum, unsigned char *buf) 78 unsigned char *cksum, unsigned char *buf)
@@ -101,13 +100,12 @@ static s32
101krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, 100krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
102 unsigned char *buf, int *direction, s32 *seqnum) 101 unsigned char *buf, int *direction, s32 *seqnum)
103{ 102{
104 struct crypto_skcipher *cipher; 103 struct crypto_sync_skcipher *cipher;
105 unsigned char plain[8]; 104 unsigned char plain[8];
106 s32 code; 105 s32 code;
107 106
108 dprintk("RPC: %s:\n", __func__); 107 dprintk("RPC: %s:\n", __func__);
109 cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 108 cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
110 CRYPTO_ALG_ASYNC);
111 if (IS_ERR(cipher)) 109 if (IS_ERR(cipher))
112 return PTR_ERR(cipher); 110 return PTR_ERR(cipher);
113 111
@@ -130,7 +128,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
130 *seqnum = ((plain[0] << 24) | (plain[1] << 16) | 128 *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
131 (plain[2] << 8) | (plain[3])); 129 (plain[2] << 8) | (plain[3]));
132out: 130out:
133 crypto_free_skcipher(cipher); 131 crypto_free_sync_skcipher(cipher);
134 return code; 132 return code;
135} 133}
136 134
@@ -142,7 +140,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
142{ 140{
143 s32 code; 141 s32 code;
144 unsigned char plain[8]; 142 unsigned char plain[8];
145 struct crypto_skcipher *key = kctx->seq; 143 struct crypto_sync_skcipher *key = kctx->seq;
146 144
147 dprintk("RPC: krb5_get_seq_num:\n"); 145 dprintk("RPC: krb5_get_seq_num:\n");
148 146
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 39a2e672900b..3d975a4013d2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
174 174
175 now = get_seconds(); 175 now = get_seconds();
176 176
177 blocksize = crypto_skcipher_blocksize(kctx->enc); 177 blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
178 gss_krb5_add_padding(buf, offset, blocksize); 178 gss_krb5_add_padding(buf, offset, blocksize);
179 BUG_ON((buf->len - offset) % blocksize); 179 BUG_ON((buf->len - offset) % blocksize);
180 plainlen = conflen + buf->len - offset; 180 plainlen = conflen + buf->len - offset;
@@ -239,10 +239,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
239 return GSS_S_FAILURE; 239 return GSS_S_FAILURE;
240 240
241 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { 241 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
242 struct crypto_skcipher *cipher; 242 struct crypto_sync_skcipher *cipher;
243 int err; 243 int err;
244 cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 244 cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
245 CRYPTO_ALG_ASYNC); 245 0, 0);
246 if (IS_ERR(cipher)) 246 if (IS_ERR(cipher))
247 return GSS_S_FAILURE; 247 return GSS_S_FAILURE;
248 248
@@ -250,7 +250,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
250 250
251 err = gss_encrypt_xdr_buf(cipher, buf, 251 err = gss_encrypt_xdr_buf(cipher, buf,
252 offset + headlen - conflen, pages); 252 offset + headlen - conflen, pages);
253 crypto_free_skcipher(cipher); 253 crypto_free_sync_skcipher(cipher);
254 if (err) 254 if (err)
255 return GSS_S_FAILURE; 255 return GSS_S_FAILURE;
256 } else { 256 } else {
@@ -327,18 +327,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
327 return GSS_S_BAD_SIG; 327 return GSS_S_BAD_SIG;
328 328
329 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { 329 if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
330 struct crypto_skcipher *cipher; 330 struct crypto_sync_skcipher *cipher;
331 int err; 331 int err;
332 332
333 cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0, 333 cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
334 CRYPTO_ALG_ASYNC); 334 0, 0);
335 if (IS_ERR(cipher)) 335 if (IS_ERR(cipher))
336 return GSS_S_FAILURE; 336 return GSS_S_FAILURE;
337 337
338 krb5_rc4_setup_enc_key(kctx, cipher, seqnum); 338 krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
339 339
340 err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset); 340 err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
341 crypto_free_skcipher(cipher); 341 crypto_free_sync_skcipher(cipher);
342 if (err) 342 if (err)
343 return GSS_S_DEFECTIVE_TOKEN; 343 return GSS_S_DEFECTIVE_TOKEN;
344 } else { 344 } else {
@@ -371,7 +371,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
371 /* Copy the data back to the right position. XXX: Would probably be 371 /* Copy the data back to the right position. XXX: Would probably be
372 * better to copy and encrypt at the same time. */ 372 * better to copy and encrypt at the same time. */
373 373
374 blocksize = crypto_skcipher_blocksize(kctx->enc); 374 blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
375 data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + 375 data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
376 conflen; 376 conflen;
377 orig_start = buf->head[0].iov_base + offset; 377 orig_start = buf->head[0].iov_base + offset;
diff --git a/tools/crypto/getstat.c b/tools/crypto/getstat.c
new file mode 100644
index 000000000000..24115173a483
--- /dev/null
+++ b/tools/crypto/getstat.c
@@ -0,0 +1,294 @@
1/* Heavily copied from libkcapi 2015 - 2017, Stephan Mueller <smueller@chronox.de> */
2#include <errno.h>
3#include <linux/cryptouser.h>
4#include <linux/netlink.h>
5#include <linux/rtnetlink.h>
6#include <sys/types.h>
7#include <sys/socket.h>
8#include <stdlib.h>
9#include <stdio.h>
10#include <string.h>
11#include <time.h>
12#include <unistd.h>
13
14#define CR_RTA(x) ((struct rtattr *)(((char *)(x)) + NLMSG_ALIGN(sizeof(struct crypto_user_alg))))
15
16static int get_stat(const char *drivername)
17{
18 struct {
19 struct nlmsghdr n;
20 struct crypto_user_alg cru;
21 } req;
22 struct sockaddr_nl nl;
23 int sd = 0, ret;
24 socklen_t addr_len;
25 struct iovec iov;
26 struct msghdr msg;
27 char buf[4096];
28 struct nlmsghdr *res_n = (struct nlmsghdr *)buf;
29 struct crypto_user_alg *cru_res = NULL;
30 int res_len = 0;
31 struct rtattr *tb[CRYPTOCFGA_MAX + 1];
32 struct rtattr *rta;
33 struct nlmsgerr *errmsg;
34
35 memset(&req, 0, sizeof(req));
36 memset(&buf, 0, sizeof(buf));
37 memset(&msg, 0, sizeof(msg));
38
39 req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
40 req.n.nlmsg_flags = NLM_F_REQUEST;
41 req.n.nlmsg_type = CRYPTO_MSG_GETSTAT;
42 req.n.nlmsg_seq = time(NULL);
43
44 strncpy(req.cru.cru_driver_name, drivername, strlen(drivername));
45
46 sd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
47 if (sd < 0) {
48 fprintf(stderr, "Netlink error: cannot open netlink socket");
49 return -errno;
50 }
51 memset(&nl, 0, sizeof(nl));
52 nl.nl_family = AF_NETLINK;
53 if (bind(sd, (struct sockaddr *)&nl, sizeof(nl)) < 0) {
54 ret = -errno;
55 fprintf(stderr, "Netlink error: cannot bind netlink socket");
56 goto out;
57 }
58
59 /* sanity check that netlink socket was successfully opened */
60 addr_len = sizeof(nl);
61 if (getsockname(sd, (struct sockaddr *)&nl, &addr_len) < 0) {
62 ret = -errno;
63 printf("Netlink error: cannot getsockname");
64 goto out;
65 }
66 if (addr_len != sizeof(nl)) {
67 ret = -errno;
68 printf("Netlink error: wrong address length %d", addr_len);
69 goto out;
70 }
71 if (nl.nl_family != AF_NETLINK) {
72 ret = -errno;
73 printf("Netlink error: wrong address family %d",
74 nl.nl_family);
75 goto out;
76 }
77
78 memset(&nl, 0, sizeof(nl));
79 nl.nl_family = AF_NETLINK;
80 iov.iov_base = (void *)&req.n;
81 iov.iov_len = req.n.nlmsg_len;
82 msg.msg_name = &nl;
83 msg.msg_namelen = sizeof(nl);
84 msg.msg_iov = &iov;
85 msg.msg_iovlen = 1;
86 if (sendmsg(sd, &msg, 0) < 0) {
87 ret = -errno;
88 printf("Netlink error: sendmsg failed");
89 goto out;
90 }
91 memset(buf, 0, sizeof(buf));
92 iov.iov_base = buf;
93 while (1) {
94 iov.iov_len = sizeof(buf);
95 ret = recvmsg(sd, &msg, 0);
96 if (ret < 0) {
97 if (errno == EINTR || errno == EAGAIN)
98 continue;
99 ret = -errno;
100 printf("Netlink error: netlink receive error");
101 goto out;
102 }
103 if (ret == 0) {
104 ret = -errno;
105 printf("Netlink error: no data");
106 goto out;
107 }
108 if (ret > sizeof(buf)) {
109 ret = -errno;
110 printf("Netlink error: received too much data");
111 goto out;
112 }
113 break;
114 }
115
116 ret = -EFAULT;
117 res_len = res_n->nlmsg_len;
118 if (res_n->nlmsg_type == NLMSG_ERROR) {
119 errmsg = NLMSG_DATA(res_n);
120 fprintf(stderr, "Fail with %d\n", errmsg->error);
121 ret = errmsg->error;
122 goto out;
123 }
124
125 if (res_n->nlmsg_type == CRYPTO_MSG_GETSTAT) {
126 cru_res = NLMSG_DATA(res_n);
127 res_len -= NLMSG_SPACE(sizeof(*cru_res));
128 }
129 if (res_len < 0) {
130 printf("Netlink error: nlmsg len %d\n", res_len);
131 goto out;
132 }
133
134 if (!cru_res) {
135 ret = -EFAULT;
136 printf("Netlink error: no cru_res\n");
137 goto out;
138 }
139
140 rta = CR_RTA(cru_res);
141 memset(tb, 0, sizeof(struct rtattr *) * (CRYPTOCFGA_MAX + 1));
142 while (RTA_OK(rta, res_len)) {
143 if ((rta->rta_type <= CRYPTOCFGA_MAX) && (!tb[rta->rta_type]))
144 tb[rta->rta_type] = rta;
145 rta = RTA_NEXT(rta, res_len);
146 }
147 if (res_len) {
148 printf("Netlink error: unprocessed data %d",
149 res_len);
150 goto out;
151 }
152
153 if (tb[CRYPTOCFGA_STAT_HASH]) {
154 struct rtattr *rta = tb[CRYPTOCFGA_STAT_HASH];
155 struct crypto_stat *rhash =
156 (struct crypto_stat *)RTA_DATA(rta);
157 printf("%s\tHash\n\tHash: %u bytes: %llu\n\tErrors: %u\n",
158 drivername,
159 rhash->stat_hash_cnt, rhash->stat_hash_tlen,
160 rhash->stat_hash_err_cnt);
161 } else if (tb[CRYPTOCFGA_STAT_COMPRESS]) {
162 struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS];
163 struct crypto_stat *rblk =
164 (struct crypto_stat *)RTA_DATA(rta);
165 printf("%s\tCompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
166 drivername,
167 rblk->stat_compress_cnt, rblk->stat_compress_tlen,
168 rblk->stat_decompress_cnt, rblk->stat_decompress_tlen,
169 rblk->stat_compress_err_cnt);
170 } else if (tb[CRYPTOCFGA_STAT_ACOMP]) {
171 struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP];
172 struct crypto_stat *rcomp =
173 (struct crypto_stat *)RTA_DATA(rta);
174 printf("%s\tACompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
175 drivername,
176 rcomp->stat_compress_cnt, rcomp->stat_compress_tlen,
177 rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen,
178 rcomp->stat_compress_err_cnt);
179 } else if (tb[CRYPTOCFGA_STAT_AEAD]) {
180 struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD];
181 struct crypto_stat *raead =
182 (struct crypto_stat *)RTA_DATA(rta);
183 printf("%s\tAEAD\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
184 drivername,
185 raead->stat_encrypt_cnt, raead->stat_encrypt_tlen,
186 raead->stat_decrypt_cnt, raead->stat_decrypt_tlen,
187 raead->stat_aead_err_cnt);
188 } else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) {
189 struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER];
190 struct crypto_stat *rblk =
191 (struct crypto_stat *)RTA_DATA(rta);
192 printf("%s\tCipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
193 drivername,
194 rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
195 rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
196 rblk->stat_cipher_err_cnt);
197 } else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) {
198 struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER];
199 struct crypto_stat *rblk =
200 (struct crypto_stat *)RTA_DATA(rta);
201 printf("%s\tAkcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tSign: %u\n\tVerify: %u\n\tErrors: %u\n",
202 drivername,
203 rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
204 rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
205 rblk->stat_sign_cnt, rblk->stat_verify_cnt,
206 rblk->stat_akcipher_err_cnt);
207 } else if (tb[CRYPTOCFGA_STAT_CIPHER]) {
208 struct rtattr *rta = tb[CRYPTOCFGA_STAT_CIPHER];
209 struct crypto_stat *rblk =
210 (struct crypto_stat *)RTA_DATA(rta);
211 printf("%s\tcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
212 drivername,
213 rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
214 rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
215 rblk->stat_cipher_err_cnt);
216 } else if (tb[CRYPTOCFGA_STAT_RNG]) {
217 struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG];
218 struct crypto_stat *rrng =
219 (struct crypto_stat *)RTA_DATA(rta);
220 printf("%s\tRNG\n\tSeed: %u\n\tGenerate: %u bytes: %llu\n\tErrors: %u\n",
221 drivername,
222 rrng->stat_seed_cnt,
223 rrng->stat_generate_cnt, rrng->stat_generate_tlen,
224 rrng->stat_rng_err_cnt);
225 } else if (tb[CRYPTOCFGA_STAT_KPP]) {
226 struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP];
227 struct crypto_stat *rkpp =
228 (struct crypto_stat *)RTA_DATA(rta);
229 printf("%s\tKPP\n\tSetsecret: %u\n\tGenerate public key: %u\n\tCompute_shared_secret: %u\n\tErrors: %u\n",
230 drivername,
231 rkpp->stat_setsecret_cnt,
232 rkpp->stat_generate_public_key_cnt,
233 rkpp->stat_compute_shared_secret_cnt,
234 rkpp->stat_kpp_err_cnt);
235 } else {
236 fprintf(stderr, "%s is of an unknown algorithm\n", drivername);
237 }
238 ret = 0;
239out:
240 close(sd);
241 return ret;
242}
243
244int main(int argc, const char *argv[])
245{
246 char buf[4096];
247 FILE *procfd;
248 int i, lastspace;
249 int ret;
250
251 procfd = fopen("/proc/crypto", "r");
252 if (!procfd) {
253 ret = errno;
254 fprintf(stderr, "Cannot open /proc/crypto %s\n", strerror(errno));
255 return ret;
256 }
257 if (argc > 1) {
258 if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
259 printf("Usage: %s [-h|--help] display this help\n", argv[0]);
260 printf("Usage: %s display all crypto statistics\n", argv[0]);
261 printf("Usage: %s drivername1 drivername2 ... = display crypto statistics about drivername1 ...\n", argv[0]);
262 return 0;
263 }
264 for (i = 1; i < argc; i++) {
265 ret = get_stat(argv[i]);
266 if (ret) {
267 fprintf(stderr, "Failed with %s\n", strerror(-ret));
268 return ret;
269 }
270 }
271 return 0;
272 }
273
274 while (fgets(buf, sizeof(buf), procfd)) {
275 if (!strncmp(buf, "driver", 6)) {
276 lastspace = 0;
277 i = 0;
278 while (i < strlen(buf)) {
279 i++;
280 if (buf[i] == ' ')
281 lastspace = i;
282 }
283 buf[strlen(buf) - 1] = '\0';
284 ret = get_stat(buf + lastspace + 1);
285 if (ret) {
286 fprintf(stderr, "Failed with %s\n", strerror(-ret));
287 goto out;
288 }
289 }
290 }
291out:
292 fclose(procfd);
293 return ret;
294}
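
The getstat tool added above is a standalone userspace program: it talks to the kernel over an AF_NETLINK/NETLINK_CRYPTO socket using the new CRYPTO_MSG_GETSTAT request and the CRYPTOCFGA_STAT_* attributes from <linux/cryptouser.h>, so it must be built against UAPI headers that already contain those definitions. Invoked with driver names it queries just those; invoked with no arguments it scans /proc/crypto and issues one request per "driver" line. For example (driver name illustrative):

	./getstat
	./getstat sha256-generic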